added poseidon with aws to k8s changes

Elmar Kresse
2024-08-12 10:02:36 +02:00
parent 5376f7a027
commit 254460d64c
60 changed files with 6912 additions and 0 deletions


@@ -0,0 +1,77 @@
package environment

import (
    "context"
    "fmt"

    "github.com/openHPI/poseidon/internal/runner"
    "github.com/openHPI/poseidon/pkg/dto"
)

// AbstractManager is used to have a fallback environment manager in the chain of responsibility
// following the null object pattern.
type AbstractManager struct {
    nextHandler   ManagerHandler
    runnerManager runner.Manager
}

func (n *AbstractManager) SetNextHandler(next ManagerHandler) {
    n.nextHandler = next
}

func (n *AbstractManager) NextHandler() ManagerHandler {
    if n.HasNextHandler() {
        return n.nextHandler
    } else {
        return &AbstractManager{}
    }
}

func (n *AbstractManager) HasNextHandler() bool {
    return n.nextHandler != nil
}

func (n *AbstractManager) List(_ bool) ([]runner.ExecutionEnvironment, error) {
    return []runner.ExecutionEnvironment{}, nil
}

func (n *AbstractManager) Get(_ dto.EnvironmentID, _ bool) (runner.ExecutionEnvironment, error) {
    return nil, runner.ErrRunnerNotFound
}

func (n *AbstractManager) CreateOrUpdate(_ dto.EnvironmentID, _ dto.ExecutionEnvironmentRequest, _ context.Context) (
    bool, error) {
    return false, nil
}

func (n *AbstractManager) Delete(id dto.EnvironmentID) (bool, error) {
    if n.runnerManager == nil {
        return false, nil
    }

    e, ok := n.runnerManager.GetEnvironment(id)
    if !ok {
        isFound, err := n.NextHandler().Delete(id)
        if err != nil {
            return false, fmt.Errorf("abstract wrapped: %w", err)
        }
        return isFound, nil
    }

    n.runnerManager.DeleteEnvironment(id)
    if err := e.Delete(runner.ErrDestroyedByAPIRequest); err != nil {
        return true, fmt.Errorf("could not delete environment: %w", err)
    }
    return true, nil
}

func (n *AbstractManager) Statistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData {
    if n.runnerManager == nil {
        return map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData{}
    }

    statistics := n.NextHandler().Statistics()
    for k, v := range n.runnerManager.EnvironmentStatistics() {
        statistics[k] = v
    }
    return statistics
}
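
The doc comment above names two patterns at once: AbstractManager is the null object that terminates the chain (every query answers with an empty or not-found result), while concrete managers embed it and delegate whatever they don't own to NextHandler(). A minimal, self-contained Go sketch of that interplay, using generic names rather than the actual Poseidon types:

    // chain.go: a minimal sketch of the fallback chain (generic names,
    // not the Poseidon API itself).
    package main

    import (
        "errors"
        "fmt"
    )

    var errNotFound = errors.New("environment not found")

    type handler interface {
        Get(id int) (string, error)
        setNext(next handler)
        next() handler
    }

    // nullHandler plays the role of AbstractManager: it ends the chain by
    // answering every lookup with a not-found error.
    type nullHandler struct{ nextHandler handler }

    func (n *nullHandler) setNext(next handler) { n.nextHandler = next }

    func (n *nullHandler) next() handler {
        if n.nextHandler != nil {
            return n.nextHandler
        }
        // Fall back to a fresh null object so callers never need nil checks.
        return &nullHandler{}
    }

    func (n *nullHandler) Get(int) (string, error) { return "", errNotFound }

    // k8sHandler plays the role of KubernetesEnvironmentManager: it answers
    // for the environments it owns and delegates everything else.
    type k8sHandler struct {
        nullHandler
        owned map[int]string
    }

    func (k *k8sHandler) Get(id int) (string, error) {
        if image, ok := k.owned[id]; ok {
            return image, nil
        }
        return k.next().Get(id)
    }

    func main() {
        manager := &k8sHandler{owned: map[int]string{1: "python:3.8"}}
        fmt.Println(manager.Get(1)) // python:3.8 <nil>
        fmt.Println(manager.Get(2)) // "" environment not found
    }

Because NextHandler() falls back to a fresh null object whenever no successor is set, delegating code never has to guard against a nil handler.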


@@ -0,0 +1,139 @@
package environment

import (
    "encoding/json"
    "fmt"

    "github.com/openHPI/poseidon/internal/runner"
    "github.com/openHPI/poseidon/pkg/dto"
    "k8s.io/client-go/kubernetes"
)

type KubernetesEnvironment struct {
    id              dto.EnvironmentID
    image           string
    cpuLimit        uint
    memoryLimit     uint
    networkEnabled  bool
    mappedPorts     []uint16
    prewarmingPool  uint
    onDestroyRunner runner.DestroyRunnerHandler
    clientset       *kubernetes.Clientset
}

func NewKubernetesEnvironment(onDestroyRunner runner.DestroyRunnerHandler, clientset *kubernetes.Clientset) *KubernetesEnvironment {
    return &KubernetesEnvironment{
        onDestroyRunner: onDestroyRunner,
        clientset:       clientset,
        cpuLimit:        500, // Default CPU limit (in millicores).
        memoryLimit:     512, // Default memory limit (in MB).
        networkEnabled:  false,
        prewarmingPool:  1,
    }
}

func (k *KubernetesEnvironment) MarshalJSON() ([]byte, error) {
    res, err := json.Marshal(dto.ExecutionEnvironmentData{
        ID:                          int(k.ID()),
        ExecutionEnvironmentRequest: dto.ExecutionEnvironmentRequest{Image: k.Image()},
    })
    if err != nil {
        return res, fmt.Errorf("couldn't marshal kubernetes execution environment: %w", err)
    }
    return res, nil
}

func (k *KubernetesEnvironment) ID() dto.EnvironmentID {
    return k.id
}

func (k *KubernetesEnvironment) SetID(id dto.EnvironmentID) {
    k.id = id
}

func (k *KubernetesEnvironment) Image() string {
    return k.image
}

func (k *KubernetesEnvironment) SetImage(image string) {
    k.image = image
}

func (k *KubernetesEnvironment) Delete(_ runner.DestroyReason) error {
    // Implement Kubernetes-specific deletion logic here.
    return nil
}

func (k *KubernetesEnvironment) Sample() (r runner.Runner, ok bool) {
    workload, err := runner.NewKubernetesPodWorkload(k, k.onDestroyRunner, k.clientset)
    if err != nil {
        return nil, false
    }
    return workload, true
}

func (k *KubernetesEnvironment) IdleRunnerCount() uint {
    // Implement logic to count idle runners in Kubernetes.
    return 0
}

func (k *KubernetesEnvironment) PrewarmingPoolSize() uint {
    return k.prewarmingPool
}

func (k *KubernetesEnvironment) SetPrewarmingPoolSize(size uint) {
    k.prewarmingPool = size
}

func (k *KubernetesEnvironment) ApplyPrewarmingPoolSize() error {
    // Implement logic to apply the prewarming pool size in Kubernetes.
    return nil
}

func (k *KubernetesEnvironment) CPULimit() uint {
    return k.cpuLimit
}

func (k *KubernetesEnvironment) SetCPULimit(limit uint) {
    k.cpuLimit = limit
}

func (k *KubernetesEnvironment) MemoryLimit() uint {
    return k.memoryLimit
}

func (k *KubernetesEnvironment) SetMemoryLimit(limit uint) {
    k.memoryLimit = limit
}

func (k *KubernetesEnvironment) NetworkAccess() (enabled bool, mappedPorts []uint16) {
    return k.networkEnabled, k.mappedPorts
}

func (k *KubernetesEnvironment) SetNetworkAccess(enabled bool, ports []uint16) {
    k.networkEnabled = enabled
    k.mappedPorts = ports
}

func (k *KubernetesEnvironment) SetConfigFrom(env runner.ExecutionEnvironment) {
    if kEnv, ok := env.(*KubernetesEnvironment); ok {
        k.cpuLimit = kEnv.cpuLimit
        k.memoryLimit = kEnv.memoryLimit
        k.networkEnabled = kEnv.networkEnabled
        k.mappedPorts = kEnv.mappedPorts
        k.prewarmingPool = kEnv.prewarmingPool
    }
}

func (k *KubernetesEnvironment) Register() error {
    // Implement Kubernetes-specific registration logic here.
    return nil
}

func (k *KubernetesEnvironment) AddRunner(_ runner.Runner) {
    // Implement logic to add a runner to the Kubernetes environment.
}

func (k *KubernetesEnvironment) DeleteRunner(id string) (r runner.Runner, ok bool) {
    // Implement logic to delete a runner from the Kubernetes environment.
    return nil, false
}
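
Several methods above are still stubs in this commit (Delete, Register, ApplyPrewarmingPoolSize). As one hypothetical direction, the stored limits could be translated into Kubernetes resource terms when building pod specs; this helper is an assumption for illustration, not part of the diff:

    package environment

    import (
        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    // resourceRequirements is a hypothetical helper (not in this commit)
    // mapping the stored limits (cpuLimit in millicores, memoryLimit in MB)
    // onto Kubernetes resource requirements for a pod spec.
    func (k *KubernetesEnvironment) resourceRequirements() corev1.ResourceRequirements {
        limits := corev1.ResourceList{
            corev1.ResourceCPU:    *resource.NewMilliQuantity(int64(k.cpuLimit), resource.DecimalSI),
            corev1.ResourceMemory: *resource.NewQuantity(int64(k.memoryLimit)*1024*1024, resource.BinarySI),
        }
        // Requests mirror the limits here for simplicity; a real setup
        // might request less than it allows.
        return corev1.ResourceRequirements{Limits: limits, Requests: limits}
    }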


@@ -0,0 +1,71 @@
package environment

import (
    "context"
    "fmt"

    "github.com/openHPI/poseidon/internal/config"
    "github.com/openHPI/poseidon/internal/runner"
    "github.com/openHPI/poseidon/pkg/dto"
    "k8s.io/client-go/kubernetes"
)

// KubernetesEnvironmentManager manages Kubernetes environments.
type KubernetesEnvironmentManager struct {
    *AbstractManager
    clientSet *kubernetes.Clientset
}

func NewKubernetesEnvironmentManager(runnerManager runner.Manager, clientset *kubernetes.Clientset) *KubernetesEnvironmentManager {
    return &KubernetesEnvironmentManager{
        AbstractManager: &AbstractManager{nil, runnerManager},
        clientSet:       clientset,
    }
}

func (k *KubernetesEnvironmentManager) List(fetch bool) ([]runner.ExecutionEnvironment, error) {
    list, err := k.NextHandler().List(fetch)
    if err != nil {
        return nil, fmt.Errorf("kubernetes wrapped: %w", err)
    }
    return append(list, k.runnerManager.ListEnvironments()...), nil
}

func (k *KubernetesEnvironmentManager) Get(id dto.EnvironmentID, fetch bool) (runner.ExecutionEnvironment, error) {
    e, ok := k.runnerManager.GetEnvironment(id)
    if ok {
        return e, nil
    } else {
        e, err := k.NextHandler().Get(id, fetch)
        if err != nil {
            return nil, fmt.Errorf("kubernetes wrapped: %w", err)
        }
        return e, nil
    }
}

func (k *KubernetesEnvironmentManager) CreateOrUpdate(
    id dto.EnvironmentID, request dto.ExecutionEnvironmentRequest, ctx context.Context) (bool, error) {
    // Requests for non-Kubernetes images are passed down the chain.
    if !isKubernetesEnvironment(request) {
        isCreated, err := k.NextHandler().CreateOrUpdate(id, request, ctx)
        if err != nil {
            return false, fmt.Errorf("kubernetes wrapped: %w", err)
        }
        return isCreated, nil
    }

    _, ok := k.runnerManager.GetEnvironment(id)
    e := NewKubernetesEnvironment(k.runnerManager.Return, k.clientSet)
    e.SetID(id)
    e.SetImage(request.Image)
    k.runnerManager.StoreEnvironment(e)
    return !ok, nil // true iff the environment did not exist before
}

func isKubernetesEnvironment(request dto.ExecutionEnvironmentRequest) bool {
    for _, image := range config.Config.Kubernetes.Images {
        if request.Image == image {
            return true
        }
    }
    return false
}
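
Nothing in this diff shows where the manager is instantiated. A sketch of plausible startup wiring, assuming the runner manager and clientset are provided by the caller (the enclosing package name is arbitrary):

    package app

    import (
        "k8s.io/client-go/kubernetes"

        "github.com/openHPI/poseidon/internal/environment"
        "github.com/openHPI/poseidon/internal/runner"
    )

    // newEnvironmentManager sketches possible startup wiring; the actual
    // wiring is not part of this diff, and both inputs are assumed to be
    // initialized elsewhere.
    func newEnvironmentManager(runnerManager runner.Manager, clientset *kubernetes.Clientset) environment.ManagerHandler {
        manager := environment.NewKubernetesEnvironmentManager(runnerManager, clientset)
        // Explicitly terminate the chain with the null object; NextHandler()
        // would fall back to one anyway, but this makes the end visible.
        manager.SetNextHandler(&environment.AbstractManager{})
        return manager
    }

Terminating the chain explicitly is optional, since NextHandler() already substitutes a fresh null object when no successor is set.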


@@ -0,0 +1,43 @@
package environment

import (
    "context"

    "github.com/openHPI/poseidon/internal/runner"
    "github.com/openHPI/poseidon/pkg/dto"
)

// ManagerHandler is one handler in the chain of responsibility of environment managers.
// Each manager can handle different requests.
type ManagerHandler interface {
    Manager
    SetNextHandler(next ManagerHandler)
    NextHandler() ManagerHandler
    HasNextHandler() bool
}

// Manager encapsulates API calls to the executor API for creation and deletion of execution environments.
type Manager interface {
    // List returns all environments known by Poseidon.
    // When `fetch` is set, the environments are fetched from the executor before returning.
    List(fetch bool) ([]runner.ExecutionEnvironment, error)

    // Get returns the details of the requested environment.
    // When `fetch` is set, the requested environment is fetched from the executor before returning.
    Get(id dto.EnvironmentID, fetch bool) (runner.ExecutionEnvironment, error)

    // CreateOrUpdate creates/updates an execution environment on the executor.
    // If the job was created, the returned boolean is true; if it was updated, it is false.
    // If err is not nil, the environment was neither created nor updated.
    CreateOrUpdate(
        id dto.EnvironmentID,
        request dto.ExecutionEnvironmentRequest,
        ctx context.Context,
    ) (bool, error)

    // Delete removes the specified execution environment.
    // Iff the specified environment could not be found, Delete returns false.
    Delete(id dto.EnvironmentID) (bool, error)

    // Statistics returns statistical data for each execution environment.
    Statistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData
}
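
The boolean conventions in CreateOrUpdate and Delete are what API handlers would map onto HTTP status codes. A hypothetical caller, not part of this commit, might look like:

    package api

    import (
        "context"
        "net/http"

        "github.com/openHPI/poseidon/internal/environment"
        "github.com/openHPI/poseidon/pkg/dto"
    )

    // handleCreateOrUpdate is a hypothetical handler illustrating the
    // boolean contract of Manager.CreateOrUpdate; it is not in this diff.
    func handleCreateOrUpdate(ctx context.Context, manager environment.Manager,
        id dto.EnvironmentID, request dto.ExecutionEnvironmentRequest, writer http.ResponseWriter,
    ) {
        created, err := manager.CreateOrUpdate(id, request, ctx)
        switch {
        case err != nil:
            // Neither created nor updated.
            writer.WriteHeader(http.StatusInternalServerError)
        case created:
            writer.WriteHeader(http.StatusCreated)
        default:
            // The environment already existed and was updated in place.
            writer.WriteHeader(http.StatusNoContent)
        }
    }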


@@ -0,0 +1,84 @@
// This is the default job configuration that is used when no path to another default configuration is given
job "template-0" {
  datacenters = ["dc1"]
  type        = "batch"

  group "default-group" {
    ephemeral_disk {
      migrate = false
      size    = 10
      sticky  = false
    }
    count = 1
    spread {
      // see https://www.nomadproject.io/docs/job-specification/spread#even-spread-across-data-center
      // This spreads the load evenly amongst our nodes
      attribute = "${node.unique.name}"
      weight    = 100
    }

    restart {
      attempts = 3
      delay    = "15s"
      interval = "1h"
      mode     = "fail"
    }

    reschedule {
      unlimited      = false
      attempts       = 3
      interval       = "6h"
      delay          = "1m"
      max_delay      = "4m"
      delay_function = "exponential"
    }

    task "default-task" {
      driver       = "docker"
      kill_timeout = "0s"
      kill_signal  = "SIGKILL"

      config {
        image        = "openhpi/docker_exec_phusion"
        command      = "sleep"
        args         = ["infinity"]
        network_mode = "none"
      }

      logs {
        max_files     = 1
        max_file_size = 1
      }

      resources {
        cpu    = 40
        memory = 30
      }
    }
  }

  group "config" {
    // We want to store whether a task is in use in order to recover from a downtime.
    // Without a separate config task, marking a task as used would result in a restart of that task,
    // as the meta information is passed to the container as environment variables.
    count = 0

    task "config" {
      driver = "exec"

      config {
        command = "true"
      }

      logs {
        max_files     = 1
        max_file_size = 1
      }

      resources {
        // minimum values
        cpu    = 1
        memory = 10
      }
    }

    meta {
      used               = "false"
      prewarmingPoolSize = "0"
    }
  }
}
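
Because the config group has count = 0, it never schedules a task, so its meta block can be rewritten without restarting anything. A hedged sketch of reading that metadata back via the official Nomad Go client (github.com/hashicorp/nomad/api; the job ID "template-0" matches the template above and would differ for derived jobs):

    package main

    import (
        "fmt"
        "log"

        nomadApi "github.com/hashicorp/nomad/api"
    )

    func main() {
        client, err := nomadApi.NewClient(nomadApi.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }
        job, _, err := client.Jobs().Info("template-0", nil)
        if err != nil {
            log.Fatal(err)
        }
        for _, group := range job.TaskGroups {
            if group.Name != nil && *group.Name == "config" {
                // Reading the meta block does not disturb running tasks,
                // because the config group schedules none (count = 0).
                fmt.Println("used:", group.Meta["used"])
                fmt.Println("prewarmingPoolSize:", group.Meta["prewarmingPoolSize"])
            }
        }
    }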