added poseidon with aws to k8s changes

Elmar Kresse
2024-08-12 10:02:36 +02:00
parent 5376f7a027
commit 254460d64c
60 changed files with 6912 additions and 0 deletions

View File

@@ -0,0 +1,125 @@
package runner
import (
"context"
"errors"
"fmt"
"github.com/influxdata/influxdb-client-go/v2/api/write"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/monitoring"
"github.com/openHPI/poseidon/pkg/storage"
"time"
)
var ErrNullObject = errors.New("functionality not available for the null object")
// AbstractManager provides a fallback runner manager in the chain of responsibility,
// following the null object pattern.
// Remember: every function that can delegate to the NextHandler should do so (see AccessorHandler).
type AbstractManager struct {
nextHandler AccessorHandler
environments storage.Storage[ExecutionEnvironment]
usedRunners storage.Storage[Runner]
}
// NewAbstractManager creates a new abstract runner manager that keeps track of all runners of one kind.
// Since this manager is currently directly bound to the lifespan of Poseidon, it does not need a context cancel.
func NewAbstractManager(ctx context.Context) *AbstractManager {
return &AbstractManager{
environments: storage.NewMonitoredLocalStorage[ExecutionEnvironment](
monitoring.MeasurementEnvironments, monitorEnvironmentData, 0, ctx),
usedRunners: storage.NewMonitoredLocalStorage[Runner](
monitoring.MeasurementUsedRunner, MonitorRunnersEnvironmentID, time.Hour, ctx),
}
}
// MonitorEnvironmentID adds the passed environment id to the monitoring Point p.
func MonitorEnvironmentID[T any](id dto.EnvironmentID) storage.WriteCallback[T] {
return func(p *write.Point, _ T, _ storage.EventType) {
p.AddTag(monitoring.InfluxKeyEnvironmentID, id.ToString())
}
}
// MonitorRunnersEnvironmentID passes the environment id of the runner e into the monitoring Point p.
func MonitorRunnersEnvironmentID(p *write.Point, e Runner, _ storage.EventType) {
if e != nil {
p.AddTag(monitoring.InfluxKeyEnvironmentID, e.Environment().ToString())
}
}
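// Illustrative sketch (not part of this commit): MonitorEnvironmentID is the generic
// counterpart for storages that only ever hold entries of one known environment.
// The measurement name below is an assumption for illustration, not a constant from this diff.
func newPerEnvironmentRunnerStorage(ctx context.Context, id dto.EnvironmentID) storage.Storage[Runner] {
    return storage.NewMonitoredLocalStorage[Runner](
        "poseidon_environment_runners",   // assumed measurement name
        MonitorEnvironmentID[Runner](id), // tags every point with this environment's id
        time.Hour, ctx)
}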
func (n *AbstractManager) SetNextHandler(next AccessorHandler) {
n.nextHandler = next
}
func (n *AbstractManager) NextHandler() AccessorHandler {
if n.HasNextHandler() {
return n.nextHandler
} else {
ctx, cancel := context.WithCancel(context.Background())
cancel()
return NewAbstractManager(ctx)
}
}
func (n *AbstractManager) HasNextHandler() bool {
return n.nextHandler != nil
}
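// Illustrative sketch (not part of this commit): without a next handler, NextHandler()
// hands out the null object - a fresh AbstractManager whose context is already
// canceled - so calls like Claim fail fast with ErrNullObject instead of panicking
// on a nil handler.
func exampleNullObjectFallback(ctx context.Context) {
    manager := NewAbstractManager(ctx)
    _, err := manager.NextHandler().Claim(dto.EnvironmentID(0), 60)
    fmt.Println(errors.Is(err, ErrNullObject)) // true
}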
func (n *AbstractManager) ListEnvironments() []ExecutionEnvironment {
return n.environments.List()
}
func (n *AbstractManager) GetEnvironment(id dto.EnvironmentID) (ExecutionEnvironment, bool) {
return n.environments.Get(id.ToString())
}
func (n *AbstractManager) StoreEnvironment(environment ExecutionEnvironment) {
n.environments.Add(environment.ID().ToString(), environment)
}
func (n *AbstractManager) DeleteEnvironment(id dto.EnvironmentID) {
n.environments.Delete(id.ToString())
}
func (n *AbstractManager) EnvironmentStatistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData {
environments := make(map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData)
for _, e := range n.environments.List() {
environments[e.ID()] = &dto.StatisticalExecutionEnvironmentData{
ID: int(e.ID()),
PrewarmingPoolSize: e.PrewarmingPoolSize(),
IdleRunners: e.IdleRunnerCount(),
UsedRunners: 0, // Increased below.
}
}
for _, r := range n.usedRunners.List() {
environments[r.Environment()].UsedRunners++
}
return environments
}
func (n *AbstractManager) Claim(_ dto.EnvironmentID, _ int) (Runner, error) {
return nil, ErrNullObject
}
func (n *AbstractManager) Get(runnerID string) (Runner, error) {
runner, ok := n.usedRunners.Get(runnerID)
if ok {
return runner, nil
}
if !n.HasNextHandler() {
return nil, ErrRunnerNotFound
}
r, err := n.NextHandler().Get(runnerID)
if err != nil {
return r, fmt.Errorf("abstract manager wrapped: %w", err)
}
return r, nil
}
func (n *AbstractManager) Return(_ Runner) error {
return nil
}

View File

@@ -0,0 +1,66 @@
package runner
import (
"encoding/json"
"github.com/influxdata/influxdb-client-go/v2/api/write"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/storage"
"strconv"
)
// An ExecutionEnvironment is a group of runners that share the configuration stored in the environment.
type ExecutionEnvironment interface {
json.Marshaler
// ID returns the id of the environment.
ID() dto.EnvironmentID
SetID(id dto.EnvironmentID)
// PrewarmingPoolSize returns the number of idle runners of this environment that should be prewarmed.
PrewarmingPoolSize() uint
SetPrewarmingPoolSize(count uint)
// ApplyPrewarmingPoolSize creates idle runners according to the PrewarmingPoolSize.
ApplyPrewarmingPoolSize() error
// CPULimit returns the share of cpu that a runner receives at minimum.
CPULimit() uint
SetCPULimit(limit uint)
// MemoryLimit returns the amount of memory that is available for each runner.
MemoryLimit() uint
SetMemoryLimit(limit uint)
// Image returns the image of the runner, e.g. the Docker image.
Image() string
SetImage(image string)
// NetworkAccess returns whether a runner has network access and which ports are mapped.
NetworkAccess() (bool, []uint16)
SetNetworkAccess(allow bool, ports []uint16)
// SetConfigFrom copies all above attributes from the passed environment to the object itself.
SetConfigFrom(environment ExecutionEnvironment)
// Register saves this environment at the executor.
Register() error
// Delete removes this environment and all its runners from the executor and Poseidon itself.
// Iff local, the environment is just removed from Poseidon without external escalation.
Delete(reason DestroyReason) error
// Sample returns and removes an arbitrary available runner.
// ok is true iff a runner was returned.
Sample() (r Runner, ok bool)
// AddRunner adds an existing runner to the idle runners of the environment.
AddRunner(r Runner)
// DeleteRunner removes an idle runner from the environment and returns it.
// This function handles only the environment. The runner has to be destroyed separately.
// ok is true iff the runner was found (and deleted).
DeleteRunner(id string) (r Runner, ok bool)
// IdleRunnerCount returns the number of idle runners of the environment.
IdleRunnerCount() uint
}
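// Illustrative sketch (not part of this commit): the idle-pool handshake the runner
// managers build on. Sample takes an arbitrary idle runner out of the environment;
// AddRunner hands it back once it is idle again. ErrNoRunnersAvailable is defined
// alongside the Kubernetes manager in this package.
func claimIdleRunner(environment ExecutionEnvironment) (Runner, error) {
    r, ok := environment.Sample()
    if !ok {
        return nil, ErrNoRunnersAvailable
    }
    // ... use r; once finished, either destroy it or recycle it:
    // environment.AddRunner(r)
    return r, nil
}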
// monitorEnvironmentData passes the configuration of the environment e into the monitoring Point p.
func monitorEnvironmentData(p *write.Point, e ExecutionEnvironment, eventType storage.EventType) {
if eventType == storage.Creation && e != nil {
p.AddTag("image", e.Image())
p.AddTag("cpu_limit", strconv.Itoa(int(e.CPULimit())))
p.AddTag("memory_limit", strconv.Itoa(int(e.MemoryLimit())))
hasNetworkAccess, _ := e.NetworkAccess()
p.AddTag("network_access", strconv.FormatBool(hasNetworkAccess))
}
}

View File

@@ -0,0 +1,111 @@
package runner
import (
"errors"
"github.com/openHPI/poseidon/pkg/dto"
"sync"
"time"
)
// InactivityTimer is a wrapper around a timer that is used to delete a Runner after some time of inactivity.
type InactivityTimer interface {
// SetupTimeout starts the timeout after which the runner gets deleted.
SetupTimeout(duration time.Duration)
// ResetTimeout resets the current timeout so that the runner gets deleted after the duration set in SetupTimeout, counted from now.
// It does not make an already expired timer run again.
ResetTimeout()
// StopTimeout stops the timeout but does not remove the runner.
StopTimeout()
// TimeoutPassed returns true if the timeout expired and false otherwise.
TimeoutPassed() bool
}
type TimerState uint8
const (
TimerInactive TimerState = 0
TimerRunning TimerState = 1
TimerExpired TimerState = 2
)
var (
ErrorRunnerInactivityTimeout DestroyReason = errors.New("runner inactivity timeout exceeded")
ErrorExecutionTimeout = errors.New("execution timeout exceeded")
)
type InactivityTimerImplementation struct {
timer *time.Timer
duration time.Duration
state TimerState
runner Runner
onDestroy DestroyRunnerHandler
mu sync.Mutex
}
func NewInactivityTimer(runner Runner, onDestroy DestroyRunnerHandler) InactivityTimer {
return &InactivityTimerImplementation{
state: TimerInactive,
runner: runner,
onDestroy: onDestroy,
}
}
func (t *InactivityTimerImplementation) SetupTimeout(duration time.Duration) {
t.mu.Lock()
defer t.mu.Unlock()
// Stop old timer if present.
if t.timer != nil {
t.timer.Stop()
}
if duration == 0 {
t.state = TimerInactive
return
}
t.state = TimerRunning
t.duration = duration
t.timer = time.AfterFunc(duration, func() {
t.mu.Lock()
t.state = TimerExpired
// The mutex must be unlocked here already in order to avoid a deadlock with the call to StopTimeout in Manager.Return.
t.mu.Unlock()
err := t.onDestroy(t.runner)
if err != nil {
log.WithError(err).WithField(dto.KeyRunnerID, t.runner.ID()).
Warn("Returning runner after inactivity caused an error")
} else {
log.WithField(dto.KeyRunnerID, t.runner.ID()).Info("Returning runner due to inactivity timeout")
}
})
}
func (t *InactivityTimerImplementation) ResetTimeout() {
t.mu.Lock()
defer t.mu.Unlock()
if t.state != TimerRunning {
// The timer has already expired or been stopped. We don't want to restart it.
return
}
if t.timer.Stop() {
t.timer.Reset(t.duration)
} else {
log.Error("Timer is in state running but stopped. This should never happen")
}
}
func (t *InactivityTimerImplementation) StopTimeout() {
t.mu.Lock()
defer t.mu.Unlock()
if t.state != TimerRunning {
return
}
t.timer.Stop()
t.state = TimerInactive
}
func (t *InactivityTimerImplementation) TimeoutPassed() bool {
return t.state == TimerExpired
}
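// Illustrative sketch (not part of this commit): the intended timer lifecycle.
// A runner is given an inactivity budget on claim, the budget is refreshed on
// every client interaction, and expiry is checked before reuse. The ten-minute
// duration is an arbitrary example value.
func exampleTimerLifecycle(r Runner) {
    r.SetupTimeout(10 * time.Minute) // destroy after 10 minutes of inactivity
    r.ResetTimeout()                 // call on every client interaction
    if r.TimeoutPassed() {
        log.WithField(dto.KeyRunnerID, r.ID()).Info("runner already timed out")
    }
}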

View File

@@ -0,0 +1,70 @@
package runner
import (
"context"
"errors"
"fmt"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/logging"
"k8s.io/client-go/kubernetes"
"time"
)
var (
log = logging.GetLogger("runner")
ErrUnknownExecutionEnvironment = errors.New("execution environment not found")
ErrNoRunnersAvailable = errors.New("no runners available for this execution environment")
ErrRunnerNotFound = errors.New("no runner found with this id")
)
type KubernetesRunnerManager struct {
*AbstractManager
clientSet *kubernetes.Clientset
}
// NewKubernetesRunnerManager creates a new runner manager that keeps track of all runners in Kubernetes.
func NewKubernetesRunnerManager(ctx context.Context, clientSet *kubernetes.Clientset) *KubernetesRunnerManager {
return &KubernetesRunnerManager{
AbstractManager: NewAbstractManager(ctx),
clientSet: clientSet,
}
}
func (k *KubernetesRunnerManager) Claim(id dto.EnvironmentID, duration int) (Runner, error) {
environment, ok := k.GetEnvironment(id)
if !ok {
r, err := k.NextHandler().Claim(id, duration)
if err != nil {
return nil, fmt.Errorf("kubernetes wrapped: %w", err)
}
return r, nil
}
runner, ok := environment.Sample()
if !ok {
log.Warn("no kubernetes runner available")
return nil, ErrNoRunnersAvailable
}
k.usedRunners.Add(runner.ID(), runner)
runner.SetupTimeout(time.Duration(duration) * time.Second)
// Here you might want to add Kubernetes-specific logic
// For example, updating the pod status or adding labels
return runner, nil
}
func (k *KubernetesRunnerManager) Return(r Runner) error {
_, isKubernetesRunner := r.(*KubernetesPodWorkload)
if isKubernetesRunner {
k.usedRunners.Delete(r.ID())
// Here you might want to add Kubernetes-specific logic
// For example, cleaning up the pod or updating its status
} else if err := k.NextHandler().Return(r); err != nil {
return fmt.Errorf("kubernetes wrapped: %w", err)
}
return nil
}
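// Illustrative sketch (not part of this commit): building the clientset this manager
// expects, assuming Poseidon runs inside the cluster. Requires k8s.io/client-go/rest
// in the import block; error handling is simplified.
func newInClusterRunnerManager(ctx context.Context) (*KubernetesRunnerManager, error) {
    config, err := rest.InClusterConfig()
    if err != nil {
        return nil, fmt.Errorf("failed loading in-cluster config: %w", err)
    }
    clientSet, err := kubernetes.NewForConfig(config)
    if err != nil {
        return nil, fmt.Errorf("failed creating clientset: %w", err)
    }
    return NewKubernetesRunnerManager(ctx, clientSet), nil
}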

View File

@@ -0,0 +1,251 @@
package runner
import (
"context"
"errors"
"fmt"
"github.com/google/uuid"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/monitoring"
"github.com/openHPI/poseidon/pkg/storage"
"io"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"net/http"
"time"
)
var ErrPodCreationFailed = errors.New("failed to create pod")
var (
ErrorUnknownExecution = errors.New("unknown execution")
ErrFileNotFound = errors.New("file not found or insufficient permissions")
ErrOOMKilled DestroyReason = errors.New("the runner was killed due to out of memory")
ErrDestroyedByAPIRequest DestroyReason = errors.New("the client wants to stop the runner")
)
// KubernetesPodWorkload is an abstraction to manage a Kubernetes pod.
// It is not persisted on a Poseidon restart.
// The InactivityTimer is used actively. It stops and deletes the pod.
type KubernetesPodWorkload struct {
InactivityTimer
id string
fs map[dto.FilePath][]byte
executions storage.Storage[*dto.ExecutionRequest]
runningExecutions map[string]context.CancelFunc
onDestroy DestroyRunnerHandler
environment ExecutionEnvironment
ctx context.Context
cancel context.CancelFunc
clientset *kubernetes.Clientset
podName string
namespace string
}
// NewKubernetesPodWorkload creates a new KubernetesPodWorkload with a freshly generated id.
func NewKubernetesPodWorkload(
environment ExecutionEnvironment, onDestroy DestroyRunnerHandler, clientset *kubernetes.Clientset) (*KubernetesPodWorkload, error) {
newUUID, err := uuid.NewUUID()
if err != nil {
return nil, fmt.Errorf("failed generating runner id: %w", err)
}
ctx, cancel := context.WithCancel(context.Background())
workload := &KubernetesPodWorkload{
id: newUUID.String(),
fs: make(map[dto.FilePath][]byte),
runningExecutions: make(map[string]context.CancelFunc),
onDestroy: onDestroy,
environment: environment,
ctx: ctx,
cancel: cancel,
clientset: clientset,
namespace: "default", // You might want to make this configurable
podName: fmt.Sprintf("workload-%s", newUUID.String()),
}
workload.executions = storage.NewMonitoredLocalStorage[*dto.ExecutionRequest](
monitoring.MeasurementExecutionsK8s, monitorExecutionsRunnerID(environment.ID(), workload.id), time.Minute, ctx)
workload.InactivityTimer = NewInactivityTimer(workload, func(_ Runner) error {
return workload.Destroy(nil)
})
return workload, nil
}
func (w *KubernetesPodWorkload) ID() string {
return w.id
}
func (w *KubernetesPodWorkload) Environment() dto.EnvironmentID {
return w.environment.ID()
}
func (w *KubernetesPodWorkload) MappedPorts() []*dto.MappedPort {
// Implement port mapping logic for Kubernetes
return []*dto.MappedPort{}
}
func (w *KubernetesPodWorkload) StoreExecution(id string, request *dto.ExecutionRequest) {
w.executions.Add(id, request)
}
func (w *KubernetesPodWorkload) ExecutionExists(id string) bool {
_, ok := w.executions.Get(id)
return ok
}
// ExecuteInteractively runs the execution request in a Kubernetes pod.
func (w *KubernetesPodWorkload) ExecuteInteractively(
id string, _ io.ReadWriter, stdout, stderr io.Writer, ctx context.Context) (
<-chan ExitInfo, context.CancelFunc, error) {
w.ResetTimeout()
request, ok := w.executions.Pop(id)
if !ok {
return nil, nil, ErrorUnknownExecution
}
hideEnvironmentVariablesK8s(request, "K8S")
command, executionCtx, cancel := prepareExecution(request, w.ctx)
exitInternal := make(chan ExitInfo)
exit := make(chan ExitInfo, 1)
go w.executeCommand(executionCtx, command, stdout, stderr, exitInternal)
go w.handleRunnerTimeout(executionCtx, exitInternal, exit, id)
return exit, cancel, nil
}
func (w *KubernetesPodWorkload) ListFileSystem(path string, recursive bool, writer io.Writer, privilegedExecution bool, ctx context.Context) error {
// Implement file system listing for Kubernetes pods
return dto.ErrNotSupported
}
func (w *KubernetesPodWorkload) UpdateFileSystem(request *dto.UpdateFileSystemRequest, ctx context.Context) error {
// Implement file system update for Kubernetes pods
return nil
}
func (w *KubernetesPodWorkload) GetFileContent(path string, writer http.ResponseWriter, privilegedExecution bool, ctx context.Context) error {
// Implement file content retrieval for Kubernetes pods
return dto.ErrNotSupported
}
func (w *KubernetesPodWorkload) Destroy(_ DestroyReason) error {
w.cancel()
err := w.clientset.CoreV1().Pods(w.namespace).Delete(context.Background(), w.podName, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("error while destroying kubernetes pod: %w", err)
}
if err := w.onDestroy(w); err != nil {
return fmt.Errorf("error while destroying kubernetes runner: %w", err)
}
return nil
}
func (w *KubernetesPodWorkload) executeCommand(ctx context.Context, command string,
stdout, stderr io.Writer, exit chan<- ExitInfo,
) {
defer close(exit)
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: w.podName,
},
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
Containers: []corev1.Container{
{
Name: "workload",
Image: w.environment.Image(),
Command: []string{"/bin/sh", "-c", command},
},
},
},
}
_, err := w.clientset.CoreV1().Pods(w.namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
exit <- ExitInfo{1, fmt.Errorf("%w: %v", ErrPodCreationFailed, err)}
return
}
req := w.clientset.CoreV1().Pods(w.namespace).GetLogs(w.podName, &corev1.PodLogOptions{
Follow: true,
})
podLogs, err := req.Stream(ctx)
if err != nil {
exit <- ExitInfo{1, fmt.Errorf("error in opening stream: %v", err)}
return
}
defer func() {
    // Only log close errors: the unbuffered exit channel is read at most once,
    // so a second send here could block this goroutine forever.
    if err := podLogs.Close(); err != nil {
        log.WithError(err).Warn("error closing pod log stream")
    }
}()
_, err = io.Copy(stdout, podLogs)
if err != nil {
exit <- ExitInfo{1, fmt.Errorf("error in copying logs: %v", err)}
return
}
// Wait for the pod to complete
watch, err := w.clientset.CoreV1().Pods(w.namespace).Watch(ctx, metav1.ListOptions{
FieldSelector: fmt.Sprintf("metadata.name=%s", w.podName),
})
if err != nil {
exit <- ExitInfo{1, fmt.Errorf("error watching pod: %v", err)}
return
}
defer watch.Stop()
for event := range watch.ResultChan() {
pod, ok := event.Object.(*corev1.Pod)
if !ok {
continue
}
if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed {
exitCode := uint8(0)
if pod.Status.Phase == corev1.PodFailed {
exitCode = 1
}
exit <- ExitInfo{exitCode, nil}
return
}
}
}
func (w *KubernetesPodWorkload) handleRunnerTimeout(ctx context.Context,
exitInternal <-chan ExitInfo, exit chan<- ExitInfo, executionID string) {
executionCtx, cancelExecution := context.WithCancel(ctx)
w.runningExecutions[executionID] = cancelExecution
defer delete(w.runningExecutions, executionID)
defer close(exit)
select {
case exitInfo := <-exitInternal:
exit <- exitInfo
case <-executionCtx.Done():
exit <- ExitInfo{255, ErrorRunnerInactivityTimeout}
}
}
// hideEnvironmentVariablesK8s initializes the request's Environment map if necessary and
// prefixes the command so that all variables starting with unsetPrefix are unset before execution.
func hideEnvironmentVariablesK8s(request *dto.ExecutionRequest, unsetPrefix string) {
if request.Environment == nil {
request.Environment = make(map[string]string)
}
request.Command = "unset \"${!" + unsetPrefix + "@}\" && " + request.Command
}
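// Illustrative example (not part of this commit) of the rewriting above: for
//     request.Command == "make run" and unsetPrefix == "K8S"
// the pod command becomes
//     unset "${!K8S@}" && make run
// Note that "${!PREFIX@}" is a bash extension, so the /bin/sh -c entrypoint
// used in executeCommand must resolve to a bash-compatible shell.
func exampleHideEnvironmentVariables() {
    request := &dto.ExecutionRequest{Command: "make run"}
    hideEnvironmentVariablesK8s(request, "K8S")
    fmt.Println(request.Command) // unset "${!K8S@}" && make run
}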
func prepareExecution(request *dto.ExecutionRequest, environmentCtx context.Context) (
command string, ctx context.Context, cancel context.CancelFunc,
) {
command = request.FullCommand()
if request.TimeLimit == 0 {
ctx, cancel = context.WithCancel(environmentCtx)
} else {
ctx, cancel = context.WithTimeout(environmentCtx, time.Duration(request.TimeLimit)*time.Second)
}
return command, ctx, cancel
}

View File

@@ -0,0 +1,54 @@
package runner
import "github.com/openHPI/poseidon/pkg/dto"
// Manager keeps track of the used and unused runners of all execution environments in order to provide unused
// runners to new clients and ensure no runner is used twice.
type Manager interface {
EnvironmentAccessor
AccessorHandler
}
// EnvironmentAccessor provides access to the stored environments.
type EnvironmentAccessor interface {
// ListEnvironments returns all execution environments known by Poseidon.
ListEnvironments() []ExecutionEnvironment
// GetEnvironment returns the details of the requested environment.
// Iff the requested environment is not stored, it returns false.
GetEnvironment(id dto.EnvironmentID) (ExecutionEnvironment, bool)
// StoreEnvironment stores the environment in Poseidon's memory.
StoreEnvironment(environment ExecutionEnvironment)
// DeleteEnvironment removes the specified execution environment from Poseidon's memory.
// It does nothing if the specified environment cannot be found.
DeleteEnvironment(id dto.EnvironmentID)
// EnvironmentStatistics returns statistical data for each execution environment.
EnvironmentStatistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData
}
// AccessorHandler is one handler in the chain of responsibility of runner accessors.
// Each runner accessor can handle different requests.
type AccessorHandler interface {
Accessor
SetNextHandler(m AccessorHandler)
NextHandler() AccessorHandler
HasNextHandler() bool
}
// Accessor manages the lifecycle of Runner.
type Accessor interface {
// Claim returns a new runner. The runner is deleted after duration seconds if duration is not 0.
// It makes sure that the runner is not in use yet and returns an error if no runner could be provided.
Claim(id dto.EnvironmentID, duration int) (Runner, error)
// Get returns the used runner with the given runnerId.
// If no runner with the given runnerId is currently used, it returns an error.
Get(runnerID string) (Runner, error)
// Return signals that the runner is no longer used by the caller and can be claimed by someone else.
// The runner is deleted or cleaned up for reuse depending on the used executor.
Return(r Runner) error
}
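// Illustrative sketch (not part of this commit): the Claim/Get/Return lifecycle
// every Accessor implementation has to support. The 60-second duration is an
// arbitrary example value.
func exampleRunnerLifecycle(accessor Accessor, environmentID dto.EnvironmentID) error {
    r, err := accessor.Claim(environmentID, 60) // destroyed after 60s if unused
    if err != nil {
        return err
    }
    if _, err := accessor.Get(r.ID()); err != nil { // look it up again by id
        return err
    }
    return accessor.Return(r) // hand it back for cleanup or reuse
}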

internal/runner/runner.go
View File

@@ -0,0 +1,91 @@
package runner
import (
"context"
"github.com/influxdata/influxdb-client-go/v2/api/write"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/monitoring"
"github.com/openHPI/poseidon/pkg/storage"
"io"
"net/http"
)
type ExitInfo struct {
Code uint8
Err error
}
const (
// runnerContextKey is the key used to store runners in context.Context.
runnerContextKey dto.ContextKey = "runner"
)
type DestroyRunnerHandler = func(r Runner) error
// DestroyReason specifies errors that are expected as reason for destroying a runner.
type DestroyReason error
type Runner interface {
InactivityTimer
// ID returns the id of the runner.
ID() string
// Environment returns the id of the Environment to which the Runner belongs.
Environment() dto.EnvironmentID
// MappedPorts returns the mapped ports of the runner.
MappedPorts() []*dto.MappedPort
// StoreExecution adds a new execution to the runner that can then be executed using ExecuteInteractively.
StoreExecution(id string, executionRequest *dto.ExecutionRequest)
// ExecutionExists returns whether the execution with the given id is already stored.
ExecutionExists(id string) bool
// ExecuteInteractively runs the given execution request and forwards from and to the given reader and writers.
// An ExitInfo is sent to the exit channel on command completion.
// Output from the runner is forwarded immediately.
ExecuteInteractively(
id string,
stdin io.ReadWriter,
stdout,
stderr io.Writer,
ctx context.Context,
) (exit <-chan ExitInfo, cancel context.CancelFunc, err error)
// ListFileSystem streams the listing of the file system of the requested directory into the Writer provided.
// The result is streamed via the io.Writer in order to not overload the memory with user input.
ListFileSystem(path string, recursive bool, result io.Writer, privilegedExecution bool, ctx context.Context) error
// UpdateFileSystem processes a dto.UpdateFileSystemRequest by first deleting each given dto.FilePath recursively
// and then copying each given dto.File to the runner.
UpdateFileSystem(request *dto.UpdateFileSystemRequest, ctx context.Context) error
// GetFileContent streams the file content at the requested path into the Writer provided at content.
// The result is streamed via the io.Writer in order to not overload the memory with user input.
GetFileContent(path string, content http.ResponseWriter, privilegedExecution bool, ctx context.Context) error
// Destroy destroys the Runner at the executor (e.g. Nomad or Kubernetes).
// Depending on the reason special cases of the Destruction will be handled.
Destroy(reason DestroyReason) error
}
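// Illustrative consumer (not part of this commit): run a stored execution and
// block until the exit info arrives or the surrounding context is canceled.
func runStoredExecution(ctx context.Context, r Runner, executionID string,
    stdin io.ReadWriter, stdout, stderr io.Writer) (uint8, error) {
    exit, cancel, err := r.ExecuteInteractively(executionID, stdin, stdout, stderr, ctx)
    if err != nil {
        return 1, err
    }
    defer cancel()
    info := <-exit // ExecuteInteractively sends exactly one ExitInfo on completion
    return info.Code, info.Err
}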
// NewContext creates a context containing a runner.
func NewContext(ctx context.Context, runner Runner) context.Context {
return context.WithValue(ctx, runnerContextKey, runner)
}
// FromContext returns a runner from a context.
func FromContext(ctx context.Context) (Runner, bool) {
runner, ok := ctx.Value(runnerContextKey).(Runner)
return runner, ok
}
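// Hypothetical middleware sketch (not part of this commit): resolve the runner
// once and stash it in the request context so downstream handlers can use
// FromContext. The query parameter name is an assumption for illustration.
func runnerMiddleware(accessor Accessor, next http.Handler) http.Handler {
    return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
        r, err := accessor.Get(request.URL.Query().Get("runnerId"))
        if err != nil {
            http.Error(writer, err.Error(), http.StatusNotFound)
            return
        }
        next.ServeHTTP(writer, request.WithContext(NewContext(request.Context(), r)))
    })
}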
// monitorExecutionsRunnerID passes the id of the runner executing the execution into the monitoring Point p.
func monitorExecutionsRunnerID(env dto.EnvironmentID, runnerID string) storage.WriteCallback[*dto.ExecutionRequest] {
return func(p *write.Point, _ *dto.ExecutionRequest, _ storage.EventType) {
p.AddTag(monitoring.InfluxKeyEnvironmentID, env.ToString())
p.AddTag(monitoring.InfluxKeyRunnerID, runnerID)
}
}