added k8s stub adapter for execution environment

Elmar Kresse
2024-09-18 10:43:38 +02:00
parent f9a6ba8f1c
commit 12ff205bd2
119 changed files with 1374 additions and 12549 deletions

View File

@@ -1,121 +0,0 @@
package environment
import (
"encoding/json"
"fmt"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
)
type AWSEnvironment struct {
id dto.EnvironmentID
awsEndpoint string
onDestroyRunner runner.DestroyRunnerHandler
}
func NewAWSEnvironment(onDestroyRunner runner.DestroyRunnerHandler) *AWSEnvironment {
return &AWSEnvironment{onDestroyRunner: onDestroyRunner}
}
func (a *AWSEnvironment) MarshalJSON() ([]byte, error) {
res, err := json.Marshal(dto.ExecutionEnvironmentData{
ID: int(a.ID()),
ExecutionEnvironmentRequest: dto.ExecutionEnvironmentRequest{Image: a.Image()},
})
if err != nil {
return res, fmt.Errorf("couldn't marshal aws execution environment: %w", err)
}
return res, nil
}
func (a *AWSEnvironment) ID() dto.EnvironmentID {
return a.id
}
func (a *AWSEnvironment) SetID(id dto.EnvironmentID) {
a.id = id
}
// Image is used to specify the AWS Endpoint Poseidon is connecting to.
func (a *AWSEnvironment) Image() string {
return a.awsEndpoint
}
func (a *AWSEnvironment) SetImage(awsEndpoint string) {
a.awsEndpoint = awsEndpoint
}
func (a *AWSEnvironment) Delete(_ runner.DestroyReason) error {
return nil
}
func (a *AWSEnvironment) Sample() (r runner.Runner, ok bool) {
workload, err := runner.NewAWSFunctionWorkload(a, a.onDestroyRunner)
if err != nil {
return nil, false
}
return workload, true
}
// The following methods are not supported at this moment.
// IdleRunnerCount is not supported as we have no information about the AWS managed prewarming pool.
// For the Poseidon Health check we default to 1.
func (a *AWSEnvironment) IdleRunnerCount() uint {
return 1
}
// PrewarmingPoolSize is neither supported nor required. It is handled transparently by AWS.
// For easy compatibility with CodeOcean, 1 is the static value.
func (a *AWSEnvironment) PrewarmingPoolSize() uint {
return 1
}
// SetPrewarmingPoolSize is neither supported nor required. It is handled transparently by AWS.
func (a *AWSEnvironment) SetPrewarmingPoolSize(_ uint) {}
// ApplyPrewarmingPoolSize is neither supported nor required. It is handled transparently by AWS.
func (a *AWSEnvironment) ApplyPrewarmingPoolSize() error {
return nil
}
// CPULimit is disabled as one can only set the memory limit with AWS Lambda.
func (a *AWSEnvironment) CPULimit() uint {
return 0
}
// SetCPULimit is disabled as one can only set the memory limit with AWS Lambda.
func (a *AWSEnvironment) SetCPULimit(_ uint) {}
func (a *AWSEnvironment) MemoryLimit() uint {
const memorySizeOfDeployedLambdaFunction = 2048 // as configured in deploy/aws/template.yaml
return memorySizeOfDeployedLambdaFunction
}
func (a *AWSEnvironment) SetMemoryLimit(_ uint) {
panic("not supported")
}
func (a *AWSEnvironment) NetworkAccess() (enabled bool, mappedPorts []uint16) {
return true, nil
}
func (a *AWSEnvironment) SetNetworkAccess(_ bool, _ []uint16) {
panic("not supported")
}
func (a *AWSEnvironment) SetConfigFrom(_ runner.ExecutionEnvironment) {
panic("not supported")
}
func (a *AWSEnvironment) Register() error {
panic("not supported")
}
func (a *AWSEnvironment) AddRunner(_ runner.Runner) {
panic("not supported")
}
func (a *AWSEnvironment) DeleteRunner(_ string) (r runner.Runner, ok bool) {
panic("not supported")
}

View File

@@ -1,67 +0,0 @@
package environment
import (
"context"
"fmt"
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
)
// AWSEnvironmentManager contains no functionality at the moment.
// IMPROVE: Create Lambda functions dynamically.
type AWSEnvironmentManager struct {
*AbstractManager
}
func NewAWSEnvironmentManager(runnerManager runner.Manager) *AWSEnvironmentManager {
return &AWSEnvironmentManager{&AbstractManager{nil, runnerManager}}
}
func (a *AWSEnvironmentManager) List(fetch bool) ([]runner.ExecutionEnvironment, error) {
list, err := a.NextHandler().List(fetch)
if err != nil {
return nil, fmt.Errorf("aws wrapped: %w", err)
}
return append(list, a.runnerManager.ListEnvironments()...), nil
}
func (a *AWSEnvironmentManager) Get(id dto.EnvironmentID, fetch bool) (runner.ExecutionEnvironment, error) {
e, ok := a.runnerManager.GetEnvironment(id)
if ok {
return e, nil
} else {
e, err := a.NextHandler().Get(id, fetch)
if err != nil {
return nil, fmt.Errorf("aws wrapped: %w", err)
}
return e, nil
}
}
func (a *AWSEnvironmentManager) CreateOrUpdate(
id dto.EnvironmentID, request dto.ExecutionEnvironmentRequest, ctx context.Context) (bool, error) {
if !isAWSEnvironment(request) {
isCreated, err := a.NextHandler().CreateOrUpdate(id, request, ctx)
if err != nil {
return false, fmt.Errorf("aws wrapped: %w", err)
}
return isCreated, nil
}
_, ok := a.runnerManager.GetEnvironment(id)
e := NewAWSEnvironment(a.runnerManager.Return)
e.SetID(id)
e.SetImage(request.Image)
a.runnerManager.StoreEnvironment(e)
return !ok, nil
}
func isAWSEnvironment(request dto.ExecutionEnvironmentRequest) bool {
for _, function := range config.Config.AWS.Functions {
if request.Image == function {
return true
}
}
return false
}
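For orientation, a minimal sketch (not part of this commit) of how this chain of responsibility could be wired, assuming some other ManagerHandler — e.g. a Nomad- or Kubernetes-backed manager — is available as the fallback:

package environment

import (
	"context"

	"github.com/openHPI/poseidon/internal/runner"
)

// wireAWSChain is a hypothetical helper: requests whose image matches an entry
// in config.Config.AWS.Functions are handled by the AWS manager, all other
// requests fall through to the next handler.
func wireAWSChain(ctx context.Context, next ManagerHandler) *AWSEnvironmentManager {
	awsManager := NewAWSEnvironmentManager(runner.NewAWSRunnerManager(ctx))
	awsManager.SetNextHandler(next)
	return awsManager
}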

View File

@@ -1,122 +0,0 @@
package environment
import (
"context"
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"testing"
)
type MainTestSuite struct {
tests.MemoryLeakTestSuite
}
func TestMainTestSuite(t *testing.T) {
suite.Run(t, new(MainTestSuite))
}
func (s *MainTestSuite) TestAWSEnvironmentManager_CreateOrUpdate() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
runnerManager := runner.NewAWSRunnerManager(ctx)
m := NewAWSEnvironmentManager(runnerManager)
uniqueImage := "java11Exec"
s.Run("can create default Java environment", func() {
config.Config.AWS.Functions = []string{uniqueImage}
_, err := m.CreateOrUpdate(
tests.AnotherEnvironmentIDAsInteger, dto.ExecutionEnvironmentRequest{Image: uniqueImage}, context.Background())
s.NoError(err)
})
s.Run("can retrieve added environment", func() {
environment, err := m.Get(tests.AnotherEnvironmentIDAsInteger, false)
s.NoError(err)
s.Equal(environment.Image(), uniqueImage)
})
s.Run("non-handleable requests are forwarded to the next manager", func() {
nextHandler := &ManagerHandlerMock{}
nextHandler.On("CreateOrUpdate", mock.AnythingOfType("dto.EnvironmentID"),
mock.AnythingOfType("dto.ExecutionEnvironmentRequest"), mock.Anything).Return(true, nil)
m.SetNextHandler(nextHandler)
request := dto.ExecutionEnvironmentRequest{}
_, err := m.CreateOrUpdate(tests.DefaultEnvironmentIDAsInteger, request, context.Background())
s.NoError(err)
nextHandler.AssertCalled(s.T(), "CreateOrUpdate",
dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger), request, mock.Anything)
})
}
func (s *MainTestSuite) TestAWSEnvironmentManager_Get() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
runnerManager := runner.NewAWSRunnerManager(ctx)
m := NewAWSEnvironmentManager(runnerManager)
s.Run("Calls next handler when not found", func() {
nextHandler := &ManagerHandlerMock{}
nextHandler.On("Get", mock.AnythingOfType("dto.EnvironmentID"), mock.AnythingOfType("bool")).
Return(nil, nil)
m.SetNextHandler(nextHandler)
_, err := m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.NoError(err)
nextHandler.AssertCalled(s.T(), "Get", dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger), false)
})
s.Run("Returns error when not found", func() {
nextHandler := &AbstractManager{nil, nil}
m.SetNextHandler(nextHandler)
_, err := m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.ErrorIs(err, runner.ErrRunnerNotFound)
})
s.Run("Returns environment when it was added before", func() {
expectedEnvironment := NewAWSEnvironment(nil)
expectedEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
runnerManager.StoreEnvironment(expectedEnvironment)
environment, err := m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.NoError(err)
s.Equal(expectedEnvironment, environment)
})
}
func (s *MainTestSuite) TestAWSEnvironmentManager_List() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
runnerManager := runner.NewAWSRunnerManager(ctx)
m := NewAWSEnvironmentManager(runnerManager)
s.Run("also returns environments of the rest of the manager chain", func() {
nextHandler := &ManagerHandlerMock{}
existingEnvironment := NewAWSEnvironment(nil)
nextHandler.On("List", mock.AnythingOfType("bool")).
Return([]runner.ExecutionEnvironment{existingEnvironment}, nil)
m.SetNextHandler(nextHandler)
environments, err := m.List(false)
s.NoError(err)
s.Require().Len(environments, 1)
s.Contains(environments, existingEnvironment)
})
m.SetNextHandler(nil)
s.Run("Returns added environment", func() {
localEnvironment := NewAWSEnvironment(nil)
localEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
runnerManager.StoreEnvironment(localEnvironment)
environments, err := m.List(false)
s.NoError(err)
s.Len(environments, 1)
s.Contains(environments, localEnvironment)
})
}

View File

@@ -0,0 +1,175 @@
package environment
import (
"context"
"fmt"
poseidonK8s "github.com/openHPI/poseidon/internal/kubernetes"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/monitoring"
"github.com/openHPI/poseidon/pkg/storage"
appsv1 "k8s.io/api/apps/v1"
"time"
)
type KubernetesEnvironment struct {
apiClient *poseidonK8s.ExecutorAPI
jobHCL string
deployment *appsv1.Deployment
idleRunners storage.Storage[runner.Runner]
ctx context.Context
cancel context.CancelFunc
}
func (k KubernetesEnvironment) MarshalJSON() ([]byte, error) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) ID() dto.EnvironmentID {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetID(id dto.EnvironmentID) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) PrewarmingPoolSize() uint {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetPrewarmingPoolSize(count uint) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) ApplyPrewarmingPoolSize() error {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) CPULimit() uint {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetCPULimit(limit uint) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) MemoryLimit() uint {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetMemoryLimit(limit uint) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) Image() string {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetImage(image string) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) NetworkAccess() (bool, []uint16) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetNetworkAccess(allow bool, ports []uint16) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetConfigFrom(environment runner.ExecutionEnvironment) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) Register() error {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) Delete(reason runner.DestroyReason) error {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) Sample() (r runner.Runner, ok bool) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) AddRunner(r runner.Runner) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) DeleteRunner(id string) (r runner.Runner, ok bool) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) IdleRunnerCount() uint {
//TODO implement me
panic("implement me")
}
func NewKubernetesEnvironmentFromRequest(
apiClient poseidonK8s.ExecutorAPI, jobHCL string, id dto.EnvironmentID, request dto.ExecutionEnvironmentRequest) (
*KubernetesEnvironment, error) {
environment, err := NewKubernetesEnvironment(id, apiClient, jobHCL)
if err != nil {
return nil, err
}
environment.SetID(id)
// Set options according to request
environment.SetPrewarmingPoolSize(request.PrewarmingPoolSize)
environment.SetCPULimit(request.CPULimit)
environment.SetMemoryLimit(request.MemoryLimit)
environment.SetImage(request.Image)
environment.SetNetworkAccess(request.NetworkAccess, request.ExposedPorts)
return environment, nil
}
func NewKubernetesEnvironment(id dto.EnvironmentID, apiClient poseidonK8s.ExecutorAPI, jobHCL string) (*KubernetesEnvironment, error) {
job, err := parseDeployment(jobHCL)
if err != nil {
return nil, fmt.Errorf("error parsing Nomad job: %w", err)
}
ctx, cancel := context.WithCancel(context.Background())
e := &KubernetesEnvironment{&apiClient, jobHCL, job, nil, ctx, cancel}
e.idleRunners = storage.NewMonitoredLocalStorage[runner.Runner](monitoring.MeasurementIdleRunnerNomad,
runner.MonitorEnvironmentID[runner.Runner](id), time.Minute, ctx)
return e, nil
}
// TODO: missing implementation. parseDeployment currently returns an empty Deployment;
// the commented-out block below is left over from the Nomad jobspec2 parser.
func parseDeployment(jobHCL string) (*appsv1.Deployment, error) {
deployment := appsv1.Deployment{}
// jobConfig := jobspec2.ParseConfig{
// Body: []byte(jobHCL),
// AllowFS: false,
// Strict: true,
// }
// job, err := jobspec2.ParseWithConfig(&jobConfig)
// if err != nil {
// return job, fmt.Errorf("couldn't parse job HCL: %w", err)
// }
return &deployment, nil
}
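Until the real parser lands, one possible sketch — assuming the template will be provided as a Kubernetes YAML (or JSON) manifest rather than Nomad HCL — would decode it with the apimachinery YAML helpers:

package environment

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/yaml"
)

// parseDeploymentYAML is an illustrative sketch, not part of this commit.
// It decodes a Deployment manifest into the typed API object.
func parseDeploymentYAML(spec string) (*appsv1.Deployment, error) {
	deployment := &appsv1.Deployment{}
	if err := yaml.Unmarshal([]byte(spec), deployment); err != nil {
		return nil, fmt.Errorf("couldn't parse deployment spec: %w", err)
	}
	return deployment, nil
}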

View File

@@ -0,0 +1,297 @@
package environment
import (
"context"
"fmt"
poseidonK8s "github.com/openHPI/poseidon/internal/kubernetes"
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/logging"
"github.com/openHPI/poseidon/pkg/monitoring"
"github.com/openHPI/poseidon/pkg/storage"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"strconv"
"time"
)
type KubernetesEnvironmentManager struct {
*AbstractManager
api poseidonK8s.ExecutorAPI
templateEnvironmentHCL string
}
func NewKubernetesEnvironmentManager(
runnerManager runner.Manager,
apiClient *poseidonK8s.ExecutorAPI,
templateJobFile string,
) (*KubernetesEnvironmentManager, error) {
if err := loadTemplateEnvironmentJobHCL(templateJobFile); err != nil {
return nil, err
}
m := &KubernetesEnvironmentManager{
AbstractManager: &AbstractManager{nil, runnerManager},
api: *apiClient,
templateEnvironmentHCL: templateEnvironmentJobHCL,
}
return m, nil
}
func (k *KubernetesEnvironmentManager) SetNextHandler(next ManagerHandler) {
k.nextHandler = next
}
func (k *KubernetesEnvironmentManager) NextHandler() ManagerHandler {
if k.HasNextHandler() {
return k.nextHandler
} else {
return &AbstractManager{}
}
}
func (k *KubernetesEnvironmentManager) HasNextHandler() bool {
return k.nextHandler != nil
}
// List returns all Kubernetes-based environments.
func (k *KubernetesEnvironmentManager) List(fetch bool) ([]runner.ExecutionEnvironment, error) {
if fetch {
if err := k.fetchEnvironments(); err != nil {
return nil, err
}
}
return k.runnerManager.ListEnvironments(), nil
}
func (k *KubernetesEnvironmentManager) fetchEnvironments() error {
remoteDeploymentResponse, err := k.api.LoadEnvironmentJobs()
if err != nil {
return fmt.Errorf("failed fetching environments: %w", err)
}
remoteDeployments := make(map[string]appsv1.Deployment)
// Update local environments from remote environments.
for _, deployment := range remoteDeploymentResponse {
remoteDeployments[deployment.Name] = *deployment
// Convert the deployment name to an integer environment ID.
intIdentifier, err := strconv.Atoi(deployment.Name)
if err != nil {
log.WithError(err).Warn("Failed to convert job name to int")
continue
}
id := dto.EnvironmentID(intIdentifier)
if localEnvironment, ok := k.runnerManager.GetEnvironment(id); ok {
fetchedEnvironment := newKubernetesEnvironmentFromJob(deployment, &k.api)
localEnvironment.SetConfigFrom(fetchedEnvironment)
// We destroy only this (second) local reference to the environment.
if err = fetchedEnvironment.Delete(runner.ErrDestroyedAndReplaced); err != nil {
log.WithError(err).Warn("Failed to remove environment locally")
}
} else {
k.runnerManager.StoreEnvironment(newKubernetesEnvironmentFromJob(deployment, &k.api))
}
}
// Remove local environments that are not remote environments.
for _, localEnvironment := range k.runnerManager.ListEnvironments() {
if _, ok := remoteDeployments[localEnvironment.ID().ToString()]; !ok {
err := localEnvironment.Delete(runner.ErrLocalDestruction)
log.WithError(err).Warn("Failed to remove environment locally")
}
}
return nil
}
// newKubernetesEnvironmentFromJob creates a Kubernetes environment from the passed deployment definition.
func newKubernetesEnvironmentFromJob(deployment *appsv1.Deployment, apiClient *poseidonK8s.ExecutorAPI) *KubernetesEnvironment {
ctx, cancel := context.WithCancel(context.Background())
e := &KubernetesEnvironment{
apiClient: apiClient,
jobHCL: templateEnvironmentJobHCL,
deployment: deployment,
ctx: ctx,
cancel: cancel,
}
e.idleRunners = storage.NewMonitoredLocalStorage[runner.Runner](monitoring.MeasurementIdleRunnerNomad,
runner.MonitorEnvironmentID[runner.Runner](e.ID()), time.Minute, ctx)
return e
}
// Get retrieves a specific Kubernetes environment.
func (k *KubernetesEnvironmentManager) Get(id dto.EnvironmentID, fetch bool) (executionEnvironment runner.ExecutionEnvironment, err error) {
executionEnvironment, ok := k.runnerManager.GetEnvironment(id)
if fetch {
fetchedEnvironment, err := fetchK8sEnvironment(id, k.api)
switch {
case err != nil:
return nil, err
case fetchedEnvironment == nil:
_, err = k.Delete(id)
if err != nil {
return nil, err
}
ok = false
case !ok:
k.runnerManager.StoreEnvironment(fetchedEnvironment)
executionEnvironment = fetchedEnvironment
ok = true
default:
executionEnvironment.SetConfigFrom(fetchedEnvironment)
// We destroy only this (second) local reference to the environment.
err = fetchedEnvironment.Delete(runner.ErrDestroyedAndReplaced)
if err != nil {
log.WithError(err).Warn("Failed to remove environment locally")
}
}
}
if !ok {
err = runner.ErrUnknownExecutionEnvironment
}
return executionEnvironment, err
}
// CreateOrUpdate creates or updates an environment in Kubernetes.
func (k *KubernetesEnvironmentManager) CreateOrUpdate(
id dto.EnvironmentID, request dto.ExecutionEnvironmentRequest, ctx context.Context) (created bool, err error) {
// Check if execution environment is already existing (in the local memory).
environment, isExistingEnvironment := k.runnerManager.GetEnvironment(id)
if isExistingEnvironment {
// Remove existing environment to force downloading the newest Docker image.
// See https://github.com/openHPI/poseidon/issues/69
err = environment.Delete(runner.ErrEnvironmentUpdated)
if err != nil {
return false, fmt.Errorf("failed to remove the environment: %w", err)
}
}
// Create a new environment with the given request options.
environment, err = NewKubernetesEnvironmentFromRequest(k.api, k.templateEnvironmentHCL, id, request)
if err != nil {
return false, fmt.Errorf("error creating Nomad environment: %w", err)
}
// Keep a copy of environment specification in memory.
k.runnerManager.StoreEnvironment(environment)
// Register the template deployment with Kubernetes.
logging.StartSpan("env.update.register", "Register Environment", ctx, func(_ context.Context) {
err = environment.Register()
})
if err != nil {
return false, fmt.Errorf("error registering template job in API: %w", err)
}
// Launch idle runners based on the template deployment.
logging.StartSpan("env.update.poolsize", "Apply Prewarming Pool Size", ctx, func(_ context.Context) {
err = environment.ApplyPrewarmingPoolSize()
})
if err != nil {
return false, fmt.Errorf("error scaling template job in API: %w", err)
}
return !isExistingEnvironment, nil
}
// Statistics fetches statistics for Kubernetes environments.
func (k *KubernetesEnvironmentManager) Statistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData {
// Not implemented yet; an empty map is returned for now.
return map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData{}
}
// MapExecutionEnvironmentRequestToDeployment maps an ExecutionEnvironmentRequest to a Kubernetes Deployment.
func MapExecutionEnvironmentRequestToDeployment(req dto.ExecutionEnvironmentRequest, environmentID string) *appsv1.Deployment {
// Create the Deployment object
deployment := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: environmentID, // Set the environment ID as the name of the deployment
Labels: map[string]string{
"environment-id": environmentID,
},
},
Spec: appsv1.DeploymentSpec{
Replicas: int32Ptr(int32(req.PrewarmingPoolSize)), // Use PrewarmingPoolSize to set the number of replicas
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"environment-id": environmentID,
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"environment-id": environmentID,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "runner-container",
Image: req.Image, // Map the image to the container
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(strconv.Itoa(int(req.CPULimit))), // Map CPU request
"memory": resource.MustParse(strconv.Itoa(int(req.MemoryLimit)) + "Mi"), // Map Memory request
},
Limits: v1.ResourceList{
"cpu": resource.MustParse(strconv.Itoa(int(req.CPULimit))), // Map CPU limit
"memory": resource.MustParse(strconv.Itoa(int(req.MemoryLimit)) + "Mi"), // Map Memory limit
},
},
},
},
},
},
},
}
// Handle network access and exposed ports
if req.NetworkAccess {
var containerPorts []v1.ContainerPort
for _, port := range req.ExposedPorts {
containerPorts = append(containerPorts, v1.ContainerPort{
ContainerPort: int32(port),
})
}
deployment.Spec.Template.Spec.Containers[0].Ports = containerPorts
}
return deployment
}
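A usage sketch with made-up request values. Note the units this mapping implies: CPULimit is rendered as a bare integer quantity and therefore read as whole CPU cores, while MemoryLimit is suffixed with "Mi":

package environment

import (
	"fmt"

	"github.com/openHPI/poseidon/pkg/dto"
)

// ExampleMapExecutionEnvironmentRequestToDeployment uses illustrative values only.
func ExampleMapExecutionEnvironmentRequestToDeployment() {
	req := dto.ExecutionEnvironmentRequest{
		PrewarmingPoolSize: 2,
		CPULimit:           1,   // becomes the quantity "1", i.e. one whole core
		MemoryLimit:        256, // becomes the quantity "256Mi"
		Image:              "openhpi/co_execenv_python", // hypothetical image name
		NetworkAccess:      true,
		ExposedPorts:       []uint16{8080},
	}
	deployment := MapExecutionEnvironmentRequestToDeployment(req, "10")
	fmt.Println(*deployment.Spec.Replicas, deployment.Spec.Template.Spec.Containers[0].Image)
	// Output: 2 openhpi/co_execenv_python
}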
// Helper function to return a pointer to an int32
func int32Ptr(i int32) *int32 {
return &i
}
func fetchK8sEnvironment(id dto.EnvironmentID, apiClient poseidonK8s.ExecutorAPI) (runner.ExecutionEnvironment, error) {
environments, err := apiClient.LoadEnvironmentJobs()
if err != nil {
return nil, fmt.Errorf("error fetching the environment jobs: %w", err)
}
var fetchedEnvironment runner.ExecutionEnvironment
for _, deployment := range environments {
environmentID, err := nomad.EnvironmentIDFromTemplateJobID(deployment.Name)
if err != nil {
log.WithError(err).Warn("Cannot parse environment id of loaded environment")
continue
}
if id == environmentID {
fetchedEnvironment = newKubernetesEnvironmentFromJob(deployment, &apiClient)
}
}
return fetchedEnvironment, nil
}
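The ExecutorAPI implementation is not part of this diff; as used above, LoadEnvironmentJobs has to return the environment template deployments. A client-go based sketch (assuming the "environment-id" label set by MapExecutionEnvironmentRequestToDeployment) could look like this:

package kubernetes

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8s "k8s.io/client-go/kubernetes"
)

// loadEnvironmentDeployments is an illustrative sketch, not part of this commit.
// It lists all deployments carrying the "environment-id" label in the given namespace.
func loadEnvironmentDeployments(clientset k8s.Interface, namespace string) ([]*appsv1.Deployment, error) {
	list, err := clientset.AppsV1().Deployments(namespace).List(context.Background(),
		metav1.ListOptions{LabelSelector: "environment-id"})
	if err != nil {
		return nil, fmt.Errorf("couldn't list environment deployments: %w", err)
	}
	deployments := make([]*appsv1.Deployment, 0, len(list.Items))
	for i := range list.Items {
		deployments = append(deployments, &list.Items[i])
	}
	return deployments, nil
}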

View File

@@ -1,171 +0,0 @@
// Code generated by mockery v2.16.0. DO NOT EDIT.
package environment
import (
context "context"
dto "github.com/openHPI/poseidon/pkg/dto"
mock "github.com/stretchr/testify/mock"
runner "github.com/openHPI/poseidon/internal/runner"
)
// ManagerHandlerMock is an autogenerated mock type for the ManagerHandler type
type ManagerHandlerMock struct {
mock.Mock
}
// CreateOrUpdate provides a mock function with given fields: id, request, ctx
func (_m *ManagerHandlerMock) CreateOrUpdate(id dto.EnvironmentID, request dto.ExecutionEnvironmentRequest, ctx context.Context) (bool, error) {
ret := _m.Called(id, request, ctx)
var r0 bool
if rf, ok := ret.Get(0).(func(dto.EnvironmentID, dto.ExecutionEnvironmentRequest, context.Context) bool); ok {
r0 = rf(id, request, ctx)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(dto.EnvironmentID, dto.ExecutionEnvironmentRequest, context.Context) error); ok {
r1 = rf(id, request, ctx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Delete provides a mock function with given fields: id
func (_m *ManagerHandlerMock) Delete(id dto.EnvironmentID) (bool, error) {
ret := _m.Called(id)
var r0 bool
if rf, ok := ret.Get(0).(func(dto.EnvironmentID) bool); ok {
r0 = rf(id)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(dto.EnvironmentID) error); ok {
r1 = rf(id)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Get provides a mock function with given fields: id, fetch
func (_m *ManagerHandlerMock) Get(id dto.EnvironmentID, fetch bool) (runner.ExecutionEnvironment, error) {
ret := _m.Called(id, fetch)
var r0 runner.ExecutionEnvironment
if rf, ok := ret.Get(0).(func(dto.EnvironmentID, bool) runner.ExecutionEnvironment); ok {
r0 = rf(id, fetch)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(runner.ExecutionEnvironment)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(dto.EnvironmentID, bool) error); ok {
r1 = rf(id, fetch)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// HasNextHandler provides a mock function with given fields:
func (_m *ManagerHandlerMock) HasNextHandler() bool {
ret := _m.Called()
var r0 bool
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// List provides a mock function with given fields: fetch
func (_m *ManagerHandlerMock) List(fetch bool) ([]runner.ExecutionEnvironment, error) {
ret := _m.Called(fetch)
var r0 []runner.ExecutionEnvironment
if rf, ok := ret.Get(0).(func(bool) []runner.ExecutionEnvironment); ok {
r0 = rf(fetch)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]runner.ExecutionEnvironment)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(bool) error); ok {
r1 = rf(fetch)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NextHandler provides a mock function with given fields:
func (_m *ManagerHandlerMock) NextHandler() ManagerHandler {
ret := _m.Called()
var r0 ManagerHandler
if rf, ok := ret.Get(0).(func() ManagerHandler); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(ManagerHandler)
}
}
return r0
}
// SetNextHandler provides a mock function with given fields: next
func (_m *ManagerHandlerMock) SetNextHandler(next ManagerHandler) {
_m.Called(next)
}
// Statistics provides a mock function with given fields:
func (_m *ManagerHandlerMock) Statistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData {
ret := _m.Called()
var r0 map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData
if rf, ok := ret.Get(0).(func() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData)
}
}
return r0
}
type mockConstructorTestingTNewManagerHandlerMock interface {
mock.TestingT
Cleanup(func())
}
// NewManagerHandlerMock creates a new instance of ManagerHandlerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewManagerHandlerMock(t mockConstructorTestingTNewManagerHandlerMock) *ManagerHandlerMock {
mock := &ManagerHandlerMock{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
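For reference, a minimal usage sketch of the generated constructor; NewManagerHandlerMock registers a cleanup hook, so expectations are asserted automatically when the test ends:

package environment

import "testing"

// TestManagerHandlerMockUsage is an illustrative sketch, not part of this commit.
func TestManagerHandlerMockUsage(t *testing.T) {
	m := NewManagerHandlerMock(t) // expectations asserted via t.Cleanup
	m.On("List", false).Return(nil, nil)

	environments, err := m.List(false)
	if err != nil || environments != nil {
		t.Fatalf("unexpected result: %v, %v", environments, err)
	}
}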

View File

@@ -1,265 +0,0 @@
package environment
import (
"context"
"fmt"
nomadApi "github.com/hashicorp/nomad/api"
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/storage"
"github.com/openHPI/poseidon/tests"
"github.com/openHPI/poseidon/tests/helpers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"testing"
"time"
)
func (s *MainTestSuite) TestConfigureNetworkCreatesNewNetworkWhenNoNetworkExists() {
_, job := helpers.CreateTemplateJob()
defaultTaskGroup := nomad.FindAndValidateDefaultTaskGroup(job)
environment := &NomadEnvironment{nil, "", job, nil, context.Background(), nil}
if s.Equal(0, len(defaultTaskGroup.Networks)) {
environment.SetNetworkAccess(true, []uint16{})
s.Equal(1, len(defaultTaskGroup.Networks))
}
}
func (s *MainTestSuite) TestConfigureNetworkDoesNotCreateNewNetworkWhenNetworkExists() {
_, job := helpers.CreateTemplateJob()
defaultTaskGroup := nomad.FindAndValidateDefaultTaskGroup(job)
environment := &NomadEnvironment{nil, "", job, nil, context.Background(), nil}
networkResource := config.Config.Nomad.Network
defaultTaskGroup.Networks = []*nomadApi.NetworkResource{&networkResource}
if s.Equal(1, len(defaultTaskGroup.Networks)) {
environment.SetNetworkAccess(true, []uint16{})
s.Equal(1, len(defaultTaskGroup.Networks))
s.Equal(&networkResource, defaultTaskGroup.Networks[0])
}
}
func (s *MainTestSuite) TestConfigureNetworkSetsCorrectValues() {
_, job := helpers.CreateTemplateJob()
defaultTaskGroup := nomad.FindAndValidateDefaultTaskGroup(job)
defaultTask := nomad.FindAndValidateDefaultTask(defaultTaskGroup)
mode, ok := defaultTask.Config["network_mode"]
s.True(ok)
s.Equal("none", mode)
s.Equal(0, len(defaultTaskGroup.Networks))
exposedPortsTests := [][]uint16{{}, {1337}, {42, 1337}}
s.Run("with no network access", func() {
for _, ports := range exposedPortsTests {
_, testJob := helpers.CreateTemplateJob()
testTaskGroup := nomad.FindAndValidateDefaultTaskGroup(testJob)
testTask := nomad.FindAndValidateDefaultTask(testTaskGroup)
testEnvironment := &NomadEnvironment{nil, "", testJob, nil, context.Background(), nil}
testEnvironment.SetNetworkAccess(false, ports)
mode, ok := testTask.Config["network_mode"]
s.True(ok)
s.Equal("none", mode)
s.Equal(0, len(testTaskGroup.Networks))
}
})
s.Run("with network access", func() {
for _, ports := range exposedPortsTests {
_, testJob := helpers.CreateTemplateJob()
testTaskGroup := nomad.FindAndValidateDefaultTaskGroup(testJob)
testTask := nomad.FindAndValidateDefaultTask(testTaskGroup)
testEnvironment := &NomadEnvironment{nil, "", testJob, nil, context.Background(), nil}
testEnvironment.SetNetworkAccess(true, ports)
s.Require().Equal(1, len(testTaskGroup.Networks))
networkResource := testTaskGroup.Networks[0]
s.Equal(config.Config.Nomad.Network.Mode, networkResource.Mode)
s.Require().Equal(len(ports), len(networkResource.DynamicPorts))
assertExpectedPorts(s.T(), ports, networkResource)
mode, ok := testTask.Config["network_mode"]
s.True(ok)
s.Equal(mode, "")
}
})
}
func assertExpectedPorts(t *testing.T, expectedPorts []uint16, networkResource *nomadApi.NetworkResource) {
t.Helper()
for _, expectedPort := range expectedPorts {
found := false
for _, actualPort := range networkResource.DynamicPorts {
if actualPort.To == int(expectedPort) {
found = true
break
}
}
assert.True(t, found, fmt.Sprintf("port list should contain %v", expectedPort))
}
}
func (s *MainTestSuite) TestRegisterFailsWhenNomadJobRegistrationFails() {
apiClientMock := &nomad.ExecutorAPIMock{}
expectedErr := tests.ErrDefault
apiClientMock.On("RegisterNomadJob", mock.AnythingOfType("*api.Job")).Return("", expectedErr)
apiClientMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiClientMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
environment := &NomadEnvironment{apiClientMock, "", &nomadApi.Job{},
storage.NewLocalStorage[runner.Runner](), nil, nil}
environment.SetID(tests.DefaultEnvironmentIDAsInteger)
err := environment.Register()
s.ErrorIs(err, expectedErr)
apiClientMock.AssertNotCalled(s.T(), "MonitorEvaluation")
}
func (s *MainTestSuite) TestRegisterTemplateJobSucceedsWhenMonitoringEvaluationSucceeds() {
apiClientMock := &nomad.ExecutorAPIMock{}
evaluationID := "id"
apiClientMock.On("RegisterNomadJob", mock.AnythingOfType("*api.Job")).Return(evaluationID, nil)
apiClientMock.On("MonitorEvaluation", mock.AnythingOfType("string"), mock.Anything).Return(nil)
apiClientMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiClientMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
environment := &NomadEnvironment{apiClientMock, "", &nomadApi.Job{},
storage.NewLocalStorage[runner.Runner](), context.Background(), nil}
environment.SetID(tests.DefaultEnvironmentIDAsInteger)
err := environment.Register()
s.NoError(err)
}
func (s *MainTestSuite) TestRegisterTemplateJobReturnsErrorWhenMonitoringEvaluationFails() {
apiClientMock := &nomad.ExecutorAPIMock{}
evaluationID := "id"
apiClientMock.On("RegisterNomadJob", mock.AnythingOfType("*api.Job")).Return(evaluationID, nil)
apiClientMock.On("MonitorEvaluation", mock.AnythingOfType("string"), mock.Anything).Return(tests.ErrDefault)
apiClientMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiClientMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
environment := &NomadEnvironment{apiClientMock, "", &nomadApi.Job{},
storage.NewLocalStorage[runner.Runner](), context.Background(), nil}
environment.SetID(tests.DefaultEnvironmentIDAsInteger)
err := environment.Register()
s.ErrorIs(err, tests.ErrDefault)
}
func (s *MainTestSuite) TestParseJob() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
s.Run("parses the given default job", func() {
environment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.NoError(err)
s.NotNil(environment.job)
s.NoError(environment.Delete(tests.ErrCleanupDestroyReason))
})
s.Run("returns error when given wrong job", func() {
environment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, nil, "")
s.Error(err)
s.Nil(environment)
})
}
func (s *MainTestSuite) TestTwoSampleAddExactlyTwoRunners() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("RegisterRunnerJob", mock.AnythingOfType("*api.Job")).Return(nil)
_, job := helpers.CreateTemplateJob()
environment := &NomadEnvironment{apiMock, templateEnvironmentJobHCL, job,
storage.NewLocalStorage[runner.Runner](), context.Background(), nil}
environment.SetPrewarmingPoolSize(2)
runner1 := &runner.RunnerMock{}
runner1.On("ID").Return(tests.DefaultRunnerID)
runner2 := &runner.RunnerMock{}
runner2.On("ID").Return(tests.AnotherRunnerID)
environment.AddRunner(runner1)
environment.AddRunner(runner2)
_, ok := environment.Sample()
s.Require().True(ok)
_, ok = environment.Sample()
s.Require().True(ok)
<-time.After(tests.ShortTimeout) // New Runners are requested asynchronously
apiMock.AssertNumberOfCalls(s.T(), "RegisterRunnerJob", 2)
}
func (s *MainTestSuite) TestSampleDoesNotSetForcePullFlag() {
apiMock := &nomad.ExecutorAPIMock{}
call := apiMock.On("RegisterRunnerJob", mock.AnythingOfType("*api.Job"))
call.Run(func(args mock.Arguments) {
job, ok := args.Get(0).(*nomadApi.Job)
s.True(ok)
taskGroup := nomad.FindAndValidateDefaultTaskGroup(job)
task := nomad.FindAndValidateDefaultTask(taskGroup)
s.False(task.Config["force_pull"].(bool))
call.ReturnArguments = mock.Arguments{nil}
})
_, job := helpers.CreateTemplateJob()
environment := &NomadEnvironment{apiMock, templateEnvironmentJobHCL, job,
storage.NewLocalStorage[runner.Runner](), s.TestCtx, nil}
runner1 := &runner.RunnerMock{}
runner1.On("ID").Return(tests.DefaultRunnerID)
environment.AddRunner(runner1)
_, ok := environment.Sample()
s.Require().True(ok)
<-time.After(tests.ShortTimeout) // New Runners are requested asynchronously
}
func (s *MainTestSuite) TestNomadEnvironment_DeleteLocally() {
apiMock := &nomad.ExecutorAPIMock{}
environment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
err = environment.Delete(runner.ErrLocalDestruction)
s.NoError(err)
apiMock.AssertExpectations(s.T())
}
func (s *MainTestSuite) TestNomadEnvironment_AddRunner() {
s.Run("Destroys runner before replacing it", func() {
apiMock := &nomad.ExecutorAPIMock{}
environment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
r := &runner.RunnerMock{}
r.On("ID").Return(tests.DefaultRunnerID)
r.On("Destroy", mock.Anything).Run(func(args mock.Arguments) {
err, ok := args[0].(error)
s.Require().True(ok)
s.ErrorIs(err, runner.ErrLocalDestruction)
}).Return(nil).Once()
r2 := &runner.RunnerMock{}
r2.On("ID").Return(tests.DefaultRunnerID)
environment.AddRunner(r)
environment.AddRunner(r2)
r.AssertExpectations(s.T())
// Teardown test case
r2.On("Destroy", mock.Anything).Return(nil)
apiMock.On("LoadRunnerIDs", mock.Anything).Return([]string{}, nil)
apiMock.On("DeleteJob", mock.Anything).Return(nil)
s.NoError(environment.Delete(tests.ErrCleanupDestroyReason))
})
}

View File

@@ -1,455 +0,0 @@
package environment
import (
"context"
nomadApi "github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests"
"github.com/openHPI/poseidon/tests/helpers"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"os"
"testing"
"time"
)
type CreateOrUpdateTestSuite struct {
tests.MemoryLeakTestSuite
runnerManagerMock runner.ManagerMock
apiMock nomad.ExecutorAPIMock
request dto.ExecutionEnvironmentRequest
manager *NomadEnvironmentManager
environmentID dto.EnvironmentID
}
func TestCreateOrUpdateTestSuite(t *testing.T) {
suite.Run(t, new(CreateOrUpdateTestSuite))
}
func (s *CreateOrUpdateTestSuite) SetupTest() {
s.MemoryLeakTestSuite.SetupTest()
s.runnerManagerMock = runner.ManagerMock{}
s.apiMock = nomad.ExecutorAPIMock{}
s.request = dto.ExecutionEnvironmentRequest{
PrewarmingPoolSize: 10,
CPULimit: 20,
MemoryLimit: 30,
Image: "my-image",
NetworkAccess: false,
ExposedPorts: nil,
}
s.manager = &NomadEnvironmentManager{
AbstractManager: &AbstractManager{runnerManager: &s.runnerManagerMock},
api: &s.apiMock,
templateEnvironmentHCL: templateEnvironmentJobHCL,
}
s.environmentID = dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger)
}
func (s *CreateOrUpdateTestSuite) TestReturnsErrorIfCreatesOrUpdateEnvironmentReturnsError() {
s.apiMock.On("RegisterNomadJob", mock.AnythingOfType("*api.Job")).Return("", tests.ErrDefault)
s.apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
s.runnerManagerMock.On("GetEnvironment", mock.AnythingOfType("dto.EnvironmentID")).Return(nil, false)
s.runnerManagerMock.On("StoreEnvironment", mock.AnythingOfType("*environment.NomadEnvironment")).Return(true)
s.ExpectedGoroutineIncrease++ // We don't care about removing the created environment.
_, err := s.manager.CreateOrUpdate(
dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger), s.request, context.Background())
s.ErrorIs(err, tests.ErrDefault)
}
func (s *CreateOrUpdateTestSuite) TestCreateOrUpdatesSetsForcePullFlag() {
s.apiMock.On("RegisterNomadJob", mock.AnythingOfType("*api.Job")).Return("", nil)
s.apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
s.runnerManagerMock.On("GetEnvironment", mock.AnythingOfType("dto.EnvironmentID")).Return(nil, false)
s.runnerManagerMock.On("StoreEnvironment", mock.AnythingOfType("*environment.NomadEnvironment")).Return(true)
s.apiMock.On("MonitorEvaluation", mock.AnythingOfType("string"), mock.Anything).Return(nil)
s.apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
call := s.apiMock.On("RegisterRunnerJob", mock.AnythingOfType("*api.Job"))
count := 0
call.Run(func(args mock.Arguments) {
count++
job, ok := args.Get(0).(*nomadApi.Job)
s.True(ok)
// The environment job itself does not have the force_pull flag.
if count > 1 {
taskGroup := nomad.FindAndValidateDefaultTaskGroup(job)
task := nomad.FindAndValidateDefaultTask(taskGroup)
s.True(task.Config["force_pull"].(bool))
}
call.ReturnArguments = mock.Arguments{nil}
})
s.ExpectedGoroutineIncrease++ // We don't care about removing the created environment at this point.
_, err := s.manager.CreateOrUpdate(
dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger), s.request, context.Background())
s.NoError(err)
s.True(count > 1)
}
func (s *MainTestSuite) TestNewNomadEnvironmentManager() {
executorAPIMock := &nomad.ExecutorAPIMock{}
executorAPIMock.On("LoadEnvironmentJobs").Return([]*nomadApi.Job{}, nil)
executorAPIMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
executorAPIMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
runnerManagerMock := &runner.ManagerMock{}
runnerManagerMock.On("Load").Return()
previousTemplateEnvironmentJobHCL := templateEnvironmentJobHCL
s.Run("returns error if template file does not exist", func() {
_, err := NewNomadEnvironmentManager(runnerManagerMock, executorAPIMock, "/non-existent/file")
s.Error(err)
})
s.Run("loads template environment job from file", func() {
templateJobHCL := "job \"" + tests.DefaultTemplateJobID + "\" {}"
environment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, executorAPIMock, templateJobHCL)
s.Require().NoError(err)
f := createTempFile(s.T(), templateJobHCL)
defer os.Remove(f.Name())
m, err := NewNomadEnvironmentManager(runnerManagerMock, executorAPIMock, f.Name())
s.NoError(err)
s.NotNil(m)
s.Equal(templateJobHCL, m.templateEnvironmentHCL)
s.NoError(environment.Delete(tests.ErrCleanupDestroyReason))
})
s.Run("returns error if template file is invalid", func() {
templateJobHCL := "invalid hcl file"
f := createTempFile(s.T(), templateJobHCL)
defer os.Remove(f.Name())
m, err := NewNomadEnvironmentManager(runnerManagerMock, executorAPIMock, f.Name())
s.Require().NoError(err)
_, err = NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, nil, m.templateEnvironmentHCL)
s.Error(err)
})
templateEnvironmentJobHCL = previousTemplateEnvironmentJobHCL
}
func (s *MainTestSuite) TestNomadEnvironmentManager_Get() {
apiMock := &nomad.ExecutorAPIMock{}
mockWatchAllocations(s.TestCtx, apiMock)
apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
call := apiMock.On("LoadEnvironmentJobs")
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{[]*nomadApi.Job{}, nil}
})
runnerManager := runner.NewNomadRunnerManager(apiMock, s.TestCtx)
m, err := NewNomadEnvironmentManager(runnerManager, apiMock, "")
s.Require().NoError(err)
s.Run("Returns error when not found", func() {
_, err := m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.Error(err)
})
s.Run("Returns environment when it was added before", func() {
expectedEnvironment, err :=
NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
expectedEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
s.Require().NoError(err)
runnerManager.StoreEnvironment(expectedEnvironment)
environment, err := m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.NoError(err)
s.Equal(expectedEnvironment, environment)
err = environment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
})
s.Run("Fetch", func() {
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
s.Run("Returns error when not found", func() {
_, err := m.Get(tests.DefaultEnvironmentIDAsInteger, true)
s.Error(err)
})
s.Run("Updates values when environment already known by Poseidon", func() {
fetchedEnvironment, err := NewNomadEnvironment(
tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
fetchedEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
fetchedEnvironment.SetImage("random docker image")
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{[]*nomadApi.Job{fetchedEnvironment.job}, nil}
})
localEnvironment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
localEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
runnerManager.StoreEnvironment(localEnvironment)
environment, err := m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.NoError(err)
s.NotEqual(fetchedEnvironment.Image(), environment.Image())
environment, err = m.Get(tests.DefaultEnvironmentIDAsInteger, true)
s.NoError(err)
s.Equal(fetchedEnvironment.Image(), environment.Image())
err = fetchedEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
err = environment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
err = localEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
})
runnerManager.DeleteEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Run("Adds environment when not already known by Poseidon", func() {
fetchedEnvironment, err := NewNomadEnvironment(
tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
fetchedEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
fetchedEnvironment.SetImage("random docker image")
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{[]*nomadApi.Job{fetchedEnvironment.job}, nil}
})
_, err = m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.Error(err)
environment, err := m.Get(tests.DefaultEnvironmentIDAsInteger, true)
s.NoError(err)
s.Equal(fetchedEnvironment.Image(), environment.Image())
err = fetchedEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
err = environment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
})
})
}
func (s *MainTestSuite) TestNomadEnvironmentManager_List() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
mockWatchAllocations(s.TestCtx, apiMock)
call := apiMock.On("LoadEnvironmentJobs")
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{[]*nomadApi.Job{}, nil}
})
runnerManager := runner.NewNomadRunnerManager(apiMock, s.TestCtx)
m, err := NewNomadEnvironmentManager(runnerManager, apiMock, "")
s.Require().NoError(err)
s.Run("with no environments", func() {
environments, err := m.List(true)
s.NoError(err)
s.Empty(environments)
})
s.Run("Returns added environment", func() {
localEnvironment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
localEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
runnerManager.StoreEnvironment(localEnvironment)
environments, err := m.List(false)
s.NoError(err)
s.Equal(1, len(environments))
s.Equal(localEnvironment, environments[0])
err = localEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
})
runnerManager.DeleteEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Run("Fetches new Runners via the api client", func() {
fetchedEnvironment, err :=
NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
fetchedEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
status := structs.JobStatusRunning
fetchedEnvironment.job.Status = &status
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{[]*nomadApi.Job{fetchedEnvironment.job}, nil}
})
environments, err := m.List(false)
s.NoError(err)
s.Empty(environments)
environments, err = m.List(true)
s.NoError(err)
s.Equal(1, len(environments))
nomadEnvironment, ok := environments[0].(*NomadEnvironment)
s.True(ok)
s.Equal(fetchedEnvironment.job, nomadEnvironment.job)
err = fetchedEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
err = nomadEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
})
}
func (s *MainTestSuite) TestNomadEnvironmentManager_Load() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
mockWatchAllocations(s.TestCtx, apiMock)
call := apiMock.On("LoadEnvironmentJobs")
apiMock.On("LoadRunnerJobs", mock.AnythingOfType("dto.EnvironmentID")).
Return([]*nomadApi.Job{}, nil)
runnerManager := runner.NewNomadRunnerManager(apiMock, s.TestCtx)
s.Run("deletes local environments before loading Nomad environments", func() {
call.Return([]*nomadApi.Job{}, nil)
environment := &runner.ExecutionEnvironmentMock{}
environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
environment.On("Image").Return("")
environment.On("CPULimit").Return(uint(0))
environment.On("MemoryLimit").Return(uint(0))
environment.On("NetworkAccess").Return(false, nil)
environment.On("Delete", mock.Anything).Return(nil)
runnerManager.StoreEnvironment(environment)
m, err := NewNomadEnvironmentManager(runnerManager, apiMock, "")
s.Require().NoError(err)
err = m.load()
s.Require().NoError(err)
environment.AssertExpectations(s.T())
})
runnerManager.DeleteEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Run("Stores fetched environments", func() {
_, job := helpers.CreateTemplateJob()
call.Return([]*nomadApi.Job{job}, nil)
_, ok := runnerManager.GetEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Require().False(ok)
m, err := NewNomadEnvironmentManager(runnerManager, apiMock, "")
s.Require().NoError(err)
err = m.load()
s.Require().NoError(err)
environment, ok := runnerManager.GetEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Require().True(ok)
s.Equal("python:latest", environment.Image())
err = environment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
})
runnerManager.DeleteEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Run("Processes only running environments", func() {
_, job := helpers.CreateTemplateJob()
jobStatus := structs.JobStatusDead
job.Status = &jobStatus
call.Return([]*nomadApi.Job{job}, nil)
_, ok := runnerManager.GetEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Require().False(ok)
_, err := NewNomadEnvironmentManager(runnerManager, apiMock, "")
s.Require().NoError(err)
_, ok = runnerManager.GetEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Require().False(ok)
})
}
func (s *MainTestSuite) TestNomadEnvironmentManager_KeepEnvironmentsSynced() {
apiMock := &nomad.ExecutorAPIMock{}
runnerManager := runner.NewNomadRunnerManager(apiMock, s.TestCtx)
m, err := NewNomadEnvironmentManager(runnerManager, apiMock, "")
s.Require().NoError(err)
s.Run("stops when context is done", func() {
apiMock.On("LoadEnvironmentJobs").Return([]*nomadApi.Job{}, context.DeadlineExceeded)
ctx, cancel := context.WithCancel(s.TestCtx)
cancel()
var done bool
go func() {
<-time.After(tests.ShortTimeout)
if !done {
s.FailNow("KeepEnvironmentsSynced is ignoring the context")
}
}()
m.KeepEnvironmentsSynced(func(_ context.Context) error { return nil }, ctx)
done = true
})
apiMock.ExpectedCalls = []*mock.Call{}
apiMock.Calls = []mock.Call{}
s.Run("retries loading environments", func() {
ctx, cancel := context.WithCancel(s.TestCtx)
apiMock.On("LoadEnvironmentJobs").Return([]*nomadApi.Job{}, context.DeadlineExceeded).Once()
apiMock.On("LoadEnvironmentJobs").Return([]*nomadApi.Job{}, nil).Run(func(_ mock.Arguments) {
cancel()
}).Once()
m.KeepEnvironmentsSynced(func(_ context.Context) error { return nil }, ctx)
apiMock.AssertExpectations(s.T())
})
apiMock.ExpectedCalls = []*mock.Call{}
apiMock.Calls = []mock.Call{}
s.Run("retries synchronizing runners", func() {
apiMock.On("LoadEnvironmentJobs").Return([]*nomadApi.Job{}, nil)
ctx, cancel := context.WithCancel(s.TestCtx)
count := 0
synchronizeRunners := func(ctx context.Context) error {
count++
if count >= 2 {
cancel()
return nil
}
return context.DeadlineExceeded
}
m.KeepEnvironmentsSynced(synchronizeRunners, ctx)
if count < 2 {
s.Fail("KeepEnvironmentsSynced is not retrying to synchronize the runners")
}
})
}
func mockWatchAllocations(ctx context.Context, apiMock *nomad.ExecutorAPIMock) {
call := apiMock.On("WatchEventStream", mock.Anything, mock.Anything, mock.Anything)
call.Run(func(args mock.Arguments) {
<-ctx.Done()
call.ReturnArguments = mock.Arguments{nil}
})
}
func createTempFile(t *testing.T, content string) *os.File {
t.Helper()
f, err := os.CreateTemp("", "test")
require.NoError(t, err)
n, err := f.WriteString(content)
require.NoError(t, err)
require.Equal(t, len(content), n)
return f
}