Implement routes to list, get and delete execution environments
* #9 Implement routes to list, get and delete execution environments. A refactoring was required to introduce the ExecutionEnvironment interface.
* Fix MR comments, linting issues and a bug that led to an e2e test failure
* Add e2e tests
* Add unit tests
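Note: the hunks below cover only the Nomad layer of this change; the HTTP routes named in the commit message live in files outside this excerpt. Purely as an illustration of the list/get/delete pattern the message describes, a client might exercise such routes as sketched here; the base URL, port and paths are assumptions and are not taken from this commit.

package main

import (
	"fmt"
	"net/http"
)

// Hypothetical client calls against the new execution environment routes.
// Route layout and address are assumed for illustration only.
func main() {
	base := "http://localhost:7200/api/v1/execution-environments"

	// List all execution environments.
	if resp, err := http.Get(base); err == nil {
		fmt.Println("list:", resp.Status)
		resp.Body.Close()
	}

	// Get a single execution environment by its ID.
	if resp, err := http.Get(base + "/42"); err == nil {
		fmt.Println("get:", resp.Status)
		resp.Body.Close()
	}

	// Delete an execution environment by its ID.
	if req, err := http.NewRequest(http.MethodDelete, base+"/42", nil); err == nil {
		if resp, err := http.DefaultClient.Do(req); err == nil {
			fmt.Println("delete:", resp.Status)
			resp.Body.Close()
		}
	}
}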
@@ -27,8 +27,8 @@ type apiQuerier interface {
// SetJobScale sets the scaling count of the passed job to Nomad.
SetJobScale(jobID string, count uint, reason string) (err error)

// DeleteRunner deletes the runner with the given ID.
DeleteRunner(runnerID string) (err error)
// DeleteJob deletes the Job with the given ID.
DeleteJob(jobID string) (err error)

// Execute runs a command in the passed job.
Execute(jobID string, ctx context.Context, command []string, tty bool,

@@ -82,8 +82,8 @@ func (nc *nomadAPIClient) init(nomadConfig *config.Nomad) (err error) {
return nil
}

func (nc *nomadAPIClient) DeleteRunner(runnerID string) (err error) {
_, _, err = nc.client.Jobs().Deregister(runnerID, true, nc.writeOptions())
func (nc *nomadAPIClient) DeleteJob(jobID string) (err error) {
_, _, err = nc.client.Jobs().Deregister(jobID, true, nc.writeOptions())
return
}

@@ -42,7 +42,7 @@ func (_m *apiQuerierMock) AllocationStream(ctx context.Context) (<-chan *api.Eve
}

// DeleteRunner provides a mock function with given fields: runnerID
func (_m *apiQuerierMock) DeleteRunner(runnerID string) error {
func (_m *apiQuerierMock) DeleteJob(runnerID string) error {
ret := _m.Called(runnerID)

var r0 error

@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
// Code generated by mockery v2.9.4. DO NOT EDIT.

package nomad

@@ -41,13 +41,13 @@ func (_m *ExecutorAPIMock) AllocationStream(ctx context.Context) (<-chan *api.Ev
return r0, r1
}

// DeleteRunner provides a mock function with given fields: runnerID
func (_m *ExecutorAPIMock) DeleteRunner(runnerID string) error {
ret := _m.Called(runnerID)
// DeleteJob provides a mock function with given fields: jobID
func (_m *ExecutorAPIMock) DeleteJob(jobID string) error {
ret := _m.Called(jobID)

var r0 error
if rf, ok := ret.Get(0).(func(string) error); ok {
r0 = rf(runnerID)
r0 = rf(jobID)
} else {
r0 = ret.Error(0)
}

@@ -319,29 +319,6 @@ func (_m *ExecutorAPIMock) RegisterRunnerJob(template *api.Job) error {
return r0
}

// RegisterTemplateJob provides a mock function with given fields: defaultJob, id, prewarmingPoolSize, cpuLimit, memoryLimit, image, networkAccess, exposedPorts
func (_m *ExecutorAPIMock) RegisterTemplateJob(defaultJob *api.Job, id string, prewarmingPoolSize uint, cpuLimit uint, memoryLimit uint, image string, networkAccess bool, exposedPorts []uint16) (*api.Job, error) {
ret := _m.Called(defaultJob, id, prewarmingPoolSize, cpuLimit, memoryLimit, image, networkAccess, exposedPorts)

var r0 *api.Job
if rf, ok := ret.Get(0).(func(*api.Job, string, uint, uint, uint, string, bool, []uint16) *api.Job); ok {
r0 = rf(defaultJob, id, prewarmingPoolSize, cpuLimit, memoryLimit, image, networkAccess, exposedPorts)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*api.Job)
}
}

var r1 error
if rf, ok := ret.Get(1).(func(*api.Job, string, uint, uint, uint, string, bool, []uint16) error); ok {
r1 = rf(defaultJob, id, prewarmingPoolSize, cpuLimit, memoryLimit, image, networkAccess, exposedPorts)
} else {
r1 = ret.Error(1)
}

return r0, r1
}

// SetJobScale provides a mock function with given fields: jobID, count, reason
func (_m *ExecutorAPIMock) SetJobScale(jobID string, count uint, reason string) error {
ret := _m.Called(jobID, count, reason)

@@ -5,7 +5,9 @@ import (
"errors"
"fmt"
nomadApi "github.com/hashicorp/nomad/api"
"github.com/openHPI/poseidon/pkg/dto"
"strconv"
"strings"
)

const (

@@ -24,73 +26,19 @@ const (
ConfigMetaUnusedValue = "false"
ConfigMetaTimeoutKey = "timeout"
ConfigMetaPoolSizeKey = "prewarmingPoolSize"
TemplateJobNameParts = 2
)

var (
TaskArgs = []string{"infinity"}
ErrorConfigTaskGroupNotFound = errors.New("config task group not found in job")
ErrorInvalidJobID = errors.New("invalid job id")
TaskArgs = []string{"infinity"}
)

// FindConfigTaskGroup returns the config task group of a job.
// The config task group should be included in all jobs.
func FindConfigTaskGroup(job *nomadApi.Job) *nomadApi.TaskGroup {
for _, tg := range job.TaskGroups {
if *tg.Name == ConfigTaskGroupName {
return tg
}
}
return nil
}

func SetMetaConfigValue(job *nomadApi.Job, key, value string) error {
configTaskGroup := FindConfigTaskGroup(job)
if configTaskGroup == nil {
return ErrorConfigTaskGroupNotFound
}
configTaskGroup.Meta[key] = value
return nil
}

// RegisterTemplateJob creates a Nomad job based on the default job configuration and the given parameters.
// It registers the job with Nomad and waits until the registration completes.
func (a *APIClient) RegisterTemplateJob(
basisJob *nomadApi.Job,
id string,
prewarmingPoolSize, cpuLimit, memoryLimit uint,
image string,
networkAccess bool,
exposedPorts []uint16) (*nomadApi.Job, error) {
job := CreateTemplateJob(basisJob, id, prewarmingPoolSize,
cpuLimit, memoryLimit, image, networkAccess, exposedPorts)
evalID, err := a.apiQuerier.RegisterNomadJob(job)
if err != nil {
return nil, fmt.Errorf("couldn't register template job: %w", err)
}
return job, a.MonitorEvaluation(evalID, context.Background())
}

// CreateTemplateJob creates a Nomad job based on the default job configuration and the given parameters.
// It registers the job with Nomad and waits until the registration completes.
func CreateTemplateJob(
basisJob *nomadApi.Job,
id string,
prewarmingPoolSize, cpuLimit, memoryLimit uint,
image string,
networkAccess bool,
exposedPorts []uint16) *nomadApi.Job {
job := *basisJob
job.ID = &id
job.Name = &id

var taskGroup = createTaskGroup(&job, TaskGroupName)
configureTask(taskGroup, TaskName, cpuLimit, memoryLimit, image, networkAccess, exposedPorts)
storeTemplateConfiguration(&job, prewarmingPoolSize)

return &job
}

func (a *APIClient) RegisterRunnerJob(template *nomadApi.Job) error {
storeRunnerConfiguration(template)
taskGroup := FindOrCreateConfigTaskGroup(template)

taskGroup.Meta = make(map[string]string)
taskGroup.Meta[ConfigMetaUsedKey] = ConfigMetaUnusedValue

evalID, err := a.apiQuerier.RegisterNomadJob(template)
if err != nil {
@@ -99,126 +47,37 @@ func (a *APIClient) RegisterRunnerJob(template *nomadApi.Job) error {
return a.MonitorEvaluation(evalID, context.Background())
}

func createTaskGroup(job *nomadApi.Job, name string) *nomadApi.TaskGroup {
var taskGroup *nomadApi.TaskGroup
if len(job.TaskGroups) == 0 {
taskGroup = nomadApi.NewTaskGroup(name, TaskCount)
job.TaskGroups = []*nomadApi.TaskGroup{taskGroup}
} else {
taskGroup = job.TaskGroups[0]
taskGroup.Name = &name
count := TaskCount
taskGroup.Count = &count
func FindTaskGroup(job *nomadApi.Job, name string) *nomadApi.TaskGroup {
for _, tg := range job.TaskGroups {
if *tg.Name == name {
return tg
}
}
return nil
}

func FindOrCreateDefaultTaskGroup(job *nomadApi.Job) *nomadApi.TaskGroup {
taskGroup := FindTaskGroup(job, TaskGroupName)
if taskGroup == nil {
taskGroup = nomadApi.NewTaskGroup(TaskGroupName, TaskCount)
job.AddTaskGroup(taskGroup)
}
FindOrCreateDefaultTask(taskGroup)
return taskGroup
}

const portNumberBase = 10

func configureNetwork(taskGroup *nomadApi.TaskGroup, networkAccess bool, exposedPorts []uint16) {
if len(taskGroup.Tasks) == 0 {
// This function is only used internally and must be called as last step when configuring the task.
// This error is not recoverable.
log.Fatal("Can't configure network before task has been configured!")
}
task := taskGroup.Tasks[0]

if task.Config == nil {
task.Config = make(map[string]interface{})
}

if networkAccess {
var networkResource *nomadApi.NetworkResource
if len(taskGroup.Networks) == 0 {
networkResource = &nomadApi.NetworkResource{}
taskGroup.Networks = []*nomadApi.NetworkResource{networkResource}
} else {
networkResource = taskGroup.Networks[0]
}
// Prefer "bridge" network over "host" to have an isolated network namespace with bridged interface
// instead of joining the host network namespace.
networkResource.Mode = "bridge"
for _, portNumber := range exposedPorts {
port := nomadApi.Port{
Label: strconv.FormatUint(uint64(portNumber), portNumberBase),
To: int(portNumber),
}
networkResource.DynamicPorts = append(networkResource.DynamicPorts, port)
}

// Explicitly set mode to override existing settings when updating job from without to with network.
// Don't use bridge as it collides with the bridge mode above. This results in Docker using 'bridge'
// mode, meaning all allocations will be attached to the `docker0` adapter and could reach other
// non-Nomad containers attached to it. This is avoided when using Nomads bridge network mode.
task.Config["network_mode"] = ""
} else {
// Somehow, we can't set the network mode to none in the NetworkResource on task group level.
// See https://github.com/hashicorp/nomad/issues/10540
task.Config["network_mode"] = "none"
// Explicitly set Networks to signal Nomad to remove the possibly existing networkResource
taskGroup.Networks = []*nomadApi.NetworkResource{}
}
}

func configureTask(
taskGroup *nomadApi.TaskGroup,
name string,
cpuLimit, memoryLimit uint,
image string,
networkAccess bool,
exposedPorts []uint16) {
var task *nomadApi.Task
if len(taskGroup.Tasks) == 0 {
task = nomadApi.NewTask(name, TaskDriver)
taskGroup.Tasks = []*nomadApi.Task{task}
} else {
task = taskGroup.Tasks[0]
task.Name = name
}
integerCPULimit := int(cpuLimit)
integerMemoryLimit := int(memoryLimit)
if task.Resources == nil {
task.Resources = nomadApi.DefaultResources()
}
task.Resources.CPU = &integerCPULimit
task.Resources.MemoryMB = &integerMemoryLimit

if task.Config == nil {
task.Config = make(map[string]interface{})
}
task.Config["image"] = image
task.Config["command"] = TaskCommand
task.Config["args"] = TaskArgs

configureNetwork(taskGroup, networkAccess, exposedPorts)
}

func storeTemplateConfiguration(job *nomadApi.Job, prewarmingPoolSize uint) {
taskGroup := findOrCreateConfigTaskGroup(job)

taskGroup.Meta = make(map[string]string)
taskGroup.Meta[ConfigMetaPoolSizeKey] = strconv.Itoa(int(prewarmingPoolSize))
}

func storeRunnerConfiguration(job *nomadApi.Job) {
taskGroup := findOrCreateConfigTaskGroup(job)

taskGroup.Meta = make(map[string]string)
taskGroup.Meta[ConfigMetaUsedKey] = ConfigMetaUnusedValue
}

func findOrCreateConfigTaskGroup(job *nomadApi.Job) *nomadApi.TaskGroup {
taskGroup := FindConfigTaskGroup(job)
func FindOrCreateConfigTaskGroup(job *nomadApi.Job) *nomadApi.TaskGroup {
taskGroup := FindTaskGroup(job, ConfigTaskGroupName)
if taskGroup == nil {
taskGroup = nomadApi.NewTaskGroup(ConfigTaskGroupName, 0)
job.AddTaskGroup(taskGroup)
}
createConfigTaskIfNotPresent(taskGroup)
FindOrCreateConfigTask(taskGroup)
return taskGroup
}

// createConfigTaskIfNotPresent ensures that a dummy task is in the task group so that the group is accepted by Nomad.
func createConfigTaskIfNotPresent(taskGroup *nomadApi.TaskGroup) {
// FindOrCreateConfigTask ensures that a dummy task is in the task group so that the group is accepted by Nomad.
func FindOrCreateConfigTask(taskGroup *nomadApi.TaskGroup) *nomadApi.Task {
var task *nomadApi.Task
for _, t := range taskGroup.Tasks {
if t.Name == ConfigTaskName {
@@ -236,4 +95,75 @@ func createConfigTaskIfNotPresent(taskGroup *nomadApi.TaskGroup) {
task.Config = make(map[string]interface{})
}
task.Config["command"] = ConfigTaskCommand
return task
}

// FindOrCreateDefaultTask ensures that a default task is in the task group in that the executions are made.
func FindOrCreateDefaultTask(taskGroup *nomadApi.TaskGroup) *nomadApi.Task {
var task *nomadApi.Task
for _, t := range taskGroup.Tasks {
if t.Name == TaskName {
task = t
break
}
}

if task == nil {
task = nomadApi.NewTask(TaskName, TaskDriver)
taskGroup.Tasks = append(taskGroup.Tasks, task)
}

if task.Resources == nil {
task.Resources = nomadApi.DefaultResources()
}

if task.Config == nil {
task.Config = make(map[string]interface{})
}
task.Config["command"] = TaskCommand
task.Config["args"] = TaskArgs
return task
}

// IsEnvironmentTemplateID checks if the passed job id belongs to a template job.
func IsEnvironmentTemplateID(jobID string) bool {
parts := strings.Split(jobID, "-")
if len(parts) != TemplateJobNameParts || parts[0] != TemplateJobPrefix {
return false
}

_, err := EnvironmentIDFromTemplateJobID(jobID)
return err == nil
}

// RunnerJobID returns the nomad job id of the runner with the given environmentID and id.
func RunnerJobID(environmentID dto.EnvironmentID, id string) string {
return fmt.Sprintf("%d-%s", environmentID, id)
}

// TemplateJobID returns the id of the nomad job for the environment with the given id.
func TemplateJobID(id dto.EnvironmentID) string {
return fmt.Sprintf("%s-%d", TemplateJobPrefix, id)
}

// EnvironmentIDFromRunnerID returns the environment id that is part of the passed runner job id.
func EnvironmentIDFromRunnerID(jobID string) (dto.EnvironmentID, error) {
return partOfJobID(jobID, 0)
}

// EnvironmentIDFromTemplateJobID returns the environment id that is part of the passed environment job id.
func EnvironmentIDFromTemplateJobID(id string) (dto.EnvironmentID, error) {
return partOfJobID(id, 1)
}

func partOfJobID(id string, part uint) (dto.EnvironmentID, error) {
parts := strings.Split(id, "-")
if len(parts) == 0 {
return 0, fmt.Errorf("empty job id: %w", ErrorInvalidJobID)
}
environmentID, err := strconv.Atoi(parts[part])
if err != nil {
return 0, fmt.Errorf("invalid environment id par %v: %w", err, ErrorInvalidJobID)
}
return dto.EnvironmentID(environmentID), nil
}
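
As an aside, the ID helpers above define the naming scheme that ties runner and template jobs to their execution environment: runner jobs are named "<environmentID>-<UUID>", template jobs "template-<environmentID>". A minimal usage sketch in the same nomad package, consistent with the unit tests in the next hunk (the sketch itself is illustrative and not part of the commit):

package nomad

import (
	"fmt"

	"github.com/openHPI/poseidon/pkg/dto"
)

// jobIDExample demonstrates the job ID helpers; it is illustrative only.
func jobIDExample() {
	runnerJob := RunnerJobID(dto.EnvironmentID(0), "RANDOM-UUID") // "0-RANDOM-UUID"
	templateJob := TemplateJobID(dto.EnvironmentID(42))           // "template-42"

	environmentID, err := EnvironmentIDFromRunnerID(runnerJob) // 0, nil
	fmt.Println(runnerJob, templateJob, environmentID, err)

	fmt.Println(IsEnvironmentTemplateID(templateJob)) // true
	fmt.Println(IsEnvironmentTemplateID(runnerJob))   // false
}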

@@ -1,279 +1,121 @@
package nomad

import (
"fmt"
nomadApi "github.com/hashicorp/nomad/api"
"github.com/openHPI/poseidon/tests"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests/helpers"
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"strconv"
"testing"
)

func createTestTaskGroup() *nomadApi.TaskGroup {
return nomadApi.NewTaskGroup("taskGroup", 1)
}

func createTestTask() *nomadApi.Task {
return nomadApi.NewTask("task", "docker")
}

func createTestResources() *nomadApi.Resources {
result := nomadApi.DefaultResources()
expectedCPULimit := 1337
expectedMemoryLimit := 42
result.CPU = &expectedCPULimit
result.MemoryMB = &expectedMemoryLimit
return result
}

func TestCreateTaskGroupCreatesNewTaskGroupWhenJobHasNoTaskGroup(t *testing.T) {
job := nomadApi.NewBatchJob("test", "test", "test", 1)

if assert.Equal(t, 0, len(job.TaskGroups)) {
expectedTaskGroup := createTestTaskGroup()
taskGroup := createTaskGroup(job, *expectedTaskGroup.Name)

assert.Equal(t, *expectedTaskGroup, *taskGroup)
assert.Equal(t, []*nomadApi.TaskGroup{taskGroup}, job.TaskGroups, "it should add the task group to the job")
}
}

func TestCreateTaskGroupOverwritesOptionsWhenJobHasTaskGroup(t *testing.T) {
job := nomadApi.NewBatchJob("test", "test", "test", 1)
existingTaskGroup := createTestTaskGroup()
existingTaskGroup.Meta = map[string]string{"field": "should still exist"}
newTaskGroupList := []*nomadApi.TaskGroup{existingTaskGroup}
job.TaskGroups = newTaskGroupList

newName := *existingTaskGroup.Name + "longerName"
taskGroup := createTaskGroup(job, newName)

// create a new copy to avoid changing the original one as it is a pointer
expectedTaskGroup := *existingTaskGroup
expectedTaskGroup.Name = &newName

assert.Equal(t, expectedTaskGroup, *taskGroup)
assert.Equal(t, newTaskGroupList, job.TaskGroups, "it should not modify the jobs task group list")
}

func TestConfigureNetworkFatalsWhenNoTaskExists(t *testing.T) {
logger, hook := test.NewNullLogger()
logger.ExitFunc = func(i int) {
panic(i)
}
log = logger.WithField("pkg", "job_test")
taskGroup := createTestTaskGroup()
if assert.Equal(t, 0, len(taskGroup.Tasks)) {
assert.Panics(t, func() {
configureNetwork(taskGroup, false, nil)
})
assert.Equal(t, logrus.FatalLevel, hook.LastEntry().Level)
}
}

func TestConfigureNetworkCreatesNewNetworkWhenNoNetworkExists(t *testing.T) {
taskGroup := createTestTaskGroup()
task := createTestTask()
taskGroup.Tasks = []*nomadApi.Task{task}

if assert.Equal(t, 0, len(taskGroup.Networks)) {
configureNetwork(taskGroup, true, []uint16{})

assert.Equal(t, 1, len(taskGroup.Networks))
}
}

func TestConfigureNetworkDoesNotCreateNewNetworkWhenNetworkExists(t *testing.T) {
taskGroup := createTestTaskGroup()
task := createTestTask()
taskGroup.Tasks = []*nomadApi.Task{task}
networkResource := &nomadApi.NetworkResource{Mode: "bridge"}
taskGroup.Networks = []*nomadApi.NetworkResource{networkResource}

if assert.Equal(t, 1, len(taskGroup.Networks)) {
configureNetwork(taskGroup, true, []uint16{})

assert.Equal(t, 1, len(taskGroup.Networks))
assert.Equal(t, networkResource, taskGroup.Networks[0])
}
}

func TestConfigureNetworkSetsCorrectValues(t *testing.T) {
taskGroup := createTestTaskGroup()
task := createTestTask()
_, ok := task.Config["network_mode"]

require.False(t, ok, "Test tasks network_mode should not be set")

taskGroup.Tasks = []*nomadApi.Task{task}
exposedPortsTests := [][]uint16{{}, {1337}, {42, 1337}}

t.Run("with no network access", func(t *testing.T) {
for _, ports := range exposedPortsTests {
testTaskGroup := *taskGroup
testTask := *task
testTaskGroup.Tasks = []*nomadApi.Task{&testTask}

configureNetwork(&testTaskGroup, false, ports)
mode, ok := testTask.Config["network_mode"]
assert.True(t, ok)
assert.Equal(t, "none", mode)
assert.Equal(t, 0, len(testTaskGroup.Networks))
}
func TestFindTaskGroup(t *testing.T) {
t.Run("Returns nil if task group not found", func(t *testing.T) {
group := FindTaskGroup(&nomadApi.Job{}, TaskGroupName)
assert.Nil(t, group)
})

t.Run("with network access", func(t *testing.T) {
for _, ports := range exposedPortsTests {
testTaskGroup := *taskGroup
testTask := *task
testTaskGroup.Tasks = []*nomadApi.Task{&testTask}

configureNetwork(&testTaskGroup, true, ports)
require.Equal(t, 1, len(testTaskGroup.Networks))

networkResource := testTaskGroup.Networks[0]
assert.Equal(t, "bridge", networkResource.Mode)
require.Equal(t, len(ports), len(networkResource.DynamicPorts))

assertExpectedPorts(t, ports, networkResource)

mode, ok := testTask.Config["network_mode"]
assert.True(t, ok)
assert.Equal(t, mode, "")
}
t.Run("Finds task group when existent", func(t *testing.T) {
_, job := helpers.CreateTemplateJob()
group := FindTaskGroup(job, TaskGroupName)
assert.NotNil(t, group)
})
}

func assertExpectedPorts(t *testing.T, expectedPorts []uint16, networkResource *nomadApi.NetworkResource) {
t.Helper()
for _, expectedPort := range expectedPorts {
found := false
for _, actualPort := range networkResource.DynamicPorts {
if actualPort.To == int(expectedPort) {
found = true
break
}
}
assert.True(t, found, fmt.Sprintf("port list should contain %v", expectedPort))
}
func TestFindOrCreateDefaultTask(t *testing.T) {
t.Run("Adds default task group when not set", func(t *testing.T) {
job := &nomadApi.Job{}
group := FindOrCreateDefaultTaskGroup(job)
assert.NotNil(t, group)
assert.Equal(t, TaskGroupName, *group.Name)
assert.Equal(t, 1, len(job.TaskGroups))
assert.Equal(t, group, job.TaskGroups[0])
assert.Equal(t, TaskCount, *group.Count)
})

t.Run("Does not modify task group when already set", func(t *testing.T) {
job := &nomadApi.Job{}
groupName := TaskGroupName
expectedGroup := &nomadApi.TaskGroup{Name: &groupName}
job.TaskGroups = []*nomadApi.TaskGroup{expectedGroup}

group := FindOrCreateDefaultTaskGroup(job)
assert.NotNil(t, group)
assert.Equal(t, 1, len(job.TaskGroups))
assert.Equal(t, expectedGroup, group)
})
}

func TestConfigureTaskWhenNoTaskExists(t *testing.T) {
taskGroup := createTestTaskGroup()
require.Equal(t, 0, len(taskGroup.Tasks))
func TestFindOrCreateConfigTaskGroup(t *testing.T) {
t.Run("Adds config task group when not set", func(t *testing.T) {
job := &nomadApi.Job{}
group := FindOrCreateConfigTaskGroup(job)
assert.NotNil(t, group)
assert.Equal(t, group, job.TaskGroups[0])
assert.Equal(t, 1, len(job.TaskGroups))

expectedResources := createTestResources()
expectedTaskGroup := *taskGroup
expectedTask := nomadApi.NewTask("task", TaskDriver)
expectedTask.Resources = expectedResources
expectedImage := "python:latest"
expectedCommand := "sleep"
expectedArgs := []string{"infinity"}
expectedTask.Config = map[string]interface{}{
"image": expectedImage, "command": expectedCommand, "args": expectedArgs, "network_mode": "none"}
expectedTaskGroup.Tasks = []*nomadApi.Task{expectedTask}
expectedTaskGroup.Networks = []*nomadApi.NetworkResource{}
assert.Equal(t, ConfigTaskGroupName, *group.Name)
assert.Equal(t, 0, *group.Count)
})

configureTask(taskGroup, expectedTask.Name,
uint(*expectedResources.CPU), uint(*expectedResources.MemoryMB),
expectedImage, false, []uint16{})
t.Run("Does not modify task group when already set", func(t *testing.T) {
job := &nomadApi.Job{}
groupName := ConfigTaskGroupName
expectedGroup := &nomadApi.TaskGroup{Name: &groupName}
job.TaskGroups = []*nomadApi.TaskGroup{expectedGroup}

assert.Equal(t, expectedTaskGroup, *taskGroup)
group := FindOrCreateConfigTaskGroup(job)
assert.NotNil(t, group)
assert.Equal(t, 1, len(job.TaskGroups))
assert.Equal(t, expectedGroup, group)
})
}

func TestConfigureTaskWhenTaskExists(t *testing.T) {
taskGroup := createTestTaskGroup()
task := createTestTask()
task.Config = map[string]interface{}{"my_custom_config": "should not be overwritten"}
taskGroup.Tasks = []*nomadApi.Task{task}
require.Equal(t, 1, len(taskGroup.Tasks))
func TestFindOrCreateTask(t *testing.T) {
t.Run("Does not modify default task when already set", func(t *testing.T) {
groupName := TaskGroupName
group := &nomadApi.TaskGroup{Name: &groupName}
expectedTask := &nomadApi.Task{Name: TaskName}
group.Tasks = []*nomadApi.Task{expectedTask}

expectedResources := createTestResources()
expectedTaskGroup := *taskGroup
expectedTask := *task
expectedTask.Resources = expectedResources
expectedImage := "python:latest"
expectedTask.Config["image"] = expectedImage
expectedTask.Config["network_mode"] = "none"
expectedTaskGroup.Tasks = []*nomadApi.Task{&expectedTask}
expectedTaskGroup.Networks = []*nomadApi.NetworkResource{}
task := FindOrCreateDefaultTask(group)
assert.NotNil(t, task)
assert.Equal(t, 1, len(group.Tasks))
assert.Equal(t, expectedTask, task)
})

configureTask(taskGroup, expectedTask.Name,
uint(*expectedResources.CPU), uint(*expectedResources.MemoryMB),
expectedImage, false, []uint16{})
t.Run("Does not modify config task when already set", func(t *testing.T) {
groupName := ConfigTaskGroupName
group := &nomadApi.TaskGroup{Name: &groupName}
expectedTask := &nomadApi.Task{Name: ConfigTaskName}
group.Tasks = []*nomadApi.Task{expectedTask}

assert.Equal(t, expectedTaskGroup, *taskGroup)
assert.Equal(t, task, taskGroup.Tasks[0], "it should not create a new task")
task := FindOrCreateConfigTask(group)
assert.NotNil(t, task)
assert.Equal(t, 1, len(group.Tasks))
assert.Equal(t, expectedTask, task)
})
}

func TestCreateTemplateJobSetsAllGivenArguments(t *testing.T) {
base, testJob := helpers.CreateTemplateJob()
prewarmingPoolSize, err := strconv.Atoi(testJob.TaskGroups[1].Meta[ConfigMetaPoolSizeKey])
require.NoError(t, err)
job := CreateTemplateJob(
base,
tests.DefaultJobID,
uint(prewarmingPoolSize),
uint(*testJob.TaskGroups[0].Tasks[0].Resources.CPU),
uint(*testJob.TaskGroups[0].Tasks[0].Resources.MemoryMB),
testJob.TaskGroups[0].Tasks[0].Config["image"].(string),
false,
nil,
)
assert.Equal(t, *testJob, *job)
func TestIsEnvironmentTemplateID(t *testing.T) {
assert.True(t, IsEnvironmentTemplateID("template-42"))
assert.False(t, IsEnvironmentTemplateID("template-42-100"))
assert.False(t, IsEnvironmentTemplateID("job-42"))
assert.False(t, IsEnvironmentTemplateID("template-top"))
}

func TestRegisterTemplateJobFailsWhenNomadJobRegistrationFails(t *testing.T) {
apiMock := apiQuerierMock{}
expectedErr := tests.ErrDefault

apiMock.On("RegisterNomadJob", mock.AnythingOfType("*api.Job")).Return("", expectedErr)

apiClient := &APIClient{&apiMock}

_, err := apiClient.RegisterTemplateJob(&nomadApi.Job{}, tests.DefaultJobID,
1, 2, 3, "image", false, []uint16{})
assert.ErrorIs(t, err, expectedErr)
apiMock.AssertNotCalled(t, "EvaluationStream")
func TestRunnerJobID(t *testing.T) {
assert.Equal(t, "0-RANDOM-UUID", RunnerJobID(0, "RANDOM-UUID"))
}

func TestRegisterTemplateJobSucceedsWhenMonitoringEvaluationSucceeds(t *testing.T) {
apiMock := apiQuerierMock{}
evaluationID := "id"
func TestTemplateJobID(t *testing.T) {
assert.Equal(t, "template-42", TemplateJobID(42))
}

stream := make(chan *nomadApi.Events)
readonlyStream := func() <-chan *nomadApi.Events {
return stream
}()
// Immediately close stream to avoid any reading from it resulting in endless wait
close(stream)

apiMock.On("RegisterNomadJob", mock.AnythingOfType("*api.Job")).Return(evaluationID, nil)
apiMock.On("EvaluationStream", evaluationID, mock.AnythingOfType("*context.emptyCtx")).
Return(readonlyStream, nil)

apiClient := &APIClient{&apiMock}

_, err := apiClient.RegisterTemplateJob(&nomadApi.Job{}, tests.DefaultJobID,
1, 2, 3, "image", false, []uint16{})
func TestEnvironmentIDFromRunnerID(t *testing.T) {
id, err := EnvironmentIDFromRunnerID("42-RANDOM-UUID")
assert.NoError(t, err)
}

func TestRegisterTemplateJobReturnsErrorWhenMonitoringEvaluationFails(t *testing.T) {
apiMock := apiQuerierMock{}
evaluationID := "id"

apiMock.On("RegisterNomadJob", mock.AnythingOfType("*api.Job")).Return(evaluationID, nil)
apiMock.On("EvaluationStream", evaluationID, mock.AnythingOfType("*context.emptyCtx")).Return(nil, tests.ErrDefault)

apiClient := &APIClient{&apiMock}

_, err := apiClient.RegisterTemplateJob(&nomadApi.Job{}, tests.DefaultJobID,
1, 2, 3, "image", false, []uint16{})
assert.ErrorIs(t, err, tests.ErrDefault)
assert.Equal(t, dto.EnvironmentID(42), id)

_, err = EnvironmentIDFromRunnerID("")
assert.Error(t, err)
}

@@ -25,7 +25,7 @@ var (

type AllocationProcessor func(*nomadApi.Allocation)

// ExecutorAPI provides access to an container orchestration solution.
// ExecutorAPI provides access to a container orchestration solution.
type ExecutorAPI interface {
apiQuerier

@@ -42,12 +42,6 @@ type ExecutorAPI interface {
// LoadRunnerPortMappings returns the mapped ports of the runner.
LoadRunnerPortMappings(runnerID string) ([]nomadApi.PortMapping, error)

// RegisterTemplateJob creates a template job based on the default job configuration and the given parameters.
// It registers the job and waits until the registration completes.
RegisterTemplateJob(defaultJob *nomadApi.Job, id string,
prewarmingPoolSize, cpuLimit, memoryLimit uint,
image string, networkAccess bool, exposedPorts []uint16) (*nomadApi.Job, error)

// RegisterRunnerJob creates a runner job based on the template job.
// It registers the job and waits until the registration completes.
RegisterRunnerJob(template *nomadApi.Job) error

@@ -278,14 +272,10 @@ func (a *APIClient) MarkRunnerAsUsed(runnerID string, duration int) error {
if err != nil {
return fmt.Errorf("couldn't retrieve job info: %w", err)
}
err = SetMetaConfigValue(job, ConfigMetaUsedKey, ConfigMetaUsedValue)
if err != nil {
return fmt.Errorf("couldn't update runner in job as used: %w", err)
}
err = SetMetaConfigValue(job, ConfigMetaTimeoutKey, strconv.Itoa(duration))
if err != nil {
return fmt.Errorf("couldn't update runner in job with timeout: %w", err)
}
configTaskGroup := FindOrCreateConfigTaskGroup(job)
configTaskGroup.Meta[ConfigMetaUsedKey] = ConfigMetaUsedValue
configTaskGroup.Meta[ConfigMetaTimeoutKey] = strconv.Itoa(duration)

_, err = a.RegisterNomadJob(job)
if err != nil {
return fmt.Errorf("couldn't update runner config: %w", err)

@@ -15,7 +15,6 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"io"
"net/url"
"regexp"
"strings"
"testing"

@@ -38,15 +37,15 @@ type LoadRunnersTestSuite struct {
}

func (s *LoadRunnersTestSuite) SetupTest() {
s.jobID = tests.DefaultJobID
s.jobID = tests.DefaultRunnerID

s.mock = &apiQuerierMock{}
s.nomadAPIClient = APIClient{apiQuerier: s.mock}

s.availableRunner = newJobListStub(tests.DefaultJobID, structs.JobStatusRunning, 1)
s.anotherAvailableRunner = newJobListStub(tests.AnotherJobID, structs.JobStatusRunning, 1)
s.pendingRunner = newJobListStub(tests.DefaultJobID+"-1", structs.JobStatusPending, 0)
s.deadRunner = newJobListStub(tests.AnotherJobID+"-1", structs.JobStatusDead, 0)
s.availableRunner = newJobListStub(tests.DefaultRunnerID, structs.JobStatusRunning, 1)
s.anotherAvailableRunner = newJobListStub(tests.AnotherRunnerID, structs.JobStatusRunning, 1)
s.pendingRunner = newJobListStub(tests.DefaultRunnerID+"-1", structs.JobStatusPending, 0)
s.deadRunner = newJobListStub(tests.AnotherRunnerID+"-1", structs.JobStatusDead, 0)
}

func newJobListStub(id, status string, amountRunning int) *nomadApi.JobListStub {

@@ -122,13 +121,6 @@ func (s *LoadRunnersTestSuite) TestReturnsAllAvailableRunners() {
s.Contains(returnedIds, s.anotherAvailableRunner.ID)
}

var (
TestURL = url.URL{
Scheme: "http",
Host: "127.0.0.1:4646",
}
)

const TestNamespace = "unit-tests"
const TestNomadToken = "n0m4d-t0k3n"
const TestDefaultAddress = "127.0.0.1"