Restructure project

We previously didn't really have any structure in our project apart
from creating a new folder for each package in the project root.
Now that we have accumulated some packages, we adopt the well-known
Go project layout to clearly communicate the intent of each package.
See https://github.com/golang-standards/project-layout
sirkrypt0
2021-07-16 09:19:42 +02:00
parent 2f1383b743
commit 8b26ecbe5f
66 changed files with 95 additions and 95 deletions
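
For orientation, the import paths touched by this commit map onto that layout roughly as follows. The tree is a sketch reconstructed from the paths visible in the diff below; directories that do not appear in the diff (such as cmd/) are assumptions based on the linked layout guide.

poseidon/
├── cmd/           # application entry points (assumed, per the layout guide)
├── internal/      # private application code
│   ├── nomad/     # Nomad executor API client
│   └── runner/    # runner management (the files added below)
├── pkg/           # code that may be imported by other projects
│   ├── dto/
│   └── logging/
└── tests/         # shared test constants and helpers
    └── helpers/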


@@ -0,0 +1,9 @@
package runner
import "gitlab.hpi.de/codeocean/codemoon/poseidon/tests"
const (
defaultEnvironmentID = EnvironmentID(tests.DefaultEnvironmentIDAsInteger)
anotherEnvironmentID = EnvironmentID(tests.AnotherEnvironmentIDAsInteger)
defaultInactivityTimeout = 0
)


@@ -0,0 +1,46 @@
package runner
import (
"gitlab.hpi.de/codeocean/codemoon/poseidon/pkg/dto"
"sync"
)
// ExecutionStorage stores executions.
type ExecutionStorage interface {
// Add adds an execution to the storage.
// It overwrites the existing execution if an execution with the same id already exists.
Add(id ExecutionID, executionRequest *dto.ExecutionRequest)
// Pop deletes the execution with the given id from the storage and returns it.
// If no such execution exists, ok is false; otherwise, ok is true.
Pop(id ExecutionID) (request *dto.ExecutionRequest, ok bool)
}
// localExecutionStorage stores execution objects in the local application memory.
// ToDo: Create an implementation that uses persistent storage, e.g. a database.
type localExecutionStorage struct {
sync.RWMutex
executions map[ExecutionID]*dto.ExecutionRequest
}
// NewLocalExecutionStorage returns a new ExecutionStorage implementation.
// This implementation stores the data thread-safely in the local application memory.
func NewLocalExecutionStorage() *localExecutionStorage {
return &localExecutionStorage{
executions: make(map[ExecutionID]*dto.ExecutionRequest),
}
}
func (s *localExecutionStorage) Add(id ExecutionID, executionRequest *dto.ExecutionRequest) {
s.Lock()
defer s.Unlock()
s.executions[id] = executionRequest
}
func (s *localExecutionStorage) Pop(id ExecutionID) (*dto.ExecutionRequest, bool) {
s.Lock()
defer s.Unlock()
request, ok := s.executions[id]
delete(s.executions, id)
return request, ok
}
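
A minimal usage sketch of the Add/Pop contract above (hypothetical code written as if inside package runner; the function name and the echo command are illustrative and not part of this commit, and log refers to the package-level logger defined in manager.go below):

func exampleExecutionStorageUsage() {
    storage := NewLocalExecutionStorage()
    id := ExecutionID("some-execution-id")
    storage.Add(id, &dto.ExecutionRequest{Command: "echo hello"})

    // Pop removes the execution, so each stored execution can be consumed exactly once.
    if request, ok := storage.Pop(id); ok {
        log.WithField("command", request.Command).Info("Popped execution")
    }

    // A second Pop with the same id returns ok == false.
    if _, ok := storage.Pop(id); !ok {
        log.Info("Execution already consumed")
    }
}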


@@ -0,0 +1,43 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
package runner
import (
time "time"
mock "github.com/stretchr/testify/mock"
)
// InactivityTimerMock is an autogenerated mock type for the InactivityTimer type
type InactivityTimerMock struct {
mock.Mock
}
// ResetTimeout provides a mock function with given fields:
func (_m *InactivityTimerMock) ResetTimeout() {
_m.Called()
}
// SetupTimeout provides a mock function with given fields: duration
func (_m *InactivityTimerMock) SetupTimeout(duration time.Duration) {
_m.Called(duration)
}
// StopTimeout provides a mock function with given fields:
func (_m *InactivityTimerMock) StopTimeout() {
_m.Called()
}
// TimeoutPassed provides a mock function with given fields:
func (_m *InactivityTimerMock) TimeoutPassed() bool {
ret := _m.Called()
var r0 bool
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}

internal/runner/manager.go

@@ -0,0 +1,406 @@
package runner
import (
"context"
"errors"
"fmt"
"github.com/google/uuid"
nomadApi "github.com/hashicorp/nomad/api"
"github.com/sirupsen/logrus"
"gitlab.hpi.de/codeocean/codemoon/poseidon/internal/nomad"
"gitlab.hpi.de/codeocean/codemoon/poseidon/pkg/logging"
"strconv"
"strings"
"time"
)
var (
log = logging.GetLogger("runner")
ErrUnknownExecutionEnvironment = errors.New("execution environment not found")
ErrNoRunnersAvailable = errors.New("no runners available for this execution environment")
ErrRunnerNotFound = errors.New("no runner found with this id")
ErrorUpdatingExecutionEnvironment = errors.New("errors occurred when updating environment")
ErrorInvalidJobID = errors.New("invalid job id")
)
type EnvironmentID int
func NewEnvironmentID(id string) (EnvironmentID, error) {
environment, err := strconv.Atoi(id)
return EnvironmentID(environment), err
}
func (e EnvironmentID) toString() string {
return strconv.Itoa(int(e))
}
type NomadJobID string
// Manager keeps track of the used and unused runners of all execution environments in order to provide unused
// runners to new clients and ensure no runner is used twice.
type Manager interface {
// CreateOrUpdateEnvironment creates the given environment if it does not exist. Otherwise, it updates
// the existing environment and all runners. Iff a new Environment has been created, it returns true.
// Iff scale is true, runners are created until the desiredIdleRunnersCount is reached.
CreateOrUpdateEnvironment(id EnvironmentID, desiredIdleRunnersCount uint, templateJob *nomadApi.Job,
scale bool) (bool, error)
// Claim returns a new runner. The runner is deleted after duration seconds if duration is not 0.
// It makes sure that the runner is not in use yet and returns an error if no runner could be provided.
Claim(id EnvironmentID, duration int) (Runner, error)
// Get returns the used runner with the given runnerId.
// If no runner with the given runnerId is currently used, it returns an error.
Get(runnerID string) (Runner, error)
// Return signals that the runner is no longer used by the caller and can be claimed by someone else.
// The runner is deleted or cleaned up for reuse depending on the used executor.
Return(r Runner) error
// Load fetches all already created runners from the executor and registers them.
// It should be called during the startup process (e.g. on creation of the Manager).
Load()
}
type NomadRunnerManager struct {
apiClient nomad.ExecutorAPI
environments NomadEnvironmentStorage
usedRunners Storage
}
// NewNomadRunnerManager creates a new runner manager that keeps track of all runners.
// It uses the apiClient for all requests and runs a background task to keep the runners in sync with Nomad.
// If you cancel the context the background synchronization will be stopped.
func NewNomadRunnerManager(apiClient nomad.ExecutorAPI, ctx context.Context) *NomadRunnerManager {
m := &NomadRunnerManager{
apiClient,
NewLocalNomadEnvironmentStorage(),
NewLocalRunnerStorage(),
}
go m.keepRunnersSynced(ctx)
return m
}
type NomadEnvironment struct {
environmentID EnvironmentID
idleRunners Storage
desiredIdleRunnersCount uint
templateJob *nomadApi.Job
}
func (j *NomadEnvironment) ID() EnvironmentID {
return j.environmentID
}
func (m *NomadRunnerManager) CreateOrUpdateEnvironment(id EnvironmentID, desiredIdleRunnersCount uint,
templateJob *nomadApi.Job, scale bool) (bool, error) {
_, ok := m.environments.Get(id)
if !ok {
return true, m.registerEnvironment(id, desiredIdleRunnersCount, templateJob, scale)
}
return false, m.updateEnvironment(id, desiredIdleRunnersCount, templateJob, scale)
}
func (m *NomadRunnerManager) registerEnvironment(environmentID EnvironmentID, desiredIdleRunnersCount uint,
templateJob *nomadApi.Job, scale bool) error {
m.environments.Add(&NomadEnvironment{
environmentID,
NewLocalRunnerStorage(),
desiredIdleRunnersCount,
templateJob,
})
if scale {
err := m.scaleEnvironment(environmentID)
if err != nil {
return fmt.Errorf("couldn't upscale environment %w", err)
}
}
return nil
}
// updateEnvironment updates all runners of the specified environment. This is required as attributes like the
// CPULimit or MemoryMB could be changed in the new template job.
func (m *NomadRunnerManager) updateEnvironment(id EnvironmentID, desiredIdleRunnersCount uint,
newTemplateJob *nomadApi.Job, scale bool) error {
environment, ok := m.environments.Get(id)
if !ok {
return ErrUnknownExecutionEnvironment
}
environment.desiredIdleRunnersCount = desiredIdleRunnersCount
environment.templateJob = newTemplateJob
err := nomad.SetMetaConfigValue(newTemplateJob, nomad.ConfigMetaPoolSizeKey,
strconv.Itoa(int(desiredIdleRunnersCount)))
if err != nil {
return fmt.Errorf("update environment couldn't update template environment: %w", err)
}
err = m.updateRunnerSpecs(id, newTemplateJob)
if err != nil {
return err
}
if scale {
err = m.scaleEnvironment(id)
}
return err
}
func (m *NomadRunnerManager) updateRunnerSpecs(environmentID EnvironmentID, templateJob *nomadApi.Job) error {
runners, err := m.apiClient.LoadRunnerIDs(environmentID.toString())
if err != nil {
return fmt.Errorf("update environment couldn't load runners: %w", err)
}
var occurredError error
for _, id := range runners {
// avoid taking the address of the loop variable
runnerID := id
updatedRunnerJob := *templateJob
updatedRunnerJob.ID = &runnerID
updatedRunnerJob.Name = &runnerID
err := m.apiClient.RegisterRunnerJob(&updatedRunnerJob)
if err != nil {
if occurredError == nil {
occurredError = ErrorUpdatingExecutionEnvironment
}
occurredError = fmt.Errorf("%w; new api error for runner %s - %v", occurredError, id, err)
}
}
return occurredError
}
func (m *NomadRunnerManager) Claim(environmentID EnvironmentID, duration int) (Runner, error) {
environment, ok := m.environments.Get(environmentID)
if !ok {
return nil, ErrUnknownExecutionEnvironment
}
runner, ok := environment.idleRunners.Sample()
if !ok {
return nil, ErrNoRunnersAvailable
}
m.usedRunners.Add(runner)
err := m.apiClient.MarkRunnerAsUsed(runner.ID(), duration)
if err != nil {
return nil, fmt.Errorf("can't mark runner as used: %w", err)
}
runner.SetupTimeout(time.Duration(duration) * time.Second)
err = m.createRunner(environment)
if err != nil {
log.WithError(err).WithField("environmentID", environmentID).Error("Couldn't create new runner for claimed one")
}
return runner, nil
}
func (m *NomadRunnerManager) Get(runnerID string) (Runner, error) {
runner, ok := m.usedRunners.Get(runnerID)
if !ok {
return nil, ErrRunnerNotFound
}
return runner, nil
}
func (m *NomadRunnerManager) Return(r Runner) error {
r.StopTimeout()
err := m.apiClient.DeleteRunner(r.ID())
if err != nil {
return fmt.Errorf("error deleting runner in Nomad: %w", err)
}
m.usedRunners.Delete(r.ID())
return nil
}
func (m *NomadRunnerManager) Load() {
for _, environment := range m.environments.List() {
environmentLogger := log.WithField("environmentID", environment.ID())
runnerJobs, err := m.apiClient.LoadRunnerJobs(environment.ID().toString())
if err != nil {
environmentLogger.WithError(err).Warn("Error fetching the runner jobs")
}
for _, job := range runnerJobs {
m.loadSingleJob(job, environmentLogger, environment)
}
err = m.scaleEnvironment(environment.ID())
if err != nil {
environmentLogger.Error("Couldn't scale environment")
}
}
}
func (m *NomadRunnerManager) loadSingleJob(job *nomadApi.Job, environmentLogger *logrus.Entry,
environment *NomadEnvironment,
) {
configTaskGroup := nomad.FindConfigTaskGroup(job)
if configTaskGroup == nil {
environmentLogger.Infof("Couldn't find config task group in job %s, skipping ...", *job.ID)
return
}
isUsed := configTaskGroup.Meta[nomad.ConfigMetaUsedKey] == nomad.ConfigMetaUsedValue
portMappings, err := m.apiClient.LoadRunnerPortMappings(*job.ID)
if err != nil {
environmentLogger.WithError(err).Warn("Error loading runner portMappings")
return
}
newJob := NewNomadJob(*job.ID, portMappings, m.apiClient, m)
if isUsed {
m.usedRunners.Add(newJob)
timeout, err := strconv.Atoi(configTaskGroup.Meta[nomad.ConfigMetaTimeoutKey])
if err != nil {
environmentLogger.WithError(err).Warn("Error loading timeout from meta values")
} else {
newJob.SetupTimeout(time.Duration(timeout) * time.Second)
}
} else {
environment.idleRunners.Add(newJob)
}
}
func (m *NomadRunnerManager) keepRunnersSynced(ctx context.Context) {
retries := 0
for ctx.Err() == nil {
err := m.apiClient.WatchAllocations(ctx, m.onAllocationAdded, m.onAllocationStopped)
retries += 1
log.WithError(err).Errorf("Stopped updating the runners! Retry %v", retries)
<-time.After(time.Second)
}
}
func (m *NomadRunnerManager) onAllocationAdded(alloc *nomadApi.Allocation) {
log.WithField("id", alloc.JobID).Debug("Runner started")
if IsEnvironmentTemplateID(alloc.JobID) {
return
}
environmentID, err := EnvironmentIDFromJobID(alloc.JobID)
if err != nil {
log.WithError(err).Warn("Allocation could not be added")
return
}
job, ok := m.environments.Get(environmentID)
if ok {
var mappedPorts []nomadApi.PortMapping
if alloc.AllocatedResources != nil {
mappedPorts = alloc.AllocatedResources.Shared.Ports
}
job.idleRunners.Add(NewNomadJob(alloc.JobID, mappedPorts, m.apiClient, m))
}
}
func (m *NomadRunnerManager) onAllocationStopped(alloc *nomadApi.Allocation) {
log.WithField("id", alloc.JobID).Debug("Runner stopped")
environmentID, err := EnvironmentIDFromJobID(alloc.JobID)
if err != nil {
log.WithError(err).Warn("Stopped allocation can not be handled")
return
}
m.usedRunners.Delete(alloc.JobID)
job, ok := m.environments.Get(environmentID)
if ok {
job.idleRunners.Delete(alloc.JobID)
}
}
// scaleEnvironment makes sure that the number of idle runners is at least the desiredIdleRunnersCount.
func (m *NomadRunnerManager) scaleEnvironment(id EnvironmentID) error {
environment, ok := m.environments.Get(id)
if !ok {
return ErrUnknownExecutionEnvironment
}
required := int(environment.desiredIdleRunnersCount) - environment.idleRunners.Length()
if required > 0 {
return m.createRunners(environment, uint(required))
} else {
return m.removeRunners(environment, uint(-required))
}
}
func (m *NomadRunnerManager) createRunners(environment *NomadEnvironment, count uint) error {
log.WithField("runnersRequired", count).WithField("id", environment.ID()).Debug("Creating new runners")
for i := 0; i < int(count); i++ {
err := m.createRunner(environment)
if err != nil {
return fmt.Errorf("couldn't create new runner: %w", err)
}
}
return nil
}
func (m *NomadRunnerManager) createRunner(environment *NomadEnvironment) error {
newUUID, err := uuid.NewUUID()
if err != nil {
return fmt.Errorf("failed generating runner id: %w", err)
}
newRunnerID := RunnerJobID(environment.ID(), newUUID.String())
template := *environment.templateJob
template.ID = &newRunnerID
template.Name = &newRunnerID
err = m.apiClient.RegisterRunnerJob(&template)
if err != nil {
return fmt.Errorf("error registering new runner job: %w", err)
}
return nil
}
func (m *NomadRunnerManager) removeRunners(environment *NomadEnvironment, count uint) error {
log.WithField("runnersToDelete", count).WithField("id", environment.ID()).Debug("Removing idle runners")
for i := 0; i < int(count); i++ {
r, ok := environment.idleRunners.Sample()
if !ok {
return fmt.Errorf("could not delete expected idle runner: %w", ErrRunnerNotFound)
}
err := m.apiClient.DeleteRunner(r.ID())
if err != nil {
return fmt.Errorf("could not delete expected Nomad idle runner: %w", err)
}
}
return nil
}
// RunnerJobID returns the nomad job id of the runner with the given environmentID and id.
func RunnerJobID(environmentID EnvironmentID, id string) string {
return fmt.Sprintf("%d-%s", environmentID, id)
}
// EnvironmentIDFromJobID returns the environment id that is part of the passed job id.
func EnvironmentIDFromJobID(jobID string) (EnvironmentID, error) {
parts := strings.Split(jobID, "-")
if len(parts) == 0 {
return 0, fmt.Errorf("empty job id: %w", ErrorInvalidJobID)
}
environmentID, err := strconv.Atoi(parts[0])
if err != nil {
return 0, fmt.Errorf("invalid environment id par %v: %w", err, ErrorInvalidJobID)
}
return EnvironmentID(environmentID), nil
}
const templateJobNameParts = 2
// TemplateJobID returns the id of the template job for the environment with the given id.
func TemplateJobID(id EnvironmentID) string {
return fmt.Sprintf("%s-%d", nomad.TemplateJobPrefix, id)
}
// IsEnvironmentTemplateID checks if the passed job id belongs to a template job.
func IsEnvironmentTemplateID(jobID string) bool {
parts := strings.Split(jobID, "-")
return len(parts) == templateJobNameParts && parts[0] == nomad.TemplateJobPrefix
}
func EnvironmentIDFromTemplateJobID(id string) (string, error) {
parts := strings.Split(id, "-")
if len(parts) < templateJobNameParts {
return "", fmt.Errorf("invalid template job id: %w", ErrorInvalidJobID)
}
return parts[1], nil
}
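
For context, a hypothetical wiring sketch of the Manager interface above (not part of this commit). It assumes a configured nomad.ExecutorAPI client and a prepared template job, both created elsewhere in the application; the environment id, pool size, and timeout values are illustrative:

func exampleManagerUsage(apiClient nomad.ExecutorAPI, templateJob *nomadApi.Job) error {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // The manager starts a background goroutine that keeps runners in sync with Nomad.
    manager := NewNomadRunnerManager(apiClient, ctx)
    manager.Load() // register runners that already exist in Nomad

    // Create (or update) environment 42 and scale it to 5 idle runners.
    created, err := manager.CreateOrUpdateEnvironment(EnvironmentID(42), 5, templateJob, true)
    if err != nil {
        return fmt.Errorf("could not set up environment: %w", err)
    }
    log.WithField("created", created).Info("Environment ready")

    // Claim a runner with a 60 second inactivity timeout and return it when done.
    r, err := manager.Claim(EnvironmentID(42), 60)
    if err != nil {
        return fmt.Errorf("could not claim runner: %w", err)
    }
    return manager.Return(r)
}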


@@ -0,0 +1,99 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
package runner
import (
api "github.com/hashicorp/nomad/api"
mock "github.com/stretchr/testify/mock"
)
// ManagerMock is an autogenerated mock type for the Manager type
type ManagerMock struct {
mock.Mock
}
// Claim provides a mock function with given fields: id, duration
func (_m *ManagerMock) Claim(id EnvironmentID, duration int) (Runner, error) {
ret := _m.Called(id, duration)
var r0 Runner
if rf, ok := ret.Get(0).(func(EnvironmentID, int) Runner); ok {
r0 = rf(id, duration)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(Runner)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(EnvironmentID, int) error); ok {
r1 = rf(id, duration)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateOrUpdateEnvironment provides a mock function with given fields: id, desiredIdleRunnersCount, templateJob, scale
func (_m *ManagerMock) CreateOrUpdateEnvironment(id EnvironmentID, desiredIdleRunnersCount uint, templateJob *api.Job, scale bool) (bool, error) {
ret := _m.Called(id, desiredIdleRunnersCount, templateJob, scale)
var r0 bool
if rf, ok := ret.Get(0).(func(EnvironmentID, uint, *api.Job, bool) bool); ok {
r0 = rf(id, desiredIdleRunnersCount, templateJob, scale)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(EnvironmentID, uint, *api.Job, bool) error); ok {
r1 = rf(id, desiredIdleRunnersCount, templateJob, scale)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Get provides a mock function with given fields: runnerID
func (_m *ManagerMock) Get(runnerID string) (Runner, error) {
ret := _m.Called(runnerID)
var r0 Runner
if rf, ok := ret.Get(0).(func(string) Runner); ok {
r0 = rf(runnerID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(Runner)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(runnerID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Load provides a mock function with given fields:
func (_m *ManagerMock) Load() {
_m.Called()
}
// Return provides a mock function with given fields: r
func (_m *ManagerMock) Return(r Runner) error {
ret := _m.Called(r)
var r0 error
if rf, ok := ret.Get(0).(func(Runner) error); ok {
r0 = rf(r)
} else {
r0 = ret.Error(0)
}
return r0
}


@@ -0,0 +1,347 @@
package runner
import (
"context"
nomadApi "github.com/hashicorp/nomad/api"
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"gitlab.hpi.de/codeocean/codemoon/poseidon/internal/nomad"
"gitlab.hpi.de/codeocean/codemoon/poseidon/tests"
"gitlab.hpi.de/codeocean/codemoon/poseidon/tests/helpers"
"strconv"
"testing"
"time"
)
const (
defaultDesiredRunnersCount uint = 5
)
func TestGetNextRunnerTestSuite(t *testing.T) {
suite.Run(t, new(ManagerTestSuite))
}
type ManagerTestSuite struct {
suite.Suite
apiMock *nomad.ExecutorAPIMock
nomadRunnerManager *NomadRunnerManager
exerciseRunner Runner
}
func (s *ManagerTestSuite) SetupTest() {
s.apiMock = &nomad.ExecutorAPIMock{}
mockRunnerQueries(s.apiMock, []string{})
// Instantly closed context to manually start the update process in some cases
ctx, cancel := context.WithCancel(context.Background())
cancel()
s.nomadRunnerManager = NewNomadRunnerManager(s.apiMock, ctx)
s.exerciseRunner = NewRunner(tests.DefaultRunnerID, s.nomadRunnerManager)
s.registerDefaultEnvironment()
}
func mockRunnerQueries(apiMock *nomad.ExecutorAPIMock, returnedRunnerIds []string) {
// reset expected calls to allow new mocked return values
apiMock.ExpectedCalls = []*mock.Call{}
call := apiMock.On("WatchAllocations", mock.Anything, mock.Anything, mock.Anything)
call.Run(func(args mock.Arguments) {
<-time.After(10 * time.Minute) // 10 minutes is the default test timeout
call.ReturnArguments = mock.Arguments{nil}
})
apiMock.On("LoadEnvironmentJobs").Return([]*nomadApi.Job{}, nil)
apiMock.On("MarkRunnerAsUsed", mock.AnythingOfType("string"), mock.AnythingOfType("int")).Return(nil)
apiMock.On("LoadRunnerIDs", tests.DefaultJobID).Return(returnedRunnerIds, nil)
apiMock.On("JobScale", tests.DefaultJobID).Return(uint(len(returnedRunnerIds)), nil)
apiMock.On("SetJobScale", tests.DefaultJobID, mock.AnythingOfType("uint"), "Runner Requested").Return(nil)
apiMock.On("RegisterRunnerJob", mock.Anything).Return(nil)
apiMock.On("MonitorEvaluation", mock.Anything, mock.Anything).Return(nil)
}
func (s *ManagerTestSuite) registerDefaultEnvironment() {
err := s.nomadRunnerManager.registerEnvironment(defaultEnvironmentID, 0, &nomadApi.Job{}, true)
s.Require().NoError(err)
}
func (s *ManagerTestSuite) AddIdleRunnerForDefaultEnvironment(r Runner) {
job, _ := s.nomadRunnerManager.environments.Get(defaultEnvironmentID)
job.idleRunners.Add(r)
}
func (s *ManagerTestSuite) waitForRunnerRefresh() {
<-time.After(100 * time.Millisecond)
}
func (s *ManagerTestSuite) TestRegisterEnvironmentAddsNewJob() {
err := s.nomadRunnerManager.
registerEnvironment(anotherEnvironmentID, defaultDesiredRunnersCount, &nomadApi.Job{}, true)
s.Require().NoError(err)
job, ok := s.nomadRunnerManager.environments.Get(defaultEnvironmentID)
s.True(ok)
s.NotNil(job)
}
func (s *ManagerTestSuite) TestClaimReturnsNotFoundErrorIfEnvironmentNotFound() {
runner, err := s.nomadRunnerManager.Claim(EnvironmentID(42), defaultInactivityTimeout)
s.Nil(runner)
s.Equal(ErrUnknownExecutionEnvironment, err)
}
func (s *ManagerTestSuite) TestClaimReturnsRunnerIfAvailable() {
s.AddIdleRunnerForDefaultEnvironment(s.exerciseRunner)
receivedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.NoError(err)
s.Equal(s.exerciseRunner, receivedRunner)
}
func (s *ManagerTestSuite) TestClaimReturnsErrorIfNoRunnerAvailable() {
s.waitForRunnerRefresh()
runner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.Nil(runner)
s.Equal(ErrNoRunnersAvailable, err)
}
func (s *ManagerTestSuite) TestClaimReturnsNoRunnerOfDifferentEnvironment() {
s.AddIdleRunnerForDefaultEnvironment(s.exerciseRunner)
receivedRunner, err := s.nomadRunnerManager.Claim(anotherEnvironmentID, defaultInactivityTimeout)
s.Nil(receivedRunner)
s.Error(err)
}
func (s *ManagerTestSuite) TestClaimDoesNotReturnTheSameRunnerTwice() {
s.AddIdleRunnerForDefaultEnvironment(s.exerciseRunner)
s.AddIdleRunnerForDefaultEnvironment(NewRunner(tests.AnotherRunnerID, s.nomadRunnerManager))
firstReceivedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.NoError(err)
secondReceivedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.NoError(err)
s.NotEqual(firstReceivedRunner, secondReceivedRunner)
}
func (s *ManagerTestSuite) TestClaimThrowsAnErrorIfNoRunnersAvailable() {
receivedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.Nil(receivedRunner)
s.Error(err)
}
func (s *ManagerTestSuite) TestClaimAddsRunnerToUsedRunners() {
s.AddIdleRunnerForDefaultEnvironment(s.exerciseRunner)
receivedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.Require().NoError(err)
savedRunner, ok := s.nomadRunnerManager.usedRunners.Get(receivedRunner.ID())
s.True(ok)
s.Equal(savedRunner, receivedRunner)
}
func (s *ManagerTestSuite) TestTwoClaimsAddExactlyTwoRunners() {
s.AddIdleRunnerForDefaultEnvironment(s.exerciseRunner)
s.AddIdleRunnerForDefaultEnvironment(NewRunner(tests.AnotherRunnerID, s.nomadRunnerManager))
_, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.Require().NoError(err)
_, err = s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.Require().NoError(err)
s.apiMock.AssertNumberOfCalls(s.T(), "RegisterRunnerJob", 2)
}
func (s *ManagerTestSuite) TestGetReturnsRunnerIfRunnerIsUsed() {
s.nomadRunnerManager.usedRunners.Add(s.exerciseRunner)
savedRunner, err := s.nomadRunnerManager.Get(s.exerciseRunner.ID())
s.NoError(err)
s.Equal(savedRunner, s.exerciseRunner)
}
func (s *ManagerTestSuite) TestGetReturnsErrorIfRunnerNotFound() {
savedRunner, err := s.nomadRunnerManager.Get(tests.DefaultRunnerID)
s.Nil(savedRunner)
s.Error(err)
}
func (s *ManagerTestSuite) TestReturnRemovesRunnerFromUsedRunners() {
s.apiMock.On("DeleteRunner", mock.AnythingOfType("string")).Return(nil)
s.nomadRunnerManager.usedRunners.Add(s.exerciseRunner)
err := s.nomadRunnerManager.Return(s.exerciseRunner)
s.Nil(err)
_, ok := s.nomadRunnerManager.usedRunners.Get(s.exerciseRunner.ID())
s.False(ok)
}
func (s *ManagerTestSuite) TestReturnCallsDeleteRunnerApiMethod() {
s.apiMock.On("DeleteRunner", mock.AnythingOfType("string")).Return(nil)
err := s.nomadRunnerManager.Return(s.exerciseRunner)
s.Nil(err)
s.apiMock.AssertCalled(s.T(), "DeleteRunner", s.exerciseRunner.ID())
}
func (s *ManagerTestSuite) TestReturnReturnsErrorWhenApiCallFailed() {
s.apiMock.On("DeleteRunner", mock.AnythingOfType("string")).Return(tests.ErrDefault)
err := s.nomadRunnerManager.Return(s.exerciseRunner)
s.Error(err)
}
func (s *ManagerTestSuite) TestUpdateRunnersLogsErrorFromWatchAllocation() {
var hook *test.Hook
logger, hook := test.NewNullLogger()
log = logger.WithField("pkg", "runner")
modifyMockedCall(s.apiMock, "WatchAllocations", func(call *mock.Call) {
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{tests.ErrDefault}
})
})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go s.nomadRunnerManager.keepRunnersSynced(ctx)
<-time.After(10 * time.Millisecond)
s.Require().Equal(1, len(hook.Entries))
s.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
s.Equal(hook.LastEntry().Data[logrus.ErrorKey], tests.ErrDefault)
}
func (s *ManagerTestSuite) TestUpdateRunnersAddsIdleRunner() {
allocation := &nomadApi.Allocation{ID: tests.DefaultRunnerID}
environment, ok := s.nomadRunnerManager.environments.Get(defaultEnvironmentID)
s.Require().True(ok)
allocation.JobID = environment.environmentID.toString()
_, ok = environment.idleRunners.Get(allocation.ID)
s.Require().False(ok)
modifyMockedCall(s.apiMock, "WatchAllocations", func(call *mock.Call) {
call.Run(func(args mock.Arguments) {
onCreate, ok := args.Get(1).(nomad.AllocationProcessor)
s.Require().True(ok)
onCreate(allocation)
call.ReturnArguments = mock.Arguments{nil}
})
})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go s.nomadRunnerManager.keepRunnersSynced(ctx)
<-time.After(10 * time.Millisecond)
_, ok = environment.idleRunners.Get(allocation.JobID)
s.True(ok)
}
func (s *ManagerTestSuite) TestUpdateRunnersRemovesIdleAndUsedRunner() {
allocation := &nomadApi.Allocation{JobID: tests.DefaultJobID}
environment, ok := s.nomadRunnerManager.environments.Get(defaultEnvironmentID)
s.Require().True(ok)
testRunner := NewRunner(allocation.JobID, s.nomadRunnerManager)
environment.idleRunners.Add(testRunner)
s.nomadRunnerManager.usedRunners.Add(testRunner)
modifyMockedCall(s.apiMock, "WatchAllocations", func(call *mock.Call) {
call.Run(func(args mock.Arguments) {
onDelete, ok := args.Get(2).(nomad.AllocationProcessor)
s.Require().True(ok)
onDelete(allocation)
call.ReturnArguments = mock.Arguments{nil}
})
})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go s.nomadRunnerManager.keepRunnersSynced(ctx)
<-time.After(10 * time.Millisecond)
_, ok = environment.idleRunners.Get(allocation.JobID)
s.False(ok)
_, ok = s.nomadRunnerManager.usedRunners.Get(allocation.JobID)
s.False(ok)
}
func (s *ManagerTestSuite) TestUpdateEnvironmentRemovesIdleRunnersWhenScalingDown() {
_, job := helpers.CreateTemplateJob()
initialRunners := uint(40)
updatedRunners := uint(10)
err := s.nomadRunnerManager.registerEnvironment(anotherEnvironmentID, initialRunners, job, true)
s.Require().NoError(err)
s.apiMock.AssertNumberOfCalls(s.T(), "RegisterRunnerJob", int(initialRunners))
environment, ok := s.nomadRunnerManager.environments.Get(anotherEnvironmentID)
s.Require().True(ok)
for i := 0; i < int(initialRunners); i++ {
environment.idleRunners.Add(NewRunner("active-runner-"+strconv.Itoa(i), s.nomadRunnerManager))
}
s.apiMock.On("LoadRunnerIDs", anotherEnvironmentID.toString()).Return([]string{}, nil)
s.apiMock.On("DeleteRunner", mock.AnythingOfType("string")).Return(nil)
err = s.nomadRunnerManager.updateEnvironment(tests.AnotherEnvironmentIDAsInteger, updatedRunners, job, true)
s.Require().NoError(err)
s.apiMock.AssertNumberOfCalls(s.T(), "DeleteRunner", int(initialRunners-updatedRunners))
}
func modifyMockedCall(apiMock *nomad.ExecutorAPIMock, method string, modifier func(call *mock.Call)) {
for _, c := range apiMock.ExpectedCalls {
if c.Method == method {
modifier(c)
}
}
}
func (s *ManagerTestSuite) TestOnAllocationAdded() {
s.registerDefaultEnvironment()
s.Run("does not add environment template id job", func() {
alloc := &nomadApi.Allocation{JobID: TemplateJobID(tests.DefaultEnvironmentIDAsInteger)}
s.nomadRunnerManager.onAllocationAdded(alloc)
job, ok := s.nomadRunnerManager.environments.Get(tests.DefaultEnvironmentIDAsInteger)
s.True(ok)
s.Zero(job.idleRunners.Length())
})
s.Run("does not panic when environment id cannot be parsed", func() {
alloc := &nomadApi.Allocation{JobID: ""}
s.NotPanics(func() {
s.nomadRunnerManager.onAllocationAdded(alloc)
})
})
s.Run("does not panic when environment does not exist", func() {
nonExistentEnvironment := EnvironmentID(1234)
_, ok := s.nomadRunnerManager.environments.Get(nonExistentEnvironment)
s.Require().False(ok)
alloc := &nomadApi.Allocation{JobID: RunnerJobID(nonExistentEnvironment, "1-1-1-1")}
s.NotPanics(func() {
s.nomadRunnerManager.onAllocationAdded(alloc)
})
})
s.Run("adds correct job", func() {
s.Run("without allocated resources", func() {
alloc := &nomadApi.Allocation{
JobID: tests.DefaultJobID,
AllocatedResources: nil,
}
s.nomadRunnerManager.onAllocationAdded(alloc)
job, ok := s.nomadRunnerManager.environments.Get(tests.DefaultEnvironmentIDAsInteger)
s.True(ok)
runner, ok := job.idleRunners.Get(tests.DefaultJobID)
s.True(ok)
nomadJob, ok := runner.(*NomadJob)
s.True(ok)
s.Equal(nomadJob.id, tests.DefaultJobID)
s.Empty(nomadJob.portMappings)
})
s.Run("with mapped ports", func() {
alloc := &nomadApi.Allocation{
JobID: tests.DefaultJobID,
AllocatedResources: &nomadApi.AllocatedResources{
Shared: nomadApi.AllocatedSharedResources{Ports: tests.DefaultPortMappings},
},
}
s.nomadRunnerManager.onAllocationAdded(alloc)
job, ok := s.nomadRunnerManager.environments.Get(tests.DefaultEnvironmentIDAsInteger)
s.True(ok)
runner, ok := job.idleRunners.Get(tests.DefaultJobID)
s.True(ok)
nomadJob, ok := runner.(*NomadJob)
s.True(ok)
s.Equal(nomadJob.id, tests.DefaultJobID)
s.Equal(nomadJob.portMappings, tests.DefaultPortMappings)
})
})
}


@@ -0,0 +1,75 @@
package runner
import (
"sync"
)
// NomadEnvironmentStorage is an interface for storing Nomad environments.
type NomadEnvironmentStorage interface {
// List returns all environments stored in this storage.
List() []*NomadEnvironment
// Add adds an environment to the storage.
// It overwrites the old environment if one with the same id was already stored.
Add(environment *NomadEnvironment)
// Get returns an environment from the storage.
// Iff the environment does not exist in the store, ok will be false.
Get(id EnvironmentID) (environment *NomadEnvironment, ok bool)
// Delete deletes the environment with the passed id from the storage. It does nothing if no environment with the id
// is present in the storage.
Delete(id EnvironmentID)
// Length returns the number of currently stored environments in the storage.
Length() int
}
// localNomadEnvironmentStorage stores NomadEnvironment objects in the local application memory.
type localNomadEnvironmentStorage struct {
sync.RWMutex
environments map[EnvironmentID]*NomadEnvironment
}
// NewLocalNomadEnvironmentStorage returns an empty localNomadEnvironmentStorage.
// This implementation stores the data thread-safely in the local application memory.
func NewLocalNomadEnvironmentStorage() *localNomadEnvironmentStorage {
return &localNomadEnvironmentStorage{
environments: make(map[EnvironmentID]*NomadEnvironment),
}
}
func (s *localNomadEnvironmentStorage) List() []*NomadEnvironment {
s.RLock()
defer s.RUnlock()
values := make([]*NomadEnvironment, 0, len(s.environments))
for _, v := range s.environments {
values = append(values, v)
}
return values
}
func (s *localNomadEnvironmentStorage) Add(environment *NomadEnvironment) {
s.Lock()
defer s.Unlock()
s.environments[environment.ID()] = environment
}
func (s *localNomadEnvironmentStorage) Get(id EnvironmentID) (environment *NomadEnvironment, ok bool) {
s.RLock()
defer s.RUnlock()
environment, ok = s.environments[id]
return
}
func (s *localNomadEnvironmentStorage) Delete(id EnvironmentID) {
s.Lock()
defer s.Unlock()
delete(s.environments, id)
}
func (s *localNomadEnvironmentStorage) Length() int {
s.RLock()
defer s.RUnlock()
return len(s.environments)
}


@@ -0,0 +1,76 @@
package runner
import (
nomadApi "github.com/hashicorp/nomad/api"
"github.com/stretchr/testify/suite"
"testing"
)
func TestEnvironmentStoreTestSuite(t *testing.T) {
suite.Run(t, new(EnvironmentStoreTestSuite))
}
type EnvironmentStoreTestSuite struct {
suite.Suite
environmentStorage *localNomadEnvironmentStorage
environment *NomadEnvironment
}
func (s *EnvironmentStoreTestSuite) SetupTest() {
s.environmentStorage = NewLocalNomadEnvironmentStorage()
s.environment = &NomadEnvironment{environmentID: defaultEnvironmentID}
}
func (s *EnvironmentStoreTestSuite) TestAddedEnvironmentCanBeRetrieved() {
s.environmentStorage.Add(s.environment)
retrievedEnvironment, ok := s.environmentStorage.Get(s.environment.ID())
s.True(ok, "A saved runner should be retrievable")
s.Equal(s.environment, retrievedEnvironment)
}
func (s *EnvironmentStoreTestSuite) TestEnvironmentWithSameIdOverwritesOldOne() {
otherEnvironmentWithSameID := &NomadEnvironment{environmentID: defaultEnvironmentID}
otherEnvironmentWithSameID.templateJob = &nomadApi.Job{}
s.NotEqual(s.environment, otherEnvironmentWithSameID)
s.environmentStorage.Add(s.environment)
s.environmentStorage.Add(otherEnvironmentWithSameID)
retrievedEnvironment, _ := s.environmentStorage.Get(s.environment.ID())
s.NotEqual(s.environment, retrievedEnvironment)
s.Equal(otherEnvironmentWithSameID, retrievedEnvironment)
}
func (s *EnvironmentStoreTestSuite) TestDeletedEnvironmentIsNotAccessible() {
s.environmentStorage.Add(s.environment)
s.environmentStorage.Delete(s.environment.ID())
retrievedEnvironment, ok := s.environmentStorage.Get(s.environment.ID())
s.Nil(retrievedEnvironment)
s.False(ok, "A deleted environment should not be accessible")
}
func (s *EnvironmentStoreTestSuite) TestLenOfEmptyPoolIsZero() {
s.Equal(0, s.environmentStorage.Length())
}
func (s *EnvironmentStoreTestSuite) TestLenChangesOnStoreContentChange() {
s.Run("len increases when environment is added", func() {
s.environmentStorage.Add(s.environment)
s.Equal(1, s.environmentStorage.Length())
})
s.Run("len does not increase when environment with same id is added", func() {
s.environmentStorage.Add(s.environment)
s.Equal(1, s.environmentStorage.Length())
})
s.Run("len increases again when different environment is added", func() {
anotherEnvironment := &NomadEnvironment{environmentID: anotherEnvironmentID}
s.environmentStorage.Add(anotherEnvironment)
s.Equal(2, s.environmentStorage.Length())
})
s.Run("len decreases when environment is deleted", func() {
s.environmentStorage.Delete(s.environment.ID())
s.Equal(1, s.environmentStorage.Length())
})
}

internal/runner/runner.go

@@ -0,0 +1,338 @@
package runner
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
nomadApi "github.com/hashicorp/nomad/api"
"gitlab.hpi.de/codeocean/codemoon/poseidon/internal/nomad"
"gitlab.hpi.de/codeocean/codemoon/poseidon/pkg/dto"
"io"
"strings"
"sync"
"time"
)
// ContextKey is the type for keys in a request context.
type ContextKey string
// ExecutionID is an id for an execution in a Runner.
type ExecutionID string
const (
// runnerContextKey is the key used to store runners in context.Context.
runnerContextKey ContextKey = "runner"
)
var (
ErrorFileCopyFailed = errors.New("file copy failed")
ErrorRunnerInactivityTimeout = errors.New("runner inactivity timeout exceeded")
)
// InactivityTimer is a wrapper around a timer that is used to delete a Runner after a period of inactivity.
type InactivityTimer interface {
// SetupTimeout starts the timeout after which the runner gets deleted.
SetupTimeout(duration time.Duration)
// ResetTimeout resets the current timeout so that the runner gets deleted after the duration set in SetupTimeout, counted from now.
// It does not make an already expired timer run again.
ResetTimeout()
// StopTimeout stops the timeout but does not remove the runner.
StopTimeout()
// TimeoutPassed returns true if the timeout expired and false otherwise.
TimeoutPassed() bool
}
type TimerState uint8
const (
TimerInactive TimerState = 0
TimerRunning TimerState = 1
TimerExpired TimerState = 2
)
type InactivityTimerImplementation struct {
timer *time.Timer
duration time.Duration
state TimerState
runner Runner
manager Manager
sync.Mutex
}
func NewInactivityTimer(runner Runner, manager Manager) InactivityTimer {
return &InactivityTimerImplementation{
state: TimerInactive,
runner: runner,
manager: manager,
}
}
func (t *InactivityTimerImplementation) SetupTimeout(duration time.Duration) {
t.Lock()
defer t.Unlock()
// Stop old timer if present.
if t.timer != nil {
t.timer.Stop()
}
if duration == 0 {
t.state = TimerInactive
return
}
t.state = TimerRunning
t.duration = duration
t.timer = time.AfterFunc(duration, func() {
t.Lock()
t.state = TimerExpired
// The timer must be unlocked here already in order to avoid a deadlock with the call to StopTimeout in Manager.Return.
t.Unlock()
err := t.manager.Return(t.runner)
if err != nil {
log.WithError(err).WithField("id", t.runner.ID()).Warn("Returning runner after inactivity caused an error")
} else {
log.WithField("id", t.runner.ID()).Info("Returning runner due to inactivity timeout")
}
})
}
func (t *InactivityTimerImplementation) ResetTimeout() {
t.Lock()
defer t.Unlock()
if t.state != TimerRunning {
// The timer has already expired or been stopped. We don't want to restart it.
return
}
if t.timer.Stop() {
t.timer.Reset(t.duration)
} else {
log.Error("Timer is in state running but stopped. This should never happen")
}
}
func (t *InactivityTimerImplementation) StopTimeout() {
t.Lock()
defer t.Unlock()
if t.state != TimerRunning {
return
}
t.timer.Stop()
t.state = TimerInactive
}
func (t *InactivityTimerImplementation) TimeoutPassed() bool {
return t.state == TimerExpired
}
type Runner interface {
// ID returns the id of the runner.
ID() string
// MappedPorts returns the mapped ports of the runner.
MappedPorts() []*dto.MappedPort
ExecutionStorage
InactivityTimer
// ExecuteInteractively runs the given execution request, forwarding input from the given reader and output to the given writers.
// An ExitInfo is sent to the exit channel on command completion.
// Output from the runner is forwarded immediately.
ExecuteInteractively(
request *dto.ExecutionRequest,
stdin io.Reader,
stdout,
stderr io.Writer,
) (exit <-chan ExitInfo, cancel context.CancelFunc)
// UpdateFileSystem processes a dto.UpdateFileSystemRequest by first deleting each given dto.FilePath recursively
// and then copying each given dto.File to the runner.
UpdateFileSystem(request *dto.UpdateFileSystemRequest) error
}
// NomadJob is an abstraction to communicate with Nomad environments.
type NomadJob struct {
ExecutionStorage
InactivityTimer
id string
portMappings []nomadApi.PortMapping
api nomad.ExecutorAPI
}
// NewNomadJob creates a new NomadJob with the provided id.
func NewNomadJob(id string, portMappings []nomadApi.PortMapping,
apiClient nomad.ExecutorAPI, manager Manager,
) *NomadJob {
job := &NomadJob{
id: id,
portMappings: portMappings,
api: apiClient,
ExecutionStorage: NewLocalExecutionStorage(),
}
job.InactivityTimer = NewInactivityTimer(job, manager)
return job
}
func (r *NomadJob) ID() string {
return r.id
}
func (r *NomadJob) MappedPorts() []*dto.MappedPort {
ports := make([]*dto.MappedPort, 0, len(r.portMappings))
for _, portMapping := range r.portMappings {
ports = append(ports, &dto.MappedPort{
ExposedPort: uint(portMapping.To),
HostAddress: fmt.Sprintf("%s:%d", portMapping.HostIP, portMapping.Value),
})
}
return ports
}
type ExitInfo struct {
Code uint8
Err error
}
func (r *NomadJob) ExecuteInteractively(
request *dto.ExecutionRequest,
stdin io.Reader,
stdout, stderr io.Writer,
) (<-chan ExitInfo, context.CancelFunc) {
r.ResetTimeout()
command := request.FullCommand()
var ctx context.Context
var cancel context.CancelFunc
if request.TimeLimit == 0 {
ctx, cancel = context.WithCancel(context.Background())
} else {
ctx, cancel = context.WithTimeout(context.Background(), time.Duration(request.TimeLimit)*time.Second)
}
exit := make(chan ExitInfo)
go func() {
exitCode, err := r.api.ExecuteCommand(r.id, ctx, command, true, stdin, stdout, stderr)
if err == nil && r.TimeoutPassed() {
err = ErrorRunnerInactivityTimeout
}
exit <- ExitInfo{uint8(exitCode), err}
close(exit)
}()
return exit, cancel
}
func (r *NomadJob) UpdateFileSystem(copyRequest *dto.UpdateFileSystemRequest) error {
r.ResetTimeout()
var tarBuffer bytes.Buffer
if err := createTarArchiveForFiles(copyRequest.Copy, &tarBuffer); err != nil {
return err
}
fileDeletionCommand := fileDeletionCommand(copyRequest.Delete)
copyCommand := "tar --extract --absolute-names --verbose --file=/dev/stdin;"
updateFileCommand := (&dto.ExecutionRequest{Command: fileDeletionCommand + copyCommand}).FullCommand()
stdOut := bytes.Buffer{}
stdErr := bytes.Buffer{}
exitCode, err := r.api.ExecuteCommand(r.id, context.Background(), updateFileCommand, false,
&tarBuffer, &stdOut, &stdErr)
if err != nil {
return fmt.Errorf(
"%w: nomad error during file copy: %v",
nomad.ErrorExecutorCommunicationFailed,
err)
}
if exitCode != 0 {
return fmt.Errorf(
"%w: stderr output '%s' and stdout output '%s'",
ErrorFileCopyFailed,
stdErr.String(),
stdOut.String())
}
return nil
}
func createTarArchiveForFiles(filesToCopy []dto.File, w io.Writer) error {
tarWriter := tar.NewWriter(w)
for _, file := range filesToCopy {
if err := tarWriter.WriteHeader(tarHeader(file)); err != nil {
err := fmt.Errorf("error writing tar file header: %w", err)
log.
WithField("file", file).
Error(err)
return err
}
if _, err := tarWriter.Write(file.ByteContent()); err != nil {
err := fmt.Errorf("error writing tar file content: %w", err)
log.
WithField("file", file).
Error(err)
return err
}
}
if err := tarWriter.Close(); err != nil {
return fmt.Errorf("error closing tar writer: %w", err)
}
return nil
}
func fileDeletionCommand(pathsToDelete []dto.FilePath) string {
if len(pathsToDelete) == 0 {
return ""
}
command := "rm --recursive --force "
for _, filePath := range pathsToDelete {
// To avoid command injection, filenames need to be quoted.
// See https://unix.stackexchange.com/questions/347332/what-characters-need-to-be-escaped-in-files-without-quotes
// for details.
singleQuoteEscapedFileName := strings.ReplaceAll(filePath.Cleaned(), "'", "'\\''")
command += fmt.Sprintf("'%s' ", singleQuoteEscapedFileName)
}
command += ";"
return command
}
func tarHeader(file dto.File) *tar.Header {
if file.IsDirectory() {
return &tar.Header{
Typeflag: tar.TypeDir,
Name: file.CleanedPath(),
Mode: 0755,
}
} else {
return &tar.Header{
Typeflag: tar.TypeReg,
Name: file.CleanedPath(),
Mode: 0744,
Size: int64(len(file.Content)),
}
}
}
// MarshalJSON implements the json.Marshaler interface.
// This exports private attributes like the id too.
func (r *NomadJob) MarshalJSON() ([]byte, error) {
res, err := json.Marshal(struct {
ID string `json:"runnerId"`
}{
ID: r.ID(),
})
if err != nil {
return nil, fmt.Errorf("error marshaling Nomad job: %w", err)
}
return res, nil
}
// NewContext creates a context containing a runner.
func NewContext(ctx context.Context, runner Runner) context.Context {
return context.WithValue(ctx, runnerContextKey, runner)
}
// FromContext returns a runner from a context.
func FromContext(ctx context.Context) (Runner, bool) {
runner, ok := ctx.Value(runnerContextKey).(Runner)
return runner, ok
}
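
A hypothetical caller sketch (not part of this commit) showing how the ExecuteInteractively and ExitInfo contract above might be consumed; the runner r is assumed to have been claimed from a Manager, and the command and time limit are illustrative:

func exampleExecuteInteractively(r Runner) error {
    request := &dto.ExecutionRequest{Command: "echo 'Hello World!'", TimeLimit: 10}

    var stdout, stderr bytes.Buffer
    exit, cancel := r.ExecuteInteractively(request, strings.NewReader(""), &stdout, &stderr)
    defer cancel()

    // The channel delivers exactly one ExitInfo once the command finishes,
    // the time limit cancels it, or the inactivity timeout expires.
    info := <-exit
    if info.Err != nil {
        return fmt.Errorf("execution failed: %w", info.Err)
    }
    log.WithField("exitCode", info.Code).WithField("stdout", stdout.String()).Info("Execution finished")
    return nil
}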


@@ -0,0 +1,145 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
package runner
import (
context "context"
io "io"
dto "gitlab.hpi.de/codeocean/codemoon/poseidon/pkg/dto"
mock "github.com/stretchr/testify/mock"
time "time"
)
// RunnerMock is an autogenerated mock type for the Runner type
type RunnerMock struct {
mock.Mock
}
// Add provides a mock function with given fields: id, executionRequest
func (_m *RunnerMock) Add(id ExecutionID, executionRequest *dto.ExecutionRequest) {
_m.Called(id, executionRequest)
}
// ExecuteInteractively provides a mock function with given fields: request, stdin, stdout, stderr
func (_m *RunnerMock) ExecuteInteractively(request *dto.ExecutionRequest, stdin io.Reader, stdout io.Writer, stderr io.Writer) (<-chan ExitInfo, context.CancelFunc) {
ret := _m.Called(request, stdin, stdout, stderr)
var r0 <-chan ExitInfo
if rf, ok := ret.Get(0).(func(*dto.ExecutionRequest, io.Reader, io.Writer, io.Writer) <-chan ExitInfo); ok {
r0 = rf(request, stdin, stdout, stderr)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(<-chan ExitInfo)
}
}
var r1 context.CancelFunc
if rf, ok := ret.Get(1).(func(*dto.ExecutionRequest, io.Reader, io.Writer, io.Writer) context.CancelFunc); ok {
r1 = rf(request, stdin, stdout, stderr)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(context.CancelFunc)
}
}
return r0, r1
}
// Id provides a mock function with given fields:
func (_m *RunnerMock) ID() string {
ret := _m.Called()
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// MappedPorts provides a mock function with given fields:
func (_m *RunnerMock) MappedPorts() []*dto.MappedPort {
ret := _m.Called()
var r0 []*dto.MappedPort
if rf, ok := ret.Get(0).(func() []*dto.MappedPort); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*dto.MappedPort)
}
}
return r0
}
// Pop provides a mock function with given fields: id
func (_m *RunnerMock) Pop(id ExecutionID) (*dto.ExecutionRequest, bool) {
ret := _m.Called(id)
var r0 *dto.ExecutionRequest
if rf, ok := ret.Get(0).(func(ExecutionID) *dto.ExecutionRequest); ok {
r0 = rf(id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*dto.ExecutionRequest)
}
}
var r1 bool
if rf, ok := ret.Get(1).(func(ExecutionID) bool); ok {
r1 = rf(id)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// ResetTimeout provides a mock function with given fields:
func (_m *RunnerMock) ResetTimeout() {
_m.Called()
}
// SetupTimeout provides a mock function with given fields: duration
func (_m *RunnerMock) SetupTimeout(duration time.Duration) {
_m.Called(duration)
}
// StopTimeout provides a mock function with given fields:
func (_m *RunnerMock) StopTimeout() {
_m.Called()
}
// TimeoutPassed provides a mock function with given fields:
func (_m *RunnerMock) TimeoutPassed() bool {
ret := _m.Called()
var r0 bool
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// UpdateFileSystem provides a mock function with given fields: request
func (_m *RunnerMock) UpdateFileSystem(request *dto.UpdateFileSystemRequest) error {
ret := _m.Called(request)
var r0 error
if rf, ok := ret.Get(0).(func(*dto.UpdateFileSystemRequest) error); ok {
r0 = rf(request)
} else {
r0 = ret.Error(0)
}
return r0
}


@@ -0,0 +1,401 @@
package runner
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"fmt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"gitlab.hpi.de/codeocean/codemoon/poseidon/internal/nomad"
"gitlab.hpi.de/codeocean/codemoon/poseidon/pkg/dto"
"gitlab.hpi.de/codeocean/codemoon/poseidon/tests"
"io"
"regexp"
"strings"
"testing"
"time"
)
func TestIdIsStored(t *testing.T) {
runner := NewNomadJob(tests.DefaultJobID, nil, nil, nil)
assert.Equal(t, tests.DefaultJobID, runner.ID())
}
func TestMappedPortsAreStoredCorrectly(t *testing.T) {
runner := NewNomadJob(tests.DefaultJobID, tests.DefaultPortMappings, nil, nil)
assert.Equal(t, tests.DefaultMappedPorts, runner.MappedPorts())
runner = NewNomadJob(tests.DefaultJobID, nil, nil, nil)
assert.Empty(t, runner.MappedPorts())
}
func TestMarshalRunner(t *testing.T) {
runner := NewNomadJob(tests.DefaultJobID, nil, nil, nil)
marshal, err := json.Marshal(runner)
assert.NoError(t, err)
assert.Equal(t, "{\"runnerId\":\""+tests.DefaultJobID+"\"}", string(marshal))
}
func TestExecutionRequestIsStored(t *testing.T) {
runner := NewNomadJob(tests.DefaultJobID, nil, nil, nil)
executionRequest := &dto.ExecutionRequest{
Command: "command",
TimeLimit: 10,
Environment: nil,
}
id := ExecutionID("test-execution")
runner.Add(id, executionRequest)
storedExecutionRunner, ok := runner.Pop(id)
assert.True(t, ok, "Getting an execution should not return ok false")
assert.Equal(t, executionRequest, storedExecutionRunner)
}
func TestNewContextReturnsNewContextWithRunner(t *testing.T) {
runner := NewNomadJob(tests.DefaultRunnerID, nil, nil, nil)
ctx := context.Background()
newCtx := NewContext(ctx, runner)
storedRunner, ok := newCtx.Value(runnerContextKey).(Runner)
require.True(t, ok)
assert.NotEqual(t, ctx, newCtx)
assert.Equal(t, runner, storedRunner)
}
func TestFromContextReturnsRunner(t *testing.T) {
runner := NewNomadJob(tests.DefaultRunnerID, nil, nil, nil)
ctx := NewContext(context.Background(), runner)
storedRunner, ok := FromContext(ctx)
assert.True(t, ok)
assert.Equal(t, runner, storedRunner)
}
func TestFromContextReturnsIsNotOkWhenContextHasNoRunner(t *testing.T) {
ctx := context.Background()
_, ok := FromContext(ctx)
assert.False(t, ok)
}
func TestExecuteInteractivelyTestSuite(t *testing.T) {
suite.Run(t, new(ExecuteInteractivelyTestSuite))
}
type ExecuteInteractivelyTestSuite struct {
suite.Suite
runner *NomadJob
apiMock *nomad.ExecutorAPIMock
timer *InactivityTimerMock
mockedExecuteCommandCall *mock.Call
mockedTimeoutPassedCall *mock.Call
}
func (s *ExecuteInteractivelyTestSuite) SetupTest() {
s.apiMock = &nomad.ExecutorAPIMock{}
s.mockedExecuteCommandCall = s.apiMock.
On("ExecuteCommand", mock.Anything, mock.Anything, mock.Anything, true, mock.Anything, mock.Anything, mock.Anything).
Return(0, nil)
s.timer = &InactivityTimerMock{}
s.timer.On("ResetTimeout").Return()
s.mockedTimeoutPassedCall = s.timer.On("TimeoutPassed").Return(false)
s.runner = &NomadJob{
ExecutionStorage: NewLocalExecutionStorage(),
InactivityTimer: s.timer,
id: tests.DefaultRunnerID,
api: s.apiMock,
}
}
func (s *ExecuteInteractivelyTestSuite) TestCallsApi() {
request := &dto.ExecutionRequest{Command: "echo 'Hello World!'"}
s.runner.ExecuteInteractively(request, nil, nil, nil)
time.Sleep(tests.ShortTimeout)
s.apiMock.AssertCalled(s.T(), "ExecuteCommand", tests.DefaultRunnerID, mock.Anything, request.FullCommand(),
true, mock.Anything, mock.Anything, mock.Anything)
}
func (s *ExecuteInteractivelyTestSuite) TestReturnsAfterTimeout() {
s.mockedExecuteCommandCall.Run(func(args mock.Arguments) {
ctx, ok := args.Get(1).(context.Context)
s.Require().True(ok)
<-ctx.Done()
}).
Return(0, nil)
timeLimit := 1
execution := &dto.ExecutionRequest{TimeLimit: timeLimit}
exit, _ := s.runner.ExecuteInteractively(execution, nil, nil, nil)
select {
case <-exit:
s.FailNow("ExecuteInteractively should not terminate instantly")
case <-time.After(tests.ShortTimeout):
}
select {
case <-time.After(time.Duration(timeLimit) * time.Second):
s.FailNow("ExecuteInteractively should return after the time limit")
case exitInfo := <-exit:
s.Equal(uint8(0), exitInfo.Code)
}
}
func (s *ExecuteInteractivelyTestSuite) TestResetTimerGetsCalled() {
execution := &dto.ExecutionRequest{}
s.runner.ExecuteInteractively(execution, nil, nil, nil)
s.timer.AssertCalled(s.T(), "ResetTimeout")
}
func (s *ExecuteInteractivelyTestSuite) TestExitHasTimeoutErrorIfExecutionTimesOut() {
s.mockedTimeoutPassedCall.Return(true)
execution := &dto.ExecutionRequest{}
exitChannel, _ := s.runner.ExecuteInteractively(execution, nil, nil, nil)
exit := <-exitChannel
s.Equal(ErrorRunnerInactivityTimeout, exit.Err)
}
func TestUpdateFileSystemTestSuite(t *testing.T) {
suite.Run(t, new(UpdateFileSystemTestSuite))
}
type UpdateFileSystemTestSuite struct {
suite.Suite
runner *NomadJob
timer *InactivityTimerMock
apiMock *nomad.ExecutorAPIMock
mockedExecuteCommandCall *mock.Call
command []string
stdin *bytes.Buffer
}
func (s *UpdateFileSystemTestSuite) SetupTest() {
s.apiMock = &nomad.ExecutorAPIMock{}
s.timer = &InactivityTimerMock{}
s.timer.On("ResetTimeout").Return()
s.timer.On("TimeoutPassed").Return(false)
s.runner = &NomadJob{
ExecutionStorage: NewLocalExecutionStorage(),
InactivityTimer: s.timer,
id: tests.DefaultRunnerID,
api: s.apiMock,
}
s.mockedExecuteCommandCall = s.apiMock.On("ExecuteCommand", tests.DefaultRunnerID, mock.Anything,
mock.Anything, false, mock.Anything, mock.Anything, mock.Anything).
Run(func(args mock.Arguments) {
var ok bool
s.command, ok = args.Get(2).([]string)
s.Require().True(ok)
s.stdin, ok = args.Get(4).(*bytes.Buffer)
s.Require().True(ok)
}).Return(0, nil)
}
func (s *UpdateFileSystemTestSuite) TestUpdateFileSystemForRunnerPerformsTarExtractionWithAbsoluteNamesOnRunner() {
// Note: this tests an implementation detail of the UpdateFileSystem method.
// If the implementation changes, delete this test and write a new one.
copyRequest := &dto.UpdateFileSystemRequest{}
err := s.runner.UpdateFileSystem(copyRequest)
s.NoError(err)
s.apiMock.AssertCalled(s.T(), "ExecuteCommand", mock.Anything, mock.Anything, mock.Anything,
false, mock.Anything, mock.Anything, mock.Anything)
s.Regexp("tar --extract --absolute-names", s.command)
}
func (s *UpdateFileSystemTestSuite) TestUpdateFileSystemForRunnerReturnsErrorIfExitCodeIsNotZero() {
s.mockedExecuteCommandCall.Return(1, nil)
copyRequest := &dto.UpdateFileSystemRequest{}
err := s.runner.UpdateFileSystem(copyRequest)
s.ErrorIs(err, ErrorFileCopyFailed)
}
func (s *UpdateFileSystemTestSuite) TestUpdateFileSystemForRunnerReturnsErrorIfApiCallDid() {
s.mockedExecuteCommandCall.Return(0, tests.ErrDefault)
copyRequest := &dto.UpdateFileSystemRequest{}
err := s.runner.UpdateFileSystem(copyRequest)
s.ErrorIs(err, nomad.ErrorExecutorCommunicationFailed)
}
func (s *UpdateFileSystemTestSuite) TestFilesToCopyAreIncludedInTarArchive() {
copyRequest := &dto.UpdateFileSystemRequest{Copy: []dto.File{
{Path: tests.DefaultFileName, Content: []byte(tests.DefaultFileContent)}}}
err := s.runner.UpdateFileSystem(copyRequest)
s.NoError(err)
s.apiMock.AssertCalled(s.T(), "ExecuteCommand", mock.Anything, mock.Anything, mock.Anything, false,
mock.Anything, mock.Anything, mock.Anything)
tarFiles := s.readFilesFromTarArchive(s.stdin)
s.Len(tarFiles, 1)
tarFile := tarFiles[0]
s.True(strings.HasSuffix(tarFile.Name, tests.DefaultFileName))
s.Equal(byte(tar.TypeReg), tarFile.TypeFlag)
s.Equal(tests.DefaultFileContent, tarFile.Content)
}
func (s *UpdateFileSystemTestSuite) TestTarFilesContainCorrectPathForRelativeFilePath() {
copyRequest := &dto.UpdateFileSystemRequest{Copy: []dto.File{
{Path: tests.DefaultFileName, Content: []byte(tests.DefaultFileContent)}}}
err := s.runner.UpdateFileSystem(copyRequest)
s.Require().NoError(err)
tarFiles := s.readFilesFromTarArchive(s.stdin)
s.Len(tarFiles, 1)
// The tar archive is extracted in the active workdir of the container, so the file is placed relative to that.
s.Equal(tests.DefaultFileName, tarFiles[0].Name)
}
func (s *UpdateFileSystemTestSuite) TestFilesWithAbsolutePathArePutInAbsoluteLocation() {
copyRequest := &dto.UpdateFileSystemRequest{Copy: []dto.File{
{Path: tests.FileNameWithAbsolutePath, Content: []byte(tests.DefaultFileContent)}}}
err := s.runner.UpdateFileSystem(copyRequest)
s.Require().NoError(err)
tarFiles := s.readFilesFromTarArchive(s.stdin)
s.Len(tarFiles, 1)
s.Equal(tarFiles[0].Name, tests.FileNameWithAbsolutePath)
}
func (s *UpdateFileSystemTestSuite) TestDirectoriesAreMarkedAsDirectoryInTar() {
copyRequest := &dto.UpdateFileSystemRequest{Copy: []dto.File{{Path: tests.DefaultDirectoryName, Content: []byte{}}}}
err := s.runner.UpdateFileSystem(copyRequest)
s.Require().NoError(err)
tarFiles := s.readFilesFromTarArchive(s.stdin)
s.Len(tarFiles, 1)
tarFile := tarFiles[0]
s.True(strings.HasSuffix(tarFile.Name+"/", tests.DefaultDirectoryName))
s.Equal(byte(tar.TypeDir), tarFile.TypeFlag)
s.Equal("", tarFile.Content)
}
func (s *UpdateFileSystemTestSuite) TestFilesToRemoveGetRemoved() {
copyRequest := &dto.UpdateFileSystemRequest{Delete: []dto.FilePath{tests.DefaultFileName}}
err := s.runner.UpdateFileSystem(copyRequest)
s.NoError(err)
s.apiMock.AssertCalled(s.T(), "ExecuteCommand", mock.Anything, mock.Anything, mock.Anything, false,
mock.Anything, mock.Anything, mock.Anything)
s.Regexp(fmt.Sprintf("rm[^;]+%s' *;", regexp.QuoteMeta(tests.DefaultFileName)), s.command)
}
func (s *UpdateFileSystemTestSuite) TestFilesToRemoveGetEscaped() {
copyRequest := &dto.UpdateFileSystemRequest{Delete: []dto.FilePath{"/some/potentially/harmful'filename"}}
err := s.runner.UpdateFileSystem(copyRequest)
s.NoError(err)
s.apiMock.AssertCalled(s.T(), "ExecuteCommand", mock.Anything, mock.Anything, mock.Anything, false,
mock.Anything, mock.Anything, mock.Anything)
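// In POSIX shell single-quoting, an embedded ' is written as '\'' (close the quote, add an escaped quote, reopen the quote).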
s.Contains(strings.Join(s.command, " "), "'/some/potentially/harmful'\\''filename'")
}
func (s *UpdateFileSystemTestSuite) TestResetTimerGetsCalled() {
copyRequest := &dto.UpdateFileSystemRequest{}
err := s.runner.UpdateFileSystem(copyRequest)
s.NoError(err)
s.timer.AssertCalled(s.T(), "ResetTimeout")
}
type TarFile struct {
Name string
Content string
TypeFlag byte
}
func (s *UpdateFileSystemTestSuite) readFilesFromTarArchive(tarArchive io.Reader) (files []TarFile) {
reader := tar.NewReader(tarArchive)
for {
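// reader.Next returns io.EOF once the archive is exhausted; any error ends the loop here.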
hdr, err := reader.Next()
if err != nil {
break
}
bf, err := io.ReadAll(reader)
s.Require().NoError(err)
files = append(files, TarFile{Name: hdr.Name, Content: string(bf), TypeFlag: hdr.Typeflag})
}
return files
}
func TestInactivityTimerTestSuite(t *testing.T) {
suite.Run(t, new(InactivityTimerTestSuite))
}
type InactivityTimerTestSuite struct {
suite.Suite
runner Runner
manager *ManagerMock
returned chan bool
}
func (s *InactivityTimerTestSuite) SetupTest() {
s.returned = make(chan bool, 1)
s.manager = &ManagerMock{}
s.manager.On("Return", mock.Anything).Run(func(_ mock.Arguments) {
s.returned <- true
}).Return(nil)
s.runner = NewRunner(tests.DefaultRunnerID, s.manager)
s.runner.SetupTimeout(tests.ShortTimeout)
}
func (s *InactivityTimerTestSuite) TearDownTest() {
s.runner.StopTimeout()
}
func (s *InactivityTimerTestSuite) TestRunnerIsReturnedAfterTimeout() {
s.True(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
}
func (s *InactivityTimerTestSuite) TestRunnerIsNotReturnedBeforeTimeout() {
s.False(tests.ChannelReceivesSomething(s.returned, tests.ShortTimeout/2))
}
func (s *InactivityTimerTestSuite) TestResetTimeoutExtendsTheDeadline() {
time.Sleep(3 * tests.ShortTimeout / 4)
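// The sleep ends before the original deadline, so the reset below restarts the full timeout from this point in time.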
s.runner.ResetTimeout()
s.False(tests.ChannelReceivesSomething(s.returned, 3*tests.ShortTimeout/4),
"Because of the reset, the timeout should not be reached by now.")
s.True(tests.ChannelReceivesSomething(s.returned, 5*tests.ShortTimeout/4),
"After reset, the timout should be reached by now.")
}
func (s *InactivityTimerTestSuite) TestStopTimeoutStopsTimeout() {
s.runner.StopTimeout()
s.False(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
}
func (s *InactivityTimerTestSuite) TestTimeoutPassedReturnsFalseBeforeDeadline() {
s.False(s.runner.TimeoutPassed())
}
func (s *InactivityTimerTestSuite) TestTimeoutPassedReturnsTrueAfterDeadline() {
time.Sleep(2 * tests.ShortTimeout)
s.True(s.runner.TimeoutPassed())
}
func (s *InactivityTimerTestSuite) TestTimerIsNotResetAfterDeadline() {
time.Sleep(2 * tests.ShortTimeout)
// We need to empty the returned channel so Return can send to it again.
tests.ChannelReceivesSomething(s.returned, 0)
s.runner.ResetTimeout()
s.False(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
}
func (s *InactivityTimerTestSuite) TestSetupTimeoutStopsOldTimeout() {
s.runner.SetupTimeout(3 * tests.ShortTimeout)
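// SetupTimeout replaces the initial ShortTimeout deadline with a 3*ShortTimeout one, so the runner is returned only after the longer period.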
s.False(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
s.True(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
}
func (s *InactivityTimerTestSuite) TestTimerIsInactiveWhenDurationIsZero() {
s.runner.SetupTimeout(0)
s.False(tests.ChannelReceivesSomething(s.returned, tests.ShortTimeout))
}
// NewRunner creates a new runner with the provided id and manager.
func NewRunner(id string, manager Manager) Runner {
return NewNomadJob(id, nil, nil, manager)
}

View File

@@ -0,0 +1,77 @@
package runner
import (
"sync"
)
// Storage is an interface for storing runners.
type Storage interface {
// Add adds a runner to the storage.
// It overwrites the old runner if one with the same id was already stored.
Add(Runner)
// Get returns a runner from the storage.
// Iff the runner does not exist in the storage, ok will be false.
Get(id string) (r Runner, ok bool)
// Delete deletes the runner with the passed id from the storage.
// It does nothing if no runner with the id is present in the store.
Delete(id string)
// Length returns the number of currently stored runners in the storage.
Length() int
// Sample returns and removes an arbitrary runner from the storage.
// ok is true iff a runner was returned.
Sample() (r Runner, ok bool)
}
// localRunnerStorage stores runner objects in the local application memory.
// ToDo: Create an implementation that uses persistent storage, such as a database.
type localRunnerStorage struct {
sync.RWMutex
runners map[string]Runner
}
// NewLocalRunnerStorage returns a Storage implementation.
// This implementation stores the data thread-safely in the local application memory.
func NewLocalRunnerStorage() *localRunnerStorage {
return &localRunnerStorage{
runners: make(map[string]Runner),
}
}
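// exampleStorageUsage is a minimal usage sketch and not part of the package API
// (the function name is illustrative only). It shows the intended life cycle:
// add a runner, look it up by its id, and drain the storage via Sample, which
// also removes the returned runner.
func exampleStorageUsage(r Runner) {
storage := NewLocalRunnerStorage()
storage.Add(r)
if stored, ok := storage.Get(r.ID()); ok {
_ = stored // the runner that was just added
}
for {
sampled, ok := storage.Sample()
if !ok {
break // the storage is empty now
}
_ = sampled // Sample removed this runner from the storage
}
}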
func (s *localRunnerStorage) Add(r Runner) {
s.Lock()
defer s.Unlock()
s.runners[r.ID()] = r
}
func (s *localRunnerStorage) Get(id string) (r Runner, ok bool) {
s.RLock()
defer s.RUnlock()
r, ok = s.runners[id]
return
}
func (s *localRunnerStorage) Delete(id string) {
s.Lock()
defer s.Unlock()
delete(s.runners, id)
}
func (s *localRunnerStorage) Sample() (Runner, bool) {
s.Lock()
defer s.Unlock()
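// Go map iteration order is unspecified, so ranging once and returning immediately yields an arbitrary runner.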
for _, runner := range s.runners {
delete(s.runners, runner.ID())
return runner, true
}
return nil, false
}
func (s *localRunnerStorage) Length() int {
s.RLock()
defer s.RUnlock()
return len(s.runners)
}

View File

@@ -0,0 +1,103 @@
package runner
import (
"github.com/stretchr/testify/suite"
"gitlab.hpi.de/codeocean/codemoon/poseidon/pkg/dto"
"gitlab.hpi.de/codeocean/codemoon/poseidon/tests"
"testing"
)
func TestRunnerPoolTestSuite(t *testing.T) {
suite.Run(t, new(RunnerPoolTestSuite))
}
type RunnerPoolTestSuite struct {
suite.Suite
runnerStorage *localRunnerStorage
runner Runner
}
func (s *RunnerPoolTestSuite) SetupTest() {
s.runnerStorage = NewLocalRunnerStorage()
s.runner = NewRunner(tests.DefaultRunnerID, nil)
s.runner.Add(tests.DefaultExecutionID, &dto.ExecutionRequest{Command: "true"})
}
func (s *RunnerPoolTestSuite) TestAddedRunnerCanBeRetrieved() {
s.runnerStorage.Add(s.runner)
retrievedRunner, ok := s.runnerStorage.Get(s.runner.ID())
s.True(ok, "A saved runner should be retrievable")
s.Equal(s.runner, retrievedRunner)
}
func (s *RunnerPoolTestSuite) TestRunnerWithSameIdOverwritesOldOne() {
otherRunnerWithSameID := NewRunner(s.runner.ID(), nil)
// ensure the runners are actually different
s.NotEqual(s.runner, otherRunnerWithSameID)
s.runnerStorage.Add(s.runner)
s.runnerStorage.Add(otherRunnerWithSameID)
retrievedRunner, _ := s.runnerStorage.Get(s.runner.ID())
s.NotEqual(s.runner, retrievedRunner)
s.Equal(otherRunnerWithSameID, retrievedRunner)
}
func (s *RunnerPoolTestSuite) TestDeletedRunnersAreNotAccessible() {
s.runnerStorage.Add(s.runner)
s.runnerStorage.Delete(s.runner.ID())
retrievedRunner, ok := s.runnerStorage.Get(s.runner.ID())
s.Nil(retrievedRunner)
s.False(ok, "A deleted runner should not be accessible")
}
func (s *RunnerPoolTestSuite) TestSampleReturnsRunnerWhenOneIsAvailable() {
s.runnerStorage.Add(s.runner)
sampledRunner, ok := s.runnerStorage.Sample()
s.NotNil(sampledRunner)
s.True(ok)
}
func (s *RunnerPoolTestSuite) TestSampleReturnsFalseWhenNoneIsAvailable() {
sampledRunner, ok := s.runnerStorage.Sample()
s.Nil(sampledRunner)
s.False(ok)
}
func (s *RunnerPoolTestSuite) TestSampleRemovesRunnerFromPool() {
s.runnerStorage.Add(s.runner)
sampledRunner, _ := s.runnerStorage.Sample()
_, ok := s.runnerStorage.Get(sampledRunner.ID())
s.False(ok)
}
func (s *RunnerPoolTestSuite) TestLenOfEmptyPoolIsZero() {
s.Equal(0, s.runnerStorage.Length())
}
func (s *RunnerPoolTestSuite) TestLenChangesOnStoreContentChange() {
s.Run("len increases when runner is added", func() {
s.runnerStorage.Add(s.runner)
s.Equal(1, s.runnerStorage.Length())
})
s.Run("len does not increase when runner with same id is added", func() {
s.runnerStorage.Add(s.runner)
s.Equal(1, s.runnerStorage.Length())
})
s.Run("len increases again when different runner is added", func() {
anotherRunner := NewRunner(tests.AnotherRunnerID, nil)
s.runnerStorage.Add(anotherRunner)
s.Equal(2, s.runnerStorage.Length())
})
s.Run("len decreases when runner is deleted", func() {
s.runnerStorage.Delete(s.runner.ID())
s.Equal(1, s.runnerStorage.Length())
})
s.Run("len decreases when runner is sampled", func() {
_, _ = s.runnerStorage.Sample()
s.Equal(0, s.runnerStorage.Length())
})
}