added k8s stub adapter for execution environment
@@ -1,48 +0,0 @@
package runner

import (
	"context"
	"fmt"
	"github.com/openHPI/poseidon/pkg/dto"
	"time"
)

type AWSRunnerManager struct {
	*AbstractManager
}

// NewAWSRunnerManager creates a new runner manager that keeps track of all runners at AWS.
func NewAWSRunnerManager(ctx context.Context) *AWSRunnerManager {
	return &AWSRunnerManager{NewAbstractManager(ctx)}
}

func (a AWSRunnerManager) Claim(id dto.EnvironmentID, duration int) (Runner, error) {
	environment, ok := a.GetEnvironment(id)
	if !ok {
		r, err := a.NextHandler().Claim(id, duration)
		if err != nil {
			return nil, fmt.Errorf("aws wrapped: %w", err)
		}
		return r, nil
	}

	runner, ok := environment.Sample()
	if !ok {
		log.Warn("no aws runner available")
		return nil, ErrNoRunnersAvailable
	}

	a.usedRunners.Add(runner.ID(), runner)
	runner.SetupTimeout(time.Duration(duration) * time.Second)
	return runner, nil
}

func (a AWSRunnerManager) Return(r Runner) error {
	_, isAWSRunner := r.(*AWSFunctionWorkload)
	if isAWSRunner {
		a.usedRunners.Delete(r.ID())
	} else if err := a.NextHandler().Return(r); err != nil {
		return fmt.Errorf("aws wrapped: %w", err)
	}
	return nil
}
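The AWS manager is one link in a chain of responsibility: Claim and Return delegate to NextHandler() for environments and runners they do not own. A minimal wiring sketch, illustrative only and not part of this commit, assuming the KubernetesRunnerManager added below satisfies the AccessorHandler interface through its embedded AbstractManager:

// Sketch: wiring chained runner managers at startup (inside the runner package).
func wireManagers(ctx context.Context, apiClient kubernetes.ExecutorAPI) Manager {
	awsManager := NewAWSRunnerManager(ctx)
	k8sManager := NewKubernetesRunnerManager(&apiClient, ctx)
	// Claims for non-AWS environments fall through to the Kubernetes manager.
	awsManager.SetNextHandler(k8sManager)
	return awsManager
}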
@@ -1,118 +0,0 @@
package runner

import (
	"github.com/openHPI/poseidon/internal/nomad"
	"github.com/openHPI/poseidon/pkg/dto"
	"github.com/openHPI/poseidon/tests"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/suite"
	"testing"
)

type MainTestSuite struct {
	tests.MemoryLeakTestSuite
}

func TestMainTestSuite(t *testing.T) {
	suite.Run(t, new(MainTestSuite))
}

func (s *MainTestSuite) TestAWSRunnerManager_EnvironmentAccessor() {
	m := NewAWSRunnerManager(s.TestCtx)

	environments := m.ListEnvironments()
	s.Empty(environments)

	environment := createBasicEnvironmentMock(defaultEnvironmentID)
	m.StoreEnvironment(environment)

	environments = m.ListEnvironments()
	s.Len(environments, 1)
	s.Equal(environments[0].ID(), dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))

	e, ok := m.GetEnvironment(tests.DefaultEnvironmentIDAsInteger)
	s.True(ok)
	s.Equal(environment, e)

	_, ok = m.GetEnvironment(tests.AnotherEnvironmentIDAsInteger)
	s.False(ok)
}

func (s *MainTestSuite) TestAWSRunnerManager_Claim() {
	m := NewAWSRunnerManager(s.TestCtx)
	environment := createBasicEnvironmentMock(defaultEnvironmentID)
	r, err := NewAWSFunctionWorkload(environment, func(_ Runner) error { return nil })
	s.NoError(err)
	environment.On("Sample").Return(r, true)
	m.StoreEnvironment(environment)

	s.Run("returns runner for AWS environment", func() {
		r, err := m.Claim(tests.DefaultEnvironmentIDAsInteger, 60)
		s.NoError(err)
		s.NotNil(r)
	})

	s.Run("forwards request for non-AWS environments", func() {
		nextHandler := &ManagerMock{}
		nextHandler.On("Claim", mock.AnythingOfType("dto.EnvironmentID"), mock.AnythingOfType("int")).
			Return(nil, nil)
		m.SetNextHandler(nextHandler)

		_, err := m.Claim(tests.AnotherEnvironmentIDAsInteger, 60)
		s.Nil(err)
		nextHandler.AssertCalled(s.T(), "Claim", dto.EnvironmentID(tests.AnotherEnvironmentIDAsInteger), 60)
	})

	err = r.Destroy(nil)
	s.NoError(err)
}

func (s *MainTestSuite) TestAWSRunnerManager_Return() {
	m := NewAWSRunnerManager(s.TestCtx)
	environment := createBasicEnvironmentMock(defaultEnvironmentID)
	m.StoreEnvironment(environment)
	r, err := NewAWSFunctionWorkload(environment, func(_ Runner) error { return nil })
	s.NoError(err)

	s.Run("removes usedRunner", func() {
		m.usedRunners.Add(r.ID(), r)
		s.Contains(m.usedRunners.List(), r)

		err := m.Return(r)
		s.NoError(err)
		s.NotContains(m.usedRunners.List(), r)
	})

	s.Run("calls nextHandler for non-AWS runner", func() {
		nextHandler := &ManagerMock{}
		nextHandler.On("Return", mock.AnythingOfType("*runner.NomadJob")).Return(nil)
		m.SetNextHandler(nextHandler)

		apiMock := &nomad.ExecutorAPIMock{}
		apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
		nonAWSRunner := NewNomadJob(tests.DefaultRunnerID, nil, apiMock, nil)
		err := m.Return(nonAWSRunner)
		s.NoError(err)
		nextHandler.AssertCalled(s.T(), "Return", nonAWSRunner)

		err = nonAWSRunner.Destroy(nil)
		s.NoError(err)
	})

	err = r.Destroy(nil)
	s.NoError(err)
}

func createBasicEnvironmentMock(id dto.EnvironmentID) *ExecutionEnvironmentMock {
	environment := &ExecutionEnvironmentMock{}
	environment.On("ID").Return(id)
	environment.On("Image").Return("")
	environment.On("CPULimit").Return(uint(0))
	environment.On("MemoryLimit").Return(uint(0))
	environment.On("NetworkAccess").Return(false, nil)
	environment.On("DeleteRunner", mock.AnythingOfType("string")).Return(nil, false)
	environment.On("ApplyPrewarmingPoolSize").Return(nil)
	environment.On("IdleRunnerCount").Return(uint(1)).Maybe()
	environment.On("PrewarmingPoolSize").Return(uint(1)).Maybe()
	return environment
}
@@ -1,246 +0,0 @@
package runner

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"github.com/google/uuid"
	"github.com/gorilla/websocket"
	"github.com/openHPI/poseidon/internal/config"
	"github.com/openHPI/poseidon/pkg/dto"
	"github.com/openHPI/poseidon/pkg/monitoring"
	"github.com/openHPI/poseidon/pkg/storage"
	"io"
	"net/http"
	"time"
)

var ErrWrongMessageType = errors.New("received message that is not a text message")

type awsFunctionRequest struct {
	Action string                  `json:"action"`
	Cmd    []string                `json:"cmd"`
	Files  map[dto.FilePath][]byte `json:"files"`
}

// AWSFunctionWorkload is an abstraction to build a request to an AWS Lambda Function.
// It is not persisted on a Poseidon restart.
// The InactivityTimer is used actively. It stops listening to the Lambda function.
// AWS terminates the Lambda Function after the [Globals.Function.Timeout](deploy/aws/template.yaml).
type AWSFunctionWorkload struct {
	InactivityTimer
	id                string
	fs                map[dto.FilePath][]byte
	executions        storage.Storage[*dto.ExecutionRequest]
	runningExecutions map[string]context.CancelFunc
	onDestroy         DestroyRunnerHandler
	environment       ExecutionEnvironment
	ctx               context.Context
	cancel            context.CancelFunc
}

// NewAWSFunctionWorkload creates a new AWSFunctionWorkload with the provided id.
func NewAWSFunctionWorkload(
	environment ExecutionEnvironment, onDestroy DestroyRunnerHandler) (*AWSFunctionWorkload, error) {
	newUUID, err := uuid.NewUUID()
	if err != nil {
		return nil, fmt.Errorf("failed generating runner id: %w", err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	workload := &AWSFunctionWorkload{
		id:                newUUID.String(),
		fs:                make(map[dto.FilePath][]byte),
		runningExecutions: make(map[string]context.CancelFunc),
		onDestroy:         onDestroy,
		environment:       environment,
		ctx:               ctx,
		cancel:            cancel,
	}
	workload.executions = storage.NewMonitoredLocalStorage[*dto.ExecutionRequest](
		monitoring.MeasurementExecutionsAWS, monitorExecutionsRunnerID(environment.ID(), workload.id), time.Minute, ctx)
	workload.InactivityTimer = NewInactivityTimer(workload, func(_ Runner) error {
		return workload.Destroy(nil)
	})
	return workload, nil
}

func (w *AWSFunctionWorkload) ID() string {
	return w.id
}

func (w *AWSFunctionWorkload) Environment() dto.EnvironmentID {
	return w.environment.ID()
}

func (w *AWSFunctionWorkload) MappedPorts() []*dto.MappedPort {
	return []*dto.MappedPort{}
}

func (w *AWSFunctionWorkload) StoreExecution(id string, request *dto.ExecutionRequest) {
	w.executions.Add(id, request)
}

func (w *AWSFunctionWorkload) ExecutionExists(id string) bool {
	_, ok := w.executions.Get(id)
	return ok
}

// ExecuteInteractively runs the execution request in an AWS function.
// It should be further improved by using the passed context to handle lost connections.
func (w *AWSFunctionWorkload) ExecuteInteractively(
	id string, _ io.ReadWriter, stdout, stderr io.Writer, _ context.Context) (
	<-chan ExitInfo, context.CancelFunc, error) {
	w.ResetTimeout()
	request, ok := w.executions.Pop(id)
	if !ok {
		return nil, nil, ErrorUnknownExecution
	}
	hideEnvironmentVariables(request, "AWS")
	request.PrivilegedExecution = true // AWS does not support multiple users at this moment.
	command, ctx, cancel := prepareExecution(request, w.ctx)
	commands := []string{"/bin/bash", "-c", command}
	exitInternal := make(chan ExitInfo)
	exit := make(chan ExitInfo, 1)

	go w.executeCommand(ctx, commands, stdout, stderr, exitInternal)
	go w.handleRunnerTimeout(ctx, exitInternal, exit, id)

	return exit, cancel, nil
}

// ListFileSystem is currently not supported with this AWS serverless function,
// because the function execution ends with the termination of the workload code.
// An on-demand file system listing after the termination is therefore not possible. Also, we do not want to copy all files.
func (w *AWSFunctionWorkload) ListFileSystem(_ string, _ bool, _ io.Writer, _ bool, _ context.Context) error {
	return dto.ErrNotSupported
}

// UpdateFileSystem copies Files into the executor.
// Current limitation: only previously added files can be deleted.
// Future Work: Deduplication of the file systems, as the largest workload is likely to be used by additional
// CSV files or similar, which are the same for many executions.
func (w *AWSFunctionWorkload) UpdateFileSystem(request *dto.UpdateFileSystemRequest, _ context.Context) error {
	for _, path := range request.Delete {
		delete(w.fs, path)
	}
	for _, file := range request.Copy {
		w.fs[file.Path] = file.Content
	}
	return nil
}

// GetFileContent is currently not supported with this AWS serverless function,
// because the function execution ends with the termination of the workload code.
// An on-demand file streaming after the termination is therefore not possible. Also, we do not want to copy all files.
func (w *AWSFunctionWorkload) GetFileContent(_ string, _ http.ResponseWriter, _ bool, _ context.Context) error {
	return dto.ErrNotSupported
}

func (w *AWSFunctionWorkload) Destroy(_ DestroyReason) error {
	w.cancel()
	if err := w.onDestroy(w); err != nil {
		return fmt.Errorf("error while destroying aws runner: %w", err)
	}
	return nil
}

func (w *AWSFunctionWorkload) executeCommand(ctx context.Context, command []string,
	stdout, stderr io.Writer, exit chan<- ExitInfo,
) {
	defer close(exit)
	data := &awsFunctionRequest{
		Action: w.environment.Image(),
		Cmd:    command,
		Files:  w.fs,
	}
	log.WithContext(ctx).WithField("request", data).Trace("Sending request to AWS")
	rawData, err := json.Marshal(data)
	if err != nil {
		exit <- ExitInfo{uint8(1), fmt.Errorf("cannot stringify aws function request: %w", err)}
		return
	}

	wsConn, response, err := websocket.DefaultDialer.Dial(config.Config.AWS.Endpoint, nil)
	if err != nil {
		exit <- ExitInfo{uint8(1), fmt.Errorf("failed to establish aws connection: %w", err)}
		return
	}
	_ = response.Body.Close()
	defer wsConn.Close()
	err = wsConn.WriteMessage(websocket.TextMessage, rawData)
	if err != nil {
		exit <- ExitInfo{uint8(1), fmt.Errorf("cannot send aws request: %w", err)}
		return
	}

	// receiveOutput listens for the execution timeout (or the exit code).
	exitCode, err := w.receiveOutput(wsConn, stdout, stderr, ctx)
	// TimeoutPassed checks the runner timeout.
	if w.TimeoutPassed() {
		err = ErrorRunnerInactivityTimeout
	}
	exit <- ExitInfo{exitCode, err}
}

func (w *AWSFunctionWorkload) receiveOutput(
	conn *websocket.Conn, stdout, stderr io.Writer, ctx context.Context) (uint8, error) {
	for ctx.Err() == nil {
		messageType, reader, err := conn.NextReader()
		if err != nil {
			return 1, fmt.Errorf("cannot read from aws connection: %w", err)
		}
		if messageType != websocket.TextMessage {
			return 1, ErrWrongMessageType
		}
		var wsMessage dto.WebSocketMessage
		err = json.NewDecoder(reader).Decode(&wsMessage)
		if err != nil {
			return 1, fmt.Errorf("failed to decode message from aws: %w", err)
		}

		log.WithField("msg", wsMessage).Info("New Message from AWS function")

		switch wsMessage.Type {
		default:
			log.WithContext(ctx).WithField("data", wsMessage).Warn("unexpected message from aws function")
		case dto.WebSocketExit:
			return wsMessage.ExitCode, nil
		case dto.WebSocketOutputStdout:
			// We do not check the written bytes as the rawToCodeOceanWriter receives everything or nothing.
			_, err = stdout.Write([]byte(wsMessage.Data))
		case dto.WebSocketOutputStderr, dto.WebSocketOutputError:
			_, err = stderr.Write([]byte(wsMessage.Data))
		}
		if err != nil {
			return 1, fmt.Errorf("failed to forward message: %w", err)
		}
	}
	return 1, fmt.Errorf("receiveOutput stopped by context: %w", ctx.Err())
}

// handleRunnerTimeout listens for a runner timeout and aborts the execution in that case.
// It listens via a context in runningExecutions that is canceled on the timeout event.
func (w *AWSFunctionWorkload) handleRunnerTimeout(ctx context.Context,
	exitInternal <-chan ExitInfo, exit chan<- ExitInfo, executionID string) {
	executionCtx, cancelExecution := context.WithCancel(ctx)
	w.runningExecutions[executionID] = cancelExecution
	defer delete(w.runningExecutions, executionID)
	defer close(exit)

	select {
	case exitInfo := <-exitInternal:
		exit <- exitInfo
	case <-executionCtx.Done():
		exit <- ExitInfo{255, ErrorRunnerInactivityTimeout}
	}
}

// hideEnvironmentVariables sets the CODEOCEAN variable and unsets all variables starting with the passed prefix.
func hideEnvironmentVariables(request *dto.ExecutionRequest, unsetPrefix string) {
	if request.Environment == nil {
		request.Environment = make(map[string]string)
	}
	request.Command = "unset \"${!" + unsetPrefix + "@}\" && " + request.Command
}
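To make the quoting above concrete: in bash, "${!AWS@}" expands to the names of all variables starting with AWS, so the prepended unset drops AWS credentials before the user command runs. An illustrative snippet, not part of the commit:

// Illustrative only: the rewrite applied by hideEnvironmentVariables.
request := &dto.ExecutionRequest{Command: "env"}
hideEnvironmentVariables(request, "AWS")
fmt.Println(request.Command) // unset "${!AWS@}" && env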
@@ -1,165 +0,0 @@
package runner

import (
	"context"
	"encoding/base64"
	"github.com/gorilla/websocket"
	"github.com/openHPI/poseidon/internal/config"
	"github.com/openHPI/poseidon/pkg/dto"
	"github.com/openHPI/poseidon/tests"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
	"time"
)

func (s *MainTestSuite) TestAWSExecutionRequestIsStored() {
	environment := &ExecutionEnvironmentMock{}
	environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
	r, err := NewAWSFunctionWorkload(environment, func(_ Runner) error { return nil })
	s.NoError(err)
	executionRequest := &dto.ExecutionRequest{
		Command:     "command",
		TimeLimit:   10,
		Environment: nil,
	}
	r.StoreExecution(tests.DefaultEnvironmentIDAsString, executionRequest)
	s.True(r.ExecutionExists(tests.DefaultEnvironmentIDAsString))
	storedExecutionRunner, ok := r.executions.Pop(tests.DefaultEnvironmentIDAsString)
	s.True(ok, "Getting an execution should not return ok false")
	s.Equal(executionRequest, storedExecutionRunner)

	err = r.Destroy(nil)
	s.NoError(err)
}

type awsEndpointMock struct {
	hasConnected bool
	ctx          context.Context
	receivedData string
}

func (a *awsEndpointMock) handler(w http.ResponseWriter, r *http.Request) {
	upgrader := websocket.Upgrader{}
	c, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return
	}
	defer c.Close()

	a.hasConnected = true
	for a.ctx.Err() == nil {
		_, message, err := c.ReadMessage()
		if err != nil {
			break
		}
		a.receivedData = string(message)
	}
}

func (s *MainTestSuite) TestAWSFunctionWorkload_ExecuteInteractively() {
	environment := &ExecutionEnvironmentMock{}
	environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
	environment.On("Image").Return("testImage or AWS endpoint")
	r, err := NewAWSFunctionWorkload(environment, func(_ Runner) error { return nil })
	s.Require().NoError(err)

	var cancel context.CancelFunc
	awsMock := &awsEndpointMock{}
	sv := httptest.NewServer(http.HandlerFunc(awsMock.handler))
	defer sv.Close()

	s.Run("establishes WebSocket connection to AWS endpoint", func() {
		// Convert http://127.0.0.1 to ws://127.0.0.1
		config.Config.AWS.Endpoint = "ws" + strings.TrimPrefix(sv.URL, "http")
		awsMock.ctx, cancel = context.WithCancel(context.Background())
		cancel()

		r.StoreExecution(tests.DefaultEnvironmentIDAsString, &dto.ExecutionRequest{})
		exit, _, err := r.ExecuteInteractively(
			tests.DefaultEnvironmentIDAsString, nil, io.Discard, io.Discard, s.TestCtx)
		s.Require().NoError(err)
		<-exit
		s.True(awsMock.hasConnected)
	})

	s.Run("sends execution request", func() {
		s.T().Skip("The AWS runner ignores its context for executions and waits infinitely for the exit message.") // ToDo
		awsMock.ctx, cancel = context.WithTimeout(context.Background(), tests.ShortTimeout)
		defer cancel()
		command := "sl"
		request := &dto.ExecutionRequest{Command: command}
		r.StoreExecution(tests.DefaultEnvironmentIDAsString, request)

		_, cancel, err := r.ExecuteInteractively(
			tests.DefaultEnvironmentIDAsString, nil, io.Discard, io.Discard, s.TestCtx)
		s.Require().NoError(err)
		<-time.After(tests.ShortTimeout)
		cancel()

		expectedRequestData := `{"action":"` + environment.Image() +
			`","cmd":["/bin/bash","-c","env CODEOCEAN=true /bin/bash -c \"unset \\\"\\${!AWS@}\\\" \u0026\u0026 ` + command +
			`\""],"files":{}}`
		s.Equal(expectedRequestData, awsMock.receivedData)
	})

	err = r.Destroy(nil)
	s.NoError(err)
}

func (s *MainTestSuite) TestAWSFunctionWorkload_UpdateFileSystem() {
	s.T().Skip("The AWS runner ignores its context for executions and waits infinitely for the exit message.") // ToDo

	environment := &ExecutionEnvironmentMock{}
	environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
	environment.On("Image").Return("testImage or AWS endpoint")
	r, err := NewAWSFunctionWorkload(environment, nil)
	s.Require().NoError(err)

	var cancel context.CancelFunc
	awsMock := &awsEndpointMock{}
	sv := httptest.NewServer(http.HandlerFunc(awsMock.handler))
	defer sv.Close()

	// Convert http://127.0.0.1 to ws://127.0.0.1
	config.Config.AWS.Endpoint = "ws" + strings.TrimPrefix(sv.URL, "http")
	awsMock.ctx, cancel = context.WithTimeout(context.Background(), tests.ShortTimeout)
	defer cancel()
	command := "sl"
	request := &dto.ExecutionRequest{Command: command}
	r.StoreExecution(tests.DefaultEnvironmentIDAsString, request)
	myFile := dto.File{Path: "myPath", Content: []byte("myContent")}

	err = r.UpdateFileSystem(&dto.UpdateFileSystemRequest{Copy: []dto.File{myFile}}, s.TestCtx)
	s.NoError(err)
	_, execCancel, err := r.ExecuteInteractively(
		tests.DefaultEnvironmentIDAsString, nil, io.Discard, io.Discard, s.TestCtx)
	s.Require().NoError(err)
	<-time.After(tests.ShortTimeout)
	execCancel()

	expectedRequestData := `{"action":"` + environment.Image() +
		`","cmd":["/bin/bash","-c","env CODEOCEAN=true /bin/bash -c \"unset \\\"\\${!AWS@}\\\" \u0026\u0026 ` + command +
		`\""],"files":{"` + string(myFile.Path) + `":"` + base64.StdEncoding.EncodeToString(myFile.Content) + `"}}`
	s.Equal(expectedRequestData, awsMock.receivedData)

	err = r.Destroy(nil)
	s.NoError(err)
}

func (s *MainTestSuite) TestAWSFunctionWorkload_Destroy() {
	environment := &ExecutionEnvironmentMock{}
	environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
	hasDestroyBeenCalled := false
	r, err := NewAWSFunctionWorkload(environment, func(_ Runner) error {
		hasDestroyBeenCalled = true
		return nil
	})
	s.Require().NoError(err)

	var reason error
	err = r.Destroy(reason)
	s.NoError(err)
	s.True(hasDestroyBeenCalled)
}
@@ -1,12 +0,0 @@
package runner

import (
	"github.com/openHPI/poseidon/pkg/dto"
	"github.com/openHPI/poseidon/tests"
)

const (
	defaultEnvironmentID     = dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger)
	anotherEnvironmentID     = dto.EnvironmentID(tests.AnotherEnvironmentIDAsInteger)
	defaultInactivityTimeout = 0
)
@@ -1,349 +0,0 @@
// Code generated by mockery v2.43.2. DO NOT EDIT.

package runner

import (
	dto "github.com/openHPI/poseidon/pkg/dto"
	mock "github.com/stretchr/testify/mock"
)

// ExecutionEnvironmentMock is an autogenerated mock type for the ExecutionEnvironment type
type ExecutionEnvironmentMock struct {
	mock.Mock
}

// AddRunner provides a mock function with given fields: r
func (_m *ExecutionEnvironmentMock) AddRunner(r Runner) {
	_m.Called(r)
}

// ApplyPrewarmingPoolSize provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) ApplyPrewarmingPoolSize() error {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for ApplyPrewarmingPoolSize")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// CPULimit provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) CPULimit() uint {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for CPULimit")
	}

	var r0 uint
	if rf, ok := ret.Get(0).(func() uint); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(uint)
	}

	return r0
}

// Delete provides a mock function with given fields: reason
func (_m *ExecutionEnvironmentMock) Delete(reason DestroyReason) error {
	ret := _m.Called(reason)

	if len(ret) == 0 {
		panic("no return value specified for Delete")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(DestroyReason) error); ok {
		r0 = rf(reason)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// DeleteRunner provides a mock function with given fields: id
func (_m *ExecutionEnvironmentMock) DeleteRunner(id string) (Runner, bool) {
	ret := _m.Called(id)

	if len(ret) == 0 {
		panic("no return value specified for DeleteRunner")
	}

	var r0 Runner
	var r1 bool
	if rf, ok := ret.Get(0).(func(string) (Runner, bool)); ok {
		return rf(id)
	}
	if rf, ok := ret.Get(0).(func(string) Runner); ok {
		r0 = rf(id)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(Runner)
		}
	}

	if rf, ok := ret.Get(1).(func(string) bool); ok {
		r1 = rf(id)
	} else {
		r1 = ret.Get(1).(bool)
	}

	return r0, r1
}

// ID provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) ID() dto.EnvironmentID {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for ID")
	}

	var r0 dto.EnvironmentID
	if rf, ok := ret.Get(0).(func() dto.EnvironmentID); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(dto.EnvironmentID)
	}

	return r0
}

// IdleRunnerCount provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) IdleRunnerCount() uint {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for IdleRunnerCount")
	}

	var r0 uint
	if rf, ok := ret.Get(0).(func() uint); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(uint)
	}

	return r0
}

// Image provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) Image() string {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for Image")
	}

	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}

// MarshalJSON provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) MarshalJSON() ([]byte, error) {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for MarshalJSON")
	}

	var r0 []byte
	var r1 error
	if rf, ok := ret.Get(0).(func() ([]byte, error)); ok {
		return rf()
	}
	if rf, ok := ret.Get(0).(func() []byte); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]byte)
		}
	}

	if rf, ok := ret.Get(1).(func() error); ok {
		r1 = rf()
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// MemoryLimit provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) MemoryLimit() uint {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for MemoryLimit")
	}

	var r0 uint
	if rf, ok := ret.Get(0).(func() uint); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(uint)
	}

	return r0
}

// NetworkAccess provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) NetworkAccess() (bool, []uint16) {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for NetworkAccess")
	}

	var r0 bool
	var r1 []uint16
	if rf, ok := ret.Get(0).(func() (bool, []uint16)); ok {
		return rf()
	}
	if rf, ok := ret.Get(0).(func() bool); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(bool)
	}

	if rf, ok := ret.Get(1).(func() []uint16); ok {
		r1 = rf()
	} else {
		if ret.Get(1) != nil {
			r1 = ret.Get(1).([]uint16)
		}
	}

	return r0, r1
}

// PrewarmingPoolSize provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) PrewarmingPoolSize() uint {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for PrewarmingPoolSize")
	}

	var r0 uint
	if rf, ok := ret.Get(0).(func() uint); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(uint)
	}

	return r0
}

// Register provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) Register() error {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for Register")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Sample provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) Sample() (Runner, bool) {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for Sample")
	}

	var r0 Runner
	var r1 bool
	if rf, ok := ret.Get(0).(func() (Runner, bool)); ok {
		return rf()
	}
	if rf, ok := ret.Get(0).(func() Runner); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(Runner)
		}
	}

	if rf, ok := ret.Get(1).(func() bool); ok {
		r1 = rf()
	} else {
		r1 = ret.Get(1).(bool)
	}

	return r0, r1
}

// SetCPULimit provides a mock function with given fields: limit
func (_m *ExecutionEnvironmentMock) SetCPULimit(limit uint) {
	_m.Called(limit)
}

// SetConfigFrom provides a mock function with given fields: environment
func (_m *ExecutionEnvironmentMock) SetConfigFrom(environment ExecutionEnvironment) {
	_m.Called(environment)
}

// SetID provides a mock function with given fields: id
func (_m *ExecutionEnvironmentMock) SetID(id dto.EnvironmentID) {
	_m.Called(id)
}

// SetImage provides a mock function with given fields: image
func (_m *ExecutionEnvironmentMock) SetImage(image string) {
	_m.Called(image)
}

// SetMemoryLimit provides a mock function with given fields: limit
func (_m *ExecutionEnvironmentMock) SetMemoryLimit(limit uint) {
	_m.Called(limit)
}

// SetNetworkAccess provides a mock function with given fields: allow, ports
func (_m *ExecutionEnvironmentMock) SetNetworkAccess(allow bool, ports []uint16) {
	_m.Called(allow, ports)
}

// SetPrewarmingPoolSize provides a mock function with given fields: count
func (_m *ExecutionEnvironmentMock) SetPrewarmingPoolSize(count uint) {
	_m.Called(count)
}

// NewExecutionEnvironmentMock creates a new instance of ExecutionEnvironmentMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewExecutionEnvironmentMock(t interface {
	mock.TestingT
	Cleanup(func())
}) *ExecutionEnvironmentMock {
	mock := &ExecutionEnvironmentMock{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
@@ -1,43 +0,0 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.

package runner

import (
	time "time"

	mock "github.com/stretchr/testify/mock"
)

// InactivityTimerMock is an autogenerated mock type for the InactivityTimer type
type InactivityTimerMock struct {
	mock.Mock
}

// ResetTimeout provides a mock function with given fields:
func (_m *InactivityTimerMock) ResetTimeout() {
	_m.Called()
}

// SetupTimeout provides a mock function with given fields: duration
func (_m *InactivityTimerMock) SetupTimeout(duration time.Duration) {
	_m.Called(duration)
}

// StopTimeout provides a mock function with given fields:
func (_m *InactivityTimerMock) StopTimeout() {
	_m.Called()
}

// TimeoutPassed provides a mock function with given fields:
func (_m *InactivityTimerMock) TimeoutPassed() bool {
	ret := _m.Called()

	var r0 bool
	if rf, ok := ret.Get(0).(func() bool); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(bool)
	}

	return r0
}
@@ -1,95 +0,0 @@
package runner

import (
	"github.com/openHPI/poseidon/internal/nomad"
	"github.com/openHPI/poseidon/tests"
	"github.com/stretchr/testify/suite"
	"testing"
	"time"
)

func TestInactivityTimerTestSuite(t *testing.T) {
	suite.Run(t, new(InactivityTimerTestSuite))
}

type InactivityTimerTestSuite struct {
	tests.MemoryLeakTestSuite
	runner   Runner
	returned chan bool
}

func (s *InactivityTimerTestSuite) SetupTest() {
	s.MemoryLeakTestSuite.SetupTest()
	s.returned = make(chan bool, 1)
	apiMock := &nomad.ExecutorAPIMock{}
	apiMock.On("DeleteJob", tests.DefaultRunnerID).Return(nil)
	s.runner = NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error {
		s.returned <- true
		return nil
	})

	s.runner.SetupTimeout(tests.ShortTimeout)
}

func (s *InactivityTimerTestSuite) TearDownTest() {
	defer s.MemoryLeakTestSuite.TearDownTest()
	go func() {
		select {
		case <-s.returned:
		case <-time.After(tests.ShortTimeout):
		}
	}()

	err := s.runner.Destroy(nil)
	s.Require().NoError(err)
}

func (s *InactivityTimerTestSuite) TestRunnerIsReturnedAfterTimeout() {
	s.True(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
}

func (s *InactivityTimerTestSuite) TestRunnerIsNotReturnedBeforeTimeout() {
	s.False(tests.ChannelReceivesSomething(s.returned, tests.ShortTimeout/2))
}

func (s *InactivityTimerTestSuite) TestResetTimeoutExtendsTheDeadline() {
	time.Sleep(3 * tests.ShortTimeout / 4)
	s.runner.ResetTimeout()
	s.False(tests.ChannelReceivesSomething(s.returned, 3*tests.ShortTimeout/4),
		"Because of the reset, the timeout should not be reached by now.")
	s.True(tests.ChannelReceivesSomething(s.returned, 5*tests.ShortTimeout/4),
		"After the reset, the timeout should be reached by now.")
}

func (s *InactivityTimerTestSuite) TestStopTimeoutStopsTimeout() {
	s.runner.StopTimeout()
	s.False(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
}

func (s *InactivityTimerTestSuite) TestTimeoutPassedReturnsFalseBeforeDeadline() {
	s.False(s.runner.TimeoutPassed())
}

func (s *InactivityTimerTestSuite) TestTimeoutPassedReturnsTrueAfterDeadline() {
	<-time.After(2 * tests.ShortTimeout)
	s.True(s.runner.TimeoutPassed())
}

func (s *InactivityTimerTestSuite) TestTimerIsNotResetAfterDeadline() {
	time.Sleep(2 * tests.ShortTimeout)
	// We need to empty the returned channel so Return can send to it again.
	tests.ChannelReceivesSomething(s.returned, 0)
	s.runner.ResetTimeout()
	s.False(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
}

func (s *InactivityTimerTestSuite) TestSetupTimeoutStopsOldTimeout() {
	s.runner.SetupTimeout(3 * tests.ShortTimeout)
	s.False(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
	s.True(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
}

func (s *InactivityTimerTestSuite) TestTimerIsInactiveWhenDurationIsZero() {
	s.runner.SetupTimeout(0)
	s.False(tests.ChannelReceivesSomething(s.returned, tests.ShortTimeout))
}
internal/runner/kubernetes_manager.go (new file, 125 lines)
@@ -0,0 +1,125 @@
package runner

import (
	"context"
	"fmt"
	"github.com/openHPI/poseidon/internal/kubernetes"
	"github.com/openHPI/poseidon/internal/nomad"
	"github.com/openHPI/poseidon/pkg/dto"
	"github.com/openHPI/poseidon/pkg/storage"
	"github.com/openHPI/poseidon/pkg/util"
	appv1 "k8s.io/api/apps/v1"
	"strconv"
	"time"
)

type KubernetesRunnerManager struct {
	*AbstractManager
	apiClient            kubernetes.ExecutorAPI
	reloadingEnvironment storage.Storage[*alertData]
}

func NewKubernetesRunnerManager(apiClient *kubernetes.ExecutorAPI, ctx context.Context) *KubernetesRunnerManager {
	return &KubernetesRunnerManager{
		AbstractManager:      NewAbstractManager(ctx),
		apiClient:            *apiClient,
		reloadingEnvironment: storage.NewLocalStorage[*alertData](),
	}
}

// Load recovers all runners for all existing environments.
func (k *KubernetesRunnerManager) Load() {
	log.Info("Loading runners")
	newUsedRunners := storage.NewLocalStorage[Runner]()
	for _, environment := range k.ListEnvironments() {
		usedRunners, err := k.loadEnvironment(environment)
		if err != nil {
			log.WithError(err).WithField(dto.KeyEnvironmentID, environment.ID().ToString()).
				Warn("Failed loading environment. Skipping...")
			continue
		}
		for _, r := range usedRunners.List() {
			newUsedRunners.Add(r.ID(), r)
		}
	}
	// TODO: Missing implementation.
	// k.updateUsedRunners(newUsedRunners, true)
}

func (k *KubernetesRunnerManager) loadEnvironment(environment ExecutionEnvironment) (used storage.Storage[Runner], err error) {
	used = storage.NewLocalStorage[Runner]()

	runnerJobs, err := k.apiClient.LoadRunnerJobs(environment.ID())
	if err != nil {
		return nil, fmt.Errorf("failed fetching the runner jobs: %w", err)
	}
	for _, job := range runnerJobs {
		r, isUsed, err := k.loadSingleJob(job, environment)
		if err != nil {
			log.WithError(err).WithField(dto.KeyEnvironmentID, environment.ID().ToString()).
				WithField("used", isUsed).Warn("Failed loading job. Skipping...")
			continue
		} else if isUsed {
			used.Add(r.ID(), r)
		}
	}
	err = environment.ApplyPrewarmingPoolSize()
	if err != nil {
		return used, fmt.Errorf("couldn't scale environment: %w", err)
	}
	return used, nil
}

func (k *KubernetesRunnerManager) loadSingleJob(deployment *appv1.Deployment, environment ExecutionEnvironment) (r Runner, isUsed bool, err error) {
	configTaskGroup := deployment.Spec.Template
	// Note: the original check here was `if err != nil`, which is dead code (err is
	// always nil at this point). Missing annotations are the Kubernetes equivalent
	// of the missing Nomad task group that the error refers to.
	if configTaskGroup.Annotations == nil {
		return nil, false, fmt.Errorf("%w, %s", nomad.ErrorMissingTaskGroup, deployment.Name)
	}

	isUsed = configTaskGroup.Annotations[nomad.ConfigMetaUsedKey] == nomad.ConfigMetaUsedValue
	portMappings, err := k.apiClient.LoadRunnerPortMappings(deployment.Name)
	if err != nil {
		return nil, false, fmt.Errorf("error loading runner portMappings: %w", err)
	}

	newJob := NewKubernetesDeployment(deployment.Name, portMappings, k.apiClient, k.onRunnerDestroyed)
	log.WithField("isUsed", isUsed).WithField(dto.KeyRunnerID, newJob.ID()).Debug("Recovered Runner")
	if isUsed {
		timeout, err := strconv.Atoi(configTaskGroup.ObjectMeta.Annotations[nomad.ConfigMetaTimeoutKey])
		if err != nil {
			log.WithField(dto.KeyRunnerID, newJob.ID()).WithError(err).Warn("failed loading timeout from meta values")
			timeout = int(nomad.RunnerTimeoutFallback.Seconds())
			go k.markRunnerAsUsed(newJob, timeout)
		}
		newJob.SetupTimeout(time.Duration(timeout) * time.Second)
	} else {
		environment.AddRunner(newJob)
	}
	return newJob, isUsed, nil
}

func (k *KubernetesRunnerManager) markRunnerAsUsed(runner Runner, timeoutDuration int) {
	err := util.RetryExponential(func() (err error) {
		if err = k.apiClient.MarkRunnerAsUsed(runner.ID(), timeoutDuration); err != nil {
			err = fmt.Errorf("cannot mark runner as used: %w", err)
		}
		return
	})
	if err != nil {
		log.WithError(err).WithField(dto.KeyRunnerID, runner.ID()).Error("cannot mark runner as used")
		err := k.Return(runner)
		if err != nil {
			log.WithError(err).WithField(dto.KeyRunnerID, runner.ID()).Error("can't mark runner as used and can't return runner")
		}
	}
}

func (k *KubernetesRunnerManager) onRunnerDestroyed(r Runner) error {
	k.usedRunners.Delete(r.ID())

	environment, ok := k.GetEnvironment(r.Environment())
	if ok {
		environment.DeleteRunner(r.ID())
	}
	return nil
}
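The commented-out k.updateUsedRunners(newUsedRunners, true) call marks the missing piece of Load. A minimal sketch of what such a reconciliation could look like, assuming semantics analogous to the Nomad manager's version; the method below is hypothetical and not part of the commit:

// Hypothetical sketch: drop local state for used runners that no longer exist
// in the cluster, then adopt the freshly recovered set.
func (k *KubernetesRunnerManager) updateUsedRunners(newUsedRunners storage.Storage[Runner], destroyVanished bool) {
	for _, r := range k.usedRunners.List() {
		if _, ok := newUsedRunners.Get(r.ID()); !ok && destroyVanished {
			if err := r.Destroy(nil); err != nil {
				log.WithError(err).WithField(dto.KeyRunnerID, r.ID()).Warn("failed to destroy vanished runner")
			}
			k.usedRunners.Delete(r.ID())
		}
	}
	for _, r := range newUsedRunners.List() {
		k.usedRunners.Add(r.ID(), r)
	}
}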
internal/runner/kubernetes_runner.go (new file, 107 lines)
@@ -0,0 +1,107 @@
package runner

import (
	"context"
	"fmt"
	"github.com/openHPI/poseidon/internal/kubernetes"
	"github.com/openHPI/poseidon/internal/nomad"
	"github.com/openHPI/poseidon/pkg/dto"
	"github.com/openHPI/poseidon/pkg/monitoring"
	"github.com/openHPI/poseidon/pkg/storage"
	"io"
	v1 "k8s.io/api/core/v1"
	"net/http"
	"time"
)

// KubernetesDeployment is an abstraction to communicate with Kubernetes environments.
type KubernetesDeployment struct {
	InactivityTimer
	executions   storage.Storage[*dto.ExecutionRequest]
	id           string
	portMappings []v1.ContainerPort
	api          kubernetes.ExecutorAPI
	onDestroy    DestroyRunnerHandler
	ctx          context.Context
	cancel       context.CancelFunc
}

func (r *KubernetesDeployment) MappedPorts() []*dto.MappedPort {
	// TODO: implement me.
	panic("implement me")
}

func (r *KubernetesDeployment) StoreExecution(id string, executionRequest *dto.ExecutionRequest) {
	// TODO: implement me.
	panic("implement me")
}

func (r *KubernetesDeployment) ExecutionExists(id string) bool {
	// TODO: implement me.
	panic("implement me")
}

func (r *KubernetesDeployment) ExecuteInteractively(id string, stdin io.ReadWriter, stdout, stderr io.Writer, ctx context.Context) (exit <-chan ExitInfo, cancel context.CancelFunc, err error) {
	// TODO: implement me.
	panic("implement me")
}

func (r *KubernetesDeployment) ListFileSystem(path string, recursive bool, result io.Writer, privilegedExecution bool, ctx context.Context) error {
	// TODO: implement me.
	panic("implement me")
}

func (r *KubernetesDeployment) UpdateFileSystem(request *dto.UpdateFileSystemRequest, ctx context.Context) error {
	// TODO: implement me.
	panic("implement me")
}

func (r *KubernetesDeployment) GetFileContent(path string, content http.ResponseWriter, privilegedExecution bool, ctx context.Context) error {
	// TODO: implement me.
	panic("implement me")
}

func (r *KubernetesDeployment) Destroy(reason DestroyReason) error {
	// TODO: implement me.
	panic("implement me")
}

func (r *KubernetesDeployment) ID() string {
	return r.id
}

func (r *KubernetesDeployment) Environment() dto.EnvironmentID {
	id, err := nomad.EnvironmentIDFromRunnerID(r.ID())
	if err != nil {
		log.WithError(err).Error("Runners must have correct IDs")
	}
	return id
}

// NewKubernetesDeployment creates a new KubernetesDeployment with the provided id.
// The InactivityTimer is used actively. It executes onDestroy when it has expired.
// The InactivityTimer is persisted by the runner manager's Claim function.
func NewKubernetesDeployment(id string, portMappings []v1.ContainerPort,
	apiClient kubernetes.ExecutorAPI, onDestroy DestroyRunnerHandler,
) *KubernetesDeployment {
	ctx := context.WithValue(context.Background(), dto.ContextKey(dto.KeyRunnerID), id)
	ctx, cancel := context.WithCancel(ctx)
	job := &KubernetesDeployment{
		id:           id,
		portMappings: portMappings,
		api:          apiClient,
		onDestroy:    onDestroy,
		ctx:          ctx,
		cancel:       cancel,
	}
	job.executions = storage.NewMonitoredLocalStorage[*dto.ExecutionRequest](
		monitoring.MeasurementExecutionsNomad, monitorExecutionsRunnerID(job.Environment(), id), time.Minute, ctx)
	job.InactivityTimer = NewInactivityTimer(job, func(r Runner) error {
		err := r.Destroy(ErrorRunnerInactivityTimeout)
		if err != nil {
			err = fmt.Errorf("KubernetesDeployment: %w", err)
		}
		return err
	})
	return job
}
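All KubernetesDeployment methods are stubs so far. For reference, one plausible way to back ExecuteInteractively later is the pods/exec subresource of the Kubernetes API via client-go. The sketch below is a standalone illustration; the namespace, pod, and container wiring are assumptions, and this code is not part of the commit:

package kubexec

import (
	"context"
	"io"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// execInPod streams a command into a running pod, attaching the given
// stdin/stdout/stderr, and returns when the command exits.
func execInPod(ctx context.Context, config *rest.Config, clientset *kubernetes.Clientset,
	namespace, pod, container string, command []string,
	stdin io.Reader, stdout, stderr io.Writer) error {
	req := clientset.CoreV1().RESTClient().Post().
		Resource("pods").Name(pod).Namespace(namespace).SubResource("exec").
		VersionedParams(&v1.PodExecOptions{
			Container: container,
			Command:   command,
			Stdin:     stdin != nil,
			Stdout:    true,
			Stderr:    true,
		}, scheme.ParameterCodec)

	executor, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		return err
	}
	return executor.StreamWithContext(ctx, remotecommand.StreamOptions{
		Stdin:  stdin,
		Stdout: stdout,
		Stderr: stderr,
	})
}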
@@ -1,178 +0,0 @@
// Code generated by mockery v2.10.0. DO NOT EDIT.

package runner

import (
	dto "github.com/openHPI/poseidon/pkg/dto"
	mock "github.com/stretchr/testify/mock"
)

// ManagerMock is an autogenerated mock type for the Manager type
type ManagerMock struct {
	mock.Mock
}

// Claim provides a mock function with given fields: id, duration
func (_m *ManagerMock) Claim(id dto.EnvironmentID, duration int) (Runner, error) {
	ret := _m.Called(id, duration)

	var r0 Runner
	if rf, ok := ret.Get(0).(func(dto.EnvironmentID, int) Runner); ok {
		r0 = rf(id, duration)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(Runner)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(dto.EnvironmentID, int) error); ok {
		r1 = rf(id, duration)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// DeleteEnvironment provides a mock function with given fields: id
func (_m *ManagerMock) DeleteEnvironment(id dto.EnvironmentID) {
	_m.Called(id)
}

// EnvironmentStatistics provides a mock function with given fields:
func (_m *ManagerMock) EnvironmentStatistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData {
	ret := _m.Called()

	var r0 map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData
	if rf, ok := ret.Get(0).(func() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData)
		}
	}

	return r0
}

// Get provides a mock function with given fields: runnerID
func (_m *ManagerMock) Get(runnerID string) (Runner, error) {
	ret := _m.Called(runnerID)

	var r0 Runner
	if rf, ok := ret.Get(0).(func(string) Runner); ok {
		r0 = rf(runnerID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(Runner)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string) error); ok {
		r1 = rf(runnerID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetEnvironment provides a mock function with given fields: id
func (_m *ManagerMock) GetEnvironment(id dto.EnvironmentID) (ExecutionEnvironment, bool) {
	ret := _m.Called(id)

	var r0 ExecutionEnvironment
	if rf, ok := ret.Get(0).(func(dto.EnvironmentID) ExecutionEnvironment); ok {
		r0 = rf(id)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(ExecutionEnvironment)
		}
	}

	var r1 bool
	if rf, ok := ret.Get(1).(func(dto.EnvironmentID) bool); ok {
		r1 = rf(id)
	} else {
		r1 = ret.Get(1).(bool)
	}

	return r0, r1
}

// HasNextHandler provides a mock function with given fields:
func (_m *ManagerMock) HasNextHandler() bool {
	ret := _m.Called()

	var r0 bool
	if rf, ok := ret.Get(0).(func() bool); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(bool)
	}

	return r0
}

// ListEnvironments provides a mock function with given fields:
func (_m *ManagerMock) ListEnvironments() []ExecutionEnvironment {
	ret := _m.Called()

	var r0 []ExecutionEnvironment
	if rf, ok := ret.Get(0).(func() []ExecutionEnvironment); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]ExecutionEnvironment)
		}
	}

	return r0
}

// Load provides a mock function with given fields:
func (_m *ManagerMock) Load() {
	_m.Called()
}

// NextHandler provides a mock function with given fields:
func (_m *ManagerMock) NextHandler() AccessorHandler {
	ret := _m.Called()

	var r0 AccessorHandler
	if rf, ok := ret.Get(0).(func() AccessorHandler); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(AccessorHandler)
		}
	}

	return r0
}

// Return provides a mock function with given fields: r
func (_m *ManagerMock) Return(r Runner) error {
	ret := _m.Called(r)

	var r0 error
	if rf, ok := ret.Get(0).(func(Runner) error); ok {
		r0 = rf(r)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// SetNextHandler provides a mock function with given fields: m
func (_m *ManagerMock) SetNextHandler(m AccessorHandler) {
	_m.Called(m)
}

// StoreEnvironment provides a mock function with given fields: environment
func (_m *ManagerMock) StoreEnvironment(environment ExecutionEnvironment) {
	_m.Called(environment)
}
@@ -42,6 +42,7 @@ func NewNomadRunnerManager(apiClient nomad.ExecutorAPI, ctx context.Context) *No
	return &NomadRunnerManager{NewAbstractManager(ctx), apiClient, storage.NewLocalStorage[*alertData]()}
}

// Claim returns a runner for the given environment. The runner will be marked as used for the given duration.
func (m *NomadRunnerManager) Claim(environmentID dto.EnvironmentID, duration int) (Runner, error) {
	environment, ok := m.GetEnvironment(environmentID)
	if !ok {
@@ -185,6 +186,7 @@ func (m *NomadRunnerManager) checkPrewarmingPoolAlert(environment ExecutionEnvir

func (m *NomadRunnerManager) loadEnvironment(environment ExecutionEnvironment) (used storage.Storage[Runner], err error) {
	used = storage.NewLocalStorage[Runner]()

	runnerJobs, err := m.apiClient.LoadRunnerJobs(environment.ID())
	if err != nil {
		return nil, fmt.Errorf("failed fetching the runner jobs: %w", err)
@@ -1,716 +0,0 @@
|
||||
package runner
|
||||
|
||||
import (
|
||||
"context"
|
||||
nomadApi "github.com/hashicorp/nomad/api"
|
||||
"github.com/openHPI/poseidon/internal/config"
|
||||
"github.com/openHPI/poseidon/internal/nomad"
|
||||
"github.com/openHPI/poseidon/pkg/dto"
|
||||
"github.com/openHPI/poseidon/pkg/storage"
|
||||
"github.com/openHPI/poseidon/pkg/util"
|
||||
"github.com/openHPI/poseidon/tests"
|
||||
"github.com/openHPI/poseidon/tests/helpers"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestGetNextRunnerTestSuite(t *testing.T) {
|
||||
suite.Run(t, new(ManagerTestSuite))
|
||||
}
|
||||
|
||||
type ManagerTestSuite struct {
|
||||
tests.MemoryLeakTestSuite
|
||||
apiMock *nomad.ExecutorAPIMock
|
||||
nomadRunnerManager *NomadRunnerManager
|
||||
exerciseEnvironment *ExecutionEnvironmentMock
|
||||
exerciseRunner Runner
|
||||
}
|
||||
|
||||
func (s *ManagerTestSuite) SetupTest() {
|
||||
s.MemoryLeakTestSuite.SetupTest()
|
||||
s.apiMock = &nomad.ExecutorAPIMock{}
|
||||
mockRunnerQueries(s.TestCtx, s.apiMock, []string{})
|
||||
// Instantly closed context to manually start the update process in some cases
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
s.nomadRunnerManager = NewNomadRunnerManager(s.apiMock, ctx)
|
||||
|
||||
s.exerciseRunner = NewNomadJob(tests.DefaultRunnerID, nil, s.apiMock, s.nomadRunnerManager.onRunnerDestroyed)
|
||||
s.exerciseEnvironment = createBasicEnvironmentMock(defaultEnvironmentID)
|
||||
s.nomadRunnerManager.StoreEnvironment(s.exerciseEnvironment)
|
||||
}
|
||||
|
||||
func (s *ManagerTestSuite) TearDownTest() {
|
||||
defer s.MemoryLeakTestSuite.TearDownTest()
|
||||
err := s.exerciseRunner.Destroy(nil)
|
||||
s.Require().NoError(err)
|
||||
}
|
||||
|
||||
func mockRunnerQueries(ctx context.Context, apiMock *nomad.ExecutorAPIMock, returnedRunnerIds []string) {
|
||||
// reset expected calls to allow new mocked return values
|
||||
apiMock.ExpectedCalls = []*mock.Call{}
|
||||
call := apiMock.On("WatchEventStream", mock.Anything, mock.Anything, mock.Anything)
|
||||
call.Run(func(args mock.Arguments) {
|
||||
<-ctx.Done()
|
||||
call.ReturnArguments = mock.Arguments{nil}
|
||||
})
|
||||
apiMock.On("LoadEnvironmentJobs").Return([]*nomadApi.Job{}, nil)
|
||||
apiMock.On("LoadRunnerJobs", mock.AnythingOfType("dto.EnvironmentID")).Return([]*nomadApi.Job{}, nil)
|
||||
apiMock.On("MarkRunnerAsUsed", mock.AnythingOfType("string"), mock.AnythingOfType("int")).Return(nil)
|
||||
apiMock.On("LoadRunnerIDs", tests.DefaultRunnerID).Return(returnedRunnerIds, nil)
|
||||
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
|
||||
apiMock.On("JobScale", tests.DefaultRunnerID).Return(uint(len(returnedRunnerIds)), nil)
|
||||
apiMock.On("SetJobScale", tests.DefaultRunnerID, mock.AnythingOfType("uint"), "Runner Requested").Return(nil)
|
||||
apiMock.On("RegisterRunnerJob", mock.Anything).Return(nil)
|
||||
apiMock.On("MonitorEvaluation", mock.Anything, mock.Anything).Return(nil)
|
||||
}
|
||||
|
||||
func mockIdleRunners(environmentMock *ExecutionEnvironmentMock) {
	tests.RemoveMethodFromMock(&environmentMock.Mock, "DeleteRunner")
	idleRunner := storage.NewLocalStorage[Runner]()
	environmentMock.On("AddRunner", mock.Anything).Run(func(args mock.Arguments) {
		r, ok := args.Get(0).(Runner)
		if !ok {
			return
		}
		idleRunner.Add(r.ID(), r)
	})
	sampleCall := environmentMock.On("Sample", mock.Anything)
	sampleCall.Run(func(args mock.Arguments) {
		r, ok := idleRunner.Sample()
		sampleCall.ReturnArguments = mock.Arguments{r, ok}
	})
	deleteCall := environmentMock.On("DeleteRunner", mock.AnythingOfType("string"))
	deleteCall.Run(func(args mock.Arguments) {
		id, ok := args.Get(0).(string)
		if !ok {
			log.Fatal("Cannot parse ID")
		}
		r, ok := idleRunner.Get(id)
		deleteCall.ReturnArguments = mock.Arguments{r, ok}
		if !ok {
			return
		}
		idleRunner.Delete(id)
	})
}

func (s *ManagerTestSuite) waitForRunnerRefresh() {
	<-time.After(tests.ShortTimeout)
}

func (s *ManagerTestSuite) TestSetEnvironmentAddsNewEnvironment() {
	anotherEnvironment := createBasicEnvironmentMock(anotherEnvironmentID)
	s.nomadRunnerManager.StoreEnvironment(anotherEnvironment)

	job, ok := s.nomadRunnerManager.environments.Get(anotherEnvironmentID.ToString())
	s.True(ok)
	s.NotNil(job)
}

func (s *ManagerTestSuite) TestClaimReturnsNotFoundErrorIfEnvironmentNotFound() {
	runner, err := s.nomadRunnerManager.Claim(anotherEnvironmentID, defaultInactivityTimeout)
	s.Nil(runner)
	s.Equal(ErrUnknownExecutionEnvironment, err)
}

func (s *ManagerTestSuite) TestClaimReturnsRunnerIfAvailable() {
	s.exerciseEnvironment.On("Sample", mock.Anything).Return(s.exerciseRunner, true)
	receivedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
	s.NoError(err)
	s.Equal(s.exerciseRunner, receivedRunner)
}

func (s *ManagerTestSuite) TestClaimReturnsErrorIfNoRunnerAvailable() {
	s.waitForRunnerRefresh()
	s.exerciseEnvironment.On("Sample", mock.Anything).Return(nil, false)
	runner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
	s.Nil(runner)
	s.Equal(ErrNoRunnersAvailable, err)
}

func (s *ManagerTestSuite) TestClaimReturnsNoRunnerOfDifferentEnvironment() {
	s.exerciseEnvironment.On("Sample", mock.Anything).Return(s.exerciseRunner, true)
	receivedRunner, err := s.nomadRunnerManager.Claim(anotherEnvironmentID, defaultInactivityTimeout)
	s.Nil(receivedRunner)
	s.Error(err)
}

func (s *ManagerTestSuite) TestClaimDoesNotReturnTheSameRunnerTwice() {
	s.exerciseEnvironment.On("Sample", mock.Anything).Return(s.exerciseRunner, true).Once()
	secondRunner := NewNomadJob(tests.AnotherRunnerID, nil, s.apiMock, s.nomadRunnerManager.onRunnerDestroyed)
	s.exerciseEnvironment.On("Sample", mock.Anything).Return(secondRunner, true).Once()

	firstReceivedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
	s.NoError(err)
	secondReceivedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
	s.NoError(err)
	s.NotEqual(firstReceivedRunner, secondReceivedRunner)

	err = secondRunner.Destroy(nil)
	s.NoError(err)
}

func (s *ManagerTestSuite) TestClaimAddsRunnerToUsedRunners() {
	s.exerciseEnvironment.On("Sample", mock.Anything).Return(s.exerciseRunner, true)
	receivedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
	s.Require().NoError(err)
	savedRunner, ok := s.nomadRunnerManager.usedRunners.Get(receivedRunner.ID())
	s.True(ok)
	s.Equal(savedRunner, receivedRunner)
}

func (s *ManagerTestSuite) TestClaimRemovesRunnerWhenMarkAsUsedFails() {
	s.exerciseEnvironment.On("Sample", mock.Anything).Return(s.exerciseRunner, true)
	s.exerciseEnvironment.On("DeleteRunner", mock.AnythingOfType("string")).Return(nil, false)
	s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
	util.MaxConnectionRetriesExponential = 1
	modifyMockedCall(s.apiMock, "MarkRunnerAsUsed", func(call *mock.Call) {
		call.Run(func(args mock.Arguments) {
			call.ReturnArguments = mock.Arguments{tests.ErrDefault}
		})
	})

	claimedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
	s.Require().NoError(err)
	<-time.After(time.Second + tests.ShortTimeout) // Claimed runners are marked as used asynchronously.
	s.apiMock.AssertCalled(s.T(), "DeleteJob", claimedRunner.ID())
	_, ok := s.nomadRunnerManager.usedRunners.Get(claimedRunner.ID())
	s.False(ok)
}

func (s *ManagerTestSuite) TestGetReturnsRunnerIfRunnerIsUsed() {
	s.nomadRunnerManager.usedRunners.Add(s.exerciseRunner.ID(), s.exerciseRunner)
	savedRunner, err := s.nomadRunnerManager.Get(s.exerciseRunner.ID())
	s.NoError(err)
	s.Equal(savedRunner, s.exerciseRunner)
}

func (s *ManagerTestSuite) TestGetReturnsErrorIfRunnerNotFound() {
	savedRunner, err := s.nomadRunnerManager.Get(tests.DefaultRunnerID)
	s.Nil(savedRunner)
	s.Error(err)
}

func (s *ManagerTestSuite) TestReturnRemovesRunnerFromUsedRunners() {
	s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
	s.exerciseEnvironment.On("DeleteRunner", mock.AnythingOfType("string")).Return(nil, false)
	s.nomadRunnerManager.usedRunners.Add(s.exerciseRunner.ID(), s.exerciseRunner)
	err := s.nomadRunnerManager.Return(s.exerciseRunner)
	s.Nil(err)
	_, ok := s.nomadRunnerManager.usedRunners.Get(s.exerciseRunner.ID())
	s.False(ok)
}

func (s *ManagerTestSuite) TestReturnCallsDeleteRunnerApiMethod() {
	s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
	s.exerciseEnvironment.On("DeleteRunner", mock.AnythingOfType("string")).Return(nil, false)
	err := s.nomadRunnerManager.Return(s.exerciseRunner)
	s.Nil(err)
	s.apiMock.AssertCalled(s.T(), "DeleteJob", s.exerciseRunner.ID())
}

func (s *ManagerTestSuite) TestReturnReturnsErrorWhenApiCallFailed() {
	tests.RemoveMethodFromMock(&s.apiMock.Mock, "DeleteJob")
	s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(tests.ErrDefault)
	defer s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
	defer tests.RemoveMethodFromMock(&s.apiMock.Mock, "DeleteJob")
	s.exerciseEnvironment.On("DeleteRunner", mock.AnythingOfType("string")).Return(nil, false)

	util.MaxConnectionRetriesExponential = 1
	util.InitialWaitingDuration = 2 * tests.ShortTimeout

	chReturnDone := make(chan error)
	go func(done chan<- error) {
		err := s.nomadRunnerManager.Return(s.exerciseRunner)
		select {
		case <-s.TestCtx.Done():
		case done <- err:
		}
		close(done)
	}(chReturnDone)

	select {
	case <-chReturnDone:
		s.Fail("Return should not return if the API request failed")
	case <-time.After(tests.ShortTimeout):
	}

	select {
	case err := <-chReturnDone:
		s.ErrorIs(err, tests.ErrDefault)
	case <-time.After(2 * tests.ShortTimeout):
		s.Fail("Return should return after the retry mechanism")
		// Note: MaxConnectionRetriesExponential and InitialWaitingDuration are decreased extremely here.
	}
}

func (s *ManagerTestSuite) TestUpdateRunnersLogsErrorFromWatchAllocation() {
	logger, hook := test.NewNullLogger()
	log = logger.WithField("pkg", "runner")
	modifyMockedCall(s.apiMock, "WatchEventStream", func(call *mock.Call) {
		call.Run(func(args mock.Arguments) {
			call.ReturnArguments = mock.Arguments{tests.ErrDefault}
		})
	})

	err := s.nomadRunnerManager.SynchronizeRunners(s.TestCtx)
	if err != nil {
		log.WithError(err).Error("failed to synchronize runners")
	}

	s.Require().Equal(2, len(hook.Entries))
	s.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
	err, ok := hook.LastEntry().Data[logrus.ErrorKey].(error)
	s.Require().True(ok)
	s.ErrorIs(err, tests.ErrDefault)
}

func (s *ManagerTestSuite) TestUpdateRunnersAddsIdleRunner() {
	allocation := &nomadApi.Allocation{ID: tests.DefaultRunnerID}
	environment, ok := s.nomadRunnerManager.environments.Get(defaultEnvironmentID.ToString())
	s.Require().True(ok)
	allocation.JobID = environment.ID().ToString()
	mockIdleRunners(environment.(*ExecutionEnvironmentMock))

	_, ok = environment.Sample()
	s.Require().False(ok)

	modifyMockedCall(s.apiMock, "WatchEventStream", func(call *mock.Call) {
		call.Run(func(args mock.Arguments) {
			callbacks, ok := args.Get(1).(*nomad.AllocationProcessing)
			s.Require().True(ok)
			callbacks.OnNew(allocation, 0)
			call.ReturnArguments = mock.Arguments{nil}
		})
	})

	go func() {
		err := s.nomadRunnerManager.SynchronizeRunners(s.TestCtx)
		if err != nil {
			log.WithError(err).Error("failed to synchronize runners")
		}
	}()
	<-time.After(10 * time.Millisecond)

	r, ok := environment.Sample()
	s.True(ok)
	s.NoError(r.Destroy(nil))
}

func (s *ManagerTestSuite) TestUpdateRunnersRemovesIdleAndUsedRunner() {
	allocation := &nomadApi.Allocation{JobID: tests.DefaultRunnerID}
	environment, ok := s.nomadRunnerManager.environments.Get(defaultEnvironmentID.ToString())
	s.Require().True(ok)
	mockIdleRunners(environment.(*ExecutionEnvironmentMock))

	testRunner := NewNomadJob(allocation.JobID, nil, s.apiMock, s.nomadRunnerManager.onRunnerDestroyed)
	s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
	environment.AddRunner(testRunner)
	s.nomadRunnerManager.usedRunners.Add(testRunner.ID(), testRunner)

	modifyMockedCall(s.apiMock, "WatchEventStream", func(call *mock.Call) {
		call.Run(func(args mock.Arguments) {
			callbacks, ok := args.Get(1).(*nomad.AllocationProcessing)
			s.Require().True(ok)
			callbacks.OnDeleted(allocation.JobID, nil)
			call.ReturnArguments = mock.Arguments{nil}
		})
	})

	go func() {
		err := s.nomadRunnerManager.SynchronizeRunners(s.TestCtx)
		if err != nil {
			log.WithError(err).Error("failed to synchronize runners")
		}
	}()
	<-time.After(tests.ShortTimeout)

	_, ok = environment.Sample()
	s.False(ok)
	_, ok = s.nomadRunnerManager.usedRunners.Get(allocation.JobID)
	s.False(ok)
}

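// modifyMockedCall applies the given modifier to every expectation registered for the named method,
// so tests can change a mocked behavior after the suite setup.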
func modifyMockedCall(apiMock *nomad.ExecutorAPIMock, method string, modifier func(call *mock.Call)) {
	for _, c := range apiMock.ExpectedCalls {
		if c.Method == method {
			modifier(c)
		}
	}
}

func (s *ManagerTestSuite) TestOnAllocationAdded() {
	s.Run("does not add environment template id job", func() {
		environment, ok := s.nomadRunnerManager.environments.Get(tests.DefaultEnvironmentIDAsString)
		s.True(ok)
		mockIdleRunners(environment.(*ExecutionEnvironmentMock))

		alloc := &nomadApi.Allocation{JobID: nomad.TemplateJobID(tests.DefaultEnvironmentIDAsInteger)}
		s.nomadRunnerManager.onAllocationAdded(alloc, 0)

		_, ok = environment.Sample()
		s.False(ok)
	})
	s.Run("does not panic when environment id cannot be parsed", func() {
		alloc := &nomadApi.Allocation{JobID: ""}
		s.NotPanics(func() {
			s.nomadRunnerManager.onAllocationAdded(alloc, 0)
		})
	})
	s.Run("does not panic when environment does not exist", func() {
		nonExistentEnvironment := dto.EnvironmentID(1234)
		_, ok := s.nomadRunnerManager.environments.Get(nonExistentEnvironment.ToString())
		s.Require().False(ok)

		alloc := &nomadApi.Allocation{JobID: nomad.RunnerJobID(nonExistentEnvironment, "1-1-1-1")}
		s.NotPanics(func() {
			s.nomadRunnerManager.onAllocationAdded(alloc, 0)
		})
	})
	s.Run("adds correct job", func() {
		s.Run("without allocated resources", func() {
			environment, ok := s.nomadRunnerManager.environments.Get(tests.DefaultEnvironmentIDAsString)
			s.True(ok)
			mockIdleRunners(environment.(*ExecutionEnvironmentMock))

			_, ok = environment.Sample()
			s.Require().False(ok)

			alloc := &nomadApi.Allocation{
				JobID:              tests.DefaultRunnerID,
				AllocatedResources: nil,
			}
			s.nomadRunnerManager.onAllocationAdded(alloc, 0)

			runner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
			s.NoError(err)
			nomadJob, ok := runner.(*NomadJob)
			s.True(ok)
			s.Equal(nomadJob.id, tests.DefaultRunnerID)
			s.Empty(nomadJob.portMappings)

			s.Run("but not again", func() {
				s.nomadRunnerManager.onAllocationAdded(alloc, 0)
				runner, err = s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
				s.Error(err)
			})

			err = nomadJob.Destroy(nil)
			s.NoError(err)
		})
		s.nomadRunnerManager.usedRunners.Purge()
		s.Run("with mapped ports", func() {
			environment, ok := s.nomadRunnerManager.environments.Get(tests.DefaultEnvironmentIDAsString)
			s.True(ok)
			mockIdleRunners(environment.(*ExecutionEnvironmentMock))

			alloc := &nomadApi.Allocation{
				JobID: tests.DefaultRunnerID,
				AllocatedResources: &nomadApi.AllocatedResources{
					Shared: nomadApi.AllocatedSharedResources{Ports: tests.DefaultPortMappings},
				},
			}
			s.nomadRunnerManager.onAllocationAdded(alloc, 0)

			runner, ok := environment.Sample()
			s.True(ok)
			nomadJob, ok := runner.(*NomadJob)
			s.True(ok)
			s.Equal(nomadJob.id, tests.DefaultRunnerID)
			s.Equal(nomadJob.portMappings, tests.DefaultPortMappings)

			err := runner.Destroy(nil)
			s.NoError(err)
		})
	})
}

func (s *ManagerTestSuite) TestOnAllocationStopped() {
	s.Run("returns false for idle runner", func() {
		environment, ok := s.nomadRunnerManager.environments.Get(tests.DefaultEnvironmentIDAsString)
		s.Require().True(ok)
		mockIdleRunners(environment.(*ExecutionEnvironmentMock))

		r := NewNomadJob(tests.DefaultRunnerID, []nomadApi.PortMapping{}, s.apiMock, func(r Runner) error { return nil })
		environment.AddRunner(r)
		alreadyRemoved := s.nomadRunnerManager.onAllocationStopped(tests.DefaultRunnerID, nil)
		s.False(alreadyRemoved)
		s.Error(r.ctx.Err(), "The runner should be destroyed and its context canceled")
	})
	s.Run("returns false and stops inactivity timer", func() {
		runner, runnerDestroyed := testStoppedInactivityTimer(s)

		alreadyRemoved := s.nomadRunnerManager.onAllocationStopped(runner.ID(), nil)
		s.False(alreadyRemoved)

		select {
		case <-time.After(time.Second + tests.ShortTimeout):
			s.Fail("runner was stopped too late")
		case <-runnerDestroyed:
			s.False(runner.TimeoutPassed())
		}
	})
	s.Run("stops inactivity timer - counter check", func() {
		runner, runnerDestroyed := testStoppedInactivityTimer(s)

		select {
		case <-time.After(time.Second + tests.ShortTimeout):
			s.Fail("runner was stopped too late")
		case <-runnerDestroyed:
			s.True(runner.TimeoutPassed())
		}
	})
	s.Run("returns true when the runner is already removed", func() {
		s.Run("by the inactivity timer", func() {
			runner, _ := testStoppedInactivityTimer(s)

			<-time.After(time.Second)
			s.Require().True(runner.TimeoutPassed())

			alreadyRemoved := s.nomadRunnerManager.onAllocationStopped(runner.ID(), nil)
			s.True(alreadyRemoved)
		})
	})
}

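// testStoppedInactivityTimer claims a runner with a one-second inactivity timeout and returns it together with
// a channel that is signaled once the runner gets destroyed.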
func testStoppedInactivityTimer(s *ManagerTestSuite) (r Runner, destroyed chan struct{}) {
	s.T().Helper()
	environment, ok := s.nomadRunnerManager.environments.Get(tests.DefaultEnvironmentIDAsString)
	s.Require().True(ok)
	mockIdleRunners(environment.(*ExecutionEnvironmentMock))

	runnerDestroyed := make(chan struct{})
	environment.AddRunner(NewNomadJob(tests.DefaultRunnerID, []nomadApi.PortMapping{}, s.apiMock, func(r Runner) error {
		go func() {
			select {
			case runnerDestroyed <- struct{}{}:
			case <-s.TestCtx.Done():
			}
		}()
		return s.nomadRunnerManager.onRunnerDestroyed(r)
	}))

	runner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, 1)
	s.Require().NoError(err)
	s.Require().False(runner.TimeoutPassed())
	select {
	case runnerDestroyed <- struct{}{}:
		s.Fail("The runner should not be removed by now")
	case <-time.After(tests.ShortTimeout):
	}

	return runner, runnerDestroyed
}

func (s *MainTestSuite) TestNomadRunnerManager_Load() {
	apiMock := &nomad.ExecutorAPIMock{}
	mockWatchAllocations(s.TestCtx, apiMock)
	apiMock.On("LoadRunnerPortMappings", mock.AnythingOfType("string")).
		Return([]nomadApi.PortMapping{}, nil)
	call := apiMock.On("LoadRunnerJobs", dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
	runnerManager := NewNomadRunnerManager(apiMock, s.TestCtx)
	environmentMock := createBasicEnvironmentMock(tests.DefaultEnvironmentIDAsInteger)
	environmentMock.On("ApplyPrewarmingPoolSize").Return(nil)
	runnerManager.StoreEnvironment(environmentMock)

	s.Run("Stores unused runner", func() {
		tests.RemoveMethodFromMock(&environmentMock.Mock, "DeleteRunner")
		environmentMock.On("AddRunner", mock.AnythingOfType("*runner.NomadJob")).Once()

		_, job := helpers.CreateTemplateJob()
		jobID := tests.DefaultRunnerID
		job.ID = &jobID
		job.Name = &jobID
		s.ExpectedGoroutineIncrease++ // We don't care about destroying the created runner.
		call.Return([]*nomadApi.Job{job}, nil)

		runnerManager.Load()
		environmentMock.AssertExpectations(s.T())
	})

	s.Run("Stores used runner", func() {
		apiMock.On("MarkRunnerAsUsed", mock.AnythingOfType("string"), mock.AnythingOfType("int")).Return(nil)
		_, job := helpers.CreateTemplateJob()
		jobID := tests.DefaultRunnerID
		job.ID = &jobID
		job.Name = &jobID
		configTaskGroup := nomad.FindTaskGroup(job, nomad.ConfigTaskGroupName)
		s.Require().NotNil(configTaskGroup)
		configTaskGroup.Meta[nomad.ConfigMetaUsedKey] = nomad.ConfigMetaUsedValue
		s.ExpectedGoroutineIncrease++ // We don't care about destroying the created runner.
		call.Return([]*nomadApi.Job{job}, nil)

		s.Require().Zero(runnerManager.usedRunners.Length())
		runnerManager.Load()
		_, ok := runnerManager.usedRunners.Get(tests.DefaultRunnerID)
		s.True(ok)
	})

	runnerManager.usedRunners.Purge()
	s.Run("Restart timeout of used runner", func() {
		apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
		environmentMock.On("DeleteRunner", mock.AnythingOfType("string")).Once().Return(nil, false)
		timeout := 1

		_, job := helpers.CreateTemplateJob()
		jobID := tests.DefaultRunnerID
		job.ID = &jobID
		job.Name = &jobID
		configTaskGroup := nomad.FindTaskGroup(job, nomad.ConfigTaskGroupName)
		s.Require().NotNil(configTaskGroup)
		configTaskGroup.Meta[nomad.ConfigMetaUsedKey] = nomad.ConfigMetaUsedValue
		configTaskGroup.Meta[nomad.ConfigMetaTimeoutKey] = strconv.Itoa(timeout)
		call.Return([]*nomadApi.Job{job}, nil)

		s.Require().Zero(runnerManager.usedRunners.Length())
		runnerManager.Load()
		s.Require().NotZero(runnerManager.usedRunners.Length())

		<-time.After(time.Duration(timeout*2) * time.Second)
		s.Require().Zero(runnerManager.usedRunners.Length())
	})
}

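// The following tests exercise checkPrewarmingPoolAlert, which compares the idle runner count against the
// prewarming pool size and, if the ratio stays below the configured threshold for the reload timeout,
// reloads the environment's runners.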
func (s *MainTestSuite) TestNomadRunnerManager_checkPrewarmingPoolAlert() {
	timeout := uint(1)
	config.Config.Server.Alert.PrewarmingPoolReloadTimeout = timeout
	config.Config.Server.Alert.PrewarmingPoolThreshold = 0.5
	environment := &ExecutionEnvironmentMock{}
	environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
	environment.On("Image").Return("")
	environment.On("CPULimit").Return(uint(0))
	environment.On("MemoryLimit").Return(uint(0))
	environment.On("NetworkAccess").Return(false, nil)
	apiMock := &nomad.ExecutorAPIMock{}
	m := NewNomadRunnerManager(apiMock, s.TestCtx)
	m.StoreEnvironment(environment)
	s.Run("checks the alert condition again after the reload timeout", func() {
		environment.On("PrewarmingPoolSize").Return(uint(1)).Once()
		environment.On("IdleRunnerCount").Return(uint(0)).Once()
		environment.On("PrewarmingPoolSize").Return(uint(1)).Once()
		environment.On("IdleRunnerCount").Return(uint(1)).Once()

		checkDone := make(chan struct{})
		go func() {
			m.checkPrewarmingPoolAlert(environment, false)
			close(checkDone)
		}()

		select {
		case <-checkDone:
			s.Fail("checkPrewarmingPoolAlert returned before the reload timeout")
		case <-time.After(time.Duration(timeout) * time.Second / 2):
		}

		select {
		case <-time.After(time.Duration(timeout) * time.Second):
			s.Fail("checkPrewarmingPoolAlert did not return after checking the alert condition again")
		case <-checkDone:
		}
		environment.AssertExpectations(s.T())
	})
	s.Run("reloads the environment when the alert condition still holds after the reload timeout", func() {
		environment.On("PrewarmingPoolSize").Return(uint(1)).Twice()
		environment.On("IdleRunnerCount").Return(uint(0)).Twice()
		apiMock.On("LoadRunnerJobs", environment.ID()).Return([]*nomadApi.Job{}, nil).Once()
		environment.On("ApplyPrewarmingPoolSize").Return(nil).Once()

		checkDone := make(chan struct{})
		go func() {
			m.checkPrewarmingPoolAlert(environment, false)
			close(checkDone)
		}()

		select {
		case <-time.After(time.Duration(timeout) * time.Second * 2):
			s.Fail("checkPrewarmingPoolAlert did not return")
		case <-checkDone:
		}
		environment.AssertExpectations(s.T())
	})
	s.Run("is canceled by an added runner", func() {
		environment.On("PrewarmingPoolSize").Return(uint(1)).Twice()
		environment.On("IdleRunnerCount").Return(uint(0)).Once()
		environment.On("IdleRunnerCount").Return(uint(1)).Once()

		checkDone := make(chan struct{})
		go func() {
			m.checkPrewarmingPoolAlert(environment, false)
			close(checkDone)
		}()

		<-time.After(tests.ShortTimeout)
		go m.checkPrewarmingPoolAlert(environment, true)
		<-time.After(tests.ShortTimeout)

		select {
		case <-time.After(100 * time.Duration(timeout) * time.Second):
			s.Fail("checkPrewarmingPoolAlert was not canceled")
		case <-checkDone:
		}
		environment.AssertExpectations(s.T())
	})
}

func (s *MainTestSuite) TestNomadRunnerManager_checkPrewarmingPoolAlert_reloadsRunners() {
	config.Config.Server.Alert.PrewarmingPoolReloadTimeout = uint(1)
	config.Config.Server.Alert.PrewarmingPoolThreshold = 0.5
	environment := &ExecutionEnvironmentMock{}
	environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
	environment.On("Image").Return("")
	environment.On("CPULimit").Return(uint(0))
	environment.On("MemoryLimit").Return(uint(0))
	environment.On("NetworkAccess").Return(false, nil)
	apiMock := &nomad.ExecutorAPIMock{}
	m := NewNomadRunnerManager(apiMock, s.TestCtx)
	m.StoreEnvironment(environment)

	environment.On("PrewarmingPoolSize").Return(uint(1)).Twice()
	environment.On("IdleRunnerCount").Return(uint(0)).Twice()
	environment.On("DeleteRunner", mock.Anything).Return(nil, false).Once()

	s.Require().Empty(m.usedRunners.Length())
	_, usedJob := helpers.CreateTemplateJob()
	id := tests.DefaultRunnerID
	usedJob.ID = &id
	configTaskGroup := nomad.FindTaskGroup(usedJob, nomad.ConfigTaskGroupName)
	configTaskGroup.Meta[nomad.ConfigMetaUsedKey] = nomad.ConfigMetaUsedValue
	configTaskGroup.Meta[nomad.ConfigMetaTimeoutKey] = "42"
	_, idleJob := helpers.CreateTemplateJob()
	idleID := tests.AnotherRunnerID
	idleJob.ID = &idleID
	nomad.FindTaskGroup(idleJob, nomad.ConfigTaskGroupName).Meta[nomad.ConfigMetaUsedKey] = nomad.ConfigMetaUnusedValue
	apiMock.On("LoadRunnerJobs", environment.ID()).Return([]*nomadApi.Job{usedJob, idleJob}, nil).Once()
	apiMock.On("LoadRunnerPortMappings", mock.Anything).Return(nil, nil).Twice()
	environment.On("ApplyPrewarmingPoolSize").Return(nil).Once()
	environment.On("AddRunner", mock.Anything).Run(func(args mock.Arguments) {
		job, ok := args[0].(*NomadJob)
		s.Require().True(ok)
		err := job.Destroy(ErrLocalDestruction)
		s.NoError(err)
	}).Return().Once()

	m.checkPrewarmingPoolAlert(environment, false)

	r, ok := m.usedRunners.Get(tests.DefaultRunnerID)
	s.Require().True(ok)
	err := r.Destroy(ErrLocalDestruction)
	s.NoError(err)

	environment.AssertExpectations(s.T())
}

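// mockWatchAllocations keeps the mocked event stream open until ctx is done, mimicking a blocking Nomad event subscription.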
func mockWatchAllocations(ctx context.Context, apiMock *nomad.ExecutorAPIMock) {
	call := apiMock.On("WatchEventStream", mock.Anything, mock.Anything, mock.Anything)
	call.Run(func(args mock.Arguments) {
		<-ctx.Done()
		call.ReturnArguments = mock.Arguments{nil}
	})
}
@@ -1,548 +0,0 @@
package runner

import (
	"archive/tar"
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"github.com/openHPI/poseidon/internal/nomad"
	"github.com/openHPI/poseidon/pkg/dto"
	"github.com/openHPI/poseidon/pkg/logging"
	"github.com/openHPI/poseidon/pkg/nullio"
	"github.com/openHPI/poseidon/pkg/storage"
	"github.com/openHPI/poseidon/tests"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/suite"
	"io"
	"regexp"
	"strings"
	"testing"
	"time"
)

const defaultExecutionID = "execution-id"

func (s *MainTestSuite) TestIdIsStored() {
	apiMock := &nomad.ExecutorAPIMock{}
	apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
	runner := NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error { return nil })
	s.Equal(tests.DefaultRunnerID, runner.ID())
	s.NoError(runner.Destroy(nil))
}

func (s *MainTestSuite) TestMappedPortsAreStoredCorrectly() {
	apiMock := &nomad.ExecutorAPIMock{}
	apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)

	runner := NewNomadJob(tests.DefaultRunnerID, tests.DefaultPortMappings, apiMock, func(_ Runner) error { return nil })
	s.Equal(tests.DefaultMappedPorts, runner.MappedPorts())
	s.NoError(runner.Destroy(nil))

	runner = NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error { return nil })
	s.Empty(runner.MappedPorts())
	s.NoError(runner.Destroy(nil))
}

func (s *MainTestSuite) TestMarshalRunner() {
	apiMock := &nomad.ExecutorAPIMock{}
	apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
	runner := NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error { return nil })
	marshal, err := json.Marshal(runner)
	s.NoError(err)
	s.Equal("{\"runnerId\":\""+tests.DefaultRunnerID+"\"}", string(marshal))
	s.NoError(runner.Destroy(nil))
}

func (s *MainTestSuite) TestExecutionRequestIsStored() {
	apiMock := &nomad.ExecutorAPIMock{}
	apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
	runner := NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error { return nil })
	executionRequest := &dto.ExecutionRequest{
		Command:     "command",
		TimeLimit:   10,
		Environment: nil,
	}
	id := "test-execution"
	runner.StoreExecution(id, executionRequest)
	storedExecutionRunner, ok := runner.executions.Pop(id)

	s.True(ok, "Getting an execution should not return ok false")
	s.Equal(executionRequest, storedExecutionRunner)
	s.NoError(runner.Destroy(nil))
}

func (s *MainTestSuite) TestNewContextReturnsNewContextWithRunner() {
	apiMock := &nomad.ExecutorAPIMock{}
	apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
	runner := NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error { return nil })
	ctx := context.Background()
	newCtx := NewContext(ctx, runner)
	storedRunner, ok := newCtx.Value(runnerContextKey).(Runner)
	s.Require().True(ok)

	s.NotEqual(ctx, newCtx)
	s.Equal(runner, storedRunner)
	s.NoError(runner.Destroy(nil))
}

func (s *MainTestSuite) TestFromContextReturnsRunner() {
	apiMock := &nomad.ExecutorAPIMock{}
	apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
	runner := NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error { return nil })
	ctx := NewContext(context.Background(), runner)
	storedRunner, ok := FromContext(ctx)

	s.True(ok)
	s.Equal(runner, storedRunner)
	s.NoError(runner.Destroy(nil))
}

func (s *MainTestSuite) TestFromContextReturnsIsNotOkWhenContextHasNoRunner() {
	ctx := context.Background()
	_, ok := FromContext(ctx)

	s.False(ok)
}

func (s *MainTestSuite) TestDestroyDoesNotPropagateToNomadForSomeReasons() {
	apiMock := &nomad.ExecutorAPIMock{}
	timer := &InactivityTimerMock{}
	timer.On("StopTimeout").Return()
	ctx, cancel := context.WithCancel(s.TestCtx)
	r := &NomadJob{
		executions:      storage.NewLocalStorage[*dto.ExecutionRequest](),
		InactivityTimer: timer,
		id:              tests.DefaultRunnerID,
		api:             apiMock,
		ctx:             ctx,
		cancel:          cancel,
	}

	s.Run("destroy removes the runner only locally for OOM Killed Allocations", func() {
		err := r.Destroy(ErrOOMKilled)
		s.NoError(err)
		apiMock.AssertExpectations(s.T())
	})

	s.Run("destroy removes the runner only locally for rescheduled allocations", func() {
		err := r.Destroy(nomad.ErrorAllocationRescheduled)
		s.NoError(err)
		apiMock.AssertExpectations(s.T())
	})
}

func TestExecuteInteractivelyTestSuite(t *testing.T) {
	suite.Run(t, new(ExecuteInteractivelyTestSuite))
}

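// ExecuteInteractivelyTestSuite exercises NomadJob.ExecuteInteractively: forwarding to the executor API,
// time limits, SIGQUIT signaling, and OOM kill handling.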
type ExecuteInteractivelyTestSuite struct {
	tests.MemoryLeakTestSuite
	runner                   *NomadJob
	apiMock                  *nomad.ExecutorAPIMock
	timer                    *InactivityTimerMock
	manager                  *ManagerMock
	mockedExecuteCommandCall *mock.Call
	mockedTimeoutPassedCall  *mock.Call
}

func (s *ExecuteInteractivelyTestSuite) SetupTest() {
	s.MemoryLeakTestSuite.SetupTest()
	s.apiMock = &nomad.ExecutorAPIMock{}
	s.mockedExecuteCommandCall = s.apiMock.On("ExecuteCommand", mock.Anything, mock.Anything, mock.Anything,
		true, false, mock.Anything, mock.Anything, mock.Anything).
		Return(0, nil)
	s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
	s.timer = &InactivityTimerMock{}
	s.timer.On("StopTimeout").Return()
	s.timer.On("ResetTimeout").Return()
	s.mockedTimeoutPassedCall = s.timer.On("TimeoutPassed").Return(false)
	s.manager = &ManagerMock{}
	s.manager.On("Return", mock.Anything).Return(nil)

	ctx, cancel := context.WithCancel(context.Background())
	s.runner = &NomadJob{
		executions:      storage.NewLocalStorage[*dto.ExecutionRequest](),
		InactivityTimer: s.timer,
		id:              tests.DefaultRunnerID,
		api:             s.apiMock,
		ctx:             ctx,
		cancel:          cancel,
	}
}

func (s *ExecuteInteractivelyTestSuite) TestReturnsErrorWhenExecutionDoesNotExist() {
	_, _, err := s.runner.ExecuteInteractively("non-existent-id", nil, nil, nil, context.Background())
	s.ErrorIs(err, ErrorUnknownExecution)
}

func (s *ExecuteInteractivelyTestSuite) TestCallsApi() {
	request := &dto.ExecutionRequest{Command: "echo 'Hello World!'"}
	s.runner.StoreExecution(defaultExecutionID, request)
	_, _, err := s.runner.ExecuteInteractively(defaultExecutionID, nil, nil, nil, context.Background())
	s.Require().NoError(err)

	time.Sleep(tests.ShortTimeout)
	s.apiMock.AssertCalled(s.T(), "ExecuteCommand", tests.DefaultRunnerID, mock.Anything, request.FullCommand(),
		true, false, mock.Anything, mock.Anything, mock.Anything)
}

func (s *ExecuteInteractivelyTestSuite) TestReturnsAfterTimeout() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s.mockedExecuteCommandCall.Run(func(args mock.Arguments) {
		<-ctx.Done()
	}).Return(0, nil)

	timeLimit := 1
	executionRequest := &dto.ExecutionRequest{TimeLimit: timeLimit}
	s.runner.StoreExecution(defaultExecutionID, executionRequest)
	exit, _, err := s.runner.ExecuteInteractively(defaultExecutionID, &nullio.ReadWriter{}, nil, nil, context.Background())
	s.Require().NoError(err)

	select {
	case <-exit:
		s.FailNow("ExecuteInteractively should not terminate instantly")
	case <-time.After(tests.ShortTimeout):
	}

	select {
	case <-time.After(time.Duration(timeLimit) * time.Second):
		s.FailNow("ExecuteInteractively should return after the time limit")
	case exitInfo := <-exit:
		s.Equal(uint8(255), exitInfo.Code)
	}
}

func (s *ExecuteInteractivelyTestSuite) TestSendsSignalAfterTimeout() {
	quit := make(chan struct{})
	s.mockedExecuteCommandCall.Run(func(args mock.Arguments) {
		stdin, ok := args.Get(5).(io.Reader)
		s.Require().True(ok)
		buffer := make([]byte, 1) //nolint:makezero,lll // If the length is zero, the Read call never reads anything. gofmt wants this alignment.
		for n := 0; !(n == 1 && buffer[0] == SIGQUIT); {
			<-time.After(tests.ShortTimeout)
			n, _ = stdin.Read(buffer) //nolint:errcheck,lll // Read returns EOF errors but that is expected. This nolint makes the line too long.
			if n > 0 {
				log.WithField("buffer", fmt.Sprintf("%x", buffer[0])).Info("Received Stdin")
			}
		}
		log.Info("After loop")
		close(quit)
	}).Return(0, nil)
	timeLimit := 1
	executionRequest := &dto.ExecutionRequest{TimeLimit: timeLimit}
	s.runner.StoreExecution(defaultExecutionID, executionRequest)
	_, _, err := s.runner.ExecuteInteractively(
		defaultExecutionID, bytes.NewBuffer(make([]byte, 1)), nil, nil, context.Background())
	s.Require().NoError(err)
	log.Info("Before waiting")
	select {
	case <-time.After(2 * (time.Duration(timeLimit) * time.Second)):
		s.FailNow("The execution should receive a SIGQUIT after the timeout")
	case <-quit:
		log.Info("Received quit")
	}
}

func (s *ExecuteInteractivelyTestSuite) TestDestroysRunnerAfterTimeoutAndSignal() {
	s.mockedExecuteCommandCall.Run(func(args mock.Arguments) {
		<-s.TestCtx.Done()
	})
	runnerDestroyed := false
	s.runner.onDestroy = func(_ Runner) error {
		runnerDestroyed = true
		return nil
	}
	timeLimit := 1
	executionRequest := &dto.ExecutionRequest{TimeLimit: timeLimit}
	s.runner.cancel = func() {}
	s.runner.StoreExecution(defaultExecutionID, executionRequest)

	_, _, err := s.runner.ExecuteInteractively(
		defaultExecutionID, bytes.NewBuffer(make([]byte, 1)), nil, nil, context.Background())
	s.Require().NoError(err)

	<-time.After(executionTimeoutGracePeriod + time.Duration(timeLimit)*time.Second)
	// Even if we expect the timeout to be exceeded now, Poseidon sometimes takes a couple of hundred ms longer.
	<-time.After(2 * tests.ShortTimeout)
	s.manager.AssertNotCalled(s.T(), "Return", s.runner)
	s.apiMock.AssertCalled(s.T(), "DeleteJob", s.runner.ID())
	s.True(runnerDestroyed)
}

func (s *ExecuteInteractivelyTestSuite) TestResetTimerGetsCalled() {
	executionRequest := &dto.ExecutionRequest{}
	s.runner.StoreExecution(defaultExecutionID, executionRequest)
	_, _, err := s.runner.ExecuteInteractively(defaultExecutionID, nil, nil, nil, context.Background())
	s.Require().NoError(err)
	s.timer.AssertCalled(s.T(), "ResetTimeout")
}

func (s *ExecuteInteractivelyTestSuite) TestExitHasTimeoutErrorIfRunnerTimesOut() {
	s.mockedExecuteCommandCall.Run(func(args mock.Arguments) {
		<-s.TestCtx.Done()
	}).Return(0, nil)
	s.mockedTimeoutPassedCall.Return(true)
	executionRequest := &dto.ExecutionRequest{}
	s.runner.StoreExecution(defaultExecutionID, executionRequest)

	exitChannel, _, err := s.runner.ExecuteInteractively(
		defaultExecutionID, &nullio.ReadWriter{}, nil, nil, context.Background())
	s.Require().NoError(err)
	err = s.runner.Destroy(ErrorRunnerInactivityTimeout)
	s.Require().NoError(err)
	exit := <-exitChannel
	s.ErrorIs(exit.Err, ErrorRunnerInactivityTimeout)
}

func (s *ExecuteInteractivelyTestSuite) TestDestroyReasonIsPassedToExecution() {
	s.mockedExecuteCommandCall.Run(func(args mock.Arguments) {
		<-s.TestCtx.Done()
	}).Return(0, nil)
	s.mockedTimeoutPassedCall.Return(true)
	executionRequest := &dto.ExecutionRequest{}
	s.runner.StoreExecution(defaultExecutionID, executionRequest)

	exitChannel, _, err := s.runner.ExecuteInteractively(
		defaultExecutionID, &nullio.ReadWriter{}, nil, nil, context.Background())
	s.Require().NoError(err)
	err = s.runner.Destroy(ErrOOMKilled)
	s.Require().NoError(err)
	exit := <-exitChannel
	s.ErrorIs(exit.Err, ErrOOMKilled)
}

func (s *ExecuteInteractivelyTestSuite) TestSuspectedOOMKilledExecutionWaitsForVerification() {
	s.mockedExecuteCommandCall.Return(128, nil)
	executionRequest := &dto.ExecutionRequest{}
	s.Run("Actually OOM Killed", func() {
		s.runner.StoreExecution(defaultExecutionID, executionRequest)
		exitChannel, _, err := s.runner.ExecuteInteractively(
			defaultExecutionID, &nullio.ReadWriter{}, nil, nil, context.Background())
		s.Require().NoError(err)

		select {
		case <-exitChannel:
			s.FailNow("For exit code 128 Poseidon should wait a while to verify the OOM Kill assumption.")
		case <-time.After(tests.ShortTimeout):
			// All good. Poseidon waited.
		}

		err = s.runner.Destroy(ErrOOMKilled)
		s.Require().NoError(err)
		exit := <-exitChannel
		s.ErrorIs(exit.Err, ErrOOMKilled)
	})

	ctx, cancel := context.WithCancel(context.Background())
	s.runner.ctx = ctx
	s.runner.cancel = cancel
	s.Run("Not OOM Killed", func() {
		s.runner.StoreExecution(defaultExecutionID, executionRequest)
		exitChannel, _, err := s.runner.ExecuteInteractively(
			defaultExecutionID, &nullio.ReadWriter{}, nil, nil, context.Background())
		s.Require().NoError(err)

		select {
		case <-time.After(tests.ShortTimeout + time.Second):
			s.FailNow("Poseidon should not wait too long for verifying the OOM Kill assumption.")
		case exit := <-exitChannel:
			s.Equal(uint8(128), exit.Code)
			s.Nil(exit.Err)
		}
	})
}

func TestUpdateFileSystemTestSuite(t *testing.T) {
	suite.Run(t, new(UpdateFileSystemTestSuite))
}

type UpdateFileSystemTestSuite struct {
	tests.MemoryLeakTestSuite
	runner                   *NomadJob
	timer                    *InactivityTimerMock
	apiMock                  *nomad.ExecutorAPIMock
	mockedExecuteCommandCall *mock.Call
	command                  string
	stdin                    *bytes.Buffer
}

func (s *UpdateFileSystemTestSuite) SetupTest() {
	s.MemoryLeakTestSuite.SetupTest()
	s.apiMock = &nomad.ExecutorAPIMock{}
	s.timer = &InactivityTimerMock{}
	s.timer.On("ResetTimeout").Return()
	s.timer.On("TimeoutPassed").Return(false)
	s.runner = &NomadJob{
		executions:      storage.NewLocalStorage[*dto.ExecutionRequest](),
		InactivityTimer: s.timer,
		id:              tests.DefaultRunnerID,
		api:             s.apiMock,
	}
	s.mockedExecuteCommandCall = s.apiMock.On("ExecuteCommand", tests.DefaultRunnerID, mock.Anything,
		mock.Anything, false, mock.AnythingOfType("bool"), mock.Anything, mock.Anything, mock.Anything).
		Run(func(args mock.Arguments) {
			var ok bool
			s.command, ok = args.Get(2).(string)
			s.Require().True(ok)
			s.stdin, ok = args.Get(5).(*bytes.Buffer)
			s.Require().True(ok)
		}).Return(0, nil)
}

func (s *UpdateFileSystemTestSuite) TestUpdateFileSystemForRunnerPerformsTarExtractionWithAbsoluteNamesOnRunner() {
	// Note: this test covers an implementation detail of the UpdateFileSystemOfRunner method.
	// If the implementation changes, delete this test and write a new one.
	copyRequest := &dto.UpdateFileSystemRequest{}
	err := s.runner.UpdateFileSystem(copyRequest, context.Background())
	s.NoError(err)
	s.apiMock.AssertCalled(s.T(), "ExecuteCommand", mock.Anything, mock.Anything, mock.Anything,
		false, mock.AnythingOfType("bool"), mock.Anything, mock.Anything, mock.Anything)
	s.Regexp("tar --extract --absolute-names", s.command)
}

func (s *UpdateFileSystemTestSuite) TestUpdateFileSystemForRunnerReturnsErrorIfExitCodeIsNotZero() {
	s.mockedExecuteCommandCall.Return(1, nil)
	copyRequest := &dto.UpdateFileSystemRequest{}
	err := s.runner.UpdateFileSystem(copyRequest, context.Background())
	s.ErrorIs(err, ErrorFileCopyFailed)
}

func (s *UpdateFileSystemTestSuite) TestUpdateFileSystemForRunnerReturnsErrorIfApiCallDid() {
	s.mockedExecuteCommandCall.Return(0, tests.ErrDefault)
	copyRequest := &dto.UpdateFileSystemRequest{}
	err := s.runner.UpdateFileSystem(copyRequest, context.Background())
	s.ErrorIs(err, nomad.ErrorExecutorCommunicationFailed)
}

func (s *UpdateFileSystemTestSuite) TestFilesToCopyAreIncludedInTarArchive() {
	copyRequest := &dto.UpdateFileSystemRequest{Copy: []dto.File{
		{Path: tests.DefaultFileName, Content: []byte(tests.DefaultFileContent)}}}
	err := s.runner.UpdateFileSystem(copyRequest, context.Background())
	s.NoError(err)
	s.apiMock.AssertCalled(s.T(), "ExecuteCommand", mock.Anything, mock.Anything, mock.Anything, false, true,
		mock.Anything, mock.Anything, mock.Anything)

	tarFiles := s.readFilesFromTarArchive(s.stdin)
	s.Len(tarFiles, 1)
	tarFile := tarFiles[0]
	s.True(strings.HasSuffix(tarFile.Name, tests.DefaultFileName))
	s.Equal(byte(tar.TypeReg), tarFile.TypeFlag)
	s.Equal(tests.DefaultFileContent, tarFile.Content)
}

func (s *UpdateFileSystemTestSuite) TestTarFilesContainCorrectPathForRelativeFilePath() {
	copyRequest := &dto.UpdateFileSystemRequest{Copy: []dto.File{
		{Path: tests.DefaultFileName, Content: []byte(tests.DefaultFileContent)}}}
	err := s.runner.UpdateFileSystem(copyRequest, context.Background())
	s.Require().NoError(err)

	tarFiles := s.readFilesFromTarArchive(s.stdin)
	s.Len(tarFiles, 1)
	// The tar archive is extracted in the active workdir of the container, so the file is put relative to that.
	s.Equal(tests.DefaultFileName, tarFiles[0].Name)
}

func (s *UpdateFileSystemTestSuite) TestFilesWithAbsolutePathArePutInAbsoluteLocation() {
	copyRequest := &dto.UpdateFileSystemRequest{Copy: []dto.File{
		{Path: tests.FileNameWithAbsolutePath, Content: []byte(tests.DefaultFileContent)}}}
	err := s.runner.UpdateFileSystem(copyRequest, context.Background())
	s.Require().NoError(err)

	tarFiles := s.readFilesFromTarArchive(s.stdin)
	s.Len(tarFiles, 1)
	s.Equal(tarFiles[0].Name, tests.FileNameWithAbsolutePath)
}

func (s *UpdateFileSystemTestSuite) TestDirectoriesAreMarkedAsDirectoryInTar() {
	copyRequest := &dto.UpdateFileSystemRequest{Copy: []dto.File{{Path: tests.DefaultDirectoryName, Content: []byte{}}}}
	err := s.runner.UpdateFileSystem(copyRequest, context.Background())
	s.Require().NoError(err)

	tarFiles := s.readFilesFromTarArchive(s.stdin)
	s.Len(tarFiles, 1)
	tarFile := tarFiles[0]
	s.True(strings.HasSuffix(tarFile.Name+"/", tests.DefaultDirectoryName))
	s.Equal(byte(tar.TypeDir), tarFile.TypeFlag)
	s.Equal("", tarFile.Content)
}

func (s *UpdateFileSystemTestSuite) TestFilesToRemoveGetRemoved() {
	copyRequest := &dto.UpdateFileSystemRequest{Delete: []dto.FilePath{tests.DefaultFileName}}
	err := s.runner.UpdateFileSystem(copyRequest, context.Background())
	s.NoError(err)
	s.apiMock.AssertCalled(s.T(), "ExecuteCommand", mock.Anything, mock.Anything, mock.Anything, false, true,
		mock.Anything, mock.Anything, mock.Anything)
	s.Regexp(fmt.Sprintf("rm[^;]+%s' *;", regexp.QuoteMeta(tests.DefaultFileName)), s.command)
}

func (s *UpdateFileSystemTestSuite) TestFilesToRemoveGetEscaped() {
	copyRequest := &dto.UpdateFileSystemRequest{Delete: []dto.FilePath{"/some/potentially/harmful'filename"}}
	err := s.runner.UpdateFileSystem(copyRequest, context.Background())
	s.NoError(err)
	s.apiMock.AssertCalled(s.T(), "ExecuteCommand", mock.Anything, mock.Anything, mock.Anything, false, true,
		mock.Anything, mock.Anything, mock.Anything)
	s.Contains(s.command, "'/some/potentially/harmful'\\\\''filename'")
}

func (s *UpdateFileSystemTestSuite) TestResetTimerGetsCalled() {
	copyRequest := &dto.UpdateFileSystemRequest{}
	err := s.runner.UpdateFileSystem(copyRequest, context.Background())
	s.NoError(err)
	s.timer.AssertCalled(s.T(), "ResetTimeout")
}

type TarFile struct {
	Name     string
	Content  string
	TypeFlag byte
}

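// readFilesFromTarArchive reads all entries from the given tar stream and returns their names, contents, and type flags.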
func (s *UpdateFileSystemTestSuite) readFilesFromTarArchive(tarArchive io.Reader) (files []TarFile) {
	reader := tar.NewReader(tarArchive)
	for {
		hdr, err := reader.Next()
		if err != nil {
			break
		}
		bf, err := io.ReadAll(reader)
		s.Require().NoError(err)
		files = append(files, TarFile{Name: hdr.Name, Content: string(bf), TypeFlag: hdr.Typeflag})
	}
	return files
}

func (s *UpdateFileSystemTestSuite) TestGetFileContentReturnsErrorIfExitCodeIsNotZero() {
	s.mockedExecuteCommandCall.RunFn = nil
	s.mockedExecuteCommandCall.Return(1, nil)
	err := s.runner.GetFileContent("", logging.NewLoggingResponseWriter(nil), false, context.Background())
	s.ErrorIs(err, ErrFileNotFound)
}

func (s *UpdateFileSystemTestSuite) TestFileCopyIsCanceledOnRunnerDestroy() {
	s.mockedExecuteCommandCall.Run(func(args mock.Arguments) {
		ctx, ok := args.Get(1).(context.Context)
		s.Require().True(ok)

		select {
		case <-ctx.Done():
			s.Fail("mergeContext is done before any of its parents")
			return
		case <-time.After(tests.ShortTimeout):
		}

		select {
		case <-ctx.Done():
		case <-time.After(3 * tests.ShortTimeout):
			s.Fail("mergeContext is not done after the earliest of its parents")
			return
		}
	})
	ctx, cancel := context.WithCancel(context.Background())
	s.runner.ctx = ctx
	s.runner.cancel = cancel

	<-time.After(2 * tests.ShortTimeout)
	s.runner.cancel()
}
@@ -1,218 +0,0 @@
// Code generated by mockery v2.30.16. DO NOT EDIT.

package runner

import (
	context "context"
	http "net/http"

	dto "github.com/openHPI/poseidon/pkg/dto"

	io "io"

	mock "github.com/stretchr/testify/mock"

	time "time"
)

// RunnerMock is an autogenerated mock type for the Runner type
type RunnerMock struct {
	mock.Mock
}

// Destroy provides a mock function with given fields: reason
func (_m *RunnerMock) Destroy(reason DestroyReason) error {
	ret := _m.Called(reason)

	var r0 error
	if rf, ok := ret.Get(0).(func(DestroyReason) error); ok {
		r0 = rf(reason)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Environment provides a mock function with given fields:
func (_m *RunnerMock) Environment() dto.EnvironmentID {
	ret := _m.Called()

	var r0 dto.EnvironmentID
	if rf, ok := ret.Get(0).(func() dto.EnvironmentID); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(dto.EnvironmentID)
	}

	return r0
}

// ExecuteInteractively provides a mock function with given fields: id, stdin, stdout, stderr, ctx
func (_m *RunnerMock) ExecuteInteractively(id string, stdin io.ReadWriter, stdout io.Writer, stderr io.Writer, ctx context.Context) (<-chan ExitInfo, context.CancelFunc, error) {
	ret := _m.Called(id, stdin, stdout, stderr, ctx)

	var r0 <-chan ExitInfo
	var r1 context.CancelFunc
	var r2 error
	if rf, ok := ret.Get(0).(func(string, io.ReadWriter, io.Writer, io.Writer, context.Context) (<-chan ExitInfo, context.CancelFunc, error)); ok {
		return rf(id, stdin, stdout, stderr, ctx)
	}
	if rf, ok := ret.Get(0).(func(string, io.ReadWriter, io.Writer, io.Writer, context.Context) <-chan ExitInfo); ok {
		r0 = rf(id, stdin, stdout, stderr, ctx)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(<-chan ExitInfo)
		}
	}

	if rf, ok := ret.Get(1).(func(string, io.ReadWriter, io.Writer, io.Writer, context.Context) context.CancelFunc); ok {
		r1 = rf(id, stdin, stdout, stderr, ctx)
	} else {
		if ret.Get(1) != nil {
			r1 = ret.Get(1).(context.CancelFunc)
		}
	}

	if rf, ok := ret.Get(2).(func(string, io.ReadWriter, io.Writer, io.Writer, context.Context) error); ok {
		r2 = rf(id, stdin, stdout, stderr, ctx)
	} else {
		r2 = ret.Error(2)
	}

	return r0, r1, r2
}

// ExecutionExists provides a mock function with given fields: id
func (_m *RunnerMock) ExecutionExists(id string) bool {
	ret := _m.Called(id)

	var r0 bool
	if rf, ok := ret.Get(0).(func(string) bool); ok {
		r0 = rf(id)
	} else {
		r0 = ret.Get(0).(bool)
	}

	return r0
}

// GetFileContent provides a mock function with given fields: path, content, privilegedExecution, ctx
func (_m *RunnerMock) GetFileContent(path string, content http.ResponseWriter, privilegedExecution bool, ctx context.Context) error {
	ret := _m.Called(path, content, privilegedExecution, ctx)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, http.ResponseWriter, bool, context.Context) error); ok {
		r0 = rf(path, content, privilegedExecution, ctx)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// ID provides a mock function with given fields:
func (_m *RunnerMock) ID() string {
	ret := _m.Called()

	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}

// ListFileSystem provides a mock function with given fields: path, recursive, result, privilegedExecution, ctx
func (_m *RunnerMock) ListFileSystem(path string, recursive bool, result io.Writer, privilegedExecution bool, ctx context.Context) error {
	ret := _m.Called(path, recursive, result, privilegedExecution, ctx)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, bool, io.Writer, bool, context.Context) error); ok {
		r0 = rf(path, recursive, result, privilegedExecution, ctx)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// MappedPorts provides a mock function with given fields:
func (_m *RunnerMock) MappedPorts() []*dto.MappedPort {
	ret := _m.Called()

	var r0 []*dto.MappedPort
	if rf, ok := ret.Get(0).(func() []*dto.MappedPort); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*dto.MappedPort)
		}
	}

	return r0
}

// ResetTimeout provides a mock function with given fields:
func (_m *RunnerMock) ResetTimeout() {
	_m.Called()
}

// SetupTimeout provides a mock function with given fields: duration
func (_m *RunnerMock) SetupTimeout(duration time.Duration) {
	_m.Called(duration)
}

// StopTimeout provides a mock function with given fields:
func (_m *RunnerMock) StopTimeout() {
	_m.Called()
}

// StoreExecution provides a mock function with given fields: id, executionRequest
func (_m *RunnerMock) StoreExecution(id string, executionRequest *dto.ExecutionRequest) {
	_m.Called(id, executionRequest)
}

// TimeoutPassed provides a mock function with given fields:
func (_m *RunnerMock) TimeoutPassed() bool {
	ret := _m.Called()

	var r0 bool
	if rf, ok := ret.Get(0).(func() bool); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(bool)
	}

	return r0
}

// UpdateFileSystem provides a mock function with given fields: request, ctx
func (_m *RunnerMock) UpdateFileSystem(request *dto.UpdateFileSystemRequest, ctx context.Context) error {
	ret := _m.Called(request, ctx)

	var r0 error
	if rf, ok := ret.Get(0).(func(*dto.UpdateFileSystemRequest, context.Context) error); ok {
		r0 = rf(request, ctx)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// NewRunnerMock creates a new instance of RunnerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewRunnerMock(t interface {
	mock.TestingT
	Cleanup(func())
}) *RunnerMock {
	mock := &RunnerMock{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}