added k8s stub adapter for execution environment

commit 12ff205bd2
parent f9a6ba8f1c
Author: Elmar Kresse
Date: 2024-09-18 10:43:38 +02:00
119 changed files with 1374 additions and 12549 deletions

View File

@@ -1,85 +0,0 @@
package api
import (
"github.com/gorilla/mux"
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/internal/environment"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests"
"github.com/stretchr/testify/suite"
"net/http"
"net/http/httptest"
"testing"
)
func mockHTTPHandler(writer http.ResponseWriter, _ *http.Request) {
writer.WriteHeader(http.StatusOK)
}
type MainTestSuite struct {
tests.MemoryLeakTestSuite
}
func TestMainTestSuite(t *testing.T) {
suite.Run(t, new(MainTestSuite))
}
func (s *MainTestSuite) TestNewRouterV1WithAuthenticationDisabled() {
config.Config.Server.Token = ""
router := mux.NewRouter()
m := &environment.ManagerHandlerMock{}
m.On("Statistics").Return(make(map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData))
configureV1Router(router, nil, m)
s.Run("health route is accessible", func() {
request, err := http.NewRequest(http.MethodGet, "/api/v1/health", http.NoBody)
if err != nil {
s.T().Fatal(err)
}
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
s.Equal(http.StatusNoContent, recorder.Code)
})
s.Run("added route is accessible", func() {
router.HandleFunc("/api/v1/test", mockHTTPHandler)
request, err := http.NewRequest(http.MethodGet, "/api/v1/test", http.NoBody)
if err != nil {
s.T().Fatal(err)
}
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
s.Equal(http.StatusOK, recorder.Code)
})
}
func (s *MainTestSuite) TestNewRouterV1WithAuthenticationEnabled() {
config.Config.Server.Token = "TestToken"
router := mux.NewRouter()
m := &environment.ManagerHandlerMock{}
m.On("Statistics").Return(make(map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData))
configureV1Router(router, nil, m)
s.Run("health route is accessible", func() {
request, err := http.NewRequest(http.MethodGet, "/api/v1/health", http.NoBody)
if err != nil {
s.T().Fatal(err)
}
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
s.Equal(http.StatusNoContent, recorder.Code)
})
s.Run("protected route is not accessible", func() {
// request an available API route that should be guarded by authentication.
// (which one, in particular, does not matter here)
request, err := http.NewRequest(http.MethodPost, "/api/v1/runners", http.NoBody)
if err != nil {
s.T().Fatal(err)
}
recorder := httptest.NewRecorder()
router.ServeHTTP(recorder, request)
s.Equal(http.StatusUnauthorized, recorder.Code)
})
config.Config.Server.Token = ""
}

View File

@@ -1,94 +0,0 @@
package auth
import (
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/tests"
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"net/http"
"net/http/httptest"
"testing"
)
const testToken = "C0rr3ctT0k3n"
type AuthenticationMiddlewareTestSuite struct {
tests.MemoryLeakTestSuite
request *http.Request
recorder *httptest.ResponseRecorder
httpAuthenticationMiddleware http.Handler
}
func (s *AuthenticationMiddlewareTestSuite) SetupTest() {
s.MemoryLeakTestSuite.SetupTest()
correctAuthenticationToken = []byte(testToken)
s.recorder = httptest.NewRecorder()
request, err := http.NewRequest(http.MethodGet, "/api/v1/test", http.NoBody)
if err != nil {
s.T().Fatal(err)
}
s.request = request
s.httpAuthenticationMiddleware = HTTPAuthenticationMiddleware(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}))
}
func (s *AuthenticationMiddlewareTestSuite) TearDownTest() {
defer s.MemoryLeakTestSuite.TearDownTest()
correctAuthenticationToken = []byte(nil)
}
func (s *AuthenticationMiddlewareTestSuite) TestReturns401WhenHeaderUnset() {
s.httpAuthenticationMiddleware.ServeHTTP(s.recorder, s.request)
assert.Equal(s.T(), http.StatusUnauthorized, s.recorder.Code)
}
func (s *AuthenticationMiddlewareTestSuite) TestReturns401WhenTokenWrong() {
s.request.Header.Set(TokenHeader, "Wr0ngT0k3n")
s.httpAuthenticationMiddleware.ServeHTTP(s.recorder, s.request)
assert.Equal(s.T(), http.StatusUnauthorized, s.recorder.Code)
}
func (s *AuthenticationMiddlewareTestSuite) TestWarnsWhenUnauthorized() {
logger, hook := test.NewNullLogger()
log = logger.WithField("pkg", "api/auth")
s.request.Header.Set(TokenHeader, "Wr0ngT0k3n")
s.httpAuthenticationMiddleware.ServeHTTP(s.recorder, s.request)
assert.Equal(s.T(), http.StatusUnauthorized, s.recorder.Code)
assert.Equal(s.T(), logrus.WarnLevel, hook.LastEntry().Level)
assert.Equal(s.T(), "Wr0ngT0k3n", hook.LastEntry().Data["token"])
}
func (s *AuthenticationMiddlewareTestSuite) TestPassesWhenTokenCorrect() {
s.request.Header.Set(TokenHeader, testToken)
s.httpAuthenticationMiddleware.ServeHTTP(s.recorder, s.request)
assert.Equal(s.T(), http.StatusOK, s.recorder.Code)
}
func TestHTTPAuthenticationMiddleware(t *testing.T) {
suite.Run(t, new(AuthenticationMiddlewareTestSuite))
}
func TestInitializeAuthentication(t *testing.T) {
t.Run("if token unset", func(t *testing.T) {
config.Config.Server.Token = ""
initialized := InitializeAuthentication()
assert.Equal(t, false, initialized)
assert.Equal(t, []byte(nil), correctAuthenticationToken, "it should not set correctAuthenticationToken")
})
t.Run("if token set", func(t *testing.T) {
config.Config.Server.Token = testToken
initialized := InitializeAuthentication()
assert.Equal(t, true, initialized)
assert.Equal(t, []byte(testToken), correctAuthenticationToken, "it should set correctAuthenticationToken")
config.Config.Server.Token = ""
correctAuthenticationToken = []byte(nil)
})
}

View File

@@ -1,308 +0,0 @@
package api
import (
"bytes"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"github.com/openHPI/poseidon/internal/environment"
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"math"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
)
const jobHCLBasicFormat = "job \"%s\" {}"
type EnvironmentControllerTestSuite struct {
tests.MemoryLeakTestSuite
manager *environment.ManagerHandlerMock
router *mux.Router
}
func TestEnvironmentControllerTestSuite(t *testing.T) {
suite.Run(t, new(EnvironmentControllerTestSuite))
}
func (s *EnvironmentControllerTestSuite) SetupTest() {
s.MemoryLeakTestSuite.SetupTest()
s.manager = &environment.ManagerHandlerMock{}
s.router = NewRouter(nil, s.manager)
}
func (s *EnvironmentControllerTestSuite) TestList() {
call := s.manager.On("List", mock.AnythingOfType("bool"))
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{[]runner.ExecutionEnvironment{}, nil}
})
path, err := s.router.Get(listRouteName).URL()
s.Require().NoError(err)
request, err := http.NewRequest(http.MethodGet, path.String(), http.NoBody)
s.Require().NoError(err)
s.Run("with no Environments", func() {
recorder := httptest.NewRecorder()
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusOK, recorder.Code)
var environmentsResponse ExecutionEnvironmentsResponse
err = json.NewDecoder(recorder.Result().Body).Decode(&environmentsResponse)
s.Require().NoError(err)
_ = recorder.Result().Body.Close()
s.Empty(environmentsResponse.ExecutionEnvironments)
})
s.manager.Calls = []mock.Call{}
s.Run("with fetch", func() {
recorder := httptest.NewRecorder()
query := path.Query()
query.Set("fetch", "true")
path.RawQuery = query.Encode()
request, err := http.NewRequest(http.MethodGet, path.String(), http.NoBody)
s.Require().NoError(err)
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusOK, recorder.Code)
s.manager.AssertCalled(s.T(), "List", true)
})
s.manager.Calls = []mock.Call{}
s.Run("with bad fetch", func() {
recorder := httptest.NewRecorder()
query := path.Query()
query.Set("fetch", "YouDecide")
path.RawQuery = query.Encode()
request, err := http.NewRequest(http.MethodGet, path.String(), http.NoBody)
s.Require().NoError(err)
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusBadRequest, recorder.Code)
s.manager.AssertNotCalled(s.T(), "List")
})
s.Run("returns multiple environments", func() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
var firstEnvironment, secondEnvironment *environment.NomadEnvironment
call.Run(func(args mock.Arguments) {
firstEnvironment, err = environment.NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock,
fmt.Sprintf(jobHCLBasicFormat, nomad.TemplateJobID(tests.DefaultEnvironmentIDAsInteger)))
s.Require().NoError(err)
secondEnvironment, err = environment.NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock,
fmt.Sprintf(jobHCLBasicFormat, nomad.TemplateJobID(tests.DefaultEnvironmentIDAsInteger)))
s.Require().NoError(err)
call.ReturnArguments = mock.Arguments{[]runner.ExecutionEnvironment{firstEnvironment, secondEnvironment}, nil}
})
recorder := httptest.NewRecorder()
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusOK, recorder.Code)
paramMap := make(map[string]interface{})
err := json.NewDecoder(recorder.Result().Body).Decode(&paramMap)
s.Require().NoError(err)
environmentsInterface, ok := paramMap["executionEnvironments"]
s.Require().True(ok)
environments, ok := environmentsInterface.([]interface{})
s.Require().True(ok)
s.Equal(2, len(environments))
err = firstEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.NoError(err)
err = secondEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.NoError(err)
})
}
func (s *EnvironmentControllerTestSuite) TestGet() {
call := s.manager.On("Get", mock.AnythingOfType("dto.EnvironmentID"), mock.AnythingOfType("bool"))
path, err := s.router.Get(getRouteName).URL(executionEnvironmentIDKey, tests.DefaultEnvironmentIDAsString)
s.Require().NoError(err)
request, err := http.NewRequest(http.MethodGet, path.String(), http.NoBody)
s.Require().NoError(err)
s.Run("with unknown environment", func() {
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{nil, runner.ErrUnknownExecutionEnvironment}
})
recorder := httptest.NewRecorder()
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusNotFound, recorder.Code)
s.manager.AssertCalled(s.T(), "Get", dto.EnvironmentID(0), false)
})
s.manager.Calls = []mock.Call{}
s.Run("not found with fetch", func() {
recorder := httptest.NewRecorder()
query := path.Query()
query.Set("fetch", "true")
path.RawQuery = query.Encode()
request, err := http.NewRequest(http.MethodGet, path.String(), http.NoBody)
s.Require().NoError(err)
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{nil, runner.ErrUnknownExecutionEnvironment}
})
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusNotFound, recorder.Code)
s.manager.AssertCalled(s.T(), "Get", dto.EnvironmentID(0), true)
})
s.manager.Calls = []mock.Call{}
s.Run("returns environment", func() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
var testEnvironment *environment.NomadEnvironment
call.Run(func(args mock.Arguments) {
testEnvironment, err = environment.NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock,
fmt.Sprintf(jobHCLBasicFormat, nomad.TemplateJobID(tests.DefaultEnvironmentIDAsInteger)))
s.Require().NoError(err)
call.ReturnArguments = mock.Arguments{testEnvironment, nil}
})
recorder := httptest.NewRecorder()
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusOK, recorder.Code)
var environmentParams map[string]interface{}
err := json.NewDecoder(recorder.Result().Body).Decode(&environmentParams)
s.Require().NoError(err)
idInterface, ok := environmentParams["id"]
s.Require().True(ok)
idFloat, ok := idInterface.(float64)
s.Require().True(ok)
s.Equal(tests.DefaultEnvironmentIDAsInteger, int(idFloat))
err = testEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.NoError(err)
})
}
func (s *EnvironmentControllerTestSuite) TestDelete() {
call := s.manager.On("Delete", mock.AnythingOfType("dto.EnvironmentID"))
path, err := s.router.Get(deleteRouteName).URL(executionEnvironmentIDKey, tests.DefaultEnvironmentIDAsString)
s.Require().NoError(err)
request, err := http.NewRequest(http.MethodDelete, path.String(), http.NoBody)
s.Require().NoError(err)
s.Run("environment not found", func() {
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{false, nil}
})
recorder := httptest.NewRecorder()
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusNotFound, recorder.Code)
})
s.Run("environment deleted", func() {
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{true, nil}
})
recorder := httptest.NewRecorder()
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusNoContent, recorder.Code)
})
s.manager.Calls = []mock.Call{}
s.Run("with bad environment id", func() {
_, err := s.router.Get(deleteRouteName).URL(executionEnvironmentIDKey, "MagicNonNumberID")
s.Error(err)
})
}
type CreateOrUpdateEnvironmentTestSuite struct {
EnvironmentControllerTestSuite
path string
id dto.EnvironmentID
body []byte
}
func TestCreateOrUpdateEnvironmentTestSuite(t *testing.T) {
suite.Run(t, new(CreateOrUpdateEnvironmentTestSuite))
}
func (s *CreateOrUpdateEnvironmentTestSuite) SetupTest() {
s.EnvironmentControllerTestSuite.SetupTest()
s.id = tests.DefaultEnvironmentIDAsInteger
testURL, err := s.router.Get(createOrUpdateRouteName).URL(executionEnvironmentIDKey, strconv.Itoa(int(s.id)))
if err != nil {
s.T().Fatal(err)
}
s.path = testURL.String()
s.body, err = json.Marshal(dto.ExecutionEnvironmentRequest{})
if err != nil {
s.T().Fatal(err)
}
}
func (s *CreateOrUpdateEnvironmentTestSuite) recordRequest() *httptest.ResponseRecorder {
recorder := httptest.NewRecorder()
request, err := http.NewRequest(http.MethodPut, s.path, bytes.NewReader(s.body))
if err != nil {
s.T().Fatal(err)
}
s.router.ServeHTTP(recorder, request)
return recorder
}
func (s *CreateOrUpdateEnvironmentTestSuite) TestReturnsBadRequestWhenBadBody() {
s.body = []byte{}
recorder := s.recordRequest()
s.Equal(http.StatusBadRequest, recorder.Code)
}
func (s *CreateOrUpdateEnvironmentTestSuite) TestReturnsInternalServerErrorWhenManagerReturnsError() {
testError := tests.ErrDefault
s.manager.
On("CreateOrUpdate", s.id, mock.AnythingOfType("dto.ExecutionEnvironmentRequest"), mock.Anything).
Return(false, testError)
recorder := s.recordRequest()
s.Equal(http.StatusInternalServerError, recorder.Code)
s.Contains(recorder.Body.String(), testError.Error())
}
func (s *CreateOrUpdateEnvironmentTestSuite) TestReturnsCreatedIfNewEnvironment() {
s.manager.
On("CreateOrUpdate", s.id, mock.AnythingOfType("dto.ExecutionEnvironmentRequest"), mock.Anything).
Return(true, nil)
recorder := s.recordRequest()
s.Equal(http.StatusCreated, recorder.Code)
}
func (s *CreateOrUpdateEnvironmentTestSuite) TestReturnsNoContentIfNotNewEnvironment() {
s.manager.
On("CreateOrUpdate", s.id, mock.AnythingOfType("dto.ExecutionEnvironmentRequest"), mock.Anything).
Return(false, nil)
recorder := s.recordRequest()
s.Equal(http.StatusNoContent, recorder.Code)
}
func (s *CreateOrUpdateEnvironmentTestSuite) TestReturnsNotFoundOnNonIntegerID() {
s.path = strings.Join([]string{BasePath, EnvironmentsPath, "/", "invalid-id"}, "")
recorder := s.recordRequest()
s.Equal(http.StatusNotFound, recorder.Code)
}
func (s *CreateOrUpdateEnvironmentTestSuite) TestFailsOnTooLargeID() {
tooLargeIntStr := strconv.Itoa(math.MaxInt64) + "0"
s.path = strings.Join([]string{BasePath, EnvironmentsPath, "/", tooLargeIntStr}, "")
recorder := s.recordRequest()
s.Equal(http.StatusBadRequest, recorder.Code)
}

View File

@@ -1,55 +0,0 @@
package api
import (
"encoding/json"
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/internal/environment"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests"
"io"
"net/http"
"net/http/httptest"
)
func (s *MainTestSuite) TestHealth() {
s.Run("returns StatusNoContent as default", func() {
request, err := http.NewRequest(http.MethodGet, "/health", http.NoBody)
if err != nil {
s.T().Fatal(err)
}
recorder := httptest.NewRecorder()
manager := &environment.ManagerHandlerMock{}
manager.On("Statistics").Return(map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData{})
Health(manager).ServeHTTP(recorder, request)
s.Equal(http.StatusNoContent, recorder.Code)
})
s.Run("returns InternalServerError for warnings and errors", func() {
s.Run("Prewarming Pool Alert", func() {
request, err := http.NewRequest(http.MethodGet, "/health", http.NoBody)
if err != nil {
s.T().Fatal(err)
}
recorder := httptest.NewRecorder()
manager := &environment.ManagerHandlerMock{}
manager.On("Statistics").Return(map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData{
tests.DefaultEnvironmentIDAsInteger: {
ID: tests.DefaultEnvironmentIDAsInteger,
PrewarmingPoolSize: 3,
IdleRunners: 1,
},
})
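// With only 1 of 3 prewarmed runners idle (33%), the pool falls below the 50% threshold set next.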
config.Config.Server.Alert.PrewarmingPoolThreshold = 0.5
Health(manager).ServeHTTP(recorder, request)
s.Equal(http.StatusServiceUnavailable, recorder.Code)
b, err := io.ReadAll(recorder.Body)
s.Require().NoError(err)
var details dto.InternalServerError
err = json.Unmarshal(b, &details)
s.Require().NoError(err)
s.Contains(details.Message, ErrorPrewarmingPoolDepleting.Error())
})
})
}

View File

@@ -1,501 +0,0 @@
package api
import (
"bytes"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/monitoring"
"github.com/openHPI/poseidon/tests"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"strings"
"testing"
)
const invalidID = "some-invalid-runner-id"
type MiddlewareTestSuite struct {
tests.MemoryLeakTestSuite
manager *runner.ManagerMock
router *mux.Router
runner runner.Runner
capturedRunner runner.Runner
runnerRequest func(string) *http.Request
}
func (s *MiddlewareTestSuite) SetupTest() {
s.MemoryLeakTestSuite.SetupTest()
s.manager = &runner.ManagerMock{}
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
s.runner = runner.NewNomadJob(tests.DefaultRunnerID, nil, apiMock, nil)
s.capturedRunner = nil
s.runnerRequest = func(runnerId string) *http.Request {
path, err := s.router.Get("test-runner-id").URL(RunnerIDKey, runnerId)
s.Require().NoError(err)
request, err := http.NewRequest(http.MethodPost, path.String(), http.NoBody)
s.Require().NoError(err)
return request
}
runnerRouteHandler := func(writer http.ResponseWriter, request *http.Request) {
var ok bool
s.capturedRunner, ok = runner.FromContext(request.Context())
if ok {
writer.WriteHeader(http.StatusOK)
} else {
writer.WriteHeader(http.StatusInternalServerError)
}
}
s.router = mux.NewRouter()
runnerController := &RunnerController{s.manager, s.router}
s.router.Use(monitoring.InfluxDB2Middleware)
s.router.Use(runnerController.findRunnerMiddleware)
s.router.HandleFunc(fmt.Sprintf("/test/{%s}", RunnerIDKey), runnerRouteHandler).Name("test-runner-id")
}
func (s *MiddlewareTestSuite) TearDownTest() {
defer s.MemoryLeakTestSuite.TearDownTest()
err := s.runner.Destroy(nil)
s.Require().NoError(err)
}
func TestMiddlewareTestSuite(t *testing.T) {
suite.Run(t, new(MiddlewareTestSuite))
}
func (s *MiddlewareTestSuite) TestFindRunnerMiddlewareIfRunnerExists() {
s.manager.On("Get", s.runner.ID()).Return(s.runner, nil)
recorder := httptest.NewRecorder()
s.router.ServeHTTP(recorder, s.runnerRequest(s.runner.ID()))
s.Equal(http.StatusOK, recorder.Code)
s.Equal(s.runner, s.capturedRunner)
}
func (s *MiddlewareTestSuite) TestFindRunnerMiddlewareIfRunnerDoesNotExist() {
s.manager.On("Get", invalidID).Return(nil, runner.ErrRunnerNotFound)
recorder := httptest.NewRecorder()
s.router.ServeHTTP(recorder, s.runnerRequest(invalidID))
s.Equal(http.StatusGone, recorder.Code)
}
func (s *MiddlewareTestSuite) TestFindRunnerMiddlewareDoesNotEarlyRespond() {
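// Use a request body larger than typical socket buffers so that an early response
// (sent before draining the body) would leave unread bytes behind.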
body := strings.NewReader(strings.Repeat("A", 798968))
path, err := s.router.Get("test-runner-id").URL(RunnerIDKey, invalidID)
s.Require().NoError(err)
request, err := http.NewRequest(http.MethodPost, path.String(), body)
s.Require().NoError(err)
s.manager.On("Get", mock.AnythingOfType("string")).Return(nil, runner.ErrRunnerNotFound)
recorder := httptest.NewRecorder()
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusGone, recorder.Code)
s.Equal(0, body.Len()) // the request body should have been fully consumed
}
func TestRunnerRouteTestSuite(t *testing.T) {
suite.Run(t, new(RunnerRouteTestSuite))
}
type RunnerRouteTestSuite struct {
tests.MemoryLeakTestSuite
runnerManager *runner.ManagerMock
router *mux.Router
runner runner.Runner
executionID string
}
func (s *RunnerRouteTestSuite) SetupTest() {
s.MemoryLeakTestSuite.SetupTest()
s.runnerManager = &runner.ManagerMock{}
s.router = NewRouter(s.runnerManager, nil)
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
s.runner = runner.NewNomadJob("some-id", nil, apiMock, func(_ runner.Runner) error { return nil })
s.executionID = "execution"
s.runner.StoreExecution(s.executionID, &dto.ExecutionRequest{})
s.runnerManager.On("Get", s.runner.ID()).Return(s.runner, nil)
}
func (s *RunnerRouteTestSuite) TearDownTest() {
defer s.MemoryLeakTestSuite.TearDownTest()
s.Require().NoError(s.runner.Destroy(nil))
}
func TestProvideRunnerTestSuite(t *testing.T) {
suite.Run(t, new(ProvideRunnerTestSuite))
}
type ProvideRunnerTestSuite struct {
RunnerRouteTestSuite
defaultRequest *http.Request
path string
}
func (s *ProvideRunnerTestSuite) SetupTest() {
s.RunnerRouteTestSuite.SetupTest()
path, err := s.router.Get(ProvideRoute).URL()
s.Require().NoError(err)
s.path = path.String()
runnerRequest := dto.RunnerRequest{ExecutionEnvironmentID: tests.DefaultEnvironmentIDAsInteger}
body, err := json.Marshal(runnerRequest)
s.Require().NoError(err)
s.defaultRequest, err = http.NewRequest(http.MethodPost, s.path, bytes.NewReader(body))
s.Require().NoError(err)
}
func (s *ProvideRunnerTestSuite) TestValidRequestReturnsRunner() {
s.runnerManager.
On("Claim", mock.AnythingOfType("dto.EnvironmentID"), mock.AnythingOfType("int")).
Return(s.runner, nil)
recorder := httptest.NewRecorder()
s.router.ServeHTTP(recorder, s.defaultRequest)
s.Equal(http.StatusOK, recorder.Code)
s.Run("response contains runnerId", func() {
var runnerResponse dto.RunnerResponse
err := json.NewDecoder(recorder.Result().Body).Decode(&runnerResponse)
s.Require().NoError(err)
_ = recorder.Result().Body.Close()
s.Equal(s.runner.ID(), runnerResponse.ID)
})
}
func (s *ProvideRunnerTestSuite) TestInvalidRequestReturnsBadRequest() {
badRequest, err := http.NewRequest(http.MethodPost, s.path, strings.NewReader(""))
s.Require().NoError(err)
recorder := httptest.NewRecorder()
s.router.ServeHTTP(recorder, badRequest)
s.Equal(http.StatusBadRequest, recorder.Code)
}
func (s *ProvideRunnerTestSuite) TestWhenExecutionEnvironmentDoesNotExistReturnsNotFound() {
s.runnerManager.
On("Claim", mock.AnythingOfType("dto.EnvironmentID"), mock.AnythingOfType("int")).
Return(nil, runner.ErrUnknownExecutionEnvironment)
recorder := httptest.NewRecorder()
s.router.ServeHTTP(recorder, s.defaultRequest)
s.Equal(http.StatusNotFound, recorder.Code)
}
func (s *ProvideRunnerTestSuite) TestWhenNoRunnerAvailableReturnsNomadOverload() {
s.runnerManager.
On("Claim", mock.AnythingOfType("dto.EnvironmentID"), mock.AnythingOfType("int")).
Return(nil, runner.ErrNoRunnersAvailable)
recorder := httptest.NewRecorder()
s.router.ServeHTTP(recorder, s.defaultRequest)
s.Equal(http.StatusInternalServerError, recorder.Code)
var internalServerError dto.InternalServerError
err := json.NewDecoder(recorder.Result().Body).Decode(&internalServerError)
s.Require().NoError(err)
_ = recorder.Result().Body.Close()
s.Equal(dto.ErrorNomadOverload, internalServerError.ErrorCode)
}
func (s *RunnerRouteTestSuite) TestExecuteRoute() {
path, err := s.router.Get(ExecutePath).URL(RunnerIDKey, s.runner.ID())
s.Require().NoError(err)
s.Run("valid request", func() {
recorder := httptest.NewRecorder()
executionRequest := dto.ExecutionRequest{
Command: "command",
TimeLimit: 10,
Environment: nil,
}
body, err := json.Marshal(executionRequest)
s.Require().NoError(err)
request, err := http.NewRequest(http.MethodPost, path.String(), bytes.NewReader(body))
s.Require().NoError(err)
s.router.ServeHTTP(recorder, request)
var webSocketResponse dto.ExecutionResponse
err = json.NewDecoder(recorder.Result().Body).Decode(&webSocketResponse)
s.Require().NoError(err)
s.Equal(http.StatusOK, recorder.Code)
s.Run("creates an execution request for the runner", func() {
webSocketURL, err := url.Parse(webSocketResponse.WebSocketURL)
s.Require().NoError(err)
executionID := webSocketURL.Query().Get(ExecutionIDKey)
ok := s.runner.ExecutionExists(executionID)
s.True(ok, "No execution request with this id: ", executionID)
})
})
s.Run("invalid request", func() {
recorder := httptest.NewRecorder()
body := ""
request, err := http.NewRequest(http.MethodPost, path.String(), strings.NewReader(body))
s.Require().NoError(err)
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusBadRequest, recorder.Code)
})
s.Run("forbidden characters in command", func() {
recorder := httptest.NewRecorder()
executionRequest := dto.ExecutionRequest{
Command: "echo 'forbidden'",
TimeLimit: 10,
}
body, err := json.Marshal(executionRequest)
s.Require().NoError(err)
request, err := http.NewRequest(http.MethodPost, path.String(), bytes.NewReader(body))
s.Require().NoError(err)
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusBadRequest, recorder.Code)
})
}
func TestUpdateFileSystemRouteTestSuite(t *testing.T) {
suite.Run(t, new(UpdateFileSystemRouteTestSuite))
}
type UpdateFileSystemRouteTestSuite struct {
RunnerRouteTestSuite
path string
recorder *httptest.ResponseRecorder
runnerMock *runner.RunnerMock
}
func (s *UpdateFileSystemRouteTestSuite) SetupTest() {
s.RunnerRouteTestSuite.SetupTest()
routeURL, err := s.router.Get(UpdateFileSystemPath).URL(RunnerIDKey, tests.DefaultMockID)
s.Require().NoError(err)
s.path = routeURL.String()
s.runnerMock = &runner.RunnerMock{}
s.runnerMock.On("ID").Return(tests.DefaultMockID)
s.runnerMock.On("Environment").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
s.runnerManager.On("Get", tests.DefaultMockID).Return(s.runnerMock, nil)
s.recorder = httptest.NewRecorder()
}
func (s *UpdateFileSystemRouteTestSuite) TestUpdateFileSystemReturnsNoContentOnValidRequest() {
s.runnerMock.On("UpdateFileSystem", mock.AnythingOfType("*dto.UpdateFileSystemRequest"), mock.Anything).
Return(nil)
copyRequest := dto.UpdateFileSystemRequest{}
body, err := json.Marshal(copyRequest)
s.Require().NoError(err)
request, err := http.NewRequest(http.MethodPatch, s.path, bytes.NewReader(body))
s.Require().NoError(err)
s.router.ServeHTTP(s.recorder, request)
s.Equal(http.StatusNoContent, s.recorder.Code)
s.runnerMock.AssertCalled(s.T(), "UpdateFileSystem",
mock.AnythingOfType("*dto.UpdateFileSystemRequest"), mock.Anything)
}
func (s *UpdateFileSystemRouteTestSuite) TestUpdateFileSystemReturnsBadRequestOnInvalidRequestBody() {
request, err := http.NewRequest(http.MethodPatch, s.path, strings.NewReader(""))
s.Require().NoError(err)
s.router.ServeHTTP(s.recorder, request)
s.Equal(http.StatusBadRequest, s.recorder.Code)
}
func (s *UpdateFileSystemRouteTestSuite) TestUpdateFileSystemToNonExistingRunnerReturnsGone() {
s.runnerManager.On("Get", invalidID).Return(nil, runner.ErrRunnerNotFound)
path, err := s.router.Get(UpdateFileSystemPath).URL(RunnerIDKey, invalidID)
s.Require().NoError(err)
copyRequest := dto.UpdateFileSystemRequest{}
body, err := json.Marshal(copyRequest)
s.Require().NoError(err)
request, err := http.NewRequest(http.MethodPatch, path.String(), bytes.NewReader(body))
s.Require().NoError(err)
s.router.ServeHTTP(s.recorder, request)
s.Equal(http.StatusGone, s.recorder.Code)
}
func (s *UpdateFileSystemRouteTestSuite) TestUpdateFileSystemReturnsInternalServerErrorWhenCopyFailed() {
s.runnerMock.
On("UpdateFileSystem", mock.AnythingOfType("*dto.UpdateFileSystemRequest"), mock.Anything).
Return(runner.ErrorFileCopyFailed)
copyRequest := dto.UpdateFileSystemRequest{}
body, err := json.Marshal(copyRequest)
s.Require().NoError(err)
request, err := http.NewRequest(http.MethodPatch, s.path, bytes.NewReader(body))
s.Require().NoError(err)
s.router.ServeHTTP(s.recorder, request)
s.Equal(http.StatusInternalServerError, s.recorder.Code)
}
func (s *UpdateFileSystemRouteTestSuite) TestListFileSystem() {
routeURL, err := s.router.Get(UpdateFileSystemPath).URL(RunnerIDKey, tests.DefaultMockID)
s.Require().NoError(err)
mockCall := s.runnerMock.On("ListFileSystem", mock.AnythingOfType("string"),
mock.AnythingOfType("bool"), mock.Anything, mock.AnythingOfType("bool"), mock.Anything)
s.Run("default parameters", func() {
mockCall.Run(func(args mock.Arguments) {
path, ok := args.Get(0).(string)
s.True(ok)
s.Equal("./", path)
recursive, ok := args.Get(1).(bool)
s.True(ok)
s.True(recursive)
mockCall.ReturnArguments = mock.Arguments{nil}
})
request, err := http.NewRequest(http.MethodGet, routeURL.String(), strings.NewReader(""))
s.Require().NoError(err)
s.router.ServeHTTP(s.recorder, request)
s.Equal(http.StatusOK, s.recorder.Code)
})
s.recorder = httptest.NewRecorder()
s.Run("passed parameters", func() {
expectedPath := "/flag"
mockCall.Run(func(args mock.Arguments) {
path, ok := args.Get(0).(string)
s.True(ok)
s.Equal(expectedPath, path)
recursive, ok := args.Get(1).(bool)
s.True(ok)
s.False(recursive)
mockCall.ReturnArguments = mock.Arguments{nil}
})
query := routeURL.Query()
query.Set(PathKey, expectedPath)
query.Set(RecursiveKey, strconv.FormatBool(false))
routeURL.RawQuery = query.Encode()
request, err := http.NewRequest(http.MethodGet, routeURL.String(), strings.NewReader(""))
s.Require().NoError(err)
s.router.ServeHTTP(s.recorder, request)
s.Equal(http.StatusOK, s.recorder.Code)
})
s.recorder = httptest.NewRecorder()
s.Run("Internal Server Error on failure", func() {
mockCall.Run(func(args mock.Arguments) {
mockCall.ReturnArguments = mock.Arguments{runner.ErrRunnerNotFound}
})
request, err := http.NewRequest(http.MethodGet, routeURL.String(), strings.NewReader(""))
s.Require().NoError(err)
s.router.ServeHTTP(s.recorder, request)
s.Equal(http.StatusInternalServerError, s.recorder.Code)
})
}
func (s *UpdateFileSystemRouteTestSuite) TestFileContent() {
routeURL, err := s.router.Get(FileContentRawPath).URL(RunnerIDKey, tests.DefaultMockID)
s.Require().NoError(err)
mockCall := s.runnerMock.On("GetFileContent",
mock.AnythingOfType("string"), mock.Anything, mock.AnythingOfType("bool"), mock.Anything)
s.Run("Not Found", func() {
mockCall.Return(runner.ErrFileNotFound)
request, err := http.NewRequest(http.MethodGet, routeURL.String(), strings.NewReader(""))
s.Require().NoError(err)
s.router.ServeHTTP(s.recorder, request)
s.Equal(http.StatusFailedDependency, s.recorder.Code)
})
s.recorder = httptest.NewRecorder()
s.Run("Unknown Error", func() {
mockCall.Return(nomad.ErrorExecutorCommunicationFailed)
request, err := http.NewRequest(http.MethodGet, routeURL.String(), strings.NewReader(""))
s.Require().NoError(err)
s.router.ServeHTTP(s.recorder, request)
s.Equal(http.StatusInternalServerError, s.recorder.Code)
})
s.recorder = httptest.NewRecorder()
s.Run("No Error", func() {
mockCall.Return(nil)
request, err := http.NewRequest(http.MethodGet, routeURL.String(), strings.NewReader(""))
s.Require().NoError(err)
s.router.ServeHTTP(s.recorder, request)
s.Equal(http.StatusOK, s.recorder.Code)
})
}
func TestDeleteRunnerRouteTestSuite(t *testing.T) {
suite.Run(t, new(DeleteRunnerRouteTestSuite))
}
type DeleteRunnerRouteTestSuite struct {
RunnerRouteTestSuite
path string
}
func (s *DeleteRunnerRouteTestSuite) SetupTest() {
s.RunnerRouteTestSuite.SetupTest()
deleteURL, err := s.router.Get(DeleteRoute).URL(RunnerIDKey, s.runner.ID())
s.Require().NoError(err)
s.path = deleteURL.String()
}
func (s *DeleteRunnerRouteTestSuite) TestValidRequestReturnsNoContent() {
s.runnerManager.On("Return", s.runner).Return(nil)
recorder := httptest.NewRecorder()
request, err := http.NewRequest(http.MethodDelete, s.path, http.NoBody)
s.Require().NoError(err)
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusNoContent, recorder.Code)
s.Run("runner was returned to runner manager", func() {
s.runnerManager.AssertCalled(s.T(), "Return", s.runner)
})
}
func (s *DeleteRunnerRouteTestSuite) TestReturnInternalServerErrorWhenApiCallToNomadFailed() {
s.runnerManager.On("Return", s.runner).Return(tests.ErrDefault)
recorder := httptest.NewRecorder()
request, err := http.NewRequest(http.MethodDelete, s.path, http.NoBody)
s.Require().NoError(err)
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusInternalServerError, recorder.Code)
}
func (s *DeleteRunnerRouteTestSuite) TestDeleteInvalidRunnerIdReturnsGone() {
s.runnerManager.On("Get", mock.AnythingOfType("string")).Return(nil, tests.ErrDefault)
deleteURL, err := s.router.Get(DeleteRoute).URL(RunnerIDKey, "1nv4l1dID")
s.Require().NoError(err)
deletePath := deleteURL.String()
recorder := httptest.NewRecorder()
request, err := http.NewRequest(http.MethodDelete, deletePath, http.NoBody)
s.Require().NoError(err)
s.router.ServeHTTP(recorder, request)
s.Equal(http.StatusGone, recorder.Code)
}

View File

@@ -1,494 +0,0 @@
package api
import (
"bufio"
"context"
"crypto/tls"
"fmt"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/openHPI/poseidon/internal/environment"
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests"
"github.com/openHPI/poseidon/tests/helpers"
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"io"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
)
func TestWebSocketTestSuite(t *testing.T) {
suite.Run(t, new(WebSocketTestSuite))
}
type WebSocketTestSuite struct {
tests.MemoryLeakTestSuite
router *mux.Router
executionID string
runner runner.Runner
apiMock *nomad.ExecutorAPIMock
server *httptest.Server
}
func (s *WebSocketTestSuite) SetupTest() {
s.MemoryLeakTestSuite.SetupTest()
runnerID := "runner-id"
s.runner, s.apiMock = newNomadAllocationWithMockedAPIClient(runnerID)
// default execution
s.executionID = tests.DefaultExecutionID
s.runner.StoreExecution(s.executionID, &executionRequestLs)
mockAPIExecuteLs(s.apiMock)
runnerManager := &runner.ManagerMock{}
runnerManager.On("Get", s.runner.ID()).Return(s.runner, nil)
s.router = NewRouter(runnerManager, nil)
s.server = httptest.NewServer(s.router)
}
func (s *WebSocketTestSuite) TearDownTest() {
defer s.MemoryLeakTestSuite.TearDownTest()
s.server.Close()
err := s.runner.Destroy(nil)
s.Require().NoError(err)
}
func (s *WebSocketTestSuite) TestWebsocketConnectionCanBeEstablished() {
wsURL, err := s.webSocketURL("ws", s.runner.ID(), s.executionID)
s.Require().NoError(err)
conn, _, err := websocket.DefaultDialer.Dial(wsURL.String(), nil)
s.Require().NoError(err)
<-time.After(tests.ShortTimeout)
err = conn.Close()
s.NoError(err)
}
func (s *WebSocketTestSuite) TestWebsocketReturns404IfExecutionDoesNotExist() {
wsURL, err := s.webSocketURL("ws", s.runner.ID(), "invalid-execution-id")
s.Require().NoError(err)
_, response, err := websocket.DefaultDialer.Dial(wsURL.String(), nil)
s.Require().ErrorIs(err, websocket.ErrBadHandshake)
s.Equal(http.StatusNotFound, response.StatusCode)
}
func (s *WebSocketTestSuite) TestWebsocketReturns400IfRequestedViaHttp() {
wsURL, err := s.webSocketURL("http", s.runner.ID(), s.executionID)
s.Require().NoError(err)
response, err := http.Get(wsURL.String())
s.Require().NoError(err)
// This functionality is implemented by the WebSocket library.
s.Equal(http.StatusBadRequest, response.StatusCode)
_, err = io.ReadAll(response.Body)
s.NoError(err)
}
func (s *WebSocketTestSuite) TestWebsocketConnection() {
s.runner.StoreExecution(s.executionID, &executionRequestHead)
mockAPIExecuteHead(s.apiMock)
wsURL, err := s.webSocketURL("ws", s.runner.ID(), s.executionID)
s.Require().NoError(err)
connection, _, err := websocket.DefaultDialer.Dial(wsURL.String(), nil)
s.Require().NoError(err)
err = connection.SetReadDeadline(time.Now().Add(5 * time.Second))
s.Require().NoError(err)
s.Run("Receives start message", func() {
message, err := helpers.ReceiveNextWebSocketMessage(connection)
s.Require().NoError(err)
s.Equal(dto.WebSocketMetaStart, message.Type)
})
s.Run("Executes the request in the runner", func() {
<-time.After(tests.ShortTimeout)
s.apiMock.AssertCalled(s.T(), "ExecuteCommand",
mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.AnythingOfType("bool"),
mock.Anything, mock.Anything, mock.Anything)
})
s.Run("Can send input", func() {
err = connection.WriteMessage(websocket.TextMessage, []byte("Hello World\n"))
s.Require().NoError(err)
})
messages, err := helpers.ReceiveAllWebSocketMessages(connection)
s.Require().Error(err)
s.True(websocket.IsCloseError(err, websocket.CloseNormalClosure))
s.Run("Receives output message", func() {
stdout, _, _ := helpers.WebSocketOutputMessages(messages)
s.Equal("Hello World", stdout)
})
s.Run("Receives exit message", func() {
controlMessages := helpers.WebSocketControlMessages(messages)
s.Require().Equal(1, len(controlMessages))
s.Equal(dto.WebSocketExit, controlMessages[0].Type)
})
}
func (s *WebSocketTestSuite) TestCancelWebSocketConnection() {
executionID := "sleeping-execution"
s.runner.StoreExecution(executionID, &executionRequestSleep)
canceled := mockAPIExecuteSleep(s.apiMock)
wsURL, err := webSocketURL("ws", s.server, s.router, s.runner.ID(), executionID)
s.Require().NoError(err)
connection, _, err := websocket.DefaultDialer.Dial(wsURL.String(), nil)
s.Require().NoError(err)
message, err := helpers.ReceiveNextWebSocketMessage(connection)
s.Require().NoError(err)
s.Equal(dto.WebSocketMetaStart, message.Type)
select {
case <-canceled:
s.Fail("ExecuteInteractively canceled unexpected")
default:
}
err = connection.WriteControl(websocket.CloseMessage,
websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), time.Now().Add(time.Second))
s.Require().NoError(err)
select {
case <-canceled:
case <-time.After(time.Second):
s.Fail("ExecuteInteractively not canceled")
}
}
func (s *WebSocketTestSuite) TestWebSocketConnectionTimeout() {
executionID := "time-out-execution"
limitExecution := executionRequestSleep
limitExecution.TimeLimit = 2
s.runner.StoreExecution(executionID, &limitExecution)
canceled := mockAPIExecuteSleep(s.apiMock)
wsURL, err := webSocketURL("ws", s.server, s.router, s.runner.ID(), executionID)
s.Require().NoError(err)
connection, _, err := websocket.DefaultDialer.Dial(wsURL.String(), nil)
s.Require().NoError(err)
message, err := helpers.ReceiveNextWebSocketMessage(connection)
s.Require().NoError(err)
s.Equal(dto.WebSocketMetaStart, message.Type)
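// Until shortly before the time limit expires, the execution must not be canceled.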
select {
case <-canceled:
s.Fail("ExecuteInteractively canceled unexpected")
case <-time.After(time.Duration(limitExecution.TimeLimit-1) * time.Second):
<-time.After(time.Second)
}
select {
case <-canceled:
case <-time.After(time.Second):
s.Fail("ExecuteInteractively not canceled")
}
message, err = helpers.ReceiveNextWebSocketMessage(connection)
s.Require().NoError(err)
s.Equal(dto.WebSocketMetaTimeout, message.Type)
}
func (s *WebSocketTestSuite) TestWebsocketStdoutAndStderr() {
executionID := "ls-execution"
s.runner.StoreExecution(executionID, &executionRequestLs)
mockAPIExecuteLs(s.apiMock)
wsURL, err := webSocketURL("ws", s.server, s.router, s.runner.ID(), executionID)
s.Require().NoError(err)
connection, _, err := websocket.DefaultDialer.Dial(wsURL.String(), nil)
s.Require().NoError(err)
messages, err := helpers.ReceiveAllWebSocketMessages(connection)
s.Require().Error(err)
s.True(websocket.IsCloseError(err, websocket.CloseNormalClosure))
stdout, stderr, _ := helpers.WebSocketOutputMessages(messages)
s.Contains(stdout, "existing-file")
s.Contains(stderr, "non-existing-file")
}
func (s *WebSocketTestSuite) TestWebsocketError() {
executionID := "error-execution"
s.runner.StoreExecution(executionID, &executionRequestError)
mockAPIExecuteError(s.apiMock)
wsURL, err := webSocketURL("ws", s.server, s.router, s.runner.ID(), executionID)
s.Require().NoError(err)
connection, _, err := websocket.DefaultDialer.Dial(wsURL.String(), nil)
s.Require().NoError(err)
messages, err := helpers.ReceiveAllWebSocketMessages(connection)
s.Require().Error(err)
s.True(websocket.IsCloseError(err, websocket.CloseNormalClosure))
_, _, errMessages := helpers.WebSocketOutputMessages(messages)
s.Require().Equal(1, len(errMessages))
s.Equal("Error executing the request", errMessages[0])
}
func (s *WebSocketTestSuite) TestWebsocketNonZeroExit() {
executionID := "exit-execution"
s.runner.StoreExecution(executionID, &executionRequestExitNonZero)
mockAPIExecuteExitNonZero(s.apiMock)
wsURL, err := webSocketURL("ws", s.server, s.router, s.runner.ID(), executionID)
s.Require().NoError(err)
connection, _, err := websocket.DefaultDialer.Dial(wsURL.String(), nil)
s.Require().NoError(err)
messages, err := helpers.ReceiveAllWebSocketMessages(connection)
s.Require().Error(err)
s.True(websocket.IsCloseError(err, websocket.CloseNormalClosure))
controlMessages := helpers.WebSocketControlMessages(messages)
s.Equal(2, len(controlMessages))
s.Equal(&dto.WebSocketMessage{Type: dto.WebSocketExit, ExitCode: 42}, controlMessages[1])
}
func (s *MainTestSuite) TestWebsocketTLS() {
runnerID := "runner-id"
r, apiMock := newNomadAllocationWithMockedAPIClient(runnerID)
executionID := tests.DefaultExecutionID
r.StoreExecution(executionID, &executionRequestLs)
mockAPIExecuteLs(apiMock)
runnerManager := &runner.ManagerMock{}
runnerManager.On("Get", r.ID()).Return(r, nil)
router := NewRouter(runnerManager, nil)
server, err := helpers.StartTLSServer(s.T(), router)
s.Require().NoError(err)
defer server.Close()
wsURL, err := webSocketURL("wss", server, router, runnerID, executionID)
s.Require().NoError(err)
config := &tls.Config{RootCAs: nil, InsecureSkipVerify: true} //nolint:gosec // test needs self-signed cert
d := websocket.Dialer{TLSClientConfig: config}
connection, _, err := d.Dial(wsURL.String(), nil)
s.Require().NoError(err)
message, err := helpers.ReceiveNextWebSocketMessage(connection)
s.Require().NoError(err)
s.Equal(dto.WebSocketMetaStart, message.Type)
_, err = helpers.ReceiveAllWebSocketMessages(connection)
s.Require().Error(err)
s.True(websocket.IsCloseError(err, websocket.CloseNormalClosure))
s.NoError(r.Destroy(nil))
}
func (s *MainTestSuite) TestWebSocketProxyStopsReadingTheWebSocketAfterClosingIt() {
apiMock := &nomad.ExecutorAPIMock{}
executionID := tests.DefaultExecutionID
r, wsURL, cleanup := newRunnerWithNotMockedRunnerManager(s, apiMock, executionID)
defer cleanup()
logger, hook := test.NewNullLogger()
log = logger.WithField("pkg", "api")
r.StoreExecution(executionID, &executionRequestHead)
mockAPIExecute(apiMock, &executionRequestHead,
func(_ string, ctx context.Context, _ string, _ bool, _ io.Reader, _, _ io.Writer) (int, error) {
return 0, nil
})
connection, _, err := websocket.DefaultDialer.Dial(wsURL.String(), nil)
s.Require().NoError(err)
_, err = helpers.ReceiveAllWebSocketMessages(connection)
s.Require().Error(err)
s.True(websocket.IsCloseError(err, websocket.CloseNormalClosure))
for _, logMsg := range hook.Entries {
if logMsg.Level < logrus.InfoLevel {
s.Fail(logMsg.Message)
}
}
}
// --- Test suite specific test helpers ---
func newNomadAllocationWithMockedAPIClient(runnerID string) (runner.Runner, *nomad.ExecutorAPIMock) {
executorAPIMock := &nomad.ExecutorAPIMock{}
executorAPIMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
manager := &runner.ManagerMock{}
manager.On("Return", mock.Anything).Return(nil)
r := runner.NewNomadJob(runnerID, nil, executorAPIMock, nil)
return r, executorAPIMock
}
func newRunnerWithNotMockedRunnerManager(s *MainTestSuite, apiMock *nomad.ExecutorAPIMock, executionID string) (
r runner.Runner, wsURL *url.URL, cleanup func()) {
s.T().Helper()
apiMock.On("MarkRunnerAsUsed", mock.AnythingOfType("string"), mock.AnythingOfType("int")).Return(nil)
apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
apiMock.On("RegisterRunnerJob", mock.AnythingOfType("*api.Job")).Return(nil)
call := apiMock.On("WatchEventStream", mock.Anything, mock.Anything, mock.Anything)
call.Run(func(args mock.Arguments) {
<-s.TestCtx.Done()
call.ReturnArguments = mock.Arguments{nil}
})
runnerManager := runner.NewNomadRunnerManager(apiMock, s.TestCtx)
router := NewRouter(runnerManager, nil)
s.ExpectedGoroutineIncrease++ // The server is not closing properly. Therefore, we don't even try.
server := httptest.NewServer(router)
runnerID := tests.DefaultRunnerID
runnerJob := runner.NewNomadJob(runnerID, nil, apiMock, nil)
e, err := environment.NewNomadEnvironment(0, apiMock, "job \"template-0\" {}")
s.Require().NoError(err)
eID, err := nomad.EnvironmentIDFromRunnerID(runnerID)
s.Require().NoError(err)
e.SetID(eID)
e.SetPrewarmingPoolSize(0)
runnerManager.StoreEnvironment(e)
e.AddRunner(runnerJob)
r, err = runnerManager.Claim(e.ID(), int(tests.DefaultTestTimeout.Seconds()))
s.Require().NoError(err)
wsURL, err = webSocketURL("ws", server, router, r.ID(), executionID)
s.Require().NoError(err)
return r, wsURL, func() {
err = r.Destroy(tests.ErrCleanupDestroyReason)
s.NoError(err)
err = e.Delete(tests.ErrCleanupDestroyReason)
s.NoError(err)
}
}
func webSocketURL(scheme string, server *httptest.Server, router *mux.Router,
runnerID string, executionID string,
) (*url.URL, error) {
websocketURL, err := url.Parse(server.URL)
if err != nil {
return nil, err
}
path, err := router.Get(WebsocketPath).URL(RunnerIDKey, runnerID)
if err != nil {
return nil, err
}
websocketURL.Scheme = scheme
websocketURL.Path = path.Path
websocketURL.RawQuery = fmt.Sprintf("executionID=%s", executionID)
return websocketURL, nil
}
func (s *WebSocketTestSuite) webSocketURL(scheme, runnerID, executionID string) (*url.URL, error) {
return webSocketURL(scheme, s.server, s.router, runnerID, executionID)
}
var executionRequestLs = dto.ExecutionRequest{Command: "ls"}
// mockAPIExecuteLs mocks the ExecuteCommand method of an ExecutorAPI to act as if
// 'ls existing-file non-existing-file' was executed.
func mockAPIExecuteLs(api *nomad.ExecutorAPIMock) {
mockAPIExecute(api, &executionRequestLs,
func(_ string, _ context.Context, _ string, _ bool, _ io.Reader, stdout, stderr io.Writer) (int, error) {
_, _ = stdout.Write([]byte("existing-file\n"))
_, _ = stderr.Write([]byte("ls: cannot access 'non-existing-file': No such file or directory\n"))
return 0, nil
})
}
var executionRequestHead = dto.ExecutionRequest{Command: "head -n 1"}
// mockAPIExecuteHead mocks the ExecuteCommand method of an ExecutorAPI to act as if 'head -n 1' was executed.
func mockAPIExecuteHead(api *nomad.ExecutorAPIMock) {
mockAPIExecute(api, &executionRequestHead,
func(_ string, _ context.Context, _ string, _ bool,
stdin io.Reader, stdout io.Writer, stderr io.Writer,
) (int, error) {
scanner := bufio.NewScanner(stdin)
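// Retry until a complete input line arrives; a fresh scanner is needed because
// Scan does not resume after it has reported false.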
for !scanner.Scan() {
scanner = bufio.NewScanner(stdin)
}
_, _ = stdout.Write(scanner.Bytes())
return 0, nil
})
}
var executionRequestSleep = dto.ExecutionRequest{Command: "sleep infinity"}
// mockAPIExecuteSleep mocks the ExecuteCommand method of an ExecutorAPI to sleep
// until the execution receives a SIGQUIT.
func mockAPIExecuteSleep(api *nomad.ExecutorAPIMock) <-chan bool {
canceled := make(chan bool, 1)
mockAPIExecute(api, &executionRequestSleep,
func(_ string, ctx context.Context, _ string, _ bool,
stdin io.Reader, stdout io.Writer, stderr io.Writer,
) (int, error) {
var err error
buffer := make([]byte, 1) //nolint:makezero // if the length is zero, the Read call never reads anything
for n := 0; !(n == 1 && buffer[0] == runner.SIGQUIT); n, err = stdin.Read(buffer) {
if err != nil {
return 0, fmt.Errorf("error while reading stdin: %w", err)
}
}
close(canceled)
return 0, ctx.Err()
})
return canceled
}
var executionRequestError = dto.ExecutionRequest{Command: "error"}
// mockAPIExecuteError mocks the ExecuteCommand method of an ExecutorAPI to return an error.
func mockAPIExecuteError(api *nomad.ExecutorAPIMock) {
mockAPIExecute(api, &executionRequestError,
func(_ string, _ context.Context, _ string, _ bool, _ io.Reader, _, _ io.Writer) (int, error) {
return 0, tests.ErrDefault
})
}
var executionRequestExitNonZero = dto.ExecutionRequest{Command: "exit 42"}
// mockAPIExecuteExitNonZero mocks the ExecuteCommand method of an ExecutorAPI to exit with status 42.
func mockAPIExecuteExitNonZero(api *nomad.ExecutorAPIMock) {
mockAPIExecute(api, &executionRequestExitNonZero,
func(_ string, _ context.Context, _ string, _ bool, _ io.Reader, _, _ io.Writer) (int, error) {
return 42, nil
})
}
// mockAPIExecute mocks the ExecuteCommand method of an ExecutorAPI so that the given run function
// is invoked whenever the command of the given ExecutionRequest is executed.
func mockAPIExecute(api *nomad.ExecutorAPIMock, request *dto.ExecutionRequest,
run func(runnerId string, ctx context.Context, command string, tty bool,
stdin io.Reader, stdout, stderr io.Writer) (int, error)) {
tests.RemoveMethodFromMock(&api.Mock, "ExecuteCommand")
call := api.On("ExecuteCommand",
mock.AnythingOfType("string"),
mock.Anything,
request.FullCommand(),
mock.AnythingOfType("bool"),
mock.AnythingOfType("bool"),
mock.Anything,
mock.Anything,
mock.Anything)
call.Run(func(args mock.Arguments) {
exit, err := run(args.Get(0).(string),
args.Get(1).(context.Context),
args.Get(2).(string),
args.Get(3).(bool),
args.Get(5).(io.Reader),
args.Get(6).(io.Writer),
args.Get(7).(io.Writer))
call.ReturnArguments = mock.Arguments{exit, err}
})
}

View File

@@ -1,83 +0,0 @@
package ws
import (
"context"
"github.com/gorilla/websocket"
"github.com/openHPI/poseidon/tests"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"io"
"strings"
"testing"
)
type MainTestSuite struct {
tests.MemoryLeakTestSuite
}
func TestMainTestSuite(t *testing.T) {
suite.Run(t, new(MainTestSuite))
}
func (s *MainTestSuite) TestCodeOceanToRawReaderReturnsOnlyAfterOneByteWasRead() {
readingCtx, cancel := context.WithCancel(context.Background())
forwardingCtx := readingCtx
defer cancel()
reader := NewCodeOceanToRawReader(nil, readingCtx, forwardingCtx)
read := make(chan bool)
go func() {
//nolint:makezero // we can't make zero initial length here as the reader otherwise doesn't block
p := make([]byte, 10)
_, err := reader.Read(p)
s.Require().NoError(err)
read <- true
}()
s.Run("Does not return immediately when there is no data", func() {
s.False(tests.ChannelReceivesSomething(read, tests.ShortTimeout))
})
s.Run("Returns when there is data available", func() {
reader.buffer <- byte(42)
s.True(tests.ChannelReceivesSomething(read, tests.ShortTimeout))
})
}
func (s *MainTestSuite) TestCodeOceanToRawReaderReturnsOnlyAfterOneByteWasReadFromConnection() {
messages := make(chan io.Reader)
defer close(messages)
connection := &ConnectionMock{}
connection.On("WriteMessage", mock.AnythingOfType("int"), mock.AnythingOfType("[]uint8")).Return(nil)
connection.On("CloseHandler").Return(nil)
connection.On("SetCloseHandler", mock.Anything).Return()
call := connection.On("NextReader")
call.Run(func(_ mock.Arguments) {
call.Return(websocket.TextMessage, <-messages, nil)
})
readingCtx, cancel := context.WithCancel(context.Background())
forwardingCtx := readingCtx
defer cancel()
reader := NewCodeOceanToRawReader(connection, readingCtx, forwardingCtx)
reader.Start()
read := make(chan bool)
//nolint:makezero // this is required here to make the Read call blocking
message := make([]byte, 10)
go func() {
_, err := reader.Read(message)
s.Require().NoError(err)
read <- true
}()
s.Run("Does not return immediately when there is no data", func() {
s.False(tests.ChannelReceivesSomething(read, tests.ShortTimeout))
})
s.Run("Returns when there is data available", func() {
messages <- strings.NewReader("Hello")
s.True(tests.ChannelReceivesSomething(read, tests.ShortTimeout))
})
}

View File

@@ -1,107 +0,0 @@
package ws
import (
"context"
"encoding/json"
"github.com/gorilla/websocket"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests"
"github.com/stretchr/testify/mock"
)
func (s *MainTestSuite) TestRawToCodeOceanWriter() {
connectionMock, messages := buildConnectionMock(&s.MemoryLeakTestSuite)
proxyCtx, cancel := context.WithCancel(context.Background())
defer cancel()
output := NewCodeOceanOutputWriter(connectionMock, proxyCtx, cancel)
defer output.Close(nil)
<-messages // start messages
s.Run("StdOut", func() {
testMessage := "testStdOut"
_, err := output.StdOut().Write([]byte(testMessage))
s.Require().NoError(err)
expected, err := json.Marshal(struct {
Type string `json:"type"`
Data string `json:"data"`
}{string(dto.WebSocketOutputStdout), testMessage})
s.Require().NoError(err)
s.Equal(expected, <-messages)
})
s.Run("StdErr", func() {
testMessage := "testStdErr"
_, err := output.StdErr().Write([]byte(testMessage))
s.Require().NoError(err)
expected, err := json.Marshal(struct {
Type string `json:"type"`
Data string `json:"data"`
}{string(dto.WebSocketOutputStderr), testMessage})
s.Require().NoError(err)
s.Equal(expected, <-messages)
})
}
type sendExitInfoTestCase struct {
name string
info *runner.ExitInfo
message dto.WebSocketMessage
}
func (s *MainTestSuite) TestCodeOceanOutputWriter_SendExitInfo() {
testCases := []sendExitInfoTestCase{
{"Timeout", &runner.ExitInfo{Err: runner.ErrorRunnerInactivityTimeout},
dto.WebSocketMessage{Type: dto.WebSocketMetaTimeout}},
{"Error", &runner.ExitInfo{Err: websocket.ErrCloseSent},
dto.WebSocketMessage{Type: dto.WebSocketOutputError, Data: "Error executing the request"}},
// CodeOcean expects this exact string in case of an OOM Killed runner.
{"Specific data for OOM Killed runner", &runner.ExitInfo{Err: runner.ErrOOMKilled},
dto.WebSocketMessage{Type: dto.WebSocketOutputError, Data: "the allocation was OOM Killed"}},
{"Exit", &runner.ExitInfo{Code: 21},
dto.WebSocketMessage{Type: dto.WebSocketExit, ExitCode: 21}},
}
for _, test := range testCases {
s.Run(test.name, func() {
connectionMock, messages := buildConnectionMock(&s.MemoryLeakTestSuite)
proxyCtx, cancel := context.WithCancel(context.Background())
defer cancel()
output := NewCodeOceanOutputWriter(connectionMock, proxyCtx, cancel)
<-messages // start messages
output.Close(test.info)
expected, err := json.Marshal(test.message)
s.Require().NoError(err)
msg := <-messages
s.Equal(expected, msg)
<-messages // close message
})
}
}
func buildConnectionMock(s *tests.MemoryLeakTestSuite) (conn *ConnectionMock, messages <-chan []byte) {
s.T().Helper()
message := make(chan []byte)
connectionMock := &ConnectionMock{}
connectionMock.On("WriteMessage", mock.AnythingOfType("int"), mock.AnythingOfType("[]uint8")).
Run(func(args mock.Arguments) {
m, ok := args.Get(1).([]byte)
s.Require().True(ok)
select {
case <-s.TestCtx.Done():
case message <- m:
}
}).
Return(nil)
connectionMock.On("CloseHandler").Return(nil)
connectionMock.On("SetCloseHandler", mock.Anything).Return()
connectionMock.On("Close").Return(nil)
return connectionMock, message
}

View File

@@ -1,108 +0,0 @@
// Code generated by mockery v2.13.1. DO NOT EDIT.
package ws
import (
io "io"
mock "github.com/stretchr/testify/mock"
)
// ConnectionMock is an autogenerated mock type for the Connection type
type ConnectionMock struct {
mock.Mock
}
// Close provides a mock function with given fields:
func (_m *ConnectionMock) Close() error {
ret := _m.Called()
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// CloseHandler provides a mock function with given fields:
func (_m *ConnectionMock) CloseHandler() func(int, string) error {
ret := _m.Called()
var r0 func(int, string) error
if rf, ok := ret.Get(0).(func() func(int, string) error); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(func(int, string) error)
}
}
return r0
}
// NextReader provides a mock function with given fields:
func (_m *ConnectionMock) NextReader() (int, io.Reader, error) {
ret := _m.Called()
var r0 int
if rf, ok := ret.Get(0).(func() int); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(int)
}
var r1 io.Reader
if rf, ok := ret.Get(1).(func() io.Reader); ok {
r1 = rf()
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(io.Reader)
}
}
var r2 error
if rf, ok := ret.Get(2).(func() error); ok {
r2 = rf()
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// SetCloseHandler provides a mock function with given fields: handler
func (_m *ConnectionMock) SetCloseHandler(handler func(int, string) error) {
_m.Called(handler)
}
// WriteMessage provides a mock function with given fields: messageType, data
func (_m *ConnectionMock) WriteMessage(messageType int, data []byte) error {
ret := _m.Called(messageType, data)
var r0 error
if rf, ok := ret.Get(0).(func(int, []byte) error); ok {
r0 = rf(messageType, data)
} else {
r0 = ret.Error(0)
}
return r0
}
type mockConstructorTestingTNewConnectionMock interface {
mock.TestingT
Cleanup(func())
}
// NewConnectionMock creates a new instance of ConnectionMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewConnectionMock(t mockConstructorTestingTNewConnectionMock) *ConnectionMock {
mock := &ConnectionMock{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -13,6 +13,7 @@ import (
"github.com/openHPI/poseidon/pkg/logging"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
"k8s.io/client-go/rest"
"net/url"
"os"
"reflect"
@ -66,6 +67,17 @@ var (
DNS: nil,
},
},
Kubernetes: Kubernetes{
Enabled: false,
KubeConfig: rest.Config{
Host: "",
TLSClientConfig: rest.TLSClientConfig{
Insecure: false,
ServerName: "",
},
BearerToken: "",
},
},
AWS: AWS{
Enabled: false,
Endpoint: "",
@ -134,6 +146,11 @@ type Nomad struct {
Network nomadApi.NetworkResource
}
type Kubernetes struct {
Enabled bool
KubeConfig rest.Config
}
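Since KubeConfig embeds client-go's rest.Config, the new section can be handed to client-go without further translation. A minimal sketch of that wiring, assuming the standard k8s.io/client-go dependency; the helper below is illustrative and not part of this commit:

    package config

    import (
        "fmt"

        "k8s.io/client-go/kubernetes"
    )

    // newKubernetesClientset builds a clientset from the Kubernetes section of
    // the global configuration. Hypothetical helper for illustration only.
    func newKubernetesClientset() (*kubernetes.Clientset, error) {
        clientset, err := kubernetes.NewForConfig(&Config.Kubernetes.KubeConfig)
        if err != nil {
            return nil, fmt.Errorf("couldn't create Kubernetes clientset: %w", err)
        }
        return clientset, nil
    }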
// URL returns the URL for the configured Nomad cluster.
func (n *Nomad) URL() *url.URL {
return parseURL(n.Address, n.Port, n.TLS.Active)
@ -179,13 +196,14 @@ type InfluxDB struct {
// configuration contains the complete configuration of Poseidon.
type configuration struct {
Server server
Nomad Nomad
Kubernetes Kubernetes
AWS AWS
Logger Logger
Profiling Profiling
Sentry sentry.ClientOptions
InfluxDB InfluxDB
}
// InitConfig merges configuration options from environment variables and

View File

@ -1,223 +0,0 @@
package config
import (
"fmt"
"github.com/openHPI/poseidon/tests"
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"os"
"path/filepath"
"reflect"
"testing"
)
var (
getServerPort = func(c *configuration) interface{} { return c.Server.Port }
getNomadToken = func(c *configuration) interface{} { return c.Nomad.Token }
getNomadTLSActive = func(c *configuration) interface{} { return c.Nomad.TLS.Active }
getAWSFunctions = func(c *configuration) interface{} { return c.AWS.Functions }
)
func newTestConfiguration() *configuration {
return &configuration{
Server: server{
Address: "127.0.0.1",
Port: 3000,
},
Nomad: Nomad{
Address: "127.0.0.2",
Port: 4646,
Token: "SECRET",
TLS: TLS{
Active: false,
},
},
Logger: Logger{
Level: "INFO",
},
}
}
func (c *configuration) getReflectValue() reflect.Value {
return reflect.ValueOf(c).Elem()
}
// writeConfigurationFile creates a file on disk and returns the path to it.
func writeConfigurationFile(t *testing.T, name string, content []byte) string {
t.Helper()
directory := t.TempDir()
filePath := filepath.Join(directory, name)
file, err := os.Create(filePath)
if err != nil {
t.Fatal("Could not create config file")
}
defer file.Close()
_, err = file.Write(content)
require.NoError(t, err)
return filePath
}
type MainTestSuite struct {
tests.MemoryLeakTestSuite
}
func TestMainTestSuite(t *testing.T) {
suite.Run(t, new(MainTestSuite))
}
func (s *MainTestSuite) TestCallingInitConfigTwiceReturnsError() {
configurationInitialized = false
err := InitConfig()
s.NoError(err)
err = InitConfig()
s.Error(err)
}
func (s *MainTestSuite) TestCallingInitConfigTwiceDoesNotChangeConfig() {
configurationInitialized = false
err := InitConfig()
s.Require().NoError(err)
Config = newTestConfiguration()
filePath := writeConfigurationFile(s.T(), "test.yaml", []byte("server:\n port: 5000\n"))
oldArgs := os.Args
defer func() { os.Args = oldArgs }()
os.Args = append(os.Args, "-config", filePath)
err = InitConfig()
s.Require().Error(err)
s.Equal(3000, Config.Server.Port)
}
func (s *MainTestSuite) TestReadEnvironmentVariables() {
var environmentTests = []struct {
variableSuffix string
valueToSet string
expectedValue interface{}
getTargetField func(*configuration) interface{}
}{
{"SERVER_PORT", "4000", 4000, getServerPort},
{"SERVER_PORT", "hello", 3000, getServerPort},
{"NOMAD_TOKEN", "ACCESS", "ACCESS", getNomadToken},
{"NOMAD_TLS_ACTIVE", "true", true, getNomadTLSActive},
{"NOMAD_TLS_ACTIVE", "hello", false, getNomadTLSActive},
{"AWS_FUNCTIONS", "java11Exec go118Exec", []string{"java11Exec", "go118Exec"}, getAWSFunctions},
}
prefix := "POSEIDON_TEST"
for _, testCase := range environmentTests {
config := newTestConfiguration()
environmentVariable := fmt.Sprintf("%s_%s", prefix, testCase.variableSuffix)
_ = os.Setenv(environmentVariable, testCase.valueToSet)
readFromEnvironment(prefix, config.getReflectValue())
_ = os.Unsetenv(environmentVariable)
s.Equal(testCase.expectedValue, testCase.getTargetField(config))
}
}
func (s *MainTestSuite) TestReadEnvironmentIgnoresNonPointerValue() {
config := newTestConfiguration()
_ = os.Setenv("POSEIDON_TEST_SERVER_PORT", "4000")
readFromEnvironment("POSEIDON_TEST", reflect.ValueOf(config))
_ = os.Unsetenv("POSEIDON_TEST_SERVER_PORT")
s.Equal(3000, config.Server.Port)
}
func (s *MainTestSuite) TestReadEnvironmentIgnoresNotSupportedType() {
config := &struct{ Timeout float64 }{1.0}
_ = os.Setenv("POSEIDON_TEST_TIMEOUT", "2.5")
readFromEnvironment("POSEIDON_TEST", reflect.ValueOf(config).Elem())
_ = os.Unsetenv("POSEIDON_TEST_TIMEOUT")
s.Equal(1.0, config.Timeout)
}
func (s *MainTestSuite) TestUnsetEnvironmentVariableDoesNotChangeConfig() {
config := newTestConfiguration()
readFromEnvironment("POSEIDON_TEST", config.getReflectValue())
s.Equal("INFO", config.Logger.Level)
}
func (s *MainTestSuite) TestReadYamlConfigFile() {
var yamlTests = []struct {
content []byte
expectedValue interface{}
getTargetField func(*configuration) interface{}
}{
{[]byte("server:\n port: 5000\n"), 5000, getServerPort},
{[]byte("nomad:\n token: ACCESS\n"), "ACCESS", getNomadToken},
{[]byte("nomad:\n tls:\n active: true\n"), true, getNomadTLSActive},
{[]byte(""), false, getNomadTLSActive},
{[]byte("nomad:\n token:\n"), "SECRET", getNomadToken},
{[]byte("aws:\n functions:\n - java11Exec\n - go118Exec\n"),
[]string{"java11Exec", "go118Exec"}, getAWSFunctions},
}
for _, testCase := range yamlTests {
config := newTestConfiguration()
config.mergeYaml(testCase.content)
s.Equal(testCase.expectedValue, testCase.getTargetField(config))
}
}
func (s *MainTestSuite) TestInvalidYamlExitsProgram() {
logger, hook := test.NewNullLogger()
// this function is used when calling log.Fatal() and
// prevents the program from exiting during this test
logger.ExitFunc = func(code int) {}
log = logger.WithField("package", "config_test")
config := newTestConfiguration()
config.mergeYaml([]byte("logger: level: DEBUG"))
s.Equal(1, len(hook.Entries))
s.Equal(logrus.FatalLevel, hook.LastEntry().Level)
}
func (s *MainTestSuite) TestReadConfigFileOverwritesConfig() {
Config = newTestConfiguration()
filePath := writeConfigurationFile(s.T(), "test.yaml", []byte("server:\n port: 5000\n"))
oldArgs := os.Args
defer func() { os.Args = oldArgs }()
os.Args = append(os.Args, "-config", filePath)
configurationInitialized = false
err := InitConfig()
s.Require().NoError(err)
s.Equal(5000, Config.Server.Port)
}
func (s *MainTestSuite) TestReadNonExistingConfigFileDoesNotOverwriteConfig() {
Config = newTestConfiguration()
oldArgs := os.Args
defer func() { os.Args = oldArgs }()
os.Args = append(os.Args, "-config", "file_does_not_exist.yaml")
configurationInitialized = false
err := InitConfig()
s.Require().NoError(err)
s.Equal(3000, Config.Server.Port)
}
func (s *MainTestSuite) TestURLParsing() {
var urlTests = []struct {
address string
port int
tls bool
expectedScheme string
expectedHost string
}{
{"localhost", 3000, false, "http", "localhost:3000"},
{"127.0.0.1", 4000, true, "https", "127.0.0.1:4000"},
}
for _, testCase := range urlTests {
url := parseURL(testCase.address, testCase.port, testCase.tls)
s.Equal(testCase.expectedScheme, url.Scheme)
s.Equal(testCase.expectedHost, url.Host)
}
}
func (s *MainTestSuite) TestNomadAPIURL() {
config := newTestConfiguration()
s.Equal("http", config.Nomad.URL().Scheme)
s.Equal("127.0.0.2:4646", config.Nomad.URL().Host)
}
func (s *MainTestSuite) TestPoseidonAPIURL() {
config := newTestConfiguration()
s.Equal("http", config.Server.URL().Scheme)
s.Equal("127.0.0.1:3000", config.Server.URL().Host)
}

View File

@ -1,121 +0,0 @@
package environment
import (
"encoding/json"
"fmt"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
)
type AWSEnvironment struct {
id dto.EnvironmentID
awsEndpoint string
onDestroyRunner runner.DestroyRunnerHandler
}
func NewAWSEnvironment(onDestroyRunner runner.DestroyRunnerHandler) *AWSEnvironment {
return &AWSEnvironment{onDestroyRunner: onDestroyRunner}
}
func (a *AWSEnvironment) MarshalJSON() ([]byte, error) {
res, err := json.Marshal(dto.ExecutionEnvironmentData{
ID: int(a.ID()),
ExecutionEnvironmentRequest: dto.ExecutionEnvironmentRequest{Image: a.Image()},
})
if err != nil {
return res, fmt.Errorf("couldn't marshal aws execution environment: %w", err)
}
return res, nil
}
func (a *AWSEnvironment) ID() dto.EnvironmentID {
return a.id
}
func (a *AWSEnvironment) SetID(id dto.EnvironmentID) {
a.id = id
}
// Image is used to specify the AWS Endpoint Poseidon is connecting to.
func (a *AWSEnvironment) Image() string {
return a.awsEndpoint
}
func (a *AWSEnvironment) SetImage(awsEndpoint string) {
a.awsEndpoint = awsEndpoint
}
func (a *AWSEnvironment) Delete(_ runner.DestroyReason) error {
return nil
}
func (a *AWSEnvironment) Sample() (r runner.Runner, ok bool) {
workload, err := runner.NewAWSFunctionWorkload(a, a.onDestroyRunner)
if err != nil {
return nil, false
}
return workload, true
}
// The following methods are not supported at this moment.
// IdleRunnerCount is not supported as we have no information about the AWS-managed prewarming pool.
// For the Poseidon health check, we default to 1.
func (a *AWSEnvironment) IdleRunnerCount() uint {
return 1
}
// PrewarmingPoolSize is neither supported nor required. It is handled transparently by AWS.
// For easy compatibility with CodeOcean, a static value of 1 is returned.
func (a *AWSEnvironment) PrewarmingPoolSize() uint {
return 1
}
// SetPrewarmingPoolSize is neither supported nor required. It is handled transparently by AWS.
func (a *AWSEnvironment) SetPrewarmingPoolSize(_ uint) {}
// ApplyPrewarmingPoolSize is neither supported nor required. It is handled transparently by AWS.
func (a *AWSEnvironment) ApplyPrewarmingPoolSize() error {
return nil
}
// CPULimit is disabled as one can only set the memory limit with AWS Lambda.
func (a *AWSEnvironment) CPULimit() uint {
return 0
}
// SetCPULimit is disabled as one can only set the memory limit with AWS Lambda.
func (a *AWSEnvironment) SetCPULimit(_ uint) {}
func (a *AWSEnvironment) MemoryLimit() uint {
const memorySizeOfDeployedLambdaFunction = 2048 // configured in /deploy/aws/template.yaml
return memorySizeOfDeployedLambdaFunction
}
func (a *AWSEnvironment) SetMemoryLimit(_ uint) {
panic("not supported")
}
func (a *AWSEnvironment) NetworkAccess() (enabled bool, mappedPorts []uint16) {
return true, nil
}
func (a *AWSEnvironment) SetNetworkAccess(_ bool, _ []uint16) {
panic("not supported")
}
func (a *AWSEnvironment) SetConfigFrom(_ runner.ExecutionEnvironment) {
panic("not supported")
}
func (a *AWSEnvironment) Register() error {
panic("not supported")
}
func (a *AWSEnvironment) AddRunner(_ runner.Runner) {
panic("not supported")
}
func (a *AWSEnvironment) DeleteRunner(_ string) (r runner.Runner, ok bool) {
panic("not supported")
}

View File

@ -1,67 +0,0 @@
package environment
import (
"context"
"fmt"
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
)
// AWSEnvironmentManager contains no functionality at the moment.
// IMPROVE: Create Lambda functions dynamically.
type AWSEnvironmentManager struct {
*AbstractManager
}
func NewAWSEnvironmentManager(runnerManager runner.Manager) *AWSEnvironmentManager {
return &AWSEnvironmentManager{&AbstractManager{nil, runnerManager}}
}
func (a *AWSEnvironmentManager) List(fetch bool) ([]runner.ExecutionEnvironment, error) {
list, err := a.NextHandler().List(fetch)
if err != nil {
return nil, fmt.Errorf("aws wrapped: %w", err)
}
return append(list, a.runnerManager.ListEnvironments()...), nil
}
func (a *AWSEnvironmentManager) Get(id dto.EnvironmentID, fetch bool) (runner.ExecutionEnvironment, error) {
e, ok := a.runnerManager.GetEnvironment(id)
if ok {
return e, nil
} else {
e, err := a.NextHandler().Get(id, fetch)
if err != nil {
return nil, fmt.Errorf("aws wrapped: %w", err)
}
return e, nil
}
}
func (a *AWSEnvironmentManager) CreateOrUpdate(
id dto.EnvironmentID, request dto.ExecutionEnvironmentRequest, ctx context.Context) (bool, error) {
if !isAWSEnvironment(request) {
isCreated, err := a.NextHandler().CreateOrUpdate(id, request, ctx)
if err != nil {
return false, fmt.Errorf("aws wrapped: %w", err)
}
return isCreated, nil
}
_, ok := a.runnerManager.GetEnvironment(id)
e := NewAWSEnvironment(a.runnerManager.Return)
e.SetID(id)
e.SetImage(request.Image)
a.runnerManager.StoreEnvironment(e)
return !ok, nil
}
func isAWSEnvironment(request dto.ExecutionEnvironmentRequest) bool {
for _, function := range config.Config.AWS.Functions {
if request.Image == function {
return true
}
}
return false
}

View File

@ -1,122 +0,0 @@
package environment
import (
"context"
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"testing"
)
type MainTestSuite struct {
tests.MemoryLeakTestSuite
}
func TestMainTestSuite(t *testing.T) {
suite.Run(t, new(MainTestSuite))
}
func (s *MainTestSuite) TestAWSEnvironmentManager_CreateOrUpdate() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
runnerManager := runner.NewAWSRunnerManager(ctx)
m := NewAWSEnvironmentManager(runnerManager)
uniqueImage := "java11Exec"
s.Run("can create default Java environment", func() {
config.Config.AWS.Functions = []string{uniqueImage}
_, err := m.CreateOrUpdate(
tests.AnotherEnvironmentIDAsInteger, dto.ExecutionEnvironmentRequest{Image: uniqueImage}, context.Background())
s.NoError(err)
})
s.Run("can retrieve added environment", func() {
environment, err := m.Get(tests.AnotherEnvironmentIDAsInteger, false)
s.NoError(err)
s.Equal(environment.Image(), uniqueImage)
})
s.Run("non-handleable requests are forwarded to the next manager", func() {
nextHandler := &ManagerHandlerMock{}
nextHandler.On("CreateOrUpdate", mock.AnythingOfType("dto.EnvironmentID"),
mock.AnythingOfType("dto.ExecutionEnvironmentRequest"), mock.Anything).Return(true, nil)
m.SetNextHandler(nextHandler)
request := dto.ExecutionEnvironmentRequest{}
_, err := m.CreateOrUpdate(tests.DefaultEnvironmentIDAsInteger, request, context.Background())
s.NoError(err)
nextHandler.AssertCalled(s.T(), "CreateOrUpdate",
dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger), request, mock.Anything)
})
}
func (s *MainTestSuite) TestAWSEnvironmentManager_Get() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
runnerManager := runner.NewAWSRunnerManager(ctx)
m := NewAWSEnvironmentManager(runnerManager)
s.Run("Calls next handler when not found", func() {
nextHandler := &ManagerHandlerMock{}
nextHandler.On("Get", mock.AnythingOfType("dto.EnvironmentID"), mock.AnythingOfType("bool")).
Return(nil, nil)
m.SetNextHandler(nextHandler)
_, err := m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.NoError(err)
nextHandler.AssertCalled(s.T(), "Get", dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger), false)
})
s.Run("Returns error when not found", func() {
nextHandler := &AbstractManager{nil, nil}
m.SetNextHandler(nextHandler)
_, err := m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.ErrorIs(err, runner.ErrRunnerNotFound)
})
s.Run("Returns environment when it was added before", func() {
expectedEnvironment := NewAWSEnvironment(nil)
expectedEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
runnerManager.StoreEnvironment(expectedEnvironment)
environment, err := m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.NoError(err)
s.Equal(expectedEnvironment, environment)
})
}
func (s *MainTestSuite) TestAWSEnvironmentManager_List() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
runnerManager := runner.NewAWSRunnerManager(ctx)
m := NewAWSEnvironmentManager(runnerManager)
s.Run("also returns environments of the rest of the manager chain", func() {
nextHandler := &ManagerHandlerMock{}
existingEnvironment := NewAWSEnvironment(nil)
nextHandler.On("List", mock.AnythingOfType("bool")).
Return([]runner.ExecutionEnvironment{existingEnvironment}, nil)
m.SetNextHandler(nextHandler)
environments, err := m.List(false)
s.NoError(err)
s.Require().Len(environments, 1)
s.Contains(environments, existingEnvironment)
})
m.SetNextHandler(nil)
s.Run("Returns added environment", func() {
localEnvironment := NewAWSEnvironment(nil)
localEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
runnerManager.StoreEnvironment(localEnvironment)
environments, err := m.List(false)
s.NoError(err)
s.Len(environments, 1)
s.Contains(environments, localEnvironment)
})
}

View File

@ -0,0 +1,175 @@
package environment
import (
"context"
"fmt"
poseidonK8s "github.com/openHPI/poseidon/internal/kubernetes"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/monitoring"
"github.com/openHPI/poseidon/pkg/storage"
appsv1 "k8s.io/api/apps/v1"
"time"
)
type KubernetesEnvironment struct {
apiClient *poseidonK8s.ExecutorAPI
jobHCL string
deployment *appsv1.Deployment
idleRunners storage.Storage[runner.Runner]
ctx context.Context
cancel context.CancelFunc
}
func (k KubernetesEnvironment) MarshalJSON() ([]byte, error) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) ID() dto.EnvironmentID {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetID(id dto.EnvironmentID) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) PrewarmingPoolSize() uint {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetPrewarmingPoolSize(count uint) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) ApplyPrewarmingPoolSize() error {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) CPULimit() uint {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetCPULimit(limit uint) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) MemoryLimit() uint {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetMemoryLimit(limit uint) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) Image() string {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetImage(image string) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) NetworkAccess() (bool, []uint16) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetNetworkAccess(allow bool, ports []uint16) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) SetConfigFrom(environment runner.ExecutionEnvironment) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) Register() error {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) Delete(reason runner.DestroyReason) error {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) Sample() (r runner.Runner, ok bool) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) AddRunner(r runner.Runner) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) DeleteRunner(id string) (r runner.Runner, ok bool) {
//TODO implement me
panic("implement me")
}
func (k KubernetesEnvironment) IdleRunnerCount() uint {
//TODO implement me
panic("implement me")
}
func NewKubernetesEnvironmentFromRequest(
apiClient poseidonK8s.ExecutorAPI, jobHCL string, id dto.EnvironmentID, request dto.ExecutionEnvironmentRequest) (
*KubernetesEnvironment, error) {
environment, err := NewKubernetesEnvironment(id, apiClient, jobHCL)
if err != nil {
return nil, err
}
environment.SetID(id)
// Set options according to request
environment.SetPrewarmingPoolSize(request.PrewarmingPoolSize)
environment.SetCPULimit(request.CPULimit)
environment.SetMemoryLimit(request.MemoryLimit)
environment.SetImage(request.Image)
environment.SetNetworkAccess(request.NetworkAccess, request.ExposedPorts)
return environment, nil
}
func NewKubernetesEnvironment(id dto.EnvironmentID, apiClient poseidonK8s.ExecutorAPI, jobHCL string) (*KubernetesEnvironment, error) {
job, err := parseDeployment(jobHCL)
if err != nil {
return nil, fmt.Errorf("error parsing Nomad job: %w", err)
}
ctx, cancel := context.WithCancel(context.Background())
e := &KubernetesEnvironment{&apiClient, jobHCL, job, nil, ctx, cancel}
e.idleRunners = storage.NewMonitoredLocalStorage[runner.Runner](monitoring.MeasurementIdleRunnerNomad,
runner.MonitorEnvironmentID[runner.Runner](id), time.Minute, ctx)
return e, nil
}
// TODO: missing implementation. parseDeployment should unmarshal the passed
// specification into a Deployment; for now it returns an empty one so that
// the stub adapter compiles.
func parseDeployment(jobHCL string) (*appsv1.Deployment, error) {
deployment := appsv1.Deployment{}
return &deployment, nil
}
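One way to fill this gap, sketched under the assumption that the template is provided as Kubernetes YAML rather than Nomad HCL: sigs.k8s.io/yaml (not yet imported by this file) unmarshals such a spec directly into an appsv1.Deployment. The helper name is hypothetical.

    package environment

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
        "sigs.k8s.io/yaml"
    )

    // parseDeploymentYAML is a hypothetical replacement for parseDeployment
    // that decodes a YAML deployment specification. Illustrative sketch only.
    func parseDeploymentYAML(spec string) (*appsv1.Deployment, error) {
        deployment := &appsv1.Deployment{}
        if err := yaml.Unmarshal([]byte(spec), deployment); err != nil {
            return nil, fmt.Errorf("couldn't parse deployment spec: %w", err)
        }
        return deployment, nil
    }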

View File

@ -0,0 +1,297 @@
package environment
import (
"context"
"fmt"
poseidonK8s "github.com/openHPI/poseidon/internal/kubernetes"
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/logging"
"github.com/openHPI/poseidon/pkg/monitoring"
"github.com/openHPI/poseidon/pkg/storage"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"strconv"
"time"
)
type KubernetesEnvironmentManager struct {
*AbstractManager
api poseidonK8s.ExecutorAPI
templateEnvironmentHCL string
}
func NewKubernetesEnvironmentManager(
runnerManager runner.Manager,
apiClient *poseidonK8s.ExecutorAPI,
templateJobFile string,
) (*KubernetesEnvironmentManager, error) {
if err := loadTemplateEnvironmentJobHCL(templateJobFile); err != nil {
return nil, err
}
m := &KubernetesEnvironmentManager{
AbstractManager: &AbstractManager{nil, runnerManager},
api: *apiClient,
templateEnvironmentHCL: templateEnvironmentJobHCL,
}
return m, nil
}
func (k *KubernetesEnvironmentManager) SetNextHandler(next ManagerHandler) {
k.nextHandler = next
}
func (k *KubernetesEnvironmentManager) NextHandler() ManagerHandler {
if k.HasNextHandler() {
return k.nextHandler
} else {
return &AbstractManager{}
}
}
func (k *KubernetesEnvironmentManager) HasNextHandler() bool {
return k.nextHandler != nil
}
// List returns all Kubernetes-based environments.
func (k *KubernetesEnvironmentManager) List(fetch bool) ([]runner.ExecutionEnvironment, error) {
if fetch {
if err := k.fetchEnvironments(); err != nil {
return nil, err
}
}
return k.runnerManager.ListEnvironments(), nil
}
func (k *KubernetesEnvironmentManager) fetchEnvironments() error {
remoteDeploymentResponse, err := k.api.LoadEnvironmentJobs()
if err != nil {
return fmt.Errorf("failed fetching environments: %w", err)
}
remoteDeployments := make(map[string]appsv1.Deployment)
// Update local environments from remote environments.
for _, deployment := range remoteDeploymentResponse {
remoteDeployments[deployment.Name] = *deployment
// Convert the deployment name (the job id) into an integer environment id.
intIdentifier, err := strconv.Atoi(deployment.Name)
if err != nil {
log.WithError(err).Warn("Failed to convert job name to int")
continue
}
id := dto.EnvironmentID(intIdentifier)
if localEnvironment, ok := k.runnerManager.GetEnvironment(id); ok {
fetchedEnvironment := newKubernetesEnvironmentFromJob(deployment, &k.api)
localEnvironment.SetConfigFrom(fetchedEnvironment)
// We destroy only this (second) local reference to the environment.
if err = fetchedEnvironment.Delete(runner.ErrDestroyedAndReplaced); err != nil {
log.WithError(err).Warn("Failed to remove environment locally")
}
} else {
k.runnerManager.StoreEnvironment(newKubernetesEnvironmentFromJob(deployment, &k.api))
}
}
// Remove local environments that are not remote environments.
for _, localEnvironment := range k.runnerManager.ListEnvironments() {
if _, ok := remoteDeployments[localEnvironment.ID().ToString()]; !ok {
if err := localEnvironment.Delete(runner.ErrLocalDestruction); err != nil {
log.WithError(err).Warn("Failed to remove environment locally")
}
}
}
return nil
}
// newKubernetesEnvironmentFromJob creates a Kubernetes environment from the passed deployment definition.
func newKubernetesEnvironmentFromJob(deployment *appsv1.Deployment, apiClient *poseidonK8s.ExecutorAPI) *KubernetesEnvironment {
ctx, cancel := context.WithCancel(context.Background())
e := &KubernetesEnvironment{
apiClient: apiClient,
jobHCL: templateEnvironmentJobHCL,
deployment: deployment,
ctx: ctx,
cancel: cancel,
}
e.idleRunners = storage.NewMonitoredLocalStorage[runner.Runner](monitoring.MeasurementIdleRunnerNomad,
runner.MonitorEnvironmentID[runner.Runner](e.ID()), time.Minute, ctx)
return e
}
// Get retrieves the Kubernetes environment with the given id.
func (k *KubernetesEnvironmentManager) Get(id dto.EnvironmentID, fetch bool) (executionEnvironment runner.ExecutionEnvironment, err error) {
executionEnvironment, ok := k.runnerManager.GetEnvironment(id)
if fetch {
fetchedEnvironment, err := fetchK8sEnvironment(id, k.api)
switch {
case err != nil:
return nil, err
case fetchedEnvironment == nil:
_, err = k.Delete(id)
if err != nil {
return nil, err
}
ok = false
case !ok:
k.runnerManager.StoreEnvironment(fetchedEnvironment)
executionEnvironment = fetchedEnvironment
ok = true
default:
executionEnvironment.SetConfigFrom(fetchedEnvironment)
// We destroy only this (second) local reference to the environment.
err = fetchedEnvironment.Delete(runner.ErrDestroyedAndReplaced)
if err != nil {
log.WithError(err).Warn("Failed to remove environment locally")
}
}
}
if !ok {
err = runner.ErrUnknownExecutionEnvironment
}
return executionEnvironment, err
}
// CreateOrUpdate creates or updates an environment in Kubernetes
func (k *KubernetesEnvironmentManager) CreateOrUpdate(
id dto.EnvironmentID, request dto.ExecutionEnvironmentRequest, ctx context.Context) (created bool, err error) {
// Check if execution environment is already existing (in the local memory).
environment, isExistingEnvironment := k.runnerManager.GetEnvironment(id)
if isExistingEnvironment {
// Remove existing environment to force downloading the newest Docker image.
// See https://github.com/openHPI/poseidon/issues/69
err = environment.Delete(runner.ErrEnvironmentUpdated)
if err != nil {
return false, fmt.Errorf("failed to remove the environment: %w", err)
}
}
// Create a new environment with the given request options.
environment, err = NewKubernetesEnvironmentFromRequest(k.api, k.templateEnvironmentHCL, id, request)
if err != nil {
return false, fmt.Errorf("error creating Nomad environment: %w", err)
}
// Keep a copy of environment specification in memory.
k.runnerManager.StoreEnvironment(environment)
// Register the template deployment with Kubernetes.
logging.StartSpan("env.update.register", "Register Environment", ctx, func(_ context.Context) {
err = environment.Register()
})
if err != nil {
return false, fmt.Errorf("error registering template deployment in API: %w", err)
}
// Launch idle runners based on the template deployment.
logging.StartSpan("env.update.poolsize", "Apply Prewarming Pool Size", ctx, func(_ context.Context) {
err = environment.ApplyPrewarmingPoolSize()
})
if err != nil {
return false, fmt.Errorf("error scaling template job in API: %w", err)
}
return !isExistingEnvironment, nil
}
// Statistics returns statistics for all Kubernetes environments.
func (k *KubernetesEnvironmentManager) Statistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData {
// TODO: not implemented yet; no statistics are collected so far.
return map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData{}
}
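A sketch of what a real implementation could look like as a drop-in for this file, assuming the dto struct exposes fields named ID, PrewarmingPoolSize, and IdleRunners; used-runner counts are omitted because the stub adapter does not track them yet:

    // statisticsSketch shows a possible Statistics implementation.
    // Hypothetical: assumes the listed dto fields exist with these names.
    func (k *KubernetesEnvironmentManager) statisticsSketch() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData {
        statistics := map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData{}
        for _, environment := range k.runnerManager.ListEnvironments() {
            statistics[environment.ID()] = &dto.StatisticalExecutionEnvironmentData{
                ID:                 int(environment.ID()),
                PrewarmingPoolSize: environment.PrewarmingPoolSize(),
                IdleRunners:        environment.IdleRunnerCount(),
            }
        }
        return statistics
    }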
// MapExecutionEnvironmentRequestToDeployment maps ExecutionEnvironmentRequest to a Kubernetes Deployment
func MapExecutionEnvironmentRequestToDeployment(req dto.ExecutionEnvironmentRequest, environmentID string) *appsv1.Deployment {
// Create the Deployment object
deployment := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: environmentID, // Set the environment ID as the name of the deployment
Labels: map[string]string{
"environment-id": environmentID,
},
},
Spec: appsv1.DeploymentSpec{
Replicas: int32Ptr(int32(req.PrewarmingPoolSize)), // Use PrewarmingPoolSize to set the number of replicas
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"environment-id": environmentID,
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"environment-id": environmentID,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "runner-container",
Image: req.Image, // Map the image to the container
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse(strconv.Itoa(int(req.CPULimit))), // Map CPU request
"memory": resource.MustParse(strconv.Itoa(int(req.MemoryLimit)) + "Mi"), // Map Memory request
},
Limits: v1.ResourceList{
"cpu": resource.MustParse(strconv.Itoa(int(req.CPULimit))), // Map CPU limit
"memory": resource.MustParse(strconv.Itoa(int(req.MemoryLimit)) + "Mi"), // Map Memory limit
},
},
},
},
},
},
},
}
// Handle network access and exposed ports
if req.NetworkAccess {
var containerPorts []v1.ContainerPort
for _, port := range req.ExposedPorts {
containerPorts = append(containerPorts, v1.ContainerPort{
ContainerPort: int32(port),
})
}
deployment.Spec.Template.Spec.Containers[0].Ports = containerPorts
}
return deployment
}
// Helper function to return a pointer to an int32
func int32Ptr(i int32) *int32 {
return &i
}
func fetchK8sEnvironment(id dto.EnvironmentID, apiClient poseidonK8s.ExecutorAPI) (runner.ExecutionEnvironment, error) {
environments, err := apiClient.LoadEnvironmentJobs()
if err != nil {
return nil, fmt.Errorf("error fetching the environment jobs: %w", err)
}
var fetchedEnvironment runner.ExecutionEnvironment
for _, deployment := range environments {
environmentID, err := nomad.EnvironmentIDFromTemplateJobID(deployment.Name)
if err != nil {
log.WithError(err).Warn("Cannot parse environment id of loaded environment")
continue
}
if id == environmentID {
fetchedEnvironment = newKubernetesEnvironmentFromJob(deployment, &apiClient)
}
}
return fetchedEnvironment, nil
}
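For completeness, a usage sketch of MapExecutionEnvironmentRequestToDeployment together with a configured client-go clientset; the namespace, image, and clientset wiring are assumptions, since the stub adapter does not create deployments itself yet:

    package environment

    import (
        "context"

        "github.com/openHPI/poseidon/pkg/dto"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // createDeploymentExample maps a request to a Deployment and submits it.
    // Hypothetical example; namespace "poseidon" and the image are assumptions.
    func createDeploymentExample(ctx context.Context, clientset *kubernetes.Clientset) error {
        request := dto.ExecutionEnvironmentRequest{
            PrewarmingPoolSize: 2,
            CPULimit:           1,
            MemoryLimit:        256,
            Image:              "openhpi/co_execenv_python:latest",
            NetworkAccess:      false,
        }
        deployment := MapExecutionEnvironmentRequestToDeployment(request, "10")
        _, err := clientset.AppsV1().Deployments("poseidon").
            Create(ctx, deployment, metav1.CreateOptions{})
        return err
    }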

View File

@ -1,171 +0,0 @@
// Code generated by mockery v2.16.0. DO NOT EDIT.
package environment
import (
context "context"
dto "github.com/openHPI/poseidon/pkg/dto"
mock "github.com/stretchr/testify/mock"
runner "github.com/openHPI/poseidon/internal/runner"
)
// ManagerHandlerMock is an autogenerated mock type for the ManagerHandler type
type ManagerHandlerMock struct {
mock.Mock
}
// CreateOrUpdate provides a mock function with given fields: id, request, ctx
func (_m *ManagerHandlerMock) CreateOrUpdate(id dto.EnvironmentID, request dto.ExecutionEnvironmentRequest, ctx context.Context) (bool, error) {
ret := _m.Called(id, request, ctx)
var r0 bool
if rf, ok := ret.Get(0).(func(dto.EnvironmentID, dto.ExecutionEnvironmentRequest, context.Context) bool); ok {
r0 = rf(id, request, ctx)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(dto.EnvironmentID, dto.ExecutionEnvironmentRequest, context.Context) error); ok {
r1 = rf(id, request, ctx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Delete provides a mock function with given fields: id
func (_m *ManagerHandlerMock) Delete(id dto.EnvironmentID) (bool, error) {
ret := _m.Called(id)
var r0 bool
if rf, ok := ret.Get(0).(func(dto.EnvironmentID) bool); ok {
r0 = rf(id)
} else {
r0 = ret.Get(0).(bool)
}
var r1 error
if rf, ok := ret.Get(1).(func(dto.EnvironmentID) error); ok {
r1 = rf(id)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Get provides a mock function with given fields: id, fetch
func (_m *ManagerHandlerMock) Get(id dto.EnvironmentID, fetch bool) (runner.ExecutionEnvironment, error) {
ret := _m.Called(id, fetch)
var r0 runner.ExecutionEnvironment
if rf, ok := ret.Get(0).(func(dto.EnvironmentID, bool) runner.ExecutionEnvironment); ok {
r0 = rf(id, fetch)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(runner.ExecutionEnvironment)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(dto.EnvironmentID, bool) error); ok {
r1 = rf(id, fetch)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// HasNextHandler provides a mock function with given fields:
func (_m *ManagerHandlerMock) HasNextHandler() bool {
ret := _m.Called()
var r0 bool
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// List provides a mock function with given fields: fetch
func (_m *ManagerHandlerMock) List(fetch bool) ([]runner.ExecutionEnvironment, error) {
ret := _m.Called(fetch)
var r0 []runner.ExecutionEnvironment
if rf, ok := ret.Get(0).(func(bool) []runner.ExecutionEnvironment); ok {
r0 = rf(fetch)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]runner.ExecutionEnvironment)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(bool) error); ok {
r1 = rf(fetch)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NextHandler provides a mock function with given fields:
func (_m *ManagerHandlerMock) NextHandler() ManagerHandler {
ret := _m.Called()
var r0 ManagerHandler
if rf, ok := ret.Get(0).(func() ManagerHandler); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(ManagerHandler)
}
}
return r0
}
// SetNextHandler provides a mock function with given fields: next
func (_m *ManagerHandlerMock) SetNextHandler(next ManagerHandler) {
_m.Called(next)
}
// Statistics provides a mock function with given fields:
func (_m *ManagerHandlerMock) Statistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData {
ret := _m.Called()
var r0 map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData
if rf, ok := ret.Get(0).(func() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData)
}
}
return r0
}
type mockConstructorTestingTNewManagerHandlerMock interface {
mock.TestingT
Cleanup(func())
}
// NewManagerHandlerMock creates a new instance of ManagerHandlerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewManagerHandlerMock(t mockConstructorTestingTNewManagerHandlerMock) *ManagerHandlerMock {
mock := &ManagerHandlerMock{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -1,265 +0,0 @@
package environment
import (
"context"
"fmt"
nomadApi "github.com/hashicorp/nomad/api"
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/storage"
"github.com/openHPI/poseidon/tests"
"github.com/openHPI/poseidon/tests/helpers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"testing"
"time"
)
func (s *MainTestSuite) TestConfigureNetworkCreatesNewNetworkWhenNoNetworkExists() {
_, job := helpers.CreateTemplateJob()
defaultTaskGroup := nomad.FindAndValidateDefaultTaskGroup(job)
environment := &NomadEnvironment{nil, "", job, nil, context.Background(), nil}
if s.Equal(0, len(defaultTaskGroup.Networks)) {
environment.SetNetworkAccess(true, []uint16{})
s.Equal(1, len(defaultTaskGroup.Networks))
}
}
func (s *MainTestSuite) TestConfigureNetworkDoesNotCreateNewNetworkWhenNetworkExists() {
_, job := helpers.CreateTemplateJob()
defaultTaskGroup := nomad.FindAndValidateDefaultTaskGroup(job)
environment := &NomadEnvironment{nil, "", job, nil, context.Background(), nil}
networkResource := config.Config.Nomad.Network
defaultTaskGroup.Networks = []*nomadApi.NetworkResource{&networkResource}
if s.Equal(1, len(defaultTaskGroup.Networks)) {
environment.SetNetworkAccess(true, []uint16{})
s.Equal(1, len(defaultTaskGroup.Networks))
s.Equal(&networkResource, defaultTaskGroup.Networks[0])
}
}
func (s *MainTestSuite) TestConfigureNetworkSetsCorrectValues() {
_, job := helpers.CreateTemplateJob()
defaultTaskGroup := nomad.FindAndValidateDefaultTaskGroup(job)
defaultTask := nomad.FindAndValidateDefaultTask(defaultTaskGroup)
mode, ok := defaultTask.Config["network_mode"]
s.True(ok)
s.Equal("none", mode)
s.Equal(0, len(defaultTaskGroup.Networks))
exposedPortsTests := [][]uint16{{}, {1337}, {42, 1337}}
s.Run("with no network access", func() {
for _, ports := range exposedPortsTests {
_, testJob := helpers.CreateTemplateJob()
testTaskGroup := nomad.FindAndValidateDefaultTaskGroup(testJob)
testTask := nomad.FindAndValidateDefaultTask(testTaskGroup)
testEnvironment := &NomadEnvironment{nil, "", job, nil, context.Background(), nil}
testEnvironment.SetNetworkAccess(false, ports)
mode, ok := testTask.Config["network_mode"]
s.True(ok)
s.Equal("none", mode)
s.Equal(0, len(testTaskGroup.Networks))
}
})
s.Run("with network access", func() {
for _, ports := range exposedPortsTests {
_, testJob := helpers.CreateTemplateJob()
testTaskGroup := nomad.FindAndValidateDefaultTaskGroup(testJob)
testTask := nomad.FindAndValidateDefaultTask(testTaskGroup)
testEnvironment := &NomadEnvironment{nil, "", testJob, nil, context.Background(), nil}
testEnvironment.SetNetworkAccess(true, ports)
s.Require().Equal(1, len(testTaskGroup.Networks))
networkResource := testTaskGroup.Networks[0]
s.Equal(config.Config.Nomad.Network.Mode, networkResource.Mode)
s.Require().Equal(len(ports), len(networkResource.DynamicPorts))
assertExpectedPorts(s.T(), ports, networkResource)
mode, ok := testTask.Config["network_mode"]
s.True(ok)
s.Equal(mode, "")
}
})
}
func assertExpectedPorts(t *testing.T, expectedPorts []uint16, networkResource *nomadApi.NetworkResource) {
t.Helper()
for _, expectedPort := range expectedPorts {
found := false
for _, actualPort := range networkResource.DynamicPorts {
if actualPort.To == int(expectedPort) {
found = true
break
}
}
assert.True(t, found, fmt.Sprintf("port list should contain %v", expectedPort))
}
}
func (s *MainTestSuite) TestRegisterFailsWhenNomadJobRegistrationFails() {
apiClientMock := &nomad.ExecutorAPIMock{}
expectedErr := tests.ErrDefault
apiClientMock.On("RegisterNomadJob", mock.AnythingOfType("*api.Job")).Return("", expectedErr)
apiClientMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiClientMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
environment := &NomadEnvironment{apiClientMock, "", &nomadApi.Job{},
storage.NewLocalStorage[runner.Runner](), nil, nil}
environment.SetID(tests.DefaultEnvironmentIDAsInteger)
err := environment.Register()
s.ErrorIs(err, expectedErr)
apiClientMock.AssertNotCalled(s.T(), "MonitorEvaluation")
}
func (s *MainTestSuite) TestRegisterTemplateJobSucceedsWhenMonitoringEvaluationSucceeds() {
apiClientMock := &nomad.ExecutorAPIMock{}
evaluationID := "id"
apiClientMock.On("RegisterNomadJob", mock.AnythingOfType("*api.Job")).Return(evaluationID, nil)
apiClientMock.On("MonitorEvaluation", mock.AnythingOfType("string"), mock.Anything).Return(nil)
apiClientMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiClientMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
environment := &NomadEnvironment{apiClientMock, "", &nomadApi.Job{},
storage.NewLocalStorage[runner.Runner](), context.Background(), nil}
environment.SetID(tests.DefaultEnvironmentIDAsInteger)
err := environment.Register()
s.NoError(err)
}
func (s *MainTestSuite) TestRegisterTemplateJobReturnsErrorWhenMonitoringEvaluationFails() {
apiClientMock := &nomad.ExecutorAPIMock{}
evaluationID := "id"
apiClientMock.On("RegisterNomadJob", mock.AnythingOfType("*api.Job")).Return(evaluationID, nil)
apiClientMock.On("MonitorEvaluation", mock.AnythingOfType("string"), mock.Anything).Return(tests.ErrDefault)
apiClientMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiClientMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
environment := &NomadEnvironment{apiClientMock, "", &nomadApi.Job{},
storage.NewLocalStorage[runner.Runner](), context.Background(), nil}
environment.SetID(tests.DefaultEnvironmentIDAsInteger)
err := environment.Register()
s.ErrorIs(err, tests.ErrDefault)
}
func (s *MainTestSuite) TestParseJob() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
s.Run("parses the given default job", func() {
environment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.NoError(err)
s.NotNil(environment.job)
s.NoError(environment.Delete(tests.ErrCleanupDestroyReason))
})
s.Run("returns error when given wrong job", func() {
environment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, nil, "")
s.Error(err)
s.Nil(environment)
})
}
func (s *MainTestSuite) TestTwoSampleAddExactlyTwoRunners() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("RegisterRunnerJob", mock.AnythingOfType("*api.Job")).Return(nil)
_, job := helpers.CreateTemplateJob()
environment := &NomadEnvironment{apiMock, templateEnvironmentJobHCL, job,
storage.NewLocalStorage[runner.Runner](), context.Background(), nil}
environment.SetPrewarmingPoolSize(2)
runner1 := &runner.RunnerMock{}
runner1.On("ID").Return(tests.DefaultRunnerID)
runner2 := &runner.RunnerMock{}
runner2.On("ID").Return(tests.AnotherRunnerID)
environment.AddRunner(runner1)
environment.AddRunner(runner2)
_, ok := environment.Sample()
s.Require().True(ok)
_, ok = environment.Sample()
s.Require().True(ok)
<-time.After(tests.ShortTimeout) // New Runners are requested asynchronously
apiMock.AssertNumberOfCalls(s.T(), "RegisterRunnerJob", 2)
}
func (s *MainTestSuite) TestSampleDoesNotSetForcePullFlag() {
apiMock := &nomad.ExecutorAPIMock{}
call := apiMock.On("RegisterRunnerJob", mock.AnythingOfType("*api.Job"))
call.Run(func(args mock.Arguments) {
job, ok := args.Get(0).(*nomadApi.Job)
s.True(ok)
taskGroup := nomad.FindAndValidateDefaultTaskGroup(job)
task := nomad.FindAndValidateDefaultTask(taskGroup)
s.False(task.Config["force_pull"].(bool))
call.ReturnArguments = mock.Arguments{nil}
})
_, job := helpers.CreateTemplateJob()
environment := &NomadEnvironment{apiMock, templateEnvironmentJobHCL, job,
storage.NewLocalStorage[runner.Runner](), s.TestCtx, nil}
runner1 := &runner.RunnerMock{}
runner1.On("ID").Return(tests.DefaultRunnerID)
environment.AddRunner(runner1)
_, ok := environment.Sample()
s.Require().True(ok)
<-time.After(tests.ShortTimeout) // New Runners are requested asynchronously
}
func (s *MainTestSuite) TestNomadEnvironment_DeleteLocally() {
apiMock := &nomad.ExecutorAPIMock{}
environment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
err = environment.Delete(runner.ErrLocalDestruction)
s.NoError(err)
apiMock.AssertExpectations(s.T())
}
func (s *MainTestSuite) TestNomadEnvironment_AddRunner() {
s.Run("Destroys runner before replacing it", func() {
apiMock := &nomad.ExecutorAPIMock{}
environment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
r := &runner.RunnerMock{}
r.On("ID").Return(tests.DefaultRunnerID)
r.On("Destroy", mock.Anything).Run(func(args mock.Arguments) {
err, ok := args[0].(error)
s.Require().True(ok)
s.ErrorIs(err, runner.ErrLocalDestruction)
}).Return(nil).Once()
r2 := &runner.RunnerMock{}
r2.On("ID").Return(tests.DefaultRunnerID)
environment.AddRunner(r)
environment.AddRunner(r2)
r.AssertExpectations(s.T())
// Teardown test case
r2.On("Destroy", mock.Anything).Return(nil)
apiMock.On("LoadRunnerIDs", mock.Anything).Return([]string{}, nil)
apiMock.On("DeleteJob", mock.Anything).Return(nil)
s.NoError(environment.Delete(tests.ErrCleanupDestroyReason))
})
}

View File

@ -1,455 +0,0 @@
package environment
import (
"context"
nomadApi "github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/internal/runner"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests"
"github.com/openHPI/poseidon/tests/helpers"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"os"
"testing"
"time"
)
type CreateOrUpdateTestSuite struct {
tests.MemoryLeakTestSuite
runnerManagerMock runner.ManagerMock
apiMock nomad.ExecutorAPIMock
request dto.ExecutionEnvironmentRequest
manager *NomadEnvironmentManager
environmentID dto.EnvironmentID
}
func TestCreateOrUpdateTestSuite(t *testing.T) {
suite.Run(t, new(CreateOrUpdateTestSuite))
}
func (s *CreateOrUpdateTestSuite) SetupTest() {
s.MemoryLeakTestSuite.SetupTest()
s.runnerManagerMock = runner.ManagerMock{}
s.apiMock = nomad.ExecutorAPIMock{}
s.request = dto.ExecutionEnvironmentRequest{
PrewarmingPoolSize: 10,
CPULimit: 20,
MemoryLimit: 30,
Image: "my-image",
NetworkAccess: false,
ExposedPorts: nil,
}
s.manager = &NomadEnvironmentManager{
AbstractManager: &AbstractManager{runnerManager: &s.runnerManagerMock},
api: &s.apiMock,
templateEnvironmentHCL: templateEnvironmentJobHCL,
}
s.environmentID = dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger)
}
func (s *CreateOrUpdateTestSuite) TestReturnsErrorIfCreatesOrUpdateEnvironmentReturnsError() {
s.apiMock.On("RegisterNomadJob", mock.AnythingOfType("*api.Job")).Return("", tests.ErrDefault)
s.apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
s.runnerManagerMock.On("GetEnvironment", mock.AnythingOfType("dto.EnvironmentID")).Return(nil, false)
s.runnerManagerMock.On("StoreEnvironment", mock.AnythingOfType("*environment.NomadEnvironment")).Return(true)
s.ExpectedGoroutineIncrease++ // We don't care about removing the created environment.
_, err := s.manager.CreateOrUpdate(
dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger), s.request, context.Background())
s.ErrorIs(err, tests.ErrDefault)
}
func (s *CreateOrUpdateTestSuite) TestCreateOrUpdatesSetsForcePullFlag() {
s.apiMock.On("RegisterNomadJob", mock.AnythingOfType("*api.Job")).Return("", nil)
s.apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
s.runnerManagerMock.On("GetEnvironment", mock.AnythingOfType("dto.EnvironmentID")).Return(nil, false)
s.runnerManagerMock.On("StoreEnvironment", mock.AnythingOfType("*environment.NomadEnvironment")).Return(true)
s.apiMock.On("MonitorEvaluation", mock.AnythingOfType("string"), mock.Anything).Return(nil)
s.apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
call := s.apiMock.On("RegisterRunnerJob", mock.AnythingOfType("*api.Job"))
count := 0
call.Run(func(args mock.Arguments) {
count++
job, ok := args.Get(0).(*nomadApi.Job)
s.True(ok)
// The environment job itself does not have the force_pull flag.
if count > 1 {
taskGroup := nomad.FindAndValidateDefaultTaskGroup(job)
task := nomad.FindAndValidateDefaultTask(taskGroup)
s.True(task.Config["force_pull"].(bool))
}
call.ReturnArguments = mock.Arguments{nil}
})
s.ExpectedGoroutineIncrease++ // We don't care about removing the created environment at this point.
_, err := s.manager.CreateOrUpdate(
dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger), s.request, context.Background())
s.NoError(err)
s.True(count > 1)
}
func (s *MainTestSuite) TestNewNomadEnvironmentManager() {
executorAPIMock := &nomad.ExecutorAPIMock{}
executorAPIMock.On("LoadEnvironmentJobs").Return([]*nomadApi.Job{}, nil)
executorAPIMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
executorAPIMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
runnerManagerMock := &runner.ManagerMock{}
runnerManagerMock.On("Load").Return()
previousTemplateEnvironmentJobHCL := templateEnvironmentJobHCL
s.Run("returns error if template file does not exist", func() {
_, err := NewNomadEnvironmentManager(runnerManagerMock, executorAPIMock, "/non-existent/file")
s.Error(err)
})
s.Run("loads template environment job from file", func() {
templateJobHCL := "job \"" + tests.DefaultTemplateJobID + "\" {}"
environment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, executorAPIMock, templateJobHCL)
s.Require().NoError(err)
f := createTempFile(s.T(), templateJobHCL)
defer os.Remove(f.Name())
m, err := NewNomadEnvironmentManager(runnerManagerMock, executorAPIMock, f.Name())
s.NoError(err)
s.NotNil(m)
s.Equal(templateJobHCL, m.templateEnvironmentHCL)
s.NoError(environment.Delete(tests.ErrCleanupDestroyReason))
})
s.Run("returns error if template file is invalid", func() {
templateJobHCL := "invalid hcl file"
f := createTempFile(s.T(), templateJobHCL)
defer os.Remove(f.Name())
m, err := NewNomadEnvironmentManager(runnerManagerMock, executorAPIMock, f.Name())
s.Require().NoError(err)
_, err = NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, nil, m.templateEnvironmentHCL)
s.Error(err)
})
templateEnvironmentJobHCL = previousTemplateEnvironmentJobHCL
}
func (s *MainTestSuite) TestNomadEnvironmentManager_Get() {
apiMock := &nomad.ExecutorAPIMock{}
mockWatchAllocations(s.TestCtx, apiMock)
apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
call := apiMock.On("LoadEnvironmentJobs")
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{[]*nomadApi.Job{}, nil}
})
runnerManager := runner.NewNomadRunnerManager(apiMock, s.TestCtx)
m, err := NewNomadEnvironmentManager(runnerManager, apiMock, "")
s.Require().NoError(err)
s.Run("Returns error when not found", func() {
_, err := m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.Error(err)
})
s.Run("Returns environment when it was added before", func() {
expectedEnvironment, err :=
NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
expectedEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
s.Require().NoError(err)
runnerManager.StoreEnvironment(expectedEnvironment)
environment, err := m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.NoError(err)
s.Equal(expectedEnvironment, environment)
err = environment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
})
s.Run("Fetch", func() {
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
s.Run("Returns error when not found", func() {
_, err := m.Get(tests.DefaultEnvironmentIDAsInteger, true)
s.Error(err)
})
s.Run("Updates values when environment already known by Poseidon", func() {
fetchedEnvironment, err := NewNomadEnvironment(
tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
fetchedEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
fetchedEnvironment.SetImage("random docker image")
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{[]*nomadApi.Job{fetchedEnvironment.job}, nil}
})
localEnvironment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
localEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
runnerManager.StoreEnvironment(localEnvironment)
environment, err := m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.NoError(err)
s.NotEqual(fetchedEnvironment.Image(), environment.Image())
environment, err = m.Get(tests.DefaultEnvironmentIDAsInteger, true)
s.NoError(err)
s.Equal(fetchedEnvironment.Image(), environment.Image())
err = fetchedEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
err = environment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
err = localEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
})
runnerManager.DeleteEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Run("Adds environment when not already known by Poseidon", func() {
fetchedEnvironment, err := NewNomadEnvironment(
tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
fetchedEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
fetchedEnvironment.SetImage("random docker image")
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{[]*nomadApi.Job{fetchedEnvironment.job}, nil}
})
_, err = m.Get(tests.DefaultEnvironmentIDAsInteger, false)
s.Error(err)
environment, err := m.Get(tests.DefaultEnvironmentIDAsInteger, true)
s.NoError(err)
s.Equal(fetchedEnvironment.Image(), environment.Image())
err = fetchedEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
err = environment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
})
})
}
func (s *MainTestSuite) TestNomadEnvironmentManager_List() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
mockWatchAllocations(s.TestCtx, apiMock)
call := apiMock.On("LoadEnvironmentJobs")
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{[]*nomadApi.Job{}, nil}
})
runnerManager := runner.NewNomadRunnerManager(apiMock, s.TestCtx)
m, err := NewNomadEnvironmentManager(runnerManager, apiMock, "")
s.Require().NoError(err)
s.Run("with no environments", func() {
environments, err := m.List(true)
s.NoError(err)
s.Empty(environments)
})
s.Run("Returns added environment", func() {
localEnvironment, err := NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
localEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
runnerManager.StoreEnvironment(localEnvironment)
environments, err := m.List(false)
s.NoError(err)
s.Equal(1, len(environments))
s.Equal(localEnvironment, environments[0])
err = localEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
})
runnerManager.DeleteEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Run("Fetches new Runners via the api client", func() {
fetchedEnvironment, err :=
NewNomadEnvironment(tests.DefaultEnvironmentIDAsInteger, apiMock, templateEnvironmentJobHCL)
s.Require().NoError(err)
fetchedEnvironment.SetID(tests.DefaultEnvironmentIDAsInteger)
status := structs.JobStatusRunning
fetchedEnvironment.job.Status = &status
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{[]*nomadApi.Job{fetchedEnvironment.job}, nil}
})
environments, err := m.List(false)
s.NoError(err)
s.Empty(environments)
environments, err = m.List(true)
s.NoError(err)
s.Equal(1, len(environments))
nomadEnvironment, ok := environments[0].(*NomadEnvironment)
s.True(ok)
s.Equal(fetchedEnvironment.job, nomadEnvironment.job)
err = fetchedEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
err = nomadEnvironment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
})
}
func (s *MainTestSuite) TestNomadEnvironmentManager_Load() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("LoadRunnerIDs", mock.AnythingOfType("string")).Return([]string{}, nil)
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
mockWatchAllocations(s.TestCtx, apiMock)
call := apiMock.On("LoadEnvironmentJobs")
apiMock.On("LoadRunnerJobs", mock.AnythingOfType("dto.EnvironmentID")).
Return([]*nomadApi.Job{}, nil)
runnerManager := runner.NewNomadRunnerManager(apiMock, s.TestCtx)
s.Run("deletes local environments before loading Nomad environments", func() {
call.Return([]*nomadApi.Job{}, nil)
environment := &runner.ExecutionEnvironmentMock{}
environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
environment.On("Image").Return("")
environment.On("CPULimit").Return(uint(0))
environment.On("MemoryLimit").Return(uint(0))
environment.On("NetworkAccess").Return(false, nil)
environment.On("Delete", mock.Anything).Return(nil)
runnerManager.StoreEnvironment(environment)
m, err := NewNomadEnvironmentManager(runnerManager, apiMock, "")
s.Require().NoError(err)
err = m.load()
s.Require().NoError(err)
environment.AssertExpectations(s.T())
})
runnerManager.DeleteEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Run("Stores fetched environments", func() {
_, job := helpers.CreateTemplateJob()
call.Return([]*nomadApi.Job{job}, nil)
_, ok := runnerManager.GetEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Require().False(ok)
m, err := NewNomadEnvironmentManager(runnerManager, apiMock, "")
s.Require().NoError(err)
err = m.load()
s.Require().NoError(err)
environment, ok := runnerManager.GetEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Require().True(ok)
s.Equal("python:latest", environment.Image())
err = environment.Delete(tests.ErrCleanupDestroyReason)
s.Require().NoError(err)
})
runnerManager.DeleteEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Run("Processes only running environments", func() {
_, job := helpers.CreateTemplateJob()
jobStatus := structs.JobStatusDead
job.Status = &jobStatus
call.Return([]*nomadApi.Job{job}, nil)
_, ok := runnerManager.GetEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Require().False(ok)
_, err := NewNomadEnvironmentManager(runnerManager, apiMock, "")
s.Require().NoError(err)
_, ok = runnerManager.GetEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.Require().False(ok)
})
}
func (s *MainTestSuite) TestNomadEnvironmentManager_KeepEnvironmentsSynced() {
apiMock := &nomad.ExecutorAPIMock{}
runnerManager := runner.NewNomadRunnerManager(apiMock, s.TestCtx)
m, err := NewNomadEnvironmentManager(runnerManager, apiMock, "")
s.Require().NoError(err)
s.Run("stops when context is done", func() {
apiMock.On("LoadEnvironmentJobs").Return([]*nomadApi.Job{}, context.DeadlineExceeded)
ctx, cancel := context.WithCancel(s.TestCtx)
cancel()
// Signal completion via a channel instead of a shared flag to avoid a data race.
done := make(chan struct{})
go func() {
select {
case <-time.After(tests.ShortTimeout):
s.FailNow("KeepEnvironmentsSynced is ignoring the context")
case <-done:
}
}()
m.KeepEnvironmentsSynced(func(_ context.Context) error { return nil }, ctx)
close(done)
})
apiMock.ExpectedCalls = []*mock.Call{}
apiMock.Calls = []mock.Call{}
s.Run("retries loading environments", func() {
ctx, cancel := context.WithCancel(s.TestCtx)
apiMock.On("LoadEnvironmentJobs").Return([]*nomadApi.Job{}, context.DeadlineExceeded).Once()
apiMock.On("LoadEnvironmentJobs").Return([]*nomadApi.Job{}, nil).Run(func(_ mock.Arguments) {
cancel()
}).Once()
m.KeepEnvironmentsSynced(func(_ context.Context) error { return nil }, ctx)
apiMock.AssertExpectations(s.T())
})
apiMock.ExpectedCalls = []*mock.Call{}
apiMock.Calls = []mock.Call{}
s.Run("retries synchronizing runners", func() {
apiMock.On("LoadEnvironmentJobs").Return([]*nomadApi.Job{}, nil)
ctx, cancel := context.WithCancel(s.TestCtx)
count := 0
synchronizeRunners := func(ctx context.Context) error {
count++
if count >= 2 {
cancel()
return nil
}
return context.DeadlineExceeded
}
m.KeepEnvironmentsSynced(synchronizeRunners, ctx)
if count < 2 {
s.Fail("KeepEnvironmentsSynced is not retrying to synchronize the runners")
}
})
}
func mockWatchAllocations(ctx context.Context, apiMock *nomad.ExecutorAPIMock) {
call := apiMock.On("WatchEventStream", mock.Anything, mock.Anything, mock.Anything)
call.Run(func(args mock.Arguments) {
<-ctx.Done()
call.ReturnArguments = mock.Arguments{nil}
})
}
func createTempFile(t *testing.T, content string) *os.File {
t.Helper()
f, err := os.CreateTemp("", "test")
require.NoError(t, err)
n, err := f.WriteString(content)
require.NoError(t, err)
require.Equal(t, len(content), n)
return f
}

View File

@ -0,0 +1,122 @@
package kubernetes
import (
"context"
"errors"
nomadApi "github.com/hashicorp/nomad/api"
"io"
appv1 "k8s.io/api/apps/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
var (
ErrorNoAllocationFound = errors.New("no allocation found")
)
// apiQuerier provides access to the Kubernetes functionality.
type apiQuerier interface {
// init prepares an apiClient to be able to communicate with a provided Kubernetes API.
init(kubernetesConfig *rest.Config) (err error)
// LoadJobList loads the list of deployments from the Kubernetes API.
LoadJobList() (list []*appv1.DeploymentList, err error)
// JobScale returns the scale of the passed job.
JobScale(jobID string) (jobScale uint, err error)
// SetJobScale sets the scaling count of the passed job in Kubernetes.
SetJobScale(jobID string, count uint, reason string) (err error)
// DeleteDeployment deletes the deployment with the given name.
DeleteDeployment(name string) (err error)
// Execute runs a command in the passed job.
Execute(jobID string, ctx context.Context, command string, tty bool,
stdin io.Reader, stdout, stderr io.Writer) (int, error)
// listDeployments loads all deployments in the given namespace.
listDeployments(namespace string) (deploymentList []*appv1.DeploymentList, err error)
// deployment returns the deployment with the given name.
deployment(name string) (deployment appv1.Deployment, err error)
// listAllocations loads all allocations.
listAllocations() (allocationListStub []*nomadApi.AllocationListStub, err error)
// allocation returns the first allocation of the given job.
allocation(jobID string) (*nomadApi.Allocation, error)
// RegisterKubernetesDeployment registers a deployment with Kubernetes.
// It returns the deployment ID that can be used when listening to the Kubernetes event stream.
RegisterKubernetesDeployment(deployment appv1.Deployment) (string, error)
// EventStream returns a Nomad event stream filtered to return only allocation and evaluation events.
EventStream(ctx context.Context) (<-chan *nomadApi.Events, error)
}
// kubernetesAPIClient implements the apiQuerier interface and provides access to a real Kubernetes API.
type kubernetesAPIClient struct {
client *kubernetes.Clientset
namespace string
}
func (k *kubernetesAPIClient) init(kubernetesConfig *rest.Config) (err error) {
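// A minimal sketch of what this stub is expected to do, assuming the standard
// client-go constructor (the fmt import and error wrapping are illustrative):
//
//  client, err := kubernetes.NewForConfig(kubernetesConfig)
//  if err != nil {
//      return fmt.Errorf("couldn't create kubernetes client: %w", err)
//  }
//  k.client = client
//  return nil
//
// Storing the client requires a pointer receiver, hence the signature above.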
//TODO implement me
panic("implement me")
}
func (k kubernetesAPIClient) LoadJobList() (list []*appv1.DeploymentList, err error) {
//TODO implement me
panic("implement me")
}
func (k kubernetesAPIClient) JobScale(jobID string) (jobScale uint, err error) {
//TODO implement me
panic("implement me")
}
func (k kubernetesAPIClient) SetJobScale(jobID string, count uint, reason string) (err error) {
//TODO implement me
panic("implement me")
}
func (k kubernetesAPIClient) DeleteDeployment(name string) (err error) {
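// Sketch under the assumption that the typed AppsV1 client is used
// (metav1 = k8s.io/apimachinery/pkg/apis/meta/v1, not yet imported here):
//
//  return k.client.AppsV1().Deployments(k.namespace).
//      Delete(context.Background(), name, metav1.DeleteOptions{})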
//TODO implement me
panic("implement me")
}
func (k kubernetesAPIClient) Execute(jobID string, ctx context.Context, command string, tty bool, stdin io.Reader, stdout, stderr io.Writer) (int, error) {
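// Command execution in Kubernetes goes through the pod "exec" subresource rather
// than a job API. A rough sketch using k8s.io/client-go/tools/remotecommand,
// where podName, execOptions, and restConfig are assumptions of this sketch:
//
//  req := k.client.CoreV1().RESTClient().Post().Resource("pods").
//      Name(podName).Namespace(k.namespace).SubResource("exec").
//      VersionedParams(execOptions, scheme.ParameterCodec)
//  executor, err := remotecommand.NewSPDYExecutor(restConfig, "POST", req.URL())
//  err = executor.StreamWithContext(ctx, remotecommand.StreamOptions{
//      Stdin: stdin, Stdout: stdout, Stderr: stderr, Tty: tty,
//  })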
//TODO implement me
panic("implement me")
}
func (k kubernetesAPIClient) listDeployments(namespace string) (jobListStub []*appv1.DeploymentList, err error) {
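// Sketch, assuming metav1 list options. Note that the typed client returns a
// single *appv1.DeploymentList, which would need to be wrapped to satisfy the
// current []*appv1.DeploymentList return type:
//
//  list, err := k.client.AppsV1().Deployments(namespace).
//      List(context.Background(), metav1.ListOptions{})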
//TODO implement me
panic("implement me")
}
func (k kubernetesAPIClient) deployment(name string) (deployment appv1.Deployment, err error) {
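// Sketch, assuming metav1 get options:
//
//  result, err := k.client.AppsV1().Deployments(k.namespace).
//      Get(context.Background(), name, metav1.GetOptions{})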
//TODO implement me
panic("implement me")
}
func (k kubernetesAPIClient) listAllocations() (allocationListStub []*nomadApi.AllocationListStub, err error) {
//TODO implement me
panic("implement me")
}
func (k kubernetesAPIClient) allocation(jobID string) (*nomadApi.Allocation, error) {
//TODO implement me
panic("implement me")
}
func (k kubernetesAPIClient) RegisterKubernetesDeployment(deployment appv1.Deployment) (string, error) {
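// Sketch; unlike Nomad, Kubernetes has no evaluation ID, so returning the
// deployment name (or UID) as the identifier is an assumption of this sketch:
//
//  created, err := k.client.AppsV1().Deployments(k.namespace).
//      Create(context.Background(), &deployment, metav1.CreateOptions{})
//  if err != nil {
//      return "", err
//  }
//  return created.Name, nil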
//TODO implement me
panic("implement me")
}
func (k kubernetesAPIClient) EventStream(ctx context.Context) (<-chan *nomadApi.Events, error) {
//TODO implement me
panic("implement me")
}

View File

@ -0,0 +1,125 @@
package kubernetes
import (
"context"
"errors"
"fmt"
nomadApi "github.com/hashicorp/nomad/api"
"github.com/openHPI/poseidon/pkg/dto"
appv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"strconv"
"strings"
"time"
)
const (
TemplateJobPrefix = "template"
TaskGroupName = "default-group"
TaskName = "default-task"
TaskCount = 1
TaskDriver = "docker"
TaskCommand = "sleep"
ConfigTaskGroupName = "config"
ConfigTaskName = "config"
ConfigTaskDriver = "exec"
ConfigTaskCommand = "true"
ConfigMetaUsedKey = "used"
ConfigMetaUsedValue = "true"
ConfigMetaUnusedValue = "false"
ConfigMetaTimeoutKey = "timeout"
ConfigMetaPoolSizeKey = "prewarmingPoolSize"
TemplateJobNameParts = 2
RegisterTimeout = 10 * time.Second
RunnerTimeoutFallback = 60 * time.Second
)
var (
ErrorInvalidJobID = errors.New("invalid job id")
ErrorMissingTaskGroup = errors.New("couldn't find config task group in job")
TaskArgs = []string{"infinity"}
)
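// RegisterRunnerJob creates a runner deployment based on the given template.
// It registers the deployment and waits until the registration completes.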
func (a *APIClient) RegisterRunnerJob(template *appv1.Deployment) error {
evalID, err := a.apiQuerier.RegisterKubernetesDeployment(*template)
if err != nil {
return fmt.Errorf("couldn't register runner job: %w", err)
}
registerTimeout, cancel := context.WithTimeout(context.Background(), RegisterTimeout)
defer cancel()
return a.MonitorEvaluation(evalID, registerTimeout)
}
// SetForcePullFlag sets the image pull policy of the deployment's default task container depending on whether the image should be pulled again.
func SetForcePullFlag(deployment *appv1.Deployment, value bool) {
// Index into the slice: ranging by value would only mutate a copy of each container.
containers := deployment.Spec.Template.Spec.Containers
for i := range containers {
if containers[i].Name == TaskName {
if value {
containers[i].ImagePullPolicy = v1.PullAlways
} else {
containers[i].ImagePullPolicy = v1.PullIfNotPresent
}
}
}
}
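// Example (illustrative): force the next registration of a template deployment
// to pull a fresh image.
//
//  SetForcePullFlag(deployment, true)
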
// IsEnvironmentTemplateID checks if the passed job id belongs to a template job.
func IsEnvironmentTemplateID(jobID string) bool {
parts := strings.Split(jobID, "-")
if len(parts) != TemplateJobNameParts || parts[0] != TemplateJobPrefix {
return false
}
_, err := EnvironmentIDFromTemplateJobID(jobID)
return err == nil
}
// RunnerJobID returns the job id of the runner with the given environmentID and id.
func RunnerJobID(environmentID dto.EnvironmentID, id string) string {
return fmt.Sprintf("%d-%s", environmentID, id)
}
// TemplateJobID returns the id of the template job for the environment with the given id.
func TemplateJobID(id dto.EnvironmentID) string {
return fmt.Sprintf("%s-%d", TemplateJobPrefix, id)
}
// EnvironmentIDFromRunnerID returns the environment id that is part of the passed runner job id.
func EnvironmentIDFromRunnerID(jobID string) (dto.EnvironmentID, error) {
return partOfJobID(jobID, 0)
}
// EnvironmentIDFromTemplateJobID returns the environment id that is part of the passed environment job id.
func EnvironmentIDFromTemplateJobID(id string) (dto.EnvironmentID, error) {
return partOfJobID(id, 1)
}
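// These helpers share one ID scheme, which partOfJobID parses. Illustrative values:
//
//  TemplateJobID(42)                              == "template-42"
//  RunnerJobID(42, "RANDOM-UUID")                 == "42-RANDOM-UUID"
//  EnvironmentIDFromTemplateJobID("template-42")  == 42, nil
//  EnvironmentIDFromRunnerID("42-RANDOM-UUID")    == 42, nil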
func partOfJobID(id string, part uint) (dto.EnvironmentID, error) {
parts := strings.Split(id, "-")
if uint(len(parts)) <= part {
return 0, fmt.Errorf("malformed job id %q: %w", id, ErrorInvalidJobID)
}
environmentID, err := strconv.Atoi(parts[part])
if err != nil {
return 0, fmt.Errorf("invalid environment id par %v: %w", err, ErrorInvalidJobID)
}
return dto.EnvironmentID(environmentID), nil
}
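// isOOMKilled counts the task events flagged oom_killed and reports true when
// that count reaches the number of restarts, i.e. when the most recent run of
// the default task likely ended in an OOM kill.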
func isOOMKilled(alloc *nomadApi.Allocation) bool {
state, ok := alloc.TaskStates[TaskName]
if !ok {
return false
}
var oomKilledCount uint64
for _, event := range state.Events {
if oomString, ok := event.Details["oom_killed"]; ok {
if oom, err := strconv.ParseBool(oomString); err == nil && oom {
oomKilledCount++
}
}
}
return oomKilledCount >= state.Restarts
}

View File

@ -0,0 +1,221 @@
package kubernetes
import (
"context"
"errors"
"fmt"
nomadApi "github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/influxdata/influxdb-client-go/v2/api/write"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/logging"
"github.com/openHPI/poseidon/pkg/monitoring"
"github.com/openHPI/poseidon/pkg/storage"
"io"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/rest"
"strings"
"time"
)
var (
log = logging.GetLogger("kubernetes")
ErrorExecutorCommunicationFailed = errors.New("communication with executor failed")
ErrorEvaluation = errors.New("evaluation could not complete")
ErrorPlacingAllocations = errors.New("failed to place all allocations")
ErrorLoadingJob = errors.New("failed to load job")
ErrorNoAllocatedResourcesFound = errors.New("no allocated resources found")
ErrorLocalDestruction RunnerDeletedReason = errors.New("the destruction should not cause external changes")
ErrorOOMKilled RunnerDeletedReason = fmt.Errorf("%s: %w", dto.ErrOOMKilled.Error(), ErrorLocalDestruction)
ErrorAllocationRescheduled RunnerDeletedReason = fmt.Errorf("the allocation was rescheduled: %w", ErrorLocalDestruction)
ErrorAllocationStopped RunnerDeletedReason = errors.New("the allocation was stopped")
ErrorAllocationStoppedUnexpectedly RunnerDeletedReason = fmt.Errorf("%w unexpectedly", ErrorAllocationStopped)
ErrorAllocationRescheduledUnexpectedly RunnerDeletedReason = fmt.Errorf(
"%w correctly but rescheduled", ErrorAllocationStopped)
// ErrorAllocationCompleted is for reporting the reason for the stopped allocation.
// We do not consider it as an error but add it anyway for a complete reporting.
ErrorAllocationCompleted RunnerDeletedReason = errors.New("the allocation completed")
)
type allocationData struct {
// allocClientStatus is the allocation state as reported by Nomad.
allocClientStatus string
// allocDesiredStatus defines whether the allocation should be running or stopped.
allocDesiredStatus string
jobID string
start time.Time
// stopExpected is used to suppress warnings that could be triggered by a race condition
// between the inactivity timer and an external event leading to allocation rescheduling.
stopExpected bool
// Just debugging information
allocNomadNode string
}
// resultChannelWriteTimeout is used to detect the error case in which more elements are written into a channel than expected.
const resultChannelWriteTimeout = 10 * time.Millisecond
type DeletedAllocationProcessor func(jobID string, reason RunnerDeletedReason) (removedByPoseidon bool)
type NewAllocationProcessor func(*nomadApi.Allocation, time.Duration)
// AllocationProcessing includes the callbacks to interact with allocation events.
type AllocationProcessing struct {
OnNew NewAllocationProcessor
OnDeleted DeletedAllocationProcessor
}
type RunnerDeletedReason error
// ExecutorAPI provides access to a container orchestration solution.
type ExecutorAPI interface {
apiQuerier
// LoadEnvironmentJobs loads all environment jobs.
LoadEnvironmentJobs() ([]*appsv1.Deployment, error)
// LoadRunnerJobs loads all runner jobs specific for the environment.
LoadRunnerJobs(environmentID dto.EnvironmentID) ([]*appsv1.Deployment, error)
// LoadRunnerIDs returns the IDs of all runners with the specified id prefix which are not about to
// get stopped.
LoadRunnerIDs(prefix string) (runnerIds []string, err error)
// LoadRunnerPortMappings returns the mapped ports of the runner.
LoadRunnerPortMappings(runnerID string) ([]v1.ContainerPort, error)
// RegisterRunnerJob creates a runner job based on the template job.
// It registers the job and waits until the registration completes.
RegisterRunnerJob(template *appsv1.Deployment) error
// MonitorEvaluation monitors the given evaluation ID.
// It waits until the evaluation reaches one of the states complete, canceled or failed.
// If the evaluation was not successful, an error containing the failures is returned.
// See also https://github.com/hashicorp/nomad/blob/7d5a9ecde95c18da94c9b6ace2565afbfdd6a40d/command/monitor.go#L175
MonitorEvaluation(evaluationID string, ctx context.Context) error
// WatchEventStream listens on the Nomad event stream for allocation and evaluation events.
// Depending on the incoming event, any of the given function is executed.
// Do not run multiple times simultaneously.
WatchEventStream(ctx context.Context, callbacks *AllocationProcessing) error
// ExecuteCommand executes the given command in the job/runner with the given id.
// It writes the output of the command to stdout/stderr and reads input from stdin.
// If tty is true, the command will run with a tty.
// Iff privilegedExecution is true, the command will be executed privileged.
// The command is passed in the shell form (not the exec array form) and will be executed in a shell.
ExecuteCommand(jobID string, ctx context.Context, command string, tty bool, privilegedExecution bool,
stdin io.Reader, stdout, stderr io.Writer) (int, error)
// MarkRunnerAsUsed marks the runner with the given ID as used. It also stores the timeout duration in the metadata.
MarkRunnerAsUsed(runnerID string, duration int) error
}
// APIClient implements the ExecutorAPI interface and can be used to perform different operations on the real
// Executor API and its return values.
type APIClient struct {
apiQuerier
evaluations storage.Storage[chan error]
// allocations contain management data for all pending and running allocations.
allocations storage.Storage[*allocationData]
isListening bool
}
func (a *APIClient) LoadEnvironmentJobs() ([]*appsv1.Deployment, error) {
//TODO implement me
panic("implement me")
}
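// LoadRunnerJobs loads the deployment of every runner belonging to the given
// environment. Deployments that fail to load are skipped; their errors are
// accumulated and returned alongside the successfully loaded deployments.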
func (a *APIClient) LoadRunnerJobs(environmentID dto.EnvironmentID) ([]*appsv1.Deployment, error) {
go a.initializeAllocations(environmentID)
runnerIDs, err := a.LoadRunnerIDs(RunnerJobID(environmentID, ""))
if err != nil {
return []*appsv1.Deployment{}, fmt.Errorf("couldn't load jobs: %w", err)
}
var occurredError error
jobs := make([]*appsv1.Deployment, 0, len(runnerIDs))
for _, id := range runnerIDs {
job, err := a.apiQuerier.deployment(id)
if err != nil {
if occurredError == nil {
occurredError = ErrorLoadingJob
}
occurredError = fmt.Errorf("%w: couldn't load job info for runner %s - %v", occurredError, id, err)
continue
}
jobs = append(jobs, &job)
}
return jobs, occurredError
}
func (a *APIClient) LoadRunnerIDs(prefix string) (runnerIds []string, err error) {
//TODO implement me
panic("implement me")
}
func (a *APIClient) LoadRunnerPortMappings(runnerID string) ([]v1.ContainerPort, error) {
//TODO implement me
panic("implement me")
}
func (a *APIClient) MonitorEvaluation(evaluationID string, ctx context.Context) error {
//TODO implement me
panic("implement me")
}
func (a *APIClient) WatchEventStream(ctx context.Context, callbacks *AllocationProcessing) error {
//TODO implement me
panic("implement me")
}
func (a *APIClient) ExecuteCommand(jobID string, ctx context.Context, command string, tty bool, privilegedExecution bool, stdin io.Reader, stdout, stderr io.Writer) (int, error) {
//TODO implement me
panic("implement me")
}
func (a *APIClient) MarkRunnerAsUsed(runnerID string, duration int) error {
//TODO implement me
panic("implement me")
}
// NewExecutorAPI creates a new api client.
// One client is usually sufficient for the complete runtime of the API.
func NewExecutorAPI(kubernetesConfig *rest.Config) (ExecutorAPI, error) {
client := &APIClient{
apiQuerier: &kubernetesAPIClient{},
evaluations: storage.NewLocalStorage[chan error](),
allocations: storage.NewMonitoredLocalStorage[*allocationData](monitoring.MeasurementNomadAllocations,
func(p *write.Point, object *allocationData, _ storage.EventType) {
p.AddTag(monitoring.InfluxKeyJobID, object.jobID)
p.AddTag(monitoring.InfluxKeyClientStatus, object.allocClientStatus)
p.AddTag(monitoring.InfluxKeyNomadNode, object.allocNomadNode)
}, 0, nil),
}
err := client.init(kubernetesConfig)
return client, err
}
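// A minimal usage sketch for NewExecutorAPI when Poseidon itself runs inside
// the cluster, assuming client-go's in-cluster config helper:
//
//  kubernetesConfig, err := rest.InClusterConfig()
//  if err != nil {
//      log.WithError(err).Fatal("Could not load in-cluster config")
//  }
//  executorAPI, err := NewExecutorAPI(kubernetesConfig)
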
func (a *APIClient) initializeAllocations(environmentID dto.EnvironmentID) {
allocationStubs, err := a.listAllocations()
if err != nil {
log.WithError(err).Warn("Could not initialize allocations")
} else {
for _, stub := range allocationStubs {
switch {
case IsEnvironmentTemplateID(stub.JobID):
continue
case !strings.HasPrefix(stub.JobID, RunnerJobID(environmentID, "")):
continue
case stub.ClientStatus == structs.AllocClientStatusPending || stub.ClientStatus == structs.AllocClientStatusRunning:
log.WithField("jobID", stub.JobID).WithField("status", stub.ClientStatus).Debug("Recovered Allocation")
a.allocations.Add(stub.ID, &allocationData{
allocClientStatus: stub.ClientStatus,
allocDesiredStatus: stub.DesiredStatus,
jobID: stub.JobID,
start: time.Unix(0, stub.CreateTime),
allocNomadNode: stub.NodeName,
})
}
}
}
}

View File

@ -1,304 +0,0 @@
// Code generated by mockery v2.23.1. DO NOT EDIT.
package nomad
import (
context "context"
api "github.com/hashicorp/nomad/api"
config "github.com/openHPI/poseidon/internal/config"
io "io"
mock "github.com/stretchr/testify/mock"
)
// apiQuerierMock is an autogenerated mock type for the apiQuerier type
type apiQuerierMock struct {
mock.Mock
}
// DeleteJob provides a mock function with given fields: jobID
func (_m *apiQuerierMock) DeleteJob(jobID string) error {
ret := _m.Called(jobID)
var r0 error
if rf, ok := ret.Get(0).(func(string) error); ok {
r0 = rf(jobID)
} else {
r0 = ret.Error(0)
}
return r0
}
// EventStream provides a mock function with given fields: ctx
func (_m *apiQuerierMock) EventStream(ctx context.Context) (<-chan *api.Events, error) {
ret := _m.Called(ctx)
var r0 <-chan *api.Events
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) (<-chan *api.Events, error)); ok {
return rf(ctx)
}
if rf, ok := ret.Get(0).(func(context.Context) <-chan *api.Events); ok {
r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(<-chan *api.Events)
}
}
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(ctx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Execute provides a mock function with given fields: jobID, ctx, command, tty, stdin, stdout, stderr
func (_m *apiQuerierMock) Execute(jobID string, ctx context.Context, command string, tty bool, stdin io.Reader, stdout io.Writer, stderr io.Writer) (int, error) {
ret := _m.Called(jobID, ctx, command, tty, stdin, stdout, stderr)
var r0 int
var r1 error
if rf, ok := ret.Get(0).(func(string, context.Context, string, bool, io.Reader, io.Writer, io.Writer) (int, error)); ok {
return rf(jobID, ctx, command, tty, stdin, stdout, stderr)
}
if rf, ok := ret.Get(0).(func(string, context.Context, string, bool, io.Reader, io.Writer, io.Writer) int); ok {
r0 = rf(jobID, ctx, command, tty, stdin, stdout, stderr)
} else {
r0 = ret.Get(0).(int)
}
if rf, ok := ret.Get(1).(func(string, context.Context, string, bool, io.Reader, io.Writer, io.Writer) error); ok {
r1 = rf(jobID, ctx, command, tty, stdin, stdout, stderr)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// JobScale provides a mock function with given fields: jobID
func (_m *apiQuerierMock) JobScale(jobID string) (uint, error) {
ret := _m.Called(jobID)
var r0 uint
var r1 error
if rf, ok := ret.Get(0).(func(string) (uint, error)); ok {
return rf(jobID)
}
if rf, ok := ret.Get(0).(func(string) uint); ok {
r0 = rf(jobID)
} else {
r0 = ret.Get(0).(uint)
}
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(jobID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// LoadJobList provides a mock function with given fields:
func (_m *apiQuerierMock) LoadJobList() ([]*api.JobListStub, error) {
ret := _m.Called()
var r0 []*api.JobListStub
var r1 error
if rf, ok := ret.Get(0).(func() ([]*api.JobListStub, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() []*api.JobListStub); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*api.JobListStub)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// RegisterNomadJob provides a mock function with given fields: job
func (_m *apiQuerierMock) RegisterNomadJob(job *api.Job) (string, error) {
ret := _m.Called(job)
var r0 string
var r1 error
if rf, ok := ret.Get(0).(func(*api.Job) (string, error)); ok {
return rf(job)
}
if rf, ok := ret.Get(0).(func(*api.Job) string); ok {
r0 = rf(job)
} else {
r0 = ret.Get(0).(string)
}
if rf, ok := ret.Get(1).(func(*api.Job) error); ok {
r1 = rf(job)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// SetJobScale provides a mock function with given fields: jobID, count, reason
func (_m *apiQuerierMock) SetJobScale(jobID string, count uint, reason string) error {
ret := _m.Called(jobID, count, reason)
var r0 error
if rf, ok := ret.Get(0).(func(string, uint, string) error); ok {
r0 = rf(jobID, count, reason)
} else {
r0 = ret.Error(0)
}
return r0
}
// allocation provides a mock function with given fields: jobID
func (_m *apiQuerierMock) allocation(jobID string) (*api.Allocation, error) {
ret := _m.Called(jobID)
var r0 *api.Allocation
var r1 error
if rf, ok := ret.Get(0).(func(string) (*api.Allocation, error)); ok {
return rf(jobID)
}
if rf, ok := ret.Get(0).(func(string) *api.Allocation); ok {
r0 = rf(jobID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*api.Allocation)
}
}
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(jobID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// init provides a mock function with given fields: nomadConfig
func (_m *apiQuerierMock) init(nomadConfig *config.Nomad) error {
ret := _m.Called(nomadConfig)
var r0 error
if rf, ok := ret.Get(0).(func(*config.Nomad) error); ok {
r0 = rf(nomadConfig)
} else {
r0 = ret.Error(0)
}
return r0
}
// job provides a mock function with given fields: jobID
func (_m *apiQuerierMock) job(jobID string) (*api.Job, error) {
ret := _m.Called(jobID)
var r0 *api.Job
var r1 error
if rf, ok := ret.Get(0).(func(string) (*api.Job, error)); ok {
return rf(jobID)
}
if rf, ok := ret.Get(0).(func(string) *api.Job); ok {
r0 = rf(jobID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*api.Job)
}
}
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(jobID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// listAllocations provides a mock function with given fields:
func (_m *apiQuerierMock) listAllocations() ([]*api.AllocationListStub, error) {
ret := _m.Called()
var r0 []*api.AllocationListStub
var r1 error
if rf, ok := ret.Get(0).(func() ([]*api.AllocationListStub, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() []*api.AllocationListStub); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*api.AllocationListStub)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// listJobs provides a mock function with given fields: prefix
func (_m *apiQuerierMock) listJobs(prefix string) ([]*api.JobListStub, error) {
ret := _m.Called(prefix)
var r0 []*api.JobListStub
var r1 error
if rf, ok := ret.Get(0).(func(string) ([]*api.JobListStub, error)); ok {
return rf(prefix)
}
if rf, ok := ret.Get(0).(func(string) []*api.JobListStub); ok {
r0 = rf(prefix)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*api.JobListStub)
}
}
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(prefix)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
type mockConstructorTestingTnewApiQuerierMock interface {
mock.TestingT
Cleanup(func())
}
// newApiQuerierMock creates a new instance of apiQuerierMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func newApiQuerierMock(t mockConstructorTestingTnewApiQuerierMock) *apiQuerierMock {
mock := &apiQuerierMock{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -1,27 +0,0 @@
package nomad
import (
"errors"
"fmt"
"github.com/gorilla/websocket"
"github.com/openHPI/poseidon/tests"
"github.com/stretchr/testify/suite"
"testing"
)
type MainTestSuite struct {
tests.MemoryLeakTestSuite
}
func TestMainTestSuite(t *testing.T) {
suite.Run(t, new(MainTestSuite))
}
func (s *MainTestSuite) TestWebsocketErrorNeedsToBeUnwrapped() {
rawError := &websocket.CloseError{Code: websocket.CloseNormalClosure}
err := fmt.Errorf("websocket closed before receiving exit code: %w", rawError)
s.False(websocket.IsCloseError(err, websocket.CloseNormalClosure))
rootCause := errors.Unwrap(err)
s.True(websocket.IsCloseError(rootCause, websocket.CloseNormalClosure))
}

View File

@ -1,490 +0,0 @@
// Code generated by mockery v2.23.1. DO NOT EDIT.
package nomad
import (
context "context"
api "github.com/hashicorp/nomad/api"
config "github.com/openHPI/poseidon/internal/config"
dto "github.com/openHPI/poseidon/pkg/dto"
io "io"
mock "github.com/stretchr/testify/mock"
)
// ExecutorAPIMock is an autogenerated mock type for the ExecutorAPI type
type ExecutorAPIMock struct {
mock.Mock
}
// DeleteJob provides a mock function with given fields: jobID
func (_m *ExecutorAPIMock) DeleteJob(jobID string) error {
ret := _m.Called(jobID)
var r0 error
if rf, ok := ret.Get(0).(func(string) error); ok {
r0 = rf(jobID)
} else {
r0 = ret.Error(0)
}
return r0
}
// EventStream provides a mock function with given fields: ctx
func (_m *ExecutorAPIMock) EventStream(ctx context.Context) (<-chan *api.Events, error) {
ret := _m.Called(ctx)
var r0 <-chan *api.Events
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) (<-chan *api.Events, error)); ok {
return rf(ctx)
}
if rf, ok := ret.Get(0).(func(context.Context) <-chan *api.Events); ok {
r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(<-chan *api.Events)
}
}
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(ctx)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Execute provides a mock function with given fields: jobID, ctx, command, tty, stdin, stdout, stderr
func (_m *ExecutorAPIMock) Execute(jobID string, ctx context.Context, command string, tty bool, stdin io.Reader, stdout io.Writer, stderr io.Writer) (int, error) {
ret := _m.Called(jobID, ctx, command, tty, stdin, stdout, stderr)
var r0 int
var r1 error
if rf, ok := ret.Get(0).(func(string, context.Context, string, bool, io.Reader, io.Writer, io.Writer) (int, error)); ok {
return rf(jobID, ctx, command, tty, stdin, stdout, stderr)
}
if rf, ok := ret.Get(0).(func(string, context.Context, string, bool, io.Reader, io.Writer, io.Writer) int); ok {
r0 = rf(jobID, ctx, command, tty, stdin, stdout, stderr)
} else {
r0 = ret.Get(0).(int)
}
if rf, ok := ret.Get(1).(func(string, context.Context, string, bool, io.Reader, io.Writer, io.Writer) error); ok {
r1 = rf(jobID, ctx, command, tty, stdin, stdout, stderr)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// ExecuteCommand provides a mock function with given fields: jobID, ctx, command, tty, privilegedExecution, stdin, stdout, stderr
func (_m *ExecutorAPIMock) ExecuteCommand(jobID string, ctx context.Context, command string, tty bool, privilegedExecution bool, stdin io.Reader, stdout io.Writer, stderr io.Writer) (int, error) {
ret := _m.Called(jobID, ctx, command, tty, privilegedExecution, stdin, stdout, stderr)
var r0 int
var r1 error
if rf, ok := ret.Get(0).(func(string, context.Context, string, bool, bool, io.Reader, io.Writer, io.Writer) (int, error)); ok {
return rf(jobID, ctx, command, tty, privilegedExecution, stdin, stdout, stderr)
}
if rf, ok := ret.Get(0).(func(string, context.Context, string, bool, bool, io.Reader, io.Writer, io.Writer) int); ok {
r0 = rf(jobID, ctx, command, tty, privilegedExecution, stdin, stdout, stderr)
} else {
r0 = ret.Get(0).(int)
}
if rf, ok := ret.Get(1).(func(string, context.Context, string, bool, bool, io.Reader, io.Writer, io.Writer) error); ok {
r1 = rf(jobID, ctx, command, tty, privilegedExecution, stdin, stdout, stderr)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// JobScale provides a mock function with given fields: jobID
func (_m *ExecutorAPIMock) JobScale(jobID string) (uint, error) {
ret := _m.Called(jobID)
var r0 uint
var r1 error
if rf, ok := ret.Get(0).(func(string) (uint, error)); ok {
return rf(jobID)
}
if rf, ok := ret.Get(0).(func(string) uint); ok {
r0 = rf(jobID)
} else {
r0 = ret.Get(0).(uint)
}
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(jobID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// LoadEnvironmentJobs provides a mock function with given fields:
func (_m *ExecutorAPIMock) LoadEnvironmentJobs() ([]*api.Job, error) {
ret := _m.Called()
var r0 []*api.Job
var r1 error
if rf, ok := ret.Get(0).(func() ([]*api.Job, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() []*api.Job); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*api.Job)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// LoadJobList provides a mock function with given fields:
func (_m *ExecutorAPIMock) LoadJobList() ([]*api.JobListStub, error) {
ret := _m.Called()
var r0 []*api.JobListStub
var r1 error
if rf, ok := ret.Get(0).(func() ([]*api.JobListStub, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() []*api.JobListStub); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*api.JobListStub)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// LoadRunnerIDs provides a mock function with given fields: prefix
func (_m *ExecutorAPIMock) LoadRunnerIDs(prefix string) ([]string, error) {
ret := _m.Called(prefix)
var r0 []string
var r1 error
if rf, ok := ret.Get(0).(func(string) ([]string, error)); ok {
return rf(prefix)
}
if rf, ok := ret.Get(0).(func(string) []string); ok {
r0 = rf(prefix)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]string)
}
}
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(prefix)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// LoadRunnerJobs provides a mock function with given fields: environmentID
func (_m *ExecutorAPIMock) LoadRunnerJobs(environmentID dto.EnvironmentID) ([]*api.Job, error) {
ret := _m.Called(environmentID)
var r0 []*api.Job
var r1 error
if rf, ok := ret.Get(0).(func(dto.EnvironmentID) ([]*api.Job, error)); ok {
return rf(environmentID)
}
if rf, ok := ret.Get(0).(func(dto.EnvironmentID) []*api.Job); ok {
r0 = rf(environmentID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*api.Job)
}
}
if rf, ok := ret.Get(1).(func(dto.EnvironmentID) error); ok {
r1 = rf(environmentID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// LoadRunnerPortMappings provides a mock function with given fields: runnerID
func (_m *ExecutorAPIMock) LoadRunnerPortMappings(runnerID string) ([]api.PortMapping, error) {
ret := _m.Called(runnerID)
var r0 []api.PortMapping
var r1 error
if rf, ok := ret.Get(0).(func(string) ([]api.PortMapping, error)); ok {
return rf(runnerID)
}
if rf, ok := ret.Get(0).(func(string) []api.PortMapping); ok {
r0 = rf(runnerID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]api.PortMapping)
}
}
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(runnerID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MarkRunnerAsUsed provides a mock function with given fields: runnerID, duration
func (_m *ExecutorAPIMock) MarkRunnerAsUsed(runnerID string, duration int) error {
ret := _m.Called(runnerID, duration)
var r0 error
if rf, ok := ret.Get(0).(func(string, int) error); ok {
r0 = rf(runnerID, duration)
} else {
r0 = ret.Error(0)
}
return r0
}
// MonitorEvaluation provides a mock function with given fields: evaluationID, ctx
func (_m *ExecutorAPIMock) MonitorEvaluation(evaluationID string, ctx context.Context) error {
ret := _m.Called(evaluationID, ctx)
var r0 error
if rf, ok := ret.Get(0).(func(string, context.Context) error); ok {
r0 = rf(evaluationID, ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// RegisterNomadJob provides a mock function with given fields: job
func (_m *ExecutorAPIMock) RegisterNomadJob(job *api.Job) (string, error) {
ret := _m.Called(job)
var r0 string
var r1 error
if rf, ok := ret.Get(0).(func(*api.Job) (string, error)); ok {
return rf(job)
}
if rf, ok := ret.Get(0).(func(*api.Job) string); ok {
r0 = rf(job)
} else {
r0 = ret.Get(0).(string)
}
if rf, ok := ret.Get(1).(func(*api.Job) error); ok {
r1 = rf(job)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// RegisterRunnerJob provides a mock function with given fields: template
func (_m *ExecutorAPIMock) RegisterRunnerJob(template *api.Job) error {
ret := _m.Called(template)
var r0 error
if rf, ok := ret.Get(0).(func(*api.Job) error); ok {
r0 = rf(template)
} else {
r0 = ret.Error(0)
}
return r0
}
// SetJobScale provides a mock function with given fields: jobID, count, reason
func (_m *ExecutorAPIMock) SetJobScale(jobID string, count uint, reason string) error {
ret := _m.Called(jobID, count, reason)
var r0 error
if rf, ok := ret.Get(0).(func(string, uint, string) error); ok {
r0 = rf(jobID, count, reason)
} else {
r0 = ret.Error(0)
}
return r0
}
// WatchEventStream provides a mock function with given fields: ctx, callbacks
func (_m *ExecutorAPIMock) WatchEventStream(ctx context.Context, callbacks *AllocationProcessing) error {
ret := _m.Called(ctx, callbacks)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *AllocationProcessing) error); ok {
r0 = rf(ctx, callbacks)
} else {
r0 = ret.Error(0)
}
return r0
}
// allocation provides a mock function with given fields: jobID
func (_m *ExecutorAPIMock) allocation(jobID string) (*api.Allocation, error) {
ret := _m.Called(jobID)
var r0 *api.Allocation
var r1 error
if rf, ok := ret.Get(0).(func(string) (*api.Allocation, error)); ok {
return rf(jobID)
}
if rf, ok := ret.Get(0).(func(string) *api.Allocation); ok {
r0 = rf(jobID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*api.Allocation)
}
}
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(jobID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// init provides a mock function with given fields: nomadConfig
func (_m *ExecutorAPIMock) init(nomadConfig *config.Nomad) error {
ret := _m.Called(nomadConfig)
var r0 error
if rf, ok := ret.Get(0).(func(*config.Nomad) error); ok {
r0 = rf(nomadConfig)
} else {
r0 = ret.Error(0)
}
return r0
}
// job provides a mock function with given fields: jobID
func (_m *ExecutorAPIMock) job(jobID string) (*api.Job, error) {
ret := _m.Called(jobID)
var r0 *api.Job
var r1 error
if rf, ok := ret.Get(0).(func(string) (*api.Job, error)); ok {
return rf(jobID)
}
if rf, ok := ret.Get(0).(func(string) *api.Job); ok {
r0 = rf(jobID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*api.Job)
}
}
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(jobID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// listAllocations provides a mock function with given fields:
func (_m *ExecutorAPIMock) listAllocations() ([]*api.AllocationListStub, error) {
ret := _m.Called()
var r0 []*api.AllocationListStub
var r1 error
if rf, ok := ret.Get(0).(func() ([]*api.AllocationListStub, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() []*api.AllocationListStub); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*api.AllocationListStub)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// listJobs provides a mock function with given fields: prefix
func (_m *ExecutorAPIMock) listJobs(prefix string) ([]*api.JobListStub, error) {
ret := _m.Called(prefix)
var r0 []*api.JobListStub
var r1 error
if rf, ok := ret.Get(0).(func(string) ([]*api.JobListStub, error)); ok {
return rf(prefix)
}
if rf, ok := ret.Get(0).(func(string) []*api.JobListStub); ok {
r0 = rf(prefix)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*api.JobListStub)
}
}
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(prefix)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
type mockConstructorTestingTNewExecutorAPIMock interface {
mock.TestingT
Cleanup(func())
}
// NewExecutorAPIMock creates a new instance of ExecutorAPIMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewExecutorAPIMock(t mockConstructorTestingTNewExecutorAPIMock) *ExecutorAPIMock {
mock := &ExecutorAPIMock{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -1,156 +0,0 @@
package nomad
import (
nomadApi "github.com/hashicorp/nomad/api"
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests/helpers"
)
func (s *MainTestSuite) TestFindTaskGroup() {
s.Run("Returns nil if task group not found", func() {
group := FindTaskGroup(&nomadApi.Job{}, TaskGroupName)
s.Nil(group)
})
s.Run("Finds task group when existent", func() {
_, job := helpers.CreateTemplateJob()
group := FindTaskGroup(job, TaskGroupName)
s.NotNil(group)
})
}
func (s *MainTestSuite) TestFindOrCreateDefaultTask() {
s.Run("Adds default task group when not set", func() {
job := &nomadApi.Job{}
group := FindAndValidateDefaultTaskGroup(job)
s.NotNil(group)
s.Equal(TaskGroupName, *group.Name)
s.Equal(1, len(job.TaskGroups))
s.Equal(group, job.TaskGroups[0])
s.Equal(TaskCount, *group.Count)
})
s.Run("Does not modify task group when already set", func() {
job := &nomadApi.Job{}
groupName := TaskGroupName
expectedGroup := &nomadApi.TaskGroup{Name: &groupName}
job.TaskGroups = []*nomadApi.TaskGroup{expectedGroup}
group := FindAndValidateDefaultTaskGroup(job)
s.NotNil(group)
s.Equal(1, len(job.TaskGroups))
s.Equal(expectedGroup, group)
})
}
func (s *MainTestSuite) TestFindOrCreateConfigTaskGroup() {
s.Run("Adds config task group when not set", func() {
job := &nomadApi.Job{}
group := FindAndValidateConfigTaskGroup(job)
s.NotNil(group)
s.Equal(group, job.TaskGroups[0])
s.Equal(1, len(job.TaskGroups))
s.Equal(ConfigTaskGroupName, *group.Name)
s.Equal(0, *group.Count)
})
s.Run("Does not modify task group when already set", func() {
job := &nomadApi.Job{}
groupName := ConfigTaskGroupName
expectedGroup := &nomadApi.TaskGroup{Name: &groupName}
job.TaskGroups = []*nomadApi.TaskGroup{expectedGroup}
group := FindAndValidateConfigTaskGroup(job)
s.NotNil(group)
s.Equal(1, len(job.TaskGroups))
s.Equal(expectedGroup, group)
})
}
func (s *MainTestSuite) TestFindOrCreateTask() {
s.Run("Does not modify default task when already set", func() {
groupName := TaskGroupName
group := &nomadApi.TaskGroup{Name: &groupName}
expectedTask := &nomadApi.Task{Name: TaskName}
group.Tasks = []*nomadApi.Task{expectedTask}
task := FindAndValidateDefaultTask(group)
s.NotNil(task)
s.Equal(1, len(group.Tasks))
s.Equal(expectedTask, task)
})
s.Run("Does not modify config task when already set", func() {
groupName := ConfigTaskGroupName
group := &nomadApi.TaskGroup{Name: &groupName}
expectedTask := &nomadApi.Task{Name: ConfigTaskName}
group.Tasks = []*nomadApi.Task{expectedTask}
task := FindAndValidateConfigTask(group)
s.NotNil(task)
s.Equal(1, len(group.Tasks))
s.Equal(expectedTask, task)
})
}
func (s *MainTestSuite) TestSetForcePullFlag() {
_, job := helpers.CreateTemplateJob()
taskGroup := FindAndValidateDefaultTaskGroup(job)
task := FindAndValidateDefaultTask(taskGroup)
s.Run("Ignoring passed value if DisableForcePull", func() {
config.Config.Nomad.DisableForcePull = true
SetForcePullFlag(job, true)
s.Equal(false, task.Config["force_pull"])
})
s.Run("Using passed value if not DisableForcePull", func() {
config.Config.Nomad.DisableForcePull = false
SetForcePullFlag(job, true)
s.Equal(true, task.Config["force_pull"])
SetForcePullFlag(job, false)
s.Equal(false, task.Config["force_pull"])
})
}
func (s *MainTestSuite) TestIsEnvironmentTemplateID() {
s.True(IsEnvironmentTemplateID("template-42"))
s.False(IsEnvironmentTemplateID("template-42-100"))
s.False(IsEnvironmentTemplateID("job-42"))
s.False(IsEnvironmentTemplateID("template-top"))
}
func (s *MainTestSuite) TestRunnerJobID() {
s.Equal("0-RANDOM-UUID", RunnerJobID(0, "RANDOM-UUID"))
}
func (s *MainTestSuite) TestTemplateJobID() {
s.Equal("template-42", TemplateJobID(42))
}
func (s *MainTestSuite) TestEnvironmentIDFromRunnerID() {
id, err := EnvironmentIDFromRunnerID("42-RANDOM-UUID")
s.NoError(err)
s.Equal(dto.EnvironmentID(42), id)
_, err = EnvironmentIDFromRunnerID("")
s.Error(err)
}
func (s *MainTestSuite) TestOOMKilledAllocation() {
event := nomadApi.TaskEvent{Details: map[string]string{"oom_killed": "true"}}
state := nomadApi.TaskState{Restarts: 2, Events: []*nomadApi.TaskEvent{&event}}
alloc := nomadApi.Allocation{TaskStates: map[string]*nomadApi.TaskState{TaskName: &state}}
s.False(isOOMKilled(&alloc))
event2 := nomadApi.TaskEvent{Details: map[string]string{"oom_killed": "false"}}
alloc.TaskStates[TaskName].Events = []*nomadApi.TaskEvent{&event, &event2}
s.False(isOOMKilled(&alloc))
event3 := nomadApi.TaskEvent{Details: map[string]string{"oom_killed": "true"}}
alloc.TaskStates[TaskName].Events = []*nomadApi.TaskEvent{&event, &event2, &event3}
s.True(isOOMKilled(&alloc))
}

View File

@ -1,983 +0,0 @@
package nomad
import (
"bytes"
"context"
"errors"
"fmt"
nomadApi "github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/mitchellh/mapstructure"
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/pkg/nullio"
"github.com/openHPI/poseidon/pkg/storage"
"github.com/openHPI/poseidon/tests"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"io"
"regexp"
"strings"
"testing"
"time"
)
var (
noopAllocationProcessing = &AllocationProcessing{
OnNew: func(_ *nomadApi.Allocation, _ time.Duration) {},
OnDeleted: func(_ string, _ error) bool { return false },
}
ErrUnexpectedEOF = errors.New("unexpected EOF")
)
func TestLoadRunnersTestSuite(t *testing.T) {
suite.Run(t, new(LoadRunnersTestSuite))
}
type LoadRunnersTestSuite struct {
tests.MemoryLeakTestSuite
jobID string
mock *apiQuerierMock
nomadAPIClient APIClient
availableRunner *nomadApi.JobListStub
anotherAvailableRunner *nomadApi.JobListStub
pendingRunner *nomadApi.JobListStub
deadRunner *nomadApi.JobListStub
}
func (s *LoadRunnersTestSuite) SetupTest() {
s.MemoryLeakTestSuite.SetupTest()
s.jobID = tests.DefaultRunnerID
s.mock = &apiQuerierMock{}
s.nomadAPIClient = APIClient{apiQuerier: s.mock}
s.availableRunner = newJobListStub(tests.DefaultRunnerID, structs.JobStatusRunning, 1)
s.anotherAvailableRunner = newJobListStub(tests.AnotherRunnerID, structs.JobStatusRunning, 1)
s.pendingRunner = newJobListStub(tests.DefaultRunnerID+"-1", structs.JobStatusPending, 0)
s.deadRunner = newJobListStub(tests.AnotherRunnerID+"-1", structs.JobStatusDead, 0)
}
func newJobListStub(id, status string, amountRunning int) *nomadApi.JobListStub {
return &nomadApi.JobListStub{
ID: id,
Status: status,
JobSummary: &nomadApi.JobSummary{
JobID: id,
Summary: map[string]nomadApi.TaskGroupSummary{TaskGroupName: {Running: amountRunning}},
},
}
}
func (s *LoadRunnersTestSuite) TestErrorOfUnderlyingApiCallIsPropagated() {
s.mock.On("listJobs", mock.AnythingOfType("string")).
Return(nil, tests.ErrDefault)
returnedIds, err := s.nomadAPIClient.LoadRunnerIDs(s.jobID)
s.Nil(returnedIds)
s.Equal(tests.ErrDefault, err)
}
func (s *LoadRunnersTestSuite) TestReturnsNoErrorWhenUnderlyingApiCallDoesNot() {
s.mock.On("listJobs", mock.AnythingOfType("string")).
Return([]*nomadApi.JobListStub{}, nil)
_, err := s.nomadAPIClient.LoadRunnerIDs(s.jobID)
s.NoError(err)
}
func (s *LoadRunnersTestSuite) TestAvailableRunnerIsReturned() {
s.mock.On("listJobs", mock.AnythingOfType("string")).
Return([]*nomadApi.JobListStub{s.availableRunner}, nil)
returnedIds, err := s.nomadAPIClient.LoadRunnerIDs(s.jobID)
s.Require().NoError(err)
s.Len(returnedIds, 1)
s.Equal(s.availableRunner.ID, returnedIds[0])
}
func (s *LoadRunnersTestSuite) TestPendingRunnerIsReturned() {
s.mock.On("listJobs", mock.AnythingOfType("string")).
Return([]*nomadApi.JobListStub{s.pendingRunner}, nil)
returnedIds, err := s.nomadAPIClient.LoadRunnerIDs(s.jobID)
s.Require().NoError(err)
s.Len(returnedIds, 1)
s.Equal(s.pendingRunner.ID, returnedIds[0])
}
func (s *LoadRunnersTestSuite) TestDeadRunnerIsNotReturned() {
s.mock.On("listJobs", mock.AnythingOfType("string")).
Return([]*nomadApi.JobListStub{s.deadRunner}, nil)
returnedIds, err := s.nomadAPIClient.LoadRunnerIDs(s.jobID)
s.Require().NoError(err)
s.Empty(returnedIds)
}
func (s *LoadRunnersTestSuite) TestReturnsAllAvailableRunners() {
runnersList := []*nomadApi.JobListStub{
s.availableRunner,
s.anotherAvailableRunner,
s.pendingRunner,
s.deadRunner,
}
s.mock.On("listJobs", mock.AnythingOfType("string")).
Return(runnersList, nil)
returnedIds, err := s.nomadAPIClient.LoadRunnerIDs(s.jobID)
s.Require().NoError(err)
s.Len(returnedIds, 3)
s.Contains(returnedIds, s.availableRunner.ID)
s.Contains(returnedIds, s.anotherAvailableRunner.ID)
s.Contains(returnedIds, s.pendingRunner.ID)
s.NotContains(returnedIds, s.deadRunner.ID)
}
const TestNamespace = "unit-tests"
const TestNomadToken = "n0m4d-t0k3n"
const TestDefaultAddress = "127.0.0.1"
const evaluationID = "evaluation-id"
func NomadTestConfig(address string) *config.Nomad {
return &config.Nomad{
Address: address,
Port: 4646,
Token: TestNomadToken,
TLS: config.TLS{
Active: false,
},
Namespace: TestNamespace,
}
}
func (s *MainTestSuite) TestApiClient_init() {
client := &APIClient{apiQuerier: &nomadAPIClient{}}
err := client.init(NomadTestConfig(TestDefaultAddress))
s.Require().Nil(err)
}
func (s *MainTestSuite) TestApiClientCanNotBeInitializedWithInvalidUrl() {
client := &APIClient{apiQuerier: &nomadAPIClient{}}
err := client.init(NomadTestConfig("http://" + TestDefaultAddress))
s.NotNil(err)
}
func (s *MainTestSuite) TestNewExecutorApiCanBeCreatedWithoutError() {
expectedClient := &APIClient{apiQuerier: &nomadAPIClient{}}
err := expectedClient.init(NomadTestConfig(TestDefaultAddress))
s.Require().Nil(err)
_, err = NewExecutorAPI(NomadTestConfig(TestDefaultAddress))
s.Require().Nil(err)
}
// asynchronouslyMonitorEvaluation creates an APIClient with mocked Nomad API and
// runs the MonitorEvaluation method in a goroutine. The mock returns a read-only
// version of the given stream to simulate an event stream gotten from the real
// Nomad API.
func asynchronouslyMonitorEvaluation(stream <-chan *nomadApi.Events) chan error {
ctx := context.Background()
// We can only get a read-only channel once we return it from a function.
readOnlyStream := func() <-chan *nomadApi.Events { return stream }()
apiMock := &apiQuerierMock{}
apiMock.On("EventStream", mock.AnythingOfType("*context.cancelCtx")).
Return(readOnlyStream, nil)
apiClient := &APIClient{apiMock, storage.NewLocalStorage[chan error](), storage.NewLocalStorage[*allocationData](), false}
errChan := make(chan error)
go func() {
errChan <- apiClient.MonitorEvaluation(evaluationID, ctx)
}()
return errChan
}
func (s *MainTestSuite) TestApiClient_MonitorEvaluationReturnsNilWhenStreamIsClosed() {
stream := make(chan *nomadApi.Events)
errChan := asynchronouslyMonitorEvaluation(stream)
close(stream)
var err error
// If close doesn't terminate MonitorEvaluation, this test won't complete without a timeout.
select {
case err = <-errChan:
case <-time.After(time.Millisecond * 10):
s.T().Fatal("MonitorEvaluation didn't finish as expected")
}
s.Nil(err)
}
func (s *MainTestSuite) TestApiClient_MonitorEvaluationReturnsErrorWhenStreamReturnsError() {
apiMock := &apiQuerierMock{}
apiMock.On("EventStream", mock.AnythingOfType("*context.cancelCtx")).
Return(nil, tests.ErrDefault)
apiClient := &APIClient{apiMock, storage.NewLocalStorage[chan error](), storage.NewLocalStorage[*allocationData](), false}
err := apiClient.MonitorEvaluation("id", context.Background())
s.ErrorIs(err, tests.ErrDefault)
}
type eventPayload struct {
Evaluation *nomadApi.Evaluation
Allocation *nomadApi.Allocation
}
// eventForEvaluation takes an evaluation and creates an Event with the given evaluation
// as its payload. Nomad uses the mapstructure library to decode the payload, which we
// simply reverse here.
func eventForEvaluation(t *testing.T, eval *nomadApi.Evaluation) nomadApi.Event {
t.Helper()
payload := make(map[string]interface{})
err := mapstructure.Decode(eventPayload{Evaluation: eval}, &payload)
if err != nil {
t.Fatalf("Couldn't decode evaluation %v into payload map", eval)
return nomadApi.Event{}
}
event := nomadApi.Event{Topic: nomadApi.TopicEvaluation, Payload: payload}
return event
}
// simulateNomadEventStream streams the given events sequentially to the stream channel.
// It returns how many events have been processed until an error occurred.
func simulateNomadEventStream(
ctx context.Context,
stream chan<- *nomadApi.Events,
errChan chan error,
events []*nomadApi.Events,
) (int, error) {
eventsProcessed := 0
var e *nomadApi.Events
for _, e = range events {
select {
case err := <-errChan:
return eventsProcessed, err
case stream <- e:
eventsProcessed++
}
}
close(stream)
// Wait for last event being processed
var err error
select {
case <-ctx.Done():
case err = <-errChan:
}
return eventsProcessed, err
}
// runEvaluationMonitoring simulates events streamed from the Nomad event stream
// to the MonitorEvaluation method. It starts the MonitorEvaluation function as a goroutine
// and sequentially transfers the events from the given array to a channel simulating the stream.
func runEvaluationMonitoring(ctx context.Context, events []*nomadApi.Events) (eventsProcessed int, err error) {
stream := make(chan *nomadApi.Events)
errChan := asynchronouslyMonitorEvaluation(stream)
return simulateNomadEventStream(ctx, stream, errChan, events)
}
func (s *MainTestSuite) TestApiClient_MonitorEvaluationWithSuccessfulEvent() {
eval := nomadApi.Evaluation{Status: structs.EvalStatusComplete}
pendingEval := nomadApi.Evaluation{Status: structs.EvalStatusPending}
// make sure that the tested function can complete
s.Require().Nil(checkEvaluation(&eval))
events := nomadApi.Events{Events: []nomadApi.Event{eventForEvaluation(s.T(), &eval)}}
pendingEvaluationEvents := nomadApi.Events{Events: []nomadApi.Event{eventForEvaluation(s.T(), &pendingEval)}}
multipleEventsWithPending := nomadApi.Events{Events: []nomadApi.Event{
eventForEvaluation(s.T(), &pendingEval), eventForEvaluation(s.T(), &eval),
}}
var cases = []struct {
streamedEvents []*nomadApi.Events
expectedEventsProcessed int
name string
}{
{[]*nomadApi.Events{&events}, 1,
"it completes with successful event"},
{[]*nomadApi.Events{&events, &events}, 2,
"it keeps listening after first successful event"},
{[]*nomadApi.Events{{}, &events}, 2,
"it skips heartbeat and completes"},
{[]*nomadApi.Events{&pendingEvaluationEvents, &events}, 2,
"it skips pending evaluation and completes"},
{[]*nomadApi.Events{&multipleEventsWithPending}, 1,
"it handles multiple events per received event"},
}
for _, c := range cases {
s.Run(c.name, func() {
eventsProcessed, err := runEvaluationMonitoring(s.TestCtx, c.streamedEvents)
s.Nil(err)
s.Equal(c.expectedEventsProcessed, eventsProcessed)
})
}
}
func (s *MainTestSuite) TestApiClient_MonitorEvaluationWithFailingEvent() {
eval := nomadApi.Evaluation{ID: evaluationID, Status: structs.EvalStatusFailed}
evalErr := checkEvaluation(&eval)
s.Require().NotNil(evalErr)
pendingEval := nomadApi.Evaluation{Status: structs.EvalStatusPending}
events := nomadApi.Events{Events: []nomadApi.Event{eventForEvaluation(s.T(), &eval)}}
pendingEvaluationEvents := nomadApi.Events{Events: []nomadApi.Event{eventForEvaluation(s.T(), &pendingEval)}}
multipleEventsWithPending := nomadApi.Events{Events: []nomadApi.Event{
eventForEvaluation(s.T(), &pendingEval), eventForEvaluation(s.T(), &eval),
}}
eventsWithErr := nomadApi.Events{Err: tests.ErrDefault, Events: []nomadApi.Event{{}}}
var cases = []struct {
streamedEvents []*nomadApi.Events
expectedEventsProcessed int
expectedError error
name string
}{
{[]*nomadApi.Events{&events}, 1, evalErr,
"it fails with failing event"},
{[]*nomadApi.Events{{}, &events}, 2, evalErr,
"it skips heartbeat and fail"},
{[]*nomadApi.Events{&pendingEvaluationEvents, &events}, 2, evalErr,
"it skips pending evaluation and fail"},
{[]*nomadApi.Events{&multipleEventsWithPending}, 1, evalErr,
"it handles multiple events per received event and fails"},
{[]*nomadApi.Events{&eventsWithErr}, 1, tests.ErrDefault,
"it fails with event error when event has error"},
}
for _, c := range cases {
s.Run(c.name, func() {
eventsProcessed, err := runEvaluationMonitoring(s.TestCtx, c.streamedEvents)
s.Require().NotNil(err)
s.Contains(err.Error(), c.expectedError.Error())
s.Equal(c.expectedEventsProcessed, eventsProcessed)
})
}
}
func (s *MainTestSuite) TestApiClient_MonitorEvaluationFailsWhenFailingToDecodeEvaluation() {
event := nomadApi.Event{
Topic: nomadApi.TopicEvaluation,
// This should fail decoding, as Evaluation.Status is expected to be a string, not int
Payload: map[string]interface{}{"Evaluation": map[string]interface{}{"Status": 1}},
}
_, err := event.Evaluation()
s.Require().NotNil(err)
eventsProcessed, err := runEvaluationMonitoring(s.TestCtx, []*nomadApi.Events{{Events: []nomadApi.Event{event}}})
s.Error(err)
s.Equal(1, eventsProcessed)
}
func (s *MainTestSuite) TestCheckEvaluationWithFailedAllocations() {
testKey := "test1"
failedAllocs := map[string]*nomadApi.AllocationMetric{
testKey: {NodesExhausted: 1},
}
evaluation := nomadApi.Evaluation{FailedTGAllocs: failedAllocs, Status: structs.EvalStatusFailed}
assertMessageContainsCorrectStrings := func(msg string) {
s.Contains(msg, evaluation.Status, "error should contain the evaluation status")
s.Contains(msg, fmt.Sprintf("%s: %#v", testKey, failedAllocs[testKey]),
"error should contain the failed allocations metric")
}
var msgWithoutBlockedEval, msgWithBlockedEval string
s.Run("without blocked eval", func() {
err := checkEvaluation(&evaluation)
s.Require().NotNil(err)
msgWithoutBlockedEval = err.Error()
assertMessageContainsCorrectStrings(msgWithoutBlockedEval)
})
s.Run("with blocked eval", func() {
evaluation.BlockedEval = "blocking-eval"
err := checkEvaluation(&evaluation)
s.Require().NotNil(err)
msgWithBlockedEval = err.Error()
assertMessageContainsCorrectStrings(msgWithBlockedEval)
})
s.NotEqual(msgWithBlockedEval, msgWithoutBlockedEval)
}
func (s *MainTestSuite) TestCheckEvaluationWithoutFailedAllocations() {
evaluation := nomadApi.Evaluation{FailedTGAllocs: make(map[string]*nomadApi.AllocationMetric)}
s.Run("when evaluation status complete", func() {
evaluation.Status = structs.EvalStatusComplete
err := checkEvaluation(&evaluation)
s.Nil(err)
})
s.Run("when evaluation status not complete", func() {
incompleteStates := []string{structs.EvalStatusFailed, structs.EvalStatusCancelled,
structs.EvalStatusBlocked, structs.EvalStatusPending}
for _, status := range incompleteStates {
evaluation.Status = status
err := checkEvaluation(&evaluation)
s.Require().NotNil(err)
s.Contains(err.Error(), status, "error should contain the evaluation status")
}
})
}
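// Sketch of checkEvaluation as implied by the expectations above. The real
// implementation lives outside this test file, so the details here are
// assumptions, not the verbatim production code: nil for a complete evaluation
// without failed allocations, otherwise an error mentioning the status, every
// failed task group metric, and the blocked evaluation if one was created.
func checkEvaluationSketch(eval *nomadApi.Evaluation) error {
if len(eval.FailedTGAllocs) == 0 {
if eval.Status == structs.EvalStatusComplete {
return nil
}
return fmt.Errorf("evaluation could not complete: %q", eval.Status)
}
msg := fmt.Sprintf("evaluation %q finished with status %q but failed to place all allocations:", eval.ID, eval.Status)
for taskGroup, metrics := range eval.FailedTGAllocs {
msg += fmt.Sprintf(" %s: %#v", taskGroup, metrics)
}
if eval.BlockedEval != "" {
msg += fmt.Sprintf(" (blocked evaluation %q created)", eval.BlockedEval)
}
return fmt.Errorf("%s", msg)
}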
func (s *MainTestSuite) TestApiClient_WatchAllocationsIgnoresOldAllocations() {
oldStoppedAllocation := createOldAllocation(structs.AllocClientStatusRunning, structs.AllocDesiredStatusStop)
oldPendingAllocation := createOldAllocation(structs.AllocClientStatusPending, structs.AllocDesiredStatusRun)
oldRunningAllocation := createOldAllocation(structs.AllocClientStatusRunning, structs.AllocDesiredStatusRun)
oldAllocationEvents := nomadApi.Events{Events: []nomadApi.Event{
eventForAllocation(s.T(), oldStoppedAllocation),
eventForAllocation(s.T(), oldPendingAllocation),
eventForAllocation(s.T(), oldRunningAllocation),
}}
assertWatchAllocation(s, []*nomadApi.Events{&oldAllocationEvents},
[]*nomadApi.Allocation(nil), []string(nil))
}
func createOldAllocation(clientStatus, desiredStatus string) *nomadApi.Allocation {
return createAllocation(time.Now().Add(-time.Minute).UnixNano(), clientStatus, desiredStatus)
}
func (s *MainTestSuite) TestApiClient_WatchAllocationsIgnoresUnhandledEvents() {
nodeEvents := nomadApi.Events{Events: []nomadApi.Event{
{
Topic: nomadApi.TopicNode,
Type: structs.TypeNodeEvent,
},
}}
assertWatchAllocation(s, []*nomadApi.Events{&nodeEvents}, []*nomadApi.Allocation(nil), []string(nil))
}
func (s *MainTestSuite) TestApiClient_WatchAllocationsUsesCallbacksForEvents() {
pendingAllocation := createRecentAllocation(structs.AllocClientStatusPending, structs.AllocDesiredStatusRun)
pendingEvents := nomadApi.Events{Events: []nomadApi.Event{eventForAllocation(s.T(), pendingAllocation)}}
s.Run("it does not add allocation when client status is pending", func() {
assertWatchAllocation(s, []*nomadApi.Events{&pendingEvents}, []*nomadApi.Allocation(nil), []string(nil))
})
startedAllocation := createRecentAllocation(structs.AllocClientStatusRunning, structs.AllocDesiredStatusRun)
startedEvents := nomadApi.Events{Events: []nomadApi.Event{eventForAllocation(s.T(), startedAllocation)}}
pendingStartedEvents := nomadApi.Events{Events: []nomadApi.Event{
eventForAllocation(s.T(), pendingAllocation), eventForAllocation(s.T(), startedAllocation)}}
s.Run("it adds allocation with matching events", func() {
assertWatchAllocation(s, []*nomadApi.Events{&pendingStartedEvents},
[]*nomadApi.Allocation{startedAllocation}, []string(nil))
})
s.Run("it skips heartbeat and adds allocation with matching events", func() {
assertWatchAllocation(s, []*nomadApi.Events{&pendingStartedEvents},
[]*nomadApi.Allocation{startedAllocation}, []string(nil))
})
stoppedAllocation := createRecentAllocation(structs.AllocClientStatusComplete, structs.AllocDesiredStatusStop)
stoppedEvents := nomadApi.Events{Events: []nomadApi.Event{eventForAllocation(s.T(), stoppedAllocation)}}
pendingStartStopEvents := nomadApi.Events{Events: []nomadApi.Event{
eventForAllocation(s.T(), pendingAllocation),
eventForAllocation(s.T(), startedAllocation),
eventForAllocation(s.T(), stoppedAllocation),
}}
s.Run("it adds and deletes the allocation", func() {
assertWatchAllocation(s, []*nomadApi.Events{&pendingStartStopEvents},
[]*nomadApi.Allocation{startedAllocation}, []string{stoppedAllocation.JobID})
})
s.Run("it ignores duplicate events", func() {
assertWatchAllocation(s, []*nomadApi.Events{&pendingEvents, &startedEvents, &startedEvents,
&stoppedEvents, &stoppedEvents, &stoppedEvents},
[]*nomadApi.Allocation{startedAllocation}, []string{startedAllocation.JobID})
})
s.Run("it ignores events of unknown allocations", func() {
assertWatchAllocation(s, []*nomadApi.Events{&startedEvents, &startedEvents,
&stoppedEvents, &stoppedEvents, &stoppedEvents}, []*nomadApi.Allocation(nil), []string(nil))
})
s.Run("it removes restarted allocations", func() {
assertWatchAllocation(s, []*nomadApi.Events{&pendingStartedEvents, &pendingStartedEvents},
[]*nomadApi.Allocation{startedAllocation, startedAllocation}, []string{startedAllocation.JobID})
})
rescheduleAllocation := createRecentAllocation(structs.AllocClientStatusPending, structs.AllocDesiredStatusRun)
rescheduleAllocation.ID = tests.AnotherUUID
rescheduleAllocation.PreviousAllocation = pendingAllocation.ID
rescheduleStartedAllocation := createRecentAllocation(structs.AllocClientStatusRunning, structs.AllocDesiredStatusRun)
rescheduleStartedAllocation.ID = tests.AnotherUUID
rescheduleAllocation.PreviousAllocation = pendingAllocation.ID
rescheduleEvents := nomadApi.Events{Events: []nomadApi.Event{
eventForAllocation(s.T(), rescheduleAllocation), eventForAllocation(s.T(), rescheduleStartedAllocation)}}
s.Run("it removes rescheduled allocations", func() {
assertWatchAllocation(s, []*nomadApi.Events{&pendingStartedEvents, &rescheduleEvents},
[]*nomadApi.Allocation{startedAllocation, rescheduleStartedAllocation}, []string{startedAllocation.JobID})
})
stoppedPendingAllocation := createRecentAllocation(structs.AllocClientStatusPending, structs.AllocDesiredStatusStop)
stoppedPendingEvents := nomadApi.Events{Events: []nomadApi.Event{eventForAllocation(s.T(), stoppedPendingAllocation)}}
s.Run("it does not callback for stopped pending allocations", func() {
assertWatchAllocation(s, []*nomadApi.Events{&pendingEvents, &stoppedPendingEvents},
[]*nomadApi.Allocation(nil), []string(nil))
})
failedAllocation := createRecentAllocation(structs.AllocClientStatusFailed, structs.AllocDesiredStatusStop)
failedEvents := nomadApi.Events{Events: []nomadApi.Event{eventForAllocation(s.T(), failedAllocation)}}
s.Run("it removes stopped failed allocations", func() {
assertWatchAllocation(s, []*nomadApi.Events{&pendingStartedEvents, &failedEvents},
[]*nomadApi.Allocation{startedAllocation}, []string{failedAllocation.JobID})
})
lostAllocation := createRecentAllocation(structs.AllocClientStatusLost, structs.AllocDesiredStatusStop)
lostEvents := nomadApi.Events{Events: []nomadApi.Event{eventForAllocation(s.T(), lostAllocation)}}
s.Run("it removes stopped lost allocations", func() {
assertWatchAllocation(s, []*nomadApi.Events{&pendingStartedEvents, &lostEvents},
[]*nomadApi.Allocation{startedAllocation}, []string{lostAllocation.JobID})
})
rescheduledLostAllocation := createRecentAllocation(structs.AllocClientStatusLost, structs.AllocDesiredStatusStop)
rescheduledLostAllocation.NextAllocation = tests.AnotherUUID
rescheduledLostEvents := nomadApi.Events{Events: []nomadApi.Event{
eventForAllocation(s.T(), rescheduledLostAllocation)}}
s.Run("it removes lost allocations not before the last restart attempt", func() {
assertWatchAllocation(s, []*nomadApi.Events{&pendingStartedEvents, &rescheduledLostEvents},
[]*nomadApi.Allocation{startedAllocation}, []string(nil))
})
}
func (s *MainTestSuite) TestHandleAllocationEventBuffersPendingAllocation() {
s.Run("AllocationUpdated", func() {
newPendingAllocation := createRecentAllocation(structs.AllocClientStatusPending, structs.AllocDesiredStatusRun)
newPendingEvent := eventForAllocation(s.T(), newPendingAllocation)
allocations := storage.NewLocalStorage[*allocationData]()
err := handleAllocationEvent(
time.Now().UnixNano(), allocations, &newPendingEvent, noopAllocationProcessing)
s.Require().NoError(err)
_, ok := allocations.Get(newPendingAllocation.ID)
s.True(ok)
})
s.Run("PlanResult", func() {
newPendingAllocation := createRecentAllocation(structs.AllocClientStatusPending, structs.AllocDesiredStatusRun)
newPendingEvent := eventForAllocation(s.T(), newPendingAllocation)
newPendingEvent.Type = structs.TypePlanResult
allocations := storage.NewLocalStorage[*allocationData]()
err := handleAllocationEvent(
time.Now().UnixNano(), allocations, &newPendingEvent, noopAllocationProcessing)
s.Require().NoError(err)
_, ok := allocations.Get(newPendingAllocation.ID)
s.True(ok)
})
}
func (s *MainTestSuite) TestHandleAllocationEvent_RegressionTest_14_09_2023() {
jobID := "29-6f04b525-5315-11ee-af32-fa163e079f19"
a1ID := "04d86250-550c-62f9-9a21-ecdc3b38773e"
a1Starting := createRecentAllocation(structs.AllocClientStatusPending, structs.AllocDesiredStatusRun)
a1Starting.ID = a1ID
a1Starting.JobID = jobID
// With this event the job is added to the idle runners
a1Running := createRecentAllocation(structs.AllocClientStatusRunning, structs.AllocDesiredStatusRun)
a1Running.ID = a1ID
a1Running.JobID = jobID
// With this event the job is removed from the idle runners
a2ID := "102f282f-376a-1453-4d3d-7d4e32046acd"
a2Starting := createRecentAllocation(structs.AllocClientStatusPending, structs.AllocDesiredStatusRun)
a2Starting.ID = a2ID
a2Starting.PreviousAllocation = a1ID
a2Starting.JobID = jobID
// Because the runner is neither an idle runner nor a used runner, this event triggered the (now removed)
// race condition handling, which neither removed a2 from the allocations nor added a3 to the allocations.
a3ID := "0d8a8ece-cf52-2968-5a9f-e972a4150a6e"
a3Starting := createRecentAllocation(structs.AllocClientStatusPending, structs.AllocDesiredStatusRun)
a3Starting.ID = a3ID
a3Starting.PreviousAllocation = a2ID
a3Starting.JobID = jobID
// a2Stopping was not ignored and led to an unexpected allocation stopping.
a2Stopping := createRecentAllocation(structs.AllocClientStatusPending, structs.AllocDesiredStatusStop)
a2Stopping.ID = a2ID
a2Stopping.PreviousAllocation = a1ID
a2Stopping.NextAllocation = a3ID
a2Stopping.JobID = jobID
// a2Complete was not ignored (wrong behavior).
a2Complete := createRecentAllocation(structs.AllocClientStatusComplete, structs.AllocDesiredStatusStop)
a2Complete.ID = a2ID
a2Complete.PreviousAllocation = a1ID
a2Complete.NextAllocation = a3ID
a2Complete.JobID = jobID
// a3Running was ignored because it was unknown (wrong behavior).
a3Running := createRecentAllocation(structs.AllocClientStatusRunning, structs.AllocDesiredStatusRun)
a3Running.ID = a3ID
a3Running.PreviousAllocation = a2ID
a3Running.JobID = jobID
events := []*nomadApi.Events{{Events: []nomadApi.Event{
eventForAllocation(s.T(), a1Starting),
eventForAllocation(s.T(), a1Running),
eventForAllocation(s.T(), a2Starting),
eventForAllocation(s.T(), a3Starting),
eventForAllocation(s.T(), a2Stopping),
eventForAllocation(s.T(), a2Complete),
eventForAllocation(s.T(), a3Running),
}}}
idleRunner := make(map[string]bool)
callbacks := &AllocationProcessing{
OnNew: func(alloc *nomadApi.Allocation, _ time.Duration) {
idleRunner[alloc.JobID] = true
},
OnDeleted: func(jobID string, _ error) bool {
_, ok := idleRunner[jobID]
delete(idleRunner, jobID)
return !ok
},
}
_, err := runAllocationWatching(s, events, callbacks)
s.NoError(err)
s.True(idleRunner[jobID])
}
func (s *MainTestSuite) TestHandleAllocationEvent_ReportsOOMKilledStatus() {
restartedAllocation := createRecentAllocation(structs.AllocClientStatusPending, structs.AllocDesiredStatusRun)
event := nomadApi.TaskEvent{Details: map[string]string{"oom_killed": "true"}}
state := nomadApi.TaskState{Restarts: 1, Events: []*nomadApi.TaskEvent{&event}}
restartedAllocation.TaskStates = map[string]*nomadApi.TaskState{TaskName: &state}
restartedEvent := eventForAllocation(s.T(), restartedAllocation)
allocations := storage.NewLocalStorage[*allocationData]()
allocations.Add(restartedAllocation.ID, &allocationData{jobID: restartedAllocation.JobID})
var reason error
err := handleAllocationEvent(time.Now().UnixNano(), allocations, &restartedEvent, &AllocationProcessing{
OnNew: func(_ *nomadApi.Allocation, _ time.Duration) {},
OnDeleted: func(_ string, r error) bool {
reason = r
return true
},
})
s.Require().NoError(err)
s.ErrorIs(reason, ErrorOOMKilled)
}
func (s *MainTestSuite) TestAPIClient_WatchAllocationsReturnsErrorWhenAllocationStreamCannotBeRetrieved() {
apiMock := &apiQuerierMock{}
apiMock.On("EventStream", mock.Anything).Return(nil, tests.ErrDefault)
apiClient := &APIClient{apiMock, storage.NewLocalStorage[chan error](), storage.NewLocalStorage[*allocationData](), false}
err := apiClient.WatchEventStream(context.Background(), noopAllocationProcessing)
s.ErrorIs(err, tests.ErrDefault)
}
// Test case: WatchAllocations returns an error when an allocation event cannot be decoded and does not process further events.
func (s *MainTestSuite) TestAPIClient_WatchAllocations() {
event := nomadApi.Event{
Type: structs.TypeAllocationUpdated,
Topic: nomadApi.TopicAllocation,
// This should fail decoding, as Allocation.ID is expected to be a string, not int
Payload: map[string]interface{}{"Allocation": map[string]interface{}{"ID": 1}},
}
_, err := event.Allocation()
s.Require().Error(err)
events := []*nomadApi.Events{{Events: []nomadApi.Event{event}}, {}}
eventsProcessed, err := runAllocationWatching(s, events, noopAllocationProcessing)
s.Error(err)
s.Equal(1, eventsProcessed)
}
func (s *MainTestSuite) TestAPIClient_WatchAllocationsReturnsErrorOnUnexpectedEOF() {
events := []*nomadApi.Events{{Err: ErrUnexpectedEOF}, {}}
eventsProcessed, err := runAllocationWatching(s, events, noopAllocationProcessing)
s.Error(err)
s.Equal(1, eventsProcessed)
}
func assertWatchAllocation(s *MainTestSuite, events []*nomadApi.Events,
expectedNewAllocations []*nomadApi.Allocation, expectedDeletedAllocations []string) {
s.T().Helper()
var newAllocations []*nomadApi.Allocation
var deletedAllocations []string
callbacks := &AllocationProcessing{
OnNew: func(alloc *nomadApi.Allocation, _ time.Duration) {
newAllocations = append(newAllocations, alloc)
},
OnDeleted: func(jobID string, _ error) bool {
deletedAllocations = append(deletedAllocations, jobID)
return false
},
}
eventsProcessed, err := runAllocationWatching(s, events, callbacks)
s.NoError(err)
s.Equal(len(events), eventsProcessed)
s.Equal(expectedNewAllocations, newAllocations)
s.Equal(expectedDeletedAllocations, deletedAllocations)
}
// runAllocationWatching simulates events streamed from the Nomad event stream
// to the allocation watcher. It starts WatchEventStream as a goroutine
// and sequentially transfers the events from the given array to a channel simulating the stream.
func runAllocationWatching(s *MainTestSuite, events []*nomadApi.Events, callbacks *AllocationProcessing) (
eventsProcessed int, err error) {
s.T().Helper()
stream := make(chan *nomadApi.Events)
errChan := asynchronouslyWatchAllocations(stream, callbacks)
return simulateNomadEventStream(s.TestCtx, stream, errChan, events)
}
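// simulateNomadEventStream is referenced above but defined elsewhere. The
// following is an inferred sketch of its contract (an assumption, not the
// verbatim helper): it feeds the prepared events into the stream one by one,
// stops early once the watcher reports an error, and returns how many events
// were consumed.
func simulateNomadEventStreamSketch(ctx context.Context, stream chan *nomadApi.Events,
errChan chan error, events []*nomadApi.Events) (int, error) {
eventsProcessed := 0
for _, event := range events {
select {
case err := <-errChan:
return eventsProcessed, err
case stream <- event:
eventsProcessed++
}
}
close(stream)
// The watcher returns once its stream is closed or the context ends.
select {
case err := <-errChan:
return eventsProcessed, err
case <-ctx.Done():
return eventsProcessed, ctx.Err()
}
}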
// asynchronouslyWatchAllocations creates an APIClient with a mocked Nomad API and
// runs the WatchEventStream method in a goroutine. The mock returns a read-only
// version of the given stream to simulate an event stream obtained from the real
// Nomad API.
func asynchronouslyWatchAllocations(stream chan *nomadApi.Events, callbacks *AllocationProcessing) chan error {
ctx := context.Background()
// Convert the bidirectional channel into a receive-only channel, matching what the real event stream returns.
readOnlyStream := func() <-chan *nomadApi.Events { return stream }()
apiMock := &apiQuerierMock{}
apiMock.On("EventStream", ctx).Return(readOnlyStream, nil)
apiClient := &APIClient{apiMock, storage.NewLocalStorage[chan error](), storage.NewLocalStorage[*allocationData](), false}
errChan := make(chan error)
go func() {
errChan <- apiClient.WatchEventStream(ctx, callbacks)
}()
return errChan
}
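// Aside on the conversion above: a receive-only view of a channel can also be
// obtained by plain assignment or an explicit conversion; returning it from a
// function is just one way to force the conversion inline.
func receiveOnlyExample() {
stream := make(chan *nomadApi.Events)
var byAssignment <-chan *nomadApi.Events = stream // implicit conversion on assignment
byConversion := (<-chan *nomadApi.Events)(stream) // explicit conversion expression
_, _ = byAssignment, byConversion
}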
// eventForAllocation takes an allocation and creates an Event with the given allocation
// as its payload. Nomad uses the mapstructure library to decode the payload, which we
// simply reverse here.
func eventForAllocation(t *testing.T, alloc *nomadApi.Allocation) nomadApi.Event {
t.Helper()
payload := make(map[string]interface{})
err := mapstructure.Decode(eventPayload{Allocation: alloc}, &payload)
if err != nil {
t.Fatalf("Couldn't decode allocation %v into payload map", err)
return nomadApi.Event{}
}
event := nomadApi.Event{
Topic: nomadApi.TopicAllocation,
Type: structs.TypeAllocationUpdated,
Payload: payload,
}
return event
}
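// The decode direction that Nomad performs when consuming such an event: the
// payload map is turned back into the typed struct (per the comment above,
// mapstructure is used under the hood). A short round-trip check:
func eventRoundTripExample(t *testing.T, alloc *nomadApi.Allocation) {
t.Helper()
event := eventForAllocation(t, alloc)
decoded, err := event.Allocation()
if err != nil {
t.Fatalf("couldn't decode event payload: %v", err)
}
if decoded.ID != alloc.ID {
t.Fatalf("round trip changed the allocation ID")
}
}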
func createAllocation(modifyTime int64, clientStatus, desiredStatus string) *nomadApi.Allocation {
return &nomadApi.Allocation{
ID: tests.DefaultUUID,
JobID: tests.DefaultRunnerID,
ModifyTime: modifyTime,
ClientStatus: clientStatus,
DesiredStatus: desiredStatus,
}
}
func createRecentAllocation(clientStatus, desiredStatus string) *nomadApi.Allocation {
return createAllocation(time.Now().Add(time.Minute).UnixNano(), clientStatus, desiredStatus)
}
func TestExecuteCommandTestSuite(t *testing.T) {
suite.Run(t, new(ExecuteCommandTestSuite))
}
type ExecuteCommandTestSuite struct {
tests.MemoryLeakTestSuite
allocationID string
ctx context.Context
testCommand string
expectedStdout string
expectedStderr string
apiMock *apiQuerierMock
nomadAPIClient APIClient
}
func (s *ExecuteCommandTestSuite) SetupTest() {
s.MemoryLeakTestSuite.SetupTest()
s.allocationID = "test-allocation-id"
s.ctx = context.Background()
s.testCommand = "echo \"do nothing\""
s.expectedStdout = "stdout"
s.expectedStderr = "stderr"
s.apiMock = &apiQuerierMock{}
s.nomadAPIClient = APIClient{apiQuerier: s.apiMock}
}
const withTTY = true
func (s *ExecuteCommandTestSuite) TestWithSeparateStderr() {
config.Config.Server.InteractiveStderr = true
commandExitCode := 42
stderrExitCode := 1
var stdout, stderr bytes.Buffer
var calledStdoutCommand, calledStderrCommand string
runFn := func(args mock.Arguments) {
var ok bool
calledCommand, ok := args.Get(2).(string)
s.Require().True(ok)
var out string
if isStderrCommand := strings.Contains(calledCommand, "mkfifo"); isStderrCommand {
calledStderrCommand = calledCommand
out = s.expectedStderr
} else {
calledStdoutCommand = calledCommand
out = s.expectedStdout
}
writer, ok := args.Get(5).(io.Writer)
s.Require().True(ok)
_, err := writer.Write([]byte(out))
s.Require().NoError(err)
}
s.apiMock.On("Execute", s.allocationID, mock.Anything, mock.Anything, withTTY,
mock.AnythingOfType("nullio.Reader"), mock.Anything, mock.Anything).Run(runFn).Return(stderrExitCode, nil)
s.apiMock.On("Execute", s.allocationID, mock.Anything, mock.Anything, withTTY,
mock.AnythingOfType("*bytes.Buffer"), mock.Anything, mock.Anything).Run(runFn).Return(commandExitCode, nil)
exitCode, err := s.nomadAPIClient.ExecuteCommand(s.allocationID, s.ctx, s.testCommand, withTTY,
UnprivilegedExecution, &bytes.Buffer{}, &stdout, &stderr)
s.Require().NoError(err)
s.apiMock.AssertNumberOfCalls(s.T(), "Execute", 2)
s.Equal(commandExitCode, exitCode)
s.Run("should wrap command in stderr wrapper", func() {
s.Require().NotEmpty(calledStdoutCommand)
stderrWrapperCommand := fmt.Sprintf(stderrWrapperCommandFormat, stderrFifoFormat, s.testCommand, stderrFifoFormat)
stdoutFifoRegexp := strings.ReplaceAll(regexp.QuoteMeta(stderrWrapperCommand), "%d", "\\d*")
stdoutFifoRegexp = strings.Replace(stdoutFifoRegexp, s.testCommand, ".*", 1)
s.Regexp(stdoutFifoRegexp, calledStdoutCommand)
})
s.Run("should call correct stderr command", func() {
s.Require().NotEmpty(calledStderrCommand)
stderrFifoCommand := fmt.Sprintf(stderrFifoCommandFormat, stderrFifoFormat, stderrFifoFormat, stderrFifoFormat)
stderrFifoRegexp := strings.ReplaceAll(regexp.QuoteMeta(stderrFifoCommand), "%d", "\\d*")
s.Regexp(stderrFifoRegexp, calledStderrCommand)
})
s.Run("should return correct output", func() {
s.Equal(s.expectedStdout, stdout.String())
s.Equal(s.expectedStderr, stderr.String())
})
}
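// For orientation: the stderr separation exercised above routes stderr through
// a named pipe (FIFO), which is why the mock distinguishes the two Execute
// calls by looking for "mkfifo". The exact format strings
// (stderrFifoFormat, stderrWrapperCommandFormat) live in the production code;
// the values below are illustrative assumptions only.
func stderrFifoSketch(userCommand string) (stderrCommand, wrappedCommand string) {
const fifo = "/tmp/.stderr_1234" // hypothetical path; the real one is derived from stderrFifoFormat
// One Execute call creates and drains the FIFO...
stderrCommand = fmt.Sprintf("mkfifo %s && cat %s; rm %s", fifo, fifo, fifo)
// ...while the other runs the user command with stderr redirected into it.
wrappedCommand = fmt.Sprintf("%s 2> %s", userCommand, fifo)
return stderrCommand, wrappedCommand
}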
func (s *ExecuteCommandTestSuite) TestWithSeparateStderrReturnsCommandError() {
config.Config.Server.InteractiveStderr = true
call := s.mockExecute(mock.AnythingOfType("string"), 0, nil, func(_ mock.Arguments) {})
call.Run(func(args mock.Arguments) {
var ok bool
calledCommand, ok := args.Get(2).(string)
s.Require().True(ok)
if isStderrCommand := strings.Contains(calledCommand, "mkfifo"); isStderrCommand {
// Delay this branch so that the two assignments to ReturnArguments cannot race with each other.
<-time.After(tests.ShortTimeout)
call.ReturnArguments = mock.Arguments{1, nil}
} else {
call.ReturnArguments = mock.Arguments{1, tests.ErrDefault}
}
})
_, err := s.nomadAPIClient.ExecuteCommand(s.allocationID, s.ctx, s.testCommand, withTTY, UnprivilegedExecution,
nullio.Reader{}, io.Discard, io.Discard)
s.Equal(tests.ErrDefault, err)
}
func (s *ExecuteCommandTestSuite) TestWithoutSeparateStderr() {
config.Config.Server.InteractiveStderr = false
var stdout, stderr bytes.Buffer
commandExitCode := 42
// mock regular call
expectedCommand := prepareCommandWithoutTTY(s.testCommand, UnprivilegedExecution)
s.mockExecute(expectedCommand, commandExitCode, nil, func(args mock.Arguments) {
stdout, ok := args.Get(5).(io.Writer)
s.Require().True(ok)
_, err := stdout.Write([]byte(s.expectedStdout))
s.Require().NoError(err)
stderr, ok := args.Get(6).(io.Writer)
s.Require().True(ok)
_, err = stderr.Write([]byte(s.expectedStderr))
s.Require().NoError(err)
})
exitCode, err := s.nomadAPIClient.ExecuteCommand(s.allocationID, s.ctx, s.testCommand, withTTY,
UnprivilegedExecution, nullio.Reader{}, &stdout, &stderr)
s.Require().NoError(err)
s.apiMock.AssertNumberOfCalls(s.T(), "Execute", 1)
s.Equal(commandExitCode, exitCode)
s.Equal(s.expectedStdout, stdout.String())
s.Equal(s.expectedStderr, stderr.String())
}
func (s *ExecuteCommandTestSuite) TestWithoutSeparateStderrReturnsCommandError() {
config.Config.Server.InteractiveStderr = false
expectedCommand := prepareCommandWithoutTTY(s.testCommand, UnprivilegedExecution)
s.mockExecute(expectedCommand, 1, tests.ErrDefault, func(args mock.Arguments) {})
_, err := s.nomadAPIClient.ExecuteCommand(s.allocationID, s.ctx, s.testCommand, withTTY, UnprivilegedExecution,
nullio.Reader{}, io.Discard, io.Discard)
s.ErrorIs(err, tests.ErrDefault)
}
func (s *ExecuteCommandTestSuite) mockExecute(command interface{}, exitCode int,
err error, runFunc func(arguments mock.Arguments)) *mock.Call {
return s.apiMock.On("Execute", s.allocationID, mock.Anything, command, withTTY,
mock.Anything, mock.Anything, mock.Anything).
Run(runFunc).
Return(exitCode, err)
}
func (s *MainTestSuite) TestAPIClient_LoadRunnerPortMappings() {
apiMock := &apiQuerierMock{}
mockedCall := apiMock.On("allocation", tests.DefaultRunnerID)
nomadAPIClient := APIClient{apiQuerier: apiMock}
s.Run("should return error when API query fails", func() {
mockedCall.Return(nil, tests.ErrDefault)
portMappings, err := nomadAPIClient.LoadRunnerPortMappings(tests.DefaultRunnerID)
s.Nil(portMappings)
s.ErrorIs(err, tests.ErrDefault)
})
s.Run("should return error when AllocatedResources is nil", func() {
mockedCall.Return(&nomadApi.Allocation{AllocatedResources: nil}, nil)
portMappings, err := nomadAPIClient.LoadRunnerPortMappings(tests.DefaultRunnerID)
s.ErrorIs(err, ErrorNoAllocatedResourcesFound)
s.Nil(portMappings)
})
s.Run("should correctly return ports", func() {
allocation := &nomadApi.Allocation{
AllocatedResources: &nomadApi.AllocatedResources{
Shared: nomadApi.AllocatedSharedResources{Ports: tests.DefaultPortMappings},
},
}
mockedCall.Return(allocation, nil)
portMappings, err := nomadAPIClient.LoadRunnerPortMappings(tests.DefaultRunnerID)
s.NoError(err)
s.Equal(tests.DefaultPortMappings, portMappings)
})
}

View File

@ -1,52 +0,0 @@
package nomad
import (
"bytes"
)
func (s *MainTestSuite) TestSentryDebugWriter_Write() {
buf := &bytes.Buffer{}
w := SentryDebugWriter{Target: buf, Ctx: s.TestCtx}
description := "TestDebugMessageDescription"
data := "\x1EPoseidon " + description + " 1676646791482\x1E"
count, err := w.Write([]byte(data))
s.Require().NoError(err)
s.Equal(len(data), count)
s.NotContains(buf.String(), description)
}
func (s *MainTestSuite) TestSentryDebugWriter_WriteComposed() {
buf := &bytes.Buffer{}
w := SentryDebugWriter{Target: buf, Ctx: s.TestCtx}
data := "Hello World!\r\n\x1EPoseidon unset 1678540012404\x1E\x1EPoseidon /sbin/setuser user 1678540012408\x1E"
count, err := w.Write([]byte(data))
s.Require().NoError(err)
s.Equal(len(data), count)
s.Contains(buf.String(), "Hello World!")
}
func (s *MainTestSuite) TestSentryDebugWriter_Close() {
buf := &bytes.Buffer{}
w := NewSentryDebugWriter(buf, s.TestCtx)
s.Require().Empty(w.lastSpan.Tags)
w.Close(42)
s.Require().Contains(w.lastSpan.Tags, "exit_code")
s.Equal("42", w.lastSpan.Tags["exit_code"])
}
func (s *MainTestSuite) TestSentryDebugWriter_handleTimeDebugMessage() {
buf := &bytes.Buffer{}
w := NewSentryDebugWriter(buf, s.TestCtx)
s.Require().Equal("nomad.execute.connect", w.lastSpan.Op)
description := "TestDebugMessageDescription"
match := map[string][]byte{"time": []byte("1676646791482"), "text": []byte(description)}
w.handleTimeDebugMessage(match)
s.Equal("nomad.execute.bash", w.lastSpan.Op)
s.Equal(description, w.lastSpan.Description)
}
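// The messages above follow the framing "\x1EPoseidon <description> <unix-millis>\x1E".
// A pattern like the following would match them; the writer's actual regex is
// defined elsewhere and not shown in this file, so treat this as an
// illustrative assumption (requires the regexp import).
var timeDebugMessagePatternSketch = regexp.MustCompile(`\x1EPoseidon (?P<text>.+) (?P<time>\d{13})\x1E`)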

View File

@ -1,48 +0,0 @@
package runner
import (
"context"
"fmt"
"github.com/openHPI/poseidon/pkg/dto"
"time"
)
type AWSRunnerManager struct {
*AbstractManager
}
// NewAWSRunnerManager creates a new runner manager that keeps track of all runners at AWS.
func NewAWSRunnerManager(ctx context.Context) *AWSRunnerManager {
return &AWSRunnerManager{NewAbstractManager(ctx)}
}
func (a AWSRunnerManager) Claim(id dto.EnvironmentID, duration int) (Runner, error) {
environment, ok := a.GetEnvironment(id)
if !ok {
r, err := a.NextHandler().Claim(id, duration)
if err != nil {
return nil, fmt.Errorf("aws wrapped: %w", err)
}
return r, nil
}
runner, ok := environment.Sample()
if !ok {
log.Warn("no aws runner available")
return nil, ErrNoRunnersAvailable
}
a.usedRunners.Add(runner.ID(), runner)
runner.SetupTimeout(time.Duration(duration) * time.Second)
return runner, nil
}
func (a AWSRunnerManager) Return(r Runner) error {
_, isAWSRunner := r.(*AWSFunctionWorkload)
if isAWSRunner {
a.usedRunners.Delete(r.ID())
} else if err := a.NextHandler().Return(r); err != nil {
return fmt.Errorf("aws wrapped: %w", err)
}
return nil
}

View File

@ -1,118 +0,0 @@
package runner
import (
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"testing"
)
type MainTestSuite struct {
tests.MemoryLeakTestSuite
}
func TestMainTestSuite(t *testing.T) {
suite.Run(t, new(MainTestSuite))
}
func (s *MainTestSuite) TestAWSRunnerManager_EnvironmentAccessor() {
m := NewAWSRunnerManager(s.TestCtx)
environments := m.ListEnvironments()
s.Empty(environments)
environment := createBasicEnvironmentMock(defaultEnvironmentID)
m.StoreEnvironment(environment)
environments = m.ListEnvironments()
s.Len(environments, 1)
s.Equal(environments[0].ID(), dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
e, ok := m.GetEnvironment(tests.DefaultEnvironmentIDAsInteger)
s.True(ok)
s.Equal(environment, e)
_, ok = m.GetEnvironment(tests.AnotherEnvironmentIDAsInteger)
s.False(ok)
}
func (s *MainTestSuite) TestAWSRunnerManager_Claim() {
m := NewAWSRunnerManager(s.TestCtx)
environment := createBasicEnvironmentMock(defaultEnvironmentID)
r, err := NewAWSFunctionWorkload(environment, func(_ Runner) error { return nil })
s.NoError(err)
environment.On("Sample").Return(r, true)
m.StoreEnvironment(environment)
s.Run("returns runner for AWS environment", func() {
r, err := m.Claim(tests.DefaultEnvironmentIDAsInteger, 60)
s.NoError(err)
s.NotNil(r)
})
s.Run("forwards request for non-AWS environments", func() {
nextHandler := &ManagerMock{}
nextHandler.On("Claim", mock.AnythingOfType("dto.EnvironmentID"), mock.AnythingOfType("int")).
Return(nil, nil)
m.SetNextHandler(nextHandler)
_, err := m.Claim(tests.AnotherEnvironmentIDAsInteger, 60)
s.Nil(err)
nextHandler.AssertCalled(s.T(), "Claim", dto.EnvironmentID(tests.AnotherEnvironmentIDAsInteger), 60)
})
err = r.Destroy(nil)
s.NoError(err)
}
func (s *MainTestSuite) TestAWSRunnerManager_Return() {
m := NewAWSRunnerManager(s.TestCtx)
environment := createBasicEnvironmentMock(defaultEnvironmentID)
m.StoreEnvironment(environment)
r, err := NewAWSFunctionWorkload(environment, func(_ Runner) error { return nil })
s.NoError(err)
s.Run("removes usedRunner", func() {
m.usedRunners.Add(r.ID(), r)
s.Contains(m.usedRunners.List(), r)
err := m.Return(r)
s.NoError(err)
s.NotContains(m.usedRunners.List(), r)
})
s.Run("calls nextHandler for non-AWS runner", func() {
nextHandler := &ManagerMock{}
nextHandler.On("Return", mock.AnythingOfType("*runner.NomadJob")).Return(nil)
m.SetNextHandler(nextHandler)
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
nonAWSRunner := NewNomadJob(tests.DefaultRunnerID, nil, apiMock, nil)
err := m.Return(nonAWSRunner)
s.NoError(err)
nextHandler.AssertCalled(s.T(), "Return", nonAWSRunner)
err = nonAWSRunner.Destroy(nil)
s.NoError(err)
})
err = r.Destroy(nil)
s.NoError(err)
}
func createBasicEnvironmentMock(id dto.EnvironmentID) *ExecutionEnvironmentMock {
environment := &ExecutionEnvironmentMock{}
environment.On("ID").Return(id)
environment.On("Image").Return("")
environment.On("CPULimit").Return(uint(0))
environment.On("MemoryLimit").Return(uint(0))
environment.On("NetworkAccess").Return(false, nil)
environment.On("DeleteRunner", mock.AnythingOfType("string")).Return(nil, false)
environment.On("ApplyPrewarmingPoolSize").Return(nil)
environment.On("IdleRunnerCount").Return(uint(1)).Maybe()
environment.On("PrewarmingPoolSize").Return(uint(1)).Maybe()
return environment
}

View File

@ -1,246 +0,0 @@
package runner
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/google/uuid"
"github.com/gorilla/websocket"
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/monitoring"
"github.com/openHPI/poseidon/pkg/storage"
"io"
"net/http"
"time"
)
var ErrWrongMessageType = errors.New("received message that is not a text message")
type awsFunctionRequest struct {
Action string `json:"action"`
Cmd []string `json:"cmd"`
Files map[dto.FilePath][]byte `json:"files"`
}
// AWSFunctionWorkload is an abstraction to build a request to an AWS Lambda Function.
// It is not persisted on a Poseidon restart.
// The InactivityTimer is used actively: once it fires, Poseidon stops listening to the Lambda function.
// AWS terminates the Lambda Function after the [Globals.Function.Timeout](deploy/aws/template.yaml).
type AWSFunctionWorkload struct {
InactivityTimer
id string
fs map[dto.FilePath][]byte
executions storage.Storage[*dto.ExecutionRequest]
runningExecutions map[string]context.CancelFunc
onDestroy DestroyRunnerHandler
environment ExecutionEnvironment
ctx context.Context
cancel context.CancelFunc
}
// NewAWSFunctionWorkload creates a new AWSFunctionWorkload with the provided id.
func NewAWSFunctionWorkload(
environment ExecutionEnvironment, onDestroy DestroyRunnerHandler) (*AWSFunctionWorkload, error) {
newUUID, err := uuid.NewUUID()
if err != nil {
return nil, fmt.Errorf("failed generating runner id: %w", err)
}
ctx, cancel := context.WithCancel(context.Background())
workload := &AWSFunctionWorkload{
id: newUUID.String(),
fs: make(map[dto.FilePath][]byte),
runningExecutions: make(map[string]context.CancelFunc),
onDestroy: onDestroy,
environment: environment,
ctx: ctx,
cancel: cancel,
}
workload.executions = storage.NewMonitoredLocalStorage[*dto.ExecutionRequest](
monitoring.MeasurementExecutionsAWS, monitorExecutionsRunnerID(environment.ID(), workload.id), time.Minute, ctx)
workload.InactivityTimer = NewInactivityTimer(workload, func(_ Runner) error {
return workload.Destroy(nil)
})
return workload, nil
}
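// Minimal usage sketch of the workload lifecycle; the identifiers mirror those
// used by the tests of this package, and the execution ID is illustrative.
func awsWorkloadUsageSketch(environment ExecutionEnvironment) error {
workload, err := NewAWSFunctionWorkload(environment, func(_ Runner) error { return nil })
if err != nil {
return err
}
workload.StoreExecution("execution-id", &dto.ExecutionRequest{Command: "true"})
exit, cancel, err := workload.ExecuteInteractively(
"execution-id", nil, io.Discard, io.Discard, context.Background())
if err != nil {
return err
}
defer cancel()
<-exit // wait for the Lambda function to report an exit code
return workload.Destroy(nil)
}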
func (w *AWSFunctionWorkload) ID() string {
return w.id
}
func (w *AWSFunctionWorkload) Environment() dto.EnvironmentID {
return w.environment.ID()
}
func (w *AWSFunctionWorkload) MappedPorts() []*dto.MappedPort {
return []*dto.MappedPort{}
}
func (w *AWSFunctionWorkload) StoreExecution(id string, request *dto.ExecutionRequest) {
w.executions.Add(id, request)
}
func (w *AWSFunctionWorkload) ExecutionExists(id string) bool {
_, ok := w.executions.Get(id)
return ok
}
// ExecuteInteractively runs the execution request in an AWS function.
// It should be further improved by using the passed context to handle lost connections.
func (w *AWSFunctionWorkload) ExecuteInteractively(
id string, _ io.ReadWriter, stdout, stderr io.Writer, _ context.Context) (
<-chan ExitInfo, context.CancelFunc, error) {
w.ResetTimeout()
request, ok := w.executions.Pop(id)
if !ok {
return nil, nil, ErrorUnknownExecution
}
hideEnvironmentVariables(request, "AWS")
request.PrivilegedExecution = true // AWS does not support multiple users at this moment.
command, ctx, cancel := prepareExecution(request, w.ctx)
commands := []string{"/bin/bash", "-c", command}
exitInternal := make(chan ExitInfo)
exit := make(chan ExitInfo, 1)
go w.executeCommand(ctx, commands, stdout, stderr, exitInternal)
go w.handleRunnerTimeout(ctx, exitInternal, exit, id)
return exit, cancel, nil
}
// ListFileSystem is currently not supported with this aws serverless function.
// This is because the function execution ends with the termination of the workload code.
// So an on-demand file system listing after the termination is not possible. Also, we do not want to copy all files.
func (w *AWSFunctionWorkload) ListFileSystem(_ string, _ bool, _ io.Writer, _ bool, _ context.Context) error {
return dto.ErrNotSupported
}
// UpdateFileSystem copies Files into the executor.
// Current limitation: No files can be deleted apart from the previously added files.
// Future Work: Deduplication of the file systems, as the largest workload is likely to be used by additional
// CSV files or similar, which are the same for many executions.
func (w *AWSFunctionWorkload) UpdateFileSystem(request *dto.UpdateFileSystemRequest, _ context.Context) error {
for _, path := range request.Delete {
delete(w.fs, path)
}
for _, file := range request.Copy {
w.fs[file.Path] = file.Content
}
return nil
}
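// Usage sketch: copy a file into the workload's in-memory file system and
// delete it again (file name and content are illustrative).
func updateFileSystemSketch(w *AWSFunctionWorkload) error {
file := dto.File{Path: "main.py", Content: []byte("print('hi')")}
if err := w.UpdateFileSystem(&dto.UpdateFileSystemRequest{Copy: []dto.File{file}}, context.Background()); err != nil {
return err
}
return w.UpdateFileSystem(&dto.UpdateFileSystemRequest{Delete: []dto.FilePath{file.Path}}, context.Background())
}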
// GetFileContent is currently not supported with this aws serverless function.
// This is because the function execution ends with the termination of the workload code.
// So an on-demand file streaming after the termination is not possible. Also, we do not want to copy all files.
func (w *AWSFunctionWorkload) GetFileContent(_ string, _ http.ResponseWriter, _ bool, _ context.Context) error {
return dto.ErrNotSupported
}
func (w *AWSFunctionWorkload) Destroy(_ DestroyReason) error {
w.cancel()
if err := w.onDestroy(w); err != nil {
return fmt.Errorf("error while destroying aws runner: %w", err)
}
return nil
}
func (w *AWSFunctionWorkload) executeCommand(ctx context.Context, command []string,
stdout, stderr io.Writer, exit chan<- ExitInfo,
) {
defer close(exit)
data := &awsFunctionRequest{
Action: w.environment.Image(),
Cmd: command,
Files: w.fs,
}
log.WithContext(ctx).WithField("request", data).Trace("Sending request to AWS")
rawData, err := json.Marshal(data)
if err != nil {
exit <- ExitInfo{uint8(1), fmt.Errorf("cannot stingify aws function request: %w", err)}
return
}
wsConn, response, err := websocket.DefaultDialer.Dial(config.Config.AWS.Endpoint, nil)
if err != nil {
exit <- ExitInfo{uint8(1), fmt.Errorf("failed to establish aws connection: %w", err)}
return
}
_ = response.Body.Close()
defer wsConn.Close()
err = wsConn.WriteMessage(websocket.TextMessage, rawData)
if err != nil {
exit <- ExitInfo{uint8(1), fmt.Errorf("cannot send aws request: %w", err)}
return
}
// receiveOutput blocks until AWS reports an exit code or the context ends.
exitCode, err := w.receiveOutput(wsConn, stdout, stderr, ctx)
// Distinguish the runner inactivity timeout from other context cancellations.
if w.TimeoutPassed() {
err = ErrorRunnerInactivityTimeout
}
exit <- ExitInfo{exitCode, err}
}
func (w *AWSFunctionWorkload) receiveOutput(
conn *websocket.Conn, stdout, stderr io.Writer, ctx context.Context) (uint8, error) {
for ctx.Err() == nil {
messageType, reader, err := conn.NextReader()
if err != nil {
return 1, fmt.Errorf("cannot read from aws connection: %w", err)
}
if messageType != websocket.TextMessage {
return 1, ErrWrongMessageType
}
var wsMessage dto.WebSocketMessage
err = json.NewDecoder(reader).Decode(&wsMessage)
if err != nil {
return 1, fmt.Errorf("failed to decode message from aws: %w", err)
}
log.WithField("msg", wsMessage).Info("New Message from AWS function")
switch wsMessage.Type {
default:
log.WithContext(ctx).WithField("data", wsMessage).Warn("unexpected message from aws function")
case dto.WebSocketExit:
return wsMessage.ExitCode, nil
case dto.WebSocketOutputStdout:
// We do not check the written bytes as the rawToCodeOceanWriter receives everything or nothing.
_, err = stdout.Write([]byte(wsMessage.Data))
case dto.WebSocketOutputStderr, dto.WebSocketOutputError:
_, err = stderr.Write([]byte(wsMessage.Data))
}
if err != nil {
return 1, fmt.Errorf("failed to forward message: %w", err)
}
}
return 1, fmt.Errorf("receiveOutput stpped by context: %w", ctx.Err())
}
// handleRunnerTimeout listens for a runner timeout and aborts the execution in that case.
// It listens via a context in runningExecutions that is canceled on the timeout event.
func (w *AWSFunctionWorkload) handleRunnerTimeout(ctx context.Context,
exitInternal <-chan ExitInfo, exit chan<- ExitInfo, executionID string) {
executionCtx, cancelExecution := context.WithCancel(ctx)
w.runningExecutions[executionID] = cancelExecution
defer delete(w.runningExecutions, executionID)
defer close(exit)
select {
case exitInfo := <-exitInternal:
exit <- exitInfo
case <-executionCtx.Done():
exit <- ExitInfo{255, ErrorRunnerInactivityTimeout}
}
}
// hideEnvironmentVariables initializes the environment map if necessary and prefixes the command
// so that all variables starting with the passed prefix are unset before execution.
func hideEnvironmentVariables(request *dto.ExecutionRequest, unsetPrefix string) {
if request.Environment == nil {
request.Environment = make(map[string]string)
}
request.Command = "unset \"${!" + unsetPrefix + "@}\" && " + request.Command
}
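// Example: for unsetPrefix "AWS", a request command `env` becomes
//   unset "${!AWS@}" && env
// i.e. every variable whose name starts with AWS is unset before the command runs.
func hideEnvironmentVariablesExample() {
request := &dto.ExecutionRequest{Command: "env"}
hideEnvironmentVariables(request, "AWS")
_ = request.Command // now `unset "${!AWS@}" && env`
}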

View File

@ -1,165 +0,0 @@
package runner
import (
"context"
"encoding/base64"
"github.com/gorilla/websocket"
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests"
"io"
"net/http"
"net/http/httptest"
"strings"
"time"
)
func (s *MainTestSuite) TestAWSExecutionRequestIsStored() {
environment := &ExecutionEnvironmentMock{}
environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
r, err := NewAWSFunctionWorkload(environment, func(_ Runner) error { return nil })
s.NoError(err)
executionRequest := &dto.ExecutionRequest{
Command: "command",
TimeLimit: 10,
Environment: nil,
}
r.StoreExecution(tests.DefaultEnvironmentIDAsString, executionRequest)
s.True(r.ExecutionExists(tests.DefaultEnvironmentIDAsString))
storedExecutionRunner, ok := r.executions.Pop(tests.DefaultEnvironmentIDAsString)
s.True(ok, "Getting an execution should not return ok false")
s.Equal(executionRequest, storedExecutionRunner)
err = r.Destroy(nil)
s.NoError(err)
}
type awsEndpointMock struct {
hasConnected bool
ctx context.Context
receivedData string
}
func (a *awsEndpointMock) handler(w http.ResponseWriter, r *http.Request) {
upgrader := websocket.Upgrader{}
c, err := upgrader.Upgrade(w, r, nil)
if err != nil {
return
}
defer c.Close()
a.hasConnected = true
for a.ctx.Err() == nil {
_, message, err := c.ReadMessage()
if err != nil {
break
}
a.receivedData = string(message)
}
}
func (s *MainTestSuite) TestAWSFunctionWorkload_ExecuteInteractively() {
environment := &ExecutionEnvironmentMock{}
environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
environment.On("Image").Return("testImage or AWS endpoint")
r, err := NewAWSFunctionWorkload(environment, func(_ Runner) error { return nil })
s.Require().NoError(err)
var cancel context.CancelFunc
awsMock := &awsEndpointMock{}
sv := httptest.NewServer(http.HandlerFunc(awsMock.handler))
defer sv.Close()
s.Run("establishes WebSocket connection to AWS endpoint", func() {
// Convert http://127.0.0.1 to ws://127.0.0.1
config.Config.AWS.Endpoint = "ws" + strings.TrimPrefix(sv.URL, "http")
awsMock.ctx, cancel = context.WithCancel(context.Background())
cancel()
r.StoreExecution(tests.DefaultEnvironmentIDAsString, &dto.ExecutionRequest{})
exit, _, err := r.ExecuteInteractively(
tests.DefaultEnvironmentIDAsString, nil, io.Discard, io.Discard, s.TestCtx)
s.Require().NoError(err)
<-exit
s.True(awsMock.hasConnected)
})
s.Run("sends execution request", func() {
s.T().Skip("The AWS runner ignores its context for executions and waits infinetly for the exit message.") // ToDo
awsMock.ctx, cancel = context.WithTimeout(context.Background(), tests.ShortTimeout)
defer cancel()
command := "sl"
request := &dto.ExecutionRequest{Command: command}
r.StoreExecution(tests.DefaultEnvironmentIDAsString, request)
_, cancel, err := r.ExecuteInteractively(
tests.DefaultEnvironmentIDAsString, nil, io.Discard, io.Discard, s.TestCtx)
s.Require().NoError(err)
<-time.After(tests.ShortTimeout)
cancel()
expectedRequestData := `{"action":"` + environment.Image() +
`","cmd":["/bin/bash","-c","env CODEOCEAN=true /bin/bash -c \"unset \\\"\\${!AWS@}\\\" \u0026\u0026 ` + command +
`\""],"files":{}}`
s.Equal(expectedRequestData, awsMock.receivedData)
})
err = r.Destroy(nil)
s.NoError(err)
}
func (s *MainTestSuite) TestAWSFunctionWorkload_UpdateFileSystem() {
s.T().Skip("The AWS runner ignores its context for executions and waits infinetly for the exit message.") // ToDo
environment := &ExecutionEnvironmentMock{}
environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
environment.On("Image").Return("testImage or AWS endpoint")
r, err := NewAWSFunctionWorkload(environment, nil)
s.Require().NoError(err)
var cancel context.CancelFunc
awsMock := &awsEndpointMock{}
sv := httptest.NewServer(http.HandlerFunc(awsMock.handler))
defer sv.Close()
// Convert http://127.0.0.1 to ws://127.0.0.1
config.Config.AWS.Endpoint = "ws" + strings.TrimPrefix(sv.URL, "http")
awsMock.ctx, cancel = context.WithTimeout(context.Background(), tests.ShortTimeout)
defer cancel()
command := "sl"
request := &dto.ExecutionRequest{Command: command}
r.StoreExecution(tests.DefaultEnvironmentIDAsString, request)
myFile := dto.File{Path: "myPath", Content: []byte("myContent")}
err = r.UpdateFileSystem(&dto.UpdateFileSystemRequest{Copy: []dto.File{myFile}}, s.TestCtx)
s.NoError(err)
_, execCancel, err := r.ExecuteInteractively(
tests.DefaultEnvironmentIDAsString, nil, io.Discard, io.Discard, s.TestCtx)
s.Require().NoError(err)
<-time.After(tests.ShortTimeout)
execCancel()
expectedRequestData := `{"action":"` + environment.Image() +
`","cmd":["/bin/bash","-c","env CODEOCEAN=true /bin/bash -c \"unset \\\"\\${!AWS@}\\\" \u0026\u0026 ` + command +
`\""],"files":{"` + string(myFile.Path) + `":"` + base64.StdEncoding.EncodeToString(myFile.Content) + `"}}`
s.Equal(expectedRequestData, awsMock.receivedData)
err = r.Destroy(nil)
s.NoError(err)
}
func (s *MainTestSuite) TestAWSFunctionWorkload_Destroy() {
environment := &ExecutionEnvironmentMock{}
environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
hasDestroyBeenCalled := false
r, err := NewAWSFunctionWorkload(environment, func(_ Runner) error {
hasDestroyBeenCalled = true
return nil
})
s.Require().NoError(err)
var reason error
err = r.Destroy(reason)
s.NoError(err)
s.True(hasDestroyBeenCalled)
}

View File

@ -1,12 +0,0 @@
package runner
import (
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/tests"
)
const (
defaultEnvironmentID = dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger)
anotherEnvironmentID = dto.EnvironmentID(tests.AnotherEnvironmentIDAsInteger)
defaultInactivityTimeout = 0
)

View File

@ -1,349 +0,0 @@
// Code generated by mockery v2.43.2. DO NOT EDIT.
package runner
import (
dto "github.com/openHPI/poseidon/pkg/dto"
mock "github.com/stretchr/testify/mock"
)
// ExecutionEnvironmentMock is an autogenerated mock type for the ExecutionEnvironment type
type ExecutionEnvironmentMock struct {
mock.Mock
}
// AddRunner provides a mock function with given fields: r
func (_m *ExecutionEnvironmentMock) AddRunner(r Runner) {
_m.Called(r)
}
// ApplyPrewarmingPoolSize provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) ApplyPrewarmingPoolSize() error {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for ApplyPrewarmingPoolSize")
}
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// CPULimit provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) CPULimit() uint {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for CPULimit")
}
var r0 uint
if rf, ok := ret.Get(0).(func() uint); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(uint)
}
return r0
}
// Delete provides a mock function with given fields: reason
func (_m *ExecutionEnvironmentMock) Delete(reason DestroyReason) error {
ret := _m.Called(reason)
if len(ret) == 0 {
panic("no return value specified for Delete")
}
var r0 error
if rf, ok := ret.Get(0).(func(DestroyReason) error); ok {
r0 = rf(reason)
} else {
r0 = ret.Error(0)
}
return r0
}
// DeleteRunner provides a mock function with given fields: id
func (_m *ExecutionEnvironmentMock) DeleteRunner(id string) (Runner, bool) {
ret := _m.Called(id)
if len(ret) == 0 {
panic("no return value specified for DeleteRunner")
}
var r0 Runner
var r1 bool
if rf, ok := ret.Get(0).(func(string) (Runner, bool)); ok {
return rf(id)
}
if rf, ok := ret.Get(0).(func(string) Runner); ok {
r0 = rf(id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(Runner)
}
}
if rf, ok := ret.Get(1).(func(string) bool); ok {
r1 = rf(id)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// ID provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) ID() dto.EnvironmentID {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for ID")
}
var r0 dto.EnvironmentID
if rf, ok := ret.Get(0).(func() dto.EnvironmentID); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(dto.EnvironmentID)
}
return r0
}
// IdleRunnerCount provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) IdleRunnerCount() uint {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for IdleRunnerCount")
}
var r0 uint
if rf, ok := ret.Get(0).(func() uint); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(uint)
}
return r0
}
// Image provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) Image() string {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Image")
}
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// MarshalJSON provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) MarshalJSON() ([]byte, error) {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for MarshalJSON")
}
var r0 []byte
var r1 error
if rf, ok := ret.Get(0).(func() ([]byte, error)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() []byte); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]byte)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MemoryLimit provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) MemoryLimit() uint {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for MemoryLimit")
}
var r0 uint
if rf, ok := ret.Get(0).(func() uint); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(uint)
}
return r0
}
// NetworkAccess provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) NetworkAccess() (bool, []uint16) {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for NetworkAccess")
}
var r0 bool
var r1 []uint16
if rf, ok := ret.Get(0).(func() (bool, []uint16)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
if rf, ok := ret.Get(1).(func() []uint16); ok {
r1 = rf()
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).([]uint16)
}
}
return r0, r1
}
// PrewarmingPoolSize provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) PrewarmingPoolSize() uint {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for PrewarmingPoolSize")
}
var r0 uint
if rf, ok := ret.Get(0).(func() uint); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(uint)
}
return r0
}
// Register provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) Register() error {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Register")
}
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// Sample provides a mock function with given fields:
func (_m *ExecutionEnvironmentMock) Sample() (Runner, bool) {
ret := _m.Called()
if len(ret) == 0 {
panic("no return value specified for Sample")
}
var r0 Runner
var r1 bool
if rf, ok := ret.Get(0).(func() (Runner, bool)); ok {
return rf()
}
if rf, ok := ret.Get(0).(func() Runner); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(Runner)
}
}
if rf, ok := ret.Get(1).(func() bool); ok {
r1 = rf()
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// SetCPULimit provides a mock function with given fields: limit
func (_m *ExecutionEnvironmentMock) SetCPULimit(limit uint) {
_m.Called(limit)
}
// SetConfigFrom provides a mock function with given fields: environment
func (_m *ExecutionEnvironmentMock) SetConfigFrom(environment ExecutionEnvironment) {
_m.Called(environment)
}
// SetID provides a mock function with given fields: id
func (_m *ExecutionEnvironmentMock) SetID(id dto.EnvironmentID) {
_m.Called(id)
}
// SetImage provides a mock function with given fields: image
func (_m *ExecutionEnvironmentMock) SetImage(image string) {
_m.Called(image)
}
// SetMemoryLimit provides a mock function with given fields: limit
func (_m *ExecutionEnvironmentMock) SetMemoryLimit(limit uint) {
_m.Called(limit)
}
// SetNetworkAccess provides a mock function with given fields: allow, ports
func (_m *ExecutionEnvironmentMock) SetNetworkAccess(allow bool, ports []uint16) {
_m.Called(allow, ports)
}
// SetPrewarmingPoolSize provides a mock function with given fields: count
func (_m *ExecutionEnvironmentMock) SetPrewarmingPoolSize(count uint) {
_m.Called(count)
}
// NewExecutionEnvironmentMock creates a new instance of ExecutionEnvironmentMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewExecutionEnvironmentMock(t interface {
mock.TestingT
Cleanup(func())
}) *ExecutionEnvironmentMock {
mock := &ExecutionEnvironmentMock{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -1,43 +0,0 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
package runner
import (
time "time"
mock "github.com/stretchr/testify/mock"
)
// InactivityTimerMock is an autogenerated mock type for the InactivityTimer type
type InactivityTimerMock struct {
mock.Mock
}
// ResetTimeout provides a mock function with given fields:
func (_m *InactivityTimerMock) ResetTimeout() {
_m.Called()
}
// SetupTimeout provides a mock function with given fields: duration
func (_m *InactivityTimerMock) SetupTimeout(duration time.Duration) {
_m.Called(duration)
}
// StopTimeout provides a mock function with given fields:
func (_m *InactivityTimerMock) StopTimeout() {
_m.Called()
}
// TimeoutPassed provides a mock function with given fields:
func (_m *InactivityTimerMock) TimeoutPassed() bool {
ret := _m.Called()
var r0 bool
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}

View File

@ -1,95 +0,0 @@
package runner
import (
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/tests"
"github.com/stretchr/testify/suite"
"testing"
"time"
)
func TestInactivityTimerTestSuite(t *testing.T) {
suite.Run(t, new(InactivityTimerTestSuite))
}
type InactivityTimerTestSuite struct {
tests.MemoryLeakTestSuite
runner Runner
returned chan bool
}
func (s *InactivityTimerTestSuite) SetupTest() {
s.MemoryLeakTestSuite.SetupTest()
s.returned = make(chan bool, 1)
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("DeleteJob", tests.DefaultRunnerID).Return(nil)
s.runner = NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error {
s.returned <- true
return nil
})
s.runner.SetupTimeout(tests.ShortTimeout)
}
func (s *InactivityTimerTestSuite) TearDownTest() {
defer s.MemoryLeakTestSuite.TearDownTest()
go func() {
select {
case <-s.returned:
case <-time.After(tests.ShortTimeout):
}
}()
err := s.runner.Destroy(nil)
s.Require().NoError(err)
}
func (s *InactivityTimerTestSuite) TestRunnerIsReturnedAfterTimeout() {
s.True(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
}
func (s *InactivityTimerTestSuite) TestRunnerIsNotReturnedBeforeTimeout() {
s.False(tests.ChannelReceivesSomething(s.returned, tests.ShortTimeout/2))
}
func (s *InactivityTimerTestSuite) TestResetTimeoutExtendsTheDeadline() {
time.Sleep(3 * tests.ShortTimeout / 4)
s.runner.ResetTimeout()
s.False(tests.ChannelReceivesSomething(s.returned, 3*tests.ShortTimeout/4),
"Because of the reset, the timeout should not be reached by now.")
s.True(tests.ChannelReceivesSomething(s.returned, 5*tests.ShortTimeout/4),
"After reset, the timout should be reached by now.")
}
func (s *InactivityTimerTestSuite) TestStopTimeoutStopsTimeout() {
s.runner.StopTimeout()
s.False(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
}
func (s *InactivityTimerTestSuite) TestTimeoutPassedReturnsFalseBeforeDeadline() {
s.False(s.runner.TimeoutPassed())
}
func (s *InactivityTimerTestSuite) TestTimeoutPassedReturnsTrueAfterDeadline() {
<-time.After(2 * tests.ShortTimeout)
s.True(s.runner.TimeoutPassed())
}
func (s *InactivityTimerTestSuite) TestTimerIsNotResetAfterDeadline() {
time.Sleep(2 * tests.ShortTimeout)
// We need to empty the returned channel so Return can send to it again.
tests.ChannelReceivesSomething(s.returned, 0)
s.runner.ResetTimeout()
s.False(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
}
func (s *InactivityTimerTestSuite) TestSetupTimeoutStopsOldTimeout() {
s.runner.SetupTimeout(3 * tests.ShortTimeout)
s.False(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
s.True(tests.ChannelReceivesSomething(s.returned, 2*tests.ShortTimeout))
}
func (s *InactivityTimerTestSuite) TestTimerIsInactiveWhenDurationIsZero() {
s.runner.SetupTimeout(0)
s.False(tests.ChannelReceivesSomething(s.returned, tests.ShortTimeout))
}

View File

@ -0,0 +1,125 @@
package runner
import (
"context"
"fmt"
"github.com/openHPI/poseidon/internal/kubernetes"
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/storage"
"github.com/openHPI/poseidon/pkg/util"
appv1 "k8s.io/api/apps/v1"
"strconv"
"time"
)
type KubernetesRunnerManager struct {
*AbstractManager
apiClient kubernetes.ExecutorAPI
reloadingEnvironment storage.Storage[*alertData]
}
func NewKubernetesRunnerManager(apiClient kubernetes.ExecutorAPI, ctx context.Context) *KubernetesRunnerManager {
return &KubernetesRunnerManager{
AbstractManager: NewAbstractManager(ctx),
apiClient: apiClient,
reloadingEnvironment: storage.NewLocalStorage[*alertData](),
}
}
// Load recovers all runners for all existing environments.
func (k *KubernetesRunnerManager) Load() {
log.Info("Loading runners")
newUsedRunners := storage.NewLocalStorage[Runner]()
for _, environment := range k.ListEnvironments() {
usedRunners, err := k.loadEnvironment(environment)
if err != nil {
log.WithError(err).WithField(dto.KeyEnvironmentID, environment.ID().ToString()).
Warn("Failed loading environment. Skipping...")
continue
}
for _, r := range usedRunners.List() {
newUsedRunners.Add(r.ID(), r)
}
}
// TODO: updating the used runners is not implemented yet; see the sketch after this function.
// k.updateUsedRunners(newUsedRunners, true)
}
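// Sketch of the missing updateUsedRunners, assuming semantics analogous to the
// Nomad runner manager: drop (and locally destroy) used runners that were not
// recovered, then adopt the freshly recovered set. This is an assumption, not
// the final implementation.
func (k *KubernetesRunnerManager) updateUsedRunnersSketch(newUsedRunners storage.Storage[Runner]) {
for _, r := range k.usedRunners.List() {
if _, ok := newUsedRunners.Get(r.ID()); !ok {
if err := r.Destroy(nil); err != nil {
log.WithError(err).WithField(dto.KeyRunnerID, r.ID()).Warn("failed to destroy vanished runner")
}
k.usedRunners.Delete(r.ID())
}
}
for _, r := range newUsedRunners.List() {
k.usedRunners.Add(r.ID(), r)
}
}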
func (k *KubernetesRunnerManager) loadEnvironment(environment ExecutionEnvironment) (used storage.Storage[Runner], err error) {
used = storage.NewLocalStorage[Runner]()
runnerJobs, err := k.apiClient.LoadRunnerJobs(environment.ID())
if err != nil {
return nil, fmt.Errorf("failed fetching the runner jobs: %w", err)
}
for _, job := range runnerJobs {
r, isUsed, err := k.loadSingleJob(job, environment)
if err != nil {
log.WithError(err).WithField(dto.KeyEnvironmentID, environment.ID().ToString()).
WithField("used", isUsed).Warn("Failed loading job. Skipping...")
continue
} else if isUsed {
used.Add(r.ID(), r)
}
}
err = environment.ApplyPrewarmingPoolSize()
if err != nil {
return used, fmt.Errorf("couldn't scale environment: %w", err)
}
return used, nil
}
func (k *KubernetesRunnerManager) loadSingleJob(deployment *appv1.Deployment, environment ExecutionEnvironment) (r Runner, isUsed bool, err error) {
// The deployment's pod template carries the runner metadata as annotations.
configTaskGroup := deployment.Spec.Template
isUsed = configTaskGroup.Annotations[nomad.ConfigMetaUsedKey] == nomad.ConfigMetaUsedValue
portMappings, err := k.apiClient.LoadRunnerPortMappings(deployment.Name)
if err != nil {
return nil, false, fmt.Errorf("error loading runner portMappings: %w", err)
}
newJob := NewKubernetesDeployment(deployment.Name, portMappings, k.apiClient, k.onRunnerDestroyed)
log.WithField("isUsed", isUsed).WithField(dto.KeyRunnerID, newJob.ID()).Debug("Recovered Runner")
if isUsed {
timeout, err := strconv.Atoi(configTaskGroup.ObjectMeta.Annotations[nomad.ConfigMetaTimeoutKey])
if err != nil {
log.WithField(dto.KeyRunnerID, newJob.ID()).WithError(err).Warn("failed loading timeout from meta values")
timeout = int(nomad.RunnerTimeoutFallback.Seconds())
go k.markRunnerAsUsed(newJob, timeout)
}
newJob.SetupTimeout(time.Duration(timeout) * time.Second)
} else {
environment.AddRunner(newJob)
}
return newJob, isUsed, nil
}
func (k *KubernetesRunnerManager) markRunnerAsUsed(runner Runner, timeoutDuration int) {
err := util.RetryExponential(func() (err error) {
if err = k.apiClient.MarkRunnerAsUsed(runner.ID(), timeoutDuration); err != nil {
err = fmt.Errorf("cannot mark runner as used: %w", err)
}
return
})
if err != nil {
log.WithError(err).WithField(dto.KeyRunnerID, runner.ID()).Error("cannot mark runner as used")
err := k.Return(runner)
if err != nil {
log.WithError(err).WithField(dto.KeyRunnerID, runner.ID()).Error("can't mark runner as used and can't return runner")
}
}
}
func (k *KubernetesRunnerManager) onRunnerDestroyed(r Runner) error {
k.usedRunners.Delete(r.ID())
environment, ok := k.GetEnvironment(r.Environment())
if ok {
environment.DeleteRunner(r.ID())
}
return nil
}
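// Example wiring (an illustrative sketch, not part of the adapter itself):
// environments must be registered via StoreEnvironment before Load can
// recover their runners. The client value is an assumed
// kubernetes.ExecutorAPI implementation.
//
//	func setupKubernetesRunners(ctx context.Context, client kubernetes.ExecutorAPI) *KubernetesRunnerManager {
//		manager := NewKubernetesRunnerManager(client, ctx)
//		// register environments here via manager.StoreEnvironment(...)
//		manager.Load() // recover used and idle runners from existing deployments
//		return manager
//	}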

View File

@ -0,0 +1,107 @@
package runner
import (
"context"
"fmt"
"github.com/openHPI/poseidon/internal/kubernetes"
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/monitoring"
"github.com/openHPI/poseidon/pkg/storage"
"io"
v1 "k8s.io/api/core/v1"
"net/http"
"time"
)
// KubernetesDeployment is an abstraction to communicate with Kubernetes execution environments.
type KubernetesDeployment struct {
InactivityTimer
executions storage.Storage[*dto.ExecutionRequest]
id string
portMappings []v1.ContainerPort
api kubernetes.ExecutorAPI
onDestroy DestroyRunnerHandler
ctx context.Context
cancel context.CancelFunc
}
func (r *KubernetesDeployment) MappedPorts() []*dto.MappedPort {
//TODO implement me
panic("implement me")
}
func (r *KubernetesDeployment) StoreExecution(id string, executionRequest *dto.ExecutionRequest) {
//TODO implement me
panic("implement me")
}
func (r *KubernetesDeployment) ExecutionExists(id string) bool {
//TODO implement me
panic("implement me")
}
func (r *KubernetesDeployment) ExecuteInteractively(id string, stdin io.ReadWriter, stdout, stderr io.Writer, ctx context.Context) (exit <-chan ExitInfo, cancel context.CancelFunc, err error) {
//TODO implement me
panic("implement me")
}
func (r *KubernetesDeployment) ListFileSystem(path string, recursive bool, result io.Writer, privilegedExecution bool, ctx context.Context) error {
//TODO implement me
panic("implement me")
}
func (r *KubernetesDeployment) UpdateFileSystem(request *dto.UpdateFileSystemRequest, ctx context.Context) error {
//TODO implement me
panic("implement me")
}
func (r *KubernetesDeployment) GetFileContent(path string, content http.ResponseWriter, privilegedExecution bool, ctx context.Context) error {
//TODO implement me
panic("implement me")
}
func (r *KubernetesDeployment) Destroy(reason DestroyReason) error {
//TODO implement me
panic("implement me")
}
func (r *KubernetesDeployment) ID() string {
return r.id
}
func (r *KubernetesDeployment) Environment() dto.EnvironmentID {
id, err := nomad.EnvironmentIDFromRunnerID(r.ID())
if err != nil {
log.WithError(err).Error("Runners must have correct IDs")
}
return id
}
// NewKubernetesDeployment creates a new KubernetesDeployment with the provided id.
// The InactivityTimer is used actively. It executes onDestroy when it has expired.
// The timeout is persisted as a deployment annotation by the runner manager's Claim function.
func NewKubernetesDeployment(id string, portMappings []v1.ContainerPort,
apiClient kubernetes.ExecutorAPI, onDestroy DestroyRunnerHandler,
) *KubernetesDeployment {
ctx := context.WithValue(context.Background(), dto.ContextKey(dto.KeyRunnerID), id)
ctx, cancel := context.WithCancel(ctx)
job := &KubernetesDeployment{
id: id,
portMappings: portMappings,
api: apiClient,
onDestroy: onDestroy,
ctx: ctx,
cancel: cancel,
}
job.executions = storage.NewMonitoredLocalStorage[*dto.ExecutionRequest](
monitoring.MeasurementExecutionsNomad, monitorExecutionsRunnerID(job.Environment(), id), time.Minute, ctx)
job.InactivityTimer = NewInactivityTimer(job, func(r Runner) error {
err := r.Destroy(ErrorRunnerInactivityTimeout)
if err != nil {
err = fmt.Errorf("NomadJob: %w", err)
}
return err
})
return job
}
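// Example construction (an illustrative sketch; the identifiers runnerID,
// apiClient, and onDestroy are assumptions): the onDestroy handler is the
// hook through which the runner manager drops the deployment from its
// bookkeeping once the inactivity timer fires.
//
//	deployment := NewKubernetesDeployment(runnerID, []v1.ContainerPort{}, apiClient, onDestroy)
//	deployment.SetupTimeout(5 * time.Minute) // arm the inactivity timer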

View File

@ -1,178 +0,0 @@
// Code generated by mockery v2.10.0. DO NOT EDIT.
package runner
import (
dto "github.com/openHPI/poseidon/pkg/dto"
mock "github.com/stretchr/testify/mock"
)
// ManagerMock is an autogenerated mock type for the Manager type
type ManagerMock struct {
mock.Mock
}
// Claim provides a mock function with given fields: id, duration
func (_m *ManagerMock) Claim(id dto.EnvironmentID, duration int) (Runner, error) {
ret := _m.Called(id, duration)
var r0 Runner
if rf, ok := ret.Get(0).(func(dto.EnvironmentID, int) Runner); ok {
r0 = rf(id, duration)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(Runner)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(dto.EnvironmentID, int) error); ok {
r1 = rf(id, duration)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// DeleteEnvironment provides a mock function with given fields: id
func (_m *ManagerMock) DeleteEnvironment(id dto.EnvironmentID) {
_m.Called(id)
}
// EnvironmentStatistics provides a mock function with given fields:
func (_m *ManagerMock) EnvironmentStatistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData {
ret := _m.Called()
var r0 map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData
if rf, ok := ret.Get(0).(func() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData)
}
}
return r0
}
// Get provides a mock function with given fields: runnerID
func (_m *ManagerMock) Get(runnerID string) (Runner, error) {
ret := _m.Called(runnerID)
var r0 Runner
if rf, ok := ret.Get(0).(func(string) Runner); ok {
r0 = rf(runnerID)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(Runner)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(runnerID)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetEnvironment provides a mock function with given fields: id
func (_m *ManagerMock) GetEnvironment(id dto.EnvironmentID) (ExecutionEnvironment, bool) {
ret := _m.Called(id)
var r0 ExecutionEnvironment
if rf, ok := ret.Get(0).(func(dto.EnvironmentID) ExecutionEnvironment); ok {
r0 = rf(id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(ExecutionEnvironment)
}
}
var r1 bool
if rf, ok := ret.Get(1).(func(dto.EnvironmentID) bool); ok {
r1 = rf(id)
} else {
r1 = ret.Get(1).(bool)
}
return r0, r1
}
// HasNextHandler provides a mock function with given fields:
func (_m *ManagerMock) HasNextHandler() bool {
ret := _m.Called()
var r0 bool
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// ListEnvironments provides a mock function with given fields:
func (_m *ManagerMock) ListEnvironments() []ExecutionEnvironment {
ret := _m.Called()
var r0 []ExecutionEnvironment
if rf, ok := ret.Get(0).(func() []ExecutionEnvironment); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]ExecutionEnvironment)
}
}
return r0
}
// Load provides a mock function with given fields:
func (_m *ManagerMock) Load() {
_m.Called()
}
// NextHandler provides a mock function with given fields:
func (_m *ManagerMock) NextHandler() AccessorHandler {
ret := _m.Called()
var r0 AccessorHandler
if rf, ok := ret.Get(0).(func() AccessorHandler); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(AccessorHandler)
}
}
return r0
}
// Return provides a mock function with given fields: r
func (_m *ManagerMock) Return(r Runner) error {
ret := _m.Called(r)
var r0 error
if rf, ok := ret.Get(0).(func(Runner) error); ok {
r0 = rf(r)
} else {
r0 = ret.Error(0)
}
return r0
}
// SetNextHandler provides a mock function with given fields: m
func (_m *ManagerMock) SetNextHandler(m AccessorHandler) {
_m.Called(m)
}
// StoreEnvironment provides a mock function with given fields: environment
func (_m *ManagerMock) StoreEnvironment(environment ExecutionEnvironment) {
_m.Called(environment)
}
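// Typical testify usage of this mock (an illustrative sketch; the runner and
// t values are assumptions):
//
//	m := &ManagerMock{}
//	m.On("Claim", dto.EnvironmentID(0), 60).Return(runner, nil)
//	claimed, err := m.Claim(dto.EnvironmentID(0), 60)
//	// ... assert on claimed and err ...
//	m.AssertExpectations(t)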

View File

@ -42,6 +42,7 @@ func NewNomadRunnerManager(apiClient nomad.ExecutorAPI, ctx context.Context) *No
return &NomadRunnerManager{NewAbstractManager(ctx), apiClient, storage.NewLocalStorage[*alertData]()}
}
// Claim returns a runner for the given environment. The runner will be marked as used for the given duration.
func (m *NomadRunnerManager) Claim(environmentID dto.EnvironmentID, duration int) (Runner, error) {
environment, ok := m.GetEnvironment(environmentID)
if !ok {
@ -185,6 +186,7 @@ func (m *NomadRunnerManager) checkPrewarmingPoolAlert(environment ExecutionEnvir
func (m *NomadRunnerManager) loadEnvironment(environment ExecutionEnvironment) (used storage.Storage[Runner], err error) {
used = storage.NewLocalStorage[Runner]()
runnerJobs, err := m.apiClient.LoadRunnerJobs(environment.ID())
if err != nil {
return nil, fmt.Errorf("failed fetching the runner jobs: %w", err)

View File

@ -1,716 +0,0 @@
package runner
import (
"context"
nomadApi "github.com/hashicorp/nomad/api"
"github.com/openHPI/poseidon/internal/config"
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/storage"
"github.com/openHPI/poseidon/pkg/util"
"github.com/openHPI/poseidon/tests"
"github.com/openHPI/poseidon/tests/helpers"
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"strconv"
"testing"
"time"
)
func TestGetNextRunnerTestSuite(t *testing.T) {
suite.Run(t, new(ManagerTestSuite))
}
type ManagerTestSuite struct {
tests.MemoryLeakTestSuite
apiMock *nomad.ExecutorAPIMock
nomadRunnerManager *NomadRunnerManager
exerciseEnvironment *ExecutionEnvironmentMock
exerciseRunner Runner
}
func (s *ManagerTestSuite) SetupTest() {
s.MemoryLeakTestSuite.SetupTest()
s.apiMock = &nomad.ExecutorAPIMock{}
mockRunnerQueries(s.TestCtx, s.apiMock, []string{})
// Instantly closed context to manually start the update process in some cases
ctx, cancel := context.WithCancel(context.Background())
cancel()
s.nomadRunnerManager = NewNomadRunnerManager(s.apiMock, ctx)
s.exerciseRunner = NewNomadJob(tests.DefaultRunnerID, nil, s.apiMock, s.nomadRunnerManager.onRunnerDestroyed)
s.exerciseEnvironment = createBasicEnvironmentMock(defaultEnvironmentID)
s.nomadRunnerManager.StoreEnvironment(s.exerciseEnvironment)
}
func (s *ManagerTestSuite) TearDownTest() {
defer s.MemoryLeakTestSuite.TearDownTest()
err := s.exerciseRunner.Destroy(nil)
s.Require().NoError(err)
}
func mockRunnerQueries(ctx context.Context, apiMock *nomad.ExecutorAPIMock, returnedRunnerIds []string) {
// reset expected calls to allow new mocked return values
apiMock.ExpectedCalls = []*mock.Call{}
call := apiMock.On("WatchEventStream", mock.Anything, mock.Anything, mock.Anything)
call.Run(func(args mock.Arguments) {
<-ctx.Done()
call.ReturnArguments = mock.Arguments{nil}
})
apiMock.On("LoadEnvironmentJobs").Return([]*nomadApi.Job{}, nil)
apiMock.On("LoadRunnerJobs", mock.AnythingOfType("dto.EnvironmentID")).Return([]*nomadApi.Job{}, nil)
apiMock.On("MarkRunnerAsUsed", mock.AnythingOfType("string"), mock.AnythingOfType("int")).Return(nil)
apiMock.On("LoadRunnerIDs", tests.DefaultRunnerID).Return(returnedRunnerIds, nil)
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
apiMock.On("JobScale", tests.DefaultRunnerID).Return(uint(len(returnedRunnerIds)), nil)
apiMock.On("SetJobScale", tests.DefaultRunnerID, mock.AnythingOfType("uint"), "Runner Requested").Return(nil)
apiMock.On("RegisterRunnerJob", mock.Anything).Return(nil)
apiMock.On("MonitorEvaluation", mock.Anything, mock.Anything).Return(nil)
}
func mockIdleRunners(environmentMock *ExecutionEnvironmentMock) {
tests.RemoveMethodFromMock(&environmentMock.Mock, "DeleteRunner")
idleRunner := storage.NewLocalStorage[Runner]()
environmentMock.On("AddRunner", mock.Anything).Run(func(args mock.Arguments) {
r, ok := args.Get(0).(Runner)
if !ok {
return
}
idleRunner.Add(r.ID(), r)
})
sampleCall := environmentMock.On("Sample", mock.Anything)
sampleCall.Run(func(args mock.Arguments) {
r, ok := idleRunner.Sample()
sampleCall.ReturnArguments = mock.Arguments{r, ok}
})
deleteCall := environmentMock.On("DeleteRunner", mock.AnythingOfType("string"))
deleteCall.Run(func(args mock.Arguments) {
id, ok := args.Get(0).(string)
if !ok {
log.Fatal("Cannot parse ID")
}
r, ok := idleRunner.Get(id)
deleteCall.ReturnArguments = mock.Arguments{r, ok}
if !ok {
return
}
idleRunner.Delete(id)
})
}
func (s *ManagerTestSuite) waitForRunnerRefresh() {
<-time.After(tests.ShortTimeout)
}
func (s *ManagerTestSuite) TestSetEnvironmentAddsNewEnvironment() {
anotherEnvironment := createBasicEnvironmentMock(anotherEnvironmentID)
s.nomadRunnerManager.StoreEnvironment(anotherEnvironment)
job, ok := s.nomadRunnerManager.environments.Get(anotherEnvironmentID.ToString())
s.True(ok)
s.NotNil(job)
}
func (s *ManagerTestSuite) TestClaimReturnsNotFoundErrorIfEnvironmentNotFound() {
runner, err := s.nomadRunnerManager.Claim(anotherEnvironmentID, defaultInactivityTimeout)
s.Nil(runner)
s.Equal(ErrUnknownExecutionEnvironment, err)
}
func (s *ManagerTestSuite) TestClaimReturnsRunnerIfAvailable() {
s.exerciseEnvironment.On("Sample", mock.Anything).Return(s.exerciseRunner, true)
receivedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.NoError(err)
s.Equal(s.exerciseRunner, receivedRunner)
}
func (s *ManagerTestSuite) TestClaimReturnsErrorIfNoRunnerAvailable() {
s.waitForRunnerRefresh()
s.exerciseEnvironment.On("Sample", mock.Anything).Return(nil, false)
runner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.Nil(runner)
s.Equal(ErrNoRunnersAvailable, err)
}
func (s *ManagerTestSuite) TestClaimReturnsNoRunnerOfDifferentEnvironment() {
s.exerciseEnvironment.On("Sample", mock.Anything).Return(s.exerciseRunner, true)
receivedRunner, err := s.nomadRunnerManager.Claim(anotherEnvironmentID, defaultInactivityTimeout)
s.Nil(receivedRunner)
s.Error(err)
}
func (s *ManagerTestSuite) TestClaimDoesNotReturnTheSameRunnerTwice() {
s.exerciseEnvironment.On("Sample", mock.Anything).Return(s.exerciseRunner, true).Once()
secondRunner := NewNomadJob(tests.AnotherRunnerID, nil, s.apiMock, s.nomadRunnerManager.onRunnerDestroyed)
s.exerciseEnvironment.On("Sample", mock.Anything).Return(secondRunner, true).Once()
firstReceivedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.NoError(err)
secondReceivedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.NoError(err)
s.NotEqual(firstReceivedRunner, secondReceivedRunner)
err = secondRunner.Destroy(nil)
s.NoError(err)
}
func (s *ManagerTestSuite) TestClaimAddsRunnerToUsedRunners() {
s.exerciseEnvironment.On("Sample", mock.Anything).Return(s.exerciseRunner, true)
receivedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.Require().NoError(err)
savedRunner, ok := s.nomadRunnerManager.usedRunners.Get(receivedRunner.ID())
s.True(ok)
s.Equal(savedRunner, receivedRunner)
}
func (s *ManagerTestSuite) TestClaimRemovesRunnerWhenMarkAsUsedFails() {
s.exerciseEnvironment.On("Sample", mock.Anything).Return(s.exerciseRunner, true)
s.exerciseEnvironment.On("DeleteRunner", mock.AnythingOfType("string")).Return(nil, false)
s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
util.MaxConnectionRetriesExponential = 1
modifyMockedCall(s.apiMock, "MarkRunnerAsUsed", func(call *mock.Call) {
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{tests.ErrDefault}
})
})
claimedRunner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.Require().NoError(err)
<-time.After(time.Second + tests.ShortTimeout) // Claimed runners are marked as used asynchronously
s.apiMock.AssertCalled(s.T(), "DeleteJob", claimedRunner.ID())
_, ok := s.nomadRunnerManager.usedRunners.Get(claimedRunner.ID())
s.False(ok)
}
func (s *ManagerTestSuite) TestGetReturnsRunnerIfRunnerIsUsed() {
s.nomadRunnerManager.usedRunners.Add(s.exerciseRunner.ID(), s.exerciseRunner)
savedRunner, err := s.nomadRunnerManager.Get(s.exerciseRunner.ID())
s.NoError(err)
s.Equal(savedRunner, s.exerciseRunner)
}
func (s *ManagerTestSuite) TestGetReturnsErrorIfRunnerNotFound() {
savedRunner, err := s.nomadRunnerManager.Get(tests.DefaultRunnerID)
s.Nil(savedRunner)
s.Error(err)
}
func (s *ManagerTestSuite) TestReturnRemovesRunnerFromUsedRunners() {
s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
s.exerciseEnvironment.On("DeleteRunner", mock.AnythingOfType("string")).Return(nil, false)
s.nomadRunnerManager.usedRunners.Add(s.exerciseRunner.ID(), s.exerciseRunner)
err := s.nomadRunnerManager.Return(s.exerciseRunner)
s.Nil(err)
_, ok := s.nomadRunnerManager.usedRunners.Get(s.exerciseRunner.ID())
s.False(ok)
}
func (s *ManagerTestSuite) TestReturnCallsDeleteRunnerApiMethod() {
s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
s.exerciseEnvironment.On("DeleteRunner", mock.AnythingOfType("string")).Return(nil, false)
err := s.nomadRunnerManager.Return(s.exerciseRunner)
s.Nil(err)
s.apiMock.AssertCalled(s.T(), "DeleteJob", s.exerciseRunner.ID())
}
func (s *ManagerTestSuite) TestReturnReturnsErrorWhenApiCallFailed() {
tests.RemoveMethodFromMock(&s.apiMock.Mock, "DeleteJob")
s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(tests.ErrDefault)
defer s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
defer tests.RemoveMethodFromMock(&s.apiMock.Mock, "DeleteJob")
s.exerciseEnvironment.On("DeleteRunner", mock.AnythingOfType("string")).Return(nil, false)
util.MaxConnectionRetriesExponential = 1
util.InitialWaitingDuration = 2 * tests.ShortTimeout
chReturnDone := make(chan error)
go func(done chan<- error) {
err := s.nomadRunnerManager.Return(s.exerciseRunner)
select {
case <-s.TestCtx.Done():
case done <- err:
}
close(done)
}(chReturnDone)
select {
case <-chReturnDone:
s.Fail("Return should not return if the API request failed")
case <-time.After(tests.ShortTimeout):
}
select {
case err := <-chReturnDone:
s.ErrorIs(err, tests.ErrDefault)
case <-time.After(2 * tests.ShortTimeout):
s.Fail("Return should return after the retry mechanism")
// note: MaxConnectionRetriesExponential and InitialWaitingDuration are decreased drastically here.
}
}
func (s *ManagerTestSuite) TestUpdateRunnersLogsErrorFromWatchAllocation() {
logger, hook := test.NewNullLogger()
log = logger.WithField("pkg", "runner")
modifyMockedCall(s.apiMock, "WatchEventStream", func(call *mock.Call) {
call.Run(func(args mock.Arguments) {
call.ReturnArguments = mock.Arguments{tests.ErrDefault}
})
})
err := s.nomadRunnerManager.SynchronizeRunners(s.TestCtx)
if err != nil {
log.WithError(err).Error("failed to synchronize runners")
}
s.Require().Equal(2, len(hook.Entries))
s.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
err, ok := hook.LastEntry().Data[logrus.ErrorKey].(error)
s.Require().True(ok)
s.ErrorIs(err, tests.ErrDefault)
}
func (s *ManagerTestSuite) TestUpdateRunnersAddsIdleRunner() {
allocation := &nomadApi.Allocation{ID: tests.DefaultRunnerID}
environment, ok := s.nomadRunnerManager.environments.Get(defaultEnvironmentID.ToString())
s.Require().True(ok)
allocation.JobID = environment.ID().ToString()
mockIdleRunners(environment.(*ExecutionEnvironmentMock))
_, ok = environment.Sample()
s.Require().False(ok)
modifyMockedCall(s.apiMock, "WatchEventStream", func(call *mock.Call) {
call.Run(func(args mock.Arguments) {
callbacks, ok := args.Get(1).(*nomad.AllocationProcessing)
s.Require().True(ok)
callbacks.OnNew(allocation, 0)
call.ReturnArguments = mock.Arguments{nil}
})
})
go func() {
err := s.nomadRunnerManager.SynchronizeRunners(s.TestCtx)
if err != nil {
log.WithError(err).Error("failed to synchronize runners")
}
}()
<-time.After(10 * time.Millisecond)
r, ok := environment.Sample()
s.True(ok)
s.NoError(r.Destroy(nil))
}
func (s *ManagerTestSuite) TestUpdateRunnersRemovesIdleAndUsedRunner() {
allocation := &nomadApi.Allocation{JobID: tests.DefaultRunnerID}
environment, ok := s.nomadRunnerManager.environments.Get(defaultEnvironmentID.ToString())
s.Require().True(ok)
mockIdleRunners(environment.(*ExecutionEnvironmentMock))
testRunner := NewNomadJob(allocation.JobID, nil, s.apiMock, s.nomadRunnerManager.onRunnerDestroyed)
s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
environment.AddRunner(testRunner)
s.nomadRunnerManager.usedRunners.Add(testRunner.ID(), testRunner)
modifyMockedCall(s.apiMock, "WatchEventStream", func(call *mock.Call) {
call.Run(func(args mock.Arguments) {
callbacks, ok := args.Get(1).(*nomad.AllocationProcessing)
s.Require().True(ok)
callbacks.OnDeleted(allocation.JobID, nil)
call.ReturnArguments = mock.Arguments{nil}
})
})
go func() {
err := s.nomadRunnerManager.SynchronizeRunners(s.TestCtx)
if err != nil {
log.WithError(err).Error("failed to synchronize runners")
}
}()
<-time.After(tests.ShortTimeout)
_, ok = environment.Sample()
s.False(ok)
_, ok = s.nomadRunnerManager.usedRunners.Get(allocation.JobID)
s.False(ok)
}
func modifyMockedCall(apiMock *nomad.ExecutorAPIMock, method string, modifier func(call *mock.Call)) {
for _, c := range apiMock.ExpectedCalls {
if c.Method == method {
modifier(c)
}
}
}
func (s *ManagerTestSuite) TestOnAllocationAdded() {
s.Run("does not add environment template id job", func() {
environment, ok := s.nomadRunnerManager.environments.Get(tests.DefaultEnvironmentIDAsString)
s.True(ok)
mockIdleRunners(environment.(*ExecutionEnvironmentMock))
alloc := &nomadApi.Allocation{JobID: nomad.TemplateJobID(tests.DefaultEnvironmentIDAsInteger)}
s.nomadRunnerManager.onAllocationAdded(alloc, 0)
_, ok = environment.Sample()
s.False(ok)
})
s.Run("does not panic when environment id cannot be parsed", func() {
alloc := &nomadApi.Allocation{JobID: ""}
s.NotPanics(func() {
s.nomadRunnerManager.onAllocationAdded(alloc, 0)
})
})
s.Run("does not panic when environment does not exist", func() {
nonExistentEnvironment := dto.EnvironmentID(1234)
_, ok := s.nomadRunnerManager.environments.Get(nonExistentEnvironment.ToString())
s.Require().False(ok)
alloc := &nomadApi.Allocation{JobID: nomad.RunnerJobID(nonExistentEnvironment, "1-1-1-1")}
s.NotPanics(func() {
s.nomadRunnerManager.onAllocationAdded(alloc, 0)
})
})
s.Run("adds correct job", func() {
s.Run("without allocated resources", func() {
environment, ok := s.nomadRunnerManager.environments.Get(tests.DefaultEnvironmentIDAsString)
s.True(ok)
mockIdleRunners(environment.(*ExecutionEnvironmentMock))
_, ok = environment.Sample()
s.Require().False(ok)
alloc := &nomadApi.Allocation{
JobID: tests.DefaultRunnerID,
AllocatedResources: nil,
}
s.nomadRunnerManager.onAllocationAdded(alloc, 0)
runner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.NoError(err)
nomadJob, ok := runner.(*NomadJob)
s.True(ok)
s.Equal(nomadJob.id, tests.DefaultRunnerID)
s.Empty(nomadJob.portMappings)
s.Run("but not again", func() {
s.nomadRunnerManager.onAllocationAdded(alloc, 0)
runner, err = s.nomadRunnerManager.Claim(defaultEnvironmentID, defaultInactivityTimeout)
s.Error(err)
})
err = nomadJob.Destroy(nil)
s.NoError(err)
})
s.nomadRunnerManager.usedRunners.Purge()
s.Run("with mapped ports", func() {
environment, ok := s.nomadRunnerManager.environments.Get(tests.DefaultEnvironmentIDAsString)
s.True(ok)
mockIdleRunners(environment.(*ExecutionEnvironmentMock))
alloc := &nomadApi.Allocation{
JobID: tests.DefaultRunnerID,
AllocatedResources: &nomadApi.AllocatedResources{
Shared: nomadApi.AllocatedSharedResources{Ports: tests.DefaultPortMappings},
},
}
s.nomadRunnerManager.onAllocationAdded(alloc, 0)
runner, ok := environment.Sample()
s.True(ok)
nomadJob, ok := runner.(*NomadJob)
s.True(ok)
s.Equal(nomadJob.id, tests.DefaultRunnerID)
s.Equal(nomadJob.portMappings, tests.DefaultPortMappings)
err := runner.Destroy(nil)
s.NoError(err)
})
})
}
func (s *ManagerTestSuite) TestOnAllocationStopped() {
s.Run("returns false for idle runner", func() {
environment, ok := s.nomadRunnerManager.environments.Get(tests.DefaultEnvironmentIDAsString)
s.Require().True(ok)
mockIdleRunners(environment.(*ExecutionEnvironmentMock))
r := NewNomadJob(tests.DefaultRunnerID, []nomadApi.PortMapping{}, s.apiMock, func(r Runner) error { return nil })
environment.AddRunner(r)
alreadyRemoved := s.nomadRunnerManager.onAllocationStopped(tests.DefaultRunnerID, nil)
s.False(alreadyRemoved)
s.Error(r.ctx.Err(), "The runner should be destroyed and its context canceled")
})
s.Run("returns false and stops inactivity timer", func() {
runner, runnerDestroyed := testStoppedInactivityTimer(s)
alreadyRemoved := s.nomadRunnerManager.onAllocationStopped(runner.ID(), nil)
s.False(alreadyRemoved)
select {
case <-time.After(time.Second + tests.ShortTimeout):
s.Fail("runner was stopped too late")
case <-runnerDestroyed:
s.False(runner.TimeoutPassed())
}
})
s.Run("stops inactivity timer - counter check", func() {
runner, runnerDestroyed := testStoppedInactivityTimer(s)
select {
case <-time.After(time.Second + tests.ShortTimeout):
s.Fail("runner was stopped too late")
case <-runnerDestroyed:
s.True(runner.TimeoutPassed())
}
})
s.Run("returns true when the runner is already removed", func() {
s.Run("by the inactivity timer", func() {
runner, _ := testStoppedInactivityTimer(s)
<-time.After(time.Second)
s.Require().True(runner.TimeoutPassed())
alreadyRemoved := s.nomadRunnerManager.onAllocationStopped(runner.ID(), nil)
s.True(alreadyRemoved)
})
})
}
func testStoppedInactivityTimer(s *ManagerTestSuite) (r Runner, destroyed chan struct{}) {
s.T().Helper()
environment, ok := s.nomadRunnerManager.environments.Get(tests.DefaultEnvironmentIDAsString)
s.Require().True(ok)
mockIdleRunners(environment.(*ExecutionEnvironmentMock))
runnerDestroyed := make(chan struct{})
environment.AddRunner(NewNomadJob(tests.DefaultRunnerID, []nomadApi.PortMapping{}, s.apiMock, func(r Runner) error {
go func() {
select {
case runnerDestroyed <- struct{}{}:
case <-s.TestCtx.Done():
}
}()
return s.nomadRunnerManager.onRunnerDestroyed(r)
}))
runner, err := s.nomadRunnerManager.Claim(defaultEnvironmentID, 1)
s.Require().NoError(err)
s.Require().False(runner.TimeoutPassed())
select {
case runnerDestroyed <- struct{}{}:
s.Fail("The runner should not be removed by now")
case <-time.After(tests.ShortTimeout):
}
return runner, runnerDestroyed
}
func (s *MainTestSuite) TestNomadRunnerManager_Load() {
apiMock := &nomad.ExecutorAPIMock{}
mockWatchAllocations(s.TestCtx, apiMock)
apiMock.On("LoadRunnerPortMappings", mock.AnythingOfType("string")).
Return([]nomadApi.PortMapping{}, nil)
call := apiMock.On("LoadRunnerJobs", dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
runnerManager := NewNomadRunnerManager(apiMock, s.TestCtx)
environmentMock := createBasicEnvironmentMock(tests.DefaultEnvironmentIDAsInteger)
environmentMock.On("ApplyPrewarmingPoolSize").Return(nil)
runnerManager.StoreEnvironment(environmentMock)
s.Run("Stores unused runner", func() {
tests.RemoveMethodFromMock(&environmentMock.Mock, "DeleteRunner")
environmentMock.On("AddRunner", mock.AnythingOfType("*runner.NomadJob")).Once()
_, job := helpers.CreateTemplateJob()
jobID := tests.DefaultRunnerID
job.ID = &jobID
job.Name = &jobID
s.ExpectedGoroutineIncrease++ // We don't care about destroying the created runner.
call.Return([]*nomadApi.Job{job}, nil)
runnerManager.Load()
environmentMock.AssertExpectations(s.T())
})
s.Run("Stores used runner", func() {
apiMock.On("MarkRunnerAsUsed", mock.AnythingOfType("string"), mock.AnythingOfType("int")).Return(nil)
_, job := helpers.CreateTemplateJob()
jobID := tests.DefaultRunnerID
job.ID = &jobID
job.Name = &jobID
configTaskGroup := nomad.FindTaskGroup(job, nomad.ConfigTaskGroupName)
s.Require().NotNil(configTaskGroup)
configTaskGroup.Meta[nomad.ConfigMetaUsedKey] = nomad.ConfigMetaUsedValue
s.ExpectedGoroutineIncrease++ // We don't care about destroying the created runner.
call.Return([]*nomadApi.Job{job}, nil)
s.Require().Zero(runnerManager.usedRunners.Length())
runnerManager.Load()
_, ok := runnerManager.usedRunners.Get(tests.DefaultRunnerID)
s.True(ok)
})
runnerManager.usedRunners.Purge()
s.Run("Restart timeout of used runner", func() {
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
environmentMock.On("DeleteRunner", mock.AnythingOfType("string")).Once().Return(nil, false)
timeout := 1
_, job := helpers.CreateTemplateJob()
jobID := tests.DefaultRunnerID
job.ID = &jobID
job.Name = &jobID
configTaskGroup := nomad.FindTaskGroup(job, nomad.ConfigTaskGroupName)
s.Require().NotNil(configTaskGroup)
configTaskGroup.Meta[nomad.ConfigMetaUsedKey] = nomad.ConfigMetaUsedValue
configTaskGroup.Meta[nomad.ConfigMetaTimeoutKey] = strconv.Itoa(timeout)
call.Return([]*nomadApi.Job{job}, nil)
s.Require().Zero(runnerManager.usedRunners.Length())
runnerManager.Load()
s.Require().NotZero(runnerManager.usedRunners.Length())
<-time.After(time.Duration(timeout*2) * time.Second)
s.Require().Zero(runnerManager.usedRunners.Length())
})
}
func (s *MainTestSuite) TestNomadRunnerManager_checkPrewarmingPoolAlert() {
timeout := uint(1)
config.Config.Server.Alert.PrewarmingPoolReloadTimeout = timeout
config.Config.Server.Alert.PrewarmingPoolThreshold = 0.5
environment := &ExecutionEnvironmentMock{}
environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
environment.On("Image").Return("")
environment.On("CPULimit").Return(uint(0))
environment.On("MemoryLimit").Return(uint(0))
environment.On("NetworkAccess").Return(false, nil)
apiMock := &nomad.ExecutorAPIMock{}
m := NewNomadRunnerManager(apiMock, s.TestCtx)
m.StoreEnvironment(environment)
s.Run("checks the alert condition again after the reload timeout", func() {
environment.On("PrewarmingPoolSize").Return(uint(1)).Once()
environment.On("IdleRunnerCount").Return(uint(0)).Once()
environment.On("PrewarmingPoolSize").Return(uint(1)).Once()
environment.On("IdleRunnerCount").Return(uint(1)).Once()
checkDone := make(chan struct{})
go func() {
m.checkPrewarmingPoolAlert(environment, false)
close(checkDone)
}()
select {
case <-checkDone:
s.Fail("checkPrewarmingPoolAlert returned before the reload timeout")
case <-time.After(time.Duration(timeout) * time.Second / 2):
}
select {
case <-time.After(time.Duration(timeout) * time.Second):
s.Fail("checkPrewarmingPoolAlert did not return after checking the alert condition again")
case <-checkDone:
}
environment.AssertExpectations(s.T())
})
s.Run("checks the alert condition again after the reload timeout", func() {
environment.On("PrewarmingPoolSize").Return(uint(1)).Twice()
environment.On("IdleRunnerCount").Return(uint(0)).Twice()
apiMock.On("LoadRunnerJobs", environment.ID()).Return([]*nomadApi.Job{}, nil).Once()
environment.On("ApplyPrewarmingPoolSize").Return(nil).Once()
checkDone := make(chan struct{})
go func() {
m.checkPrewarmingPoolAlert(environment, false)
close(checkDone)
}()
select {
case <-time.After(time.Duration(timeout) * time.Second * 2):
s.Fail("checkPrewarmingPoolAlert did not return")
case <-checkDone:
}
environment.AssertExpectations(s.T())
})
s.Run("is canceled by an added runner", func() {
environment.On("PrewarmingPoolSize").Return(uint(1)).Twice()
environment.On("IdleRunnerCount").Return(uint(0)).Once()
environment.On("IdleRunnerCount").Return(uint(1)).Once()
checkDone := make(chan struct{})
go func() {
m.checkPrewarmingPoolAlert(environment, false)
close(checkDone)
}()
<-time.After(tests.ShortTimeout)
go m.checkPrewarmingPoolAlert(environment, true)
<-time.After(tests.ShortTimeout)
select {
case <-time.After(100 * time.Duration(timeout) * time.Second):
s.Fail("checkPrewarmingPoolAlert was not canceled")
case <-checkDone:
}
environment.AssertExpectations(s.T())
})
}
func (s *MainTestSuite) TestNomadRunnerManager_checkPrewarmingPoolAlert_reloadsRunners() {
config.Config.Server.Alert.PrewarmingPoolReloadTimeout = uint(1)
config.Config.Server.Alert.PrewarmingPoolThreshold = 0.5
environment := &ExecutionEnvironmentMock{}
environment.On("ID").Return(dto.EnvironmentID(tests.DefaultEnvironmentIDAsInteger))
environment.On("Image").Return("")
environment.On("CPULimit").Return(uint(0))
environment.On("MemoryLimit").Return(uint(0))
environment.On("NetworkAccess").Return(false, nil)
apiMock := &nomad.ExecutorAPIMock{}
m := NewNomadRunnerManager(apiMock, s.TestCtx)
m.StoreEnvironment(environment)
environment.On("PrewarmingPoolSize").Return(uint(1)).Twice()
environment.On("IdleRunnerCount").Return(uint(0)).Twice()
environment.On("DeleteRunner", mock.Anything).Return(nil, false).Once()
s.Require().Empty(m.usedRunners.Length())
_, usedJob := helpers.CreateTemplateJob()
id := tests.DefaultRunnerID
usedJob.ID = &id
configTaskGroup := nomad.FindTaskGroup(usedJob, nomad.ConfigTaskGroupName)
configTaskGroup.Meta[nomad.ConfigMetaUsedKey] = nomad.ConfigMetaUsedValue
configTaskGroup.Meta[nomad.ConfigMetaTimeoutKey] = "42"
_, idleJob := helpers.CreateTemplateJob()
idleID := tests.AnotherRunnerID
idleJob.ID = &idleID
nomad.FindTaskGroup(idleJob, nomad.ConfigTaskGroupName).Meta[nomad.ConfigMetaUsedKey] = nomad.ConfigMetaUnusedValue
apiMock.On("LoadRunnerJobs", environment.ID()).Return([]*nomadApi.Job{usedJob, idleJob}, nil).Once()
apiMock.On("LoadRunnerPortMappings", mock.Anything).Return(nil, nil).Twice()
environment.On("ApplyPrewarmingPoolSize").Return(nil).Once()
environment.On("AddRunner", mock.Anything).Run(func(args mock.Arguments) {
job, ok := args[0].(*NomadJob)
s.Require().True(ok)
err := job.Destroy(ErrLocalDestruction)
s.NoError(err)
}).Return().Once()
m.checkPrewarmingPoolAlert(environment, false)
r, ok := m.usedRunners.Get(tests.DefaultRunnerID)
s.Require().True(ok)
err := r.Destroy(ErrLocalDestruction)
s.NoError(err)
environment.AssertExpectations(s.T())
}
func mockWatchAllocations(ctx context.Context, apiMock *nomad.ExecutorAPIMock) {
call := apiMock.On("WatchEventStream", mock.Anything, mock.Anything, mock.Anything)
call.Run(func(args mock.Arguments) {
<-ctx.Done()
call.ReturnArguments = mock.Arguments{nil}
})
}

View File

@ -1,548 +0,0 @@
package runner
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"fmt"
"github.com/openHPI/poseidon/internal/nomad"
"github.com/openHPI/poseidon/pkg/dto"
"github.com/openHPI/poseidon/pkg/logging"
"github.com/openHPI/poseidon/pkg/nullio"
"github.com/openHPI/poseidon/pkg/storage"
"github.com/openHPI/poseidon/tests"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"io"
"regexp"
"strings"
"testing"
"time"
)
const defaultExecutionID = "execution-id"
func (s *MainTestSuite) TestIdIsStored() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
runner := NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error { return nil })
s.Equal(tests.DefaultRunnerID, runner.ID())
s.NoError(runner.Destroy(nil))
}
func (s *MainTestSuite) TestMappedPortsAreStoredCorrectly() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
runner := NewNomadJob(tests.DefaultRunnerID, tests.DefaultPortMappings, apiMock, func(_ Runner) error { return nil })
s.Equal(tests.DefaultMappedPorts, runner.MappedPorts())
s.NoError(runner.Destroy(nil))
runner = NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error { return nil })
s.Empty(runner.MappedPorts())
s.NoError(runner.Destroy(nil))
}
func (s *MainTestSuite) TestMarshalRunner() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
runner := NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error { return nil })
marshal, err := json.Marshal(runner)
s.NoError(err)
s.Equal("{\"runnerId\":\""+tests.DefaultRunnerID+"\"}", string(marshal))
s.NoError(runner.Destroy(nil))
}
func (s *MainTestSuite) TestExecutionRequestIsStored() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
runner := NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error { return nil })
executionRequest := &dto.ExecutionRequest{
Command: "command",
TimeLimit: 10,
Environment: nil,
}
id := "test-execution"
runner.StoreExecution(id, executionRequest)
storedExecutionRunner, ok := runner.executions.Pop(id)
s.True(ok, "Getting an execution should not return ok false")
s.Equal(executionRequest, storedExecutionRunner)
s.NoError(runner.Destroy(nil))
}
func (s *MainTestSuite) TestNewContextReturnsNewContextWithRunner() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
runner := NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error { return nil })
ctx := context.Background()
newCtx := NewContext(ctx, runner)
storedRunner, ok := newCtx.Value(runnerContextKey).(Runner)
s.Require().True(ok)
s.NotEqual(ctx, newCtx)
s.Equal(runner, storedRunner)
s.NoError(runner.Destroy(nil))
}
func (s *MainTestSuite) TestFromContextReturnsRunner() {
apiMock := &nomad.ExecutorAPIMock{}
apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
runner := NewNomadJob(tests.DefaultRunnerID, nil, apiMock, func(_ Runner) error { return nil })
ctx := NewContext(context.Background(), runner)
storedRunner, ok := FromContext(ctx)
s.True(ok)
s.Equal(runner, storedRunner)
s.NoError(runner.Destroy(nil))
}
func (s *MainTestSuite) TestFromContextReturnsIsNotOkWhenContextHasNoRunner() {
ctx := context.Background()
_, ok := FromContext(ctx)
s.False(ok)
}
func (s *MainTestSuite) TestDestroyDoesNotPropagateToNomadForSomeReasons() {
apiMock := &nomad.ExecutorAPIMock{}
timer := &InactivityTimerMock{}
timer.On("StopTimeout").Return()
ctx, cancel := context.WithCancel(s.TestCtx)
r := &NomadJob{
executions: storage.NewLocalStorage[*dto.ExecutionRequest](),
InactivityTimer: timer,
id: tests.DefaultRunnerID,
api: apiMock,
ctx: ctx,
cancel: cancel,
}
s.Run("destroy removes the runner only locally for OOM Killed Allocations", func() {
err := r.Destroy(ErrOOMKilled)
s.NoError(err)
apiMock.AssertExpectations(s.T())
})
s.Run("destroy removes the runner only locally for rescheduled allocations", func() {
err := r.Destroy(nomad.ErrorAllocationRescheduled)
s.NoError(err)
apiMock.AssertExpectations(s.T())
})
}
func TestExecuteInteractivelyTestSuite(t *testing.T) {
suite.Run(t, new(ExecuteInteractivelyTestSuite))
}
type ExecuteInteractivelyTestSuite struct {
tests.MemoryLeakTestSuite
runner *NomadJob
apiMock *nomad.ExecutorAPIMock
timer *InactivityTimerMock
manager *ManagerMock
mockedExecuteCommandCall *mock.Call
mockedTimeoutPassedCall *mock.Call
}
func (s *ExecuteInteractivelyTestSuite) SetupTest() {
s.MemoryLeakTestSuite.SetupTest()
s.apiMock = &nomad.ExecutorAPIMock{}
s.mockedExecuteCommandCall = s.apiMock.On("ExecuteCommand", mock.Anything, mock.Anything, mock.Anything,
true, false, mock.Anything, mock.Anything, mock.Anything).
Return(0, nil)
s.apiMock.On("DeleteJob", mock.AnythingOfType("string")).Return(nil)
s.timer = &InactivityTimerMock{}
s.timer.On("StopTimeout").Return()
s.timer.On("ResetTimeout").Return()
s.mockedTimeoutPassedCall = s.timer.On("TimeoutPassed").Return(false)
s.manager = &ManagerMock{}
s.manager.On("Return", mock.Anything).Return(nil)
ctx, cancel := context.WithCancel(context.Background())
s.runner = &NomadJob{
executions: storage.NewLocalStorage[*dto.ExecutionRequest](),
InactivityTimer: s.timer,
id: tests.DefaultRunnerID,
api: s.apiMock,
ctx: ctx,
cancel: cancel,
}
}
func (s *ExecuteInteractivelyTestSuite) TestReturnsErrorWhenExecutionDoesNotExist() {
_, _, err := s.runner.ExecuteInteractively("non-existent-id", nil, nil, nil, context.Background())
s.ErrorIs(err, ErrorUnknownExecution)
}
func (s *ExecuteInteractivelyTestSuite) TestCallsApi() {
request := &dto.ExecutionRequest{Command: "echo 'Hello World!'"}
s.runner.StoreExecution(defaultExecutionID, request)
_, _, err := s.runner.ExecuteInteractively(defaultExecutionID, nil, nil, nil, context.Background())
s.Require().NoError(err)
time.Sleep(tests.ShortTimeout)
s.apiMock.AssertCalled(s.T(), "ExecuteCommand", tests.DefaultRunnerID, mock.Anything, request.FullCommand(),
true, false, mock.Anything, mock.Anything, mock.Anything)
}
func (s *ExecuteInteractivelyTestSuite) TestReturnsAfterTimeout() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s.mockedExecuteCommandCall.Run(func(args mock.Arguments) {
<-ctx.Done()
}).Return(0, nil)
timeLimit := 1
executionRequest := &dto.ExecutionRequest{TimeLimit: timeLimit}
s.runner.StoreExecution(defaultExecutionID, executionRequest)
exit, _, err := s.runner.ExecuteInteractively(defaultExecutionID, &nullio.ReadWriter{}, nil, nil, context.Background())
s.Require().NoError(err)
select {
case <-exit:
s.FailNow("ExecuteInteractively should not terminate instantly")
case <-time.After(tests.ShortTimeout):
}
select {
case <-time.After(time.Duration(timeLimit) * time.Second):
s.FailNow("ExecuteInteractively should return after the time limit")
case exitInfo := <-exit:
s.Equal(uint8(255), exitInfo.Code)
}
}
func (s *ExecuteInteractivelyTestSuite) TestSendsSignalAfterTimeout() {
quit := make(chan struct{})
s.mockedExecuteCommandCall.Run(func(args mock.Arguments) {
stdin, ok := args.Get(5).(io.Reader)
s.Require().True(ok)
buffer := make([]byte, 1) //nolint:makezero,lll // If the length is zero, the Read call never reads anything. gofmt wants this alignment.
for n := 0; !(n == 1 && buffer[0] == SIGQUIT); {
<-time.After(tests.ShortTimeout)
n, _ = stdin.Read(buffer) //nolint:errcheck,lll // Read returns EOF errors but that is expected. This nolint makes the line too long.
if n > 0 {
log.WithField("buffer", fmt.Sprintf("%x", buffer[0])).Info("Received Stdin")
}
}
log.Info("After loop")
close(quit)
}).Return(0, nil)
timeLimit := 1
executionRequest := &dto.ExecutionRequest{TimeLimit: timeLimit}
s.runner.StoreExecution(defaultExecutionID, executionRequest)
_, _, err := s.runner.ExecuteInteractively(
defaultExecutionID, bytes.NewBuffer(make([]byte, 1)), nil, nil, context.Background())
s.Require().NoError(err)
log.Info("Before waiting")
select {
case <-time.After(2 * (time.Duration(timeLimit) * time.Second)):
s.FailNow("The execution should receive a SIGQUIT after the timeout")
case <-quit:
log.Info("Received quit")
}
}
func (s *ExecuteInteractivelyTestSuite) TestDestroysRunnerAfterTimeoutAndSignal() {
s.mockedExecuteCommandCall.Run(func(args mock.Arguments) {
<-s.TestCtx.Done()
})
runnerDestroyed := false
s.runner.onDestroy = func(_ Runner) error {
runnerDestroyed = true
return nil
}
timeLimit := 1
executionRequest := &dto.ExecutionRequest{TimeLimit: timeLimit}
s.runner.cancel = func() {}
s.runner.StoreExecution(defaultExecutionID, executionRequest)
_, _, err := s.runner.ExecuteInteractively(
defaultExecutionID, bytes.NewBuffer(make([]byte, 1)), nil, nil, context.Background())
s.Require().NoError(err)
<-time.After(executionTimeoutGracePeriod + time.Duration(timeLimit)*time.Second)
// Even if we expect the timeout to be exceeded now, Poseidon sometimes takes a couple of hundred ms longer.
<-time.After(2 * tests.ShortTimeout)
s.manager.AssertNotCalled(s.T(), "Return", s.runner)
s.apiMock.AssertCalled(s.T(), "DeleteJob", s.runner.ID())
s.True(runnerDestroyed)
}
func (s *ExecuteInteractivelyTestSuite) TestResetTimerGetsCalled() {
executionRequest := &dto.ExecutionRequest{}
s.runner.StoreExecution(defaultExecutionID, executionRequest)
_, _, err := s.runner.ExecuteInteractively(defaultExecutionID, nil, nil, nil, context.Background())
s.Require().NoError(err)
s.timer.AssertCalled(s.T(), "ResetTimeout")
}
func (s *ExecuteInteractivelyTestSuite) TestExitHasTimeoutErrorIfRunnerTimesOut() {
s.mockedExecuteCommandCall.Run(func(args mock.Arguments) {
<-s.TestCtx.Done()
}).Return(0, nil)
s.mockedTimeoutPassedCall.Return(true)
executionRequest := &dto.ExecutionRequest{}
s.runner.StoreExecution(defaultExecutionID, executionRequest)
exitChannel, _, err := s.runner.ExecuteInteractively(
defaultExecutionID, &nullio.ReadWriter{}, nil, nil, context.Background())
s.Require().NoError(err)
err = s.runner.Destroy(ErrorRunnerInactivityTimeout)
s.Require().NoError(err)
exit := <-exitChannel
s.ErrorIs(exit.Err, ErrorRunnerInactivityTimeout)
}
func (s *ExecuteInteractivelyTestSuite) TestDestroyReasonIsPassedToExecution() {
s.mockedExecuteCommandCall.Run(func(args mock.Arguments) {
<-s.TestCtx.Done()
}).Return(0, nil)
s.mockedTimeoutPassedCall.Return(true)
executionRequest := &dto.ExecutionRequest{}
s.runner.StoreExecution(defaultExecutionID, executionRequest)
exitChannel, _, err := s.runner.ExecuteInteractively(
defaultExecutionID, &nullio.ReadWriter{}, nil, nil, context.Background())
s.Require().NoError(err)
err = s.runner.Destroy(ErrOOMKilled)
s.Require().NoError(err)
exit := <-exitChannel
s.ErrorIs(exit.Err, ErrOOMKilled)
}
func (s *ExecuteInteractivelyTestSuite) TestSuspectedOOMKilledExecutionWaitsForVerification() {
s.mockedExecuteCommandCall.Return(128, nil)
executionRequest := &dto.ExecutionRequest{}
s.Run("Actually OOM Killed", func() {
s.runner.StoreExecution(defaultExecutionID, executionRequest)
exitChannel, _, err := s.runner.ExecuteInteractively(
defaultExecutionID, &nullio.ReadWriter{}, nil, nil, context.Background())
s.Require().NoError(err)
select {
case <-exitChannel:
s.FailNow("For exit code 128 Poseidon should wait a while to verify the OOM Kill assumption.")
case <-time.After(tests.ShortTimeout):
// All good. Poseidon waited.
}
err = s.runner.Destroy(ErrOOMKilled)
s.Require().NoError(err)
exit := <-exitChannel
s.ErrorIs(exit.Err, ErrOOMKilled)
})
ctx, cancel := context.WithCancel(context.Background())
s.runner.ctx = ctx
s.runner.cancel = cancel
s.Run("Not OOM Killed", func() {
s.runner.StoreExecution(defaultExecutionID, executionRequest)
exitChannel, _, err := s.runner.ExecuteInteractively(
defaultExecutionID, &nullio.ReadWriter{}, nil, nil, context.Background())
s.Require().NoError(err)
select {
case <-time.After(tests.ShortTimeout + time.Second):
s.FailNow("Poseidon should not wait too long for verifying the OOM Kill assumption.")
case exit := <-exitChannel:
s.Equal(uint8(128), exit.Code)
s.Nil(exit.Err)
}
})
}
func TestUpdateFileSystemTestSuite(t *testing.T) {
suite.Run(t, new(UpdateFileSystemTestSuite))
}
type UpdateFileSystemTestSuite struct {
tests.MemoryLeakTestSuite
runner *NomadJob
timer *InactivityTimerMock
apiMock *nomad.ExecutorAPIMock
mockedExecuteCommandCall *mock.Call
command string
stdin *bytes.Buffer
}
func (s *UpdateFileSystemTestSuite) SetupTest() {
s.MemoryLeakTestSuite.SetupTest()
s.apiMock = &nomad.ExecutorAPIMock{}
s.timer = &InactivityTimerMock{}
s.timer.On("ResetTimeout").Return()
s.timer.On("TimeoutPassed").Return(false)
s.runner = &NomadJob{
executions: storage.NewLocalStorage[*dto.ExecutionRequest](),
InactivityTimer: s.timer,
id: tests.DefaultRunnerID,
api: s.apiMock,
}
s.mockedExecuteCommandCall = s.apiMock.On("ExecuteCommand", tests.DefaultRunnerID, mock.Anything,
mock.Anything, false, mock.AnythingOfType("bool"), mock.Anything, mock.Anything, mock.Anything).
Run(func(args mock.Arguments) {
var ok bool
s.command, ok = args.Get(2).(string)
s.Require().True(ok)
s.stdin, ok = args.Get(5).(*bytes.Buffer)
s.Require().True(ok)
}).Return(0, nil)
}
func (s *UpdateFileSystemTestSuite) TestUpdateFileSystemForRunnerPerformsTarExtractionWithAbsoluteNamesOnRunner() {
// note: this test covers an implementation detail of the UpdateFileSystem method
// if the implementation changes, delete this test and write a new one
copyRequest := &dto.UpdateFileSystemRequest{}
err := s.runner.UpdateFileSystem(copyRequest, context.Background())
s.NoError(err)
s.apiMock.AssertCalled(s.T(), "ExecuteCommand", mock.Anything, mock.Anything, mock.Anything,
false, mock.AnythingOfType("bool"), mock.Anything, mock.Anything, mock.Anything)
s.Regexp("tar --extract --absolute-names", s.command)
}
func (s *UpdateFileSystemTestSuite) TestUpdateFileSystemForRunnerReturnsErrorIfExitCodeIsNotZero() {
s.mockedExecuteCommandCall.Return(1, nil)
copyRequest := &dto.UpdateFileSystemRequest{}
err := s.runner.UpdateFileSystem(copyRequest, context.Background())
s.ErrorIs(err, ErrorFileCopyFailed)
}
func (s *UpdateFileSystemTestSuite) TestUpdateFileSystemForRunnerReturnsErrorIfApiCallDid() {
s.mockedExecuteCommandCall.Return(0, tests.ErrDefault)
copyRequest := &dto.UpdateFileSystemRequest{}
err := s.runner.UpdateFileSystem(copyRequest, context.Background())
s.ErrorIs(err, nomad.ErrorExecutorCommunicationFailed)
}
func (s *UpdateFileSystemTestSuite) TestFilesToCopyAreIncludedInTarArchive() {
copyRequest := &dto.UpdateFileSystemRequest{Copy: []dto.File{
{Path: tests.DefaultFileName, Content: []byte(tests.DefaultFileContent)}}}
err := s.runner.UpdateFileSystem(copyRequest, context.Background())
s.NoError(err)
s.apiMock.AssertCalled(s.T(), "ExecuteCommand", mock.Anything, mock.Anything, mock.Anything, false, true,
mock.Anything, mock.Anything, mock.Anything)
tarFiles := s.readFilesFromTarArchive(s.stdin)
s.Len(tarFiles, 1)
tarFile := tarFiles[0]
s.True(strings.HasSuffix(tarFile.Name, tests.DefaultFileName))
s.Equal(byte(tar.TypeReg), tarFile.TypeFlag)
s.Equal(tests.DefaultFileContent, tarFile.Content)
}
func (s *UpdateFileSystemTestSuite) TestTarFilesContainCorrectPathForRelativeFilePath() {
copyRequest := &dto.UpdateFileSystemRequest{Copy: []dto.File{
{Path: tests.DefaultFileName, Content: []byte(tests.DefaultFileContent)}}}
err := s.runner.UpdateFileSystem(copyRequest, context.Background())
s.Require().NoError(err)
tarFiles := s.readFilesFromTarArchive(s.stdin)
s.Len(tarFiles, 1)
// tar is extracted in the active workdir of the container; the file will be placed relative to that
s.Equal(tests.DefaultFileName, tarFiles[0].Name)
}
func (s *UpdateFileSystemTestSuite) TestFilesWithAbsolutePathArePutInAbsoluteLocation() {
copyRequest := &dto.UpdateFileSystemRequest{Copy: []dto.File{
{Path: tests.FileNameWithAbsolutePath, Content: []byte(tests.DefaultFileContent)}}}
err := s.runner.UpdateFileSystem(copyRequest, context.Background())
s.Require().NoError(err)
tarFiles := s.readFilesFromTarArchive(s.stdin)
s.Len(tarFiles, 1)
s.Equal(tarFiles[0].Name, tests.FileNameWithAbsolutePath)
}
func (s *UpdateFileSystemTestSuite) TestDirectoriesAreMarkedAsDirectoryInTar() {
copyRequest := &dto.UpdateFileSystemRequest{Copy: []dto.File{{Path: tests.DefaultDirectoryName, Content: []byte{}}}}
err := s.runner.UpdateFileSystem(copyRequest, context.Background())
s.Require().NoError(err)
tarFiles := s.readFilesFromTarArchive(s.stdin)
s.Len(tarFiles, 1)
tarFile := tarFiles[0]
s.True(strings.HasSuffix(tarFile.Name+"/", tests.DefaultDirectoryName))
s.Equal(byte(tar.TypeDir), tarFile.TypeFlag)
s.Equal("", tarFile.Content)
}
func (s *UpdateFileSystemTestSuite) TestFilesToRemoveGetRemoved() {
copyRequest := &dto.UpdateFileSystemRequest{Delete: []dto.FilePath{tests.DefaultFileName}}
err := s.runner.UpdateFileSystem(copyRequest, context.Background())
s.NoError(err)
s.apiMock.AssertCalled(s.T(), "ExecuteCommand", mock.Anything, mock.Anything, mock.Anything, false, true,
mock.Anything, mock.Anything, mock.Anything)
s.Regexp(fmt.Sprintf("rm[^;]+%s' *;", regexp.QuoteMeta(tests.DefaultFileName)), s.command)
}
func (s *UpdateFileSystemTestSuite) TestFilesToRemoveGetEscaped() {
copyRequest := &dto.UpdateFileSystemRequest{Delete: []dto.FilePath{"/some/potentially/harmful'filename"}}
err := s.runner.UpdateFileSystem(copyRequest, context.Background())
s.NoError(err)
s.apiMock.AssertCalled(s.T(), "ExecuteCommand", mock.Anything, mock.Anything, mock.Anything, false, true,
mock.Anything, mock.Anything, mock.Anything)
s.Contains(s.command, "'/some/potentially/harmful'\\\\''filename'")
}
func (s *UpdateFileSystemTestSuite) TestResetTimerGetsCalled() {
copyRequest := &dto.UpdateFileSystemRequest{}
err := s.runner.UpdateFileSystem(copyRequest, context.Background())
s.NoError(err)
s.timer.AssertCalled(s.T(), "ResetTimeout")
}
type TarFile struct {
Name string
Content string
TypeFlag byte
}
func (s *UpdateFileSystemTestSuite) readFilesFromTarArchive(tarArchive io.Reader) (files []TarFile) {
reader := tar.NewReader(tarArchive)
for {
hdr, err := reader.Next()
if err != nil {
break
}
bf, err := io.ReadAll(reader)
s.Require().NoError(err)
files = append(files, TarFile{Name: hdr.Name, Content: string(bf), TypeFlag: hdr.Typeflag})
}
return files
}
func (s *UpdateFileSystemTestSuite) TestGetFileContentReturnsErrorIfExitCodeIsNotZero() {
s.mockedExecuteCommandCall.RunFn = nil
s.mockedExecuteCommandCall.Return(1, nil)
err := s.runner.GetFileContent("", logging.NewLoggingResponseWriter(nil), false, context.Background())
s.ErrorIs(err, ErrFileNotFound)
}
func (s *UpdateFileSystemTestSuite) TestFileCopyIsCanceledOnRunnerDestroy() {
s.mockedExecuteCommandCall.Run(func(args mock.Arguments) {
ctx, ok := args.Get(1).(context.Context)
s.Require().True(ok)
select {
case <-ctx.Done():
s.Fail("mergeContext is done before any of its parents")
return
case <-time.After(tests.ShortTimeout):
}
select {
case <-ctx.Done():
case <-time.After(3 * tests.ShortTimeout):
s.Fail("mergeContext is not done after the earliest of its parents")
return
}
})
ctx, cancel := context.WithCancel(context.Background())
s.runner.ctx = ctx
s.runner.cancel = cancel
<-time.After(2 * tests.ShortTimeout)
s.runner.cancel()
}

View File

@ -1,218 +0,0 @@
// Code generated by mockery v2.30.16. DO NOT EDIT.
package runner
import (
context "context"
http "net/http"
dto "github.com/openHPI/poseidon/pkg/dto"
io "io"
mock "github.com/stretchr/testify/mock"
time "time"
)
// RunnerMock is an autogenerated mock type for the Runner type
type RunnerMock struct {
mock.Mock
}
// Destroy provides a mock function with given fields: reason
func (_m *RunnerMock) Destroy(reason DestroyReason) error {
ret := _m.Called(reason)
var r0 error
if rf, ok := ret.Get(0).(func(DestroyReason) error); ok {
r0 = rf(reason)
} else {
r0 = ret.Error(0)
}
return r0
}
// Environment provides a mock function with given fields:
func (_m *RunnerMock) Environment() dto.EnvironmentID {
ret := _m.Called()
var r0 dto.EnvironmentID
if rf, ok := ret.Get(0).(func() dto.EnvironmentID); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(dto.EnvironmentID)
}
return r0
}
// ExecuteInteractively provides a mock function with given fields: id, stdin, stdout, stderr, ctx
func (_m *RunnerMock) ExecuteInteractively(id string, stdin io.ReadWriter, stdout io.Writer, stderr io.Writer, ctx context.Context) (<-chan ExitInfo, context.CancelFunc, error) {
ret := _m.Called(id, stdin, stdout, stderr, ctx)
var r0 <-chan ExitInfo
var r1 context.CancelFunc
var r2 error
if rf, ok := ret.Get(0).(func(string, io.ReadWriter, io.Writer, io.Writer, context.Context) (<-chan ExitInfo, context.CancelFunc, error)); ok {
return rf(id, stdin, stdout, stderr, ctx)
}
if rf, ok := ret.Get(0).(func(string, io.ReadWriter, io.Writer, io.Writer, context.Context) <-chan ExitInfo); ok {
r0 = rf(id, stdin, stdout, stderr, ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(<-chan ExitInfo)
}
}
if rf, ok := ret.Get(1).(func(string, io.ReadWriter, io.Writer, io.Writer, context.Context) context.CancelFunc); ok {
r1 = rf(id, stdin, stdout, stderr, ctx)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(context.CancelFunc)
}
}
if rf, ok := ret.Get(2).(func(string, io.ReadWriter, io.Writer, io.Writer, context.Context) error); ok {
r2 = rf(id, stdin, stdout, stderr, ctx)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// ExecutionExists provides a mock function with given fields: id
func (_m *RunnerMock) ExecutionExists(id string) bool {
ret := _m.Called(id)
var r0 bool
if rf, ok := ret.Get(0).(func(string) bool); ok {
r0 = rf(id)
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// GetFileContent provides a mock function with given fields: path, content, privilegedExecution, ctx
func (_m *RunnerMock) GetFileContent(path string, content http.ResponseWriter, privilegedExecution bool, ctx context.Context) error {
ret := _m.Called(path, content, privilegedExecution, ctx)
var r0 error
if rf, ok := ret.Get(0).(func(string, http.ResponseWriter, bool, context.Context) error); ok {
r0 = rf(path, content, privilegedExecution, ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// ID provides a mock function with given fields:
func (_m *RunnerMock) ID() string {
ret := _m.Called()
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// ListFileSystem provides a mock function with given fields: path, recursive, result, privilegedExecution, ctx
func (_m *RunnerMock) ListFileSystem(path string, recursive bool, result io.Writer, privilegedExecution bool, ctx context.Context) error {
ret := _m.Called(path, recursive, result, privilegedExecution, ctx)
var r0 error
if rf, ok := ret.Get(0).(func(string, bool, io.Writer, bool, context.Context) error); ok {
r0 = rf(path, recursive, result, privilegedExecution, ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// MappedPorts provides a mock function with given fields:
func (_m *RunnerMock) MappedPorts() []*dto.MappedPort {
ret := _m.Called()
var r0 []*dto.MappedPort
if rf, ok := ret.Get(0).(func() []*dto.MappedPort); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*dto.MappedPort)
}
}
return r0
}
// ResetTimeout provides a mock function with given fields:
func (_m *RunnerMock) ResetTimeout() {
_m.Called()
}
// SetupTimeout provides a mock function with given fields: duration
func (_m *RunnerMock) SetupTimeout(duration time.Duration) {
_m.Called(duration)
}
// StopTimeout provides a mock function with given fields:
func (_m *RunnerMock) StopTimeout() {
_m.Called()
}
// StoreExecution provides a mock function with given fields: id, executionRequest
func (_m *RunnerMock) StoreExecution(id string, executionRequest *dto.ExecutionRequest) {
_m.Called(id, executionRequest)
}
// TimeoutPassed provides a mock function with given fields:
func (_m *RunnerMock) TimeoutPassed() bool {
ret := _m.Called()
var r0 bool
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
// UpdateFileSystem provides a mock function with given fields: request, ctx
func (_m *RunnerMock) UpdateFileSystem(request *dto.UpdateFileSystemRequest, ctx context.Context) error {
ret := _m.Called(request, ctx)
var r0 error
if rf, ok := ret.Get(0).(func(*dto.UpdateFileSystemRequest, context.Context) error); ok {
r0 = rf(request, ctx)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewRunnerMock creates a new instance of RunnerMock. It also registers a testing interface on the mock and a cleanup function to assert the mock's expectations.
// The first argument is typically a *testing.T value.
func NewRunnerMock(t interface {
mock.TestingT
Cleanup(func())
}) *RunnerMock {
mock := &RunnerMock{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
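// With NewRunnerMock, expectations are asserted automatically via t.Cleanup,
// so tests do not need an explicit AssertExpectations call (an illustrative
// sketch):
//
//	func TestWithRunnerMock(t *testing.T) {
//		r := NewRunnerMock(t)
//		r.On("ID").Return("runner-id")
//		_ = r.ID()
//	}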