diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..943451d --- /dev/null +++ b/.gitignore @@ -0,0 +1,27 @@ +# Project binary +/poseidon + +# CPU profiling information +cmd/poseidon/default.pgo + +# Configuration file +configuration.yaml +tests/e2e/configuration.yaml + +# TLS certificate/key +*.crt +*.key +*.pem + +# trivy artifacts +.trivy + +# coverage reports +/coverage + +# IDE files +/.idea +*.iml + +# Dockerfiles repository +deploy/dockerfiles diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..d1f9fe3 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "deploy/codeocean-terraform"] + path = deploy/codeocean-terraform + url = git@lab.xikolo.de:codeocean/codeocean-terraform.git diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..b5d577f --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +sebastian.serth@hpi.de. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. 
Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..d4b0326 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,28 @@ +# Use a Golang base image +FROM golang:latest + +# Set the working directory inside the container +WORKDIR /go/src/app + +# Install dependencies and clone Poseidon repository +RUN apt-get update + # && \ + #apt-get install -y git && \ + #git clone https://github.com/openHPI/poseidon.git . + +# Install make (required for building) +RUN apt-get install -y make + +COPY . . + +# Install required project libraries +RUN make bootstrap + +# Build the binary +RUN make build + +# Expose the port on which Poseidon runs (adjust if necessary) +EXPOSE 8080 + +# Command to run Poseidon +CMD ["./poseidon"] diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..eff9632 --- /dev/null +++ b/Makefile @@ -0,0 +1,196 @@ +PROJECT_NAME = poseidon +REPOSITORY_OWNER = openHPI +PKG = github.com/$(REPOSITORY_OWNER)/$(PROJECT_NAME)/cmd/$(PROJECT_NAME) +UNIT_TESTS = $(shell go list ./... 
| grep -v /e2e | grep -v /recovery) +GOCOVERDIR=coverage + +# Define the PGO file to be used for the build +PGO_FILE = ./cmd/$(PROJECT_NAME)/default.pgo + +# Docker options +DOCKER_TAG = poseidon:latest +DOCKER_OPTS = -v $(shell pwd)/configuration.yaml:/configuration.yaml +LOWER_REPOSITORY_OWNER = $(shell echo $(REPOSITORY_OWNER) | tr A-Z a-z) + +# Define image to be used in e2e tests. Requires `make` to be available. +E2E_TEST_DOCKER_CONTAINER = co_execenv_java +E2E_TEST_DOCKER_TAG = 17 +E2E_TEST_DOCKER_IMAGE = $(LOWER_REPOSITORY_OWNER)/$(E2E_TEST_DOCKER_CONTAINER):$(E2E_TEST_DOCKER_TAG) +# The base image of the e2e test image. This is used to build the base image as well. +E2E_TEST_BASE_CONTAINER := docker_exec_phusion +E2E_TEST_BASE_IMAGE = $(LOWER_REPOSITORY_OWNER)/$(E2E_TEST_BASE_CONTAINER) + +default: help + +.PHONY: all +all: build + +.PHONY: bootstrap +bootstrap: deps lint-deps git-hooks ## Install all dependencies + +.PHONY: deps +deps: ## Get the dependencies + @go get -v -d ./... + @go install github.com/vektra/mockery/v2@latest + +.PHONY: upgrade-deps +upgrade-deps: ## Upgrade the dependencies + @go get -u -v -d ./... 
+ +.PHONY: tidy-deps +tidy-deps: ## Remove unused dependencies + @go mod tidy + + +.PHONY: git-hooks +GIT_HOOKS_DIR := $(shell if [ -f .git ]; then echo $$(cat .git | sed 's/gitdir: //')/hooks; else echo .git/hooks; fi) +git-hooks: $(GIT_HOOKS_DIR)/pre-commit ## Install the git-hooks +$(GIT_HOOKS_DIR)/%: git_hooks/% + @if [ -d "$(GIT_HOOKS_DIR)" ]; then \ + cp $^ $@; \ + chmod 755 $@; \ + fi + +.PHONY: build +build: deps ## Build the binary +ifneq ("$(wildcard $(PGO_FILE))","") +# PGO_FILE exists + @go build -pgo=$(PGO_FILE) -ldflags "-X main.pgoEnabled=true" -o $(PROJECT_NAME) -v $(PKG) +else +# PGO_FILE does not exist + @go build -o $(PROJECT_NAME) -v $(PKG) +endif + +.PHONY: build-cover +build-cover: deps ## Build the binary and with coverage support for e2e-tests + @go build -cover -o $(PROJECT_NAME) -v $(PKG) + +.PHONY: clean +clean: ## Remove previous build + @rm -f poseidon + +.PHONY: docker +docker: + @CGO_ENABLED=0 make build + @docker build -t $(DOCKER_TAG) -f deploy/poseidon/Dockerfile . + +.PHONY: lint-deps +lint-deps: ## Install linter dependencies + @go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +.PHONY: golangci-lint +golangci-lint: ## Lint the source code using golangci-lint + @golangci-lint run ./... --timeout=3m + +.PHONY: lint +lint: golangci-lint ## Lint the source code using all linters + +.PHONY: mock +snaked_name=$(shell sed -e "s/\([a-z]\)\([A-Z]\)/\1_\2/g" -e "s/\([A-Z]\)/\L\1/g" -e "s/^_//" <<< "$(name)") +mock: deps ## Create/Update a mock. 
Example: make mock name=apiQuerier pkg=./nomad + @mockery \ + --name=$(name) \ + --structname=$(name)Mock \ + --filename=$(snaked_name)_mock.go \ + --inpackage \ + --srcpkg=$(pkg) + +.PHONY: test +test: deps ## Run unit tests + @go test -count=1 -short $(UNIT_TESTS) + +.PHONY: race +race: deps ## Run data race detector + @go test -race -count=1 -short $(UNIT_TESTS) + +.PHONY: coverage +coverage: deps ## Generate code coverage report + @mkdir -p $(GOCOVERDIR) + @go test $(UNIT_TESTS) -v -coverprofile $(GOCOVERDIR)/coverage_output.cov -covermode atomic + # exclude mock files from coverage + @cat $(GOCOVERDIR)/coverage_output.cov | grep -v _mock.go > $(GOCOVERDIR)/coverage.cov || true + @rm $(GOCOVERDIR)/coverage_output.cov + @go tool cover -func=$(GOCOVERDIR)/coverage.cov + +.PHONY: coverhtml +coverhtml: coverage ## Generate HTML coverage report + @go tool cover -html=$(GOCOVERDIR)/coverage.cov -o $(GOCOVERDIR)/coverage_unit.html + +deploy/dockerfiles: ## Clone Dockerfiles repository + @git clone git@github.com:$(REPOSITORY_OWNER)/dockerfiles.git deploy/dockerfiles + +.PHONY: run-with-coverage +run-with-coverage: build-cover ## Run binary and capture code coverage (during e2e tests) + @mkdir -p $(GOCOVERDIR) + @GOCOVERDIR=$(GOCOVERDIR) ./$(PROJECT_NAME) + +## This target uses `systemd-socket-activate` (only Linux) to create a systemd socket and makes it accessible to a new Poseidon execution. 
+.PHONY: run-with-socket +run-with-socket: build + @systemd-socket-activate -l 7200 ./$(PROJECT_NAME) + +.PHONY: convert-run-coverage +convert-run-coverage: ## Convert coverage data (created by `run-with-coverage`) to legacy text format + @go tool covdata textfmt -i $(GOCOVERDIR) -o $(GOCOVERDIR)/coverage_run.cov + @go tool cover -html=$(GOCOVERDIR)/coverage_run.cov -o $(GOCOVERDIR)/coverage_run.html + +.PHONY: e2e-test-docker-image +e2e-test-docker-image: deploy/dockerfiles ## Build Docker image that is used in e2e tests + @docker build -t $(E2E_TEST_BASE_IMAGE) deploy/dockerfiles/$(E2E_TEST_BASE_CONTAINER) + @docker build -t $(E2E_TEST_DOCKER_IMAGE) deploy/dockerfiles/$(E2E_TEST_DOCKER_CONTAINER)/$(E2E_TEST_DOCKER_TAG) + +.PHONY: e2e-test +e2e-test: deps ## Run e2e tests + @[ -z "$(docker images -q $(E2E_TEST_DOCKER_IMAGE))" ] || docker pull $(E2E_TEST_DOCKER_IMAGE) + @go test -count=1 ./tests/e2e -v -args -dockerImage="$(E2E_TEST_DOCKER_IMAGE)" + +.PHONY: e2e-test-recovery +e2e-test-recovery: deps ## Run recovery e2e tests + @go test -count=1 ./tests/recovery -v -args -dockerImage="$(E2E_TEST_DOCKER_IMAGE)" + +.PHONY: e2e-docker +e2e-docker: docker ## Run e2e tests against the Docker container + docker run --rm -p 127.0.0.1:7200:7200 \ + --name $(E2E_TEST_DOCKER_CONTAINER) \ + -e POSEIDON_SERVER_ADDRESS=0.0.0.0 \ + $(DOCKER_OPTS) \ + $(DOCKER_TAG) & + @timeout 30s bash -c "until curl -s -o /dev/null http://127.0.0.1:7200/; do sleep 0.1; done" + @make e2e-test || EXIT=$$?; docker stop $(E2E_TEST_DOCKER_CONTAINER); exit $$EXIT + +# See https://aquasecurity.github.io/trivy/v0.18.1/integrations/gitlab-ci/ +TRIVY_VERSION = $(shell wget -qO - "https://api.github.com/repos/aquasecurity/trivy/releases/latest" | grep '"tag_name":' | sed -E 's/.*"v([^"]+)".*/\1/') +.trivy/trivy: + @mkdir -p .trivy + @wget --no-verbose https://github.com/aquasecurity/trivy/releases/download/v$(TRIVY_VERSION)/trivy_$(TRIVY_VERSION)_Linux-64bit.tar.gz -O - | tar -zxvf - -C .trivy + @chmod +x 
.trivy/trivy + +# trivy only comes with a template for container_scanning but we want dependency_scanning here +.trivy/contrib/gitlab-dep.tpl: .trivy/trivy + @sed -e "s/container_scanning/dependency_scanning/" .trivy/contrib/gitlab.tpl > $@ + +.PHONY: trivy-scan-deps +trivy-scan-deps: poseidon .trivy/contrib/gitlab-dep.tpl ## Run trivy vulnerability against our dependencies + make trivy TRIVY_COMMAND="fs" TRIVY_TARGET="--skip-dirs .trivy --skip-files go.sum ." TRIVY_TEMPLATE="@.trivy/contrib/gitlab-dep.tpl" + +.PHONY: trivy-scan-docker +trivy-scan-docker: ## Run trivy vulnerability scanner against the docker image + make trivy TRIVY_COMMAND="i" TRIVY_TARGET="--skip-files home/api/poseidon $(DOCKER_TAG)" TRIVY_TEMPLATE="@.trivy/contrib/gitlab.tpl" + +.PHONY: trivy +trivy: .trivy/trivy + # Build report + @.trivy/trivy --cache-dir .trivy/.trivycache/ $(TRIVY_COMMAND) --exit-code 0 --no-progress --format template --template $(TRIVY_TEMPLATE) -o .trivy/gl-scanning-report.json $(TRIVY_TARGET) + # Print report + @.trivy/trivy --cache-dir .trivy/.trivycache/ $(TRIVY_COMMAND) --exit-code 0 --no-progress $(TRIVY_TARGET) + # Fail on severe vulnerabilities + @.trivy/trivy --cache-dir .trivy/.trivycache/ $(TRIVY_COMMAND) --exit-code 1 --severity HIGH,CRITICAL --no-progress $(TRIVY_TARGET) + +.PHONY: help +HELP_FORMAT=" \033[36m%-25s\033[0m %s\n" +help: ## Display this help screen + @echo "Valid targets:" + @grep -E '^[^ ]+:.*?## .*$$' $(MAKEFILE_LIST) | \ + sort | \ + awk 'BEGIN {FS = ":.*?## "}; \ + {printf $(HELP_FORMAT), $$1, $$2}' diff --git a/api/swagger.yaml b/api/swagger.yaml new file mode 100644 index 0000000..8d84475 --- /dev/null +++ b/api/swagger.yaml @@ -0,0 +1,688 @@ +openapi: 3.0.0 +info: + title: Poseidon API + description: | + This API is used by CodeOcean to run code in runners. 
+ version: '0.2.2' + +servers: + - url: '/api/v1' + +components: + schemas: + ExecutionEnvironment: + type: object + properties: + id: + description: The id of the execution environment + type: integer + example: 6 + image: + description: The name of the OCI image used for this execution environment + type: string + example: openhpi/co_execenv_python:latest + prewarmingPoolSize: + description: Number of runners with this configuration to prewarm + type: integer + example: 50 + cpuLimit: + description: CPU limit for one runner in MHz + type: number + example: 100 + memoryLimit: + description: Memory limit for one runner in MB. Exceeding the limit may result in termination of the runner. + type: integer + example: 256 + networkAccess: + description: Whether the runner is allowed to access the network or not + type: boolean + example: true + exposedPorts: + description: A list of ports inside the runner to expose to the outside + type: array + items: + type: integer + minimum: 1 + maximum: 65535 + example: [80, 443] + required: + - id + - image + - prewarmingPoolSize + - cpuLimit + - memoryLimit + - networkAccess + - exposedPorts + additionalProperties: false + FileHeader: + type: object + properties: + name: + description: The path of the file. + type: string + example: ./logs/last.log + entryType: + description: The type of the object (file). See the man page `info ls` for all the meanings. + type: string + minLength: 1 + maxLength: 1 + enum: ["-", "a", "A", "b", "c", "C", "d", "D", "l", "M", "n", "p", "P", "s", "w", "?"] + default: "-" + size: + description: The size of the file in bytes. + type: integer + example: 42 + modificationTime: + description: The Unix Time Stamp of the last modification. 
+ type: integer + example: 1654201799 + required: + - name + - size + - modificationTime + additionalProperties: false + ClientError: + type: object + properties: + message: + description: Explanation on why the request could not be handled + type: string + example: Nomad server unreachable + required: + - message + additionalProperties: false + + securitySchemes: + poseidonAuthToken: + type: apiKey + in: header + name: Poseidon-Token + description: A security token that might be required depending on the Poseidon configuration. + + responses: + BadRequest: + description: Request is invalid. E.g. request body does not follow the json schema required by the given route or url parameters are invalid. + content: + application/json: + schema: + type: object + properties: + message: + description: Explanation on why the request is invalid + type: string + Unauthorized: + description: Client could not be authenticated + NotFound: + description: The entity with the given identifier does not exist. + RunnerGone: + description: The runner is not available any longer. + content: + application/json: + schema: + $ref: "#/components/schemas/ClientError" + FailedFileDependency: + description: The file is not available. 
+ content: + application/json: + schema: + $ref: "#/components/schemas/ClientError" + InternalServerError: + description: Request could not be handled + content: + application/json: + schema: + allOf: + - $ref: "#/components/schemas/ClientError" + - type: object + properties: + errorCode: + description: Machine readable error description + type: string + enum: + - NOMAD_UNREACHABLE + - NOMAD_OVERLOAD + - NOMAD_INTERNAL_SERVER_ERROR + - PREWARMING_POOL_DEPLETING + - UNKNOWN + example: NOMAD_UNREACHABLE + +tags: + - name: runner + description: A unit of execution + - name: execution environment + description: A template for runners + - name: miscellaneous + +paths: + /health: + get: + summary: Check if the API is available + description: If this route does not return, the API is not available. + tags: + - miscellaneous + responses: + "204": + description: Everything okay + "503": + $ref: "#/components/responses/InternalServerError" + /version: + get: + summary: Retrieve the version of Poseidon + description: Return hash-like release information. + tags: + - miscellaneous + responses: + "200": + description: The release information could be returned. + "404": + $ref: "#/components/responses/NotFound" + "500": + $ref: "#/components/responses/InternalServerError" + + /statistics/execution-environments: + get: + summary: Retrieve the statistics about the execution environments of Poseidon + description: Return Return the current availability and usage of runners. + tags: + - miscellaneous + security: + - poseidonAuthToken: [ ] + responses: + "200": + description: Success. Returns all execution environments + content: + application/json: + schema: + type: object + additionalProperties: + type: object + properties: + id: + description: The id of the execution environment. + type: integer + prewarmingPoolSize: + description: Number of runners with this configuration to prewarm. 
+ type: integer + example: 50 + idleRunners: + description: Number of runners currently prewarmed. + type: number + example: 45 + usedRunners: + description: Number of runners currently in use. + type: number + example: 20 + example: + 21: + id: 21 + prewarmingPoolSize: 50 + idleRunners: 45 + usedRunners: 20 + 42: + id: 42 + prewarmingPoolSize: 50 + idleRunners: 45 + usedRunners: 20 + "500": + $ref: "#/components/responses/InternalServerError" + + /runners: + post: + summary: Provide a runner + description: Provide a runner with the requested execution environment to the client (CodeOcean). + tags: + - runner + security: + - poseidonAuthToken: [ ] + requestBody: + description: Runner attributes + required: true + content: + application/json: + schema: + type: object + properties: + inactivityTimeout: + description: Specify how long the runner should be available when there is no activity (execution or file copy). Activity resets this timer. 0 means no timeout + type: integer + default: 0 + example: 60 + executionEnvironmentId: + description: Specifies the execution environment of the runner + type: integer + example: 6 + required: + - executionEnvironmentId + additionalProperties: false + responses: + "200": + description: A runner was successfully reserved + content: + application/json: + schema: + type: object + properties: + runnerId: + description: The UUID of the provided runner + type: string + example: 123e4567-e89b-12d3-a456-426614174000 + mappedPorts: + description: Array containing the addresses of the mapped ports specified in the execution environment. + type: array + items: + description: The exposedPort inside the container is reachable on the returned hostAddress. + type: object + properties: + exposedPort: + description: The port inside the container. + type: integer + minimum: 0 + maximum: 65535 + example: 80 + hostAddress: + description: The address which can be contacted to reach the mapped port. 
+ type: string + example: 10.224.6.18:23832 + "400": + $ref: "#/components/responses/BadRequest" + "401": + $ref: "#/components/responses/Unauthorized" + "404": + $ref: "#/components/responses/NotFound" + "500": + $ref: "#/components/responses/InternalServerError" + + /runners/{runnerId}: + delete: + summary: Destroy the runner + description: The runner is no longer in use and should be destroyed. + tags: + - runner + security: + - poseidonAuthToken: [ ] + parameters: + - name: runnerId + in: path + schema: + description: The UUID of the runner that should be destroyed + type: string + example: 123e4567-e89b-12d3-a456-426614174000 + required: true + responses: + "204": + description: Success + "401": + $ref: "#/components/responses/Unauthorized" + "410": + $ref: "#/components/responses/RunnerGone" + "500": + $ref: "#/components/responses/InternalServerError" + + /runners/{runnerId}/files: + parameters: + - name: runnerId + in: path + schema: + description: Runner on which the files should be placed + type: string + example: 123e4567-e89b-12d3-a456-426614174000 + required: true + get: + summary: List filesystem. + description: List all files available in the runner. + tags: + - runner + security: + - poseidonAuthToken: [ ] + parameters: + - name: recursive + in: query + description: Specify if the filesystem should be listed recursively. + schema: + type: boolean + default: true + required: false + - name: path + in: query + description: Specify the directory from where the filesystem is listed. + schema: + type: string + format: pct-encoded # rfc 3986 + default: "./" + required: false + - name: privilegedExecution + in: query + description: Specifies if the command should be executed as an privileged user. + schema: + type: boolean + default: false + responses: + "200": + description: Success. Returns the listing of the runner's filesystem. 
+ content: + application/json: + schema: + type: object + properties: + files: + description: A list of all Files + type: array + items: + $ref: "#/components/schemas/FileHeader" + "401": + $ref: "#/components/responses/Unauthorized" + "410": + $ref: "#/components/responses/RunnerGone" + "424": + $ref: "#/components/responses/FailedFileDependency" + "500": + $ref: "#/components/responses/InternalServerError" + patch: + summary: Manipulate runner file system + description: Delete the files with the given paths from the file system of the specified runner. Afterwards, copy the enclosed files to the runner. Existing files get overwritten and results of previous file copy operations on the same runner are present when executing multiple requests. + tags: + - runner + security: + - poseidonAuthToken: [ ] + requestBody: + description: Files to copy or delete + required: true + content: + application/json: + schema: + type: object + properties: + delete: + description: Array of filepaths that should be deleted. Each of the given files or directories should be deleted recursively. + type: array + items: + description: Location of the file or directory that should be deleted. Can be absolute (starting with /) or relative to the workspace directory. + type: string + example: /workspace + copy: + description: Array of files that should be placed in the runner. + type: array + items: + type: object + properties: + path: + description: Location where the file should be placed. Can be absolute (starting with /) or relative to the workspace directory. Missing parent directories are created. If this ends with a /, the path is interpreted as a directory and content is ignored. Currently, every file/directory is owned by root but the directories have the sticky bit set to allow unprivileged file creation. + type: string + example: /etc/passwd + content: + description: The content of the file. MUST be base64 encoded. If this is not given, the file is created with no content. 
+ type: string + example: cm9vdDp4OjA6MDo6L3Jvb3Q6L2Jpbi9iYXNo # root:x:0:0::/root:/bin/bash + required: + - path + additionalProperties: false + additionalProperties: false + responses: + "204": + description: All files were saved + "400": + $ref: "#/components/responses/BadRequest" + "401": + $ref: "#/components/responses/Unauthorized" + "410": + $ref: "#/components/responses/RunnerGone" + "500": + $ref: "#/components/responses/InternalServerError" + + /runners/{runnerId}/files/raw: + get: + summary: Download the file. + description: Download the specified file from the selected runner. + tags: + - runner + security: + - poseidonAuthToken: [ ] + parameters: + - name: runnerId + description: Runner on which the command should be executed + in: path + schema: + type: string + example: 123e4567-e89b-12d3-a456-426614174000 + required: true + - name: path + in: query + description: Specify the file that should be returned by its filename including its path and extension. + schema: + type: string + format: pct-encoded # rfc 3986 + example: "./flag.txt" + required: true + - name: privilegedExecution + in: query + description: Specifies if the command should be executed as an privileged user. + schema: + type: boolean + default: false + responses: + "200": + description: Success. Returns the file. + content: + application/octet-stream: + schema: + type: string + format: binary + "401": + $ref: "#/components/responses/Unauthorized" + "410": + $ref: "#/components/responses/RunnerGone" + "424": + $ref: "#/components/responses/FailedFileDependency" + + /runners/{runnerId}/execute: + post: + summary: Execute a command + description: Execute a command in the runner. Whether this starts the actual execution or only prepares a Websocket URL to start it depends on the implementation. 
+ tags: + - runner + security: + - poseidonAuthToken: [ ] + parameters: + - name: runnerId + description: Runner on which the command should be executed + in: path + schema: + type: string + example: 123e4567-e89b-12d3-a456-426614174000 + required: true + requestBody: + description: Description what and how to execute + required: true + content: + application/json: + schema: + type: object + properties: + command: + description: The command to be executed. The working directory for this execution is the working directory of the image of the execution environment. Single quotation ' can not be used. + type: string + example: python exercise.py + privilegedExecution: + description: Specifies if the command should be executed as an privileged user. + type: boolean + default: false + environment: + description: Environment variables for this execution. The keys of this object are the variable names and the value of each key is the value of the variable with the same name. The environment variables of the system remain accessible. + type: object + additionalProperties: + type: string + pattern: "[a-zA-Z_][a-zA-Z0-9_]+" + default: {} + example: + PATH: /bin + timeLimit: + description: Specifies the time in seconds until this execution should be killed. 0 means do not kill + type: integer + default: 0 + example: 5 + required: + - command + additionalProperties: false + responses: + "200": + description: Success. 
Returns a Websocket URL to connect to + content: + application/json: + schema: + type: object + properties: + websocketUrl: + description: A Websocket endpoint to connect to communicate with the process running in the runner + type: string + example: "ws://ws.example.com/path/to/websocket" + "400": + $ref: "#/components/responses/BadRequest" + "401": + $ref: "#/components/responses/Unauthorized" + "410": + $ref: "#/components/responses/RunnerGone" + "500": + $ref: "#/components/responses/InternalServerError" + + /runners/{runnerId}/websocket: + get: + summary: Connect to an execution. + description: The url including all parameters will be generated and returned by the `execute` route. This is a WebSocket endpoint. The schema for the WS communication is described in `websocket.schema.json`. + tags: + - runner + security: + - poseidonAuthToken: [ ] + parameters: + - name: runnerId + description: Runner on which the execution is created. + in: path + schema: + type: string + example: 123e4567-e89b-12d3-a456-426614174000 + required: true + - name: executionID + description: The execution of the runner that you want to connect to. + in: query + schema: + type: string + example: 123e4567-e89b-12d3-a456-426614174000 + required: true + responses: + "101": + description: Success. Switching protocols to WebSocket. + "401": + $ref: "#/components/responses/Unauthorized" + "404": + $ref: "#/components/responses/NotFound" + "410": + $ref: "#/components/responses/RunnerGone" + "500": + $ref: "#/components/responses/InternalServerError" + + /execution-environments: + get: + summary: List execution environments + description: List all execution environments the API is aware of. + tags: + - execution environment + security: + - poseidonAuthToken: [ ] + parameters: + - name: fetch + in: query + description: Specify whether environments should be fetched again from the executor before returning. Otherwise, the data currently in cache is returned. 
+ schema: + type: boolean + default: false + required: false + responses: + "200": + description: Success. Returns all execution environments + content: + application/json: + schema: + type: object + properties: + executionEnvironments: + description: A list of all execution environments + type: array + items: + $ref: "#/components/schemas/ExecutionEnvironment" + "401": + $ref: "#/components/responses/Unauthorized" + + /execution-environments/{executionEnvironmentId}: + parameters: + - name: executionEnvironmentId + in: path + description: Id of the execution environment + required: true + schema: + type: integer + get: + summary: Show an execution environment + description: Get a representation of the execution environment specified by the id. + tags: + - execution environment + security: + - poseidonAuthToken: [ ] + parameters: + - name: fetch + in: query + description: Specify whether the environment should be fetched again from the executor before returning. Otherwise, the data currently in cache is returned. + schema: + type: boolean + default: false + required: false + responses: + "200": + description: Success. Returns the execution environment + content: + application/json: + schema: + $ref: "#/components/schemas/ExecutionEnvironment" + "400": + $ref: "#/components/responses/BadRequest" + "401": + $ref: "#/components/responses/Unauthorized" + "404": + $ref: "#/components/responses/NotFound" + put: + summary: Create or replace the execution environment + description: This is used for keeping execution environments in sync between the client and the provider of this API. By sending a request with an id, the execution environment is created if it does not exist and updated otherwise. 
+ tags: + - execution environment + security: + - poseidonAuthToken: [ ] + requestBody: + description: The new execution environment + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ExecutionEnvironment" + responses: + "201": + description: The executions environment did not exist and was created + "204": + description: The execution environment was replaced + "400": + $ref: "#/components/responses/BadRequest" + "401": + $ref: "#/components/responses/Unauthorized" + delete: + summary: Delete the execution environment + description: Remove the specified execution environment from the API. + tags: + - execution environment + security: + - poseidonAuthToken: [ ] + responses: + "204": + description: The execution environment was deleted. + "400": + $ref: "#/components/responses/BadRequest" + "401": + $ref: "#/components/responses/Unauthorized" + "404": + $ref: "#/components/responses/NotFound" diff --git a/api/websocket.schema.json b/api/websocket.schema.json new file mode 100644 index 0000000..eaafdff --- /dev/null +++ b/api/websocket.schema.json @@ -0,0 +1,42 @@ +{ + "$schema": "http://json-schema.org/schema#", + "title": "event", + "type": "object", + "oneOf": [ + { + "properties": { + "type": { + "const": "exit" + }, + "data": { + "type": "integer", + "minimum": 0, + "maximum": 255 + } + }, + "required": ["type", "data"], + "additionalProperties": false + }, + { + "properties": { + "type": { + "enum": [ "stdout", "stderr", "error" ] + }, + "data": { + "type": "string" + } + }, + "required": ["type", "data"], + "additionalProperties": false + }, + { + "properties": { + "type": { + "enum": [ "start", "timeout" ] + } + }, + "required": ["type"], + "additionalProperties": false + } + ] +} diff --git a/app.yaml b/app.yaml new file mode 100644 index 0000000..cd8d1c2 --- /dev/null +++ b/app.yaml @@ -0,0 +1,6 @@ +runtime: go122 +api_version: go1 + +handlers: +- url: /.* + script: _go_app \ No newline at end of file diff --git 
a/cmd/poseidon/main.go b/cmd/poseidon/main.go new file mode 100644 index 0000000..3705351 --- /dev/null +++ b/cmd/poseidon/main.go @@ -0,0 +1,423 @@ +package main + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "github.com/coreos/go-systemd/v22/activation" + "github.com/coreos/go-systemd/v22/daemon" + "github.com/getsentry/sentry-go" + sentryhttp "github.com/getsentry/sentry-go/http" + "github.com/gorilla/mux" + "github.com/openHPI/poseidon/internal/api" + "github.com/openHPI/poseidon/internal/config" + "github.com/openHPI/poseidon/internal/environment" + "github.com/openHPI/poseidon/internal/runner" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/openHPI/poseidon/pkg/logging" + "github.com/openHPI/poseidon/pkg/monitoring" + "k8s.io/client-go/kubernetes" + "net" + "net/http" + "os" + "os/signal" + "regexp" + "runtime" + "runtime/debug" + "runtime/pprof" + "strconv" + "strings" + "sync" + "syscall" + "time" +) + +var ( + gracefulShutdownWait = 15 * time.Second + log = logging.GetLogger("main") + // If pgoEnabled is true, the binary was built with PGO enabled. + // This is set during compilation with our Makefile as a STRING. 
+ pgoEnabled = "false" +) + +func getVcsRevision(short bool) string { + vcsRevision := "unknown" + vcsModified := false + + if info, ok := debug.ReadBuildInfo(); ok { + for _, setting := range info.Settings { + if setting.Key == "vcs.revision" { + vcsRevision = setting.Value + } else if setting.Key == "vcs.modified" { + var err error + vcsModified, err = strconv.ParseBool(setting.Value) + if err != nil { + vcsModified = true // fallback to true, so we can see that something is wrong + log.WithError(err).Error("Could not parse the vcs.modified setting") + } + } + } + } + + if short { + vcsRevision = vcsRevision[:7] + } + + if vcsModified { + return vcsRevision + "-modified" + } else { + return vcsRevision + } +} + +func initializeUserAgent() { + dto.UserAgentOut = strings.ReplaceAll(dto.UserAgentOut, dto.UserAgentVCSPlaceholder, getVcsRevision(true)) + dto.UserAgentFiltered = strings.ReplaceAll(dto.UserAgentFiltered, dto.UserAgentVCSPlaceholder, getVcsRevision(true)) + dto.UserAgentFiltered = strings.ReplaceAll(dto.UserAgentFiltered, dto.UserAgentFilterTokenPlaceholder, config.Config.Server.LoggingFilterToken) +} + +func initSentry(options *sentry.ClientOptions, profilingEnabled bool) { + if options.Release == "" { + commit := getVcsRevision(false) + options.Release = commit + } + + options.BeforeSendTransaction = func(event *sentry.Event, _ *sentry.EventHint) *sentry.Event { + if event.Tags == nil { + event.Tags = make(map[string]string) + } + event.Tags["go_pgo"] = pgoEnabled + event.Tags["go_profiling"] = strconv.FormatBool(profilingEnabled) + return event + } + + if err := sentry.Init(*options); err != nil { + log.Errorf("sentry.Init: %s", err) + } +} + +func shutdownSentry() { + if err := recover(); err != nil { + sentry.CurrentHub().Recover(err) + sentry.Flush(logging.GracefulSentryShutdown) + } +} + +func initProfiling(options config.Profiling) (cancel func()) { + if options.CPUEnabled { + profile, err := os.Create(options.CPUFile) + if err != nil { + 
log.WithError(err).Error("Error while opening the profile file") + } + + log.Debug("Starting CPU profiler") + if err := pprof.StartCPUProfile(profile); err != nil { + log.WithError(err).Error("Error while starting the CPU profiler!!") + } + + cancel = func() { + if options.CPUEnabled { + log.Debug("Stopping CPU profiler") + pprof.StopCPUProfile() + if err := profile.Close(); err != nil { + log.WithError(err).Error("Error while closing profile file") + } + } + } + } else { + cancel = func() {} + } + return cancel +} + +// watchMemoryAndAlert monitors the memory usage of Poseidon and sends an alert if it exceeds a threshold. +func watchMemoryAndAlert(options config.Profiling) { + if options.MemoryInterval == 0 { + return + } + + var exceeded bool + for { + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + log.WithField("heap", stats.HeapAlloc).Trace("Current Memory Usage") + + const megabytesToBytes = 1000 * 1000 + if !exceeded && stats.HeapAlloc >= uint64(options.MemoryThreshold)*megabytesToBytes { + exceeded = true + log.WithField("heap", stats.HeapAlloc).Warn("Memory Threshold exceeded") + + err := pprof.Lookup("heap").WriteTo(os.Stderr, 1) + if err != nil { + log.WithError(err).Warn("Failed to log the heap profile") + } + + err = pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) + if err != nil { + log.WithError(err).Warn("Failed to log the goroutines") + } + } else if exceeded { + exceeded = false + log.WithField("heap", stats.HeapAlloc).Info("Memory Threshold no longer exceeded") + } + + select { + case <-time.After(time.Duration(options.MemoryInterval) * time.Millisecond): + continue + case <-context.Background().Done(): + return + } + } +} + +func runServer(router *mux.Router, server *http.Server, cancel context.CancelFunc) { + defer cancel() + defer shutdownSentry() // shutdownSentry must be executed in the main goroutine. 
+ + httpListeners := getHTTPListeners(server) + notifySystemd(router) + serveHTTPListeners(server, httpListeners) +} + +func getHTTPListeners(server *http.Server) (httpListeners []net.Listener) { + var err error + if config.Config.Server.SystemdSocketActivation { + httpListeners, err = activation.Listeners() + } else { + var httpListener net.Listener + httpListener, err = net.Listen("tcp", server.Addr) + httpListeners = append(httpListeners, httpListener) + } + if err != nil || httpListeners == nil || len(httpListeners) == 0 { + log.WithError(err). + WithField("listeners", httpListeners). + WithField("systemd_socket", config.Config.Server.SystemdSocketActivation). + Fatal("Failed listening to any socket") + return nil + } + return httpListeners +} + +func serveHTTPListeners(server *http.Server, httpListeners []net.Listener) { + var wg sync.WaitGroup + wg.Add(len(httpListeners)) + for _, l := range httpListeners { + go func(listener net.Listener) { + defer wg.Done() + log.WithField("address", listener.Addr()).Info("Serving Listener") + serveHTTPListener(server, listener) + }(l) + } + wg.Wait() +} + +func serveHTTPListener(server *http.Server, l net.Listener) { + var err error + if config.Config.Server.TLS.Active { + server.TLSConfig = config.TLSConfig + log.WithField("CertFile", config.Config.Server.TLS.CertFile). + WithField("KeyFile", config.Config.Server.TLS.KeyFile). 
+ Debug("Using TLS") + err = server.ServeTLS(l, config.Config.Server.TLS.CertFile, config.Config.Server.TLS.KeyFile) + } else { + err = server.Serve(l) + } + + if errors.Is(err, http.ErrServerClosed) { + log.WithError(err).WithField("listener", l.Addr()).Info("Server closed") + } else { + log.WithError(err).WithField("listener", l.Addr()).Error("Error during listening and serving") + } +} + +func notifySystemd(router *mux.Router) { + notify, err := daemon.SdNotify(false, daemon.SdNotifyReady) + switch { + case err == nil && !notify: + log.Debug("Systemd Readiness Notification not supported") + case err != nil: + log.WithError(err).WithField("notify", notify).Warn("Failed notifying Readiness to Systemd") + default: + log.Trace("Notified Readiness to Systemd") + } + + interval, err := daemon.SdWatchdogEnabled(false) + if err != nil || interval == 0 { + log.WithError(err).Error("Systemd Watchdog not supported") + return + } + go systemdWatchdogLoop(context.Background(), router, interval) +} + +func systemdWatchdogLoop(ctx context.Context, router *mux.Router, interval time.Duration) { + healthRoute, err := router.Get(api.HealthPath).URL() + if err != nil { + log.WithError(err).Error("Failed to parse Health route") + return + } + healthURL := config.Config.Server.URL().String() + healthRoute.String() + + // Workaround for certificate subject names + unspecifiedAddresses := regexp.MustCompile(`0\.0\.0\.0|\[::]`) + healthURL = unspecifiedAddresses.ReplaceAllString(healthURL, "localhost") + + client := &http.Client{} + if config.Config.Server.TLS.Active { + tlsConfig := &tls.Config{RootCAs: x509.NewCertPool()} // #nosec G402 The default MinTLSVersion is secure. 
+ caCertBytes, err := os.ReadFile(config.Config.Server.TLS.CAFile) + if err != nil { + log.WithError(err).Warn("Cannot read tls ca file") + } else { + ok := tlsConfig.RootCAs.AppendCertsFromPEM(caCertBytes) + log.WithField("success", ok).Trace("Loaded CA certificate") + } + client.Transport = &http.Transport{TLSClientConfig: tlsConfig} + } + + // notificationIntervalFactor defines how many more notifications we send than required. + const notificationIntervalFactor = 2 + for { + select { + case <-ctx.Done(): + return + case <-time.After(interval / notificationIntervalFactor): + notifySystemdWatchdog(ctx, healthURL, client) + } + } +} + +func notifySystemdWatchdog(ctx context.Context, healthURL string, client *http.Client) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, healthURL, http.NoBody) + if err != nil { + return + } + + req.Header.Set("User-Agent", dto.UserAgentFiltered) + resp, err := client.Do(req) + if err != nil { + log.WithError(err).Debug("Failed watchdog health check") + // We do not check for resp.StatusCode == 503 as Poseidon's error recovery will try to handle such errors + // by itself. The Watchdog should just check that Poseidon handles http requests at all. + return + } + _ = resp.Body.Close() + + notify, err := daemon.SdNotify(false, daemon.SdNotifyWatchdog) + switch { + case err == nil && !notify: + log.Debug("Systemd Watchdog Notification not supported") + case err != nil: + log.WithError(err).WithField("notify", notify).Warn("Failed notifying Systemd Watchdog") + default: + log.Trace("Notified Systemd Watchdog") + } +} + +type managerCreator func(ctx context.Context) ( + runnerManager runner.Manager, environmentManager environment.ManagerHandler) + +// createManagerHandler adds the managers of the passed managerCreator to the chain of responsibility. 
+func createManagerHandler(handler managerCreator, enabled bool, + nextRunnerManager runner.Manager, nextEnvironmentManager environment.ManagerHandler, ctx context.Context) ( + runnerManager runner.Manager, environmentManager environment.ManagerHandler) { + if !enabled { + return nextRunnerManager, nextEnvironmentManager + } + + runnerManager, environmentManager = handler(ctx) + runnerManager.SetNextHandler(nextRunnerManager) + environmentManager.SetNextHandler(nextEnvironmentManager) + return runnerManager, environmentManager +} + +func createKubernetesManager(ctx context.Context) (runner.Manager, environment.ManagerHandler) { + // API initialization + kubernetesClient, err := kubernetes.NewForConfig(config.Config.Kubernetes.Config) + if err != nil { + log.WithError(err).Fatal("Failed to create kubernetes client") + } + runnerManager := runner.NewKubernetesRunnerManager(ctx, kubernetesClient) + environmentManager := environment.NewKubernetesEnvironmentManager(runnerManager, kubernetesClient) + return runnerManager, environmentManager +} + +// initRouter builds a router that serves the API with the chain of responsibility for multiple managers. +func initRouter(ctx context.Context) *mux.Router { + + runnerManager, environmentManager := createManagerHandler(createKubernetesManager, config.Config.Kubernetes.Enabled, + nil, nil, ctx) + + return api.NewRouter(runnerManager, environmentManager) +} + +// initServer creates a server that serves the routes provided by the router. +func initServer(router *mux.Router) *http.Server { + sentryHandler := sentryhttp.New(sentryhttp.Options{}).Handle(router) + const readTimeout = 15 * time.Second + const idleTimeout = 60 * time.Second + + return &http.Server{ + Addr: config.Config.Server.URL().Host, + // A WriteTimeout would prohibit long-running requests such as creating an execution environment. + // See also https://github.com/openHPI/poseidon/pull/68. 
+ // WriteTimeout: time.Second * 15, + ReadHeaderTimeout: readTimeout, + ReadTimeout: readTimeout, + IdleTimeout: idleTimeout, + Handler: sentryHandler, + } +} + +// shutdownOnOSSignal listens for a signal from the operating system +// When receiving a signal the server shuts down but waits up to 15 seconds to close remaining connections. +func shutdownOnOSSignal(server *http.Server, ctx context.Context, stopProfiling func()) { + // wait for SIGINT + shutdownSignals := make(chan os.Signal, 1) + signal.Notify(shutdownSignals, syscall.Signal(0x2), syscall.Signal(0xf), syscall.Signal(0x6)) + + // wait for SIGUSR1 + writeProfileSignal := make(chan os.Signal, 1) + signal.Notify(writeProfileSignal, syscall.Signal(0xa)) + + select { + case <-ctx.Done(): + os.Exit(1) + case <-writeProfileSignal: + log.Info("Received SIGUSR1...") + + stopProfiling() + // Continue listening on signals and replace `stopProfiling` with an empty function + shutdownOnOSSignal(server, ctx, func() {}) + case <-shutdownSignals: + log.Info("Received SIGINT, shutting down...") + + defer stopProfiling() + ctx, cancel := context.WithTimeout(context.Background(), gracefulShutdownWait) + defer cancel() + if err := server.Shutdown(ctx); err != nil { + log.WithError(err).Warn("error shutting server down") + } + } +} + +func main() { + if err := config.InitConfig(); err != nil { + log.WithError(err).Warn("Could not initialize configuration") + } + initializeUserAgent() + logging.InitializeLogging(config.Config.Logger.Level, config.Config.Logger.Formatter) + initSentry(&config.Config.Sentry, config.Config.Profiling.CPUEnabled) + + cancelInflux := monitoring.InitializeInfluxDB(&config.Config.InfluxDB) + defer cancelInflux() + + stopProfiling := initProfiling(config.Config.Profiling) + go watchMemoryAndAlert(config.Config.Profiling) + + ctx, cancel := context.WithCancel(context.Background()) + router := initRouter(ctx) + server := initServer(router) + go runServer(router, server, cancel) + 
shutdownOnOSSignal(server, ctx, stopProfiling) +} diff --git a/configuration.example.yaml b/configuration.example.yaml new file mode 100644 index 0000000..185edc0 --- /dev/null +++ b/configuration.example.yaml @@ -0,0 +1,120 @@ +# Configuration of the Poseidon webserver +server: + # Address or hostname on which the webserver listens + # If a hostname is specified, the server might listen on only one of the resolved IPv4 or IPv6 addresses + address: 127.0.0.1 + # Port on which the webserver listens + port: 7200 + # When using Systemd socket activation, Poseidon tries to connect to an existing systemd socket instead of creating its own. + # This is useful for zero downtime deployments as the systemd sockets hold up the connections while Poseidon is restarting. + # Iff systemdsocketactivation, the configured address and port will not be used, instead the provided systemd socket will be. + systemdsocketactivation: false + # If set, this token is required in the `Poseidon-Token` header for each route except /health + # token: SECRET + # Configuration of TLS between the web client and Poseidon. + tls: + # If set, the API uses TLS for all incoming connections. + active: false + # The path to the certificate of the CA authority used for TLS + # cafile: ./ca.crt + # The path to the certificate file used for TLS + # certfile: ./poseidon.crt + # The path to the key file used for TLS + # keyfile: ./poseidon.key + # If true, an additional WebSocket connection will be opened to split stdout and stderr when executing interactively + interactivestderr: true + # If set, the file at the given path overwrites the default Nomad job file in internal/environment/template-environment-job.hcl + # templatejobfile: ./poseidon.hcl + # The LoggingFilterToken filters out Systemd Watchdog requests from the logs and is preconfigured with a random value. + # It can also be manually configured to hide additional requests from the logs, such as those from monitoring systems. 
+ # To use this feature, the respective user agent must be set according to `dto.UserAgentFiltered`. + # However, it is important to consider the security implications of using this expert-level setting for manual values. + # loggingfiltertoken: secret + # alert defines how poseidon should handle specific risks. + alert: + # The prewarming pool threshold [0, 1) defines which part of the prewarming pool should always be filled. + # Setting it to 0 will disable the alert. + # If the prewarming pool is filled for less than, i.e., 50%, the health route of Poseidon will return a warning. + prewarmingpoolthreshold: 0.5 + # The prewarming pool reload timeout (in seconds) defines for how long the low prewarming pool warning (above) + # should be active before Poseidon automatically reloads the environment. + # Setting it to 0 will disable the automatic reload. + prewarmingpoolreloadtimeout: 300 + +aws: + # Specifies whether AWS should be used as executor. + enabled: false + # The endpoint of the WebSocket API + # endpoint: wss://abcdef1234.execute-api.eu-central-1.amazonaws.com/production + # Currently, only static AWS environments are supported. + # For setting this via environment variables you have to use a string separated by spaces, like: POSEIDON_AWS_FUNCTIONS="java11Exec go118Exec". + # functions: + # - java11Exec + # - go118Exec + +kubernetes: + # Specifies whether Kubernetes should be used as executor. + enabled: false + # The namespace in which the Kubernetes pods are created. + namespace: default + config: + host: "" + apiPath: "" + username: "" + password: "" + token: "" + + + + +# Configuration of the logger +logger: + # Log level that is used after reading the config (INFO until then) + level: DEBUG + +# Configuration of the embedded profiler +profiling: + # Enables the runtime profiler + cpuenabled: false + # The file to which the profile is written to. 
+ # The default location `cmd/poseidon/default.pgo` will be picked up during the build process to create a profile-guided build. + cpufile: cmd/poseidon/default.pgo + # If set, a memory watchdog will be started that monitors the memory usage of Poseidon and alerts if the threshold is exceeded. + # The value defines the interval in milliseconds in which the memory usage is checked. + memoryinterval: 30_000 + # The Threshold in MB of memory usage at which Poseidon will start alerting. + memorythreshold: 1_000 + +# Configuration of the sentry logging +sentry: + # The DSN of the sentry endpoint to use. + # If the DSN is not set, the client is effectively disabled. + # dsn: https://example.io + # The environment to be sent with events. + # environment: staging + # This release information is used by Poseidon to provide the version route and tag events in Sentry. + # If no specific value is set, the git commit hash used to build the binary is used. + # release: development + # In debug mode, the debug information is printed to stdout to help you understand what sentry is doing. + # debug: true + # Enable performance tracing. + # enabletracing: true + # The sample rate for sampling traces in the range [0.0, 1.0]. + # tracessamplerate: 1.0 + # The sample rate for sampling performance profiles in the range [0.0, 1.0]. + # This is only used if enabletracing is set to true and is relative to tracessamplerate. + # profilessamplerate: 1.0 + +# Configuration of the influxdb monitoring +influxdb: + # The url of the influxdb endpoint. + # If the url is not set, the influxdb monitoring is disabled. + # url: http://localhost:8086 + # The token to secure the influxdb endpoint. + # token: SecretBase64Token== + # The organization set in your influxdb configuration. + # organization: PoseidonOrg + # The influxdb bucket to store the data in. + # bucket: poseidon + # The stage to be sent with events. 
+ # stage: staging diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..00f481a --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,12 @@ +services: + poseidon: + build: + context: . + dockerfile: Dockerfile + ports: + - "7200:7200" + extra_hosts: + - "host.docker.internal:host-gateway" + volumes: + - ./configuration.yaml:/go/src/app/configuration.yaml + restart: unless-stopped diff --git a/docs/configuration.md b/docs/configuration.md new file mode 100644 index 0000000..c51b31b --- /dev/null +++ b/docs/configuration.md @@ -0,0 +1,138 @@ +# Configuration + +Poseidon can be configured to suit different use cases. + + +## Poseidon + +The file `config/config.go` contains a configuration struct containing all possible configuration options for Poseidon. The file also defines default values for most of the configuration options. +The options *can* be overridden with a yaml configuration file whose path can be configured with the flag `-config`. By default, Poseidon searches for `configuration.yaml` in the working directory. `configuration.example.yaml` is an example for a configuration file and contains explanations for all options. The keys of the options specified in the configuration file must be written in lowercase. +The options *can* also be overridden by environment variables. Currently, only the Go types `string`, `int`, `bool` and `struct` (nested) are implemented. The name of the environment variable is constructed as follows: `POSEIDON_(_)*` (all letters are uppercase). + +The precedence of configuration possibilities is: + +1. Environment variables +2. Configuration file +3. Default values + +If a value is not specified, the value of the subsequent possibility is used. + +### Example + +- The default value for the `Port` (type `int`) field in the `Server` field (type `struct`) of the configuration is `7200`. 
+- This can be overwritten with the following `configuration.yaml`: + + ```yaml + server: + port: 4000 + ``` + +- Again, this can be overwritten by the environment variable `POSEIDON_SERVER_PORT`, e.g., using `export POSEIDON_SERVER_PORT=5000`. + +### Systemd + +Poseidon can be configured to run as a systemd service. Poseidon can optionally also be configured to use a systemd socket. +The use of systemd provides capabilities for managing Poseidon's state and zero downtime deployments. +Minimal examples for systemd configurations can be found in `.github/workflows/resources`. + + +## Nomad + +As a subsystem of Poseidon, Nomad can and should also be configured accordingly. + +### Memory Oversubscription + +Poseidon is using Nomad's feature of memory oversubscription. This way all Runners are allocated with just 16MB. The memory limit defined per execution environment is used as an upper bound for the memory oversubscription. +On the one hand, this feature allows Nomad to execute many more Runners in parallel but, on the other hand, it introduces a risk of overloading the Nomad host. Still, this feature is obligatory for Poseidon to work and therefore needs to be enabled. [Example Configuration](./resources/server.example.hcl) + +```hcl +default_scheduler_config { + memory_oversubscription_enabled = true +} +``` + + +### Scheduler + +By default, Nomad uses a bin-packing scheduler. This places all Jobs on one host. In our case, a high load then leads to one Nomad client being fully utilised while the others remain mostly idle. +To mitigate the overload of a Nomad client, the ["spread" scheduler algorithm](https://www.nomadproject.io/api-docs/operator/scheduler#update-scheduler-configuration) should be used. + +### Maximum Connections per Client + +By default, Nomad only allows 100 maximum concurrent connections per client. However, as Poseidon is a client, this would significantly impair and limit the performance of Poseidon. Thus, this limit should be disabled. 
+ +To do so, ensure the following configuration is set in your Nomad agents, for example by adding it to `/etc/nomad.d/base.hcl`: + +```hcl +limits { + http_max_conns_per_client = 0 +} +``` + +### Enable Networking Support in Nomad + +In order to allow full networking support in Nomad, the `containernetworking-plugins` are required on the host. They can be either installed manually or through a package manager. In the latter case, the installation path might differ. Hence, add the following line to the `client` directive of the Nomad configuration in `/etc/nomad.d/client.hcl`: + +```hcl + cni_path = "/usr/lib/cni" +``` + +If the path is not set up correctly or the dependency is missing, the following error will be shown in Nomad: `failed to find plugin "bridge" in path [/opt/cni/bin]` + +Additionally, we provide a [secure-bridge](./resources/secure-bridge.conflist) configuration for the `containernetworking-plugins`. We highly recommend to use this configuration, as it will automatically configure an appropriate firewall and isolate your local network. Store the [secure-bridge](./resources/secure-bridge.conflist) in an (otherwise) empty folder and specify that folder in `/etc/nomad.d/client.hcl`: + +```hcl + cni_config_dir = "" +``` + +If the path is not set up correctly or with a different name, the placement of allocations will fail in Nomad: `Constraint missing network filtered [all] nodes`. Be sure to set the "dns" and "dns-search" options in `/etc/docker/daemon.json` with reasonable defaults, for example with those shown in our [example configuration for Docker](./resources/docker.daemon.json). + +### Network range + +The default subnet range for Docker containers can be adjusted. +This can be done both in the Docker daemon configuration and the CNI secure-bridge configuration. +Accordingly, every container using the secure-bridge will receive an IP of the CNI configuration. +Both subnet range configurations should not be overlapping. 
+ +An example configuration could use `10.151.0.0/20` for all containers without the CNI secure-bridge and `10.151.16.0/20` +for all containers using the CNI secure bridge. +This would grant 4096 IPs to both subnets and keep 14 network range blocks of the `10.151.0.0/16` network free for future use (e.g., in other CNI configs). + +### Use gVisor as a sandbox + +We recommend using gVisor as a sandbox for the execution environments. First, [install gVisor following the official documentation](https://gvisor.dev/docs/user_guide/install/) and second, adapt the `/etc/docker/daemon.json` with reasonable defaults as shown in our [example configuration for Docker](./resources/docker.daemon.json). + +## Supported Docker Images + +In general, any Docker image can be used as an execution environment. + +### Users + +If the `privilegedExecution` flag is set to `true` during execution, no additional user is required. Otherwise, the following two requirements must be met: + +- A non-privileged user called `user` needs to be present in the image. This user is used to execute the code. +- The Docker image needs to have a `/sbin/setuser` script allowing the execution of the user code as a non-root user, similar to `/usr/bin/su`. + +### Executable Commands + +In order to function properly, Poseidon expects the following commands to be available within the PATH: + +- `cat` +- `env` +- `ls` +- `mkfifo` +- `rm` +- `bash` (not compatible with `sh` or `zsh`) +- `sleep` +- `tar` (including the `--absolute-names` option) +- `true` +- `unset` +- `whoami` + +Tests need additional commands: + +- `echo` +- `head` +- `id` +- `make` +- `tail` diff --git a/docs/development.md b/docs/development.md new file mode 100644 index 0000000..4d2f966 --- /dev/null +++ b/docs/development.md @@ -0,0 +1,136 @@ +# Development + +## Setup + +If you haven't installed Go on your system yet, follow the [golang installation guide](https://golang.org/doc/install). 
+ +To get your local setup going, run `make bootstrap`. It will install all required dependencies as well as setting up our git hooks. Run `make help` to get an overview of available make targets. + +The project can be compiled using `make build`. This should create a binary which can then be executed. + +Alternatively, the `go run ./cmd/poseidon` command can be used to automatically compile and run the project. + +### URLs + +Once you completed the project setup, you can check the availability using the following URL: + +```http request +http://localhost:7200/api/v1/version +``` + +Using the prefix `/api/v1`, all routes as described in [API documentation](../api/swagger.yaml) are available and thus can be used in conjunction with [CodeOcean](https://github.com/openHPI/codeocean). + +## Tests + +As testing framework we use the [testify](https://github.com/stretchr/testify) toolkit. + +Run `make test` to run the unit tests. + +### Mocks + +For mocks we use [mockery](https://github.com/vektra/mockery). You can create a mock for the interface of your choice by running + +```bash +make mock name=INTERFACE_NAME pkg=./PATH/TO/PKG +``` + +on a specific interface. + +For example, for an interface called `ExecutorApi` in the package `nomad`, you might run + +```bash +make mock name=ExecutorApi pkg=./nomad +``` + +If the interface changes, you can rerun this command (deleting the mock file first to avoid errors may be necessary). + +Mocks can also be generated by using mockery directly on a specific interface. To do this, first navigate to the package the interface is defined in. 
Then run + +```bash +mockery \ + --name=<> \ + --structname=<>Mock \ + --filename=<>Mock.go \ + --inpackage +``` + +For example, for an interface called `ExecutorApi` in the package `nomad`, you might run + +```bash +mockery \ +--name=ExecutorApi \ +--structname=ExecutorAPIMock \ +--filename=ExecutorAPIMock.go \ +--inpackage +``` + +Note that per default, the mocks are created in a `mocks` sub-folder. However, in some cases (if the mock implements private interface methods), it needs to be in the same package as the interface it is mocking. The `--inpackage` flag can be used to avoid creating it in a subdirectory. + +### End-to-end tests + +For e2e tests we provide a separate package. e2e tests require the connection to a Nomad cluster. +Run `make e2e-tests` to run the e2e tests. This requires Poseidon to be already running. +Instead, you can run `make e2e-docker` to run the API in a Docker container, and the e2e tests afterwards. +You can use the `DOCKER_OPTS` variable to add additional arguments to the Docker run command that runs the API. By default, it is set to `-v $(shell pwd)/configuration.yaml:/configuration.yaml`, which means your local configuration file is mapped to the container. If you don't want this, use the following command. + +```shell +$ make e2e-docker DOCKER_OPTS="" +``` + +### Local Nomad + +In order to support the development of Poseidon, a local Nomad dev server is recommended. Following the instructions below, you can set up a Nomad server on your local system that won't persist any data between restarts. More details can be found on [Nomad's official website](https://www.nomadproject.io/docs/install). 
+
+#### macOS
+
+```shell
+brew tap hashicorp/tap
+brew install hashicorp/tap/nomad
+brew services start nomad
+```
+
+**Prerequisites**: [Docker for Mac](https://docs.docker.com/desktop/mac/install/) is installed and started:
+```shell
+brew install --cask docker
+```
+
+**Note**: Due to the architecture of Docker networking on macOS, the bridge network is not available with Nomad. Please refer to the [Nomad FAQ](https://www.nomadproject.io/docs/faq#q-how-to-connect-to-my-host-network-when-using-docker-desktop-windows-and-macos) for more information. As a result, those environments having network access enabled won't sync properly to Nomad and thus cannot be started.
+
+#### Linux
+
+```shell
+curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add -
+sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
+sudo apt-get update && sudo apt-get install nomad
+sudo nomad agent -dev
+```
+
+#### Namespace registration
+
+As the Nomad dev server does not persist any data, the namespace selected in the configuration of Poseidon needs to be created each time the Nomad server is started. This can be done with the following command:
+
+```shell
+nomad namespace apply -description "Poseidon development namespace" poseidon
+```
+
+Alternatively, the namespace used by Poseidon can be updated to `default` so that no additional namespace is required.
+
+## Coding Style
+
+### Git hooks
+
+The repository contains a git pre-commit hook which runs the go formatting tool `gofmt` to ensure the code is formatted properly before committing. To enable them, run `make git-hooks`.
+
+### Linter
+
+To lint our source code and ensure a common code style in the codebase we use [Golang CI Lint](https://golangci-lint.run/usage/install/#local-installation) as a linter. Use `make lint` to execute it.
+ +## Continuous Integration + +We use the Gitlab CI to automatically build the project, run unit and e2e-tests, perform an automated dependency check and deploy instances of the API. + +### Docker + +The CI builds a Docker image and pushes it to the Docker registry associated with this repo. Execute `sudo docker run -p 7200:7200 ghcr.io/openhpi/poseidon` to run the image locally. You can find all available images on the [package listing on GitHub](https://github.com/openHPI/poseidon/pkgs/container/poseidon). Once started, you can then interact with the webserver on your local port 7200. + +You can also build the Docker image locally by executing `make docker` in the root directory of this project. It builds the binary first and a container with the tag `poseidon:latest` afterwards. You can then start a Docker container with `sudo docker run --rm -p 7200:7200 poseidon:latest`. diff --git a/docs/nomad_usage.md b/docs/nomad_usage.md new file mode 100644 index 0000000..8f1c68f --- /dev/null +++ b/docs/nomad_usage.md @@ -0,0 +1,44 @@ +# Nomad Usage + +Poseidon is an abstraction of the functionality provided by Nomad. In the following we will look at how Poseidon uses Nomad's functionality. + +Nomad is structured in different levels of abstraction. Jobs are collected in namespaces. Each Job can contain several Task Groups. Each Task Group can contain several Tasks. Finally, Allocations map Task Groups to Nomad Clients. For more insights take a look at [the official description](https://www.nomadproject.io/docs/internals/architecture). +In our case, a Task is executed in a Docker container. + +![Overview Poseidon-Nomad mapping](resources/OverviewPoseidonNomadMapping.png) + +## Execution environments as template Jobs + +Execution Environments are mapped to Nomad Jobs. In the following, we will call these Jobs `Template Jobs`. +The naming schema for Template Jobs is "template-\". + +The last figure shows the structure in Nomad. 
+Each template Job contains a "config" Task Group including a "config" Task. This Task does not perform any computations but is used to store environment-specific attributes, such as the prewarming pool size. +In addition, the template Job contains a "default-group" Task Group with a "default-task" Task. In this Task, `sleep infinity` is executed so that the Task remains active and is ready for dynamic executions in the container. + +As shown in the figure, the "config" Task Group has no Allocation, while the "default-group" has an Allocation. +This is because the "config" Task Group only stores information but does not execute anything. +In the "default-group" the user's code submissions are executed. Therefore, Nomad creates an Allocation that points the Task Group to a Nomad Client for execution. + +## Runner as Nomad Jobs + +As an abstraction of the execution engine, we use `Runner` as a description for Docker containers (currently used) or microVMs. +If a user requests a new runner, Poseidon duplicates the template Job of the corresponding environment. + +When a user then executes their code, Poseidon copies the code into the container and executes it. + +## Prewarming + +To reduce the response time in the process of claiming a runner, Poseidon creates a pool of runners that have been started in advance. +When a user requests a runner, a runner from this pool can be used. +In the background, a new runner is created, thus replenishing the pool. +By running in the background, the user does not have to wait as long as the runner needs to start. +The implementation of this concept can be seen in [the Runner Manager](/internal/runner/manager.go). + +### Lifecycle + +The prewarming pool is initiated when a new environment is requested/created according to the requested prewarming pool size. 
+
+Every change on the environment (resource constraints, prewarming pool size, network access) leads to the destruction of the environment including all used and idle runners (the prewarming pool). After that, the environment and its prewarming pool are re-created.
+
+Other causes which lead to the destruction of the prewarming pool are the explicit deletion of the environment by using the API route or when the corresponding template job for a given environment is no longer available on Nomad but a force update is requested using the `GET /execution-environments/{id}?fetch=true` route. The issue described in the latter case should not occur in normal operation, but could arise from either manually deleting the template job, scheduling issues on Nomad or other unforeseeable edge cases.
diff --git a/docs/resources/OverviewCodeOceanPoseidonNomad.png b/docs/resources/OverviewCodeOceanPoseidonNomad.png
new file mode 100644
index 0000000..558402a
Binary files /dev/null and b/docs/resources/OverviewCodeOceanPoseidonNomad.png differ
diff --git a/docs/resources/OverviewPoseidonNomadMapping.drawio b/docs/resources/OverviewPoseidonNomadMapping.drawio
new file mode 100644
index 0000000..cced3da
--- /dev/null
+++ b/docs/resources/OverviewPoseidonNomadMapping.drawio
@@ -0,0 +1 @@
+7Vxbc+I2FP41PCZjyxfgMckm23bSTtpkp7uPCha2urZFbUGgv76SLeGLDDEYEE4gmcE6ulg+3/mOdGSJgXUXLb8mcBb8TjwUDoDhLQfWlwEApg3AgP8b3iqXDF07F/gJ9kShQvCM/0NCaAjpHHsorRSkhIQUz6rCCYljNKEVGUwS8lYtNiVh9a4z6CNF8DyBoSr9G3s0yKUjMCzkvyDsB/LOpjvOcyIoC4snSQPokbeSyLofWHcJITS/ipZ3KOTKk3rJ6z1syF13LEExbVNh/P3XW/LtzwUdBuljtHiYPl75V6KVBQzn4oEHwA1Ze7dTwpplvaYroQr33zmRGVdpBtQNK8C6sCwy2ZUvvrNWXqXgiaQIeySWGayfr/XCTJbfVYpBpQMgIfPYQ/xxDJb9FmCKnmdwwnPfmPUxWUCjkKVMdrlACcUMx5sQ+zGTUTJbt8nz0HKjHs01OsysEYkQTVasyFuBP3AEqEEJ+6FEGgqb89d1C1jYhUBmB5TARpQUHXaD7Q8SQW8jHE3InRdEogJwBRIlyBx3eErIrM7EMl2GUKb2OkgwQmmm1FPwJsKexxu6neIwvCMhSbJGranD/5g8pQn5iUo5bvYRD1SS5x8hF57edA+DuD1y8ipimGkwABM0cNY9Fv5DBQXksYFFJElCA+KTGIb3hfS2ilNR5pFwYmTo/IMoXQndwTklVezQEtPvvPq1I1I/RGP8+suynFjJRMwet1SJJ3/I9niiqJalZL06iCj2bvh4y9IehhGJvZcAx3nGA+bKE43IQR4Ya+i5arYDzzRJ5skEbdG4mFdQmPiIvsdM1ZASFEKKF9V+HNwsbMUt/EZe08/IWDA0Kow1Rw0+u2mYBdaxOOsq4LygaMbMgnlag+HUDaa6DjfCdgDluk5VuWudafOHpqlo7wQOcYuTmoQwTfGk4qDMioNyO3jHfTzxAX3hqKUvdDr6QlH1ieBsTiOszxpXrc8yalaV90vUKgyLQQNXpWIzXiDdch9QHfQtsxYBvdevanl2kfegsPK1TjoYvtFPw9drwONTGvCudmfbPbC7sTKYXV9f92cEs42azoA6go1POoCpQfgLTH8yyVemxNllDsdBawCpcQ43PhpKatx9E4ZkwrwBiT8nRu7ouuquHP0oqWEQ59LnxKfGIVc7OgDomLEcYR2i3UzngLMW0205bXE7Tlu6sU+NcycknmK/P7ODNUnkwGM5emcHQEt4+yE4M2zJGVMvaYYfjjSudtJYF9LsSZq2Czx6OTNSOOOhKZyH9MrnMVN/qDM8u/HGuVBnT+q0XVoqbdLQwR11/UZyh2bLDn2ljv5Rx75QZz/qSCTfn6qNdFJHdrNEHbheA+ovcRzdxLEuxNmXOG3XBfTypv/rAmB0bvM0PfuAPgJn2oY4QGuMA9QYp++k0T5Dsy7Bzb6kaRvcdB1o9ntvXtuvId/xbnxvvr38cd6bg82BV88WLaxzGwzlJrQLr3fltSWPvry7uVXr7lbZzd4vWtSpo39IBBfq7Ekdq+08cqyVOurGlT4uWtSJo3/RQnMAVqZNiUW7EycmMaqwxlBY48E0yHrNu5BvbpFnFsGBSdV2QUPvcKQuaMgjUx5eyONSz5QfAS2OUpWyGkrvfIoLNJ2ze0qIN5/k1GbPZvA+4NjPE9k+za1H8F6ThkN5lU528Rf93EflntkBMFvzjpAP6njaBrhaF4UsNYa8X6LJPCe8cR8vcELiCInjD5+bqfrPJjkKWn0992VZxpnp1ta8x2c3N1hxZN184lHdoA1aukGtMY3s5fb511/zOEbJZSKzwT02HXw5LYXVNxwSsn45x/p7jiMeKWLJ4odd8pXp4udxrPv/AQ== \ No newline at end of file diff --git a/docs/resources/OverviewPoseidonNomadMapping.png b/docs/resources/OverviewPoseidonNomadMapping.png 
new file mode 100644 index 0000000..4c7716c Binary files /dev/null and b/docs/resources/OverviewPoseidonNomadMapping.png differ diff --git a/docs/resources/client.example.hcl b/docs/resources/client.example.hcl new file mode 100644 index 0000000..63c7f9c --- /dev/null +++ b/docs/resources/client.example.hcl @@ -0,0 +1,17 @@ +client { + enabled = true + servers = [ + "server domain 1", + "server domain 2" + ] + cni_path = "/usr/lib/cni" +} + +plugin "docker" { + config { + allow_runtimes = ["runsc"] + gc { + image_delay = "0s" + } + } +} diff --git a/docs/resources/docker.daemon.json b/docs/resources/docker.daemon.json new file mode 100644 index 0000000..add384b --- /dev/null +++ b/docs/resources/docker.daemon.json @@ -0,0 +1,17 @@ +{ + "dns": [ + "8.8.8.8", + "8.8.4.4" + ], + "dns-search": [ + "codeocean.internal" + ], + "default-runtime": "runsc", + "runtimes": { + "runsc": { + "path": "/usr/bin/runsc", + "runtimeArgs": [ + ] + } + } +} diff --git a/docs/resources/nomad.example.hcl b/docs/resources/nomad.example.hcl new file mode 100644 index 0000000..c97268e --- /dev/null +++ b/docs/resources/nomad.example.hcl @@ -0,0 +1,28 @@ +# Full configuration options can be found at https://www.nomadproject.io/docs/configuration + +data_dir = "/opt/nomad/data" +bind_addr = "0.0.0.0" + +limits { + http_max_conns_per_client = 0 +} + +# Require TLS +tls { + http = true + rpc = true + + ca_file = "/home/ubuntu/ca.crt" + cert_file = "/home/ubuntu/cert.crt" + key_file = "/home/ubuntu/cert-key.pem" + + verify_server_hostname = true + verify_https_client = true +} + +# telemetry { +# collection_interval = "10s" +# prometheus_metrics = true +# publish_allocation_metrics = true +# publish_node_metrics = true +# } diff --git a/docs/resources/poseidon_policy.hcl b/docs/resources/poseidon_policy.hcl new file mode 100644 index 0000000..4b15bf8 --- /dev/null +++ b/docs/resources/poseidon_policy.hcl @@ -0,0 +1,30 @@ +// Allow-all access policy + +namespace "*" { + policy = "write" + 
capabilities = ["alloc-node-exec", "read-job"] +} + +agent { + policy = "write" +} + +operator { + policy = "write" +} + +quota { + policy = "write" +} + +node { + policy = "write" +} + +host_volume "*" { + policy = "write" +} + +plugin { + policy = "read" +} diff --git a/docs/resources/secure-bridge.conflist b/docs/resources/secure-bridge.conflist new file mode 100644 index 0000000..8329c94 --- /dev/null +++ b/docs/resources/secure-bridge.conflist @@ -0,0 +1,105 @@ +{ + "cniVersion": "0.4.0", + "name": "secure-bridge", + "plugins": [ + { + "type": "loopback" + }, + { + "type": "bridge", + "bridge": "nomad-filtered", + "ipMasq": true, + "isGateway": true, + "forceAddress": true, + "dns":{ + "nameservers":[ + "8.8.8.8", + "8.8.4.4", + "2001:4860:4860::8888", + "2001:4860:4860::8844" + ], + "domain": "poseidon.internal", + "search": [ + "poseidon.internal" + ] + }, + "ipam": { + "type": "host-local", + "ranges": [ + [ + { + "subnet": "10.151.16.0/20" + } + ], + [ + { + "subnet": "fd00:2::/64" + } + ] + ], + "routes": [ + { "dst": "0.0.0.0/5" }, + { "dst": "8.0.0.0/7" }, + { "dst": "11.0.0.0/8" }, + { "dst": "12.0.0.0/6" }, + { "dst": "16.0.0.0/4" }, + { "dst": "32.0.0.0/3" }, + { "dst": "64.0.0.0/2" }, + { "dst": "128.0.0.0/3" }, + { "dst": "160.0.0.0/5" }, + { "dst": "168.0.0.0/8" }, + { "dst": "169.0.0.0/9" }, + { "dst": "169.128.0.0/10" }, + { "dst": "169.192.0.0/11" }, + { "dst": "169.224.0.0/12" }, + { "dst": "169.240.0.0/13" }, + { "dst": "169.248.0.0/14" }, + { "dst": "169.252.0.0/15" }, + { "dst": "169.255.0.0/16" }, + { "dst": "170.0.0.0/8" }, + { "dst": "171.0.0.0/12" }, + { "dst": "171.32.0.0/11" }, + { "dst": "171.64.0.0/10" }, + { "dst": "171.128.0.0/9" }, + { "dst": "172.0.0.0/6" }, + { "dst": "176.0.0.0/4" }, + { "dst": "192.0.0.0/9" }, + { "dst": "192.128.0.0/11" }, + { "dst": "192.160.0.0/13" }, + { "dst": "192.169.0.0/16" }, + { "dst": "192.170.0.0/15" }, + { "dst": "192.172.0.0/14" }, + { "dst": "192.176.0.0/12" }, + { "dst": "192.192.0.0/10" }, + 
{ "dst": "193.0.0.0/8" }, + { "dst": "194.0.0.0/7" }, + { "dst": "196.0.0.0/6" }, + { "dst": "200.0.0.0/5" }, + { "dst": "208.0.0.0/4" }, + { "dst": "224.0.0.0/3" }, + { "dst": "::/1" }, + { "dst": "8000::/2" }, + { "dst": "c000::/3" }, + { "dst": "e000::/4" }, + { "dst": "f000::/5" }, + { "dst": "f800::/6" }, + { "dst": "fe00::/9" }, + { "dst": "fec0::/10" }, + { "dst": "ff00::/8" } + ] + } + }, + { + "type": "firewall", + "backend": "iptables", + "iptablesAdminChainName": "NOMAD-ADMIN-FILTERED" + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + }, + "snat": true + } + ] +} diff --git a/docs/resources/server.example.hcl b/docs/resources/server.example.hcl new file mode 100644 index 0000000..3685128 --- /dev/null +++ b/docs/resources/server.example.hcl @@ -0,0 +1,15 @@ +server { + enabled = true + bootstrap_expect = 2 + server_join { + retry_join = ["<>"] + retry_max = 3 + retry_interval = "15s" + } + + # https://www.nomadproject.io/docs/configuration/server + default_scheduler_config { + scheduler_algorithm = "spread" + memory_oversubscription_enabled = true + } +} diff --git a/docs/security.md b/docs/security.md new file mode 100644 index 0000000..cd8f949 --- /dev/null +++ b/docs/security.md @@ -0,0 +1,70 @@ +# Security configurations + +## TLS + +⚠️ We highly encourage the use of TLS in this API to increase the security. + +### Poseidon + +To enable TLS, you need to create an appropriate certificate first. 
+You can do this in the same way [as for Nomad](https://learn.hashicorp.com/tutorials/nomad/security-enable-tls): +- `cfssl print-defaults csr | cfssl gencert -initca - | cfssljson -bare poseidon-ca` +- Copy `cfssl.json` +- `echo '{}' | cfssl gencert -ca=poseidon-ca.pem -ca-key=poseidon-ca-key.pem -config=cfssl.json -hostname="<>,localhost,127.0.0.1" - | cfssljson -bare poseidon-server` + + +Then, set `server.tls.active` or the corresponding environment variable to `true` and specify the `server.tls.certfile` and `server.tls.keyfile` options. + +### Nomad + +To enable TLS between Poseidon and Nomad, TLS needs to be first activated in Nomad. See [the Nomad documentation](https://learn.hashicorp.com/collections/nomad/transport-security) for a guideline on how to do that. + +Afterwards, it is *required* to set the `nomad.tls.active` config option to `true`, as Nomad will no longer accept any connections over HTTP. To make sure the authenticity of the Nomad host can be validated, the `nomad.tls.cafile` option has to point to a certificate of the signing authority. + +If using mutual TLS between Poseidon and Nomad is desired, the `nomad.tls.certfile` and `nomad.tls.keyfile` options can hold a client certificate. This certificate must be signed by the same CA as the certificates of the Nomad hosts. Note that mTLS can (and should) be enforced by Nomad in this case using the [verify_https_client](https://www.nomadproject.io/docs/configuration/tls#verify_https_client) configuration option. + +Here are sample configurations for [all Nomad nodes](resources/nomad.example.hcl), [the Nomad servers](resources/server.example.hcl) and [the Nomad clients](resources/client.example.hcl). + + +## Authentication + +⚠️ Don't use authentication without TLS enabled, as otherwise the token will be transmitted in clear text. + +### Poseidon + +⚠️ We encourage you to enable authentication for this API. 
If disabled, everyone with access to your API has also indirectly access to your Nomad cluster as this API uses it. + +The API supports authentication via an HTTP header. To enable it, specify the `server.token` value in the `configuration.yaml` or the corresponding environment variable `POSEIDON_SERVER_TOKEN`. + +Once configured, all requests to the API, except the `health` route require the configured token in the `Poseidon-Token` header. + +An example `curl` command with the configured token being `SECRET` looks as follows: + +```bash +$ curl -H "Poseidon-Token: SECRET" http://localhost:7200/api/v1/some-protected-route +``` + +### Nomad + +An alternative or additional measure to mTLS (as mentioned above) is to enable access control in the Nomad cluster to prevent unauthorised actors from performing unwanted actions in the cluster. + Instructions on setting up the cluster appropriately can be found in [the Nomad documentation](https://learn.hashicorp.com/collections/nomad/access-control). + +Afterwards, it is recommended to create a specific [Access Policy](https://learn.hashicorp.com/tutorials/nomad/access-control-policies?in=nomad/access-control) for Poseidon with the minimal set of capabilities it needs for operating the cluster. A non-minimal example with complete permissions can be found [here](resources/poseidon_policy.hcl). Poseidon requires a corresponding [Access Token](https://learn.hashicorp.com/tutorials/nomad/access-control-tokens?in=nomad/access-control) to send commands to Nomad. 
A Token looks like this: + +```text +Accessor ID = 463d3216-dc16-570f-380c-a48f5d26d955 +Secret ID = ea1ac4c5-892b-0bcc-9fc5-5faeb5273a13 +Name = Poseidon access token +Type = client +Global = false +Policies = [poseidon] +Create Time = 2021-07-26 12:45:11.437786378 +0000 UTC +Create Index = 246238 +Modify Index = 246238 +``` + +The `Secret ID` of the Token needs to be specified as the value of `nomad.token` value in the `configuration.yaml` or the corresponding environment variable `POSEIDON_NOMAD_TOKEN`. It may also be required for authentication in the Nomad Web UI and for using the Nomad CLI on the Nomad hosts (where the token can be specified via the `NOMAD_TOKEN` environment variable). + +Once configured, all requests to the Nomad API automatically contain a `X-Nomad-Token` header containing the token. + +⚠️ Make sure that no (overly permissive) `anonymous` access policy is present in the cluster after the policy for Poseidon has been added. Anyone can perform actions as specified by this special policy without authenticating! diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..68ff922 --- /dev/null +++ b/go.mod @@ -0,0 +1,141 @@ +module github.com/openHPI/poseidon + +go 1.22.4 + +require ( + github.com/coreos/go-systemd/v22 v22.5.0 + github.com/getsentry/sentry-go v0.28.1 + github.com/google/uuid v1.6.0 + github.com/gorilla/mux v1.8.1 + github.com/gorilla/websocket v1.5.3 + github.com/hashicorp/nomad v1.8.1 + github.com/hashicorp/nomad/api v0.0.0-20240624153520-bbdc8b7fa758 + github.com/influxdata/influxdb-client-go/v2 v2.13.0 + github.com/sirupsen/logrus v1.9.3 + gopkg.in/yaml.v3 v3.0.1 + k8s.io/client-go v0.30.3 +) + +replace ( + // Some Hashicorp dependencies still use the old package name (and version). See https://github.com/hashicorp/nomad/issues/11826. + github.com/armon/go-metrics => github.com/hashicorp/go-metrics v0.5.3 + // Nomad is not compatible with newer versions of HCLv2 yet. See https://github.com/hashicorp/nomad/issues/11826. 
+ github.com/hashicorp/hcl/v2 => github.com/hashicorp/hcl/v2 v2.20.2-0.20240517235513-55d9c02d147d +) + +require ( + github.com/agext/levenshtein v1.2.3 // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect + github.com/apparentlymart/go-cidr v1.1.0 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/armon/go-metrics v0.5.3 // indirect + github.com/bmatcuk/doublestar v1.3.4 // indirect + github.com/cenkalti/backoff/v3 v3.2.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/fatih/color v1.17.0 // indirect + github.com/go-jose/go-jose/v3 v3.0.3 // indirect + github.com/go-jose/go-jose/v4 v4.0.2 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/gojuno/minimock/v3 v3.3.13 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/hashicorp/consul/api v1.29.1 // indirect + github.com/hashicorp/cronexpr v1.1.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-bexpr v0.1.14 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-cty-funcs v0.0.0-20240510212344-9599f7024f07 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + 
github.com/hashicorp/go-plugin v1.6.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-set/v2 v2.1.0 // indirect + github.com/hashicorp/go-sockaddr v1.0.6 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/hashicorp/hcl/v2 v2.21.0 // indirect + github.com/hashicorp/memberlist v0.5.1 // indirect + github.com/hashicorp/raft v1.7.0 // indirect + github.com/hashicorp/raft-autopilot v0.2.0 // indirect + github.com/hashicorp/serf v0.10.2-0.20240320153621-5d32001edfaa // indirect + github.com/hashicorp/vault/api v1.14.0 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/miekg/dns v1.1.61 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.2-0.20210821155943-2d9075ca8770 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/hashstructure v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/pointerstructure v1.2.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/oapi-codegen/runtime v1.1.1 // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect + github.com/shirou/gopsutil/v3 v3.24.5 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/stretchr/testify v1.9.0 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + github.com/zclconf/go-cty v1.14.4 // indirect + github.com/zclconf/go-cty-yaml v1.0.3 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect + golang.org/x/mod v0.18.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/term v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.22.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect + google.golang.org/grpc v1.64.1 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/api v0.30.3 // indirect + k8s.io/apimachinery v0.30.3 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + 
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + oss.indeed.com/go/libtime v1.6.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..8f6cff7 --- /dev/null +++ b/go.sum @@ -0,0 +1,514 @@ +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= +github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= +github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod 
h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/bmatcuk/doublestar v1.1.5/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= +github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc 
h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/getsentry/sentry-go v0.28.1 h1:zzaSm/vHmGllRM6Tpx1492r0YDzauArdBfkJRtY6P5k= +github.com/getsentry/sentry-go v0.28.1/go.mod h1:1fQZ+7l7eeJ3wYi82q5Hg8GqAPgefRq+FP/QhafYVgg= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk= +github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gojuno/minimock/v3 
v3.0.4/go.mod h1:HqeqnwV8mAABn3pO5hqF+RE7gjA0jsN8cbbSogoGrzI= +github.com/gojuno/minimock/v3 v3.0.6/go.mod h1:v61ZjAKHr+WnEkND63nQPCZ/DTfQgJdvbCi3IuoMblY= +github.com/gojuno/minimock/v3 v3.3.13 h1:sXFO7RbB4JnZiKhgMO4BU4RLYcfhcOSepfiv4wPgGNY= +github.com/gojuno/minimock/v3 v3.3.13/go.mod h1:WtJbR+15lbzpUHoOFtT7Sv1rR885bFxoyHrzoMOmK/k= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= +github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= +github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg= +github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= +github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= +github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= +github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap 
v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-bexpr v0.1.14 h1:uKDeyuOhWhT1r5CiMTjdVY4Aoxdxs6EtwgTGnlosyp4= +github.com/hashicorp/go-bexpr v0.1.14/go.mod h1:gN7hRKB3s7yT+YvTdnhZVLTENejvhlkZ8UE4YVBS+Q8= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-cty-funcs v0.0.0-20240510212344-9599f7024f07 h1:gfDsyjG59qNfpOXgv9mJbo4ooJMRWyZxs8mUlk/F1u0= +github.com/hashicorp/go-cty-funcs v0.0.0-20240510212344-9599f7024f07/go.mod h1:Abjk0jbRkDaNCzsRhOv2iDCofYpX1eVsjozoiK63qLA= +github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw= +github.com/hashicorp/go-metrics v0.5.3 h1:M5uADWMOGCTUNU1YuC4hfknOeHNaX54LDm4oYSucoNE= +github.com/hashicorp/go-metrics v0.5.3/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack/v2 v2.1.2 
h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= +github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.6.1 h1:P7MR2UP6gNKGPp+y7EZw2kOiq4IR9WiqLvp0XOsVdwI= +github.com/hashicorp/go-plugin v1.6.1/go.mod h1:XPHFku2tFo3o3QKFgSYo+cghcUhw1NA1hZyMK0PWAw0= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 h1:iBt4Ew4XEGLfh6/bPk4rSYmuZJGizr6/x/AEizP0CQc= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8/go.mod h1:aiJI+PIApBRQG7FZTEBx5GiiX+HbOHilUdNxUZi4eV0= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-set/v2 v2.1.0 h1:iERPCQWks+I+4bTgy0CT2myZsCqNgBg79ZHqwniohXo= +github.com/hashicorp/go-set/v2 v2.1.0/go.mod h1:6q4nh8UCVZODn2tJ5RbJi8+ki7pjZBsAEYGt6yaGeTo= +github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I= +github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid 
v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/hcl/v2 v2.20.2-0.20240517235513-55d9c02d147d h1:7abftkc86B+tlA/0cDy5f6C4LgWfFOCpsGg3RJZsfbw= +github.com/hashicorp/hcl/v2 v2.20.2-0.20240517235513-55d9c02d147d/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/memberlist v0.5.1 h1:mk5dRuzeDNis2bi6LLoQIXfMH7JQvAzt3mQD0vNZZUo= +github.com/hashicorp/memberlist v0.5.1/go.mod h1:zGDXV6AqbDTKTM6yxW0I4+JtFzZAJVoIPvss4hV8F24= +github.com/hashicorp/nomad v1.8.1 h1:WDVz8z0Szx9gfPNje+jIFqUZroJ4nXbWHSNcc0hzpe0= +github.com/hashicorp/nomad v1.8.1/go.mod h1:boe1jvdt70t0kCOAg0y4HDjW4ZWHLPvfMKz+cbeiTDg= +github.com/hashicorp/nomad/api v0.0.0-20240624153520-bbdc8b7fa758 h1:zCwy94icHDtxYWqdCeJBBwFblJhHa/l2x0CA8m0K84g= +github.com/hashicorp/nomad/api v0.0.0-20240624153520-bbdc8b7fa758/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= +github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.7.0 h1:4u24Qn6lQ6uwziM++UgsyiT64Q8GyRn43CV41qPiz1o= +github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= 
+github.com/hashicorp/raft-autopilot v0.2.0 h1:2/R2RPgamgRKgNWGQioULZvjeKXQZmDuw5Ty+6c+H7Y= +github.com/hashicorp/raft-autopilot v0.2.0/go.mod h1:q6tZ8UAZ5xio2gv2JvjgmtOlh80M6ic8xQYBe2Egkg8= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= +github.com/hashicorp/serf v0.10.2-0.20240320153621-5d32001edfaa h1:UXgK+AZPfeQ1vOXXXfBj7C7mZpWUgRFcMAKpyyYrYgU= +github.com/hashicorp/serf v0.10.2-0.20240320153621-5d32001edfaa/go.mod h1:RiISHML4PEb0ZN6S6uNW04TO8D6EUtTIOpCzzDnZeGk= +github.com/hashicorp/vault/api v1.14.0 h1:Ah3CFLixD5jmjusOgm8grfN9M0d+Y8fVR2SW0K6pJLU= +github.com/hashicorp/vault/api v1.14.0/go.mod h1:pV9YLxBGSz+cItFDd8Ii4G17waWOQ32zVjMWHe/cOqk= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hexdigest/gowrap v1.1.7/go.mod h1:Z+nBFUDLa01iaNM+/jzoOA1JJ7sm51rnYFauKFUB5fs= +github.com/influxdata/influxdb-client-go/v2 v2.13.0 h1:ioBbLmR5NMbAjP4UVA5r9b5xGjpABD7j65pI8kFphDM= +github.com/influxdata/influxdb-client-go/v2 v2.13.0/go.mod h1:k+spCbt9hcvqvUiz0sr5D8LolXHqAAOfPw9v/RIRHl4= +github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU= +github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI= +github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.9/go.mod 
h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= +github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.14.2-0.20210821155943-2d9075ca8770 h1:drhDO54gdT/a15GBcMRmunZiNcLgPiFIJa23KzmcvcU= +github.com/mitchellh/go-testing-interface v1.14.2-0.20210821155943-2d9075ca8770/go.mod h1:SO/iHr6q2EzbqRApt+8/E9wqebTwQn5y+UlB04bxzo0= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap 
v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= +github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw= +github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro= +github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= +github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat 
v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= 
+github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= +github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zclconf/go-cty v1.4.0/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod 
h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= +github.com/zclconf/go-cty-yaml v1.0.3 h1:og/eOQ7lvA/WWhHGFETVWNduJM7Rjsv2RRpx1sdFMLc= +github.com/zclconf/go-cty-yaml v1.0.3/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= +golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod 
v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod 
h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= +google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= +k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= +k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= +k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= +k8s.io/client-go v0.30.3/go.mod 
h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oss.indeed.com/go/libtime v1.6.0 h1:XQyczJihse/wQGo59OfPF3f4f+Sywv4R8vdGB3S9BfU= +oss.indeed.com/go/libtime v1.6.0/go.mod h1:B2sdEcuzB0zhTKkAuHy4JInKRc7Al3tME4qWam6R7mA= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/internal/api/api.go b/internal/api/api.go new file mode 100644 index 0000000..4f36839 --- /dev/null +++ b/internal/api/api.go @@ -0,0 +1,98 @@ +package api + +import ( + "github.com/gorilla/mux" + "github.com/openHPI/poseidon/internal/api/auth" + "github.com/openHPI/poseidon/internal/config" + "github.com/openHPI/poseidon/internal/environment" + "github.com/openHPI/poseidon/internal/runner" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/openHPI/poseidon/pkg/logging" + "github.com/openHPI/poseidon/pkg/monitoring" + "net/http" +) + +var log = logging.GetLogger("api") + +const ( + BasePath = "/api/v1" + HealthPath = "/health" + 
VersionPath = "/version" + RunnersPath = "/runners" + EnvironmentsPath = "/execution-environments" + StatisticsPath = "/statistics" +) + +// NewRouter returns a *mux.Router which can be +// used by the net/http package to serve the routes of our API. It +// always returns a router for the newest version of our API. We +// use gorilla/mux because it is more convenient than net/http, e.g. +// when extracting path parameters. +func NewRouter(runnerManager runner.Manager, environmentManager environment.ManagerHandler) *mux.Router { + router := mux.NewRouter() + // this can later be restricted to a specific host with + // `router.Host(...)` and to HTTPS with `router.Schemes("https")` + configureV1Router(router, runnerManager, environmentManager) + router.Use(logging.HTTPLoggingMiddleware) + router.Use(monitoring.InfluxDB2Middleware) + return router +} + +// configureV1Router configures a given router with the routes of version 1 of the Poseidon API. +func configureV1Router(router *mux.Router, + runnerManager runner.Manager, environmentManager environment.ManagerHandler) { + router.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + log.WithContext(r.Context()).WithField("request", r).Debug("Not Found Handler") + w.WriteHeader(http.StatusNotFound) + }) + v1 := router.PathPrefix(BasePath).Subrouter() + v1.HandleFunc(HealthPath, Health(environmentManager)).Methods(http.MethodGet).Name(HealthPath) + v1.HandleFunc(VersionPath, Version).Methods(http.MethodGet).Name(VersionPath) + + runnerController := &RunnerController{manager: runnerManager} + environmentController := &EnvironmentController{manager: environmentManager} + configureRoutes := func(router *mux.Router) { + runnerController.ConfigureRoutes(router) + environmentController.ConfigureRoutes(router) + + // May add a statistics controller if another route joins + statisticsRouter := router.PathPrefix(StatisticsPath).Subrouter() + statisticsRouter. 
+ HandleFunc(EnvironmentsPath, StatisticsExecutionEnvironments(environmentManager)). + Methods(http.MethodGet).Name(EnvironmentsPath) + } + + if auth.InitializeAuthentication() { + // Create new authenticated subrouter. + // All routes added to v1 after this require authentication. + authenticatedV1Router := v1.PathPrefix("").Subrouter() + authenticatedV1Router.Use(auth.HTTPAuthenticationMiddleware) + configureRoutes(authenticatedV1Router) + } else { + configureRoutes(v1) + } +} + +// Version handles the version route. +// It responds the release information stored in the configuration. +func Version(writer http.ResponseWriter, request *http.Request) { + release := config.Config.Sentry.Release + if release != "" { + sendJSON(writer, release, http.StatusOK, request.Context()) + } else { + writer.WriteHeader(http.StatusNotFound) + } +} + +// StatisticsExecutionEnvironments handles the route for statistics about execution environments. +// It responds the prewarming pool size and the number of idle runners and used runners. 
+func StatisticsExecutionEnvironments(manager environment.Manager) http.HandlerFunc { + return func(writer http.ResponseWriter, request *http.Request) { + result := make(map[string]*dto.StatisticalExecutionEnvironmentData) + environmentsData := manager.Statistics() + for id, data := range environmentsData { + result[id.ToString()] = data + } + sendJSON(writer, result, http.StatusOK, request.Context()) + } +} diff --git a/internal/api/auth/auth.go b/internal/api/auth/auth.go new file mode 100644 index 0000000..1439c04 --- /dev/null +++ b/internal/api/auth/auth.go @@ -0,0 +1,38 @@ +package auth + +import ( + "crypto/subtle" + "github.com/openHPI/poseidon/internal/config" + "github.com/openHPI/poseidon/pkg/logging" + "net/http" +) + +var log = logging.GetLogger("api/auth") + +const TokenHeader = "Poseidon-Token" + +var correctAuthenticationToken []byte + +// InitializeAuthentication returns true iff the authentication is initialized successfully and can be used. +func InitializeAuthentication() bool { + token := config.Config.Server.Token + if token == "" { + return false + } + correctAuthenticationToken = []byte(token) + return true +} + +func HTTPAuthenticationMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + token := r.Header.Get(TokenHeader) + if subtle.ConstantTimeCompare([]byte(token), correctAuthenticationToken) == 0 { + log.WithContext(r.Context()). + WithField("token", logging.RemoveNewlineSymbol(token)). 
+ Warn("Incorrect token") + w.WriteHeader(http.StatusUnauthorized) + return + } + next.ServeHTTP(w, r) + }) +} diff --git a/internal/api/environments.go b/internal/api/environments.go new file mode 100644 index 0000000..6c822c3 --- /dev/null +++ b/internal/api/environments.go @@ -0,0 +1,159 @@ +package api + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "github.com/gorilla/mux" + "github.com/openHPI/poseidon/internal/environment" + "github.com/openHPI/poseidon/internal/runner" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/openHPI/poseidon/pkg/logging" + "net/http" + "strconv" +) + +const ( + executionEnvironmentIDKey = "executionEnvironmentId" + fetchEnvironmentKey = "fetch" + listRouteName = "list" + getRouteName = "get" + createOrUpdateRouteName = "createOrUpdate" + deleteRouteName = "delete" +) + +var ErrMissingURLParameter = errors.New("url parameter missing") + +type EnvironmentController struct { + manager environment.ManagerHandler +} + +type ExecutionEnvironmentsResponse struct { + ExecutionEnvironments []runner.ExecutionEnvironment `json:"executionEnvironments"` +} + +func (e *EnvironmentController) ConfigureRoutes(router *mux.Router) { + environmentRouter := router.PathPrefix(EnvironmentsPath).Subrouter() + environmentRouter.HandleFunc("", e.list).Methods(http.MethodGet).Name(listRouteName) + + specificEnvironmentRouter := environmentRouter.Path(fmt.Sprintf("/{%s:[0-9]+}", executionEnvironmentIDKey)).Subrouter() + specificEnvironmentRouter.HandleFunc("", e.get).Methods(http.MethodGet).Name(getRouteName) + specificEnvironmentRouter.HandleFunc("", e.createOrUpdate).Methods(http.MethodPut).Name(createOrUpdateRouteName) + specificEnvironmentRouter.HandleFunc("", e.delete).Methods(http.MethodDelete).Name(deleteRouteName) +} + +// list returns all information about available execution environments. 
+func (e *EnvironmentController) list(writer http.ResponseWriter, request *http.Request) {
+	fetch, err := parseFetchParameter(request)
+	if err != nil {
+		writeClientError(writer, err, http.StatusBadRequest, request.Context())
+		return
+	}
+
+	// NOTE(review): fetch is forwarded to the manager; presumably it forces a
+	// refresh from the executor before listing — confirm in environment.ManagerHandler.
+	environments, err := e.manager.List(fetch)
+	if err != nil {
+		writeInternalServerError(writer, err, dto.ErrorUnknown, request.Context())
+		return
+	}
+
+	sendJSON(writer, ExecutionEnvironmentsResponse{environments}, http.StatusOK, request.Context())
+}
+
+// get returns all information about the requested execution environment.
+func (e *EnvironmentController) get(writer http.ResponseWriter, request *http.Request) {
+	environmentID, err := parseEnvironmentID(request)
+	if err != nil {
+		// This case is never used as the router validates the id format
+		writeClientError(writer, err, http.StatusBadRequest, request.Context())
+		return
+	}
+	fetch, err := parseFetchParameter(request)
+	if err != nil {
+		writeClientError(writer, err, http.StatusBadRequest, request.Context())
+		return
+	}
+
+	executionEnvironment, err := e.manager.Get(environmentID, fetch)
+	// An unknown environment id is answered with 404 and no body.
+	if errors.Is(err, runner.ErrUnknownExecutionEnvironment) {
+		writer.WriteHeader(http.StatusNotFound)
+		return
+	} else if err != nil {
+		writeInternalServerError(writer, err, dto.ErrorUnknown, request.Context())
+		return
+	}
+
+	sendJSON(writer, executionEnvironment, http.StatusOK, request.Context())
+}
+
+// delete removes the specified execution environment.
+func (e *EnvironmentController) delete(writer http.ResponseWriter, request *http.Request) {
+	environmentID, err := parseEnvironmentID(request)
+	if err != nil {
+		// This case is never used as the router validates the id format
+		writeClientError(writer, err, http.StatusBadRequest, request.Context())
+		return
+	}
+
+	found, err := e.manager.Delete(environmentID)
+	if err != nil {
+		writeInternalServerError(writer, err, dto.ErrorUnknown, request.Context())
+		return
+	} else if !found {
+		writer.WriteHeader(http.StatusNotFound)
+		return
+	}
+
+	writer.WriteHeader(http.StatusNoContent)
+}
+
+// createOrUpdate creates/updates an execution environment on the executor.
+// It responds 201 when the environment was newly created and 204 when an
+// existing one was updated.
+func (e *EnvironmentController) createOrUpdate(writer http.ResponseWriter, request *http.Request) {
+	req := new(dto.ExecutionEnvironmentRequest)
+	if err := json.NewDecoder(request.Body).Decode(req); err != nil {
+		writeClientError(writer, err, http.StatusBadRequest, request.Context())
+		return
+	}
+	environmentID, err := parseEnvironmentID(request)
+	if err != nil {
+		writeClientError(writer, err, http.StatusBadRequest, request.Context())
+		return
+	}
+
+	var created bool
+	logging.StartSpan("api.env.update", "Create Environment", request.Context(), func(ctx context.Context) {
+		created, err = e.manager.CreateOrUpdate(environmentID, *req, ctx)
+	})
+	if err != nil {
+		writeInternalServerError(writer, err, dto.ErrorUnknown, request.Context())
+		// Return early: the 500 response has already been written. Previously
+		// execution fell through and called WriteHeader a second time, which
+		// net/http ignores with a "superfluous WriteHeader" warning.
+		return
+	}
+
+	if created {
+		writer.WriteHeader(http.StatusCreated)
+	} else {
+		writer.WriteHeader(http.StatusNoContent)
+	}
+}
+
+// parseEnvironmentID extracts the execution environment id from the request URL.
+func parseEnvironmentID(request *http.Request) (dto.EnvironmentID, error) {
+	id, ok := mux.Vars(request)[executionEnvironmentIDKey]
+	if !ok {
+		return 0, fmt.Errorf("could not find %s: %w", executionEnvironmentIDKey, ErrMissingURLParameter)
+	}
+	environmentID, err := dto.NewEnvironmentID(id)
+	if err != nil {
+		return 0, fmt.Errorf("could not update environment: %w", err)
+	}
+	return environmentID, nil
+}
+
+// parseFetchParameter parses the optional boolean "fetch" form/query parameter (default false).
+func parseFetchParameter(request *http.Request) (fetch bool, err error) {
+	fetchString := request.FormValue(fetchEnvironmentKey)
+	if fetchString != "" {
+		fetch, err = strconv.ParseBool(fetchString)
+		if err != nil {
+			return false, fmt.Errorf("could not parse fetch parameter: %w", err)
+		}
+	}
+	return fetch, nil
+}
diff --git a/internal/api/health.go b/internal/api/health.go
new file mode 100644
index 0000000..5c2699f
--- /dev/null
+++ b/internal/api/health.go
@@ -0,0 +1,42 @@
+package api
+
+import (
+	"errors"
+	"fmt"
+	"github.com/openHPI/poseidon/internal/config"
+	"github.com/openHPI/poseidon/internal/environment"
+	"github.com/openHPI/poseidon/pkg/dto"
+	"net/http"
+	"strings"
+)
+
+var ErrorPrewarmingPoolDepleting = errors.New("the prewarming pool is depleting")
+
+// Health handles the health route.
+// It responds that the server is alive.
+// If it is not, the response won't reach the client.
+func Health(manager environment.Manager) http.HandlerFunc {
+	return func(writer http.ResponseWriter, request *http.Request) {
+		if err := checkPrewarmingPool(manager); err != nil {
+			sendJSON(writer, &dto.InternalServerError{Message: err.Error(), ErrorCode: dto.PrewarmingPoolDepleting},
+				http.StatusServiceUnavailable, request.Context())
+			return
+		}
+
+		writer.WriteHeader(http.StatusNoContent)
+	}
+}
+
+func checkPrewarmingPool(manager environment.Manager) error {
+	var depletingEnvironments []int
+	for _, data := range manager.Statistics() {
+		if float64(data.IdleRunners)/float64(data.PrewarmingPoolSize) < config.Config.Server.Alert.PrewarmingPoolThreshold {
+			depletingEnvironments = append(depletingEnvironments, data.ID)
+		}
+	}
+	if len(depletingEnvironments) > 0 {
+		arrayToString := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(depletingEnvironments)), ", "), "[]")
+		return fmt.Errorf("%w: environments %s", ErrorPrewarmingPoolDepleting, arrayToString)
+	}
+	return nil
+}
diff --git a/internal/api/helpers.go new file mode 100644
index 0000000..eb3cad4 --- /dev/null +++ b/internal/api/helpers.go @@ -0,0 +1,41 @@ +package api + +import ( + "context" + "encoding/json" + "fmt" + "github.com/openHPI/poseidon/pkg/dto" + "net/http" +) + +func writeInternalServerError(writer http.ResponseWriter, err error, errorCode dto.ErrorCode, ctx context.Context) { + sendJSON(writer, &dto.InternalServerError{Message: err.Error(), ErrorCode: errorCode}, + http.StatusInternalServerError, ctx) +} + +func writeClientError(writer http.ResponseWriter, err error, status uint16, ctx context.Context) { + sendJSON(writer, &dto.ClientError{Message: err.Error()}, int(status), ctx) +} + +func sendJSON(writer http.ResponseWriter, content interface{}, httpStatusCode int, ctx context.Context) { + writer.Header().Set("Content-Type", "application/json") + writer.WriteHeader(httpStatusCode) + response, err := json.Marshal(content) + if err != nil { + // cannot produce infinite recursive loop, since json.Marshal of dto.InternalServerError won't return an error + writeInternalServerError(writer, err, dto.ErrorUnknown, ctx) + return + } + if _, err = writer.Write(response); err != nil { + log.WithError(err).WithContext(ctx).Error("Could not write JSON response") + http.Error(writer, err.Error(), http.StatusInternalServerError) + } +} + +func parseJSONRequestBody(writer http.ResponseWriter, request *http.Request, structure interface{}) error { + if err := json.NewDecoder(request.Body).Decode(structure); err != nil { + writeClientError(writer, err, http.StatusBadRequest, request.Context()) + return fmt.Errorf("error parsing JSON request body: %w", err) + } + return nil +} diff --git a/internal/api/runners.go b/internal/api/runners.go new file mode 100644 index 0000000..20d007e --- /dev/null +++ b/internal/api/runners.go @@ -0,0 +1,259 @@ +package api + +import ( + "context" + "errors" + "fmt" + "github.com/google/uuid" + "github.com/gorilla/mux" + "github.com/openHPI/poseidon/internal/config" + 
"github.com/openHPI/poseidon/internal/runner" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/openHPI/poseidon/pkg/logging" + "github.com/openHPI/poseidon/pkg/monitoring" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +const ( + ExecutePath = "/execute" + WebsocketPath = "/websocket" + UpdateFileSystemPath = "/files" + ListFileSystemRouteName = UpdateFileSystemPath + "_list" + FileContentRawPath = UpdateFileSystemPath + "/raw" + ProvideRoute = "provideRunner" + DeleteRoute = "deleteRunner" + RunnerIDKey = "runnerId" + ExecutionIDKey = "executionID" + PathKey = "path" + RecursiveKey = "recursive" + PrivilegedExecutionKey = "privilegedExecution" +) + +var ErrForbiddenCharacter = errors.New("use of forbidden character") + +type RunnerController struct { + manager runner.Accessor + runnerRouter *mux.Router +} + +// ConfigureRoutes configures a given router with the runner routes of our API. +func (r *RunnerController) ConfigureRoutes(router *mux.Router) { + runnersRouter := router.PathPrefix(RunnersPath).Subrouter() + runnersRouter.HandleFunc("", r.provide).Methods(http.MethodPost).Name(ProvideRoute) + r.runnerRouter = runnersRouter.PathPrefix(fmt.Sprintf("/{%s}", RunnerIDKey)).Subrouter() + r.runnerRouter.Use(r.findRunnerMiddleware) + r.runnerRouter.HandleFunc(UpdateFileSystemPath, r.listFileSystem).Methods(http.MethodGet). + Name(ListFileSystemRouteName) + r.runnerRouter.HandleFunc(UpdateFileSystemPath, r.updateFileSystem).Methods(http.MethodPatch). + Name(UpdateFileSystemPath) + r.runnerRouter.HandleFunc(FileContentRawPath, r.fileContent).Methods(http.MethodGet).Name(FileContentRawPath) + r.runnerRouter.HandleFunc(ExecutePath, r.execute).Methods(http.MethodPost).Name(ExecutePath) + r.runnerRouter.HandleFunc(WebsocketPath, r.connectToRunner).Methods(http.MethodGet).Name(WebsocketPath) + r.runnerRouter.HandleFunc("", r.delete).Methods(http.MethodDelete).Name(DeleteRoute) +} + +// provide handles the provide runners API route. 
+// It tries to respond with the id of an unused runner.
+func (r *RunnerController) listFileSystem(writer http.ResponseWriter, request *http.Request) { + targetRunner, _ := runner.FromContext(request.Context()) + + recursiveRaw := request.URL.Query().Get(RecursiveKey) + recursive, err := strconv.ParseBool(recursiveRaw) + recursive = err != nil || recursive + + path := request.URL.Query().Get(PathKey) + if path == "" { + path = "./" + } + privilegedExecution, err := strconv.ParseBool(request.URL.Query().Get(PrivilegedExecutionKey)) + if err != nil { + privilegedExecution = false + } + + writer.Header().Set("Content-Type", "application/json") + logging.StartSpan("api.fs.list", "List File System", request.Context(), func(ctx context.Context) { + err = targetRunner.ListFileSystem(path, recursive, writer, privilegedExecution, ctx) + }) + if errors.Is(err, runner.ErrFileNotFound) { + writeClientError(writer, err, http.StatusFailedDependency, request.Context()) + return + } else if err != nil { + log.WithContext(request.Context()).WithError(err).Error("Could not perform the requested listFileSystem.") + writeInternalServerError(writer, err, dto.ErrorUnknown, request.Context()) + return + } +} + +// updateFileSystem handles the files API route. +// It takes an dto.UpdateFileSystemRequest and sends it to the runner for processing. 
+func (r *RunnerController) updateFileSystem(writer http.ResponseWriter, request *http.Request) { + monitoring.AddRequestSize(request) + fileCopyRequest := new(dto.UpdateFileSystemRequest) + if err := parseJSONRequestBody(writer, request, fileCopyRequest); err != nil { + return + } + + targetRunner, _ := runner.FromContext(request.Context()) + + var err error + logging.StartSpan("api.fs.update", "Update File System", request.Context(), func(ctx context.Context) { + err = targetRunner.UpdateFileSystem(fileCopyRequest, ctx) + }) + if err != nil { + log.WithContext(request.Context()).WithError(err).Error("Could not perform the requested updateFileSystem.") + writeInternalServerError(writer, err, dto.ErrorUnknown, request.Context()) + return + } + + writer.WriteHeader(http.StatusNoContent) +} + +func (r *RunnerController) fileContent(writer http.ResponseWriter, request *http.Request) { + targetRunner, _ := runner.FromContext(request.Context()) + path := request.URL.Query().Get(PathKey) + privilegedExecution, err := strconv.ParseBool(request.URL.Query().Get(PrivilegedExecutionKey)) + if err != nil { + privilegedExecution = false + } + + writer.Header().Set("Content-Disposition", "attachment; filename=\""+path+"\"") + logging.StartSpan("api.fs.read", "File Content", request.Context(), func(ctx context.Context) { + err = targetRunner.GetFileContent(path, writer, privilegedExecution, ctx) + }) + if errors.Is(err, runner.ErrFileNotFound) { + writeClientError(writer, err, http.StatusFailedDependency, request.Context()) + return + } else if err != nil { + log.WithContext(request.Context()).WithError(err).Error("Could not retrieve the requested file.") + writeInternalServerError(writer, err, dto.ErrorUnknown, request.Context()) + return + } +} + +// execute handles the execute API route. +// It takes an ExecutionRequest and stores it for a runner. +// It returns a url to connect to for a websocket connection to this execution in the corresponding runner. 
+func (r *RunnerController) execute(writer http.ResponseWriter, request *http.Request) { + executionRequest := new(dto.ExecutionRequest) + if err := parseJSONRequestBody(writer, request, executionRequest); err != nil { + return + } + forbiddenCharacters := "'" + if strings.ContainsAny(executionRequest.Command, forbiddenCharacters) { + writeClientError(writer, ErrForbiddenCharacter, http.StatusBadRequest, request.Context()) + return + } + + var scheme string + if config.Config.Server.TLS.Active { + scheme = "wss" + } else { + scheme = "ws" + } + targetRunner, _ := runner.FromContext(request.Context()) + + path, err := r.runnerRouter.Get(WebsocketPath).URL(RunnerIDKey, targetRunner.ID()) + if err != nil { + log.WithContext(request.Context()).WithError(err).Error("Could not create runner websocket URL.") + writeInternalServerError(writer, err, dto.ErrorUnknown, request.Context()) + return + } + newUUID, err := uuid.NewRandom() + if err != nil { + log.WithContext(request.Context()).WithError(err).Error("Could not create execution id") + writeInternalServerError(writer, err, dto.ErrorUnknown, request.Context()) + return + } + id := newUUID.String() + + logging.StartSpan("api.runner.exec", "Store Execution", request.Context(), func(ctx context.Context) { + targetRunner.StoreExecution(id, executionRequest) + }) + webSocketURL := url.URL{ + Scheme: scheme, + Host: request.Host, + Path: path.String(), + RawQuery: fmt.Sprintf("%s=%s", ExecutionIDKey, id), + } + + sendJSON(writer, &dto.ExecutionResponse{WebSocketURL: webSocketURL.String()}, http.StatusOK, request.Context()) +} + +// The findRunnerMiddleware looks up the runnerId for routes containing it +// and adds the runner to the context of the request. 
+func (r *RunnerController) findRunnerMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + runnerID := mux.Vars(request)[RunnerIDKey] + targetRunner, err := r.manager.Get(runnerID) + if err != nil { + // We discard the request body because an early write causes errors for some clients. + // See https://github.com/openHPI/poseidon/issues/54 + _, readErr := io.ReadAll(request.Body) + if readErr != nil { + log.WithContext(request.Context()).WithError(readErr).Debug("Failed to discard the request body") + } + writeClientError(writer, err, http.StatusGone, request.Context()) + return + } + ctx := runner.NewContext(request.Context(), targetRunner) + ctx = context.WithValue(ctx, dto.ContextKey(dto.KeyRunnerID), targetRunner.ID()) + ctx = context.WithValue(ctx, dto.ContextKey(dto.KeyEnvironmentID), targetRunner.Environment().ToString()) + requestWithRunner := request.WithContext(ctx) + monitoring.AddRunnerMonitoringData(requestWithRunner, targetRunner.ID(), targetRunner.Environment()) + + next.ServeHTTP(writer, requestWithRunner) + }) +} + +// delete handles the delete runner API route. +// It destroys the given runner on the executor and removes it from the used runners list. 
+func (r *RunnerController) delete(writer http.ResponseWriter, request *http.Request) { + targetRunner, _ := runner.FromContext(request.Context()) + + var err error + logging.StartSpan("api.runner.delete", "Return Runner", request.Context(), func(ctx context.Context) { + err = r.manager.Return(targetRunner) + }) + if err != nil { + writeInternalServerError(writer, err, dto.Errork8sInternalServerError, request.Context()) + return + } + + writer.WriteHeader(http.StatusNoContent) +} diff --git a/internal/api/websocket.go b/internal/api/websocket.go new file mode 100644 index 0000000..e173e86 --- /dev/null +++ b/internal/api/websocket.go @@ -0,0 +1,113 @@ +package api + +import ( + "context" + "errors" + "fmt" + "github.com/gorilla/websocket" + "github.com/openHPI/poseidon/internal/api/ws" + "github.com/openHPI/poseidon/internal/runner" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/openHPI/poseidon/pkg/logging" + "net/http" +) + +var ErrUnknownExecutionID = errors.New("execution id unknown") + +// webSocketProxy is an encapsulation of logic for forwarding between Runners and CodeOcean. +type webSocketProxy struct { + ctx context.Context + Input ws.WebSocketReader + Output ws.WebSocketWriter +} + +// upgradeConnection upgrades a connection to a websocket and returns a webSocketProxy for this connection. +func upgradeConnection(writer http.ResponseWriter, request *http.Request) (ws.Connection, error) { + connUpgrader := websocket.Upgrader{} + connection, err := connUpgrader.Upgrade(writer, request, nil) + if err != nil { + log.WithContext(request.Context()).WithError(err).Warn("Connection upgrade failed") + return nil, fmt.Errorf("error upgrading the connection: %w", err) + } + return connection, nil +} + +// newWebSocketProxy returns an initiated and started webSocketProxy. +// As this proxy is already started, a start message is send to the client. 
+func newWebSocketProxy(connection ws.Connection, proxyCtx context.Context) *webSocketProxy { + // wsCtx is detached from the proxyCtx + // as it should send all messages in the buffer even shortly after the execution/proxy is done. + wsCtx := context.WithoutCancel(proxyCtx) + wsCtx, cancelWsCommunication := context.WithCancel(wsCtx) + + proxy := &webSocketProxy{ + ctx: wsCtx, + Input: ws.NewCodeOceanToRawReader(connection, wsCtx, proxyCtx), + Output: ws.NewCodeOceanOutputWriter(connection, wsCtx, cancelWsCommunication), + } + + connection.SetCloseHandler(func(code int, text string) error { + log.WithContext(wsCtx).WithField("code", code).WithField("text", text).Debug("The client closed the connection.") + cancelWsCommunication() + return nil + }) + return proxy +} + +// waitForExit waits for an exit of either the runner (when the command terminates) or the client closing the WebSocket +// and handles WebSocket exit messages. +func (wp *webSocketProxy) waitForExit(exit <-chan runner.ExitInfo, cancelExecution context.CancelFunc) { + wp.Input.Start() + + var exitInfo runner.ExitInfo + select { + case <-wp.ctx.Done(): + log.WithContext(wp.ctx).Info("Client closed the connection") + wp.Input.Stop() + wp.Output.Close(nil) + cancelExecution() + <-exit // /internal/runner/runner.go handleExitOrContextDone does not require client connection anymore. + <-exit // The goroutine closes this channel indicating that it does not use the connection to the executor anymore. + case exitInfo = <-exit: + log.WithContext(wp.ctx).Info("Execution returned") + wp.Input.Stop() + wp.Output.Close(&exitInfo) + } +} + +// connectToRunner is the endpoint for websocket connections. 
+func (r *RunnerController) connectToRunner(writer http.ResponseWriter, request *http.Request) { + targetRunner, _ := runner.FromContext(request.Context()) + + executionID := request.URL.Query().Get(ExecutionIDKey) + if !targetRunner.ExecutionExists(executionID) { + writeClientError(writer, ErrUnknownExecutionID, http.StatusNotFound, request.Context()) + return + } + + connection, err := upgradeConnection(writer, request) + if err != nil { + writeInternalServerError(writer, err, dto.ErrorUnknown, request.Context()) + return + } + + // We do not inherit from the request.Context() here because we rely on the WebSocket Close Handler. + proxyCtx := context.WithoutCancel(request.Context()) + proxyCtx, cancelProxy := context.WithCancel(proxyCtx) + defer cancelProxy() + proxy := newWebSocketProxy(connection, proxyCtx) + + log.WithContext(proxyCtx). + WithField("executionID", logging.RemoveNewlineSymbol(executionID)). + Info("Running execution") + logging.StartSpan("api.runner.connect", "Execute Interactively", request.Context(), func(ctx context.Context) { + exit, cancel, err := targetRunner.ExecuteInteractively(executionID, + proxy.Input, proxy.Output.StdOut(), proxy.Output.StdErr(), ctx) + if err != nil { + log.WithContext(ctx).WithError(err).Warn("Cannot execute request.") + return // The proxy is stopped by the deferred cancel. + } + + proxy.waitForExit(exit, cancel) + }) +} diff --git a/internal/api/ws/codeocean_reader.go b/internal/api/ws/codeocean_reader.go new file mode 100644 index 0000000..4b4c677 --- /dev/null +++ b/internal/api/ws/codeocean_reader.go @@ -0,0 +1,177 @@ +package ws + +import ( + "context" + "github.com/gorilla/websocket" + "github.com/openHPI/poseidon/pkg/logging" + "io" +) + +const CodeOceanToRawReaderBufferSize = 1024 + +var log = logging.GetLogger("ws") + +// WebSocketReader is an interface that is intended for providing abstraction around reading from a WebSocket. +// Besides, io.Reader, it also implements io.Writer. 
The Write method is used to inject data into the WebSocket stream. +type WebSocketReader interface { + io.Reader + io.Writer + Start() + Stop() +} + +// codeOceanToRawReader is an io.Reader implementation that provides the content of the WebSocket connection +// to CodeOcean. You have to start the Reader by calling readInputLoop. After that you can use the Read function. +type codeOceanToRawReader struct { + connection Connection + + // readCtx is the context in that messages from CodeOcean are read. + readCtx context.Context + cancelReadCtx context.CancelFunc + // executorCtx is the context in that messages are forwarded to the executor. + executorCtx context.Context + + // A buffered channel of bytes is used to store data coming from CodeOcean via WebSocket + // and retrieve it when Read(...) is called. Since channels are thread-safe, we use one here + // instead of bytes.Buffer. + buffer chan byte + // The priorityBuffer is a buffer for injecting data into stdin of the execution from Poseidon, + // for example the character that causes the tty to generate a SIGQUIT signal. + // It is always read before the regular buffer. + priorityBuffer chan byte +} + +func NewCodeOceanToRawReader(connection Connection, wsCtx, executorCtx context.Context) *codeOceanToRawReader { + return &codeOceanToRawReader{ + connection: connection, + readCtx: wsCtx, // This context may be canceled before the executorCtx. + cancelReadCtx: func() {}, + executorCtx: executorCtx, + buffer: make(chan byte, CodeOceanToRawReaderBufferSize), + priorityBuffer: make(chan byte, CodeOceanToRawReaderBufferSize), + } +} + +// readInputLoop reads from the WebSocket connection and buffers the user's input. +// This is necessary because input must be read for the connection to handle special messages like close and call the +// CloseHandler. 
+func (cr *codeOceanToRawReader) readInputLoop(ctx context.Context) { + readMessage := make(chan bool) + loopContext, cancelInputLoop := context.WithCancel(ctx) + defer cancelInputLoop() + readingContext, cancelNextMessage := context.WithCancel(loopContext) + defer cancelNextMessage() + + for loopContext.Err() == nil { + var messageType int + var reader io.Reader + var err error + + go func() { + messageType, reader, err = cr.connection.NextReader() + select { + case <-readingContext.Done(): + case readMessage <- true: + } + }() + select { + case <-loopContext.Done(): + return + case <-readMessage: + } + + if inputContainsError(messageType, err, loopContext) { + return + } + if handleInput(reader, cr.buffer, loopContext) { + return + } + } +} + +// handleInput receives a new message from the client and may forward it to the executor. +func handleInput(reader io.Reader, buffer chan byte, ctx context.Context) (done bool) { + message, err := io.ReadAll(reader) + if err != nil { + log.WithContext(ctx).WithError(err).Warn("error while reading WebSocket message") + return true + } + + log.WithContext(ctx).WithField("message", string(message)).Trace("Received message from client") + for _, character := range message { + select { + case <-ctx.Done(): + return true + case buffer <- character: + } + } + return false +} + +func inputContainsError(messageType int, err error, ctx context.Context) (done bool) { + if err != nil && websocket.IsCloseError(err, websocket.CloseNormalClosure) { + log.WithContext(ctx).Debug("ReadInputLoop: The client closed the connection!") + // The close handler will do something soon. + return true + } else if err != nil { + log.WithContext(ctx).WithError(err).Warn("Error reading client message") + return true + } + if messageType != websocket.TextMessage { + log.WithContext(ctx).WithField("messageType", messageType).Warn("Received message of wrong type") + return true + } + return false +} + +// Start starts the read input loop asynchronously. 
+func (cr *codeOceanToRawReader) Start() { + ctx, cancel := context.WithCancel(cr.readCtx) + cr.cancelReadCtx = cancel + go cr.readInputLoop(ctx) +} + +// Stop stops the asynchronous read input loop. +func (cr *codeOceanToRawReader) Stop() { + cr.cancelReadCtx() +} + +// Read implements the io.Reader interface. +// It returns bytes from the buffer or priorityBuffer. +func (cr *codeOceanToRawReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Ensure to not return until at least one byte has been read to avoid busy waiting. + select { + case <-cr.executorCtx.Done(): + return 0, io.EOF + case p[0] = <-cr.priorityBuffer: + case p[0] = <-cr.buffer: + } + var n int + for n = 1; n < len(p); n++ { + select { + case p[n] = <-cr.priorityBuffer: + case p[n] = <-cr.buffer: + default: + return n, nil + } + } + return n, nil +} + +// Write implements the io.Writer interface. +// Data written to a codeOceanToRawReader using this method is returned by Read before other data from the WebSocket. +func (cr *codeOceanToRawReader) Write(p []byte) (n int, err error) { + var c byte + for n, c = range p { + select { + case cr.priorityBuffer <- c: + default: + break + } + } + return n, nil +} diff --git a/internal/api/ws/codeocean_writer.go b/internal/api/ws/codeocean_writer.go new file mode 100644 index 0000000..3b9b0cf --- /dev/null +++ b/internal/api/ws/codeocean_writer.go @@ -0,0 +1,174 @@ +package ws + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "github.com/gorilla/websocket" + "github.com/openHPI/poseidon/internal/runner" + "github.com/openHPI/poseidon/pkg/dto" + "io" +) + +// CodeOceanOutputWriterBufferSize defines the number of messages. +const CodeOceanOutputWriterBufferSize = 64 + +// rawToCodeOceanWriter is a simple io.Writer implementation that just forwards the call to sendMessage. 
+type rawToCodeOceanWriter struct { + outputType dto.WebSocketMessageType + sendMessage func(*dto.WebSocketMessage) + ctx context.Context +} + +// Write implements the io.Writer interface. +func (rc *rawToCodeOceanWriter) Write(p []byte) (int, error) { + switch { + case rc.ctx.Err() != nil: + return 0, fmt.Errorf("CodeOceanWriter context done: %w", rc.ctx.Err()) + case len(p) == 0: + return 0, nil + default: + rc.sendMessage(&dto.WebSocketMessage{Type: rc.outputType, Data: string(p)}) + return len(p), nil + } +} + +// WebSocketWriter is an interface that defines which data is required and which information can be passed. +type WebSocketWriter interface { + StdOut() io.Writer + StdErr() io.Writer + Close(info *runner.ExitInfo) +} + +// codeOceanOutputWriter is a concrete WebSocketWriter implementation. +// It forwards the data written to stdOut or stdErr (Nomad, AWS) to the WebSocket connection (CodeOcean). +type codeOceanOutputWriter struct { + connection Connection + stdOut *rawToCodeOceanWriter + stdErr *rawToCodeOceanWriter + queue chan *writingLoopMessage + ctx context.Context +} + +// writingLoopMessage is an internal data structure to notify the writing loop when it should stop. +type writingLoopMessage struct { + done bool + data *dto.WebSocketMessage +} + +// NewCodeOceanOutputWriter provides an codeOceanOutputWriter for the time the context ctx is active. +// The codeOceanOutputWriter handles all the messages defined in the websocket.schema.json (start, timeout, stdout, ...). 
+func NewCodeOceanOutputWriter( + connection Connection, ctx context.Context, done context.CancelFunc) WebSocketWriter { + cw := &codeOceanOutputWriter{ + connection: connection, + queue: make(chan *writingLoopMessage, CodeOceanOutputWriterBufferSize), + ctx: ctx, + } + cw.stdOut = &rawToCodeOceanWriter{ + outputType: dto.WebSocketOutputStdout, + sendMessage: cw.send, + ctx: ctx, + } + cw.stdErr = &rawToCodeOceanWriter{ + outputType: dto.WebSocketOutputStderr, + sendMessage: cw.send, + ctx: ctx, + } + + go cw.startWritingLoop(done) + cw.send(&dto.WebSocketMessage{Type: dto.WebSocketMetaStart}) + return cw +} + +// StdOut provides an io.Writer that forwards the written data to CodeOcean as StdOut stream. +func (cw *codeOceanOutputWriter) StdOut() io.Writer { + return cw.stdOut +} + +// StdErr provides an io.Writer that forwards the written data to CodeOcean as StdErr stream. +func (cw *codeOceanOutputWriter) StdErr() io.Writer { + return cw.stdErr +} + +// Close forwards the kind of exit (timeout, error, normal) to CodeOcean. +// This results in the closing of the WebSocket connection. +// The call of Close is mandatory! +func (cw *codeOceanOutputWriter) Close(info *runner.ExitInfo) { + defer func() { cw.queue <- &writingLoopMessage{done: true} }() + // Mask the internal stop reason before disclosing/forwarding it externally/to CodeOcean. 
+ switch { + case info == nil: + return + case info.Err == nil: + cw.send(&dto.WebSocketMessage{Type: dto.WebSocketExit, ExitCode: info.Code}) + case errors.Is(info.Err, runner.ErrorExecutionTimeout) || errors.Is(info.Err, runner.ErrorRunnerInactivityTimeout): + cw.send(&dto.WebSocketMessage{Type: dto.WebSocketMetaTimeout}) + case errors.Is(info.Err, runner.ErrOOMKilled): + cw.send(&dto.WebSocketMessage{Type: dto.WebSocketOutputError, Data: dto.ErrOOMKilled.Error()}) + case errors.Is(info.Err, runner.ErrDestroyedByAPIRequest): + message := "the allocation stopped as expected" + log.WithContext(cw.ctx).WithError(info.Err).Trace(message) + cw.send(&dto.WebSocketMessage{Type: dto.WebSocketOutputError, Data: message}) + default: + errorMessage := "Error executing the request" + log.WithContext(cw.ctx).WithError(info.Err).Warn(errorMessage) + cw.send(&dto.WebSocketMessage{Type: dto.WebSocketOutputError, Data: errorMessage}) + } +} + +// send forwards the passed dto.WebSocketMessage to the writing loop. +func (cw *codeOceanOutputWriter) send(message *dto.WebSocketMessage) { + cw.queue <- &writingLoopMessage{done: false, data: message} +} + +// startWritingLoop enables the writing loop. +// This is the central and only place where written changes to the WebSocket connection should be done. +// It synchronizes the messages to provide state checks of the WebSocket connection. 
+func (cw *codeOceanOutputWriter) startWritingLoop(writingLoopDone context.CancelFunc) { + defer func() { + message := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") + err := cw.connection.WriteMessage(websocket.CloseMessage, message) + err2 := cw.connection.Close() + if err != nil || err2 != nil { + log.WithContext(cw.ctx).WithError(err).WithField("err2", err2).Warn("Error during websocket close") + } + }() + + for { + message := <-cw.queue + done := true + if message.data != nil { + done = sendMessage(cw.connection, message.data, cw.ctx) + } + if done || message.done { + log.WithContext(cw.ctx).Trace("Writing loop done") + writingLoopDone() + return + } + } +} + +// sendMessage is a helper function for the writing loop. It must not be called from somewhere else! +func sendMessage(connection Connection, message *dto.WebSocketMessage, ctx context.Context) (done bool) { + if message == nil { + return false + } + + encodedMessage, err := json.Marshal(message) + if err != nil { + log.WithContext(ctx).WithField("message", message).WithError(err).Warn("Marshal error") + return false + } + + log.WithContext(ctx).WithField("message", message).Trace("Sending message to client") + err = connection.WriteMessage(websocket.TextMessage, encodedMessage) + if err != nil { + errorMessage := "Error writing the message" + log.WithContext(ctx).WithField("message", message).WithError(err).Warn(errorMessage) + return true + } + + return false +} diff --git a/internal/api/ws/connection.go b/internal/api/ws/connection.go new file mode 100644 index 0000000..c486073 --- /dev/null +++ b/internal/api/ws/connection.go @@ -0,0 +1,14 @@ +package ws + +import ( + "io" +) + +// Connection is an internal interface for websocket.Conn in order to mock it for unit tests. 
+type Connection interface { + WriteMessage(messageType int, data []byte) error + Close() error + NextReader() (messageType int, r io.Reader, err error) + CloseHandler() func(code int, text string) error + SetCloseHandler(handler func(code int, text string) error) +} diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 0000000..f35810a --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,287 @@ +package config + +import ( + "crypto/rand" + "crypto/tls" + "encoding/base64" + "errors" + "flag" + "fmt" + "github.com/getsentry/sentry-go" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/openHPI/poseidon/pkg/logging" + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + "k8s.io/client-go/rest" + "net/url" + "os" + "reflect" + "strconv" + "strings" +) + +const ( + defaultPoseidonPort = 7200 + defaultNomadPort = 4646 + defaultMemoryUsageAlertThreshold = 1_000 +) + +// Config contains the default configuration of Poseidon. +var ( + Config = &configuration{ + Server: server{ + Address: "127.0.0.1", + Port: defaultPoseidonPort, + SystemdSocketActivation: false, + Token: "", + TLS: TLS{ + Active: false, + CAFile: "", + CertFile: "", + KeyFile: "", + }, + InteractiveStderr: true, + TemplateJobFile: "", + Alert: alert{ + PrewarmingPoolThreshold: 0, + PrewarmingPoolReloadTimeout: 0, + }, + LoggingFilterToken: randomFilterToken(), + }, + AWS: AWS{ + Enabled: false, + Endpoint: "", + Functions: []string{}, + }, + Kubernetes: Kubernetes{ + Enabled: false, + Address: "", + Port: 0, + Token: "", + }, + Logger: Logger{ + Level: "INFO", + Formatter: dto.FormatterText, + }, + Profiling: Profiling{ + MemoryThreshold: defaultMemoryUsageAlertThreshold, + }, + Sentry: sentry.ClientOptions{ + AttachStacktrace: true, + }, + InfluxDB: InfluxDB{ + URL: "", + Token: "", + Organization: "", + Bucket: "", + Stage: "", + }, + } + configurationFilePath = "./configuration.yaml" + configurationInitialized = false + log = logging.GetLogger("config") + 
TLSConfig = &tls.Config{ + MinVersion: tls.VersionTLS13, + CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, + } + ErrConfigInitialized = errors.New("configuration is already initialized") +) + +type alert struct { + PrewarmingPoolThreshold float64 + PrewarmingPoolReloadTimeout uint +} + +// server configures the Poseidon webserver. +type server struct { + Address string + Port int + SystemdSocketActivation bool + Token string + TLS TLS + InteractiveStderr bool + TemplateJobFile string + Alert alert + LoggingFilterToken string +} + +// URL returns the URL of the Poseidon webserver. +func (s *server) URL() *url.URL { + return parseURL(s.Address, s.Port, s.TLS.Active) +} + +// AWS configures the AWS Lambda usage. +type AWS struct { + Enabled bool + Endpoint string + Functions []string +} + +type Kubernetes struct { + Enabled bool + Namespace string + Config *rest.Config + Images []string +} + +// TLS configures TLS on a connection. +type TLS struct { + Active bool + CAFile string + CertFile string + KeyFile string +} + +// Logger configures the used Logger. +type Logger struct { + Formatter dto.Formatter + Level string +} + +// Profiling configures the usage of a runtime profiler to create optimized binaries. +type Profiling struct { + CPUEnabled bool + CPUFile string + MemoryInterval uint + MemoryThreshold uint +} + +// InfluxDB configures the usage of an Influx db monitoring. +type InfluxDB struct { + URL string + Token string + Organization string + Bucket string + Stage string +} + +// configuration contains the complete configuration of Poseidon. +type configuration struct { + Server server + AWS AWS + Kubernetes Kubernetes + Logger Logger + Profiling Profiling + Sentry sentry.ClientOptions + InfluxDB InfluxDB +} + +// InitConfig merges configuration options from environment variables and +// a configuration file into the default configuration. Calls of InitConfig +// after the first call have no effect and return an error. 
InitConfig +// should be called directly after starting the program. +func InitConfig() error { + if configurationInitialized { + return ErrConfigInitialized + } + configurationInitialized = true + content := readConfigFile() + Config.mergeYaml(content) + Config.mergeEnvironmentVariables() + return nil +} + +func parseURL(address string, port int, tlsEnabled bool) *url.URL { + scheme := "http" + if tlsEnabled { + scheme = "https" + } + return &url.URL{ + Scheme: scheme, + Host: fmt.Sprintf("%s:%d", address, port), + } +} + +func readConfigFile() []byte { + parseFlags() + data, err := os.ReadFile(configurationFilePath) + if err != nil { + log.WithError(err).Info("Using default configuration...") + return nil + } + return data +} + +func parseFlags() { + if flag.Lookup("config") == nil { + flag.StringVar(&configurationFilePath, "config", configurationFilePath, "path of the yaml config file") + } + flag.Parse() +} + +func (c *configuration) mergeYaml(content []byte) { + if err := yaml.Unmarshal(content, c); err != nil { + log.WithError(err).Fatal("Could not parse configuration file") + } +} + +func (c *configuration) mergeEnvironmentVariables() { + readFromEnvironment("POSEIDON", reflect.ValueOf(c).Elem()) +} + +func readFromEnvironment(prefix string, value reflect.Value) { + logEntry := log.WithField("prefix", prefix) + // if value was not derived from a pointer, it is not possible to alter its contents + if !value.CanSet() { + logEntry.Warn("Cannot overwrite struct field that can not be set") + return + } + + if value.Kind() != reflect.Struct { + loadValue(prefix, value, logEntry) + } else { + for i := 0; i < value.NumField(); i++ { + fieldName := value.Type().Field(i).Name + newPrefix := fmt.Sprintf("%s_%s", prefix, strings.ToUpper(fieldName)) + readFromEnvironment(newPrefix, value.Field(i)) + } + } +} + +func loadValue(prefix string, value reflect.Value, logEntry *logrus.Entry) { + content, ok := os.LookupEnv(prefix) + if !ok { + return + } + logEntry = 
logEntry.WithField("content", content) + + switch value.Kind() { + case reflect.String: + value.SetString(content) + case reflect.Int: + integer, err := strconv.Atoi(content) + if err != nil { + logEntry.Warn("Could not parse environment variable as integer") + return + } + value.SetInt(int64(integer)) + case reflect.Bool: + boolean, err := strconv.ParseBool(content) + if err != nil { + logEntry.Warn("Could not parse environment variable as boolean") + return + } + value.SetBool(boolean) + case reflect.Slice: + if content != "" && content[0] == '"' && content[len(content)-1] == '"' { + content = content[1 : len(content)-1] // remove wrapping quotes + } + parts := strings.Fields(content) + value.Set(reflect.AppendSlice(value, reflect.ValueOf(parts))) + default: + // ignore this field + logEntry.WithField("type", value.Type().Name()). + Warn("Setting configuration option via environment variables is not supported") + } +} + +func randomFilterToken() string { + const tokenLength = 32 + randomBytes := make([]byte, tokenLength) //nolint:all // length required to be filled by rand.Read. + n, err := rand.Read(randomBytes) + if n != tokenLength || err != nil { + log.WithError(err).WithField("byteCount", n).Fatal("Failed to generate random token") + } + + return base64.URLEncoding.EncodeToString(randomBytes) +} diff --git a/internal/environment/abstract_manager.go b/internal/environment/abstract_manager.go new file mode 100644 index 0000000..d923c1b --- /dev/null +++ b/internal/environment/abstract_manager.go @@ -0,0 +1,77 @@ +package environment + +import ( + "context" + "fmt" + "github.com/openHPI/poseidon/internal/runner" + "github.com/openHPI/poseidon/pkg/dto" +) + +// AbstractManager is used to have a fallback environment manager in the chain of responsibility +// following the null object pattern. 
+type AbstractManager struct { + nextHandler ManagerHandler + runnerManager runner.Manager +} + +func (n *AbstractManager) SetNextHandler(next ManagerHandler) { + n.nextHandler = next +} + +func (n *AbstractManager) NextHandler() ManagerHandler { + if n.HasNextHandler() { + return n.nextHandler + } else { + return &AbstractManager{} + } +} + +func (n *AbstractManager) HasNextHandler() bool { + return n.nextHandler != nil +} + +func (n *AbstractManager) List(_ bool) ([]runner.ExecutionEnvironment, error) { + return []runner.ExecutionEnvironment{}, nil +} + +func (n *AbstractManager) Get(_ dto.EnvironmentID, _ bool) (runner.ExecutionEnvironment, error) { + return nil, runner.ErrRunnerNotFound +} + +func (n *AbstractManager) CreateOrUpdate(_ dto.EnvironmentID, _ dto.ExecutionEnvironmentRequest, _ context.Context) ( + bool, error) { + return false, nil +} + +func (n *AbstractManager) Delete(id dto.EnvironmentID) (bool, error) { + if n.runnerManager == nil { + return false, nil + } + + e, ok := n.runnerManager.GetEnvironment(id) + if !ok { + isFound, err := n.NextHandler().Delete(id) + if err != nil { + return false, fmt.Errorf("abstract wrapped: %w", err) + } + return isFound, nil + } + + n.runnerManager.DeleteEnvironment(id) + if err := e.Delete(runner.ErrDestroyedByAPIRequest); err != nil { + return true, fmt.Errorf("could not delete environment: %w", err) + } + return true, nil +} + +func (n *AbstractManager) Statistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData { + if n.runnerManager == nil { + return map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData{} + } + + statistics := n.NextHandler().Statistics() + for k, v := range n.runnerManager.EnvironmentStatistics() { + statistics[k] = v + } + return statistics +} diff --git a/internal/environment/k8s_env.go b/internal/environment/k8s_env.go new file mode 100644 index 0000000..3c7086b --- /dev/null +++ b/internal/environment/k8s_env.go @@ -0,0 +1,139 @@ +package environment + +import ( 
+ "encoding/json" + "fmt" + "github.com/openHPI/poseidon/internal/runner" + "github.com/openHPI/poseidon/pkg/dto" + "k8s.io/client-go/kubernetes" +) + +type KubernetesEnvironment struct { + id dto.EnvironmentID + image string + cpuLimit uint + memoryLimit uint + networkEnabled bool + mappedPorts []uint16 + prewarmingPool uint + onDestroyRunner runner.DestroyRunnerHandler + clientset *kubernetes.Clientset +} + +func NewKubernetesEnvironment(onDestroyRunner runner.DestroyRunnerHandler, clientset *kubernetes.Clientset) *KubernetesEnvironment { + return &KubernetesEnvironment{ + onDestroyRunner: onDestroyRunner, + clientset: clientset, + cpuLimit: 500, // Default CPU limit (in millicores) + memoryLimit: 512, // Default memory limit (in MB) + networkEnabled: false, + prewarmingPool: 1, + } +} + +func (k *KubernetesEnvironment) MarshalJSON() ([]byte, error) { + res, err := json.Marshal(dto.ExecutionEnvironmentData{ + ID: int(k.ID()), + ExecutionEnvironmentRequest: dto.ExecutionEnvironmentRequest{Image: k.Image()}, + }) + if err != nil { + return res, fmt.Errorf("couldn't marshal kubernetes execution environment: %w", err) + } + return res, nil +} + +func (k *KubernetesEnvironment) ID() dto.EnvironmentID { + return k.id +} + +func (k *KubernetesEnvironment) SetID(id dto.EnvironmentID) { + k.id = id +} + +func (k *KubernetesEnvironment) Image() string { + return k.image +} + +func (k *KubernetesEnvironment) SetImage(image string) { + k.image = image +} + +func (k *KubernetesEnvironment) Delete(_ runner.DestroyReason) error { + // Implement Kubernetes-specific deletion logic here + return nil +} + +func (k *KubernetesEnvironment) Sample() (r runner.Runner, ok bool) { + workload, err := runner.NewKubernetesPodWorkload(k, k.onDestroyRunner, k.clientset) + if err != nil { + return nil, false + } + return workload, true +} + +func (k *KubernetesEnvironment) IdleRunnerCount() uint { + // Implement logic to count idle runners in Kubernetes + return 0 +} + +func (k 
*KubernetesEnvironment) PrewarmingPoolSize() uint { + return k.prewarmingPool +} + +func (k *KubernetesEnvironment) SetPrewarmingPoolSize(size uint) { + k.prewarmingPool = size +} + +func (k *KubernetesEnvironment) ApplyPrewarmingPoolSize() error { + // Implement logic to apply prewarming pool size in Kubernetes + return nil +} + +func (k *KubernetesEnvironment) CPULimit() uint { + return k.cpuLimit +} + +func (k *KubernetesEnvironment) SetCPULimit(limit uint) { + k.cpuLimit = limit +} + +func (k *KubernetesEnvironment) MemoryLimit() uint { + return k.memoryLimit +} + +func (k *KubernetesEnvironment) SetMemoryLimit(limit uint) { + k.memoryLimit = limit +} + +func (k *KubernetesEnvironment) NetworkAccess() (enabled bool, mappedPorts []uint16) { + return k.networkEnabled, k.mappedPorts +} + +func (k *KubernetesEnvironment) SetNetworkAccess(enabled bool, ports []uint16) { + k.networkEnabled = enabled + k.mappedPorts = ports +} + +func (k *KubernetesEnvironment) SetConfigFrom(env runner.ExecutionEnvironment) { + if kEnv, ok := env.(*KubernetesEnvironment); ok { + k.cpuLimit = kEnv.cpuLimit + k.memoryLimit = kEnv.memoryLimit + k.networkEnabled = kEnv.networkEnabled + k.mappedPorts = kEnv.mappedPorts + k.prewarmingPool = kEnv.prewarmingPool + } +} + +func (k *KubernetesEnvironment) Register() error { + // Implement Kubernetes-specific registration logic here + return nil +} + +func (k *KubernetesEnvironment) AddRunner(runner runner.Runner) { + // Implement logic to add a runner to the Kubernetes environment +} + +func (k *KubernetesEnvironment) DeleteRunner(id string) (r runner.Runner, ok bool) { + // Implement logic to delete a runner from the Kubernetes environment + return nil, false +} diff --git a/internal/environment/k8s_manager.go b/internal/environment/k8s_manager.go new file mode 100644 index 0000000..2dffeca --- /dev/null +++ b/internal/environment/k8s_manager.go @@ -0,0 +1,71 @@ +package environment + +import ( + "context" + "fmt" + 
"github.com/openHPI/poseidon/internal/config" + "github.com/openHPI/poseidon/internal/runner" + "github.com/openHPI/poseidon/pkg/dto" + "k8s.io/client-go/kubernetes" +) + +// KubernetesEnvironmentManager manages Kubernetes environments. +type KubernetesEnvironmentManager struct { + *AbstractManager + clientSet *kubernetes.Clientset +} + +func NewKubernetesEnvironmentManager(runnerManager runner.Manager, clientset *kubernetes.Clientset) *KubernetesEnvironmentManager { + return &KubernetesEnvironmentManager{ + AbstractManager: &AbstractManager{nil, runnerManager}, + clientSet: clientset, + } +} + +func (k *KubernetesEnvironmentManager) List(fetch bool) ([]runner.ExecutionEnvironment, error) { + list, err := k.NextHandler().List(fetch) + if err != nil { + return nil, fmt.Errorf("kubernetes wrapped: %w", err) + } + return append(list, k.runnerManager.ListEnvironments()...), nil +} + +func (k *KubernetesEnvironmentManager) Get(id dto.EnvironmentID, fetch bool) (runner.ExecutionEnvironment, error) { + e, ok := k.runnerManager.GetEnvironment(id) + if ok { + return e, nil + } else { + e, err := k.NextHandler().Get(id, fetch) + if err != nil { + return nil, fmt.Errorf("kubernetes wrapped: %w", err) + } + return e, nil + } +} + +func (k *KubernetesEnvironmentManager) CreateOrUpdate( + id dto.EnvironmentID, request dto.ExecutionEnvironmentRequest, ctx context.Context) (bool, error) { + if !isKubernetesEnvironment(request) { + isCreated, err := k.NextHandler().CreateOrUpdate(id, request, ctx) + if err != nil { + return false, fmt.Errorf("kubernetes wrapped: %w", err) + } + return isCreated, nil + } + + _, ok := k.runnerManager.GetEnvironment(id) + e := NewKubernetesEnvironment(k.runnerManager.Return, k.clientSet) + e.SetID(id) + e.SetImage(request.Image) + k.runnerManager.StoreEnvironment(e) + return !ok, nil +} + +func isKubernetesEnvironment(request dto.ExecutionEnvironmentRequest) bool { + for _, image := range config.Config.Kubernetes.Images { + if request.Image == image { 
+ return true + } + } + return false +} diff --git a/internal/environment/manager.go b/internal/environment/manager.go new file mode 100644 index 0000000..68f5959 --- /dev/null +++ b/internal/environment/manager.go @@ -0,0 +1,43 @@ +package environment + +import ( + "context" + "github.com/openHPI/poseidon/internal/runner" + "github.com/openHPI/poseidon/pkg/dto" +) + +// ManagerHandler is one handler in the chain of responsibility of environment managers. +// Each manager can handle different requests. +type ManagerHandler interface { + Manager + SetNextHandler(next ManagerHandler) + NextHandler() ManagerHandler + HasNextHandler() bool +} + +// Manager encapsulates API calls to the executor API for creation and deletion of execution environments. +type Manager interface { + // List returns all environments known by Poseidon. + // When `fetch` is set the environments are fetched from the executor before returning. + List(fetch bool) ([]runner.ExecutionEnvironment, error) + + // Get returns the details of the requested environment. + // When `fetch` is set the requested environment is fetched from the executor before returning. + Get(id dto.EnvironmentID, fetch bool) (runner.ExecutionEnvironment, error) + + // CreateOrUpdate creates/updates an execution environment on the executor. + // If the job was created, the returned boolean is true, if it was updated, it is false. + // If err is not nil, that means the environment was neither created nor updated. + CreateOrUpdate( + id dto.EnvironmentID, + request dto.ExecutionEnvironmentRequest, + ctx context.Context, + ) (bool, error) + + // Delete removes the specified execution environment. + // Iff the specified environment could not be found Delete returns false. + Delete(id dto.EnvironmentID) (bool, error) + + // Statistics returns statistical data for each execution environment. 
+ Statistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData +} diff --git a/internal/environment/template-environment-job.hcl b/internal/environment/template-environment-job.hcl new file mode 100644 index 0000000..06bab16 --- /dev/null +++ b/internal/environment/template-environment-job.hcl @@ -0,0 +1,84 @@ +// This is the default job configuration that is used when no path to another default configuration is given + +job "template-0" { + datacenters = ["dc1"] + type = "batch" + + group "default-group" { + ephemeral_disk { + migrate = false + size = 10 + sticky = false + } + count = 1 + spread { + // see https://www.nomadproject.io/docs/job-specification/spread#even-spread-across-data-center + // This spreads the load evenly amongst our nodes + attribute = "${node.unique.name}" + weight = 100 + } + restart { + attempts = 3 + delay = "15s" + interval = "1h" + mode = "fail" + } + reschedule { + unlimited = false + attempts = 3 + interval = "6h" + delay = "1m" + max_delay = "4m" + delay_function = "exponential" + } + + task "default-task" { + driver = "docker" + kill_timeout = "0s" + kill_signal = "SIGKILL" + + config { + image = "openhpi/docker_exec_phusion" + command = "sleep" + args = ["infinity"] + network_mode = "none" + } + + logs { + max_files = 1 + max_file_size = 1 + } + + resources { + cpu = 40 + memory = 30 + } + } + } + + group "config" { + // We want to store whether a task is in use in order to recover from a downtime. + // Without a separate config task, marking a task as used would result in a restart of that task, + // as the meta information is passed to the container as environment variables. 
+ count = 0 + task "config" { + driver = "exec" + config { + command = "true" + } + logs { + max_files = 1 + max_file_size = 1 + } + resources { + // minimum values + cpu = 1 + memory = 10 + } + } + meta { + used = "false" + prewarmingPoolSize = "0" + } + } +} diff --git a/internal/runner/abstract_manager.go b/internal/runner/abstract_manager.go new file mode 100644 index 0000000..faf702f --- /dev/null +++ b/internal/runner/abstract_manager.go @@ -0,0 +1,125 @@ +package runner + +import ( + "context" + "errors" + "fmt" + "github.com/influxdata/influxdb-client-go/v2/api/write" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/openHPI/poseidon/pkg/monitoring" + "github.com/openHPI/poseidon/pkg/storage" + "time" +) + +var ErrNullObject = errors.New("functionality not available for the null object") + +// AbstractManager is used to have a fallback runner manager in the chain of responsibility +// following the null object pattern. +// Remember all functions that can call the NextHandler should call it (See AccessorHandler). +type AbstractManager struct { + nextHandler AccessorHandler + environments storage.Storage[ExecutionEnvironment] + usedRunners storage.Storage[Runner] +} + +// NewAbstractManager creates a new abstract runner manager that keeps track of all runners of one kind. +// Since this manager is currently directly bound to the lifespan of Poseidon, it does not need a context cancel. +func NewAbstractManager(ctx context.Context) *AbstractManager { + return &AbstractManager{ + environments: storage.NewMonitoredLocalStorage[ExecutionEnvironment]( + monitoring.MeasurementEnvironments, monitorEnvironmentData, 0, ctx), + usedRunners: storage.NewMonitoredLocalStorage[Runner]( + monitoring.MeasurementUsedRunner, MonitorRunnersEnvironmentID, time.Hour, ctx), + } +} + +// MonitorEnvironmentID adds the passed environment id to the monitoring Point p. 
+func MonitorEnvironmentID[T any](id dto.EnvironmentID) storage.WriteCallback[T] { + return func(p *write.Point, _ T, _ storage.EventType) { + p.AddTag(monitoring.InfluxKeyEnvironmentID, id.ToString()) + } +} + +// MonitorRunnersEnvironmentID passes the id of the environment e into the monitoring Point p. +func MonitorRunnersEnvironmentID(p *write.Point, e Runner, _ storage.EventType) { + if e != nil { + p.AddTag(monitoring.InfluxKeyEnvironmentID, e.Environment().ToString()) + } +} + +func (n *AbstractManager) SetNextHandler(next AccessorHandler) { + n.nextHandler = next +} + +func (n *AbstractManager) NextHandler() AccessorHandler { + if n.HasNextHandler() { + return n.nextHandler + } else { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + return NewAbstractManager(ctx) + } +} + +func (n *AbstractManager) HasNextHandler() bool { + return n.nextHandler != nil +} + +func (n *AbstractManager) ListEnvironments() []ExecutionEnvironment { + return n.environments.List() +} + +func (n *AbstractManager) GetEnvironment(id dto.EnvironmentID) (ExecutionEnvironment, bool) { + return n.environments.Get(id.ToString()) +} + +func (n *AbstractManager) StoreEnvironment(environment ExecutionEnvironment) { + n.environments.Add(environment.ID().ToString(), environment) +} + +func (n *AbstractManager) DeleteEnvironment(id dto.EnvironmentID) { + n.environments.Delete(id.ToString()) +} + +func (n *AbstractManager) EnvironmentStatistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData { + environments := make(map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData) + for _, e := range n.environments.List() { + environments[e.ID()] = &dto.StatisticalExecutionEnvironmentData{ + ID: int(e.ID()), + PrewarmingPoolSize: e.PrewarmingPoolSize(), + IdleRunners: e.IdleRunnerCount(), + UsedRunners: 0, // Increased below. 
+ } + } + + for _, r := range n.usedRunners.List() { + environments[r.Environment()].UsedRunners++ + } + + return environments +} + +func (n *AbstractManager) Claim(_ dto.EnvironmentID, _ int) (Runner, error) { + return nil, ErrNullObject +} + +func (n *AbstractManager) Get(runnerID string) (Runner, error) { + runner, ok := n.usedRunners.Get(runnerID) + if ok { + return runner, nil + } + + if !n.HasNextHandler() { + return nil, ErrRunnerNotFound + } + + r, err := n.NextHandler().Get(runnerID) + if err != nil { + return r, fmt.Errorf("abstract manager wrapped: %w", err) + } + return r, nil +} + +func (n *AbstractManager) Return(_ Runner) error { + return nil +} diff --git a/internal/runner/execution_environment.go b/internal/runner/execution_environment.go new file mode 100644 index 0000000..73c583f --- /dev/null +++ b/internal/runner/execution_environment.go @@ -0,0 +1,66 @@ +package runner + +import ( + "encoding/json" + "github.com/influxdata/influxdb-client-go/v2/api/write" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/openHPI/poseidon/pkg/storage" + "strconv" +) + +// ExecutionEnvironment are groups of runner that share the configuration stored in the environment. +type ExecutionEnvironment interface { + json.Marshaler + + // ID returns the id of the environment. + ID() dto.EnvironmentID + SetID(id dto.EnvironmentID) + // PrewarmingPoolSize sets the number of idle runner of this environment that should be prewarmed. + PrewarmingPoolSize() uint + SetPrewarmingPoolSize(count uint) + // ApplyPrewarmingPoolSize creates idle runners according to the PrewarmingPoolSize. + ApplyPrewarmingPoolSize() error + // CPULimit sets the share of cpu that a runner should receive at minimum. + CPULimit() uint + SetCPULimit(limit uint) + // MemoryLimit sets the amount of memory that should be available for each runner. + MemoryLimit() uint + SetMemoryLimit(limit uint) + // Image sets the image of the runner, e.g. Docker image. 
+ Image() string + SetImage(image string) + // NetworkAccess sets if a runner should have network access and if ports should be mapped. + NetworkAccess() (bool, []uint16) + SetNetworkAccess(allow bool, ports []uint16) + // SetConfigFrom copies all above attributes from the passed environment to the object itself. + SetConfigFrom(environment ExecutionEnvironment) + + // Register saves this environment at the executor. + Register() error + // Delete removes this environment and all it's runner from the executor and Poseidon itself. + // Iff local the environment is just removed from Poseidon without external escalation. + Delete(reason DestroyReason) error + + // Sample returns and removes an arbitrary available runner. + // ok is true iff a runner was returned. + Sample() (r Runner, ok bool) + // AddRunner adds an existing runner to the idle runners of the environment. + AddRunner(r Runner) + // DeleteRunner removes an idle runner from the environment and returns it. + // This function handles only the environment. The runner has to be destroyed separately. + // ok is true iff the runner was found (and deleted). + DeleteRunner(id string) (r Runner, ok bool) + // IdleRunnerCount returns the number of idle runners of the environment. + IdleRunnerCount() uint +} + +// monitorEnvironmentData passes the configuration of the environment e into the monitoring Point p. 
+func monitorEnvironmentData(p *write.Point, e ExecutionEnvironment, eventType storage.EventType) { + if eventType == storage.Creation && e != nil { + p.AddTag("image", e.Image()) + p.AddTag("cpu_limit", strconv.Itoa(int(e.CPULimit()))) + p.AddTag("memory_limit", strconv.Itoa(int(e.MemoryLimit()))) + hasNetworkAccess, _ := e.NetworkAccess() + p.AddTag("network_access", strconv.FormatBool(hasNetworkAccess)) + } +} diff --git a/internal/runner/inactivity_timer.go b/internal/runner/inactivity_timer.go new file mode 100644 index 0000000..39b9aab --- /dev/null +++ b/internal/runner/inactivity_timer.go @@ -0,0 +1,111 @@ +package runner + +import ( + "errors" + "github.com/openHPI/poseidon/pkg/dto" + "sync" + "time" +) + +// InactivityTimer is a wrapper around a timer that is used to delete a Runner after some time of inactivity. +type InactivityTimer interface { + // SetupTimeout starts the timeout after a runner gets deleted. + SetupTimeout(duration time.Duration) + + // ResetTimeout resets the current timeout so that the runner gets deleted after the time set in Setup from now. + // It does not make an already expired timer run again. + ResetTimeout() + + // StopTimeout stops the timeout but does not remove the runner. + StopTimeout() + + // TimeoutPassed returns true if the timeout expired and false otherwise. 
+ TimeoutPassed() bool +} + +type TimerState uint8 + +const ( + TimerInactive TimerState = 0 + TimerRunning TimerState = 1 + TimerExpired TimerState = 2 +) + +var ( + ErrorRunnerInactivityTimeout DestroyReason = errors.New("runner inactivity timeout exceeded") + ErrorExecutionTimeout = errors.New("execution timeout exceeded") +) + +type InactivityTimerImplementation struct { + timer *time.Timer + duration time.Duration + state TimerState + runner Runner + onDestroy DestroyRunnerHandler + mu sync.Mutex +} + +func NewInactivityTimer(runner Runner, onDestroy DestroyRunnerHandler) InactivityTimer { + return &InactivityTimerImplementation{ + state: TimerInactive, + runner: runner, + onDestroy: onDestroy, + } +} + +func (t *InactivityTimerImplementation) SetupTimeout(duration time.Duration) { + t.mu.Lock() + defer t.mu.Unlock() + // Stop old timer if present. + if t.timer != nil { + t.timer.Stop() + } + if duration == 0 { + t.state = TimerInactive + return + } + t.state = TimerRunning + t.duration = duration + + t.timer = time.AfterFunc(duration, func() { + t.mu.Lock() + t.state = TimerExpired + // The timer must be unlocked here already in order to avoid a deadlock with the call to StopTimeout in Manager.Return. + t.mu.Unlock() + err := t.onDestroy(t.runner) + if err != nil { + log.WithError(err).WithField(dto.KeyRunnerID, t.runner.ID()). + Warn("Returning runner after inactivity caused an error") + } else { + log.WithField(dto.KeyRunnerID, t.runner.ID()).Info("Returning runner due to inactivity timeout") + } + }) +} + +func (t *InactivityTimerImplementation) ResetTimeout() { + t.mu.Lock() + defer t.mu.Unlock() + if t.state != TimerRunning { + // The timer has already expired or been stopped. We don't want to restart it. + return + } + if t.timer.Stop() { + t.timer.Reset(t.duration) + } else { + log.Error("Timer is in state running but stopped. 
This should never happen") + } +} + +func (t *InactivityTimerImplementation) StopTimeout() { + t.mu.Lock() + defer t.mu.Unlock() + if t.state != TimerRunning { + return + } + t.timer.Stop() + t.state = TimerInactive +} + +func (t *InactivityTimerImplementation) TimeoutPassed() bool { + return t.state == TimerExpired +} diff --git a/internal/runner/k8s_manager.go b/internal/runner/k8s_manager.go new file mode 100644 index 0000000..2c982b8 --- /dev/null +++ b/internal/runner/k8s_manager.go @@ -0,0 +1,70 @@ +package runner + +import ( + "context" + "errors" + "fmt" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/openHPI/poseidon/pkg/logging" + "k8s.io/client-go/kubernetes" + "time" +) + +var ( + log = logging.GetLogger("runner") + ErrUnknownExecutionEnvironment = errors.New("execution environment not found") + ErrNoRunnersAvailable = errors.New("no runners available for this execution environment") + ErrRunnerNotFound = errors.New("no runner found with this id") +) + +type KubernetesRunnerManager struct { + *AbstractManager + clientSet *kubernetes.Clientset +} + +// NewKubernetesRunnerManager creates a new runner manager that keeps track of all runners in Kubernetes. 
+func NewKubernetesRunnerManager(ctx context.Context, clientSet *kubernetes.Clientset) *KubernetesRunnerManager { + return &KubernetesRunnerManager{ + AbstractManager: NewAbstractManager(ctx), + clientSet: clientSet, + } +} + +func (k *KubernetesRunnerManager) Claim(id dto.EnvironmentID, duration int) (Runner, error) { + environment, ok := k.GetEnvironment(id) + if !ok { + r, err := k.NextHandler().Claim(id, duration) + if err != nil { + return nil, fmt.Errorf("kubernetes wrapped: %w", err) + } + return r, nil + } + + runner, ok := environment.Sample() + if !ok { + log.Warn("no kubernetes runner available") + return nil, ErrNoRunnersAvailable + } + + k.usedRunners.Add(runner.ID(), runner) + runner.SetupTimeout(time.Duration(duration) * time.Second) + + // Here you might want to add Kubernetes-specific logic + // For example, updating the pod status or adding labels + + return runner, nil +} + +func (k *KubernetesRunnerManager) Return(r Runner) error { + _, isKubernetesRunner := r.(*KubernetesPodWorkload) + if isKubernetesRunner { + k.usedRunners.Delete(r.ID()) + + // Here you might want to add Kubernetes-specific logic + // For example, cleaning up the pod or updating its status + + } else if err := k.NextHandler().Return(r); err != nil { + return fmt.Errorf("kubernetes wrapped: %w", err) + } + return nil +} diff --git a/internal/runner/k8s_runner.go b/internal/runner/k8s_runner.go new file mode 100644 index 0000000..6d7f166 --- /dev/null +++ b/internal/runner/k8s_runner.go @@ -0,0 +1,251 @@ +package runner + +import ( + "context" + "errors" + "fmt" + "github.com/google/uuid" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/openHPI/poseidon/pkg/monitoring" + "github.com/openHPI/poseidon/pkg/storage" + "io" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "net/http" + "time" +) + +var ErrPodCreationFailed = errors.New("failed to create pod") + +var ( + ErrorUnknownExecution = errors.New("unknown 
execution") + ErrFileNotFound = errors.New("file not found or insufficient permissions") + ErrOOMKilled DestroyReason = errors.New("the runner was killed due to out of memory") + ErrDestroyedByAPIRequest DestroyReason = errors.New("the client wants to stop the runner") +) + +// KubernetesPodWorkload is an abstraction to manage a Kubernetes pod. +// It is not persisted on a Poseidon restart. +// The InactivityTimer is used actively. It stops and deletes the pod. +type KubernetesPodWorkload struct { + InactivityTimer + id string + fs map[dto.FilePath][]byte + executions storage.Storage[*dto.ExecutionRequest] + runningExecutions map[string]context.CancelFunc + onDestroy DestroyRunnerHandler + environment ExecutionEnvironment + ctx context.Context + cancel context.CancelFunc + clientset *kubernetes.Clientset + podName string + namespace string +} + +// NewKubernetesPodWorkload creates a new KubernetesPodWorkload with the provided id. +func NewKubernetesPodWorkload( + environment ExecutionEnvironment, onDestroy DestroyRunnerHandler, clientset *kubernetes.Clientset) (*KubernetesPodWorkload, error) { + newUUID, err := uuid.NewUUID() + if err != nil { + return nil, fmt.Errorf("failed generating runner id: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + workload := &KubernetesPodWorkload{ + id: newUUID.String(), + fs: make(map[dto.FilePath][]byte), + runningExecutions: make(map[string]context.CancelFunc), + onDestroy: onDestroy, + environment: environment, + ctx: ctx, + cancel: cancel, + clientset: clientset, + namespace: "default", // You might want to make this configurable + podName: fmt.Sprintf("workload-%s", newUUID.String()), + } + workload.executions = storage.NewMonitoredLocalStorage[*dto.ExecutionRequest]( + monitoring.MeasurementExecutionsK8s, monitorExecutionsRunnerID(environment.ID(), workload.id), time.Minute, ctx) + workload.InactivityTimer = NewInactivityTimer(workload, func(_ Runner) error { + return workload.Destroy(nil) + }) + 
return workload, nil +} + +func (w *KubernetesPodWorkload) ID() string { + return w.id +} + +func (w *KubernetesPodWorkload) Environment() dto.EnvironmentID { + return w.environment.ID() +} + +func (w *KubernetesPodWorkload) MappedPorts() []*dto.MappedPort { + // Implement port mapping logic for Kubernetes + return []*dto.MappedPort{} +} + +func (w *KubernetesPodWorkload) StoreExecution(id string, request *dto.ExecutionRequest) { + w.executions.Add(id, request) +} + +func (w *KubernetesPodWorkload) ExecutionExists(id string) bool { + _, ok := w.executions.Get(id) + return ok +} + +// ExecuteInteractively runs the execution request in a Kubernetes pod. +func (w *KubernetesPodWorkload) ExecuteInteractively( + id string, _ io.ReadWriter, stdout, stderr io.Writer, ctx context.Context) ( + <-chan ExitInfo, context.CancelFunc, error) { + w.ResetTimeout() + request, ok := w.executions.Pop(id) + if !ok { + return nil, nil, ErrorUnknownExecution + } + hideEnvironmentVariablesK8s(request, "K8S") + command, executionCtx, cancel := prepareExecution(request, w.ctx) + exitInternal := make(chan ExitInfo) + exit := make(chan ExitInfo, 1) + + go w.executeCommand(executionCtx, command, stdout, stderr, exitInternal) + go w.handleRunnerTimeout(executionCtx, exitInternal, exit, id) + + return exit, cancel, nil +} + +func (w *KubernetesPodWorkload) ListFileSystem(path string, recursive bool, writer io.Writer, humanReadable bool, ctx context.Context) error { + // Implement file system listing for Kubernetes pods + return dto.ErrNotSupported +} + +func (w *KubernetesPodWorkload) UpdateFileSystem(request *dto.UpdateFileSystemRequest, ctx context.Context) error { + // Implement file system update for Kubernetes pods + return nil +} + +func (w *KubernetesPodWorkload) GetFileContent(path string, writer http.ResponseWriter, humanReadable bool, ctx context.Context) error { + // Implement file content retrieval for Kubernetes pods + return dto.ErrNotSupported +} + +func (w 
*KubernetesPodWorkload) Destroy(_ DestroyReason) error { + w.cancel() + err := w.clientset.CoreV1().Pods(w.namespace).Delete(context.Background(), w.podName, metav1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("error while destroying kubernetes pod: %w", err) + } + if err := w.onDestroy(w); err != nil { + return fmt.Errorf("error while destroying kubernetes runner: %w", err) + } + return nil +} + +func (w *KubernetesPodWorkload) executeCommand(ctx context.Context, command string, + stdout, stderr io.Writer, exit chan<- ExitInfo, +) { + defer close(exit) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: w.podName, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "workload", + Image: w.environment.Image(), + Command: []string{"/bin/sh", "-c", command}, + }, + }, + }, + } + + _, err := w.clientset.CoreV1().Pods(w.namespace).Create(ctx, pod, metav1.CreateOptions{}) + if err != nil { + exit <- ExitInfo{1, fmt.Errorf("%w: %v", ErrPodCreationFailed, err)} + return + } + + req := w.clientset.CoreV1().Pods(w.namespace).GetLogs(w.podName, &corev1.PodLogOptions{ + Follow: true, + }) + podLogs, err := req.Stream(ctx) + if err != nil { + exit <- ExitInfo{1, fmt.Errorf("error in opening stream: %v", err)} + return + } + defer func(podLogs io.ReadCloser) { + err := podLogs.Close() + if err != nil { + exit <- ExitInfo{1, fmt.Errorf("error in closing stream: %v", err)} + } + }(podLogs) + + _, err = io.Copy(stdout, podLogs) + if err != nil { + exit <- ExitInfo{1, fmt.Errorf("error in copying logs: %v", err)} + return + } + + // Wait for the pod to complete + watch, err := w.clientset.CoreV1().Pods(w.namespace).Watch(ctx, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s", w.podName), + }) + if err != nil { + exit <- ExitInfo{1, fmt.Errorf("error watching pod: %v", err)} + return + } + defer watch.Stop() + + for event := range watch.ResultChan() { + pod, ok := 
event.Object.(*corev1.Pod) + if !ok { + continue + } + if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed { + exitCode := uint8(0) + if pod.Status.Phase == corev1.PodFailed { + exitCode = 1 + } + exit <- ExitInfo{exitCode, nil} + return + } + } +} + +func (w *KubernetesPodWorkload) handleRunnerTimeout(ctx context.Context, + exitInternal <-chan ExitInfo, exit chan<- ExitInfo, executionID string) { + executionCtx, cancelExecution := context.WithCancel(ctx) + w.runningExecutions[executionID] = cancelExecution + defer delete(w.runningExecutions, executionID) + defer close(exit) + + select { + case exitInfo := <-exitInternal: + exit <- exitInfo + case <-executionCtx.Done(): + exit <- ExitInfo{255, ErrorRunnerInactivityTimeout} + } +} + +// hideEnvironmentVariables sets the CODEOCEAN variable and unsets all variables starting with the passed prefix. +func hideEnvironmentVariablesK8s(request *dto.ExecutionRequest, unsetPrefix string) { + if request.Environment == nil { + request.Environment = make(map[string]string) + } + request.Command = "unset \"${!" + unsetPrefix + "@}\" && " + request.Command +} + +func prepareExecution(request *dto.ExecutionRequest, environmentCtx context.Context) ( + command string, ctx context.Context, cancel context.CancelFunc, +) { + command = request.FullCommand() + if request.TimeLimit == 0 { + ctx, cancel = context.WithCancel(environmentCtx) + } else { + ctx, cancel = context.WithTimeout(environmentCtx, time.Duration(request.TimeLimit)*time.Second) + } + return command, ctx, cancel +} diff --git a/internal/runner/manager.go b/internal/runner/manager.go new file mode 100644 index 0000000..76c5191 --- /dev/null +++ b/internal/runner/manager.go @@ -0,0 +1,54 @@ +package runner + +import "github.com/openHPI/poseidon/pkg/dto" + +// Manager keeps track of the used and unused runners of all execution environments in order to provide unused +// runners to new clients and ensure no runner is used twice. 
+type Manager interface { + EnvironmentAccessor + AccessorHandler +} + +// EnvironmentAccessor provides access to the stored environments. +type EnvironmentAccessor interface { + // ListEnvironments returns all execution environments known by Poseidon. + ListEnvironments() []ExecutionEnvironment + + // GetEnvironment returns the details of the requested environment. + // Iff the requested environment is not stored it returns false. + GetEnvironment(id dto.EnvironmentID) (ExecutionEnvironment, bool) + + // StoreEnvironment stores the environment in Poseidon's memory. + StoreEnvironment(environment ExecutionEnvironment) + + // DeleteEnvironment removes the specified execution environment in Poseidon's memory. + // It does nothing if the specified environment can not be found. + DeleteEnvironment(id dto.EnvironmentID) + + // EnvironmentStatistics returns statistical data for each execution environment. + EnvironmentStatistics() map[dto.EnvironmentID]*dto.StatisticalExecutionEnvironmentData +} + +// AccessorHandler is one handler in the chain of responsibility of runner accessors. +// Each runner accessor can handle different requests. +type AccessorHandler interface { + Accessor + SetNextHandler(m AccessorHandler) + NextHandler() AccessorHandler + HasNextHandler() bool +} + +// Accessor manages the lifecycle of Runner. +type Accessor interface { + // Claim returns a new runner. The runner is deleted after duration seconds if duration is not 0. + // It makes sure that the runner is not in use yet and returns an error if no runner could be provided. + Claim(id dto.EnvironmentID, duration int) (Runner, error) + + // Get returns the used runner with the given runnerId. + // If no runner with the given runnerId is currently used, it returns an error. + Get(runnerID string) (Runner, error) + + // Return signals that the runner is no longer used by the caller and can be claimed by someone else. + // The runner is deleted or cleaned up for reuse depending on the used executor. 
+ Return(r Runner) error +} diff --git a/internal/runner/runner.go b/internal/runner/runner.go new file mode 100644 index 0000000..4504657 --- /dev/null +++ b/internal/runner/runner.go @@ -0,0 +1,91 @@ +package runner + +import ( + "context" + "github.com/influxdata/influxdb-client-go/v2/api/write" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/openHPI/poseidon/pkg/monitoring" + "github.com/openHPI/poseidon/pkg/storage" + "io" + "net/http" +) + +type ExitInfo struct { + Code uint8 + Err error +} + +const ( + // runnerContextKey is the key used to store runners in context.Context. + runnerContextKey dto.ContextKey = "runner" +) + +type DestroyRunnerHandler = func(r Runner) error + +// DestroyReason specifies errors that are expected as reason for destroying a runner. +type DestroyReason error + +type Runner interface { + InactivityTimer + + // ID returns the id of the runner. + ID() string + + // Environment returns the id of the Environment to which the Runner belongs. + Environment() dto.EnvironmentID + + // MappedPorts returns the mapped ports of the runner. + MappedPorts() []*dto.MappedPort + + // StoreExecution adds a new execution to the runner that can then be executed using ExecuteInteractively. + StoreExecution(id string, executionRequest *dto.ExecutionRequest) + + // ExecutionExists returns whether the execution with the given id is already stored. + ExecutionExists(id string) bool + + // ExecuteInteractively runs the given execution request and forwards from and to the given reader and writers. + // An ExitInfo is sent to the exit channel on command completion. + // Output from the runner is forwarded immediately. + ExecuteInteractively( + id string, + stdin io.ReadWriter, + stdout, + stderr io.Writer, + ctx context.Context, + ) (exit <-chan ExitInfo, cancel context.CancelFunc, err error) + + // ListFileSystem streams the listing of the file system of the requested directory into the Writer provided. 
+ // The result is streamed via the io.Writer in order to not overload the memory with user input. + ListFileSystem(path string, recursive bool, result io.Writer, privilegedExecution bool, ctx context.Context) error + + // UpdateFileSystem processes a dto.UpdateFileSystemRequest by first deleting each given dto.FilePath recursively + // and then copying each given dto.File to the runner. + UpdateFileSystem(request *dto.UpdateFileSystemRequest, ctx context.Context) error + + // GetFileContent streams the file content at the requested path into the Writer provided at content. + // The result is streamed via the io.Writer in order to not overload the memory with user input. + GetFileContent(path string, content http.ResponseWriter, privilegedExecution bool, ctx context.Context) error + + // Destroy destroys the Runner in Nomad. + // Depending on the reason special cases of the Destruction will be handled. + Destroy(reason DestroyReason) error +} + +// NewContext creates a context containing a runner. +func NewContext(ctx context.Context, runner Runner) context.Context { + return context.WithValue(ctx, runnerContextKey, runner) +} + +// FromContext returns a runner from a context. +func FromContext(ctx context.Context) (Runner, bool) { + runner, ok := ctx.Value(runnerContextKey).(Runner) + return runner, ok +} + +// monitorExecutionsRunnerID passes the id of the runner executing the execution into the monitoring Point p. 
+func monitorExecutionsRunnerID(env dto.EnvironmentID, runnerID string) storage.WriteCallback[*dto.ExecutionRequest] { + return func(p *write.Point, _ *dto.ExecutionRequest, _ storage.EventType) { + p.AddTag(monitoring.InfluxKeyEnvironmentID, env.ToString()) + p.AddTag(monitoring.InfluxKeyRunnerID, runnerID) + } +} diff --git a/pkg/dto/dto.go b/pkg/dto/dto.go new file mode 100644 index 0000000..a21a514 --- /dev/null +++ b/pkg/dto/dto.go @@ -0,0 +1,344 @@ +package dto + +import ( + "encoding/json" + "errors" + "fmt" + "path" + "strconv" + "strings" +) + +var ( + // UserAgentOut for outgoing requests (without libraries). The Git Hash will be replaced by main.go. + UserAgentOut = "Poseidon/" + UserAgentVCSPlaceholder + " Go-http-client/1.1" + UserAgentFiltered = "Poseidon/" + UserAgentVCSPlaceholder + " (" + UserAgentFilterTokenPlaceholder + ") Go-http-client/1.1" +) + +const ( + UserAgentVCSPlaceholder = "<7 Git Hash>" + UserAgentFilterTokenPlaceholder = "FilterToken" +) + +// RunnerRequest is the expected json structure of the request body for the ProvideRunner function. +type RunnerRequest struct { + ExecutionEnvironmentID int `json:"executionEnvironmentId"` + InactivityTimeout int `json:"inactivityTimeout"` +} + +// ExecutionRequest is the expected json structure of the request body for the ExecuteCommand function. +type ExecutionRequest struct { + Command string + PrivilegedExecution bool + TimeLimit int + Environment map[string]string +} + +// FullCommand joins the environment variables. +// It does not handle the TimeLimit or the PrivilegedExecution flag. 
+func (er *ExecutionRequest) FullCommand() string { + var command string + command += "env" + + if er.Environment == nil { + er.Environment = make(map[string]string) + } + er.Environment["CODEOCEAN"] = "true" + + for variable, value := range er.Environment { + command += fmt.Sprintf(" %s=%s", variable, value) + } + command += fmt.Sprintf(" %s", WrapBashCommand(er.Command)) + return command +} + +// BashEscapeCommand escapes the passed command and surrounds it with double-quotes. +// The escaping includes the characters ", \, $, ` (comma-separated) as they are the exceptional characters +// that still have a special meaning with double quotes. See the Bash Manual - Chapter Quoting. +// We only handle the dollar-character and the backquote because the %q format already escapes the other two. +func BashEscapeCommand(command string) string { + command = fmt.Sprintf("%q", command) + command = strings.ReplaceAll(command, "$", "\\$") + command = strings.ReplaceAll(command, "`", "\\`") + return command +} + +// WrapBashCommand escapes the passed command and wraps it into a new bash command. +func WrapBashCommand(command string) string { + return fmt.Sprintf("/bin/bash -c %s", BashEscapeCommand(command)) +} + +// EnvironmentID is an id of an environment. +type EnvironmentID int + +// NewEnvironmentID parses a string into an EnvironmentID. +func NewEnvironmentID(id string) (EnvironmentID, error) { + environment, err := strconv.Atoi(id) + return EnvironmentID(environment), err +} + +// ToString parses an EnvironmentID back to a string. +func (e EnvironmentID) ToString() string { + return strconv.Itoa(int(e)) +} + +// ExecutionEnvironmentData is the expected json structure of the response body +// for routes returning an execution environment. 
+type ExecutionEnvironmentData struct { + ExecutionEnvironmentRequest + ID int `json:"id"` +} + +// StatisticalExecutionEnvironmentData is the expected json structure of the response body +// for routes returning statistics about execution environments. +type StatisticalExecutionEnvironmentData struct { + ID int `json:"id"` + PrewarmingPoolSize uint `json:"prewarmingPoolSize"` + IdleRunners uint `json:"idleRunners"` + UsedRunners uint `json:"usedRunners"` +} + +// ExecutionEnvironmentRequest is the expected json structure of the request body +// for the create execution environment function. +type ExecutionEnvironmentRequest struct { + PrewarmingPoolSize uint `json:"prewarmingPoolSize"` + CPULimit uint `json:"cpuLimit"` + MemoryLimit uint `json:"memoryLimit"` + Image string `json:"image"` + NetworkAccess bool `json:"networkAccess"` + ExposedPorts []uint16 `json:"exposedPorts"` +} + +// MappedPort contains the mapping from exposed port inside the container to the host address +// outside the container. +type MappedPort struct { + ExposedPort uint `json:"exposedPort"` + HostAddress string `json:"hostAddress"` +} + +// RunnerResponse is the expected response when providing a runner. +type RunnerResponse struct { + ID string `json:"runnerId"` + MappedPorts []*MappedPort `json:"mappedPorts"` +} + +// ExecutionResponse is the expected response when creating an execution for a runner. +type ExecutionResponse struct { + WebSocketURL string `json:"websocketUrl"` +} + +// ListFileSystemResponse is the expected response when listing the file system. +type ListFileSystemResponse struct { + Files []FileHeader `json:"files"` +} + +// UpdateFileSystemRequest is the expected json structure of the request body for the update file system route. +type UpdateFileSystemRequest struct { + Delete []FilePath `json:"delete"` + Copy []File `json:"copy"` +} + +// FilePath specifies the path of a file and is part of the UpdateFileSystemRequest. 
+type FilePath string + +// EntryType specifies the type of the object (file/link/directory/...) +type EntryType string + +// These are the common entry types. You find others in the man pages `info ls`. +const ( + EntryTypeRegularFile EntryType = "-" + EntryTypeLink EntryType = "l" +) + +// FileHeader specifies the information provided for listing a File. +type FileHeader struct { + Name FilePath `json:"name"` + EntryType EntryType `json:"entryType"` + LinkTarget FilePath `json:"linkTarget,omitempty"` + Size int `json:"size"` + ModificationTime int `json:"modificationTime"` + Permissions string `json:"permissions"` + Owner string `json:"owner"` + Group string `json:"group"` +} + +// File is a DTO for transmitting file contents. It is part of the UpdateFileSystemRequest. +type File struct { + Path FilePath `json:"path"` + Content []byte `json:"content"` +} + +// Cleaned returns the cleaned path of the FilePath. +func (f FilePath) Cleaned() string { + return path.Clean(string(f)) +} + +// CleanedPath returns the cleaned path of the file. +func (f File) CleanedPath() string { + return f.Path.Cleaned() +} + +// IsDirectory returns true iff the path of the File ends with a /. +func (f File) IsDirectory() bool { + return strings.HasSuffix(string(f.Path), "/") +} + +// ByteContent returns the content of the File. If the File is a directory, the content will be empty. +func (f File) ByteContent() []byte { + if f.IsDirectory() { + return []byte("") + } else { + return f.Content + } +} + +// Formatter mirrors the available Formatters of logrus for configuration purposes. +type Formatter string + +const ( + FormatterText = "TextFormatter" + FormatterJSON = "JSONFormatter" +) + +// ContextKey is the type for keys in a request context that is used for passing data to the next handler. +type ContextKey string + +// Keys to reference information (for logging or monitoring). 
+const ( + KeyRunnerID = "runner_id" + KeyEnvironmentID = "environment_id" + KeyRunnerDestroyReason = "destroy_reason" +) + +// LoggedContextKeys defines which keys will be logged if a context is passed to logrus. See ContextHook. +var LoggedContextKeys = []ContextKey{KeyRunnerID, KeyEnvironmentID, KeyRunnerDestroyReason} + +// WebSocketMessageType is the type for the messages from Poseidon to the client. +type WebSocketMessageType string + +const ( + WebSocketOutputStdout WebSocketMessageType = "stdout" + WebSocketOutputStderr WebSocketMessageType = "stderr" + WebSocketOutputError WebSocketMessageType = "error" + WebSocketMetaStart WebSocketMessageType = "start" + WebSocketMetaTimeout WebSocketMessageType = "timeout" + WebSocketExit WebSocketMessageType = "exit" +) + +var ( + ErrUnknownWebSocketMessageType = errors.New("unknown WebSocket message type") + // ErrOOMKilled is the exact message that CodeOcean expects to further handle these specific cases. + ErrOOMKilled = errors.New("the allocation was OOM Killed") + ErrMissingType = errors.New("type is missing") + ErrMissingData = errors.New("data is missing") + ErrInvalidType = errors.New("invalid type") + ErrNotSupported = errors.New("not supported") +) + +// WebSocketMessage is the type for all messages send in the WebSocket to the client. +// Depending on the MessageType the Data or ExitCode might not be included in the marshaled json message. +type WebSocketMessage struct { + Type WebSocketMessageType + Data string + ExitCode uint8 +} + +// MarshalJSON implements the json.Marshaler interface. +// This converts the WebSocketMessage into the expected schema (see docs/websocket.schema.json). 
+func (m WebSocketMessage) MarshalJSON() (res []byte, err error) { + switch m.Type { + case WebSocketOutputStdout, WebSocketOutputStderr, WebSocketOutputError: + res, err = json.Marshal(struct { + MessageType WebSocketMessageType `json:"type"` + Data string `json:"data"` + }{m.Type, m.Data}) + case WebSocketMetaStart, WebSocketMetaTimeout: + res, err = json.Marshal(struct { + MessageType WebSocketMessageType `json:"type"` + }{m.Type}) + case WebSocketExit: + res, err = json.Marshal(struct { + MessageType WebSocketMessageType `json:"type"` + ExitCode uint8 `json:"data"` + }{m.Type, m.ExitCode}) + } + if err != nil { + return nil, fmt.Errorf("error marshaling WebSocketMessage: %w", err) + } else if res == nil { + return nil, ErrUnknownWebSocketMessageType + } + return res, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// It is used by tests in order to ReceiveNextWebSocketMessage. +func (m *WebSocketMessage) UnmarshalJSON(rawMessage []byte) error { + messageMap := make(map[string]interface{}) + err := json.Unmarshal(rawMessage, &messageMap) + if err != nil { + return fmt.Errorf("error unmarshiling raw WebSocket message: %w", err) + } + messageType, ok := messageMap["type"] + if !ok { + return ErrMissingType + } + messageTypeString, ok := messageType.(string) + if !ok { + return fmt.Errorf("value of key type must be a string: %w", ErrInvalidType) + } + switch messageType := WebSocketMessageType(messageTypeString); messageType { + case WebSocketExit: + data, ok := messageMap["data"] + if !ok { + return ErrMissingData + } + // json.Unmarshal converts any number to a float64 in the massageMap, so we must first cast it to the float. 
+ exit, ok := data.(float64) + if !ok { + return fmt.Errorf("value of key data must be a number: %w", ErrInvalidType) + } + if exit != float64(uint8(exit)) { + return fmt.Errorf("value of key data must be uint8: %w", ErrInvalidType) + } + m.Type = messageType + m.ExitCode = uint8(exit) + case WebSocketOutputStdout, WebSocketOutputStderr, WebSocketOutputError: + data, ok := messageMap["data"] + if !ok { + return ErrMissingData + } + text, ok := data.(string) + if !ok { + return fmt.Errorf("value of key data must be a string: %w", ErrInvalidType) + } + m.Type = messageType + m.Data = text + case WebSocketMetaStart, WebSocketMetaTimeout: + m.Type = messageType + default: + return ErrUnknownWebSocketMessageType + } + return nil +} + +// ClientError is the response interface if the request is not valid. +type ClientError struct { + Message string `json:"message"` +} + +// InternalServerError is the response interface that is returned when an error occurs. +type InternalServerError struct { + Message string `json:"message"` + ErrorCode ErrorCode `json:"errorCode"` +} + +// ErrorCode is the type for error codes expected by CodeOcean. +type ErrorCode string + +const ( + Errork8sUnreachable ErrorCode = "k8s_UNREACHABLE" + Errork8sOverload ErrorCode = "k8s_OVERLOAD" + Errork8sInternalServerError ErrorCode = "k8s_INTERNAL_SERVER_ERROR" + PrewarmingPoolDepleting ErrorCode = "PREWARMING_POOL_DEPLETING" + ErrorUnknown ErrorCode = "UNKNOWN" +) diff --git a/pkg/logging/context_hook.go b/pkg/logging/context_hook.go new file mode 100644 index 0000000..bbd6a96 --- /dev/null +++ b/pkg/logging/context_hook.go @@ -0,0 +1,41 @@ +package logging + +import ( + "github.com/openHPI/poseidon/pkg/dto" + "github.com/sirupsen/logrus" +) + +// ContextHook logs the values referenced by the of dto.LoggedContextKeys. +// By default Logrus does not log the values stored in the passed context. +type ContextHook struct{} + +// Fire is triggered on new log entries. 
+func (hook *ContextHook) Fire(entry *logrus.Entry) error { + if entry.Context != nil { + injectContextValuesIntoData(entry) + } + return nil +} + +func injectContextValuesIntoData(entry *logrus.Entry) { + for _, key := range dto.LoggedContextKeys { + value := entry.Context.Value(key) + _, valueExisting := entry.Data[string(key)] + if !valueExisting && value != nil { + entry.Data[string(key)] = value + } + } +} + +// Levels returns all levels this hook should be registered to. +func (hook *ContextHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + logrus.TraceLevel, + } +} diff --git a/pkg/logging/logging.go b/pkg/logging/logging.go new file mode 100644 index 0000000..821b664 --- /dev/null +++ b/pkg/logging/logging.go @@ -0,0 +1,109 @@ +package logging + +import ( + "bufio" + "fmt" + "github.com/getsentry/sentry-go" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/sirupsen/logrus" + "net" + "net/http" + "os" + "strings" + "time" +) + +const TimestampFormat = "2006-01-02T15:04:05.000000Z" + +var log = &logrus.Logger{ + Out: os.Stderr, + Formatter: &logrus.TextFormatter{ + TimestampFormat: TimestampFormat, + DisableColors: true, + FullTimestamp: true, + }, + Hooks: make(logrus.LevelHooks), + Level: logrus.InfoLevel, +} + +const GracefulSentryShutdown = 5 * time.Second + +func InitializeLogging(logLevel string, formatter dto.Formatter) { + level, err := logrus.ParseLevel(logLevel) + if err != nil { + log.WithError(err).Fatal("Error parsing loglevel") + return + } + log.SetLevel(level) + if formatter == dto.FormatterJSON { + log.Formatter = &logrus.JSONFormatter{ + TimestampFormat: TimestampFormat, + } + } + log.AddHook(&ContextHook{}) + log.AddHook(&SentryHook{}) + log.ExitFunc = func(i int) { + sentry.Flush(GracefulSentryShutdown) + os.Exit(i) + } +} + +func GetLogger(pkg string) *logrus.Entry { + return log.WithField("package", pkg) 
+} + +// ResponseWriter wraps the default http.ResponseWriter and catches the status code +// that is written. +type ResponseWriter struct { + http.ResponseWriter + StatusCode int +} + +func NewLoggingResponseWriter(w http.ResponseWriter) *ResponseWriter { + return &ResponseWriter{w, http.StatusOK} +} + +func (writer *ResponseWriter) WriteHeader(code int) { + writer.StatusCode = code + writer.ResponseWriter.WriteHeader(code) +} + +func (writer *ResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + conn, rw, err := writer.ResponseWriter.(http.Hijacker).Hijack() + if err != nil { + return conn, nil, fmt.Errorf("hijacking connection failed: %w", err) + } + return conn, rw, nil +} + +// HTTPLoggingMiddleware returns a http.Handler that logs different information about every request. +func HTTPLoggingMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + start := time.Now().UTC() + path := RemoveNewlineSymbol(r.URL.Path) + + lrw := NewLoggingResponseWriter(w) + next.ServeHTTP(lrw, r) + + latency := time.Now().UTC().Sub(start) + logEntry := log.WithContext(r.Context()).WithFields(logrus.Fields{ + "code": lrw.StatusCode, + "method": r.Method, + "path": path, + "duration": latency, + "user_agent": RemoveNewlineSymbol(r.UserAgent()), + }) + if r.UserAgent() == dto.UserAgentFiltered { + logEntry.Trace() + } else { + logEntry.Debug() + } + }) +} + +// RemoveNewlineSymbol GOOD: remove newlines from user controlled input before logging. 
+func RemoveNewlineSymbol(data string) string { + data = strings.ReplaceAll(data, "\r", "") + data = strings.ReplaceAll(data, "\n", "") + return data +} diff --git a/pkg/logging/sentry_hook.go b/pkg/logging/sentry_hook.go new file mode 100644 index 0000000..315dc53 --- /dev/null +++ b/pkg/logging/sentry_hook.go @@ -0,0 +1,67 @@ +package logging + +import ( + "context" + "errors" + "github.com/getsentry/sentry-go" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/sirupsen/logrus" +) + +// SentryHook is a simple adapter that converts logrus entries into Sentry events. +// Consider replacing this with a more feature rich, additional dependency: https://github.com/evalphobia/logrus_sentry +type SentryHook struct{} + +var ErrorHubInvalid = errors.New("the hub is invalid") + +// Fire is triggered on new log entries. +func (hook *SentryHook) Fire(entry *logrus.Entry) error { + var hub *sentry.Hub + if entry.Context != nil { + hub = sentry.GetHubFromContext(entry.Context) + injectContextValuesIntoData(entry) + } + if hub == nil { + hub = sentry.CurrentHub() + } + client, scope := hub.Client(), hub.Scope() + if client == nil || scope == nil { + return ErrorHubInvalid + } + + scope.SetContext("Poseidon Details", entry.Data) + if runnerID, ok := entry.Data[dto.KeyRunnerID].(string); ok { + scope.SetTag(dto.KeyRunnerID, runnerID) + } + if environmentID, ok := entry.Data[dto.KeyEnvironmentID].(string); ok { + scope.SetTag(dto.KeyEnvironmentID, environmentID) + } + + event := client.EventFromMessage(entry.Message, sentry.Level(entry.Level.String())) + event.Timestamp = entry.Time + if data, ok := entry.Data["error"]; ok { + err, ok := data.(error) + if ok { + entry.Data["error"] = err.Error() + } + } + hub.CaptureEvent(event) + return nil +} + +// Levels returns all levels this hook should be registered to. 
+func (hook *SentryHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + } +} + +func StartSpan(op, description string, ctx context.Context, callback func(context.Context)) { + span := sentry.StartSpan(ctx, op) + span.Description = description + defer span.Finish() + callback(span.Context()) +} diff --git a/pkg/monitoring/influxdb2_middleware.go b/pkg/monitoring/influxdb2_middleware.go new file mode 100644 index 0000000..b6113ba --- /dev/null +++ b/pkg/monitoring/influxdb2_middleware.go @@ -0,0 +1,194 @@ +package monitoring + +import ( + "bytes" + "context" + "github.com/gorilla/mux" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" + influxdb2API "github.com/influxdata/influxdb-client-go/v2/api" + http2 "github.com/influxdata/influxdb-client-go/v2/api/http" + "github.com/influxdata/influxdb-client-go/v2/api/write" + "github.com/openHPI/poseidon/internal/config" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/openHPI/poseidon/pkg/logging" + "io" + "net/http" + "strconv" + "time" +) + +const ( + // influxdbContextKey is a key (runner.ContextKey) to reference the influxdb data point in the request context. + influxdbContextKey dto.ContextKey = "influxdb data point" + // measurementPrefix allows easier filtering in influxdb. + measurementPrefix = "poseidon_" + measurementPoolSize = measurementPrefix + "poolsize" + + MeasurementExecutionsAWS = measurementPrefix + "aws_executions" + MeasurementExecutionsK8s = measurementPrefix + "k8s_executions" + MeasurementEnvironments = measurementPrefix + "environments" + MeasurementUsedRunner = measurementPrefix + "used_runners" + + // The keys for the monitored tags and fields. 
+ + InfluxKeyRunnerID = dto.KeyRunnerID + InfluxKeyEnvironmentID = dto.KeyEnvironmentID + InfluxKeyActualContentLength = "actual_length" + InfluxKeyExpectedContentLength = "expected_length" + InfluxKeyDuration = "duration" + influxKeyEnvironmentPrewarmingPoolSize = "prewarming_pool_size" + influxKeyRequestSize = "request_size" +) + +var ( + log = logging.GetLogger("monitoring") + influxClient influxdb2API.WriteAPI +) + +func InitializeInfluxDB(db *config.InfluxDB) (cancel func()) { + if db.URL == "" { + return func() {} + } + + // How often to retry to write data. + const maxRetries = 50 + // How long to wait before retrying to write data. + const retryInterval = 5 * time.Second + // How old the data can be before we stop retrying to write it. Should be larger than maxRetries * retryInterval. + const retryExpire = 10 * time.Minute + // How many batches are buffered before dropping the oldest. + const retryBufferLimit = 100_000 + + // Set options for retrying with the influx client. + options := influxdb2.DefaultOptions() + options.SetRetryInterval(uint(retryInterval.Milliseconds())) + options.SetMaxRetries(maxRetries) + options.SetMaxRetryTime(uint(retryExpire.Milliseconds())) + options.SetRetryBufferLimit(retryBufferLimit) + + // Create a new influx client. + client := influxdb2.NewClientWithOptions(db.URL, db.Token, options) + influxClient = client.WriteAPI(db.Organization, db.Bucket) + influxClient.SetWriteFailedCallback(func(_ string, error http2.Error, retryAttempts uint) bool { + log.WithError(&error).WithField("retryAttempts", retryAttempts).Trace("Retrying to write influx data...") + + // retryAttempts means number of retries, 0 if it failed during first write. + if retryAttempts == options.MaxRetries() { + log.WithError(&error).Warn("Could not write influx data.") + return false // Disable retry. We failed to retry writing the data in time. + } + return true // Enable retry (default) + }) + + // Flush the influx client on shutdown. 
+ cancel = func() { + influxClient.Flush() + influxClient = nil + client.Close() + } + return cancel +} + +// InfluxDB2Middleware is a middleware to send events to an influx database. +func InfluxDB2Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + route := mux.CurrentRoute(r).GetName() + p := influxdb2.NewPointWithMeasurement(measurementPrefix + route) + + start := time.Now().UTC() + p.SetTime(time.Now()) + + ctx := context.WithValue(r.Context(), influxdbContextKey, p) + requestWithPoint := r.WithContext(ctx) + lrw := logging.NewLoggingResponseWriter(w) + next.ServeHTTP(lrw, requestWithPoint) + + p.AddField(InfluxKeyDuration, time.Now().UTC().Sub(start).Nanoseconds()) + p.AddTag("status", strconv.Itoa(lrw.StatusCode)) + + WriteInfluxPoint(p) + }) +} + +// AddRunnerMonitoringData adds the data of the runner we want to monitor. +func AddRunnerMonitoringData(request *http.Request, runnerID string, environmentID dto.EnvironmentID) { + addRunnerID(request, runnerID) + addEnvironmentID(request, environmentID) +} + +// addRunnerID adds the runner id to the influx data point for the current request. +func addRunnerID(r *http.Request, id string) { + addInfluxDBTag(r, InfluxKeyRunnerID, id) +} + +// addEnvironmentID adds the environment id to the influx data point for the current request. +func addEnvironmentID(r *http.Request, id dto.EnvironmentID) { + addInfluxDBTag(r, InfluxKeyEnvironmentID, id.ToString()) +} + +// AddRequestSize adds the size of the request body to the influx data point for the current request. 
+func AddRequestSize(r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + log.WithContext(r.Context()).WithError(err).Debug("Failed to read request body") + return + } + + err = r.Body.Close() + if err != nil { + log.WithContext(r.Context()).WithError(err).Debug("Failed to close request body") + return + } + r.Body = io.NopCloser(bytes.NewBuffer(body)) + + addInfluxDBField(r, influxKeyRequestSize, len(body)) +} + +func ChangedPrewarmingPoolSize(id dto.EnvironmentID, count uint) { + p := influxdb2.NewPointWithMeasurement(measurementPoolSize) + + p.AddTag(InfluxKeyEnvironmentID, id.ToString()) + p.AddField(influxKeyEnvironmentPrewarmingPoolSize, count) + + WriteInfluxPoint(p) +} + +// WriteInfluxPoint schedules the influx data point to be sent. +func WriteInfluxPoint(p *write.Point) { + if influxClient != nil { + p.AddTag("stage", config.Config.InfluxDB.Stage) + // We identified that the influxClient is not truly asynchronous. See #541. + go func() { influxClient.WritePoint(p) }() + } else { + entry := log.WithField("name", p.Name()) + for _, tag := range p.TagList() { + if tag.Key == "event_type" && tag.Value == "periodically" { + return + } + entry = entry.WithField(tag.Key, tag.Value) + } + for _, field := range p.FieldList() { + entry = entry.WithField(field.Key, field.Value) + } + entry.Trace("Influx data point") + } +} + +// addInfluxDBTag adds a tag to the influxdb data point in the request. +func addInfluxDBTag(r *http.Request, key, value string) { + dataPointFromRequest(r).AddTag(key, value) +} + +// addInfluxDBField adds a field to the influxdb data point in the request. +func addInfluxDBField(r *http.Request, key string, value interface{}) { + dataPointFromRequest(r).AddField(key, value) +} + +// dataPointFromRequest returns the data point in the passed request. 
+func dataPointFromRequest(r *http.Request) *write.Point { + p, ok := r.Context().Value(influxdbContextKey).(*write.Point) + if !ok { + log.WithContext(r.Context()).Error("All http request must contain an influxdb data point!") + } + return p +} diff --git a/pkg/nullio/content_length.go b/pkg/nullio/content_length.go new file mode 100644 index 0000000..93ec5ca --- /dev/null +++ b/pkg/nullio/content_length.go @@ -0,0 +1,84 @@ +package nullio + +import ( + "errors" + "fmt" + "github.com/influxdata/influxdb-client-go/v2/api/write" + "github.com/openHPI/poseidon/pkg/monitoring" + "net/http" + "strconv" +) + +var ErrRegexMatching = errors.New("could not match content length") + +// ContentLengthWriter implements io.Writer. +// It parses the size from the first line as Content Length Header and streams the following data to the Target. +// The first line is expected to follow the format headerLineRegex. +type ContentLengthWriter struct { + Target http.ResponseWriter + contentLengthSet bool + firstLine []byte + expectedContentLength int + actualContentLength int +} + +func (w *ContentLengthWriter) Write(p []byte) (count int, err error) { + if w.contentLengthSet { + return w.handleDataForwarding(p) + } else { + return w.handleContentLengthParsing(p) + } +} + +func (w *ContentLengthWriter) handleDataForwarding(p []byte) (int, error) { + count, err := w.Target.Write(p) + if err != nil { + err = fmt.Errorf("could not write to target: %w", err) + } + w.actualContentLength += count + return count, err +} + +func (w *ContentLengthWriter) handleContentLengthParsing(p []byte) (count int, err error) { + for i, char := range p { + if char != '\n' { + continue + } + + w.firstLine = append(w.firstLine, p[:i]...) 
+ matches := headerLineRegex.FindSubmatch(w.firstLine) + if len(matches) < headerLineGroupName { + log.WithField("line", string(w.firstLine)).Error(ErrRegexMatching.Error()) + return 0, ErrRegexMatching + } + size := string(matches[headerLineGroupSize]) + w.expectedContentLength, err = strconv.Atoi(size) + if err != nil { + log.WithField("size", size).Warn("could not parse content length") + } + w.Target.Header().Set("Content-Length", size) + w.contentLengthSet = true + + if i < len(p)-1 { + count, err = w.Target.Write(p[i+1:]) + if err != nil { + err = fmt.Errorf("could not write to target: %w", err) + } + } + + return len(p[:i]) + 1 + count, err + } + + if !w.contentLengthSet { + w.firstLine = append(w.firstLine, p...) + } + + return len(p), nil +} + +// SendMonitoringData will send a monitoring event of the content length read and written. +func (w *ContentLengthWriter) SendMonitoringData(p *write.Point) { + p.AddField(monitoring.InfluxKeyExpectedContentLength, w.expectedContentLength) + p.AddField(monitoring.InfluxKeyActualContentLength, w.actualContentLength) + monitoring.WriteInfluxPoint(p) +} diff --git a/pkg/nullio/ls2json.go b/pkg/nullio/ls2json.go new file mode 100644 index 0000000..45394fa --- /dev/null +++ b/pkg/nullio/ls2json.go @@ -0,0 +1,184 @@ +package nullio + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/getsentry/sentry-go" + "github.com/openHPI/poseidon/pkg/dto" + "github.com/openHPI/poseidon/pkg/logging" + "io" + "regexp" + "strconv" + "strings" +) + +var ( + log = logging.GetLogger("nullio") + pathLineRegex = regexp.MustCompile(`(.*):$`) + headerLineRegex = regexp. + MustCompile(`([-aAbcCdDlMnpPsw?])([-rwxXsStT]{9})(\+?) +\d+ +(.+?) +(.+?) 
+(\d+) +(\d+) +(.*)$`) +) + +const ( + headerLineGroupEntryType = 1 + headerLineGroupPermissions = 2 + headerLineGroupACL = 3 + headerLineGroupOwner = 4 + headerLineGroupGroup = 5 + headerLineGroupSize = 6 + headerLineGroupTimestamp = 7 + headerLineGroupName = 8 +) + +// Ls2JsonWriter implements io.Writer. +// It streams the passed data to the Target and transforms the data into the json format. +type Ls2JsonWriter struct { + Target io.Writer + Ctx context.Context + jsonStartSent bool + setCommaPrefix bool + remaining []byte + latestPath []byte + sentrySpan *sentry.Span +} + +func (w *Ls2JsonWriter) HasStartedWriting() bool { + return w.jsonStartSent +} + +func (w *Ls2JsonWriter) Write(p []byte) (int, error) { + i, err := w.initializeJSONObject() + if err != nil { + return i, err + } + + start := 0 + for i, char := range p { + if char != '\n' { + continue + } + + line := p[start:i] + if len(w.remaining) > 0 { + line = append(w.remaining, line...) + w.remaining = []byte("") + } + + if len(line) != 0 { + count, err := w.writeLine(line) + if err != nil { + log.WithContext(w.Ctx).WithError(err).Warn("Could not write line to Target") + return count, err + } + } + start = i + 1 + } + + if start < len(p) { + w.remaining = p[start:] + } + + return len(p), nil +} + +func (w *Ls2JsonWriter) initializeJSONObject() (count int, err error) { + if !w.jsonStartSent { + count, err = w.Target.Write([]byte("{\"files\": [")) + if count == 0 || err != nil { + log.WithContext(w.Ctx).WithError(err).Warn("Could not write to target") + err = fmt.Errorf("could not write to target: %w", err) + } else { + w.jsonStartSent = true + w.sentrySpan = sentry.StartSpan(w.Ctx, "nullio.init") + w.sentrySpan.Description = "Forwarding" + } + } + return count, err +} + +func (w *Ls2JsonWriter) Close() { + if w.jsonStartSent { + count, err := w.Target.Write([]byte("]}")) + if count == 0 || err != nil { + log.WithContext(w.Ctx).WithError(err).Warn("Could not Close ls2json writer") + } + 
w.sentrySpan.Finish() + } +} + +func (w *Ls2JsonWriter) writeLine(line []byte) (count int, err error) { + matches := pathLineRegex.FindSubmatch(line) + if matches != nil { + w.latestPath = append(bytes.TrimSuffix(matches[1], []byte("/")), '/') + return 0, nil + } + + matches = headerLineRegex.FindSubmatch(line) + if matches != nil { + response, err1 := w.parseFileHeader(matches) + if err1 != nil { + return 0, err1 + } + + // Skip the first leading comma + if w.setCommaPrefix { + response = append([]byte{','}, response...) + } else { + w.setCommaPrefix = true + } + + count, err1 = w.Target.Write(response) + if err1 != nil { + err = fmt.Errorf("could not write to target: %w", err1) + } else if count == len(response) { + count = len(line) + } + } + + return count, err +} + +func (w *Ls2JsonWriter) parseFileHeader(matches [][]byte) ([]byte, error) { + entryType := dto.EntryType(matches[headerLineGroupEntryType][0]) + permissions := string(matches[headerLineGroupPermissions]) + acl := string(matches[headerLineGroupACL]) + if acl == "+" { + permissions += "+" + } + + size, err1 := strconv.Atoi(string(matches[headerLineGroupSize])) + timestamp, err2 := strconv.Atoi(string(matches[headerLineGroupTimestamp])) + if err1 != nil || err2 != nil { + return nil, fmt.Errorf("could not parse file details: %w %+v", err1, err2) + } + + name := dto.FilePath(append(w.latestPath, matches[headerLineGroupName]...)) + linkTarget := dto.FilePath("") + if entryType == dto.EntryTypeLink { + parts := strings.Split(string(name), " -> ") + const NumberOfPartsInALink = 2 + if len(parts) == NumberOfPartsInALink { + name = dto.FilePath(parts[0]) + linkTarget = dto.FilePath(parts[1]) + } else { + log.WithContext(w.Ctx).Error("could not split link into name and target") + } + } + + response, err := json.Marshal(dto.FileHeader{ + Name: name, + EntryType: entryType, + LinkTarget: linkTarget, + Size: size, + ModificationTime: timestamp, + Permissions: permissions, + Owner: 
string(matches[headerLineGroupOwner]), + Group: string(matches[headerLineGroupGroup]), + }) + if err != nil { + return nil, fmt.Errorf("could not marshal file header: %w", err) + } + return response, nil +} diff --git a/pkg/nullio/nullio.go b/pkg/nullio/nullio.go new file mode 100644 index 0000000..ad170df --- /dev/null +++ b/pkg/nullio/nullio.go @@ -0,0 +1,43 @@ +package nullio + +import ( + "context" + "fmt" + "io" +) + +// Reader is a struct that implements the io.Reader interface. +// Read does not return when called until the context is done. It is used to avoid reading anything and returning io.EOF +// before the context has finished. +// For example the reader is used by the execution that fetches the stderr stream from Nomad. We do not have a stdin +// that we want to send to Nomad. But we still have to pass Nomad a reader. +// Normally readers send an io.EOF as soon as they have nothing more to read. But we want to avoid this, because in that +// case Nomad will abort (not the execution but) the transmission. +// Why should the reader not just always return 0, nil? Because Nomad reads in an endless loop and thus a busy waiting +// is avoided. +type Reader struct { + Ctx context.Context +} + +func (r Reader) Read(_ []byte) (int, error) { + if r.Ctx == nil || r.Ctx.Err() != nil { + return 0, io.EOF + } + + <-r.Ctx.Done() + return 0, io.EOF +} + +// ReadWriter implements io.ReadWriter. It does not return from Read and discards everything on Write. 
+type ReadWriter struct { + Reader +} + +func (rw *ReadWriter) Write(p []byte) (int, error) { + n, err := io.Discard.Write(p) + if err != nil { + return n, fmt.Errorf("error writing to io.Discard: %w", err) + } else { + return n, nil + } +} diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go new file mode 100644 index 0000000..36792e5 --- /dev/null +++ b/pkg/storage/storage.go @@ -0,0 +1,185 @@ +package storage + +import ( + "context" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" + "github.com/influxdata/influxdb-client-go/v2/api/write" + "github.com/openHPI/poseidon/pkg/monitoring" + "sync" + "time" +) + +// Storage is an interface for storing objects. +type Storage[T any] interface { + // List returns all objects from the storage. + List() []T + + // Add adds an object to the storage. + // It overwrites the old object if one with the same id was already stored. + Add(id string, o T) + + // Get returns an object from the storage. + // Iff the object does not exist in the storage, ok will be false. + Get(id string) (o T, ok bool) + + // Delete deletes the object with the passed id from the storage. + // It does nothing if no object with the id is present in the store. + Delete(id string) + + // Pop deletes the object with the given id from the storage and returns it. + // Iff no such execution exists, ok is false and true otherwise. + Pop(id string) (o T, ok bool) + + // Purge removes all objects from the storage. + Purge() + + // Length returns the number of currently stored objects in the storage. + Length() uint + + // Sample returns and removes an arbitrary object from the storage. + // ok is true iff an object was returned. + Sample() (o T, ok bool) +} + +// EventType is an enum type to declare the different causes of a monitoring event. 
+type EventType string + +const ( + Creation EventType = "creation" + Deletion EventType = "deletion" + Periodically EventType = "periodically" +) + +// WriteCallback is called before an event gets monitored. +// Iff eventType is Periodically it is no object provided. +type WriteCallback[T any] func(p *write.Point, object T, eventType EventType) + +// localStorage stores objects in the local application memory. +type localStorage[T any] struct { + sync.RWMutex + objects map[string]T + measurement string + callback WriteCallback[T] +} + +// NewLocalStorage responds with a Storage implementation. +// This implementation stores the data thread-safe in the local application memory. +func NewLocalStorage[T any]() *localStorage[T] { + return &localStorage[T]{ + objects: make(map[string]T), + } +} + +// NewMonitoredLocalStorage responds with a Storage implementation. +// All write operations are monitored in the passed measurement. +// Iff callback is set, it will be called on a write operation. +// Iff additionalEvents not zero, the duration will be used to periodically send additional monitoring events. 
+func NewMonitoredLocalStorage[T any]( + measurement string, callback WriteCallback[T], additionalEvents time.Duration, ctx context.Context) *localStorage[T] { + s := &localStorage[T]{ + objects: make(map[string]T), + measurement: measurement, + callback: callback, + } + if additionalEvents != 0 { + go s.periodicallySendMonitoringData(additionalEvents, ctx) + } + return s +} + +func (s *localStorage[T]) List() (o []T) { + s.RLock() + defer s.RUnlock() + for _, value := range s.objects { + o = append(o, value) + } + return o +} + +func (s *localStorage[T]) Add(id string, o T) { + s.Lock() + defer s.Unlock() + s.objects[id] = o + s.sendMonitoringData(id, o, Creation, s.unsafeLength()) +} + +func (s *localStorage[T]) Get(id string) (o T, ok bool) { + s.RLock() + defer s.RUnlock() + o, ok = s.objects[id] + return +} + +func (s *localStorage[T]) Delete(id string) { + s.Lock() + defer s.Unlock() + o, ok := s.objects[id] + if ok { + delete(s.objects, id) + s.sendMonitoringData(id, o, Deletion, s.unsafeLength()) + } +} + +func (s *localStorage[T]) Pop(id string) (T, bool) { + o, ok := s.Get(id) + s.Delete(id) + return o, ok +} + +func (s *localStorage[T]) Purge() { + s.Lock() + defer s.Unlock() + for key, object := range s.objects { + s.sendMonitoringData(key, object, Deletion, 0) + } + s.objects = make(map[string]T) +} + +func (s *localStorage[T]) Sample() (o T, ok bool) { + s.Lock() + defer s.Unlock() + for key, object := range s.objects { + delete(s.objects, key) + s.sendMonitoringData(key, object, Deletion, s.unsafeLength()) + return object, true + } + return o, false +} + +func (s *localStorage[T]) Length() uint { + s.RLock() + defer s.RUnlock() + return s.unsafeLength() +} + +func (s *localStorage[T]) unsafeLength() uint { + length := len(s.objects) + return uint(length) +} + +func (s *localStorage[T]) sendMonitoringData(id string, o T, eventType EventType, count uint) { + if s.measurement != "" { + p := influxdb2.NewPointWithMeasurement(s.measurement) + 
+		p.AddTag("id", id)
+		p.AddTag("event_type", string(eventType))
+		p.AddField("count", count)
+
+		// Give the caller-supplied hook a chance to enrich the point before it is written.
+		if s.callback != nil {
+			s.callback(p, o, eventType)
+		}
+
+		monitoring.WriteInfluxPoint(p)
+	}
+}
+
+// periodicallySendMonitoringData emits a Periodically event (with a zero-value
+// object and an empty id) every d until ctx is done.
+func (s *localStorage[T]) periodicallySendMonitoringData(d time.Duration, ctx context.Context) {
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-time.After(d):
+			stub := new(T)
+			s.sendMonitoringData("", *stub, Periodically, s.Length())
+		}
+	}
+}
diff --git a/pkg/util/merge_context.go b/pkg/util/merge_context.go
new file mode 100644
index 0000000..3413c12
--- /dev/null
+++ b/pkg/util/merge_context.go
+package util
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"time"
+)
+
+// mergeContext combines multiple contexts.
+type mergeContext struct {
+	contexts []context.Context
+}
+
+// NewMergeContext returns a context that wraps all the passed contexts.
+func NewMergeContext(contexts []context.Context) context.Context {
+	return mergeContext{contexts: contexts}
+}
+
+// Deadline returns the earliest Deadline of all contexts.
+func (m mergeContext) Deadline() (deadline time.Time, ok bool) {
+	for _, ctx := range m.contexts {
+		if anotherDeadline, anotherOk := ctx.Deadline(); anotherOk {
+			if ok && anotherDeadline.After(deadline) {
+				continue
+			}
+			deadline = anotherDeadline
+			ok = anotherOk
+		}
+	}
+	return deadline, ok
+}
+
+// Done notifies when the first context is done.
+// NOTE(review): each call spawns a goroutine that lives until any wrapped
+// context is done — confirm callers do not invoke Done in a hot loop.
+func (m mergeContext) Done() <-chan struct{} {
+	ch := make(chan struct{})
+	cases := make([]reflect.SelectCase, 0, len(m.contexts))
+	for _, ctx := range m.contexts {
+		cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ctx.Done())})
+	}
+	go func(cases []reflect.SelectCase, ch chan struct{}) {
+		// reflect.Select blocks until any of the Done channels fires or closes.
+		_, _, _ = reflect.Select(cases)
+		close(ch)
+	}(cases, ch)
+	return ch
+}
+
+// Err returns the error of any (random) context, and nil iff no context has an error.
+// Err returns the first non-nil context error found, wrapped for context; nil iff no context has an error.
+func (m mergeContext) Err() error {
+	for _, ctx := range m.contexts {
+		if ctx.Err() != nil {
+			return fmt.Errorf("mergeContext wrapped: %w", ctx.Err())
+		}
+	}
+	return nil
+}
+
+// Value returns the value for the key if any context has it.
+// If multiple contexts have a value for the key, the result is any (random) of them.
+func (m mergeContext) Value(key any) any {
+	for _, ctx := range m.contexts {
+		if value := ctx.Value(key); value != nil {
+			return value
+		}
+	}
+	return nil
+}
diff --git a/pkg/util/util.go b/pkg/util/util.go
new file mode 100644
index 0000000..32bf11f
--- /dev/null
+++ b/pkg/util/util.go
+package util
+
+import (
+	"context"
+	"errors"
+	"github.com/openHPI/poseidon/pkg/logging"
+	"time"
+)
+
+var (
+	log = logging.GetLogger("util")
+	// MaxConnectionRetriesExponential is the default number of retries. It's exported for testing reasons.
+	MaxConnectionRetriesExponential = 18
+	// InitialWaitingDuration is the default initial duration of waiting after a failed attempt.
+	InitialWaitingDuration = time.Second
+	ErrRetryContextDone    = errors.New("the retry context is done")
+)
+
+// retryExponential wraps f so that each failed call waits before returning,
+// doubling the waiting duration on every failure. If ctx is done while
+// waiting, ErrRetryContextDone is returned instead of the original error.
+func retryExponential(ctx context.Context, delay time.Duration, operation func() error) func() error {
+	return func() error {
+		err := operation()
+		if err == nil {
+			return nil
+		}
+
+		select {
+		case <-ctx.Done():
+			return ErrRetryContextDone
+		case <-time.After(delay):
+			delay *= 2
+			return err
+		}
+	}
+}
+
+// retryConstant wraps f so that each failed call waits for a constant duration
+// before returning. If ctx is done while waiting, ErrRetryContextDone is
+// returned instead of the original error.
+func retryConstant(ctx context.Context, delay time.Duration, operation func() error) func() error {
+	return func() error {
+		err := operation()
+		if err == nil {
+			return nil
+		}
+
+		select {
+		case <-ctx.Done():
+			return ErrRetryContextDone
+		case <-time.After(delay):
+			return err
+		}
+	}
+}
+
+// retryAttempts calls runOnce until it succeeds, the retry context is done,
+// or maxAttempts calls have been made. It returns the last error, or nil on success.
+func retryAttempts(maxAttempts int, runOnce func() error) (err error) {
+	for attempt := 0; attempt < maxAttempts; attempt++ {
+		err = runOnce()
+		switch {
+		case err == nil:
+			return nil
+		case errors.Is(err, ErrRetryContextDone):
+			return err
+		}
+		log.WithField("count", attempt).WithError(err).Debug("retrying after error")
+	}
+	return err
+}
+
+// RetryExponentialWithContext executes the passed function with exponentially increasing time starting with one second
+// up to a default maximum number of attempts as long as the context is not done.
+func RetryExponentialWithContext(ctx context.Context, f func() error) error {
+	backoff := retryExponential(ctx, InitialWaitingDuration, f)
+	return retryAttempts(MaxConnectionRetriesExponential, backoff)
+}
+
+// RetryExponential executes the passed function with exponentially increasing time starting with one second
+// up to a default maximum number of attempts.
+func RetryExponential(f func() error) error {
+	backoff := retryExponential(context.Background(), InitialWaitingDuration, f)
+	return retryAttempts(MaxConnectionRetriesExponential, backoff)
+}
+
+// RetryConstantAttemptsWithContext executes the passed function with a constant retry delay of one second
+// up to the passed maximum number of attempts as long as the context is not done.
+// RetryConstantAttemptsWithContext retries f with a constant one-second delay,
+// up to attempts times, aborting early once ctx is done.
+func RetryConstantAttemptsWithContext(attempts int, ctx context.Context, f func() error) error {
+	constantRetry := retryConstant(ctx, InitialWaitingDuration, f)
+	return retryAttempts(attempts, constantRetry)
+}