added poseidon with aws to k8s changes
344
pkg/dto/dto.go
Normal file
@@ -0,0 +1,344 @@
package dto

import (
	"encoding/json"
	"errors"
	"fmt"
	"path"
	"strconv"
	"strings"
)

var (
	// UserAgentOut for outgoing requests (without libraries). The Git Hash will be replaced by main.go.
	UserAgentOut      = "Poseidon/" + UserAgentVCSPlaceholder + " Go-http-client/1.1"
	UserAgentFiltered = "Poseidon/" + UserAgentVCSPlaceholder + " (" + UserAgentFilterTokenPlaceholder + ") Go-http-client/1.1"
)

const (
	UserAgentVCSPlaceholder         = "<7 Git Hash>"
	UserAgentFilterTokenPlaceholder = "FilterToken"
)

// RunnerRequest is the expected json structure of the request body for the ProvideRunner function.
type RunnerRequest struct {
	ExecutionEnvironmentID int `json:"executionEnvironmentId"`
	InactivityTimeout      int `json:"inactivityTimeout"`
}

// ExecutionRequest is the expected json structure of the request body for the ExecuteCommand function.
type ExecutionRequest struct {
	Command             string
	PrivilegedExecution bool
	TimeLimit           int
	Environment         map[string]string
}

// FullCommand joins the environment variables.
// It does not handle the TimeLimit or the PrivilegedExecution flag.
func (er *ExecutionRequest) FullCommand() string {
	var command string
	command += "env"

	if er.Environment == nil {
		er.Environment = make(map[string]string)
	}
	er.Environment["CODEOCEAN"] = "true"

	for variable, value := range er.Environment {
		command += fmt.Sprintf(" %s=%s", variable, value)
	}
	command += fmt.Sprintf(" %s", WrapBashCommand(er.Command))
	return command
}

// BashEscapeCommand escapes the passed command and surrounds it with double-quotes.
// The escaping includes the characters ", \, $, ` (comma-separated) as they are the exceptional characters
// that still have a special meaning with double quotes. See the Bash Manual - Chapter Quoting.
// We only handle the dollar-character and the backquote because the %q format already escapes the other two.
func BashEscapeCommand(command string) string {
	command = fmt.Sprintf("%q", command)
	command = strings.ReplaceAll(command, "$", "\\$")
	command = strings.ReplaceAll(command, "`", "\\`")
	return command
}

// WrapBashCommand escapes the passed command and wraps it into a new bash command.
func WrapBashCommand(command string) string {
	return fmt.Sprintf("/bin/bash -c %s", BashEscapeCommand(command))
}

// EnvironmentID is an id of an environment.
type EnvironmentID int

// NewEnvironmentID parses a string into an EnvironmentID.
func NewEnvironmentID(id string) (EnvironmentID, error) {
	environment, err := strconv.Atoi(id)
	return EnvironmentID(environment), err
}

// ToString converts an EnvironmentID back to a string.
func (e EnvironmentID) ToString() string {
	return strconv.Itoa(int(e))
}

// ExecutionEnvironmentData is the expected json structure of the response body
// for routes returning an execution environment.
type ExecutionEnvironmentData struct {
	ExecutionEnvironmentRequest
	ID int `json:"id"`
}

// StatisticalExecutionEnvironmentData is the expected json structure of the response body
// for routes returning statistics about execution environments.
type StatisticalExecutionEnvironmentData struct {
	ID                 int  `json:"id"`
	PrewarmingPoolSize uint `json:"prewarmingPoolSize"`
	IdleRunners        uint `json:"idleRunners"`
	UsedRunners        uint `json:"usedRunners"`
}

// ExecutionEnvironmentRequest is the expected json structure of the request body
// for the create execution environment function.
type ExecutionEnvironmentRequest struct {
	PrewarmingPoolSize uint     `json:"prewarmingPoolSize"`
	CPULimit           uint     `json:"cpuLimit"`
	MemoryLimit        uint     `json:"memoryLimit"`
	Image              string   `json:"image"`
	NetworkAccess      bool     `json:"networkAccess"`
	ExposedPorts       []uint16 `json:"exposedPorts"`
}

// MappedPort contains the mapping from exposed port inside the container to the host address
// outside the container.
type MappedPort struct {
	ExposedPort uint   `json:"exposedPort"`
	HostAddress string `json:"hostAddress"`
}

// RunnerResponse is the expected response when providing a runner.
type RunnerResponse struct {
	ID          string        `json:"runnerId"`
	MappedPorts []*MappedPort `json:"mappedPorts"`
}

// ExecutionResponse is the expected response when creating an execution for a runner.
type ExecutionResponse struct {
	WebSocketURL string `json:"websocketUrl"`
}

// ListFileSystemResponse is the expected response when listing the file system.
type ListFileSystemResponse struct {
	Files []FileHeader `json:"files"`
}

// UpdateFileSystemRequest is the expected json structure of the request body for the update file system route.
type UpdateFileSystemRequest struct {
	Delete []FilePath `json:"delete"`
	Copy   []File     `json:"copy"`
}

// FilePath specifies the path of a file and is part of the UpdateFileSystemRequest.
type FilePath string

// EntryType specifies the type of the object (file/link/directory/...).
type EntryType string

// These are the common entry types. You find others in the man pages `info ls`.
const (
	EntryTypeRegularFile EntryType = "-"
	EntryTypeLink        EntryType = "l"
)

// FileHeader specifies the information provided for listing a File.
type FileHeader struct {
	Name             FilePath  `json:"name"`
	EntryType        EntryType `json:"entryType"`
	LinkTarget       FilePath  `json:"linkTarget,omitempty"`
	Size             int       `json:"size"`
	ModificationTime int       `json:"modificationTime"`
	Permissions      string    `json:"permissions"`
	Owner            string    `json:"owner"`
	Group            string    `json:"group"`
}

// File is a DTO for transmitting file contents. It is part of the UpdateFileSystemRequest.
type File struct {
	Path    FilePath `json:"path"`
	Content []byte   `json:"content"`
}

// Cleaned returns the cleaned path of the FilePath.
func (f FilePath) Cleaned() string {
	return path.Clean(string(f))
}

// CleanedPath returns the cleaned path of the file.
func (f File) CleanedPath() string {
	return f.Path.Cleaned()
}

// IsDirectory returns true iff the path of the File ends with a /.
func (f File) IsDirectory() bool {
	return strings.HasSuffix(string(f.Path), "/")
}

// ByteContent returns the content of the File. If the File is a directory, the content will be empty.
func (f File) ByteContent() []byte {
	if f.IsDirectory() {
		return []byte("")
	} else {
		return f.Content
	}
}

// Formatter mirrors the available Formatters of logrus for configuration purposes.
type Formatter string

const (
	FormatterText = "TextFormatter"
	FormatterJSON = "JSONFormatter"
)

// ContextKey is the type for keys in a request context that is used for passing data to the next handler.
type ContextKey string

// Keys to reference information (for logging or monitoring).
const (
	KeyRunnerID            = "runner_id"
	KeyEnvironmentID       = "environment_id"
	KeyRunnerDestroyReason = "destroy_reason"
)

// LoggedContextKeys defines which keys will be logged if a context is passed to logrus. See ContextHook.
var LoggedContextKeys = []ContextKey{KeyRunnerID, KeyEnvironmentID, KeyRunnerDestroyReason}

// WebSocketMessageType is the type for the messages from Poseidon to the client.
type WebSocketMessageType string

const (
	WebSocketOutputStdout WebSocketMessageType = "stdout"
	WebSocketOutputStderr WebSocketMessageType = "stderr"
	WebSocketOutputError  WebSocketMessageType = "error"
	WebSocketMetaStart    WebSocketMessageType = "start"
	WebSocketMetaTimeout  WebSocketMessageType = "timeout"
	WebSocketExit         WebSocketMessageType = "exit"
)

var (
	ErrUnknownWebSocketMessageType = errors.New("unknown WebSocket message type")
	// ErrOOMKilled is the exact message that CodeOcean expects to further handle these specific cases.
	ErrOOMKilled    = errors.New("the allocation was OOM Killed")
	ErrMissingType  = errors.New("type is missing")
	ErrMissingData  = errors.New("data is missing")
	ErrInvalidType  = errors.New("invalid type")
	ErrNotSupported = errors.New("not supported")
)

// WebSocketMessage is the type for all messages sent in the WebSocket to the client.
// Depending on the MessageType the Data or ExitCode might not be included in the marshaled json message.
type WebSocketMessage struct {
	Type     WebSocketMessageType
	Data     string
	ExitCode uint8
}

// MarshalJSON implements the json.Marshaler interface.
// This converts the WebSocketMessage into the expected schema (see docs/websocket.schema.json).
func (m WebSocketMessage) MarshalJSON() (res []byte, err error) {
	switch m.Type {
	case WebSocketOutputStdout, WebSocketOutputStderr, WebSocketOutputError:
		res, err = json.Marshal(struct {
			MessageType WebSocketMessageType `json:"type"`
			Data        string               `json:"data"`
		}{m.Type, m.Data})
	case WebSocketMetaStart, WebSocketMetaTimeout:
		res, err = json.Marshal(struct {
			MessageType WebSocketMessageType `json:"type"`
		}{m.Type})
	case WebSocketExit:
		res, err = json.Marshal(struct {
			MessageType WebSocketMessageType `json:"type"`
			ExitCode    uint8                `json:"data"`
		}{m.Type, m.ExitCode})
	}
	if err != nil {
		return nil, fmt.Errorf("error marshaling WebSocketMessage: %w", err)
	} else if res == nil {
		return nil, ErrUnknownWebSocketMessageType
	}
	return res, nil
}

// UnmarshalJSON implements the json.Unmarshaler interface.
// It is used by tests in order to ReceiveNextWebSocketMessage.
func (m *WebSocketMessage) UnmarshalJSON(rawMessage []byte) error {
	messageMap := make(map[string]interface{})
	err := json.Unmarshal(rawMessage, &messageMap)
	if err != nil {
		return fmt.Errorf("error unmarshaling raw WebSocket message: %w", err)
	}
	messageType, ok := messageMap["type"]
	if !ok {
		return ErrMissingType
	}
	messageTypeString, ok := messageType.(string)
	if !ok {
		return fmt.Errorf("value of key type must be a string: %w", ErrInvalidType)
	}
	switch messageType := WebSocketMessageType(messageTypeString); messageType {
	case WebSocketExit:
		data, ok := messageMap["data"]
		if !ok {
			return ErrMissingData
		}
		// json.Unmarshal converts any number to a float64 in the messageMap, so we must first cast it to float64.
		exit, ok := data.(float64)
		if !ok {
			return fmt.Errorf("value of key data must be a number: %w", ErrInvalidType)
		}
		if exit != float64(uint8(exit)) {
			return fmt.Errorf("value of key data must be uint8: %w", ErrInvalidType)
		}
		m.Type = messageType
		m.ExitCode = uint8(exit)
	case WebSocketOutputStdout, WebSocketOutputStderr, WebSocketOutputError:
		data, ok := messageMap["data"]
		if !ok {
			return ErrMissingData
		}
		text, ok := data.(string)
		if !ok {
			return fmt.Errorf("value of key data must be a string: %w", ErrInvalidType)
		}
		m.Type = messageType
		m.Data = text
	case WebSocketMetaStart, WebSocketMetaTimeout:
		m.Type = messageType
	default:
		return ErrUnknownWebSocketMessageType
	}
	return nil
}

// ClientError is the response interface if the request is not valid.
type ClientError struct {
	Message string `json:"message"`
}

// InternalServerError is the response interface that is returned when an error occurs.
type InternalServerError struct {
	Message   string    `json:"message"`
	ErrorCode ErrorCode `json:"errorCode"`
}

// ErrorCode is the type for error codes expected by CodeOcean.
type ErrorCode string

const (
	Errork8sUnreachable         ErrorCode = "k8s_UNREACHABLE"
	Errork8sOverload            ErrorCode = "k8s_OVERLOAD"
	Errork8sInternalServerError ErrorCode = "k8s_INTERNAL_SERVER_ERROR"
	PrewarmingPoolDepleting     ErrorCode = "PREWARMING_POOL_DEPLETING"
	ErrorUnknown                ErrorCode = "UNKNOWN"
)
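
A minimal sketch of the DTO helpers in use (hypothetical caller, not part of this commit):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/openHPI/poseidon/pkg/dto"
)

func main() {
	// FullCommand injects CODEOCEAN=true and wraps the command in `env ... /bin/bash -c "..."`.
	request := dto.ExecutionRequest{Command: "echo $GREETING", Environment: map[string]string{"GREETING": "hi"}}
	fmt.Println(request.FullCommand())

	// MarshalJSON only emits the fields defined for the message type.
	message, _ := json.Marshal(dto.WebSocketMessage{Type: dto.WebSocketExit, ExitCode: 0})
	fmt.Println(string(message)) // {"type":"exit","data":0}
}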
41
pkg/logging/context_hook.go
Normal file
@@ -0,0 +1,41 @@
package logging

import (
	"github.com/openHPI/poseidon/pkg/dto"
	"github.com/sirupsen/logrus"
)

// ContextHook logs the values referenced by dto.LoggedContextKeys.
// By default, Logrus does not log the values stored in the passed context.
type ContextHook struct{}

// Fire is triggered on new log entries.
func (hook *ContextHook) Fire(entry *logrus.Entry) error {
	if entry.Context != nil {
		injectContextValuesIntoData(entry)
	}
	return nil
}

func injectContextValuesIntoData(entry *logrus.Entry) {
	for _, key := range dto.LoggedContextKeys {
		value := entry.Context.Value(key)
		_, valueExisting := entry.Data[string(key)]
		if !valueExisting && value != nil {
			entry.Data[string(key)] = value
		}
	}
}

// Levels returns all levels this hook should be registered to.
func (hook *ContextHook) Levels() []logrus.Level {
	return []logrus.Level{
		logrus.PanicLevel,
		logrus.FatalLevel,
		logrus.ErrorLevel,
		logrus.WarnLevel,
		logrus.InfoLevel,
		logrus.DebugLevel,
		logrus.TraceLevel,
	}
}
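
A sketch of what the hook does for a context-aware log call (hypothetical values; InitializeLogging in logging.go below registers the hook on the package logger):

logger := logrus.New()
logger.AddHook(&ContextHook{})
ctx := context.WithValue(context.Background(), dto.ContextKey(dto.KeyRunnerID), "runner-42")
// Without the hook the runner id stays buried in the context;
// with it, the entry gains the field runner_id=runner-42.
logger.WithContext(ctx).Info("executing command")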
109
pkg/logging/logging.go
Normal file
@@ -0,0 +1,109 @@
package logging

import (
	"bufio"
	"fmt"
	"github.com/getsentry/sentry-go"
	"github.com/openHPI/poseidon/pkg/dto"
	"github.com/sirupsen/logrus"
	"net"
	"net/http"
	"os"
	"strings"
	"time"
)

const TimestampFormat = "2006-01-02T15:04:05.000000Z"

var log = &logrus.Logger{
	Out: os.Stderr,
	Formatter: &logrus.TextFormatter{
		TimestampFormat: TimestampFormat,
		DisableColors:   true,
		FullTimestamp:   true,
	},
	Hooks: make(logrus.LevelHooks),
	Level: logrus.InfoLevel,
}

const GracefulSentryShutdown = 5 * time.Second

func InitializeLogging(logLevel string, formatter dto.Formatter) {
	level, err := logrus.ParseLevel(logLevel)
	if err != nil {
		log.WithError(err).Fatal("Error parsing loglevel")
		return
	}
	log.SetLevel(level)
	if formatter == dto.FormatterJSON {
		log.Formatter = &logrus.JSONFormatter{
			TimestampFormat: TimestampFormat,
		}
	}
	log.AddHook(&ContextHook{})
	log.AddHook(&SentryHook{})
	log.ExitFunc = func(i int) {
		sentry.Flush(GracefulSentryShutdown)
		os.Exit(i)
	}
}

func GetLogger(pkg string) *logrus.Entry {
	return log.WithField("package", pkg)
}

// ResponseWriter wraps the default http.ResponseWriter and catches the status code
// that is written.
type ResponseWriter struct {
	http.ResponseWriter
	StatusCode int
}

func NewLoggingResponseWriter(w http.ResponseWriter) *ResponseWriter {
	return &ResponseWriter{w, http.StatusOK}
}

func (writer *ResponseWriter) WriteHeader(code int) {
	writer.StatusCode = code
	writer.ResponseWriter.WriteHeader(code)
}

func (writer *ResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	conn, rw, err := writer.ResponseWriter.(http.Hijacker).Hijack()
	if err != nil {
		return conn, nil, fmt.Errorf("hijacking connection failed: %w", err)
	}
	return conn, rw, nil
}

// HTTPLoggingMiddleware returns an http.Handler that logs different information about every request.
func HTTPLoggingMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now().UTC()
		path := RemoveNewlineSymbol(r.URL.Path)

		lrw := NewLoggingResponseWriter(w)
		next.ServeHTTP(lrw, r)

		latency := time.Now().UTC().Sub(start)
		logEntry := log.WithContext(r.Context()).WithFields(logrus.Fields{
			"code":       lrw.StatusCode,
			"method":     r.Method,
			"path":       path,
			"duration":   latency,
			"user_agent": RemoveNewlineSymbol(r.UserAgent()),
		})
		if r.UserAgent() == dto.UserAgentFiltered {
			logEntry.Trace()
		} else {
			logEntry.Debug()
		}
	})
}

// RemoveNewlineSymbol removes newlines from user-controlled input before logging (avoids log injection).
func RemoveNewlineSymbol(data string) string {
	data = strings.ReplaceAll(data, "\r", "")
	data = strings.ReplaceAll(data, "\n", "")
	return data
}
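
A sketch of wiring the logging into a server (hypothetical main; assumes gorilla/mux, which this commit uses elsewhere):

logging.InitializeLogging("debug", dto.FormatterJSON)
router := mux.NewRouter()
router.Use(logging.HTTPLoggingMiddleware)
// Every request is now logged with code, method, path, duration, and user_agent;
// requests from the filtered user agent only appear on the Trace level.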
67
pkg/logging/sentry_hook.go
Normal file
@@ -0,0 +1,67 @@
package logging

import (
	"context"
	"errors"
	"github.com/getsentry/sentry-go"
	"github.com/openHPI/poseidon/pkg/dto"
	"github.com/sirupsen/logrus"
)

// SentryHook is a simple adapter that converts logrus entries into Sentry events.
// Consider replacing this with a more feature-rich, additional dependency: https://github.com/evalphobia/logrus_sentry
type SentryHook struct{}

var ErrorHubInvalid = errors.New("the hub is invalid")

// Fire is triggered on new log entries.
func (hook *SentryHook) Fire(entry *logrus.Entry) error {
	var hub *sentry.Hub
	if entry.Context != nil {
		hub = sentry.GetHubFromContext(entry.Context)
		injectContextValuesIntoData(entry)
	}
	if hub == nil {
		hub = sentry.CurrentHub()
	}
	client, scope := hub.Client(), hub.Scope()
	if client == nil || scope == nil {
		return ErrorHubInvalid
	}

	scope.SetContext("Poseidon Details", entry.Data)
	if runnerID, ok := entry.Data[dto.KeyRunnerID].(string); ok {
		scope.SetTag(dto.KeyRunnerID, runnerID)
	}
	if environmentID, ok := entry.Data[dto.KeyEnvironmentID].(string); ok {
		scope.SetTag(dto.KeyEnvironmentID, environmentID)
	}

	event := client.EventFromMessage(entry.Message, sentry.Level(entry.Level.String()))
	event.Timestamp = entry.Time
	if data, ok := entry.Data["error"]; ok {
		err, ok := data.(error)
		if ok {
			entry.Data["error"] = err.Error()
		}
	}
	hub.CaptureEvent(event)
	return nil
}

// Levels returns all levels this hook should be registered to.
func (hook *SentryHook) Levels() []logrus.Level {
	return []logrus.Level{
		logrus.PanicLevel,
		logrus.FatalLevel,
		logrus.ErrorLevel,
		logrus.WarnLevel,
	}
}

func StartSpan(op, description string, ctx context.Context, callback func(context.Context)) {
	span := sentry.StartSpan(ctx, op)
	span.Description = description
	defer span.Finish()
	callback(span.Context())
}
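
A usage sketch for StartSpan (hypothetical operation name and description):

logging.StartSpan("runner.restore", "restore prewarming pool", ctx, func(ctx context.Context) {
	// Work done here is measured as one span; nested StartSpan calls
	// attach to it through the derived context.
})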
194
pkg/monitoring/influxdb2_middleware.go
Normal file
@@ -0,0 +1,194 @@
package monitoring

import (
	"bytes"
	"context"
	"github.com/gorilla/mux"
	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
	influxdb2API "github.com/influxdata/influxdb-client-go/v2/api"
	http2 "github.com/influxdata/influxdb-client-go/v2/api/http"
	"github.com/influxdata/influxdb-client-go/v2/api/write"
	"github.com/openHPI/poseidon/internal/config"
	"github.com/openHPI/poseidon/pkg/dto"
	"github.com/openHPI/poseidon/pkg/logging"
	"io"
	"net/http"
	"strconv"
	"time"
)

const (
	// influxdbContextKey is a key (runner.ContextKey) to reference the influxdb data point in the request context.
	influxdbContextKey dto.ContextKey = "influxdb data point"
	// measurementPrefix allows easier filtering in influxdb.
	measurementPrefix   = "poseidon_"
	measurementPoolSize = measurementPrefix + "poolsize"

	MeasurementExecutionsAWS = measurementPrefix + "aws_executions"
	MeasurementExecutionsK8s = measurementPrefix + "k8s_executions"
	MeasurementEnvironments  = measurementPrefix + "environments"
	MeasurementUsedRunner    = measurementPrefix + "used_runners"

	// The keys for the monitored tags and fields.

	InfluxKeyRunnerID                      = dto.KeyRunnerID
	InfluxKeyEnvironmentID                 = dto.KeyEnvironmentID
	InfluxKeyActualContentLength           = "actual_length"
	InfluxKeyExpectedContentLength         = "expected_length"
	InfluxKeyDuration                      = "duration"
	influxKeyEnvironmentPrewarmingPoolSize = "prewarming_pool_size"
	influxKeyRequestSize                   = "request_size"
)

var (
	log          = logging.GetLogger("monitoring")
	influxClient influxdb2API.WriteAPI
)

func InitializeInfluxDB(db *config.InfluxDB) (cancel func()) {
	if db.URL == "" {
		return func() {}
	}

	// How often to retry to write data.
	const maxRetries = 50
	// How long to wait before retrying to write data.
	const retryInterval = 5 * time.Second
	// How old the data can be before we stop retrying to write it. Should be larger than maxRetries * retryInterval.
	const retryExpire = 10 * time.Minute
	// How many batches are buffered before dropping the oldest.
	const retryBufferLimit = 100_000

	// Set options for retrying with the influx client.
	options := influxdb2.DefaultOptions()
	options.SetRetryInterval(uint(retryInterval.Milliseconds()))
	options.SetMaxRetries(maxRetries)
	options.SetMaxRetryTime(uint(retryExpire.Milliseconds()))
	options.SetRetryBufferLimit(retryBufferLimit)

	// Create a new influx client.
	client := influxdb2.NewClientWithOptions(db.URL, db.Token, options)
	influxClient = client.WriteAPI(db.Organization, db.Bucket)
	influxClient.SetWriteFailedCallback(func(_ string, error http2.Error, retryAttempts uint) bool {
		log.WithError(&error).WithField("retryAttempts", retryAttempts).Trace("Retrying to write influx data...")

		// retryAttempts means number of retries, 0 if it failed during first write.
		if retryAttempts == options.MaxRetries() {
			log.WithError(&error).Warn("Could not write influx data.")
			return false // Disable retry. We failed to retry writing the data in time.
		}
		return true // Enable retry (default)
	})

	// Flush the influx client on shutdown.
	cancel = func() {
		influxClient.Flush()
		influxClient = nil
		client.Close()
	}
	return cancel
}

// InfluxDB2Middleware is a middleware to send events to an influx database.
func InfluxDB2Middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		route := mux.CurrentRoute(r).GetName()
		p := influxdb2.NewPointWithMeasurement(measurementPrefix + route)

		start := time.Now().UTC()
		p.SetTime(time.Now())

		ctx := context.WithValue(r.Context(), influxdbContextKey, p)
		requestWithPoint := r.WithContext(ctx)
		lrw := logging.NewLoggingResponseWriter(w)
		next.ServeHTTP(lrw, requestWithPoint)

		p.AddField(InfluxKeyDuration, time.Now().UTC().Sub(start).Nanoseconds())
		p.AddTag("status", strconv.Itoa(lrw.StatusCode))

		WriteInfluxPoint(p)
	})
}

// AddRunnerMonitoringData adds the data of the runner we want to monitor.
func AddRunnerMonitoringData(request *http.Request, runnerID string, environmentID dto.EnvironmentID) {
	addRunnerID(request, runnerID)
	addEnvironmentID(request, environmentID)
}

// addRunnerID adds the runner id to the influx data point for the current request.
func addRunnerID(r *http.Request, id string) {
	addInfluxDBTag(r, InfluxKeyRunnerID, id)
}

// addEnvironmentID adds the environment id to the influx data point for the current request.
func addEnvironmentID(r *http.Request, id dto.EnvironmentID) {
	addInfluxDBTag(r, InfluxKeyEnvironmentID, id.ToString())
}

// AddRequestSize adds the size of the request body to the influx data point for the current request.
func AddRequestSize(r *http.Request) {
	body, err := io.ReadAll(r.Body)
	if err != nil {
		log.WithContext(r.Context()).WithError(err).Debug("Failed to read request body")
		return
	}

	err = r.Body.Close()
	if err != nil {
		log.WithContext(r.Context()).WithError(err).Debug("Failed to close request body")
		return
	}
	r.Body = io.NopCloser(bytes.NewBuffer(body))

	addInfluxDBField(r, influxKeyRequestSize, len(body))
}

func ChangedPrewarmingPoolSize(id dto.EnvironmentID, count uint) {
	p := influxdb2.NewPointWithMeasurement(measurementPoolSize)

	p.AddTag(InfluxKeyEnvironmentID, id.ToString())
	p.AddField(influxKeyEnvironmentPrewarmingPoolSize, count)

	WriteInfluxPoint(p)
}

// WriteInfluxPoint schedules the influx data point to be sent.
func WriteInfluxPoint(p *write.Point) {
	if influxClient != nil {
		p.AddTag("stage", config.Config.InfluxDB.Stage)
		// We identified that the influxClient is not truly asynchronous. See #541.
		go func() { influxClient.WritePoint(p) }()
	} else {
		entry := log.WithField("name", p.Name())
		for _, tag := range p.TagList() {
			if tag.Key == "event_type" && tag.Value == "periodically" {
				return
			}
			entry = entry.WithField(tag.Key, tag.Value)
		}
		for _, field := range p.FieldList() {
			entry = entry.WithField(field.Key, field.Value)
		}
		entry.Trace("Influx data point")
	}
}

// addInfluxDBTag adds a tag to the influxdb data point in the request.
func addInfluxDBTag(r *http.Request, key, value string) {
	dataPointFromRequest(r).AddTag(key, value)
}

// addInfluxDBField adds a field to the influxdb data point in the request.
func addInfluxDBField(r *http.Request, key string, value interface{}) {
	dataPointFromRequest(r).AddField(key, value)
}

// dataPointFromRequest returns the data point in the passed request.
func dataPointFromRequest(r *http.Request) *write.Point {
	p, ok := r.Context().Value(influxdbContextKey).(*write.Point)
	if !ok {
		log.WithContext(r.Context()).Error("All http requests must contain an influxdb data point!")
	}
	return p
}
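
A sketch of the middleware and tag helpers together (hypothetical route and handler; the route name becomes the measurement suffix):

router := mux.NewRouter()
router.Use(monitoring.InfluxDB2Middleware)
router.HandleFunc("/runners", func(w http.ResponseWriter, r *http.Request) {
	monitoring.AddRequestSize(r)
	monitoring.AddRunnerMonitoringData(r, "runner-42", dto.EnvironmentID(7))
	// Duration and status tags are added by the middleware after the handler returns.
}).Name("provideRunner")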
84
pkg/nullio/content_length.go
Normal file
@@ -0,0 +1,84 @@
package nullio

import (
	"errors"
	"fmt"
	"github.com/influxdata/influxdb-client-go/v2/api/write"
	"github.com/openHPI/poseidon/pkg/monitoring"
	"net/http"
	"strconv"
)

var ErrRegexMatching = errors.New("could not match content length")

// ContentLengthWriter implements io.Writer.
// It parses the size from the first line as the Content-Length header and streams the following data to the Target.
// The first line is expected to follow the format headerLineRegex.
type ContentLengthWriter struct {
	Target                http.ResponseWriter
	contentLengthSet      bool
	firstLine             []byte
	expectedContentLength int
	actualContentLength   int
}

func (w *ContentLengthWriter) Write(p []byte) (count int, err error) {
	if w.contentLengthSet {
		return w.handleDataForwarding(p)
	} else {
		return w.handleContentLengthParsing(p)
	}
}

func (w *ContentLengthWriter) handleDataForwarding(p []byte) (int, error) {
	count, err := w.Target.Write(p)
	if err != nil {
		err = fmt.Errorf("could not write to target: %w", err)
	}
	w.actualContentLength += count
	return count, err
}

func (w *ContentLengthWriter) handleContentLengthParsing(p []byte) (count int, err error) {
	for i, char := range p {
		if char != '\n' {
			continue
		}

		w.firstLine = append(w.firstLine, p[:i]...)
		matches := headerLineRegex.FindSubmatch(w.firstLine)
		if len(matches) < headerLineGroupName {
			log.WithField("line", string(w.firstLine)).Error(ErrRegexMatching.Error())
			return 0, ErrRegexMatching
		}
		size := string(matches[headerLineGroupSize])
		w.expectedContentLength, err = strconv.Atoi(size)
		if err != nil {
			log.WithField("size", size).Warn("could not parse content length")
		}
		w.Target.Header().Set("Content-Length", size)
		w.contentLengthSet = true

		if i < len(p)-1 {
			count, err = w.Target.Write(p[i+1:])
			if err != nil {
				err = fmt.Errorf("could not write to target: %w", err)
			}
		}

		return len(p[:i]) + 1 + count, err
	}

	if !w.contentLengthSet {
		w.firstLine = append(w.firstLine, p...)
	}

	return len(p), nil
}

// SendMonitoringData will send a monitoring event of the content length read and written.
func (w *ContentLengthWriter) SendMonitoringData(p *write.Point) {
	p.AddField(monitoring.InfluxKeyExpectedContentLength, w.expectedContentLength)
	p.AddField(monitoring.InfluxKeyActualContentLength, w.actualContentLength)
	monitoring.WriteInfluxPoint(p)
}
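
A sketch of the writer's contract (hypothetical payload; the first line must match headerLineRegex from ls2json.go, and its size column becomes the Content-Length header):

w := &ContentLengthWriter{Target: responseWriter} // responseWriter: some http.ResponseWriter
// First write: an `ls -l`-style header line; "42" is parsed as the expected content length.
_, _ = w.Write([]byte("-rw-r--r-- 1 user user 42 1660000000 file.txt\n"))
_, _ = w.Write(fileContent) // All following bytes are streamed to the target.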
184
pkg/nullio/ls2json.go
Normal file
@@ -0,0 +1,184 @@
package nullio

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"github.com/getsentry/sentry-go"
	"github.com/openHPI/poseidon/pkg/dto"
	"github.com/openHPI/poseidon/pkg/logging"
	"io"
	"regexp"
	"strconv"
	"strings"
)

var (
	log             = logging.GetLogger("nullio")
	pathLineRegex   = regexp.MustCompile(`(.*):$`)
	headerLineRegex = regexp.
			MustCompile(`([-aAbcCdDlMnpPsw?])([-rwxXsStT]{9})(\+?) +\d+ +(.+?) +(.+?) +(\d+) +(\d+) +(.*)$`)
)

const (
	headerLineGroupEntryType   = 1
	headerLineGroupPermissions = 2
	headerLineGroupACL         = 3
	headerLineGroupOwner       = 4
	headerLineGroupGroup       = 5
	headerLineGroupSize        = 6
	headerLineGroupTimestamp   = 7
	headerLineGroupName        = 8
)

// Ls2JsonWriter implements io.Writer.
// It streams the passed data to the Target and transforms the data into the json format.
type Ls2JsonWriter struct {
	Target         io.Writer
	Ctx            context.Context
	jsonStartSent  bool
	setCommaPrefix bool
	remaining      []byte
	latestPath     []byte
	sentrySpan     *sentry.Span
}

func (w *Ls2JsonWriter) HasStartedWriting() bool {
	return w.jsonStartSent
}

func (w *Ls2JsonWriter) Write(p []byte) (int, error) {
	i, err := w.initializeJSONObject()
	if err != nil {
		return i, err
	}

	start := 0
	for i, char := range p {
		if char != '\n' {
			continue
		}

		line := p[start:i]
		if len(w.remaining) > 0 {
			line = append(w.remaining, line...)
			w.remaining = []byte("")
		}

		if len(line) != 0 {
			count, err := w.writeLine(line)
			if err != nil {
				log.WithContext(w.Ctx).WithError(err).Warn("Could not write line to Target")
				return count, err
			}
		}
		start = i + 1
	}

	if start < len(p) {
		w.remaining = p[start:]
	}

	return len(p), nil
}

func (w *Ls2JsonWriter) initializeJSONObject() (count int, err error) {
	if !w.jsonStartSent {
		count, err = w.Target.Write([]byte("{\"files\": ["))
		if count == 0 || err != nil {
			log.WithContext(w.Ctx).WithError(err).Warn("Could not write to target")
			err = fmt.Errorf("could not write to target: %w", err)
		} else {
			w.jsonStartSent = true
			w.sentrySpan = sentry.StartSpan(w.Ctx, "nullio.init")
			w.sentrySpan.Description = "Forwarding"
		}
	}
	return count, err
}

func (w *Ls2JsonWriter) Close() {
	if w.jsonStartSent {
		count, err := w.Target.Write([]byte("]}"))
		if count == 0 || err != nil {
			log.WithContext(w.Ctx).WithError(err).Warn("Could not Close ls2json writer")
		}
		w.sentrySpan.Finish()
	}
}

func (w *Ls2JsonWriter) writeLine(line []byte) (count int, err error) {
	matches := pathLineRegex.FindSubmatch(line)
	if matches != nil {
		w.latestPath = append(bytes.TrimSuffix(matches[1], []byte("/")), '/')
		return 0, nil
	}

	matches = headerLineRegex.FindSubmatch(line)
	if matches != nil {
		response, err1 := w.parseFileHeader(matches)
		if err1 != nil {
			return 0, err1
		}

		// Skip the first leading comma
		if w.setCommaPrefix {
			response = append([]byte{','}, response...)
		} else {
			w.setCommaPrefix = true
		}

		count, err1 = w.Target.Write(response)
		if err1 != nil {
			err = fmt.Errorf("could not write to target: %w", err1)
		} else if count == len(response) {
			count = len(line)
		}
	}

	return count, err
}

func (w *Ls2JsonWriter) parseFileHeader(matches [][]byte) ([]byte, error) {
	entryType := dto.EntryType(matches[headerLineGroupEntryType][0])
	permissions := string(matches[headerLineGroupPermissions])
	acl := string(matches[headerLineGroupACL])
	if acl == "+" {
		permissions += "+"
	}

	size, err1 := strconv.Atoi(string(matches[headerLineGroupSize]))
	timestamp, err2 := strconv.Atoi(string(matches[headerLineGroupTimestamp]))
	if err1 != nil || err2 != nil {
		return nil, fmt.Errorf("could not parse file details: %w %+v", err1, err2)
	}

	name := dto.FilePath(append(w.latestPath, matches[headerLineGroupName]...))
	linkTarget := dto.FilePath("")
	if entryType == dto.EntryTypeLink {
		parts := strings.Split(string(name), " -> ")
		const NumberOfPartsInALink = 2
		if len(parts) == NumberOfPartsInALink {
			name = dto.FilePath(parts[0])
			linkTarget = dto.FilePath(parts[1])
		} else {
			log.WithContext(w.Ctx).Error("could not split link into name and target")
		}
	}

	response, err := json.Marshal(dto.FileHeader{
		Name:             name,
		EntryType:        entryType,
		LinkTarget:       linkTarget,
		Size:             size,
		ModificationTime: timestamp,
		Permissions:      permissions,
		Owner:            string(matches[headerLineGroupOwner]),
		Group:            string(matches[headerLineGroupGroup]),
	})
	if err != nil {
		return nil, fmt.Errorf("could not marshal file header: %w", err)
	}
	return response, nil
}
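
A sketch of the transformation (hypothetical input; assumes `ls -l`-style output with numeric timestamps, e.g. from `ls -l --time-style=+%s`):

var buf bytes.Buffer
w := &Ls2JsonWriter{Target: &buf, Ctx: context.Background()}
_, _ = w.Write([]byte("./dir:\n-rw-r--r-- 1 user user 42 1660000000 file.txt\n"))
w.Close()
// buf now holds {"files": [{"name":"./dir/file.txt","entryType":"-",...}]}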
43
pkg/nullio/nullio.go
Normal file
@@ -0,0 +1,43 @@
package nullio

import (
	"context"
	"fmt"
	"io"
)

// Reader is a struct that implements the io.Reader interface.
// Read does not return when called until the context is done. It is used to avoid reading anything and returning io.EOF
// before the context has finished.
// For example the reader is used by the execution that fetches the stderr stream from Nomad. We do not have a stdin
// that we want to send to Nomad. But we still have to pass Nomad a reader.
// Normally readers send an io.EOF as soon as they have nothing more to read. But we want to avoid this, because in that
// case Nomad will abort (not the execution but) the transmission.
// Why should the reader not just always return 0, nil? Because Nomad reads in an endless loop and thus busy waiting
// is avoided.
type Reader struct {
	Ctx context.Context
}

func (r Reader) Read(_ []byte) (int, error) {
	if r.Ctx == nil || r.Ctx.Err() != nil {
		return 0, io.EOF
	}

	<-r.Ctx.Done()
	return 0, io.EOF
}

// ReadWriter implements io.ReadWriter. It does not return from Read and discards everything on Write.
type ReadWriter struct {
	Reader
}

func (rw *ReadWriter) Write(p []byte) (int, error) {
	n, err := io.Discard.Write(p)
	if err != nil {
		return n, fmt.Errorf("error writing to io.Discard: %w", err)
	} else {
		return n, nil
	}
}
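
A sketch of the blocking-stdin pattern described above (hypothetical streaming call):

ctx, cancel := context.WithCancel(context.Background())
stdin := Reader{Ctx: ctx}
// Hand stdin to the streaming API: Read blocks instead of returning io.EOF,
// so the transmission stays open until the execution finishes.
// ...
cancel() // Every pending Read now returns io.EOF and the stream closes.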
185
pkg/storage/storage.go
Normal file
@@ -0,0 +1,185 @@
package storage

import (
	"context"
	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
	"github.com/influxdata/influxdb-client-go/v2/api/write"
	"github.com/openHPI/poseidon/pkg/monitoring"
	"sync"
	"time"
)

// Storage is an interface for storing objects.
type Storage[T any] interface {
	// List returns all objects from the storage.
	List() []T

	// Add adds an object to the storage.
	// It overwrites the old object if one with the same id was already stored.
	Add(id string, o T)

	// Get returns an object from the storage.
	// Iff the object does not exist in the storage, ok will be false.
	Get(id string) (o T, ok bool)

	// Delete deletes the object with the passed id from the storage.
	// It does nothing if no object with the id is present in the store.
	Delete(id string)

	// Pop deletes the object with the given id from the storage and returns it.
	// Iff no such object exists, ok is false.
	Pop(id string) (o T, ok bool)

	// Purge removes all objects from the storage.
	Purge()

	// Length returns the number of currently stored objects in the storage.
	Length() uint

	// Sample returns and removes an arbitrary object from the storage.
	// ok is true iff an object was returned.
	Sample() (o T, ok bool)
}

// EventType is an enum type to declare the different causes of a monitoring event.
type EventType string

const (
	Creation     EventType = "creation"
	Deletion     EventType = "deletion"
	Periodically EventType = "periodically"
)

// WriteCallback is called before an event gets monitored.
// Iff eventType is Periodically, no object is provided.
type WriteCallback[T any] func(p *write.Point, object T, eventType EventType)

// localStorage stores objects in the local application memory.
type localStorage[T any] struct {
	sync.RWMutex
	objects     map[string]T
	measurement string
	callback    WriteCallback[T]
}

// NewLocalStorage responds with a Storage implementation.
// This implementation stores the data thread-safe in the local application memory.
func NewLocalStorage[T any]() *localStorage[T] {
	return &localStorage[T]{
		objects: make(map[string]T),
	}
}

// NewMonitoredLocalStorage responds with a Storage implementation.
// All write operations are monitored in the passed measurement.
// Iff callback is set, it will be called on a write operation.
// Iff additionalEvents is not zero, the duration will be used to periodically send additional monitoring events.
func NewMonitoredLocalStorage[T any](
	measurement string, callback WriteCallback[T], additionalEvents time.Duration, ctx context.Context) *localStorage[T] {
	s := &localStorage[T]{
		objects:     make(map[string]T),
		measurement: measurement,
		callback:    callback,
	}
	if additionalEvents != 0 {
		go s.periodicallySendMonitoringData(additionalEvents, ctx)
	}
	return s
}

func (s *localStorage[T]) List() (o []T) {
	s.RLock()
	defer s.RUnlock()
	for _, value := range s.objects {
		o = append(o, value)
	}
	return o
}

func (s *localStorage[T]) Add(id string, o T) {
	s.Lock()
	defer s.Unlock()
	s.objects[id] = o
	s.sendMonitoringData(id, o, Creation, s.unsafeLength())
}

func (s *localStorage[T]) Get(id string) (o T, ok bool) {
	s.RLock()
	defer s.RUnlock()
	o, ok = s.objects[id]
	return
}

func (s *localStorage[T]) Delete(id string) {
	s.Lock()
	defer s.Unlock()
	o, ok := s.objects[id]
	if ok {
		delete(s.objects, id)
		s.sendMonitoringData(id, o, Deletion, s.unsafeLength())
	}
}

func (s *localStorage[T]) Pop(id string) (T, bool) {
	o, ok := s.Get(id)
	s.Delete(id)
	return o, ok
}

func (s *localStorage[T]) Purge() {
	s.Lock()
	defer s.Unlock()
	for key, object := range s.objects {
		s.sendMonitoringData(key, object, Deletion, 0)
	}
	s.objects = make(map[string]T)
}

func (s *localStorage[T]) Sample() (o T, ok bool) {
	s.Lock()
	defer s.Unlock()
	for key, object := range s.objects {
		delete(s.objects, key)
		s.sendMonitoringData(key, object, Deletion, s.unsafeLength())
		return object, true
	}
	return o, false
}

func (s *localStorage[T]) Length() uint {
	s.RLock()
	defer s.RUnlock()
	return s.unsafeLength()
}

func (s *localStorage[T]) unsafeLength() uint {
	length := len(s.objects)
	return uint(length)
}

func (s *localStorage[T]) sendMonitoringData(id string, o T, eventType EventType, count uint) {
	if s.measurement != "" {
		p := influxdb2.NewPointWithMeasurement(s.measurement)
		p.AddTag("id", id)
		p.AddTag("event_type", string(eventType))
		p.AddField("count", count)

		if s.callback != nil {
			s.callback(p, o, eventType)
		}

		monitoring.WriteInfluxPoint(p)
	}
}

func (s *localStorage[T]) periodicallySendMonitoringData(d time.Duration, ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(d):
			stub := new(T)
			s.sendMonitoringData("", *stub, Periodically, s.Length())
		}
	}
}
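
A sketch of the generic storage in use (hypothetical element type; the callback and its extra tag are assumptions):

type idleRunner struct{ id string }

store := NewMonitoredLocalStorage[idleRunner](
	monitoring.MeasurementUsedRunner,
	func(p *write.Point, o idleRunner, eventType EventType) {
		p.AddTag("id_prefix", "r") // hypothetical extra tag per event
	},
	time.Minute, context.Background())
store.Add("r-42", idleRunner{id: "r-42"})
runner, ok := store.Pop("r-42") // ok == true; the store is empty again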
66
pkg/util/merge_context.go
Normal file
@@ -0,0 +1,66 @@
package util

import (
	"context"
	"fmt"
	"reflect"
	"time"
)

// mergeContext combines multiple contexts.
type mergeContext struct {
	contexts []context.Context
}

func NewMergeContext(contexts []context.Context) context.Context {
	return mergeContext{contexts: contexts}
}

// Deadline returns the earliest Deadline of all contexts.
func (m mergeContext) Deadline() (deadline time.Time, ok bool) {
	for _, ctx := range m.contexts {
		if anotherDeadline, anotherOk := ctx.Deadline(); anotherOk {
			if ok && anotherDeadline.After(deadline) {
				continue
			}
			deadline = anotherDeadline
			ok = anotherOk
		}
	}
	return deadline, ok
}

// Done notifies when the first context is done.
func (m mergeContext) Done() <-chan struct{} {
	ch := make(chan struct{})
	cases := make([]reflect.SelectCase, 0, len(m.contexts))
	for _, ctx := range m.contexts {
		cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ctx.Done())})
	}
	go func(cases []reflect.SelectCase, ch chan struct{}) {
		_, _, _ = reflect.Select(cases)
		close(ch)
	}(cases, ch)
	return ch
}

// Err returns the error of any (random) context and nil iff no context has an error.
func (m mergeContext) Err() error {
	for _, ctx := range m.contexts {
		if ctx.Err() != nil {
			return fmt.Errorf("mergeContext wrapped: %w", ctx.Err())
		}
	}
	return nil
}

// Value returns the value for the key if any context has it.
// If multiple contexts have a value for the key, the result is any (random) of them.
func (m mergeContext) Value(key any) any {
	for _, ctx := range m.contexts {
		if value := ctx.Value(key); value != nil {
			return value
		}
	}
	return nil
}
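
A sketch of merging a request context with the application lifetime (hypothetical contexts):

requestCtx, cancelRequest := context.WithTimeout(context.Background(), time.Second)
defer cancelRequest()
appCtx, shutdown := context.WithCancel(context.Background())
defer shutdown()

merged := NewMergeContext([]context.Context{requestCtx, appCtx})
<-merged.Done() // closes when either the request times out or the application shuts down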
82
pkg/util/util.go
Normal file
@@ -0,0 +1,82 @@
package util

import (
	"context"
	"errors"
	"github.com/openHPI/poseidon/pkg/logging"
	"time"
)

var (
	log = logging.GetLogger("util")
	// MaxConnectionRetriesExponential is the default number of retries. It's exported for testing reasons.
	MaxConnectionRetriesExponential = 18
	// InitialWaitingDuration is the default initial duration of waiting after a failed attempt.
	InitialWaitingDuration = time.Second
	ErrRetryContextDone    = errors.New("the retry context is done")
)

func retryExponential(ctx context.Context, sleep time.Duration, f func() error) func() error {
	return func() error {
		err := f()

		if err != nil {
			select {
			case <-ctx.Done():
				err = ErrRetryContextDone
			case <-time.After(sleep):
				sleep *= 2
			}
		}

		return err
	}
}

func retryConstant(ctx context.Context, sleep time.Duration, f func() error) func() error {
	return func() error {
		err := f()

		if err != nil {
			select {
			case <-ctx.Done():
				return ErrRetryContextDone
			case <-time.After(sleep):
			}
		}

		return err
	}
}

func retryAttempts(maxAttempts int, f func() error) (err error) {
	for i := 0; i < maxAttempts; i++ {
		err = f()
		if err == nil {
			return nil
		} else if errors.Is(err, ErrRetryContextDone) {
			return err
		}
		log.WithField("count", i).WithError(err).Debug("retrying after error")
	}
	return err
}

// RetryExponentialWithContext executes the passed function with exponentially increasing time starting with one second
// up to a default maximum number of attempts as long as the context is not done.
func RetryExponentialWithContext(ctx context.Context, f func() error) error {
	return retryAttempts(MaxConnectionRetriesExponential, retryExponential(ctx, InitialWaitingDuration, f))
}

// RetryExponential executes the passed function with exponentially increasing time starting with one second
// up to a default maximum number of attempts.
func RetryExponential(f func() error) error {
	return retryAttempts(MaxConnectionRetriesExponential,
		retryExponential(context.Background(), InitialWaitingDuration, f))
}

// RetryConstantAttemptsWithContext executes the passed function with a constant retry delay of one second
// up to the passed maximum number of attempts as long as the context is not done.
func RetryConstantAttemptsWithContext(attempts int, ctx context.Context, f func() error) error {
	return retryAttempts(attempts, retryConstant(ctx, InitialWaitingDuration, f))
}
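
A usage sketch for the retry helpers (connectToCluster is a hypothetical operation):

err := RetryExponentialWithContext(ctx, func() error {
	// Attempted up to MaxConnectionRetriesExponential times,
	// sleeping 1s, 2s, 4s, ... between failures.
	return connectToCluster() // hypothetical
})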