#110 Add periodical monitoring events.
@@ -15,6 +15,7 @@ import (
 	"github.com/openHPI/poseidon/pkg/storage"
 	"strconv"
 	"sync"
+	"time"
 )

 const (
@@ -37,7 +38,7 @@ func NewNomadEnvironment(apiClient nomad.ExecutorAPI, jobHCL string) (*NomadEnvi
 	}

 	return &NomadEnvironment{apiClient, jobHCL, job, storage.NewMonitoredLocalStorage[runner.Runner](
-		monitoring.MeasurementIdleRunnerNomad, runner.MonitorRunnersEnvironmentID)}, nil
+		monitoring.MeasurementIdleRunnerNomad, runner.MonitorRunnersEnvironmentID, time.Minute)}, nil
 }

 func NewNomadEnvironmentFromRequest(
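The call sites in this commit suggest that storage.NewMonitoredLocalStorage gained a third argument, presumably a time.Duration controlling how often an additional, periodic monitoring event is written for the stored objects, and that the callback now receives a storage.EventType instead of the former isDeletion flag. The following sketch only makes the changed call sites readable; the parameter name additionalEvents, the MonitoredStorage type, and every EventType value except Creation are guesses and not taken from this diff.

// Hypothetical sketch of the storage API assumed by the call sites above.
package storage

import (
	"time"

	"github.com/influxdata/influxdb-client-go/v2/api/write"
)

// EventType replaces the former "isDeletion bool" callback parameter.
// Only Creation is visible in this diff; the other values are assumptions.
type EventType int

const (
	Creation EventType = iota
	Deletion
	Periodically // assumed: passed on every periodic tick
)

// WriteCallback decorates the monitoring point written for one stored object.
// The parameter order matches the callbacks changed in this commit.
type WriteCallback[T any] func(p *write.Point, object T, eventType EventType)

// MonitoredStorage is a stand-in for the real local storage type.
type MonitoredStorage[T any] struct {
	measurement string
	callback    WriteCallback[T]
	interval    time.Duration
}

// NewMonitoredLocalStorage presumably takes the new interval as its last
// argument; a zero duration (as later passed for the environments storage)
// would then disable the periodic events.
func NewMonitoredLocalStorage[T any](measurement string, callback WriteCallback[T],
	additionalEvents time.Duration) *MonitoredStorage[T] {
	return &MonitoredStorage[T]{measurement: measurement, callback: callback, interval: additionalEvents}
}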
@@ -12,6 +12,7 @@ import (
 	"github.com/openHPI/poseidon/pkg/monitoring"
 	"github.com/openHPI/poseidon/pkg/storage"
 	"os"
+	"time"
 )

 // templateEnvironmentJobHCL holds our default job in HCL format.
@@ -155,7 +156,7 @@ func newNomadEnvironmetFromJob(job *nomadApi.Job, apiClient nomad.ExecutorAPI) *
 		jobHCL: templateEnvironmentJobHCL,
 		job: job,
 		idleRunners: storage.NewMonitoredLocalStorage[runner.Runner](
-			monitoring.MeasurementIdleRunnerNomad, runner.MonitorRunnersEnvironmentID),
+			monitoring.MeasurementIdleRunnerNomad, runner.MonitorRunnersEnvironmentID, time.Minute),
 	}
 }

@@ -7,6 +7,7 @@ import (
 	"github.com/openHPI/poseidon/pkg/dto"
 	"github.com/openHPI/poseidon/pkg/monitoring"
 	"github.com/openHPI/poseidon/pkg/storage"
+	"time"
 )

 var ErrNullObject = errors.New("functionality not available for the null object")
@@ -24,13 +25,14 @@ type AbstractManager struct {
 func NewAbstractManager() *AbstractManager {
 	return &AbstractManager{
 		environments: storage.NewMonitoredLocalStorage[ExecutionEnvironment](
-			monitoring.MeasurementEnvironments, monitorEnvironmentData),
-		usedRunners: storage.NewMonitoredLocalStorage[Runner](monitoring.MeasurementUsedRunner, MonitorRunnersEnvironmentID),
+			monitoring.MeasurementEnvironments, monitorEnvironmentData, 0),
+		usedRunners: storage.NewMonitoredLocalStorage[Runner](
+			monitoring.MeasurementUsedRunner, MonitorRunnersEnvironmentID, time.Hour),
 	}
 }

 // MonitorRunnersEnvironmentID passes the id of the environment e into the monitoring Point p.
-func MonitorRunnersEnvironmentID(p *write.Point, e Runner, _ bool) {
+func MonitorRunnersEnvironmentID(p *write.Point, e Runner, _ storage.EventType) {
 	if e != nil {
 		p.AddTag(monitoring.InfluxKeyEnvironmentID, e.Environment().ToString())
 	}
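The intervals now in use differ per storage: the environments storage passes 0, the used runners time.Hour, and the idle runners and executions time.Minute. Presumably a zero duration disables the periodic events while a positive duration emits one monitoring event per tick. The following stand-alone sketch shows how such periodic emission can be driven with a time.Ticker; it is a conceptual illustration under that assumption, not the poseidon implementation, and all names in it are made up.

// Sketch: fire a reporting callback at a fixed interval until stopped.
package main

import (
	"fmt"
	"sync"
	"time"
)

// periodicReporter calls report at the given interval until stop is closed.
// A zero or negative interval disables periodic reporting entirely.
func periodicReporter(interval time.Duration, report func(), stop <-chan struct{}) {
	if interval <= 0 {
		return
	}
	ticker := time.NewTicker(interval)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				report()
			case <-stop:
				return
			}
		}
	}()
}

func main() {
	var (
		mu    sync.Mutex
		count int
	)
	stop := make(chan struct{})
	// Report the current number of stored objects, comparable to an
	// "idle runners" gauge; 200ms stands in for time.Minute for the demo.
	periodicReporter(200*time.Millisecond, func() {
		mu.Lock()
		fmt.Println("periodic monitoring event, stored objects:", count)
		mu.Unlock()
	}, stop)

	mu.Lock()
	count = 3 // simulate adding objects to the storage
	mu.Unlock()

	time.Sleep(700 * time.Millisecond)
	close(stop)
}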
@@ -13,6 +13,7 @@ import (
 	"github.com/openHPI/poseidon/pkg/monitoring"
 	"github.com/openHPI/poseidon/pkg/storage"
 	"io"
+	"time"
 )

 var ErrWrongMessageType = errors.New("received message that is not a text message")
@@ -53,7 +54,7 @@ func NewAWSFunctionWorkload(
 		environment: environment,
 	}
 	workload.executions = storage.NewMonitoredLocalStorage[*dto.ExecutionRequest](
-		monitoring.MeasurementExecutionsAWS, monitorExecutionsRunnerID(environment.ID(), workload.id))
+		monitoring.MeasurementExecutionsAWS, monitorExecutionsRunnerID(environment.ID(), workload.id), time.Minute)
 	workload.InactivityTimer = NewInactivityTimer(workload, func(_ Runner) error {
 		return workload.Destroy()
 	})
@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"github.com/influxdata/influxdb-client-go/v2/api/write"
 	"github.com/openHPI/poseidon/pkg/dto"
+	"github.com/openHPI/poseidon/pkg/storage"
 	"strconv"
 )

@@ -51,8 +52,8 @@ type ExecutionEnvironment interface {
 }

 // monitorEnvironmentData passes the configuration of the environment e into the monitoring Point p.
-func monitorEnvironmentData(p *write.Point, e ExecutionEnvironment, isDeletion bool) {
-	if !isDeletion && e != nil {
+func monitorEnvironmentData(p *write.Point, e ExecutionEnvironment, eventType storage.EventType) {
+	if eventType == storage.Creation && e != nil {
 		p.AddTag("image", e.Image())
 		p.AddTag("cpu_limit", strconv.Itoa(int(e.CPULimit())))
 		p.AddTag("memory_limit", strconv.Itoa(int(e.MemoryLimit())))
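monitorEnvironmentData (and monitorExecutionsRunnerID below) now attach their tags only when eventType == storage.Creation, so deletion and periodic events are written without the per-object tags. A small, self-contained illustration of that gating follows; it uses a local stand-in for storage.EventType, since this diff does not show its definition, and an example measurement name rather than the real monitoring constant.

// Sketch: a creation-gated callback applied to points for three event kinds.
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb-client-go/v2/api/write"
)

// eventType is a local stand-in for storage.EventType; only Creation is
// confirmed by the diff, the other values are assumptions.
type eventType int

const (
	creation eventType = iota
	deletion
	periodic
)

// tagOnCreation mirrors the pattern of monitorEnvironmentData: static
// configuration is attached to the point only when the object is created.
func tagOnCreation(p *write.Point, image string, event eventType) {
	if event == creation {
		p.AddTag("image", image)
	}
}

func main() {
	for _, event := range []eventType{creation, deletion, periodic} {
		p := write.NewPoint("example_environments", nil, map[string]interface{}{"count": 1}, time.Now())
		tagOnCreation(p, "python:3.8", event)
		fmt.Printf("event %d -> %d tag(s)\n", event, len(p.TagList()))
	}
}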
@@ -56,7 +56,7 @@ func NewNomadJob(id string, portMappings []nomadApi.PortMapping,
 		onDestroy: onDestroy,
 	}
 	job.executions = storage.NewMonitoredLocalStorage[*dto.ExecutionRequest](
-		monitoring.MeasurementExecutionsNomad, monitorExecutionsRunnerID(job.Environment(), id))
+		monitoring.MeasurementExecutionsNomad, monitorExecutionsRunnerID(job.Environment(), id), time.Minute)
 	job.InactivityTimer = NewInactivityTimer(job, onDestroy)
 	return job
 }
@@ -65,8 +65,8 @@ func FromContext(ctx context.Context) (Runner, bool) {

 // monitorExecutionsRunnerID passes the id of the runner executing the execution into the monitoring Point p.
 func monitorExecutionsRunnerID(env dto.EnvironmentID, runnerID string) storage.WriteCallback[*dto.ExecutionRequest] {
-	return func(p *write.Point, e *dto.ExecutionRequest, isDeletion bool) {
-		if !isDeletion && e != nil {
+	return func(p *write.Point, e *dto.ExecutionRequest, eventType storage.EventType) {
+		if eventType == storage.Creation && e != nil {
 			p.AddTag(monitoring.InfluxKeyRunnerID, runnerID)
 			p.AddTag(monitoring.InfluxKeyEnvironmentID, env.ToString())
 		}
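monitorExecutionsRunnerID remains a factory: it returns a storage.WriteCallback with the environment and runner IDs already bound, so the storage only ever sees the generic callback. A compact sketch of that pre-binding pattern follows, with the callback shape simplified to a tag map and a local event enum because the concrete storage types are not part of this diff; all identifiers and values in it are made up.

// Sketch: a callback factory that closes over IDs, as monitorExecutionsRunnerID does.
package main

import "fmt"

// eventKind is a simplified local stand-in for storage.EventType.
type eventKind int

const (
	eventCreation eventKind = iota
	eventDeletion
	eventPeriodic
)

// writeCallback is a simplified stand-in for storage.WriteCallback[T].
type writeCallback[T any] func(tags map[string]string, object T, event eventKind)

// monitorIDs pre-binds environment and runner IDs; the returned callback
// tags only creation events, mirroring the pattern in the diff above.
func monitorIDs[T any](environmentID, runnerID string) writeCallback[T] {
	return func(tags map[string]string, object T, event eventKind) {
		if event == eventCreation {
			tags["environment_id"] = environmentID
			tags["runner_id"] = runnerID
		}
	}
}

func main() {
	callback := monitorIDs[string]("14", "runner-abc")
	tags := map[string]string{}
	callback(tags, "execution request", eventCreation)
	fmt.Println(tags) // map[environment_id:14 runner_id:runner-abc]
}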