Fix linting issues and address merge request comments
Committed by Maximilian Pass
Parent: 4b5f0a3eb6
Commit: 32fe47d669
@@ -17,29 +17,30 @@ var (
 	ErrRunnerNotFound = errors.New("no runner found with this id")
 )
 
-type EnvironmentId int
+type EnvironmentID int
 
-func (e EnvironmentId) toString() string {
+func (e EnvironmentID) toString() string {
 	return string(rune(e))
 }
 
-type NomadJobId string
+type NomadJobID string
 
-// Manager keeps track of the used and unused runners of all execution environments in order to provide unused runners to new clients and ensure no runner is used twice.
+// Manager keeps track of the used and unused runners of all execution environments in order to provide unused
+// runners to new clients and ensure no runner is used twice.
 type Manager interface {
 	// RegisterEnvironment adds a new environment that should be managed.
-	RegisterEnvironment(environmentId EnvironmentId, nomadJobId NomadJobId, desiredIdleRunnersCount uint)
+	RegisterEnvironment(id EnvironmentID, nomadJobID NomadJobID, desiredIdleRunnersCount uint)
 
 	// EnvironmentExists returns whether the environment with the given id exists.
-	EnvironmentExists(id EnvironmentId) bool
+	EnvironmentExists(id EnvironmentID) bool
 
 	// Claim returns a new runner.
 	// It makes sure that the runner is not in use yet and returns an error if no runner could be provided.
-	Claim(id EnvironmentId) (Runner, error)
+	Claim(id EnvironmentID) (Runner, error)
 
 	// Get returns the used runner with the given runnerId.
 	// If no runner with the given runnerId is currently used, it returns an error.
-	Get(runnerId string) (Runner, error)
+	Get(runnerID string) (Runner, error)
 
 	// Return signals that the runner is no longer used by the caller and can be claimed by someone else.
 	// The runner is deleted or cleaned up for reuse depending on the used executor.
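
The renamed Manager interface above covers the full runner lifecycle: an environment is registered once, clients Claim an unused runner, look it up again via Get while it is in use, and hand it back with Return. A minimal usage sketch of that flow (the caller function and its error handling are illustrative assumptions, not part of this change):

// runOnce sketches a typical caller of the Manager interface.
func runOnce(manager Manager, id EnvironmentID) error {
	// Claim an unused runner; this fails if the environment is unknown
	// or no runner can be provided.
	runner, err := manager.Claim(id)
	if err != nil {
		return err
	}
	// ... execute work on the claimed runner ...

	// Signal that the runner is no longer used so it can be cleaned up or reused.
	return manager.Return(runner)
}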
@@ -47,12 +48,15 @@ type Manager interface {
 }
 
 type NomadRunnerManager struct {
-	apiClient   nomad.ExecutorApi
+	apiClient   nomad.ExecutorAPI
 	jobs        NomadJobStorage
 	usedRunners Storage
 }
 
-func NewNomadRunnerManager(apiClient nomad.ExecutorApi, ctx context.Context) *NomadRunnerManager {
+// NewNomadRunnerManager creates a new runner manager that keeps track of all runners.
+// It uses the apiClient for all requests and runs a background task to keep the runners in sync with Nomad.
+// If you cancel the context the background synchronization will be stopped.
+func NewNomadRunnerManager(apiClient nomad.ExecutorAPI, ctx context.Context) *NomadRunnerManager {
 	m := &NomadRunnerManager{
 		apiClient,
 		NewLocalNomadJobStorage(),
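
The new doc comment makes explicit that the constructor also starts a background task keeping the runners in sync with Nomad until the passed context is cancelled. A hypothetical wiring sketch (the apiClient parameter and the example environment/job IDs are assumptions):

// setupRunnerManagement shows one way the manager could be wired up.
func setupRunnerManagement(apiClient nomad.ExecutorAPI) (*NomadRunnerManager, context.CancelFunc) {
	// Cancelling this context stops the background synchronization
	// started by NewNomadRunnerManager.
	ctx, cancel := context.WithCancel(context.Background())
	manager := NewNomadRunnerManager(apiClient, ctx)
	// Example registration: keep 5 idle runners for environment 1.
	manager.RegisterEnvironment(EnvironmentID(1), NomadJobID("1"), 5)
	return manager, cancel
}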
@@ -63,33 +67,34 @@ func NewNomadRunnerManager(apiClient nomad.ExecutorApi, ctx context.Context) *No
 }
 
 type NomadJob struct {
-	environmentId           EnvironmentId
-	jobId                   NomadJobId
+	environmentID           EnvironmentID
+	jobID                   NomadJobID
 	idleRunners             Storage
 	desiredIdleRunnersCount uint
 }
 
-func (j *NomadJob) Id() EnvironmentId {
-	return j.environmentId
+func (j *NomadJob) ID() EnvironmentID {
+	return j.environmentID
 }
 
-func (m *NomadRunnerManager) RegisterEnvironment(environmentId EnvironmentId, nomadJobId NomadJobId, desiredIdleRunnersCount uint) {
+func (m *NomadRunnerManager) RegisterEnvironment(environmentID EnvironmentID, nomadJobID NomadJobID,
+	desiredIdleRunnersCount uint) {
 	m.jobs.Add(&NomadJob{
-		environmentId,
-		nomadJobId,
+		environmentID,
+		nomadJobID,
 		NewLocalRunnerStorage(),
 		desiredIdleRunnersCount,
 	})
-	go m.refreshEnvironment(environmentId)
+	go m.refreshEnvironment(environmentID)
 }
 
-func (m *NomadRunnerManager) EnvironmentExists(id EnvironmentId) (ok bool) {
+func (m *NomadRunnerManager) EnvironmentExists(id EnvironmentID) (ok bool) {
 	_, ok = m.jobs.Get(id)
 	return
 }
 
-func (m *NomadRunnerManager) Claim(environmentId EnvironmentId) (Runner, error) {
-	job, ok := m.jobs.Get(environmentId)
+func (m *NomadRunnerManager) Claim(environmentID EnvironmentID) (Runner, error) {
+	job, ok := m.jobs.Get(environmentID)
 	if !ok {
 		return nil, ErrUnknownExecutionEnvironment
 	}
@@ -101,8 +106,8 @@ func (m *NomadRunnerManager) Claim(environmentId EnvironmentId) (Runner, error)
 	return runner, nil
 }
 
-func (m *NomadRunnerManager) Get(runnerId string) (Runner, error) {
-	runner, ok := m.usedRunners.Get(runnerId)
+func (m *NomadRunnerManager) Get(runnerID string) (Runner, error) {
+	runner, ok := m.usedRunners.Get(runnerID)
 	if !ok {
 		return nil, ErrRunnerNotFound
 	}
@@ -119,42 +124,46 @@ func (m *NomadRunnerManager) Return(r Runner) (err error) {
 }
 
 func (m *NomadRunnerManager) updateRunners(ctx context.Context) {
-	onCreate := func(alloc *nomadApi.Allocation) {
-		log.WithField("id", alloc.ID).Debug("Allocation started")
-
-		intJobID, err := strconv.Atoi(alloc.JobID)
-		if err != nil {
-			return
-		}
-
-		job, ok := m.jobs.Get(EnvironmentId(intJobID))
-		if ok {
-			job.idleRunners.Add(NewRunner(alloc.ID))
-		}
+	retries := 0
+	for ctx.Err() == nil {
+		err := m.apiClient.WatchAllocations(ctx, m.onAllocationAdded, m.onAllocationStopped)
+		retries += 1
+		log.WithError(err).Errorf("Stopped updating the runners! Retry %v", retries)
+		<-time.After(time.Second)
 	}
-	onStop := func(alloc *nomadApi.Allocation) {
-		log.WithField("id", alloc.ID).Debug("Allocation stopped")
+}
 
-		intJobID, err := strconv.Atoi(alloc.JobID)
-		if err != nil {
-			return
-		}
+func (m *NomadRunnerManager) onAllocationAdded(alloc *nomadApi.Allocation) {
+	log.WithField("id", alloc.ID).Debug("Allocation started")
 
-		job, ok := m.jobs.Get(EnvironmentId(intJobID))
-		if ok {
-			job.idleRunners.Delete(alloc.ID)
-			m.usedRunners.Delete(alloc.ID)
-		}
-	}
-
-	err := m.apiClient.WatchAllocations(ctx, onCreate, onStop)
+	intJobID, err := strconv.Atoi(alloc.JobID)
 	if err != nil {
-		log.WithError(err).Error("Failed updating runners")
+		return
 	}
+
+	job, ok := m.jobs.Get(EnvironmentID(intJobID))
+	if ok {
+		job.idleRunners.Add(NewNomadAllocation(alloc.ID, m.apiClient))
+	}
 }
 
+func (m *NomadRunnerManager) onAllocationStopped(alloc *nomadApi.Allocation) {
+	log.WithField("id", alloc.ID).Debug("Allocation stopped")
+
+	intJobID, err := strconv.Atoi(alloc.JobID)
+	if err != nil {
+		return
+	}
+
+	m.usedRunners.Delete(alloc.ID)
+	job, ok := m.jobs.Get(EnvironmentID(intJobID))
+	if ok {
+		job.idleRunners.Delete(alloc.ID)
+	}
+}
+
 // Refresh Big ToDo: Improve this function!! State out that it also rescales the job; Provide context to be terminable...
-func (m *NomadRunnerManager) refreshEnvironment(id EnvironmentId) {
+func (m *NomadRunnerManager) refreshEnvironment(id EnvironmentID) {
 	job, ok := m.jobs.Get(id)
 	if !ok {
 		// this environment does not exist
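
This hunk replaces the single WatchAllocations call with inline onCreate/onStop closures by two named callback methods plus a retry loop that keeps watching until the context is cancelled, backing off for a second between attempts. The retry pattern in isolation, as a generic sketch (the function and parameter names are placeholders, not project code):

// watchWithRetry restates the loop used in updateRunners above: keep calling
// a blocking watch function until the context is cancelled, logging each failure.
func watchWithRetry(ctx context.Context, watch func(context.Context) error) {
	retries := 0
	for ctx.Err() == nil {
		err := watch(ctx) // blocks until the watch stream ends or fails
		retries += 1
		log.WithError(err).Errorf("Stopped watching! Retry %v", retries)
		<-time.After(time.Second) // brief back-off before reconnecting
	}
}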
@@ -162,7 +171,7 @@ func (m *NomadRunnerManager) refreshEnvironment(id EnvironmentId) {
 	}
 	var lastJobScaling uint = 0
 	for {
-		runners, err := m.apiClient.LoadRunners(string(job.jobId))
+		runners, err := m.apiClient.LoadRunners(string(job.jobID))
 		if err != nil {
 			log.WithError(err).Printf("Failed fetching runners")
 			break
@@ -173,7 +182,7 @@ func (m *NomadRunnerManager) refreshEnvironment(id EnvironmentId) {
 
 			job.idleRunners.Add(r)
 		}
-		jobScale, err := m.apiClient.JobScale(string(job.jobId))
+		jobScale, err := m.apiClient.JobScale(string(job.jobID))
 		if err != nil {
 			log.WithError(err).Printf("Failed get allocation count")
 			break
@@ -186,7 +195,7 @@ func (m *NomadRunnerManager) refreshEnvironment(id EnvironmentId) {
 		time.Sleep(50 * time.Millisecond)
 		if requiredRunnerCount != lastJobScaling {
 			log.Printf("Set job scaling %d", requiredRunnerCount)
-			err = m.apiClient.SetJobScale(string(job.jobId), requiredRunnerCount, "Runner Requested")
+			err = m.apiClient.SetJobScale(string(job.jobID), requiredRunnerCount, "Runner Requested")
 			if err != nil {
 				log.WithError(err).Printf("Failed set allocation scaling")
 				continue
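
As the ToDo comment notes, refreshEnvironment not only refills the idle pool but also rescales the Nomad job: it loads the runners for job.jobID, reads the current job scale, and calls SetJobScale only when the required runner count differs from the value it last applied. That change-detection step in isolation, as a hedged sketch (the helper name is an assumption, and the computation of requiredRunnerCount as well as the exact update of lastJobScaling sit outside the visible hunks):

// applyScalingIfChanged mirrors the lastJobScaling check above: it only pushes
// a new scale to Nomad when the target actually changed.
func (m *NomadRunnerManager) applyScalingIfChanged(jobID NomadJobID, required uint, last *uint) {
	if required == *last {
		return // avoid redundant scaling requests
	}
	log.Printf("Set job scaling %d", required)
	if err := m.apiClient.SetJobScale(string(jobID), required, "Runner Requested"); err != nil {
		log.WithError(err).Printf("Failed set allocation scaling")
		return
	}
	*last = required // assumption: remember the applied value only after a successful call
}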
@@ -196,19 +205,19 @@ func (m *NomadRunnerManager) refreshEnvironment(id EnvironmentId) {
 	}
 }
 
-func (m *NomadRunnerManager) unusedRunners(environmentId EnvironmentId, fetchedRunnerIds []string) (newRunners []Runner) {
+func (m *NomadRunnerManager) unusedRunners(environmentId EnvironmentID, fetchedRunnerIds []string) (newRunners []Runner) {
 	newRunners = make([]Runner, 0)
 	job, ok := m.jobs.Get(environmentId)
 	if !ok {
 		// the environment does not exist, so it won't have any unused runners
 		return
 	}
-	for _, runnerId := range fetchedRunnerIds {
-		_, ok := m.usedRunners.Get(runnerId)
+	for _, runnerID := range fetchedRunnerIds {
+		_, ok := m.usedRunners.Get(runnerID)
 		if !ok {
-			_, ok = job.idleRunners.Get(runnerId)
+			_, ok = job.idleRunners.Get(runnerID)
 			if !ok {
-				newRunners = append(newRunners, NewNomadAllocation(runnerId, m.apiClient))
+				newRunners = append(newRunners, NewNomadAllocation(runnerID, m.apiClient))
 			}
 		}
 	}
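
unusedRunners reduces the runner IDs reported by Nomad to those that are neither currently in use nor already in the environment's idle pool and wraps each remaining ID in a new NomadAllocation. The underlying set difference over plain string sets, as an illustrative sketch (function and parameter names are assumptions):

// untrackedIDs returns the IDs from fetched that appear in neither used nor idle,
// i.e. the runners that are not yet tracked, mirroring unusedRunners above.
func untrackedIDs(fetched []string, used, idle map[string]bool) []string {
	result := make([]string, 0)
	for _, id := range fetched {
		if !used[id] && !idle[id] {
			result = append(result, id)
		}
	}
	return result
}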