Remove off-by-one with needed runners
Earlier we used a channel to store the runners. To make the environment refresh block, we scheduled one additional runner so that the buffered channel would fill up. As we no longer use the channel, the additional runner is no longer needed. Furthermore, the extra runner led to weird race conditions in tests when comparing the actual runner count to the desired one.
This commit is contained in:

committed by
Tobias Kantusch

parent
3d7b7e1761
commit
b32e9c2a67
@ -138,7 +138,7 @@ func (m *NomadRunnerManager) refreshEnvironment(id EnvironmentId) {
|
|||||||
log.WithError(err).Printf("Failed get allocation count")
|
log.WithError(err).Printf("Failed get allocation count")
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
additionallyNeededRunners := job.desiredIdleRunnersCount - uint(job.idleRunners.Length()) + 1
|
additionallyNeededRunners := job.desiredIdleRunnersCount - uint(job.idleRunners.Length())
|
||||||
requiredRunnerCount := jobScale
|
requiredRunnerCount := jobScale
|
||||||
if additionallyNeededRunners > 0 {
|
if additionallyNeededRunners > 0 {
|
||||||
requiredRunnerCount += additionallyNeededRunners
|
requiredRunnerCount += additionallyNeededRunners
|
||||||
|
@ -168,7 +168,7 @@ func (s *ManagerTestSuite) TestRefreshScalesJob() {
|
|||||||
// use one runner to necessitate rescaling
|
// use one runner to necessitate rescaling
|
||||||
_, _ = s.nomadRunnerManager.Claim(defaultEnvironmentId)
|
_, _ = s.nomadRunnerManager.Claim(defaultEnvironmentId)
|
||||||
s.waitForRunnerRefresh()
|
s.waitForRunnerRefresh()
|
||||||
s.apiMock.AssertCalled(s.T(), "SetJobScale", tests.DefaultJobId, defaultDesiredRunnersCount+1, "Runner Requested")
|
s.apiMock.AssertCalled(s.T(), "SetJobScale", tests.DefaultJobId, defaultDesiredRunnersCount, "Runner Requested")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ManagerTestSuite) TestRefreshAddsRunnerToPool() {
|
func (s *ManagerTestSuite) TestRefreshAddsRunnerToPool() {
|
||||||
|
Reference in New Issue
Block a user