Refactor MemoryLeakTestSuite

as we identified two issues where the goroutine count before the test differs from the count after it.

1) In rare cases, a Go-runtime-specific goroutine seemed to appear before the test. To avoid counting it, we introduced a short timeout before looking up the goroutines (see the SetupTest sketch after this list).
Another solution might be to do the lookup twice and check whether the counts match.

2) In rare cases, a goroutine that periodically monitors some storage unexpectedly got closed. As we could not identify the cause, we removed the leaking goroutines by cleaning up properly (see the TearDownTest sketch below).
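
To make the first fix concrete, here is a minimal sketch of how such a suite could take its baseline goroutine count only after a short wait. The package name, the sleep duration, and the baseline field name are assumptions for illustration; only ExpectedGoroutineIncrease appears in the diff below.

// Hypothetical sketch of the fix for issue 1: wait briefly so that rare,
// short-lived runtime goroutines settle before the baseline count is taken.
package memoryleak // assumed package name

import (
	"runtime"
	"time"

	"github.com/stretchr/testify/suite"
)

type MemoryLeakTestSuite struct {
	suite.Suite
	goroutineCountBefore      int // assumed name for the baseline count
	ExpectedGoroutineIncrease int // tests declare intentionally remaining goroutines here
}

func (s *MemoryLeakTestSuite) SetupTest() {
	s.ExpectedGoroutineIncrease = 0
	time.Sleep(10 * time.Millisecond) // the "short timeout"; duration is an assumption
	s.goroutineCountBefore = runtime.NumGoroutine()
}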
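
For the second fix, a matching teardown (again only an assumed shape, not the repository's exact implementation) would expect each test to stop the goroutines it started, for example by cancelling the monitoring goroutine's context, and then compare the remaining count against the baseline plus the declared increase.

// Hypothetical counterpart to the SetupTest sketch above: after cleanup,
// only the explicitly declared goroutine increase may remain.
func (s *MemoryLeakTestSuite) TearDownTest() {
	// Issue 2: periodic monitoring goroutines must be stopped by the test itself,
	// e.g. via context cancellation, before this comparison runs.
	time.Sleep(10 * time.Millisecond) // assumed grace period for stopped goroutines to exit
	goroutineCountAfter := runtime.NumGoroutine()
	s.Equal(s.goroutineCountBefore+s.ExpectedGoroutineIncrease, goroutineCountAfter,
		"goroutine count after the test must equal the baseline plus the declared increase")
}
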
Maximilian Paß
2024-02-27 21:42:18 +01:00
parent 80b8c27924
commit ab938bfc22
6 changed files with 56 additions and 33 deletions


@@ -528,7 +528,7 @@ func (s *MainTestSuite) TestNomadRunnerManager_Load() {
jobID := tests.DefaultRunnerID
job.ID = &jobID
job.Name = &jobID
- s.ExpectedGoroutingIncrease++ // We dont care about destroying the created runner.
+ s.ExpectedGoroutineIncrease++ // We dont care about destroying the created runner.
call.Return([]*nomadApi.Job{job}, nil)
runnerManager.Load()
@@ -544,7 +544,7 @@ func (s *MainTestSuite) TestNomadRunnerManager_Load() {
configTaskGroup := nomad.FindTaskGroup(job, nomad.ConfigTaskGroupName)
s.Require().NotNil(configTaskGroup)
configTaskGroup.Meta[nomad.ConfigMetaUsedKey] = nomad.ConfigMetaUsedValue
- s.ExpectedGoroutingIncrease++ // We don't care about destroying the created runner.
+ s.ExpectedGoroutineIncrease++ // We don't care about destroying the created runner.
call.Return([]*nomadApi.Job{job}, nil)
s.Require().Zero(runnerManager.usedRunners.Length())