diff --git a/internal/armada/configuration/constants.go b/internal/armada/configuration/constants.go index b5bb9be9b88..86bdc38f4a8 100644 --- a/internal/armada/configuration/constants.go +++ b/internal/armada/configuration/constants.go @@ -7,16 +7,11 @@ const ( // GangCardinalityAnnotation All jobs in a gang must specify the total number of jobs in the gang via this annotation. // The cardinality should be expressed as a positive integer, e.g., "3". GangCardinalityAnnotation = "armadaproject.io/gangCardinality" - // GangMinimumCardinalityAnnotation All jobs in a gang must specify the minimum size for the gang to be schedulable via this annotation. - // The cardinality should be expressed as a positive integer, e.g., "3". - GangMinimumCardinalityAnnotation = "armadaproject.io/gangMinimumCardinality" // The jobs that make up a gang may be constrained to be scheduled across a set of uniform nodes. // Specifically, if provided, all gang jobs are scheduled onto nodes for which the value of the provided label is equal. // Used to ensure, e.g., that all gang jobs are scheduled onto the same cluster or rack. GangNodeUniformityLabelAnnotation = "armadaproject.io/gangNodeUniformityLabel" // GangNumJobsScheduledAnnotation is set by the scheduler and indicates how many gang jobs were scheduled. - // For example, a gang composed of 4 jobs may only have a subset be scheduled if GangMinimumCardinalityAnnotation < 4. - GangNumJobsScheduledAnnotation = "armadaproject.io/numGangJobsScheduled" // FailFastAnnotation, if set to true, ensures Armada does not re-schedule jobs that fail to start. // Instead, the job the pod is part of fails immediately. FailFastAnnotation = "armadaproject.io/failFast" @@ -25,9 +20,7 @@ const ( var schedulingAnnotations = map[string]bool{ GangIdAnnotation: true, GangCardinalityAnnotation: true, - GangMinimumCardinalityAnnotation: true, GangNodeUniformityLabelAnnotation: true, - GangNumJobsScheduledAnnotation: true, FailFastAnnotation: true, } diff --git a/internal/armada/submit/validation/submit_request.go b/internal/armada/submit/validation/submit_request.go index 748f84626bd..04591aa0981 100644 --- a/internal/armada/submit/validation/submit_request.go +++ b/internal/armada/submit/validation/submit_request.go @@ -293,12 +293,6 @@ func validateGangs(request *api.JobSubmitRequest, _ configuration.SubmissionConf actual.Id, expected.Cardinality, actual.Cardinality, ) } - if expected.MinimumCardinality != actual.MinimumCardinality { - return errors.Errorf( - "inconsistent gang minimum cardinality in gang %s: expected %d but got %d", - actual.Id, expected.MinimumCardinality, actual.MinimumCardinality, - ) - } if expected.PriorityClassName != actual.PriorityClassName { return errors.Errorf( "inconsistent PriorityClassName in gang %s: expected %s but got %s", diff --git a/internal/armada/submit/validation/submit_request_test.go b/internal/armada/submit/validation/submit_request_test.go index 7d16e80d1b6..396cdbfe41a 100644 --- a/internal/armada/submit/validation/submit_request_test.go +++ b/internal/armada/submit/validation/submit_request_test.go @@ -141,18 +141,6 @@ func TestValidateGangs(t *testing.T) { }, expectSuccess: true, }, - "complete gang job of cardinality 2 with minimum cardinality of 1": { - jobRequests: []*api.JobSubmitRequestItem{ - { - Annotations: map[string]string{ - configuration.GangIdAnnotation: "foo", - configuration.GangCardinalityAnnotation: strconv.Itoa(2), - configuration.GangMinimumCardinalityAnnotation: strconv.Itoa(1), - }, - }, - }, - expectSuccess: true, - 
}, "empty gangId": { jobRequests: []*api.JobSubmitRequestItem{ { diff --git a/internal/scheduler/context/context.go b/internal/scheduler/context/context.go index 5c8c4f6cbca..5a72f866024 100644 --- a/internal/scheduler/context/context.go +++ b/internal/scheduler/context/context.go @@ -214,18 +214,16 @@ func (sctx *SchedulingContext) ReportString(verbosity int32) string { func (sctx *SchedulingContext) AddGangSchedulingContext(gctx *GangSchedulingContext) (bool, error) { allJobsEvictedInThisRound := true - numberOfSuccessfulJobs := 0 + allJobsSuccessful := true for _, jctx := range gctx.JobSchedulingContexts { evictedInThisRound, err := sctx.AddJobSchedulingContext(jctx) if err != nil { return false, err } allJobsEvictedInThisRound = allJobsEvictedInThisRound && evictedInThisRound - if jctx.IsSuccessful() { - numberOfSuccessfulJobs++ - } + allJobsSuccessful = allJobsSuccessful && jctx.IsSuccessful() } - if numberOfSuccessfulJobs >= gctx.GangInfo.MinimumCardinality && !allJobsEvictedInThisRound { + if allJobsSuccessful && !allJobsEvictedInThisRound { sctx.NumScheduledGangs++ } return allJobsEvictedInThisRound, nil @@ -358,13 +356,6 @@ type QueueSchedulingContext struct { EvictedJobsById map[string]bool } -func GetSchedulingContextFromQueueSchedulingContext(qctx *QueueSchedulingContext) *SchedulingContext { - if qctx == nil { - return nil - } - return qctx.SchedulingContext -} - func (qctx *QueueSchedulingContext) String() string { return qctx.ReportString(0) } @@ -629,8 +620,6 @@ type JobSchedulingContext struct { // GangInfo holds all the information that is necessary to schedule a gang, // such as the lower and upper bounds on its size. GangInfo - // If set, indicates this job should be failed back to the client when the gang is scheduled. - ShouldFail bool } func (jctx *JobSchedulingContext) String() string { @@ -689,24 +678,22 @@ func (jctx *JobSchedulingContext) GetNodeSelector(key string) (string, bool) { } type GangInfo struct { - Id string - Cardinality int - MinimumCardinality int - PriorityClassName string - NodeUniformity string + Id string + Cardinality int + PriorityClassName string + NodeUniformity string } // EmptyGangInfo returns a GangInfo for a job that is not in a gang. func EmptyGangInfo(job interfaces.MinimalJob) GangInfo { return GangInfo{ // An Id of "" indicates that this job is not in a gang; we set - // Cardinality and MinimumCardinality (as well as the other fields, + // Cardinality (as well as the other fields, // which all make sense in this context) accordingly. - Id: "", - Cardinality: 1, - MinimumCardinality: 1, - PriorityClassName: job.PriorityClassName(), - NodeUniformity: job.Annotations()[configuration.GangNodeUniformityLabelAnnotation], + Id: "", + Cardinality: 1, + PriorityClassName: job.PriorityClassName(), + NodeUniformity: job.Annotations()[configuration.GangNodeUniformityLabelAnnotation], } } @@ -735,25 +722,8 @@ func GangInfoFromLegacySchedulerJob(job interfaces.MinimalJob) (GangInfo, error) return gangInfo, errors.Errorf("gang cardinality %d is non-positive", gangCardinality) } - gangMinimumCardinalityString, ok := annotations[configuration.GangMinimumCardinalityAnnotation] - if !ok { - // If it is not set, use gangCardinality as the minimum gang size. 
- gangMinimumCardinalityString = gangCardinalityString - } - gangMinimumCardinality, err := strconv.Atoi(gangMinimumCardinalityString) - if err != nil { - return gangInfo, errors.WithStack(err) - } - if gangMinimumCardinality <= 0 { - return gangInfo, errors.Errorf("gang minimum cardinality %d is non-positive", gangMinimumCardinality) - } - if gangMinimumCardinality > gangCardinality { - return gangInfo, errors.Errorf("gang minimum cardinality %d is greater than gang cardinality %d", gangMinimumCardinality, gangCardinality) - } - gangInfo.Id = gangId gangInfo.Cardinality = gangCardinality - gangInfo.MinimumCardinality = gangMinimumCardinality return gangInfo, nil } @@ -776,7 +746,6 @@ func JobSchedulingContextFromJob(job *jobdb.Job) *JobSchedulingContext { Job: job, PodRequirements: job.PodRequirements(), GangInfo: gangInfo, - ShouldFail: false, } } diff --git a/internal/scheduler/gang_scheduler.go b/internal/scheduler/gang_scheduler.go index bfbfcdcbfce..5fb28d49fbe 100644 --- a/internal/scheduler/gang_scheduler.go +++ b/internal/scheduler/gang_scheduler.go @@ -226,18 +226,9 @@ func (sch *GangScheduler) tryScheduleGangWithTxn(_ *armadacontext.Context, txn * } else { unschedulableReason = "job does not fit on any node" } - } else { - // When a gang schedules successfully, update state for failed jobs if they exist. - for _, jctx := range gctx.JobSchedulingContexts { - if jctx.ShouldFail { - jctx.Fail("job does not fit on any node") - } - } } - return } - return } diff --git a/internal/scheduler/gang_scheduler_test.go b/internal/scheduler/gang_scheduler_test.go index 689be00b291..3835849b0a4 100644 --- a/internal/scheduler/gang_scheduler_test.go +++ b/internal/scheduler/gang_scheduler_test.go @@ -69,32 +69,6 @@ func TestGangScheduler(t *testing.T) { ExpectedCumulativeScheduledJobs: []int{0}, ExpectedRuntimeGangCardinality: []int{0}, }, - "simple success where min cardinality is met": { - SchedulingConfig: testfixtures.TestSchedulingConfig(), - Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), - Gangs: [][]*jobdb.Job{ - testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 32, - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 40), - ), - }, - ExpectedScheduledIndices: testfixtures.IntRange(0, 0), - ExpectedCumulativeScheduledJobs: []int{32}, - ExpectedRuntimeGangCardinality: []int{32}, - }, - "simple failure where min cardinality is not met": { - SchedulingConfig: testfixtures.TestSchedulingConfig(), - Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), - Gangs: [][]*jobdb.Job{ - testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 33, - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 40), - ), - }, - ExpectedScheduledIndices: nil, - ExpectedCumulativeScheduledJobs: []int{0}, - ExpectedRuntimeGangCardinality: []int{0}, - }, "one success and one failure": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), @@ -106,20 +80,6 @@ func TestGangScheduler(t *testing.T) { ExpectedCumulativeScheduledJobs: []int{32, 32}, ExpectedRuntimeGangCardinality: []int{32, 0}, }, - "one success and one failure using min cardinality": { - SchedulingConfig: testfixtures.TestSchedulingConfig(), - Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), - Gangs: [][]*jobdb.Job{ - testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 32, - testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 33), - ), - testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", 
testfixtures.PriorityClass0, 1)), - }, - ExpectedScheduledIndices: testfixtures.IntRange(0, 0), - ExpectedCumulativeScheduledJobs: []int{32, 32}, - ExpectedRuntimeGangCardinality: []int{32, 0}, - }, "multiple nodes": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), @@ -400,11 +360,10 @@ func TestGangScheduler(t *testing.T) { )..., ), Gangs: [][]*jobdb.Job{ - testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 2, + testfixtures.WithGangAnnotationsJobs( testfixtures.WithNodeUniformityLabelAnnotationJobs( "my-cool-node-uniformity", - testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 4), + testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 3), ), ), }, @@ -413,43 +372,6 @@ func TestGangScheduler(t *testing.T) { ExpectedNodeUniformity: map[int]string{0: "b"}, ExpectedRuntimeGangCardinality: []int{3}, }, - "NodeUniformityLabel PreemptedAtPriority tiebreak": { - SchedulingConfig: testfixtures.WithIndexedNodeLabelsConfig( - []string{"my-cool-node-uniformity"}, - testfixtures.TestSchedulingConfig(), - ), - Nodes: append( - testfixtures.WithUsedResourcesNodes( - 1, - schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{"cpu": resource.MustParse("1")}}, - testfixtures.WithLabelsNodes( - map[string]string{"my-cool-node-uniformity": "a"}, - testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), - ), - ), - testfixtures.WithUsedResourcesNodes( - 0, - schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{"cpu": resource.MustParse("1")}}, - testfixtures.WithLabelsNodes( - map[string]string{"my-cool-node-uniformity": "b"}, - testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), - ), - )..., - ), - Gangs: [][]*jobdb.Job{ - testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 2, - testfixtures.WithNodeUniformityLabelAnnotationJobs( - "my-cool-node-uniformity", - testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass2, 4), - ), - ), - }, - ExpectedScheduledIndices: []int{0}, - ExpectedCumulativeScheduledJobs: []int{2}, - ExpectedNodeUniformity: map[int]string{0: "b"}, - ExpectedRuntimeGangCardinality: []int{2}, - }, "AwayNodeTypes": { SchedulingConfig: func() configuration.SchedulingConfig { config := testfixtures.TestSchedulingConfig() @@ -691,16 +613,6 @@ func TestGangScheduler(t *testing.T) { } } - // Verify any excess jobs that failed have the correct state set - for _, jctx := range jctxs { - if jctx.ShouldFail { - if jctx.PodSchedulingContext != nil { - require.Equal(t, "", jctx.PodSchedulingContext.NodeId) - } - require.Equal(t, "job does not fit on any node", jctx.UnschedulableReason) - } - } - // Verify accounting scheduledGangs++ require.Equal(t, scheduledGangs, sch.schedulingContext.NumScheduledGangs) diff --git a/internal/scheduler/nodedb/nodedb.go b/internal/scheduler/nodedb/nodedb.go index 0b2ddaa2e5e..bd380169850 100644 --- a/internal/scheduler/nodedb/nodedb.go +++ b/internal/scheduler/nodedb/nodedb.go @@ -444,43 +444,34 @@ func NodeJobDiff(txnA, txnB *memdb.Txn) (map[string]*internaltypes.Node, map[str func (nodeDb *NodeDb) ScheduleManyWithTxn(txn *memdb.Txn, gctx *schedulercontext.GangSchedulingContext) (bool, error) { // Attempt to schedule pods one by one in a transaction. - numScheduled := 0 for _, jctx := range gctx.JobSchedulingContexts { // In general, we may attempt to schedule a gang multiple times (in // order to find the best fit for this gang); clear out any remnants of // previous attempts. 
 		jctx.UnschedulableReason = ""
-		jctx.ShouldFail = false
 
 		node, err := nodeDb.SelectNodeForJobWithTxn(txn, jctx)
 		if err != nil {
 			return false, err
 		}
 
-		if node == nil {
-			// Indicates that when the min cardinality is met, we should fail this job back to the client.
-			jctx.ShouldFail = true
-			continue
-		}
-
-		// If we found a node for this pod, bind it and continue to the next pod.
-		if node, err := nodeDb.bindJobToNode(node, jctx.Job, jctx.PodSchedulingContext.ScheduledAtPriority); err != nil {
-			return false, err
-		} else {
-			if err := nodeDb.UpsertWithTxn(txn, node); err != nil {
+		if node != nil {
+			// If we found a node for this pod, bind it and continue to the next pod.
+			if node, err := nodeDb.bindJobToNode(node, jctx.Job, jctx.PodSchedulingContext.ScheduledAtPriority); err != nil {
 				return false, err
+			} else {
+				if err := nodeDb.UpsertWithTxn(txn, node); err != nil {
+					return false, err
+				}
 			}
-		}
 
-		// Once a job is scheduled, it should no longer be considered for preemption.
-		if err := deleteEvictedJobSchedulingContextIfExistsWithTxn(txn, jctx.JobId); err != nil {
-			return false, err
+			// Once a job is scheduled, it should no longer be considered for preemption.
+			if err := deleteEvictedJobSchedulingContextIfExistsWithTxn(txn, jctx.JobId); err != nil {
+				return false, err
+			}
+		} else {
+			return false, nil
 		}
-
-		numScheduled++
-	}
-	if numScheduled < gctx.GangInfo.MinimumCardinality {
-		return false, nil
 	}
 	return true, nil
 }
diff --git a/internal/scheduler/nodedb/nodedb_test.go b/internal/scheduler/nodedb/nodedb_test.go
index e4181d9dc7a..ee1b20d0cc3 100644
--- a/internal/scheduler/nodedb/nodedb_test.go
+++ b/internal/scheduler/nodedb/nodedb_test.go
@@ -486,19 +486,13 @@ func TestScheduleMany(t *testing.T) {
 		// For each group, whether we expect scheduling to succeed.
 		ExpectSuccess []bool
 	}{
-		// Attempts to schedule 32 jobs with a minimum gang cardinality of 1 job. All jobs get scheduled.
+		// Attempts to schedule 32 jobs. All jobs get scheduled.
 		"simple success": {
 			Nodes:         testfixtures.N32CpuNodes(1, testfixtures.TestPriorities),
 			Jobs:          [][]*jobdb.Job{gangSuccess},
 			ExpectSuccess: []bool{true},
 		},
-		// Attempts to schedule 33 jobs with a minimum gang cardinality of 32 jobs. One fails, but the overall result is a success.
-		"simple success with min cardinality": {
-			Nodes:         testfixtures.N32CpuNodes(1, testfixtures.TestPriorities),
-			Jobs:          [][]*jobdb.Job{testfixtures.WithGangAnnotationsAndMinCardinalityJobs(32, testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 33))},
-			ExpectSuccess: []bool{true},
-		},
-		// Attempts to schedule 33 jobs with a minimum gang cardinality of 33. The overall result fails.
+		// Attempts to schedule 33 jobs. The overall result fails.
"simple failure with min cardinality": { Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: [][]*jobdb.Job{gangFailure}, @@ -549,9 +543,7 @@ func TestScheduleMany(t *testing.T) { for _, jctx := range jctxs { pctx := jctx.PodSchedulingContext require.NotNil(t, pctx) - if !jctx.ShouldFail { - assert.NotEqual(t, "", pctx.NodeId) - } + assert.NotEqual(t, "", pctx.NodeId) } } }) diff --git a/internal/scheduler/preempting_queue_scheduler.go b/internal/scheduler/preempting_queue_scheduler.go index b7c7b46f8f9..12f21ce370e 100644 --- a/internal/scheduler/preempting_queue_scheduler.go +++ b/internal/scheduler/preempting_queue_scheduler.go @@ -269,7 +269,6 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx *armadacontext.Context) (*Sche return &SchedulerResult{ PreemptedJobs: preemptedJobs, ScheduledJobs: scheduledJobs, - FailedJobs: schedulerResult.FailedJobs, NodeIdByJobId: sch.nodeIdByJobId, AdditionalAnnotationsByJobId: additionalAnnotationsByJobId, SchedulingContexts: []*schedulercontext.SchedulingContext{sch.schedulingContext}, @@ -415,9 +414,8 @@ func (sch *PreemptingQueueScheduler) setEvictedGangCardinality(evictorResult *Ev continue } - // Override cardinality and min cardinality with the number of evicted jobs in this gang. + // Override cardinality with the number of evicted jobs in this gang. jctx.GangInfo.Cardinality = len(sch.jobIdsByGangId[gangId]) - jctx.GangInfo.MinimumCardinality = jctx.GangInfo.Cardinality } return } diff --git a/internal/scheduler/preempting_queue_scheduler_test.go b/internal/scheduler/preempting_queue_scheduler_test.go index cdeaaea04e6..999635c3f51 100644 --- a/internal/scheduler/preempting_queue_scheduler_test.go +++ b/internal/scheduler/preempting_queue_scheduler_test.go @@ -572,36 +572,6 @@ func TestPreemptingQueueScheduler(t *testing.T) { "C": 1, }, }, - "gang preemption with partial gang": { - SchedulingConfig: testfixtures.TestSchedulingConfig(), - Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), - Rounds: []SchedulingRound{ - { - // Schedule a gang across two nodes. - JobsByQueue: map[string][]*jobdb.Job{ - "A": testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 1, - testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 2), - ), - }, - ExpectedScheduledIndices: map[string][]int{ - "A": testfixtures.IntRange(0, 1), - }, - }, - { - // Unbind one of the jobs in the gang (simulating that job terminating) - // and test that the remaining job isn't preempted. - IndicesToUnbind: map[string]map[int][]int{ - "A": { - 0: testfixtures.IntRange(0, 0), - }, - }, - }, - }, - PriorityFactorByQueue: map[string]float64{ - "A": 1, - }, - }, "gang preemption with NodeEvictionProbability 0": { SchedulingConfig: testfixtures.WithNodeEvictionProbabilityConfig( 0.0, // To test the gang evictor, we need to disable stochastic eviction. 
diff --git a/internal/scheduler/queue_scheduler.go b/internal/scheduler/queue_scheduler.go index 2978b15f98b..7ce04a30ab4 100644 --- a/internal/scheduler/queue_scheduler.go +++ b/internal/scheduler/queue_scheduler.go @@ -4,11 +4,8 @@ import ( "container/heap" "fmt" "reflect" - "strconv" "time" - "github.com/armadaproject/armada/internal/armada/configuration" - "github.com/pkg/errors" "github.com/armadaproject/armada/internal/common/armadacontext" @@ -64,7 +61,7 @@ func (sch *QueueScheduler) SkipUnsuccessfulSchedulingKeyCheck() { func (sch *QueueScheduler) Schedule(ctx *armadacontext.Context) (*SchedulerResult, error) { var scheduledJobs []*schedulercontext.JobSchedulingContext - var failedJobs []*schedulercontext.JobSchedulingContext + nodeIdByJobId := make(map[string]string) additionalAnnotationsByJobId := make(map[string]map[string]string) ctx.Info("Looping through candidate gangs...") @@ -106,14 +103,10 @@ func (sch *QueueScheduler) Schedule(ctx *armadacontext.Context) (*SchedulerResul if ok, unschedulableReason, err := sch.gangScheduler.Schedule(ctx, gctx); err != nil { return nil, err } else if ok { - numScheduled := gctx.Fit().NumScheduled for _, jctx := range gctx.JobSchedulingContexts { if pctx := jctx.PodSchedulingContext; pctx.IsSuccessful() { scheduledJobs = append(scheduledJobs, jctx) nodeIdByJobId[jctx.JobId] = pctx.NodeId - additionalAnnotationsByJobId[jctx.JobId] = map[string]string{configuration.GangNumJobsScheduledAnnotation: strconv.Itoa(numScheduled)} - } else if jctx.ShouldFail { - failedJobs = append(failedJobs, jctx) } } } else if schedulerconstraints.IsTerminalUnschedulableReason(unschedulableReason) { @@ -154,7 +147,6 @@ func (sch *QueueScheduler) Schedule(ctx *armadacontext.Context) (*SchedulerResul return &SchedulerResult{ PreemptedJobs: nil, ScheduledJobs: scheduledJobs, - FailedJobs: failedJobs, NodeIdByJobId: nodeIdByJobId, AdditionalAnnotationsByJobId: additionalAnnotationsByJobId, SchedulingContexts: []*schedulercontext.SchedulingContext{sch.schedulingContext}, diff --git a/internal/scheduler/queue_scheduler_test.go b/internal/scheduler/queue_scheduler_test.go index 87fa2cbad08..1c94bae0066 100644 --- a/internal/scheduler/queue_scheduler_test.go +++ b/internal/scheduler/queue_scheduler_test.go @@ -420,16 +420,14 @@ func TestQueueScheduler(t *testing.T) { Nodes: testfixtures.N32CpuNodes(3, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate( testfixtures.WithAnnotationsJobs(map[string]string{ - armadaconfiguration.GangIdAnnotation: "my-gang", - armadaconfiguration.GangCardinalityAnnotation: "2", - armadaconfiguration.GangMinimumCardinalityAnnotation: "1", + armadaconfiguration.GangIdAnnotation: "my-gang", + armadaconfiguration.GangCardinalityAnnotation: "2", }, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.WithAnnotationsJobs(map[string]string{ - armadaconfiguration.GangIdAnnotation: "my-gang", - armadaconfiguration.GangCardinalityAnnotation: "2", - armadaconfiguration.GangMinimumCardinalityAnnotation: "1", + armadaconfiguration.GangIdAnnotation: "my-gang", + armadaconfiguration.GangCardinalityAnnotation: "2", }, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), ), @@ -448,16 +446,14 @@ func TestQueueScheduler(t *testing.T) { Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate( testfixtures.WithAnnotationsJobs(map[string]string{ - armadaconfiguration.GangIdAnnotation: "my-gang", - 
armadaconfiguration.GangCardinalityAnnotation: "2", - armadaconfiguration.GangMinimumCardinalityAnnotation: "2", + armadaconfiguration.GangIdAnnotation: "my-gang", + armadaconfiguration.GangCardinalityAnnotation: "2", }, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.WithAnnotationsJobs(map[string]string{ - armadaconfiguration.GangIdAnnotation: "my-gang", - armadaconfiguration.GangCardinalityAnnotation: "2", - armadaconfiguration.GangMinimumCardinalityAnnotation: "2", + armadaconfiguration.GangIdAnnotation: "my-gang", + armadaconfiguration.GangCardinalityAnnotation: "2", }, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), ), diff --git a/internal/scheduler/result.go b/internal/scheduler/result.go index 2ac4bddb32f..9c42e107810 100644 --- a/internal/scheduler/result.go +++ b/internal/scheduler/result.go @@ -11,9 +11,6 @@ type SchedulerResult struct { PreemptedJobs []*schedulercontext.JobSchedulingContext // Queued jobs that should be scheduled. ScheduledJobs []*schedulercontext.JobSchedulingContext - // Queued jobs that could not be scheduled. - // This is used to fail jobs that could not schedule above `minimumGangCardinality`. - FailedJobs []*schedulercontext.JobSchedulingContext // For each preempted job, maps the job id to the id of the node on which the job was running. // For each scheduled job, maps the job id to the id of the node on which the job should be scheduled. NodeIdByJobId map[string]string @@ -43,12 +40,3 @@ func ScheduledJobsFromSchedulerResult(sr *SchedulerResult) []*jobdb.Job { } return rv } - -// FailedJobsFromSchedulerResult returns the slice of scheduled jobs in the result. -func FailedJobsFromSchedulerResult(sr *SchedulerResult) []*jobdb.Job { - rv := make([]*jobdb.Job, len(sr.FailedJobs)) - for i, jctx := range sr.FailedJobs { - rv[i] = jctx.Job - } - return rv -} diff --git a/internal/scheduler/scheduler.go b/internal/scheduler/scheduler.go index 33578da615d..101bc350a30 100644 --- a/internal/scheduler/scheduler.go +++ b/internal/scheduler/scheduler.go @@ -404,11 +404,6 @@ func (s *Scheduler) updateMetricsFromSchedulerResult(ctx *armadacontext.Context, return err } } - for _, jctx := range overallSchedulerResult.FailedJobs { - if err := s.schedulerMetrics.UpdateFailed(ctx, jctx.Job, nil); err != nil { - return err - } - } // UpdatePreempted is called from within UpdateFailed if the job has a JobRunPreemptedError. // This is to make sure that preemption is counted only when the job is actually preempted, not when the scheduler decides to preempt it. return nil @@ -517,7 +512,7 @@ func (s *Scheduler) eventsFromSchedulerResult(result *SchedulerResult) ([]*armad // EventsFromSchedulerResult generates necessary EventSequences from the provided SchedulerResult. 
func EventsFromSchedulerResult(result *SchedulerResult, time time.Time) ([]*armadaevents.EventSequence, error) { - eventSequences := make([]*armadaevents.EventSequence, 0, len(result.PreemptedJobs)+len(result.ScheduledJobs)+len(result.FailedJobs)) + eventSequences := make([]*armadaevents.EventSequence, 0, len(result.PreemptedJobs)+len(result.ScheduledJobs)) eventSequences, err := AppendEventSequencesFromPreemptedJobs(eventSequences, PreemptedJobsFromSchedulerResult(result), time) if err != nil { return nil, err @@ -526,10 +521,6 @@ func EventsFromSchedulerResult(result *SchedulerResult, time time.Time) ([]*arma if err != nil { return nil, err } - eventSequences, err = AppendEventSequencesFromUnschedulableJobs(eventSequences, FailedJobsFromSchedulerResult(result), time) - if err != nil { - return nil, err - } return eventSequences, nil } @@ -643,32 +634,6 @@ func AppendEventSequencesFromScheduledJobs(eventSequences []*armadaevents.EventS return eventSequences, nil } -func AppendEventSequencesFromUnschedulableJobs(eventSequences []*armadaevents.EventSequence, jobs []*jobdb.Job, time time.Time) ([]*armadaevents.EventSequence, error) { - for _, job := range jobs { - jobId, err := armadaevents.ProtoUuidFromUlidString(job.Id()) - if err != nil { - return nil, err - } - gangJobUnschedulableError := &armadaevents.Error{ - Terminal: true, - Reason: &armadaevents.Error_GangJobUnschedulable{GangJobUnschedulable: &armadaevents.GangJobUnschedulable{Message: "Job did not meet the minimum gang cardinality"}}, - } - eventSequences = append(eventSequences, &armadaevents.EventSequence{ - Queue: job.Queue(), - JobSetName: job.Jobset(), - Events: []*armadaevents.EventSequence_Event{ - { - Created: &time, - Event: &armadaevents.EventSequence_Event_JobErrors{ - JobErrors: &armadaevents.JobErrors{JobId: jobId, Errors: []*armadaevents.Error{gangJobUnschedulableError}}, - }, - }, - }, - }) - } - return eventSequences, nil -} - // generateUpdateMessages generates EventSequences representing the state changes on updated jobs. // If there are no state changes then an empty slice will be returned. 
func (s *Scheduler) generateUpdateMessages(ctx *armadacontext.Context, txn *jobdb.Txn, updatedJobs []*jobdb.Job, jobRunErrors map[uuid.UUID]*armadaevents.Error) ([]*armadaevents.EventSequence, error) { diff --git a/internal/scheduler/scheduler_test.go b/internal/scheduler/scheduler_test.go index ca2768c524d..2a7c633654f 100644 --- a/internal/scheduler/scheduler_test.go +++ b/internal/scheduler/scheduler_test.go @@ -357,7 +357,6 @@ func TestScheduler_TestCycle(t *testing.T) { expectedJobRunLeased []string // ids of jobs we expect to have produced leased messages expectedJobRunErrors []string // ids of jobs we expect to have produced jobRunErrors messages expectedJobErrors []string // ids of jobs we expect to have produced jobErrors messages - expectedJobsToFail []string // ids of jobs we expect to fail without having failed the overall scheduling cycle expectedJobsRunsToPreempt []string // ids of jobs we expect to be preempted by the scheduler expectedJobRunPreempted []string // ids of jobs we expect to have produced jobRunPreempted messages expectedJobRunCancelled []string // ids of jobs we expect to have produced jobRunPreempted messages @@ -472,12 +471,6 @@ func TestScheduler_TestCycle(t *testing.T) { expectedQueued: []string{queuedJob.Id()}, expectedQueuedVersion: queuedJob.QueuedVersion(), }, - "FailedJobs in scheduler result will publish appropriate messages": { - initialJobs: []*jobdb.Job{queuedJob}, - expectedJobErrors: []string{queuedJob.Id()}, - expectedJobsToFail: []string{queuedJob.Id()}, - expectedTerminal: []string{queuedJob.Id()}, - }, "No updates to an already leased job": { initialJobs: []*jobdb.Job{leasedJob}, expectedLeased: []string{leasedJob.Id()}, @@ -892,7 +885,6 @@ func TestScheduler_TestCycle(t *testing.T) { schedulingAlgo := &testSchedulingAlgo{ jobsToSchedule: tc.expectedJobRunLeased, jobsToPreempt: tc.expectedJobsRunsToPreempt, - jobsToFail: tc.expectedJobsToFail, shouldError: tc.scheduleError, } publisher := &testPublisher{shouldError: tc.publishError} @@ -1424,7 +1416,6 @@ type testSchedulingAlgo struct { numberOfScheduleCalls int jobsToPreempt []string jobsToSchedule []string - jobsToFail []string shouldError bool // Set to true to indicate that preemption/scheduling/failure decisions have been persisted. // Until persisted is set to true, the same jobs are preempted/scheduled/failed on every call. 
@@ -1442,7 +1433,6 @@ func (t *testSchedulingAlgo) Schedule(_ *armadacontext.Context, txn *jobdb.Txn) } preemptedJobs := make([]*jobdb.Job, 0, len(t.jobsToPreempt)) scheduledJobs := make([]*jobdb.Job, 0, len(t.jobsToSchedule)) - failedJobs := make([]*jobdb.Job, 0, len(t.jobsToFail)) for _, id := range t.jobsToPreempt { job := txn.GetById(id) if job == nil { @@ -1479,27 +1469,13 @@ func (t *testSchedulingAlgo) Schedule(_ *armadacontext.Context, txn *jobdb.Txn) ) scheduledJobs = append(scheduledJobs, job) } - for _, id := range t.jobsToFail { - job := txn.GetById(id) - if job == nil { - return nil, errors.Errorf("was asked to lease %s but job does not exist", id) - } - if !job.Queued() { - return nil, errors.Errorf("was asked to lease %s but job is not queued", job.Id()) - } - job = job.WithQueued(false).WithFailed(true) - failedJobs = append(failedJobs, job) - } if err := txn.Upsert(preemptedJobs); err != nil { return nil, err } if err := txn.Upsert(scheduledJobs); err != nil { return nil, err } - if err := txn.Upsert(failedJobs); err != nil { - return nil, err - } - return NewSchedulerResultForTest(preemptedJobs, scheduledJobs, failedJobs, nil), nil + return NewSchedulerResultForTest(preemptedJobs, scheduledJobs, nil), nil } func (t *testSchedulingAlgo) Persist() { @@ -1514,13 +1490,11 @@ func (t *testSchedulingAlgo) Persist() { func NewSchedulerResultForTest[S ~[]T, T *jobdb.Job]( preemptedJobs S, scheduledJobs S, - failedJobs S, nodeIdByJobId map[string]string, ) *SchedulerResult { return &SchedulerResult{ PreemptedJobs: schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, preemptedJobs), ScheduledJobs: schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, scheduledJobs), - FailedJobs: schedulercontext.JobSchedulingContextsFromJobs(testfixtures.TestPriorityClasses, failedJobs), NodeIdByJobId: nodeIdByJobId, } } @@ -1799,7 +1773,6 @@ func TestCycleConsistency(t *testing.T) { // Controls which jobs the scheduler should schedule/preempt/fail. idsOfJobsToSchedule []string idsOfJobsToPreempt []string - idsOfJobsToFail []string // Expected jobDbs for scenario 1, i.e., the baseline scenario. // Only compared against if not nil. @@ -2053,44 +2026,6 @@ func TestCycleConsistency(t *testing.T) { }, }, }, - "Fail a new job": { - firstSchedulerDbUpdate: schedulerDbUpdate{ - jobUpdates: []*database.Job{ - queuedJobA, - }, - }, - idsOfJobsToFail: []string{queuedJobA.JobID}, - expectedJobDbCycleOne: []*jobdb.Job{}, - expectedJobDbCycleTwo: []*jobdb.Job{}, - expectedJobDbCycleThree: []*jobdb.Job{}, - expectedEventSequencesCycleThree: []*armadaevents.EventSequence{ - { - Queue: queuedJobA.Queue, - JobSetName: queuedJobA.JobSet, - Events: []*armadaevents.EventSequence_Event{ - { - Created: pointerFromValue(time.Unix(0, 0)), - Event: &armadaevents.EventSequence_Event_JobErrors{ - JobErrors: &armadaevents.JobErrors{ - JobId: armadaevents.MustProtoUuidFromUlidString(queuedJobA.JobID), - Errors: []*armadaevents.Error{ - { - Terminal: true, - Reason: &armadaevents.Error_GangJobUnschedulable{ - GangJobUnschedulable: &armadaevents.GangJobUnschedulable{ - // This message is somewhat arbitrary here. 
- Message: "Job did not meet the minimum gang cardinality", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, "Schedule a job that then succeeds": { firstSchedulerDbUpdate: schedulerDbUpdate{ jobUpdates: []*database.Job{ @@ -2480,7 +2415,6 @@ func TestCycleConsistency(t *testing.T) { &testSchedulingAlgo{ jobsToSchedule: tc.idsOfJobsToSchedule, jobsToPreempt: tc.idsOfJobsToPreempt, - jobsToFail: tc.idsOfJobsToFail, }, leader.NewStandaloneLeaderController(), newTestPublisher(), diff --git a/internal/scheduler/scheduling_algo.go b/internal/scheduler/scheduling_algo.go index b4f22778542..9962116fe57 100644 --- a/internal/scheduler/scheduling_algo.go +++ b/internal/scheduler/scheduling_algo.go @@ -204,21 +204,17 @@ func (l *FairSchedulingAlgo) Schedule( preemptedJobs := PreemptedJobsFromSchedulerResult(schedulerResult) scheduledJobs := ScheduledJobsFromSchedulerResult(schedulerResult) - failedJobs := FailedJobsFromSchedulerResult(schedulerResult) + if err := txn.Upsert(preemptedJobs); err != nil { return nil, err } if err := txn.Upsert(scheduledJobs); err != nil { return nil, err } - if err := txn.Upsert(failedJobs); err != nil { - return nil, err - } // Aggregate changes across executors. overallSchedulerResult.PreemptedJobs = append(overallSchedulerResult.PreemptedJobs, schedulerResult.PreemptedJobs...) overallSchedulerResult.ScheduledJobs = append(overallSchedulerResult.ScheduledJobs, schedulerResult.ScheduledJobs...) - overallSchedulerResult.FailedJobs = append(overallSchedulerResult.FailedJobs, schedulerResult.FailedJobs...) overallSchedulerResult.SchedulingContexts = append(overallSchedulerResult.SchedulingContexts, schedulerResult.SchedulingContexts...) maps.Copy(overallSchedulerResult.NodeIdByJobId, schedulerResult.NodeIdByJobId) maps.Copy(overallSchedulerResult.AdditionalAnnotationsByJobId, schedulerResult.AdditionalAnnotationsByJobId) @@ -508,10 +504,6 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( WithQueued(false). WithNewRun(node.GetExecutor(), node.GetId(), node.GetName(), priority) } - for i, jctx := range result.FailedJobs { - jobDbJob := jctx.Job - result.FailedJobs[i].Job = jobDbJob.WithQueued(false).WithFailed(true) - } return result, sctx, nil } diff --git a/internal/scheduler/scheduling_algo_test.go b/internal/scheduler/scheduling_algo_test.go index ecbaab4a56a..a67e0fcb7ea 100644 --- a/internal/scheduler/scheduling_algo_test.go +++ b/internal/scheduler/scheduling_algo_test.go @@ -12,7 +12,6 @@ import ( "golang.org/x/exp/slices" "k8s.io/apimachinery/pkg/util/clock" - armadaconfiguration "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/stringinterner" @@ -49,9 +48,6 @@ func TestSchedule(t *testing.T) { // Indices of queued jobs expected to be scheduled. 
expectedScheduledIndices []int - - // Count of jobs expected to fail - expectedFailedJobCount int }{ "scheduling": { schedulingConfig: testfixtures.TestSchedulingConfig(), @@ -301,17 +297,6 @@ func TestSchedule(t *testing.T) { queuedJobs: testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 2)), expectedScheduledIndices: []int{0, 1}, }, - "gang scheduling successful with some jobs failing to schedule above min cardinality": { - schedulingConfig: testfixtures.TestSchedulingConfig(), - executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []*api.Queue{{Name: "A", PriorityFactor: 0.01}}, - queuedJobs: testfixtures.WithGangAnnotationsAndMinCardinalityJobs( - 2, - testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 10), - ), - expectedScheduledIndices: []int{0, 1}, - expectedFailedJobCount: 8, - }, "not scheduling a gang that does not fit on any executor": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{ @@ -479,14 +464,6 @@ func TestSchedule(t *testing.T) { } else { assert.Equal(t, tc.expectedScheduledIndices, actualScheduledIndices) } - // Sanity check: we've set `GangNumJobsScheduledAnnotation` for all scheduled jobs. - for _, job := range scheduledJobs { - assert.Contains(t, schedulerResult.AdditionalAnnotationsByJobId[job.Id()], armadaconfiguration.GangNumJobsScheduledAnnotation) - } - - // Check that we failed the correct number of excess jobs when a gang schedules >= minimum cardinality - failedJobs := FailedJobsFromSchedulerResult(schedulerResult) - assert.Equal(t, tc.expectedFailedJobCount, len(failedJobs)) // Check that preempted jobs are marked as such consistently. for _, job := range preemptedJobs { @@ -506,13 +483,6 @@ func TestSchedule(t *testing.T) { assert.NotEmpty(t, dbRun.NodeName()) } - // Check that failed jobs are marked as such consistently. - for _, job := range failedJobs { - dbJob := txn.GetById(job.Id()) - assert.True(t, dbJob.Failed()) - assert.False(t, dbJob.Queued()) - } - // Check that jobDb was updated correctly. // TODO: Check that there are no unexpected jobs in the jobDb. for _, job := range preemptedJobs { @@ -523,10 +493,6 @@ func TestSchedule(t *testing.T) { dbJob := txn.GetById(job.Id()) assert.True(t, job.Equal(dbJob), "expected %v but got %v", job, dbJob) } - for _, job := range failedJobs { - dbJob := txn.GetById(job.Id()) - assert.True(t, job.Equal(dbJob), "expected %v but got %v", job, dbJob) - } }) } } diff --git a/internal/scheduler/simulator/simulator.go b/internal/scheduler/simulator/simulator.go index 247707c5ad1..7cd401b3d9f 100644 --- a/internal/scheduler/simulator/simulator.go +++ b/internal/scheduler/simulator/simulator.go @@ -510,7 +510,6 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { // Sort jobs to ensure deterministic event ordering. 
preemptedJobs := scheduler.PreemptedJobsFromSchedulerResult(result) scheduledJobs := slices.Clone(result.ScheduledJobs) - failedJobs := scheduler.FailedJobsFromSchedulerResult(result) lessJob := func(a, b *jobdb.Job) int { if a.Queue() < b.Queue() { return -1 @@ -528,7 +527,6 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { slices.SortFunc(scheduledJobs, func(a, b *schedulercontext.JobSchedulingContext) int { return lessJob(a.Job, b.Job) }) - slices.SortFunc(failedJobs, lessJob) for i, job := range preemptedJobs { if run := job.LatestRun(); run != nil { job = job.WithUpdatedRun(run.WithFailed(true)) @@ -553,21 +551,12 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { scheduledJobs[i].Job = job.WithQueued(false).WithNewRun(node.GetExecutor(), node.GetId(), node.GetName(), priority) } } - for i, job := range failedJobs { - if run := job.LatestRun(); run != nil { - job = job.WithUpdatedRun(run.WithFailed(true)) - } - failedJobs[i] = job.WithQueued(false).WithFailed(true) - } if err := txn.Upsert(preemptedJobs); err != nil { return err } if err := txn.Upsert(armadaslices.Map(scheduledJobs, func(jctx *schedulercontext.JobSchedulingContext) *jobdb.Job { return jctx.Job })); err != nil { return err } - if err := txn.Upsert(failedJobs); err != nil { - return err - } // Update allocation. s.allocationByPoolAndQueueAndPriorityClass[pool.Name] = sctx.AllocatedByQueueAndPriority() @@ -582,10 +571,6 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { if err != nil { return err } - eventSequences, err = scheduler.AppendEventSequencesFromUnschedulableJobs(eventSequences, failedJobs, s.time) - if err != nil { - return err - } // Update event timestamps to be consistent with simulated time. t := s.time @@ -597,7 +582,7 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { // If nothing changed, we're in steady state and can safely skip scheduling until something external has changed. // Do this only if a non-zero amount of time has passed. - if !s.time.Equal(time.Time{}) && len(result.ScheduledJobs) == 0 && len(result.PreemptedJobs) == 0 && len(result.FailedJobs) == 0 { + if !s.time.Equal(time.Time{}) && len(result.ScheduledJobs) == 0 && len(result.PreemptedJobs) == 0 { s.shouldSchedule = false } } diff --git a/internal/scheduler/submitcheck.go b/internal/scheduler/submitcheck.go index b720af91362..33486e91296 100644 --- a/internal/scheduler/submitcheck.go +++ b/internal/scheduler/submitcheck.go @@ -191,14 +191,6 @@ func (srv *SubmitChecker) getIndividualSchedulingResult(jctx *schedulercontext.J // - Node Uniformity Label (although it will work if this is per cluster) // - Gang jobs that will use more than the allowed capacity limit func (srv *SubmitChecker) getSchedulingResult(gctx *schedulercontext.GangSchedulingContext, state *executorState) schedulingResult { - // Skip submit checks if this batch contains less than the min cardinality jobs. - // Reason: - // - We need to support submitting gang jobs across batches and allow for gang jobs to queue until min cardinality is satisfied. - // - We cannot verify if min cardinality jobs are schedulable unless we are given at least that many in a single batch. - // - A side effect of this is that users can submit jobs in gangs that skip this check and are never schedulable, which will be handled via queue-ttl. 
- if len(gctx.JobSchedulingContexts) < gctx.GangInfo.MinimumCardinality { - return schedulingResult{isSchedulable: true, reason: ""} - } sucessfulPools := map[string]bool{} var sb strings.Builder for id, ex := range state.executorsById { @@ -262,7 +254,7 @@ func (srv *SubmitChecker) getSchedulingResult(gctx *schedulercontext.GangSchedul sb.WriteString( fmt.Sprintf( ": %d out of %d pods schedulable (minCardinality %d)\n", - numSuccessfullyScheduled, len(gctx.JobSchedulingContexts), gctx.GangInfo.MinimumCardinality, + numSuccessfullyScheduled, len(gctx.JobSchedulingContexts), gctx.GangInfo.Cardinality, ), ) } diff --git a/internal/scheduler/testfixtures/testfixtures.go b/internal/scheduler/testfixtures/testfixtures.go index 7530cedebca..9bffb1a2cb0 100644 --- a/internal/scheduler/testfixtures/testfixtures.go +++ b/internal/scheduler/testfixtures/testfixtures.go @@ -360,21 +360,7 @@ func WithGangAnnotationsJobs(jobs []*jobdb.Job) []*jobdb.Job { gangId := uuid.NewString() gangCardinality := fmt.Sprintf("%d", len(jobs)) return WithAnnotationsJobs( - map[string]string{configuration.GangIdAnnotation: gangId, configuration.GangCardinalityAnnotation: gangCardinality, configuration.GangMinimumCardinalityAnnotation: gangCardinality}, - jobs, - ) -} - -func WithGangAnnotationsAndMinCardinalityJobs(minimumCardinality int, jobs []*jobdb.Job) []*jobdb.Job { - gangId := uuid.NewString() - gangCardinality := fmt.Sprintf("%d", len(jobs)) - gangMinCardinality := fmt.Sprintf("%d", minimumCardinality) - return WithAnnotationsJobs( - map[string]string{ - configuration.GangIdAnnotation: gangId, - configuration.GangCardinalityAnnotation: gangCardinality, - configuration.GangMinimumCardinalityAnnotation: gangMinCardinality, - }, + map[string]string{configuration.GangIdAnnotation: gangId, configuration.GangCardinalityAnnotation: gangCardinality}, jobs, ) } @@ -894,24 +880,8 @@ func TestNSubmitMsgGang(n int) []*armadaevents.SubmitJob { for i := 0; i < n; i++ { job := Test1CoreSubmitMsg() job.MainObject.ObjectMeta.Annotations = map[string]string{ - configuration.GangIdAnnotation: gangId, - configuration.GangCardinalityAnnotation: fmt.Sprintf("%d", n), - configuration.GangMinimumCardinalityAnnotation: fmt.Sprintf("%d", n), - } - gang[i] = job - } - return gang -} - -func TestNSubmitMsgGangLessThanMinCardinality(n int) []*armadaevents.SubmitJob { - gangId := uuid.NewString() - gang := make([]*armadaevents.SubmitJob, n) - for i := 0; i < n; i++ { - job := Test1CoreSubmitMsg() - job.MainObject.ObjectMeta.Annotations = map[string]string{ - configuration.GangIdAnnotation: gangId, - configuration.GangCardinalityAnnotation: fmt.Sprintf("%d", n+2), - configuration.GangMinimumCardinalityAnnotation: fmt.Sprintf("%d", n+1), + configuration.GangIdAnnotation: gangId, + configuration.GangCardinalityAnnotation: fmt.Sprintf("%d", n), } gang[i] = job }
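
For reference, a minimal client-side sketch of how a gang is annotated after this change: only the gang id and the (full) cardinality annotations remain, and the scheduler treats the gang as all-or-nothing (ScheduleManyWithTxn now returns false unless every job in the gang finds a node). This is a sketch, not part of the patch: the gangCardinality key matches the constant kept in constants.go; the gangId key value is an assumption, since its definition sits above the hunk shown here; the helper itself is hypothetical and only mirrors testfixtures.WithGangAnnotationsJobs.

package main

import (
	"fmt"
	"strconv"
)

const (
	// Copied from internal/armada/configuration/constants.go.
	gangCardinalityAnnotation = "armadaproject.io/gangCardinality"
	// Assumed value; the GangIdAnnotation definition is outside the hunks in this diff.
	gangIdAnnotation = "armadaproject.io/gangId"
)

// gangAnnotations builds the annotations every job in a gang must carry once
// minimum cardinality is removed: the gang schedules only if all `cardinality`
// jobs fit, otherwise none are placed.
func gangAnnotations(gangId string, cardinality int) (map[string]string, error) {
	// Mirrors the check in GangInfoFromLegacySchedulerJob: cardinality must be a positive integer.
	if cardinality <= 0 {
		return nil, fmt.Errorf("gang cardinality %d is non-positive", cardinality)
	}
	return map[string]string{
		gangIdAnnotation:          gangId,
		gangCardinalityAnnotation: strconv.Itoa(cardinality),
	}, nil
}

func main() {
	annotations, err := gangAnnotations("my-gang", 3)
	if err != nil {
		panic(err)
	}
	fmt.Println(annotations) // map[armadaproject.io/gangCardinality:3 armadaproject.io/gangId:my-gang]
}

Submitting all gang members with these annotations in a single batch also keeps the SubmitChecker gang check meaningful, since the per-batch minimum-cardinality skip in getSchedulingResult is removed by this diff as well.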