From c674f0ad2f04134d0afa45c5b160fb06c37f334b Mon Sep 17 00:00:00 2001
From: Jan Chaloupka
Date: Thu, 7 Apr 2022 15:30:37 +0200
Subject: [PATCH] Flip RemovePodsViolatingTopologySpreadConstraint into plugin

---
 .../strategies/validation/strategyparams.go   |  71 --------
 .../validation/strategyparams_test.go         |  79 ---------
 .../topologyspreadconstraint.go               | 122 +++++++++----
 .../topologyspreadconstraint_test.go          | 164 ++++++-------
 pkg/framework/types.go                        |  14 ++
 5 files changed, 158 insertions(+), 292 deletions(-)
 delete mode 100644 pkg/descheduler/strategies/validation/strategyparams.go
 delete mode 100644 pkg/descheduler/strategies/validation/strategyparams_test.go
 rename pkg/{descheduler/strategies => framework/plugins/removepodsviolatingtopologyspreadconstraint}/topologyspreadconstraint.go (80%)
 rename pkg/{descheduler/strategies => framework/plugins/removepodsviolatingtopologyspreadconstraint}/topologyspreadconstraint_test.go (88%)

diff --git a/pkg/descheduler/strategies/validation/strategyparams.go b/pkg/descheduler/strategies/validation/strategyparams.go
deleted file mode 100644
index 33446e6fa1..0000000000
--- a/pkg/descheduler/strategies/validation/strategyparams.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package validation
-
-import (
-	"context"
-	"fmt"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/util/sets"
-	clientset "k8s.io/client-go/kubernetes"
-
-	"sigs.k8s.io/descheduler/pkg/api"
-	"sigs.k8s.io/descheduler/pkg/utils"
-)
-
-// ValidatedStrategyParams contains validated common strategy parameters
-type ValidatedStrategyParams struct {
-	ThresholdPriority  int32
-	IncludedNamespaces sets.String
-	ExcludedNamespaces sets.String
-	LabelSelector      labels.Selector
-	NodeFit            bool
-}
-
-func DefaultValidatedStrategyParams() ValidatedStrategyParams {
-	return ValidatedStrategyParams{ThresholdPriority: utils.SystemCriticalPriority}
-}
-
-func ValidateAndParseStrategyParams(
-	ctx context.Context,
-	client clientset.Interface,
-	params *api.StrategyParameters,
-) (*ValidatedStrategyParams, error) {
-	if params == nil {
-		defaultValidatedStrategyParams := DefaultValidatedStrategyParams()
-		return &defaultValidatedStrategyParams, nil
-	}
-
-	// At most one of include/exclude can be set
-	var includedNamespaces, excludedNamespaces sets.String
-	if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
-		return nil, fmt.Errorf("only one of Include/Exclude namespaces can be set")
-	}
-	if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
-		return nil, fmt.Errorf("only one of ThresholdPriority and thresholdPriorityClassName can be set")
-	}
-
-	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, params)
-	if err != nil {
-		return nil, fmt.Errorf("failed to get threshold priority from strategy's params: %+v", err)
-	}
-	if params.Namespaces != nil {
-		includedNamespaces = sets.NewString(params.Namespaces.Include...)
-		excludedNamespaces = sets.NewString(params.Namespaces.Exclude...)
-	}
-	var selector labels.Selector
-	if params.LabelSelector != nil {
-		selector, err = metav1.LabelSelectorAsSelector(params.LabelSelector)
-		if err != nil {
-			return nil, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
-		}
-	}
-
-	return &ValidatedStrategyParams{
-		ThresholdPriority:  thresholdPriority,
-		IncludedNamespaces: includedNamespaces,
-		ExcludedNamespaces: excludedNamespaces,
-		LabelSelector:      selector,
-		NodeFit:            params.NodeFit,
-	}, nil
-}
diff --git a/pkg/descheduler/strategies/validation/strategyparams_test.go b/pkg/descheduler/strategies/validation/strategyparams_test.go
deleted file mode 100644
index 54fd0e2aca..0000000000
--- a/pkg/descheduler/strategies/validation/strategyparams_test.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package validation
-
-import (
-	"context"
-	"testing"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes/fake"
-
-	"sigs.k8s.io/descheduler/pkg/api"
-)
-
-var (
-	thresholdPriority int32 = 1000
-)
-
-func TestValidStrategyParams(t *testing.T) {
-	ctx := context.Background()
-	fakeClient := &fake.Clientset{}
-	testCases := []struct {
-		name   string
-		params *api.StrategyParameters
-	}{
-		{name: "validate nil params", params: nil},
-		{name: "validate empty params", params: &api.StrategyParameters{}},
-		{name: "validate params with NodeFit", params: &api.StrategyParameters{NodeFit: true}},
-		{name: "validate params with ThresholdPriority", params: &api.StrategyParameters{ThresholdPriority: &thresholdPriority}},
-		{name: "validate params with priorityClassName", params: &api.StrategyParameters{ThresholdPriorityClassName: "high-priority"}},
-		{name: "validate params with excluded namespace", params: &api.StrategyParameters{Namespaces: &api.Namespaces{Exclude: []string{"excluded-ns"}}}},
-		{name: "validate params with included namespace", params: &api.StrategyParameters{Namespaces: &api.Namespaces{Include: []string{"include-ns"}}}},
-		{name: "validate params with empty label selector", params: &api.StrategyParameters{LabelSelector: &metav1.LabelSelector{}}},
-	}
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			t.Parallel()
-			params, err := ValidateAndParseStrategyParams(ctx, fakeClient, tc.params)
-			if err != nil {
-				t.Errorf("strategy params should be valid but got err: %v", err.Error())
-			}
-			if params == nil {
-				t.Errorf("strategy params should return a strategyParams but got nil")
-			}
-		})
-	}
-}
-
-func TestInvalidStrategyParams(t *testing.T) {
-	ctx := context.Background()
-	fakeClient := &fake.Clientset{}
-	testCases := []struct {
-		name   string
-		params *api.StrategyParameters
-	}{
-		{
-			name:   "invalid params with both included and excluded namespaces nil params",
-			params: &api.StrategyParameters{Namespaces: &api.Namespaces{Include: []string{"include-ns"}, Exclude: []string{"exclude-ns"}}},
-		},
-		{
-			name:   "invalid params with both threshold priority and priority class name",
-			params: &api.StrategyParameters{ThresholdPriorityClassName: "high-priority", ThresholdPriority: &thresholdPriority},
-		},
-		{
-			name:   "invalid params with bad label selector",
-			params: &api.StrategyParameters{LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"": "missing-label"}}},
-		},
-	}
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			t.Parallel()
-			params, err := ValidateAndParseStrategyParams(ctx, fakeClient, tc.params)
-			if err == nil {
-				t.Errorf("strategy params should be invalid but did not get err")
-			}
-			if params != nil {
-				t.Errorf("strategy params should return a nil strategyParams but got %v", params)
-			}
-		})
-	}
-}
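With the shared validation helper and its tests deleted, parameter checking moves out of the run path and into plugin construction: framework.ValidateCommonArgs keeps the namespace include/exclude exclusivity check, while priority and label-selector handling happen inside each plugin's New, as the next file shows. A minimal sketch of the behavioral difference, assuming only the New signature introduced below (the surrounding caller, handle, and logging are illustrative, not part of this patch):

    // Hypothetical caller: configuration errors now surface when the
    // plugin is constructed, instead of being logged inside the strategy
    // on every descheduling run.
    args := &framework.RemovePodsViolatingTopologySpreadConstraintArgs{
        CommonArgs: framework.CommonArgs{
            Namespaces: &api.Namespaces{
                Include: []string{"ns1"},
                Exclude: []string{"ns2"}, // setting both is rejected
            },
        },
    }
    if _, err := New(args, handle); err != nil {
        klog.ErrorS(err, "invalid plugin configuration") // construction fails here
    }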
diff --git a/pkg/descheduler/strategies/topologyspreadconstraint.go b/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint.go
similarity index 80%
rename from pkg/descheduler/strategies/topologyspreadconstraint.go
rename to pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint.go
index 2f0fda87ac..bebac71238 100644
--- a/pkg/descheduler/strategies/topologyspreadconstraint.go
+++ b/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package strategies
+package removepodsviolatingtopologyspreadconstraint
 
 import (
 	"context"
@@ -25,18 +25,79 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/klog/v2"
 
-	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
-	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
-	"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation"
+	"sigs.k8s.io/descheduler/pkg/framework"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )
 
+const PluginName = "RemovePodsViolatingTopologySpreadConstraint"
+
+// RemovePodsViolatingTopologySpreadConstraint evicts pods that violate their topology spread constraints.
+type RemovePodsViolatingTopologySpreadConstraint struct {
+	handle             framework.Handle
+	args               *framework.RemovePodsViolatingTopologySpreadConstraintArgs
+	isEvictable        func(pod *v1.Pod) bool
+	includedNamespaces sets.String
+	excludedNamespaces sets.String
+}
+
+var _ framework.Plugin = &RemovePodsViolatingTopologySpreadConstraint{}
+var _ framework.DeschedulePlugin = &RemovePodsViolatingTopologySpreadConstraint{}
+
+func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+	topologyArgs, ok := args.(*framework.RemovePodsViolatingTopologySpreadConstraintArgs)
+	if !ok {
+		return nil, fmt.Errorf("want args to be of type RemovePodsViolatingTopologySpreadConstraintArgs, got %T", args)
+	}
+
+	if err := framework.ValidateCommonArgs(topologyArgs.CommonArgs); err != nil {
+		return nil, err
+	}
+
+	thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), handle.ClientSet(), topologyArgs.PriorityThreshold)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get priority threshold: %v", err)
+	}
+
+	var selector labels.Selector
+	if topologyArgs.LabelSelector != nil {
+		selector, err = metav1.LabelSelectorAsSelector(topologyArgs.LabelSelector)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get label selectors: %v", err)
+		}
+	}
+
+	evictable := handle.PodEvictor().Evictable(
+		evictions.WithPriorityThreshold(thresholdPriority),
+		evictions.WithNodeFit(topologyArgs.NodeFit),
+		evictions.WithLabelSelector(selector),
+	)
+
+	var includedNamespaces, excludedNamespaces sets.String
+	if topologyArgs.Namespaces != nil {
+		includedNamespaces = sets.NewString(topologyArgs.Namespaces.Include...)
+		excludedNamespaces = sets.NewString(topologyArgs.Namespaces.Exclude...)
+	}
+
+	return &RemovePodsViolatingTopologySpreadConstraint{
+		handle:             handle,
+		args:               topologyArgs,
+		isEvictable:        evictable.IsEvictable,
+		includedNamespaces: includedNamespaces,
+		excludedNamespaces: excludedNamespaces,
+	}, nil
+}
+
+func (d *RemovePodsViolatingTopologySpreadConstraint) Name() string {
+	return PluginName
+}
+
 // AntiAffinityTerm's topology key value used in predicate metadata
 type topologyPair struct {
 	key   string
@@ -48,26 +109,15 @@ type topology struct {
 	pods []*v1.Pod
 }
 
-func RemovePodsViolatingTopologySpreadConstraint(
-	ctx context.Context,
-	client clientset.Interface,
-	strategy api.DeschedulerStrategy,
-	nodes []*v1.Node,
-	podEvictor *evictions.PodEvictor,
-	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
-) {
-	strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, strategy.Params)
-	if err != nil {
-		klog.ErrorS(err, "Invalid RemovePodsViolatingTopologySpreadConstraint parameters")
-		return
-	}
-
-	evictable := podEvictor.Evictable(
-		evictions.WithPriorityThreshold(strategyParams.ThresholdPriority),
-		evictions.WithNodeFit(strategyParams.NodeFit),
-		evictions.WithLabelSelector(strategyParams.LabelSelector),
-	)
-
+func (d *RemovePodsViolatingTopologySpreadConstraint) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
 	nodeMap := make(map[string]*v1.Node, len(nodes))
 	for _, node := range nodes {
 		nodeMap[node.Name] = node
@@ -86,20 +136,22 @@ func RemovePodsViolatingTopologySpreadConstraint(
 	// if diff > maxSkew, add this pod in the current bucket for eviction
 
 	// First record all of the constraints by namespace
-	namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
+	namespaces, err := d.handle.ClientSet().CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		klog.ErrorS(err, "Couldn't list namespaces")
-		return
+		return &framework.Status{
+			Err: fmt.Errorf("couldn't list namespaces: %v", err),
+		}
 	}
 	klog.V(1).InfoS("Processing namespaces for topology spread constraints")
 	podsForEviction := make(map[*v1.Pod]struct{})
 	// 1. for each namespace...
 	for _, namespace := range namespaces.Items {
-		if (len(strategyParams.IncludedNamespaces) > 0 && !strategyParams.IncludedNamespaces.Has(namespace.Name)) ||
-			(len(strategyParams.ExcludedNamespaces) > 0 && strategyParams.ExcludedNamespaces.Has(namespace.Name)) {
+		if (len(d.includedNamespaces) > 0 && !d.includedNamespaces.Has(namespace.Name)) ||
+			(len(d.excludedNamespaces) > 0 && d.excludedNamespaces.Has(namespace.Name)) {
 			continue
 		}
-		namespacePods, err := client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
+		namespacePods, err := d.handle.ClientSet().CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
 		if err != nil {
 			klog.ErrorS(err, "Couldn't list pods in namespace", "namespace", namespace)
 			continue
@@ -110,7 +162,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
 		for _, pod := range namespacePods.Items {
 			for _, constraint := range pod.Spec.TopologySpreadConstraints {
 				// Ignore soft topology constraints if they are not included
-				if constraint.WhenUnsatisfiable == v1.ScheduleAnyway && (strategy.Params == nil || !strategy.Params.IncludeSoftConstraints) {
+				if constraint.WhenUnsatisfiable == v1.ScheduleAnyway && !d.args.IncludeSoftConstraints {
 					continue
 				}
 				namespaceTopologySpreadConstraints[constraint] = struct{}{}
@@ -170,19 +222,21 @@ func RemovePodsViolatingTopologySpreadConstraint(
 				klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
 				continue
 			}
-			balanceDomains(podsForEviction, constraint, constraintTopologies, sumPods, evictable.IsEvictable, nodeMap)
+			balanceDomains(podsForEviction, constraint, constraintTopologies, sumPods, d.isEvictable, nodeMap)
 		}
 	}
 
 	for pod := range podsForEviction {
-		if !evictable.IsEvictable(pod) {
+		if !d.isEvictable(pod) {
 			continue
 		}
-		if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName], "PodTopologySpread"); err != nil {
+		if _, err := d.handle.PodEvictor().EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName], "PodTopologySpread"); err != nil {
 			klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
 			break
 		}
 	}
+
+	return nil
 }
 
 // topologyIsBalanced checks if any domains in the topology differ by more than the MaxSkew
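Deschedule now reports failure through the returned *framework.Status instead of logging and silently returning. A hypothetical runner, not included in this patch, would consume the status along these lines (sketch only; the framework's real run loop lands in other patches of this migration):

    if status := plugin.(framework.DeschedulePlugin).Deschedule(ctx, nodes); status != nil && status.Err != nil {
        // the only error path today is the namespace list call above
        klog.ErrorS(status.Err, "Plugin failed", "plugin", plugin.Name())
    }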
diff --git a/pkg/descheduler/strategies/topologyspreadconstraint_test.go b/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint_test.go
similarity index 88%
rename from pkg/descheduler/strategies/topologyspreadconstraint_test.go
rename to pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint_test.go
index 22a8636514..34abe9e5b1 100644
--- a/pkg/descheduler/strategies/topologyspreadconstraint_test.go
+++ b/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint/topologyspreadconstraint_test.go
@@ -1,4 +1,4 @@
-package strategies
+package removepodsviolatingtopologyspreadconstraint
 
 import (
 	"context"
@@ -9,22 +9,43 @@
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/informers"
+	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
+	"sigs.k8s.io/descheduler/pkg/framework"
 	"sigs.k8s.io/descheduler/test"
 )
 
+type frameworkHandle struct {
+	clientset                 clientset.Interface
+	podEvictor                *evictions.PodEvictor
+	getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
+}
+
+func (f frameworkHandle) ClientSet() clientset.Interface {
+	return f.clientset
+}
+func (f frameworkHandle) PodEvictor() *evictions.PodEvictor {
+	return f.podEvictor
+}
+func (f frameworkHandle) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
+	return f.getPodsAssignedToNodeFunc
+}
+
 func TestTopologySpreadConstraint(t *testing.T) {
 	testCases := []struct {
 		name                 string
 		pods                 []*v1.Pod
 		expectedEvictedCount uint
 		nodes                []*v1.Node
-		strategy             api.DeschedulerStrategy
-		namespaces           []string
+
+		namespaces             *api.Namespaces
+		nodeFit                bool
+		includeSoftConstraints bool
+		labelSelector          *metav1.LabelSelector
 	}{
 		{
 			name: "2 domains, sizes [2,1], maxSkew=1, move 0 pods",
@@ -58,12 +79,6 @@ func TestTopologySpreadConstraint(t *testing.T) {
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
-			namespaces: []string{"ns1"},
 		},
 		{
 			name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2]",
@@ -90,12 +105,6 @@
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
-			namespaces: []string{"ns1"},
 		},
 		{
 			name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2] (soft constraints)",
@@ -128,9 +137,8 @@
 					labels: map[string]string{"foo": "bar"},
 				},
 			}),
-			expectedEvictedCount: 1,
-			strategy:             api.DeschedulerStrategy{Params: &api.StrategyParameters{IncludeSoftConstraints: true}},
-			namespaces:           []string{"ns1"},
+			expectedEvictedCount:   1,
+			includeSoftConstraints: true,
 		},
 		{
 			name: "2 domains, sizes [3,1], maxSkew=1, no pods eligible, move 0 pods",
@@ -160,12 +168,6 @@
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
-			namespaces: []string{"ns1"},
 		},
 		{
 			name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2], exclude kube-system namespace",
@@ -192,8 +194,8 @@
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy:             api.DeschedulerStrategy{Enabled: true, Params: &api.StrategyParameters{NodeFit: true, Namespaces: &api.Namespaces{Exclude: []string{"kube-system"}}}},
-			namespaces:           []string{"ns1"},
+			nodeFit:              true,
+			namespaces:           &api.Namespaces{Exclude: []string{"kube-system"}},
 		},
 		{
 			name: "2 domains, sizes [5,2], maxSkew=1, move 1 pod to achieve [4,3]",
@@ -220,12 +222,6 @@
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
-			namespaces: []string{"ns1"},
 		},
 		{
 			name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods to achieve [2,2]",
@@ -247,12 +243,6 @@
 				},
 			}),
 			expectedEvictedCount: 2,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
-			namespaces: []string{"ns1"},
 		},
 		{
 			name: "2 domains, sizes [4,0], maxSkew=1, only move 1 pod since pods with nodeSelector and nodeAffinity aren't evicted",
@@ -291,12 +281,7 @@
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: true,
-				},
-			},
-			namespaces: []string{"ns1"},
+			nodeFit:              true,
 		},
 		{
 			name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods since selector matches multiple nodes",
@@ -338,12 +323,6 @@
 				},
 			}),
 			expectedEvictedCount: 2,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
-			namespaces: []string{"ns1"},
 		},
 		{
 			name: "3 domains, sizes [0, 1, 100], maxSkew=1, move 66 pods to get [34, 33, 34]",
@@ -366,8 +345,6 @@
 				},
 			}),
 			expectedEvictedCount: 66,
-			strategy:             api.DeschedulerStrategy{},
-			namespaces:           []string{"ns1"},
 		},
 		{
 			name: "4 domains, sizes [0, 1, 3, 5], should move 3 to get [2, 2, 3, 2]",
@@ -396,12 +373,6 @@
 				},
 			}),
 			expectedEvictedCount: 3,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
-			namespaces: []string{"ns1"},
 		},
 		{
 			name: "2 domains size [2 6], maxSkew=2, should move 1 to get [3 5]",
@@ -428,12 +399,6 @@
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: false,
-				},
-			},
-			namespaces: []string{"ns1"},
 		},
 		{
 			name: "2 domains size [2 6], maxSkew=2, can't move any because of node taints",
@@ -476,12 +441,7 @@
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					NodeFit: true,
-				},
-			},
-			namespaces: []string{"ns1"},
+			nodeFit:              true,
 		},
 		{
 			// see https://github.com/kubernetes-sigs/descheduler/issues/564
@@ -584,9 +544,8 @@
 				},
 			},
 		}),
-			expectedEvictedCount: 1,
-			strategy:             api.DeschedulerStrategy{Params: &api.StrategyParameters{IncludeSoftConstraints: true}},
-			namespaces:           []string{"ns1"},
+			expectedEvictedCount:   1,
+			includeSoftConstraints: true,
 		},
 		{
 			name: "3 domains size [8 7 0], maxSkew=1, should move 5 to get [5 5 5]",
@@ -610,8 +569,6 @@
 				},
 			}),
 			expectedEvictedCount: 5,
-			strategy:             api.DeschedulerStrategy{},
-			namespaces:           []string{"ns1"},
 		},
 		{
 			name: "3 domains size [5 5 5], maxSkew=1, should move 0 to retain [5 5 5]",
@@ -641,8 +598,6 @@
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy:             api.DeschedulerStrategy{},
-			namespaces:           []string{"ns1"},
 		},
 		{
 			name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod since pod tolerates the node with taint",
@@ -682,8 +637,6 @@
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy:             api.DeschedulerStrategy{},
-			namespaces:           []string{"ns1"},
 		},
 		{
 			name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods since pod does not tolerate the tainted node",
@@ -715,8 +668,6 @@
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy:             api.DeschedulerStrategy{},
-			namespaces:           []string{"ns1"},
 		},
 		{
 			name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with PreferNoSchedule Taint",
@@ -748,8 +699,6 @@
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy:             api.DeschedulerStrategy{},
-			namespaces:           []string{"ns1"},
 		},
 		{
 			name: "2 domains, sizes [2,0], maxSkew=1, move 0 pod for node with unmatched label filtering",
@@ -771,12 +720,7 @@
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpIn),
-				},
-			},
-			namespaces: []string{"ns1"},
+			labelSelector:        getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpIn),
 		},
 		{
 			name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with matched label filtering",
@@ -798,12 +742,7 @@
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn),
-				},
-			},
-			namespaces: []string{"ns1"},
+			labelSelector:        getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn),
 		},
 		{
 			name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with matched label filtering (NotIn op)",
@@ -825,12 +764,7 @@
 				},
 			}),
 			expectedEvictedCount: 1,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn),
-				},
-			},
-			namespaces: []string{"ns1"},
+			labelSelector:        getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn),
 		},
 		{
 			name: "2 domains, sizes [4,2], maxSkew=1, 2 pods in termination; nothing should be moved",
@@ -860,12 +794,7 @@
 				},
 			}),
 			expectedEvictedCount: 0,
-			strategy: api.DeschedulerStrategy{
-				Params: &api.StrategyParameters{
-					LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn),
-				},
-			},
-			namespaces: []string{"ns1"},
+			labelSelector:        getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn),
 		},
 	}
 
@@ -908,7 +837,26 @@
 				false,
 				false,
 			)
-			RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
+
+			plugin, err := New(&framework.RemovePodsViolatingTopologySpreadConstraintArgs{
+				CommonArgs: framework.CommonArgs{
+					NodeFit:    tc.nodeFit,
+					Namespaces: tc.namespaces,
+				},
+				LabelSelector:          tc.labelSelector,
+				IncludeSoftConstraints: tc.includeSoftConstraints,
+			},
+				frameworkHandle{
+					clientset:                 fakeClient,
+					podEvictor:                podEvictor,
+					getPodsAssignedToNodeFunc: getPodsAssignedToNode,
+				},
+			)
+			if err != nil {
+				t.Fatalf("Unable to initialize the plugin: %v", err)
+			}
+
+			plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
 			podsEvicted := podEvictor.TotalEvicted()
 			if podsEvicted != tc.expectedEvictedCount {
 				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)
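The test-local frameworkHandle above compiles against framework.Handle, whose definition is not shown in this patch. Reconstructed from the methods the plugin and tests actually use, its shape is approximately:

    // Inferred from usage only; the canonical interface lives in
    // pkg/framework and may declare additional methods.
    type Handle interface {
        ClientSet() clientset.Interface
        PodEvictor() *evictions.PodEvictor
        GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc
    }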
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with matched label filtering", @@ -798,12 +742,7 @@ func TestTopologySpreadConstraint(t *testing.T) { }, }), expectedEvictedCount: 1, - strategy: api.DeschedulerStrategy{ - Params: &api.StrategyParameters{ - LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn), - }, - }, - namespaces: []string{"ns1"}, + labelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn), }, { name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with matched label filtering (NotIn op)", @@ -825,12 +764,7 @@ func TestTopologySpreadConstraint(t *testing.T) { }, }), expectedEvictedCount: 1, - strategy: api.DeschedulerStrategy{ - Params: &api.StrategyParameters{ - LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn), - }, - }, - namespaces: []string{"ns1"}, + labelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn), }, { name: "2 domains, sizes [4,2], maxSkew=1, 2 pods in termination; nothing should be moved", @@ -860,12 +794,7 @@ func TestTopologySpreadConstraint(t *testing.T) { }, }), expectedEvictedCount: 0, - strategy: api.DeschedulerStrategy{ - Params: &api.StrategyParameters{ - LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn), - }, - }, - namespaces: []string{"ns1"}, + labelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn), }, } @@ -908,7 +837,26 @@ func TestTopologySpreadConstraint(t *testing.T) { false, false, ) - RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode) + + plugin, err := New(&framework.RemovePodsViolatingTopologySpreadConstraintArgs{ + CommonArgs: framework.CommonArgs{ + NodeFit: tc.nodeFit, + Namespaces: tc.namespaces, + }, + LabelSelector: tc.labelSelector, + IncludeSoftConstraints: tc.includeSoftConstraints, + }, + frameworkHandle{ + clientset: fakeClient, + podEvictor: podEvictor, + getPodsAssignedToNodeFunc: getPodsAssignedToNode, + }, + ) + if err != nil { + t.Fatalf("Unable to initialize the plugin: %v", err) + } + + plugin.(interface{}).(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes) podsEvicted := podEvictor.TotalEvicted() if podsEvicted != tc.expectedEvictedCount { t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted) diff --git a/pkg/framework/types.go b/pkg/framework/types.go index fc2f3c917f..ee685d3cff 100644 --- a/pkg/framework/types.go +++ b/pkg/framework/types.go @@ -143,6 +143,20 @@ func (in *RemovePodsHavingTooManyRestartsArgs) DeepCopyObject() runtime.Object { return nil } +// RemovePodsViolatingTopologySpreadConstraintArgs holds arguments used to configure the RemovePodsViolatingTopologySpreadConstraint plugin. +type RemovePodsViolatingTopologySpreadConstraintArgs struct { + metav1.TypeMeta + + CommonArgs + LabelSelector *metav1.LabelSelector + IncludeSoftConstraints bool +} + +// TODO(jchaloup): have this go generated +func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopyObject() runtime.Object { + return nil +} + func ValidateCommonArgs(args CommonArgs) error { // At most one of include/exclude can be set if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {