From 3f1509a33309952a8eeb7eb0f10a57a95aaac380 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Tue, 24 Dec 2024 13:43:59 +0900 Subject: [PATCH 01/27] Add labels for application tracking in Kubernetes manifests Signed-off-by: Shinnosuke Sawada-Dazai --- .../plugin/kubernetes/deployment/server_test.go | 11 +++++++++++ .../pipedv1/plugin/kubernetes/provider/loader.go | 7 +++++++ .../plugin/kubernetes/provider/manifest.go | 16 ++++++++++++++++ 3 files changed, 34 insertions(+) diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go index 4eb49d17f7..c43ddf9a57 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go @@ -183,6 +183,12 @@ func TestDeploymentService_executeK8sSyncStage(t *testing.T) { assert.Equal(t, "simple", deployment.GetName()) assert.Equal(t, "simple", deployment.GetLabels()["app"]) + + assert.Equal(t, "piped", deployment.GetLabels()["pipecd.dev/managed-by"]) + assert.Equal(t, "piped-id", deployment.GetLabels()["pipecd.dev/piped"]) + assert.Equal(t, "app-id", deployment.GetLabels()["pipecd.dev/application"]) + assert.Equal(t, "0123456789", deployment.GetLabels()["pipecd.dev/commit-hash"]) + assert.Equal(t, "piped", deployment.GetAnnotations()["pipecd.dev/managed-by"]) assert.Equal(t, "piped-id", deployment.GetAnnotations()["pipecd.dev/piped"]) assert.Equal(t, "app-id", deployment.GetAnnotations()["pipecd.dev/application"]) @@ -246,6 +252,11 @@ func TestDeploymentService_executeK8sSyncStage_withInputNamespace(t *testing.T) deployment, err := dynamicClient.Resource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}).Namespace("test-namespace").Get(context.Background(), "simple", metav1.GetOptions{}) require.NoError(t, err) + assert.Equal(t, "piped", deployment.GetLabels()["pipecd.dev/managed-by"]) + assert.Equal(t, "piped-id", deployment.GetLabels()["pipecd.dev/piped"]) + assert.Equal(t, "app-id", deployment.GetLabels()["pipecd.dev/application"]) + assert.Equal(t, "0123456789", deployment.GetLabels()["pipecd.dev/commit-hash"]) + assert.Equal(t, "simple", deployment.GetName()) assert.Equal(t, "simple", deployment.GetLabels()["app"]) assert.Equal(t, "piped", deployment.GetAnnotations()["pipecd.dev/managed-by"]) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go b/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go index 8cac2b9965..38ff1f7de2 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go @@ -87,6 +87,13 @@ func (l *Loader) LoadManifests(ctx context.Context, input LoaderInput) (manifest defer func() { // Add builtin annotations for tracking application live state. 
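		// NOTE: the labels added below mirror the tracking annotations. Unlike annotations, labels can be
		// used in selectors, so later steps (for example the prune logic added to the K8S_SYNC stage) can
		// list live resources with a selector such as "pipecd.dev/application=<app-id>".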
for i := range manifests { + manifests[i].AddLabels(map[string]string{ + LabelManagedBy: ManagedByPiped, + LabelPiped: input.PipedID, + LabelApplication: input.AppID, + LabelCommitHash: input.CommitHash, + }) + manifests[i].AddAnnotations(map[string]string{ LabelManagedBy: ManagedByPiped, LabelPiped: input.PipedID, diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go index d964c332eb..8820724749 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go @@ -115,6 +115,22 @@ func (m Manifest) NestedMap(fields ...string) (map[string]any, bool, error) { return unstructured.NestedMap(m.body.Object, fields...) } +func (m Manifest) AddLabels(labels map[string]string) { + if len(labels) == 0 { + return + } + + lbs := m.body.GetLabels() + if lbs == nil { + m.body.SetLabels(labels) + return + } + for k, v := range labels { + lbs[k] = v + } + m.body.SetLabels(lbs) +} + func (m Manifest) AddAnnotations(annotations map[string]string) { if len(annotations) == 0 { return From e6f5bc9277fb462f197084c34b8f859b9dbd387e Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Wed, 25 Dec 2024 17:38:56 +0900 Subject: [PATCH 02/27] Add IsManagedByPiped method Signed-off-by: Shinnosuke Sawada-Dazai --- .../plugin/kubernetes/provider/manifest.go | 5 ++ .../kubernetes/provider/manifest_test.go | 68 +++++++++++++++++++ 2 files changed, 73 insertions(+) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go index 8820724749..e7c8c2298c 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go @@ -147,6 +147,11 @@ func (m Manifest) AddAnnotations(annotations map[string]string) { m.body.SetAnnotations(annos) } +// IsManagedByPiped returns true if the manifest is managed by Piped. +func (m Manifest) IsManagedByPiped() bool { + return len(m.body.GetOwnerReferences()) == 0 && m.body.GetAnnotations()[LabelManagedBy] == ManagedByPiped +} + // AddStringMapValues adds or overrides the given key-values into the string map // that can be found at the specified fields. 
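// Illustrative usage (not part of this patch): adding the variant label into a Deployment's
// pod template labels, assuming the usual field path for a Deployment manifest:
//
//	err := m.AddStringMapValues(map[string]string{"pipecd.dev/variant": "primary"}, "spec", "template", "metadata", "labels")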
func (m Manifest) AddStringMapValues(values map[string]string, fields ...string) error { diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest_test.go b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest_test.go index fa01c1261c..f4bf6fb16f 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest_test.go @@ -723,3 +723,71 @@ data: }) } } + +func TestIsManagedByPiped(t *testing.T) { + testcases := []struct { + name string + manifest Manifest + wantResult bool + }{ + { + name: "managed by Piped", + manifest: Manifest{ + body: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + LabelManagedBy: ManagedByPiped, + }, + }, + }, + }, + }, + wantResult: true, + }, + { + name: "not managed by Piped", + manifest: Manifest{ + body: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + "some-other-label": "some-value", + }, + }, + }, + }, + }, + wantResult: false, + }, + { + name: "has owner references", + manifest: Manifest{ + body: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + LabelManagedBy: ManagedByPiped, + }, + "ownerReferences": []interface{}{ + map[string]interface{}{ + "apiVersion": "v1", + "kind": "ReplicaSet", + "name": "example-replicaset", + }, + }, + }, + }, + }, + }, + wantResult: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + gotResult := tc.manifest.IsManagedByPiped() + assert.Equal(t, tc.wantResult, gotResult) + }) + } +} From b1509594499e01922d79ecc4a253df43e22a6149 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Wed, 25 Dec 2024 17:45:18 +0900 Subject: [PATCH 03/27] Remove Namespace method from ResourceKey Signed-off-by: Shinnosuke Sawada-Dazai --- pkg/app/pipedv1/plugin/kubernetes/provider/helm_test.go | 1 - pkg/app/pipedv1/plugin/kubernetes/provider/resource.go | 4 ---- 2 files changed, 5 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/helm_test.go b/pkg/app/pipedv1/plugin/kubernetes/provider/helm_test.go index a6b4528638..476a5bfa2e 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/helm_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/helm_test.go @@ -84,7 +84,6 @@ func TestTemplateLocalChart_WithNamespace(t *testing.T) { metadata, _, err := manifest.NestedMap("metadata") require.NoError(t, err) require.Equal(t, namespace, metadata["namespace"]) - require.Equal(t, namespace, manifest.Key().Namespace()) } } diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go index 05780ee75f..f1ed82116d 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go @@ -43,10 +43,6 @@ func (k ResourceKey) Kind() string { return k.kind } -func (k ResourceKey) Namespace() string { - return k.namespace -} - func (k ResourceKey) Name() string { return k.name } From 9c5d959b03b5f83ddad67a1a55f7ca3bac990484 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Wed, 25 Dec 2024 17:47:37 +0900 Subject: [PATCH 04/27] Remove APIVersion method from ResourceKey Signed-off-by: Shinnosuke Sawada-Dazai --- pkg/app/pipedv1/plugin/kubernetes/provider/loader.go | 2 +- pkg/app/pipedv1/plugin/kubernetes/provider/resource.go | 4 
---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go b/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go index 38ff1f7de2..043b105340 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go @@ -98,7 +98,7 @@ func (l *Loader) LoadManifests(ctx context.Context, input LoaderInput) (manifest LabelManagedBy: ManagedByPiped, LabelPiped: input.PipedID, LabelApplication: input.AppID, - LabelOriginalAPIVersion: manifests[i].Key().APIVersion(), + LabelOriginalAPIVersion: manifests[i].body.GetAPIVersion(), LabelResourceKey: manifests[i].Key().String(), LabelCommitHash: input.CommitHash, }) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go index f1ed82116d..fb462b795f 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go @@ -35,10 +35,6 @@ type ResourceKey struct { name string } -func (k ResourceKey) APIVersion() string { - return k.apiVersion -} - func (k ResourceKey) Kind() string { return k.kind } From 9aea549b6a989a3fadd9b7f5ee44c7e153036b2f Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Wed, 25 Dec 2024 17:56:42 +0900 Subject: [PATCH 05/27] Refactor manifest handling to use Name and Kind methods instead of Key Signed-off-by: Shinnosuke Sawada-Dazai --- .../pipedv1/plugin/kubernetes/deployment/annotate.go | 4 ++-- .../pipedv1/plugin/kubernetes/deployment/determine.go | 10 +++++----- pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go | 8 ++++++++ 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/annotate.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/annotate.go index b9357ea2c7..007c843db9 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/annotate.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/annotate.go @@ -32,11 +32,11 @@ func annotateConfigHash(manifests []provider.Manifest) error { secrets := make(map[string]provider.Manifest) for _, m := range manifests { if m.IsConfigMap() { - configMaps[m.Key().Name()] = m + configMaps[m.Name()] = m continue } if m.IsSecret() { - secrets[m.Key().Name()] = m + secrets[m.Name()] = m } } diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/determine.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/determine.go index fd34ed65a0..9833c30ff4 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/determine.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/determine.go @@ -73,10 +73,10 @@ func determineVersions(manifests []provider.Manifest) ([]*model.ArtifactVersion, func findManifests(kind, name string, manifests []provider.Manifest) []provider.Manifest { out := make([]provider.Manifest, 0, len(manifests)) for _, m := range manifests { - if m.Key().Kind() != kind { + if m.Kind() != kind { continue } - if name != "" && m.Key().Name() != name { + if name != "" && m.Name() != name { continue } out = append(out, m) @@ -186,7 +186,7 @@ func determineStrategy(olds, news []provider.Manifest, workloadRefs []config.K8s if msg, changed := checkImageChange(templateDiffs); changed { return model.SyncStrategy_PIPELINE, msg } - return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively because pod template of workload %s was changed", w.New.Key().Name()) + return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively because pod template of workload %s was changed", 
w.New.Name()) } } @@ -203,14 +203,14 @@ func determineStrategy(olds, news []provider.Manifest, workloadRefs []config.K8s for k, oc := range oldConfigs { nc, ok := newConfigs[k] if !ok { - return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively because %s %s was deleted", oc.Key().Kind(), oc.Key().Name()) + return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively because %s %s was deleted", oc.Kind(), oc.Name()) } result, err := provider.Diff(oc, nc, logger) if err != nil { return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively due to an error while calculating the diff (%v)", err) } if result.HasDiff() { - return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively because %s %s was updated", oc.Key().Kind(), oc.Key().Name()) + return model.SyncStrategy_PIPELINE, fmt.Sprintf("Sync progressively because %s %s was updated", oc.Kind(), oc.Name()) } } diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go index e7c8c2298c..384a0c25a1 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go @@ -59,6 +59,14 @@ func (m Manifest) Key() ResourceKey { return makeResourceKey(m.body) } +func (m Manifest) Kind() string { + return m.body.GetKind() +} + +func (m Manifest) Name() string { + return m.body.GetName() +} + // IsDeployment returns true if the manifest is a Deployment. // It checks the API group and the kind of the manifest. func (m Manifest) IsDeployment() bool { From 5134edbbfd211d1a34abcbb6c1e134b10eec0ce6 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Wed, 25 Dec 2024 18:10:55 +0900 Subject: [PATCH 06/27] Change apiVersion and kind to groupKind Signed-off-by: Shinnosuke Sawada-Dazai --- .../kubernetes/deployment/server_test.go | 4 +- .../plugin/kubernetes/provider/applier.go | 2 +- .../kubernetes/provider/applier_test.go | 79 +++++++++++++------ .../plugin/kubernetes/provider/manifest.go | 3 +- .../kubernetes/provider/manifest_test.go | 43 +++++----- .../plugin/kubernetes/provider/resource.go | 21 +++-- 6 files changed, 88 insertions(+), 64 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go index c43ddf9a57..839163e03d 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go @@ -193,7 +193,7 @@ func TestDeploymentService_executeK8sSyncStage(t *testing.T) { assert.Equal(t, "piped-id", deployment.GetAnnotations()["pipecd.dev/piped"]) assert.Equal(t, "app-id", deployment.GetAnnotations()["pipecd.dev/application"]) assert.Equal(t, "apps/v1", deployment.GetAnnotations()["pipecd.dev/original-api-version"]) - assert.Equal(t, "apps/v1:Deployment::simple", deployment.GetAnnotations()["pipecd.dev/resource-key"]) // This assertion differs from the non-plugin-arched piped's Kubernetes platform provider, but we decided to change this behavior. + assert.Equal(t, "apps:Deployment::simple", deployment.GetAnnotations()["pipecd.dev/resource-key"]) // This assertion differs from the non-plugin-arched piped's Kubernetes platform provider, but we decided to change this behavior. 
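	// ResourceKey.String() renders "<group>:<Kind>:<namespace>:<name>", so core-group resources
	// such as a Service get a leading colon, e.g. ":Service::simple".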
assert.Equal(t, "0123456789", deployment.GetAnnotations()["pipecd.dev/commit-hash"]) } @@ -263,6 +263,6 @@ func TestDeploymentService_executeK8sSyncStage_withInputNamespace(t *testing.T) assert.Equal(t, "piped-id", deployment.GetAnnotations()["pipecd.dev/piped"]) assert.Equal(t, "app-id", deployment.GetAnnotations()["pipecd.dev/application"]) assert.Equal(t, "apps/v1", deployment.GetAnnotations()["pipecd.dev/original-api-version"]) - assert.Equal(t, "apps/v1:Deployment::simple", deployment.GetAnnotations()["pipecd.dev/resource-key"]) // This assertion differs from the non-plugin-arched piped's Kubernetes platform provider, but we decided to change this behavior. + assert.Equal(t, "apps:Deployment::simple", deployment.GetAnnotations()["pipecd.dev/resource-key"]) // This assertion differs from the non-plugin-arched piped's Kubernetes platform provider, but we decided to change this behavior. assert.Equal(t, "0123456789", deployment.GetAnnotations()["pipecd.dev/commit-hash"]) } diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/applier.go b/pkg/app/pipedv1/plugin/kubernetes/provider/applier.go index 7f1bee6100..9f80c9edfc 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/applier.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/applier.go @@ -144,7 +144,7 @@ func (a *Applier) Delete(ctx context.Context, k ResourceKey) (err error) { return err } - if k.String() != m.body.GetAnnotations()[LabelResourceKey] { + if k.String() != m.Key().String() { return ErrNotFound } diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/applier_test.go b/pkg/app/pipedv1/plugin/kubernetes/provider/applier_test.go index d095b15e9c..d05e4bb7fc 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/applier_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/applier_test.go @@ -21,6 +21,7 @@ import ( "go.uber.org/zap" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/kubernetes/config" ) @@ -430,13 +431,12 @@ kind: ConfigMap metadata: name: test-config annotations: - pipecd.dev/resource-key: "v1:ConfigMap::test-config" + pipecd.dev/resource-key: ":ConfigMap::test-config" `, resourceKey: ResourceKey{ - apiVersion: "v1", - kind: "ConfigMap", - namespace: "", - name: "test-config", + groupKind: schema.ParseGroupKind("ConfigMap"), + namespace: "", + name: "test-config", }, expectedErr: nil, }, @@ -449,13 +449,12 @@ kind: ConfigMap metadata: name: test-config annotations: - pipecd.dev/resource-key: "v1:ConfigMap::test-config" + pipecd.dev/resource-key: ":ConfigMap::test-config" `, resourceKey: ResourceKey{ - apiVersion: "v1", - kind: "ConfigMap", - namespace: "", - name: "test-config", + groupKind: schema.ParseGroupKind("ConfigMap"), + namespace: "", + name: "test-config", }, expectedErr: errGet, }, @@ -468,13 +467,12 @@ kind: ConfigMap metadata: name: test-config annotations: - pipecd.dev/resource-key: "v1:ConfigMap::test-config" + pipecd.dev/resource-key: ":ConfigMap::test-config" `, resourceKey: ResourceKey{ - apiVersion: "v1", - kind: "ConfigMap", - namespace: "", - name: "test-config", + groupKind: schema.ParseGroupKind("ConfigMap"), + namespace: "", + name: "test-config", }, expectedErr: errDelete, }, @@ -486,13 +484,12 @@ kind: ConfigMap metadata: name: test-config annotations: - pipecd.dev/resource-key: "v1:ConfigMap::test-config" + pipecd.dev/resource-key: ":ConfigMap::test-config" `, resourceKey: ResourceKey{ - apiVersion: "v1", - kind: "ConfigMap", - namespace: "", - name: "another-config", + 
groupKind: schema.ParseGroupKind("ConfigMap"), + namespace: "", + name: "another-config", }, expectedErr: ErrNotFound, }, @@ -501,6 +498,41 @@ metadata: manifest: ` apiVersion: v1 kind: ConfigMap +metadata: + name: test-config + namespace: test-namespace + annotations: + pipecd.dev/resource-key: ":ConfigMap:test-namespace:test-config" +`, + resourceKey: ResourceKey{ + groupKind: schema.ParseGroupKind("ConfigMap"), + namespace: "test-namespace", + name: "test-config", + }, + expectedErr: nil, + }, + { + name: "successful delete with old format of resource key", + manifest: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-config + annotations: + pipecd.dev/resource-key: "v1:ConfigMap::test-config" +`, + resourceKey: ResourceKey{ + groupKind: schema.ParseGroupKind("ConfigMap"), + namespace: "", + name: "test-config", + }, + expectedErr: nil, + }, + { + name: "successful delete with namespace with old format of resource key", + manifest: ` +apiVersion: v1 +kind: ConfigMap metadata: name: test-config namespace: test-namespace @@ -508,10 +540,9 @@ metadata: pipecd.dev/resource-key: "v1:ConfigMap:test-namespace:test-config" `, resourceKey: ResourceKey{ - apiVersion: "v1", - kind: "ConfigMap", - namespace: "test-namespace", - name: "test-config", + groupKind: schema.ParseGroupKind("ConfigMap"), + namespace: "test-namespace", + name: "test-config", }, expectedErr: nil, }, diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go index 384a0c25a1..282b310367 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go @@ -200,8 +200,7 @@ func FindSameManifests(olds, news []Manifest) []WorkloadPair { pairs := make([]WorkloadPair, 0) oldMap := make(map[ResourceKey]Manifest, len(olds)) nomalizeKey := func(k ResourceKey) ResourceKey { - // Ignoring APIVersion because user can upgrade to the new APIVersion for the same workload. - k.apiVersion = "" + // Normalize the key by removing the default namespace. 
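		// The key carries only the GroupKind (no API version), so two manifests that differ only in
		// apiVersion already map to the same key; the namespace is the only field adjusted here.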
if k.namespace == DefaultNamespace { k.namespace = "" } diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest_test.go b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest_test.go index f4bf6fb16f..a1de84bbd7 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest_test.go @@ -21,6 +21,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" ) func TestManifest_AddStringMapValues(t *testing.T) { @@ -155,10 +156,9 @@ spec: }, want: map[ResourceKey]Manifest{ { - apiVersion: "v1", - kind: "ConfigMap", - name: "my-config", - namespace: "default", + groupKind: schema.ParseGroupKind("ConfigMap"), + name: "my-config", + namespace: "default", }: mustParseManifests(t, ` apiVersion: v1 kind: ConfigMap @@ -169,10 +169,9 @@ data: key: value `)[0], { - apiVersion: "v1", - kind: "Secret", - name: "my-secret", - namespace: "default", + groupKind: schema.ParseGroupKind("Secret"), + name: "my-secret", + namespace: "default", }: mustParseManifests(t, ` apiVersion: v1 kind: Secret @@ -217,10 +216,9 @@ data: }, want: map[ResourceKey]Manifest{ { - apiVersion: "v1", - kind: "ConfigMap", - name: "my-config", - namespace: "default", + groupKind: schema.ParseGroupKind("ConfigMap"), + name: "my-config", + namespace: "default", }: mustParseManifests(t, ` apiVersion: v1 kind: ConfigMap @@ -247,10 +245,9 @@ data: }, want: map[ResourceKey]Manifest{ { - apiVersion: "v1", - kind: "Secret", - name: "my-secret", - namespace: "default", + groupKind: schema.ParseGroupKind("Secret"), + name: "my-secret", + namespace: "default", }: mustParseManifests(t, ` apiVersion: v1 kind: Secret @@ -286,10 +283,9 @@ data: }, want: map[ResourceKey]Manifest{ { - apiVersion: "v1", - kind: "ConfigMap", - name: "my-config", - namespace: "custom-namespace", + groupKind: schema.ParseGroupKind("ConfigMap"), + name: "my-config", + namespace: "custom-namespace", }: mustParseManifests(t, ` apiVersion: v1 kind: ConfigMap @@ -300,10 +296,9 @@ data: key: value `)[0], { - apiVersion: "v1", - kind: "Secret", - name: "my-secret", - namespace: "custom-namespace", + groupKind: schema.ParseGroupKind("Secret"), + name: "my-secret", + namespace: "custom-namespace", }: mustParseManifests(t, ` apiVersion: v1 kind: Secret diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go index fb462b795f..9cbe40f9c4 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go @@ -18,6 +18,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" ) const ( @@ -29,14 +30,13 @@ const ( ) type ResourceKey struct { - apiVersion string - kind string - namespace string - name string + groupKind schema.GroupKind + namespace string + name string } func (k ResourceKey) Kind() string { - return k.kind + return k.groupKind.Kind } func (k ResourceKey) Name() string { @@ -44,19 +44,18 @@ func (k ResourceKey) Name() string { } func (k ResourceKey) String() string { - return fmt.Sprintf("%s:%s:%s:%s", k.apiVersion, k.kind, k.namespace, k.name) + return fmt.Sprintf("%s:%s:%s:%s", k.groupKind.Group, k.groupKind.Kind, k.namespace, k.name) } func (k ResourceKey) ReadableString() string { - return fmt.Sprintf("name=%q, kind=%q, namespace=%q, apiVersion=%q", k.name, k.kind, k.namespace, k.apiVersion) + 
return fmt.Sprintf("name=%q, kind=%q, namespace=%q, apiGroup=%q", k.name, k.groupKind.Kind, k.namespace, k.groupKind.Group) } func makeResourceKey(obj *unstructured.Unstructured) ResourceKey { k := ResourceKey{ - apiVersion: obj.GetAPIVersion(), - kind: obj.GetKind(), - namespace: obj.GetNamespace(), - name: obj.GetName(), + groupKind: obj.GroupVersionKind().GroupKind(), + namespace: obj.GetNamespace(), + name: obj.GetName(), } return k } From f8f0c7e5b6e270d084f85f6bfdb560e4c1aaadbe Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Thu, 26 Dec 2024 09:33:18 +0900 Subject: [PATCH 07/27] Implement pruning resources Signed-off-by: Shinnosuke Sawada-Dazai --- .../plugin/kubernetes/config/application.go | 11 +++ .../plugin/kubernetes/deployment/server.go | 81 ++++++++++++++++++- .../plugin/kubernetes/provider/kubectl.go | 28 +++++++ 3 files changed, 118 insertions(+), 2 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/config/application.go b/pkg/app/pipedv1/plugin/kubernetes/config/application.go index c2c3acaad9..794d5e18b3 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/config/application.go +++ b/pkg/app/pipedv1/plugin/kubernetes/config/application.go @@ -36,6 +36,9 @@ type KubernetesApplicationSpec struct { // Input for Kubernetes deployment such as kubectl version, helm version, manifests filter... Input KubernetesDeploymentInput `json:"input"` + // Configuration for quick sync. + QuickSync K8sSyncStageOptions `json:"quickSync"` + // Which resources should be considered as the Workload of application. // Empty means all Deployments. // e.g. @@ -100,6 +103,14 @@ type KubernetesDeployTargetConfig struct { KubectlVersion string `json:"kubectlVersion"` } +// K8sSyncStageOptions contains all configurable values for a K8S_SYNC stage. +type K8sSyncStageOptions struct { + // Whether the PRIMARY variant label should be added to manifests if they were missing. + AddVariantLabelToSelector bool `json:"addVariantLabelToSelector"` + // Whether the resources that are no longer defined in Git should be removed or not. + Prune bool `json:"prune"` +} + // FindDeployTarget finds the deploy target configuration by the given name. func FindDeployTarget(cfg *config.PipedPlugin, name string) (KubernetesDeployTargetConfig, error) { if cfg == nil { diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go index 02bbdb0173..f354a1b6d4 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go @@ -17,6 +17,8 @@ package deployment import ( "cmp" "context" + "errors" + "fmt" "time" kubeconfig "github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/kubernetes/config" @@ -266,6 +268,9 @@ func (a *DeploymentService) executeK8sSyncStage(ctx context.Context, lp logpersi // Add variant annotations to all manifests. for i := range manifests { + manifests[i].AddLabels(map[string]string{ + variantLabel: primaryVariant, + }) manifests[i].AddAnnotations(map[string]string{ variantLabel: primaryVariant, }) @@ -290,8 +295,11 @@ func (a *DeploymentService) executeK8sSyncStage(ctx context.Context, lp logpersi return model.StageStatus_STAGE_FAILURE } + // Create the kubectl wrapper for the target cluster. + kubectl := provider.NewKubectl(kubectlPath) + // Create the applier for the target cluster. 
- applier := provider.NewApplier(provider.NewKubectl(kubectlPath), cfg.Spec.Input, deployTargetConfig, a.logger) + applier := provider.NewApplier(kubectl, cfg.Spec.Input, deployTargetConfig, a.logger) // Start applying all manifests to add or update running resources. if err := applyManifests(ctx, applier, manifests, cfg.Spec.Input.Namespace, lp); err != nil { @@ -299,11 +307,80 @@ func (a *DeploymentService) executeK8sSyncStage(ctx context.Context, lp logpersi return model.StageStatus_STAGE_FAILURE } - // TODO: implement prune resources + // TODO: treat the stage options specified under "with" + if !cfg.Spec.QuickSync.Prune { + lp.Info("Resource GC was skipped because sync.prune was not configured") + return model.StageStatus_STAGE_SUCCESS + } + + // Wait for all applied manifests to be stable. + // In theory, we don't need to wait for them to be stable before going to the next step + // but waiting for a while reduces the number of Kubernetes changes in a short time. + lp.Info("Waiting for the applied manifests to be stable") + select { + case <-time.After(15 * time.Second): + break + case <-ctx.Done(): + break + } + + lp.Info("Start finding all running resources but no longer defined in Git") + liveResources, err := kubectl.GetAll(ctx, deployTargetConfig.KubeConfigPath, "", "all", + fmt.Sprintf("%s=%s", provider.LabelApplication, input.GetDeployment().GetApplicationId()), + fmt.Sprintf("%s=%s", variantLabel, primaryVariant)) + if err != nil { + lp.Errorf("Failed while getting live resources to prune (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + if len(liveResources) == 0 { + lp.Info("There is no data about live resource so no resource will be removed") + return model.StageStatus_STAGE_SUCCESS + } + lp.Successf("Successfully loaded %d live resources", len(liveResources)) + + removeKeys := findRemoveResources(manifests, liveResources) + if len(removeKeys) == 0 { + lp.Info("There are no live resources should be removed") + return model.StageStatus_STAGE_SUCCESS + } + + lp.Infof("Start pruning %d resources", len(removeKeys)) + var deletedCount int + for _, key := range removeKeys { + if err := kubectl.Delete(ctx, deployTargetConfig.KubeConfigPath, "", key); err != nil { + if errors.Is(err, provider.ErrNotFound) { + lp.Infof("Specified resource does not exist, so skip deleting the resource: %s (%v)", key.ReadableString(), err) + continue + } + lp.Errorf("Failed while deleting resource %s (%v)", key.ReadableString(), err) + continue // continue to delete other resources + } + deletedCount++ + lp.Successf("- deleted resource: %s", key.ReadableString()) + } + + lp.Successf("Successfully deleted %d resources", deletedCount) return model.StageStatus_STAGE_SUCCESS } +func findRemoveResources(manifests []provider.Manifest, liveResources []provider.Manifest) []provider.ResourceKey { + var ( + keys = make(map[provider.ResourceKey]struct{}, len(manifests)) + removeKeys = make([]provider.ResourceKey, 0, len(liveResources)) + ) + for _, m := range manifests { + keys[m.Key()] = struct{}{} + } + for _, m := range liveResources { + if _, ok := keys[m.Key()]; ok { + continue + } + removeKeys = append(removeKeys, m.Key()) + } + return removeKeys +} + func (a *DeploymentService) executeK8sRollbackStage(ctx context.Context, lp logpersister.StageLogPersister, input *deployment.ExecutePluginInput) model.StageStatus { if input.GetDeployment().GetRunningCommitHash() == "" { lp.Errorf("Unable to determine the last deployed commit to rollback. 
It seems this is the first deployment.") diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go b/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go index f3b16f0941..6c9f7f4d57 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go @@ -230,6 +230,34 @@ func (c *Kubectl) Get(ctx context.Context, kubeconfig, namespace string, r Resou return ms[0], nil } +func (c *Kubectl) GetAll(ctx context.Context, kubeconfig, namespace, kind string, selector ...string) (ms []Manifest, err error) { + args := make([]string, 0, 7) + if kubeconfig != "" { + args = append(args, "--kubeconfig", kubeconfig) + } + if namespace == "" { + args = append(args, "--all-namespaces") + } else { + args = append(args, "--namespace", namespace) + } + args = append(args, "get", kind, "-o", "yaml", "--selector", strings.Join(selector, ",")) + cmd := exec.CommandContext(ctx, c.execPath, args...) + out, err := cmd.CombinedOutput() + + if err != nil { + return nil, fmt.Errorf("failed to get: %s, %w", string(out), err) + } + if strings.Contains(string(out), "(NotFound)") { + // No resources found. Return nil. This is not an error. + return nil, nil + } + ms, err = ParseManifests(string(out)) + if err != nil { + return nil, fmt.Errorf("failed to parse manifests %v: %w", kind, err) + } + return ms, nil +} + // CreateNamespace runs kubectl create namespace with the given namespace. func (c *Kubectl) CreateNamespace(ctx context.Context, kubeconfig, namespace string) (err error) { // TODO: record the metrics for the kubectl create namespace command. From b9879cecf8f503f105285321a8bd400b70994ee4 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Thu, 26 Dec 2024 10:14:22 +0900 Subject: [PATCH 08/27] Implement resource pruning in k8s_sync Signed-off-by: Shinnosuke Sawada-Dazai --- .../plugin/kubernetes/deployment/server.go | 19 +--- .../kubernetes/deployment/server_test.go | 104 ++++++++++++++++++ .../testdata/prune/running/app.pipecd.yaml | 18 +++ .../testdata/prune/running/deployment.yaml | 27 +++++ .../testdata/prune/running/service.yaml | 11 ++ .../testdata/prune/target/app.pipecd.yaml | 17 +++ .../testdata/prune/target/deployment.yaml | 27 +++++ .../plugin/kubernetes/provider/kubectl.go | 23 +++- .../plugin/kubernetes/provider/resource.go | 33 ++++++ 9 files changed, 257 insertions(+), 22 deletions(-) create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/running/app.pipecd.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/running/deployment.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/running/service.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/target/app.pipecd.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/target/deployment.yaml diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go index f354a1b6d4..cf1908ad9f 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go @@ -339,7 +339,7 @@ func (a *DeploymentService) executeK8sSyncStage(ctx context.Context, lp logpersi lp.Successf("Successfully loaded %d live resources", len(liveResources)) - removeKeys := findRemoveResources(manifests, liveResources) + removeKeys := provider.FindRemoveResources(manifests, liveResources) if len(removeKeys) == 0 { lp.Info("There are no live 
resources should be removed") return model.StageStatus_STAGE_SUCCESS @@ -364,23 +364,6 @@ func (a *DeploymentService) executeK8sSyncStage(ctx context.Context, lp logpersi return model.StageStatus_STAGE_SUCCESS } -func findRemoveResources(manifests []provider.Manifest, liveResources []provider.Manifest) []provider.ResourceKey { - var ( - keys = make(map[provider.ResourceKey]struct{}, len(manifests)) - removeKeys = make([]provider.ResourceKey, 0, len(liveResources)) - ) - for _, m := range manifests { - keys[m.Key()] = struct{}{} - } - for _, m := range liveResources { - if _, ok := keys[m.Key()]; ok { - continue - } - removeKeys = append(removeKeys, m.Key()) - } - return removeKeys -} - func (a *DeploymentService) executeK8sRollbackStage(ctx context.Context, lp logpersister.StageLogPersister, input *deployment.ExecutePluginInput) model.StageStatus { if input.GetDeployment().GetRunningCommitHash() == "" { lp.Errorf("Unable to determine the last deployed commit to rollback. It seems this is the first deployment.") diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go index 839163e03d..d5fe3cb046 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go @@ -266,3 +266,107 @@ func TestDeploymentService_executeK8sSyncStage_withInputNamespace(t *testing.T) assert.Equal(t, "apps:Deployment::simple", deployment.GetAnnotations()["pipecd.dev/resource-key"]) // This assertion differs from the non-plugin-arched piped's Kubernetes platform provider, but we decided to change this behavior. assert.Equal(t, "0123456789", deployment.GetAnnotations()["pipecd.dev/commit-hash"]) } + +func TestDeploymentService_executeK8sSyncStage_withPrune(t *testing.T) { + ctx := context.Background() + + // initialize tool registry + testRegistry, err := toolregistrytest.NewToolRegistry(t) + require.NoError(t, err) + + // initialize plugin config and dynamic client for assertions with envtest + pluginCfg, dynamicClient := setupTestPluginConfigAndDynamicClient(t) + + svc := NewDeploymentService(pluginCfg, zaptest.NewLogger(t), testRegistry, logpersistertest.NewTestLogPersister(t)) + + running := filepath.Join("./", "testdata", "prune", "running") + + // read the running application config from the example file + runningCfg, err := os.ReadFile(filepath.Join(running, "app.pipecd.yaml")) + require.NoError(t, err) + + // prepare the request to ensure the running deployment exists + runningRequest := &deployment.ExecuteStageRequest{ + Input: &deployment.ExecutePluginInput{ + Deployment: &model.Deployment{ + PipedId: "piped-id", + ApplicationId: "app-id", + DeployTargets: []string{"default"}, + }, + Stage: &model.PipelineStage{ + Id: "stage-id", + Name: "K8S_SYNC", + }, + StageConfig: []byte(``), + RunningDeploymentSource: nil, + TargetDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: running, + CommitHash: "0123456789", + ApplicationConfig: runningCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, + }, + } + + resp, err := svc.ExecuteStage(ctx, runningRequest) + + require.NoError(t, err) + require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + + service, err := dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("default").Get(context.Background(), "simple", metav1.GetOptions{}) + require.NoError(t, err) + + require.Equal(t, "piped", 
service.GetLabels()["pipecd.dev/managed-by"]) + require.Equal(t, "piped-id", service.GetLabels()["pipecd.dev/piped"]) + require.Equal(t, "app-id", service.GetLabels()["pipecd.dev/application"]) + require.Equal(t, "0123456789", service.GetLabels()["pipecd.dev/commit-hash"]) + + require.Equal(t, "simple", service.GetName()) + require.Equal(t, "piped", service.GetAnnotations()["pipecd.dev/managed-by"]) + require.Equal(t, "piped-id", service.GetAnnotations()["pipecd.dev/piped"]) + require.Equal(t, "app-id", service.GetAnnotations()["pipecd.dev/application"]) + require.Equal(t, "v1", service.GetAnnotations()["pipecd.dev/original-api-version"]) + require.Equal(t, ":Service::simple", service.GetAnnotations()["pipecd.dev/resource-key"]) // This assertion differs from the non-plugin-arched piped's Kubernetes platform provider, but we decided to change this behavior. + require.Equal(t, "0123456789", service.GetAnnotations()["pipecd.dev/commit-hash"]) + + target := filepath.Join("./", "testdata", "prune", "target") + + // read the running application config from the example file + targetCfg, err := os.ReadFile(filepath.Join(target, "app.pipecd.yaml")) + require.NoError(t, err) + + // prepare the request to ensure the running deployment exists + targetRequest := &deployment.ExecuteStageRequest{ + Input: &deployment.ExecutePluginInput{ + Deployment: &model.Deployment{ + PipedId: "piped-id", + ApplicationId: "app-id", + DeployTargets: []string{"default"}, + }, + Stage: &model.PipelineStage{ + Id: "stage-id", + Name: "K8S_SYNC", + }, + StageConfig: []byte(``), + RunningDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: running, + CommitHash: "0123456789", + ApplicationConfig: runningCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, + TargetDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: target, + CommitHash: "0012345678", + ApplicationConfig: targetCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, + }, + } + + resp, err = svc.ExecuteStage(ctx, targetRequest) + require.NoError(t, err) + require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + + _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("default").Get(context.Background(), "simple", metav1.GetOptions{}) + require.Error(t, err) +} diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/running/app.pipecd.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/running/app.pipecd.yaml new file mode 100644 index 0000000000..257d9c46d7 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/running/app.pipecd.yaml @@ -0,0 +1,18 @@ +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + name: simple + labels: + env: example + team: product + quickSync: + prune: true + input: + manifests: + - deployment.yaml + - service.yaml + kubectlVersion: 1.31.0 + description: | + This app demonstrates how to deploy a Kubernetes application with [Quick Sync](https://pipecd.dev/docs/concepts/#sync-strategy) strategy.\ + No pipeline is specified then in each deployment PipeCD will roll out the new version and switch all traffic to it immediately.\ + References: [adding a new app](https://pipecd.dev/docs/user-guide/managing-application/adding-an-application/), [app configuration](https://pipecd.dev/docs/user-guide/configuration-reference/) diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/running/deployment.yaml 
b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/running/deployment.yaml new file mode 100644 index 0000000000..f1e0d4b29a --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/running/deployment.yaml @@ -0,0 +1,27 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple +spec: + replicas: 2 + selector: + matchLabels: + app: simple + pipecd.dev/variant: primary + template: + metadata: + labels: + app: simple + pipecd.dev/variant: primary + annotations: + sidecar.istio.io/inject: "false" + spec: + containers: + - name: helloworld + image: ghcr.io/pipe-cd/helloworld:v0.32.0 + args: + - server + ports: + - containerPort: 9085 diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/running/service.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/running/service.yaml new file mode 100644 index 0000000000..52ca9d1f59 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/running/service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: simple +spec: + selector: + app: simple + ports: + - protocol: TCP + port: 9085 + targetPort: 9085 diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/target/app.pipecd.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/target/app.pipecd.yaml new file mode 100644 index 0000000000..d9140b2e81 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/target/app.pipecd.yaml @@ -0,0 +1,17 @@ +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + name: simple + labels: + env: example + team: product + quickSync: + prune: true + input: + manifests: + - deployment.yaml + kubectlVersion: 1.31.0 + description: | + This app demonstrates how to deploy a Kubernetes application with [Quick Sync](https://pipecd.dev/docs/concepts/#sync-strategy) strategy.\ + No pipeline is specified then in each deployment PipeCD will roll out the new version and switch all traffic to it immediately.\ + References: [adding a new app](https://pipecd.dev/docs/user-guide/managing-application/adding-an-application/), [app configuration](https://pipecd.dev/docs/user-guide/configuration-reference/) diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/target/deployment.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/target/deployment.yaml new file mode 100644 index 0000000000..f1e0d4b29a --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune/target/deployment.yaml @@ -0,0 +1,27 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple +spec: + replicas: 2 + selector: + matchLabels: + app: simple + pipecd.dev/variant: primary + template: + metadata: + labels: + app: simple + pipecd.dev/variant: primary + annotations: + sidecar.istio.io/inject: "false" + spec: + containers: + - name: helloworld + image: ghcr.io/pipe-cd/helloworld:v0.32.0 + args: + - server + ports: + - containerPort: 9085 diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go b/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go index 6c9f7f4d57..26d2002717 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go @@ -21,6 +21,9 @@ import ( "fmt" "os/exec" "strings" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" ) var ( @@ -232,6 +235,7 @@ func (c *Kubectl) Get(ctx context.Context, kubeconfig, namespace string, r Resou 
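// GetAll lists, via "kubectl get --selector", all resources of the given kind that match the label
// selector. With "-o yaml", kubectl wraps multiple objects in a single "kind: List" document rather
// than emitting a multi-document YAML stream, which is why the output is decoded into v1.List below
// and each item is parsed individually.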
func (c *Kubectl) GetAll(ctx context.Context, kubeconfig, namespace, kind string, selector ...string) (ms []Manifest, err error) { args := make([]string, 0, 7) + args = append(args, "get", kind, "-o", "yaml", "--selector", strings.Join(selector, ",")) if kubeconfig != "" { args = append(args, "--kubeconfig", kubeconfig) } @@ -240,7 +244,6 @@ func (c *Kubectl) GetAll(ctx context.Context, kubeconfig, namespace, kind string } else { args = append(args, "--namespace", namespace) } - args = append(args, "get", kind, "-o", "yaml", "--selector", strings.Join(selector, ",")) cmd := exec.CommandContext(ctx, c.execPath, args...) out, err := cmd.CombinedOutput() @@ -251,10 +254,22 @@ func (c *Kubectl) GetAll(ctx context.Context, kubeconfig, namespace, kind string // No resources found. Return nil. This is not an error. return nil, nil } - ms, err = ParseManifests(string(out)) - if err != nil { - return nil, fmt.Errorf("failed to parse manifests %v: %w", kind, err) + + // Unmarshal the output to the list of manifests. + var list v1.List + if err := yaml.Unmarshal(out, &list); err != nil { + return nil, fmt.Errorf("failed to unmarshal the output: %w", err) + } + + ms = make([]Manifest, 0, len(list.Items)) + for _, item := range list.Items { + m, err := ParseManifests(string(item.Raw)) + if err != nil { + return nil, fmt.Errorf("failed to parse the manifest: %w", err) + } + ms = append(ms, m...) } + return ms, nil } diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go index 9cbe40f9c4..1aadffad60 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go @@ -43,6 +43,14 @@ func (k ResourceKey) Name() string { return k.name } +// normalizeDefaultNamespace converts the default namespace to an empty string. 
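+// Manifests loaded from Git usually omit the namespace, while the same resource read back from the
+// cluster reports "default", so FindRemoveResources compares keys in both forms before deciding
+// what to prune.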
+func (k ResourceKey) normalizeDefaultNamespace() ResourceKey { + if k.namespace == DefaultNamespace { + k.namespace = "" + } + return k +} + func (k ResourceKey) String() string { return fmt.Sprintf("%s:%s:%s:%s", k.groupKind.Group, k.groupKind.Kind, k.namespace, k.name) } @@ -59,3 +67,28 @@ func makeResourceKey(obj *unstructured.Unstructured) ResourceKey { } return k } + +func FindRemoveResources(manifests []Manifest, liveResources []Manifest) []ResourceKey { + var ( + keys = make(map[ResourceKey]struct{}, len(manifests)) + removeKeys = make([]ResourceKey, 0, len(liveResources)) + ) + for _, m := range manifests { + keys[m.Key()] = struct{}{} + keys[m.Key().normalizeDefaultNamespace()] = struct{}{} + } + for _, m := range liveResources { + if !m.IsManagedByPiped() { + continue + } + if _, ok := keys[m.Key()]; ok { + continue + } + if _, ok := keys[m.Key().normalizeDefaultNamespace()]; ok { + continue + } + + removeKeys = append(removeKeys, m.Key()) + } + return removeKeys +} From bb444a75c7495d5f7b553f6bf1e43c9d0131e188 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Thu, 26 Dec 2024 10:31:26 +0900 Subject: [PATCH 09/27] Fix the pruning in the case namespace changes Signed-off-by: Shinnosuke Sawada-Dazai --- .../plugin/kubernetes/deployment/server.go | 2 +- .../kubernetes/deployment/server_test.go | 120 ++++++++++++++++++ .../running/app.pipecd.yaml | 20 +++ .../running/deployment.yaml | 27 ++++ .../running/service.yaml | 11 ++ .../target/app.pipecd.yaml | 20 +++ .../target/deployment.yaml | 27 ++++ .../target/service.yaml | 11 ++ .../plugin/kubernetes/provider/loader.go | 5 +- .../plugin/kubernetes/provider/resource.go | 4 + 10 files changed, 245 insertions(+), 2 deletions(-) create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/running/app.pipecd.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/running/deployment.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/running/service.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/target/app.pipecd.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/target/deployment.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/target/service.yaml diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go index cf1908ad9f..2057d331d4 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go @@ -348,7 +348,7 @@ func (a *DeploymentService) executeK8sSyncStage(ctx context.Context, lp logpersi lp.Infof("Start pruning %d resources", len(removeKeys)) var deletedCount int for _, key := range removeKeys { - if err := kubectl.Delete(ctx, deployTargetConfig.KubeConfigPath, "", key); err != nil { + if err := kubectl.Delete(ctx, deployTargetConfig.KubeConfigPath, key.Namespace(), key); err != nil { if errors.Is(err, provider.ErrNotFound) { lp.Infof("Specified resource does not exist, so skip deleting the resource: %s (%v)", key.ReadableString(), err) continue diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go index d5fe3cb046..d5aee40b7f 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go +++ 
b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go @@ -370,3 +370,123 @@ func TestDeploymentService_executeK8sSyncStage_withPrune(t *testing.T) { _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("default").Get(context.Background(), "simple", metav1.GetOptions{}) require.Error(t, err) } + +func TestDeploymentService_executeK8sSyncStage_withPrune_changesNamespace(t *testing.T) { + ctx := context.Background() + + // initialize tool registry + testRegistry, err := toolregistrytest.NewToolRegistry(t) + require.NoError(t, err) + + // initialize plugin config and dynamic client for assertions with envtest + pluginCfg, dynamicClient := setupTestPluginConfigAndDynamicClient(t) + + svc := NewDeploymentService(pluginCfg, zaptest.NewLogger(t), testRegistry, logpersistertest.NewTestLogPersister(t)) + + running := filepath.Join("./", "testdata", "prune_with_change_namespace", "running") + + // read the running application config from the example file + runningCfg, err := os.ReadFile(filepath.Join(running, "app.pipecd.yaml")) + require.NoError(t, err) + + // prepare the request to ensure the running deployment exists + runningRequest := &deployment.ExecuteStageRequest{ + Input: &deployment.ExecutePluginInput{ + Deployment: &model.Deployment{ + PipedId: "piped-id", + ApplicationId: "app-id", + DeployTargets: []string{"default"}, + }, + Stage: &model.PipelineStage{ + Id: "stage-id", + Name: "K8S_SYNC", + }, + StageConfig: []byte(``), + RunningDeploymentSource: nil, + TargetDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: running, + CommitHash: "0123456789", + ApplicationConfig: runningCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, + }, + } + + resp, err := svc.ExecuteStage(ctx, runningRequest) + + require.NoError(t, err) + require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + + service, err := dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("test-1").Get(context.Background(), "simple", metav1.GetOptions{}) + require.NoError(t, err) + + require.Equal(t, "piped", service.GetLabels()["pipecd.dev/managed-by"]) + require.Equal(t, "piped-id", service.GetLabels()["pipecd.dev/piped"]) + require.Equal(t, "app-id", service.GetLabels()["pipecd.dev/application"]) + require.Equal(t, "0123456789", service.GetLabels()["pipecd.dev/commit-hash"]) + + require.Equal(t, "simple", service.GetName()) + require.Equal(t, "piped", service.GetAnnotations()["pipecd.dev/managed-by"]) + require.Equal(t, "piped-id", service.GetAnnotations()["pipecd.dev/piped"]) + require.Equal(t, "app-id", service.GetAnnotations()["pipecd.dev/application"]) + require.Equal(t, "v1", service.GetAnnotations()["pipecd.dev/original-api-version"]) + require.Equal(t, "0123456789", service.GetAnnotations()["pipecd.dev/commit-hash"]) + + target := filepath.Join("./", "testdata", "prune_with_change_namespace", "target") + + // read the running application config from the example file + targetCfg, err := os.ReadFile(filepath.Join(target, "app.pipecd.yaml")) + require.NoError(t, err) + + // prepare the request to ensure the running deployment exists + targetRequest := &deployment.ExecuteStageRequest{ + Input: &deployment.ExecutePluginInput{ + Deployment: &model.Deployment{ + PipedId: "piped-id", + ApplicationId: "app-id", + DeployTargets: []string{"default"}, + }, + Stage: &model.PipelineStage{ + Id: "stage-id", + Name: "K8S_SYNC", + }, + StageConfig: 
[]byte(``), + RunningDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: running, + CommitHash: "0123456789", + ApplicationConfig: runningCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, + TargetDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: target, + CommitHash: "0012345678", + ApplicationConfig: targetCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, + }, + } + + resp, err = svc.ExecuteStage(ctx, targetRequest) + require.NoError(t, err) + require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + + // The service should be removed from the previous namespace + _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("test-1").Get(context.Background(), "simple", metav1.GetOptions{}) + require.Error(t, err) + + // The service should be created in the new namespace + service, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("test-2").Get(context.Background(), "simple", metav1.GetOptions{}) + require.NoError(t, err) + + require.Equal(t, "piped", service.GetLabels()["pipecd.dev/managed-by"]) + require.Equal(t, "piped-id", service.GetLabels()["pipecd.dev/piped"]) + require.Equal(t, "app-id", service.GetLabels()["pipecd.dev/application"]) + require.Equal(t, "0012345678", service.GetLabels()["pipecd.dev/commit-hash"]) + + require.Equal(t, "simple", service.GetName()) + require.Equal(t, "piped", service.GetAnnotations()["pipecd.dev/managed-by"]) + require.Equal(t, "piped-id", service.GetAnnotations()["pipecd.dev/piped"]) + require.Equal(t, "app-id", service.GetAnnotations()["pipecd.dev/application"]) + require.Equal(t, "v1", service.GetAnnotations()["pipecd.dev/original-api-version"]) + require.Equal(t, "0012345678", service.GetAnnotations()["pipecd.dev/commit-hash"]) +} diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/running/app.pipecd.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/running/app.pipecd.yaml new file mode 100644 index 0000000000..092b75bc56 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/running/app.pipecd.yaml @@ -0,0 +1,20 @@ +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + name: simple + labels: + env: example + team: product + quickSync: + prune: true + input: + autoCreateNamespace: true + namespace: test-1 + manifests: + - deployment.yaml + - service.yaml + kubectlVersion: 1.31.0 + description: | + This app demonstrates how to deploy a Kubernetes application with [Quick Sync](https://pipecd.dev/docs/concepts/#sync-strategy) strategy.\ + No pipeline is specified then in each deployment PipeCD will roll out the new version and switch all traffic to it immediately.\ + References: [adding a new app](https://pipecd.dev/docs/user-guide/managing-application/adding-an-application/), [app configuration](https://pipecd.dev/docs/user-guide/configuration-reference/) diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/running/deployment.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/running/deployment.yaml new file mode 100644 index 0000000000..f1e0d4b29a --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/running/deployment.yaml @@ -0,0 +1,27 @@ +apiVersion: apps/v1 +kind: Deployment 
+metadata: + name: simple + labels: + app: simple +spec: + replicas: 2 + selector: + matchLabels: + app: simple + pipecd.dev/variant: primary + template: + metadata: + labels: + app: simple + pipecd.dev/variant: primary + annotations: + sidecar.istio.io/inject: "false" + spec: + containers: + - name: helloworld + image: ghcr.io/pipe-cd/helloworld:v0.32.0 + args: + - server + ports: + - containerPort: 9085 diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/running/service.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/running/service.yaml new file mode 100644 index 0000000000..52ca9d1f59 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/running/service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: simple +spec: + selector: + app: simple + ports: + - protocol: TCP + port: 9085 + targetPort: 9085 diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/target/app.pipecd.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/target/app.pipecd.yaml new file mode 100644 index 0000000000..c25555fb66 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/target/app.pipecd.yaml @@ -0,0 +1,20 @@ +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + name: simple + labels: + env: example + team: product + quickSync: + prune: true + input: + autoCreateNamespace: true + namespace: test-2 + manifests: + - deployment.yaml + - service.yaml + kubectlVersion: 1.31.0 + description: | + This app demonstrates how to deploy a Kubernetes application with [Quick Sync](https://pipecd.dev/docs/concepts/#sync-strategy) strategy.\ + No pipeline is specified then in each deployment PipeCD will roll out the new version and switch all traffic to it immediately.\ + References: [adding a new app](https://pipecd.dev/docs/user-guide/managing-application/adding-an-application/), [app configuration](https://pipecd.dev/docs/user-guide/configuration-reference/) diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/target/deployment.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/target/deployment.yaml new file mode 100644 index 0000000000..f1e0d4b29a --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/target/deployment.yaml @@ -0,0 +1,27 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple + labels: + app: simple +spec: + replicas: 2 + selector: + matchLabels: + app: simple + pipecd.dev/variant: primary + template: + metadata: + labels: + app: simple + pipecd.dev/variant: primary + annotations: + sidecar.istio.io/inject: "false" + spec: + containers: + - name: helloworld + image: ghcr.io/pipe-cd/helloworld:v0.32.0 + args: + - server + ports: + - containerPort: 9085 diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/target/service.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/target/service.yaml new file mode 100644 index 0000000000..52ca9d1f59 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_with_change_namespace/target/service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: simple +spec: + selector: + app: simple + ports: + - protocol: TCP + port: 9085 + 
targetPort: 9085 diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go b/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go index 043b105340..f051d0f1c5 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go @@ -85,8 +85,11 @@ func NewLoader(registry ToolRegistry) *Loader { func (l *Loader) LoadManifests(ctx context.Context, input LoaderInput) (manifests []Manifest, err error) { defer func() { - // Add builtin annotations for tracking application live state. for i := range manifests { + // Set the namespace for all manifests. + manifests[i].body.SetNamespace(input.Namespace) + + // Add builtin labels and annotations for tracking application live state. manifests[i].AddLabels(map[string]string{ LabelManagedBy: ManagedByPiped, LabelPiped: input.PipedID, diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go index 1aadffad60..c96d234f81 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go @@ -43,6 +43,10 @@ func (k ResourceKey) Name() string { return k.name } +func (k ResourceKey) Namespace() string { + return k.namespace +} + // normalizeDefaultNamespace converts the default namespace to an empty string. func (k ResourceKey) normalizeDefaultNamespace() ResourceKey { if k.namespace == DefaultNamespace { From 54309c9d7a555ae06bf86668599b34783990d3af Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Thu, 26 Dec 2024 16:16:52 +0900 Subject: [PATCH 10/27] Fix pruning the cluster-scoped resources Signed-off-by: Shinnosuke Sawada-Dazai --- .../plugin/kubernetes/deployment/server.go | 25 +++- .../kubernetes/deployment/server_test.go | 135 ++++++++++++++++- .../prepare/app.pipecd.yaml | 10 ++ .../prepare/crd.yaml | 40 +++++ .../running/app.pipecd.yaml | 13 ++ .../running/crontab-2.yaml | 7 + .../running/crontab.yaml | 7 + .../target/app.pipecd.yaml | 12 ++ .../target/crontab.yaml | 7 + .../plugin/kubernetes/provider/kubectl.go | 60 +++++++- .../plugin/kubernetes/provider/resource.go | 70 ++++++--- .../kubernetes/provider/resource_test.go | 139 ++++++++++++++++++ 12 files changed, 497 insertions(+), 28 deletions(-) create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/prepare/app.pipecd.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/prepare/crd.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/app.pipecd.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/crontab-2.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/crontab.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/target/app.pipecd.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/target/crontab.yaml create mode 100644 pkg/app/pipedv1/plugin/kubernetes/provider/resource_test.go diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go index 2057d331d4..666b62454f 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go @@ -325,21 +325,34 @@ func (a 
*DeploymentService) executeK8sSyncStage(ctx context.Context, lp logpersi } lp.Info("Start finding all running resources but no longer defined in Git") - liveResources, err := kubectl.GetAll(ctx, deployTargetConfig.KubeConfigPath, "", "all", + + namespacedLiveResources, err := kubectl.GetAll(ctx, deployTargetConfig.KubeConfigPath, + "", + fmt.Sprintf("%s=%s", provider.LabelManagedBy, provider.ManagedByPiped), fmt.Sprintf("%s=%s", provider.LabelApplication, input.GetDeployment().GetApplicationId()), - fmt.Sprintf("%s=%s", variantLabel, primaryVariant)) + ) if err != nil { - lp.Errorf("Failed while getting live resources to prune (%v)", err) + lp.Errorf("Failed while listing all resources (%v)", err) return model.StageStatus_STAGE_FAILURE } - if len(liveResources) == 0 { + + clusterLiveResources, err := kubectl.GetAllClusterScoped(ctx, deployTargetConfig.KubeConfigPath, + fmt.Sprintf("%s=%s", provider.LabelManagedBy, provider.ManagedByPiped), + fmt.Sprintf("%s=%s", provider.LabelApplication, input.GetDeployment().GetApplicationId()), + ) + if err != nil { + lp.Errorf("Failed while listing all cluster-scoped resources (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + if len(namespacedLiveResources)+len(clusterLiveResources) == 0 { lp.Info("There is no data about live resource so no resource will be removed") return model.StageStatus_STAGE_SUCCESS } - lp.Successf("Successfully loaded %d live resources", len(liveResources)) + lp.Successf("Successfully loaded %d live resources", len(namespacedLiveResources)+len(clusterLiveResources)) - removeKeys := provider.FindRemoveResources(manifests, liveResources) + removeKeys := provider.FindRemoveResources(manifests, namespacedLiveResources, clusterLiveResources) if len(removeKeys) == 0 { lp.Info("There are no live resources should be removed") return model.StageStatus_STAGE_SUCCESS diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go index d5aee40b7f..28cc45b620 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go @@ -263,7 +263,7 @@ func TestDeploymentService_executeK8sSyncStage_withInputNamespace(t *testing.T) assert.Equal(t, "piped-id", deployment.GetAnnotations()["pipecd.dev/piped"]) assert.Equal(t, "app-id", deployment.GetAnnotations()["pipecd.dev/application"]) assert.Equal(t, "apps/v1", deployment.GetAnnotations()["pipecd.dev/original-api-version"]) - assert.Equal(t, "apps:Deployment::simple", deployment.GetAnnotations()["pipecd.dev/resource-key"]) // This assertion differs from the non-plugin-arched piped's Kubernetes platform provider, but we decided to change this behavior. + assert.Equal(t, "apps:Deployment:test-namespace:simple", deployment.GetAnnotations()["pipecd.dev/resource-key"]) // This assertion differs from the non-plugin-arched piped's Kubernetes platform provider, but we decided to change this behavior. 
assert.Equal(t, "0123456789", deployment.GetAnnotations()["pipecd.dev/commit-hash"]) } @@ -490,3 +490,136 @@ func TestDeploymentService_executeK8sSyncStage_withPrune_changesNamespace(t *tes require.Equal(t, "v1", service.GetAnnotations()["pipecd.dev/original-api-version"]) require.Equal(t, "0012345678", service.GetAnnotations()["pipecd.dev/commit-hash"]) } + +func TestDeploymentService_executeK8sSyncStage_withPrune_clusterScoped(t *testing.T) { + ctx := context.Background() + + // initialize tool registry + testRegistry, err := toolregistrytest.NewToolRegistry(t) + require.NoError(t, err) + + // initialize plugin config and dynamic client for assertions with envtest + pluginCfg, dynamicClient := setupTestPluginConfigAndDynamicClient(t) + + svc := NewDeploymentService(pluginCfg, zaptest.NewLogger(t), testRegistry, logpersistertest.NewTestLogPersister(t)) + + // prepare the custom resource definition + prepare := filepath.Join("./", "testdata", "prune_cluster_scoped_resource", "prepare") + + prepareCfg, err := os.ReadFile(filepath.Join(prepare, "app.pipecd.yaml")) + require.NoError(t, err) + + prepareRequest := &deployment.ExecuteStageRequest{ + Input: &deployment.ExecutePluginInput{ + Deployment: &model.Deployment{ + PipedId: "piped-id", + ApplicationId: "prepare-app-id", + DeployTargets: []string{"default"}, + }, + Stage: &model.PipelineStage{ + Id: "stage-id", + Name: "K8S_SYNC", + }, + StageConfig: []byte(``), + RunningDeploymentSource: nil, + TargetDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: prepare, + CommitHash: "0123456789", + ApplicationConfig: prepareCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, + }, + } + + resp, err := svc.ExecuteStage(ctx, prepareRequest) + + require.NoError(t, err) + require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + + // prepare the running resources + running := filepath.Join("./", "testdata", "prune_cluster_scoped_resource", "running") + + // read the running application config from the example file + runningCfg, err := os.ReadFile(filepath.Join(running, "app.pipecd.yaml")) + require.NoError(t, err) + + // prepare the request to ensure the running deployment exists + runningRequest := &deployment.ExecuteStageRequest{ + Input: &deployment.ExecutePluginInput{ + Deployment: &model.Deployment{ + PipedId: "piped-id", + ApplicationId: "app-id", + DeployTargets: []string{"default"}, + }, + Stage: &model.PipelineStage{ + Id: "stage-id", + Name: "K8S_SYNC", + }, + StageConfig: []byte(``), + RunningDeploymentSource: nil, + TargetDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: running, + CommitHash: "0123456789", + ApplicationConfig: runningCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, + }, + } + + resp, err = svc.ExecuteStage(ctx, runningRequest) + + require.NoError(t, err) + require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + + // The my-new-cron-object/my-new-cron-object-2 should be created + _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object", metav1.GetOptions{}) + require.NoError(t, err) + _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object-2", metav1.GetOptions{}) + require.NoError(t, err) + + // sync the target resources and assert the prune behavior + target := 
filepath.Join("./", "testdata", "prune_cluster_scoped_resource", "target") + + // read the running application config from the example file + targetCfg, err := os.ReadFile(filepath.Join(target, "app.pipecd.yaml")) + require.NoError(t, err) + + // prepare the request to ensure the running deployment exists + targetRequest := &deployment.ExecuteStageRequest{ + Input: &deployment.ExecutePluginInput{ + Deployment: &model.Deployment{ + PipedId: "piped-id", + ApplicationId: "app-id", + DeployTargets: []string{"default"}, + }, + Stage: &model.PipelineStage{ + Id: "stage-id", + Name: "K8S_SYNC", + }, + StageConfig: []byte(``), + RunningDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: running, + CommitHash: "0123456789", + ApplicationConfig: runningCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, + TargetDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: target, + CommitHash: "0012345678", + ApplicationConfig: targetCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, + }, + } + + resp, err = svc.ExecuteStage(ctx, targetRequest) + require.NoError(t, err) + require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + + // The my-new-cron-object should not be removed + _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object", metav1.GetOptions{}) + require.NoError(t, err) + // The my-new-cron-object-2 should be removed + _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object-2", metav1.GetOptions{}) + require.Error(t, err) +} diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/prepare/app.pipecd.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/prepare/app.pipecd.yaml new file mode 100644 index 0000000000..1a151ec49b --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/prepare/app.pipecd.yaml @@ -0,0 +1,10 @@ +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + name: crd + quickSync: + prune: false + input: + manifests: + - crd.yaml + kubectlVersion: 1.31.0 diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/prepare/crd.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/prepare/crd.yaml new file mode 100644 index 0000000000..96ee6a8f06 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/prepare/crd.yaml @@ -0,0 +1,40 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + # name must match the spec fields below, and be in the form: . + name: crontabs.stable.example.com +spec: + # group name to use for REST API: /apis// + group: stable.example.com + # list of versions supported by this CustomResourceDefinition + versions: + - name: v1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. 
+ storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cronSpec: + type: string + image: + type: string + replicas: + type: integer + # either Namespaced or Cluster + scope: Cluster + names: + # plural name to be used in the URL: /apis/// + plural: crontabs + # singular name to be used as an alias on the CLI and for display + singular: crontab + # kind is normally the CamelCased singular type. Your resource manifests use this. + kind: CronTab + # shortNames allow shorter string to match your resource on the CLI + shortNames: + - ct diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/app.pipecd.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/app.pipecd.yaml new file mode 100644 index 0000000000..a5ccfbc0cc --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/app.pipecd.yaml @@ -0,0 +1,13 @@ +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + name: namespace + quickSync: + prune: true + input: + autoCreateNamespace: true + namespace: test-1 + manifests: + - crontab.yaml + - crontab-2.yaml + kubectlVersion: 1.31.0 diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/crontab-2.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/crontab-2.yaml new file mode 100644 index 0000000000..8bc6589555 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/crontab-2.yaml @@ -0,0 +1,7 @@ +apiVersion: "stable.example.com/v1" +kind: CronTab +metadata: + name: my-new-cron-object-2 +spec: + cronSpec: "* * * * */5" + image: my-awesome-cron-image diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/crontab.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/crontab.yaml new file mode 100644 index 0000000000..6e66452e55 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/crontab.yaml @@ -0,0 +1,7 @@ +apiVersion: "stable.example.com/v1" +kind: CronTab +metadata: + name: my-new-cron-object +spec: + cronSpec: "* * * * */5" + image: my-awesome-cron-image diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/target/app.pipecd.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/target/app.pipecd.yaml new file mode 100644 index 0000000000..a867e2dd23 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/target/app.pipecd.yaml @@ -0,0 +1,12 @@ +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + name: namespace + quickSync: + prune: true + input: + autoCreateNamespace: true + namespace: test-1 + manifests: + - crontab.yaml + kubectlVersion: 1.31.0 diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/target/crontab.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/target/crontab.yaml new file mode 100644 index 0000000000..6e66452e55 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/target/crontab.yaml @@ -0,0 +1,7 @@ +apiVersion: "stable.example.com/v1" +kind: CronTab +metadata: + name: my-new-cron-object 
+spec: + cronSpec: "* * * * */5" + image: my-awesome-cron-image diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go b/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go index 26d2002717..9c4a51b709 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go @@ -233,9 +233,65 @@ func (c *Kubectl) Get(ctx context.Context, kubeconfig, namespace string, r Resou return ms[0], nil } -func (c *Kubectl) GetAll(ctx context.Context, kubeconfig, namespace, kind string, selector ...string) (ms []Manifest, err error) { +func (c *Kubectl) getAPIResources(ctx context.Context, kubeconfig string) ([]string, error) { + args := []string{"api-resources", "--namespaced=false", "--verbs=list,get,delete", "--output=name"} + if kubeconfig != "" { + args = append(args, "--kubeconfig", kubeconfig) + } + cmd := exec.CommandContext(ctx, c.execPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("failed to get API resources: %s, %v", string(out), err) + } + lines := strings.Split(string(out), "\n") + resources := make([]string, 0, len(lines)) + for _, line := range lines { + if line != "" { + resources = append(resources, line) + } + } + + return resources, nil +} + +func (c *Kubectl) GetAllClusterScoped(ctx context.Context, kubeconfig string, selector ...string) ([]Manifest, error) { + resources, err := c.getAPIResources(ctx, kubeconfig) + if err != nil { + return nil, err + } + + args := make([]string, 0, 7) + if kubeconfig != "" { + args = append(args, "--kubeconfig", kubeconfig) + } + args = append(args, "get", strings.Join(resources, ","), "-o", "yaml", "--selector", strings.Join(selector, ",")) + cmd := exec.CommandContext(ctx, c.execPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("failed to get cluster-scoped resources: %s, %v", string(out), err) + } + + // Unmarshal the output to the list of manifests. + var list v1.List + if err := yaml.Unmarshal(out, &list); err != nil { + return nil, fmt.Errorf("failed to unmarshal the output: %w", err) + } + + ms := make([]Manifest, 0, len(list.Items)) + for _, item := range list.Items { + m, err := ParseManifests(string(item.Raw)) + if err != nil { + return nil, fmt.Errorf("failed to parse the manifest: %w", err) + } + ms = append(ms, m...) + } + + return ms, nil +} + +func (c *Kubectl) GetAll(ctx context.Context, kubeconfig, namespace string, selector ...string) (ms []Manifest, err error) { args := make([]string, 0, 7) - args = append(args, "get", kind, "-o", "yaml", "--selector", strings.Join(selector, ",")) + args = append(args, "get", "all", "-o", "yaml", "--selector", strings.Join(selector, ",")) if kubeconfig != "" { args = append(args, "--kubeconfig", kubeconfig) } diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go index c96d234f81..118122b9e9 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go @@ -16,6 +16,7 @@ package provider import ( "fmt" + "strings" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -47,14 +48,26 @@ func (k ResourceKey) Namespace() string { return k.namespace } -// normalizeDefaultNamespace converts the default namespace to an empty string. -func (k ResourceKey) normalizeDefaultNamespace() ResourceKey { +// normalize converts the group and kind to lower case. 
+// It also converts the default namespace to an empty string. +func (k ResourceKey) normalize() ResourceKey { + k.groupKind = normalizeGroupKind(k.groupKind) + return k.normalizeNamespace() +} + +// normalizeNamespace converts the default namespace to an empty string. +func (k ResourceKey) normalizeNamespace() ResourceKey { if k.namespace == DefaultNamespace { - k.namespace = "" + return k.withoutNamespace() } return k } +func (k ResourceKey) withoutNamespace() ResourceKey { + k.namespace = "" + return k +} + func (k ResourceKey) String() string { return fmt.Sprintf("%s:%s:%s:%s", k.groupKind.Group, k.groupKind.Kind, k.namespace, k.name) } @@ -72,27 +85,46 @@ func makeResourceKey(obj *unstructured.Unstructured) ResourceKey { return k } -func FindRemoveResources(manifests []Manifest, liveResources []Manifest) []ResourceKey { +func normalizeGroupKind(gk schema.GroupKind) schema.GroupKind { + gk.Group = strings.ToLower(gk.Group) + gk.Kind = strings.ToLower(gk.Kind) + return gk +} + +func FindRemoveResources(manifests, namespacedLiveResources, clusterScopedLiveResources []Manifest) []ResourceKey { var ( - keys = make(map[ResourceKey]struct{}, len(manifests)) - removeKeys = make([]ResourceKey, 0, len(liveResources)) + removeKeys = make([]ResourceKey, 0, len(namespacedLiveResources)+len(clusterScopedLiveResources)) ) - for _, m := range manifests { - keys[m.Key()] = struct{}{} - keys[m.Key().normalizeDefaultNamespace()] = struct{}{} - } - for _, m := range liveResources { - if !m.IsManagedByPiped() { - continue - } - if _, ok := keys[m.Key()]; ok { - continue + + { + keys := make(map[ResourceKey]struct{}, len(manifests)) + for _, m := range manifests { + keys[m.Key().normalize()] = struct{}{} } - if _, ok := keys[m.Key().normalizeDefaultNamespace()]; ok { - continue + + for _, r := range namespacedLiveResources { + ns := r.Key().namespace + k := r.Key().normalize() + if _, ok := keys[k]; !ok { + // The namespace should be set to the live resource's value. 
+ k.namespace = ns + removeKeys = append(removeKeys, k) + } } + } - removeKeys = append(removeKeys, m.Key()) + { + keys := make(map[ResourceKey]struct{}, len(manifests)) + for _, m := range manifests { + keys[m.Key().normalize().withoutNamespace()] = struct{}{} + } + for _, r := range clusterScopedLiveResources { + k := r.Key().normalize().withoutNamespace() + if _, ok := keys[k]; !ok { + removeKeys = append(removeKeys, k) + } + } } + return removeKeys } diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource_test.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource_test.go new file mode 100644 index 0000000000..e65cc4cce0 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource_test.go @@ -0,0 +1,139 @@ +package provider + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func TestResourceKey_normalizeNamespace(t *testing.T) { + tests := []struct { + name string + resourceKey ResourceKey + expected ResourceKey + }{ + { + name: "default namespace", + resourceKey: ResourceKey{ + groupKind: schema.GroupKind{Group: "apps", Kind: "Deployment"}, + namespace: DefaultNamespace, + name: "test-deployment", + }, + expected: ResourceKey{ + groupKind: schema.GroupKind{Group: "apps", Kind: "Deployment"}, + namespace: "", + name: "test-deployment", + }, + }, + { + name: "non-default namespace", + resourceKey: ResourceKey{ + groupKind: schema.GroupKind{Group: "apps", Kind: "Deployment"}, + namespace: "custom-namespace", + name: "test-deployment", + }, + expected: ResourceKey{ + groupKind: schema.GroupKind{Group: "apps", Kind: "Deployment"}, + namespace: "custom-namespace", + name: "test-deployment", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := tt.resourceKey.normalizeNamespace() + assert.Equal(t, tt.expected, actual) + }) + } +} + +func TestFindRemoveResources(t *testing.T) { + tests := []struct { + name string + manifestsYAML string + namespacedLiveResourcesYAML string + clusterScopedLiveResourcesYAML string + expectedRemoveKeys []ResourceKey + }{ + { + name: "find remove resources", + manifestsYAML: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-configmap + namespace: default + annotations: + "pipecd.dev/managed-by": "piped" +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-secret + namespace: default + annotations: + "pipecd.dev/managed-by": "piped" +`, + namespacedLiveResourcesYAML: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-configmap + namespace: default + annotations: + "pipecd.dev/managed-by": "piped" +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-secret + namespace: default + annotations: + "pipecd.dev/managed-by": "piped" +--- +apiVersion: v1 +kind: Secret +metadata: + name: old-secret + namespace: default + annotations: + "pipecd.dev/managed-by": "piped" +`, + clusterScopedLiveResourcesYAML: ` +apiVersion: v1 +kind: Namespace +metadata: + name: test-namespace + annotations: + "pipecd.dev/managed-by": "piped" +`, + expectedRemoveKeys: []ResourceKey{ + { + groupKind: schema.GroupKind{Group: "", Kind: "secret"}, + namespace: "default", + name: "old-secret", + }, + { + groupKind: schema.GroupKind{Group: "", Kind: "namespace"}, + namespace: "", + name: "test-namespace", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + manifests := mustParseManifests(t, tt.manifestsYAML) + + namespacedLiveResources := mustParseManifests(t, tt.namespacedLiveResourcesYAML) + + 
clusterScopedLiveResources := mustParseManifests(t, tt.clusterScopedLiveResourcesYAML) + + removeKeys := FindRemoveResources(manifests, namespacedLiveResources, clusterScopedLiveResources) + assert.ElementsMatch(t, tt.expectedRemoveKeys, removeKeys) + }) + } +} From b8495174ad10c3c4153bdebfda4e12b9e714c93d Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Thu, 26 Dec 2024 16:18:31 +0900 Subject: [PATCH 11/27] Enable parallel execution for deployment service tests Signed-off-by: Shinnosuke Sawada-Dazai --- .../plugin/kubernetes/deployment/server_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go index 28cc45b620..92d6a5293c 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go @@ -136,6 +136,8 @@ func setupTestPluginConfigAndDynamicClient(t *testing.T) (*config.PipedPlugin, d } func TestDeploymentService_executeK8sSyncStage(t *testing.T) { + t.Parallel() + ctx := context.Background() // read the application config from the example file @@ -199,6 +201,8 @@ func TestDeploymentService_executeK8sSyncStage(t *testing.T) { } func TestDeploymentService_executeK8sSyncStage_withInputNamespace(t *testing.T) { + t.Parallel() + ctx := context.Background() // read the application config from the example file @@ -268,6 +272,8 @@ func TestDeploymentService_executeK8sSyncStage_withInputNamespace(t *testing.T) } func TestDeploymentService_executeK8sSyncStage_withPrune(t *testing.T) { + t.Parallel() + ctx := context.Background() // initialize tool registry @@ -372,6 +378,8 @@ func TestDeploymentService_executeK8sSyncStage_withPrune(t *testing.T) { } func TestDeploymentService_executeK8sSyncStage_withPrune_changesNamespace(t *testing.T) { + t.Parallel() + ctx := context.Background() // initialize tool registry @@ -492,6 +500,8 @@ func TestDeploymentService_executeK8sSyncStage_withPrune_changesNamespace(t *tes } func TestDeploymentService_executeK8sSyncStage_withPrune_clusterScoped(t *testing.T) { + t.Parallel() + ctx := context.Background() // initialize tool registry From 3ed43201ed04bfdd903a2037cd50b5f7b712fc9b Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Thu, 26 Dec 2024 16:30:56 +0900 Subject: [PATCH 12/27] Add documentation for Kubernetes resource retrieval functions Signed-off-by: Shinnosuke Sawada-Dazai --- pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go | 9 +++++++++ pkg/app/pipedv1/plugin/kubernetes/provider/resource.go | 1 + 2 files changed, 10 insertions(+) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go b/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go index 9c4a51b709..2a1fbaac02 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go @@ -233,6 +233,9 @@ func (c *Kubectl) Get(ctx context.Context, kubeconfig, namespace string, r Resou return ms[0], nil } +// getAPIResources retrieves the list of available API resources from the Kubernetes cluster. +// It runs the `kubectl api-resources` command with the specified kubeconfig and returns the +// names of the resources that support the "list", "get", and "delete" verbs. 
func (c *Kubectl) getAPIResources(ctx context.Context, kubeconfig string) ([]string, error) { args := []string{"api-resources", "--namespaced=false", "--verbs=list,get,delete", "--output=name"} if kubeconfig != "" { @@ -254,6 +257,9 @@ func (c *Kubectl) getAPIResources(ctx context.Context, kubeconfig string) ([]str return resources, nil } +// GetAllClusterScoped retrieves all cluster-scoped resources from the Kubernetes cluster +// using the provided kubeconfig and optional selectors. It returns a slice of Manifests +// representing the resources or an error if the operation fails. func (c *Kubectl) GetAllClusterScoped(ctx context.Context, kubeconfig string, selector ...string) ([]Manifest, error) { resources, err := c.getAPIResources(ctx, kubeconfig) if err != nil { @@ -289,6 +295,9 @@ func (c *Kubectl) GetAllClusterScoped(ctx context.Context, kubeconfig string, se return ms, nil } +// GetAll retrieves all Kubernetes resources in the specified namespace and matching the given selector. +// It returns a list of manifests or an error if the retrieval or unmarshalling fails. +// If no resources are found, it returns nil without an error. func (c *Kubectl) GetAll(ctx context.Context, kubeconfig, namespace string, selector ...string) (ms []Manifest, err error) { args := make([]string, 0, 7) args = append(args, "get", "all", "-o", "yaml", "--selector", strings.Join(selector, ",")) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go index 118122b9e9..375d30c152 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go @@ -91,6 +91,7 @@ func normalizeGroupKind(gk schema.GroupKind) schema.GroupKind { return gk } +// FindRemoveResources identifies resources that are present in the live state but not in the desired manifests. func FindRemoveResources(manifests, namespacedLiveResources, clusterScopedLiveResources []Manifest) []ResourceKey { var ( removeKeys = make([]ResourceKey, 0, len(namespacedLiveResources)+len(clusterScopedLiveResources)) From d1e023632bdcf211df8197d6145029a44590c7b7 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Thu, 26 Dec 2024 16:33:08 +0900 Subject: [PATCH 13/27] Add license header to resource_test.go Signed-off-by: Shinnosuke Sawada-Dazai --- .../plugin/kubernetes/provider/resource_test.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource_test.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource_test.go index e65cc4cce0..8ff9e3e4ff 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource_test.go @@ -1,3 +1,17 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package provider import ( From 85ba187ab579264b63a1445aec7881689c12cfaf Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Thu, 26 Dec 2024 16:37:01 +0900 Subject: [PATCH 14/27] Fix the LoadManifests Signed-off-by: Shinnosuke Sawada-Dazai --- pkg/app/pipedv1/plugin/kubernetes/provider/loader.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go b/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go index f051d0f1c5..9b1134cbb8 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go @@ -86,8 +86,11 @@ func (l *Loader) LoadManifests(ctx context.Context, input LoaderInput) (manifests []Manifest, err error) { defer func() { for i := range manifests { - // Set the namespace for all manifests. - manifests[i].body.SetNamespace(input.Namespace) + // Set the namespace for all manifests when the input specifies a namespace. + // We have to do this to ensure that the namespaces of the loaded manifests are consistent with the applied resources. + if input.Namespace != "" { + manifests[i].body.SetNamespace(input.Namespace) + } // Add builtin labels and annotations for tracking application live state. manifests[i].AddLabels(map[string]string{ From c2a747f7e47f5753e9fa3497a0b1fa313d068d47 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Fri, 27 Dec 2024 15:16:04 +0900 Subject: [PATCH 15/27] Rename getAPIResources to getClusterScopedAPIResources for clarity Signed-off-by: Shinnosuke Sawada-Dazai --- pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go b/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go index 2a1fbaac02..76b9efb236 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go @@ -233,10 +233,10 @@ func (c *Kubectl) Get(ctx context.Context, kubeconfig, namespace string, r Resou return ms[0], nil } -// getAPIResources retrieves the list of available API resources from the Kubernetes cluster. +// getClusterScopedAPIResources retrieves the list of available API resources from the Kubernetes cluster. // It runs the `kubectl api-resources` command with the specified kubeconfig and returns the -// names of the resources that support the "list", "get", and "delete" verbs. +// names of the resources that support the "list", "get", and "delete" verbs, and are cluster-scoped. -func (c *Kubectl) getAPIResources(ctx context.Context, kubeconfig string) ([]string, error) { +func (c *Kubectl) getClusterScopedAPIResources(ctx context.Context, kubeconfig string) ([]string, error) { args := []string{"api-resources", "--namespaced=false", "--verbs=list,get,delete", "--output=name"} if kubeconfig != "" { args = append(args, "--kubeconfig", kubeconfig) @@ -261,7 +261,7 @@ // using the provided kubeconfig and optional selectors. It returns a slice of Manifests // representing the resources or an error if the operation fails.
func (c *Kubectl) GetAllClusterScoped(ctx context.Context, kubeconfig string, selector ...string) ([]Manifest, error) { - resources, err := c.getAPIResources(ctx, kubeconfig) + resources, err := c.getClusterScopedAPIResources(ctx, kubeconfig) if err != nil { return nil, err } From 48c0fb3de4ff6e661846c4a126d35b6cbfbab7b9 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Fri, 27 Dec 2024 15:18:11 +0900 Subject: [PATCH 16/27] Refactor manifest key normalization Signed-off-by: Shinnosuke Sawada-Dazai --- .../pipedv1/plugin/kubernetes/provider/manifest.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go index 282b310367..bff04fbc07 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go @@ -199,19 +199,12 @@ type WorkloadPair struct { func FindSameManifests(olds, news []Manifest) []WorkloadPair { pairs := make([]WorkloadPair, 0) oldMap := make(map[ResourceKey]Manifest, len(olds)) - nomalizeKey := func(k ResourceKey) ResourceKey { - // Normalize the key by removing the default namespace. - if k.namespace == DefaultNamespace { - k.namespace = "" - } - return k - } for _, m := range olds { - key := nomalizeKey(m.Key()) + key := m.Key().normalize() oldMap[key] = m } for _, n := range news { - key := nomalizeKey(n.Key()) + key := n.Key().normalize() if o, ok := oldMap[key]; ok { pairs = append(pairs, WorkloadPair{ Old: o, From 6fb0aab7b4e7d325ef1e6f3fa6c0c31a854a5ddc Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Fri, 27 Dec 2024 15:26:05 +0900 Subject: [PATCH 17/27] Add comments to ResourceKey Signed-off-by: Shinnosuke Sawada-Dazai --- pkg/app/pipedv1/plugin/kubernetes/provider/resource.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go index 375d30c152..57577cacfc 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go @@ -30,10 +30,18 @@ const ( DefaultNamespace = "default" ) +// ResourceKey represents a unique key of a Kubernetes resource. +// We use GroupKind, namespace, and name to identify a resource. type ResourceKey struct { + // We use GroupKind instead of GroupVersionKind because we don't care about the version. groupKind schema.GroupKind + // The namespace of the resource. + // We use namespace as a part of the key to identify a resource + // We have to distinguish the namespaces to prune the old resource when users change the namespace of a resource. + // If the resource is cluster-scoped, this field should be empty. namespace string - name string + // The name of the resource. 
+ name string } func (k ResourceKey) Kind() string { From 69ac113977ead6b92164482653b2709e70408ac7 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Fri, 27 Dec 2024 15:45:02 +0900 Subject: [PATCH 18/27] Refactor to use normalized key only to compare Co-authored-by: Yoshiki Fujikane <40124947+ffjlabo@users.noreply.github.com> Signed-off-by: Shinnosuke Sawada-Dazai --- pkg/app/pipedv1/plugin/kubernetes/provider/resource.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go index 57577cacfc..0b27b31ddb 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go @@ -112,12 +112,8 @@ func FindRemoveResources(manifests, namespacedLiveResources, clusterScopedLiveRe } for _, r := range namespacedLiveResources { - ns := r.Key().namespace - k := r.Key().normalize() - if _, ok := keys[k]; !ok { - // The namespace should be set to the live resource's value. - k.namespace = ns - removeKeys = append(removeKeys, k) + if _, ok := keys[r.Key().normalize()]; !ok { + removeKeys = append(removeKeys, r.Key()) } } } From 6392bca42cf4d162ca0ac380d488281d90ae1cdd Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Fri, 27 Dec 2024 15:46:55 +0900 Subject: [PATCH 19/27] Rename variable clusterLiveResources to clusterScopedLiveResources Signed-off-by: Shinnosuke Sawada-Dazai --- pkg/app/pipedv1/plugin/kubernetes/deployment/server.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go index 666b62454f..11a6f51324 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go @@ -336,7 +336,7 @@ func (a *DeploymentService) executeK8sSyncStage(ctx context.Context, lp logpersi return model.StageStatus_STAGE_FAILURE } - clusterLiveResources, err := kubectl.GetAllClusterScoped(ctx, deployTargetConfig.KubeConfigPath, + clusterScopedLiveResources, err := kubectl.GetAllClusterScoped(ctx, deployTargetConfig.KubeConfigPath, fmt.Sprintf("%s=%s", provider.LabelManagedBy, provider.ManagedByPiped), fmt.Sprintf("%s=%s", provider.LabelApplication, input.GetDeployment().GetApplicationId()), ) @@ -345,14 +345,14 @@ func (a *DeploymentService) executeK8sSyncStage(ctx context.Context, lp logpersi return model.StageStatus_STAGE_FAILURE } - if len(namespacedLiveResources)+len(clusterLiveResources) == 0 { + if len(namespacedLiveResources)+len(clusterScopedLiveResources) == 0 { lp.Info("There is no data about live resource so no resource will be removed") return model.StageStatus_STAGE_SUCCESS } - lp.Successf("Successfully loaded %d live resources", len(namespacedLiveResources)+len(clusterLiveResources)) + lp.Successf("Successfully loaded %d live resources", len(namespacedLiveResources)+len(clusterScopedLiveResources)) - removeKeys := provider.FindRemoveResources(manifests, namespacedLiveResources, clusterLiveResources) + removeKeys := provider.FindRemoveResources(manifests, namespacedLiveResources, clusterScopedLiveResources) if len(removeKeys) == 0 { lp.Info("There are no live resources should be removed") return model.StageStatus_STAGE_SUCCESS From e1dec976382307bd10e96e3dcfc262cbdd9e19b9 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Fri, 27 Dec 2024 15:50:09 +0900 Subject: [PATCH 20/27] Rename keys to 
normalizedKeys and add comments Signed-off-by: Shinnosuke Sawada-Dazai --- .../pipedv1/plugin/kubernetes/provider/resource.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go index 0b27b31ddb..79d9563db8 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go @@ -106,26 +106,28 @@ func FindRemoveResources(manifests, namespacedLiveResources, clusterScopedLiveRe ) { - keys := make(map[ResourceKey]struct{}, len(manifests)) + normalizedKeys := make(map[ResourceKey]struct{}, len(manifests)) for _, m := range manifests { - keys[m.Key().normalize()] = struct{}{} + normalizedKeys[m.Key().normalize()] = struct{}{} } for _, r := range namespacedLiveResources { - if _, ok := keys[r.Key().normalize()]; !ok { + if _, ok := normalizedKeys[r.Key().normalize()]; !ok { removeKeys = append(removeKeys, r.Key()) } } } { - keys := make(map[ResourceKey]struct{}, len(manifests)) + normalizedKeys := make(map[ResourceKey]struct{}, len(manifests)) for _, m := range manifests { - keys[m.Key().normalize().withoutNamespace()] = struct{}{} + // We don't care about the namespace of the cluster-scoped resources. + normalizedKeys[m.Key().normalize().withoutNamespace()] = struct{}{} } for _, r := range clusterScopedLiveResources { + // We don't care about the namespace of the cluster-scoped resources. k := r.Key().normalize().withoutNamespace() - if _, ok := keys[k]; !ok { + if _, ok := normalizedKeys[k]; !ok { removeKeys = append(removeKeys, k) } } From bcfd121f95c074895e5a1986232b54501574e3fc Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Fri, 27 Dec 2024 15:52:57 +0900 Subject: [PATCH 21/27] Refactor to remove normalizeGroupKinds Signed-off-by: Shinnosuke Sawada-Dazai --- pkg/app/pipedv1/plugin/kubernetes/provider/resource.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go index 79d9563db8..f7169cd088 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go @@ -59,7 +59,8 @@ func (k ResourceKey) Namespace() string { // normalize converts the group and kind to lower case. // It also converts the default namespace to an empty string. func (k ResourceKey) normalize() ResourceKey { - k.groupKind = normalizeGroupKind(k.groupKind) + k.groupKind.Group = strings.ToLower(k.groupKind.Group) + k.groupKind.Kind = strings.ToLower(k.groupKind.Kind) return k.normalizeNamespace() } @@ -93,12 +94,6 @@ func makeResourceKey(obj *unstructured.Unstructured) ResourceKey { return k } -func normalizeGroupKind(gk schema.GroupKind) schema.GroupKind { - gk.Group = strings.ToLower(gk.Group) - gk.Kind = strings.ToLower(gk.Kind) - return gk -} - // FindRemoveResources identifies resources that are present in the live state but not in the desired manifests. 
func FindRemoveResources(manifests, namespacedLiveResources, clusterScopedLiveResources []Manifest) []ResourceKey { var ( From 2e7b2f5cd099a2f86469cad573de10834023643a Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Fri, 27 Dec 2024 15:56:52 +0900 Subject: [PATCH 22/27] Fix to pass tests Signed-off-by: Shinnosuke Sawada-Dazai --- pkg/app/pipedv1/plugin/kubernetes/provider/resource.go | 5 ++--- pkg/app/pipedv1/plugin/kubernetes/provider/resource_test.go | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go index f7169cd088..b11164f52e 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource.go @@ -121,9 +121,8 @@ func FindRemoveResources(manifests, namespacedLiveResources, clusterScopedLiveRe } for _, r := range clusterScopedLiveResources { // We don't care about the namespace of the cluster-scoped resources. - k := r.Key().normalize().withoutNamespace() - if _, ok := normalizedKeys[k]; !ok { - removeKeys = append(removeKeys, k) + if _, ok := normalizedKeys[r.Key().normalize().withoutNamespace()]; !ok { + removeKeys = append(removeKeys, r.Key()) } } } diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/resource_test.go b/pkg/app/pipedv1/plugin/kubernetes/provider/resource_test.go index 8ff9e3e4ff..3bc45f7e73 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/resource_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/resource_test.go @@ -125,12 +125,12 @@ metadata: `, expectedRemoveKeys: []ResourceKey{ { - groupKind: schema.GroupKind{Group: "", Kind: "secret"}, + groupKind: schema.GroupKind{Group: "", Kind: "Secret"}, namespace: "default", name: "old-secret", }, { - groupKind: schema.GroupKind{Group: "", Kind: "namespace"}, + groupKind: schema.GroupKind{Group: "", Kind: "Namespace"}, namespace: "", name: "test-namespace", }, From f47da4b51cefa09a63fa32a8f47ec1a74e5eee6e Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Mon, 6 Jan 2025 09:16:01 +0900 Subject: [PATCH 23/27] Update comments to clarify source of application config in tests Signed-off-by: Shinnosuke Sawada-Dazai --- pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go index 92d6a5293c..0fe38ffde4 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go @@ -287,7 +287,7 @@ func TestDeploymentService_executeK8sSyncStage_withPrune(t *testing.T) { running := filepath.Join("./", "testdata", "prune", "running") - // read the running application config from the example file + // read the running application config from the testdata file runningCfg, err := os.ReadFile(filepath.Join(running, "app.pipecd.yaml")) require.NoError(t, err) @@ -337,7 +337,7 @@ func TestDeploymentService_executeK8sSyncStage_withPrune(t *testing.T) { target := filepath.Join("./", "testdata", "prune", "target") - // read the running application config from the example file + // read the running application config from the testdata file targetCfg, err := os.ReadFile(filepath.Join(target, "app.pipecd.yaml")) require.NoError(t, err) From b12e31770d4fb4e1201c0dda0cef3f4ffa62d921 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Mon, 6 Jan 2025 
09:24:25 +0900 Subject: [PATCH 24/27] Add assertions to check for NotFound errors in deployment service tests Signed-off-by: Shinnosuke Sawada-Dazai --- pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go index 0fe38ffde4..2f8a2bc695 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" @@ -375,6 +376,7 @@ func TestDeploymentService_executeK8sSyncStage_withPrune(t *testing.T) { _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("default").Get(context.Background(), "simple", metav1.GetOptions{}) require.Error(t, err) + require.Truef(t, apierrors.IsNotFound(err), "expected error to be NotFound, but got %v", err) } func TestDeploymentService_executeK8sSyncStage_withPrune_changesNamespace(t *testing.T) { @@ -481,6 +483,7 @@ func TestDeploymentService_executeK8sSyncStage_withPrune_changesNamespace(t *tes // The service should be removed from the previous namespace _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("test-1").Get(context.Background(), "simple", metav1.GetOptions{}) require.Error(t, err) + require.Truef(t, apierrors.IsNotFound(err), "expected error to be NotFound, but got %v", err) // The service should be created in the new namespace service, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("test-2").Get(context.Background(), "simple", metav1.GetOptions{}) @@ -632,4 +635,5 @@ func TestDeploymentService_executeK8sSyncStage_withPrune_clusterScoped(t *testin // The my-new-cron-object-2 should be removed _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object-2", metav1.GetOptions{}) require.Error(t, err) + require.Truef(t, apierrors.IsNotFound(err), "expected error to be NotFound, but got %v", err) } From d75fd949bb568401b9b176a3c24217ecde3c3254 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Mon, 6 Jan 2025 09:29:15 +0900 Subject: [PATCH 25/27] Add resource-key assertions to deployment service tests Signed-off-by: Shinnosuke Sawada-Dazai --- pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go index 2f8a2bc695..8e1fa09869 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go @@ -441,6 +441,7 @@ func TestDeploymentService_executeK8sSyncStage_withPrune_changesNamespace(t *tes require.Equal(t, "app-id", service.GetAnnotations()["pipecd.dev/application"]) require.Equal(t, "v1", service.GetAnnotations()["pipecd.dev/original-api-version"]) require.Equal(t, "0123456789", service.GetAnnotations()["pipecd.dev/commit-hash"]) + require.Equal(t, 
":Service:test-1:simple", service.GetAnnotations()["pipecd.dev/resource-key"]) target := filepath.Join("./", "testdata", "prune_with_change_namespace", "target") @@ -500,6 +501,7 @@ func TestDeploymentService_executeK8sSyncStage_withPrune_changesNamespace(t *tes require.Equal(t, "app-id", service.GetAnnotations()["pipecd.dev/application"]) require.Equal(t, "v1", service.GetAnnotations()["pipecd.dev/original-api-version"]) require.Equal(t, "0012345678", service.GetAnnotations()["pipecd.dev/commit-hash"]) + require.Equal(t, ":Service:test-2:simple", service.GetAnnotations()["pipecd.dev/resource-key"]) } func TestDeploymentService_executeK8sSyncStage_withPrune_clusterScoped(t *testing.T) { From 34c7807b2771309ebad439d63445e6687cf92875 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Mon, 6 Jan 2025 09:42:11 +0900 Subject: [PATCH 26/27] Use subtest to prepare e2e tests Signed-off-by: Shinnosuke Sawada-Dazai --- .../kubernetes/deployment/server_test.go | 576 +++++++++--------- 1 file changed, 298 insertions(+), 278 deletions(-) diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go index 8e1fa09869..a3210b0b93 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go @@ -292,91 +292,98 @@ func TestDeploymentService_executeK8sSyncStage_withPrune(t *testing.T) { runningCfg, err := os.ReadFile(filepath.Join(running, "app.pipecd.yaml")) require.NoError(t, err) - // prepare the request to ensure the running deployment exists - runningRequest := &deployment.ExecuteStageRequest{ - Input: &deployment.ExecutePluginInput{ - Deployment: &model.Deployment{ - PipedId: "piped-id", - ApplicationId: "app-id", - DeployTargets: []string{"default"}, + ok := t.Run("prepare", func(t *testing.T) { + runningRequest := &deployment.ExecuteStageRequest{ + Input: &deployment.ExecutePluginInput{ + Deployment: &model.Deployment{ + PipedId: "piped-id", + ApplicationId: "app-id", + DeployTargets: []string{"default"}, + }, + Stage: &model.PipelineStage{ + Id: "stage-id", + Name: "K8S_SYNC", + }, + StageConfig: []byte(``), + RunningDeploymentSource: nil, + TargetDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: running, + CommitHash: "0123456789", + ApplicationConfig: runningCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, }, - Stage: &model.PipelineStage{ - Id: "stage-id", - Name: "K8S_SYNC", - }, - StageConfig: []byte(``), - RunningDeploymentSource: nil, - TargetDeploymentSource: &deployment.DeploymentSource{ - ApplicationDirectory: running, - CommitHash: "0123456789", - ApplicationConfig: runningCfg, - ApplicationConfigFilename: "app.pipecd.yaml", - }, - }, - } - - resp, err := svc.ExecuteStage(ctx, runningRequest) - - require.NoError(t, err) - require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) - - service, err := dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("default").Get(context.Background(), "simple", metav1.GetOptions{}) - require.NoError(t, err) - - require.Equal(t, "piped", service.GetLabels()["pipecd.dev/managed-by"]) - require.Equal(t, "piped-id", service.GetLabels()["pipecd.dev/piped"]) - require.Equal(t, "app-id", service.GetLabels()["pipecd.dev/application"]) - require.Equal(t, "0123456789", service.GetLabels()["pipecd.dev/commit-hash"]) - - require.Equal(t, "simple", service.GetName()) - require.Equal(t, "piped", 
service.GetAnnotations()["pipecd.dev/managed-by"]) - require.Equal(t, "piped-id", service.GetAnnotations()["pipecd.dev/piped"]) - require.Equal(t, "app-id", service.GetAnnotations()["pipecd.dev/application"]) - require.Equal(t, "v1", service.GetAnnotations()["pipecd.dev/original-api-version"]) - require.Equal(t, ":Service::simple", service.GetAnnotations()["pipecd.dev/resource-key"]) // This assertion differs from the non-plugin-arched piped's Kubernetes platform provider, but we decided to change this behavior. - require.Equal(t, "0123456789", service.GetAnnotations()["pipecd.dev/commit-hash"]) - - target := filepath.Join("./", "testdata", "prune", "target") - - // read the running application config from the testdata file - targetCfg, err := os.ReadFile(filepath.Join(target, "app.pipecd.yaml")) - require.NoError(t, err) + } - // prepare the request to ensure the running deployment exists - targetRequest := &deployment.ExecuteStageRequest{ - Input: &deployment.ExecutePluginInput{ - Deployment: &model.Deployment{ - PipedId: "piped-id", - ApplicationId: "app-id", - DeployTargets: []string{"default"}, - }, - Stage: &model.PipelineStage{ - Id: "stage-id", - Name: "K8S_SYNC", - }, - StageConfig: []byte(``), - RunningDeploymentSource: &deployment.DeploymentSource{ - ApplicationDirectory: running, - CommitHash: "0123456789", - ApplicationConfig: runningCfg, - ApplicationConfigFilename: "app.pipecd.yaml", - }, - TargetDeploymentSource: &deployment.DeploymentSource{ - ApplicationDirectory: target, - CommitHash: "0012345678", - ApplicationConfig: targetCfg, - ApplicationConfigFilename: "app.pipecd.yaml", + resp, err := svc.ExecuteStage(ctx, runningRequest) + + require.NoError(t, err) + require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + + service, err := dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("default").Get(context.Background(), "simple", metav1.GetOptions{}) + require.NoError(t, err) + + require.Equal(t, "piped", service.GetLabels()["pipecd.dev/managed-by"]) + require.Equal(t, "piped-id", service.GetLabels()["pipecd.dev/piped"]) + require.Equal(t, "app-id", service.GetLabels()["pipecd.dev/application"]) + require.Equal(t, "0123456789", service.GetLabels()["pipecd.dev/commit-hash"]) + + require.Equal(t, "simple", service.GetName()) + require.Equal(t, "piped", service.GetAnnotations()["pipecd.dev/managed-by"]) + require.Equal(t, "piped-id", service.GetAnnotations()["pipecd.dev/piped"]) + require.Equal(t, "app-id", service.GetAnnotations()["pipecd.dev/application"]) + require.Equal(t, "v1", service.GetAnnotations()["pipecd.dev/original-api-version"]) + require.Equal(t, ":Service::simple", service.GetAnnotations()["pipecd.dev/resource-key"]) // This assertion differs from the non-plugin-arched piped's Kubernetes platform provider, but we decided to change this behavior. 
+		require.Equal(t, "0123456789", service.GetAnnotations()["pipecd.dev/commit-hash"])
+	})
+	require.Truef(t, ok, "expected prepare to succeed")
+
+	t.Run("run with prune", func(t *testing.T) {
+
+		// sync the target manifests and verify that the resource removed from the target is pruned
+
+		target := filepath.Join("./", "testdata", "prune", "target")
+
+		// read the target application config from the testdata file
+		targetCfg, err := os.ReadFile(filepath.Join(target, "app.pipecd.yaml"))
+		require.NoError(t, err)
+
+		// prepare the request to sync the target manifests
+		targetRequest := &deployment.ExecuteStageRequest{
+			Input: &deployment.ExecutePluginInput{
+				Deployment: &model.Deployment{
+					PipedId: "piped-id",
+					ApplicationId: "app-id",
+					DeployTargets: []string{"default"},
+				},
+				Stage: &model.PipelineStage{
+					Id: "stage-id",
+					Name: "K8S_SYNC",
+				},
+				StageConfig: []byte(``),
+				RunningDeploymentSource: &deployment.DeploymentSource{
+					ApplicationDirectory: running,
+					CommitHash: "0123456789",
+					ApplicationConfig: runningCfg,
+					ApplicationConfigFilename: "app.pipecd.yaml",
+				},
+				TargetDeploymentSource: &deployment.DeploymentSource{
+					ApplicationDirectory: target,
+					CommitHash: "0012345678",
+					ApplicationConfig: targetCfg,
+					ApplicationConfigFilename: "app.pipecd.yaml",
+				},
 			},
-	}
+		}
 
-	resp, err = svc.ExecuteStage(ctx, targetRequest)
-	require.NoError(t, err)
-	require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String())
+		resp, err := svc.ExecuteStage(ctx, targetRequest)
+		require.NoError(t, err)
+		require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String())
 
-	_, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("default").Get(context.Background(), "simple", metav1.GetOptions{})
-	require.Error(t, err)
-	require.Truef(t, apierrors.IsNotFound(err), "expected error to be NotFound, but got %v", err)
+		_, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("default").Get(context.Background(), "simple", metav1.GetOptions{})
+		require.Error(t, err)
+		require.Truef(t, apierrors.IsNotFound(err), "expected error to be NotFound, but got %v", err)
+	})
 }
 
 func TestDeploymentService_executeK8sSyncStage_withPrune_changesNamespace(t *testing.T) {
@@ -399,109 +406,114 @@ func TestDeploymentService_executeK8sSyncStage_withPrune_changesNamespace(t *tes
 	runningCfg, err := os.ReadFile(filepath.Join(running, "app.pipecd.yaml"))
 	require.NoError(t, err)
 
-	// prepare the request to ensure the running deployment exists
-	runningRequest := &deployment.ExecuteStageRequest{
-		Input: &deployment.ExecutePluginInput{
-			Deployment: &model.Deployment{
-				PipedId: "piped-id",
-				ApplicationId: "app-id",
-				DeployTargets: []string{"default"},
-			},
-			Stage: &model.PipelineStage{
-				Id: "stage-id",
-				Name: "K8S_SYNC",
-			},
-			StageConfig: []byte(``),
-			RunningDeploymentSource: nil,
-			TargetDeploymentSource: &deployment.DeploymentSource{
-				ApplicationDirectory: running,
-				CommitHash: "0123456789",
-				ApplicationConfig: runningCfg,
-				ApplicationConfigFilename: "app.pipecd.yaml",
+	ok := t.Run("prepare", func(t *testing.T) {
+		// prepare the request to ensure the running deployment exists
+		runningRequest := &deployment.ExecuteStageRequest{
+			Input: &deployment.ExecutePluginInput{
+				Deployment: &model.Deployment{
+					PipedId: "piped-id",
+					ApplicationId: "app-id",
+					DeployTargets: []string{"default"},
+				},
+				Stage: &model.PipelineStage{
+					Id: "stage-id",
+					Name: 
"K8S_SYNC", + }, + StageConfig: []byte(``), + RunningDeploymentSource: nil, + TargetDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: running, + CommitHash: "0123456789", + ApplicationConfig: runningCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, }, - }, - } - - resp, err := svc.ExecuteStage(ctx, runningRequest) - - require.NoError(t, err) - require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) - - service, err := dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("test-1").Get(context.Background(), "simple", metav1.GetOptions{}) - require.NoError(t, err) - - require.Equal(t, "piped", service.GetLabels()["pipecd.dev/managed-by"]) - require.Equal(t, "piped-id", service.GetLabels()["pipecd.dev/piped"]) - require.Equal(t, "app-id", service.GetLabels()["pipecd.dev/application"]) - require.Equal(t, "0123456789", service.GetLabels()["pipecd.dev/commit-hash"]) - - require.Equal(t, "simple", service.GetName()) - require.Equal(t, "piped", service.GetAnnotations()["pipecd.dev/managed-by"]) - require.Equal(t, "piped-id", service.GetAnnotations()["pipecd.dev/piped"]) - require.Equal(t, "app-id", service.GetAnnotations()["pipecd.dev/application"]) - require.Equal(t, "v1", service.GetAnnotations()["pipecd.dev/original-api-version"]) - require.Equal(t, "0123456789", service.GetAnnotations()["pipecd.dev/commit-hash"]) - require.Equal(t, ":Service:test-1:simple", service.GetAnnotations()["pipecd.dev/resource-key"]) - - target := filepath.Join("./", "testdata", "prune_with_change_namespace", "target") - - // read the running application config from the example file - targetCfg, err := os.ReadFile(filepath.Join(target, "app.pipecd.yaml")) - require.NoError(t, err) + } - // prepare the request to ensure the running deployment exists - targetRequest := &deployment.ExecuteStageRequest{ - Input: &deployment.ExecutePluginInput{ - Deployment: &model.Deployment{ - PipedId: "piped-id", - ApplicationId: "app-id", - DeployTargets: []string{"default"}, + resp, err := svc.ExecuteStage(ctx, runningRequest) + + require.NoError(t, err) + require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + + service, err := dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("test-1").Get(context.Background(), "simple", metav1.GetOptions{}) + require.NoError(t, err) + + require.Equal(t, "piped", service.GetLabels()["pipecd.dev/managed-by"]) + require.Equal(t, "piped-id", service.GetLabels()["pipecd.dev/piped"]) + require.Equal(t, "app-id", service.GetLabels()["pipecd.dev/application"]) + require.Equal(t, "0123456789", service.GetLabels()["pipecd.dev/commit-hash"]) + + require.Equal(t, "simple", service.GetName()) + require.Equal(t, "piped", service.GetAnnotations()["pipecd.dev/managed-by"]) + require.Equal(t, "piped-id", service.GetAnnotations()["pipecd.dev/piped"]) + require.Equal(t, "app-id", service.GetAnnotations()["pipecd.dev/application"]) + require.Equal(t, "v1", service.GetAnnotations()["pipecd.dev/original-api-version"]) + require.Equal(t, "0123456789", service.GetAnnotations()["pipecd.dev/commit-hash"]) + require.Equal(t, ":Service:test-1:simple", service.GetAnnotations()["pipecd.dev/resource-key"]) + }) + require.Truef(t, ok, "expected prepare to succeed") + + t.Run("run with prune", func(t *testing.T) { + target := filepath.Join("./", "testdata", "prune_with_change_namespace", "target") + + // read the running 
application config from the example file + targetCfg, err := os.ReadFile(filepath.Join(target, "app.pipecd.yaml")) + require.NoError(t, err) + + // prepare the request to ensure the running deployment exists + targetRequest := &deployment.ExecuteStageRequest{ + Input: &deployment.ExecutePluginInput{ + Deployment: &model.Deployment{ + PipedId: "piped-id", + ApplicationId: "app-id", + DeployTargets: []string{"default"}, + }, + Stage: &model.PipelineStage{ + Id: "stage-id", + Name: "K8S_SYNC", + }, + StageConfig: []byte(``), + RunningDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: running, + CommitHash: "0123456789", + ApplicationConfig: runningCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, + TargetDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: target, + CommitHash: "0012345678", + ApplicationConfig: targetCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, }, - Stage: &model.PipelineStage{ - Id: "stage-id", - Name: "K8S_SYNC", - }, - StageConfig: []byte(``), - RunningDeploymentSource: &deployment.DeploymentSource{ - ApplicationDirectory: running, - CommitHash: "0123456789", - ApplicationConfig: runningCfg, - ApplicationConfigFilename: "app.pipecd.yaml", - }, - TargetDeploymentSource: &deployment.DeploymentSource{ - ApplicationDirectory: target, - CommitHash: "0012345678", - ApplicationConfig: targetCfg, - ApplicationConfigFilename: "app.pipecd.yaml", - }, - }, - } - - resp, err = svc.ExecuteStage(ctx, targetRequest) - require.NoError(t, err) - require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) - - // The service should be removed from the previous namespace - _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("test-1").Get(context.Background(), "simple", metav1.GetOptions{}) - require.Error(t, err) - require.Truef(t, apierrors.IsNotFound(err), "expected error to be NotFound, but got %v", err) - - // The service should be created in the new namespace - service, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("test-2").Get(context.Background(), "simple", metav1.GetOptions{}) - require.NoError(t, err) + } - require.Equal(t, "piped", service.GetLabels()["pipecd.dev/managed-by"]) - require.Equal(t, "piped-id", service.GetLabels()["pipecd.dev/piped"]) - require.Equal(t, "app-id", service.GetLabels()["pipecd.dev/application"]) - require.Equal(t, "0012345678", service.GetLabels()["pipecd.dev/commit-hash"]) - - require.Equal(t, "simple", service.GetName()) - require.Equal(t, "piped", service.GetAnnotations()["pipecd.dev/managed-by"]) - require.Equal(t, "piped-id", service.GetAnnotations()["pipecd.dev/piped"]) - require.Equal(t, "app-id", service.GetAnnotations()["pipecd.dev/application"]) - require.Equal(t, "v1", service.GetAnnotations()["pipecd.dev/original-api-version"]) - require.Equal(t, "0012345678", service.GetAnnotations()["pipecd.dev/commit-hash"]) - require.Equal(t, ":Service:test-2:simple", service.GetAnnotations()["pipecd.dev/resource-key"]) + resp, err := svc.ExecuteStage(ctx, targetRequest) + require.NoError(t, err) + require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + + // The service should be removed from the previous namespace + _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("test-1").Get(context.Background(), "simple", metav1.GetOptions{}) + 
require.Error(t, err) + require.Truef(t, apierrors.IsNotFound(err), "expected error to be NotFound, but got %v", err) + + // The service should be created in the new namespace + service, err := dynamicClient.Resource(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}).Namespace("test-2").Get(context.Background(), "simple", metav1.GetOptions{}) + require.NoError(t, err) + + require.Equal(t, "piped", service.GetLabels()["pipecd.dev/managed-by"]) + require.Equal(t, "piped-id", service.GetLabels()["pipecd.dev/piped"]) + require.Equal(t, "app-id", service.GetLabels()["pipecd.dev/application"]) + require.Equal(t, "0012345678", service.GetLabels()["pipecd.dev/commit-hash"]) + + require.Equal(t, "simple", service.GetName()) + require.Equal(t, "piped", service.GetAnnotations()["pipecd.dev/managed-by"]) + require.Equal(t, "piped-id", service.GetAnnotations()["pipecd.dev/piped"]) + require.Equal(t, "app-id", service.GetAnnotations()["pipecd.dev/application"]) + require.Equal(t, "v1", service.GetAnnotations()["pipecd.dev/original-api-version"]) + require.Equal(t, "0012345678", service.GetAnnotations()["pipecd.dev/commit-hash"]) + require.Equal(t, ":Service:test-2:simple", service.GetAnnotations()["pipecd.dev/resource-key"]) + }) } func TestDeploymentService_executeK8sSyncStage_withPrune_clusterScoped(t *testing.T) { @@ -524,32 +536,35 @@ func TestDeploymentService_executeK8sSyncStage_withPrune_clusterScoped(t *testin prepareCfg, err := os.ReadFile(filepath.Join(prepare, "app.pipecd.yaml")) require.NoError(t, err) - prepareRequest := &deployment.ExecuteStageRequest{ - Input: &deployment.ExecutePluginInput{ - Deployment: &model.Deployment{ - PipedId: "piped-id", - ApplicationId: "prepare-app-id", - DeployTargets: []string{"default"}, - }, - Stage: &model.PipelineStage{ - Id: "stage-id", - Name: "K8S_SYNC", + ok := t.Run("prepare crd", func(t *testing.T) { + prepareRequest := &deployment.ExecuteStageRequest{ + Input: &deployment.ExecutePluginInput{ + Deployment: &model.Deployment{ + PipedId: "piped-id", + ApplicationId: "prepare-app-id", + DeployTargets: []string{"default"}, + }, + Stage: &model.PipelineStage{ + Id: "stage-id", + Name: "K8S_SYNC", + }, + StageConfig: []byte(``), + RunningDeploymentSource: nil, + TargetDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: prepare, + CommitHash: "0123456789", + ApplicationConfig: prepareCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, }, - StageConfig: []byte(``), - RunningDeploymentSource: nil, - TargetDeploymentSource: &deployment.DeploymentSource{ - ApplicationDirectory: prepare, - CommitHash: "0123456789", - ApplicationConfig: prepareCfg, - ApplicationConfigFilename: "app.pipecd.yaml", - }, - }, - } + } - resp, err := svc.ExecuteStage(ctx, prepareRequest) + resp, err := svc.ExecuteStage(ctx, prepareRequest) - require.NoError(t, err) - require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + require.NoError(t, err) + require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + }) + require.Truef(t, ok, "expected prepare to succeed") // prepare the running resources running := filepath.Join("./", "testdata", "prune_cluster_scoped_resource", "running") @@ -558,84 +573,89 @@ func TestDeploymentService_executeK8sSyncStage_withPrune_clusterScoped(t *testin runningCfg, err := os.ReadFile(filepath.Join(running, "app.pipecd.yaml")) require.NoError(t, err) - // prepare the request to ensure the running deployment exists - runningRequest := 
&deployment.ExecuteStageRequest{ - Input: &deployment.ExecutePluginInput{ - Deployment: &model.Deployment{ - PipedId: "piped-id", - ApplicationId: "app-id", - DeployTargets: []string{"default"}, + ok = t.Run("prepare running", func(t *testing.T) { + // prepare the request to ensure the running deployment exists + runningRequest := &deployment.ExecuteStageRequest{ + Input: &deployment.ExecutePluginInput{ + Deployment: &model.Deployment{ + PipedId: "piped-id", + ApplicationId: "app-id", + DeployTargets: []string{"default"}, + }, + Stage: &model.PipelineStage{ + Id: "stage-id", + Name: "K8S_SYNC", + }, + StageConfig: []byte(``), + RunningDeploymentSource: nil, + TargetDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: running, + CommitHash: "0123456789", + ApplicationConfig: runningCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, }, - Stage: &model.PipelineStage{ - Id: "stage-id", - Name: "K8S_SYNC", - }, - StageConfig: []byte(``), - RunningDeploymentSource: nil, - TargetDeploymentSource: &deployment.DeploymentSource{ - ApplicationDirectory: running, - CommitHash: "0123456789", - ApplicationConfig: runningCfg, - ApplicationConfigFilename: "app.pipecd.yaml", - }, - }, - } - - resp, err = svc.ExecuteStage(ctx, runningRequest) - - require.NoError(t, err) - require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) - - // The my-new-cron-object/my-new-cron-object-2 should be created - _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object", metav1.GetOptions{}) - require.NoError(t, err) - _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object-2", metav1.GetOptions{}) - require.NoError(t, err) - - // sync the target resources and assert the prune behavior - target := filepath.Join("./", "testdata", "prune_cluster_scoped_resource", "target") - - // read the running application config from the example file - targetCfg, err := os.ReadFile(filepath.Join(target, "app.pipecd.yaml")) - require.NoError(t, err) + } - // prepare the request to ensure the running deployment exists - targetRequest := &deployment.ExecuteStageRequest{ - Input: &deployment.ExecutePluginInput{ - Deployment: &model.Deployment{ - PipedId: "piped-id", - ApplicationId: "app-id", - DeployTargets: []string{"default"}, - }, - Stage: &model.PipelineStage{ - Id: "stage-id", - Name: "K8S_SYNC", - }, - StageConfig: []byte(``), - RunningDeploymentSource: &deployment.DeploymentSource{ - ApplicationDirectory: running, - CommitHash: "0123456789", - ApplicationConfig: runningCfg, - ApplicationConfigFilename: "app.pipecd.yaml", + resp, err := svc.ExecuteStage(ctx, runningRequest) + + require.NoError(t, err) + require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + + // The my-new-cron-object/my-new-cron-object-2 should be created + _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object", metav1.GetOptions{}) + require.NoError(t, err) + _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object-2", metav1.GetOptions{}) + require.NoError(t, err) + }) + require.Truef(t, ok, "expected prepare to 
succeed") + + t.Run("sync", func(t *testing.T) { + // sync the target resources and assert the prune behavior + target := filepath.Join("./", "testdata", "prune_cluster_scoped_resource", "target") + + // read the running application config from the example file + targetCfg, err := os.ReadFile(filepath.Join(target, "app.pipecd.yaml")) + require.NoError(t, err) + + // prepare the request to ensure the running deployment exists + targetRequest := &deployment.ExecuteStageRequest{ + Input: &deployment.ExecutePluginInput{ + Deployment: &model.Deployment{ + PipedId: "piped-id", + ApplicationId: "app-id", + DeployTargets: []string{"default"}, + }, + Stage: &model.PipelineStage{ + Id: "stage-id", + Name: "K8S_SYNC", + }, + StageConfig: []byte(``), + RunningDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: running, + CommitHash: "0123456789", + ApplicationConfig: runningCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, + TargetDeploymentSource: &deployment.DeploymentSource{ + ApplicationDirectory: target, + CommitHash: "0012345678", + ApplicationConfig: targetCfg, + ApplicationConfigFilename: "app.pipecd.yaml", + }, }, - TargetDeploymentSource: &deployment.DeploymentSource{ - ApplicationDirectory: target, - CommitHash: "0012345678", - ApplicationConfig: targetCfg, - ApplicationConfigFilename: "app.pipecd.yaml", - }, - }, - } - - resp, err = svc.ExecuteStage(ctx, targetRequest) - require.NoError(t, err) - require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + } - // The my-new-cron-object should not be removed - _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object", metav1.GetOptions{}) - require.NoError(t, err) - // The my-new-cron-object-2 should be removed - _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object-2", metav1.GetOptions{}) - require.Error(t, err) - require.Truef(t, apierrors.IsNotFound(err), "expected error to be NotFound, but got %v", err) + resp, err := svc.ExecuteStage(ctx, targetRequest) + require.NoError(t, err) + require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) + + // The my-new-cron-object should not be removed + _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object", metav1.GetOptions{}) + require.NoError(t, err) + // The my-new-cron-object-2 should be removed + _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object-2", metav1.GetOptions{}) + require.Error(t, err) + require.Truef(t, apierrors.IsNotFound(err), "expected error to be NotFound, but got %v", err) + }) } From 40af37c9b1f274beba4b783e6af2592716bdea64 Mon Sep 17 00:00:00 2001 From: Shinnosuke Sawada-Dazai Date: Mon, 6 Jan 2025 09:48:20 +0900 Subject: [PATCH 27/27] Add v1beta1 version for CronTab and update tests for pruning behavior Signed-off-by: Shinnosuke Sawada-Dazai --- .../plugin/kubernetes/deployment/server_test.go | 8 +++++++- .../prepare/crd.yaml | 17 +++++++++++++++++ .../running/app.pipecd.yaml | 1 + .../running/crontab-v1beta1.yaml | 7 +++++++ 4 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 
pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/crontab-v1beta1.yaml diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go index a3210b0b93..fa4af44c92 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server_test.go @@ -602,11 +602,13 @@ func TestDeploymentService_executeK8sSyncStage_withPrune_clusterScoped(t *testin require.NoError(t, err) require.Equal(t, model.StageStatus_STAGE_SUCCESS.String(), resp.GetStatus().String()) - // The my-new-cron-object/my-new-cron-object-2 should be created + // The my-new-cron-object/my-new-cron-object-2/my-new-cron-object-v1beta1 should be created _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object", metav1.GetOptions{}) require.NoError(t, err) _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object-2", metav1.GetOptions{}) require.NoError(t, err) + _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object-v1beta1", metav1.GetOptions{}) + require.NoError(t, err) }) require.Truef(t, ok, "expected prepare to succeed") @@ -657,5 +659,9 @@ func TestDeploymentService_executeK8sSyncStage_withPrune_clusterScoped(t *testin _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object-2", metav1.GetOptions{}) require.Error(t, err) require.Truef(t, apierrors.IsNotFound(err), "expected error to be NotFound, but got %v", err) + // The my-new-cron-object-v1beta1 should be removed + _, err = dynamicClient.Resource(schema.GroupVersionResource{Group: "stable.example.com", Version: "v1", Resource: "crontabs"}).Get(context.Background(), "my-new-cron-object-v1beta1", metav1.GetOptions{}) + require.Error(t, err) + require.Truef(t, apierrors.IsNotFound(err), "expected error to be NotFound, but got %v", err) }) } diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/prepare/crd.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/prepare/crd.yaml index 96ee6a8f06..32f7df0ce0 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/prepare/crd.yaml +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/prepare/crd.yaml @@ -13,6 +13,23 @@ spec: served: true # One and only one version must be marked as the storage version. storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cronSpec: + type: string + image: + type: string + replicas: + - name: v1beta1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. 
+ storage: false schema: openAPIV3Schema: type: object diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/app.pipecd.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/app.pipecd.yaml index a5ccfbc0cc..f93b4e51b9 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/app.pipecd.yaml +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/app.pipecd.yaml @@ -10,4 +10,5 @@ spec: manifests: - crontab.yaml - crontab-2.yaml + - crontab-v1beta1.yaml kubectlVersion: 1.31.0 diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/crontab-v1beta1.yaml b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/crontab-v1beta1.yaml new file mode 100644 index 0000000000..4aec6c08df --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/testdata/prune_cluster_scoped_resource/running/crontab-v1beta1.yaml @@ -0,0 +1,7 @@ +apiVersion: "stable.example.com/v1beta1" +kind: CronTab +metadata: + name: my-new-cron-object-v1beta1 +spec: + cronSpec: "* * * * */5" + image: my-awesome-cron-image