From 1a4dd9578784b360ef27054e553d4923a7eb0c10 Mon Sep 17 00:00:00 2001
From: "Dat. Ba Dao"
Date: Tue, 7 Jan 2025 16:21:20 +0700
Subject: [PATCH] internal/controller: check lint policy in custom config (#252)

---
 api/v1alpha1/atlasschema_types.go              |  16 ++
 internal/controller/atlasschema_controller.go  | 106 +++++++++--
 .../controller/atlasschema_controller_test.go  |   5 +-
 ...txtar => migration-config-variables.txtar}  | 114 +----------
 .../schema-config-lint-destructive.txtar       | 156 ++++++++++++++++
 .../testscript/schema-config-variables.txtar   | 168 ++++++++++++++++++
 ...nancy.txtar => schema-multi-tenancy.txtar}  |   0
 7 files changed, 434 insertions(+), 131 deletions(-)
 rename test/e2e/testscript/{custom-config.txtar => migration-config-variables.txtar} (58%)
 create mode 100644 test/e2e/testscript/schema-config-lint-destructive.txtar
 create mode 100644 test/e2e/testscript/schema-config-variables.txtar
 rename test/e2e/testscript/{multi-tenancy.txtar => schema-multi-tenancy.txtar} (100%)

diff --git a/api/v1alpha1/atlasschema_types.go b/api/v1alpha1/atlasschema_types.go
index cf379ef..9ae07a5 100644
--- a/api/v1alpha1/atlasschema_types.go
+++ b/api/v1alpha1/atlasschema_types.go
@@ -329,3 +329,19 @@ func (d Diff) AsBlock() *hclwrite.Block {
 	}
 	return blk
 }
+
+func (p *Policy) HasLint() bool {
+	return p != nil && p.Lint != nil
+}
+
+func (p *Policy) HasDiff() bool {
+	return p != nil && p.Diff != nil
+}
+
+func (p *Policy) HasLintDestructive() bool {
+	return p.HasLint() && p.Lint.Destructive != nil
+}
+
+func (p *Policy) HasLintReview() bool {
+	return p.HasLint() && p.Lint.Review != ""
+}
diff --git a/internal/controller/atlasschema_controller.go b/internal/controller/atlasschema_controller.go
index faa61f8..420886a 100644
--- a/internal/controller/atlasschema_controller.go
+++ b/internal/controller/atlasschema_controller.go
@@ -680,6 +680,70 @@ func (d *managedData) hasTargets() bool {
 	return env.Body().GetAttribute("for_each") != nil
 }

+// hasLint returns true if the environment has a lint policy.
+func (d *managedData) hasLint() bool {
+	if d.Policy != nil && d.Policy.HasLint() {
+		return true
+	}
+	if d.Config == nil {
+		return false
+	}
+	env := searchBlock(d.Config.Body(), hclwrite.NewBlock("env", []string{d.EnvName}))
+	if env == nil {
+		return false
+	}
+	return searchBlock(env.Body(), hclwrite.NewBlock("lint", nil)) != nil
+}
+
+// hasLintDestructive returns true if the environment has a lint destructive policy.
+func (d *managedData) hasLintDestructive() bool {
+	if d.Policy != nil && d.Policy.HasLintDestructive() {
+		return true
+	}
+	if d.Config == nil {
+		return false
+	}
+	env := searchBlock(d.Config.Body(), hclwrite.NewBlock("env", []string{d.EnvName}))
+	if env == nil {
+		return false
+	}
+	lint := searchBlock(env.Body(), hclwrite.NewBlock("lint", nil))
+	if lint == nil {
+		// search global lint block
+		lint = searchBlock(d.Config.Body(), hclwrite.NewBlock("lint", nil))
+		if lint == nil {
+			return false
+		}
+		return false
+	}
+	destructive := searchBlock(lint.Body(), hclwrite.NewBlock("destructive", nil))
+	return destructive != nil && destructive.Body().GetAttribute("error") != nil
+}
+
+// hasLintReview returns true if the environment has a lint review policy.
+func (d *managedData) hasLintReview() bool { + if d.Policy != nil && d.Policy.HasLintReview() { + return true + } + if d.Config == nil { + return false + } + env := searchBlock(d.Config.Body(), hclwrite.NewBlock("env", []string{d.EnvName})) + if env == nil { + return false + } + lint := searchBlock(env.Body(), hclwrite.NewBlock("lint", nil)) + if lint == nil { + // search global lint block + lint = searchBlock(d.Config.Body(), hclwrite.NewBlock("lint", nil)) + if lint == nil { + return false + } + return false + } + return lint.Body().GetAttribute("review") != nil +} + // hash returns the sha256 hash of the desired. func (d *managedData) hash() (string, error) { h := sha256.New() @@ -749,30 +813,42 @@ func (d *managedData) render(w io.Writer) error { // enableDestructive enables the linting policy for destructive changes. // If the force is set to true, it will override the existing value. func (d *managedData) enableDestructive(force bool) { - check := &dbv1alpha1.CheckConfig{Error: true} - destructive := &dbv1alpha1.Lint{Destructive: check} - switch { - case d.Policy == nil: - d.Policy = &dbv1alpha1.Policy{Lint: destructive} - case d.Policy.Lint == nil: - d.Policy.Lint = destructive - case d.Policy.Lint.Destructive == nil, force: + override := func() { + check := &dbv1alpha1.CheckConfig{Error: true} + destructive := &dbv1alpha1.Lint{Destructive: check} + if d.Policy == nil { + d.Policy = &dbv1alpha1.Policy{Lint: destructive} + return + } + if d.Policy.Lint == nil { + d.Policy.Lint = destructive + return + } d.Policy.Lint.Destructive = check } + if !d.hasLint() || !d.hasLintDestructive() || force { + override() + } } // setLintReview sets the lint review policy. // If the force is set to true, it will override the existing value. func (d *managedData) setLintReview(v dbv1alpha1.LintReview, force bool) { - lint := &dbv1alpha1.Lint{Review: v} - switch { - case d.Policy == nil: - d.Policy = &dbv1alpha1.Policy{Lint: lint} - case d.Policy.Lint == nil: - d.Policy.Lint = lint - case d.Policy.Lint.Review == "", force: + override := func() { + lint := &dbv1alpha1.Lint{Review: v} + if d.Policy == nil { + d.Policy = &dbv1alpha1.Policy{Lint: lint} + return + } + if d.Policy.Lint == nil { + d.Policy.Lint = lint + return + } d.Policy.Lint.Review = v } + if !d.hasLint() || !d.hasLintReview() || force { + override() + } } // asBlocks returns the HCL block for the environment configuration. 
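The helpers above detect a lint policy that the user already declared, either in spec.policy or as a lint block inside the custom Atlas config, so that enableDestructive and setLintReview only fill in defaults when nothing is set or force is true. Below is a minimal, self-contained sketch of that detection using the hclwrite API directly; findBlock stands in for the operator's internal searchBlock helper, and the config literal is a hypothetical user-supplied spec.config.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

// findBlock returns the first block in body with the given type and, when a
// label is supplied, a matching first label. It is a stand-in for the
// operator's internal searchBlock helper.
func findBlock(body *hclwrite.Body, typ string, labels ...string) *hclwrite.Block {
	for _, blk := range body.Blocks() {
		if blk.Type() != typ {
			continue
		}
		if len(labels) > 0 && (len(blk.Labels()) == 0 || blk.Labels()[0] != labels[0]) {
			continue
		}
		return blk
	}
	return nil
}

func main() {
	// Hypothetical user-supplied config, as it might appear in spec.config.
	src := []byte(`
env "test" {
  lint {
    destructive {
      error = false
    }
  }
}
`)
	f, diags := hclwrite.ParseConfig(src, "config.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags)
	}
	// Look for a lint block scoped to the target environment, as the new
	// hasLint/hasLintDestructive helpers do.
	var hasLint, hasDestructive bool
	if env := findBlock(f.Body(), "env", "test"); env != nil {
		if lint := findBlock(env.Body(), "lint"); lint != nil {
			hasLint = true
			if d := findBlock(lint.Body(), "destructive"); d != nil {
				hasDestructive = d.Body().GetAttribute("error") != nil
			}
		}
	}
	fmt.Println(hasLint, hasDestructive) // true true
}

When these checks report an existing policy, the controller leaves the user's settings alone unless force is set; the schema-config-lint-destructive e2e test added below exercises exactly that path.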
diff --git a/internal/controller/atlasschema_controller_test.go b/internal/controller/atlasschema_controller_test.go index bea073e..54ca3a7 100644 --- a/internal/controller/atlasschema_controller_test.go +++ b/internal/controller/atlasschema_controller_test.go @@ -27,6 +27,8 @@ import ( "time" "ariga.io/atlas-go-sdk/atlasexec" + dbv1alpha1 "github.com/ariga/atlas-operator/api/v1alpha1" + "github.com/ariga/atlas-operator/internal/controller/watch" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -39,9 +41,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - - dbv1alpha1 "github.com/ariga/atlas-operator/api/v1alpha1" - "github.com/ariga/atlas-operator/internal/controller/watch" ) const ( diff --git a/test/e2e/testscript/custom-config.txtar b/test/e2e/testscript/migration-config-variables.txtar similarity index 58% rename from test/e2e/testscript/custom-config.txtar rename to test/e2e/testscript/migration-config-variables.txtar index db63a4e..21ff12c 100644 --- a/test/e2e/testscript/custom-config.txtar +++ b/test/e2e/testscript/migration-config-variables.txtar @@ -1,10 +1,6 @@ -env SCHEMA_DB_URL=postgres://root:pass@postgres.${NAMESPACE}:5432/postgres?sslmode=disable -env SCHEMA_DB_DEV_URL=postgres://root:pass@postgres.${NAMESPACE}:5433/postgres?sslmode=disable env MIGRATE_DB_URL=postgres://root:pass@postgres.${NAMESPACE}:5434/postgres?sslmode=disable env MIGRATE_DB_DEV_URL=postgres://root:pass@postgres.${NAMESPACE}:5435/postgres?sslmode=disable kubectl apply -f database.yaml -kubectl create secret generic schema-db-creds --from-literal=url=${SCHEMA_DB_URL} -kubectl create configmap schema-db-dev-creds --from-literal=url=${SCHEMA_DB_DEV_URL} kubectl create secret generic migrate-db-creds --from-literal=url=${MIGRATE_DB_URL} kubectl create configmap migrate-db-dev-creds --from-literal=url=${MIGRATE_DB_DEV_URL} # Create the secret to store ATLAS_TOKEN @@ -15,85 +11,25 @@ kubectl-wait-available deploy/postgres # Wait for the DB ready before creating the schema kubectl-wait-ready -l app=postgres pods -# Create the schema -kubectl apply -f schema.yaml -kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasSchemas/sample - # Create the migration kubectl apply -f migration.yaml kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasMigrations/sample # Patch a invalid config of schema from secret -kubectl create secret generic schema-config --from-file=config.hcl kubectl create secret generic migration-config --from-file=config.hcl -kubectl patch -f schema.yaml --type merge --patch-file patch-schema-config-from-configmap.yaml kubectl patch -f migration.yaml --type merge --patch-file patch-migration-config-from-configmap.yaml -kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasSchemas/sample kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasMigrations/sample # Ensure the operator priority the spec url over the config -kubectl patch -f schema.yaml --type merge --patch-file patch-invalid-url.yaml kubectl patch -f migration.yaml --type merge --patch-file patch-invalid-url.yaml -kubectl wait --for=jsonpath='{.status.conditions[*].message}'='"Error: postgres: scanning system variables: pq: Could not detect default username. 
Please provide one explicitly"' --timeout=500s AtlasSchemas/sample kubectl wait --for=jsonpath='{.status.conditions[*].message}'='"Error: postgres: scanning system variables: pq: Could not detect default username. Please provide one explicitly"' --timeout=500s AtlasMigrations/sample --- schema.yaml -- -apiVersion: db.atlasgo.io/v1alpha1 -kind: AtlasSchema -metadata: - name: sample -spec: - envName: "test" - schema: - sql: | - create table users ( - id int not null, - name varchar(255) not null, - email varchar(255) unique not null, - short_bio varchar(255) not null, - primary key (id) - ); - cloud: - repo: atlas-operator - tokenFrom: - secretKeyRef: - name: atlas-token - key: ATLAS_TOKEN - vars: - - key: "db_url" - valueFrom: - secretKeyRef: - name: schema-db-creds - key: url - - key: "dev_db_url" - valueFrom: - configMapKeyRef: - name: schema-db-dev-creds - key: url - config: | - variable "db_url" { - type = string - } - variable "dev_db_url" { - type = string - } - env "test" { - url = var.db_url - dev = var.dev_db_url - } --- patch-schema-config-from-configmap.yaml -- -spec: - config: - configFrom: - secretKeyRef: - name: schema-config - key: config.hcl -- patch-migration-config-from-configmap.yaml -- spec: config: configFrom: secretKeyRef: - name: schema-config + name: migration-config key: config.hcl -- patch-invalid-url.yaml -- spec: @@ -166,12 +102,6 @@ spec: selector: app: postgres ports: - - name: postgres - port: 5432 - targetPort: postgres - - name: postgres-dev - port: 5433 - targetPort: postgres-dev - name: pg-migrate port: 5434 targetPort: pg-migrate @@ -198,48 +128,6 @@ spec: runAsNonRoot: true runAsUser: 999 containers: - - name: postgres - image: postgres:15.4 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - all - env: - - name: POSTGRES_PASSWORD - value: pass - - name: POSTGRES_USER - value: root - ports: - - containerPort: 5432 - name: postgres - startupProbe: - exec: - command: [ "pg_isready" ] - failureThreshold: 30 - periodSeconds: 10 - - name: postgres-dev - image: postgres:15.4 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - all - env: - - name: POSTGRES_PASSWORD - value: pass - - name: POSTGRES_USER - value: root - - name: PGPORT - value: "5433" - ports: - - containerPort: 5433 - name: postgres-dev - startupProbe: - exec: - command: [ "pg_isready" ] - failureThreshold: 30 - periodSeconds: 10 - name: pg-migrate image: postgres:15.4 securityContext: diff --git a/test/e2e/testscript/schema-config-lint-destructive.txtar b/test/e2e/testscript/schema-config-lint-destructive.txtar new file mode 100644 index 0000000..a187768 --- /dev/null +++ b/test/e2e/testscript/schema-config-lint-destructive.txtar @@ -0,0 +1,156 @@ +env DB_URL=mysql://root:pass@mysql.${NAMESPACE}:3306/myapp +kubectl apply -f database.yaml +kubectl create secret generic db-creds --from-literal=url=${DB_URL} +kubectl create secret generic atlas-token --from-literal=ATLAS_TOKEN=${ATLAS_TOKEN} + +# Wait for the first pod created +kubectl-wait-available deploy/mysql +# Wait for the DB ready before creating the schema +kubectl-wait-ready -l app=mysql pods + +# Create the schema +kubectl apply -f schema.yaml +kubectl-wait-ready AtlasSchema/mysql + +# Inspect the schema to ensure it's correct +atlas schema inspect -u ${DB_URL} +cmp stdout schema.hcl + +kubectl patch -f schema.yaml --type merge --patch-file patch-remove-bio.yaml + +# Wait for the controller to detect the change +exec sleep 10 + +# Ensure the controller is aware of the change 
+kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=ApplyingSchema --timeout=500s AtlasSchemas/mysql +# Check the error message +kubectl get AtlasSchema/mysql -o jsonpath --template='{.status.conditions[*].message}' +stdout 'Rejected by review policy: errors or warnings were found' + +# Disable the lint policy error via custom config +kubectl patch -f schema.yaml --type merge --patch-file patch-disable-lint-destructive.yaml +# Wait for the controller to detect the change +exec sleep 10 +kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasSchemas/mysql + +-- schema.hcl -- +table "users" { + schema = schema.myapp + column "id" { + null = false + type = int + auto_increment = true + } + column "name" { + null = false + type = varchar(255) + } + column "email" { + null = false + type = varchar(255) + } + column "short_bio" { + null = false + type = varchar(255) + } + primary_key { + columns = [column.id] + } + index "email" { + unique = true + columns = [column.email] + } +} +schema "myapp" { + charset = "utf8mb4" + collate = "utf8mb4_0900_ai_ci" +} +-- patch-remove-bio.yaml -- +spec: + schema: + sql: | + create table users ( + id int not null auto_increment, + name varchar(255) not null, + email varchar(255) unique not null, + primary key (id) + ); +-- patch-disable-lint-destructive.yaml -- +spec: + envName: "test" + config: | + env "test" { + lint { + destructive { + error = false + } + } + } +-- schema.yaml -- +apiVersion: db.atlasgo.io/v1alpha1 +kind: AtlasSchema +metadata: + name: mysql +spec: + urlFrom: + secretKeyRef: + name: db-creds + key: url + cloud: + tokenFrom: + secretKeyRef: + name: atlas-token + key: ATLAS_TOKEN + schema: + sql: | + create table users ( + id int not null auto_increment, + name varchar(255) not null, + email varchar(255) unique not null, + short_bio varchar(255) not null, + primary key (id) + ); +-- database.yaml -- +apiVersion: v1 +kind: Service +metadata: + name: mysql +spec: + selector: + app: mysql + ports: + - name: mysql + port: 3306 + targetPort: mysql + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mysql +spec: + selector: + matchLabels: + app: mysql + replicas: 1 + template: + metadata: + labels: + app: mysql + spec: + containers: + - name: mysql + image: mysql:latest + env: + - name: MYSQL_ROOT_PASSWORD + value: pass + - name: MYSQL_DATABASE + value: myapp + ports: + - containerPort: 3306 + name: mysql + startupProbe: + exec: + command: [ "mysql", "-ppass", "-h", "127.0.0.1", "-e", "SELECT 1" ] + failureThreshold: 30 + periodSeconds: 10 diff --git a/test/e2e/testscript/schema-config-variables.txtar b/test/e2e/testscript/schema-config-variables.txtar new file mode 100644 index 0000000..e1a55bd --- /dev/null +++ b/test/e2e/testscript/schema-config-variables.txtar @@ -0,0 +1,168 @@ +env SCHEMA_DB_URL=postgres://root:pass@postgres.${NAMESPACE}:5432/postgres?sslmode=disable +env SCHEMA_DB_DEV_URL=postgres://root:pass@postgres.${NAMESPACE}:5433/postgres?sslmode=disable +kubectl apply -f database.yaml +kubectl create secret generic schema-db-creds --from-literal=url=${SCHEMA_DB_URL} +kubectl create configmap schema-db-dev-creds --from-literal=url=${SCHEMA_DB_DEV_URL} +# Create the secret to store ATLAS_TOKEN +kubectl create secret generic atlas-token --from-literal=ATLAS_TOKEN=${ATLAS_TOKEN} + +# Wait for the first pod created +kubectl-wait-available deploy/postgres +# Wait for the DB ready before creating the schema +kubectl-wait-ready -l app=postgres pods + +# Create the schema 
+kubectl apply -f schema.yaml +kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasSchemas/sample + +# Patch a invalid config of schema from secret +kubectl create secret generic schema-config --from-file=config.hcl +kubectl patch -f schema.yaml --type merge --patch-file patch-schema-config-from-configmap.yaml +kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasSchemas/sample + +# Ensure the operator priority the spec url over the config +kubectl patch -f schema.yaml --type merge --patch-file patch-invalid-url.yaml +kubectl wait --for=jsonpath='{.status.conditions[*].message}'='"Error: postgres: scanning system variables: pq: Could not detect default username. Please provide one explicitly"' --timeout=500s AtlasSchemas/sample + +-- schema.yaml -- +apiVersion: db.atlasgo.io/v1alpha1 +kind: AtlasSchema +metadata: + name: sample +spec: + envName: "test" + schema: + sql: | + create table users ( + id int not null, + name varchar(255) not null, + email varchar(255) unique not null, + short_bio varchar(255) not null, + primary key (id) + ); + cloud: + repo: atlas-operator + tokenFrom: + secretKeyRef: + name: atlas-token + key: ATLAS_TOKEN + vars: + - key: "db_url" + valueFrom: + secretKeyRef: + name: schema-db-creds + key: url + - key: "dev_db_url" + valueFrom: + configMapKeyRef: + name: schema-db-dev-creds + key: url + config: | + variable "db_url" { + type = string + } + variable "dev_db_url" { + type = string + } + env "test" { + url = var.db_url + dev = var.dev_db_url + } +-- patch-schema-config-from-configmap.yaml -- +spec: + config: + configFrom: + secretKeyRef: + name: schema-config + key: config.hcl +-- patch-invalid-url.yaml -- +spec: + url: "postgres://invalid" +-- config.hcl -- +variable "db_url" { + type = string +} +variable "dev_db_url" { + type = string +} +env "test" { + url = var.db_url + dev = var.dev_db_url +} +-- database.yaml -- +apiVersion: v1 +kind: Service +metadata: + name: postgres +spec: + selector: + app: postgres + ports: + - name: postgres + port: 5432 + targetPort: postgres + - name: postgres-dev + port: 5433 + targetPort: postgres-dev + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postgres +spec: + selector: + matchLabels: + app: postgres + replicas: 1 + template: + metadata: + labels: + app: postgres + spec: + securityContext: + runAsNonRoot: true + runAsUser: 999 + containers: + - name: postgres + image: postgres:15.4 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + env: + - name: POSTGRES_PASSWORD + value: pass + - name: POSTGRES_USER + value: root + ports: + - containerPort: 5432 + name: postgres + startupProbe: + exec: + command: [ "pg_isready" ] + failureThreshold: 30 + periodSeconds: 10 + - name: postgres-dev + image: postgres:15.4 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + env: + - name: POSTGRES_PASSWORD + value: pass + - name: POSTGRES_USER + value: root + - name: PGPORT + value: "5433" + ports: + - containerPort: 5433 + name: postgres-dev + startupProbe: + exec: + command: [ "pg_isready" ] + failureThreshold: 30 + periodSeconds: 10 diff --git a/test/e2e/testscript/multi-tenancy.txtar b/test/e2e/testscript/schema-multi-tenancy.txtar similarity index 100% rename from test/e2e/testscript/multi-tenancy.txtar rename to test/e2e/testscript/schema-multi-tenancy.txtar
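The e2e scripts above rely on the override gate that the reworked enableDestructive and setLintReview implement: defaults are applied only when no user lint policy exists, or when force is set. The sketch below reduces that gate to a self-contained example; CheckConfig, Lint, Policy and managedData are simplified stand-ins for the operator's dbv1alpha1 and controller types, and userConfiguredLint collapses the hasLint/hasLintDestructive checks into one flag.

package main

import "fmt"

// Simplified stand-ins for the operator's dbv1alpha1 API types (illustrative only).
type CheckConfig struct{ Error bool }

type Lint struct {
	Destructive *CheckConfig
	Review      string
}

type Policy struct{ Lint *Lint }

type managedData struct {
	Policy *Policy
	// userConfiguredLint collapses the hasLint/hasLintDestructive checks:
	// true when spec.policy or the custom config already declares the policy.
	userConfiguredLint bool
}

// enableDestructive mirrors the gate introduced by this change: the
// destructive check defaults to error=true only when the user has not
// already declared it, or when force is set.
func (d *managedData) enableDestructive(force bool) {
	if d.userConfiguredLint && !force {
		return // respect the user-provided policy
	}
	if d.Policy == nil {
		d.Policy = &Policy{}
	}
	if d.Policy.Lint == nil {
		d.Policy.Lint = &Lint{}
	}
	d.Policy.Lint.Destructive = &CheckConfig{Error: true}
}

func main() {
	// The schema-config-lint-destructive test sets lint { destructive { error = false } }
	// in the custom config, so the default must not be re-applied.
	d := &managedData{userConfiguredLint: true}
	d.enableDestructive(false)
	fmt.Println(d.Policy == nil) // true: nothing was overridden

	// With force, the error=true default is applied regardless.
	d.enableDestructive(true)
	fmt.Println(d.Policy.Lint.Destructive.Error) // true
}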