internal/controller: check lint policy in custom config (#252)
datdao authored Jan 7, 2025
1 parent 85a2be2 commit 1a4dd95
Showing 7 changed files with 434 additions and 131 deletions.
16 changes: 16 additions & 0 deletions api/v1alpha1/atlasschema_types.go
@@ -329,3 +329,19 @@ func (d Diff) AsBlock() *hclwrite.Block {
}
return blk
}

func (p *Policy) HasLint() bool {
return p != nil && p.Lint != nil
}

func (p *Policy) HasDiff() bool {
return p != nil && p.Diff != nil
}

func (p *Policy) HasLintDestructive() bool {
return p.HasLint() && p.Lint.Destructive != nil
}

func (p *Policy) HasLintReview() bool {
return p.HasLint() && p.Lint.Review != ""
}
106 changes: 91 additions & 15 deletions internal/controller/atlasschema_controller.go
@@ -680,6 +680,70 @@ func (d *managedData) hasTargets() bool {
return env.Body().GetAttribute("for_each") != nil
}

// hasLint returns true if the environment has a lint policy, either in the CR policy or in the custom config.
func (d *managedData) hasLint() bool {
if d.Policy != nil && d.Policy.HasLint() {
return true
}
if d.Config == nil {
return false
}
env := searchBlock(d.Config.Body(), hclwrite.NewBlock("env", []string{d.EnvName}))
if env == nil {
return false
}
return searchBlock(env.Body(), hclwrite.NewBlock("lint", nil)) != nil
}

// hasLintDestructive returns true if the environment has a lint destructive policy.
func (d *managedData) hasLintDestructive() bool {
if d.Policy != nil && d.Policy.HasLintDestructive() {
return true
}
if d.Config == nil {
return false
}
env := searchBlock(d.Config.Body(), hclwrite.NewBlock("env", []string{d.EnvName}))
if env == nil {
return false
}
lint := searchBlock(env.Body(), hclwrite.NewBlock("lint", nil))
if lint == nil {
// search global lint block
lint = searchBlock(d.Config.Body(), hclwrite.NewBlock("lint", nil))
if lint == nil {
return false
}
}
destructive := searchBlock(lint.Body(), hclwrite.NewBlock("destructive", nil))
return destructive != nil && destructive.Body().GetAttribute("error") != nil
}

// hasLintReview returns true if the environment has a lint review policy.
func (d *managedData) hasLintReview() bool {
if d.Policy != nil && d.Policy.HasLintReview() {
return true
}
if d.Config == nil {
return false
}
env := searchBlock(d.Config.Body(), hclwrite.NewBlock("env", []string{d.EnvName}))
if env == nil {
return false
}
lint := searchBlock(env.Body(), hclwrite.NewBlock("lint", nil))
if lint == nil {
// search global lint block
lint = searchBlock(d.Config.Body(), hclwrite.NewBlock("lint", nil))
if lint == nil {
return false
}
}
return lint.Body().GetAttribute("review") != nil
}
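
Note: all three lookups walk the user-supplied atlas.hcl in the same order — the lint block nested inside the matching env block first, then the top-level lint block as a fallback. A standalone sketch of that lookup using hclwrite (illustrative only: findBlock below is a stand-in for the operator's internal searchBlock helper, and the config is a made-up example):

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclwrite"
)

// findBlock stands in for the operator's searchBlock helper: it returns the
// first block in body with the given type and labels, or nil if none exists.
func findBlock(body *hclwrite.Body, typ string, labels ...string) *hclwrite.Block {
    return body.FirstMatchingBlock(typ, labels)
}

func main() {
    src := `
lint {
  review = "ALWAYS" // global lint block
}
env "test" {
  lint {
    destructive {
      error = true // env-level lint block
    }
  }
}
`
    f, diags := hclwrite.ParseConfig([]byte(src), "atlas.hcl", hcl.InitialPos)
    if diags.HasErrors() {
        panic(diags)
    }
    env := findBlock(f.Body(), "env", "test") // the real code also handles env == nil
    lint := findBlock(env.Body(), "lint")
    if lint == nil {
        // fall back to the global lint block, as hasLintDestructive/hasLintReview do
        lint = findBlock(f.Body(), "lint")
    }
    destructive := findBlock(lint.Body(), "destructive")
    fmt.Println(destructive != nil && destructive.Body().GetAttribute("error") != nil) // true
}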

// hash returns the sha256 hash of the desired state.
func (d *managedData) hash() (string, error) {
h := sha256.New()
@@ -749,30 +813,42 @@ func (d *managedData) render(w io.Writer) error {
// enableDestructive enables the linting policy for destructive changes.
// If force is set to true, it will override the existing value.
func (d *managedData) enableDestructive(force bool) {
check := &dbv1alpha1.CheckConfig{Error: true}
destructive := &dbv1alpha1.Lint{Destructive: check}
switch {
case d.Policy == nil:
d.Policy = &dbv1alpha1.Policy{Lint: destructive}
case d.Policy.Lint == nil:
d.Policy.Lint = destructive
case d.Policy.Lint.Destructive == nil, force:
override := func() {
check := &dbv1alpha1.CheckConfig{Error: true}
destructive := &dbv1alpha1.Lint{Destructive: check}
if d.Policy == nil {
d.Policy = &dbv1alpha1.Policy{Lint: destructive}
return
}
if d.Policy.Lint == nil {
d.Policy.Lint = destructive
return
}
d.Policy.Lint.Destructive = check
}
if !d.hasLint() || !d.hasLintDestructive() || force {
override()
}
}

// setLintReview sets the lint review policy.
// If force is set to true, it will override the existing value.
func (d *managedData) setLintReview(v dbv1alpha1.LintReview, force bool) {
lint := &dbv1alpha1.Lint{Review: v}
switch {
case d.Policy == nil:
d.Policy = &dbv1alpha1.Policy{Lint: lint}
case d.Policy.Lint == nil:
d.Policy.Lint = lint
case d.Policy.Lint.Review == "", force:
override := func() {
lint := &dbv1alpha1.Lint{Review: v}
if d.Policy == nil {
d.Policy = &dbv1alpha1.Policy{Lint: lint}
return
}
if d.Policy.Lint == nil {
d.Policy.Lint = lint
return
}
d.Policy.Lint.Review = v
}
if !d.hasLint() || !d.hasLintReview() || force {
override()
}
}
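
Note: both setters now share the same gate — the override runs only when no lint policy is configured anywhere (CR spec or custom config), when a lint policy exists but lacks this particular setting, or when force is set. Condensed as a sketch (the names here are mine, not the operator's):

func shouldOverride(hasLint, hasSetting, force bool) bool {
    // Override when there is no lint policy at all, when a lint policy exists
    // but lacks this particular setting, or when the caller forces it.
    return !hasLint || !hasSetting || force
}

// e.g. a custom config whose lint block already sets destructive.error:
// shouldOverride(true, true, false) == false, so the user's policy is kept.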

// asBlocks returns the HCL block for the environment configuration.
5 changes: 2 additions & 3 deletions internal/controller/atlasschema_controller_test.go
@@ -27,6 +27,8 @@ import (
"time"

"ariga.io/atlas-go-sdk/atlasexec"
dbv1alpha1 "github.com/ariga/atlas-operator/api/v1alpha1"
"github.com/ariga/atlas-operator/internal/controller/watch"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -39,9 +41,6 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

dbv1alpha1 "github.com/ariga/atlas-operator/api/v1alpha1"
"github.com/ariga/atlas-operator/internal/controller/watch"
)

const (
@@ -1,10 +1,6 @@
env SCHEMA_DB_URL=postgres://root:pass@postgres.${NAMESPACE}:5432/postgres?sslmode=disable
env SCHEMA_DB_DEV_URL=postgres://root:pass@postgres.${NAMESPACE}:5433/postgres?sslmode=disable
env MIGRATE_DB_URL=postgres://root:pass@postgres.${NAMESPACE}:5434/postgres?sslmode=disable
env MIGRATE_DB_DEV_URL=postgres://root:pass@postgres.${NAMESPACE}:5435/postgres?sslmode=disable
kubectl apply -f database.yaml
kubectl create secret generic schema-db-creds --from-literal=url=${SCHEMA_DB_URL}
kubectl create configmap schema-db-dev-creds --from-literal=url=${SCHEMA_DB_DEV_URL}
kubectl create secret generic migrate-db-creds --from-literal=url=${MIGRATE_DB_URL}
kubectl create configmap migrate-db-dev-creds --from-literal=url=${MIGRATE_DB_DEV_URL}
# Create the secret to store ATLAS_TOKEN
@@ -15,85 +11,25 @@ kubectl-wait-available deploy/postgres
# Wait for the DB to be ready before creating the schema
kubectl-wait-ready -l app=postgres pods

# Create the schema
kubectl apply -f schema.yaml
kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasSchemas/sample

# Create the migration
kubectl apply -f migration.yaml
kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasMigrations/sample

# Patch the schema and migration to load a custom config from secrets
kubectl create secret generic schema-config --from-file=config.hcl
kubectl create secret generic migration-config --from-file=config.hcl
kubectl patch -f schema.yaml --type merge --patch-file patch-schema-config-from-configmap.yaml
kubectl patch -f migration.yaml --type merge --patch-file patch-migration-config-from-configmap.yaml
kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasSchemas/sample
kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasMigrations/sample

# Ensure the operator prioritizes the spec URL over the config
kubectl patch -f schema.yaml --type merge --patch-file patch-invalid-url.yaml
kubectl patch -f migration.yaml --type merge --patch-file patch-invalid-url.yaml
kubectl wait --for=jsonpath='{.status.conditions[*].message}'='"Error: postgres: scanning system variables: pq: Could not detect default username. Please provide one explicitly"' --timeout=500s AtlasSchemas/sample
kubectl wait --for=jsonpath='{.status.conditions[*].message}'='"Error: postgres: scanning system variables: pq: Could not detect default username. Please provide one explicitly"' --timeout=500s AtlasMigrations/sample

-- schema.yaml --
apiVersion: db.atlasgo.io/v1alpha1
kind: AtlasSchema
metadata:
name: sample
spec:
envName: "test"
schema:
sql: |
create table users (
id int not null,
name varchar(255) not null,
email varchar(255) unique not null,
short_bio varchar(255) not null,
primary key (id)
);
cloud:
repo: atlas-operator
tokenFrom:
secretKeyRef:
name: atlas-token
key: ATLAS_TOKEN
vars:
- key: "db_url"
valueFrom:
secretKeyRef:
name: schema-db-creds
key: url
- key: "dev_db_url"
valueFrom:
configMapKeyRef:
name: schema-db-dev-creds
key: url
config: |
variable "db_url" {
type = string
}
variable "dev_db_url" {
type = string
}
env "test" {
url = var.db_url
dev = var.dev_db_url
}
-- patch-schema-config-from-configmap.yaml --
spec:
config:
configFrom:
secretKeyRef:
name: schema-config
key: config.hcl
-- patch-migration-config-from-configmap.yaml --
spec:
config:
configFrom:
secretKeyRef:
name: schema-config
name: migration-config
key: config.hcl
-- patch-invalid-url.yaml --
spec:
@@ -166,12 +102,6 @@ spec:
selector:
app: postgres
ports:
- name: postgres
port: 5432
targetPort: postgres
- name: postgres-dev
port: 5433
targetPort: postgres-dev
- name: pg-migrate
port: 5434
targetPort: pg-migrate
@@ -198,48 +128,6 @@ spec:
runAsNonRoot: true
runAsUser: 999
containers:
- name: postgres
image: postgres:15.4
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- all
env:
- name: POSTGRES_PASSWORD
value: pass
- name: POSTGRES_USER
value: root
ports:
- containerPort: 5432
name: postgres
startupProbe:
exec:
command: [ "pg_isready" ]
failureThreshold: 30
periodSeconds: 10
- name: postgres-dev
image: postgres:15.4
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- all
env:
- name: POSTGRES_PASSWORD
value: pass
- name: POSTGRES_USER
value: root
- name: PGPORT
value: "5433"
ports:
- containerPort: 5433
name: postgres-dev
startupProbe:
exec:
command: [ "pg_isready" ]
failureThreshold: 30
periodSeconds: 10
- name: pg-migrate
image: postgres:15.4
securityContext: