feat(pdb): add PodDisruptionBudget support

lwpk110 committed Nov 12, 2024
1 parent 8d5a385 commit 0ebf4ff
Showing 9 changed files with 165 additions and 116 deletions.
14 changes: 3 additions & 11 deletions api/v1alpha1/sparkhistoryserver_types.go
@@ -169,6 +169,9 @@ type RoleSpec struct {

RoleGroups map[string]*RoleGroupSpec `json:"roleGroups,omitempty"`

// +kubebuilder:validation:Optional
RoleConfig *commonsv1alpha1.RoleConfigSpec `json:"roleConfig,omitempty"`

// +kubebuilder:validation:Optional
CliOverrides []string `json:"cliOverrides,omitempty"`

@@ -191,9 +194,6 @@ type ConfigSpec struct {
// +kubebuilder:validation:Optional
Affinity *corev1.Affinity `json:"affinity"`

// +kubebuilder:validation:Optional
PodDisruptionBudget *PodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty"`

// Use time.ParseDuration to parse the string
// +kubebuilder:validation:Optional
GracefulShutdownTimeout *string `json:"gracefulShutdownTimeout,omitempty"`
@@ -208,14 +208,6 @@ type ConfigSpec struct {
Cleaner *bool `json:"cleaner,omitempty"`
}

type PodDisruptionBudgetSpec struct {
// +kubebuilder:validation:Optional
MinAvailable int32 `json:"minAvailable,omitempty"`

// +kubebuilder:validation:Optional
MaxUnavailable int32 `json:"maxUnavailable,omitempty"`
}

type RoleGroupSpec struct {
// +kubebuilder:validation:Optional
// +kubebuilder:default:=1
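
Note on the API change above: the per-role-group podDisruptionBudget field and the operator's own PodDisruptionBudgetSpec type are removed, and PDB settings move to a role-level roleConfig field typed as commonsv1alpha1.RoleConfigSpec, shared with the other zncdata operators. The commons type itself is not part of this diff; inferred from the roleConfig block in the regenerated CRD further down, it plausibly looks like the following sketch (field names, pointer-ness, and markers are assumptions, not the actual commons definition):

// Sketch only: shape inferred from the roleConfig schema in the regenerated CRD.
type RoleConfigSpec struct {
	// +kubebuilder:validation:Optional
	PodDisruptionBudget *PodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty"`
}

// This struct is used to configure:
// 1. If PodDisruptionBudgets are created by the operator
// 2. The allowed number of Pods to be unavailable (`maxUnavailable`)
type PodDisruptionBudgetSpec struct {
	// +kubebuilder:validation:Optional
	// +kubebuilder:default:=true
	Enabled bool `json:"enabled,omitempty"`

	// +kubebuilder:validation:Optional
	MaxUnavailable int32 `json:"maxUnavailable,omitempty"`
}
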
25 changes: 5 additions & 20 deletions api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

151 changes: 66 additions & 85 deletions config/crd/bases/spark.zncdata.dev_sparkhistoryservers.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.5
controller-gen.kubebuilder.io/version: v0.16.2
name: sparkhistoryservers.spark.zncdata.dev
spec:
group: spark.zncdata.dev
@@ -1169,18 +1169,15 @@ spec:
console:
description: |-
LogLevelSpec
level mapping example:
level mapping if app log level is not standard
- FATAL -> CRITICAL
- ERROR -> ERROR
- WARN -> WARNING
- INFO -> INFO
- DEBUG -> DEBUG
- TRACE -> DEBUG
|---------------------|-----------------|
| App log level | kds log level |
|---------------------|-----------------|
| CRITICAL | FATAL |
| ERROR | ERROR |
| WARNING | WARN |
| INFO | INFO |
| DEBUG | DEBUG |
| DEBUG | TRACE |
|---------------------|-----------------|
Default log level is INFO
properties:
level:
default: INFO
@@ -1196,18 +1193,15 @@ spec:
file:
description: |-
LogLevelSpec
level mapping example:
level mapping if app log level is not standard
- FATAL -> CRITICAL
- ERROR -> ERROR
- WARN -> WARNING
- INFO -> INFO
- DEBUG -> DEBUG
- TRACE -> DEBUG
|---------------------|-----------------|
| App log level | kds log level |
|---------------------|-----------------|
| CRITICAL | FATAL |
| ERROR | ERROR |
| WARNING | WARN |
| INFO | INFO |
| DEBUG | DEBUG |
| DEBUG | TRACE |
|---------------------|-----------------|
Default log level is INFO
properties:
level:
default: INFO
@@ -1224,18 +1218,15 @@ spec:
additionalProperties:
description: |-
LogLevelSpec
level mapping example:
level mapping if app log level is not standard
- FATAL -> CRITICAL
- ERROR -> ERROR
- WARN -> WARNING
- INFO -> INFO
- DEBUG -> DEBUG
- TRACE -> DEBUG
|---------------------|-----------------|
| App log level | kds log level |
|---------------------|-----------------|
| CRITICAL | FATAL |
| ERROR | ERROR |
| WARNING | WARN |
| INFO | INFO |
| DEBUG | DEBUG |
| DEBUG | TRACE |
|---------------------|-----------------|
Default log level is INFO
properties:
level:
default: INFO
@@ -1258,15 +1249,6 @@ spec:
enableVectorAgent:
type: boolean
type: object
podDisruptionBudget:
properties:
maxUnavailable:
format: int32
type: integer
minAvailable:
format: int32
type: integer
type: object
resources:
properties:
cpu:
@@ -1321,6 +1303,23 @@ spec:
type: object
podOverrides:
type: object
roleConfig:
properties:
podDisruptionBudget:
description: |-
This struct is used to configure:
1. If PodDisruptionBudgets are created by the operator
2. The allowed number of Pods to be unavailable (`maxUnavailable`)
properties:
enabled:
default: true
type: boolean
maxUnavailable:
format: int32
type: integer
type: object
type: object
roleGroups:
additionalProperties:
properties:
@@ -2286,18 +2285,15 @@ spec:
console:
description: |-
LogLevelSpec
level mapping example:
level mapping if app log level is not standard
- FATAL -> CRITICAL
- ERROR -> ERROR
- WARN -> WARNING
- INFO -> INFO
- DEBUG -> DEBUG
- TRACE -> DEBUG
|---------------------|-----------------|
| App log level | kds log level |
|---------------------|-----------------|
| CRITICAL | FATAL |
| ERROR | ERROR |
| WARNING | WARN |
| INFO | INFO |
| DEBUG | DEBUG |
| DEBUG | TRACE |
|---------------------|-----------------|
Default log level is INFO
properties:
level:
default: INFO
@@ -2313,18 +2309,15 @@ spec:
file:
description: |-
LogLevelSpec
level mapping example:
level mapping if app log level is not standard
- FATAL -> CRITICAL
- ERROR -> ERROR
- WARN -> WARNING
- INFO -> INFO
- DEBUG -> DEBUG
- TRACE -> DEBUG
|---------------------|-----------------|
| App log level | kds log level |
|---------------------|-----------------|
| CRITICAL | FATAL |
| ERROR | ERROR |
| WARNING | WARN |
| INFO | INFO |
| DEBUG | DEBUG |
| DEBUG | TRACE |
|---------------------|-----------------|
Default log level is INFO
properties:
level:
default: INFO
@@ -2341,18 +2334,15 @@ spec:
additionalProperties:
description: |-
LogLevelSpec
level mapping example:
level mapping if app log level is not standard
- FATAL -> CRITICAL
- ERROR -> ERROR
- WARN -> WARNING
- INFO -> INFO
- DEBUG -> DEBUG
- TRACE -> DEBUG
|---------------------|-----------------|
| App log level | kds log level |
|---------------------|-----------------|
| CRITICAL | FATAL |
| ERROR | ERROR |
| WARNING | WARN |
| INFO | INFO |
| DEBUG | DEBUG |
| DEBUG | TRACE |
|---------------------|-----------------|
Default log level is INFO
properties:
level:
default: INFO
@@ -2375,15 +2365,6 @@ spec:
enableVectorAgent:
type: boolean
type: object
podDisruptionBudget:
properties:
maxUnavailable:
format: int32
type: integer
minAvailable:
format: int32
type: integer
type: object
resources:
properties:
cpu:
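
Taken together with the type change above, a role now opts into a PDB through roleConfig.podDisruptionBudget, with enabled defaulting to true and maxUnavailable optional. An illustrative Go fragment building such a role spec, reusing the sketched commons types from earlier (import paths and exact field shapes remain assumptions, so this is orientation only, not the repository's actual code):

// Illustrative fragment: assumes the sketched RoleConfigSpec/PodDisruptionBudgetSpec above.
func exampleRoleWithPDB() v1alpha1.RoleSpec {
	return v1alpha1.RoleSpec{
		RoleConfig: &commonsv1alpha1.RoleConfigSpec{
			PodDisruptionBudget: &commonsv1alpha1.PodDisruptionBudgetSpec{
				Enabled:        true, // CRD default
				MaxUnavailable: 2,    // same value the e2e assert below expects
			},
		},
		RoleGroups: map[string]*v1alpha1.RoleGroupSpec{"default": {}},
	}
}
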
12 changes: 12 additions & 0 deletions config/rbac/role.yaml
@@ -46,6 +46,18 @@ rules:
- get
- list
- watch
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- s3.zncdata.dev
resources:
1 change: 1 addition & 0 deletions internal/controller/historyserver/controller.go
@@ -50,6 +50,7 @@ type SparkHistoryServerReconciler struct {
// +kubebuilder:rbac:groups=authentication.zncdata.dev,resources=authenticationclasses,verbs=get;list;watch
// +kubebuilder:rbac:groups=s3.zncdata.dev,resources=s3connections,verbs=get;list;watch
// +kubebuilder:rbac:groups=s3.zncdata.dev,resources=s3buckets,verbs=get;list;watch
// +kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=get;list;watch;create;update;patch;delete

func (r *SparkHistoryServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {

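
The new kubebuilder marker above is what generates the policy/poddisruptionbudgets rules added to config/rbac/role.yaml. The reconciliation code itself is not shown in this excerpt; below is a minimal sketch of how a controller-runtime reconciler could create or update the role-level PDB, assuming the operator names it <cluster>-<role> (which matches the sparkhistory-node object asserted in the e2e test) and selects pods by the usual app.kubernetes.io labels:

package historyserver

import (
	"context"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// reconcilePDB creates or updates one PodDisruptionBudget per role.
// Naming and label selection are assumptions for illustration; owner
// references and cleanup when the PDB is disabled are omitted for brevity.
func reconcilePDB(ctx context.Context, c client.Client, namespace, cluster, role string, maxUnavailable int32) error {
	pdb := &policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{
			Name:      cluster + "-" + role, // e.g. "sparkhistory-node"
			Namespace: namespace,
		},
	}
	_, err := controllerutil.CreateOrUpdate(ctx, c, pdb, func() error {
		// Select every pod of this role, across all role groups (assumed label keys).
		pdb.Spec.Selector = &metav1.LabelSelector{
			MatchLabels: map[string]string{
				"app.kubernetes.io/instance": cluster,
				"app.kubernetes.io/name":     "sparkhistoryserver",
			},
		}
		max := intstr.FromInt32(maxUnavailable)
		pdb.Spec.MaxUnavailable = &max
		return nil
	})
	return err
}
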
38 changes: 38 additions & 0 deletions test/e2e/pdb/chainsaw-test.yaml
@@ -0,0 +1,38 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
name: pdb
spec:
bindings:
- name: MINIO_USER
value: miniouser
- name: MINIO_PASSWORD
value: miniouserpassword
- name: MINIO_BUCKET
value: spark-history
steps:
- try:
- apply:
file: ../setup/minio.yaml
- assert:
file: ../setup/minio-assert.yaml
- try:
- apply:
file: ../setup/spark-s3-credentials.yaml
- assert:
file: ../setup/spark-s3-credentials-assert.yaml
- apply:
file: ../setup/s3bucket.yaml
- assert:
file: ../setup/s3bucket.yaml
cleanup:
- try:
- apply:
file: sparkhistoryserver.yaml
- assert:
file: sparkhistoryserver-assert.yaml
- assert:
file: pdb-assert.yaml
cleanup:
- sleep:
duration: 60s
6 changes: 6 additions & 0 deletions test/e2e/pdb/pdb-assert.yaml
@@ -0,0 +1,6 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: sparkhistory-node
spec:
maxUnavailable: 2
15 changes: 15 additions & 0 deletions test/e2e/pdb/sparkhistoryserver-assert.yaml
@@ -0,0 +1,15 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sparkhistory-node-default
status:
availableReplicas: 1
replicas: 1
readyReplicas: 1
---
apiVersion: v1
kind: Service
metadata:
name: sparkhistory-node-default
spec:
type: ClusterIP