diff --git a/.chainsaw.yaml b/.chainsaw.yaml index 41e9964..cb81a19 100644 --- a/.chainsaw.yaml +++ b/.chainsaw.yaml @@ -5,11 +5,11 @@ metadata: spec: timeouts: apply: 120s - assert: 180s + assert: 200s cleanup: 120s delete: 120s error: 10s exec: 45s + forceTerminationGracePeriod: 10s # skipDelete: true failFast: true - parallel: 1 # use 1 concurrent to test, to voide multiple csi driver conflict diff --git a/Makefile b/Makefile index 1207475..d001875 100644 --- a/Makefile +++ b/Makefile @@ -339,9 +339,8 @@ $(CHAINSAW): $(LOCALBIN) .PHONY: chainsaw-setup chainsaw-setup: ## Run the chainsaw setup - make docker-build make csi-docker-build - $(KIND) --name $(KIND_CLUSTER_NAME) load docker-image $(IMG) $(CSIDRIVER_IMG) + $(KIND) --name $(KIND_CLUSTER_NAME) load docker-image $(CSIDRIVER_IMG) KUBECONFIG=$(KIND_KUBECONFIG) make helm-install-depends KUBECONFIG=$(KIND_KUBECONFIG) make deploy diff --git a/cmd/csi_driver/main.go b/cmd/csi_driver/main.go index 45ab5b3..eccdf9b 100644 --- a/cmd/csi_driver/main.go +++ b/cmd/csi_driver/main.go @@ -29,6 +29,7 @@ import ( // to ensure that exec-entrypoint and run can make use of them. _ "k8s.io/client-go/plugin/pkg/client/auth" + listenerv1alpha1 "github.com/zncdatadev/operator-go/pkg/apis/listeners/v1alpha1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -55,6 +56,7 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(listenerv1alpha1.AddToScheme(scheme)) utilruntime.Must(secretv1alpha1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme diff --git a/config/csi/daemonset.yaml b/config/csi/daemonset.yaml index 92d5de5..a976780 100644 --- a/config/csi/daemonset.yaml +++ b/config/csi/daemonset.yaml @@ -27,7 +27,7 @@ spec: securityContext: {} containers: - - name: secret-operator + - name: secret-csi-driver securityContext: privileged: true runAsUser: 0 @@ -35,11 +35,11 @@ spec: imagePullPolicy: IfNotPresent resources: limits: - cpu: 100m + cpu: 512m memory: 128Mi requests: - cpu: 100m - memory: 128Mi + cpu: 50m + memory: 50Mi env: - name: ADDRESS value: unix:///csi/csi.sock diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 406af05..2dceb70 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -38,6 +38,14 @@ rules: - patch - update - watch +- apiGroups: + - listeners.kubedoop.dev + resources: + - listeners + verbs: + - get + - list + - watch - apiGroups: - secrets.kubedoop.dev resources: diff --git a/internal/controller/secretclass_controller.go b/internal/controller/secretclass_controller.go index de5182e..bff3e1e 100644 --- a/internal/controller/secretclass_controller.go +++ b/internal/controller/secretclass_controller.go @@ -44,6 +44,7 @@ type SecretClassReconciler struct { // +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch // +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch // +kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=listeners.kubedoop.dev,resources=listeners,verbs=get;list;watch // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
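Aside on the scheme and RBAC additions above: the CSI driver now reads Listener objects (see the pkg/pod_info change further down), which is why listenerv1alpha1 is registered in the scheme in cmd/csi_driver/main.go and the role gains get/list/watch on listeners.kubedoop.dev. A minimal sketch of the kind of lookup this enables, assuming the Listener kind that pkg/pod_info already consumes; the helper name getListener is illustrative and not part of this change set:

```go
package example

import (
	"context"

	listenerv1alpha1 "github.com/zncdatadev/operator-go/pkg/apis/listeners/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getListener fetches a Listener by namespace/name. The typed Get only works
// because listenerv1alpha1.AddToScheme(scheme) was called in init(); without
// it the controller-runtime client cannot map the Go type to a GroupVersionKind.
// The RBAC rule added above grants the driver read access to these objects.
func getListener(ctx context.Context, c client.Client, namespace, name string) (*listenerv1alpha1.Listener, error) {
	listener := &listenerv1alpha1.Listener{}
	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, listener); err != nil {
		return nil, err
	}
	return listener, nil
}
```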
diff --git a/internal/csi/backend/ca/ca_manager.go b/internal/csi/backend/ca/ca_manager.go index 04ef085..d7bb194 100644 --- a/internal/csi/backend/ca/ca_manager.go +++ b/internal/csi/backend/ca/ca_manager.go @@ -22,10 +22,10 @@ var ( ) type CertificateManager struct { - client client.Client - caCertficateLifetime time.Duration - auto bool - name, namespace string + client client.Client + caCertificateLifetime time.Duration + auto bool + name, namespace string secret *corev1.Secret cas []*CertificateAuthority @@ -39,16 +39,16 @@ type CertificateManager struct { // Now, pem key supports only RSA 256. func NewCertificateManager( client client.Client, - caCertficateLifetime time.Duration, + caCertificateLifetime time.Duration, auto bool, name, namespace string, ) *CertificateManager { obj := &CertificateManager{ - client: client, - caCertficateLifetime: caCertficateLifetime, - auto: auto, - name: name, - namespace: namespace, + client: client, + caCertificateLifetime: caCertificateLifetime, + auto: auto, + name: name, + namespace: namespace, secret: &corev1.Secret{ ObjectMeta: ctrl.ObjectMeta{ @@ -95,7 +95,7 @@ func (c *CertificateManager) secretCreateIfDoesNotExist(ctx context.Context) err return err } - logger.V(1).Info("Created a new secret", "name", c.name, "namespace", c.namespace, "auto", c.auto) + logger.V(1).Info("created a new secret", "name", c.name, "namespace", c.namespace, "auto", c.auto) return nil } @@ -115,7 +115,7 @@ func (c CertificateManager) getPEMKeyPairsFromSecret(ctx context.Context) ([]PEM } } - logger.V(0).Info("Get certificate authorities PEM key pairs from secret", "name", c.name, "namespace", c.namespace, "len", len(keyPairs)) + logger.V(0).Info("got certificate authorities PEM key pairs from secret", "name", c.name, "namespace", c.namespace, "len", len(keyPairs)) return keyPairs, nil } @@ -171,7 +171,7 @@ func (c *CertificateManager) getCertificateAuthorities(pemKeyPairs []PEMkeyPair) return nil, err } if ca.Certificate.NotAfter.Before(time.Now()) { - logger.V(0).Info("Certificate authority is expired, skip it.", "serialNumber", ca.SerialNumber(), "notAfter", ca.Certificate.NotAfter) + logger.V(0).Info("certificate authority is expired, skip it.", "serialNumber", ca.SerialNumber(), "notAfter", ca.Certificate.NotAfter) continue } cas = append(cas, ca) @@ -187,7 +187,7 @@ func (c *CertificateManager) getCertificateAuthorities(pemKeyPairs []PEMkeyPair) ) } - logger.V(0).Info("Could not find any certificate authorities, created a new self-signed certificate authority", "name", c.name, "namespace", c.namespace, "auto", c.auto) + logger.V(0).Info("could not find any certificate authorities, created a new self-signed certificate authority", "name", c.name, "namespace", c.namespace, "auto", c.auto) ca, err := c.createSelfSignedCertificateAuthority() if err != nil { return nil, err @@ -208,12 +208,12 @@ func (c *CertificateManager) getCertificateAuthorities(pemKeyPairs []PEMkeyPair) // create a new self-signed certificate authority only no certificate authority is found func (c *CertificateManager) createSelfSignedCertificateAuthority() (*CertificateAuthority, error) { - notAfter := time.Now().Add(c.caCertficateLifetime) + notAfter := time.Now().Add(c.caCertificateLifetime) ca, err := NewSelfSignedCertificateAuthority(notAfter, nil, nil) if err != nil { return nil, err } - logger.V(0).Info("Created new self-signed certificate authority", "serialNumber", ca.SerialNumber(), "notAfter", ca.Certificate.NotAfter) + logger.V(0).Info("created new self-signed certificate 
authority", "serialNumber", ca.SerialNumber(), "notAfter", ca.Certificate.NotAfter) return ca, nil } @@ -242,25 +242,25 @@ func (c *CertificateManager) rotateCertificateAuthority(cas []*CertificateAuthor newestCA := cas[len(cas)-1] - if time.Now().Add(c.caCertficateLifetime / 2).After(newestCA.Certificate.NotAfter) { + if time.Now().Add(c.caCertificateLifetime / 2).After(newestCA.Certificate.NotAfter) { if c.auto { - newCA, err := newestCA.Rotate(time.Now().Add(c.caCertficateLifetime)) + newCA, err := newestCA.Rotate(time.Now().Add(c.caCertificateLifetime)) if err != nil { return nil, err } - logger.V(0).Info("Rotated certificate authority, because the old ca is about to expire", + logger.V(0).Info("rotated certificate authority, because the old ca is about to expire", "serialNumber", newestCA.SerialNumber(), "notAfter", newCA.Certificate.NotAfter, ) cas = append(cas, newCA) } else { - logger.V(0).Info("Certificate authority is about to expire, but auto-generate is disabled, please rotate manually.", + logger.V(0).Info("certificate authority is about to expire, but auto-generate is disabled, please rotate manually.", "serialNumber", newestCA.SerialNumber(), "notAfter", newestCA.Certificate.NotAfter, ) } } else { - logger.V(0).Info("Certificate authority is still valid, no need to rotate", + logger.V(0).Info("certificate authority is still valid, no need to rotate", "serialNumber", newestCA.SerialNumber(), "notAfter", newestCA.Certificate.NotAfter, ) @@ -283,7 +283,7 @@ func (c *CertificateManager) getAliveCertificateAuthority(atAfter time.Time, cas } return 0 }) - logger.V(0).Info("Get alive certificate authority", "serialNumber", oldestCA.SerialNumber(), "notAfter", oldestCA.Certificate.NotAfter) + logger.V(0).Info("got alive certificate authority", "serialNumber", oldestCA.SerialNumber(), "notAfter", oldestCA.Certificate.NotAfter) return oldestCA } diff --git a/internal/csi/controller.go b/internal/csi/controller.go index 3e992f8..94c805d 100644 --- a/internal/csi/controller.go +++ b/internal/csi/controller.go @@ -56,7 +56,7 @@ func (c *ControllerServer) CreateVolume(ctx context.Context, request *csi.Create requiredCap := request.CapacityRange.GetRequiredBytes() if request.Parameters["secretFinalizer"] == "true" { - logger.V(1).Info("Finalizer is true") + logger.V(1).Info("finalizer is true") } // requests.parameters is StorageClass.Parameters, which is set by user when creating PVC. 
@@ -179,7 +179,7 @@ func (c *ControllerServer) DeleteVolume(ctx context.Context, request *csi.Delete } if !dynamic { - logger.V(5).Info("Volume is not dynamic, skip delete volume") + logger.V(5).Info("volume is not dynamic, skip delete volume") return &csi.DeleteVolumeResponse{}, nil } diff --git a/internal/csi/driver.go b/internal/csi/driver.go index 47123d2..46652b7 100644 --- a/internal/csi/driver.go +++ b/internal/csi/driver.go @@ -47,7 +47,7 @@ func NewDriver( func (d *Driver) Run(ctx context.Context, testMode bool) error { - logger.V(1).Info("Driver information", "versionInfo", version.GetVersion(d.name)) + logger.V(1).Info("driver information", "versionInfo", version.GetVersion(d.name)) // check node id if d.nodeID == "" { @@ -56,7 +56,7 @@ func (d *Driver) Run(ctx context.Context, testMode bool) error { ns := NewNodeServer( d.nodeID, - mount.New(""), + mount.New("secret-csi"), d.client, ) @@ -72,7 +72,7 @@ func (d *Driver) Run(ctx context.Context, testMode bool) error { }() d.server.Wait() - logger.Info("Server stopped") + logger.Info("csi driver stopped") return nil } diff --git a/internal/csi/node.go b/internal/csi/node.go index 6cdbf5c..2313be5 100644 --- a/internal/csi/node.go +++ b/internal/csi/node.go @@ -144,7 +144,7 @@ func (n *NodeServer) updatePod(ctx context.Context, pod *corev1.Pod, volumeID st } patch := client.MergeFrom(pod.DeepCopy()) if expiresTime == nil { - logger.V(5).Info("Expiration time is nil, skip update pod annotation", "pod", pod.Name) + logger.V(5).Info("expiration time is nil, skip update pod annotation", "pod", pod.Name) return nil } @@ -157,14 +157,14 @@ func (n *NodeServer) updatePod(ctx context.Context, pod *corev1.Pod, volumeID st annotationExpiresName := constants.PrefixLabelRestarterExpiresAt + hex.EncodeToString(volumeTag) expiresTimeStr := expiresTime.Format(time.RFC3339) - logger.V(5).Info("Update pod annotation", "pod", pod.Name, "key", annotationExpiresName, "value", expiresTimeStr) + logger.V(5).Info("update pod annotation", "pod", pod.Name, "key", annotationExpiresName, "value", expiresTimeStr) pod.Annotations[annotationExpiresName] = expiresTimeStr if err := n.client.Patch(ctx, pod, patch); err != nil { return err } - logger.V(5).Info("Pod patched", "pod", pod.Name) + logger.V(5).Info("pod patched", "pod", pod.Name) return nil } @@ -177,9 +177,9 @@ func (n *NodeServer) writeData(targetPath string, data map[string]string) error if err := os.WriteFile(fileName, []byte(content), fs.FileMode(0644)); err != nil { return err } - logger.V(5).Info("File written", "file", fileName) + logger.V(5).Info("file written", "file", fileName) } - logger.V(5).Info("Data written", "target", targetPath) + logger.V(5).Info("data written", "target", targetPath) return nil } @@ -218,7 +218,7 @@ func (n *NodeServer) mount(targetPath string) error { if err := n.mounter.Mount("tmpfs", targetPath, "tmpfs", opts); err != nil { return status.Error(codes.Internal, err.Error()) } - logger.V(1).Info("Volume mounted", "source", "tmpfs", "target", targetPath, "fsType", "tmpfs", "options", opts) + logger.V(1).Info("volume mounted", "source", "tmpfs", "target", targetPath, "fsType", "tmpfs", "options", opts) return nil } @@ -239,7 +239,7 @@ func (n *NodeServer) NodeUnpublishVolume(ctx context.Context, request *csi.NodeU if err := n.mounter.Unmount(targetPath); err != nil { // FIXME: use status.Error to return error // return nil, status.Error(codes.Internal, err.Error()) - logger.V(0).Info("Volume not found, skip delete volume") + logger.V(0).Info("volume not found, skip 
delete volume") } // remove the target path diff --git a/internal/csi/server.go b/internal/csi/server.go index 5583539..896f164 100644 --- a/internal/csi/server.go +++ b/internal/csi/server.go @@ -72,7 +72,7 @@ func (s *nonBlockingServer) serveGrpc(endpoint string, ids csi.IdentityServer, c if proto == "unix" { addr = "/" + addr if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { - logger.V(0).Info("Failed to remove", "addr", addr, "error", err.Error()) + logger.V(0).Info("failed to remove", "addr", addr, "error", err.Error()) } } diff --git a/pkg/kerberos/kadmin.go b/pkg/kerberos/kadmin.go index 4b9e17e..ed470ed 100644 --- a/pkg/kerberos/kadmin.go +++ b/pkg/kerberos/kadmin.go @@ -118,7 +118,7 @@ func (k *Kadmin) Query(query string) (result string, err error) { kadminLogger.Error(err, "Failed to execute kadmin query", "cmd", cmd.String(), "output", result) return "", err } - kadminLogger.Info("Executed kadmin query", "cmd", cmd.String(), "output", result) + kadminLogger.Info("executed kadmin query", "cmd", cmd.String(), "output", result) return result, nil @@ -149,7 +149,7 @@ func (k *Kadmin) Ktadd(principals ...string) ([]byte, error) { return nil, err } - kadminLogger.Info("Saved keytab", "principal", principals, "keytab", keytab, "output", output) + kadminLogger.Info("saved keytab", "principal", principals, "keytab", keytab, "output", output) return os.ReadFile(keytab) } @@ -186,7 +186,7 @@ func (k *Kadmin) AddPrincipal(principal string) error { return err } - kadminLogger.Info("Added principal", "principal", principal, "output", output) + kadminLogger.Info("created a new principal", "principal", principal, "output", output) return nil } diff --git a/pkg/pod_info/pod_info.go b/pkg/pod_info/pod_info.go index fc689cb..0c4f996 100644 --- a/pkg/pod_info/pod_info.go +++ b/pkg/pod_info/pod_info.go @@ -76,7 +76,7 @@ func (p *PodInfo) getNodeAddresses(ctx context.Context) ([]Address, error) { } } - logger.V(1).Info("get node ip filter by internal and external", "pod", p.getPodName(), + logger.V(1).Info("got node ip filter by internal and external", "pod", p.getPodName(), "namespace", p.getPodNamespace(), "addresses", addresses) return addresses, nil } @@ -333,6 +333,12 @@ func (p *PodInfo) getListenerAddresses(ctx context.Context) ([]Address, error) { if err != nil { return nil, err } + + // check listener status + if len(listener.Status.IngressAddresses) == 0 { + return nil, fmt.Errorf("listener %s/%s status not ready", listener.Namespace, listener.Name) + } + for _, ingressAddress := range listener.Status.IngressAddresses { if ingressAddress.AddressType == operatorlistenersv1alpha1.AddressTypeHostname { addresses = append(addresses, Address{ diff --git a/pkg/util/log.go b/pkg/util/log.go index 6e4ab01..c8b8f36 100644 --- a/pkg/util/log.go +++ b/pkg/util/log.go @@ -14,30 +14,32 @@ var ( log = ctrl.Log.WithName("csi-grpc") ) -func GetLogLevel(method string) int { +func getLogLevel(method string) int { + v := ctrl.Log.GetV() + if method == "/csi.v1.Identity/Probe" || method == "/csi.v1.Node/NodeGetCapabilities" || method == "/csi.v1.Node/NodeGetVolumeStats" { return 8 } - return 2 + return v } func LogGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - level := GetLogLevel(info.FullMethod) - log.V(level).Info("GRPC calling", "method", info.FullMethod, "request", protosanitizer.StripSecrets(req)) + level := getLogLevel(info.FullMethod) + log.V(level).Info("gRPC calling", "method", info.FullMethod, "request", 
protosanitizer.StripSecrets(req)) resp, err := handler(ctx, req) if err != nil { - log.Error(err, "GRPC called error", "method", info.FullMethod) + log.Error(err, "RPC called error", "method", info.FullMethod) if level >= 5 { stack := debug.Stack() errStack := fmt.Errorf("\n%s", stack) log.Error(err, "GRPC called error", errStack.Error()) } } else { - log.V(level).Info("GRPC called", "method", info.FullMethod, "response", protosanitizer.StripSecrets(resp)) + log.V(level).Info("gRPC called", "method", info.FullMethod, "response", protosanitizer.StripSecrets(resp)) } return resp, err } diff --git a/pkg/volume/volume.go b/pkg/volume/volume.go index d01d098..5b37451 100644 --- a/pkg/volume/volume.go +++ b/pkg/volume/volume.go @@ -188,7 +188,7 @@ func NewvolumeContextFromMap(parameters map[string]string) (*SecretVolumeContext case VolumeKubernetesStorageProvisioner: v.Provisioner = value case DeprecatedVolumeKubernetesStorageProvisioner: - logger.V(0).Info("Deprecated key since v1.23, please use new key", + logger.V(0).Info("deprecated key since v1.23, please use new key", "key", key, "value", value, "new key", VolumeKubernetesStorageProvisioner, diff --git a/test/e2e/krb5/chainsaw-test.yaml b/test/e2e/krb5/chainsaw-test.yaml index 75cd8c4..5c73291 100644 --- a/test/e2e/krb5/chainsaw-test.yaml +++ b/test/e2e/krb5/chainsaw-test.yaml @@ -89,6 +89,14 @@ spec: file: node-scope.yaml - assert: file: node-scope-assert.yaml + catch: + - script: + env: + - name: NAMESPACE + value: ($namespace) + content: | + kubectl -n "$NAMESPACE" get pod + kubectl -n "$NAMESPACE" describe pod - try: - apply: file: service-scope.yaml diff --git a/test/e2e/krb5/krb5.yaml b/test/e2e/krb5/krb5.yaml index 9688116..2fce1d3 100644 --- a/test/e2e/krb5/krb5.yaml +++ b/test/e2e/krb5/krb5.yaml @@ -13,7 +13,7 @@ spec: spec: containers: - name: krb5 - image: quay.io/zncdatadev-test/krb5:dev + image: quay.io/zncdatadev/krb5:1.21.1-kubedoop0.0.0-dev args: - -r - ($relam) # chainsaw bindings value @@ -50,8 +50,8 @@ spec: mountPath: /data resources: limits: - memory: "526Mi" - cpu: "300m" + memory: "128Mi" + cpu: "100m" readinessProbe: exec: command: diff --git a/test/e2e/krb5/node-scope-assert.yaml b/test/e2e/krb5/node-scope-assert.yaml index c6dc451..491eaf6 100644 --- a/test/e2e/krb5/node-scope-assert.yaml +++ b/test/e2e/krb5/node-scope-assert.yaml @@ -4,9 +4,7 @@ kind: Pod metadata: name: krb5-node-scope status: - phase: Running + phase: Succeeded containerStatuses: - name: main - ready: true restartCount: 0 - started: true diff --git a/test/e2e/krb5/node-scope.yaml b/test/e2e/krb5/node-scope.yaml index 610c214..9d71d16 100644 --- a/test/e2e/krb5/node-scope.yaml +++ b/test/e2e/krb5/node-scope.yaml @@ -5,56 +5,45 @@ metadata: labels: name: krb5-node-scope spec: + restartPolicy: Never containers: - name: main - image: rockylinux/rockylinux:9 + image: quay.io/zncdatadev/testing-tools:0.1.0-kubedoop0.0.0-dev command: - "sh" - "-c" - | - set -ex - dnf install krb5-workstation nginx -y - echo "Loop is running... (Press Ctrl+C or send SIGTERM to exit)" - while ! test -f /opt/secret/keytab; do + while ! test -f /kubedoop/secret/keytab; do sleep 1 echo "Waiting for content..." 
done - KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /opt/secret//krb5.conf) + KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /kubedoop/secret/krb5.conf) - klist -kt /opt/secret/keytab + klist -kt /kubedoop/secret/keytab - kinit -kt /opt/secret/keytab foo/$NODE_NAME@$KERBEROS_REALM + kinit -kt /kubedoop/secret/keytab foo/$NODE_NAME@$KERBEROS_REALM klist -e - - echo start nginx server - nginx -g "daemon off;" env: - name: NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName - name: KRB5_CONFIG - value: /opt/secret//krb5.conf + value: /kubedoop/secret/krb5.conf resources: limits: - memory: "526Mi" - cpu: "500m" + memory: "50Mi" + cpu: "50m" ports: - containerPort: 80 protocol: TCP name: http volumeMounts: - name: secret - mountPath: /opt/secret - readinessProbe: - httpGet: - port: http - initialDelaySeconds: 5 - periodSeconds: 5 - + mountPath: /kubedoop/secret volumes: - name: secret ephemeral: diff --git a/test/e2e/krb5/pod-scope-assert.yaml b/test/e2e/krb5/pod-scope-assert.yaml index 833384a..ebdace7 100644 --- a/test/e2e/krb5/pod-scope-assert.yaml +++ b/test/e2e/krb5/pod-scope-assert.yaml @@ -1,17 +1,10 @@ --- apiVersion: v1 -kind: Service -metadata: - name: krb5-pod-scope-service ---- -apiVersion: v1 kind: Pod metadata: name: krb5-pod-scope status: - phase: Running + phase: Succeeded containerStatuses: - name: main - ready: true restartCount: 0 - started: true diff --git a/test/e2e/krb5/pod-scope.yaml b/test/e2e/krb5/pod-scope.yaml index e414013..e80cbac 100644 --- a/test/e2e/krb5/pod-scope.yaml +++ b/test/e2e/krb5/pod-scope.yaml @@ -1,15 +1,3 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: krb5-pod-scope-service -spec: - selector: - app: krb5-pod-scope - ports: - - port: 80 - targetPort: http - --- apiVersion: v1 kind: Pod @@ -18,33 +6,28 @@ metadata: labels: name: krb5-pod-scope spec: + restartPolicy: Never containers: - name: main - image: rockylinux/rockylinux:9 + image: quay.io/zncdatadev/testing-tools:0.1.0-kubedoop0.0.0-dev command: - "sh" - "-c" - | - set -ex - dnf install krb5-workstation nginx -y - echo "Loop is running... (Press Ctrl+C or send SIGTERM to exit)" - while ! test -f /opt/secret/keytab; do + while ! test -f /kubedoop/secret/keytab; do sleep 1 echo "Waiting for content..." 
done - KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /opt/secret//krb5.conf) + KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /kubedoop/secret//krb5.conf) - klist -kt /opt/secret/keytab + klist -kt /kubedoop/secret/keytab - kinit -kt /opt/secret/keytab foo/krb5-pod-scope-service.$NAMESPACE.svc.cluster.local@$KERBEROS_REALM - kinit -kt /opt/secret/keytab foo/krb5-pod-scope.krb5-pod-scope-service.$NAMESPACE.svc.cluster.local@$KERBEROS_REALM + kinit -kt /kubedoop/secret/keytab foo/krb5-pod-scope-service.$NAMESPACE.svc.cluster.local@$KERBEROS_REALM + kinit -kt /kubedoop/secret/keytab foo/krb5-pod-scope.krb5-pod-scope-service.$NAMESPACE.svc.cluster.local@$KERBEROS_REALM klist -e - - echo start nginx server - nginx -g "daemon off;" env: - name: NAMESPACE valueFrom: @@ -55,24 +38,18 @@ spec: fieldRef: fieldPath: spec.nodeName - name: KRB5_CONFIG - value: /opt/secret//krb5.conf + value: /kubedoop/secret/krb5.conf resources: limits: - memory: "526Mi" - cpu: "500m" + memory: "50Mi" + cpu: "50m" ports: - containerPort: 80 protocol: TCP name: http - volumeMounts: - name: secret - mountPath: /opt/secret - readinessProbe: - httpGet: - port: http - initialDelaySeconds: 5 - periodSeconds: 5 + mountPath: /kubedoop/secret subdomain: krb5-pod-scope-service volumes: - name: secret diff --git a/test/e2e/krb5/service-scope-assert.yaml b/test/e2e/krb5/service-scope-assert.yaml index 66ccb43..ed08850 100644 --- a/test/e2e/krb5/service-scope-assert.yaml +++ b/test/e2e/krb5/service-scope-assert.yaml @@ -10,9 +10,7 @@ kind: Pod metadata: name: krb5-svc-scope status: - phase: Running + phase: Succeeded containerStatuses: - name: main - ready: true restartCount: 0 - started: true diff --git a/test/e2e/krb5/service-scope.yaml b/test/e2e/krb5/service-scope.yaml index 6463e41..db96c40 100644 --- a/test/e2e/krb5/service-scope.yaml +++ b/test/e2e/krb5/service-scope.yaml @@ -9,7 +9,6 @@ spec: ports: - port: 80 targetPort: http - --- apiVersion: v1 kind: Pod @@ -18,32 +17,28 @@ metadata: labels: name: krb5-svc-scope spec: + restartPolicy: Never containers: - name: main - image: rockylinux/rockylinux:9 + image: quay.io/zncdatadev/testing-tools:0.1.0-kubedoop0.0.0-dev command: - "sh" - "-c" - | - set -ex - dnf install krb5-workstation nginx -y - echo "Loop is running... (Press Ctrl+C or send SIGTERM to exit)" - while ! test -f /opt/secret/keytab; do + while ! test -f /kubedoop/secret/keytab; do sleep 1 echo "Waiting for content..." 
done - KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /opt/secret//krb5.conf) + KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /kubedoop/secret/krb5.conf) - klist -kt /opt/secret/keytab + klist -kt /kubedoop/secret/keytab - kinit -kt /opt/secret/keytab foo/krb5-svc-scope-service.$NAMESPACE.svc.cluster.local@$KERBEROS_REALM + kinit -kt /kubedoop/secret/keytab foo/krb5-svc-scope-service.$NAMESPACE.svc.cluster.local@$KERBEROS_REALM klist -e - echo start nginx server - nginx -g "daemon off;" env: - name: NAMESPACE valueFrom: @@ -54,23 +49,14 @@ spec: fieldRef: fieldPath: spec.nodeName - name: KRB5_CONFIG - value: /opt/secret//krb5.conf + value: /kubedoop/secret/krb5.conf resources: limits: - memory: "526Mi" - cpu: "500m" - ports: - - containerPort: 80 - protocol: TCP - name: http + memory: "50Mi" + cpu: "50m" volumeMounts: - name: secret - mountPath: /opt/secret - readinessProbe: - httpGet: - port: http - initialDelaySeconds: 5 - periodSeconds: 5 + mountPath: /kubedoop/secret volumes: - name: secret ephemeral: diff --git a/test/e2e/search-in-default-ns/01-assert.yaml b/test/e2e/search-in-default-ns/01-assert.yaml index cf16fa6..48d9508 100644 --- a/test/e2e/search-in-default-ns/01-assert.yaml +++ b/test/e2e/search-in-default-ns/01-assert.yaml @@ -4,9 +4,7 @@ kind: Pod metadata: name: search-ns status: - phase: Running + phase: Succeeded containerStatuses: - name: pod - ready: true restartCount: 0 - started: true diff --git a/test/e2e/search-in-default-ns/01-pod-for-ns.yaml b/test/e2e/search-in-default-ns/01-pod-for-ns.yaml index 02bd076..bb5d0bc 100644 --- a/test/e2e/search-in-default-ns/01-pod-for-ns.yaml +++ b/test/e2e/search-in-default-ns/01-pod-for-ns.yaml @@ -5,6 +5,7 @@ metadata: labels: name: search-ns spec: + restartPolicy: Never containers: - name: pod image: busybox:stable @@ -15,24 +16,18 @@ spec: - "sh" - "-c" - | - trap 'echo "Exiting..."; exit 1' SIGINT SIGTERM - echo "Loop is running... (Press Ctrl+C or send SIGTERM to exit)" - while true; do + # Continuous check /kubedoop/secret/mode exist + while ! test -f /kubedoop/secret/mode; do sleep 1 - tail -f /opt/secret/mode || (sleep 1 && echo 'Waiting for content...') + echo "Waiting for content..." done resources: limits: - memory: "512Mi" - cpu: "500m" + memory: "50Mi" + cpu: "50m" volumeMounts: - name: secret - mountPath: /opt/secret - readinessProbe: - exec: - command: ["sh", "-c", "cat /opt/secret/mode | grep -q search-default-ns"] - initialDelaySeconds: 1 - periodSeconds: 5 + mountPath: /kubedoop/secret volumes: - name: secret ephemeral: diff --git a/test/e2e/search-in-default-ns/11-assert.yaml b/test/e2e/search-in-default-ns/11-assert.yaml index 167a40c..4a77fed 100644 --- a/test/e2e/search-in-default-ns/11-assert.yaml +++ b/test/e2e/search-in-default-ns/11-assert.yaml @@ -4,9 +4,7 @@ kind: Pod metadata: name: search-with-pod status: - phase: Running + phase: Succeeded containerStatuses: - name: pod - ready: true restartCount: 0 - started: true diff --git a/test/e2e/search-in-default-ns/11-pod-for-pod.yaml b/test/e2e/search-in-default-ns/11-pod-for-pod.yaml index 1697b5e..9111295 100644 --- a/test/e2e/search-in-default-ns/11-pod-for-pod.yaml +++ b/test/e2e/search-in-default-ns/11-pod-for-pod.yaml @@ -5,6 +5,7 @@ metadata: labels: name: search-with-pod spec: + restartPolicy: Never containers: - name: pod image: busybox:stable @@ -15,24 +16,18 @@ spec: - "sh" - "-c" - | - trap 'echo "Exiting..."; exit 1' SIGINT SIGTERM - echo "Loop is running... 
(Press Ctrl+C or send SIGTERM to exit)" - while true; do + # Continuous check /kubedoop/secret/mode exist + while ! test -f /kubedoop/secret/mode; do sleep 1 - tail -f /opt/secret/mode || (sleep 1 && echo 'Waiting for content...') + echo "Waiting for content..." done resources: limits: - memory: "512Mi" - cpu: "500m" + memory: "50Mi" + cpu: "50m" volumeMounts: - name: secret - mountPath: /opt/secret - readinessProbe: - exec: - command: ["sh", "-c", "cat /opt/secret/mode | grep -q search-with-pod"] - initialDelaySeconds: 1 - periodSeconds: 5 + mountPath: /kubedoop/secret volumes: - name: secret ephemeral: diff --git a/test/e2e/search-in-default-ns/21-assert.yaml b/test/e2e/search-in-default-ns/21-assert.yaml index 1b1a16d..2443139 100644 --- a/test/e2e/search-in-default-ns/21-assert.yaml +++ b/test/e2e/search-in-default-ns/21-assert.yaml @@ -4,9 +4,7 @@ kind: Pod metadata: name: search-with-pod-scope status: - phase: Running + phase: Succeeded containerStatuses: - name: pod - ready: true restartCount: 0 - started: true diff --git a/test/e2e/search-in-default-ns/21-pod-with-scope.yaml b/test/e2e/search-in-default-ns/21-pod-with-scope.yaml index 915db25..a62e11f 100644 --- a/test/e2e/search-in-default-ns/21-pod-with-scope.yaml +++ b/test/e2e/search-in-default-ns/21-pod-with-scope.yaml @@ -5,6 +5,7 @@ metadata: labels: name: search-with-pod-scope spec: + restartPolicy: Never containers: - name: pod image: busybox:stable @@ -15,24 +16,18 @@ spec: - "sh" - "-c" - | - trap 'echo "Exiting..."; exit 1' SIGINT SIGTERM - echo "Loop is running... (Press Ctrl+C or send SIGTERM to exit)" - while true; do + # Continuous check /kubedoop/secret/mode exist + while ! test -f /kubedoop/secret/mode; do sleep 1 - tail -f /opt/secret/mode || (sleep 1 && echo 'Waiting for content...') + echo "Waiting for content..." done resources: limits: - memory: "512Mi" - cpu: "500m" + memory: "50Mi" + cpu: "50m" volumeMounts: - name: secret - mountPath: /opt/secret - readinessProbe: - exec: - command: ["sh", "-c", "cat /opt/secret/mode | grep -q nginx-secret-pod-scope"] - initialDelaySeconds: 1 - periodSeconds: 5 + mountPath: /kubedoop/secret volumes: - name: secret ephemeral: diff --git a/test/e2e/tls/autotls.yaml b/test/e2e/tls/autotls.yaml deleted file mode 100644 index 28b5eef..0000000 --- a/test/e2e/tls/autotls.yaml +++ /dev/null @@ -1,130 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: auto-tls -spec: - resources: - requests: - storage: 1Gi - volumeMode: Filesystem - accessModes: - - ReadWriteOnce ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: auto-tls - labels: - name: auto-tls -spec: - replicas: 3 - selector: - matchLabels: - name: auto-tls - template: - metadata: - labels: - name: auto-tls - spec: - containers: - - name: auto-tls - image: registry.access.redhat.com/ubi9/openjdk-21:1.20 - command: - - /bin/sh - - -c - - | - set -xe - microdnf install -y diffutils openssl - - KEYSTORE_FILE=/opt/security/tls/keystore.p12 - TRUSTSTORE_FILE=/opt/security/tls/truststore.p12 - SERVER_PEM_FILE=/opt/security/tls/server.pem - - # Check if keystore and truststore exist - if [ ! -f $KEYSTORE_FILE ] || [ ! -f $TRUSTSTORE_FILE ]; then - echo "Keystore or truststore does not exist." >&2 - exit 1 - fi - - # Check if keystore contains more than 0 entries - entryCount=$(keytool -list -keystore $KEYSTORE_FILE -storepass $P12PASSWORD | grep 'Your keystore contains' | awk '{print $4}') - if [ $entryCount -gt 0 ]; then - echo "Keystore contains more than 0 entries." 
- else - echo "Keystore contains 0 entries." >&2 - exit 1 - fi - - # Check server certificate in keystore is not expired - EXPIRESLIFE=$((23 * 60 * 60)) # 23 hours - openssl pkcs12 -in "$KEYSTORE_FILE" -passin pass:"$P12PASSWORD" -nokeys -clcerts -out "$SERVER_PEM_FILE" - if ! openssl x509 -checkend $EXPIRESLIFE -noout -in "$SERVER_PEM_FILE"; then - echo "Server certificate in keystore is expired within $EXPIRESLIFE seconds." >&2 - exit 1 - fi - - # Save the server certificate summary to a file - keytool -list -keystore $TRUSTSTORE_FILE -storepass $P12PASSWORD >> /opt/summary/$POD_NAME.txt - cat /opt/summary/$POD_NAME.txt - - tail -f /dev/null - resources: - limits: - memory: "512Mi" - cpu: "300m" - securityContext: - runAsUser: 0 - runAsGroup: 0 - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: P12PASSWORD - value: changeit - ports: - - containerPort: 80 - name: web - readinessProbe: - exec: - command: - - /bin/sh - - -c - - | - set -e - # check the all files content under /opt/certs/ are the same - for file in $(ls /opt/summary); do - if ! diff /opt/summary/$file /opt/summary/$POD_NAME.txt; then - echo "Files are different /opt/summary/$file /opt/summary/$POD_NAME.txt" >&2 - exit 1 - fi - done - echo "All checks passed." - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 5 - volumeMounts: - - name: tls - mountPath: /opt/security/tls - - name: certs - mountPath: /opt/summary - volumes: - - name: certs - persistentVolumeClaim: - claimName: auto-tls - - name: tls - ephemeral: - volumeClaimTemplate: - metadata: - annotations: - secrets.kubedoop.dev/class: tls - secrets.kubedoop.dev/format: tls-p12 - secrets.kubedoop.dev/scope: pod,node - secrets.kubedoop.dev/tlsPKCS12Password: changeit - spec: - accessModes: ["ReadWriteOnce"] - storageClassName: secrets.kubedoop.dev - resources: - requests: - storage: 1Mi diff --git a/test/e2e/tls/chainsaw-test.yaml b/test/e2e/tls/chainsaw-test.yaml index 7a8edec..46e55f9 100644 --- a/test/e2e/tls/chainsaw-test.yaml +++ b/test/e2e/tls/chainsaw-test.yaml @@ -4,29 +4,53 @@ metadata: name: tls spec: steps: + # tls smoke test case with pkcs12 - try: + - apply: + file: tls-pkcs12.yaml - assert: - file: secretcsi-assert.yaml + file: tls-pkcs12-assert.yaml - try: - apply: - file: autotls.yaml + file: tls-scope.yaml - assert: - file: autotls-assert.yaml + file: tls-scope-assert.yaml catch: + - script: + env: + - name: NAMESPACE + value: ($namespace) + content: | + kubectl -n "$NAMESPACE" get pod + kubectl -n "$NAMESPACE" describe pod + # tls will expires case + - try: + - apply: + file: tls-will-expires.yaml + - assert: + resource: + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: tls-will-expires + status: + availableReplicas: 1 + readyReplicas: 1 - sleep: - duration: 5s + duration: 80s - script: env: - name: NAMESPACE value: ($namespace) content: | - set -ex - kubectl -n $NAMESPACE get pods - kubectl -n - - describe: - apiVersion: v1 - kind: Pod - selector: name=auto-tls - - podLogs: - selector: name=auto-tls - tail: -1 + # count k8s events with `Successfully assigned default/tls-will-expires-0 to ` filter with pod name + # If the count > 1, then the test is fine, pod is restarted when the certificate expires + restart_count=$(kubectl -n "$NAMESPACE" get events --field-selector involvedObject.name=tls-will-expires-0 --no-headers | grep 'Started container tls-will-expires' | wc -l) + if [ $restart_count -gt 1 ]; then + echo "Pod tls-will-expires-0 is restarted when the certificate expires" + else + echo 
"Pod tls-will-expires-0 is not restarted when the certificate expires" + exit 1 + fi + check: + ($error == null): true diff --git a/test/e2e/tls/secretcsi-assert.yaml b/test/e2e/tls/secretcsi-assert.yaml deleted file mode 100644 index 04f4d66..0000000 --- a/test/e2e/tls/secretcsi-assert.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: secret-operator-daemonset - namespace: secret-operator-system -status: - (numberReady >= `1`): true ---- -apiVersion: secrets.kubedoop.dev/v1alpha1 -kind: SecretClass -metadata: - name: tls -spec: - backend: - autoTls: - ca: - autoGenerated: true - caCertificateLifeTime: 8760h - secret: - name: secret-provisioner-tls-ca - namespace: kubedoop-operators - maxCertificateLifeTime: 360h diff --git a/test/e2e/tls/autotls-assert.yaml b/test/e2e/tls/tls-pkcs12-assert.yaml similarity index 79% rename from test/e2e/tls/autotls-assert.yaml rename to test/e2e/tls/tls-pkcs12-assert.yaml index 6f8b584..d105be8 100644 --- a/test/e2e/tls/autotls-assert.yaml +++ b/test/e2e/tls/tls-pkcs12-assert.yaml @@ -1,9 +1,9 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: auto-tls + name: tls-p12 labels: - name: auto-tls + name: tls-p12 status: availableReplicas: 3 readyReplicas: 3 diff --git a/test/e2e/tls/tls-pkcs12.yaml b/test/e2e/tls/tls-pkcs12.yaml new file mode 100644 index 0000000..19f1159 --- /dev/null +++ b/test/e2e/tls/tls-pkcs12.yaml @@ -0,0 +1,118 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tls-p12 + labels: + name: tls-p12 +spec: + replicas: 3 + selector: + matchLabels: + name: tls-p12 + template: + metadata: + labels: + name: tls-p12 + spec: + containers: + - name: tls-p12 + image: quay.io/zncdatadev/testing-tools:0.1.0-kubedoop0.0.0-dev + command: + - /bin/bash + - -c + - | + KEYSTORE_FILE=/kubedoop/tls/keystore.p12 + TRUSTSTORE_FILE=/kubedoop/tls/truststore.p12 + SERVER_PEM_FILE=/kubedoop/tls/server.pem + + # Arguments: + # Returns: + # 0: success + # 1: fail + function checkTLSPKCS12 () { + # Check if keystore and truststore exist + if [ ! -f $KEYSTORE_FILE ] || [ ! -f $TRUSTSTORE_FILE ]; then + echo "Keystore or truststore does not exist." >&2 + return 1 + fi + + # Check if keystore contains more than 0 entries + entryCount=$(keytool -list -keystore $KEYSTORE_FILE -storepass $P12PASSWORD | grep 'Your keystore contains' | awk '{print $4}') + if [ $entryCount -gt 0 ]; then + echo "Keystore contains more than 0 entries." >&2 + else + echo "Keystore contains 0 entries." >&2 + return 1 + fi + + # Check server certificate in keystore is not expired, default is 60 * 60 * 24 * 7 seconds + EXPIRESLIFE=$((23 * 60 * 60)) # 23 hours + openssl pkcs12 -in "$KEYSTORE_FILE" -passin pass:"$P12PASSWORD" -nokeys -clcerts -out "$SERVER_PEM_FILE" + if ! openssl x509 -checkend $EXPIRESLIFE -noout -in "$SERVER_PEM_FILE"; then + echo "Server certificate in keystore is expired within $EXPIRESLIFE ." >&2 + return 1 + fi + + echo "All checks passed." 
>&2 + return 0 + } + + # Continuous check with 10 seconds interval until the function returns 0 + while true; do + if checkTLSPKCS12; then + break + fi + sleep 10 + done + + # save assert result success to /tmp/assert + echo "success" > /tmp/assert + + sleep infinity + + resources: + limits: + memory: "50Mi" + cpu: "50m" + securityContext: + runAsUser: 0 + runAsGroup: 0 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: P12PASSWORD + value: changeit + ports: + - containerPort: 80 + name: web + readinessProbe: + exec: + command: + - test + - -f + - /tmp/assert + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + volumeMounts: + - name: tls + mountPath: /kubedoop/tls + volumes: + - name: tls + ephemeral: + volumeClaimTemplate: + metadata: + annotations: + secrets.kubedoop.dev/class: tls + secrets.kubedoop.dev/format: tls-p12 + secrets.kubedoop.dev/scope: pod,node + secrets.kubedoop.dev/tlsPKCS12Password: changeit + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: secrets.kubedoop.dev + resources: + requests: + storage: 1Mi diff --git a/test/e2e/tls/tls-scope-assert.yaml b/test/e2e/tls/tls-scope-assert.yaml new file mode 100644 index 0000000..b94ad97 --- /dev/null +++ b/test/e2e/tls/tls-scope-assert.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tls-scope + labels: + name: tls-scope +status: + availableReplicas: 3 + readyReplicas: 3 + replicas: 3 + updatedReplicas: 3 diff --git a/test/e2e/tls/tls-scope.yaml b/test/e2e/tls/tls-scope.yaml new file mode 100644 index 0000000..cb81929 --- /dev/null +++ b/test/e2e/tls/tls-scope.yaml @@ -0,0 +1,140 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tls-scope + labels: + name: tls-scope +spec: + replicas: 3 + selector: + matchLabels: + app: tls-scope + template: + metadata: + labels: + app: tls-scope + spec: + containers: + - name: tls-scope + image: quay.io/zncdatadev/testing-tools:0.1.0-kubedoop0.0.0-dev + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + limits: + memory: "50Mi" + cpu: "50m" + command: + - /bin/bash + - -c + - | + # check tls.crt san has secret scope value + # Arguments: + # Returns: + # 0: success + # 1: fail + function checkTLSSAN () { + # check /kubedoop/tls/tls.crt exists + if [ ! -f /kubedoop/tls/tls.crt ]; then + echo "/kubedoop/tls/tls.crt does not exist" >&2 + return 1 + fi + + # Tls SAN check + # X509v3 Subject Alternative Name: critical + # DNS:secret-operator-1.26.15-control-plane, DNS:tls-scope-https.default.svc.cluster.local, DNS:tls-scope-74c794dc64-88mh8-tls-scope-listener-eph-volume.default.svc.cluster.local, IP Address:172.18.0.2 + tls_san=$(openssl x509 -in /kubedoop/tls/tls.crt -noout -text | grep -A 1 "Subject Alternative Name") + echo "tls_san: \n$tls_san\n" >&2 + + # check tls_san container some value + svc_san="tls-scope-https.$NAMESPACE.svc.cluster.local" + listener_san="tls-scope-listener-eph-volume.$NAMESPACE.svc.cluster.local" + check_lists=($svc_san $listener_san) + + for check in ${check_lists[@]}; do + if [[ $tls_san != *$check* ]]; then + echo "tls_san does not contain $check" >&2 + return 1 + fi + done + + return 0 + } + + # Continuous check with 10 seconds interval until the function returns 0 + while true; do + checkTLSSAN + if [ $? 
-eq 0 ]; then + echo "tls_san check success" + break + fi + sleep 10 + done + + # save assert result success to /tmp/assert + echo "success" > /tmp/assert + sleep infinity + ports: + - containerPort: 443 + name: https + readinessProbe: + exec: + command: + - test + - -f + - /tmp/assert + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + volumeMounts: + - name: tls + mountPath: /kubedoop/tls + - name: tls-scope-listener-eph-volume + mountPath: /kubedoop/listener + volumes: + - name: assert + emptyDir: {} + - name: tls + ephemeral: + volumeClaimTemplate: + metadata: + annotations: + secrets.kubedoop.dev/class: tls + secrets.kubedoop.dev/format: pem + secrets.kubedoop.dev/scope: pod,node,service=tls-scope-https,listener-volume=tls-scope-listener-eph-volume + secrets.kubedoop.dev/tlsPKCS12Password: changeit + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: secrets.kubedoop.dev + resources: + requests: + storage: 1Mi + - name: tls-scope-listener-eph-volume + ephemeral: + volumeClaimTemplate: + metadata: + annotations: + listeners.kubedoop.dev/class: cluster-internal # this is service ClusterIP + # listeners.kubedoop.dev/class: external-unstable # this is service NodePort + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: listeners.kubedoop.dev + resources: + requests: + storage: 1Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: tls-scope-https +spec: + selector: + app: tls-scope + ports: + - port: 443 + targetPort: https diff --git a/test/e2e/tls/tls-will-expires.yaml b/test/e2e/tls/tls-will-expires.yaml new file mode 100644 index 0000000..bcde3d2 --- /dev/null +++ b/test/e2e/tls/tls-will-expires.yaml @@ -0,0 +1,59 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: tls-will-expires + labels: + name: tls-will-expires +spec: + replicas: 1 + serviceName: default + selector: + matchLabels: + name: tls-will-expires + template: + metadata: + labels: + name: tls-will-expires + spec: + containers: + - name: tls-will-expires + image: caddy:2 + resources: + limits: + memory: "128Mi" + cpu: "100m" + ports: + - containerPort: 80 + name: http + readinessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + volumeMounts: + - name: tls + mountPath: /opt/security/tls + volumes: + - name: tls + ephemeral: + volumeClaimTemplate: + metadata: + annotations: + secrets.kubedoop.dev/class: tls + secrets.kubedoop.dev/format: pem + secrets.kubedoop.dev/scope: pod,node + secrets.kubedoop.dev/tlsPKCS12Password: changeit + # Golang duration string is a possibly signed sequence of decimal numbers, + # each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". + # Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + secrets.kubedoop.dev/autoTlsCertLifetime: 60s + secrets.kubedoop.dev/autoTlsCertRestartBuffer: 10s + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: secrets.kubedoop.dev + resources: + requests: + storage: 1Mi
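For context on the tls-will-expires case: with secrets.kubedoop.dev/autoTlsCertLifetime: 60s and autoTlsCertRestartBuffer: 10s the issued certificate is deliberately short-lived, and the chainsaw catch script earlier in this diff counts "Started container" events to confirm the pod was restarted around expiry. A minimal sketch of the kind of expiry check involved, using only the Go standard library; the helper name expiresWithin is illustrative and not taken from the operator code:

```go
package example

import (
	"crypto/x509"
	"encoding/pem"
	"errors"
	"time"
)

// expiresWithin reports whether a PEM-encoded certificate's NotAfter falls
// inside the given buffer from now, e.g. a 60s certificate checked against a
// 10s restart buffer.
func expiresWithin(certPEM []byte, buffer time.Duration) (bool, error) {
	block, _ := pem.Decode(certPEM)
	if block == nil {
		return false, errors.New("no PEM block found in input")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return false, err
	}
	return time.Now().Add(buffer).After(cert.NotAfter), nil
}
```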