From 875dbfc82e304361d5d31bbb572faa8e52b9e91f Mon Sep 17 00:00:00 2001 From: DingYongliang <1521323439@qq.com> Date: Tue, 24 Sep 2024 21:50:06 +0800 Subject: [PATCH 1/9] capkk controller --- pkg/controllers/kkcluster_controller.go | 1201 +++++++++++++++++++++++ pkg/controllers/kkmachine_controller.go | 325 ++++++ 2 files changed, 1526 insertions(+) create mode 100644 pkg/controllers/kkcluster_controller.go create mode 100644 pkg/controllers/kkmachine_controller.go diff --git a/pkg/controllers/kkcluster_controller.go b/pkg/controllers/kkcluster_controller.go new file mode 100644 index 000000000..9876a09ab --- /dev/null +++ b/pkg/controllers/kkcluster_controller.go @@ -0,0 +1,1201 @@ +/* +Copyright 2024 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "crypto/rand" + "errors" + "fmt" + "math/big" + "reflect" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/predicates" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + ctrlfinalizer "sigs.k8s.io/controller-runtime/pkg/finalizer" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + infrav1beta1 "github.com/kubesphere/kubekey/v4/pkg/apis/capkk/v1beta1" + kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" + "github.com/kubesphere/kubekey/v4/pkg/scope" +) + +// Defines some useful static strings. 
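+// Each *PlaybookName constant names a reconcile stage, and the matching *Playbook
+// constant is the playbook path that the generated Pipeline for that stage runs.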
+const (
+	MaxPipelineCounts        int    = 3
+	PipelineUpperLimitReason string = "PipelineUpperLimit"
+
+	pipelineNameLabel = "kubekey.kubesphere.capkk.io/pipeline"
+
+	CheckConnectPlaybookName string = "check-connect"
+	CheckConnectPlaybook     string = "capkk/playbooks/capkk_check_connect.yaml"
+
+	PreparationPlaybookName string = "preparation"
+	PreparationPlaybook     string = "capkk/playbooks/capkk_preparation.yaml"
+
+	EtcdInstallPlaybookName string = "etcd-install"
+	EtcdInstallPlaybook     string = "capkk/playbooks/capkk_etcd_binary_install.yaml"
+
+	BinaryInstallPlaybookName string = "binary-install"
+	BinaryInstallPlaybook     string = "capkk/playbooks/capkk_binary_install.yaml"
+
+	BootstrapPlaybookName string = "bootstrap-ready"
+	BootstrapPlaybook     string = "capkk/playbooks/capkk_bootstrap_ready.yaml"
+)
+
+// KKClusterReconciler reconciles a KKCluster object
+type KKClusterReconciler struct {
+	*runtime.Scheme
+	ctrlclient.Client
+	record.EventRecorder
+
+	ctrlfinalizer.Finalizers
+	MaxConcurrentReconciles int
+}
+
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch;update;patch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch;update;patch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinedeployments;machinedeployments/status,verbs=get;list;watch;update;patch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinesets;machinesets/status,verbs=get;list;watch;update;patch
+// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch;update;patch
+// +kubebuilder:rbac:groups="",resources=secrets;events;configmaps,verbs=get;list;watch;create;patch
+
+func (r *KKClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, retErr error) {
+	// Get KKCluster.
+	kkCluster := &infrav1beta1.KKCluster{}
+	err := r.Client.Get(ctx, req.NamespacedName, kkCluster)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			klog.V(5).InfoS("KKCluster not found",
+				"KKCluster", ctrlclient.ObjectKeyFromObject(kkCluster))
+
+			return ctrl.Result{}, nil
+		}
+
+		return ctrl.Result{}, err
+	}
+
+	// Fetch the Cluster.
+	cluster, err := util.GetOwnerCluster(ctx, r.Client, kkCluster.ObjectMeta)
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+	if cluster == nil {
+		klog.V(5).InfoS("Cluster has not yet set OwnerRef")
+
+		return reconcile.Result{}, nil
+	}
+
+	klog.V(4).InfoS("Fetched cluster", "cluster", cluster.Name)
+
+	// Create the scope.
+	clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+		Client:         r.Client,
+		Cluster:        cluster,
+		KKCluster:      kkCluster,
+		ControllerName: cluster.Name,
+	})
+	if err != nil {
+		return reconcile.Result{}, fmt.Errorf("[%s]: failed to create scope: %w", cluster.Name, err)
+	}
+
+	// Always close the scope when exiting this function, so we can persist any KKCluster changes.
+	defer func() {
+		if err := clusterScope.Close(ctx); err != nil && retErr == nil {
+			klog.V(5).ErrorS(err, "Failed to patch object")
+			retErr = err
+		}
+	}()
+
+	// Skip reconciliation when the KKCluster or its owner Cluster is paused.
+	if annotations.IsPaused(clusterScope.Cluster, clusterScope.KKCluster) {
+		klog.InfoS("KKCluster or linked Cluster is marked as paused. Won't reconcile")
+
+		return reconcile.Result{}, nil
+	}
+
+	// Handle deleted clusters
+	if !kkCluster.DeletionTimestamp.IsZero() {
+		r.reconcileDelete(clusterScope)
+
+		return ctrl.Result{}, nil
+	}
+
+	// Handle non-deleted clusters
+	return r.reconcileNormal(ctx, clusterScope)
+}
+
+func (r *KKClusterReconciler) reconcileNormal(ctx context.Context, s *scope.ClusterScope) (reconcile.Result, error) {
+	klog.V(4).Info("Reconcile KKCluster normal")
+
+	kkCluster := s.KKCluster
+
+	// If the KKCluster doesn't have our finalizer, add it.
+	if controllerutil.AddFinalizer(kkCluster, infrav1beta1.ClusterFinalizer) {
+		// Register the finalizer immediately to avoid orphaning KK resources on delete
+		if err := s.PatchObject(ctx); err != nil {
+			return reconcile.Result{}, err
+		}
+	}
+
+	switch kkCluster.Status.Phase {
+	case "":
+		// Switch kkCluster.Status.Phase to `Pending`.
+		expected := kkCluster.DeepCopy()
+		kkCluster.Status.Phase = infrav1beta1.KKClusterPhasePending
+		if err := r.Client.Status().Patch(ctx, kkCluster, ctrlclient.MergeFrom(expected)); err != nil {
+			klog.V(5).ErrorS(err, "Update KKCluster error", "KKCluster", ctrlclient.ObjectKeyFromObject(kkCluster))
+
+			return ctrl.Result{}, err
+		}
+	case infrav1beta1.KKClusterPhasePending:
+		// Switch kkCluster.Status.Phase to `Running` and add the HostsReadyCondition.
+		expected := kkCluster.DeepCopy()
+		kkCluster.Status.Phase = infrav1beta1.KKClusterPhaseRunning
+		// Set the first condition to `Unknown` for the next reconciles.
+		conditions.MarkUnknown(s.KKCluster, infrav1beta1.HostsReadyCondition,
+			infrav1beta1.WaitingCheckHostReadyReason, infrav1beta1.WaitingCheckHostReadyMessage)
+		if err := r.Client.Status().Patch(ctx, kkCluster, ctrlclient.MergeFrom(expected)); err != nil {
+			klog.V(5).ErrorS(err, "Update KKCluster error", "KKCluster", ctrlclient.ObjectKeyFromObject(kkCluster))
+
+			return ctrl.Result{}, err
+		}
+	case infrav1beta1.KKClusterPhaseRunning:
+		if err := r.reconcileNormalRunning(ctx, s); err != nil {
+			return ctrl.Result{}, err
+		}
+	case infrav1beta1.KKClusterPhaseSucceed:
+		return ctrl.Result{}, nil
+	case infrav1beta1.KKClusterPhaseFailed:
+		return ctrl.Result{}, nil
+	default:
+		return ctrl.Result{}, nil
+	}
+
+	if lb := s.ControlPlaneLoadBalancer(); lb != nil {
+		kkCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
+			Host: lb.Host,
+			Port: s.APIServerPort(),
+		}
+	}
+
+	kkCluster.Status.Ready = true
+
+	return ctrl.Result{
+		RequeueAfter: 30 * time.Second,
+	}, nil
+}
+
+func (r *KKClusterReconciler) reconcileNormalRunning(ctx context.Context, s *scope.ClusterScope) error {
+	var reset bool
+	for {
+		reset = false
+		for _, condition := range s.KKCluster.Status.Conditions {
+			conditionsCnt := len(s.KKCluster.Status.Conditions)
+			if conditions.IsFalse(s.KKCluster, condition.Type) {
+				continue
+			}
+
+			switch condition.Type {
+			case infrav1beta1.HostsReadyCondition:
+				if err := r.dealWithHostConnectCheck(ctx, s); err != nil {
+					return err
+				}
+			case infrav1beta1.PreparationReadyCondition:
+				if err := r.dealWithPreparation(ctx, s); err != nil {
+					return err
+				}
+			case infrav1beta1.EtcdReadyCondition:
+				if err := r.dealWithEtcdInstall(ctx, s); err != nil {
+					return err
+				}
+			case infrav1beta1.BinaryInstallCondition:
+				if err := r.dealWithBinaryInstall(ctx, s); err != nil {
+					return err
+				}
+			case infrav1beta1.BootstrapReadyCondition:
+				// kubeadm init, kubeadm join
+				if err := r.dealWithBootstrapReady(ctx, s); err != nil {
+					return err
+				}
+			case infrav1beta1.ClusterReadyCondition:
+				// Check the workload cluster, conceptually `kubectl get node`:
+				// master -> configmap -> kubeconfig -> Client -> get node
+				if err := r.dealWithClusterReadyCheck(ctx, s); err != nil {
+					return err
+				}
+				// Switch `KKCluster.Phase` to `Succeed`
+				s.KKCluster.Status.Phase = infrav1beta1.KKClusterPhaseSucceed
+				if err := r.Client.Status().Update(ctx, s.KKCluster); err != nil {
+					klog.V(5).ErrorS(err, "Update KKCluster error", "KKCluster",
+						ctrlclient.ObjectKeyFromObject(s.KKCluster))
+
+					return err
+				}
+			default:
+			}
+
+			// If new conditions were added, restart the loop.
+			if len(s.KKCluster.Status.Conditions) > conditionsCnt {
+				reset = true
+
+				break
+			}
+		}
+
+		if !reset {
+			break
+		}
+	}
+
+	return nil
+}
+
+func (r *KKClusterReconciler) reconcileDelete(clusterScope *scope.ClusterScope) {
+	klog.V(4).Info("Reconcile KKCluster delete")
+
+	// TODO: handle pipelines on deletion, per phase.
+	switch clusterScope.KKCluster.Status.Phase {
+	case infrav1beta1.KKClusterPhasePending:
+		// transfer into Delete phase
+	case infrav1beta1.KKClusterPhaseRunning:
+		// delete the running pipeline & recreate a delete pipeline
+	case infrav1beta1.KKClusterPhaseFailed:
+		// delete
+	case infrav1beta1.KKClusterPhaseSucceed:
+		//
+	}
+
+	// Cluster is deleted so remove the finalizer.
+	controllerutil.RemoveFinalizer(clusterScope.KKCluster, infrav1beta1.ClusterFinalizer)
+}
+
+// dealWithHostConnectCheck and dealWithHostSelector pre-check the inventory configuration, especially
+// hosts and groups. In CAPKK, three default groups describe a complete Kubernetes cluster. First,
+// dealWithHostConnectCheck checks host connectivity with one simple pipeline. Second, dealWithHostSelector
+// automatically initializes the `Groups` defined by `Inventory` from the connected hosts.
+// Note: the second step is always executed, even if all hosts are disconnected.
+func (r *KKClusterReconciler) dealWithHostConnectCheck(ctx context.Context, s *scope.ClusterScope) error {
+	var p *kkcorev1.Pipeline
+	var err error
+	if p, err = r.dealWithExecutePlaybookReconcile(
+		ctx, s, CheckConnectPlaybook, CheckConnectPlaybookName,
+		func(_ *kkcorev1.Pipeline) {
+			conditions.MarkTrueWithNegativePolarity(s.KKCluster, infrav1beta1.HostsReadyCondition,
+				infrav1beta1.WaitingHostsSelectReason, clusterv1.ConditionSeverityInfo, infrav1beta1.WaitingHostsSelectMessage)
+		},
+		func(p *kkcorev1.Pipeline) {
+			r.EventRecorder.Eventf(s.KKCluster, corev1.EventTypeWarning, infrav1beta1.HostsNotReadyReason, p.Status.Reason)
+			conditions.MarkTrueWithNegativePolarity(s.KKCluster, infrav1beta1.HostsReadyCondition,
+				infrav1beta1.HostsNotReadyReason, clusterv1.ConditionSeverityError, p.Status.Reason,
+			)
+		}); err != nil {
+		return err
+	}
+
+	if p.Status.Phase == kkcorev1.PipelinePhaseSucceed {
+		return r.dealWithHostSelector(ctx, s, *p)
+	}
+
+	return nil
+}
+
+// dealWithHostSelector is executed by dealWithHostConnectCheck once the connectivity pipeline has completed.
+func (r *KKClusterReconciler) dealWithHostSelector(ctx context.Context, s *scope.ClusterScope, _ kkcorev1.Pipeline) error {
+	// Initialize node select mode
+	if err := r.initNodeSelectMode(s); err != nil {
+		return err
+	}
+
+	// Fetch groups and hosts of `Inventory`, replicas of `KubeadmControlPlane` and `MachineDeployment`.
+	inv, err := r.getInitialedInventory(ctx, s)
+	if err != nil {
+		return err
+	}
+
+	kcp, err := GetKubeadmControlPlane(ctx, r.Client, s)
+	if err != nil {
+		return err
+	}
+
+	md, err := GetMachineDeployment(ctx, r.Client, s)
+	if err != nil {
+		return err
+	}
+
+	// Initialize unavailable maps to de-duplicate.
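+	// A host claimed by the control-plane group below is recorded in unavailableHosts
+	// so the worker group cannot select it again; unavailableGroups likewise guards
+	// against cyclic group references.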
+	unavailableHosts, unavailableGroups := make(map[string]struct{}), make(map[string]struct{})
+
+	// Validate the kubernetes cluster's controlPlaneGroup.
+	controlPlaneGroup, err := validateInventoryGroup(s.KKCluster, inv, s.KKCluster.Spec.ControlPlaneGroupName,
+		int(*kcp.Spec.Replicas), unavailableHosts, unavailableGroups, false,
+	)
+	if err != nil {
+		return err
+	}
+
+	inv.Spec.Groups[s.KKCluster.Spec.ControlPlaneGroupName] = controlPlaneGroup
+
+	// Validate the kubernetes cluster's workerGroup.
+	workerGroup, err := validateInventoryGroup(s.KKCluster, inv, s.KKCluster.Spec.WorkerGroupName,
+		int(*md.Spec.Replicas), unavailableHosts, unavailableGroups, false,
+	)
+	if err != nil {
+		return err
+	}
+
+	inv.Spec.Groups[s.KKCluster.Spec.WorkerGroupName] = workerGroup
+
+	// Update `Inventory` resource.
+	if err := r.Client.Update(ctx, inv); err != nil {
+		klog.V(5).ErrorS(err, "Update Inventory error", "Inventory", ctrlclient.ObjectKeyFromObject(inv))
+
+		return err
+	}
+
+	// Update Conditions of `KKCluster`.
+	conditions.MarkUnknown(s.KKCluster, infrav1beta1.PreparationReadyCondition,
+		infrav1beta1.WaitingPreparationReason, infrav1beta1.WaitingPreparationMessage)
+	if conditions.GetReason(s.KKCluster, infrav1beta1.HostsReadyCondition) == infrav1beta1.WaitingHostsSelectReason {
+		conditions.MarkFalse(s.KKCluster, infrav1beta1.HostsReadyCondition, infrav1beta1.HostsReadyReason,
+			clusterv1.ConditionSeverityInfo, infrav1beta1.HostsReadyMessage)
+	} else {
+		condition := conditions.Get(s.KKCluster, infrav1beta1.HostsReadyCondition)
+		conditions.MarkTrueWithNegativePolarity(s.KKCluster, infrav1beta1.HostsReadyCondition, condition.Reason,
+			clusterv1.ConditionSeverityWarning, condition.Message)
+	}
+
+	return nil
+}
+
+// dealWithPreparation pre-checks & pre-installs artifacts and initializes the OS.
+func (r *KKClusterReconciler) dealWithPreparation(ctx context.Context, s *scope.ClusterScope) error {
+	if _, err := r.dealWithExecutePlaybookReconcile(
+		ctx, s, PreparationPlaybook, PreparationPlaybookName,
+		func(_ *kkcorev1.Pipeline) {
+			conditions.MarkUnknown(s.KKCluster, infrav1beta1.EtcdReadyCondition,
+				infrav1beta1.WaitingInstallEtcdReason, infrav1beta1.WaitingInstallEtcdMessage)
+			conditions.MarkFalse(s.KKCluster, infrav1beta1.PreparationReadyCondition, infrav1beta1.PreparationReadyReason,
+				clusterv1.ConditionSeverityInfo, infrav1beta1.PreparationReadyMessage)
+		},
+		func(p *kkcorev1.Pipeline) {
+			r.EventRecorder.Eventf(s.KKCluster, corev1.EventTypeWarning, infrav1beta1.PreparationNotReadyReason, p.Status.Reason)
+			conditions.MarkTrueWithNegativePolarity(s.KKCluster, infrav1beta1.PreparationReadyCondition,
+				infrav1beta1.PreparationNotReadyReason, clusterv1.ConditionSeverityError, p.Status.Reason,
+			)
+		}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// dealWithEtcdInstall installs etcd as binaries.
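+// Like the other stage handlers, it runs its playbook through a Pipeline; the stages are chained as
+// HostsReady -> PreparationReady -> EtcdReady -> BinaryInstall -> BootstrapReady -> ClusterReady.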
+func (r *KKClusterReconciler) dealWithEtcdInstall(ctx context.Context, s *scope.ClusterScope) error {
+	if _, err := r.dealWithExecutePlaybookReconcile(
+		ctx, s, EtcdInstallPlaybook, EtcdInstallPlaybookName,
+		func(_ *kkcorev1.Pipeline) {
+			conditions.MarkUnknown(s.KKCluster, infrav1beta1.BinaryInstallCondition,
+				infrav1beta1.WaitingInstallClusterBinaryReason, infrav1beta1.WaitingInstallClusterBinaryMessage)
+			conditions.MarkFalse(s.KKCluster, infrav1beta1.EtcdReadyCondition, infrav1beta1.EtcdReadyReason,
+				clusterv1.ConditionSeverityInfo, infrav1beta1.EtcdReadyMessage)
+		},
+		func(p *kkcorev1.Pipeline) {
+			r.EventRecorder.Eventf(s.KKCluster, corev1.EventTypeWarning, infrav1beta1.EtcdNotReadyReason, p.Status.Reason)
+			conditions.MarkTrueWithNegativePolarity(s.KKCluster, infrav1beta1.EtcdReadyCondition,
+				infrav1beta1.EtcdNotReadyReason, clusterv1.ConditionSeverityError, p.Status.Reason,
+			)
+		}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// dealWithBinaryInstall installs the cluster binary tools.
+func (r *KKClusterReconciler) dealWithBinaryInstall(ctx context.Context, s *scope.ClusterScope) error {
+	if _, err := r.dealWithExecutePlaybookReconcile(
+		ctx, s, BinaryInstallPlaybook, BinaryInstallPlaybookName,
+		func(_ *kkcorev1.Pipeline) {
+			conditions.MarkUnknown(s.KKCluster, infrav1beta1.BootstrapReadyCondition,
+				infrav1beta1.WaitingCheckBootstrapReadyReason, infrav1beta1.WaitingCheckBootstrapReadyMessage)
+			conditions.MarkFalse(s.KKCluster, infrav1beta1.BinaryInstallCondition, infrav1beta1.BinaryReadyReason,
+				clusterv1.ConditionSeverityInfo, infrav1beta1.BinaryReadyMessage)
+		},
+		func(p *kkcorev1.Pipeline) {
+			r.EventRecorder.Eventf(s.KKCluster, corev1.EventTypeWarning, infrav1beta1.BinaryNotReadyReason, p.Status.Reason)
+			conditions.MarkTrueWithNegativePolarity(s.KKCluster, infrav1beta1.BinaryInstallCondition,
+				infrav1beta1.BinaryNotReadyReason, clusterv1.ConditionSeverityError, p.Status.Reason,
+			)
+		}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// dealWithBootstrapReady bootstraps the cluster, e.g. `kubeadm init` and `kubeadm join`.
+func (r *KKClusterReconciler) dealWithBootstrapReady(ctx context.Context, s *scope.ClusterScope) error {
+	if _, err := r.dealWithExecutePlaybookReconcile(
+		ctx, s, BootstrapPlaybook, BootstrapPlaybookName,
+		func(_ *kkcorev1.Pipeline) {
+			conditions.MarkUnknown(s.KKCluster, infrav1beta1.ClusterReadyCondition,
+				infrav1beta1.WaitingCheckClusterReadyReason, infrav1beta1.WaitingCheckClusterReadyMessage)
+			conditions.MarkFalse(s.KKCluster, infrav1beta1.BootstrapReadyCondition, infrav1beta1.BootstrapReadyReason,
+				clusterv1.ConditionSeverityInfo, infrav1beta1.BootstrapReadyMessage)
+		},
+		func(p *kkcorev1.Pipeline) {
+			r.EventRecorder.Eventf(s.KKCluster, corev1.EventTypeWarning, infrav1beta1.BootstrapNotReadyReason, p.Status.Reason)
+			conditions.MarkTrueWithNegativePolarity(s.KKCluster, infrav1beta1.BootstrapReadyCondition,
+				infrav1beta1.BootstrapNotReadyReason, clusterv1.ConditionSeverityError, p.Status.Reason,
+			)
+		}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// dealWithClusterReadyCheck verifies the bootstrapped cluster is ready; afterwards the caller switches
+// `KKCluster.Phase` to `Succeed`.
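+// Readiness is currently derived from the Inventory: updateInventoryStatus rebuilds the
+// host-machine mapping in `Inventory.Status` for the selected control-plane and worker hosts.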
+func (r *KKClusterReconciler) dealWithClusterReadyCheck(ctx context.Context, s *scope.ClusterScope) error {
+	inv, err := GetInventory(ctx, r.Client, s)
+	if err != nil {
+		return err
+	}
+
+	return r.updateInventoryStatus(ctx, s, inv)
+}
+
+// dealWithExecutePlaybookReconcile matches the most recent pipeline's `.Status.Phase` against the latest state of the
+// cluster and runs the corresponding stage handler to adjust the cluster conditions. It returns the pipeline when it
+// is useful for further checks.
+func (r *KKClusterReconciler) dealWithExecutePlaybookReconcile(ctx context.Context, s *scope.ClusterScope,
+	playbook, playbookName string, funcWithSucceed, funcWithFailed func(p *kkcorev1.Pipeline)) (*kkcorev1.Pipeline, error) {
+	p, err := r.dealWithPipelinesReconcile(ctx, s, playbook, playbookName)
+	if err != nil {
+		return &kkcorev1.Pipeline{}, err
+	}
+
+	switch p.Status.Phase {
+	case kkcorev1.PipelinePhasePending:
+		return &kkcorev1.Pipeline{}, nil
+	case kkcorev1.PipelinePhaseRunning:
+		return &kkcorev1.Pipeline{}, nil
+	case kkcorev1.PipelinePhaseSucceed:
+		r.dealWithExecuteSucceed(p, funcWithSucceed)
+
+		return p, nil
+	case kkcorev1.PipelinePhaseFailed:
+		return p, r.dealWithExecuteFailed(p, funcWithFailed)
+	default:
+		return &kkcorev1.Pipeline{}, nil
+	}
+}
+
+// dealWithExecuteSucceed is used by dealWithExecutePlaybookReconcile; it marks the current condition False and
+// initializes the next condition (if any).
+func (r *KKClusterReconciler) dealWithExecuteSucceed(p *kkcorev1.Pipeline, function func(p *kkcorev1.Pipeline)) {
+	function(p)
+	klog.V(5).InfoS("Pipeline execute succeed", "pipeline", p.Name)
+}
+
+// dealWithExecuteFailed is used by dealWithExecutePlaybookReconcile; it emits a warning event and marks the current
+// condition with negative polarity.
+func (r *KKClusterReconciler) dealWithExecuteFailed(p *kkcorev1.Pipeline, function func(p *kkcorev1.Pipeline)) error {
+	function(p)
+	err := fmt.Errorf("pipeline %s execute failed", p.Name)
+	klog.V(5).ErrorS(err, "Pipeline execute failed", "pipeline", p.Name)
+
+	return err
+}
+
+// dealWithPipelinesReconcile reconciles all pipelines that were created to execute the `playbookName` tasks and belong
+// to the current cluster. It creates a new pipeline when all existing ones have failed and the retry limit is not yet
+// reached.
+func (r *KKClusterReconciler) dealWithPipelinesReconcile(ctx context.Context, s *scope.ClusterScope,
+	playbook, playbookName string) (*kkcorev1.Pipeline, error) {
+	pipelines := &kkcorev1.PipelineList{}
+
+	// Check whether pipelines already exist, or an unexpected error happened.
+	if err := r.Client.List(ctx, pipelines, ctrlclient.InNamespace(s.Namespace()), ctrlclient.MatchingLabels{
+		clusterv1.ClusterNameLabel: s.Name(),
+		pipelineNameLabel:          playbookName,
+	}); err != nil && !apierrors.IsNotFound(err) {
+		return nil, err
+	}
+
+	// Fetch the latest pipeline.
+	var latestPipeline *kkcorev1.Pipeline
+	allPipelinesFailed := true
+
+	for _, pipeline := range pipelines.Items {
+		if pipeline.Status.Phase == kkcorev1.PipelinePhaseSucceed {
+			return &pipeline, nil
+		}
+		if pipeline.Status.Phase != kkcorev1.PipelinePhaseFailed {
+			allPipelinesFailed = false
+		}
+		if latestPipeline == nil || pipeline.CreationTimestamp.After(latestPipeline.CreationTimestamp.Time) {
+			pipelineCopy := pipeline.DeepCopy()
+			latestPipeline = pipelineCopy
+		}
+	}
+
+	// If the pipeline count is below the upper limit and all pipelines have failed, create a new one.
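+	// In effect, each playbook gets at most MaxPipelineCounts attempts; once the limit
+	// is reached only a warning event is emitted and the latest (failed) pipeline is
+	// returned for inspection.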
+	if allPipelinesFailed && len(pipelines.Items) < MaxPipelineCounts {
+		return r.generatePipelineByTemplate(ctx, s, playbookName, playbook)
+	} else if len(pipelines.Items) >= MaxPipelineCounts {
+		r.EventRecorder.Eventf(s.KKCluster, corev1.EventTypeWarning, PipelineUpperLimitReason,
+			"Can't create more %s pipelines, the upper limit is %d", playbookName, MaxPipelineCounts)
+	}
+
+	return latestPipeline, nil
+}
+
+// initNodeSelectMode fills in required configuration fields with defaults when the YAML manifest does not set them.
+func (r *KKClusterReconciler) initNodeSelectMode(s *scope.ClusterScope) error {
+	// Set default values of the `KKCluster` resource.
+	if s.KKCluster.Spec.NodeSelectorMode == "" {
+		s.KKCluster.Spec.NodeSelectorMode = infrav1beta1.DefaultNodeSelectorMode
+	}
+	if s.KKCluster.Spec.ControlPlaneGroupName == "" {
+		s.KKCluster.Spec.ControlPlaneGroupName = infrav1beta1.DefaultControlPlaneGroupName
+	}
+	if s.KKCluster.Spec.WorkerGroupName == "" {
+		s.KKCluster.Spec.WorkerGroupName = infrav1beta1.DefaultWorkerGroupName
+	}
+	if s.KKCluster.Spec.ClusterGroupName == "" {
+		s.KKCluster.Spec.ClusterGroupName = infrav1beta1.DefaultClusterGroupName
+	}
+
+	return nil
+}
+
+// getInitialedInventory is a pre-processor that initializes the `Groups` of the `Inventory` to streamline
+// the formal processing in the `dealWithHostSelector` function.
+func (r *KKClusterReconciler) getInitialedInventory(ctx context.Context, s *scope.ClusterScope) (
+	*kkcorev1.Inventory, error) {
+	inv, err := GetInventory(ctx, r.Client, s)
+	if err != nil {
+		return nil, err
+	}
+
+	hosts := inv.Spec.Hosts
+	groups := inv.Spec.Groups
+	if groups == nil {
+		groups = make(map[string]kkcorev1.InventoryGroup)
+	}
+
+	// Assert hosts must be available (not empty).
+	if len(hosts) == 0 {
+		err := errors.New("unavailable hosts")
+		klog.V(5).InfoS("Unavailable hosts, please check `Inventory` resource")
+
+		return nil, err
+	}
+
+	// Initialize the groups kubernetes needs.
+	groups[s.KKCluster.Spec.ClusterGroupName] = kkcorev1.InventoryGroup{
+		Groups: []string{s.KKCluster.Spec.ControlPlaneGroupName, s.KKCluster.Spec.WorkerGroupName},
+	}
+	if _, exists := groups[s.KKCluster.Spec.ControlPlaneGroupName]; !exists {
+		groups[s.KKCluster.Spec.ControlPlaneGroupName] = kkcorev1.InventoryGroup{}
+	}
+	if _, exists := groups[s.KKCluster.Spec.WorkerGroupName]; !exists {
+		groups[s.KKCluster.Spec.WorkerGroupName] = kkcorev1.InventoryGroup{}
+	}
+	inv.Spec.Groups = groups
+
+	if err := controllerutil.SetControllerReference(s.KKCluster, inv, r.Scheme); err != nil {
+		return nil, err
+	}
+
+	if err := r.Update(ctx, inv); err != nil {
+		klog.ErrorS(err, "Failed to update Inventory", "Inventory", inv)
+
+		return nil, err
+	}
+
+	return inv, nil
+}
+
+func (r *KKClusterReconciler) updateInventoryStatus(ctx context.Context, s *scope.ClusterScope, inv *kkcorev1.Inventory) error {
+	// Get HostMachineMapping, and create a new one for the update.
+	hostMachineMapping := inv.Status.HostMachineMapping
+	newHostMachineMapping := make(map[string]kkcorev1.MachineBinding)
+
+	// Get ControlPlaneGroup and WorkerGroup.
+	controlPlaneGroup := inv.Spec.Groups[s.KKCluster.Spec.ControlPlaneGroupName]
+	workerGroup := inv.Spec.Groups[s.KKCluster.Spec.WorkerGroupName]
+
+	// Update control-plane nodes.
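+	// Existing bindings are carried over; hosts seen for the first time start with an
+	// empty Machine name and the control-plane role.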
+	for _, h := range controlPlaneGroup.Hosts {
+		if binding, exists := hostMachineMapping[h]; exists {
+			newHostMachineMapping[h] = binding
+		} else {
+			newHostMachineMapping[h] = kkcorev1.MachineBinding{
+				Machine: "",
+				Roles:   []string{infrav1beta1.ControlPlaneRole},
+			}
+		}
+	}
+
+	// Update worker nodes.
+	for _, h := range workerGroup.Hosts {
+		if binding, exists := hostMachineMapping[h]; exists {
+			newHostMachineMapping[h] = binding
+		} else {
+			newHostMachineMapping[h] = kkcorev1.MachineBinding{
+				Machine: "",
+				Roles:   []string{infrav1beta1.WorkerRole},
+			}
+		}
+	}
+
+	// Update HostMachineMapping.
+	inv.Status.HostMachineMapping = newHostMachineMapping
+
+	return r.Client.Status().Update(ctx, inv)
+}
+
+// validateInventoryGroup validates a group defined in `Inventory`, fills it up to the target size, and returns it.
+// ghosts is the group's current host list, and hosts is all usable hosts of the `Inventory`.
+// Param::gName is the name of the `InventoryGroup`.
+// Param::cnt is the target host count of the `InventoryGroup`.
+// Param::unavailableHosts & Param::unavailableGroups are used to remove duplicates, ensuring hosts are not assigned
+// twice and avoiding cyclic group dependencies.
+// Param::isRepeatable defines whether the group's hosts are repeatable. If true, the selected hosts are not added to
+// unavailableHosts.
+func validateInventoryGroup(
+	kkc *infrav1beta1.KKCluster, inv *kkcorev1.Inventory, gName string, cnt int,
+	unavailableHosts, unavailableGroups map[string]struct{}, isRepeatable bool,
+) (kkcorev1.InventoryGroup, error) {
+	// Get the hosts already assigned to this group
+	ghosts := kkcorev1.GetHostsFromGroup(inv, gName, unavailableHosts, unavailableGroups)
+	hosts := inv.Spec.Hosts
+
+	// Check if we have fewer hosts than needed
+	if len(ghosts) < cnt {
+		var availableHosts []string
+		for host := range hosts {
+			if _, exists := unavailableHosts[host]; !exists {
+				availableHosts = append(availableHosts, host)
+			}
+		}
+
+		remainingHostsCount := cnt - len(ghosts)
+		// If not enough hosts are available, return an error
+		if len(availableHosts) < remainingHostsCount {
+			conditions.MarkTrueWithNegativePolarity(kkc, infrav1beta1.HostsReadyCondition, infrav1beta1.HostsNotReadyReason,
+				clusterv1.ConditionSeverityError, infrav1beta1.HostsSelectFailedMessage,
+			)
+
+			return kkcorev1.InventoryGroup{}, fmt.Errorf("not enough available hosts for group %s", gName)
+		}
+
+		// Select the remaining hosts based on the selector mode
+		hs, err := groupHostsSelector(availableHosts, remainingHostsCount, kkc.Spec.NodeSelectorMode)
+		if err != nil {
+			return kkcorev1.InventoryGroup{}, err
+		}
+
+		// Append the selected hosts to ghosts
+		ghosts = append(ghosts, hs...)
+	} else {
+		var err error
+		ghosts, err = groupHostsSelector(ghosts, cnt, kkc.Spec.NodeSelectorMode)
+		if err != nil {
+			return kkcorev1.InventoryGroup{}, err
+		}
+	}
+
+	if !isRepeatable {
+		for _, host := range ghosts {
+			if _, exists := unavailableHosts[host]; !exists {
+				unavailableHosts[host] = struct{}{}
+			}
+		}
+	}
+
+	return kkcorev1.InventoryGroup{
+		Groups: make([]string, 0),
+		Hosts:  ghosts,
+		Vars:   inv.Spec.Groups[gName].Vars,
+	}, nil
+}
+
+// groupHostsSelector selects nodes based on the NodeSelectorMode
+func groupHostsSelector(availableHosts []string, cnt int, nodeSelectMode infrav1beta1.NodeSelectorMode) ([]string, error) {
+	if cnt >= len(availableHosts) {
+		return availableHosts, nil
+	}
+
+	selectedHosts := append([]string(nil), availableHosts...)
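+	// The copy above protects the caller's slice: RandomNodeSelectorMode shuffles it
+	// before truncating to cnt, while SequenceNodeSelectorMode (and any unrecognized
+	// mode) simply takes the first cnt hosts.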
+ + switch nodeSelectMode { + case infrav1beta1.RandomNodeSelectorMode: + shuffledHosts, err := secureShuffle(selectedHosts) + if err != nil { + return nil, err + } + + return shuffledHosts[:cnt], nil + + case infrav1beta1.SequenceNodeSelectorMode: + return selectedHosts[:cnt], nil + } + + return selectedHosts[:cnt], nil +} + +// Secure shuffle function using crypto/rand +func secureShuffle(hosts []string) ([]string, error) { + shuffledHosts := append([]string(nil), hosts...) + n := len(shuffledHosts) + + for i := n - 1; i > 0; i-- { + j, err := secureRandomInt(i + 1) + if err != nil { + return nil, err + } + shuffledHosts[i], shuffledHosts[j] = shuffledHosts[j], shuffledHosts[i] + } + + return shuffledHosts, nil +} + +// Generate secure random integer using crypto/rand +func secureRandomInt(upperLimit int) (int, error) { + nBig, err := rand.Int(rand.Reader, big.NewInt(int64(upperLimit))) + if err != nil { + return 0, err + } + + return int(nBig.Int64()), nil +} + +// generatePipelineByTemplate function can generate a generic pipeline by `PipelineTemplate`. +func (r *KKClusterReconciler) generatePipelineByTemplate(ctx context.Context, s *scope.ClusterScope, name string, playbook string, +) (*kkcorev1.Pipeline, error) { + ref := s.KKCluster.Spec.PipelineRef + if ref.Namespace == "" { + ref.Namespace = s.Namespace() + } + + pipelineTemplate, err := GetPipelineTemplateFromRef(ctx, r.Client, s.KKCluster.Spec.PipelineRef) + if err != nil { + return nil, err + } + + pipeline := &kkcorev1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: name + "-", + Namespace: s.Namespace(), + Labels: map[string]string{ + clusterv1.ClusterNameLabel: s.Name(), + pipelineNameLabel: name, + }, + }, + Spec: kkcorev1.PipelineSpec{ + Project: pipelineTemplate.Spec.Project, + Playbook: playbook, + InventoryRef: pipelineTemplate.Spec.InventoryRef, + ConfigRef: pipelineTemplate.Spec.ConfigRef, + Tags: pipelineTemplate.Spec.Tags, + SkipTags: pipelineTemplate.Spec.SkipTags, + Debug: pipelineTemplate.Spec.Debug, + JobSpec: pipelineTemplate.Spec.JobSpec, + }, + } + + // Check if `.Spec.InventoryRef` is nil + if pipeline.Spec.InventoryRef == nil { + err := errors.New("pipeline must be generated with `.Spec.inventoryRef`, but given nil") + klog.V(5).ErrorS(err, "", "pipeline", pipeline.Name) + + return nil, err + } + + // Create pipeline. + if err := controllerutil.SetControllerReference(s.KKCluster, pipeline, r.Scheme); err != nil { + return nil, err + } + + if err := r.Client.Create(ctx, pipeline); err != nil { + return nil, err + } + klog.V(5).InfoS("Create pipeline successfully", "pipeline", pipeline.Name) + + return pipeline, nil +} + +// generateKKMachines function can generate KKMachines bind with both control plane nodes and worker nodes. +// func (r *KKClusterReconciler) generateKKMachines(ctx context.Context, s *scope.ClusterScope) error { +// // Fetch groups and hosts of `Inventory`, replicas of `KubeadmControlPlane` and `MachineDeployment`. 
+// inv, err := GetInventory(ctx, r.Client, s) +// if err != nil { +// return err +// } +// +// kcp, err := GetKubeadmControlPlane(ctx, r.Client, s) +// if err != nil { +// return err +// } +// +// md, err := GetMachineDeployment(ctx, r.Client, s) +// if err != nil { +// return err +// } +// +// controlPlaneGroup := inv.Spec.Groups[s.KKCluster.Spec.ControlPlaneGroupName] +// workerGroup := inv.Spec.Groups[s.KKCluster.Spec.WorkerGroupName] +// controlPlaneInfraRef := kcp.Spec.MachineTemplate.InfrastructureRef +// workerInfraRef := kcp.Spec.MachineTemplate.InfrastructureRef +// +// // Iterate through the control plane hosts +// for _, hostName := range controlPlaneGroup.Hosts { +// // Generate labels for control plane +// labels := map[string]string{ +// clusterv1.ClusterNameLabel: s.Name(), +// clusterv1.MachineControlPlaneLabel: "true", +// } +// +// // Check if the KKMachine already exists +// kkMachine := &infrav1beta1.KKMachine{} +// err := r.Client.Get(ctx, ctrlclient.ObjectKey{ +// Name: s.Name() + "-" + hostName, // Name convention +// Namespace: s.Namespace(), +// }, kkMachine) +// +// if err != nil && apierrors.IsNotFound(err) { +// if err := r.generateKKMachine(ctx, s, controlPlaneInfraRef, hostName, labels); err != nil { +// return err +// } +// } else if err == nil { +// // If exists, update the KKMachine if necessary +// if err := r.updateKKMachine(ctx, kkMachine, controlPlaneInfraRef, labels); err != nil { +// return err +// } +// } +// } +// +// // Iterate through the worker group hosts +// for _, hostName := range workerGroup.Hosts { +// // Generate labels for worker nodes +// labels := map[string]string{ +// clusterv1.ClusterNameLabel: s.Name(), +// clusterv1.MachineDeploymentNameLabel: md.Name, +// } +// +// // Check if the KKMachine already exists +// kkMachine := &infrav1beta1.KKMachine{} +// err := r.Client.Get(ctx, ctrlclient.ObjectKey{ +// Name: s.Name() + "-" + hostName, // Name convention +// Namespace: s.Namespace(), +// }, kkMachine) +// +// if err != nil && apierrors.IsNotFound(err) { +// // If not found, generate a new KKMachine +// if err := r.generateKKMachine(ctx, s, workerInfraRef, hostName, labels); err != nil { +// return err +// } +// } else if err == nil { +// // If exists, update the KKMachine if necessary +// if err := r.updateKKMachine(ctx, kkMachine, workerInfraRef, labels); err != nil { +// return err +// } +// } +// } +// +// return nil +// } + +// generateKKMachine function is used for generate a `KKMachine` resource by `Ref` and `providerID` given by other CRDs. +// Param::providerID: from `Inventory` resource, Param::ref: from `KubeadmControlPlane` for `MachineDeployment` resource. +// Param::ref is used for get `KKMachineTemplate` +// Param::labels used for bind with other CRDs. 
+// func (r *KKClusterReconciler) generateKKMachine(ctx context.Context, s *scope.ClusterScope, ref corev1.ObjectReference,
+//	providerID string, labels map[string]string) error {
+//	kkMachineTemplate, err := GetKKMachineTemplateFromRef(ctx, r.Client, ref)
+//	if err != nil {
+//		return err
+//	}
+//
+//	// Create a new KKMachine based on the template
+//	kkMachine := &infrav1beta1.KKMachine{
+//		ObjectMeta: metav1.ObjectMeta{
+//			Name:      s.Name() + "-" + providerID,
+//			Namespace: s.Namespace(),
+//			Labels:    kkMachineTemplate.Spec.Template.ObjectMeta.Labels,
+//		},
+//		Spec: kkMachineTemplate.Spec.Template.Spec,
+//	}
+//
+//	// Add the additional labels provided
+//	for k, v := range labels {
+//		kkMachine.ObjectMeta.Labels[k] = v
+//	}
+//
+//	// Assign the providerID to the new KKMachine
+//	kkMachine.Spec.ProviderID = &providerID
+//
+//	// Create the new KKMachine resource
+//	return r.Client.Create(ctx, kkMachine)
+// }
+
+// updateKKMachine updates an existing `KKMachine` resource, usually its `labels` and `roles`.
+// func (r *KKClusterReconciler) updateKKMachine(ctx context.Context, kkm *infrav1beta1.KKMachine,
+//	ref corev1.ObjectReference, labels map[string]string) error {
+//	kkMachineTemplate, err := GetKKMachineTemplateFromRef(ctx, r.Client, ref)
+//	if err != nil {
+//		return err
+//	}
+//
+//	// Update labels if they don't exist
+//	for key, value := range labels {
+//		if _, exists := kkm.Labels[key]; !exists {
+//			kkm.Labels[key] = value
+//		}
+//	}
+//
+//	// Append roles if they are missing
+//	// convert the old roles to a set for de-duplication
+//	roleSet := make(map[string]struct{})
+//	for _, role := range kkm.Spec.Roles {
+//		roleSet[role] = struct{}{}
+//	}
+//	// Append missing roles from the template
+//	for _, role := range kkMachineTemplate.Spec.Template.Spec.Roles {
+//		if _, exists := roleSet[role]; !exists {
+//			kkm.Spec.Roles = append(kkm.Spec.Roles, role)
+//		}
+//	}
+//
+//	// Update the KKMachine resource
+//	return r.Client.Update(ctx, kkm)
+// }
+
+// GetInventory returns the cluster's `Inventory` resource.
+func GetInventory(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope) (*kkcorev1.Inventory, error) {
+	inventory := &kkcorev1.Inventory{}
+
+	namespace := s.KKCluster.Spec.InventoryRef.Namespace
+	if namespace == "" {
+		namespace = s.Namespace()
+	}
+
+	err := client.Get(ctx,
+		types.NamespacedName{
+			Name:      s.KKCluster.Spec.InventoryRef.Name,
+			Namespace: namespace,
+		}, inventory)
+	if err != nil {
+		klog.V(5).InfoS("KKCluster must set `InventoryRef`, but it was not found",
+			"Inventory", ctrlclient.ObjectKeyFromObject(inventory))
+
+		return nil, fmt.Errorf("[%s]: KKCluster must set `InventoryRef`, but it was not found",
+			s.Cluster.Name)
+	}
+
+	return inventory, nil
+}
+
+// GetConfig returns the cluster's `Config` resource.
+func GetConfig(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope) *kkcorev1.Config {
+	config := &kkcorev1.Config{}
+
+	namespace := s.KKCluster.Spec.ConfigRef.Namespace
+	if namespace == "" {
+		namespace = s.Namespace()
+	}
+
+	err := client.Get(ctx,
+		types.NamespacedName{
+			Name:      s.KKCluster.Spec.ConfigRef.Name,
+			Namespace: namespace,
+		}, config)
+
+	if err != nil {
+		klog.V(5).InfoS("No customized `Config` resource found for cluster, using the default configuration",
+			"Config", ctrlclient.ObjectKeyFromObject(config))
+
+		return nil
+	}
+
+	return config
+}
+
+// GetKubeadmControlPlane returns the cluster's `KubeadmControlPlane` resource.
+func GetKubeadmControlPlane(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope) (*v1beta1.KubeadmControlPlane, error) {
+	kcp := &v1beta1.KubeadmControlPlane{}
+
+	namespace := s.Cluster.Spec.ControlPlaneRef.Namespace
+	if namespace == "" {
+		namespace = s.Namespace()
+	}
+
+	if err := client.Get(ctx,
+		types.NamespacedName{
+			Name:      s.Cluster.Spec.ControlPlaneRef.Name,
+			Namespace: namespace,
+		}, kcp,
+	); err != nil {
+		return nil, err
+	}
+
+	return kcp, nil
+}
+
+// GetMachineDeployment returns the cluster's `MachineDeployment` resource.
+func GetMachineDeployment(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope) (*clusterv1.MachineDeployment, error) {
+	mdList := &clusterv1.MachineDeploymentList{}
+
+	namespace := s.KKCluster.Spec.InventoryRef.Namespace
+	if namespace == "" {
+		namespace = s.Namespace()
+	}
+
+	err := client.List(ctx, mdList, ctrlclient.InNamespace(namespace), ctrlclient.MatchingLabels{
+		clusterv1.ClusterNameLabel: s.Name(),
+	})
+	if err != nil && !apierrors.IsNotFound(err) {
+		return nil, fmt.Errorf("error listing MachineDeployments: %w", err)
+	}
+
+	if len(mdList.Items) == 0 {
+		return nil, errors.New("no MachineDeployment found for cluster " + s.Name())
+	}
+
+	if len(mdList.Items) > 1 {
+		return nil, errors.New("multiple MachineDeployments found for cluster " + s.Name())
+	}
+
+	return &mdList.Items[0], nil
+}
+
+// GetPipelineTemplateFromRef fetches the `PipelineTemplate` used to generate `Pipeline` resources.
+func GetPipelineTemplateFromRef(ctx context.Context, client ctrlclient.Client, ref *corev1.ObjectReference) (*kkcorev1.PipelineTemplate, error) {
+	pipelineTemplate := &kkcorev1.PipelineTemplate{}
+
+	namespacedName := types.NamespacedName{
+		Namespace: ref.Namespace,
+		Name:      ref.Name,
+	}
+
+	if err := client.Get(ctx, namespacedName, pipelineTemplate); err != nil {
+		return nil, err
+	}
+
+	return pipelineTemplate, nil
+}
+
+// GetKKMachineTemplateFromRef returns the `KKMachineTemplate` resource referenced by an `ObjectReference`,
+// e.g. an `ObjectReference` from the `KubeadmControlPlane` & `MachineDeployment` resources.
+// func GetKKMachineTemplateFromRef(ctx context.Context, client ctrlclient.Client, ref corev1.ObjectReference) (*infrav1beta1.KKMachineTemplate, error) {
+//	kkMachineTemplate := &infrav1beta1.KKMachineTemplate{}
+//
+//	namespacedName := types.NamespacedName{
+//		Namespace: ref.Namespace,
+//		Name:      ref.Name,
+//	}
+//
+//	if err := client.Get(ctx, namespacedName, kkMachineTemplate); err != nil {
+//		return nil, err
+//	}
+//
+//	return kkMachineTemplate, nil
+// }
+
+// SetupWithManager sets up the controller with the Manager.
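+// Two event filters keep reconciles cheap: owned Pipelines only trigger on a terminal
+// phase (Succeed or Failed), and KKCluster updates that change nothing besides status
+// and resourceVersion are ignored.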
+func (r *KKClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + pipelinePhaseFilter := predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + newPipeline, ok := e.ObjectNew.(*kkcorev1.Pipeline) + if !ok { + return false + } + + return newPipeline.Status.Phase == kkcorev1.PipelinePhaseSucceed || newPipeline.Status.Phase == kkcorev1.PipelinePhaseFailed + }, + CreateFunc: func(e event.CreateEvent) bool { + pipeline, ok := e.Object.(*kkcorev1.Pipeline) + if !ok { + return false + } + + return pipeline.Status.Phase == kkcorev1.PipelinePhaseSucceed || pipeline.Status.Phase == kkcorev1.PipelinePhaseFailed + }, + } + + // Avoid reconciling if the event triggering the reconciliation is related to incremental status updates + // for KKCluster resources only + kkClusterFilter := predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + oldCluster, okOld := e.ObjectOld.(*infrav1beta1.KKCluster) + newCluster, okNew := e.ObjectNew.(*infrav1beta1.KKCluster) + + if !okOld || !okNew { + return true + } + + oldClusterCopy := oldCluster.DeepCopy() + newClusterCopy := newCluster.DeepCopy() + + oldClusterCopy.Status = infrav1beta1.KKClusterStatus{} + newClusterCopy.Status = infrav1beta1.KKClusterStatus{} + + oldClusterCopy.ObjectMeta.ResourceVersion = "" + newClusterCopy.ObjectMeta.ResourceVersion = "" + + return !reflect.DeepEqual(oldClusterCopy, newClusterCopy) + }, + } + + return ctrl.NewControllerManagedBy(mgr). + WithOptions(ctrlcontroller.Options{ + MaxConcurrentReconciles: r.MaxConcurrentReconciles, + }). + WithEventFilter(predicates.ResourceIsNotExternallyManaged(ctrl.LoggerFrom(ctx))). + WithEventFilter(kkClusterFilter). + For(&infrav1beta1.KKCluster{}). + Owns(&kkcorev1.Pipeline{}, builder.WithPredicates(pipelinePhaseFilter)). + Owns(&kkcorev1.Inventory{}). + Complete(r) +} diff --git a/pkg/controllers/kkmachine_controller.go b/pkg/controllers/kkmachine_controller.go new file mode 100644 index 000000000..62e593c17 --- /dev/null +++ b/pkg/controllers/kkmachine_controller.go @@ -0,0 +1,325 @@ +/* +Copyright 2024 The KubeSphere Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package controllers
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+
+	"sigs.k8s.io/cluster-api/util/conditions"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+
+	"k8s.io/klog/v2"
+
+	infrav1beta1 "github.com/kubesphere/kubekey/v4/pkg/apis/capkk/v1beta1"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/client-go/tools/record"
+	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/annotations"
+	"sigs.k8s.io/cluster-api/util/predicates"
+	ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller"
+	ctrlfinalizer "sigs.k8s.io/controller-runtime/pkg/finalizer"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	"github.com/kubesphere/kubekey/v4/pkg/scope"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// KKMachineReconciler reconciles a KKMachine object
+type KKMachineReconciler struct {
+	ctrlclient.Client
+	Scheme *runtime.Scheme
+	record.EventRecorder
+
+	ctrlfinalizer.Finalizers
+	MaxConcurrentReconciles int
+}
+
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
+// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch
+// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
+
+func (r *KKMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, retErr error) {
+	// Fetch the KKMachine.
+	kkMachine := &infrav1beta1.KKMachine{}
+	err := r.Get(ctx, req.NamespacedName, kkMachine)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+
+		return ctrl.Result{}, err
+	}
+
+	// Fetch the Machine.
+	machine, err := util.GetOwnerMachine(ctx, r.Client, kkMachine.ObjectMeta)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	if machine == nil {
+		klog.V(5).InfoS("Machine has not yet set OwnerRef",
+			"ProviderID", kkMachine.Spec.ProviderID)
+
+		return ctrl.Result{}, nil
+	}
+
+	klog.V(4).InfoS("Fetched machine", "machine", machine.Name)
+
+	// Fetch the Cluster & KKCluster.
+	cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
+	if err != nil {
+		klog.V(5).InfoS("Machine is missing cluster label or cluster does not exist")
+
+		return ctrl.Result{}, nil
+	}
+	klog.V(4).InfoS("Fetched cluster", "cluster", cluster.Name)
+
+	if annotations.IsPaused(cluster, kkMachine) {
+		klog.V(5).InfoS("KKMachine or linked Cluster is marked as paused. Won't reconcile")
+
+		return ctrl.Result{}, nil
+	}
+
+	// Handle deletion early, to avoid the `KKCluster` being deleted before the `KKMachine`, which may cause problems.
+	if !kkMachine.ObjectMeta.DeletionTimestamp.IsZero() {
+		r.reconcileDelete(kkMachine)
+
+		return ctrl.Result{}, nil
+	}
+
+	kkCluster := &infrav1beta1.KKCluster{}
+	kkClusterName := ctrlclient.ObjectKey{
+		Namespace: kkMachine.Namespace,
+		Name:      cluster.Spec.InfrastructureRef.Name,
+	}
+	if err := r.Client.Get(ctx, kkClusterName, kkCluster); err != nil {
+		klog.V(5).InfoS("KKCluster is not ready yet")
+
+		return ctrl.Result{}, nil
+	}
+	klog.V(4).InfoS("Fetched kk-cluster", "kk-cluster", kkCluster.Name)
+
+	// Create the cluster scope.
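+	// The machine reconciler also needs a cluster scope: refreshProviderID reads the
+	// Inventory through it when binding hosts to machines.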
+	clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
+		Client:         r.Client,
+		Cluster:        cluster,
+		KKCluster:      kkCluster,
+		ControllerName: "kk-cluster",
+	})
+	if err != nil {
+		return reconcile.Result{}, fmt.Errorf("failed to create scope: %w", err)
+	}
+
+	// Create the machine scope.
+	machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{
+		Client:       r.Client,
+		ClusterScope: clusterScope,
+		Machine:      machine,
+		KKMachine:    kkMachine,
+	})
+	if err != nil {
+		klog.V(5).ErrorS(err, "failed to create scope")
+
+		return ctrl.Result{}, err
+	}
+
+	// Always close the scope when exiting this function, so we can persist any KKMachine changes.
+	defer func() {
+		if err := machineScope.Close(ctx); err != nil && retErr == nil {
+			klog.V(5).ErrorS(err, "failed to patch object")
+			retErr = err
+		}
+	}()
+
+	return r.reconcileNormal(ctx, machineScope)
+}
+
+func (r *KKMachineReconciler) reconcileNormal(ctx context.Context, s *scope.MachineScope) (reconcile.Result, error) {
+	klog.V(4).Info("Reconcile KKMachine normal")
+
+	// If the KKMachine doesn't have our finalizer, add it.
+	if controllerutil.AddFinalizer(s.KKMachine, infrav1beta1.MachineFinalizer) {
+		// Register the finalizer immediately to avoid orphaning KK resources on delete
+		if err := s.PatchObject(ctx); err != nil {
+			return reconcile.Result{}, err
+		}
+	}
+
+	if !s.ClusterScope.Cluster.Status.InfrastructureReady {
+		return reconcile.Result{}, nil
+	}
+
+	if s.IsRole(infrav1beta1.ControlPlaneRole) {
+		s.KKMachine.Labels[clusterv1.MachineControlPlaneLabel] = "true"
+	}
+
+	if !conditions.Has(s.ClusterScope.KKCluster, infrav1beta1.ClusterReadyCondition) {
+		return reconcile.Result{}, nil
+	}
+
+	if err := refreshProviderID(ctx, r.Client, s); err != nil {
+		return reconcile.Result{}, err
+	}
+
+	s.KKMachine.Status.Ready = true
+
+	return ctrl.Result{
+		RequeueAfter: 30 * time.Second,
+	}, nil
+}
+
+func (r *KKMachineReconciler) reconcileDelete(kkMachine *infrav1beta1.KKMachine) {
+	klog.V(4).Info("Reconcile KKMachine delete")
+
+	// Machine is deleted so remove the finalizer.
+	controllerutil.RemoveFinalizer(kkMachine, infrav1beta1.MachineFinalizer)
+}
+
+func refreshProviderID(ctx context.Context, client ctrlclient.Client, s *scope.MachineScope) error {
+	inv, err := GetInventory(ctx, client, s.ClusterScope)
+	if err != nil {
+		return err
+	}
+
+	hostMachineMap := inv.Status.HostMachineMapping
+	if hostMachineMap == nil {
+		err := errors.New("failed to get host machine mapping")
+		klog.V(5).ErrorS(err, "", "Inventory", inv)
+
+		return err
+	}
+
+	// Clear the ProviderID if the machine no longer exists in the mapping, or its roles changed.
+	if s.KKMachine.Spec.ProviderID != nil {
+		exist := false
+		for _, bindInfo := range hostMachineMap {
+			if bindInfo.Machine == s.Name() {
+				exist = true
+				if !slicesEqualUnordered(bindInfo.Roles, s.KKMachine.Spec.Roles) {
+					s.KKMachine.Spec.ProviderID = nil
+				}
+
+				break
+			}
+		}
+		if !exist {
+			s.KKMachine.Spec.ProviderID = nil
+		}
+	}
+
+	// Add a ProviderID if there is an unbound host.
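+	// The first host whose binding has no machine yet and whose roles match this KKMachine
+	// is claimed: its host name becomes the providerID and the binding is patched back into
+	// `Inventory.Status`. Map iteration order is random, so which free host is picked is
+	// non-deterministic.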
+ if s.KKMachine.Spec.ProviderID == nil { + for hn, bindInfo := range hostMachineMap { + if bindInfo.Machine != "" || !slicesEqualUnordered(bindInfo.Roles, s.KKMachine.Spec.Roles) { + continue + } + + // Bind ProviderID + s.SetProviderID(hn) + + // Update Inventory + expected := inv.DeepCopy() + bindInfo.Machine = s.Name() + inv.Status.HostMachineMapping[hn] = bindInfo + if err := client.Status().Patch(ctx, inv, ctrlclient.MergeFrom(expected)); err != nil { + return err + } + + break + } + } + + return nil +} + +func slicesEqualUnordered(a, b []string) bool { + if len(a) != len(b) { + return false + } + + countA := make(map[string]int) + countB := make(map[string]int) + + for _, item := range a { + countA[item]++ + } + + for _, item := range b { + countB[item]++ + } + + if len(countA) != len(countB) { + return false + } + + for key, count := range countA { + if countB[key] != count { + return false + } + } + + return true +} + +// SetupWithManager sets up the controller with the Manager. +func (r *KKMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + // Avoid reconciling if the event triggering the reconciliation is related to incremental status updates + // for KKMachine resources only + kkMachineFilter := predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + oldKKMachine, okOld := e.ObjectOld.(*infrav1beta1.KKMachine) + newKKMachine, okNew := e.ObjectNew.(*infrav1beta1.KKMachine) + + if !okOld || !okNew { + return false + } + + oldCluster := oldKKMachine.DeepCopy() + newCluster := newKKMachine.DeepCopy() + + oldCluster.Status = infrav1beta1.KKMachineStatus{} + newCluster.Status = infrav1beta1.KKMachineStatus{} + + oldCluster.ObjectMeta.ResourceVersion = "" + newCluster.ObjectMeta.ResourceVersion = "" + + return !reflect.DeepEqual(oldCluster, newCluster) + }, + } + + return ctrl.NewControllerManagedBy(mgr). + WithOptions(ctrlcontroller.Options{ + MaxConcurrentReconciles: r.MaxConcurrentReconciles, + }). + WithEventFilter(predicates.ResourceIsNotExternallyManaged(ctrl.LoggerFrom(ctx))). + WithEventFilter(kkMachineFilter). + For(&infrav1beta1.KKMachine{}). + Complete(r) +} From 1d07a27f1f00d6ea0706c3f31ddd2598a24b3570 Mon Sep 17 00:00:00 2001 From: DingYongliang <1521323439@qq.com> Date: Tue, 24 Sep 2024 21:50:18 +0800 Subject: [PATCH 2/9] capkk scope --- pkg/scope/cluster.go | 180 +++++++++++++++++++++++++++++++++++++++++++ pkg/scope/machine.go | 166 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 346 insertions(+) create mode 100644 pkg/scope/cluster.go create mode 100644 pkg/scope/machine.go diff --git a/pkg/scope/cluster.go b/pkg/scope/cluster.go new file mode 100644 index 000000000..c1e1c4f8a --- /dev/null +++ b/pkg/scope/cluster.go @@ -0,0 +1,180 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package scope + +import ( + "context" + "errors" + "fmt" + "net" + + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/kubesphere/kubekey/v4/pkg/apis/capkk/v1beta1" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/patch" +) + +const ( + // KKClusterLabelName is the label set on KKMachines and KKInstances linked to a kkCluster. + KKClusterLabelName = "kkcluster.infrastructure.cluster.x-k8s.io/cluster-name" +) + +// ClusterScopeParams defines the input parameters used to create a new Scope. +type ClusterScopeParams struct { + Client ctrlclient.Client + Cluster *clusterv1.Cluster + KKCluster *v1beta1.KKCluster + ControllerName string +} + +// NewClusterScope creates a new Scope from the supplied parameters. +// This is meant to be called for each reconcile iteration. +func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) { + if params.Cluster == nil { + return nil, errors.New("failed to generate new scope from nil Cluster") + } + if params.KKCluster == nil { + return nil, errors.New("failed to generate new scope from nil KKCluster") + } + + clusterScope := &ClusterScope{ + client: params.Client, + Cluster: params.Cluster, + KKCluster: params.KKCluster, + controllerName: params.ControllerName, + } + + helper, err := patch.NewHelper(params.KKCluster, params.Client) + if err != nil { + return nil, fmt.Errorf("%w: failed to init patch helper", err) + } + + clusterScope.patchHelper = helper + + return clusterScope, nil +} + +// ClusterScope defines the basic context for an actuator to operate upon. +type ClusterScope struct { + client ctrlclient.Client + patchHelper *patch.Helper + + Cluster *clusterv1.Cluster + KKCluster *v1beta1.KKCluster + + controllerName string +} + +// Name returns the CAPI cluster name. +func (s *ClusterScope) Name() string { + return s.Cluster.Name +} + +// Namespace returns the cluster namespace. +func (s *ClusterScope) Namespace() string { + return s.Cluster.Namespace +} + +// InfraClusterName returns the KK cluster name. +func (s *ClusterScope) InfraClusterName() string { + return s.KKCluster.Name +} + +// KubernetesClusterName is the name of the Kubernetes cluster. +func (s *ClusterScope) KubernetesClusterName() string { + return s.Cluster.Name +} + +// GetKKMachines returns the list of KKMachines for a KKCluster. +func (s *ClusterScope) GetKKMachines(ctx context.Context) (*v1beta1.KKMachineList, error) { + kkMachineList := &v1beta1.KKMachineList{} + if err := s.client.List( + ctx, + kkMachineList, + ctrlclient.InNamespace(s.KKCluster.Namespace), + ctrlclient.MatchingLabels{ + KKClusterLabelName: s.KKCluster.Name, + }, + ); err != nil { + return nil, fmt.Errorf("%w: failed to list KKMachines", err) + } + + return kkMachineList, nil +} + +// ControlPlaneEndpoint returns the control plane endpoint. +func (s *ClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint { + return s.KKCluster.Spec.ControlPlaneEndpoint +} + +// PatchObject persists the cluster configuration and status. +func (s *ClusterScope) PatchObject(ctx context.Context) error { + return s.patchHelper.Patch( + ctx, + s.KKCluster, + patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + clusterv1.ReadyCondition, + v1beta1.HostsReadyCondition, + v1beta1.PreparationReadyCondition, + v1beta1.EtcdReadyCondition, + v1beta1.BinaryInstallCondition, + v1beta1.BootstrapReadyCondition, + v1beta1.ClusterReadyCondition, + }}) +} + +// Close closes the current scope persisting the cluster configuration and status. 
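+// It delegates to PatchObject, which patches the KKCluster together with the owned
+// conditions listed above.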
+func (s *ClusterScope) Close(ctx context.Context) error { + return s.PatchObject(ctx) +} + +// ControllerName returns the name of the controller that +// created the ClusterScope. +func (s *ClusterScope) ControllerName() string { + return s.controllerName +} + +// Distribution returns Kubernetes distribution of the cluster. +func (s *ClusterScope) Distribution() string { + return s.KKCluster.Spec.Distribution +} + +// ControlPlaneLoadBalancer returns the KKLoadBalancerSpec. +func (s *ClusterScope) ControlPlaneLoadBalancer() *v1beta1.KKLoadBalancerSpec { + lb := s.KKCluster.Spec.ControlPlaneLoadBalancer + if lb == nil { + return nil + } + + ip := net.ParseIP(lb.Host) + if ip == nil { + return nil + } + + return lb +} + +// APIServerPort returns the APIServerPort to use when creating the load balancer. +func (s *ClusterScope) APIServerPort() int32 { + if s.Cluster.Spec.ClusterNetwork != nil && s.Cluster.Spec.ClusterNetwork.APIServerPort != nil { + return *s.Cluster.Spec.ClusterNetwork.APIServerPort + } + + return 6443 +} diff --git a/pkg/scope/machine.go b/pkg/scope/machine.go new file mode 100644 index 000000000..335a03ef7 --- /dev/null +++ b/pkg/scope/machine.go @@ -0,0 +1,166 @@ +/* + Copyright 2022 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package scope + +import ( + "context" + "errors" + "fmt" + + infrastructurev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/capkk/v1beta1" + + "k8s.io/utils/ptr" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + capierrors "sigs.k8s.io/cluster-api/errors" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/patch" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +// MachineScopeParams defines the input parameters used to create a new MachineScope. +type MachineScopeParams struct { + Client ctrlclient.Client + ClusterScope *ClusterScope + Machine *clusterv1.Machine + KKMachine *infrastructurev1alpha1.KKMachine +} + +// NewMachineScope creates a new MachineScope from the supplied parameters. +// This is meant to be called for each reconcile iteration. +func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { + if params.Client == nil { + return nil, errors.New("client is required when creating a MachineScope") + } + if params.ClusterScope == nil { + return nil, errors.New("cluster is required when creating a MachineScope") + } + if params.Machine == nil { + return nil, errors.New("machine is required when creating a MachineScope") + } + if params.KKMachine == nil { + return nil, errors.New("kk machine is required when creating a MachineScope") + } + + helper, err := patch.NewHelper(params.KKMachine, params.Client) + if err != nil { + return nil, fmt.Errorf("%w: failed to init patch helper", err) + } + + return &MachineScope{ + client: params.Client, + patchHelper: helper, + + Machine: params.Machine, + ClusterScope: params.ClusterScope, + KKMachine: params.KKMachine, + }, nil +} + +// MachineScope defines a scope defined around a machine and its cluster. 
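+// It bundles the CAPI Machine, the provider KKMachine and a patch helper, so
+// spec and status changes made while reconciling a single machine can be
+// persisted together through PatchObject/Close.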
+type MachineScope struct { + client ctrlclient.Client + patchHelper *patch.Helper + + ClusterScope *ClusterScope + Machine *clusterv1.Machine + KKMachine *infrastructurev1alpha1.KKMachine +} + +// Name returns the KKMachine name. +func (m *MachineScope) Name() string { + return m.KKMachine.Name +} + +// Namespace returns the namespace name. +func (m *MachineScope) Namespace() string { + return m.KKMachine.Namespace +} + +// IsControlPlane returns true if the machine is a control plane. +func (m *MachineScope) IsControlPlane() bool { + return util.IsControlPlaneMachine(m.Machine) +} + +// GetProviderID returns the KKMachine providerID from the spec. +func (m *MachineScope) GetProviderID() string { + if m.KKMachine.Spec.ProviderID != nil { + return *m.KKMachine.Spec.ProviderID + } + + return "" +} + +// SetProviderID sets the KKMachine providerID in spec. +func (m *MachineScope) SetProviderID(kkMachineID string) { + m.KKMachine.Spec.ProviderID = ptr.To(kkMachineID) +} + +// GetRoles returns the KKMachine roles. +func (m *MachineScope) GetRoles() []string { + return m.KKMachine.Spec.Roles +} + +// IsRole returns true if the machine has the given role. +func (m *MachineScope) IsRole(role string) bool { + for _, r := range m.KKMachine.Spec.Roles { + if r == role { + return true + } + } + + return false +} + +// SetReady sets the KKMachine Ready Status. +func (m *MachineScope) SetReady() { + m.KKMachine.Status.Ready = true +} + +// SetNotReady sets the KKMachine Ready Status to false. +func (m *MachineScope) SetNotReady() { + m.KKMachine.Status.Ready = false +} + +// SetFailureMessage sets the KKMachine status failure message. +func (m *MachineScope) SetFailureMessage(v error) { + m.KKMachine.Status.FailureMessage = ptr.To(v.Error()) +} + +// SetFailureReason sets the KKMachine status failure reason. +func (m *MachineScope) SetFailureReason(v capierrors.MachineStatusError) { + m.KKMachine.Status.FailureReason = &v +} + +// PatchObject persists the machine spec and status. +func (m *MachineScope) PatchObject(ctx context.Context) error { + // Always update the readyCondition by summarizing the state of other conditions. + // A step counter is added to represent progress during the provisioning process (instead we are hiding during the deletion process). + return m.patchHelper.Patch( + ctx, + m.KKMachine) +} + +// Close the MachineScope by updating the machine spec, machine status. +func (m *MachineScope) Close(ctx context.Context) error { + return m.PatchObject(ctx) +} + +// HasFailed returns the failure state of the machine scope. 
+func (m *MachineScope) HasFailed() bool { + return m.KKMachine.Status.FailureReason != nil || m.KKMachine.Status.FailureMessage != nil +} From 49f0e1710ac561fc4715c4be0bd553525db5c0af Mon Sep 17 00:00:00 2001 From: DingYongliang <1521323439@qq.com> Date: Sun, 29 Sep 2024 23:54:48 +0800 Subject: [PATCH 3/9] feat: update capkk controller --- pkg/const/scheme.go | 7 + pkg/controllers/kkcluster_controller.go | 253 +++++++++--------------- pkg/manager/controller_manager.go | 36 ++++ 3 files changed, 133 insertions(+), 163 deletions(-) diff --git a/pkg/const/scheme.go b/pkg/const/scheme.go index 19e7f5e50..b73803829 100644 --- a/pkg/const/scheme.go +++ b/pkg/const/scheme.go @@ -24,6 +24,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + kcpv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + + capkkv1beta1 "github.com/kubesphere/kubekey/v4/pkg/apis/capkk/v1beta1" kkcorev1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1" kkcorev1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/core/v1alpha1" ) @@ -50,6 +54,9 @@ func newScheme() *runtime.Scheme { utilruntime.Must(kkcorev1.AddToScheme(s)) utilruntime.Must(kkcorev1alpha1.AddToScheme(s)) utilruntime.Must(kkcorev1alpha1.AddConversionFuncs(s)) + utilruntime.Must(capkkv1beta1.AddToScheme(s)) + utilruntime.Must(capiv1beta1.AddToScheme(s)) + utilruntime.Must(kcpv1beta1.AddToScheme(s)) return s } diff --git a/pkg/controllers/kkcluster_controller.go b/pkg/controllers/kkcluster_controller.go index 9876a09ab..8e78b9978 100644 --- a/pkg/controllers/kkcluster_controller.go +++ b/pkg/controllers/kkcluster_controller.go @@ -23,8 +23,11 @@ import ( "fmt" "math/big" "reflect" + "strings" "time" + "k8s.io/utils/ptr" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -239,6 +242,9 @@ func (r *KKClusterReconciler) reconcileNormalRunning(ctx context.Context, s *sco return err } case infrav1beta1.PreparationReadyCondition: + if err := dealWithSecrets(ctx, r.Client, s); err != nil { + return err + } if err := r.dealWithPreparation(ctx, s); err != nil { return err } @@ -546,6 +552,90 @@ func (r *KKClusterReconciler) dealWithExecuteFailed(p *kkcorev1.Pipeline, functi return err } +// dealWithSecrets function fetches secrets created by KubeadmControlPlane, etc. And uses them to create a cluster. +func dealWithSecrets(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope) error { + // Fetch all secrets + secrets := &corev1.SecretList{} + if err := client.List(ctx, secrets, ctrlclient.MatchingLabels{ + clusterv1.ClusterNameLabel: s.Name(), + }); err != nil { + return err + } + + // Fetch KubeadmControlPlaneReference + var kcpOwnRef metav1.OwnerReference + if kcp, err := GetKubeadmControlPlane(ctx, client, s); err != nil { + return err + } else if kcp != nil { + kcpOwnRef = metav1.OwnerReference{ + APIVersion: kcp.APIVersion, + Kind: kcp.Kind, + Name: kcp.Name, + UID: kcp.UID, + Controller: ptr.To(true), + } + } + + // deal with secrets created by kcp. + for _, secret := range secrets.Items { + if !util.HasOwnerRef(secret.OwnerReferences, kcpOwnRef) { + continue + } + if err := dealWithSecretsCreatedByKCP(ctx, client, s, &secret); err != nil { + return err + } + } + + return nil +} + +// dealWithSecretsCreatedByKCP function handle one secret created by kcp, and bind with `PipelineTemplate` resource. 
+func dealWithSecretsCreatedByKCP(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope, secret *corev1.Secret) error { + var mountPath string + switch { + case strings.Contains(secret.Name, "ca"): + mountPath = "/etc/kubernetes/pki/ca.crt" + case strings.Contains(secret.Name, "control-plane"): + mountPath = "/etc/kubernetes/pki/control-plane.crt" + case strings.Contains(secret.Name, "etcd"): + mountPath = "/etc/kubernetes/pki/etcd.crt" + case strings.Contains(secret.Name, "kubeconfig"): + mountPath = "/etc/kubernetes/kubeconfig" + case strings.Contains(secret.Name, "proxy"): + mountPath = "/etc/kubernetes/pki/proxy.crt" + case strings.Contains(secret.Name, "sa"): + mountPath = "/etc/kubernetes/pki/sa.crt" + default: + return nil + } + + pipelineTemplate, err := GetPipelineTemplateFromRef(ctx, client, s.KKCluster.Spec.PipelineRef) + if err != nil { + return err + } + + volume := corev1.Volume{ + Name: secret.Name + "volume", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret.Name, + }, + }, + } + + // 将 Volume 挂载到对应的 VolumeMounts + volumeMount := corev1.VolumeMount{ + Name: secret.Name + "volume", + MountPath: mountPath, + ReadOnly: true, + } + + pipelineTemplate.Spec.JobSpec.Volumes = append(pipelineTemplate.Spec.JobSpec.Volumes, volume) + pipelineTemplate.Spec.JobSpec.VolumeMounts = append(pipelineTemplate.Spec.JobSpec.VolumeMounts, volumeMount) + + return client.Update(ctx, pipelineTemplate) +} + // dealWithPipelinesReconciles will reconcile all pipelines created for execute `playbookName` tasks, and belong to current cluster. // It will create one func (r *KKClusterReconciler) dealWithPipelinesReconcile(ctx context.Context, s *scope.ClusterScope, @@ -866,152 +956,6 @@ func (r *KKClusterReconciler) generatePipelineByTemplate(ctx context.Context, s return pipeline, nil } -// generateKKMachines function can generate KKMachines bind with both control plane nodes and worker nodes. -// func (r *KKClusterReconciler) generateKKMachines(ctx context.Context, s *scope.ClusterScope) error { -// // Fetch groups and hosts of `Inventory`, replicas of `KubeadmControlPlane` and `MachineDeployment`. 
-// inv, err := GetInventory(ctx, r.Client, s) -// if err != nil { -// return err -// } -// -// kcp, err := GetKubeadmControlPlane(ctx, r.Client, s) -// if err != nil { -// return err -// } -// -// md, err := GetMachineDeployment(ctx, r.Client, s) -// if err != nil { -// return err -// } -// -// controlPlaneGroup := inv.Spec.Groups[s.KKCluster.Spec.ControlPlaneGroupName] -// workerGroup := inv.Spec.Groups[s.KKCluster.Spec.WorkerGroupName] -// controlPlaneInfraRef := kcp.Spec.MachineTemplate.InfrastructureRef -// workerInfraRef := kcp.Spec.MachineTemplate.InfrastructureRef -// -// // Iterate through the control plane hosts -// for _, hostName := range controlPlaneGroup.Hosts { -// // Generate labels for control plane -// labels := map[string]string{ -// clusterv1.ClusterNameLabel: s.Name(), -// clusterv1.MachineControlPlaneLabel: "true", -// } -// -// // Check if the KKMachine already exists -// kkMachine := &infrav1beta1.KKMachine{} -// err := r.Client.Get(ctx, ctrlclient.ObjectKey{ -// Name: s.Name() + "-" + hostName, // Name convention -// Namespace: s.Namespace(), -// }, kkMachine) -// -// if err != nil && apierrors.IsNotFound(err) { -// if err := r.generateKKMachine(ctx, s, controlPlaneInfraRef, hostName, labels); err != nil { -// return err -// } -// } else if err == nil { -// // If exists, update the KKMachine if necessary -// if err := r.updateKKMachine(ctx, kkMachine, controlPlaneInfraRef, labels); err != nil { -// return err -// } -// } -// } -// -// // Iterate through the worker group hosts -// for _, hostName := range workerGroup.Hosts { -// // Generate labels for worker nodes -// labels := map[string]string{ -// clusterv1.ClusterNameLabel: s.Name(), -// clusterv1.MachineDeploymentNameLabel: md.Name, -// } -// -// // Check if the KKMachine already exists -// kkMachine := &infrav1beta1.KKMachine{} -// err := r.Client.Get(ctx, ctrlclient.ObjectKey{ -// Name: s.Name() + "-" + hostName, // Name convention -// Namespace: s.Namespace(), -// }, kkMachine) -// -// if err != nil && apierrors.IsNotFound(err) { -// // If not found, generate a new KKMachine -// if err := r.generateKKMachine(ctx, s, workerInfraRef, hostName, labels); err != nil { -// return err -// } -// } else if err == nil { -// // If exists, update the KKMachine if necessary -// if err := r.updateKKMachine(ctx, kkMachine, workerInfraRef, labels); err != nil { -// return err -// } -// } -// } -// -// return nil -// } - -// generateKKMachine function is used for generate a `KKMachine` resource by `Ref` and `providerID` given by other CRDs. -// Param::providerID: from `Inventory` resource, Param::ref: from `KubeadmControlPlane` for `MachineDeployment` resource. -// Param::ref is used for get `KKMachineTemplate` -// Param::labels used for bind with other CRDs. 
-// func (r *KKClusterReconciler) generateKKMachine(ctx context.Context, s *scope.ClusterScope, ref corev1.ObjectReference, -// providerID string, labels map[string]string) error { -// kkMachineTemplate, err := GetKKMachineTemplateFromRef(ctx, r.Client, ref) -// if err != nil { -// return err -// } -// -// // Create a new KKMachine based on the template -// kkMachine := &infrav1beta1.KKMachine{ -// ObjectMeta: metav1.ObjectMeta{ -// Name: s.Name() + "-" + providerID, -// Namespace: s.Namespace(), -// Labels: kkMachineTemplate.Spec.Template.ObjectMeta.Labels, -// }, -// Spec: kkMachineTemplate.Spec.Template.Spec, -// } -// -// // Add additional labels provided -// for k, v := range labels { -// kkMachine.ObjectMeta.Labels[k] = labels[v] -// } -// -// // Assign the providerID to the new KKMachine -// kkMachine.Spec.ProviderID = &providerID -// -// // Create the new KKMachine resource -// return r.Client.Create(ctx, kkMachine) -// } - -// updateKKMachine function used for update one exist `KKMachine` resource. Usually update `labels` and `roles`. -// func (r *KKClusterReconciler) updateKKMachine(ctx context.Context, kkm *infrav1beta1.KKMachine, -// ref corev1.ObjectReference, labels map[string]string) error { -// kkMachineTemplate, err := GetKKMachineTemplateFromRef(ctx, r.Client, ref) -// if err != nil { -// return err -// } -// -// // Update labels if they don't exist -// for key, value := range labels { -// if _, exists := kkm.Labels[key]; !exists { -// kkm.Labels[key] = value -// } -// } -// -// // Append roles if they are missing -// // convert old role to roleSet, used for de-duplicated -// roleSet := make(map[string]struct{}) -// for _, role := range kkm.Spec.Roles { -// roleSet[role] = struct{}{} -// } -// // Append missing roles from the template -// for _, role := range kkMachineTemplate.Spec.Template.Spec.Roles { -// if _, exists := roleSet[role]; !exists { -// kkm.Spec.Roles = append(kkm.Spec.Roles, role) -// } -// } -// -// // Update the KKMachine resource -// return r.Client.Update(ctx, kkm) -// } - // GetInventory function return cluster's `Inventory` resource. func GetInventory(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope) (*kkcorev1.Inventory, error) { inventory := &kkcorev1.Inventory{} @@ -1126,23 +1070,6 @@ func GetPipelineTemplateFromRef(ctx context.Context, client ctrlclient.Client, r return pipelineTemplate, nil } -// GetKKMachineTemplateFromRef function return `KKMachineTemplate` resource based on `ObjectReference`. -// e.g. `ObjectReference` from `KubeadmControlPlane` & `MachineDeployment` resources. -// func GetKKMachineTemplateFromRef(ctx context.Context, client ctrlclient.Client, ref corev1.ObjectReference) (*infrav1beta1.KKMachineTemplate, error) { -// kkMachineTemplate := &infrav1beta1.KKMachineTemplate{} -// -// namespacedName := types.NamespacedName{ -// Namespace: ref.Namespace, -// Name: ref.Name, -// } -// -// if err := client.Get(ctx, namespacedName, kkMachineTemplate); err != nil { -// return nil, err -// } -// -// return kkMachineTemplate, nil -// } - // SetupWithManager sets up the controller with the Manager. 
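+// Reconciles are triggered by KKCluster events and, through the
+// pipelinePhaseFilter defined below, by Pipeline phase transitions.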
func (r *KKClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { pipelinePhaseFilter := predicate.Funcs{ diff --git a/pkg/manager/controller_manager.go b/pkg/manager/controller_manager.go index dd72a3cd6..11f2f0439 100644 --- a/pkg/manager/controller_manager.go +++ b/pkg/manager/controller_manager.go @@ -20,6 +20,8 @@ import ( "context" "fmt" + capkkv1beta1 "github.com/kubesphere/kubekey/v4/pkg/apis/capkk/v1beta1" + "k8s.io/client-go/rest" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" @@ -67,5 +69,39 @@ func (c controllerManager) Run(ctx context.Context) error { return err } + if err := (&controllers.KKClusterReconciler{ + Client: mgr.GetClient(), + EventRecorder: mgr.GetEventRecorderFor("kk-cluster-controller"), + Scheme: mgr.GetScheme(), + MaxConcurrentReconciles: c.MaxConcurrentReconciles, + }).SetupWithManager(ctx, mgr); err != nil { + klog.ErrorS(err, "create kk-cluster controller error") + + return err + } + + if err := (&controllers.KKMachineReconciler{ + Client: mgr.GetClient(), + EventRecorder: mgr.GetEventRecorderFor("kk-machine-controller"), + Scheme: mgr.GetScheme(), + MaxConcurrentReconciles: c.MaxConcurrentReconciles, + }).SetupWithManager(ctx, mgr); err != nil { + klog.ErrorS(err, "create kk-machine controller error") + + return err + } + + if err = (&capkkv1beta1.KKCluster{}).SetupWebhookWithManager(mgr); err != nil { + klog.ErrorS(err, "unable to create webhook", "webhook", "KKCluster") + + return err + } + + if err = (&capkkv1beta1.KKMachine{}).SetupWebhookWithManager(mgr); err != nil { + klog.ErrorS(err, "unable to create webhook", "webhook", "KKMachine") + + return err + } + return mgr.Start(ctx) } From d7ae6c2f5cabce60f7ef16738980e2dadd762208 Mon Sep 17 00:00:00 2001 From: DingYongliang <1521323439@qq.com> Date: Thu, 10 Oct 2024 13:32:54 +0800 Subject: [PATCH 4/9] feat: add secrets reconcile --- pkg/const/scheme.go | 2 + pkg/controllers/kkcluster_controller.go | 361 +++++++++++++++--------- pkg/controllers/pipeline_controller.go | 3 +- 3 files changed, 231 insertions(+), 135 deletions(-) diff --git a/pkg/const/scheme.go b/pkg/const/scheme.go index b73803829..bcaccb8c6 100644 --- a/pkg/const/scheme.go +++ b/pkg/const/scheme.go @@ -25,6 +25,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + kcv1beta1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" kcpv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" capkkv1beta1 "github.com/kubesphere/kubekey/v4/pkg/apis/capkk/v1beta1" @@ -57,6 +58,7 @@ func newScheme() *runtime.Scheme { utilruntime.Must(capkkv1beta1.AddToScheme(s)) utilruntime.Must(capiv1beta1.AddToScheme(s)) utilruntime.Must(kcpv1beta1.AddToScheme(s)) + utilruntime.Must(kcv1beta1.AddToScheme(s)) return s } diff --git a/pkg/controllers/kkcluster_controller.go b/pkg/controllers/kkcluster_controller.go index 8e78b9978..e7fde5138 100644 --- a/pkg/controllers/kkcluster_controller.go +++ b/pkg/controllers/kkcluster_controller.go @@ -38,7 +38,8 @@ import ( "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + kcv1beta1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + kcpv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" @@ -58,8 +59,9 @@ import ( "github.com/kubesphere/kubekey/v4/pkg/scope" ) -// Defines some useful static strings. 
+// This const defines some useful static strings. const ( + TrueString string = "true" MaxPipelineCounts int = 3 PipelineUpperLimitReason string = "PipelineUpperLimit" @@ -79,6 +81,19 @@ const ( BootstrapPlaybookName string = "bootstrap-ready" BootstrapPlaybook string = "capkk/playbooks/capkk_bootstrap_ready.yaml" + + KCPCertificateAuthoritySecretInfix string = "ca" + KCPCertificateAuthorityMountPath string = "/etc/kubernetes/pki/ca" + KCPKubeadmConfigSecretInfix string = "control-plane" + KCPKubeadmConfigMountPath string = "/etc/kubernetes/pki/kubeadmconfig" + KCPEtcdSecretInfix string = "etcd" + KCPEtcdMountPath string = "/etc/kubernetes/pki/etcd" + KCPKubeConfigSecretInfix string = "kubeconfig" + KCPKubeConfigMountPath string = "/etc/kubernetes/pki/kubeconfig" + KCPProxySecretInfix string = "proxy" + KCPProxyMountPath string = "/etc/kubernetes/pki/proxy" + KCPServiceAccountInfix string = "sa" + KCPServiceAccountMountPath string = "/etc/kubernetes/pki/sa" ) // KKClusterReconciler reconciles a KKCluster object @@ -219,6 +234,13 @@ func (r *KKClusterReconciler) reconcileNormal(ctx context.Context, s *scope.Clus } } + // Initialize node select mode + if !kkCluster.Status.Ready { + if err := r.initKKCluster(ctx, s); err != nil { + return ctrl.Result{}, err + } + } + kkCluster.Status.Ready = true return ctrl.Result{ @@ -230,6 +252,7 @@ func (r *KKClusterReconciler) reconcileNormalRunning(ctx context.Context, s *sco var reset bool for { reset = false + for _, condition := range s.KKCluster.Status.Conditions { conditionsCnt := len(s.KKCluster.Status.Conditions) if conditions.IsFalse(s.KKCluster, condition.Type) { @@ -242,8 +265,11 @@ func (r *KKClusterReconciler) reconcileNormalRunning(ctx context.Context, s *sco return err } case infrav1beta1.PreparationReadyCondition: - if err := dealWithSecrets(ctx, r.Client, s); err != nil { - return err + // Refresh KCP secrets if annotation is true. + if val, ok := s.KKCluster.Annotations[infrav1beta1.KCPSecretsRefreshAnnotation]; ok && val == TrueString { + if err := dealWithSecrets(ctx, r.Client, s); err != nil { + return err + } } if err := r.dealWithPreparation(ctx, s); err != nil { return err @@ -345,17 +371,14 @@ func (r *KKClusterReconciler) dealWithHostConnectCheck(ctx context.Context, s *s // dealWithHostSelector function will be executed by dealWithHostConnectCheck function, if relevant pipeline run complete. func (r *KKClusterReconciler) dealWithHostSelector(ctx context.Context, s *scope.ClusterScope, _ kkcorev1.Pipeline) error { - // Initialize node select mode - if err := r.initNodeSelectMode(s); err != nil { - return err - } - // Fetch groups and hosts of `Inventory`, replicas of `KubeadmControlPlane` and `MachineDeployment`. inv, err := r.getInitialedInventory(ctx, s) if err != nil { return err } + originalInventory := inv.DeepCopy() + kcp, err := GetKubeadmControlPlane(ctx, r.Client, s) if err != nil { return err @@ -370,28 +393,30 @@ func (r *KKClusterReconciler) dealWithHostSelector(ctx context.Context, s *scope unavailableHosts, unavailableGroups := make(map[string]struct{}), make(map[string]struct{}) // Validate kubernetes cluster's controlPlaneGroup. 
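+	// The control-plane group must end up with exactly int(*kcp.Spec.Replicas)
+	// hosts; hosts already claimed elsewhere are skipped via unavailableHosts.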
- controlPlaneGroup, err := validateInventoryGroup(s.KKCluster, inv, s.KKCluster.Spec.ControlPlaneGroupName, + controlPlaneGroup, err := validateInventoryGroup(s.KKCluster, inv, + s.KKCluster.Annotations[infrav1beta1.ControlPlaneGroupNameAnnotation], int(*kcp.Spec.Replicas), unavailableHosts, unavailableGroups, false, ) if err != nil { return err } - inv.Spec.Groups[s.KKCluster.Spec.ControlPlaneGroupName] = controlPlaneGroup + inv.Spec.Groups[s.KKCluster.Annotations[infrav1beta1.ControlPlaneGroupNameAnnotation]] = controlPlaneGroup // Validate kubernetes cluster's workerGroup. - workerGroup, err := validateInventoryGroup(s.KKCluster, inv, s.KKCluster.Spec.WorkerGroupName, + workerGroup, err := validateInventoryGroup(s.KKCluster, inv, + s.KKCluster.Annotations[infrav1beta1.WorkerGroupNameAnnotation], int(*md.Spec.Replicas), unavailableHosts, unavailableGroups, false, ) if err != nil { return err } - inv.Spec.Groups[s.KKCluster.Spec.WorkerGroupName] = workerGroup + inv.Spec.Groups[s.KKCluster.Annotations[infrav1beta1.WorkerGroupNameAnnotation]] = workerGroup // Update `Inventory` resource. - if err := r.Client.Update(ctx, inv); err != nil { - klog.V(5).ErrorS(err, "Update Inventory error", "Inventory", ctrlclient.ObjectKeyFromObject(inv)) + if err := r.Client.Patch(ctx, inv, ctrlclient.MergeFrom(originalInventory)); err != nil { + klog.V(5).ErrorS(err, "Failed to patch Inventory", "Inventory", ctrlclient.ObjectKeyFromObject(inv)) return err } @@ -526,13 +551,21 @@ func (r *KKClusterReconciler) dealWithExecutePlaybookReconcile(ctx context.Conte return &kkcorev1.Pipeline{}, nil case kkcorev1.PipelinePhaseSucceed: r.dealWithExecuteSucceed(p, funcWithSucceed) - - return p, nil + if err := r.Client.Status().Update(ctx, s.KKCluster); err != nil { + return p, err + } case kkcorev1.PipelinePhaseFailed: - return p, r.dealWithExecuteFailed(p, funcWithFailed) + if err := r.dealWithExecuteFailed(p, funcWithFailed); err != nil { + return p, err + } + if err := r.Client.Status().Update(ctx, s.KKCluster); err != nil { + return p, err + } default: return &kkcorev1.Pipeline{}, nil } + + return p, nil } // dealWithExecuteSucceed function used by dealWithExecutePlaybookReconcile, mark current condition as false and mark the @@ -554,7 +587,7 @@ func (r *KKClusterReconciler) dealWithExecuteFailed(p *kkcorev1.Pipeline, functi // dealWithSecrets function fetches secrets created by KubeadmControlPlane, etc. And uses them to create a cluster. func dealWithSecrets(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope) error { - // Fetch all secrets + // Fetch all secrets. secrets := &corev1.SecretList{} if err := client.List(ctx, secrets, ctrlclient.MatchingLabels{ clusterv1.ClusterNameLabel: s.Name(), @@ -562,7 +595,7 @@ func dealWithSecrets(ctx context.Context, client ctrlclient.Client, s *scope.Clu return err } - // Fetch KubeadmControlPlaneReference + // Fetch KubeadmControlPlaneReference. var kcpOwnRef metav1.OwnerReference if kcp, err := GetKubeadmControlPlane(ctx, client, s); err != nil { return err @@ -576,67 +609,116 @@ func dealWithSecrets(ctx context.Context, client ctrlclient.Client, s *scope.Clu } } - // deal with secrets created by kcp. + // Fetch control plane's KubeadmConfig. 
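+	// Bootstrap secrets are typically owned by the control plane's KubeadmConfig
+	// rather than by the KubeadmControlPlane itself, so its OwnerReference is
+	// collected and checked as well.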
+ var kcOwnRef metav1.OwnerReference + if kc, err := GetControlPlaneKubeadmConfig(ctx, client, s); err != nil { + return err + } else if kc != nil { + kcOwnRef = metav1.OwnerReference{ + APIVersion: kc.APIVersion, + Kind: kc.Kind, + Name: kc.Name, + UID: kc.UID, + Controller: ptr.To(true), + } + } + + // Deal with secrets for _, secret := range secrets.Items { - if !util.HasOwnerRef(secret.OwnerReferences, kcpOwnRef) { + if !(util.HasOwnerRef(secret.OwnerReferences, kcpOwnRef) || util.HasOwnerRef(secret.OwnerReferences, kcOwnRef)) { continue } - if err := dealWithSecretsCreatedByKCP(ctx, client, s, &secret); err != nil { + + if err := mountSecretsOnPipelineTemplate(ctx, client, s, &secret); err != nil { return err } } - return nil + // Delete kcp secrets annotation. + originalKKCluster := s.KKCluster.DeepCopy() + delete(s.KKCluster.Annotations, infrav1beta1.KCPSecretsRefreshAnnotation) + + return client.Patch(ctx, s.KKCluster, ctrlclient.MergeFrom(originalKKCluster)) } -// dealWithSecretsCreatedByKCP function handle one secret created by kcp, and bind with `PipelineTemplate` resource. -func dealWithSecretsCreatedByKCP(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope, secret *corev1.Secret) error { - var mountPath string - switch { - case strings.Contains(secret.Name, "ca"): - mountPath = "/etc/kubernetes/pki/ca.crt" - case strings.Contains(secret.Name, "control-plane"): - mountPath = "/etc/kubernetes/pki/control-plane.crt" - case strings.Contains(secret.Name, "etcd"): - mountPath = "/etc/kubernetes/pki/etcd.crt" - case strings.Contains(secret.Name, "kubeconfig"): - mountPath = "/etc/kubernetes/kubeconfig" - case strings.Contains(secret.Name, "proxy"): - mountPath = "/etc/kubernetes/pki/proxy.crt" - case strings.Contains(secret.Name, "sa"): - mountPath = "/etc/kubernetes/pki/sa.crt" - default: - return nil +// mountSecretsOnPipelineTemplate function handle one secret created by kcp, and bind with `PipelineTemplate` resource. +func mountSecretsOnPipelineTemplate(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope, secret *corev1.Secret) error { + prefixToMountPath := map[string]string{ + s.KKCluster.Name + "-" + KCPCertificateAuthoritySecretInfix: KCPCertificateAuthorityMountPath, + s.KKCluster.Name + "-" + KCPKubeadmConfigSecretInfix: KCPKubeadmConfigMountPath, + s.KKCluster.Name + "-" + KCPEtcdSecretInfix: KCPEtcdMountPath, + s.KKCluster.Name + "-" + KCPKubeConfigSecretInfix: KCPKubeConfigMountPath, + s.KKCluster.Name + "-" + KCPProxySecretInfix: KCPProxyMountPath, + s.KKCluster.Name + "-" + KCPServiceAccountInfix: KCPServiceAccountMountPath, } - pipelineTemplate, err := GetPipelineTemplateFromRef(ctx, client, s.KKCluster.Spec.PipelineRef) - if err != nil { - return err + // Fetch secret mount path. All secrets created by KCP must satisfy `${ClusterName}-${Infix}` format name. 
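+	// For example (hypothetical cluster name), a cluster "demo" yields the
+	// prefix "demo-ca", so a matching Secret is mounted at
+	// KCPCertificateAuthorityMountPath.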
+ var mountPath string + for prefix, path := range prefixToMountPath { + if strings.HasPrefix(secret.Name, prefix) { + mountPath = path + + break + } + } + if mountPath == "" { + return nil } + // Define `Volume` and `VolumeMount` volume := corev1.Volume{ - Name: secret.Name + "volume", + Name: secret.Name + "-volume", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: secret.Name, }, }, } - - // 将 Volume 挂载到对应的 VolumeMounts + // Mount `Volume` on `VolumeMount` volumeMount := corev1.VolumeMount{ - Name: secret.Name + "volume", + Name: secret.Name + "-volume", MountPath: mountPath, - ReadOnly: true, } - pipelineTemplate.Spec.JobSpec.Volumes = append(pipelineTemplate.Spec.JobSpec.Volumes, volume) - pipelineTemplate.Spec.JobSpec.VolumeMounts = append(pipelineTemplate.Spec.JobSpec.VolumeMounts, volumeMount) + // DeepCopy `KKCluster` for patch update. + originalKKCluster := s.KKCluster.DeepCopy() + + // Fetch `.Spec.PipelineTemplate`. + pipelineTemplate := &s.KKCluster.Spec.PipelineTemplate - return client.Update(ctx, pipelineTemplate) + // Append or Update `Volume` for `.Spec.PipelineTemplate`. + volumeExists := false + for i, v := range pipelineTemplate.JobSpec.Volumes { + if v.Name == volume.Name { + pipelineTemplate.JobSpec.Volumes[i] = volume + volumeExists = true + + break + } + } + if !volumeExists { + pipelineTemplate.JobSpec.Volumes = append(pipelineTemplate.JobSpec.Volumes, volume) + } + + // Append or Update `VolumeMount` for `.Spec.PipelineTemplate`. + volumeMountExists := false + for i, vm := range pipelineTemplate.JobSpec.VolumeMounts { + if vm.Name == volumeMount.Name { + pipelineTemplate.JobSpec.VolumeMounts[i] = volumeMount + volumeMountExists = true + + break + } + } + if !volumeMountExists { + pipelineTemplate.JobSpec.VolumeMounts = append(pipelineTemplate.JobSpec.VolumeMounts, volumeMount) + } + + // Patch `KKCluster`. + return client.Patch(ctx, s.KKCluster, ctrlclient.MergeFrom(originalKKCluster)) } -// dealWithPipelinesReconciles will reconcile all pipelines created for execute `playbookName` tasks, and belong to current cluster. +// dealWithPipelinesReconciles will reconcile all pipelines created for execute `playbookName` test1, and belong to current cluster. // It will create one func (r *KKClusterReconciler) dealWithPipelinesReconcile(ctx context.Context, s *scope.ClusterScope, playbook, playbookName string) (*kkcorev1.Pipeline, error) { @@ -678,23 +760,49 @@ func (r *KKClusterReconciler) dealWithPipelinesReconcile(ctx context.Context, s return latestPipeline, nil } -// initNodeSelectMode function used to initialize some necessary configuration information if yaml file not config them. -func (r *KKClusterReconciler) initNodeSelectMode(s *scope.ClusterScope) error { - // Set default value of `KKCluster` resource. +// initKKCluster function used to initialize some necessary configuration information if yaml file not config them. +func (r *KKClusterReconciler) initKKCluster(ctx context.Context, s *scope.ClusterScope) error { + originalKKCluster := s.KKCluster.DeepCopy() + if s.KKCluster.Spec.NodeSelectorMode == "" { s.KKCluster.Spec.NodeSelectorMode = infrav1beta1.DefaultNodeSelectorMode } - if s.KKCluster.Spec.ControlPlaneGroupName == "" { - s.KKCluster.Spec.ControlPlaneGroupName = infrav1beta1.DefaultControlPlaneGroupName + + // Fetch annotations of `KKCluster`. + kkcAnnotations := s.KKCluster.Annotations + if kkcAnnotations == nil { + kkcAnnotations = make(map[string]string) + } + + // Init annotations of `KKCluster`. 
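+	// Missing group-name annotations fall back to their defaults, and the KCP
+	// secrets refresh annotation defaults to "true" so the secrets are mounted
+	// during the first reconcile.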
+ if _, exists := kkcAnnotations[infrav1beta1.ControlPlaneGroupNameAnnotation]; !exists { + kkcAnnotations[infrav1beta1.ControlPlaneGroupNameAnnotation] = infrav1beta1.DefaultControlPlaneGroupName + } + + if _, exists := kkcAnnotations[infrav1beta1.WorkerGroupNameAnnotation]; !exists { + kkcAnnotations[infrav1beta1.WorkerGroupNameAnnotation] = infrav1beta1.DefaultWorkerGroupName } - if s.KKCluster.Spec.WorkerGroupName == "" { - s.KKCluster.Spec.WorkerGroupName = infrav1beta1.DefaultWorkerGroupName + + if _, exists := kkcAnnotations[infrav1beta1.ClusterGroupNameAnnotation]; !exists { + kkcAnnotations[infrav1beta1.ClusterGroupNameAnnotation] = infrav1beta1.DefaultClusterGroupName } - if s.KKCluster.Spec.ClusterGroupName == "" { - s.KKCluster.Spec.ClusterGroupName = infrav1beta1.DefaultClusterGroupName + + if _, exists := kkcAnnotations[infrav1beta1.EtcdGroupNameAnnotation]; !exists { + kkcAnnotations[infrav1beta1.EtcdGroupNameAnnotation] = infrav1beta1.DefaultEtcdGroupName } - return nil + if _, exists := kkcAnnotations[infrav1beta1.RegistryGroupNameAnnotation]; !exists { + kkcAnnotations[infrav1beta1.RegistryGroupNameAnnotation] = infrav1beta1.DefaultRegistryGroupName + } + + if _, exists := kkcAnnotations[infrav1beta1.KCPSecretsRefreshAnnotation]; !exists { + kkcAnnotations[infrav1beta1.KCPSecretsRefreshAnnotation] = "true" + } + + // Patch annotations of KKCluster. + s.KKCluster.Annotations = kkcAnnotations + + return r.Patch(ctx, s.KKCluster, ctrlclient.MergeFrom(originalKKCluster)) } // getInitialedInventory function is a pre-processor function, used to process `Groups` of `Inventory`to streamline @@ -706,6 +814,8 @@ func (r *KKClusterReconciler) getInitialedInventory(ctx context.Context, s *scop return nil, err } + originalInventory := inv.DeepCopy() + hosts := inv.Spec.Hosts groups := inv.Spec.Groups if groups == nil { @@ -721,14 +831,17 @@ func (r *KKClusterReconciler) getInitialedInventory(ctx context.Context, s *scop } // Initialize kubernetes necessary groups. 
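+	// The cluster group aggregates the control-plane and worker groups; either
+	// group is created empty when it does not exist yet.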
- groups[s.KKCluster.Spec.ClusterGroupName] = kkcorev1.InventoryGroup{ - Groups: []string{s.KKCluster.Spec.ControlPlaneGroupName, s.KKCluster.Spec.WorkerGroupName}, + groups[s.KKCluster.Annotations[infrav1beta1.ClusterGroupNameAnnotation]] = kkcorev1.InventoryGroup{ + Groups: []string{ + s.KKCluster.Annotations[infrav1beta1.ControlPlaneGroupNameAnnotation], + s.KKCluster.Annotations[infrav1beta1.WorkerGroupNameAnnotation], + }, } - if _, exists := groups[s.KKCluster.Spec.ControlPlaneGroupName]; !exists { - groups[s.KKCluster.Spec.ControlPlaneGroupName] = kkcorev1.InventoryGroup{} + if _, exists := groups[s.KKCluster.Annotations[infrav1beta1.ControlPlaneGroupNameAnnotation]]; !exists { + groups[s.KKCluster.Annotations[infrav1beta1.ControlPlaneGroupNameAnnotation]] = kkcorev1.InventoryGroup{} } - if _, exists := groups[s.KKCluster.Spec.WorkerGroupName]; !exists { - groups[s.KKCluster.Spec.WorkerGroupName] = kkcorev1.InventoryGroup{} + if _, exists := groups[s.KKCluster.Annotations[infrav1beta1.WorkerGroupNameAnnotation]]; !exists { + groups[s.KKCluster.Annotations[infrav1beta1.WorkerGroupNameAnnotation]] = kkcorev1.InventoryGroup{} } inv.Spec.Groups = groups @@ -736,10 +849,8 @@ func (r *KKClusterReconciler) getInitialedInventory(ctx context.Context, s *scop return nil, err } - if err := r.Update(ctx, inv); err != nil { - klog.ErrorS(err, "Failed to update Inventory", "Inventory", inv) - - return nil, err + if err := r.Patch(ctx, inv, ctrlclient.MergeFrom(originalInventory)); err != nil { + klog.ErrorS(err, "Failed to patch Inventory", "Inventory", inv) } return inv, nil @@ -751,8 +862,8 @@ func (r *KKClusterReconciler) updateInventoryStatus(ctx context.Context, s *scop newHostMachineMapping := make(map[string]kkcorev1.MachineBinding) // Get ControlPlaneGroup and WorkerGroup. - controlPlaneGroup := inv.Spec.Groups[s.KKCluster.Spec.ControlPlaneGroupName] - workerGroup := inv.Spec.Groups[s.KKCluster.Spec.WorkerGroupName] + controlPlaneGroup := inv.Spec.Groups[s.KKCluster.Annotations[infrav1beta1.ControlPlaneGroupNameAnnotation]] + workerGroup := inv.Spec.Groups[s.KKCluster.Annotations[infrav1beta1.WorkerGroupNameAnnotation]] // Update control-plane nodes. for _, h := range controlPlaneGroup.Hosts { @@ -847,7 +958,7 @@ func validateInventoryGroup( return kkcorev1.InventoryGroup{ Groups: make([]string, 0), Hosts: ghosts, - Vars: inv.Spec.Groups[kkc.Spec.ControlPlaneGroupName].Vars, + Vars: inv.Spec.Groups[gName].Vars, }, nil } @@ -904,15 +1015,7 @@ func secureRandomInt(upperLimit int) (int, error) { // generatePipelineByTemplate function can generate a generic pipeline by `PipelineTemplate`. 
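+// The returned Pipeline runs the given playbook and copies Project,
+// InventoryRef, ConfigRef, Tags, SkipTags, Debug and JobSpec from the inline
+// s.KKCluster.Spec.PipelineTemplate.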
func (r *KKClusterReconciler) generatePipelineByTemplate(ctx context.Context, s *scope.ClusterScope, name string, playbook string, ) (*kkcorev1.Pipeline, error) { - ref := s.KKCluster.Spec.PipelineRef - if ref.Namespace == "" { - ref.Namespace = s.Namespace() - } - - pipelineTemplate, err := GetPipelineTemplateFromRef(ctx, r.Client, s.KKCluster.Spec.PipelineRef) - if err != nil { - return nil, err - } + pipelineTemplate := s.KKCluster.Spec.PipelineTemplate pipeline := &kkcorev1.Pipeline{ ObjectMeta: metav1.ObjectMeta{ @@ -924,14 +1027,14 @@ func (r *KKClusterReconciler) generatePipelineByTemplate(ctx context.Context, s }, }, Spec: kkcorev1.PipelineSpec{ - Project: pipelineTemplate.Spec.Project, + Project: pipelineTemplate.Project, Playbook: playbook, - InventoryRef: pipelineTemplate.Spec.InventoryRef, - ConfigRef: pipelineTemplate.Spec.ConfigRef, - Tags: pipelineTemplate.Spec.Tags, - SkipTags: pipelineTemplate.Spec.SkipTags, - Debug: pipelineTemplate.Spec.Debug, - JobSpec: pipelineTemplate.Spec.JobSpec, + InventoryRef: pipelineTemplate.InventoryRef, + ConfigRef: pipelineTemplate.ConfigRef, + Tags: pipelineTemplate.Tags, + SkipTags: pipelineTemplate.SkipTags, + Debug: pipelineTemplate.Debug, + JobSpec: pipelineTemplate.JobSpec, }, } @@ -981,50 +1084,56 @@ func GetInventory(ctx context.Context, client ctrlclient.Client, s *scope.Cluste return inventory, nil } -// GetConfig function return cluster's `Config` resource. -func GetConfig(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope) *kkcorev1.Config { - config := &kkcorev1.Config{} +// GetKubeadmControlPlane function return cluster's `KubeadmControlPlane` resource. +func GetKubeadmControlPlane(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope) (*kcpv1beta1.KubeadmControlPlane, error) { + kcp := &kcpv1beta1.KubeadmControlPlane{} - namespace := s.KKCluster.Spec.ConfigRef.Namespace + namespace := s.Cluster.Spec.ControlPlaneRef.Namespace if namespace == "" { namespace = s.Namespace() } - err := client.Get(ctx, + if err := client.Get(ctx, types.NamespacedName{ - Name: s.KKCluster.Spec.ConfigRef.Name, + Name: s.Cluster.Spec.ControlPlaneRef.Name, Namespace: namespace, - }, config) - - if err != nil { - klog.V(5).InfoS("Cluster not found customize `Config` resource, use default configuration default", - "Config", ctrlclient.ObjectKeyFromObject(config)) - - return nil + }, kcp, + ); err != nil { + return nil, err } - return config + return kcp, nil } -// GetKubeadmControlPlane function return cluster's `KubeadmControlPlane` resource. -func GetKubeadmControlPlane(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope) (*v1beta1.KubeadmControlPlane, error) { - kcp := &v1beta1.KubeadmControlPlane{} +// GetControlPlaneKubeadmConfig function return cluster's `KubeadmConfig` resource belonged to control plane `Machine`. 
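+// It lists KubeadmConfigs labelled with the cluster name and the control-plane
+// machine label and expects exactly one match; zero or multiple matches are
+// returned as errors.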
+func GetControlPlaneKubeadmConfig(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope) (*kcv1beta1.KubeadmConfig, error) {
+	kcList := &kcv1beta1.KubeadmConfigList{}
 
 	namespace := s.Cluster.Spec.ControlPlaneRef.Namespace
 	if namespace == "" {
 		namespace = s.Namespace()
 	}
 
-	if err := client.Get(ctx,
-		types.NamespacedName{
-			Name:      s.Cluster.Spec.ControlPlaneRef.Name,
-			Namespace: namespace,
-		}, kcp,
-	); err != nil {
-		return nil, err
+	err := client.List(ctx, kcList,
+		ctrlclient.InNamespace(namespace),
+		ctrlclient.MatchingLabels{
+			clusterv1.ClusterNameLabel: s.Name(),
+		}, ctrlclient.HasLabels{
+			clusterv1.MachineControlPlaneNameLabel,
+		})
+	if err != nil && !apierrors.IsNotFound(err) {
+		return nil, fmt.Errorf("error listing KubeadmConfigs: %w", err)
 	}
 
-	return kcp, nil
+	if len(kcList.Items) == 0 {
+		return nil, errors.New("no control-plane KubeadmConfig found for cluster " + s.Name())
+	}
+
+	if len(kcList.Items) > 1 {
+		return nil, errors.New("multiple control-plane KubeadmConfigs found for cluster " + s.Name())
+	}
+
+	return &kcList.Items[0], nil
 }
 
 // GetMachineDeployment function return cluster's `MachineDeployment` resource.
@@ -1054,22 +1163,6 @@ func GetMachineDeployment(ctx context.Context, client ctrlclient.Client, s *scop
 	return &mdList.Items[0], nil
 }
 
-// GetPipelineTemplateFromRef function used for generate `Pipeline` resources by `PipelineTemplate`
-func GetPipelineTemplateFromRef(ctx context.Context, client ctrlclient.Client, ref *corev1.ObjectReference) (*kkcorev1.PipelineTemplate, error) {
-	pipelineTemplate := &kkcorev1.PipelineTemplate{}
-
-	namespacedName := types.NamespacedName{
-		Namespace: ref.Namespace,
-		Name:      ref.Name,
-	}
-
-	if err := client.Get(ctx, namespacedName, pipelineTemplate); err != nil {
-		return nil, err
-	}
-
-	return pipelineTemplate, nil
-}
-
 // SetupWithManager sets up the controller with the Manager.
func (r *KKClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { pipelinePhaseFilter := predicate.Funcs{ diff --git a/pkg/controllers/pipeline_controller.go b/pkg/controllers/pipeline_controller.go index addee6ce3..f8c074364 100644 --- a/pkg/controllers/pipeline_controller.go +++ b/pkg/controllers/pipeline_controller.go @@ -228,7 +228,7 @@ func (r *PipelineReconciler) checkServiceAccount(ctx context.Context, pipeline k return err } - //create rolebinding + // create rolebinding if err := r.Client.Create(ctx, &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{Namespace: pipeline.Namespace, Name: saName}, RoleRef: rbacv1.RoleRef{ @@ -289,6 +289,7 @@ func (r *PipelineReconciler) GenerateJobSpec(pipeline kkcorev1.Pipeline) batchv1 ImagePullPolicy: corev1.PullPolicy(imagePullPolicy), Command: []string{"kk"}, Args: []string{"pipeline", + "-v", "6", "--name", pipeline.Name, "--namespace", pipeline.Namespace}, VolumeMounts: pipeline.Spec.JobSpec.VolumeMounts, From 9aa3ab9cde3eb141f055dde9150f75708d7b7f2b Mon Sep 17 00:00:00 2001 From: DingYongliang <1521323439@qq.com> Date: Thu, 10 Oct 2024 14:09:50 +0800 Subject: [PATCH 5/9] feat: update go.mod --- go.mod | 86 ++++++++++++++++++++++++++++++---------------------------- 1 file changed, 44 insertions(+), 42 deletions(-) diff --git a/go.mod b/go.mod index 84ada6967..84dab9874 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/kubesphere/kubekey/v4 -go 1.22 +go 1.22.0 require ( github.com/Masterminds/sprig/v3 v3.2.3 @@ -10,20 +10,21 @@ require ( github.com/opencontainers/image-spec v1.1.0 github.com/pkg/sftp v1.13.6 github.com/schollz/progressbar/v3 v3.14.5 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 - golang.org/x/crypto v0.18.0 + golang.org/x/crypto v0.25.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.29.1 - k8s.io/apimachinery v0.29.1 - k8s.io/apiserver v0.29.1 - k8s.io/client-go v0.29.1 - k8s.io/component-base v0.29.1 + k8s.io/api v0.30.3 + k8s.io/apimachinery v0.30.3 + k8s.io/apiserver v0.30.3 + k8s.io/client-go v0.30.3 + k8s.io/component-base v0.30.3 k8s.io/klog/v2 v2.120.1 k8s.io/utils v0.0.0-20240102154912-e7106e64919e oras.land/oras-go/v2 v2.5.0 - sigs.k8s.io/controller-runtime v0.17.0 + sigs.k8s.io/cluster-api v1.8.3 + sigs.k8s.io/controller-runtime v0.18.5 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 sigs.k8s.io/yaml v1.4.0 ) @@ -43,27 +44,28 @@ require ( github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.11.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/evanphx/json-patch v5.8.1+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.8.1 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.22.7 // indirect + 
github.com/gobuffalo/flect v1.0.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/cel-go v0.17.7 // indirect + github.com/google/cel-go v0.17.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/huandu/xstrings v1.3.3 // indirect @@ -76,62 +78,62 @@ require ( github.com/kr/fs v0.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect - github.com/mitchellh/copystructure v1.0.0 // indirect - github.com/mitchellh/reflectwalk v1.0.0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/gomega v1.34.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.18.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/client_model v0.6.0 // indirect github.com/prometheus/common v0.46.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/sergi/go-diff v1.3.1 // indirect - github.com/shopspring/decimal v1.2.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect github.com/skeema/knownhosts v1.2.1 // indirect - github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect - go.etcd.io/etcd/api/v3 v3.5.11 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.11 // indirect - go.etcd.io/etcd/client/v3 v3.5.11 // indirect + go.etcd.io/etcd/api/v3 v3.5.15 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.15 // indirect + go.etcd.io/etcd/client/v3 v3.5.15 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect - go.opentelemetry.io/otel v1.22.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect - go.opentelemetry.io/otel/metric v1.22.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/sdk v1.22.0 // indirect - go.opentelemetry.io/otel/trace v1.22.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect go.opentelemetry.io/proto/otlp v1.1.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect + go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect - golang.org/x/mod 
v0.14.0 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.27.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.22.0 // indirect golang.org/x/term v0.22.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/grpc v1.60.1 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect + google.golang.org/grpc v1.62.2 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/apiextensions-apiserver v0.29.1 // indirect - k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect + k8s.io/apiextensions-apiserver v0.30.3 // indirect + k8s.io/cluster-bootstrap v0.30.3 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect ) From 760fe34cd11e13e56ae21e8964cb0a47ca4085c8 Mon Sep 17 00:00:00 2001 From: DingYongliang <1521323439@qq.com> Date: Thu, 10 Oct 2024 14:30:32 +0800 Subject: [PATCH 6/9] feat: update go.sum --- go.sum | 214 +++++++++++++++++++++++++++++++-------------------------- 1 file changed, 115 insertions(+), 99 deletions(-) diff --git a/go.sum b/go.sum index b7d8786d3..4fbe29a43 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,10 @@ -cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM= cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.2.0 
h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= @@ -22,6 +22,8 @@ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= @@ -34,34 +36,43 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0= +github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= +github.com/coredns/corefile-migration v1.0.23 h1:Fp4FETmk8sT/IRgnKX2xstC2dL7+QdcU+BL5AYIN3Jw= +github.com/coredns/corefile-migration v1.0.23/go.mod h1:8HyMhuyzx9RLZp8cRc9Uf3ECpEAafHOFxQWUPqktMQI= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod 
h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= -github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/evanphx/json-patch v5.8.1+incompatible h1:2toJaoe7/rNa1zpeQx0UnVEjqk6z2ecyA20V/zg8vTU= github.com/evanphx/json-patch v5.8.1+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.8.1 h1:iPEdwg0XayoS+E7Mth9JxwUtOgyVxnDTXHtKhZPlZxA= -github.com/evanphx/json-patch/v5 v5.8.1/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= @@ -75,8 +86,8 @@ github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -88,7 +99,10 @@ 
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHa github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8= github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= +github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -96,17 +110,14 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= -github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= +github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -115,11 +126,11 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark= github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 
h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= @@ -164,10 +175,12 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -175,10 +188,10 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0= +github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA= +github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= +github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= github.com/opencontainers/go-digest v1.0.0 
h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -193,8 +206,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= @@ -208,8 +221,9 @@ github.com/schollz/progressbar/v3 v3.14.5 h1:97RrSxbBASxQuZN9yemnyGrFZ/swnG6IrEe github.com/schollz/progressbar/v3 v3.14.5/go.mod h1:Nrzpuw3Nl0srLY0VlTvC4V6RL50pcEymjy6qyJAaLa0= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -217,22 +231,25 @@ github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2 github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod 
h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= @@ -246,14 +263,14 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/etcd/api/v3 v3.5.11 h1:B54KwXbWDHyD3XYAwprxNzTe7vlhR69LuBgZnMVvS7E= -go.etcd.io/etcd/api/v3 v3.5.11/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= -go.etcd.io/etcd/client/pkg/v3 v3.5.11 h1:bT2xVspdiCj2910T0V+/KHcVKjkUrCZVtk8J2JF2z1A= -go.etcd.io/etcd/client/pkg/v3 v3.5.11/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= +go.etcd.io/etcd/api/v3 v3.5.15 h1:3KpLJir1ZEBrYuV2v+Twaa/e2MdDCEZ/70H+lzEiwsk= +go.etcd.io/etcd/api/v3 v3.5.15/go.mod h1:N9EhGzXq58WuMllgH9ZvnEr7SI9pS0k0+DHZezGp7jM= +go.etcd.io/etcd/client/pkg/v3 v3.5.15 h1:fo0HpWz/KlHGMCC+YejpiCmyWDEuIpnTDzpJLB5fWlA= +go.etcd.io/etcd/client/pkg/v3 v3.5.15/go.mod h1:mXDI4NAOwEiszrHCb0aqfAYNCrZP4e9hRca3d1YK8EU= go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= -go.etcd.io/etcd/client/v3 v3.5.11 h1:ajWtgoNSZJ1gmS8k+icvPtqsqEav+iUorF7b0qozgUU= -go.etcd.io/etcd/client/v3 v3.5.11/go.mod h1:a6xQUEqFJ8vztO1agJh/KQKOMfFI8og52ZconzcDJwE= +go.etcd.io/etcd/client/v3 v3.5.15 h1:23M0eY4Fd/inNv1ZfU3AxrbbOdW79r9V9Rl62Nm6ip4= +go.etcd.io/etcd/client/v3 v3.5.15/go.mod h1:CLSJxrYjvLtHsrPKsy7LmZEE+DK2ktfd2bN4RhBMwlU= go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= @@ -262,28 +279,28 @@ go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= -go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ= -go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= -go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= -go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= -go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -293,16 +310,16 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto 
v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -314,10 +331,10 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -355,12 +372,11 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -369,28 +385,24 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k= -google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac h1:OZkkudMUu9LVQMCoRUbI/1p5VCo9BOrlvkqMvWtqa6s= -google.golang.org/genproto/googleapis/api v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/genproto 
v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.62.2 h1:iEIj1U5qjyBjzkM5nk3Fq+S1IbjbXSyqeULZ1Nfo4AA= +google.golang.org/grpc v1.62.2/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -409,30 +421,34 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= -k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= -k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= -k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/apiserver v0.29.1 h1:e2wwHUfEmMsa8+cuft8MT56+16EONIEK8A/gpBSco+g= -k8s.io/apiserver v0.29.1/go.mod h1:V0EpkTRrJymyVT3M49we8uh2RvXf7fWC5XLB0P3SwRw= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= -k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw= -k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc= +k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= +k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= +k8s.io/apiextensions-apiserver v0.30.3 h1:oChu5li2vsZHx2IvnGP3ah8Nj3KyqG3kRSaKmijhB9U= +k8s.io/apiextensions-apiserver v0.30.3/go.mod h1:uhXxYDkMAvl6CJw4lrDN4CPbONkF3+XL9cacCT44kV4= +k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= +k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.3 h1:QZJndA9k2MjFqpnyYv/PH+9PE0SHhx3hBho4X0vE65g= +k8s.io/apiserver v0.30.3/go.mod h1:6Oa88y1CZqnzetd2JdepO0UXzQX4ZnOekx2/PtEjrOg= +k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= +k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/cluster-bootstrap v0.30.3 
h1:MgxyxMkpaC6mu0BKWJ8985XCOnKU+eH3Iy+biwtDXRk= +k8s.io/cluster-bootstrap v0.30.3/go.mod h1:h8BoLDfdD7XEEIXy7Bx9FcMzxHwz29jsYYi34bM5DKU= +k8s.io/component-base v0.30.3 h1:Ci0UqKWf4oiwy8hr1+E3dsnliKnkMLZMVbWzeorlk7s= +k8s.io/component-base v0.30.3/go.mod h1:C1SshT3rGPCuNtBs14RmVD2xW0EhRSeLvBh7AGk1quA= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15 h1:m6dl1pkxz3HuE2mP9MUYPCCGyy6IIFlv/vTlLBDxIwA= -k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15/go.mod h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c= oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= -sigs.k8s.io/controller-runtime v0.17.0 h1:fjJQf8Ukya+VjogLO6/bNX9HE6Y2xpsO5+fyS26ur/s= -sigs.k8s.io/controller-runtime v0.17.0/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0 h1:Tc9rS7JJoZ9sl3OpL4842oIk6lH7gWBb0JOmJ0ute7M= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0/go.mod h1:1ewhL9l1gkPcU/IU/6rFYfikf+7Y5imWv7ARVbBOzNs= +sigs.k8s.io/cluster-api v1.8.3 h1:N6i25rF5QMadwVg2UPfuO6CzmNXjqnF2r1MAO+kcsro= +sigs.k8s.io/cluster-api v1.8.3/go.mod h1:pXv5LqLxuIbhGIXykyNKiJh+KrLweSBajVHHitPLyoY= +sigs.k8s.io/controller-runtime v0.18.5 h1:nTHio/W+Q4aBlQMgbnC5hZb4IjIidyrizMai9P6n4Rk= +sigs.k8s.io/controller-runtime v0.18.5/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= From 0a1cf01f7e6b08149f0eb09c6c06ad119f74c13c Mon Sep 17 00:00:00 2001 From: DingYongliang <1521323439@qq.com> Date: Tue, 29 Oct 2024 17:14:37 +0800 Subject: [PATCH 7/9] feat: bug fix and deal with secrets --- build/kk/Dockerfile | 2 +- pkg/apis/capkk/v1beta1/kkcluster_types.go | 22 +- pkg/controllers/kkcluster_controller.go | 309 +++++++++++++++------- pkg/controllers/kkmachine_controller.go | 21 +- pkg/scope/cluster.go | 51 +++- 5 files changed, 284 insertions(+), 121 deletions(-) diff --git a/build/kk/Dockerfile b/build/kk/Dockerfile index 1e6231c8b..d1b20b226 100644 --- a/build/kk/Dockerfile +++ b/build/kk/Dockerfile @@ -33,7 +33,7 @@ FROM alpine:3.19.0 WORKDIR /kubekey # install tool -RUN apk update && apk add bash && apk add curl && apk add openssl +RUN apk update && apk add bash && apk add curl && apk add openssl && apk add sudo RUN curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 && \ chmod 700 get_helm.sh && \ ./get_helm.sh diff --git 
a/pkg/apis/capkk/v1beta1/kkcluster_types.go b/pkg/apis/capkk/v1beta1/kkcluster_types.go
index c37a48071..002417214 100644
--- a/pkg/apis/capkk/v1beta1/kkcluster_types.go
+++ b/pkg/apis/capkk/v1beta1/kkcluster_types.go
@@ -28,10 +28,11 @@ type KKClusterPhase string
 
 // const defines the current Phase of KKCluster.
 const (
-	KKClusterPhasePending KKClusterPhase = "Pending"
-	KKClusterPhaseSucceed KKClusterPhase = "Succeed"
-	KKClusterPhaseRunning KKClusterPhase = "Running"
-	KKClusterPhaseFailed  KKClusterPhase = "Failed"
+	KKClusterPhasePending  KKClusterPhase = "Pending"
+	KKClusterPhaseSucceed  KKClusterPhase = "Succeed"
+	KKClusterPhaseRunning  KKClusterPhase = "Running"
+	KKClusterPhaseFailed   KKClusterPhase = "Failed"
+	KKClusterPhaseDeleting KKClusterPhase = "Deleting"
 )
 
 // NodeSelectorMode defines the selector function used when selecting cluster nodes.
@@ -128,6 +129,19 @@ const (
 	ClusterReadyReason string = "ClusterReady"
 	// ClusterReadyMessage is a specification `Message` of ClusterReadyCondition.
 	ClusterReadyMessage string = "Cluster is ready."
+
+	// ClusterDeletingCondition tracks the progress of cluster deletion.
+	ClusterDeletingCondition clusterv1.ConditionType = "ClusterDeletingCondition"
+	// WaitingClusterDeletingReason is one `Reason` of ClusterDeletingCondition.
+	WaitingClusterDeletingReason string = "WaitingForClusterDeleting"
+	// WaitingClusterDeletingMessage is a specification `Message` of ClusterDeletingCondition.
+	WaitingClusterDeletingMessage string = "Waiting for cluster deletion"
+	// ClusterDeletingSucceedReason is one `Reason` of ClusterDeletingCondition.
+	ClusterDeletingSucceedReason string = "ClusterDeletingSucceeded"
+	// ClusterDeletingFailedReason is one `Reason` of ClusterDeletingCondition.
+	ClusterDeletingFailedReason string = "ClusterDeletingFailed"
+	// ClusterDeletingSucceedMessage is a specification `Message` of ClusterDeletingCondition.
+	ClusterDeletingSucceedMessage string = "Cluster deletion succeeded"
 )
 
 const (
diff --git a/pkg/controllers/kkcluster_controller.go b/pkg/controllers/kkcluster_controller.go
index e7fde5138..f3f32976a 100644
--- a/pkg/controllers/kkcluster_controller.go
+++ b/pkg/controllers/kkcluster_controller.go
@@ -22,10 +22,13 @@ import (
 	"errors"
 	"fmt"
 	"math/big"
+	"path/filepath"
 	"reflect"
 	"strings"
 	"time"
 
+	"gopkg.in/yaml.v3"
+
 	"k8s.io/utils/ptr"
 
 	corev1 "k8s.io/api/core/v1"
@@ -82,18 +85,24 @@ const (
 	BootstrapPlaybookName string = "bootstrap-ready"
 	BootstrapPlaybook     string = "capkk/playbooks/capkk_bootstrap_ready.yaml"
 
-	KCPCertificateAuthoritySecretInfix string = "ca"
-	KCPCertificateAuthorityMountPath   string = "/etc/kubernetes/pki/ca"
-	KCPKubeadmConfigSecretInfix        string = "control-plane"
-	KCPKubeadmConfigMountPath          string = "/etc/kubernetes/pki/kubeadmconfig"
-	KCPEtcdSecretInfix                 string = "etcd"
-	KCPEtcdMountPath                   string = "/etc/kubernetes/pki/etcd"
-	KCPKubeConfigSecretInfix           string = "kubeconfig"
-	KCPKubeConfigMountPath             string = "/etc/kubernetes/pki/kubeconfig"
-	KCPProxySecretInfix                string = "proxy"
-	KCPProxyMountPath                  string = "/etc/kubernetes/pki/proxy"
-	KCPServiceAccountInfix             string = "sa"
-	KCPServiceAccountMountPath         string = "/etc/kubernetes/pki/sa"
+	ClusterDeletingPlaybookName string = "delete-cluster"
+	ClusterDeletingPlaybook     string = "capkk/playbooks/capkk_delete_cluster.yaml"
+
+	CloudConfigValueKey string = "value"
+
+	KubernetesDir string = "/etc/kubernetes/pki/"
+	// KCPCertificateAuthoritySecretInfix string = "ca"
+	// KCPCertificateAuthorityMountPath   string = "/etc/kubernetes/pki/ca"
+	KCPKubeadmConfigSecretInfix string = "control-plane"
+	KCPKubeadmConfigMountPath   string = "/etc/kubernetes/pki/kubeadmconfig"
+	// KCPEtcdSecretInfix string = "etcd"
+	// KCPEtcdMountPath   string = "/etc/kubernetes/pki/etcd"
+	KCPKubeConfigSecretInfix string = "kubeconfig"
+	KCPKubeConfigMountPath   string = "/etc/kubernetes/pki/kubeconfig"
+	// KCPProxySecretInfix string = "proxy"
+	// KCPProxyMountPath   string = "/etc/kubernetes/pki/proxy"
+	// KCPServiceAccountInfix     string = "sa"
+	// KCPServiceAccountMountPath string = "/etc/kubernetes/pki/sa"
 )
 
 // KKClusterReconciler reconciles a KKCluster object
@@ -171,9 +180,7 @@ func (r *KKClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 
 	// Handle deleted clusters
 	if !kkCluster.DeletionTimestamp.IsZero() {
-		r.reconcileDelete(clusterScope)
-
-		return ctrl.Result{}, nil
+		return r.reconcileDelete(ctx, clusterScope)
 	}
 
 	// Handle non-deleted clusters
@@ -183,37 +190,31 @@ func (r *KKClusterReconciler) reconcileNormal(ctx context.Context, s *scope.ClusterScope) (reconcile.Result, error) {
 	klog.V(4).Info("Reconcile KKCluster normal")
 
-	kkCluster := s.KKCluster
-
 	// If the KKCluster doesn't have our finalizer, add it.
-	if controllerutil.AddFinalizer(kkCluster, infrav1beta1.ClusterFinalizer) {
+	if controllerutil.AddFinalizer(s.KKCluster, infrav1beta1.ClusterFinalizer) {
 		// Register the finalizer immediately to avoid orphaning KK resources on delete
 		if err := s.PatchObject(ctx); err != nil {
 			return reconcile.Result{}, err
 		}
 	}
 
-	switch kkCluster.Status.Phase {
+	switch s.KKCluster.Status.Phase {
 	case "":
 		// Switch kkCluster.Status.Phase to `Pending`
-		excepted := kkCluster.DeepCopy()
-		kkCluster.Status.Phase = infrav1beta1.KKClusterPhasePending
-		if err := r.Client.Status().Patch(ctx, kkCluster, ctrlclient.MergeFrom(excepted)); err != nil {
-			klog.V(5).ErrorS(err, "Update KKCluster error", "KKCluster", ctrlclient.ObjectKeyFromObject(kkCluster))
-
-			return ctrl.Result{}, err
+		err := s.PatchClusterPhase(ctx, infrav1beta1.KKClusterPhasePending)
+		if err != nil {
+			return reconcile.Result{}, err
 		}
 	case infrav1beta1.KKClusterPhasePending:
-		// Switch kkCluster.Status.Phase to `Pending`, also add HostReadyCondition.
-		excepted := kkCluster.DeepCopy()
-		kkCluster.Status.Phase = infrav1beta1.KKClusterPhaseRunning
-		// Set series of conditions as `Unknown` for the next reconciles.
-		conditions.MarkUnknown(s.KKCluster, infrav1beta1.HostsReadyCondition,
-			infrav1beta1.WaitingCheckHostReadyReason, infrav1beta1.WaitingCheckHostReadyMessage)
-		if err := r.Client.Status().Patch(ctx, kkCluster, ctrlclient.MergeFrom(excepted)); err != nil {
-			klog.V(5).ErrorS(err, "Update KKCluster error", "KKCluster", ctrlclient.ObjectKeyFromObject(kkCluster))
-
-			return ctrl.Result{}, err
+		err := s.PatchClusterWithFunc(ctx, func(kkc *infrav1beta1.KKCluster) {
+			// Switch kkCluster.Status.Phase to `Running`
+			kkc.Status.Phase = infrav1beta1.KKClusterPhaseRunning
+			// Set series of conditions as `Unknown` for the next reconciles.
+			conditions.MarkUnknown(s.KKCluster, infrav1beta1.HostsReadyCondition,
+				infrav1beta1.WaitingCheckHostReadyReason, infrav1beta1.WaitingCheckHostReadyMessage)
+		})
+		if err != nil {
+			return reconcile.Result{}, err
 		}
 	case infrav1beta1.KKClusterPhaseRunning:
 		if err := r.reconcileNormalRunning(ctx, s); err != nil {
@@ -228,20 +229,20 @@
 	}
 
 	if lb := s.ControlPlaneLoadBalancer(); lb != nil {
-		kkCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
+		s.KKCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
 			Host: lb.Host,
 			Port: s.APIServerPort(),
 		}
 	}
 
 	// Initialize node select mode
-	if !kkCluster.Status.Ready {
+	if !s.KKCluster.Status.Ready {
 		if err := r.initKKCluster(ctx, s); err != nil {
 			return ctrl.Result{}, err
 		}
 	}
 
-	kkCluster.Status.Ready = true
+	s.KKCluster.Status.Ready = true
 
 	return ctrl.Result{
 		RequeueAfter: 30 * time.Second,
@@ -320,23 +321,51 @@
 	return nil
 }
 
-func (r *KKClusterReconciler) reconcileDelete(clusterScope *scope.ClusterScope) {
+func (r *KKClusterReconciler) reconcileDelete(ctx context.Context, s *scope.ClusterScope) (reconcile.Result, error) {
 	klog.V(4).Info("Reconcile KKCluster delete")
 
 	// : pipeline delete
-	switch clusterScope.KKCluster.Status.Phase {
+	switch s.KKCluster.Status.Phase {
 	case infrav1beta1.KKClusterPhasePending:
-		// transfer into Delete phase
+		// Switch kkCluster.Status.Phase to `Deleting`
+		err := s.PatchClusterPhase(ctx, infrav1beta1.KKClusterPhaseDeleting)
+		if err != nil {
+			return reconcile.Result{}, err
+		}
	case infrav1beta1.KKClusterPhaseRunning:
-		// delete running pipeline & recreate delete pipeline
+		// delete running pipeline
+		if err := r.dealWithDeletePipelines(ctx, s); err != nil {
+			return reconcile.Result{}, err
+		}
+
+		err := s.PatchClusterPhase(ctx, infrav1beta1.KKClusterPhaseDeleting)
+		if err != nil {
+			return reconcile.Result{}, err
+		}
 	case infrav1beta1.KKClusterPhaseFailed:
-		// delete
+		// Switch kkCluster.Status.Phase to `Deleting`
+		err := s.PatchClusterPhase(ctx, infrav1beta1.KKClusterPhaseDeleting)
+		if err != nil {
+			return reconcile.Result{}, err
+		}
 	case infrav1beta1.KKClusterPhaseSucceed:
-		//
+		// Switch kkCluster.Status.Phase to `Deleting`
+		err := s.PatchClusterPhase(ctx, infrav1beta1.KKClusterPhaseDeleting)
+		if err != nil {
+			return reconcile.Result{}, err
+		}
+	case infrav1beta1.KKClusterPhaseDeleting:
+		if err := r.dealWithClusterDeleting(ctx, s); err != nil {
+			return reconcile.Result{}, err
+		}
 	}
 
 	// Cluster is deleted so remove the finalizer.
-	controllerutil.RemoveFinalizer(clusterScope.KKCluster, infrav1beta1.ClusterFinalizer)
+	if conditions.IsFalse(s.KKCluster, infrav1beta1.ClusterDeletingCondition) {
+		controllerutil.RemoveFinalizer(s.KKCluster, infrav1beta1.ClusterFinalizer)
+	}
+
+	return ctrl.Result{}, nil
 }
 
 // dealWithHostConnectCheck and dealWithHostSelector function used to pre-check inventory configuration, especially
@@ -534,6 +563,27 @@ func (r *KKClusterReconciler) dealWithClusterReadyCheck(ctx context.Context, s *
 	return r.updateInventoryStatus(ctx, s, inv)
 }
 
+// dealWithClusterDeleting function deletes the cluster by running the `delete-cluster` playbook.
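+// It reports the result through ClusterDeletingCondition, which has negative
+// polarity: a successful playbook run marks the condition False, a failed run
+// marks it True with error severity. A minimal sketch of how reconcileDelete
+// above consumes it:
+//
+//	if conditions.IsFalse(s.KKCluster, infrav1beta1.ClusterDeletingCondition) {
+//		controllerutil.RemoveFinalizer(s.KKCluster, infrav1beta1.ClusterFinalizer)
+//	}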
+func (r *KKClusterReconciler) dealWithClusterDeleting(ctx context.Context, s *scope.ClusterScope) error {
+	var err error
+	if _, err = r.dealWithExecutePlaybookReconcile(
+		ctx, s, ClusterDeletingPlaybook, ClusterDeletingPlaybookName,
+		func(_ *kkcorev1.Pipeline) {
+			conditions.MarkFalse(s.KKCluster, infrav1beta1.ClusterDeletingCondition, infrav1beta1.ClusterDeletingSucceedReason,
+				clusterv1.ConditionSeverityInfo, infrav1beta1.ClusterDeletingSucceedMessage)
+		},
+		func(p *kkcorev1.Pipeline) {
+			r.EventRecorder.Eventf(s.KKCluster, corev1.EventTypeWarning, infrav1beta1.ClusterDeletingFailedReason, p.Status.Reason)
+			conditions.MarkTrueWithNegativePolarity(s.KKCluster, infrav1beta1.ClusterDeletingCondition,
+				infrav1beta1.ClusterDeletingFailedReason, clusterv1.ConditionSeverityError, p.Status.Reason,
+			)
+		}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // dealWithExecutePlaybookReconcile compares the most recent pipeline's `.Status.Phase` with the latest state of the
 // cluster and executes the appropriate stage to adjust the cluster conditions. It will return one pipeline if it's
 // useful, used for the other judgements.
@@ -585,7 +635,7 @@ func (r *KKClusterReconciler) dealWithExecuteFailed(p *kkcorev1.Pipeline, functi
 	return err
 }
 
-// dealWithSecrets function fetches secrets created by KubeadmControlPlane, etc. And uses them to create a cluster.
+// dealWithSecrets function fetches secrets and uses them to create a cluster.
 func dealWithSecrets(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope) error {
 	// Fetch all secrets.
 	secrets := &corev1.SecretList{}
@@ -595,20 +645,20 @@
 		return err
 	}
 
-	// Fetch KubeadmControlPlaneReference.
-	var kcpOwnRef metav1.OwnerReference
-	if kcp, err := GetKubeadmControlPlane(ctx, client, s); err != nil {
-		return err
-	} else if kcp != nil {
-		kcpOwnRef = metav1.OwnerReference{
-			APIVersion: kcp.APIVersion,
-			Kind:       kcp.Kind,
-			Name:       kcp.Name,
-			UID:        kcp.UID,
-			Controller: ptr.To(true),
+	// Deal with secrets
+	for _, secret := range secrets.Items {
+		if err := dealWithKCSecrets(ctx, client, s, &secret); err != nil {
+			return err
 		}
 	}
 
+	return s.PatchClusterWithFunc(ctx, func(kkc *infrav1beta1.KKCluster) {
+		delete(kkc.Annotations, infrav1beta1.KCPSecretsRefreshAnnotation)
+	})
+}
+
+// dealWithKCSecrets function fetches secrets created by KubeadmConfig.
+func dealWithKCSecrets(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope, secret *corev1.Secret) error {
 	// Fetch control plane's KubeadmConfig.
 	var kcOwnRef metav1.OwnerReference
 	if kc, err := GetControlPlaneKubeadmConfig(ctx, client, s); err != nil {
@@ -623,48 +673,24 @@
 		}
 	}
 
-	// Deal with secrets
-	for _, secret := range secrets.Items {
-		if !(util.HasOwnerRef(secret.OwnerReferences, kcpOwnRef) || util.HasOwnerRef(secret.OwnerReferences, kcOwnRef)) {
-			continue
-		}
-
-		if err := mountSecretsOnPipelineTemplate(ctx, client, s, &secret); err != nil {
-			return err
-		}
+	if !util.HasOwnerRef(secret.OwnerReferences, kcOwnRef) {
+		return nil
 	}
 
-	// Delete kcp secrets annotation.
-	originalKKCluster := s.KKCluster.DeepCopy()
-	delete(s.KKCluster.Annotations, infrav1beta1.KCPSecretsRefreshAnnotation)
-
-	return client.Patch(ctx, s.KKCluster, ctrlclient.MergeFrom(originalKKCluster))
-}
+	// If the secret is in cloud-config format, parse it and generate the relevant secrets bound to `.Spec.PipelineTemplate`.
+	if strings.HasPrefix(secret.Name, s.KKCluster.Name+"-"+KCPKubeConfigSecretInfix) {
 
-// mountSecretsOnPipelineTemplate function handle one secret created by kcp, and bind with `PipelineTemplate` resource.
-func mountSecretsOnPipelineTemplate(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope, secret *corev1.Secret) error {
-	prefixToMountPath := map[string]string{
-		s.KKCluster.Name + "-" + KCPCertificateAuthoritySecretInfix: KCPCertificateAuthorityMountPath,
-		s.KKCluster.Name + "-" + KCPKubeadmConfigSecretInfix:        KCPKubeadmConfigMountPath,
-		s.KKCluster.Name + "-" + KCPEtcdSecretInfix:                 KCPEtcdMountPath,
-		s.KKCluster.Name + "-" + KCPKubeConfigSecretInfix:           KCPKubeConfigMountPath,
-		s.KKCluster.Name + "-" + KCPProxySecretInfix:                KCPProxyMountPath,
-		s.KKCluster.Name + "-" + KCPServiceAccountInfix:             KCPServiceAccountMountPath,
 	}
-
-	// Fetch secret mount path. All secrets created by KCP must satisfy `${ClusterName}-${Infix}` format name.
-	var mountPath string
-	for prefix, path := range prefixToMountPath {
-		if strings.HasPrefix(secret.Name, prefix) {
-			mountPath = path
-
-			break
-		}
-	}
-	if mountPath == "" {
-		return nil
+	if strings.HasPrefix(secret.Name, s.KKCluster.Name+"-"+KCPKubeadmConfigSecretInfix) {
+		return GenerateAndBindSecretsFromCloudConfig(ctx, client, s, secret, s.Name())
 	}
 
+	return nil
+}
+
+// mountSecretOnPipelineTemplate function handles one secret created by KCP and binds it to the `PipelineTemplate` resource.
+func mountSecretOnPipelineTemplate(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope,
+	secret *corev1.Secret, mountPath string) error {
 	// Define `Volume` and `VolumeMount`
 	volume := corev1.Volume{
 		Name: secret.Name + "-volume",
@@ -718,7 +744,7 @@
 	return client.Patch(ctx, s.KKCluster, ctrlclient.MergeFrom(originalKKCluster))
 }
 
-// dealWithPipelinesReconciles will reconcile all pipelines created for execute `playbookName` test1, and belong to current cluster.
+// dealWithPipelinesReconcile will reconcile all pipelines created to execute `playbookName` that belong to the current cluster.
 // It will create one
 func (r *KKClusterReconciler) dealWithPipelinesReconcile(ctx context.Context, s *scope.ClusterScope,
 	playbook, playbookName string) (*kkcorev1.Pipeline, error) {
@@ -760,6 +786,27 @@
 	return latestPipeline, nil
 }
 
+// dealWithDeletePipelines deletes all existing pipelines created by the cluster.
+func (r *KKClusterReconciler) dealWithDeletePipelines(ctx context.Context, s *scope.ClusterScope) error {
+	pipelines := &kkcorev1.PipelineList{}
+
+	// Check if pipelines exist, or an unexpected error occurred.
+	if err := r.Client.List(ctx, pipelines, ctrlclient.InNamespace(s.Namespace()), ctrlclient.MatchingLabels{
+		clusterv1.ClusterNameLabel: s.Name(),
+	}); err != nil && !apierrors.IsNotFound(err) {
+		return err
+	}
+
+	// Iterate through all pipelines and delete them.
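+	// Delete below tolerates NotFound errors, so the loop is safe to re-run
+	// if the reconcile is requeued partway through.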
+	for _, pipeline := range pipelines.Items {
+		if err := r.Client.Delete(ctx, &pipeline); err != nil && !apierrors.IsNotFound(err) {
+			return err
+		}
+	}
+
+	return nil
+}
+
 // initKKCluster function is used to initialize necessary configuration information when the YAML file does not set it.
 func (r *KKClusterReconciler) initKKCluster(ctx context.Context, s *scope.ClusterScope) error {
 	originalKKCluster := s.KKCluster.DeepCopy()
@@ -1163,6 +1210,80 @@ func GetMachineDeployment(ctx context.Context, client ctrlclient.Client, s *scop
 	return &mdList.Items[0], nil
 }
 
+// GenerateAndBindSecretsFromCloudConfig fetches the cloud-config format secret, parses it, and generates the bound secrets.
+func GenerateAndBindSecretsFromCloudConfig(ctx context.Context, client ctrlclient.Client, s *scope.ClusterScope,
+	secret *corev1.Secret, secretNamePrefix string) error {
+	// WriteFile represents the structure of each write_files entry in cloud-init.
+	type WriteFile struct {
+		Path        string `yaml:"path"`
+		Owner       string `yaml:"owner"`
+		Permissions string `yaml:"permissions"`
+		Content     string `yaml:"content"`
+	}
+
+	// CloudConfig represents the structure of the cloud-init data.
+	type CloudConfig struct {
+		WriteFiles []WriteFile `yaml:"write_files"`
+	}
+
+	// Step 1: Get the YAML data from the secret.
+	data, exists := secret.Data[CloudConfigValueKey]
+	if !exists {
+		return fmt.Errorf("key %s not found in secret", CloudConfigValueKey)
+	}
+
+	// Step 2: Parse the cloud-init content into CloudConfig struct.
+	var cloudConfig CloudConfig
+	if err := yaml.Unmarshal(data, &cloudConfig); err != nil {
+		return fmt.Errorf("failed to unmarshal cloud-init YAML: %w", err)
+	}
+
+	for _, file := range cloudConfig.WriteFiles {
+		var secretName string
+		if strings.HasPrefix(file.Path, KubernetesDir) {
+			// Create a secret name based on the file path (replace slashes with hyphens).
+			secretName = fmt.Sprintf("%s-%s", secretNamePrefix,
+				strings.NewReplacer(".", "-", "/", "-").Replace(strings.TrimPrefix(file.Path, KubernetesDir)))
+		} else if file.Path == "/run/kubeadm/kubeadm.yaml" {
+			secretName = fmt.Sprintf("%s-%s", secretNamePrefix, "kubeadm-config")
+		} else {
+			continue
+		}
+
+		ownerRef := metav1.OwnerReference{
+			APIVersion:         s.KKCluster.APIVersion,
+			Kind:               s.KKCluster.Kind,
+			Name:               s.KKCluster.Name,
+			UID:                s.KKCluster.UID,
+			Controller:         ptr.To(true),
+			BlockOwnerDeletion: ptr.To(true),
+		}
+
+		secret := &corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:            secretName,
+				Namespace:       s.Namespace(),
+				OwnerReferences: []metav1.OwnerReference{ownerRef},
+			},
+			Data: map[string][]byte{
+				filepath.Base(file.Path): []byte(file.Content),
+			},
+			Type: corev1.SecretTypeBootstrapToken,
+		}
+
+		// Create the Secret in the cluster
+		if err := client.Create(ctx, secret); err != nil {
+			return err
+		}
+
+		if err := mountSecretOnPipelineTemplate(ctx, client, s, secret, file.Path); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
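+// For illustration only, the resulting name mapping behaves like this
+// hypothetical standalone snippet (`secretNamePrefix` and `path` assumed):
+//
+//	// "/etc/kubernetes/pki/ca.crt" -> "<secretNamePrefix>-ca-crt"
+//	name := fmt.Sprintf("%s-%s", secretNamePrefix,
+//		strings.NewReplacer(".", "-", "/", "-").Replace(strings.TrimPrefix(path, KubernetesDir)))
+//
+// while "/run/kubeadm/kubeadm.yaml" maps to the fixed name "<secretNamePrefix>-kubeadm-config".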
 // SetupWithManager sets up the controller with the Manager.
 func (r *KKClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
 	pipelinePhaseFilter := predicate.Funcs{
diff --git a/pkg/controllers/kkmachine_controller.go b/pkg/controllers/kkmachine_controller.go
index 62e593c17..16c94303c 100644
--- a/pkg/controllers/kkmachine_controller.go
+++ b/pkg/controllers/kkmachine_controller.go
@@ -107,13 +107,6 @@ func (r *KKMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		return ctrl.Result{}, nil
 	}
 
-	// Handle Deletion Early, avoid `KKCluster` delete earlier than `KKMachine`, which may causes some problem.
-	if !kkMachine.ObjectMeta.DeletionTimestamp.IsZero() {
-		r.reconcileDelete(kkMachine)
-
-		return ctrl.Result{}, nil
-	}
-
 	kkCluster := &infrav1beta1.KKCluster{}
 	kkClusterName := ctrlclient.ObjectKey{
 		Namespace: kkMachine.Namespace,
@@ -158,6 +151,14 @@
 		}
 	}()
 
+	// Handle Deleted machines
+	if !kkMachine.ObjectMeta.DeletionTimestamp.IsZero() {
+		r.reconcileDelete(kkMachine)
+
+		return ctrl.Result{}, nil
+	}
+
+	// Handle normal machines
 	return r.reconcileNormal(ctx, machineScope)
 }
 
@@ -195,11 +196,11 @@
 	}, nil
 }
 
-func (r *KKMachineReconciler) reconcileDelete(kkMachine *infrav1beta1.KKMachine) {
-	klog.V(4).Info("Reconcile KKCluster delete")
+func (r *KKMachineReconciler) reconcileDelete(kkm *infrav1beta1.KKMachine) {
+	klog.V(4).Info("Reconcile KKMachine delete")
 
 	// Machine is deleted so remove the finalizer.
-	controllerutil.RemoveFinalizer(kkMachine, infrav1beta1.MachineFinalizer)
+	controllerutil.RemoveFinalizer(kkm, infrav1beta1.MachineFinalizer)
 }
 
 func refreshProviderID(ctx context.Context, client ctrlclient.Client, s *scope.MachineScope) error {
diff --git a/pkg/scope/cluster.go b/pkg/scope/cluster.go
index c1e1c4f8a..8612bc146 100644
--- a/pkg/scope/cluster.go
+++ b/pkg/scope/cluster.go
@@ -22,9 +22,10 @@ import (
 	"fmt"
 	"net"
 
+	"k8s.io/klog/v2"
 	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
 
-	"github.com/kubesphere/kubekey/v4/pkg/apis/capkk/v1beta1"
+	infrav1beta1 "github.com/kubesphere/kubekey/v4/pkg/apis/capkk/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/util/patch"
@@ -39,7 +40,7 @@ const (
 type ClusterScopeParams struct {
 	Client         ctrlclient.Client
 	Cluster        *clusterv1.Cluster
-	KKCluster      *v1beta1.KKCluster
+	KKCluster      *infrav1beta1.KKCluster
 	ControllerName string
 }
 
@@ -76,7 +77,7 @@ type ClusterScope struct {
 	patchHelper *patch.Helper
 
 	Cluster   *clusterv1.Cluster
-	KKCluster *v1beta1.KKCluster
+	KKCluster *infrav1beta1.KKCluster
 
 	controllerName string
 }
@@ -102,8 +103,8 @@ func (s *ClusterScope) KubernetesClusterName() string {
 }
 
 // GetKKMachines returns the list of KKMachines for a KKCluster.
-func (s *ClusterScope) GetKKMachines(ctx context.Context) (*v1beta1.KKMachineList, error) {
-	kkMachineList := &v1beta1.KKMachineList{}
+func (s *ClusterScope) GetKKMachines(ctx context.Context) (*infrav1beta1.KKMachineList, error) {
+	kkMachineList := &infrav1beta1.KKMachineList{}
 	if err := s.client.List(
 		ctx,
 		kkMachineList,
@@ -123,6 +124,32 @@
 	return s.KKCluster.Spec.ControlPlaneEndpoint
 }
 
+// PatchClusterPhase switches `KKCluster` into the specified phase.
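+// A minimal usage sketch, assuming an initialized *ClusterScope `s` as in
+// reconcileDelete above:
+//
+//	if err := s.PatchClusterPhase(ctx, infrav1beta1.KKClusterPhaseDeleting); err != nil {
+//		return reconcile.Result{}, err
+//	}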
+func (s *ClusterScope) PatchClusterPhase(ctx context.Context, phase infrav1beta1.KKClusterPhase) error {
+	return s.PatchClusterWithFunc(ctx, func(kkc *infrav1beta1.KKCluster) {
+		kkc.Status.Phase = phase
+	})
+}
+
+// PatchClusterWithFunc executes a function that modifies the KKCluster and then patches the changes.
+func (s *ClusterScope) PatchClusterWithFunc(ctx context.Context, function func(kkc *infrav1beta1.KKCluster)) error {
+	// Create a deep copy of the original KKCluster to preserve its state before modification.
+	expected := s.KKCluster.DeepCopy()
+
+	// Execute the provided function to modify the KKCluster.
+	function(s.KKCluster)
+
+	// Apply the patch using the client, comparing the modified state with the original.
+	if err := s.client.Status().Patch(ctx, s.KKCluster, ctrlclient.MergeFrom(expected)); err != nil {
+		klog.V(5).ErrorS(err, "Update KKCluster error", "KKCluster",
+			ctrlclient.ObjectKeyFromObject(s.KKCluster))
+
+		return err
+	}
+
+	return nil
+}
+
 // PatchObject persists the cluster configuration and status.
 func (s *ClusterScope) PatchObject(ctx context.Context) error {
 	return s.patchHelper.Patch(
@@ -130,12 +157,12 @@ func (s *ClusterScope) PatchObject(ctx context.Context) error {
 		s.KKCluster,
 		patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
 			clusterv1.ReadyCondition,
-			v1beta1.HostsReadyCondition,
-			v1beta1.PreparationReadyCondition,
-			v1beta1.EtcdReadyCondition,
-			v1beta1.BinaryInstallCondition,
-			v1beta1.BootstrapReadyCondition,
-			v1beta1.ClusterReadyCondition,
+			infrav1beta1.HostsReadyCondition,
+			infrav1beta1.PreparationReadyCondition,
+			infrav1beta1.EtcdReadyCondition,
+			infrav1beta1.BinaryInstallCondition,
+			infrav1beta1.BootstrapReadyCondition,
+			infrav1beta1.ClusterReadyCondition,
 		}})
 }
 
@@ -156,7 +183,7 @@ func (s *ClusterScope) Distribution() string {
 }
 
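PatchClusterWithFunc deep-copies the object, applies the mutation, and sends a single MergeFrom status patch, so callers can fold several status edits into one API round-trip. A hedged usage sketch follows; the helper name and the condition update are hypothetical, and it assumes KKCluster satisfies the cluster-api conditions.Setter interface (which the conditions.IsFalse calls elsewhere in this series suggest):

package scope // hypothetical placement alongside cluster.go

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"

	infrav1beta1 "github.com/kubesphere/kubekey/v4/pkg/apis/capkk/v1beta1"
)

// MarkHostsUnreachable is a hypothetical helper, not part of this patch: it
// records a failed host check and moves the cluster to Failed in a single
// status patch, riding on PatchClusterWithFunc's copy-and-merge semantics.
func (s *ClusterScope) MarkHostsUnreachable(ctx context.Context, msg string) error {
	return s.PatchClusterWithFunc(ctx, func(kkc *infrav1beta1.KKCluster) {
		// Assumes KKCluster implements conditions.Setter.
		conditions.MarkFalse(kkc, infrav1beta1.HostsReadyCondition,
			"HostsUnreachable", clusterv1.ConditionSeverityError, "%s", msg)
		kkc.Status.Phase = infrav1beta1.KKClusterPhaseFailed
	})
}

Diffing against a deep copy keeps the patch minimal, so concurrent reconcilers only conflict when they touch the same status fields.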
 // ControlPlaneLoadBalancer returns the KKLoadBalancerSpec.
-func (s *ClusterScope) ControlPlaneLoadBalancer() *v1beta1.KKLoadBalancerSpec {
+func (s *ClusterScope) ControlPlaneLoadBalancer() *infrav1beta1.KKLoadBalancerSpec {
 	lb := s.KKCluster.Spec.ControlPlaneLoadBalancer
 	if lb == nil {
 		return nil

From 8120584b81c3f4890664133750bfc9b6eb5f1b0e Mon Sep 17 00:00:00 2001
From: DingYongliang <1521323439@qq.com>
Date: Tue, 29 Oct 2024 17:28:56 +0800
Subject: [PATCH 8/9] fix: remove webhook

---
 pkg/manager/controller_manager.go | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/pkg/manager/controller_manager.go b/pkg/manager/controller_manager.go
index 11f2f0439..8a77a38c7 100644
--- a/pkg/manager/controller_manager.go
+++ b/pkg/manager/controller_manager.go
@@ -20,8 +20,6 @@ import (
 	"context"
 	"fmt"
 
-	capkkv1beta1 "github.com/kubesphere/kubekey/v4/pkg/apis/capkk/v1beta1"
-
 	"k8s.io/client-go/rest"
 	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -91,17 +89,5 @@ func (c controllerManager) Run(ctx context.Context) error {
 		return err
 	}
 
-	if err = (&capkkv1beta1.KKCluster{}).SetupWebhookWithManager(mgr); err != nil {
-		klog.ErrorS(err, "unable to create webhook", "webhook", "KKCluster")
-
-		return err
-	}
-
-	if err = (&capkkv1beta1.KKMachine{}).SetupWebhookWithManager(mgr); err != nil {
-		klog.ErrorS(err, "unable to create webhook", "webhook", "KKMachine")
-
-		return err
-	}
-
 	return mgr.Start(ctx)
 }

From b3d07e7c3285ef8d70850d90bb3803f41d28c2e6 Mon Sep 17 00:00:00 2001
From: DingYongliang <1521323439@qq.com>
Date: Tue, 29 Oct 2024 17:49:07 +0800
Subject: [PATCH 9/9] fix: lint error

---
 pkg/controllers/kkcluster_controller.go | 142 ++++++++++++------------
 1 file changed, 74 insertions(+), 68 deletions(-)

diff --git a/pkg/controllers/kkcluster_controller.go b/pkg/controllers/kkcluster_controller.go
index f3f32976a..e6ded3c46 100644
--- a/pkg/controllers/kkcluster_controller.go
+++ b/pkg/controllers/kkcluster_controller.go
@@ -180,7 +180,10 @@ func (r *KKClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 
 	// Handle deleted clusters
 	if !kkCluster.DeletionTimestamp.IsZero() {
-		return r.reconcileDelete(ctx, clusterScope)
+		if err := r.reconcileDelete(ctx, clusterScope); err != nil {
+			return reconcile.Result{}, err
+		}
+		return reconcile.Result{}, nil
 	}
 
 	// Handle non-deleted clusters
@@ -198,6 +201,7 @@ func (r *KKClusterReconciler) reconcileNormal(ctx context.Context, s *scope.Clus
 		}
 	}
 
+	//nolint:exhaustive
 	switch s.KKCluster.Status.Phase {
 	case "":
 		// Switch kkCluster.Status.Phase to `Pending`
@@ -250,78 +254,83 @@ func (r *KKClusterReconciler) reconcileNormal(ctx context.Context, s *scope.Clus
 }
 
 func (r *KKClusterReconciler) reconcileNormalRunning(ctx context.Context, s *scope.ClusterScope) error {
-	var reset bool
 	for {
-		reset = false
+		conditionsIsChanged, err := r.dealWithKKClusterConditions(ctx, s)
+		if err != nil {
+			return err
+		}
+		if !conditionsIsChanged {
+			break
+		}
+	}
 
-		for _, condition := range s.KKCluster.Status.Conditions {
-			conditionsCnt := len(s.KKCluster.Status.Conditions)
-			if conditions.IsFalse(s.KKCluster, condition.Type) {
-				continue
-			}
+	return nil
+}
 
-			switch condition.Type {
-			case infrav1beta1.HostsReadyCondition:
-				if err := r.dealWithHostConnectCheck(ctx, s); err != nil {
-					return err
-				}
-			case infrav1beta1.PreparationReadyCondition:
-				// Refresh KCP secrets if annotation is true.
-				if val, ok := s.KKCluster.Annotations[infrav1beta1.KCPSecretsRefreshAnnotation]; ok && val == TrueString {
-					if err := dealWithSecrets(ctx, r.Client, s); err != nil {
-						return err
-					}
-				}
-				if err := r.dealWithPreparation(ctx, s); err != nil {
-					return err
-				}
-			case infrav1beta1.EtcdReadyCondition:
-				if err := r.dealWithEtcdInstall(ctx, s); err != nil {
-					return err
-				}
-			case infrav1beta1.BinaryInstallCondition:
-				if err := r.dealWithBinaryInstall(ctx, s); err != nil {
-					return err
-				}
-			case infrav1beta1.BootstrapReadyCondition:
-				// kubeadm init, kubeadm join
-				if err := r.dealWithBootstrapReady(ctx, s); err != nil {
-					return err
-				}
-			case infrav1beta1.ClusterReadyCondition:
-				// kubectl get node
-				// master -> configmap -> kubeconfig -> Client: get node
-				if err := r.dealWithClusterReadyCheck(ctx, s); err != nil {
-					return err
-				}
-				// Switch `KKCluster.Phase` to `Succeed`
-				s.KKCluster.Status.Phase = infrav1beta1.KKClusterPhaseSucceed
-				if err := r.Client.Status().Update(ctx, s.KKCluster); err != nil {
-					klog.V(5).ErrorS(err, "Update KKCluster error", "KKCluster",
-						ctrlclient.ObjectKeyFromObject(s.KKCluster))
-
-					return err
-				}
-			default:
-			}
-
-			// If add new conditions, restart loop.
-			if len(s.KKCluster.Status.Conditions) > conditionsCnt {
-				reset = true
-
-				break
-			}
-		}
-
-		if !reset {
-			break
+//nolint:gocognit,cyclop
+func (r *KKClusterReconciler) dealWithKKClusterConditions(ctx context.Context, s *scope.ClusterScope) (bool, error) {
+	for _, condition := range s.KKCluster.Status.Conditions {
+		conditionsCnt := len(s.KKCluster.Status.Conditions)
+		if conditions.IsFalse(s.KKCluster, condition.Type) {
+			continue
+		}
+
+		//nolint:exhaustive
+		switch condition.Type {
+		case infrav1beta1.HostsReadyCondition:
+			if err := r.dealWithHostConnectCheck(ctx, s); err != nil {
+				return false, err
+			}
+		case infrav1beta1.PreparationReadyCondition:
+			// Refresh KCP secrets if the annotation is set to true.
+			if val, ok := s.KKCluster.Annotations[infrav1beta1.KCPSecretsRefreshAnnotation]; ok && val == TrueString {
+				if err := dealWithSecrets(ctx, r.Client, s); err != nil {
+					return false, err
+				}
+			}
+			if err := r.dealWithPreparation(ctx, s); err != nil {
+				return false, err
+			}
+		case infrav1beta1.EtcdReadyCondition:
+			if err := r.dealWithEtcdInstall(ctx, s); err != nil {
+				return false, err
+			}
+		case infrav1beta1.BinaryInstallCondition:
+			if err := r.dealWithBinaryInstall(ctx, s); err != nil {
+				return false, err
+			}
+		case infrav1beta1.BootstrapReadyCondition:
+			// kubeadm init, kubeadm join
+			if err := r.dealWithBootstrapReady(ctx, s); err != nil {
+				return false, err
+			}
+		case infrav1beta1.ClusterReadyCondition:
+			// kubectl get node
+			// master -> configmap -> kubeconfig -> Client: get node
+			if err := r.dealWithClusterReadyCheck(ctx, s); err != nil {
+				return false, err
+			}
+			// Switch `KKCluster.Phase` to `Succeed`
+			s.KKCluster.Status.Phase = infrav1beta1.KKClusterPhaseSucceed
+			if err := r.Client.Status().Update(ctx, s.KKCluster); err != nil {
+				klog.V(5).ErrorS(err, "Update KKCluster error", "KKCluster",
+					ctrlclient.ObjectKeyFromObject(s.KKCluster))
+
+				return false, err
+			}
+		default:
+		}
+
+		// If new conditions were added, restart the loop.
+ if len(s.KKCluster.Status.Conditions) > conditionsCnt { + return true, nil } } - return nil + return false, nil } -func (r *KKClusterReconciler) reconcileDelete(ctx context.Context, s *scope.ClusterScope) (reconcile.Result, error) { +func (r *KKClusterReconciler) reconcileDelete(ctx context.Context, s *scope.ClusterScope) error { klog.V(4).Info("Reconcile KKCluster delete") // : pipeline delete @@ -330,33 +339,33 @@ func (r *KKClusterReconciler) reconcileDelete(ctx context.Context, s *scope.Clus // Switch kkCluster.Status.Phase to `Deleting` err := s.PatchClusterPhase(ctx, infrav1beta1.KKClusterPhaseDeleting) if err != nil { - return reconcile.Result{}, err + return err } case infrav1beta1.KKClusterPhaseRunning: // delete running pipeline if err := r.dealWithDeletePipelines(ctx, s); err != nil { - return reconcile.Result{}, err + return err } err := s.PatchClusterPhase(ctx, infrav1beta1.KKClusterPhaseDeleting) if err != nil { - return reconcile.Result{}, err + return err } case infrav1beta1.KKClusterPhaseFailed: // Switch kkCluster.Status.Phase to `Deleting` err := s.PatchClusterPhase(ctx, infrav1beta1.KKClusterPhaseDeleting) if err != nil { - return reconcile.Result{}, err + return err } case infrav1beta1.KKClusterPhaseSucceed: // Switch kkCluster.Status.Phase to `Deleting` err := s.PatchClusterPhase(ctx, infrav1beta1.KKClusterPhaseDeleting) if err != nil { - return reconcile.Result{}, err + return err } case infrav1beta1.KKClusterPhaseDeleting: if err := r.dealWithClusterDeleting(ctx, s); err != nil { - return reconcile.Result{}, err + return err } } @@ -365,7 +374,7 @@ func (r *KKClusterReconciler) reconcileDelete(ctx context.Context, s *scope.Clus controllerutil.RemoveFinalizer(s.KKCluster, infrav1beta1.ClusterFinalizer) } - return ctrl.Result{}, nil + return nil } // dealWithHostConnectCheck and dealWithHostSelector function used to pre-check inventory configuration, especially @@ -678,9 +687,6 @@ func dealWithKCSecrets(ctx context.Context, client ctrlclient.Client, s *scope.C } // if secret format is cloud-config, parse and generate relevant secrets bind with `.Spec.PipelineTemplate` - if strings.HasPrefix(secret.Name, s.KKCluster.Name+"-"+KCPKubeConfigSecretInfix) { - - } if strings.HasPrefix(secret.Name, s.KKCluster.Name+"-"+KCPKubeadmConfigSecretInfix) { return GenerateAndBindSecretsFromCloudConfig(ctx, client, s, secret, s.Name()) }
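The refactor in PATCH 9/9 turns reconcileNormalRunning into a run-until-stable loop: dealWithKKClusterConditions sweeps the condition list once and reports whether the sweep appended new conditions, in which case the caller starts another pass. A distilled, self-contained sketch of that pattern, with hypothetical names standing in for the controller's types:

package main

import "fmt"

// step mimics dealWithKKClusterConditions: it processes the current condition
// set once and reports whether new conditions appeared, which tells the
// caller (the reconcileNormalRunning analogue) to run another pass.
func step(conds *[]string, handled map[string]bool) (bool, error) {
	before := len(*conds)
	for _, c := range *conds {
		if handled[c] {
			continue // stands in for the conditions.IsFalse skip
		}
		handled[c] = true
		// A handler may append follow-up conditions, the way an early stage
		// can introduce the next stage's condition.
		if c == "HostsReady" {
			*conds = append(*conds, "PreparationReady")
		}
		if len(*conds) > before {
			return true, nil // mirrors the controller's `return true, nil` restart
		}
	}
	return false, nil
}

func main() {
	conds := []string{"HostsReady"}
	handled := map[string]bool{}
	for {
		changed, err := step(&conds, handled)
		if err != nil || !changed {
			break
		}
	}
	fmt.Println(conds) // [HostsReady PreparationReady]
}

Returning a (changed, error) pair instead of mutating a shared reset flag is what lets the refactor split the deeply nested loop out of reconcileNormalRunning and satisfy the gocognit/cyclop linters.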