diff --git a/mgradm/cmd/inspect/kubernetes.go b/mgradm/cmd/inspect/kubernetes.go
index 32efdaeea..7631b460d 100644
--- a/mgradm/cmd/inspect/kubernetes.go
+++ b/mgradm/cmd/inspect/kubernetes.go
@@ -47,7 +47,7 @@ func kuberneteInspect(
 	}
 
 	// Get the SCC credentials secret if existing
-	pullSecret, err := kubernetes.GetSCCSecret(namespace, &types.SCCCredentials{}, kubernetes.ServerApp)
+	pullSecret, err := kubernetes.GetRegistrySecret(namespace, &types.SCCCredentials{}, kubernetes.ServerApp)
 	if err != nil {
 		return err
 	}
diff --git a/mgradm/cmd/install/podman/podman.go b/mgradm/cmd/install/podman/podman.go
index 161274cf6..e7c92e6be 100644
--- a/mgradm/cmd/install/podman/podman.go
+++ b/mgradm/cmd/install/podman/podman.go
@@ -42,6 +42,7 @@ NOTE: installing on a remote podman is not supported yet!
 		},
 	}
 
+	adm_utils.AddMirrorFlag(cmd)
 	shared.AddInstallFlags(cmd)
 	podman.AddPodmanArgFlag(cmd)
diff --git a/mgradm/cmd/install/podman/podman_test.go b/mgradm/cmd/install/podman/podman_test.go
index 045d4cfbe..ed9fdfd0f 100644
--- a/mgradm/cmd/install/podman/podman_test.go
+++ b/mgradm/cmd/install/podman/podman_test.go
@@ -17,6 +17,7 @@ import (
 func TestParamsParsing(t *testing.T) {
 	args := flagstests.InstallFlagsTestArgs()
+	args = append(args, flagstests.MirrorFlagTestArgs...)
 	args = append(args, flagstests.PodmanFlagsTestArgs...)
 	args = append(args, "srv.fq.dn")
@@ -24,6 +25,7 @@ func TestParamsParsing(t *testing.T) {
 	tester := func(_ *types.GlobalFlags, flags *podmanInstallFlags,
 		_ *cobra.Command, args []string,
 	) error {
+		flagstests.AssertMirrorFlag(t, flags.Mirror)
 		flagstests.AssertInstallFlags(t, &flags.ServerFlags)
 		flagstests.AssertPodmanInstallFlags(t, &flags.Podman)
 		testutils.AssertEquals(t, "Wrong FQDN", "srv.fq.dn", args[0])
diff --git a/mgradm/cmd/install/podman/utils.go b/mgradm/cmd/install/podman/utils.go
index dec32b786..1dc60f49b 100644
--- a/mgradm/cmd/install/podman/utils.go
+++ b/mgradm/cmd/install/podman/utils.go
@@ -6,8 +6,8 @@ package podman
 
 import (
 	"errors"
+	"fmt"
 	"os/exec"
-	"strings"
 
 	"github.com/rs/zerolog"
 	"github.com/rs/zerolog/log"
@@ -91,35 +91,23 @@ func installForPodman(
 		return err
 	}
 
-	cnx := shared.NewConnection("podman", shared_podman.ServerContainerName, "")
-	if err := waitForSystemStart(systemd, cnx, preparedImage, flags); err != nil {
-		return utils.Errorf(err, L("cannot wait for system start"))
+	if err := shared_podman.SetupNetwork(false); err != nil {
+		return utils.Errorf(err, L("cannot set up the network"))
 	}
 
-	caPassword := flags.Installation.SSL.Password
-	if flags.Installation.SSL.UseExisting() {
-		// We need to have a password for the generated CA, even though it will be thrown away after install
-		caPassword = "dummy"
-	}
+	log.Info().Msg(L("Run setup command in the container"))
 
-	env := map[string]string{
-		"CERT_O":       flags.Installation.SSL.Org,
-		"CERT_OU":      flags.Installation.SSL.OU,
-		"CERT_CITY":    flags.Installation.SSL.City,
-		"CERT_STATE":   flags.Installation.SSL.State,
-		"CERT_COUNTRY": flags.Installation.SSL.Country,
-		"CERT_EMAIL":   flags.Installation.SSL.Email,
-		"CERT_CNAMES":  strings.Join(append([]string{fqdn}, flags.Installation.SSL.Cnames...), ","),
-		"CERT_PASS":    caPassword,
+	if err := runSetup(preparedImage, &flags.ServerFlags, fqdn); err != nil {
+		return err
 	}
 
-	log.Info().Msg(L("Run setup command in the container"))
+	cnx := shared.NewConnection("podman", shared_podman.ServerContainerName, "")
+	if err := waitForSystemStart(systemd, cnx, preparedImage, flags); err != nil {
+		return utils.Errorf(err, L("cannot wait for system start"))
+	}
 
-	if err := adm_utils.RunSetup(cnx, &flags.ServerFlags, fqdn, env); err != nil {
-		if stopErr := systemd.StopService(shared_podman.ServerService); stopErr != nil {
-			log.Error().Msgf(L("Failed to stop service: %v"), stopErr)
-		}
-		return err
+	if err := cnx.CopyCaCertificate(fqdn); err != nil {
+		return utils.Errorf(err, L("failed to add SSL CA certificate to host trusted certificates"))
 	}
 
 	if path, err := exec.LookPath("uyuni-payg-extract-data"); err == nil {
@@ -173,3 +161,44 @@ func installForPodman(
 	}
 	return nil
 }
+
+// runSetup executes the setup.
+func runSetup(image string, flags *adm_utils.ServerFlags, fqdn string) error {
+	env := adm_utils.GetSetupEnv(flags.Mirror, &flags.Installation, fqdn, false)
+	envNames := []string{}
+	envValues := []string{}
+	for key, value := range env {
+		envNames = append(envNames, "-e", key)
+		envValues = append(envValues, fmt.Sprintf("%s=%s", key, value))
+	}
+
+	command := []string{
+		"run",
+		"--rm",
+		"--shm-size=0",
+		"--shm-size-systemd=0",
+		"--name", "uyuni-setup",
+		"--network", shared_podman.UyuniNetwork,
+		"-e", "TZ=" + flags.Installation.TZ,
+	}
+	for _, volume := range utils.ServerVolumeMounts {
+		command = append(command, "-v", fmt.Sprintf("%s:%s:z", volume.Name, volume.MountPath))
+	}
+	command = append(command, envNames...)
+	command = append(command, image)
+
+	script, err := adm_utils.GenerateSetupScript(&flags.Installation, false)
+	if err != nil {
+		return err
+	}
+	command = append(command, "/usr/bin/sh", "-c", script)
+
+	if _, err := newRunner("podman", command...).Env(envValues).StdMapping().Exec(); err != nil {
+		return utils.Errorf(err, L("server setup failed"))
+	}
+
+	log.Info().Msgf(L("Server set up, log in on https://%[1]s with the %[2]s user"), fqdn, flags.Installation.Admin.Login)
+	return nil
+}
+
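+// newRunner is defined as a variable so that tests can substitute a mock command runner.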
+var newRunner = utils.NewRunner
diff --git a/mgradm/cmd/install/shared/flags.go b/mgradm/cmd/install/shared/flags.go
index 9c221d01d..8eddef075 100644
--- a/mgradm/cmd/install/shared/flags.go
+++ b/mgradm/cmd/install/shared/flags.go
@@ -20,7 +20,6 @@ func AddInspectFlags(cmd *cobra.Command) {
 
 // AddInstallFlags add flags to installa command.
 func AddInstallFlags(cmd *cobra.Command) {
-	cmd_utils.AddMirrorFlag(cmd)
 	cmd.Flags().String("tz", "", L("Time zone to set on the server. Defaults to the host timezone"))
 	cmd.Flags().String("email", "admin@example.com", L("Administrator e-mail"))
 	cmd.Flags().String("emailfrom", "notifications@example.com", L("E-Mail sending the notifications"))
@@ -31,7 +30,6 @@ func AddInstallFlags(cmd *cobra.Command) {
 	cmd.Flags().String("db-name", "susemanager", L("Database name"))
 	cmd.Flags().String("db-host", "localhost", L("Database host"))
 	cmd.Flags().Int("db-port", 5432, L("Database port"))
-	cmd.Flags().String("db-protocol", "tcp", L("Database protocol"))
 	cmd.Flags().String("db-admin-user", "", L("External database admin user name"))
 	cmd.Flags().String("db-admin-password", "", L("External database admin password"))
 	cmd.Flags().String("db-provider", "", L("External database provider. Possible values 'aws'"))
@@ -42,7 +40,6 @@ func AddInstallFlags(cmd *cobra.Command) {
 	_ = utils.AddFlagToHelpGroupID(cmd, "db-name", "db")
 	_ = utils.AddFlagToHelpGroupID(cmd, "db-host", "db")
 	_ = utils.AddFlagToHelpGroupID(cmd, "db-port", "db")
-	_ = utils.AddFlagToHelpGroupID(cmd, "db-protocol", "db")
 	_ = utils.AddFlagToHelpGroupID(cmd, "db-admin-user", "db")
 	_ = utils.AddFlagToHelpGroupID(cmd, "db-admin-password", "db")
 	_ = utils.AddFlagToHelpGroupID(cmd, "db-provider", "db")
diff --git a/mgradm/cmd/migrate/kubernetes/utils.go b/mgradm/cmd/migrate/kubernetes/utils.go
index 2e0bdc48c..b3366c05d 100644
--- a/mgradm/cmd/migrate/kubernetes/utils.go
+++ b/mgradm/cmd/migrate/kubernetes/utils.go
@@ -60,7 +60,7 @@ func migrateToKubernetes(
 	}
 
 	// Create a secret using SCC credentials if any are provided
-	pullSecret, err := shared_kubernetes.GetSCCSecret(
+	pullSecret, err := shared_kubernetes.GetRegistrySecret(
 		flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SCC, shared_kubernetes.ServerApp,
 	)
 	if err != nil {
diff --git a/mgradm/shared/kubernetes/db.go b/mgradm/shared/kubernetes/db.go
index 7a60903ab..da5ea7d69 100644
--- a/mgradm/shared/kubernetes/db.go
+++ b/mgradm/shared/kubernetes/db.go
@@ -7,6 +7,9 @@
 package kubernetes
 
 import (
+	"strings"
+
+	"github.com/rs/zerolog"
 	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
 	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
 	core "k8s.io/api/core/v1"
@@ -19,12 +22,20 @@ const (
 	DBSecret = "db-credentials"
 	// ReportdbSecret is the name of the report database credentials secret.
 	ReportdbSecret = "reportdb-credentials"
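+	// SCCSecret is the name of the basic-auth secret holding the SCC credentials for the setup job.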
+	SCCSecret = "scc-credentials"
 
 	secretUsername = "username"
 	secretPassword = "password"
 )
 
-// CreateDBSecret creates a secret containing the DB credentials.
-func CreateDBSecret(namespace string, name string, user string, password string) error {
+// CreateBasicAuthSecret creates a secret of type basic-auth.
+func CreateBasicAuthSecret(namespace string, name string, user string, password string) error {
+	// Check if the secret already exists
+	out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "-n", namespace, "secret", name, "-o", "name")
+	if err == nil && strings.TrimSpace(string(out)) != "" {
+		return nil
+	}
+
+	// Create the secret
 	secret := core.Secret{
 		TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Secret"},
 		ObjectMeta: meta.ObjectMeta{
@@ -40,5 +51,5 @@ func CreateDBSecret(namespace string, name string, user string, password string)
 		Type: core.SecretTypeBasicAuth,
 	}
 
-	return kubernetes.Apply([]runtime.Object{&secret}, L("failed to create the database secret"))
+	return kubernetes.Apply([]runtime.Object{&secret}, L("failed to create the secret"))
 }
diff --git a/mgradm/shared/kubernetes/deployment.go b/mgradm/shared/kubernetes/deployment.go
index 765e41239..1c2dfb348 100644
--- a/mgradm/shared/kubernetes/deployment.go
+++ b/mgradm/shared/kubernetes/deployment.go
@@ -44,14 +44,15 @@ func CreateServerDeployment(
 		}
 	}
 
-	serverDeploy := getServerDeployment(
+	serverDeploy := GetServerDeployment(
 		namespace, serverImage, kubernetes.GetPullPolicy(pullPolicy), timezone, debug, mirrorPvName, pullSecret,
 	)
 
 	return kubernetes.Apply([]runtime.Object{serverDeploy}, L("failed to create the server deployment"))
 }
 
-func getServerDeployment(
+// GetServerDeployment computes the deployment object for an Uyuni server.
+func GetServerDeployment(
 	namespace string,
 	image string,
 	pullPolicy core.PullPolicy,
@@ -62,6 +63,101 @@ func getServerDeployment(
 ) *apps.Deployment {
 	var replicas int32 = 1
 
+	runMount, runVolume := kubernetes.CreateTmpfsMount("/run", "256Mi")
+	cgroupMount, cgroupVolume := kubernetes.CreateHostPathMount(
+		"/sys/fs/cgroup", "/sys/fs/cgroup", core.HostPathDirectory,
+	)
+
+	// Compute the needed ports
+	ports := utils.GetServerPorts(debug)
+
+	template := getServerPodTemplate(image, pullPolicy, timezone, pullSecret)
+
+	template.Spec.Volumes = append(template.Spec.Volumes, runVolume, cgroupVolume)
+	template.Spec.Containers[0].Ports = kubernetes.ConvertPortMaps(ports)
+	template.Spec.Containers[0].VolumeMounts = append(template.Spec.Containers[0].VolumeMounts,
+		runMount, cgroupMount,
+	)
+
+	if mirrorPvName != "" {
+		// Add a mount for the mirror
+		template.Spec.Containers[0].VolumeMounts = append(template.Spec.Containers[0].VolumeMounts,
+			core.VolumeMount{
+				Name:      mirrorPvName,
+				MountPath: "/mirror",
+			},
+		)
+
+		// Add the environment variable for the deployment to use the mirror
+		// This doesn't make sense for migration as the setup script is not executed
+		template.Spec.Containers[0].Env = append(template.Spec.Containers[0].Env,
+			core.EnvVar{Name: "MIRROR_PATH", Value: "/mirror"},
+		)
+	}
+
+	template.Spec.Containers[0].Lifecycle = &core.Lifecycle{
+		PreStop: &core.LifecycleHandler{
+			Exec: &core.ExecAction{
+				Command: []string{"/bin/sh", "-c", "spacewalk-service stop && systemctl stop postgresql"},
+			},
+		},
+	}
+
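+	// Both probes poll the API getVersion endpoint, which answers as soon as the server is up.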
+	template.Spec.Containers[0].ReadinessProbe = &core.Probe{
+		ProbeHandler: core.ProbeHandler{
+			HTTPGet: &core.HTTPGetAction{
+				Port: intstr.FromInt(80),
+				Path: "/rhn/manager/api/api/getVersion",
+			},
+		},
+		PeriodSeconds:    30,
+		TimeoutSeconds:   20,
+		FailureThreshold: 5,
+	}
+
+	template.Spec.Containers[0].LivenessProbe = &core.Probe{
+		ProbeHandler: core.ProbeHandler{
+			HTTPGet: &core.HTTPGetAction{
+				Port: intstr.FromInt(80),
+				Path: "/rhn/manager/api/api/getVersion",
+			},
+		},
+		InitialDelaySeconds: 60,
+		PeriodSeconds:       60,
+		TimeoutSeconds:      20,
+		FailureThreshold:    5,
+	}
+
+	deployment := apps.Deployment{
+		TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"},
+		ObjectMeta: meta.ObjectMeta{
+			Name:      ServerDeployName,
+			Namespace: namespace,
+			Labels:    kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.ServerComponent),
+		},
+		Spec: apps.DeploymentSpec{
+			Replicas: &replicas,
+			// As long as the container cannot scale, we need to stick to the recreate strategy
+			// or the newly deployed pods won't be ready.
+			Strategy: apps.DeploymentStrategy{Type: apps.RecreateDeploymentStrategyType},
+			Selector: &meta.LabelSelector{
+				MatchLabels: map[string]string{kubernetes.ComponentLabel: kubernetes.ServerComponent},
+			},
+			Template: template,
+		},
+	}
+
+	return &deployment
+}
+
+// getServerPodTemplate computes the pod template with the init container and the minimum viable volumes and mounts.
+// This is intended to be shared with the setup job.
+func getServerPodTemplate(
+	image string,
+	pullPolicy core.PullPolicy,
+	timezone string,
+	pullSecret string,
+) core.PodTemplateSpec {
 	envs := []core.EnvVar{
 		{Name: "TZ", Value: timezone},
 	}
@@ -79,28 +175,8 @@ func getServerDeployment(
 		initMounts = append(initMounts, *initMount)
 	}
 
-	if mirrorPvName != "" {
-		// Add a volume for the mirror
-		mounts = append(mounts, types.VolumeMount{MountPath: "/mirror", Name: mirrorPvName})
-
-		// Add the environment variable for the deployment to use the mirror
-		// This doesn't makes sense for migration as the setup script is not executed
-		envs = append(envs, core.EnvVar{Name: "MIRROR_PATH", Value: "/mirror"})
-
-		// Add the volume mount now since we don't want it in the init container ones.
-		volumeMounts = append(volumeMounts, core.VolumeMount{
-			Name:      mirrorPvName,
-			MountPath: "/mirror",
-		})
-	}
-
 	volumes := kubernetes.CreateVolumes(mounts)
 
-	runMount, runVolume := kubernetes.CreateTmpfsMount("/run", "256Mi")
-	cgroupMount, cgroupVolume := kubernetes.CreateHostPathMount(
-		"/sys/fs/cgroup", "/sys/fs/cgroup", core.HostPathDirectory,
-	)
-
 	caMount := core.VolumeMount{
 		Name:      "ca-cert",
 		MountPath: "/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT",
@@ -118,92 +194,40 @@ func getServerDeployment(
 	}
 	initMounts = append(initMounts, tlsKeyMount)
 
-	volumeMounts = append(volumeMounts, runMount, cgroupMount, caMount, tlsKeyMount)
-	volumes = append(volumes, runVolume, cgroupVolume, caVolume, tlsKeyVolume)
-
-	// Compute the needed ports
-	ports := utils.GetServerPorts(debug)
+	volumeMounts = append(volumeMounts, caMount, tlsKeyMount)
+	volumes = append(volumes, caVolume, tlsKeyVolume)
 
-	deployment := apps.Deployment{
-		TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"},
+	template := core.PodTemplateSpec{
 		ObjectMeta: meta.ObjectMeta{
-			Name:      ServerDeployName,
-			Namespace: namespace,
-			Labels:    kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.ServerComponent),
+			Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.ServerComponent),
 		},
-		Spec: apps.DeploymentSpec{
-			Replicas: &replicas,
-			// As long as the container cannot scale, we need to stick to recreate strategy
-			// or the new deployed pods won't be ready.
-			Strategy: apps.DeploymentStrategy{Type: apps.RecreateDeploymentStrategyType},
-			Selector: &meta.LabelSelector{
-				MatchLabels: map[string]string{kubernetes.ComponentLabel: kubernetes.ServerComponent},
-			},
-			Template: core.PodTemplateSpec{
-				ObjectMeta: meta.ObjectMeta{
-					Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.ServerComponent),
+		Spec: core.PodSpec{
+			InitContainers: []core.Container{
+				{
+					Name:            "init-volumes",
+					Image:           image,
+					ImagePullPolicy: pullPolicy,
+					Command:         []string{"sh", "-x", "-c", initScript},
+					VolumeMounts:    initMounts,
 				},
-				Spec: core.PodSpec{
-					InitContainers: []core.Container{
-						{
-							Name:            "init-volumes",
-							Image:           image,
-							ImagePullPolicy: pullPolicy,
-							Command:         []string{"sh", "-x", "-c", initScript},
-							VolumeMounts:    initMounts,
-						},
-					},
-					Containers: []core.Container{
-						{
-							Name:            "uyuni",
-							Image:           image,
-							ImagePullPolicy: pullPolicy,
-							Lifecycle: &core.Lifecycle{
-								PreStop: &core.LifecycleHandler{
-									Exec: &core.ExecAction{
-										Command: []string{"/bin/sh", "-c", "spacewalk-service stop && systemctl stop postgresql"},
-									},
-								},
-							},
-							Ports: kubernetes.ConvertPortMaps(ports),
-							Env:   envs,
-							ReadinessProbe: &core.Probe{
-								ProbeHandler: core.ProbeHandler{
-									HTTPGet: &core.HTTPGetAction{
-										Port: intstr.FromInt(80),
-										Path: "/rhn/metrics",
-									},
-								},
-								PeriodSeconds:    30,
-								TimeoutSeconds:   20,
-								FailureThreshold: 5,
-							},
-							LivenessProbe: &core.Probe{
-								ProbeHandler: core.ProbeHandler{
-									HTTPGet: &core.HTTPGetAction{
-										Port: intstr.FromInt(80),
-										Path: "/rhn/metrics",
-									},
-								},
-								InitialDelaySeconds: 60,
-								PeriodSeconds:       60,
-								TimeoutSeconds:      20,
-								FailureThreshold:    5,
-							},
-							VolumeMounts: volumeMounts,
-						},
-					},
-					Volumes: volumes,
+			},
+			Containers: []core.Container{
+				{
+					Name:            "uyuni",
+					Image:           image,
+					ImagePullPolicy: pullPolicy,
+					Env:             envs,
+					VolumeMounts:    volumeMounts,
 				},
 			},
+			Volumes: volumes,
 		},
 	}
 
 	if pullSecret != "" {
-		deployment.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}}
+		template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}}
 	}
-
-	return &deployment
+	return template
 }
 
 const initScript = `
@@ -244,10 +268,13 @@ do
     if [ "$vol" = "/etc/pki/tls" ]; then
       ln -s /etc/pki/spacewalk-tls/spacewalk.crt /mnt/etc/pki/tls/certs/spacewalk.crt;
      ln -s /etc/pki/spacewalk-tls/spacewalk.key /mnt/etc/pki/tls/private/spacewalk.key;
-      cp /etc/pki/spacewalk-tls/spacewalk.key /mnt/etc/pki/tls/private/pg-spacewalk.key;
-      chown postgres:postgres /mnt/etc/pki/tls/private/pg-spacewalk.key;
     fi
   fi
+
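+  # PostgreSQL needs its own copy of the TLS key, owned by the postgres user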
+  if [ "$vol" = "/etc/pki/tls" ]; then
+    cp /etc/pki/spacewalk-tls/spacewalk.key /mnt/etc/pki/tls/private/pg-spacewalk.key;
+    chown postgres:postgres /mnt/etc/pki/tls/private/pg-spacewalk.key;
+  fi
 done
 `
 
@@ -315,15 +342,3 @@ func getRunningServerImage(namespace string) string {
 	}
 	return strings.TrimSpace(string(out))
 }
-
-// neverSetup checks if the server container has already been setup setup.
-func neverSetup(namespace string, image string, pullPolicy string, pullSecret string) bool {
-	out, err := kubernetes.RunPodLogs(namespace, "ran-setup-check", image, pullPolicy, pullSecret,
-		[]types.VolumeMount{utils.RootVolumeMount},
-		"ls", "-1a", "/root/",
-	)
-	if err != nil {
-		return false
-	}
-	return !strings.Contains(string(out), ".MANAGER_SETUP_COMPLETE")
-}
diff --git a/mgradm/shared/kubernetes/reconcile.go b/mgradm/shared/kubernetes/reconcile.go
index eb908424f..073af1383 100644
--- a/mgradm/shared/kubernetes/reconcile.go
+++ b/mgradm/shared/kubernetes/reconcile.go
@@ -13,9 +13,7 @@ import (
 	"os"
 	"os/exec"
 
-	"github.com/rs/zerolog/log"
 	adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
-	"github.com/uyuni-project/uyuni-tools/shared"
 	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
 	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
 	"github.com/uyuni-project/uyuni-tools/shared/ssl"
@@ -40,10 +38,8 @@ func Reconcile(flags *KubernetesServerFlags, fqdn string) error {
 		return utils.Errorf(err, L("failed to compute image URL"))
 	}
 
-	cnx := shared.NewConnection("kubectl", "", kubernetes.ServerFilter)
-
 	// Create a secret using SCC credentials if any are provided
-	pullSecret, err := kubernetes.GetSCCSecret(
+	pullSecret, err := kubernetes.GetRegistrySecret(
 		flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SCC, kubernetes.ServerApp,
 	)
 	if err != nil {
@@ -53,6 +49,8 @@ func Reconcile(flags *KubernetesServerFlags, fqdn string) error {
 	// Do we have an existing deployment to upgrade?
 	// This can be freshly synchronized data from a migration or a running instance to upgrade.
 	hasDeployment := kubernetes.HasDeployment(namespace, kubernetes.ServerFilter)
+
+	// Check that the postgresql PVC is bound to a Volume.
 	hasDatabase := kubernetes.HasVolume(namespace, "var-pgsql")
 	isMigration := hasDatabase && !hasDeployment
 
@@ -251,54 +249,78 @@ func Reconcile(flags *KubernetesServerFlags, fqdn string) error {
 	// Wait for uyuni-cert secret to be ready
 	kubernetes.WaitForSecret(namespace, CertSecretName)
 
-	// Start the server
-	if err := CreateServerDeployment(
-		namespace, serverImage, flags.Image.PullPolicy, flags.Installation.TZ, flags.Installation.Debug.Java,
-		flags.Volumes.Mirror, pullSecret,
-	); err != nil {
-		return err
-	}
-
 	// Create the services
 	if err := CreateServices(namespace, flags.Installation.Debug.Java); err != nil {
 		return err
 	}
 
-	if clusterInfos.Ingress == "traefik" {
-		// Create the Traefik routes
-		if err := CreateTraefikRoutes(namespace, needsHub, flags.Installation.Debug.Java); err != nil {
+	// Store the DB credentials in a secret.
+	if flags.Installation.DB.User != "" && flags.Installation.DB.Password != "" {
+		if err := CreateBasicAuthSecret(
+			namespace, DBSecret, flags.Installation.DB.User, flags.Installation.DB.Password,
+		); err != nil {
 			return err
 		}
 	}
 
-	// Wait for the server deployment to have a running pod before trying to set it up.
-	if err := kubernetes.WaitForRunningDeployment(namespace, ServerDeployName); err != nil {
-		return err
+	if flags.Installation.ReportDB.User != "" && flags.Installation.ReportDB.Password != "" {
+		if err := CreateBasicAuthSecret(
+			namespace, ReportdbSecret, flags.Installation.ReportDB.User, flags.Installation.ReportDB.Password,
+		); err != nil {
+			return err
+		}
 	}
 
-	// Run the setup only if it hasn't be done before: this is a one-off task.
-	// TODO Ideally we would need a job running at an earlier stage to persist the logs in a kubernetes-friendly way.
-	if neverSetup(namespace, serverImage, flags.Image.PullPolicy, pullSecret) {
-		if err := adm_utils.RunSetup(
-			cnx, &flags.ServerFlags, fqdn, map[string]string{"NO_SSL": "Y"},
+	// This SCCSecret is used to pass the SCC credentials as environment variables to the setup job.
+	// It is different from the pullSecret as it is of a different type: basic-auth vs docker.
+	if flags.Installation.SCC.User != "" && flags.Installation.SCC.Password != "" {
+		if err := CreateBasicAuthSecret(
+			namespace, SCCSecret, flags.Installation.SCC.User, flags.Installation.SCC.Password,
 		); err != nil {
-			if stopErr := kubernetes.Stop(namespace, kubernetes.ServerApp); stopErr != nil {
-				log.Error().Msgf(L("Failed to stop service: %v"), stopErr)
-			}
 			return err
 		}
 	}
 
-	// Store the DB credentials in a secret.
-	if flags.Installation.DB.User != "" && flags.Installation.DB.Password != "" {
-		if err := CreateDBSecret(
-			namespace, DBSecret, flags.Installation.DB.User, flags.Installation.DB.Password,
+	adminSecret := "admin-credentials"
+	if flags.Installation.Admin.Login != "" && flags.Installation.Admin.Password != "" {
+		if err := CreateBasicAuthSecret(
+			namespace, adminSecret, flags.Installation.Admin.Login, flags.Installation.Admin.Password,
 		); err != nil {
 			return err
 		}
 	}
 
-	deploymentsStarting := []string{}
+	// TODO For a migration or an upgrade this needs to be skipped
+	// Run the setup script.
+	// The script will be skipped if the server has already been set up.
+	jobName, err := StartSetupJob(
+		namespace, serverImage, kubernetes.GetPullPolicy(flags.Image.PullPolicy), pullSecret,
+		flags.Volumes.Mirror, &flags.Installation, fqdn, adminSecret, DBSecret, ReportdbSecret, SCCSecret,
+	)
+	if err != nil {
+		return err
+	}
+
+	if err := kubernetes.WaitForJob(namespace, jobName, 120); err != nil {
+		return err
+	}
+
+	if clusterInfos.Ingress == "traefik" {
+		// Create the Traefik routes
+		if err := CreateTraefikRoutes(namespace, needsHub, flags.Installation.Debug.Java); err != nil {
+			return err
+		}
+	}
+
+	// Start the server
+	if err := CreateServerDeployment(
+		namespace, serverImage, flags.Image.PullPolicy, flags.Installation.TZ, flags.Installation.Debug.Java,
+		flags.Volumes.Mirror, pullSecret,
+	); err != nil {
+		return err
+	}
+
+	deploymentsStarting := []string{ServerDeployName}
 
 	// Start the Coco Deployments if requested.
 	if replicas := kubernetes.GetReplicas(namespace, CocoDeployName); replicas != 0 && !flags.Coco.IsChanged {
diff --git a/mgradm/shared/kubernetes/setup.go b/mgradm/shared/kubernetes/setup.go
new file mode 100644
index 000000000..23390b2af
--- /dev/null
+++ b/mgradm/shared/kubernetes/setup.go
@@ -0,0 +1,173 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"time"
+
+	adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	batch "k8s.io/api/batch/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
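+// SetupJobName is the name prefix of the setup job; a timestamp is appended to make each run unique.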
+const SetupJobName = "uyuni-setup"
+
+// StartSetupJob creates the job setting up the server.
+func StartSetupJob(
+	namespace string,
+	image string,
+	pullPolicy core.PullPolicy,
+	pullSecret string,
+	mirrorPvName string,
+	flags *adm_utils.InstallationFlags,
+	fqdn string,
+	adminSecret string,
+	dbSecret string,
+	reportdbSecret string,
+	sccSecret string,
+) (string, error) {
+	job, err := GetSetupJob(
+		namespace, image, pullPolicy, pullSecret, mirrorPvName, flags, fqdn,
+		adminSecret, dbSecret, reportdbSecret, sccSecret,
+	)
+	if err != nil {
+		return "", err
+	}
+	return job.ObjectMeta.Name, kubernetes.Apply([]*batch.Job{job}, L("failed to run the setup job"))
+}
+
+// GetSetupJob creates the job definition object for the setup.
+func GetSetupJob(
+	namespace string,
+	image string,
+	pullPolicy core.PullPolicy,
+	pullSecret string,
+	mirrorPvName string,
+	flags *adm_utils.InstallationFlags,
+	fqdn string,
+	adminSecret string,
+	dbSecret string,
+	reportdbSecret string,
+	sccSecret string,
+) (*batch.Job, error) {
+	var maxFailures int32
+	timestamp := time.Now().Format("20060102150405")
+
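+	// Reuse the server pod template so the job runs with the same volumes and init container as the server.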
+	template := getServerPodTemplate(image, pullPolicy, flags.TZ, pullSecret)
+
+	script, err := adm_utils.GenerateSetupScript(flags, true)
+	if err != nil {
+		return nil, err
+	}
+
+	template.Spec.Containers[0].Name = "setup"
+	template.Spec.Containers[0].Command = []string{"sh", "-c", script}
+	template.Spec.RestartPolicy = core.RestartPolicyNever
+
+	optional := false
+
+	envVars := []core.EnvVar{
+		{Name: "ADMIN_USER", ValueFrom: &core.EnvVarSource{
+			SecretKeyRef: &core.SecretKeySelector{
+				LocalObjectReference: core.LocalObjectReference{Name: adminSecret},
+				Key:                  "username",
+				Optional:             &optional,
+			},
+		}},
+		{Name: "ADMIN_PASS", ValueFrom: &core.EnvVarSource{
+			SecretKeyRef: &core.SecretKeySelector{
+				LocalObjectReference: core.LocalObjectReference{Name: adminSecret},
+				Key:                  "password",
+				Optional:             &optional,
+			},
+		}},
+		{Name: "MANAGER_USER", ValueFrom: &core.EnvVarSource{
+			SecretKeyRef: &core.SecretKeySelector{
+				LocalObjectReference: core.LocalObjectReference{Name: dbSecret},
+				Key:                  "username",
+				Optional:             &optional,
+			},
+		}},
+		{Name: "MANAGER_PASS", ValueFrom: &core.EnvVarSource{
+			SecretKeyRef: &core.SecretKeySelector{
+				LocalObjectReference: core.LocalObjectReference{Name: dbSecret},
+				Key:                  "password",
+				Optional:             &optional,
+			},
+		}},
+		{Name: "REPORT_DB_USER", ValueFrom: &core.EnvVarSource{
+			SecretKeyRef: &core.SecretKeySelector{
+				LocalObjectReference: core.LocalObjectReference{Name: reportdbSecret},
+				Key:                  "username",
+				Optional:             &optional,
+			},
+		}},
+		{Name: "REPORT_DB_PASS", ValueFrom: &core.EnvVarSource{
+			SecretKeyRef: &core.SecretKeySelector{
+				LocalObjectReference: core.LocalObjectReference{Name: reportdbSecret},
+				Key:                  "password",
+				Optional:             &optional,
+			},
+		}},
+		// EXTERNALDB_* variables are not passed yet: only for AWS and it probably doesn't make sense for kubernetes yet.
+	}
+
+	// The DB and ReportDB are expected to listen on the standard port.
+	// When using an external database with a custom port the only solution is to access it using
+	// its IP address and a headless service with a custom EndpointSlice.
+	// If this is too big a constraint, we'll have to accept the port as a parameter too.
+	env := adm_utils.GetSetupEnv(mirrorPvName, flags, fqdn, true)
+	for key, value := range env {
+		envVars = append(envVars, core.EnvVar{Name: key, Value: value})
+	}
+
+	if sccSecret != "" {
+		envVars = append(envVars,
+			core.EnvVar{Name: "SCC_USER", ValueFrom: &core.EnvVarSource{
+				SecretKeyRef: &core.SecretKeySelector{
+					LocalObjectReference: core.LocalObjectReference{Name: sccSecret},
+					Key:                  "username",
+					Optional:             &optional,
+				},
+			}},
+			core.EnvVar{Name: "SCC_PASS", ValueFrom: &core.EnvVarSource{
+				SecretKeyRef: &core.SecretKeySelector{
+					LocalObjectReference: core.LocalObjectReference{Name: sccSecret},
+					Key:                  "password",
+					Optional:             &optional,
+				},
+			}},
+		)
+	}
+
+	if mirrorPvName != "" {
+		envVars = append(envVars, core.EnvVar{Name: "MIRROR_PATH", Value: "/mirror"})
+	}
+	template.Spec.Containers[0].Env = envVars
+
+	job := batch.Job{
+		TypeMeta: meta.TypeMeta{Kind: "Job", APIVersion: "batch/v1"},
+		ObjectMeta: meta.ObjectMeta{
+			Name:      SetupJobName + "-" + timestamp,
+			Namespace: namespace,
+			Labels:    kubernetes.GetLabels(kubernetes.ServerApp, ""),
+		},
+		Spec: batch.JobSpec{
+			Template:     template,
+			BackoffLimit: &maxFailures,
+		},
+	}
+
+	if pullSecret != "" {
+		job.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}}
+	}
+
+	return &job, nil
+}
diff --git a/mgradm/shared/templates/mgrSetupScriptTemplate.go b/mgradm/shared/templates/mgrSetupScriptTemplate.go
index 1ad61c7eb..902eccb0e 100644
--- a/mgradm/shared/templates/mgrSetupScriptTemplate.go
+++ b/mgradm/shared/templates/mgrSetupScriptTemplate.go
@@ -11,9 +11,10 @@ import (
 
 //nolint:lll
 const mgrSetupScriptTemplate = `#!/bin/sh
-{{- range $name, $value := .Env }}
-export {{ $name }}='{{ $value }}'
-{{- end }}
+if test -e /root/.MANAGER_SETUP_COMPLETE; then
+  echo "Server appears to be already configured. Installation options may be ignored."
+  exit 0
+fi
 
 {{- if .DebugJava }}
 echo 'JAVA_OPTS=" $JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,address=*:8003,server=y,suspend=n" ' >> /etc/tomcat/conf.d/remote_debug.conf
@@ -28,12 +29,21 @@ RESULT=$?
 /usr/bin/rhn-ssl-dbstore --ca-cert=/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT
 
 if test -n "{{ .AdminPassword }}"; then
+  echo "starting tomcat..."
+  (su -s /usr/bin/sh -g tomcat -G www -G susemanager tomcat /usr/lib/tomcat/server start)&
+
+  echo "starting apache2..."
+  /usr/sbin/start_apache2 -k start
+
+  echo "Creating first user..."
 {{ if .NoSSL }}
   CURL_SCHEME="http"
 {{ else }}
-  CURL_SCHEME="-k https"
+  CURL_SCHEME="-L -k https"
 {{ end }}
+
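+  # Retry the first request a few times to give the web UI time to come up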
+  curl -o /tmp/curl-retry -s --retry 7 $CURL_SCHEME://localhost/rhn/newlogin/CreateFirstUser.do
 
   HTTP_CODE=$(curl -o /dev/null -s -w %{http_code} $CURL_SCHEME://localhost/rhn/newlogin/CreateFirstUser.do)
   if test "$HTTP_CODE" == "200"; then
     echo "Creating administration user"
@@ -52,17 +62,16 @@ if test -n "{{ .AdminPassword }}"; then
     rm -f /tmp/curl_out
   elif test "$HTTP_CODE" == "403"; then
     echo "Administration user already exists, reusing"
+  else
+    RESULT=1
   fi
 fi
 
-# clean before leaving
-rm $0
 exit $RESULT
 `
 
 // MgrSetupScriptTemplateData represents information used to create setup script.
 type MgrSetupScriptTemplateData struct {
-	Env            map[string]string
 	NoSSL          bool
 	DebugJava      bool
 	AdminPassword  string
diff --git a/mgradm/shared/utils/flags.go b/mgradm/shared/utils/flags.go
index b9294d23d..ea4ebd9de 100644
--- a/mgradm/shared/utils/flags.go
+++ b/mgradm/shared/utils/flags.go
@@ -95,7 +95,6 @@ type DBFlags struct {
 	Port     int
 	User     string
 	Password string
-	Protocol string
 	Provider string
 	Admin    struct {
 		User string
diff --git a/mgradm/shared/utils/setup.go b/mgradm/shared/utils/setup.go
index 85cfba3e0..0e7f48984 100644
--- a/mgradm/shared/utils/setup.go
+++ b/mgradm/shared/utils/setup.go
@@ -5,62 +5,17 @@
 package utils
 
 import (
-	"path/filepath"
 	"strconv"
 	"strings"
 
-	"github.com/rs/zerolog"
-	"github.com/rs/zerolog/log"
 	"github.com/uyuni-project/uyuni-tools/mgradm/shared/templates"
-	"github.com/uyuni-project/uyuni-tools/shared"
 	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
 	"github.com/uyuni-project/uyuni-tools/shared/utils"
 )
 
-const setupName = "setup.sh"
-
-// RunSetup execute the setup.
-func RunSetup(cnx *shared.Connection, flags *ServerFlags, fqdn string, env map[string]string) error {
-	// Containers should be running now, check storage if it is using volume from already configured server
-	preconfigured := false
-	if isServerConfigured(cnx) {
-		log.Warn().Msg(
-			L("Server appears to be already configured. Installation will continue, but installation options may be ignored."),
-		)
-		preconfigured = true
-	}
-
-	tmpFolder, cleaner, err := generateSetupScript(&flags.Installation, fqdn, flags.Mirror, env)
-	if err != nil {
-		return err
-	}
-	defer cleaner()
-
-	if err := cnx.Copy(filepath.Join(tmpFolder, setupName), "server:/tmp/setup.sh", "root", "root"); err != nil {
-		return utils.Errorf(err, L("cannot copy /tmp/setup.sh"))
-	}
-
-	err = ExecCommand(zerolog.InfoLevel, cnx, "/tmp/setup.sh")
-	if err != nil && !preconfigured {
-		return utils.Errorf(err, L("error running the setup script"))
-	}
-	if err := cnx.CopyCaCertificate(fqdn); err != nil {
-		return utils.Errorf(err, L("failed to add SSL CA certificate to host trusted certificates"))
-	}
-
-	log.Info().Msgf(L("Server set up, login on https://%[1]s with %[2]s user"), fqdn, flags.Installation.Admin.Login)
-	return nil
-}
-
-// generateSetupScript creates a temporary folder with the setup script to execute in the container.
-// The script exports all the needed environment variables and calls uyuni's mgr-setup.
-// Podman or kubernetes-specific variables can be passed using extraEnv parameter.
-func generateSetupScript(
-	flags *InstallationFlags,
-	fqdn string,
-	mirror string,
-	extraEnv map[string]string,
-) (string, func(), error) {
+// GetSetupEnv computes the environment variables required by the setup script from the flags.
+// As the requirements are slightly different for kubernetes, there is a toggle parameter for it.
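+// On kubernetes the credential variables are left out since they come from secrets, and NO_SSL is set instead of the certificate variables.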
+func GetSetupEnv(mirror string, flags *InstallationFlags, fqdn string, kubernetes bool) map[string]string {
 	localHostValues := []string{
 		"localhost",
 		"127.0.0.1",
@@ -79,65 +34,66 @@ func generateSetupScript(
 			reportdbHost = "localhost"
 		}
 	}
-	env := map[string]string{
-		"UYUNI_FQDN":            fqdn,
-		"MANAGER_USER":          flags.DB.User,
-		"MANAGER_PASS":          flags.DB.Password,
-		"MANAGER_ADMIN_EMAIL":   flags.Email,
-		"MANAGER_MAIL_FROM":     flags.EmailFrom,
-		"MANAGER_ENABLE_TFTP":   boolToString(flags.Tftp),
-		"LOCAL_DB":              boolToString(localDB),
-		"MANAGER_DB_NAME":       flags.DB.Name,
-		"MANAGER_DB_HOST":       dbHost,
-		"MANAGER_DB_PORT":       strconv.Itoa(flags.DB.Port),
-		"MANAGER_DB_PROTOCOL":   flags.DB.Protocol,
-		"REPORT_DB_NAME":        flags.ReportDB.Name,
-		"REPORT_DB_HOST":        reportdbHost,
-		"REPORT_DB_PORT":        strconv.Itoa(flags.ReportDB.Port),
-		"REPORT_DB_USER":        flags.ReportDB.User,
-		"REPORT_DB_PASS":        flags.ReportDB.Password,
-		"EXTERNALDB_ADMIN_USER": flags.DB.Admin.User,
-		"EXTERNALDB_ADMIN_PASS": flags.DB.Admin.Password,
-		"EXTERNALDB_PROVIDER":   flags.DB.Provider,
-		"ISS_PARENT":            flags.IssParent,
-		"ACTIVATE_SLP":          "N", // Deprecated, will be removed soon
-		"SCC_USER":              flags.SCC.User,
-		"SCC_PASS":              flags.SCC.Password,
-	}
-	if mirror != "" {
-		env["MIRROR_PATH"] = "/mirror"
-	}
-	// Add the extra environment variables
-	for key, value := range extraEnv {
-		env[key] = value
+	dbPort := "5432"
+	if flags.DB.Port != 0 {
+		dbPort = strconv.Itoa(flags.DB.Port)
 	}
 
-	scriptDir, cleaner, err := utils.TempDir()
-	if err != nil {
-		return "", nil, err
+	reportdbPort := "5432"
+	if flags.ReportDB.Port != 0 {
+		reportdbPort = strconv.Itoa(flags.ReportDB.Port)
 	}
 
-	_, noSSL := env["NO_SSL"]
+	env := map[string]string{
+		"UYUNI_FQDN":          fqdn,
+		"MANAGER_ADMIN_EMAIL": flags.Email,
+		"MANAGER_MAIL_FROM":   flags.EmailFrom,
+		"MANAGER_ENABLE_TFTP": boolToString(flags.Tftp),
+		"LOCAL_DB":            boolToString(localDB),
+		"MANAGER_DB_NAME":     flags.DB.Name,
+		"MANAGER_DB_HOST":     dbHost,
+		"MANAGER_DB_PORT":     dbPort,
+		"MANAGER_DB_PROTOCOL": "tcp",
+		"REPORT_DB_NAME":      flags.ReportDB.Name,
+		"REPORT_DB_HOST":      reportdbHost,
+		"REPORT_DB_PORT":      reportdbPort,
+		"EXTERNALDB_PROVIDER": flags.DB.Provider,
+		"ISS_PARENT":          flags.IssParent,
+		"ACTIVATE_SLP":        "N", // Deprecated, will be removed soon
+	}
 
-	dataTemplate := templates.MgrSetupScriptTemplateData{
-		Env:            env,
-		DebugJava:      flags.Debug.Java,
-		OrgName:        flags.Organization,
-		AdminLogin:     flags.Admin.Login,
-		AdminPassword:  strings.ReplaceAll(flags.Admin.Password, `"`, `\"`),
-		AdminFirstName: flags.Admin.FirstName,
-		AdminLastName:  flags.Admin.LastName,
-		AdminEmail:     flags.Admin.Email,
-		NoSSL:          noSSL,
+	if kubernetes {
+		env["NO_SSL"] = "Y"
+	} else {
+		// SSL setup for the podman-generated certificate
+		env["CERT_O"] = flags.SSL.Org
+		env["CERT_OU"] = flags.SSL.OU
+		env["CERT_CITY"] = flags.SSL.City
+		env["CERT_STATE"] = flags.SSL.State
+		env["CERT_COUNTRY"] = flags.SSL.Country
+		env["CERT_EMAIL"] = flags.SSL.Email
+		env["CERT_CNAMES"] = strings.Join(append([]string{fqdn}, flags.SSL.Cnames...), ",")
+		env["CERT_PASS"] = flags.SSL.Password
+
+		// Only add the credentials for podman as we have secrets for Kubernetes.
+		env["MANAGER_USER"] = flags.DB.User
+		env["MANAGER_PASS"] = flags.DB.Password
+		env["ADMIN_USER"] = flags.Admin.Login
+		env["ADMIN_PASS"] = flags.Admin.Password
+		env["REPORT_DB_USER"] = flags.ReportDB.User
+		env["REPORT_DB_PASS"] = flags.ReportDB.Password
+		env["EXTERNALDB_ADMIN_USER"] = flags.DB.Admin.User
+		env["EXTERNALDB_ADMIN_PASS"] = flags.DB.Admin.Password
+		env["SCC_USER"] = flags.SCC.User
+		env["SCC_PASS"] = flags.SCC.Password
 	}
 
-	scriptPath := filepath.Join(scriptDir, setupName)
-	if err = utils.WriteTemplateToFile(dataTemplate, scriptPath, 0555, true); err != nil {
-		return "", cleaner, utils.Errorf(err, L("Failed to generate setup script"))
+	if mirror != "" {
+		env["MIRROR_PATH"] = "/mirror"
 	}
 
-	return scriptDir, cleaner, nil
+	return env
 }
 
 func boolToString(value bool) string {
@@ -147,6 +103,24 @@ func boolToString(value bool) string {
 	return "N"
 }
 
-func isServerConfigured(cnx *shared.Connection) bool {
-	return cnx.TestExistenceInPod("/root/.MANAGER_SETUP_COMPLETE")
+// GenerateSetupScript renders the setup script to execute in the container and returns it as a string.
+// The script calls uyuni's mgr-setup and expects the needed environment variables to be set by the caller.
+func GenerateSetupScript(flags *InstallationFlags, nossl bool) (string, error) {
+	template := templates.MgrSetupScriptTemplateData{
+		DebugJava:      flags.Debug.Java,
+		OrgName:        flags.Organization,
+		AdminLogin:     "$ADMIN_USER",
+		AdminPassword:  "$ADMIN_PASS",
+		AdminFirstName: flags.Admin.FirstName,
+		AdminLastName:  flags.Admin.LastName,
+		AdminEmail:     flags.Admin.Email,
+		NoSSL:          nossl,
+	}
+
+	// Prepare the script
+	scriptBuilder := new(strings.Builder)
+	if err := template.Render(scriptBuilder); err != nil {
+		return "", utils.Errorf(err, L("failed to render setup script"))
+	}
+	return scriptBuilder.String(), nil
 }
diff --git a/shared/kubernetes/kubernetes.go b/shared/kubernetes/kubernetes.go
index 64cb3f4cc..168d4665e 100644
--- a/shared/kubernetes/kubernetes.go
+++ b/shared/kubernetes/kubernetes.go
@@ -189,16 +189,16 @@ func createDockerSecret(
 
 // AddSccSecret creates a secret holding the SCC credentials and adds it to the helm args.
 func AddSCCSecret(helmArgs []string, namespace string, scc *types.SCCCredentials, appLabel string) ([]string, error) {
-	secret, err := GetSCCSecret(namespace, scc, appLabel)
+	secret, err := GetRegistrySecret(namespace, scc, appLabel)
 	if secret != "" {
 		helmArgs = append(helmArgs, secret)
 	}
 	return helmArgs, err
 }
 
-// GetSCCSecret creates a secret holding the SCC credentials and returns the secret name.
-func GetSCCSecret(namespace string, scc *types.SCCCredentials, appLabel string) (string, error) {
-	const secretName = "scc-credentials"
+// GetRegistrySecret creates a docker secret holding the SCC credentials and returns the secret name.
+func GetRegistrySecret(namespace string, scc *types.SCCCredentials, appLabel string) (string, error) {
+	const secretName = "registry-credentials"
 
 	// Return the existing secret if any.
 	out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "-n", namespace, "secret", secretName, "-o", "name")
diff --git a/shared/testutils/flagstests/mgradm_install.go b/shared/testutils/flagstests/mgradm_install.go
index eb975ac86..69cc301be 100644
--- a/shared/testutils/flagstests/mgradm_install.go
+++ b/shared/testutils/flagstests/mgradm_install.go
@@ -23,7 +23,6 @@ var InstallFlagsTestArgs = func() []string {
 		"--db-name", "dbname",
 		"--db-host", "dbhost",
 		"--db-port", "1234",
-		"--db-protocol", "dbprot",
 		"--db-admin-user", "dbadmin",
 		"--db-admin-password", "dbadminpass",
 		"--db-provider", "aws",
@@ -47,7 +46,6 @@ var InstallFlagsTestArgs = func() []string {
 		"--organization", "someorg",
 	}
 
-	args = append(args, MirrorFlagTestArgs...)
 	args = append(args, SCCFlagTestArgs...)
 	args = append(args, ImageFlagsTestArgs...)
 	args = append(args, CocoFlagsTestArgs...)
@@ -69,7 +67,6 @@ func AssertInstallFlags(t *testing.T, flags *utils.ServerFlags) {
 	testutils.AssertEquals(t, "Error parsing --db-name", "dbname", flags.Installation.DB.Name)
 	testutils.AssertEquals(t, "Error parsing --db-host", "dbhost", flags.Installation.DB.Host)
 	testutils.AssertEquals(t, "Error parsing --db-port", 1234, flags.Installation.DB.Port)
-	testutils.AssertEquals(t, "Error parsing --db-protocol", "dbprot", flags.Installation.DB.Protocol)
 	testutils.AssertEquals(t, "Error parsing --db-admin-user", "dbadmin", flags.Installation.DB.Admin.User)
 	testutils.AssertEquals(t, "Error parsing --db-admin-password", "dbadminpass", flags.Installation.DB.Admin.Password)
 	testutils.AssertEquals(t, "Error parsing --db-provider", "aws", flags.Installation.DB.Provider)
@@ -98,7 +95,6 @@ func AssertInstallFlags(t *testing.T, flags *utils.ServerFlags) {
 	testutils.AssertEquals(t, "Error parsing --admin-firstName", "adminfirst", flags.Installation.Admin.FirstName)
 	testutils.AssertEquals(t, "Error parsing --admin-lastName", "adminlast", flags.Installation.Admin.LastName)
 	testutils.AssertEquals(t, "Error parsing --organization", "someorg", flags.Installation.Organization)
-	AssertMirrorFlag(t, flags.Mirror)
 	AssertSCCFlag(t, &flags.Installation.SCC)
 	AssertImageFlag(t, &flags.Image)
 	AssertCocoFlag(t, &flags.Coco)
diff --git a/shared/utils/exec.go b/shared/utils/exec.go
index bed607f40..a1cd4b66c 100644
--- a/shared/utils/exec.go
+++ b/shared/utils/exec.go
@@ -8,6 +8,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"os"
 	"os/exec"
 	"strings"
 	"time"
@@ -34,6 +35,98 @@ func (l OutputLogWriter) Write(p []byte) (n int, err error) {
 	return
 }
 
+// Runner is a helper object around the exec.Command() function.
+//
+// This is supposed to be created using the NewRunner() function.
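+//
+// An illustrative call chain (the command here is arbitrary):
+//
+//	out, err := NewRunner("podman", "ps").Log(zerolog.DebugLevel).Exec()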
+type Runner struct {
+	logger  zerolog.Logger
+	cmd     *exec.Cmd
+	spinner *spinner.Spinner
+}
+
+// NewRunner creates a new runner instance for the command.
+func NewRunner(command string, args ...string) *Runner {
+	runner := Runner{logger: log.Logger}
+	runner.cmd = exec.Command(command, args...)
+	return &runner
+}
+
+// Log sets the log level of the output.
+func (r *Runner) Log(logLevel zerolog.Level) *Runner {
+	r.logger = log.Logger.Level(logLevel)
+	return r
+}
+
+// Spinner sets a spinner with its message.
+// If no message is passed, the command will be used.
+func (r *Runner) Spinner(message string) *Runner {
+	r.spinner = spinner.New(spinner.CharSets[14], 100*time.Millisecond)
+	text := message
+	if message == "" {
+		text = strings.Join(r.cmd.Args, " ")
+	}
+	r.spinner.Suffix = fmt.Sprintf(" %s\n", text)
+	return r
+}
+
+// StdMapping maps the process output and error streams to the standard ones.
+// This is useful to show the process output in the console and the logs, and can be combined with Log().
+func (r *Runner) StdMapping() *Runner {
+	r.cmd.Stdout = r.logger
+	r.cmd.Stderr = r.logger
+	return r
+}
+
+// Env sets environment variables to use for the command.
+func (r *Runner) Env(env []string) *Runner {
+	if r.cmd.Env == nil {
+		r.cmd.Env = os.Environ()
+	}
+	r.cmd.Env = append(r.cmd.Env, env...)
+	return r
+}
+
+// Exec really executes the command and returns its output and error.
+// The error output is used as the error message if the StdMapping() function wasn't called.
+func (r *Runner) Exec() ([]byte, error) {
+	if r.spinner != nil {
+		r.spinner.Start()
+	}
+
+	r.logger.Debug().Msgf("Running: %s", strings.Join(r.cmd.Args, " "))
+	var out []byte
+	var err error
+
+	if r.cmd.Stdout != nil {
+		err = r.cmd.Run()
+	} else {
+		out, err = r.cmd.Output()
+	}
+
+	if r.spinner != nil {
+		r.spinner.Stop()
+	}
+
+	var exitErr *exec.ExitError
+	if errors.As(err, &exitErr) {
+		err = &CmdError{exitErr}
+	}
+
+	r.logger.Trace().Msgf("Command output: %s, error: %s", out, err)
+
+	return out, err
+}
+
+// CmdError is a wrapper around exec.ExitError to show the standard error as the message.
+type CmdError struct {
+	*exec.ExitError
+}
+
+// Error returns the stderr as the error message.
+func (e *CmdError) Error() string {
+	return strings.TrimSpace(string(e.Stderr))
+}
+
 // RunCmd execute a shell command.
 func RunCmd(command string, args ...string) error {
 	s := spinner.New(spinner.CharSets[14], 100*time.Millisecond) // Build our new spinner
diff --git a/shared/utils/exec_test.go b/shared/utils/exec_test.go
new file mode 100644
index 000000000..86eb555b0
--- /dev/null
+++ b/shared/utils/exec_test.go
@@ -0,0 +1,85 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package utils
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
+	"github.com/uyuni-project/uyuni-tools/shared/testutils"
+)
+
+func TestRunner(t *testing.T) {
+	type testCase struct {
+		exit     int
+		logLevel zerolog.Level
+	}
+
+	testCases := []testCase{
+		{exit: 0, logLevel: zerolog.TraceLevel},
+		{exit: 2, logLevel: zerolog.TraceLevel},
+		{exit: 0, logLevel: zerolog.DebugLevel},
+		{exit: 0, logLevel: zerolog.InfoLevel},
+	}
+
+	for i, test := range testCases {
+		logWriter := new(strings.Builder)
+		log.Logger = zerolog.New(logWriter)
+
+		runner := NewRunner("sh", "-c",
+			fmt.Sprintf(`echo "Test output: ENV=$ENV"; echo 'error message' >&2; exit %d`, test.exit),
+		)
+		out, err := runner.Log(test.logLevel).Env([]string{"ENV=foo"}).Exec()
+
+		caseMsg := fmt.Sprintf("test %d: ", i)
+
+		// Check the output
+		testutils.AssertEquals(t, caseMsg+"Unexpected output", "Test output: ENV=foo\n", string(out))
+
+		// Check the returned error
+		if test.exit == 0 {
+			testutils.AssertEquals(t, caseMsg+"Unexpected error", nil, err)
+		} else {
+			testutils.AssertEquals(t, caseMsg+"Unexpected error", "error message", string(err.Error()))
+			var cmdErr *CmdError
+			if errors.As(err, &cmdErr) {
+				testutils.AssertEquals(t, caseMsg+"Unexpected exit code", test.exit, cmdErr.ExitCode())
+			} else {
+				t.Errorf(caseMsg + "unexpected error type")
+			}
+		}
+
+		// Check the log content
+		logContent := logWriter.String()
+		t.Logf("log: %s", logContent)
+		if test.logLevel == zerolog.TraceLevel {
+			testutils.AssertTrue(t, caseMsg+"missing trace log entry", strings.Contains(logContent, "Command output:"))
+		} else {
+			testutils.AssertTrue(t, caseMsg+"unexpected trace log entry", !strings.Contains(logContent, `"level":"trace"`))
+		}
+
+		if test.logLevel <= zerolog.DebugLevel {
+			testutils.AssertTrue(t, caseMsg+"missing debug log entry", strings.Contains(logContent, "Running:"))
+		} else {
+			testutils.AssertTrue(t, caseMsg+"unexpected debug log entry", !strings.Contains(logContent, `"level":"debug"`))
+		}
+	}
+}
+
+func ExampleRunner() {
+	out, err := NewRunner("sh", "-c", `echo "Hello $user"`).
+		Env([]string{"user=world"}).
+		Log(zerolog.DebugLevel).
+		Exec()
+	if err != nil {
+		fmt.Printf("Error: %s", err)
+	}
+	fmt.Println(strings.TrimSpace(string(out)))
+	// Output: Hello world
+}
diff --git a/uyuni-tools.changes.cbosdo.systemd-free-setup b/uyuni-tools.changes.cbosdo.systemd-free-setup
new file mode 100644
index 000000000..52a9de38c
--- /dev/null
+++ b/uyuni-tools.changes.cbosdo.systemd-free-setup
@@ -0,0 +1 @@
+- Run setup in its own container rather than using exec