From 96e170635e691ea94148169c75a53aa56bd06e8e Mon Sep 17 00:00:00 2001 From: Raghu Katti Date: Tue, 26 Apr 2022 16:12:40 -0400 Subject: [PATCH 1/7] k8s-daemonset initial cut --- charts/k8s-daemonset/.helmignore | 21 + charts/k8s-daemonset/Chart.yaml | 11 + charts/k8s-daemonset/README.md | 1202 +++++++++++++++++ charts/k8s-daemonset/linter_values.yaml | 42 + charts/k8s-daemonset/templates/NOTES.txt | 43 + .../templates/_capabilities_helpers.tpl | 42 + .../templates/_daemonset_spec.tpl | 427 ++++++ charts/k8s-daemonset/templates/_helpers.tpl | 73 + charts/k8s-daemonset/templates/daemonset.yaml | 5 + charts/k8s-daemonset/templates/gmc.yaml | 27 + charts/k8s-daemonset/templates/ingress.yaml | 101 ++ charts/k8s-daemonset/templates/pdb.yaml | 23 + charts/k8s-daemonset/templates/service.yaml | 42 + .../templates/serviceaccount.yaml | 22 + .../templates/servicemonitor.yaml | 22 + charts/k8s-daemonset/values.yaml | 713 ++++++++++ examples/k8s-daemonset-nginx/README.md | 381 ++++++ examples/k8s-daemonset-nginx/values.yaml | 69 + test/k8s_daemonset_nginx_example_test.go | 206 +++ test/k8s_service_template_test.go | 2 +- 20 files changed, 3473 insertions(+), 1 deletion(-) create mode 100644 charts/k8s-daemonset/.helmignore create mode 100644 charts/k8s-daemonset/Chart.yaml create mode 100644 charts/k8s-daemonset/README.md create mode 100644 charts/k8s-daemonset/linter_values.yaml create mode 100644 charts/k8s-daemonset/templates/NOTES.txt create mode 100644 charts/k8s-daemonset/templates/_capabilities_helpers.tpl create mode 100644 charts/k8s-daemonset/templates/_daemonset_spec.tpl create mode 100644 charts/k8s-daemonset/templates/_helpers.tpl create mode 100644 charts/k8s-daemonset/templates/daemonset.yaml create mode 100644 charts/k8s-daemonset/templates/gmc.yaml create mode 100644 charts/k8s-daemonset/templates/ingress.yaml create mode 100644 charts/k8s-daemonset/templates/pdb.yaml create mode 100644 charts/k8s-daemonset/templates/service.yaml create mode 100644 charts/k8s-daemonset/templates/serviceaccount.yaml create mode 100644 charts/k8s-daemonset/templates/servicemonitor.yaml create mode 100644 charts/k8s-daemonset/values.yaml create mode 100644 examples/k8s-daemonset-nginx/README.md create mode 100644 examples/k8s-daemonset-nginx/values.yaml create mode 100644 test/k8s_daemonset_nginx_example_test.go diff --git a/charts/k8s-daemonset/.helmignore b/charts/k8s-daemonset/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/charts/k8s-daemonset/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/k8s-daemonset/Chart.yaml b/charts/k8s-daemonset/Chart.yaml new file mode 100644 index 00000000..9f74c173 --- /dev/null +++ b/charts/k8s-daemonset/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +name: k8s-daemonset +description: A Helm chart to package your application container for Kubernetes +# This will be updated with the release tag in the CI/CD pipeline before publishing. This has to be a valid semver for +# the linter to accept. 
+version: 0.0.1-replace +home: https://github.com/gruntwork-io/helm-kubernetes-services +maintainers: + - name: Gruntwork + email: info@gruntwork.io + url: https://gruntwork.io diff --git a/charts/k8s-daemonset/README.md b/charts/k8s-daemonset/README.md new file mode 100644 index 00000000..c205d5c9 --- /dev/null +++ b/charts/k8s-daemonset/README.md @@ -0,0 +1,1202 @@ +# Kubernetes Service Helm Chart + +This Helm Chart can be used to deploy your application container under a +[Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) resource onto your Kubernetes +cluster. You can use this Helm Chart to run and deploy a long-running container, such as a web service or backend +microservice. The container will be packaged into +[Pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) that are managed by the `Deployment` +controller. + +This Helm Chart can also be used to front the `Pods` of the `Deployment` resource with a +[Service](https://kubernetes.io/docs/concepts/services-networking/service/) to provide a stable endpoint to access the +`Pods`, as well as load balance traffic to them. The Helm Chart can also specify +[Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) rules to further configure complex routing +rules in front of the `Service`. + +If you're using the chart to deploy to [GKE](https://cloud.google.com/kubernetes-engine/), you can also use the chart to deploy a [Google Managed SSL Certificate](https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs) and associate it with the Ingress. + + +## How to use this chart? + +* See the [root README](/README.adoc) for general instructions on using Gruntwork Helm Charts. +* See the [examples](/examples) folder for example usage. +* See the provided [values.yaml](./values.yaml) file for the required and optional configuration values that you can set + on this chart. + +back to [root README](/README.adoc#core-concepts) + +## What resources does this Helm Chart deploy? + +The following resources will be deployed with this Helm Chart, depending on which configuration values you use: + +- `Deployment`: The main `Deployment` controller that will manage the application container image specified in the + `containerImage` input value. +- Secondary `Deployment` for use as canary: An optional `Deployment` controller that will manage a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of the application container image specified in the `canary.containerImage` input value. This is useful for testing a new application tag, in parallel to your stable tag, prior to rolling the new tag out. Created only if you configure the `canary.containerImage` values (and set `canary.enabled = true`). +- `Service`: The `Service` resource providing a stable endpoint that can be used to address to `Pods` created by the + `Deployment` controller. Created only if you configure the `service` input (and set + `service.enabled = true`). +- `ServiceMonitor`: The `ServiceMonitor` describes the set of targets to be monitored by Prometheus. Created only if you configure the service input and set `serviceMonitor.enabled = true`. +- `Ingress`: The `Ingress` resource providing host and path routing rules to the `Service` for the deployed `Ingress` + controller in the cluster. Created only if you configure the `ingress` input (and set + `ingress.enabled = true`). 
+- `Horizontal Pod Autoscaler`: The `Horizontal Pod Autoscaler` automatically scales the number of pods in a replication + controller, deployment, replica set or stateful set based on observed CPU or memory utilization. + Created only if the user sets `horizontalPodAutoscaler.enabled = true`. +- `PodDisruptionBudget`: The `PodDisruptionBudget` resource that specifies a disruption budget for the `Pods` managed by + the `Deployment`. This manages how many pods can be disrupted by a voluntary disruption (e.g + node maintenance). Created if you specify a non-zero value for the `minPodsAvailable` input + value. +- `ManagedCertificate`: The `ManagedCertificate` is a [GCP](https://cloud.google.com/) -specific resource that creates a Google Managed SSL certificate. Google-managed SSL certificates are provisioned, renewed, and managed for your domain names. Read more about Google-managed SSL certificates [here](https://cloud.google.com/load-balancing/docs/ssl-certificates#managed-certs). Created only if you configure the `google.managedCertificate` input (and set + `google.managedCertificate.enabled = true` and `google.managedCertificate.domainName = your.domain.name`). + +back to [root README](/README.adoc#core-concepts) + +## How do I deploy additional services not managed by the chart? + +You can create custom Kubernetes resources, that are not directly managed by the chart, within the `customResources` +key. You provide each resource manifest directly as a value under `customResources.resources` and set +`customResources.enabled` to `true`. For examples of custom resources, take a look at the examples in +[test/fixtures/custom_resources_values.yaml](../../test/fixtures/custom_resources_values.yaml) and +[test/fixtures/multiple_custom_resources_values.yaml](../../test/fixtures/multiple_custom_resources_values.yaml). + +back to [root README](/README.adoc#day-to-day-operations) + +## How do I expose my application internally to the cluster? + +In general, `Pods` are considered ephemeral in Kubernetes. `Pods` can come and go at any point in time, either because +containers fail or the underlying instances crash. In either case, the dynamic nature of `Pods` make it difficult to +consistently access your application if you are individually addressing the `Pods` directly. + +Traditionally, this is solved using service discovery, where you have a stateful system that the `Pods` would register +to when they are available. Then, your other applications can query the system to find all the available `Pods` and +access one of the available ones. + +Kubernetes provides a built in mechanism for service discovery in the `Service` resource. `Services` are an abstraction +that groups a set of `Pods` behind a consistent, stable endpoint to address them. By creating a `Service` resource, you +can provide a single endpoint to other applications to connect to the `Pods` behind the `Service`, and not worry about +the dynamic nature of the `Pods`. + +You can read a more detailed description of `Services` in [the official +documentation](https://kubernetes.io/docs/concepts/services-networking/service/). Here we will cover just enough to +understand how to access your app. + +By default, this Helm Chart will deploy your application container in a `Pod` that exposes ports 80. These will +be exposed to the Kubernetes cluster behind the `Service` resource, which exposes port 80. You can modify this behavior +by overriding the `containerPorts` input value and the `service` input value. 
See the corresponding section in the +`values.yaml` file for more details. + +Once the `Service` is created, you can check what endpoint the `Service` provides by querying Kubernetes using +`kubectl`. First, retrieve the `Service` name that is outputted in the install summary when you first install the Helm +Chart. If you forget, you can get the same information at a later point using `helm status`. For example, if you had +previously installed this chart under the name `edge-service`, you can run the following command to see the created +resources: + +```bash +$ helm status edge-service +LAST DEPLOYED: Fri Feb 8 16:25:49 2019 +NAMESPACE: default +STATUS: DEPLOYED + +RESOURCES: +==> v1/Service +NAME AGE +edge-service-nginx 24m + +==> v1/Deployment +edge-service-nginx 24m + +==> v1/Pod(related) + +NAME READY STATUS RESTARTS AGE +edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 24m +edge-service-nginx-844c978df7-mln26 1/1 Running 0 24m +edge-service-nginx-844c978df7-rdsr8 1/1 Running 0 24m +``` + +This will show you some metadata about the release, the deployed resources, and any notes provided by the Helm Chart. In +this example, the service name is `edge-service-nginx` so we will use that to query the `Service`: + +```bash +$ kubectl get service edge-service-nginx +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +edge-service-nginx ClusterIP 172.20.186.176 80/TCP 27m +``` + +Here you can see basic information about the `Service`. The important piece of information is the `CLUSTER-IP` and +`PORT` fields, which tell you the available endpoint for the `Service`, and any exposed ports. Given that, any `Pod` in +your Kubernetes cluster can access the `Pods` of this application by hitting `{CLUSTER-IP}:{PORT}`. So for this example, +that will be `172.20.186.176:80`. + +But what if you want to automatically find a `Service` by name? The name of the `Service` created by this Helm Chart is +always `{RELEASE_NAME}-{applicationName}`, where `applicationName` is provided in the input value and `RELEASE_NAME` is +set when you install the Helm Chart. This means that the name is predictable, while the allocated IP address may not be. + +To address the `Service` by name, Kubernetes provides two ways: + +- environment variables +- DNS + +### Addressing Service by Environment Variables + +For each active `Service` that a `Pod` has access to, Kubernetes will automatically set a set of environment variables +in the container. These are `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` to get the host address (ip address) +and port respectively, where `SVCNAME` is the name of the `Service`. Note that `SVCNAME` will be the all caps version +with underscores of the `Service` name. + +Using the previous example where we installed this chart with a release name `edge-service` and `applicationName` +`nginx`, we get the `Service` name `edge-service-nginx`. Kubernetes will expose the following environment variables to +all containers that can access the `Service`: + +``` +EDGE_SERVICE_NGINX_SERVICE_HOST=172.20.186.176 +EDGE_SERVICE_NGINX_SERVICE_PORT=80 +``` + +Note that environment variables are set when the container first boots up. This means that if you already had `Pods` +deployed in your system before the `Service` was created, you will have to cycle the `Pods` in order to get the +environment variables. If you wish to avoid ordering issues, you can use the DNS method to address the `Service` +instead, if that is available. 
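+
+For example, here is a minimal sketch of how another container in the cluster could consume these variables, assuming
+the `edge-service` release and `nginx` application name used above, and that `curl` is available in the client
+container:
+
+```bash
+# These variables are injected by Kubernetes into containers started *after* the Service was created.
+# The Service name edge-service-nginx becomes the EDGE_SERVICE_NGINX_ prefix.
+curl "http://${EDGE_SERVICE_NGINX_SERVICE_HOST}:${EDGE_SERVICE_NGINX_SERVICE_PORT}/"
+```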
+ +### Addressing Service by DNS + +If your Kubernetes cluster is deployed with the DNS add-on (this is automatically installed for EKS and GKE), then you +can rely on DNS to address your `Service`. Every `Service` in Kubernetes will register the domain +`{SVCNAME}.{NAMESPACE}.svc.cluster.local` to the DNS service of the cluster. This means that all your `Pods` in the +cluster can get the `Service` host by hitting that domain. + +The `NAMESPACE` in the domain refers to the `Namespace` where the `Service` was created. By default, all resources are +created in the `default` namespace. This is configurable at install time of the Helm Chart using the `--namespace` +option. + +In our example, we deployed the chart to the `default` `Namespace`, and the `Service` name is `edge-service-nginx`. So in +this case, the domain of the `Service` will be `edge-service-nginx.default.svc.cluster.local`. When any `Pod` addresses +that domain, it will get the address `172.20.186.176`. + +Note that DNS does not resolve ports, so in this case, you will have to know which port the `Service` uses. So in your +`Pod`, you will have to know that the `Service` exposes port `80` when you address it in your code for the container as +`edge-service-nginx.default.svc.cluster.local:80`. However, like the `Service` name, this should be predictable since it +is specified in the Helm Chart input value. + +back to [root README](/README.adoc#day-to-day-operations) + +## How do I expose my application externally, outside of the cluster? + +Similar to the previous section ([How do I expose my application internally to the +cluster?](#how-do-i-expose-my-application-internally-to-the-cluster), you can use a `Service` resource to expose your +application externally. The primary service type that facilitates external access is the `NodePort` `Service` type. + +The `NodePort` `Service` type will expose the `Service` by binding an available port on the network interface of the +physical machines running the `Pod`. This is different from a network interface internal to Kubernetes, which is only +accessible within the cluster. Since the port is on the host machine network interface, you can access the `Service` by +hitting that port on the node. + +For example, suppose you had a 2 node Kubernetes cluster deployed on EC2. Suppose further that all your EC2 instances +have public IP addresses that you can access. For the sake of this example, we will assign random IP addresses to the +instances: + +- 54.219.117.250 +- 38.110.235.198 + +Now let's assume you deployed this helm chart using the `NodePort` `Service` type. You can do this by setting the +`service.type` input value to `NodePort`: + +```yaml +service: + enabled: true + type: NodePort + ports: + app: + port: 80 + targetPort: 80 + protocol: TCP +``` + +When you install this helm chart with this input config, helm will deploy the `Service` as a `NodePort`, binding an +available port on the host machine to access the `Service`. You can confirm this by querying the `Service` using +`kubectl`: + +```bash +$ kubectl get service edge-service-nginx +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +edge-service-nginx NodePort 10.99.244.96 80:31035/TCP 33s +``` + +In this example, you can see that the `Service` type is `NodePort` as expected. Additionally, you can see that the there +is a port binding between port 80 and 31035. This port binding refers to the binding between the `Service` port (80 in +this case) and the host port (31035 in this case). 
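+
+If you need the allocated node port programmatically (for example, in a deploy script), you can extract it with
+`jsonpath`. This is a sketch that assumes the same `edge-service-nginx` release and the port name `app` used above;
+whether a node exposes an `ExternalIP` address depends on how your cluster is provisioned:
+
+```bash
+# Look up the randomly allocated NodePort for the Service port named "app".
+NODE_PORT="$(kubectl get service edge-service-nginx -o jsonpath='{.spec.ports[?(@.name=="app")].nodePort}')"
+
+# Grab the external IP of one of the nodes (empty if the node does not report an ExternalIP).
+NODE_IP="$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="ExternalIP")].address}')"
+
+echo "Service is reachable at http://${NODE_IP}:${NODE_PORT}"
+```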
+ +One thing to be aware of about `NodePorts` is that the port binding will exist on all nodes in the cluster. This means +that, in our 2 node example, both nodes now have a port binding of 31035 on the host network interface that routes to +the `Service`, regardless of whether or not the node is running the `Pods` backing the `Service` endpoint. This means +that you can reach the `Service` on both of the following endpoints: + +- `54.219.117.250:31035` +- `38.110.235.198:31035` + +This means that no two `Service` can share the same `NodePort`, as the port binding is shared across the cluster. +Additionally, if you happen to hit a node that is not running a `Pod` backing the `Service`, Kubernetes will +automatically hop to one that is. + +You might use the `NodePort` if you do not wish to manage load balancers through Kubernetes, or if you are running +Kubernetes on prem where you do not have native support for managed load balancers. + +To summarize: + +- `NodePort` is the simplest way to expose your `Service` to externally to the cluster. +- You have a limit on the number of `NodePort` `Services` you can have in your cluster, imposed by the number of open ports + available on your host machines. +- You have potentially inefficient hopping if you happen to route to a node that is not running the `Pod` backing the + `Service`. + +Additionally, Kubernetes provides two mechanisms to manage an external load balancer that routes to the `NodePort` for +you. The two ways are: + +- [Using a `LoadBalancer` `Service` type](#loadbalancer-service-type) +- [Using `Ingress` resources with an `Ingress Controller`](#ingress-and-ingress-controllers) + +### LoadBalancer Service Type + +The `LoadBalancer` `Service` type will expose the `Service` by allocating a managed load balancer in the cloud that is +hosting the Kubernetes cluster. On AWS, this will be an ELB, while on GCP, this will be a Cloud Load Balancer. When the +`LoadBalancer` `Service` is created, Kubernetes will automatically create the underlying load balancer resource in the +cloud for you, and create all the target groups so that they route to the `Pods` backing the `Service`. + +You can deploy this helm chart using the `LoadBalancer` `Service` type by setting the `service.type` input value to +`LoadBalancer`: + +```yaml +service: + enabled: true + type: LoadBalancer + ports: + app: + port: 80 + targetPort: 80 + protocol: TCP +``` + +When you install this helm chart with this input config, helm will deploy the `Service` as a `LoadBalancer`, allocating +a managed load balancer in the cloud hosting your Kubernetes cluster. You can get the attached load balancer by querying +the `Service` using `kubectl`. In this example, we will assume we are using EKS: + +``` +$ kubectl get service edge-service-nginx +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +edge-service-nginx LoadBalancer 172.20.7.35 a02fef4d02e41... 80:32127/TCP 1m +``` + +Now, in this example, we have an entry in the `EXTERNAL-IP` field. 
This is truncated here, but you can get the actual +output when you describe the service: + +``` +$ kubectl describe service edge-service-nginx +Name: edge-service-nginx +Namespace: default +Labels: app.kubernetes.io/instance=edge-service + app.kubernetes.io/managed-by=helm + app.kubernetes.io/name=nginx + gruntwork.io/app-name=nginx + helm.sh/chart=k8s-service-0.1.0 +Annotations: +Selector: app.kubernetes.io/instance=edge-service,app.kubernetes.io/name=nginx,gruntwork.io/app-name=nginx +Type: LoadBalancer +IP: 172.20.7.35 +LoadBalancer Ingress: a02fef4d02e4111e9891806271fc7470-173030870.us-west-2.elb.amazonaws.com +Port: app 80/TCP +TargetPort: 80/TCP +NodePort: app 32127/TCP +Endpoints: 10.0.3.19:80 +Session Affinity: None +External Traffic Policy: Cluster +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal EnsuringLoadBalancer 2m service-controller Ensuring load balancer + Normal EnsuredLoadBalancer 2m service-controller Ensured load balancer +``` + +In the describe output, there is a field named `LoadBalancer Ingress`. When you have a `LoadBalancer` `Service` type, +this field contains the public DNS endpoint of the associated load balancer resource in the cloud provider. In this +case, we have an AWS ELB instance, so this endpoint is the public endpoint of the associated ELB resource. + +**Note:** Eagle eyed readers might also notice that there is an associated `NodePort` on the resource. This is because under the +hood, `LoadBalancer` `Services` utilize `NodePorts` to handle the connection between the managed load balancer of the +cloud provider and the Kubernetes `Pods`. This is because at this time, there is no portable way to ensure that the +network between the cloud load balancers and Kubernetes can be shared such that the load balancers can route to the +internal network of the Kubernetes cluster. Therefore, Kubernetes resorts to using `NodePort` as an abstraction layer to +connect the `LoadBalancer` to the `Pods` backing the `Service`. This means that `LoadBalancer` `Services` share the same +drawbacks as using a `NodePort` `Service`. + +To summarize: + +- `LoadBalancer` provides a way to set up a cloud load balancer resource that routes to the provisioned `NodePort` on + each node in your Kubernetes cluster. +- `LoadBalancer` can be used to provide a persistent endpoint that is robust to the ephemeral nature of nodes in your + cluster. E.g it is able to route to live nodes in the face of node failures. +- `LoadBalancer` does not support weighted balancing. This means that you cannot balance the traffic so that it prefers + nodes that have more instances of the `Pod` running. +- Note that under the hood, `LoadBalancer` utilizes a `NodePort` `Service`, and thus shares the same limits as `NodePort`. + +### Ingress and Ingress Controllers + +`Ingress` is a mechanism in Kubernetes that abstracts externally exposing a `Service` from the `Service` config itself. +`Ingress` resources support: + +- assigning an externally accessible URL to a `Service` +- perform hostname and path based routing of `Services` +- load balance traffic using customizable balancing rules +- terminate SSL + +You can read more about `Ingress` resources in [the official +documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/). Here, we will cover the basics to +understand how `Ingress` can be used to externally expose the `Service`. + +At a high level, the `Ingress` resource is used to specify the configuration for a particular `Service`. 
In turn, the +`Ingress Controller` is responsible for fulfilling those configurations in the cluster. This means that the first +decision to make in using `Ingress` resources, is selecting an appropriate `Ingress Controller` for your cluster. + +#### Choosing an Ingress Controller + +Before you can use an `Ingress` resource, you must install an `Ingress Controller` in your Kubernetes cluster. There are +many kinds of `Ingress Controllers` available, each with different properties. You can see [a few examples listed in the +official documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-controllers). + +When you use an external cloud `Ingress Controller` such as the [GCE Ingress +Controller](https://github.com/kubernetes/ingress-gce/blob/master/README.md) or [AWS ALB Ingress +Controller](https://github.com/kubernetes-sigs/aws-alb-ingress-controller), Kubernetes will allocate an externally +addressable load balancer (for GCE this will be a Cloud Load Balancer and for AWS this will be an ALB) that fulfills the +`Ingress` rules. This includes routing the domain names and paths to the right `Service` as configured by the `Ingress` +rules. Additionally, Kubernetes will manage the target groups of the load balancer so that they are up to date with +the latest `Ingress` configuration. However, in order for this to work, there needs to be some way for the load balancer +to connect to the `Pods` servicing the `Service`. Since the `Pods` are internal to the Kubernetes network and the load +balancers are external to the network, there must be a `NodePort` that links the two together. As such, like the +`LoadBalancer` `Service` type, these `Ingress Controllers` also require a `NodePort` under the hood. + + + +Alternatively, you can use an internal `Ingress Controller` that runs within Kubernetes as `Pods`. For example, the +official `nginx Ingress Controller` will launch `nginx` as `Pods` within your Kubernetes cluster. These `nginx` `Pods` +are then configured using `Ingress` resources, which then allows `nginx` to route to the right `Pods`. Since the `nginx` +`Pods` are internal to the Kubernetes network, there is no need for your `Services` to be `NodePorts` as they are +addressable within the network by the `Pods`. However, this means that you need some other mechanism to expose `nginx` +to the outside world, which will require a `NodePort`. The advantage of this approach, despite still requiring a +`NodePort`, is that you can have a single `NodePort` that routes to multiple services using hostnames or paths as +managed by `nginx`, as opposed to requiring a `NodePort` per `Service` you wish to expose. + +Which `Ingress Controller` type you wish to use depends on your infrastructure needs. If you have relatively few +`Services`, and you want the simplicity of a managed cloud load balancer experience, you might opt for the external +`Ingress Controllers` such as GCE and AWS ALB controllers. On the other hand, if you have thousands of micro services +that push you to the limits of the available number of ports on a host machine, you might opt for an internal `Ingress +Controller` approach. Whichever approach you decide, be sure to document your decision where you install the particular +`Ingress Controller` so that others in your team know and understand the tradeoffs you made. 
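+
+As a concrete illustration, the following is a minimal sketch of installing the community-maintained `ingress-nginx`
+controller with Helm. The chart repository URL and release name are taken from the upstream project, the namespace is
+an assumption, and the `--create-namespace` flag assumes Helm 3; adjust and pin versions for a real deployment:
+
+```bash
+# Add the upstream ingress-nginx chart repository and install the controller into its own namespace.
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+helm install ingress-nginx ingress-nginx/ingress-nginx --namespace ingress-nginx --create-namespace
+```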
+
+#### Configuring Ingress for your Service
+
+Once you have an `Ingress Controller` installed and configured on your Kubernetes cluster, you can now start creating
+`Ingress` resources to add routes to it. This helm chart supports configuring an `Ingress` resource to complement the
+`Service` resource that is created in the chart.
+
+To add an `Ingress` resource, first make sure you have a `Service` enabled on the chart. Depending on the chosen
+`Ingress Controller`, the `Service` type should be `NodePort` or `ClusterIP`. Here, we will create a `NodePort`
+`Service` exposing port 80:
+
+```yaml
+service:
+  enabled: true
+  type: NodePort
+  ports:
+    app:
+      port: 80
+      targetPort: 80
+      protocol: TCP
+```
+
+Then, we will add the configuration for the `Ingress` resource by specifying the `ingress` input value. For this
+example, we will assume that we want to route `/app` to our `Service`, with the domain hosted on `app.yourco.com`:
+
+```yaml
+ingress:
+  enabled: true
+  path: /app
+  servicePort: 80
+  hosts:
+    - app.yourco.com
+```
+
+This will configure the load balancer backing the `Ingress Controller` to route any traffic with host and path
+prefix `app.yourco.com/app` to the `Service` on port 80. If `app.yourco.com` is configured to point to the `Ingress
+Controller` load balancer, then once you deploy the helm chart you should be able to start accessing your app on that
+endpoint.
+
+#### Registering additional paths
+
+Sometimes you might want to add additional path rules beyond the main service rule that is injected into the `Ingress`
+resource. For example, you might want a path that routes to the sidecar containers, or you might want to reuse a single
+`Ingress` for multiple different `Service` endpoints because you want to share load balancers. For these situations,
+you can use the `additionalPaths` and `additionalPathsHigherPriority` input values.
+
+Consider the following `Service`, where we have the `app` served on port 80, and the `sidecarMonitor` served on port
+3000:
+
+```yaml
+service:
+  enabled: true
+  type: NodePort
+  ports:
+    app:
+      port: 80
+      targetPort: 80
+      protocol: TCP
+    sidecarMonitor:
+      port: 3000
+      targetPort: 3000
+      protocol: TCP
+```
+
+To route `/app` to the `app` service endpoint and `/sidecar` to the `sidecarMonitor` service endpoint, we will configure
+the `app` service path rules as the main service route and the `sidecarMonitor` as an additional path rule:
+
+```yaml
+ingress:
+  enabled: true
+  path: /app
+  servicePort: 80
+  additionalPaths:
+    - path: /sidecar
+      servicePort: 3000
+```
+
+Now suppose you had a sidecar service that returns a fixed response indicating server maintenance and you want to
+temporarily route all requests to that endpoint without taking down the pod. You can do this by creating a route that
+catches all paths as a higher priority path using the `additionalPathsHigherPriority` input value.
+ +Consider the following `Service`, where we have the `app` served on port 80, and the `sidecarFixedResponse` served on +port 3000: + +```yaml +service: + enabled: true + type: NodePort + ports: + app: + port: 80 + targetPort: 80 + protocol: TCP + sidecarFixedResponse: + port: 3000 + targetPort: 3000 + protocol: TCP +``` + +To route all traffic to the fixed response port: + +```yaml +ingress: + enabled: true + path: /app + servicePort: 80 + additionalPathsHigherPriority: + - path: /* + servicePort: 3000 +``` + +The `/*` rule which routes to port 3000 will always be used even when accessing the path `/app` because it will be +evaluated first when routing requests. + +back to [root README](/README.adoc#day-to-day-operations) + +### How do I expose additional ports? + +By default, this Helm Chart will deploy your application container in a Pod that exposes ports 80. Sometimes you might +want to expose additional ports in your application - for example a separate port for Prometheus metrics. You can expose +additional ports for your application by overriding `containerPorts` and `service` input values: + +```yaml + +containerPorts: + http: + port: 80 + protocol: TCP + prometheus: + port: 2020 + protocol: TCP + +service: + enabled: true + type: NodePort + ports: + app: + port: 80 + targetPort: 80 + protocol: TCP + prometheus: + port: 2020 + targetPort: 2020 + protocol: TCP + +``` + + +## How do I deploy a worker service? + +Worker services typically do not have a RPC or web server interface to access it. Instead, worker services act on their +own and typically reach out to get the data they need. These services should be deployed without any ports exposed. +However, by default `k8s-service` will deploy an internally exposed service with port 80 open. + +To disable the default port, you can use the following `values.yaml` inputs: + +``` +containerPorts: + http: + disabled: true + +service: + enabled: false +``` + +This will override the default settings such that only the `Deployment` resource is created, with no ports exposed on +the container. + +back to [root README](/README.adoc#day-to-day-operations) + +## How do I check the status of the rollout? + +This Helm Chart packages your application into a `Deployment` controller. The `Deployment` controller will be +responsible with managing the `Pods` of your application, ensuring that the Kubernetes cluster matches the desired state +configured by the chart inputs. + +When the Helm Chart installs, `helm` will mark the installation as successful when the resources are created. Under the +hood, the `Deployment` controller will do the work towards ensuring the desired number of `Pods` are up and running. + +For example, suppose you set the `replicaCount` variable to 3 when installing this chart. This will configure the +`Deployment` resource to maintain 3 replicas of the `Pod` at any given time, launching new ones if there is a deficit or +removing old ones if there is a surplus. + +To see the current status of the `Deployment`, you can query Kubernetes using `kubectl`. The `Deployment` resource of +the chart are labeled with the `applicationName` input value and the release name provided by helm. 
So for example, +suppose you deployed this chart using the following `values.yaml` file and command: + +```yaml +applicationName: nginx +containerImage: + repository: nginx + tag: stable +``` + +```bash +$ helm install -n edge-service gruntwork/k8s-service +``` + +In this example, the `applicationName` is set to `nginx`, while the release name is set to `edge-service`. This chart +will then install a `Deployment` resource in the default `Namespace` with the following labels that uniquely identifies +it: + +``` +app.kubernetes.io/name: nginx +app.kubernetes.io/instance: edge-service +``` + +So now you can query Kubernetes for that `Deployment` resource using these labels to see the state: + +```bash +$ kubectl get deployments -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +edge-service-nginx 3 3 3 1 24s +``` + +This includes a few useful information: + +- `DESIRED` lists the number of `Pods` that should be running in your cluster. +- `CURRENT` lists how many `Pods` are currently created in the cluster. +- `UP-TO-DATE` lists how many `Pods` are running the desired image. +- `AVAILABLE` lists how many `Pods` are currently ready to serve traffic, as defined by the `readinessProbe`. + +When all the numbers are in sync and equal, that means the `Deployment` was rolled out successfully and all the `Pods` +are passing the readiness healthchecks. + +In the example output above, note how the `Available` count is `1`, but the others are `3`. This means that all 3 `Pods` +were successfully created with the latest image, but only `1` of them successfully came up. You can dig deeper into the +individual `Pods` to check the status of the unavailable `Pods`. The `Pods` are labeled the same way, so you can pass in +the same label query to get the `Pods` managed by the deployment: + +```bash +$ kubectl get pods -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" +NAME READY STATUS RESTARTS AGE +edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 52s +edge-service-nginx-844c978df7-mln26 0/1 Pending 0 52s +edge-service-nginx-844c978df7-rdsr8 0/1 Pending 0 52s +``` + +This will show you the status of each individual `Pod` in your deployment. In this example output, there are 2 `Pods` +that are in the `Pending` status, meaning that they have not been scheduled yet. We can look into why the `Pod` failed +to schedule by getting detailed information about the `Pod` with the `describe` command. 
Unlike `get pods`, `describe +pod` requires a single `Pod` so we will grab the name of one of the failing `Pods` above and feed it to `describe pod`: + +```bash +$ kubectl describe pod edge-service-nginx-844c978df7-mln26 +Name: edge-service-nginx-844c978df7-mln26 +Namespace: default +Priority: 0 +PriorityClassName: +Node: +Labels: app.kubernetes.io/instance=edge-service + app.kubernetes.io/name=nginx + gruntwork.io/app-name=nginx + pod-template-hash=4007534893 +Annotations: +Status: Pending +IP: +Controlled By: ReplicaSet/edge-service-nginx-844c978df7 +Containers: + nginx: + Image: nginx:stable + Ports: 80/TCP + Host Ports: 0/TCP + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from default-token-mgkr9 (ro) +Conditions: + Type Status + PodScheduled False +Volumes: + default-token-mgkr9: + Type: Secret (a volume populated by a Secret) + SecretName: default-token-mgkr9 + Optional: false +QoS Class: BestEffort +Node-Selectors: +Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s + node.kubernetes.io/unreachable:NoExecute for 300s +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning FailedScheduling 1m (x25 over 3m) default-scheduler 0/2 nodes are available: 2 Insufficient pods. +``` + +This will output detailed information about the `Pod`, including an event log. In this case, the roll out failed because +there is not enough capacity in the cluster to schedule the `Pod`. + +back to [root README](/README.adoc#day-to-day-operations) + +## How do I set and share configurations with the application? + +While you can bake most application configuration values into the application container, you might need to inject +dynamic configuration variables into the container. These are typically values that change depending on the environment, +such as the MySQL database endpoint. Additionally, you might also want a way to securely share secrets with the +container such that they are not hard coded in plain text in the container or in the Helm Chart values yaml file. To +support these use cases, this Helm Chart provides three ways to share configuration values with the application +container: + +- [Directly setting environment variables](#directly-setting-environment-variables) +- [Using ConfigMaps](#using-configmaps) +- [Using Secrets](#using-secrets) + +### Directly setting environment variables + +The simplest way to set a configuration value for the container is to set an environment variable for the container +runtime. These variables are set by Kubernetes before the container application is booted, which can then be looked up +using the standard OS lookup functions for environment variables. + +You can use the `envVars` input value to set an environment variable at deploy time. For example, the following entry in +a `values.yaml` file will set the `DB_HOST` environment variable to `mysql.default.svc.cluster.local` and the `DB_PORT` +environment variable to `3306`: + +```yaml +envVars: + DB_HOST: "mysql.default.svc.cluster.local" + DB_PORT: 3306 +``` + +One thing to be aware of when using environment variables is that they are set at start time of the container. This +means that updating the environment variables require restarting the containers so that they propagate. + +### Using ConfigMaps + +While environment variables are an easy way to inject configuration values, what if you want to share the configuration +across multiple deployments? 
If you wish to use the direct environment variables approach, you would have no choice but +to copy paste the values across each deployment. When this value needs to change, you are now faced with going through +each deployment and updating the reference. + +For this situation, `ConfigMaps` would be a better option. `ConfigMaps` help decouple configuration values from the +`Deployment` and `Pod` config, allowing you to share the values across the deployments. `ConfigMaps` are dedicated +resources in Kubernetes that store configuration values as key value pairs. + +For example, suppose you had a `ConfigMap` to store the database information. You might store the information as two key +value pairs: one for the host (`dbhost`) and one for the port (`dbport`). You can create a `ConfigMap` directly using +`kubectl`, or by using a resource file. + +To directly create the `ConfigMap`: + +``` +kubectl create configmap my-config --from-literal=dbhost=mysql.default.svc.cluster.local --from-literal=dbport=3306 +``` + +Alternatively, you can manage the `ConfigMap` as code using a kubernetes resource config: + +```yaml +# my-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-config +data: + dbhost: mysql.default.svc.cluster.local + dbport: 3306 +``` + +You can then apply this resource file using `kubectl`: + +``` +kubectl apply -f my-config.yaml +``` + +`kubectl` supports multiple ways to seed the `ConfigMap`. You can read all the different ways to create a `ConfigMap` in +[the official +documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#create-a-configmap). + +Once the `ConfigMap` is created, you can access the `ConfigMap` within the `Pod` by configuring the access during +deployment. This Helm Chart provides the `configMaps` input value to configure what `ConfigMaps` should be shared with +the application container. There are two ways to inject the `ConfigMap`: + +- [Accessing the `ConfigMap` as Environment Variables](#accessing-the-configmap-as-environment-variables) +- [Accessing the `ConfigMap` as Files](#accessing-the-configmap-as-files) + +**NOTE**: It is generally not recommended to use `ConfigMaps` to store sensitive data. For those use cases, use +`Secrets` or an external secret store. + +##### Accessing the ConfigMap as Environment Variables + +You can set the values of the `ConfigMap` as environment variables in the application container. To do so, you set the +`as` attribute of the `configMaps` input value to `environment`. For example, to share the `my-config` `ConfigMap` above +using the same environment variables as the example in [Directly setting environment +variables](#directly-settings-environment-variables), you would set the `configMaps` as follows: + +```yaml +configMaps: + my-config: + as: environment + items: + dbhost: + envVarName: DB_HOST + dbport: + envVarName: DB_PORT +``` + +In this configuration for the Helm Chart, we specify that we want to share the `my-config` `ConfigMap` as environment +variables with the main application container. Additionally, we want to map the `dbhost` config value to the `DB_HOST` +environment variable, and similarly map the `dbport` config value to the `DB_PORT` environment variable. + +Note that like directly setting environment variables, these are set at container start time, and thus the containers +need to be restarted when the `ConfigMap` is updated for the new values to be propagated. You can use files instead if +you wish the `ConfigMap` changes to propagate immediately. 
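+
+To confirm the values made it into the container, you can spot check the environment of a running `Pod`. This is a
+sketch that assumes the `edge-service` release and `nginx` application name from the earlier examples:
+
+```bash
+# Pick one of the application Pods and print the environment variables injected from the ConfigMap.
+POD_NAME="$(kubectl get pods -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" -o jsonpath='{.items[0].metadata.name}')"
+kubectl exec "$POD_NAME" -- env | grep -E '^DB_(HOST|PORT)='
+```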
+ +##### Accessing the ConfigMap as Files + +You can mount the `ConfigMap` values as files on the container filesystem. To do so, you set the `as` attribute of the +`configMaps` input value to `volume`. + +For example, suppose you wanted to share the `my-config` `ConfigMap` above as the files `/etc/db/host` and +`/etc/db/port`. For this case, you would set the `configMaps` input value to: + +```yaml +configMaps: + my-config: + as: volume + mountPath: /etc/db + items: + dbhost: + filePath: host + dbport: + filePath: port +``` + +In the container, now the values for `dbhost` is stored as a text file at the path `/etc/db/host` and `dbport` is stored +at the path `/etc/db/port`. You can then read these files in in your application to get the values. + +Unlike environment variables, using files has the advantage of immediately reflecting changes to the `ConfigMap`. For +example, when you update `my-config`, the files at `/etc/db` are updated automatically with the new values, without +needing a redeployment to propagate the new values to the container. + +### Using Secrets + +In general, it is discouraged to store sensitive information such as passwords in `ConfigMaps`. Instead, Kubernetes +provides `Secrets` as an alternative resource to store sensitive data. Similar to `ConfigMaps`, `Secrets` are key value +pairs that store configuration values that can be managed independently of the `Pod` and containers. However, unlike +`ConfigMaps`, `Secrets` have the following properties: + +- A secret is only sent to a node if a pod on that node requires it. They are automatically garbage collected when there + are no more `Pods` referencing it on the node. +- A secret is stored in `tmpfs` on the node, so that it is only available in memory. +- Starting with Kubernetes 1.7, they can be encrypted at rest in `etcd` (note: this feature was in alpha state until + Kubernetes 1.13). + +You can read more about the protections and risks of using `Secrets` in [the official +documentation](https://kubernetes.io/docs/concepts/configuration/secret/#security-properties). + +Creating a `Secret` is very similar to creating a `ConfigMap`. For example, suppose you had a `Secret` to store the +database password. Like `ConfigMaps`, you can create a `Secret` directly using `kubectl`: + +``` +kubectl create secret generic my-secret --from-literal=password=1f2d1e2e67df +``` + +The `generic` keyword indicates the `Secret` type. Almost all use cases for your application should use this type. Other +types include `docker-registry` for specifying credentials for accessing a private docker registry, and `tls` for +specifying TLS certificates to access the Kubernetes API. + +You can also manage the `Secret` as code, although you may want to avoid this for `Secrets` to avoid leaking them in +unexpected locations (e.g source control). Unlike `ConfigMaps`, `Secrets` require values to be stored as base64 encoded +values when using resource files. So the configuration for the above example will be: + +```yaml +# my-secret.yaml +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: my-secret +data: + password: MWYyZDFlMmU2N2Rm +``` + +Note that `MWYyZDFlMmU2N2Rm` is the base 64 encoded version of `1f2d1e2e67df`. You can then apply this resource config +using `kubectl`: + +``` +kubectl apply -f my-secret.yaml +``` + +Similar to `ConfigMaps`, this Helm Chart supports two ways to inject `Secrets` into the application container: as +environment variables, or as files. 
The syntax to share the values is very similar to the `configMaps` input value, only
+you use the `secrets` input value. The properties of each approach are very similar to `ConfigMaps`. Refer to [the
+previous section](#using-configmaps) for more details on each approach. Here, we show you examples of the input values
+to use for each approach.
+
+**Mounting secrets as environment variables**: In this example, we mount the `my-secret` `Secret` created above as the
+environment variable `DB_PASSWORD`.
+
+```yaml
+secrets:
+  my-secret:
+    as: environment
+    items:
+      password:
+        envVarName: DB_PASSWORD
+```
+
+**Mounting secrets as files**: In this example, we mount the `my-secret` `Secret` as the file `/etc/db/password`.
+
+```yaml
+secrets:
+  my-secret:
+    as: volume
+    mountPath: /etc/db
+    items:
+      password:
+        filePath: password
+```
+
+**NOTE**: The volumes are different between `secrets` and `configMaps`. This means that if you use the same `mountPath`
+for different secrets and config maps, you can end up with only one. It is undefined which `Secret` or `ConfigMap` ends
+up getting mounted. To be safe, use a different `mountPath` for each one.
+
+**NOTE**: If you want to mount the volumes created with `secrets` or `configMaps` on your init or sidecar containers,
+you will have to append `-volume` to the volume name. In the example above, the resulting volume will be
+`my-secret-volume`.
+
+```yaml
+sideCarContainers:
+  sidecar:
+    image: sidecar/container:latest
+    volumeMounts:
+      - name: my-secret-volume
+        mountPath: /etc/db
+```
+
+### Which configuration method should I use?
+
+Which configuration method you should use depends on your needs. Here is a summary of the pros and cons of each
+approach:
+
+##### Directly setting environment variables
+
+**Pro**:
+
+- Simple setup
+- Manage configuration values directly with application deployment config
+- Most application languages support looking up environment variables
+
+**Con**:
+
+- Tightly couples configuration settings with application deployment
+- Requires redeployment to update values
+- Must store in plain text, and easy to leak into VCS
+
+**Best for**:
+
+- Iterating different configuration values during development
+- Storing non-sensitive values that are unique to each environment / deployment
+
+##### Using ConfigMaps
+
+**Pro**:
+
+- Keep config DRY by sharing a common set of configurations
+- Independently update config values from the application deployment
+- Automatically propagate new values when stored as files
+
+**Con**:
+
+- More overhead to manage the configuration
+- Stored in plain text
+- Available on all nodes automatically
+
+**Best for**:
+
+- Storing non-sensitive common configuration that is shared across environments
+- Storing non-sensitive dynamic configuration values that change frequently
+
+##### Using Secrets
+
+**Pro**:
+
+- All the benefits of using `ConfigMaps`
+- Can be encrypted at rest
+- Opaque by default when viewing the values (harder to remember the base64-encoded version of "admin")
+- Only available to nodes that use it, and only in memory
+
+**Con**:
+
+- All the challenges of using `ConfigMaps`
+- Configured in plain text, making it difficult to manage as code securely
+- Less safe than using a dedicated secrets manager / store like HashiCorp Vault
+
+**Best for**:
+
+- Storing sensitive configuration values
+
+back to [root README](/README.adoc#day-to-day-operations)
+
+## How do you update the application to a new version?
+ +To update the application to a new version, you can upgrade the Helm Release using updated values. For example, suppose +you deployed `nginx` version 1.15.4 using this Helm Chart with the following values: + +```yaml +containerImage: + repository: nginx + tag: 1.15.4 + +applicationName: nginx +``` + +In this example, we will further assume that you deployed this chart with the above values using the release name +`edge-service`, using a command similar to below: + +```bash +$ helm install -f values.yaml --name edge-service gruntwork/k8s-service +``` + +Now let's try upgrading `nginx` to version 1.15.8. To do so, we will first update our values file: + +```yaml +containerImage: + repository: nginx + tag: 1.15.8 + +applicationName: nginx +``` + +The only difference here is the `tag` of the `containerImage`. + +Next, we will upgrade our release using the updated values. To do so, we will use the `helm upgrade` command: + +```bash +$ helm upgrade -f values.yaml edge-service gruntwork/k8s-service +``` + +This will update the created resources with the new values provided by the updated `values.yaml` file. For this example, +the only resource that will be updated is the `Deployment` resource, which will now have a new `Pod` spec that points to +`nginx:1.15.8` as opposed to `nginx:1.15.4`. This automatically triggers a rolling deployment internally to Kubernetes, +which will launch new `Pods` using the latest image, and shut down old `Pods` once those are ready. + +You can read more about how changes are rolled out on `Deployment` resources in [the official +documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment). + +Note that certain changes will lead to a replacement of the `Deployment` resource. For example, updating the +`applicationName` will cause the `Deployment` resource to be deleted, and then created. This can lead to down time +because the resources are replaced in an uncontrolled fashion. + +## How do I create a canary deployment? + +You may optionally configure a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of an arbitrary tag that will run as an individual deployment behind your configured service. This is useful for ensuring a new application tag runs without issues prior to fully rolling it out. + +To configure a canary deployment, set `canary.enabled = true` and define the `containerImage` values. Typically, you will want to specify the tag of your next release candidate: + +```yaml +canary: + enabled: true + containerImage: + repository: nginx + tag: 1.15.9 +``` +Once deployed, your service will route traffic across both your stable and canary deployments, allowing you to monitor for and catch any issues early. + +back to [root README](/README.adoc#major-changes) + +## How do I verify my canary deployment? 
+ +Canary deployment pods have the same name as your stable deployment pods, with the additional `-canary` appended to the end, like so: + +```bash +$ kubectl get pods -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" +NAME READY STATUS RESTARTS AGE +edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 52s +edge-service-nginx-844c978df7-mln26 0/1 Pending 0 52s +edge-service-nginx-844c978df7-rdsr8 0/1 Pending 0 52s +edge-service-nginx-canary-844c978df7-bsr8 0/1 Pending 0 52s +``` + +Therefore, in this example, you could monitor your canary by running `kubectl logs -f edge-service-nginx-canary-844c978df7-bsr8` + +back to [root README](/README.adoc#day-to-day-operations) + +## How do I roll back a canary deployment? + +Update your values.yaml file, setting `canary.enabled = false` and then upgrade your helm installation: + +```bash +$ helm upgrade -f values.yaml edge-service gruntwork/k8s-service +``` +Following this update, Kubernetes will determine that your canary deployment is no longer desired and will delete it. + +back to [root README](/README.adoc#day-to-day-operations) + +## How do I ensure a minimum number of Pods are available across node maintenance? + +Sometimes, you may want to ensure that a specific number of `Pods` are always available during [voluntary +maintenance](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions). +This chart exposes an input value `minPodsAvailable` that can be used to specify a minimum number of `Pods` to maintain +during a voluntary maintenance activity. Under the hood, this chart will create a corresponding `PodDisruptionBudget` to +ensure that a certain number of `Pods` are up before attempting to terminate additional ones. + +You can read more about `PodDisruptionBudgets` in [our blog post covering the +topic](https://blog.gruntwork.io/avoiding-outages-in-your-kubernetes-cluster-using-poddisruptionbudgets-ef6a4baa5085) +and in [the official +documentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#how-disruption-budgets-work). + + +back to [root README](/README.adoc#major-changes) + +## Why does the Pod have a preStop hook with a Shutdown Delay? + +When a `Pod` is removed from a Kubernetes cluster, the control plane notifies all nodes to remove the `Pod` from +registered addresses. This includes removing the `Pod` from the list of available `Pods` to service a `Service` +endpoint. However, because Kubernetes is a distributed system, there is a delay between the shutdown sequence and the +`Pod` being removed from available addresses. As a result, the `Pod` could still get traffic despite it having already +been shutdown on the node it was running on. + +Since there is no way to guarantee that the deletion has propagated across the cluster, we address this eventual +consistency issue by adding an arbitrary delay between the `Pod` being deleted and the initiation of the `Pod` shutdown +sequence. This is accomplished by adding a `sleep` command in the `preStop` hook. + +You can control the length of time to delay with the `shutdownDelay` input value. You can also disable this behavior by +setting the `shutdownDelay` to 0. + +You can read more about this topic in [our blog post +"Delaying Shutdown to Wait for Pod Deletion +Propagation"](https://blog.gruntwork.io/delaying-shutdown-to-wait-for-pod-deletion-propagation-445f779a8304). + + +back to [root README](/README.adoc#day-to-day-operations) + +## What is a sidecar container? 
+ +In Kubernetes, `Pods` are one or more tightly coupled containers that are deployed together. The containers in the `Pod` +share, amongst other things, the network stack, the IPC namespace, and in some cases the PID namespace. You can read +more about the resources that the containers in a `Pod` share in [the official +documentation](https://kubernetes.io/docs/concepts/workloads/pods/pod/#what-is-a-pod). + +Sidecar Containers are additional containers that you wish to deploy in the `Pod` housing your application container. +This helm chart supports deploying these containers by configuring the `sideCarContainers` input value. This input value +is a map between the side car container name and the values of the container spec. The spec is rendered directly into +the `Deployment` resource, with the `name` being set to the key. For example: + +```yaml +sideCarContainers: + datadog: + image: datadog/agent:latest + env: + - name: DD_API_KEY + value: ASDF-1234 + - name: SD_BACKEND + value: docker + nginx: + image: nginx:1.15.4 +``` + +This input will be rendered in the `Deployment` resource as: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + ... Snipped for brevity ... +spec: + ... Snipped for brevity ... + template: + spec: + containers: + ... The first entry relates to the application ... + - name: datadog + image: datadog/agent:latest + env: + - name: DD_API_KEY + value: ASDF-1234 + - name: SD_BACKEND + value: docker + - name: nginx + image: nginx:1.15.4 +``` + +In this config, the side car containers are rendered as additional containers to deploy alongside the main application +container configured by the `containerImage`, `ports`, `livenessProbe`, etc input values. Note that the +`sideCarContainers` variable directly renders the spec, meaning that the additional values for the side cars such as +`livenessProbe` should be rendered directly within the `sideCarContainers` input value. + +back to [root README](/README.adoc#core-concepts) + +## How do I use a private registry? + +To pull container images from a private registry, the Kubernetes cluster needs to be able to authenticate to the docker +registry with a registry key. On managed Kubernetes clusters (e.g EKS, GKE, AKS), this is automated through the server +IAM roles that are assigned to the instance VMs. In most cases, if the instance VM IAM role has the permissions to +access the registry, the Kubernetes cluster will automatically be able to pull down images from the respective managed +registry (e.g ECR on EKS or GCR on GKE). + +Alternatively, you can specify docker registry keys in the Kubernetes cluster as `Secret` resources. This is helpful in +situations where you do not have the ability to assign registry access IAM roles to the node itself, or if you are +pulling images off of a different registry (e.g accessing GCR from EKS cluster). + +You can use `kubectl` to create a `Secret` in Kubernetes that can be used as a docker registry key: + +``` +kubectl create secret docker-registry NAME \ + --docker-server=DOCKER_REGISTRY_SERVER \ + --docker-username=DOCKER_USER \ + --docker-password=DOCKER_PASSWORD \ + --docker-email=DOCKER_EMAIL +``` + +This command will create a `Secret` resource named `NAME` that holds the specified docker registry credentials. 
You can
+then specify the cluster to use this `Secret` when pulling down images for the service `Deployment` in this chart by
+using the `imagePullSecrets` input value:
+
+```
+imagePullSecrets:
+  - NAME
+```
+
+You can learn more about using private registries with Kubernetes in [the official
+documentation](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry).
+
+back to [root README](/README.adoc#day-to-day-operations)
diff --git a/charts/k8s-daemonset/linter_values.yaml b/charts/k8s-daemonset/linter_values.yaml
new file mode 100644
index 00000000..f800b2d7
--- /dev/null
+++ b/charts/k8s-daemonset/linter_values.yaml
@@ -0,0 +1,42 @@
+#----------------------------------------------------------------------------------------------------------------------
+# CHART PARAMETERS TO USE WITH HELM LINT
+# This file declares a complete configuration value for this chart, with required values defined so that it can be used
+# with helm lint to lint the chart. This should only specify the required values of the chart, and be combined with the
+# default values of the chart.
+# This is a YAML-formatted file.
+#----------------------------------------------------------------------------------------------------------------------
+
+#----------------------------------------------------------------------------------------------------------------------
+# REQUIRED VALUES
+# These values are expected to be defined and passed in by the operator when deploying this helm chart.
+#----------------------------------------------------------------------------------------------------------------------
+
+# containerImage is a map that describes the container image that should be used to serve the application managed by
+# this chart.
+# The expected keys are:
+# - repository (string) (required) : The container image repository that should be used.
+#                                    E.g `nginx` ; `gcr.io/kubernetes-helm/tiller`
+# - tag (string) (required)        : The tag of the image (e.g `latest`) that should be used. We recommend using a
+#                                    fixed tag or the SHA of the image. Avoid using the tags `latest`, `head`,
+#                                    `canary`, or other tags that are designed to be “floating”.
+# - pullPolicy (string)            : The image pull policy to employ. Determines when the image will be pulled in. See
+#                                    the official Kubernetes docs for more info. If undefined, this will default to
+#                                    `IfNotPresent`.
+#
+# The following example deploys the `nginx:stable` image with an `IfNotPresent` image pull policy, which indicates that
+# the image should only be pulled if it has not been pulled previously.
+#
+# EXAMPLE:
+#
+# containerImage:
+#   repository: nginx
+#   tag: stable
+#   pullPolicy: IfNotPresent
+containerImage:
+  repository: nginx
+  tag: stable
+  pullPolicy: IfNotPresent
+
+# applicationName is a string that names the application. This is used to label the pod and to name the main application
+# container in the pod spec. The label is keyed under "gruntwork.io/app-name"
+applicationName: "linter"
diff --git a/charts/k8s-daemonset/templates/NOTES.txt b/charts/k8s-daemonset/templates/NOTES.txt
new file mode 100644
index 00000000..cc2b447f
--- /dev/null
+++ b/charts/k8s-daemonset/templates/NOTES.txt
@@ -0,0 +1,43 @@
+
+Check the status of your DaemonSet by running this command:
+
+kubectl get daemonsets --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "k8s-daemonset.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" + + +List the related Pods with the following command: + +kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "k8s-daemonset.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" + + +Use the following command to view information about the Service: + +kubectl get services --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "k8s-daemonset.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" + + +{{ if .Values.containerPorts -}} +{{- $serviceType := .Values.service.type | default "ClusterIP" -}} +Get the application URL by running these commands: + +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.hosts }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} +{{- end }} +{{- else if contains "NodePort" $serviceType }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "k8s-daemonset.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" $serviceType }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ include "k8s-daemonset.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "k8s-daemonset.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" $serviceType }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "k8s-daemonset.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + {{- range $portName, $portSpec := .Values.containerPorts }} + {{- if not $portSpec.disabled }} + echo "Visit http://127.0.0.1:80{{ $portSpec.port }} to use your application container serving port {{ $portName }}" + kubectl port-forward $POD_NAME 80{{ $portSpec.port }}:{{ $portSpec.port }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/k8s-daemonset/templates/_capabilities_helpers.tpl b/charts/k8s-daemonset/templates/_capabilities_helpers.tpl new file mode 100644 index 00000000..8ec79070 --- /dev/null +++ b/charts/k8s-daemonset/templates/_capabilities_helpers.tpl @@ -0,0 +1,42 @@ +{{/* Allow KubeVersion to be overridden. This is mostly used for testing purposes. */}} +{{- define "gruntwork.kubeVersion" -}} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersionOverride -}} +{{- end -}} + +{{/* Get Ingress API Version */}} +{{- define "gruntwork.ingress.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" (include "gruntwork.kubeVersion" .)) -}} + {{- print "networking.k8s.io/v1" -}} + {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} + {{- print "networking.k8s.io/v1beta1" -}} + {{- else -}} + {{- print "extensions/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* Ingress API version aware ingress backend */}} +{{- define "gruntwork.ingress.backend" -}} +{{/* NOTE: The leading whitespace is significant, as it is the specific yaml indentation for injection into the ingress resource. 
*/}} + {{- if eq .ingressAPIVersion "networking.k8s.io/v1" }} + service: + name: {{ if .serviceName }}{{ .serviceName }}{{ else }}{{ .fullName }}{{ end }} + port: + {{- if int .servicePort }} + number: {{ .servicePort }} + {{- else }} + name: {{ .servicePort }} + {{- end }} + {{- else }} + serviceName: {{ if .serviceName }}{{ .serviceName }}{{ else }}{{ .fullName }}{{ end }} + servicePort: {{ .servicePort }} + {{- end }} +{{- end -}} + +{{/* Get PodDisruptionBudget API Version */}} +{{- define "gruntwork.pdb.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">= 1.21-0" (include "gruntwork.kubeVersion" .)) -}} + {{- print "policy/v1" -}} + {{- else -}} + {{- print "policy/v1beta1" -}} + {{- end -}} +{{- end -}} diff --git a/charts/k8s-daemonset/templates/_daemonset_spec.tpl b/charts/k8s-daemonset/templates/_daemonset_spec.tpl new file mode 100644 index 00000000..c47af452 --- /dev/null +++ b/charts/k8s-daemonset/templates/_daemonset_spec.tpl @@ -0,0 +1,427 @@ +{{- /* +Common deployment spec that is shared between the canary and main Deployment controllers. This template requires the +context: +- Values +- Release +- Chart +- isCanary (a boolean indicating if we are rendering the canary deployment or not) +You can construct this context using dict: +(dict "Values" .Values "Release" .Release "Chart" .Chart "isCanary" true) +*/ -}} +{{- define "k8s-daemonset.daemonsetSpec" -}} +{{- /* +We must decide whether or not there are volumes to inject. The logic to decide whether or not to inject is based on +whether or not there are configMaps OR secrets that are specified as volume mounts (`as: volume` attributes). We do this +by using a map to track whether or not we have seen a volume type. We have to use a map because we can't update a +variable in helm chart templates. + +Similarly, we need to decide whether or not there are environment variables to add + +We need this because certain sections are omitted if there are no volumes or environment variables to add. +*/ -}} + +{{/* Go Templates do not support variable updating, so we simulate it using dictionaries */}} +{{- $hasInjectionTypes := dict "hasVolume" false "hasEnvVars" false "exposePorts" false -}} +{{- if .Values.envVars -}} + {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}} +{{- end -}} +{{- if .Values.additionalContainerEnv -}} + {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}} +{{- end -}} +{{- $allContainerPorts := values .Values.containerPorts -}} +{{- range $allContainerPorts -}} + {{/* We are exposing ports if there is at least one key in containerPorts that is not disabled (disabled = false or + omitted) + */}} + {{- if or (not (hasKey . "disabled")) (not .disabled) -}} + {{- $_ := set $hasInjectionTypes "exposePorts" true -}} + {{- end -}} +{{- end -}} +{{- $allSecrets := values .Values.secrets -}} +{{- range $allSecrets -}} + {{- if eq (index . "as") "volume" -}} + {{- $_ := set $hasInjectionTypes "hasVolume" true -}} + {{- else if eq (index . "as") "environment" -}} + {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}} + {{- else if eq (index . "as") "envFrom" }} + {{- $_ := set $hasInjectionTypes "hasEnvFrom" true -}} + {{- else if eq (index . "as") "none" -}} + {{- /* noop */ -}} + {{- else -}} + {{- fail printf "secrets config has unknown type: %s" (index . "as") -}} + {{- end -}} +{{- end -}} +{{- $allConfigMaps := values .Values.configMaps -}} +{{- range $allConfigMaps -}} + {{- if eq (index . 
"as") "volume" -}} + {{- $_ := set $hasInjectionTypes "hasVolume" true -}} + {{- else if eq (index . "as") "environment" -}} + {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}} + {{- else if eq (index . "as") "envFrom" }} + {{- $_ := set $hasInjectionTypes "hasEnvFrom" true -}} + {{- else if eq (index . "as") "none" -}} + {{- /* noop */ -}} + {{- else -}} + {{- fail printf "configMaps config has unknown type: %s" (index . "as") -}} + {{- end -}} +{{- end -}} +{{- if gt (len .Values.persistentVolumes) 0 -}} + {{- $_ := set $hasInjectionTypes "hasVolume" true -}} +{{- end -}} +{{- if gt (len .Values.scratchPaths) 0 -}} + {{- $_ := set $hasInjectionTypes "hasVolume" true -}} +{{- end -}} +{{- if gt (len .Values.emptyDirs) 0 -}} + {{- $_ := set $hasInjectionTypes "hasVolume" true -}} +{{- end -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "k8s-daemonset.fullname" . }}{{ if .isCanary }}-canary{{ end }} + labels: + # These labels are required by helm. You can read more about required labels in the chart best practices guide: + # https://docs.helm.sh/chart_best_practices/#standard-labels + helm.sh/chart: {{ include "k8s-daemonset.chart" . }} + app.kubernetes.io/name: {{ include "k8s-daemonset.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- range $key, $value := .Values.additionalDeploymentLabels }} + {{ $key }}: {{ $value }} + {{- end}} +{{- with .Values.deploymentAnnotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.deploymentStrategy.enabled }} + strategy: + type: {{ .Values.deploymentStrategy.type }} +{{- if and (eq .Values.deploymentStrategy.type "RollingUpdate") .Values.deploymentStrategy.rollingUpdate }} + rollingUpdate: +{{ toYaml .Values.deploymentStrategy.rollingUpdate | indent 6 }} +{{- end }} +{{- end }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "k8s-daemonset.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .isCanary }} + gruntwork.io/deployment-type: canary + {{- else }} + gruntwork.io/deployment-type: main + {{- end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "k8s-daemonset.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .isCanary }} + gruntwork.io/deployment-type: canary + {{- else }} + gruntwork.io/deployment-type: main + {{- end }} + {{- range $key, $value := .Values.additionalPodLabels }} + {{ $key }}: {{ $value }} + {{- end }} + + {{- with .Values.podAnnotations }} + annotations: +{{ toYaml . 
| indent 8 }} + {{- end }} + spec: + {{- if gt (len .Values.serviceAccount.name) 0 }} + serviceAccountName: "{{ .Values.serviceAccount.name }}" + {{- end }} + {{- if hasKey .Values.serviceAccount "automountServiceAccountToken" }} + automountServiceAccountToken : {{ .Values.serviceAccount.automountServiceAccountToken }} + {{- end }} + {{- if .Values.podSecurityContext }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} + {{- end}} + + containers: + {{- if .isCanary }} + - name: {{ .Values.applicationName }}-canary + {{- $repo := required ".Values.canary.containerImage.repository is required" .Values.canary.containerImage.repository }} + {{- $tag := required ".Values.canary.containerImage.tag is required" .Values.canary.containerImage.tag }} + image: "{{ $repo }}:{{ $tag }}" + imagePullPolicy: {{ .Values.canary.containerImage.pullPolicy | default "IfNotPresent" }} + {{- else }} + - name: {{ .Values.applicationName }} + {{- $repo := required ".Values.containerImage.repository is required" .Values.containerImage.repository }} + {{- $tag := required ".Values.containerImage.tag is required" .Values.containerImage.tag }} + image: "{{ $repo }}:{{ $tag }}" + imagePullPolicy: {{ .Values.containerImage.pullPolicy | default "IfNotPresent" }} + {{- end }} + {{- if .Values.containerCommand }} + command: +{{ toYaml .Values.containerCommand | indent 12 }} + {{- end }} + + {{- if index $hasInjectionTypes "exposePorts" }} + ports: + {{- /* + NOTE: we check for a disabled flag here so that users of the helm + chart can override the default containerPorts. Specifically, defining a new + containerPorts in values.yaml will be merged with the default provided by the + chart. For example, if the user provides: + + containerPorts: + app: + port: 8080 + protocol: TCP + + Then this is merged with the default and becomes: + + containerPorts: + app: + port: 8080 + protocol: TCP + http: + port: 80 + protocol: TCP + https: + port: 443 + protocol: TCP + + and so it becomes append as opposed to replace. To handle this, + we allow users to explicitly disable predefined ports. So if the user wants to + replace the ports with their own, they would provide the following values file: + + containerPorts: + app: + port: 8080 + protocol: TCP + http: + disabled: true + https: + disabled: true + */ -}} + {{- range $key, $portSpec := .Values.containerPorts }} + {{- if not $portSpec.disabled }} + - name: {{ $key }} + containerPort: {{ int $portSpec.port }} + protocol: {{ $portSpec.protocol }} + {{- end }} + {{- end }} + {{- end }} + + + {{- if .Values.securityContext }} + securityContext: +{{ toYaml .Values.securityContext | indent 12 }} + {{- end}} + resources: +{{ toYaml .Values.containerResources | indent 12 }} + + {{- if or .Values.lifecycleHooks.enabled (gt (int .Values.shutdownDelay) 0) }} + lifecycle: + {{- if and .Values.lifecycleHooks.enabled .Values.lifecycleHooks.postStart }} + postStart: +{{ toYaml .Values.lifecycleHooks.postStart | indent 14 }} + {{- end }} + + {{- if and .Values.lifecycleHooks.enabled .Values.lifecycleHooks.preStop }} + preStop: +{{ toYaml .Values.lifecycleHooks.preStop | indent 14 }} + {{- else if gt (int .Values.shutdownDelay) 0 }} + # Include a preStop hook with a shutdown delay for eventual consistency reasons. 
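+            # NOTE: the exec hook runs the command inside the application container, so this assumes the image ships a
+            # `sleep` binary on its PATH.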
+ # See https://blog.gruntwork.io/delaying-shutdown-to-wait-for-pod-deletion-propagation-445f779a8304 + preStop: + exec: + command: + - sleep + - "{{ int .Values.shutdownDelay }}" + {{- end }} + + {{- end }} + + {{- /* START ENV VAR LOGIC */ -}} + {{- if index $hasInjectionTypes "hasEnvVars" }} + env: + - name: SYSLOG_HOST + value: "sysloghost" + - name: SYSLOG_PORT + value: "514" + - name: SYSLOG_PROTOCOL + value: "udp" + {{- end }} + {{- range $key, $value := .Values.envVars }} + - name: {{ $key }} + value: {{ quote $value }} + {{- end }} + {{- if .Values.additionalContainerEnv }} +{{ toYaml .Values.additionalContainerEnv | indent 12 }} + {{- end }} + {{- range $name, $value := .Values.configMaps }} + {{- if eq $value.as "environment" }} + {{- range $configKey, $keyEnvVarConfig := $value.items }} + - name: {{ required "envVarName is required on configMaps items when using environment" $keyEnvVarConfig.envVarName | quote }} + valueFrom: + configMapKeyRef: + name: {{ $name }} + key: {{ $configKey }} + {{- end }} + {{- end }} + {{- end }} + {{- range $name, $value := .Values.secrets }} + {{- if eq $value.as "environment" }} + {{- range $secretKey, $keyEnvVarConfig := $value.items }} + - name: {{ required "envVarName is required on secrets items when using environment" $keyEnvVarConfig.envVarName | quote }} + valueFrom: + secretKeyRef: + name: {{ $name }} + key: {{ $secretKey }} + {{- end }} + {{- end }} + {{- end }} + {{- if index $hasInjectionTypes "hasEnvFrom" }} + envFrom: + {{- range $name, $value := .Values.configMaps }} + {{- if eq $value.as "envFrom" }} + - configMapRef: + name: {{ $name }} + {{- end }} + {{- end }} + {{- range $name, $value := .Values.secrets }} + {{- if eq $value.as "envFrom" }} + - secretRef: + name: {{ $name }} + {{- end }} + {{- end }} + {{- end }} + {{- /* END ENV VAR LOGIC */ -}} + + + {{- /* START VOLUME MOUNT LOGIC */ -}} + {{- if index $hasInjectionTypes "hasVolume" }} + volumeMounts: + {{- end }} + {{- range $name, $value := .Values.configMaps }} + {{- if eq $value.as "volume" }} + - name: {{ $name }}-volume + mountPath: {{ quote $value.mountPath }} + {{- if $value.subPath }} + subPath: {{ quote $value.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- range $name, $value := .Values.secrets }} + {{- if eq $value.as "volume" }} + - name: {{ $name }}-volume + mountPath: {{ quote $value.mountPath }} + {{- end }} + {{- end }} + {{- range $name, $value := .Values.persistentVolumes }} + - name: {{ $name }} + mountPath: {{ quote $value.mountPath }} + {{- end }} + {{- range $name, $value := .Values.scratchPaths }} + - name: {{ $name }} + mountPath: {{ quote $value }} + {{- end }} + {{- range $name, $value := .Values.emptyDirs }} + - name: {{ $name }} + mountPath: {{ quote $value }} + {{- end }} + {{- /* END VOLUME MOUNT LOGIC */ -}} + + {{- range $key, $value := .Values.sideCarContainers }} + - name: {{ $key }} +{{ toYaml $value | indent 10 }} + {{- end }} + + + {{- if gt (len .Values.initContainers) 0 }} + initContainers: + {{- range $key, $value := .Values.initContainers }} + - name: {{ $key }} +{{ toYaml $value | indent 10 }} + {{- end }} + {{- end }} + + {{- /* START IMAGE PULL SECRETS LOGIC */ -}} + {{- if gt (len .Values.imagePullSecrets) 0 }} + imagePullSecrets: + {{- range $secretName := .Values.imagePullSecrets }} + - name: {{ $secretName }} + {{- end }} + {{- end }} + {{- /* END IMAGE PULL SECRETS LOGIC */ -}} + + {{- /* START TERMINATION GRACE PERIOD LOGIC */ -}} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ 
.Values.terminationGracePeriodSeconds }} + {{- end}} + {{- /* END TERMINATION GRACE PERIOD LOGIC */ -}} + + {{- /* START VOLUME LOGIC */ -}} + {{- if index $hasInjectionTypes "hasVolume" }} + volumes: + {{- end }} + {{- range $name, $value := .Values.configMaps }} + {{- if eq $value.as "volume" }} + - name: {{ $name }}-volume + configMap: + name: {{ $name }} + {{- if $value.items }} + items: + {{- range $configKey, $keyMountConfig := $value.items }} + - key: {{ $configKey }} + path: {{ required "filePath is required for configMap items" $keyMountConfig.filePath | quote }} + {{- if $keyMountConfig.fileMode }} + mode: {{ include "k8s-daemonset.fileModeOctalToDecimal" $keyMountConfig.fileMode }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- range $name, $value := .Values.secrets }} + {{- if eq $value.as "volume" }} + - name: {{ $name }}-volume + secret: + secretName: {{ $name }} + {{- if $value.items }} + items: + {{- range $secretKey, $keyMountConfig := $value.items }} + - key: {{ $secretKey }} + path: {{ required "filePath is required for secrets items" $keyMountConfig.filePath | quote }} + {{- if $keyMountConfig.fileMode }} + mode: {{ include "k8s-daemonset.fileModeOctalToDecimal" $keyMountConfig.fileMode }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- range $name, $value := .Values.persistentVolumes }} + - name: {{ $name }} + persistentVolumeClaim: + claimName: {{ $value.claimName }} + {{- end }} + {{- range $name, $value := .Values.scratchPaths }} + - name: {{ $name }} + emptyDir: + medium: "Memory" + {{- end }} + {{- range $name, $value := .Values.emptyDirs }} + - name: {{ $name }} + emptyDir: {} + {{- end }} + {{- /* END VOLUME LOGIC */ -}} + + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} +{{- end -}} diff --git a/charts/k8s-daemonset/templates/_helpers.tpl b/charts/k8s-daemonset/templates/_helpers.tpl new file mode 100644 index 00000000..11b35ed6 --- /dev/null +++ b/charts/k8s-daemonset/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "k8s-daemonset.name" -}} + {{- .Values.applicationName | required "applicationName is required" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "k8s-daemonset.fullname" -}} + {{- $name := required "applicationName is required" .Values.applicationName -}} + {{- if .Values.fullnameOverride -}} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} + {{- else if contains $name .Release.Name -}} + {{- .Release.Name | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} + {{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "k8s-daemonset.chart" -}} + {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Convert octal to decimal (e.g 644 => 420). For file permission modes, many people are more familiar with octal notation. 
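+For example, the octal mode 644 works out to 6*64 + 4*8 + 4 = 420 in decimal.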
+However, due to yaml/json limitations, all the Kubernetes resources require file modes to be reported in decimal.
+*/}}
+{{- define "k8s-daemonset.fileModeOctalToDecimal" -}}
+  {{- $digits := splitList "" (toString .) -}}
+
+  {{/* Make sure there are exactly 3 digits */}}
+  {{- if ne (len $digits) 3 -}}
+    {{- fail (printf "File mode octal expects exactly 3 digits: %s" .) -}}
+  {{- end -}}
+
+  {{/* Go Templates do not support variable updating, so we simulate it using dictionaries */}}
+  {{- $accumulator := dict "res" 0 -}}
+  {{- range $idx, $digit := $digits -}}
+    {{- $digitI := atoi $digit -}}
+
+    {{/* atoi from sprig swallows conversion errors, so we double check to make sure it is a valid conversion */}}
+    {{- if and (eq $digitI 0) (ne $digit "0") -}}
+      {{- fail (printf "Digit %d of %s is not a number: %s" $idx . $digit) -}}
+    {{- end -}}
+
+    {{/* Make sure each digit is less than 8 */}}
+    {{- if ge $digitI 8 -}}
+      {{- fail (printf "%s is not a valid octal digit" $digit) -}}
+    {{- end -}}
+
+    {{/* Since we don't have math.Pow, we hard code */}}
+    {{- if eq $idx 0 -}}
+      {{/* 8^2 */}}
+      {{- $_ := set $accumulator "res" (add (index $accumulator "res") (mul $digitI 64)) -}}
+    {{- else if eq $idx 1 -}}
+      {{/* 8^1 */}}
+      {{- $_ := set $accumulator "res" (add (index $accumulator "res") (mul $digitI 8)) -}}
+    {{- else -}}
+      {{/* 8^0 */}}
+      {{- $_ := set $accumulator "res" (add (index $accumulator "res") (mul $digitI 1)) -}}
+    {{- end -}}
+  {{- end -}}
+  {{- "res" | index $accumulator | toString | printf -}}
+{{- end -}}
diff --git a/charts/k8s-daemonset/templates/daemonset.yaml b/charts/k8s-daemonset/templates/daemonset.yaml
new file mode 100644
index 00000000..4921af2c
--- /dev/null
+++ b/charts/k8s-daemonset/templates/daemonset.yaml
@@ -0,0 +1,5 @@
+{{- /*
+The main DaemonSet controller for the application being deployed. This resource manages the creation and replacement
+of the Pods backing your application.
+*/ -}}
+{{ include "k8s-daemonset.daemonsetSpec" (dict "Values" .Values "isCanary" false "Release" .Release "Chart" .Chart) }}
diff --git a/charts/k8s-daemonset/templates/gmc.yaml b/charts/k8s-daemonset/templates/gmc.yaml
new file mode 100644
index 00000000..1553d50f
--- /dev/null
+++ b/charts/k8s-daemonset/templates/gmc.yaml
@@ -0,0 +1,27 @@
+{{- /*
+If the operator configures the google.managedCertificate input variable, then also create a ManagedCertificate resource
+that will provision a Google managed SSL certificate.
+*/ -}}
+{{- if .Values.google.managedCertificate.enabled -}}
+{{- /*
+We declare some variables defined on the Values. These are reused in `with` and `range` blocks where the scoped variable
+(`.`) is rebound within the block.
+*/ -}}
+{{- $domainName := .Values.google.managedCertificate.domainName -}}
+{{- $certificateName := .Values.google.managedCertificate.name -}}
+apiVersion: networking.gke.io/v1beta1
+kind: ManagedCertificate
+metadata:
+  name: {{ $certificateName }}
+  labels:
+    gruntwork.io/app-name: {{ .Values.applicationName }}
+    # These labels are required by helm. You can read more about required labels in the chart best practices guide:
+    # https://docs.helm.sh/chart_best_practices/#standard-labels
+    app.kubernetes.io/name: {{ include "k8s-daemonset.name" . }}
+    helm.sh/chart: {{ include "k8s-daemonset.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + domains: + - {{ $domainName }} +{{- end }} diff --git a/charts/k8s-daemonset/templates/ingress.yaml b/charts/k8s-daemonset/templates/ingress.yaml new file mode 100644 index 00000000..10f935f7 --- /dev/null +++ b/charts/k8s-daemonset/templates/ingress.yaml @@ -0,0 +1,101 @@ +{{- /* +If the operator configures the ingress input variable, then also create an Ingress resource that will route to the +service. Note that Ingress can only route to a Service, so the operator must also configure a Service. +*/ -}} +{{- if .Values.ingress.enabled -}} + +{{- /* +We declare some variables defined on the Values. These are reused in `with` and `range` blocks where the scoped variable +(`.`) is rebound within the block. +*/ -}} +{{- $fullName := include "k8s-service.fullname" . -}} +{{- $ingressAPIVersion := include "gruntwork.ingress.apiVersion" . -}} +{{- $ingressPath := .Values.ingress.path -}} +{{- $ingressPathType := .Values.ingress.pathType -}} +{{- $additionalPathsHigherPriority := .Values.ingress.additionalPathsHigherPriority }} +{{- $additionalPaths := .Values.ingress.additionalPaths }} +{{- $servicePort := .Values.ingress.servicePort -}} +{{- $baseVarsForBackend := dict "fullName" $fullName "ingressAPIVersion" $ingressAPIVersion -}} + +apiVersion: {{ $ingressAPIVersion }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + gruntwork.io/app-name: {{ .Values.applicationName }} + # These labels are required by helm. You can read more about required labels in the chart best practices guide: + # https://docs.helm.sh/chart_best_practices/#standard-labels + app.kubernetes.io/name: {{ include "k8s-service.name" . }} + helm.sh/chart: {{ include "k8s-service.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.ingress.annotations }} +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +{{- end }} +spec: +{{- if .Values.ingress.tls }} +{{- with .Values.ingress.tls }} + tls: +{{ toYaml . | indent 4}} +{{- end }} +{{- end }} + rules: + {{- if .Values.ingress.hosts }} + {{- range .Values.ingress.hosts }} + - host: {{ . | quote }} + http: + paths: + {{- range $additionalPathsHigherPriority }} + - path: {{ .path }} + {{- if and (eq $ingressAPIVersion "networking.k8s.io/v1") .pathType }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- include "gruntwork.ingress.backend" (merge . $baseVarsForBackend) }} + {{- end }} + - path: {{ $ingressPath }} + {{- if and (eq $ingressAPIVersion "networking.k8s.io/v1") $ingressPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- include "gruntwork.ingress.backend" (dict "serviceName" $fullName "servicePort" $servicePort | merge $baseVarsForBackend) }} + {{- range $additionalPaths }} + - path: {{ .path }} + {{- if and (eq $ingressAPIVersion "networking.k8s.io/v1") .pathType }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- include "gruntwork.ingress.backend" (merge . $baseVarsForBackend) }} + {{- end }} + {{- end }} + {{- else }} + - http: + paths: + {{- range $additionalPathsHigherPriority }} + - path: {{ .path }} + {{- if and (eq $ingressAPIVersion "networking.k8s.io/v1") .pathType }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- include "gruntwork.ingress.backend" (merge . 
$baseVarsForBackend) }} + {{- end }} + - path: {{ $ingressPath }} + {{- if and (eq $ingressAPIVersion "networking.k8s.io/v1") $ingressPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- include "gruntwork.ingress.backend" (dict "serviceName" $fullName "servicePort" $servicePort | merge $baseVarsForBackend) }} + {{- range $additionalPaths }} + - path: {{ .path }} + {{- if and (eq $ingressAPIVersion "networking.k8s.io/v1") .pathType }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- include "gruntwork.ingress.backend" (merge . $baseVarsForBackend) }} + {{- end }} + + {{- end }} +{{- end }} diff --git a/charts/k8s-daemonset/templates/pdb.yaml b/charts/k8s-daemonset/templates/pdb.yaml new file mode 100644 index 00000000..631e92cf --- /dev/null +++ b/charts/k8s-daemonset/templates/pdb.yaml @@ -0,0 +1,23 @@ +{{- /* +If there is a specification for minimum number of Pods that should be available, create a PodDisruptionBudget +*/ -}} +{{- if .Values.minPodsAvailable -}} +apiVersion: {{ include "gruntwork.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "k8s-service.fullname" . }} + labels: + gruntwork.io/app-name: {{ .Values.applicationName }} + # These labels are required by helm. You can read more about required labels in the chart best practices guide: + # https://docs.helm.sh/chart_best_practices/#standard-labels + app.kubernetes.io/name: {{ include "k8s-service.name" . }} + helm.sh/chart: {{ include "k8s-service.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + minAvailable: {{ int .Values.minPodsAvailable }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "k8s-service.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/charts/k8s-daemonset/templates/service.yaml b/charts/k8s-daemonset/templates/service.yaml new file mode 100644 index 00000000..f831547f --- /dev/null +++ b/charts/k8s-daemonset/templates/service.yaml @@ -0,0 +1,42 @@ +{{- /* +If the operator configures the service input variable, then also create a Service resource that exposes the Pod as a +stable endpoint that can be routed within the Kubernetes cluster. +*/ -}} +{{- if .Values.service.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "k8s-daemonset.fullname" . }} + labels: + # These labels are required by helm. You can read more about required labels in the chart best practices guide: + # https://docs.helm.sh/chart_best_practices/#standard-labels + app.kubernetes.io/name: {{ include "k8s-daemonset.name" . }} + helm.sh/chart: {{ include "k8s-daemonset.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.service.annotations }} +{{- with .Values.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +{{- end }} +spec: + type: {{ .Values.service.type | default "ClusterIP" }} + ports: + {{- range $key, $value := .Values.service.ports }} + - name: {{ $key }} +{{ toYaml $value | indent 6 }} + {{- end }} + selector: + app.kubernetes.io/name: {{ include "k8s-daemonset.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- if .Values.service.sessionAffinityConfig }} + {{- with .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: +{{ toYaml . 
| indent 4 }} + {{- end}} + {{- end}} + {{- end}} +{{- end }} diff --git a/charts/k8s-daemonset/templates/serviceaccount.yaml b/charts/k8s-daemonset/templates/serviceaccount.yaml new file mode 100644 index 00000000..ba47e205 --- /dev/null +++ b/charts/k8s-daemonset/templates/serviceaccount.yaml @@ -0,0 +1,22 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }} + namespace: {{ $.Release.Namespace }} + labels: + app: {{ template "k8s-service.name" . }} + {{- if .Values.serviceAccount.labels }} + {{- toYaml .Values.serviceAccount.labels | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + annotations: + {{ toYaml .Values.serviceAccount.annotations | indent 4 }} + {{- end }} +{{- if gt (len .Values.imagePullSecrets) 0 }} +imagePullSecrets: + {{- range $secretName := .Values.imagePullSecrets }} + - name: {{ $secretName }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/k8s-daemonset/templates/servicemonitor.yaml b/charts/k8s-daemonset/templates/servicemonitor.yaml new file mode 100644 index 00000000..380a0a6b --- /dev/null +++ b/charts/k8s-daemonset/templates/servicemonitor.yaml @@ -0,0 +1,22 @@ +{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "k8s-service.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- end }} + labels: + chart: {{ template "k8s-service.chart" . }} + app: {{ template "k8s-service.name" . }} + heritage: "{{ .Release.Service }}" + {{- if .Values.serviceMonitor.labels }} + {{- toYaml .Values.serviceMonitor.labels | nindent 4 }} + {{- end }} +spec: + endpoints: + {{- values .Values.serviceMonitor.endpoints | toYaml | nindent 6 }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template "k8s-service.name" . }} +{{- end }} diff --git a/charts/k8s-daemonset/values.yaml b/charts/k8s-daemonset/values.yaml new file mode 100644 index 00000000..41deb811 --- /dev/null +++ b/charts/k8s-daemonset/values.yaml @@ -0,0 +1,713 @@ +#---------------------------------------------------------------------------------------------------------------------- +# CHART PARAMETERS +# This file declares the configuration input values for the k8s-service Helm chart. +# This is a YAML-formatted file. +#---------------------------------------------------------------------------------------------------------------------- + +#---------------------------------------------------------------------------------------------------------------------- +# REQUIRED VALUES +# These values are expected to be defined and passed in by the operator when deploying this helm chart. +#---------------------------------------------------------------------------------------------------------------------- + +# containerImage is a map that describes the container image that should be used to serve the application managed by +# this chart. +# The expected keys are: +# - repository (string) (required) : The container image repository that should be used. +# E.g `nginx` ; `gcr.io/kubernetes-helm/tiller` +# - tag (string) (required) : The tag of the image (e.g `latest`) that should be used. We recommend using a +# fixed tag or the SHA of the image. Avoid using the tags `latest`, `head`, +# `canary`, or other tags that are designed to be “floating”. +# - pullPolicy (string) : The image pull policy to employ. Determines when the image will be pulled in. 
See +# the official Kubernetes docs for more info. If undefined, this will default to +# `IfNotPresent`. +# +# The following example deploys the `nginx:stable` image with a `IfNotPresent` image pull policy, which indicates that +# the image should only be pulled if it has not been pulled previously. +# +# EXAMPLE: +# +# containerImage: +# repository: nginx +# tag: stable +# pullPolicy: IfNotPresent + +# applicationName is a string that names the application. This is used to label the pod and to name the main application +# container in the pod spec. The label is keyed under "gruntwork.io/app-name" + + +#---------------------------------------------------------------------------------------------------------------------- +# OPTIONAL VALUES +# These values have defaults, but may be overridden by the operator +#---------------------------------------------------------------------------------------------------------------------- + +# containerCommand is a list of strings that indicate a custom command to run for the container in place of the default +# configured on the image. Omit to run the default command configured on the image. +# +# Example (run echo "Hello World"): +# +# containerCommand: +# - "echo" +# - "Hello World" +containerCommand: null + +# containerPorts is a map that specifies the ports to open on the container. This is a nested map: the first map lists +# the named ports, while the second layer lists the port spec. The named references can be used to refer to the specific +# port of the container in other resources, like Service. +# The expected keys of the port spec are: +# - port (int) (required) : The port in the container that should be exposed. +# - protocol (string) (required) : The network protocol (e.g TCP or UDP) that is exposed. +# - disabled (bool) : Whether or not this port is disabled. This defaults to false if unset. Provided as a +# convenience to override the default ports on the commandline. For example, to +# disable the default port, you can pass `--set containerPorts.http.disabled=true`. +# +# The default config exposes TCP port 80 and binds the name `http` to it. +containerPorts: + http: + port: 80 + protocol: TCP + +# livenessProbe is a map that specifies the liveness probe of the main application container. Liveness probes indicate +# when a container has reached a fatal state where it needs to be restarted to recover. When the liveness probe fails, +# the container is automatically recreated. You can read more about container liveness probes in the official docs: +# https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ +# NOTE: This variable is injected directly into the container spec. +# +# The following example specifies an http GET based liveness probe, that will base the probe on a http GET request to +# the port bound to name `http` (see description on `containerPorts`) on the path `/`. +# +# EXAMPLE: +# +# livenessProbe: +# httpGet: +# path: / +# port: http +#livenessProbe: {} + +# readinessProbe is a map that specifies the readiness probe of the main application container. Readiness probes +# indicate when a container is unable to serve traffic. When the readiness probe fails, the container is cycled out of +# the list of available containers to the `Service`. You can read more about readiness probes in the official docs: +# https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ +# NOTE: This variable is injected directly into the container spec. 
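+# Unlike a liveness probe, a failing readiness probe does not restart the container; the Pod is only removed from
+# Service endpoints until the probe passes again.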
+
+#
+# The following example specifies an http GET based readiness probe, that will base the probe on a http GET request to
+# the port bound to name `http` (see description on `containerPorts`) on the path `/`.
+#
+# EXAMPLE:
+#
+# readinessProbe:
+#   httpGet:
+#     path: /
+#     port: http
+#readinessProbe: {}
+
+# securityContext is a map that specifies the privilege and access control settings for a Pod or Container. Security Context
+# can be specified when the application requires additional access control permissions. More details on securityContext and supported
+# settings can be found at https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+# Similar to podSecurityContext {}, however, this sets security attributes at the container level rather than at the pod level scope.
+
+#
+# EXAMPLE:
+# 1) To run a container in privileged mode
+# securityContext:
+#   privileged: true
+#
+# 2) To run a container as a specific user
+# securityContext:
+#   runAsUser: 2000
+# securityContext: {}
+
+# podSecurityContext holds pod-level security access control settings.
+# Similar to securityContext {}, however, this sets security attributes at the pod level rather than at the container level scope.
+# This allows certain attributes to be set that are not possible at the container level. For example 'fsGroup'.
+# More details can be found at https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core
+
+# EXAMPLE:
+# podSecurityContext:
+#   fsGroup: 2000
+podSecurityContext: {}
+
+
+# shutdownDelay is the number of seconds to delay the shutdown sequence of the Pod by. This is implemented as a sleep
+# call in the preStop hook. By default, this chart includes a preStop hook with a shutdown delay for eventual
+# consistency reasons. You can read more about why you might want to do this in
+# https://blog.gruntwork.io/delaying-shutdown-to-wait-for-pod-deletion-propagation-445f779a8304
+# You can disable this behavior by setting this value to 0.
+# NOTE: this conflicts with lifecycleHooks.preStop
+shutdownDelay: 5
+
+# lifecycleHooks configures container lifecycle hooks on the Pod so you can run arbitrary commands after the
+# container starts (postStart) or before the container stops.
+# Refer to https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/ for more information on container
+# lifecycles.
+#
+# EXAMPLE:
+#
+# lifecycleHooks:
+#   enabled: true
+#   postStart:
+#     exec:
+#       command:
+#         - echo
+#         - "Run after starting container"
+#   preStop:
+#     exec:
+#       command:
+#         - echo
+#         - "Run before stopping container"
+#
+# NOTE: the preStop hook conflicts with shutdownDelay
+lifecycleHooks:
+  enabled: false
+
+# sideCarContainers specifies any additional containers that should be deployed as side cars to the main application
+# container. This will be included in the Deployment container spec so that it will be included in the application Pod.
+# This is a nested map, where the first map key is used to name the container, with the nested map being injected as the
+# container spec.
+# +# The following example specifies a data dog agent container as a side car with some environment variables, binding the +# name `datadog`: +# +# EXAMPLE: +# +# sideCarContainers: +# datadog: +# image: datadog/agent:latest +# env: +# - name: DD_API_KEY +# value: ASDF-1234 +# - name: SD_BACKEND +# value: docker +sideCarContainers: {} + +# initContainers specifies any additional containers that should be deployed as init containers to the main application +# container. This will be included in the Deployment container spec so that it will be included in the application Pod. +# This is a nested map, where the first map key is used to name the container, with the nested map being injected as the +# container spec. +# +# The following example specifies a flyway image as an init container with an environment variable, binding the +# name `flyway`: +# +# EXAMPLE: +# +# initContainers: +# flyway: +# image: flyway/flyway +# env: +# - name: FLYWAY_LOCATIONS +# value: 'filesystem:/flyway/migrations' +initContainers: {} + +# canary specifies test pod(s) that are deployed alongside your application's stable track pods. +# It is useful for testing a new release candidate in a production environment with minimal disruption and +# for allowing you to find any issues early. +# The expected keys of the canary spec are: +# - enabled (bool) (required) : Whether or not the canary deployment should be created. If false, no canary deployment will be created. +# - containerImage (map) (required) : A map that specifies the application container and tag to be managed by the canary deployment. +# This has the same structure as containerImage. +# - replicaCount (int) : The number of pods that should be managed by the canary deployment. Defaults to 1 if unset. +# +# The following example specifies a simple canary deployment: +# +# EXAMPLE: +# +# canary: +# enabled: true +# replicaCount: 1 +# containerImage: +# repository: nginx +# tag: 1.16.0 +# pullPolicy: IfNotPresent +canary: {} + + +# deploymentStrategy specifies the strategy used to replace old Pods by new ones. Type can be "RollingUpdate" or +# "Recreate". "RollingUpdate" is the default value. +# RollingUpdate: The Deployment updates Pods in a rolling update fashion. +# Recreate: All existing Pods are killed before new ones are created. +# +# RollingUpdate can be further refined by providing custom rollingUpdate options. +# The rollingUpdate variable is a map that is directly injected into the deployment spec and it has the following keys: +# - maxUnavailable (Optional) : Field that specifies the maximum number of Pods that can be unavailable +# during the update process. The value can be an absolute number +# (for example, 5) or a percentage of desired Pods (for example, 10%). +# The value cannot be 0 if rollingUpdate.maxSurge is 0. +# This option defaults to 25%. +# - maxSurge (Optional) : Field that specifies the maximum number of Pods that can be created over +# the desired number of Pods. The value can be an absolute number (for example, 5) +# or a percentage of desired Pods (for example, 10%). The value cannot be 0 if +# MaxUnavailable is 0. +# This option defaults to 25%. +# +# EXAMPLE: +# +# deploymentStrategy: +# enabled: false +# type: RollingUpdate +# rollingUpdate: +# maxSurge: 30% +# maxUnavailable: 30% +deploymentStrategy: + enabled: false + type: RollingUpdate + rollingUpdate: {} + +# deploymentAnnotations will add the provided map to the annotations for the Deployment resource created by this chart. 
+# The keys and values are free form, but subject to the limitations of Kubernetes resource annotations. +# NOTE: This variable is injected directly into the deployment spec. +deploymentAnnotations: {} + +# additionalDeploymentLabels will add the provided map to the labels for the Deployment resource created by this chart. +# this is in addition to the helm template related labels created by the chart +# The keys and values are free form, but subject to the limitations of Kubernetes labelling. +# NOTE: This variable is injected directly into the deployment spec. +additionalDeploymentLabels: {} + +# podAnnotations will add the provided map to the annotations for the Pod resource created by the Deployment. +# The keys and values are free form, but subject to the limitations of Kubernetes resource annotations. +# NOTE: This variable is injected directly into the pod spec. +podAnnotations: {} + +# additionalDeploymentLabels will add the provided map to the labels for the Pods created by the deployment resource. +# this is in addition to the helm template related labels created by the chart +# The keys and values are free form, but subject to the limitations of Kubernetes labelling. +# The match labels for the deployment aren't affected by these additional labels +# NOTE: This variable is injected directly into the deployment spec. +additionalPodLabels: {} + +# minPodsAvailable specifies the minimum number of pods that should be available at any given point in time. This is +# used to configure a PodDisruptionBudget for the included pod. See +# https://blog.gruntwork.io/avoiding-outages-in-your-kubernetes-cluster-using-poddisruptionbudgets-ef6a4baa5085 +# for an introduction to PodDisruptionBudgets. +# NOTE: setting this to 0 will skip creating the PodDisruptionBudget resource. +minPodsAvailable: 0 + +# service is a map that specifies the configuration for the Service resource that is created by the chart. +# The expected keys are: +# - enabled (bool) (required) : Whether or not the Service resource should be created. If false, no Service +# resource will be created. +# - type (string) : The Service type, as defined in Kubernetes +# (https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) +# Defaults to ClusterIP. +# - annotations (map) : Annotations that should be added to the Service resource. This is injected +# directly in to the resource yaml. +# - ports (map) (required) : A map that specifies the port bindings of the service against the Pods in the +# Deployment. This has the same structure as containerPorts, with the additional +# key of `targetPort` to indicate which port of the container the service port +# should route to. The `targetPort` can be a name defined in `containerPorts`. +# - sessionAffinity (string) : Used to maintain session affinity, as defined in Kubernetes - supports 'ClientIP' and 'None' +# (https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies) +# Kubernetes defaults to None. +# - sessionAffinityConfig (object) : Configuration for session affinity, as defined in Kubernetes +# (https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies) +# +# The following example uses the default config and enables client IP based session affinity with a maximum session +# sticky time of 3 hours. 
+# EXAMPLE: +# +# service: +# enabled: true +# ports: +# app: +# port: 80 +# targetPort: http +# protocol: TCP +# sessionAffinity: ClientIP +# sessionAffinityConfig: +# clientIP: +# timeoutSeconds: 10800 +# +# The default config configures a Service of type ClusterIP with no annotation, and binds port 80 of the pod to the +# port 80 of the service, and names the binding as `app`: +service: + enabled: true + ports: + app: + port: 80 + targetPort: http + protocol: TCP + +# servicemonitor is a map that can be used to configure a Service monitor for the operator. By default, service monitor is off. +# The expected keys are: +# - enabled (bool) (required) : Whether or not the Service Monitor resource should be created. If false, no +# Service Monitor resource will be created. +# - namespace (string) (required) : Namespace of Endpoints object. +# - endpoints (list[map]) (required) : An object used to discovers targets from listed endpoints of a service. +# For each endpoint address one target is discovered per port. +# If the endpoint is backed by a pod, all additional container ports of the pod, +# not bound to an endpoint port, are discovered as targets as well. +# +# The following example specifies a ServiceMonitor rule that describes the set of targets to be monitored by Prometheus. +# EXAMPLE: +# +# serviceMonitor: +# enabled: true +# namespace: monitoring +# endpoints: +# default: +# interval: 10s +# scrapeTimeout: 10s +# honorLabels: true +# path: /metrics +# port: http +# scheme: http +serviceMonitor: + enabled: false + namespace: monitoring + labels: {} + endpoints: {} + +# ingress is a map that can be used to configure an Ingress resource for this service. By default, turn off ingress. +# NOTE: if you enable Ingress, then Service must also be enabled. +# The expected keys are: +# - enabled (bool) (required) : Whether or not the Ingress resource should be created. If false, no +# Ingress resource will be created. +# - annotations (map) : Annotations that should be added to the Service resource. This is +# injected directly in to the resource yaml. +# - tls (list[map]) : Sets up TLS termination on the ingress rule. Each item is a separate TLS +# rule that maps to one or more hosts specified in this ingress rule. This +# is injected directly in to the resource yaml. +# - hosts (list[string]) : Sets up the host routes for the ingress resource. There will be a routing +# rule for each host defined in this list. If empty, will match all hosts. +# - path (string) (required) : The url path to match to route to the Service. +# - pathType (string) (required in k8s version 1.19+) +# : The path type to use for the ingress rule. Refer to +# https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types +# for more information. +# - servicePort (int|string) (required) : The port (as a number) or the name of the port on the Service to route +# to. +# - additionalPaths (list[map]) : Additional paths that should be added to the ingress which will be lower +# priority than the application service path. Each item corresponds to +# another path, and should define `path`, `pathType`, `serviceName`, and +# `servicePort`. +# - additionalPathsHigherPriority (list[map]) +# : Additional paths that should be added to the ingress which will be higher +# priority than the application service path. Each item corresponds to +# another path, and should define `path`, `pathType`, `serviceName`, and +# `servicePort`. 
+# +# The following example specifies an Ingress rule that routes chart-example.local/app to the Service port `app` with +# TLS configured using the certificate key pair in the Secret `chart-example-tls`: +# +# EXAMPLE: +# +# ingress: +# enabled: true +# annotations: +# kubernetes.io/ingress.class: nginx +# kubernetes.io/tls-acme: "true" +# path: /app +# servicePort: app +# hosts: +# - chart-example.local +# tls: +# - secretName: chart-example-tls +# hosts: +# - chart-example.local +ingress: + enabled: false + +# envVars is a map of strings to strings that specifies hard coded environment variables that should be set on the +# application container. The keys will be mapped to environment variable keys, with the values mapping to the +# environment variable values. +# +# NOTE: If you wish to set environment variables using Secrets, see the `secrets` setting in this file. +# +# The following example configures two environment variables, DB_HOST and DB_PORT: +# +# EXAMPLE: +# +# envVars: +# DB_HOST: "mysql.default.svc.cluster.local" +# DB_PORT: 3306 +envVars: {} + +# additionalContainerEnv is a list of additional environment variables +# definitions that will be inserted into the Container's environment YAML. +# +# Example: +# additionalContainerEnv: +# - name: DD_AGENT_HOST +# valueFrom: +# fieldRef: +# fieldPath: status.hostIP +# - name: DD_ENTITY_ID +# valueFrom: +# fieldRef: +# fieldPath: metadata.uid +additionalContainerEnv: {} + +# configMaps is a map that specifies the ConfigMap resources that should be exposed to the main application container. Each +# entry in the map represents a ConfigMap resource. The key refers to the name of the ConfigMap that should be exposed, +# with the value specifying how to expose the ConfigMap. The value is also a map and has the following attributes: +# - as (enum[volume,environment,envFrom,none]) (required) +# : ConfigMaps can be exposed to Pods as a volume mount, or as environment variables. This attribute is a string +# enum that is expected to be either "volume", "environment", or "envFrom", specifying that the ConfigMap should +# be exposed as a mounted volume, via environment variables, or loaded as environment variables respectively. +# This attribute can also be set to "none", which disables the `ConfigMap` on the container. +# - mountPath (string) +# : For ConfigMaps mounted as a volume, specify the mount path on the container file system where the config values +# will be available. Required when the ConfigMap is exposed as a volume. Ignored when the ConfigMap is exposed as +# environment variables. +# - subPath (string) +# : For ConfigMaps mounted as a volume, specify the sub path on the volume system where the config values will be +# available. Optional when the ConfigMap is exposed as a volume. Ignored when the ConfigMap is exposed as +# environment variables. +# - items (map[ConfigMapItem]) +# : Specify how each ConfigMap value should be made available. The keys are the key of the ConfigMap that you wish +# to configure, while the value is another map that controls how that key should be exposed. Required when the +# ConfigMap is exposed as environment variables. When the ConfigMap is exposed as a volume, this field is optional. +# If empty for volume ConfigMaps, all ConfigMpas will be mounted with the key as the file name relative to the +# mountPath. See below for expected attributes. 
+# The expected attributes of the `ConfigMapItem` map (the submap within `items`) are: +# - filePath (string) : The file path relative to the ConfigMap mountPath where the value of the ConfigMap keyed at +# the given key of the item should be mounted to in the container. Ignored when the ConfigMap +# is exposed as environment variables. +# - fileMode (string) : The permissions mode of the file when mounted in the container. Ignored when the ConfigMap is +# exposed as environment variables. Expected to be the octal (e.g 777, 644). Defaults to 644. +# - envVarName (string) : The name of the environment variable where the value of the ConfigMap keyed at the given key +# of the item should be stored. Ignored when the ConfigMap is exposed as a volume mount. +# +# NOTE: These config values are only automatically injected to the main application container. To add them to the side +# car containers, use the official Kubernetes Pod syntax: +# https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/ +# +# The following example exposes the ConfigMap `myconfig` as a volume mounted to `/etc/myconfig`, while it exposes the +# ConfigMap `myotherconfig` as an environment variable. Additionally, it automatically mounts all of the keys +# `anotherconfig` as environment variables using the `envFrom` keyword. +# +# EXAMPLE: +# +# configMaps: +# myconfig: +# as: volume +# mountPath: /etc/myconfig +# myotherconfig: +# as: environment +# items: +# foo: +# envVarName: CONFIG_FOO +# anotherconfig: +# as: envFrom +configMaps: {} + +# persistentVolumes is a map that specifies PersistentVolumes that should be mounted on the pod. Each entry represents a +# persistent volume which should already exist within your cluster. They Key is the name of the persistent volume. +# The value is also a map and has the following attributes: +# - mountPath (string) (required) +# : The path within the container upon which this volume should be mounted. +# - claimName (string) (required) +# : The name of the Persistent Volume Claim on which this Persistent Volume in bound. +# +# EXAMPLE: +# persistentVolumes: +# example-pv: +# mountPath: /mnt/myVol +# claimName: example-pv-claim +# example-pv-2: +# mountPath: /mnt/myOtherVol +# claimName: example-pv2-claim +# +# +persistentVolumes: {} + +# scratchPaths is a map of key value pairs that specifies which paths in the container should be setup as scratch space. +# Under the hood each entry in the map is converted to a tmpfs volume with the name set to the key and mounted into the +# container on the path provided as the value. +# +# EXAMPLE: +# scratchPaths: +# example: /mnt/scratch +scratchPaths: {} + +# emptyDirs is a map of key value pairs that specifies which paths in the container should be setup as an emptyDir volume. +# Under the hood each entry in the map is converted to a volume stored on whatever medium that backs the node +# (disk, SSD, network storage) and mounted into the container on the path provided as the value. +# +# EXAMPLE: +# emptyDirs: +# example: /mnt/example +emptyDirs: {} + +# secrets is a map that specifies the Secret resources that should be exposed to the main application container. Each entry in +# the map represents a Secret resource. The key refers to the name of the Secret that should be exposed, with the value +# specifying how to expose the Secret. 
The value is also a map and has the following attributes: +# - as (enum[volume,environment,envFrom,none]) (required) +# : Secrets can be exposed to Pods as a volume mount, or as environment variables. This attribute is a string enum +# that is expected to be either "volume", "environment", or "envFrom", specifying that the Secret should be +# exposed as a mounted volume, via environment variables, or loaded in its entirety as environment variables +# respectively. This attribute can also be set to "none", which disables the `Secret` on the container. +# - mountPath (string) +# : For Secrets mounted as a volume, specify the mount path on the container file system where the secrets will be +# available. Required when the Secret is exposed as a volume. Ignored when the Secret is exposed as environment +# variables. +# - items (map[SecretItem]) +# : Specify how each Secret value should be made available. The keys are the key of the Secret that you wish to +# configure, while the value is another map that controls how that key should be exposed. Required when the Secret +# is exposed as environment variables. When the Secret is exposed as a volume, this field is optional. If empty for +# volume Secrets, all Secrets will be mounted with the key as the file name relative to the mountPath. See below +# for expected attributes. +# The expected attributes of the `SecretItem` map (the submap within `items`) are: +# - filePath (string) : The file path relative to the Secret mountPath where the value of the Secret keyed at the +# given key of the item should be mounted to in the container. Ignored when the Secret is +# exposed as environment variables. +# - fileMode (string) : The permissions mode of the file when mounted in the container. Ignored when the Secret is +# exposed as environment variables. Expected to be the octal (e.g 777, 644). Defaults to 644. +# - envVarName (string) : The name of the environment variable where the value of the Secret keyed at the given key of +# the item should be stored. Ignored when the Secret is exposed as a volume mount. +# +# NOTE: These secrets are only automatically injected to the main application container. To add them to the side car +# containers, use the official Kubernetes Pod syntax: +# https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets +# +# The following example exposes the Secret `mysecret` as a volume mounted to `/etc/mysecret`, while it exposes the +# Secret `myothersecret` as an environment variable. Additionally, it automatically mounts all of the keys +# `anothersecret` as environment variables using the `envFrom` keyword. +# +# EXAMPLE: +# +# secrets: +# mysecret: +# as: volume +# mountPath: /etc/mysecret +# myothersecret: +# as: environment +# items: +# foo: +# envVarName: SECRET_FOO +# anothersecret: +# as: envFrom +secrets: {} + +# containerResources specifies the amount of resources the application container will require. Only specify if you have +# specific resource needs. +# NOTE: This variable is injected directly into the pod spec. See the official documentation for what this might look +# like: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +containerResources: {} + +# nodeSelector and affinity specify restrictions on what node this pod should be scheduled on. +# NOTE: These variables are injected directly into the pod spec. 
See the official documentation for what this might look +# like: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +nodeSelector: {} +affinity: {} + +# tolerations can be used to allow the pod to be scheduled on nodes with a specific taint. +# NOTE: This variable is injected directly into the pod spec. See the official documentation for what this might look +# like: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +# imagePullSecrets lists the Secret resources that should be used for accessing private registries. Each item in the +# list is a string that corresponds to the Secret name. +imagePullSecrets: [] + +# terminationGracePeriodSeconds sets grace period Kubernetes will wait before terminating the pod. The timeout happens +# in parallel to preStop hook and the SIGTERM signal, Kubernetes does not wait for preStop to finish before beginning +# the grace period. +# +# EXAMPLE: +# terminationGracePeriodSeconds: 30 +terminationGracePeriodSeconds: {} + +# serviceAccount is a map that configures the ServiceAccount information for the Pod. +# The expected keys of serviceAccount are: +# - name (string) : The name of the ServiceAccount in the Namespace where the Pod is deployed +# that should be used. By default this is the default ServiceAccount of the +# Namespace. +# - automountServiceAccountToken (bool) : Whether or not to automatically mount the ServiceAccount token as a volume +# into the Pod. Note that this can be used to override the equivalent config +# on the ServiceAccount. +# - create (bool) : Whether or not to create a service account with the desired name +# - annotations (map) : Annotations will add the provided map to the annotations for the service +# account created +# - labels (map) : Labels will add the provided map to the annotations for the service +# account created +# +# The default config uses empty string to indicate that the default service account should be used and one shouldn't +# be created +serviceAccount: + name: "" + create: false + annotations: {} + labels: {} + +# horizontalPodAutoscaler is a map that configures the Horizontal Pod Autoscaler information for this pod +# The expected keys of hpa are: +# - enabled (bool) : Whether or not Horizontal Pod Autoscaler should be created, if false the +# Horizontal Pod Autoscaler will not be created +# - minReplicas (int) : The minimum amount of replicas allowed +# - maxReplicas (int) : The maximum amount of replicas allowed +# - avgCpuUtilization (int) : The target average CPU utilization to be used with the metrics +# - avgMemoryUtilization (int) : The target average Memory utilization to be used with the metrics +# +# The default config will not create the Horizontal Pod Autoscaler by setting enabled = false, the default values are +# set so if enabled is true the horizontalPodAutoscaler has valid values. +horizontalPodAutoscaler: + enabled: false + minReplicas: 1 + maxReplicas: 10 + +# customResources is a map that lets you define Kubernetes resources you want installed and configured as part of this chart. +# The expected keys of customResources are: +# - enabled (bool) : Whether or not the provided custom resource definitions should be created. +# - resources (map) : A map of custom Kubernetes resources you want to install during the installation of the chart. +# +# NOTE: By default enabled = false, and no custom resources will be created. If you provide any resources, be sure to +# provide them as quoted using "|", and set enabled: true. 
+# +# The following example creates a custom ConfigMap and a Secret. +# +# EXAMPLE: +# +# customResources: +# enabled: true +# resources: +# custom_configmap: | +# apiVersion: v1 +# kind: ConfigMap +# metadata: +# name: example +# data: +# key: value +# custom_secret: | +# apiVersion: v1 +# kind: Secret +# metadata: +# name: example +# type: Opaque +# data: +# key: dmFsdWU= +customResources: + enabled: false + resources: {} + +# fullnameOverride is a string that allows overriding the default fullname that appears as the +# application name and is used as the application name by kubernetes. +fullnameOverride: "" + +#---------------------------------------------------------------------------------------------------------------------- +# GOOGLE SPECIFIC VALUES +# google specifies Google (GKE) specific configuration to be set via arguments/env. variables +#---------------------------------------------------------------------------------------------------------------------- +google: + # managedCertificate can be used to provision a Google Managed Certificate. Associate the ManagedCertificate object + # to an Ingress by adding an annotation 'networking.gke.io/managed-certificates' to the Ingress. + # + # The expected keys are: + # - enabled (bool) (required) : Whether or not the ManagedCertificate resource should be created. + # - domainName (string) : Specifies the domain that the SSL certificate will be created for + # - name (string) : Specifies the name of the SSL certificate that you reference in Ingress with + # networking.gke.io/managed-certificates: name + # + # The following example specifies a ManagedCertificate with a domain name 'api.acme.com' and name 'acme-cert': + # + # EXAMPLE: + # + # google: + # managedCertificate: + # enabled: true + # name: acme-cert + # domainName: api.acme.com + # + # NOTE: if you enable managedCertificate, then Ingress must also be enabled. + # Use a Google Managed Certificate. By default, turn off. + managedCertificate: + enabled: false diff --git a/examples/k8s-daemonset-nginx/README.md b/examples/k8s-daemonset-nginx/README.md new file mode 100644 index 00000000..5de68698 --- /dev/null +++ b/examples/k8s-daemonset-nginx/README.md @@ -0,0 +1,381 @@ +# Quickstart Guide: K8S Service Nginx Example + +This quickstart guide uses the `k8s-service` Helm Chart to deploy Nginx with healthchecks defined onto your Kubernetes +cluster. In this guide, we define the input values necessary to set the application container packaged in the +`Deployment` as the `nginx` container. + +This guide is meant to demonstrate the defaults set by the Helm Chart to see what you get out of the box. + + +## Overview + +In this guide we will walk through the steps necessary to deploy a vanilla Nginx server using the `k8s-service` Helm +Chart against a Kubernetes cluster. We will use `minikube` for this guide, but the chart is designed to work with many +different Kubernetes clusters (e.g EKS or GKE). + +Here are the steps, linked to the relevant sections of this doc: + +1. [Install and setup `minikube`](#setting-up-your-kubernetes-cluster-minikube) +1. [Install and setup `helm`](#setting-up-helm-on-minikube) +1. [Deploy Nginx with `k8s-service`](#deploy-nginx-with-k8s-service) +1. [Check the status of the deployment](#check-the-status-of-the-deployment) +1. [Access Nginx](#accessing-nginx) +1. [Upgrade Nginx to a newer version](#upgrading-nginx-container-to-a-newer-version) + +**NOTE:** This guide assumes you are running the steps in this directory. 
If you are at the root of the repo, be sure to
+change directory before starting:
+
+```
+cd examples/k8s-service-nginx
+```
+
+## Setting up your Kubernetes cluster: Minikube
+
+In this guide, we will use `minikube` as our Kubernetes cluster. Minikube is an official tool maintained by the
+Kubernetes community that lets you provision and run Kubernetes locally on your machine. By having a local environment you
+can have fast iteration cycles while you develop and play with Kubernetes before deploying to production. You can learn
+more about Minikube in [the official docs](https://kubernetes.io/docs/setup/minikube/).
+
+To set up `minikube`:
+
+1. Install kubectl by following [the official docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+1. Install the minikube utility by following [the official
+   docs](https://kubernetes.io/docs/tasks/tools/install-minikube/)
+1. Run `minikube start` to provision a new `minikube` instance on your local machine.
+1. Verify setup with `kubectl`: `kubectl cluster-info`
+
+## Setting up Helm on Minikube
+
+In order to install Helm Charts, we need to have the Helm CLI. First install the [`helm`
+client](https://docs.helm.sh/using_helm/#installing-helm). Make sure the binary is discoverable in your `PATH` variable.
+See [this stackoverflow post](https://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux-unix)
+for instructions on setting up your `PATH` on Unix, and [this
+post](https://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows) for instructions on
+Windows.
+
+Verify your installation by running `helm version`:
+
+```bash
+$ helm version
+version.BuildInfo{Version:"v3.1+unreleased", GitCommit:"c12a9aee02ec07b78dce07274e4816d9863d765e", GitTreeState:"clean", GoVersion:"go1.13.9"}
+```
+
+## Deploy Nginx with k8s-service
+
+Now that we have a working Kubernetes cluster with Helm installed and ready to go, the next step is to deploy Nginx
+using the `k8s-service` chart.
+
+This folder contains predefined input values you can use with the `k8s-service` chart to deploy Nginx. These values
+define the container image to use as part of the deployment, and augments the default values of the chart by defining a
+`livenessProbe` and `readinessProbe` for the main container (which in this case will be `nginx:1.14.2`). Take a look at
+[the provided `values.yaml` file](./values.yaml) to see how the values are defined.
+
+We will now instruct helm to install the `k8s-service` chart using these values. To do so, we will use the `helm
+install` command:
+
+```
+helm install -f values.yaml ../../charts/k8s-service
+```
+
+The above command will instruct the `helm` client to install the Helm Chart defined in the relative path
+`../../charts/k8s-service`, merging the input values defined in `values.yaml` with the one provided by the chart.
+
+Note that when you install this chart, `helm` will select a random name to use for your release. In Helm, a release
+ties together the provided input values with a chart install, tracking the state of the resources that have been
+deployed using Helm. The release name uniquely identifies the release, and can be used to interact with a previous
+deployment.
+ +When you run this command, you should see output similar to below: + +``` +NAME: queenly-liger +LAST DEPLOYED: Sat Feb 16 09:14:39 2019 +NAMESPACE: default +STATUS: DEPLOYED + +RESOURCES: +==> v1/Service +NAME AGE +queenly-liger-nginx 0s + +==> v1/Deployment +queenly-liger-nginx 0s + +==> v1/Pod(related) + +NAME READY STATUS RESTARTS AGE +queenly-liger-nginx-7b7bb49d-b8tf8 0/1 ContainerCreating 0 0s +queenly-liger-nginx-7b7bb49d-fgjd4 0/1 ContainerCreating 0 0s +queenly-liger-nginx-7b7bb49d-zxpcm 0/1 ContainerCreating 0 0s + + +NOTES: +Check the status of your Deployment by running this comamnd: + +kubectl get deployments --namespace default -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=queenly-liger" + + +List the related Pods with the following command: + +kubectl get pods --namespace default -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=queenly-liger" + + +Use the following command to view information about the Service: + +kubectl get services --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "k8s-service.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" + + +Get the application URL by running these commands: + export POD_NAME=$(kubectl get pods --namespace default -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=queenly-liger" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application container serving port http" + kubectl port-forward $POD_NAME 8080:80 +``` + +The install command will always output: + +- The release name. In this case, the name is `queenly-liger`. +- The namespace where the resources are created. In this case, the namespace is `default`. +- The status of the release. In this case, the release was successfully deployed so the status is `DEPLOYED`. +- A summary of the resources created. Additionally, for certain resources, `helm` will also output the related resource. + For example, in this case, `helm` outputted all the `Pods` created by the `Deployment` resource. +- Any additional notes provided by the chart maintainer. For `k8s-service`, we output some commands you can use to check on the + status of the service. + +Since we will be referring to this output for the remainder of this guide, it would be a good idea to copy paste the +output somewhere so you can refer to it. If you ever lose the information and want to see the output again, you can use +the `helm status` command to view the output. The `helm status` command takes in the release name, and outputs +information about that release. + +Now that the service is installed and deployed, let's verify the deployment! + +## Check the Status of the Deployment + +In the previous step, we deployed Nginx using the `k8s-service` Helm Chart. Now we want to verify it has deployed +successfully. + +Under the hood the Helm Chart creates a `Deployment` resource in Kubernetes. `Deployments` are a controller that can be +used to declaratively manage your application. When you create the `Deployment` resource, it instructs Kubernetes the +desired state of the application deployment (e.g how many `Pods` to use, what container image to use, any volumes to +mount, etc). Kubernetes will then asynchronously create resources to match the desired state. This means that instead of +creating and updating `Pods` on the cluster, you can simply declare that you want 3 Nginx `Pods` deployed and let +Kubernetes handle the details. The downside of this is that the deployment happens asynchronously. 
In other words, this +means the Helm Chart may install successfully but the deployment could still fail. + +So let's look at the status of the deployment to confirm the deployment successfully finished. In the output above, the +`NOTES` section lists out a command that can be used to get information about the `Deployment`. So let's try running +that: + +``` +$ kubectl get deployments --namespace default -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=queenly-liger" +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +queenly-liger-nginx 3 3 3 3 5m +``` + +In the output above, Kubernetes is reporting information about the `Pods` related to the `Deployment`. Each column is a +count of the number of `Pods` that fit that description. In this case, we have the correct number of `Pods` that are up +to date on the latest image (`UP-TO-DATE`) and available to accept traffic (`AVAILABLE`). When those columns diverge +from the `DESIRED` column, then that means either the deployment is still in progress, or something failed in the +process. + +You can further dig deeper using `describe`, or querying the different subresources such as the underlying Pods. For +this guide, we are satisfied with the `Deployment` status output above. See the [How do I check the status of the +rollout?](/charts/k8s-service/README.md#how-do-i-check-the-status-of-the-rollout) section of the chart README for +more details on how to check in on the detailed status of a rollout, and to help troubleshoot any issues in your +environment. + +Once you have confirmed the `Deployment` has rolled out successfully, the next step is to verify that Nginx is up and +accessible. + +## Accessing Nginx + +### Accessing a Pod directly + +Let's first try accessing a single Nginx `Pod`. To do so, we will open a tunnel from our local machine that routes +through the Kubernetes control plane to the underlying `Pod` on the worker nodes. + +To open the tunnel, we need two pieces of information: + +- The name of the `Pod` to open the tunnel to. +- The open ports on the `Pod` and the port we wish to access. + +To retrieve the name of a `Pod`, we can inspect the list of `Pods` created by the `Deployment`. As in the previous +section, the `helm install` output notes contains a command we can use to get the list of `Pods` managed by the +`Deployment`, so let's try running that here: + +``` +$ kubectl get pods --namespace default -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=queenly-liger" +NAME READY STATUS RESTARTS AGE +queenly-liger-nginx-7b7bb49d-b8tf8 1/1 Running 0 13m +queenly-liger-nginx-7b7bb49d-fgjd4 1/1 Running 0 13m +queenly-liger-nginx-7b7bb49d-zxpcm 1/1 Running 0 13m +``` + +Here you can see that there are 3 `Pods` in the `READY` state that match that criteria. Pick one of them to access from +the list above and record the name. + +Next, we need to see what ports are open on the `Pod`. The `k8s-service` Helm Chart will open ports 80 to the +container by default. However, if you do not know which ports are open, you can inspect the `Pod` to a list of the open +ports. To get detailed information about a `Pod`, use `kubectl describe pod NAME`. 
In our example, we will pull detailed +information about the `Pod` `queenly-liger-nginx-7b7bb49d-b8tf8`: + +``` +$ kubectl describe pod queenly-liger-nginx-7b7bb49d-b8tf8 +Name: queenly-liger-nginx-7b7bb49d-b8tf8 +Namespace: default +Priority: 0 +PriorityClassName: +Node: minikube/10.0.2.15 +Start Time: Sat, 16 Feb 2019 09:14:40 -0800 +Labels: app.kubernetes.io/instance=queenly-liger + app.kubernetes.io/name=nginx + pod-template-hash=7b7bb49d +Annotations: +Status: Running +IP: 172.17.0.6 +Controlled By: ReplicaSet/queenly-liger-nginx-7b7bb49d +Containers: + nginx: + Container ID: docker://ac921c94c8d5f9428815d64bfa541f0481ab37ddaf42a37f2ebec95eb61ef2c0 + Image: nginx:1.14.2 + Image ID: docker-pullable://nginx@sha256:d1eed840d5b357b897a872d17cdaa8a4fc8e6eb43faa8ad2febb31ce0c537910 + Ports: 80/TCP + Host Ports: 0/TCP + State: Running + Started: Sat, 16 Feb 2019 09:15:09 -0800 + Ready: True + Restart Count: 0 + Liveness: http-get http://:http/ delay=0s timeout=1s period=10s #success=1 #failure=3 + Readiness: http-get http://:http/ delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from default-token-nskm6 (ro) +Conditions: + Type Status + Initialized True + Ready True + ContainersReady True + PodScheduled True +Volumes: + default-token-nskm6: + Type: Secret (a volume populated by a Secret) + SecretName: default-token-nskm6 + Optional: false +QoS Class: BestEffort +Node-Selectors: +Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s + node.kubernetes.io/unreachable:NoExecute for 300s +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 15m default-scheduler Successfully assigned default/queenly-liger-nginx-7b7bb49d-b8tf8 to minikube + Normal Pulling 15m kubelet, minikube pulling image "nginx:1.14.2" + Normal Pulled 14m kubelet, minikube Successfully pulled image "nginx:1.14.2" + Normal Created 14m kubelet, minikube Created container + Normal Started 14m kubelet, minikube Started container +``` + +This outputs all the detailed metadata about the running `Pod`, as well as an event log of all the cluster activity +related to the `Pod`. In the output, the `Containers` section shows addtional information about each container deployed in the `Pod`. Since we want to know the open ports for the `nginx` container, we will look at the `Ports` section of the `nginx` container in that output. Here is the specific output we are interested in: + +``` +Containers: + nginx: + Ports: 80/TCP +``` + +In the output, we confirm that indeed port 80 is open. So let's open a port forward! + +In this example, we will open a tunnel from port 8080 on our local machine to port 80 of the `Pod`: + +``` +$ kubectl port-forward queenly-liger-nginx-7b7bb49d-b8tf8 8080:80 +Forwarding from 127.0.0.1:8080 -> 80 +Forwarding from [::1]:8080 -> 80 +``` + +This command will run in the foreground, and keeps the tunnel open as long as the command is running. You can close the +tunnel at any time by hitting `Ctrl+C`. + +Now try accessing `localhost:8080` in the browser. You should get the default nginx welcome page. Assuming you do not +have nginx running locally, this means that you have successfully accessed the `Pod` from your local machine! + +### Accessing a Pod through a Service + +In the previous step we created a port forward from our local machine to the `Pod` directly. However, normally you would +not want to access your applications this way because `Pods` are ephemeral in Kubernetes. 
They come and go as nodes
+scale up and down. They are also only a single resource, and thus do not provide any form of load balancing. This is
+where `Services` come into play.
+
+A `Service` in Kubernetes is used to expose a group of `Pods` that match a given selector under a stable endpoint.
+`Service` resources track which `Pods` are live and ready, and only route traffic to those that are in the `READY`
+status. The `READY` status is managed using `readinessProbes`: as long as the `Pod` passes the readiness check, the
+`Pod` will be marked `READY` and kept in the pool for the `Service`.
+
+There are several different types of `Services`. You can learn more about the different types in the [How do I expose my
+application](/charts/k8s-service/README.md#how-do-i-expose-my-application-internally-to-the-cluster) section of the
+chart README. For this example, we used the default `Service` resource created by the chart, but overrode the type to be
+`NodePort`. A `NodePort` `Service` exposes a port on the Kubernetes worker that routes to the `Service` endpoint. This
+endpoint will load balance across the `Pods` that match the selector for the `Service`.
+
+To access a `NodePort` `Service`, we need to first find out what port is exposed. We can do this by querying for the
+`Service` using `kubectl`. As before, the `NOTES` output contains a command we can use to find the related `Service`.
+However, the `NOTES` output also contains instructions for directly getting the service node port and service node ip.
+Here, we will use those commands to extract the endpoint for the `Service`, with one modification. Because we are
+running the `Service` on `minikube`, there is one layer of indirection in the `minikube` VM. `minikube` runs in its own
+VM on your machine, which means that the ip of the node will be incorrect. So instead of querying for the registered
+node IP in Kubernetes, we will instead use `minikube` to get the ip address of the `minikube` VM to use as the node IP:
+
+```bash
+export NODE_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services queenly-liger-nginx)
+export NODE_IP=$(minikube ip)
+echo http://$NODE_IP:$NODE_PORT
+```
+
+The first command queries the `Service` resource to find out the node port that was used to expose the service. The second
+command queries the ip address of `minikube`. The last command will `echo` out the endpoint where the service is
+available. Try hitting that endpoint in your browser and you should see the familiar nginx splash screen.
+
+## Undeploying Nginx
+
+Once you have tested and are satisfied with your Nginx service, you can undeploy it to clean up your cluster. To
+undeploy the service, you need to delete the corresponding Helm Release. Helm Releases are what tracks all the resources
+associated with a chart in a single deployment.
+
+To delete the Helm Release, you need to first identify the name of the release. The release name is outputted as part of
+the initial deployment. For our example in this guide, the name is `queenly-liger`.
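+
+Note that, instead of letting `helm` generate a name, you can also choose the release name yourself at install time by
+passing it as the first argument to `helm install`, which can make the release easier to track down later. For example
+(the name `my-nginx` below is purely illustrative):
+
+```
+helm install my-nginx -f values.yaml ../../charts/k8s-service
+```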
+ +If you forget the release name, you can always look it up from the list of deployed charts using `helm ls`: + +``` +$ helm ls +NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE +queenly-liger 1 Sat Feb 16 11:36:01 2019 DEPLOYED k8s-service-0.0.1-replace default +``` + +Once you have the release name, you can use the `helm uninstall` command to delete a release and undeploy all the +associated resources: + +``` +$ helm uninstall queenly-liger +release "queenly-liger" deleted +``` + +To check, you can use `kubectl` to query for the resources. For example, now if you query for the `Service`, you will +get an error: + +``` +$ kubectl get --namespace default services queenly-liger-nginx +Error from server (NotFound): services "queenly-liger-nginx" not found +``` + + +## Summary + +Congratulations! At this point, you have: + +- Setup `minikube` to have a local dev environment of Kubernetes. +- Installed and deployed Helm on `minikube`. +- Deployed nginx on to `minikube` using the `k8s-service` Helm Chart. +- Verified the deployment by querying for resources using `kubectl` and opening port forwards to access the endpoints. + +To learn more about the `k8s-service` Helm Chart, refer to [the chart documentation](/charts/k8s-service). diff --git a/examples/k8s-daemonset-nginx/values.yaml b/examples/k8s-daemonset-nginx/values.yaml new file mode 100644 index 00000000..d1d9abe4 --- /dev/null +++ b/examples/k8s-daemonset-nginx/values.yaml @@ -0,0 +1,69 @@ +#---------------------------------------------------------------------------------------------------------------------- +# CHART PARAMETERS FOR NGINX EXAMPLE +# This file declares the required values for the k8s-daemonset helm chart to deploy nginx. +# This is a YAML-formatted file. +#---------------------------------------------------------------------------------------------------------------------- + +#---------------------------------------------------------------------------------------------------------------------- +# REQUIRED VALUES OF CHART +# These are the required values defined by the k8s-daemonset chart. Here we will set them to deploy an nginx container. +#---------------------------------------------------------------------------------------------------------------------- + +# containerImage is a map that describes the container image that should be used to serve the application managed by +# the k8s-daemonset chart. +# The expected keys are: +# - repository (string) (required) : The container image repository that should be used. +# E.g `nginx` ; `gcr.io/kubernetes-helm/tiller` +# - tag (string) (required) : The tag of the image (e.g `latest`) that should be used. We recommend using a +# fixed tag or the SHA of the image. Avoid using the tags `latest`, `head`, +# `canary`, or other tags that are designed to be “floating”. +# - pullPolicy (string) : The image pull policy to employ. Determines when the image will be pulled in. See +# the official Kubernetes docs for more info. If undefined, this will default to +# `IfNotPresent`. +# +# The following example deploys the `nginx:stable` image with a `IfNotPresent` image pull policy, which indicates that +# the image should only be pulled if it has not been pulled previously. We deploy a specific, locked tag so that we +# don't inadvertently upgrade nginx during a deployment that changes some other unrelated input value. 
+containerImage: + repository: fluent/fluentd-kubernetes-daemonset + tag: v1-debian-syslog + pullPolicy: IfNotPresent + +# applicationName is a string that names the application. This is used to label the pod and to name the main application +# container in the pod spec. Here we use nginx as the name since we are deploying nginx. +applicationName: "fluentd" + +#---------------------------------------------------------------------------------------------------------------------- +# OVERRIDE OPTIONAL VALUES +# These values have defaults in the k8s-daemonset chart, but we override a few of them for the purposes of this demo. +#---------------------------------------------------------------------------------------------------------------------- + +# livenessProbe is a map that specifies the liveness probe of the main application container. Liveness probes indicate +# when a container has reached a fatal state where it needs to be restarted to recover. When the liveness probe fails, +# the container is automatically recreated. You can read more about container liveness probes in the official docs: +# https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ +# NOTE: This variable is injected directly into the container spec. +# +# The following example specifies an http GET based liveness probe, that will base the probe on a http GET request to +# the port bound to name `http` (port 80 in the default settings) on the path `/`. +livenessProbe: + httpGet: + path: / + port: http + +# readinessProbe is a map that specifies the readiness probe of the main application container. Readiness probes +# indicate when a container is unable to serve traffic. When the readiness probe fails, the container is cycled out of +# the list of available containers to the `Service`. You can read more about readiness probes in the official docs: +# https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ +# NOTE: This variable is injected directly into the container spec. +# +# The following example specifies an http GET based readiness probe, that will base the probe on a http GET request to +# the port bound to name `http` (see description on `containerPorts`) on the path `/`. +readinessProbe: + httpGet: + path: / + port: http + +# We override the service type to use NodePort so that we can access the Service from outside the Kubernetes cluster. +service: + type: NodePort diff --git a/test/k8s_daemonset_nginx_example_test.go b/test/k8s_daemonset_nginx_example_test.go new file mode 100644 index 00000000..10fdc5b6 --- /dev/null +++ b/test/k8s_daemonset_nginx_example_test.go @@ -0,0 +1,206 @@ +//go:build all || integration +// +build all integration + +// NOTE: We use build flags to differentiate between template tests and integration tests so that you can conveniently +// run just the template tests. See the test README for more information. + +package test + +import ( + "fmt" + "path/filepath" + "strings" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + http_helper "github.com/gruntwork-io/terratest/modules/http-helper" + "github.com/gruntwork-io/terratest/modules/k8s" + "github.com/gruntwork-io/terratest/modules/random" + test_structure "github.com/gruntwork-io/terratest/modules/test-structure" + "github.com/stretchr/testify/require" + "golang.org/x/mod/semver" +) + +// Test that: +// +// 1. We can deploy the example +// 2. The deployment succeeds without errors +// 3. 
We can open a port forward to one of the Pods and access nginx +// 4. We can access nginx via the service endpoint +// 5. We can access nginx via the ingress endpoint +// 6. If we set a lower priority path, the application path takes precendence over the nginx service +// 7. If we set a higher priority path, that takes precedence over the nginx service +func TestK8SServiceNginxExample(t *testing.T) { + t.Parallel() + + workingDir := filepath.Join(".", "stages", t.Name()) + + //os.Setenv("SKIP_setup", "true") + //os.Setenv("SKIP_create_namespace", "true") + //os.Setenv("SKIP_install", "true") + //os.Setenv("SKIP_validate_initial_deployment", "true") + //os.Setenv("SKIP_upgrade", "true") + //os.Setenv("SKIP_validate_upgrade", "true") + //os.Setenv("SKIP_delete", "true") + //os.Setenv("SKIP_delete_namespace", "true") + + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) + require.NoError(t, err) + examplePath, err := filepath.Abs(filepath.Join("..", "examples", "k8s-daemonset-nginx")) + require.NoError(t, err) + + // Create a test namespace to deploy resources into, to avoid colliding with other tests + test_structure.RunTestStage(t, "setup", func() { + kubectlOptions := k8s.NewKubectlOptions("", "", "") + test_structure.SaveKubectlOptions(t, workingDir, kubectlOptions) + + uniqueID := random.UniqueId() + test_structure.SaveString(t, workingDir, "uniqueID", uniqueID) + }) + kubectlOptions := test_structure.LoadKubectlOptions(t, workingDir) + uniqueID := test_structure.LoadString(t, workingDir, "uniqueID") + testNamespace := fmt.Sprintf("k8s-daemonset-nginx-%s", strings.ToLower(uniqueID)) + + defer test_structure.RunTestStage(t, "delete_namespace", func() { + k8s.DeleteNamespace(t, kubectlOptions, testNamespace) + }) + + test_structure.RunTestStage(t, "create_namespace", func() { + k8s.CreateNamespace(t, kubectlOptions, testNamespace) + }) + + kubectlOptions.Namespace = testNamespace + + // Use the values file in the example and deploy the chart in the test namespace + // Set a random release name + releaseName := fmt.Sprintf("k8s-daemonset-nginx-%s", strings.ToLower(uniqueID)) + options := &helm.Options{ + KubectlOptions: kubectlOptions, + ValuesFiles: []string{filepath.Join(examplePath, "values.yaml")}, + SetValues: map[string]string{ + "ingress.enabled": "true", + "ingress.path": "/app", + "ingress.pathType": "Prefix", + "ingress.servicePort": "http", + "ingress.annotations.kubernetes\\.io/ingress\\.class": "nginx", + "ingress.annotations.nginx\\.ingress\\.kubernetes\\.io/rewrite-target": "/", + "ingress.additionalPaths[0].path": "/app", + "ingress.additionalPaths[0].pathType": "Prefix", + "ingress.additionalPaths[0].serviceName": "black-hole", + "ingress.additionalPaths[0].servicePort": "80", + "ingress.additionalPaths[1].path": "/black-hole", + "ingress.additionalPaths[1].pathType": "Prefix", + "ingress.additionalPaths[1].serviceName": "black-hole", + "ingress.additionalPaths[1].servicePort": "80", + }, + } + + defer test_structure.RunTestStage(t, "delete", func() { + helm.Delete(t, options, releaseName, true) + }) + + test_structure.RunTestStage(t, "install", func() { + helm.Install(t, options, helmChartPath, releaseName) + }) + + test_structure.RunTestStage(t, "validate_initial_deployment", func() { + verifyPodsCreatedSuccessfully(t, kubectlOptions, "nginx", releaseName, NumPodsExpected) + verifyAllPodsAvailable(t, kubectlOptions, "nginx", releaseName, nginxValidationFunction) + verifyServiceAvailable(t, kubectlOptions, "nginx", releaseName, 
nginxValidationFunction) + + // We expect this to succeed, because the black hole service that overlaps with the nginx service is added as lower + // priority. + verifyIngressAvailable(t, kubectlOptions, releaseName, "/app", nginxValidationFunction) + + // On the other hand, we expect this to fail because the black hole service does not exist + verifyIngressAvailable(t, kubectlOptions, releaseName, "/black-hole", serviceUnavailableValidationFunction) + }) + + test_structure.RunTestStage(t, "upgrade", func() { + // Now redeploy with higher priority path and make sure it fails + options.SetValues["ingress.additionalPathsHigherPriority[0].path"] = "/app" + options.SetValues["ingress.additionalPathsHigherPriority[0].pathType"] = "Prefix" + options.SetValues["ingress.additionalPathsHigherPriority[0].serviceName"] = "black-hole" + options.SetValues["ingress.additionalPathsHigherPriority[0].servicePort"] = "80" + helm.Upgrade(t, options, helmChartPath, releaseName) + }) + + test_structure.RunTestStage(t, "validate_upgrade", func() { + // We expect the service to still come up cleanly + verifyPodsCreatedSuccessfully(t, kubectlOptions, "nginx", releaseName, NumPodsExpected) + verifyAllPodsAvailable(t, kubectlOptions, "nginx", releaseName, nginxValidationFunction) + verifyServiceAvailable(t, kubectlOptions, "nginx", releaseName, nginxValidationFunction) + + // ... but now the nginx service via ingress should be unavailable because of the higher priority black hole path + verifyIngressAvailable(t, kubectlOptions, releaseName, "/app", serviceUnavailableValidationFunction) + }) +} + +// nginxValidationFunction checks that we get a 200 response with the nginx welcome page. +func nginxValidationFunction(statusCode int, body string) bool { + return statusCode == 200 && strings.Contains(body, "Welcome to nginx") +} + +// serviceUnavailableValidationFunction checks that we get a 503 response and the maintenance page +func serviceUnavailableValidationFunction(statusCode int, body string) bool { + return statusCode == 503 && strings.Contains(body, "Service Temporarily Unavailable") +} + +func verifyIngressAvailable( + t *testing.T, + kubectlOptions *k8s.KubectlOptions, + ingressName string, + path string, + validationFunction func(int, string) bool, +) { + version, err := k8s.GetKubernetesClusterVersionWithOptionsE(t, kubectlOptions) + require.NoError(t, err) + + // If the actual cluster version is >= v1.19.0, use networkingv1 functions. Otherwise, use networkingv1beta1 + // functions. 
+ var ingressEndpoint string + if semver.Compare(version, "v1.19.0") >= 0 { + // Get the ingress and wait until it is available + k8s.WaitUntilIngressAvailable( + t, + kubectlOptions, + ingressName, + WaitTimerRetries, + WaitTimerSleep, + ) + + // Now hit the service endpoint to verify it is accessible + ingress := k8s.GetIngress(t, kubectlOptions, ingressName) + if ingress.Status.LoadBalancer.Ingress[0].IP == "" { + ingressEndpoint = ingress.Status.LoadBalancer.Ingress[0].Hostname + } else { + ingressEndpoint = ingress.Status.LoadBalancer.Ingress[0].IP + } + } else { + // Get the ingress and wait until it is available + k8s.WaitUntilIngressAvailableV1Beta1( + t, + kubectlOptions, + ingressName, + WaitTimerRetries, + WaitTimerSleep, + ) + + // Now hit the service endpoint to verify it is accessible + ingress := k8s.GetIngressV1Beta1(t, kubectlOptions, ingressName) + if ingress.Status.LoadBalancer.Ingress[0].IP == "" { + ingressEndpoint = ingress.Status.LoadBalancer.Ingress[0].Hostname + } else { + ingressEndpoint = ingress.Status.LoadBalancer.Ingress[0].IP + } + } + + http_helper.HttpGetWithRetryWithCustomValidation( + t, + fmt.Sprintf("http://%s%s", ingressEndpoint, path), + nil, + WaitTimerRetries, + WaitTimerSleep, + validationFunction, + ) +} diff --git a/test/k8s_service_template_test.go b/test/k8s_service_template_test.go index 81d3fed0..27605587 100644 --- a/test/k8s_service_template_test.go +++ b/test/k8s_service_template_test.go @@ -753,7 +753,7 @@ func TestK8SServiceDeploymentRollingUpdateStrategy(t *testing.T) { "deploymentStrategy.enabled": "true", "deploymentStrategy.type": "RollingUpdate", }, - ) + ) assert.EqualValues(t, "RollingUpdate", string(deployment.Spec.Strategy.Type)) require.Nil(t, deployment.Spec.Strategy.RollingUpdate) From c3ae35ee2ba581d8e7d9ce9176830af458cbe4f4 Mon Sep 17 00:00:00 2001 From: Raghu Katti Date: Tue, 26 Apr 2022 16:26:40 -0400 Subject: [PATCH 2/7] k8s-daemonset initial cut --- .../README.md | 152 ++++++++-------- .../values.yaml | 12 +- ... => k8s_daemonset_fluentd_example_test.go} | 44 ++--- ...monset_template_render_helpers_for_test.go | 167 ++++++++++++++++++ 4 files changed, 271 insertions(+), 104 deletions(-) rename examples/{k8s-daemonset-nginx => k8s-daemonset-fluentd}/README.md (73%) rename examples/{k8s-daemonset-nginx => k8s-daemonset-fluentd}/values.yaml (89%) rename test/{k8s_daemonset_nginx_example_test.go => k8s_daemonset_fluentd_example_test.go} (82%) create mode 100644 test/k8s_daemonset_template_render_helpers_for_test.go diff --git a/examples/k8s-daemonset-nginx/README.md b/examples/k8s-daemonset-fluentd/README.md similarity index 73% rename from examples/k8s-daemonset-nginx/README.md rename to examples/k8s-daemonset-fluentd/README.md index 5de68698..72c3856b 100644 --- a/examples/k8s-daemonset-nginx/README.md +++ b/examples/k8s-daemonset-fluentd/README.md @@ -1,15 +1,15 @@ -# Quickstart Guide: K8S Service Nginx Example +# Quickstart Guide: K8S Service Fluentd Example -This quickstart guide uses the `k8s-service` Helm Chart to deploy Nginx with healthchecks defined onto your Kubernetes +This quickstart guide uses the `k8s-daemonset` Helm Chart to deploy Fluentd with healthchecks defined onto your Kubernetes cluster. In this guide, we define the input values necessary to set the application container packaged in the -`Deployment` as the `nginx` container. +`Daemonset` as the `fluentd` container. This guide is meant to demonstrate the defaults set by the Helm Chart to see what you get out of the box. 
## Overview -In this guide we will walk through the steps necessary to deploy a vanilla Nginx server using the `k8s-service` Helm +In this guide we will walk through the steps necessary to deploy a vanilla Fluentd server using the `k8s-daemonset` Helm Chart against a Kubernetes cluster. We will use `minikube` for this guide, but the chart is designed to work with many different Kubernetes clusters (e.g EKS or GKE). @@ -17,16 +17,16 @@ Here are the steps, linked to the relevant sections of this doc: 1. [Install and setup `minikube`](#setting-up-your-kubernetes-cluster-minikube) 1. [Install and setup `helm`](#setting-up-helm-on-minikube) -1. [Deploy Nginx with `k8s-service`](#deploy-nginx-with-k8s-service) +1. [Deploy Fluentd with `k8s-daemonset`](#deploy-fluentd-with-k8s-daemonset) 1. [Check the status of the deployment](#check-the-status-of-the-deployment) -1. [Access Nginx](#accessing-nginx) -1. [Upgrade Nginx to a newer version](#upgrading-nginx-container-to-a-newer-version) +1. [Access Fluentd](#accessing-fluentd) +1. [Upgrade Fluentd to a newer version](#upgrading-fluentd-container-to-a-newer-version) **NOTE:** This guide assumes you are running the steps in this directory. If you are at the root of the repo, be sure to change directory before starting: ``` -cd examples/k8s-service-nginx +cd examples/k8s-daemonset-fluentd ``` ## Setting up your Kubernetes cluster: Minikube @@ -60,25 +60,25 @@ $ helm version version.BuildInfo{Version:"v3.1+unreleased", GitCommit:"c12a9aee02ec07b78dce07274e4816d9863d765e", GitTreeState:"clean", GoVersion:"go1.13.9"} ``` -## Deploy Nginx with k8s-service +## Deploy Fluentd with k8s-daemonset -Now that we have a working Kubernetes cluster with Helm installed and ready to go, the next step is to deploy Nginx -using the `k8s-service` chart. +Now that we have a working Kubernetes cluster with Helm installed and ready to go, the next step is to deploy Fluentd +using the `k8s-daemonset` chart. -This folder contains predefined input values you can use with the `k8s-service` chart to deploy Nginx. These values +This folder contains predefined input values you can use with the `k8s-daemonset` chart to deploy Fluentd. These values define the container image to use as part of the deployment, and augments the default values of the chart by defining a -`livenessProbe` and `readinessProbe` for the main container (which in this case will be `nginx:1.14.2`). Take a look at +`livenessProbe` and `readinessProbe` for the main container (which in this case will be `fluentd:1.14.2`). Take a look at [the provided `values.yaml` file](./values.yaml) to see how the values are defined. -We will now instruct helm to install the `k8s-service` chart using these values. To do so, we will use the `helm +We will now instruct helm to install the `k8s-daemonset` chart using these values. To do so, we will use the `helm install` command: ``` -helm install -f values.yaml ../../charts/k8s-service +helm install -f values.yaml ../../charts/k8s-daemonset ``` The above command will instruct the `helm` client to install the Helm Chart defined in the relative path -`../../charts/k8s-service`, merging the input values defined in `values.yaml` with the one provided by the chart. +`../../charts/k8s-daemonset`, merging the input values defined in `values.yaml` with the one provided by the chart. Note that when you install this chart, `helm` will select a random name to use for your release. 
In Helm, a release ties together the provided input values with a chart install, tracking the state of the resources that have been @@ -96,37 +96,37 @@ STATUS: DEPLOYED RESOURCES: ==> v1/Service NAME AGE -queenly-liger-nginx 0s +queenly-liger-fluentd 0s -==> v1/Deployment -queenly-liger-nginx 0s +==> v1/Daemonset +queenly-liger-fluentd 0s ==> v1/Pod(related) NAME READY STATUS RESTARTS AGE -queenly-liger-nginx-7b7bb49d-b8tf8 0/1 ContainerCreating 0 0s -queenly-liger-nginx-7b7bb49d-fgjd4 0/1 ContainerCreating 0 0s -queenly-liger-nginx-7b7bb49d-zxpcm 0/1 ContainerCreating 0 0s +queenly-liger-fluentd-7b7bb49d-b8tf8 0/1 ContainerCreating 0 0s +queenly-liger-fluentd-7b7bb49d-fgjd4 0/1 ContainerCreating 0 0s +queenly-liger-fluentd-7b7bb49d-zxpcm 0/1 ContainerCreating 0 0s NOTES: -Check the status of your Deployment by running this comamnd: +Check the status of your Daemonset by running this comamnd: -kubectl get deployments --namespace default -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=queenly-liger" +kubectl get deployments --namespace default -l "app.kubernetes.io/name=fluentd,app.kubernetes.io/instance=queenly-liger" List the related Pods with the following command: -kubectl get pods --namespace default -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=queenly-liger" +kubectl get pods --namespace default -l "app.kubernetes.io/name=fluentd,app.kubernetes.io/instance=queenly-liger" Use the following command to view information about the Service: -kubectl get services --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "k8s-service.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" +kubectl get services --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "k8s-daemonset.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" Get the application URL by running these commands: - export POD_NAME=$(kubectl get pods --namespace default -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=queenly-liger" -o jsonpath="{.items[0].metadata.name}") + export POD_NAME=$(kubectl get pods --namespace default -l "app.kubernetes.io/name=fluentd,app.kubernetes.io/instance=queenly-liger" -o jsonpath="{.items[0].metadata.name}") echo "Visit http://127.0.0.1:8080 to use your application container serving port http" kubectl port-forward $POD_NAME 8080:80 ``` @@ -137,8 +137,8 @@ The install command will always output: - The namespace where the resources are created. In this case, the namespace is `default`. - The status of the release. In this case, the release was successfully deployed so the status is `DEPLOYED`. - A summary of the resources created. Additionally, for certain resources, `helm` will also output the related resource. - For example, in this case, `helm` outputted all the `Pods` created by the `Deployment` resource. -- Any additional notes provided by the chart maintainer. For `k8s-service`, we output some commands you can use to check on the + For example, in this case, `helm` outputted all the `Pods` created by the `Daemonset` resource. +- Any additional notes provided by the chart maintainer. For `k8s-daemonset`, we output some commands you can use to check on the status of the service. Since we will be referring to this output for the remainder of this guide, it would be a good idea to copy paste the @@ -148,49 +148,49 @@ information about that release. Now that the service is installed and deployed, let's verify the deployment! 
-## Check the Status of the Deployment +## Check the Status of the Daemonset -In the previous step, we deployed Nginx using the `k8s-service` Helm Chart. Now we want to verify it has deployed +In the previous step, we deployed Fluentd using the `k8s-daemonset` Helm Chart. Now we want to verify it has deployed successfully. -Under the hood the Helm Chart creates a `Deployment` resource in Kubernetes. `Deployments` are a controller that can be -used to declaratively manage your application. When you create the `Deployment` resource, it instructs Kubernetes the +Under the hood the Helm Chart creates a `Daemonset` resource in Kubernetes. `Daemonsets` are a controller that can be +used to declaratively manage your application. When you create the `Daemonset` resource, it instructs Kubernetes the desired state of the application deployment (e.g how many `Pods` to use, what container image to use, any volumes to mount, etc). Kubernetes will then asynchronously create resources to match the desired state. This means that instead of -creating and updating `Pods` on the cluster, you can simply declare that you want 3 Nginx `Pods` deployed and let +creating and updating `Pods` on the cluster, you can simply declare that you want 3 Fluentd `Pods` deployed and let Kubernetes handle the details. The downside of this is that the deployment happens asynchronously. In other words, this means the Helm Chart may install successfully but the deployment could still fail. So let's look at the status of the deployment to confirm the deployment successfully finished. In the output above, the -`NOTES` section lists out a command that can be used to get information about the `Deployment`. So let's try running +`NOTES` section lists out a command that can be used to get information about the `Daemonset`. So let's try running that: ``` -$ kubectl get deployments --namespace default -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=queenly-liger" +$ kubectl get deployments --namespace default -l "app.kubernetes.io/name=fluentd,app.kubernetes.io/instance=queenly-liger" NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -queenly-liger-nginx 3 3 3 3 5m +queenly-liger-fluentd 3 3 3 3 5m ``` -In the output above, Kubernetes is reporting information about the `Pods` related to the `Deployment`. Each column is a +In the output above, Kubernetes is reporting information about the `Pods` related to the `Daemonset`. Each column is a count of the number of `Pods` that fit that description. In this case, we have the correct number of `Pods` that are up to date on the latest image (`UP-TO-DATE`) and available to accept traffic (`AVAILABLE`). When those columns diverge from the `DESIRED` column, then that means either the deployment is still in progress, or something failed in the process. You can further dig deeper using `describe`, or querying the different subresources such as the underlying Pods. For -this guide, we are satisfied with the `Deployment` status output above. See the [How do I check the status of the -rollout?](/charts/k8s-service/README.md#how-do-i-check-the-status-of-the-rollout) section of the chart README for +this guide, we are satisfied with the `Daemonset` status output above. See the [How do I check the status of the +rollout?](/charts/k8s-daemonset/README.md#how-do-i-check-the-status-of-the-rollout) section of the chart README for more details on how to check in on the detailed status of a rollout, and to help troubleshoot any issues in your environment. 
-Once you have confirmed the `Deployment` has rolled out successfully, the next step is to verify that Nginx is up and
+Once you have confirmed the `DaemonSet` has rolled out successfully, the next step is to verify that Fluentd is up and
accessible.
-## Accessing Nginx
+## Accessing Fluentd
### Accessing a Pod directly
-Let's first try accessing a single Nginx `Pod`. To do so, we will open a tunnel from our local machine that routes
+Let's first try accessing a single Fluentd `Pod`. To do so, we will open a tunnel from our local machine that routes
through the Kubernetes control plane to the underlying `Pod` on the worker nodes.
To open the tunnel, we need two pieces of information:
@@ -198,46 +198,46 @@ To open the tunnel, we need two pieces of information:
- The name of the `Pod` to open the tunnel to.
- The open ports on the `Pod` and the port we wish to access.
-To retrieve the name of a `Pod`, we can inspect the list of `Pods` created by the `Deployment`. As in the previous
+To retrieve the name of a `Pod`, we can inspect the list of `Pods` created by the `DaemonSet`. As in the previous
section, the `helm install` output notes contains a command we can use to get the list of `Pods` managed by the
-`Deployment`, so let's try running that here:
+`DaemonSet`, so let's try running that here:
```
-$ kubectl get pods --namespace default -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=queenly-liger"
+$ kubectl get pods --namespace default -l "app.kubernetes.io/name=fluentd,app.kubernetes.io/instance=queenly-liger"
NAME  READY  STATUS  RESTARTS  AGE
-queenly-liger-nginx-7b7bb49d-b8tf8  1/1  Running  0  13m
-queenly-liger-nginx-7b7bb49d-fgjd4  1/1  Running  0  13m
-queenly-liger-nginx-7b7bb49d-zxpcm  1/1  Running  0  13m
+queenly-liger-fluentd-7b7bb49d-b8tf8  1/1  Running  0  13m
+queenly-liger-fluentd-7b7bb49d-fgjd4  1/1  Running  0  13m
+queenly-liger-fluentd-7b7bb49d-zxpcm  1/1  Running  0  13m
```
Here you can see that there are 3 `Pods` in the `READY` state that match that criteria. Pick one of them to access from
the list above and record the name.
-Next, we need to see what ports are open on the `Pod`. The `k8s-service` Helm Chart will open ports 80 to the
+Next, we need to see what ports are open on the `Pod`. The `k8s-daemonset` Helm Chart will open port 80 on the
container by default. However, if you do not know which ports are open, you can inspect the `Pod` to get a list of the
open ports. To get detailed information about a `Pod`, use `kubectl describe pod NAME`.
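If all you want is the list of open container ports, a `jsonpath` query can pull them out directly. This is just a shortcut, assuming the `Pod` name from the listing above; with this chart's defaults it should print `80`, and the full `describe` output below contains the same information plus much more:

```
$ kubectl get pod queenly-liger-fluentd-7b7bb49d-b8tf8 --namespace default -o jsonpath='{.spec.containers[*].ports[*].containerPort}'
```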
In our example, we will pull detailed -information about the `Pod` `queenly-liger-nginx-7b7bb49d-b8tf8`: +information about the `Pod` `queenly-liger-fluentd-7b7bb49d-b8tf8`: ``` -$ kubectl describe pod queenly-liger-nginx-7b7bb49d-b8tf8 -Name: queenly-liger-nginx-7b7bb49d-b8tf8 +$ kubectl describe pod queenly-liger-fluentd-7b7bb49d-b8tf8 +Name: queenly-liger-fluentd-7b7bb49d-b8tf8 Namespace: default Priority: 0 PriorityClassName: Node: minikube/10.0.2.15 Start Time: Sat, 16 Feb 2019 09:14:40 -0800 Labels: app.kubernetes.io/instance=queenly-liger - app.kubernetes.io/name=nginx + app.kubernetes.io/name=fluentd pod-template-hash=7b7bb49d Annotations: Status: Running IP: 172.17.0.6 -Controlled By: ReplicaSet/queenly-liger-nginx-7b7bb49d +Controlled By: ReplicaSet/queenly-liger-fluentd-7b7bb49d Containers: - nginx: + fluentd: Container ID: docker://ac921c94c8d5f9428815d64bfa541f0481ab37ddaf42a37f2ebec95eb61ef2c0 - Image: nginx:1.14.2 - Image ID: docker-pullable://nginx@sha256:d1eed840d5b357b897a872d17cdaa8a4fc8e6eb43faa8ad2febb31ce0c537910 + Image: fluentd:1.14.2 + Image ID: docker-pullable://fluentd@sha256:d1eed840d5b357b897a872d17cdaa8a4fc8e6eb43faa8ad2febb31ce0c537910 Ports: 80/TCP Host Ports: 0/TCP State: Running @@ -267,19 +267,19 @@ Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- - Normal Scheduled 15m default-scheduler Successfully assigned default/queenly-liger-nginx-7b7bb49d-b8tf8 to minikube - Normal Pulling 15m kubelet, minikube pulling image "nginx:1.14.2" - Normal Pulled 14m kubelet, minikube Successfully pulled image "nginx:1.14.2" + Normal Scheduled 15m default-scheduler Successfully assigned default/queenly-liger-fluentd-7b7bb49d-b8tf8 to minikube + Normal Pulling 15m kubelet, minikube pulling image "fluentd:1.14.2" + Normal Pulled 14m kubelet, minikube Successfully pulled image "fluentd:1.14.2" Normal Created 14m kubelet, minikube Created container Normal Started 14m kubelet, minikube Started container ``` This outputs all the detailed metadata about the running `Pod`, as well as an event log of all the cluster activity -related to the `Pod`. In the output, the `Containers` section shows addtional information about each container deployed in the `Pod`. Since we want to know the open ports for the `nginx` container, we will look at the `Ports` section of the `nginx` container in that output. Here is the specific output we are interested in: +related to the `Pod`. In the output, the `Containers` section shows addtional information about each container deployed in the `Pod`. Since we want to know the open ports for the `fluentd` container, we will look at the `Ports` section of the `fluentd` container in that output. Here is the specific output we are interested in: ``` Containers: - nginx: + fluentd: Ports: 80/TCP ``` @@ -288,7 +288,7 @@ In the output, we confirm that indeed port 80 is open. So let's open a port forw In this example, we will open a tunnel from port 8080 on our local machine to port 80 of the `Pod`: ``` -$ kubectl port-forward queenly-liger-nginx-7b7bb49d-b8tf8 8080:80 +$ kubectl port-forward queenly-liger-fluentd-7b7bb49d-b8tf8 8080:80 Forwarding from 127.0.0.1:8080 -> 80 Forwarding from [::1]:8080 -> 80 ``` @@ -296,8 +296,8 @@ Forwarding from [::1]:8080 -> 80 This command will run in the foreground, and keeps the tunnel open as long as the command is running. You can close the tunnel at any time by hitting `Ctrl+C`. -Now try accessing `localhost:8080` in the browser. 
You should get the default nginx welcome page. Assuming you do not -have nginx running locally, this means that you have successfully accessed the `Pod` from your local machine! +Now try accessing `localhost:8080` in the browser. You should get the default fluentd welcome page. Assuming you do not +have fluentd running locally, this means that you have successfully accessed the `Pod` from your local machine! ### Accessing a Pod through a Service @@ -312,7 +312,7 @@ status. The `READY` status is managed using `readinessProbes`: as long as the `P `Pod` will be marked `READY` and kept in the pool for the `Service`. There are several different types of `Services`. You can learn more about the different types in the [How do I expose my -application](/charts/k8s-service/README.md#how-do-i-expose-my-application-internally-to-the-cluster) section of the +application](/charts/k8s-daemonset/README.md#how-do-i-expose-my-application-internally-to-the-cluster) section of the chart README. For this example, we used the default `Service` resource created by the chart, but overrode the type to be `NodePort`. A `NodePort` `Service` exposes a port on the Kubernetes worker that routes to the `Service` endpoint. This endpoint will load balance across the `Pods` that match the selector for the `Service`. @@ -326,18 +326,18 @@ VM on your machine, which means that the ip of the node will be incorrect. So in node IP in Kubernetes, we will instead use `minikube` to get the ip address of the `minikube` VM to use as the node IP: ```bash -export NODE_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services queenly-liger-nginx) +export NODE_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services queenly-liger-fluentd) export NODE_IP=$(minikube ip) echo http://$NODE_IP:$NODE_PORT ``` The first command queries the `Service` resource to find out the node port that was used to expose the service. The second command queries the ip address of `minikub`. The last command will `echo` out the endpoint where the service is -available. Try hitting that endpoint in your browser and you should see the familiar nginx splash screen. +available. Try hitting that endpoint in your browser and you should see the familiar fluentd splash screen. -## Undeploying Nginx +## Undeploying Fluentd -Once you have tested and are satisfied with your Nginx service, you can undeploy it to clean up your cluster. To +Once you have tested and are satisfied with your Fluentd service, you can undeploy it to clean up your cluster. To undeploy the service, you need to delete the corresponding Helm Release. Helm Releases are what tracks all the resources associated with a chart in a single deployment. @@ -349,7 +349,7 @@ If you forget the release name, you can always look it up from the list of deplo ``` $ helm ls NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE -queenly-liger 1 Sat Feb 16 11:36:01 2019 DEPLOYED k8s-service-0.0.1-replace default +queenly-liger 1 Sat Feb 16 11:36:01 2019 DEPLOYED k8s-daemonset-0.0.1-replace default ``` Once you have the release name, you can use the `helm uninstall` command to delete a release and undeploy all the @@ -364,8 +364,8 @@ To check, you can use `kubectl` to query for the resources. 
For example, now if get an error: ``` -$ kubectl get --namespace default services queenly-liger-nginx -Error from server (NotFound): services "queenly-liger-nginx" not found +$ kubectl get --namespace default services queenly-liger-fluentd +Error from server (NotFound): services "queenly-liger-fluentd" not found ``` @@ -375,7 +375,7 @@ Congratulations! At this point, you have: - Setup `minikube` to have a local dev environment of Kubernetes. - Installed and deployed Helm on `minikube`. -- Deployed nginx on to `minikube` using the `k8s-service` Helm Chart. +- Deployed fluentd on to `minikube` using the `k8s-daemonset` Helm Chart. - Verified the deployment by querying for resources using `kubectl` and opening port forwards to access the endpoints. -To learn more about the `k8s-service` Helm Chart, refer to [the chart documentation](/charts/k8s-service). +To learn more about the `k8s-daemonset` Helm Chart, refer to [the chart documentation](/charts/k8s-daemonset). diff --git a/examples/k8s-daemonset-nginx/values.yaml b/examples/k8s-daemonset-fluentd/values.yaml similarity index 89% rename from examples/k8s-daemonset-nginx/values.yaml rename to examples/k8s-daemonset-fluentd/values.yaml index d1d9abe4..95187b1c 100644 --- a/examples/k8s-daemonset-nginx/values.yaml +++ b/examples/k8s-daemonset-fluentd/values.yaml @@ -1,19 +1,19 @@ #---------------------------------------------------------------------------------------------------------------------- # CHART PARAMETERS FOR NGINX EXAMPLE -# This file declares the required values for the k8s-daemonset helm chart to deploy nginx. +# This file declares the required values for the k8s-daemonset helm chart to deploy fluentd. # This is a YAML-formatted file. #---------------------------------------------------------------------------------------------------------------------- #---------------------------------------------------------------------------------------------------------------------- # REQUIRED VALUES OF CHART -# These are the required values defined by the k8s-daemonset chart. Here we will set them to deploy an nginx container. +# These are the required values defined by the k8s-daemonset chart. Here we will set them to deploy an fluentd container. #---------------------------------------------------------------------------------------------------------------------- # containerImage is a map that describes the container image that should be used to serve the application managed by # the k8s-daemonset chart. # The expected keys are: # - repository (string) (required) : The container image repository that should be used. -# E.g `nginx` ; `gcr.io/kubernetes-helm/tiller` +# E.g `fluentd` ; `gcr.io/kubernetes-helm/tiller` # - tag (string) (required) : The tag of the image (e.g `latest`) that should be used. We recommend using a # fixed tag or the SHA of the image. Avoid using the tags `latest`, `head`, # `canary`, or other tags that are designed to be “floating”. @@ -21,16 +21,16 @@ # the official Kubernetes docs for more info. If undefined, this will default to # `IfNotPresent`. # -# The following example deploys the `nginx:stable` image with a `IfNotPresent` image pull policy, which indicates that +# The following example deploys the `fluentd:stable` image with a `IfNotPresent` image pull policy, which indicates that # the image should only be pulled if it has not been pulled previously. We deploy a specific, locked tag so that we -# don't inadvertently upgrade nginx during a deployment that changes some other unrelated input value. 
+# don't inadvertently upgrade fluentd during a deployment that changes some other unrelated input value. containerImage: repository: fluent/fluentd-kubernetes-daemonset tag: v1-debian-syslog pullPolicy: IfNotPresent # applicationName is a string that names the application. This is used to label the pod and to name the main application -# container in the pod spec. Here we use nginx as the name since we are deploying nginx. +# container in the pod spec. Here we use fluentd as the name since we are deploying fluentd. applicationName: "fluentd" #---------------------------------------------------------------------------------------------------------------------- diff --git a/test/k8s_daemonset_nginx_example_test.go b/test/k8s_daemonset_fluentd_example_test.go similarity index 82% rename from test/k8s_daemonset_nginx_example_test.go rename to test/k8s_daemonset_fluentd_example_test.go index 10fdc5b6..eb4912ae 100644 --- a/test/k8s_daemonset_nginx_example_test.go +++ b/test/k8s_daemonset_fluentd_example_test.go @@ -25,11 +25,11 @@ import ( // // 1. We can deploy the example // 2. The deployment succeeds without errors -// 3. We can open a port forward to one of the Pods and access nginx -// 4. We can access nginx via the service endpoint -// 5. We can access nginx via the ingress endpoint -// 6. If we set a lower priority path, the application path takes precendence over the nginx service -// 7. If we set a higher priority path, that takes precedence over the nginx service +// 3. We can open a port forward to one of the Pods and access fluentd +// 4. We can access fluentd via the service endpoint +// 5. We can access fluentd via the ingress endpoint +// 6. If we set a lower priority path, the application path takes precendence over the fluentd service +// 7. 
If we set a higher priority path, that takes precedence over the fluentd service func TestK8SServiceNginxExample(t *testing.T) { t.Parallel() @@ -46,7 +46,7 @@ func TestK8SServiceNginxExample(t *testing.T) { helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) require.NoError(t, err) - examplePath, err := filepath.Abs(filepath.Join("..", "examples", "k8s-daemonset-nginx")) + examplePath, err := filepath.Abs(filepath.Join("..", "examples", "k8s-daemonset-fluentd")) require.NoError(t, err) // Create a test namespace to deploy resources into, to avoid colliding with other tests @@ -59,7 +59,7 @@ func TestK8SServiceNginxExample(t *testing.T) { }) kubectlOptions := test_structure.LoadKubectlOptions(t, workingDir) uniqueID := test_structure.LoadString(t, workingDir, "uniqueID") - testNamespace := fmt.Sprintf("k8s-daemonset-nginx-%s", strings.ToLower(uniqueID)) + testNamespace := fmt.Sprintf("k8s-daemonset-fluentd-%s", strings.ToLower(uniqueID)) defer test_structure.RunTestStage(t, "delete_namespace", func() { k8s.DeleteNamespace(t, kubectlOptions, testNamespace) @@ -73,7 +73,7 @@ func TestK8SServiceNginxExample(t *testing.T) { // Use the values file in the example and deploy the chart in the test namespace // Set a random release name - releaseName := fmt.Sprintf("k8s-daemonset-nginx-%s", strings.ToLower(uniqueID)) + releaseName := fmt.Sprintf("k8s-daemonset-fluentd-%s", strings.ToLower(uniqueID)) options := &helm.Options{ KubectlOptions: kubectlOptions, ValuesFiles: []string{filepath.Join(examplePath, "values.yaml")}, @@ -82,8 +82,8 @@ func TestK8SServiceNginxExample(t *testing.T) { "ingress.path": "/app", "ingress.pathType": "Prefix", "ingress.servicePort": "http", - "ingress.annotations.kubernetes\\.io/ingress\\.class": "nginx", - "ingress.annotations.nginx\\.ingress\\.kubernetes\\.io/rewrite-target": "/", + "ingress.annotations.kubernetes\\.io/ingress\\.class": "fluentd", + "ingress.annotations.fluentd\\.ingress\\.kubernetes\\.io/rewrite-target": "/", "ingress.additionalPaths[0].path": "/app", "ingress.additionalPaths[0].pathType": "Prefix", "ingress.additionalPaths[0].serviceName": "black-hole", @@ -104,13 +104,13 @@ func TestK8SServiceNginxExample(t *testing.T) { }) test_structure.RunTestStage(t, "validate_initial_deployment", func() { - verifyPodsCreatedSuccessfully(t, kubectlOptions, "nginx", releaseName, NumPodsExpected) - verifyAllPodsAvailable(t, kubectlOptions, "nginx", releaseName, nginxValidationFunction) - verifyServiceAvailable(t, kubectlOptions, "nginx", releaseName, nginxValidationFunction) + verifyPodsCreatedSuccessfully(t, kubectlOptions, "fluentd", releaseName, NumPodsExpected) + verifyAllPodsAvailable(t, kubectlOptions, "fluentd", releaseName, fluentdValidationFunction) + verifyServiceAvailable(t, kubectlOptions, "fluentd", releaseName, fluentdValidationFunction) - // We expect this to succeed, because the black hole service that overlaps with the nginx service is added as lower + // We expect this to succeed, because the black hole service that overlaps with the fluentd service is added as lower // priority. 
- verifyIngressAvailable(t, kubectlOptions, releaseName, "/app", nginxValidationFunction) + verifyIngressAvailable(t, kubectlOptions, releaseName, "/app", fluentdValidationFunction) // On the other hand, we expect this to fail because the black hole service does not exist verifyIngressAvailable(t, kubectlOptions, releaseName, "/black-hole", serviceUnavailableValidationFunction) @@ -127,18 +127,18 @@ func TestK8SServiceNginxExample(t *testing.T) { test_structure.RunTestStage(t, "validate_upgrade", func() { // We expect the service to still come up cleanly - verifyPodsCreatedSuccessfully(t, kubectlOptions, "nginx", releaseName, NumPodsExpected) - verifyAllPodsAvailable(t, kubectlOptions, "nginx", releaseName, nginxValidationFunction) - verifyServiceAvailable(t, kubectlOptions, "nginx", releaseName, nginxValidationFunction) + verifyPodsCreatedSuccessfully(t, kubectlOptions, "fluentd", releaseName, NumPodsExpected) + verifyAllPodsAvailable(t, kubectlOptions, "fluentd", releaseName, fluentdValidationFunction) + verifyServiceAvailable(t, kubectlOptions, "fluentd", releaseName, fluentdValidationFunction) - // ... but now the nginx service via ingress should be unavailable because of the higher priority black hole path + // ... but now the fluentd service via ingress should be unavailable because of the higher priority black hole path verifyIngressAvailable(t, kubectlOptions, releaseName, "/app", serviceUnavailableValidationFunction) }) } -// nginxValidationFunction checks that we get a 200 response with the nginx welcome page. -func nginxValidationFunction(statusCode int, body string) bool { - return statusCode == 200 && strings.Contains(body, "Welcome to nginx") +// fluentdValidationFunction checks that we get a 200 response with the fluentd welcome page. +func fluentdValidationFunction(statusCode int, body string) bool { + return statusCode == 200 && strings.Contains(body, "Welcome to fluentd") } // serviceUnavailableValidationFunction checks that we get a 503 response and the maintenance page diff --git a/test/k8s_daemonset_template_render_helpers_for_test.go b/test/k8s_daemonset_template_render_helpers_for_test.go new file mode 100644 index 00000000..441b0247 --- /dev/null +++ b/test/k8s_daemonset_template_render_helpers_for_test.go @@ -0,0 +1,167 @@ +//go:build all || tpl +// +build all tpl + +// NOTE: We use build flags to differentiate between template tests and integration tests so that you can conveniently +// run just the template tests. See the test README for more information. + +package test + +import ( + "path/filepath" + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + extv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" + + certapi "github.com/GoogleCloudPlatform/gke-managed-certs/pkg/apis/networking.gke.io/v1beta1" +) + +func renderK8SServiceDeploymentWithSetValues(t *testing.T, setValues map[string]string) appsv1.DaemonSet { + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) + require.NoError(t, err) + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. 
+ options := &helm.Options{ + ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, + SetValues: setValues, + } + // Render just the daemonset resource + out := helm.RenderTemplate(t, options, helmChartPath, "daemonset", []string{"templates/daemonset.yaml"}) + + // Parse the daemonset and return it + var daemonset appsv1.DaemonSet + helm.UnmarshalK8SYaml(t, out, &daemonset) + return daemonset +} + +func renderK8SServiceCanaryDeploymentWithSetValues(t *testing.T, setValues map[string]string) appsv1.DaemonSet { + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) + require.NoError(t, err) + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. + options := &helm.Options{ + ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, + SetValues: setValues, + } + // Render just the canary daemonset resource + out := helm.RenderTemplate(t, options, helmChartPath, "canarydaemonset", []string{"templates/canarydaemonset.yaml"}) + + // Parse the canary daemonset and return it + var canarydaemonset appsv1.DaemonSet + helm.UnmarshalK8SYaml(t, out, &canarydaemonset) + return canarydaemonset +} + +func renderK8SServiceIngressWithSetValues(t *testing.T, setValues map[string]string) networkingv1.Ingress { + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) + require.NoError(t, err) + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. + options := &helm.Options{ + ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, + SetValues: setValues, + } + // Render just the ingress resource + out := helm.RenderTemplate(t, options, helmChartPath, "ingress", []string{"templates/ingress.yaml"}) + + // Parse the ingress and return it + var ingress networkingv1.Ingress + helm.UnmarshalK8SYaml(t, out, &ingress) + return ingress +} + +func renderK8SServiceIngressWithValuesFile(t *testing.T, valuesFilePath string) networkingv1.Ingress { + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) + require.NoError(t, err) + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. + options := &helm.Options{ + ValuesFiles: []string{ + filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml"), + valuesFilePath, + }, + } + // Render just the ingress resource + out := helm.RenderTemplate(t, options, helmChartPath, "ingress", []string{"templates/ingress.yaml"}) + + // Parse the ingress and return it + var ingress networkingv1.Ingress + helm.UnmarshalK8SYaml(t, out, &ingress) + return ingress +} + +func renderK8SServiceExtV1Beta1IngressWithSetValues(t *testing.T, setValues map[string]string) extv1beta1.Ingress { + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) + require.NoError(t, err) + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. 
+ options := &helm.Options{ + ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, + SetValues: setValues, + } + // Render just the ingress resource + out := helm.RenderTemplate(t, options, helmChartPath, "ingress", []string{"templates/ingress.yaml"}) + + // Parse the ingress and return it + var ingress extv1beta1.Ingress + helm.UnmarshalK8SYaml(t, out, &ingress) + return ingress +} + +func renderK8SServiceManagedCertificateWithSetValues(t *testing.T, setValues map[string]string) certapi.ManagedCertificate { + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) + require.NoError(t, err) + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. + options := &helm.Options{ + ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, + SetValues: setValues, + } + // Render just the google managed certificate resource + out := helm.RenderTemplate(t, options, helmChartPath, "gmc", []string{"templates/gmc.yaml"}) + + // Parse the google managed certificate and return it + var cert certapi.ManagedCertificate + helm.UnmarshalK8SYaml(t, out, &cert) + return cert +} + +func renderK8SServiceAccountWithSetValues(t *testing.T, setValues map[string]string) corev1.ServiceAccount { + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) + require.NoError(t, err) + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. + options := &helm.Options{ + ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, + SetValues: setValues, + } + // Render just the service account resource + out := helm.RenderTemplate(t, options, helmChartPath, "serviceaccount", []string{"templates/serviceaccount.yaml"}) + + // Parse the service account and return it + var serviceaccount corev1.ServiceAccount + helm.UnmarshalK8SYaml(t, out, &serviceaccount) + return serviceaccount +} + +func renderK8SServiceWithSetValues(t *testing.T, setValues map[string]string) corev1.Service { + helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) + require.NoError(t, err) + + // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. 
+ options := &helm.Options{ + ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, + SetValues: setValues, + } + // Render just the service resource + out := helm.RenderTemplate(t, options, helmChartPath, "service", []string{"templates/service.yaml"}) + + // Parse the service and return it + var service corev1.Service + helm.UnmarshalK8SYaml(t, out, &service) + return service +} From f7459e3d90243f16a82e7bd37b05290a9e723bde Mon Sep 17 00:00:00 2001 From: Raghu Katti Date: Tue, 26 Apr 2022 16:55:40 -0400 Subject: [PATCH 3/7] Update README.md --- charts/k8s-daemonset/README.md | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/charts/k8s-daemonset/README.md b/charts/k8s-daemonset/README.md index c205d5c9..15800032 100644 --- a/charts/k8s-daemonset/README.md +++ b/charts/k8s-daemonset/README.md @@ -1,13 +1,13 @@ # Kubernetes Service Helm Chart This Helm Chart can be used to deploy your application container under a -[Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) resource onto your Kubernetes +[DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) resource onto your Kubernetes cluster. You can use this Helm Chart to run and deploy a long-running container, such as a web service or backend microservice. The container will be packaged into -[Pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) that are managed by the `Deployment` +[Pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) that are managed by the `DaemonSet` controller. -This Helm Chart can also be used to front the `Pods` of the `Deployment` resource with a +This Helm Chart can also be used to front the `Pods` of the `DaemonSet` resource with a [Service](https://kubernetes.io/docs/concepts/services-networking/service/) to provide a stable endpoint to access the `Pods`, as well as load balance traffic to them. The Helm Chart can also specify [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) rules to further configure complex routing @@ -29,9 +29,9 @@ back to [root README](/README.adoc#core-concepts) The following resources will be deployed with this Helm Chart, depending on which configuration values you use: -- `Deployment`: The main `Deployment` controller that will manage the application container image specified in the +- `Deployment`: The main `DaemonSet` controller that will manage the application container image specified in the `containerImage` input value. -- Secondary `Deployment` for use as canary: An optional `Deployment` controller that will manage a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of the application container image specified in the `canary.containerImage` input value. This is useful for testing a new application tag, in parallel to your stable tag, prior to rolling the new tag out. Created only if you configure the `canary.containerImage` values (and set `canary.enabled = true`). +- Secondary `DaemonSet` for use as canary: An optional `Deployment` controller that will manage a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of the application container image specified in the `canary.containerImage` input value. This is useful for testing a new application tag, in parallel to your stable tag, prior to rolling the new tag out. Created only if you configure the `canary.containerImage` values (and set `canary.enabled = true`). 
- `Service`: The `Service` resource providing a stable endpoint that can be used to address to `Pods` created by the `Deployment` controller. Created only if you configure the `service` input (and set `service.enabled = true`). @@ -39,13 +39,6 @@ The following resources will be deployed with this Helm Chart, depending on whic - `Ingress`: The `Ingress` resource providing host and path routing rules to the `Service` for the deployed `Ingress` controller in the cluster. Created only if you configure the `ingress` input (and set `ingress.enabled = true`). -- `Horizontal Pod Autoscaler`: The `Horizontal Pod Autoscaler` automatically scales the number of pods in a replication - controller, deployment, replica set or stateful set based on observed CPU or memory utilization. - Created only if the user sets `horizontalPodAutoscaler.enabled = true`. -- `PodDisruptionBudget`: The `PodDisruptionBudget` resource that specifies a disruption budget for the `Pods` managed by - the `Deployment`. This manages how many pods can be disrupted by a voluntary disruption (e.g - node maintenance). Created if you specify a non-zero value for the `minPodsAvailable` input - value. - `ManagedCertificate`: The `ManagedCertificate` is a [GCP](https://cloud.google.com/) -specific resource that creates a Google Managed SSL certificate. Google-managed SSL certificates are provisioned, renewed, and managed for your domain names. Read more about Google-managed SSL certificates [here](https://cloud.google.com/load-balancing/docs/ssl-certificates#managed-certs). Created only if you configure the `google.managedCertificate` input (and set `google.managedCertificate.enabled = true` and `google.managedCertificate.domainName = your.domain.name`). @@ -1134,11 +1127,11 @@ sideCarContainers: image: nginx:1.15.4 ``` -This input will be rendered in the `Deployment` resource as: +This input will be rendered in the `DaemonSet` resource as: ```yaml apiVersion: apps/v1 -kind: Deployment +kind: DaemonSet metadata: ... Snipped for brevity ... 
spec: From d498e6d55ab5751ba376b5956ee052fdac6bb19e Mon Sep 17 00:00:00 2001 From: Raghu Katti Date: Tue, 26 Apr 2022 17:51:02 -0400 Subject: [PATCH 4/7] Updated README --- charts/k8s-daemonset/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/k8s-daemonset/README.md b/charts/k8s-daemonset/README.md index 15800032..249010df 100644 --- a/charts/k8s-daemonset/README.md +++ b/charts/k8s-daemonset/README.md @@ -1,4 +1,4 @@ -# Kubernetes Service Helm Chart +# Kubernetes DaemonSet Helm Chart This Helm Chart can be used to deploy your application container under a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) resource onto your Kubernetes From 3b23df17dfefe6f685f031ecfeee737a89d4c20a Mon Sep 17 00:00:00 2001 From: Raghu Katti Date: Wed, 27 Apr 2022 13:50:56 -0400 Subject: [PATCH 5/7] Refactoring further --- charts/k8s-daemonset/.helmignore | 2 + charts/k8s-daemonset/Chart.yaml | 35 +- charts/k8s-daemonset/README.md | 1202 ----------------- charts/k8s-daemonset/linter_values.yaml | 42 - charts/k8s-daemonset/templates/NOTES.txt | 43 - .../templates/_capabilities_helpers.tpl | 42 - .../templates/_daemonset_spec.tpl | 427 ------ charts/k8s-daemonset/templates/_helpers.tpl | 73 - charts/k8s-daemonset/templates/daemonset.yaml | 59 +- charts/k8s-daemonset/templates/gmc.yaml | 27 - charts/k8s-daemonset/templates/ingress.yaml | 101 -- charts/k8s-daemonset/templates/pdb.yaml | 23 - charts/k8s-daemonset/templates/service.yaml | 42 - .../templates/serviceaccount.yaml | 22 - .../templates/servicemonitor.yaml | 22 - charts/k8s-daemonset/values.yaml | 742 +--------- examples/k8s-daemonset-fluentd/values.yaml | 96 +- 17 files changed, 137 insertions(+), 2863 deletions(-) delete mode 100644 charts/k8s-daemonset/README.md delete mode 100644 charts/k8s-daemonset/linter_values.yaml delete mode 100644 charts/k8s-daemonset/templates/NOTES.txt delete mode 100644 charts/k8s-daemonset/templates/_capabilities_helpers.tpl delete mode 100644 charts/k8s-daemonset/templates/_daemonset_spec.tpl delete mode 100644 charts/k8s-daemonset/templates/_helpers.tpl delete mode 100644 charts/k8s-daemonset/templates/gmc.yaml delete mode 100644 charts/k8s-daemonset/templates/ingress.yaml delete mode 100644 charts/k8s-daemonset/templates/pdb.yaml delete mode 100644 charts/k8s-daemonset/templates/service.yaml delete mode 100644 charts/k8s-daemonset/templates/serviceaccount.yaml delete mode 100644 charts/k8s-daemonset/templates/servicemonitor.yaml diff --git a/charts/k8s-daemonset/.helmignore b/charts/k8s-daemonset/.helmignore index f0c13194..0e8a0eb3 100644 --- a/charts/k8s-daemonset/.helmignore +++ b/charts/k8s-daemonset/.helmignore @@ -14,8 +14,10 @@ *.swp *.bak *.tmp +*.orig *~ # Various IDEs .project .idea/ *.tmproj +.vscode/ diff --git a/charts/k8s-daemonset/Chart.yaml b/charts/k8s-daemonset/Chart.yaml index 9f74c173..ad575858 100644 --- a/charts/k8s-daemonset/Chart.yaml +++ b/charts/k8s-daemonset/Chart.yaml @@ -1,11 +1,24 @@ -apiVersion: v1 -name: k8s-daemonset -description: A Helm chart to package your application container for Kubernetes -# This will be updated with the release tag in the CI/CD pipeline before publishing. This has to be a valid semver for -# the linter to accept. 
-version: 0.0.1-replace -home: https://github.com/gruntwork-io/helm-kubernetes-services -maintainers: - - name: Gruntwork - email: info@gruntwork.io - url: https://gruntwork.io +apiVersion: v2 +name: k8s-service +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/charts/k8s-daemonset/README.md b/charts/k8s-daemonset/README.md deleted file mode 100644 index c205d5c9..00000000 --- a/charts/k8s-daemonset/README.md +++ /dev/null @@ -1,1202 +0,0 @@ -# Kubernetes Service Helm Chart - -This Helm Chart can be used to deploy your application container under a -[Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) resource onto your Kubernetes -cluster. You can use this Helm Chart to run and deploy a long-running container, such as a web service or backend -microservice. The container will be packaged into -[Pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) that are managed by the `Deployment` -controller. - -This Helm Chart can also be used to front the `Pods` of the `Deployment` resource with a -[Service](https://kubernetes.io/docs/concepts/services-networking/service/) to provide a stable endpoint to access the -`Pods`, as well as load balance traffic to them. The Helm Chart can also specify -[Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) rules to further configure complex routing -rules in front of the `Service`. - -If you're using the chart to deploy to [GKE](https://cloud.google.com/kubernetes-engine/), you can also use the chart to deploy a [Google Managed SSL Certificate](https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs) and associate it with the Ingress. - - -## How to use this chart? - -* See the [root README](/README.adoc) for general instructions on using Gruntwork Helm Charts. -* See the [examples](/examples) folder for example usage. -* See the provided [values.yaml](./values.yaml) file for the required and optional configuration values that you can set - on this chart. - -back to [root README](/README.adoc#core-concepts) - -## What resources does this Helm Chart deploy? - -The following resources will be deployed with this Helm Chart, depending on which configuration values you use: - -- `Deployment`: The main `Deployment` controller that will manage the application container image specified in the - `containerImage` input value. 
-- Secondary `Deployment` for use as canary: An optional `Deployment` controller that will manage a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of the application container image specified in the `canary.containerImage` input value. This is useful for testing a new application tag, in parallel to your stable tag, prior to rolling the new tag out. Created only if you configure the `canary.containerImage` values (and set `canary.enabled = true`). -- `Service`: The `Service` resource providing a stable endpoint that can be used to address to `Pods` created by the - `Deployment` controller. Created only if you configure the `service` input (and set - `service.enabled = true`). -- `ServiceMonitor`: The `ServiceMonitor` describes the set of targets to be monitored by Prometheus. Created only if you configure the service input and set `serviceMonitor.enabled = true`. -- `Ingress`: The `Ingress` resource providing host and path routing rules to the `Service` for the deployed `Ingress` - controller in the cluster. Created only if you configure the `ingress` input (and set - `ingress.enabled = true`). -- `Horizontal Pod Autoscaler`: The `Horizontal Pod Autoscaler` automatically scales the number of pods in a replication - controller, deployment, replica set or stateful set based on observed CPU or memory utilization. - Created only if the user sets `horizontalPodAutoscaler.enabled = true`. -- `PodDisruptionBudget`: The `PodDisruptionBudget` resource that specifies a disruption budget for the `Pods` managed by - the `Deployment`. This manages how many pods can be disrupted by a voluntary disruption (e.g - node maintenance). Created if you specify a non-zero value for the `minPodsAvailable` input - value. -- `ManagedCertificate`: The `ManagedCertificate` is a [GCP](https://cloud.google.com/) -specific resource that creates a Google Managed SSL certificate. Google-managed SSL certificates are provisioned, renewed, and managed for your domain names. Read more about Google-managed SSL certificates [here](https://cloud.google.com/load-balancing/docs/ssl-certificates#managed-certs). Created only if you configure the `google.managedCertificate` input (and set - `google.managedCertificate.enabled = true` and `google.managedCertificate.domainName = your.domain.name`). - -back to [root README](/README.adoc#core-concepts) - -## How do I deploy additional services not managed by the chart? - -You can create custom Kubernetes resources, that are not directly managed by the chart, within the `customResources` -key. You provide each resource manifest directly as a value under `customResources.resources` and set -`customResources.enabled` to `true`. For examples of custom resources, take a look at the examples in -[test/fixtures/custom_resources_values.yaml](../../test/fixtures/custom_resources_values.yaml) and -[test/fixtures/multiple_custom_resources_values.yaml](../../test/fixtures/multiple_custom_resources_values.yaml). - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I expose my application internally to the cluster? - -In general, `Pods` are considered ephemeral in Kubernetes. `Pods` can come and go at any point in time, either because -containers fail or the underlying instances crash. In either case, the dynamic nature of `Pods` make it difficult to -consistently access your application if you are individually addressing the `Pods` directly. 
- -Traditionally, this is solved using service discovery, where you have a stateful system that the `Pods` would register -to when they are available. Then, your other applications can query the system to find all the available `Pods` and -access one of the available ones. - -Kubernetes provides a built in mechanism for service discovery in the `Service` resource. `Services` are an abstraction -that groups a set of `Pods` behind a consistent, stable endpoint to address them. By creating a `Service` resource, you -can provide a single endpoint to other applications to connect to the `Pods` behind the `Service`, and not worry about -the dynamic nature of the `Pods`. - -You can read a more detailed description of `Services` in [the official -documentation](https://kubernetes.io/docs/concepts/services-networking/service/). Here we will cover just enough to -understand how to access your app. - -By default, this Helm Chart will deploy your application container in a `Pod` that exposes ports 80. These will -be exposed to the Kubernetes cluster behind the `Service` resource, which exposes port 80. You can modify this behavior -by overriding the `containerPorts` input value and the `service` input value. See the corresponding section in the -`values.yaml` file for more details. - -Once the `Service` is created, you can check what endpoint the `Service` provides by querying Kubernetes using -`kubectl`. First, retrieve the `Service` name that is outputted in the install summary when you first install the Helm -Chart. If you forget, you can get the same information at a later point using `helm status`. For example, if you had -previously installed this chart under the name `edge-service`, you can run the following command to see the created -resources: - -```bash -$ helm status edge-service -LAST DEPLOYED: Fri Feb 8 16:25:49 2019 -NAMESPACE: default -STATUS: DEPLOYED - -RESOURCES: -==> v1/Service -NAME AGE -edge-service-nginx 24m - -==> v1/Deployment -edge-service-nginx 24m - -==> v1/Pod(related) - -NAME READY STATUS RESTARTS AGE -edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 24m -edge-service-nginx-844c978df7-mln26 1/1 Running 0 24m -edge-service-nginx-844c978df7-rdsr8 1/1 Running 0 24m -``` - -This will show you some metadata about the release, the deployed resources, and any notes provided by the Helm Chart. In -this example, the service name is `edge-service-nginx` so we will use that to query the `Service`: - -```bash -$ kubectl get service edge-service-nginx -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -edge-service-nginx ClusterIP 172.20.186.176 80/TCP 27m -``` - -Here you can see basic information about the `Service`. The important piece of information is the `CLUSTER-IP` and -`PORT` fields, which tell you the available endpoint for the `Service`, and any exposed ports. Given that, any `Pod` in -your Kubernetes cluster can access the `Pods` of this application by hitting `{CLUSTER-IP}:{PORT}`. So for this example, -that will be `172.20.186.176:80`. - -But what if you want to automatically find a `Service` by name? The name of the `Service` created by this Helm Chart is -always `{RELEASE_NAME}-{applicationName}`, where `applicationName` is provided in the input value and `RELEASE_NAME` is -set when you install the Helm Chart. This means that the name is predictable, while the allocated IP address may not be. 
- -To address the `Service` by name, Kubernetes provides two ways: - -- environment variables -- DNS - -### Addressing Service by Environment Variables - -For each active `Service` that a `Pod` has access to, Kubernetes will automatically set a set of environment variables -in the container. These are `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` to get the host address (ip address) -and port respectively, where `SVCNAME` is the name of the `Service`. Note that `SVCNAME` will be the all caps version -with underscores of the `Service` name. - -Using the previous example where we installed this chart with a release name `edge-service` and `applicationName` -`nginx`, we get the `Service` name `edge-service-nginx`. Kubernetes will expose the following environment variables to -all containers that can access the `Service`: - -``` -EDGE_SERVICE_NGINX_SERVICE_HOST=172.20.186.176 -EDGE_SERVICE_NGINX_SERVICE_PORT=80 -``` - -Note that environment variables are set when the container first boots up. This means that if you already had `Pods` -deployed in your system before the `Service` was created, you will have to cycle the `Pods` in order to get the -environment variables. If you wish to avoid ordering issues, you can use the DNS method to address the `Service` -instead, if that is available. - -### Addressing Service by DNS - -If your Kubernetes cluster is deployed with the DNS add-on (this is automatically installed for EKS and GKE), then you -can rely on DNS to address your `Service`. Every `Service` in Kubernetes will register the domain -`{SVCNAME}.{NAMESPACE}.svc.cluster.local` to the DNS service of the cluster. This means that all your `Pods` in the -cluster can get the `Service` host by hitting that domain. - -The `NAMESPACE` in the domain refers to the `Namespace` where the `Service` was created. By default, all resources are -created in the `default` namespace. This is configurable at install time of the Helm Chart using the `--namespace` -option. - -In our example, we deployed the chart to the `default` `Namespace`, and the `Service` name is `edge-service-nginx`. So in -this case, the domain of the `Service` will be `edge-service-nginx.default.svc.cluster.local`. When any `Pod` addresses -that domain, it will get the address `172.20.186.176`. - -Note that DNS does not resolve ports, so in this case, you will have to know which port the `Service` uses. So in your -`Pod`, you will have to know that the `Service` exposes port `80` when you address it in your code for the container as -`edge-service-nginx.default.svc.cluster.local:80`. However, like the `Service` name, this should be predictable since it -is specified in the Helm Chart input value. - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I expose my application externally, outside of the cluster? - -Similar to the previous section ([How do I expose my application internally to the -cluster?](#how-do-i-expose-my-application-internally-to-the-cluster), you can use a `Service` resource to expose your -application externally. The primary service type that facilitates external access is the `NodePort` `Service` type. - -The `NodePort` `Service` type will expose the `Service` by binding an available port on the network interface of the -physical machines running the `Pod`. This is different from a network interface internal to Kubernetes, which is only -accessible within the cluster. Since the port is on the host machine network interface, you can access the `Service` by -hitting that port on the node. 
- -For example, suppose you had a 2 node Kubernetes cluster deployed on EC2. Suppose further that all your EC2 instances -have public IP addresses that you can access. For the sake of this example, we will assign random IP addresses to the -instances: - -- 54.219.117.250 -- 38.110.235.198 - -Now let's assume you deployed this helm chart using the `NodePort` `Service` type. You can do this by setting the -`service.type` input value to `NodePort`: - -```yaml -service: - enabled: true - type: NodePort - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP -``` - -When you install this helm chart with this input config, helm will deploy the `Service` as a `NodePort`, binding an -available port on the host machine to access the `Service`. You can confirm this by querying the `Service` using -`kubectl`: - -```bash -$ kubectl get service edge-service-nginx -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -edge-service-nginx NodePort 10.99.244.96 80:31035/TCP 33s -``` - -In this example, you can see that the `Service` type is `NodePort` as expected. Additionally, you can see that the there -is a port binding between port 80 and 31035. This port binding refers to the binding between the `Service` port (80 in -this case) and the host port (31035 in this case). - -One thing to be aware of about `NodePorts` is that the port binding will exist on all nodes in the cluster. This means -that, in our 2 node example, both nodes now have a port binding of 31035 on the host network interface that routes to -the `Service`, regardless of whether or not the node is running the `Pods` backing the `Service` endpoint. This means -that you can reach the `Service` on both of the following endpoints: - -- `54.219.117.250:31035` -- `38.110.235.198:31035` - -This means that no two `Service` can share the same `NodePort`, as the port binding is shared across the cluster. -Additionally, if you happen to hit a node that is not running a `Pod` backing the `Service`, Kubernetes will -automatically hop to one that is. - -You might use the `NodePort` if you do not wish to manage load balancers through Kubernetes, or if you are running -Kubernetes on prem where you do not have native support for managed load balancers. - -To summarize: - -- `NodePort` is the simplest way to expose your `Service` to externally to the cluster. -- You have a limit on the number of `NodePort` `Services` you can have in your cluster, imposed by the number of open ports - available on your host machines. -- You have potentially inefficient hopping if you happen to route to a node that is not running the `Pod` backing the - `Service`. - -Additionally, Kubernetes provides two mechanisms to manage an external load balancer that routes to the `NodePort` for -you. The two ways are: - -- [Using a `LoadBalancer` `Service` type](#loadbalancer-service-type) -- [Using `Ingress` resources with an `Ingress Controller`](#ingress-and-ingress-controllers) - -### LoadBalancer Service Type - -The `LoadBalancer` `Service` type will expose the `Service` by allocating a managed load balancer in the cloud that is -hosting the Kubernetes cluster. On AWS, this will be an ELB, while on GCP, this will be a Cloud Load Balancer. When the -`LoadBalancer` `Service` is created, Kubernetes will automatically create the underlying load balancer resource in the -cloud for you, and create all the target groups so that they route to the `Pods` backing the `Service`. 
- -You can deploy this helm chart using the `LoadBalancer` `Service` type by setting the `service.type` input value to -`LoadBalancer`: - -```yaml -service: - enabled: true - type: LoadBalancer - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP -``` - -When you install this helm chart with this input config, helm will deploy the `Service` as a `LoadBalancer`, allocating -a managed load balancer in the cloud hosting your Kubernetes cluster. You can get the attached load balancer by querying -the `Service` using `kubectl`. In this example, we will assume we are using EKS: - -``` -$ kubectl get service edge-service-nginx -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -edge-service-nginx LoadBalancer 172.20.7.35 a02fef4d02e41... 80:32127/TCP 1m -``` - -Now, in this example, we have an entry in the `EXTERNAL-IP` field. This is truncated here, but you can get the actual -output when you describe the service: - -``` -$ kubectl describe service edge-service-nginx -Name: edge-service-nginx -Namespace: default -Labels: app.kubernetes.io/instance=edge-service - app.kubernetes.io/managed-by=helm - app.kubernetes.io/name=nginx - gruntwork.io/app-name=nginx - helm.sh/chart=k8s-service-0.1.0 -Annotations: -Selector: app.kubernetes.io/instance=edge-service,app.kubernetes.io/name=nginx,gruntwork.io/app-name=nginx -Type: LoadBalancer -IP: 172.20.7.35 -LoadBalancer Ingress: a02fef4d02e4111e9891806271fc7470-173030870.us-west-2.elb.amazonaws.com -Port: app 80/TCP -TargetPort: 80/TCP -NodePort: app 32127/TCP -Endpoints: 10.0.3.19:80 -Session Affinity: None -External Traffic Policy: Cluster -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal EnsuringLoadBalancer 2m service-controller Ensuring load balancer - Normal EnsuredLoadBalancer 2m service-controller Ensured load balancer -``` - -In the describe output, there is a field named `LoadBalancer Ingress`. When you have a `LoadBalancer` `Service` type, -this field contains the public DNS endpoint of the associated load balancer resource in the cloud provider. In this -case, we have an AWS ELB instance, so this endpoint is the public endpoint of the associated ELB resource. - -**Note:** Eagle eyed readers might also notice that there is an associated `NodePort` on the resource. This is because under the -hood, `LoadBalancer` `Services` utilize `NodePorts` to handle the connection between the managed load balancer of the -cloud provider and the Kubernetes `Pods`. This is because at this time, there is no portable way to ensure that the -network between the cloud load balancers and Kubernetes can be shared such that the load balancers can route to the -internal network of the Kubernetes cluster. Therefore, Kubernetes resorts to using `NodePort` as an abstraction layer to -connect the `LoadBalancer` to the `Pods` backing the `Service`. This means that `LoadBalancer` `Services` share the same -drawbacks as using a `NodePort` `Service`. - -To summarize: - -- `LoadBalancer` provides a way to set up a cloud load balancer resource that routes to the provisioned `NodePort` on - each node in your Kubernetes cluster. -- `LoadBalancer` can be used to provide a persistent endpoint that is robust to the ephemeral nature of nodes in your - cluster. E.g it is able to route to live nodes in the face of node failures. -- `LoadBalancer` does not support weighted balancing. This means that you cannot balance the traffic so that it prefers - nodes that have more instances of the `Pod` running. 
-- Note that under the hood, `LoadBalancer` utilizes a `NodePort` `Service`, and thus shares the same limits as `NodePort`. - -### Ingress and Ingress Controllers - -`Ingress` is a mechanism in Kubernetes that abstracts externally exposing a `Service` from the `Service` config itself. -`Ingress` resources support: - -- assigning an externally accessible URL to a `Service` -- perform hostname and path based routing of `Services` -- load balance traffic using customizable balancing rules -- terminate SSL - -You can read more about `Ingress` resources in [the official -documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/). Here, we will cover the basics to -understand how `Ingress` can be used to externally expose the `Service`. - -At a high level, the `Ingress` resource is used to specify the configuration for a particular `Service`. In turn, the -`Ingress Controller` is responsible for fulfilling those configurations in the cluster. This means that the first -decision to make in using `Ingress` resources, is selecting an appropriate `Ingress Controller` for your cluster. - -#### Choosing an Ingress Controller - -Before you can use an `Ingress` resource, you must install an `Ingress Controller` in your Kubernetes cluster. There are -many kinds of `Ingress Controllers` available, each with different properties. You can see [a few examples listed in the -official documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-controllers). - -When you use an external cloud `Ingress Controller` such as the [GCE Ingress -Controller](https://github.com/kubernetes/ingress-gce/blob/master/README.md) or [AWS ALB Ingress -Controller](https://github.com/kubernetes-sigs/aws-alb-ingress-controller), Kubernetes will allocate an externally -addressable load balancer (for GCE this will be a Cloud Load Balancer and for AWS this will be an ALB) that fulfills the -`Ingress` rules. This includes routing the domain names and paths to the right `Service` as configured by the `Ingress` -rules. Additionally, Kubernetes will manage the target groups of the load balancer so that they are up to date with -the latest `Ingress` configuration. However, in order for this to work, there needs to be some way for the load balancer -to connect to the `Pods` servicing the `Service`. Since the `Pods` are internal to the Kubernetes network and the load -balancers are external to the network, there must be a `NodePort` that links the two together. As such, like the -`LoadBalancer` `Service` type, these `Ingress Controllers` also require a `NodePort` under the hood. - - - -Alternatively, you can use an internal `Ingress Controller` that runs within Kubernetes as `Pods`. For example, the -official `nginx Ingress Controller` will launch `nginx` as `Pods` within your Kubernetes cluster. These `nginx` `Pods` -are then configured using `Ingress` resources, which then allows `nginx` to route to the right `Pods`. Since the `nginx` -`Pods` are internal to the Kubernetes network, there is no need for your `Services` to be `NodePorts` as they are -addressable within the network by the `Pods`. However, this means that you need some other mechanism to expose `nginx` -to the outside world, which will require a `NodePort`. The advantage of this approach, despite still requiring a -`NodePort`, is that you can have a single `NodePort` that routes to multiple services using hostnames or paths as -managed by `nginx`, as opposed to requiring a `NodePort` per `Service` you wish to expose. 
-
-Which `Ingress Controller` type you wish to use depends on your infrastructure needs. If you have relatively few
-`Services`, and you want the simplicity of a managed cloud load balancer experience, you might opt for the external
-`Ingress Controllers` such as the GCE and AWS ALB controllers. On the other hand, if you have thousands of microservices
-that push you to the limits of the available number of ports on a host machine, you might opt for an internal `Ingress
-Controller` approach. Whichever approach you decide on, be sure to document your decision where you install the particular
-`Ingress Controller` so that others in your team know and understand the tradeoffs you made.
-
-#### Configuring Ingress for your Service
-
-Once you have an `Ingress Controller` installed and configured on your Kubernetes cluster, you can start creating
-`Ingress` resources to add routes to it. This helm chart supports configuring an `Ingress` resource to complement the
-`Service` resource that is created in the chart.
-
-To add an `Ingress` resource, first make sure you have a `Service` enabled on the chart. Depending on the chosen
-`Ingress Controller`, the `Service` type should be `NodePort` or `ClusterIP`. Here, we will create a `NodePort`
-`Service` exposing port 80:
-
-```yaml
-service:
-  enabled: true
-  type: NodePort
-  ports:
-    app:
-      port: 80
-      targetPort: 80
-      protocol: TCP
-```
-
-Then, we will add the configuration for the `Ingress` resource by specifying the `ingress` input value. For this
-example, we will assume that we want to route `/app` to our `Service`, with the domain hosted on `app.yourco.com`:
-
-```yaml
-ingress:
-  enabled: true
-  path: /app
-  servicePort: 80
-  hosts:
-    - app.yourco.com
-```
-
-This will configure the load balancer backing the `Ingress Controller` to route any traffic with host and path
-prefix `app.yourco.com/app` to the `Service` on port 80. If `app.yourco.com` is configured to point to the `Ingress
-Controller` load balancer, then once you deploy the helm chart you should be able to start accessing your app on that
-endpoint.
-
-#### Registering additional paths
-
-Sometimes you might want to add additional path rules beyond the main service rule that is injected into the `Ingress`
-resource. For example, you might want a path that routes to the sidecar containers, or you might want to reuse a single
-`Ingress` for multiple different `Service` endpoints in order to share load balancers. For these situations, you can use
-the `additionalPaths` and `additionalPathsHigherPriority` input values.
-
-Consider the following `Service`, where we have the `app` served on port 80, and the `sidecarMonitor` served on port
-3000:
-
-```yaml
-service:
-  enabled: true
-  type: NodePort
-  ports:
-    app:
-      port: 80
-      targetPort: 80
-      protocol: TCP
-    sidecarMonitor:
-      port: 3000
-      targetPort: 3000
-      protocol: TCP
-```
-
-To route `/app` to the `app` service endpoint and `/sidecar` to the `sidecarMonitor` service endpoint, we will configure
-the `app` service path rules as the main service route and the `sidecarMonitor` as an additional path rule:
-
-```yaml
-ingress:
-  enabled: true
-  path: /app
-  servicePort: 80
-  additionalPaths:
-    - path: /sidecar
-      servicePort: 3000
-```
-
-Now suppose you had a sidecar service that returns a fixed response indicating server maintenance, and you want to
-temporarily route all requests to that endpoint without taking down the pod.
You can do this by creating a route that -catches all paths as a higher priority path using the `additionalPathsHigherPriority` input value. - -Consider the following `Service`, where we have the `app` served on port 80, and the `sidecarFixedResponse` served on -port 3000: - -```yaml -service: - enabled: true - type: NodePort - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP - sidecarFixedResponse: - port: 3000 - targetPort: 3000 - protocol: TCP -``` - -To route all traffic to the fixed response port: - -```yaml -ingress: - enabled: true - path: /app - servicePort: 80 - additionalPathsHigherPriority: - - path: /* - servicePort: 3000 -``` - -The `/*` rule which routes to port 3000 will always be used even when accessing the path `/app` because it will be -evaluated first when routing requests. - -back to [root README](/README.adoc#day-to-day-operations) - -### How do I expose additional ports? - -By default, this Helm Chart will deploy your application container in a Pod that exposes ports 80. Sometimes you might -want to expose additional ports in your application - for example a separate port for Prometheus metrics. You can expose -additional ports for your application by overriding `containerPorts` and `service` input values: - -```yaml - -containerPorts: - http: - port: 80 - protocol: TCP - prometheus: - port: 2020 - protocol: TCP - -service: - enabled: true - type: NodePort - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP - prometheus: - port: 2020 - targetPort: 2020 - protocol: TCP - -``` - - -## How do I deploy a worker service? - -Worker services typically do not have a RPC or web server interface to access it. Instead, worker services act on their -own and typically reach out to get the data they need. These services should be deployed without any ports exposed. -However, by default `k8s-service` will deploy an internally exposed service with port 80 open. - -To disable the default port, you can use the following `values.yaml` inputs: - -``` -containerPorts: - http: - disabled: true - -service: - enabled: false -``` - -This will override the default settings such that only the `Deployment` resource is created, with no ports exposed on -the container. - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I check the status of the rollout? - -This Helm Chart packages your application into a `Deployment` controller. The `Deployment` controller will be -responsible with managing the `Pods` of your application, ensuring that the Kubernetes cluster matches the desired state -configured by the chart inputs. - -When the Helm Chart installs, `helm` will mark the installation as successful when the resources are created. Under the -hood, the `Deployment` controller will do the work towards ensuring the desired number of `Pods` are up and running. - -For example, suppose you set the `replicaCount` variable to 3 when installing this chart. This will configure the -`Deployment` resource to maintain 3 replicas of the `Pod` at any given time, launching new ones if there is a deficit or -removing old ones if there is a surplus. - -To see the current status of the `Deployment`, you can query Kubernetes using `kubectl`. The `Deployment` resource of -the chart are labeled with the `applicationName` input value and the release name provided by helm. 
So for example, -suppose you deployed this chart using the following `values.yaml` file and command: - -```yaml -applicationName: nginx -containerImage: - repository: nginx - tag: stable -``` - -```bash -$ helm install -n edge-service gruntwork/k8s-service -``` - -In this example, the `applicationName` is set to `nginx`, while the release name is set to `edge-service`. This chart -will then install a `Deployment` resource in the default `Namespace` with the following labels that uniquely identifies -it: - -``` -app.kubernetes.io/name: nginx -app.kubernetes.io/instance: edge-service -``` - -So now you can query Kubernetes for that `Deployment` resource using these labels to see the state: - -```bash -$ kubectl get deployments -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -edge-service-nginx 3 3 3 1 24s -``` - -This includes a few useful information: - -- `DESIRED` lists the number of `Pods` that should be running in your cluster. -- `CURRENT` lists how many `Pods` are currently created in the cluster. -- `UP-TO-DATE` lists how many `Pods` are running the desired image. -- `AVAILABLE` lists how many `Pods` are currently ready to serve traffic, as defined by the `readinessProbe`. - -When all the numbers are in sync and equal, that means the `Deployment` was rolled out successfully and all the `Pods` -are passing the readiness healthchecks. - -In the example output above, note how the `Available` count is `1`, but the others are `3`. This means that all 3 `Pods` -were successfully created with the latest image, but only `1` of them successfully came up. You can dig deeper into the -individual `Pods` to check the status of the unavailable `Pods`. The `Pods` are labeled the same way, so you can pass in -the same label query to get the `Pods` managed by the deployment: - -```bash -$ kubectl get pods -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" -NAME READY STATUS RESTARTS AGE -edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 52s -edge-service-nginx-844c978df7-mln26 0/1 Pending 0 52s -edge-service-nginx-844c978df7-rdsr8 0/1 Pending 0 52s -``` - -This will show you the status of each individual `Pod` in your deployment. In this example output, there are 2 `Pods` -that are in the `Pending` status, meaning that they have not been scheduled yet. We can look into why the `Pod` failed -to schedule by getting detailed information about the `Pod` with the `describe` command. 
Unlike `get pods`, `describe -pod` requires a single `Pod` so we will grab the name of one of the failing `Pods` above and feed it to `describe pod`: - -```bash -$ kubectl describe pod edge-service-nginx-844c978df7-mln26 -Name: edge-service-nginx-844c978df7-mln26 -Namespace: default -Priority: 0 -PriorityClassName: -Node: -Labels: app.kubernetes.io/instance=edge-service - app.kubernetes.io/name=nginx - gruntwork.io/app-name=nginx - pod-template-hash=4007534893 -Annotations: -Status: Pending -IP: -Controlled By: ReplicaSet/edge-service-nginx-844c978df7 -Containers: - nginx: - Image: nginx:stable - Ports: 80/TCP - Host Ports: 0/TCP - Environment: - Mounts: - /var/run/secrets/kubernetes.io/serviceaccount from default-token-mgkr9 (ro) -Conditions: - Type Status - PodScheduled False -Volumes: - default-token-mgkr9: - Type: Secret (a volume populated by a Secret) - SecretName: default-token-mgkr9 - Optional: false -QoS Class: BestEffort -Node-Selectors: -Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s - node.kubernetes.io/unreachable:NoExecute for 300s -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Warning FailedScheduling 1m (x25 over 3m) default-scheduler 0/2 nodes are available: 2 Insufficient pods. -``` - -This will output detailed information about the `Pod`, including an event log. In this case, the roll out failed because -there is not enough capacity in the cluster to schedule the `Pod`. - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I set and share configurations with the application? - -While you can bake most application configuration values into the application container, you might need to inject -dynamic configuration variables into the container. These are typically values that change depending on the environment, -such as the MySQL database endpoint. Additionally, you might also want a way to securely share secrets with the -container such that they are not hard coded in plain text in the container or in the Helm Chart values yaml file. To -support these use cases, this Helm Chart provides three ways to share configuration values with the application -container: - -- [Directly setting environment variables](#directly-setting-environment-variables) -- [Using ConfigMaps](#using-configmaps) -- [Using Secrets](#using-secrets) - -### Directly setting environment variables - -The simplest way to set a configuration value for the container is to set an environment variable for the container -runtime. These variables are set by Kubernetes before the container application is booted, which can then be looked up -using the standard OS lookup functions for environment variables. - -You can use the `envVars` input value to set an environment variable at deploy time. For example, the following entry in -a `values.yaml` file will set the `DB_HOST` environment variable to `mysql.default.svc.cluster.local` and the `DB_PORT` -environment variable to `3306`: - -```yaml -envVars: - DB_HOST: "mysql.default.svc.cluster.local" - DB_PORT: 3306 -``` - -One thing to be aware of when using environment variables is that they are set at start time of the container. This -means that updating the environment variables require restarting the containers so that they propagate. - -### Using ConfigMaps - -While environment variables are an easy way to inject configuration values, what if you want to share the configuration -across multiple deployments? 
If you wish to use the direct environment variables approach, you would have no choice but -to copy paste the values across each deployment. When this value needs to change, you are now faced with going through -each deployment and updating the reference. - -For this situation, `ConfigMaps` would be a better option. `ConfigMaps` help decouple configuration values from the -`Deployment` and `Pod` config, allowing you to share the values across the deployments. `ConfigMaps` are dedicated -resources in Kubernetes that store configuration values as key value pairs. - -For example, suppose you had a `ConfigMap` to store the database information. You might store the information as two key -value pairs: one for the host (`dbhost`) and one for the port (`dbport`). You can create a `ConfigMap` directly using -`kubectl`, or by using a resource file. - -To directly create the `ConfigMap`: - -``` -kubectl create configmap my-config --from-literal=dbhost=mysql.default.svc.cluster.local --from-literal=dbport=3306 -``` - -Alternatively, you can manage the `ConfigMap` as code using a kubernetes resource config: - -```yaml -# my-config.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: my-config -data: - dbhost: mysql.default.svc.cluster.local - dbport: 3306 -``` - -You can then apply this resource file using `kubectl`: - -``` -kubectl apply -f my-config.yaml -``` - -`kubectl` supports multiple ways to seed the `ConfigMap`. You can read all the different ways to create a `ConfigMap` in -[the official -documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#create-a-configmap). - -Once the `ConfigMap` is created, you can access the `ConfigMap` within the `Pod` by configuring the access during -deployment. This Helm Chart provides the `configMaps` input value to configure what `ConfigMaps` should be shared with -the application container. There are two ways to inject the `ConfigMap`: - -- [Accessing the `ConfigMap` as Environment Variables](#accessing-the-configmap-as-environment-variables) -- [Accessing the `ConfigMap` as Files](#accessing-the-configmap-as-files) - -**NOTE**: It is generally not recommended to use `ConfigMaps` to store sensitive data. For those use cases, use -`Secrets` or an external secret store. - -##### Accessing the ConfigMap as Environment Variables - -You can set the values of the `ConfigMap` as environment variables in the application container. To do so, you set the -`as` attribute of the `configMaps` input value to `environment`. For example, to share the `my-config` `ConfigMap` above -using the same environment variables as the example in [Directly setting environment -variables](#directly-settings-environment-variables), you would set the `configMaps` as follows: - -```yaml -configMaps: - my-config: - as: environment - items: - dbhost: - envVarName: DB_HOST - dbport: - envVarName: DB_PORT -``` - -In this configuration for the Helm Chart, we specify that we want to share the `my-config` `ConfigMap` as environment -variables with the main application container. Additionally, we want to map the `dbhost` config value to the `DB_HOST` -environment variable, and similarly map the `dbport` config value to the `DB_PORT` environment variable. - -Note that like directly setting environment variables, these are set at container start time, and thus the containers -need to be restarted when the `ConfigMap` is updated for the new values to be propagated. You can use files instead if -you wish the `ConfigMap` changes to propagate immediately. 
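-
-Under the hood, this input is rendered into `valueFrom.configMapKeyRef` entries on the application container. As a
-rough sketch (the container name and the rest of the spec are omitted here for brevity), the rendered `Pod` spec for
-the example above would contain something like:
-
-```yaml
-env:
-  - name: DB_HOST
-    valueFrom:
-      configMapKeyRef:
-        name: my-config
-        key: dbhost
-  - name: DB_PORT
-    valueFrom:
-      configMapKeyRef:
-        name: my-config
-        key: dbport
-```
-
-You can verify what was actually rendered with `helm template`, or by inspecting a live `Pod` with
-`kubectl get pod POD_NAME -o yaml`.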
- -##### Accessing the ConfigMap as Files - -You can mount the `ConfigMap` values as files on the container filesystem. To do so, you set the `as` attribute of the -`configMaps` input value to `volume`. - -For example, suppose you wanted to share the `my-config` `ConfigMap` above as the files `/etc/db/host` and -`/etc/db/port`. For this case, you would set the `configMaps` input value to: - -```yaml -configMaps: - my-config: - as: volume - mountPath: /etc/db - items: - dbhost: - filePath: host - dbport: - filePath: port -``` - -In the container, now the values for `dbhost` is stored as a text file at the path `/etc/db/host` and `dbport` is stored -at the path `/etc/db/port`. You can then read these files in in your application to get the values. - -Unlike environment variables, using files has the advantage of immediately reflecting changes to the `ConfigMap`. For -example, when you update `my-config`, the files at `/etc/db` are updated automatically with the new values, without -needing a redeployment to propagate the new values to the container. - -### Using Secrets - -In general, it is discouraged to store sensitive information such as passwords in `ConfigMaps`. Instead, Kubernetes -provides `Secrets` as an alternative resource to store sensitive data. Similar to `ConfigMaps`, `Secrets` are key value -pairs that store configuration values that can be managed independently of the `Pod` and containers. However, unlike -`ConfigMaps`, `Secrets` have the following properties: - -- A secret is only sent to a node if a pod on that node requires it. They are automatically garbage collected when there - are no more `Pods` referencing it on the node. -- A secret is stored in `tmpfs` on the node, so that it is only available in memory. -- Starting with Kubernetes 1.7, they can be encrypted at rest in `etcd` (note: this feature was in alpha state until - Kubernetes 1.13). - -You can read more about the protections and risks of using `Secrets` in [the official -documentation](https://kubernetes.io/docs/concepts/configuration/secret/#security-properties). - -Creating a `Secret` is very similar to creating a `ConfigMap`. For example, suppose you had a `Secret` to store the -database password. Like `ConfigMaps`, you can create a `Secret` directly using `kubectl`: - -``` -kubectl create secret generic my-secret --from-literal=password=1f2d1e2e67df -``` - -The `generic` keyword indicates the `Secret` type. Almost all use cases for your application should use this type. Other -types include `docker-registry` for specifying credentials for accessing a private docker registry, and `tls` for -specifying TLS certificates to access the Kubernetes API. - -You can also manage the `Secret` as code, although you may want to avoid this for `Secrets` to avoid leaking them in -unexpected locations (e.g source control). Unlike `ConfigMaps`, `Secrets` require values to be stored as base64 encoded -values when using resource files. So the configuration for the above example will be: - -```yaml -# my-secret.yaml -apiVersion: v1 -kind: Secret -type: Opaque -metadata: - name: my-secret -data: - password: MWYyZDFlMmU2N2Rm -``` - -Note that `MWYyZDFlMmU2N2Rm` is the base 64 encoded version of `1f2d1e2e67df`. You can then apply this resource config -using `kubectl`: - -``` -kubectl apply -f my-secret.yaml -``` - -Similar to `ConfigMaps`, this Helm Chart supports two ways to inject `Secrets` into the application container: as -environment variables, or as files. 
The syntax to share the values is very similar to the `configMaps` input value, only
-you use the `secrets` input value. The properties of each approach are very similar to `ConfigMaps`. Refer to
-[the previous section](#using-configmaps) for more details on each approach. Here, we show you examples of the input
-values to use for each approach.
-
-**Mounting secrets as environment variables**: In this example, we mount the `my-secret` `Secret` created above as the
-environment variable `DB_PASSWORD`.
-
-```yaml
-secrets:
-  my-secret:
-    as: environment
-    items:
-      password:
-        envVarName: DB_PASSWORD
-```
-
-**Mounting secrets as files**: In this example, we mount the `my-secret` `Secret` as the file `/etc/db/password`.
-
-```yaml
-secrets:
-  my-secret:
-    as: volume
-    mountPath: /etc/db
-    items:
-      password:
-        filePath: password
-```
-
-**NOTE**: The volumes are different between `secrets` and `configMaps`. This means that if you use the same `mountPath`
-for different secrets and config maps, you can end up with only one. It is undefined which `Secret` or `ConfigMap` ends
-up getting mounted. To be safe, use a different `mountPath` for each one.
-
-**NOTE**: If you want to mount the volumes created with `secrets` or `configMaps` on your init or sidecar containers, you
-will have to append `-volume` to the volume name. In the example above, the resulting volume will be `my-secret-volume`.
-
-```yaml
-sideCarContainers:
-  sidecar:
-    image: sidecar/container:latest
-    volumeMounts:
-      - name: my-secret-volume
-        mountPath: /etc/db
-```
-
-### Which configuration method should I use?
-
-Which configuration method you should use depends on your needs. Here is a summary of the pros and cons of each
-approach:
-
-##### Directly setting environment variables
-
-**Pro**:
-
-- Simple setup
-- Manage configuration values directly with application deployment config
-- Most application languages support looking up environment variables
-
-**Con**:
-
-- Tightly couples configuration settings with application deployment
-- Requires redeployment to update values
-- Must be stored in plain text, and easy to leak into VCS
-
-**Best for**:
-
-- Iterating on different configuration values during development
-- Storing non-sensitive values that are unique to each environment / deployment
-
-##### Using ConfigMaps
-
-**Pro**:
-
-- Keep config DRY by sharing a common set of configurations
-- Independently update config values from the application deployment
-- Automatically propagate new values when stored as files
-
-**Con**:
-
-- More overhead to manage the configuration
-- Stored in plain text
-- Available on all nodes automatically
-
-**Best for**:
-
-- Storing non-sensitive common configuration that is shared across environments
-- Storing non-sensitive dynamic configuration values that change frequently
-
-##### Using Secrets
-
-**Pro**:
-
-- All the benefits of using `ConfigMaps`
-- Can be encrypted at rest
-- Opaque by default when viewing the values (harder to remember the base64-encoded version of "admin")
-- Only available to nodes that use it, and only in memory
-
-**Con**:
-
-- All the challenges of using `ConfigMaps`
-- Configured in plain text, making it difficult to manage as code securely
-- Less safe than using a dedicated secrets manager / store like HashiCorp Vault.
-
-**Best for**:
-
-- Storing sensitive configuration values
-
-back to [root README](/README.adoc#day-to-day-operations)
-
-## How do you update the application to a new version?
- -To update the application to a new version, you can upgrade the Helm Release using updated values. For example, suppose -you deployed `nginx` version 1.15.4 using this Helm Chart with the following values: - -```yaml -containerImage: - repository: nginx - tag: 1.15.4 - -applicationName: nginx -``` - -In this example, we will further assume that you deployed this chart with the above values using the release name -`edge-service`, using a command similar to below: - -```bash -$ helm install -f values.yaml --name edge-service gruntwork/k8s-service -``` - -Now let's try upgrading `nginx` to version 1.15.8. To do so, we will first update our values file: - -```yaml -containerImage: - repository: nginx - tag: 1.15.8 - -applicationName: nginx -``` - -The only difference here is the `tag` of the `containerImage`. - -Next, we will upgrade our release using the updated values. To do so, we will use the `helm upgrade` command: - -```bash -$ helm upgrade -f values.yaml edge-service gruntwork/k8s-service -``` - -This will update the created resources with the new values provided by the updated `values.yaml` file. For this example, -the only resource that will be updated is the `Deployment` resource, which will now have a new `Pod` spec that points to -`nginx:1.15.8` as opposed to `nginx:1.15.4`. This automatically triggers a rolling deployment internally to Kubernetes, -which will launch new `Pods` using the latest image, and shut down old `Pods` once those are ready. - -You can read more about how changes are rolled out on `Deployment` resources in [the official -documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment). - -Note that certain changes will lead to a replacement of the `Deployment` resource. For example, updating the -`applicationName` will cause the `Deployment` resource to be deleted, and then created. This can lead to down time -because the resources are replaced in an uncontrolled fashion. - -## How do I create a canary deployment? - -You may optionally configure a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of an arbitrary tag that will run as an individual deployment behind your configured service. This is useful for ensuring a new application tag runs without issues prior to fully rolling it out. - -To configure a canary deployment, set `canary.enabled = true` and define the `containerImage` values. Typically, you will want to specify the tag of your next release candidate: - -```yaml -canary: - enabled: true - containerImage: - repository: nginx - tag: 1.15.9 -``` -Once deployed, your service will route traffic across both your stable and canary deployments, allowing you to monitor for and catch any issues early. - -back to [root README](/README.adoc#major-changes) - -## How do I verify my canary deployment? 
- -Canary deployment pods have the same name as your stable deployment pods, with the additional `-canary` appended to the end, like so: - -```bash -$ kubectl get pods -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" -NAME READY STATUS RESTARTS AGE -edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 52s -edge-service-nginx-844c978df7-mln26 0/1 Pending 0 52s -edge-service-nginx-844c978df7-rdsr8 0/1 Pending 0 52s -edge-service-nginx-canary-844c978df7-bsr8 0/1 Pending 0 52s -``` - -Therefore, in this example, you could monitor your canary by running `kubectl logs -f edge-service-nginx-canary-844c978df7-bsr8` - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I roll back a canary deployment? - -Update your values.yaml file, setting `canary.enabled = false` and then upgrade your helm installation: - -```bash -$ helm upgrade -f values.yaml edge-service gruntwork/k8s-service -``` -Following this update, Kubernetes will determine that your canary deployment is no longer desired and will delete it. - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I ensure a minimum number of Pods are available across node maintenance? - -Sometimes, you may want to ensure that a specific number of `Pods` are always available during [voluntary -maintenance](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions). -This chart exposes an input value `minPodsAvailable` that can be used to specify a minimum number of `Pods` to maintain -during a voluntary maintenance activity. Under the hood, this chart will create a corresponding `PodDisruptionBudget` to -ensure that a certain number of `Pods` are up before attempting to terminate additional ones. - -You can read more about `PodDisruptionBudgets` in [our blog post covering the -topic](https://blog.gruntwork.io/avoiding-outages-in-your-kubernetes-cluster-using-poddisruptionbudgets-ef6a4baa5085) -and in [the official -documentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#how-disruption-budgets-work). - - -back to [root README](/README.adoc#major-changes) - -## Why does the Pod have a preStop hook with a Shutdown Delay? - -When a `Pod` is removed from a Kubernetes cluster, the control plane notifies all nodes to remove the `Pod` from -registered addresses. This includes removing the `Pod` from the list of available `Pods` to service a `Service` -endpoint. However, because Kubernetes is a distributed system, there is a delay between the shutdown sequence and the -`Pod` being removed from available addresses. As a result, the `Pod` could still get traffic despite it having already -been shutdown on the node it was running on. - -Since there is no way to guarantee that the deletion has propagated across the cluster, we address this eventual -consistency issue by adding an arbitrary delay between the `Pod` being deleted and the initiation of the `Pod` shutdown -sequence. This is accomplished by adding a `sleep` command in the `preStop` hook. - -You can control the length of time to delay with the `shutdownDelay` input value. You can also disable this behavior by -setting the `shutdownDelay` to 0. - -You can read more about this topic in [our blog post -"Delaying Shutdown to Wait for Pod Deletion -Propagation"](https://blog.gruntwork.io/delaying-shutdown-to-wait-for-pod-deletion-propagation-445f779a8304). - - -back to [root README](/README.adoc#day-to-day-operations) - -## What is a sidecar container? 
- -In Kubernetes, `Pods` are one or more tightly coupled containers that are deployed together. The containers in the `Pod` -share, amongst other things, the network stack, the IPC namespace, and in some cases the PID namespace. You can read -more about the resources that the containers in a `Pod` share in [the official -documentation](https://kubernetes.io/docs/concepts/workloads/pods/pod/#what-is-a-pod). - -Sidecar Containers are additional containers that you wish to deploy in the `Pod` housing your application container. -This helm chart supports deploying these containers by configuring the `sideCarContainers` input value. This input value -is a map between the side car container name and the values of the container spec. The spec is rendered directly into -the `Deployment` resource, with the `name` being set to the key. For example: - -```yaml -sideCarContainers: - datadog: - image: datadog/agent:latest - env: - - name: DD_API_KEY - value: ASDF-1234 - - name: SD_BACKEND - value: docker - nginx: - image: nginx:1.15.4 -``` - -This input will be rendered in the `Deployment` resource as: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - ... Snipped for brevity ... -spec: - ... Snipped for brevity ... - template: - spec: - containers: - ... The first entry relates to the application ... - - name: datadog - image: datadog/agent:latest - env: - - name: DD_API_KEY - value: ASDF-1234 - - name: SD_BACKEND - value: docker - - name: nginx - image: nginx:1.15.4 -``` - -In this config, the side car containers are rendered as additional containers to deploy alongside the main application -container configured by the `containerImage`, `ports`, `livenessProbe`, etc input values. Note that the -`sideCarContainers` variable directly renders the spec, meaning that the additional values for the side cars such as -`livenessProbe` should be rendered directly within the `sideCarContainers` input value. - -back to [root README](/README.adoc#core-concepts) - -## How do I use a private registry? - -To pull container images from a private registry, the Kubernetes cluster needs to be able to authenticate to the docker -registry with a registry key. On managed Kubernetes clusters (e.g EKS, GKE, AKS), this is automated through the server -IAM roles that are assigned to the instance VMs. In most cases, if the instance VM IAM role has the permissions to -access the registry, the Kubernetes cluster will automatically be able to pull down images from the respective managed -registry (e.g ECR on EKS or GCR on GKE). - -Alternatively, you can specify docker registry keys in the Kubernetes cluster as `Secret` resources. This is helpful in -situations where you do not have the ability to assign registry access IAM roles to the node itself, or if you are -pulling images off of a different registry (e.g accessing GCR from EKS cluster). - -You can use `kubectl` to create a `Secret` in Kubernetes that can be used as a docker registry key: - -``` -kubectl create secret docker-registry NAME \ - --docker-server=DOCKER_REGISTRY_SERVER \ - --docker-username=DOCKER_USER \ - --docker-password=DOCKER_PASSWORD \ - --docker-email=DOCKER_EMAIL -``` - -This command will create a `Secret` resource named `NAME` that holds the specified docker registry credentials. 
You can -then specify the cluster to use this `Secret` when pulling down images for the service `Deployment` in this chart by -using the `imagePullSecrets` input value: - -``` -imagePullSecrets: - - NAME -``` - -You can learn more about using private registries with Kubernetes in [the official -documentation](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry). - -back to [root README](/README.adoc#day-to-day-operations) diff --git a/charts/k8s-daemonset/linter_values.yaml b/charts/k8s-daemonset/linter_values.yaml deleted file mode 100644 index f800b2d7..00000000 --- a/charts/k8s-daemonset/linter_values.yaml +++ /dev/null @@ -1,42 +0,0 @@ -#---------------------------------------------------------------------------------------------------------------------- -# CHART PARAMETERS TO USE WITH HELM LINT -# This file declares a complete configuration value for this chart, with required values defined so that it can be used -# with helm lint to lint the chart. This should only specify the required values of the chart, and be combined with the -# default values of the chart. -# This is a YAML-formatted file. -#---------------------------------------------------------------------------------------------------------------------- - -#---------------------------------------------------------------------------------------------------------------------- -# REQUIRED VALUES -# These values are expected to be defined and passed in by the operator when deploying this helm chart. -#---------------------------------------------------------------------------------------------------------------------- - -# containerImage is a map that describes the container image that should be used to serve the application managed by -# this chart. -# The expected keys are: -# - repository (string) (required) : The container image repository that should be used. -# E.g `nginx` ; `gcr.io/kubernetes-helm/tiller` -# - tag (string) (required) : The tag of the image (e.g `latest`) that should be used. We recommend using a -# fixed tag or the SHA of the image. Avoid using the tags `latest`, `head`, -# `canary`, or other tags that are designed to be “floating”. -# - pullPolicy (string) : The image pull policy to employ. Determines when the image will be pulled in. See -# the official Kubernetes docs for more info. If undefined, this will default to -# `IfNotPresent`. -# -# The following example deploys the `nginx:stable` image with a `IfNotPresent` image pull policy, which indicates that -# the image should only be pulled if it has not been pulled previously. -# -# EXAMPLE: -# -# containerImage: -# repository: nginx -# tag: stable -# pullPolicy: IfNotPresent -containerImage: - repository: nginx - tag: stable - pullPolicy: IfNotPresent - -# applicationName is a string that names the application. This is used to label the pod and to name the main application -# container in the pod spec. The label is keyed under "gruntwork.io/app-name" -applicationName: "linter" diff --git a/charts/k8s-daemonset/templates/NOTES.txt b/charts/k8s-daemonset/templates/NOTES.txt deleted file mode 100644 index cc2b447f..00000000 --- a/charts/k8s-daemonset/templates/NOTES.txt +++ /dev/null @@ -1,43 +0,0 @@ - -Check the status of your Deployment by running this comamnd: - -kubectl get daemonsets --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "k8s-daemonset.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" - - -List the related Pods with the following command: - -kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "k8s-daemonset.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" - - -Use the following command to view information about the Service: - -kubectl get services --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "k8s-daemonset.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" - - -{{ if .Values.containerPorts -}} -{{- $serviceType := .Values.service.type | default "ClusterIP" -}} -Get the application URL by running these commands: - -{{- if .Values.ingress.enabled }} -{{- range .Values.ingress.hosts }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} -{{- end }} -{{- else if contains "NodePort" $serviceType }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "k8s-daemonset.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" $serviceType }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get svc -w {{ include "k8s-daemonset.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "k8s-daemonset.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" $serviceType }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "k8s-daemonset.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - {{- range $portName, $portSpec := .Values.containerPorts }} - {{- if not $portSpec.disabled }} - echo "Visit http://127.0.0.1:80{{ $portSpec.port }} to use your application container serving port {{ $portName }}" - kubectl port-forward $POD_NAME 80{{ $portSpec.port }}:{{ $portSpec.port }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} diff --git a/charts/k8s-daemonset/templates/_capabilities_helpers.tpl b/charts/k8s-daemonset/templates/_capabilities_helpers.tpl deleted file mode 100644 index 8ec79070..00000000 --- a/charts/k8s-daemonset/templates/_capabilities_helpers.tpl +++ /dev/null @@ -1,42 +0,0 @@ -{{/* Allow KubeVersion to be overridden. This is mostly used for testing purposes. */}} -{{- define "gruntwork.kubeVersion" -}} - {{- default .Capabilities.KubeVersion.Version .Values.kubeVersionOverride -}} -{{- end -}} - -{{/* Get Ingress API Version */}} -{{- define "gruntwork.ingress.apiVersion" -}} - {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" (include "gruntwork.kubeVersion" .)) -}} - {{- print "networking.k8s.io/v1" -}} - {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} - {{- print "networking.k8s.io/v1beta1" -}} - {{- else -}} - {{- print "extensions/v1beta1" -}} - {{- end -}} -{{- end -}} - -{{/* Ingress API version aware ingress backend */}} -{{- define "gruntwork.ingress.backend" -}} -{{/* NOTE: The leading whitespace is significant, as it is the specific yaml indentation for injection into the ingress resource. 
*/}} - {{- if eq .ingressAPIVersion "networking.k8s.io/v1" }} - service: - name: {{ if .serviceName }}{{ .serviceName }}{{ else }}{{ .fullName }}{{ end }} - port: - {{- if int .servicePort }} - number: {{ .servicePort }} - {{- else }} - name: {{ .servicePort }} - {{- end }} - {{- else }} - serviceName: {{ if .serviceName }}{{ .serviceName }}{{ else }}{{ .fullName }}{{ end }} - servicePort: {{ .servicePort }} - {{- end }} -{{- end -}} - -{{/* Get PodDisruptionBudget API Version */}} -{{- define "gruntwork.pdb.apiVersion" -}} - {{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">= 1.21-0" (include "gruntwork.kubeVersion" .)) -}} - {{- print "policy/v1" -}} - {{- else -}} - {{- print "policy/v1beta1" -}} - {{- end -}} -{{- end -}} diff --git a/charts/k8s-daemonset/templates/_daemonset_spec.tpl b/charts/k8s-daemonset/templates/_daemonset_spec.tpl deleted file mode 100644 index c47af452..00000000 --- a/charts/k8s-daemonset/templates/_daemonset_spec.tpl +++ /dev/null @@ -1,427 +0,0 @@ -{{- /* -Common deployment spec that is shared between the canary and main Deployment controllers. This template requires the -context: -- Values -- Release -- Chart -- isCanary (a boolean indicating if we are rendering the canary deployment or not) -You can construct this context using dict: -(dict "Values" .Values "Release" .Release "Chart" .Chart "isCanary" true) -*/ -}} -{{- define "k8s-daemonset.daemonsetSpec" -}} -{{- /* -We must decide whether or not there are volumes to inject. The logic to decide whether or not to inject is based on -whether or not there are configMaps OR secrets that are specified as volume mounts (`as: volume` attributes). We do this -by using a map to track whether or not we have seen a volume type. We have to use a map because we can't update a -variable in helm chart templates. - -Similarly, we need to decide whether or not there are environment variables to add - -We need this because certain sections are omitted if there are no volumes or environment variables to add. -*/ -}} - -{{/* Go Templates do not support variable updating, so we simulate it using dictionaries */}} -{{- $hasInjectionTypes := dict "hasVolume" false "hasEnvVars" false "exposePorts" false -}} -{{- if .Values.envVars -}} - {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}} -{{- end -}} -{{- if .Values.additionalContainerEnv -}} - {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}} -{{- end -}} -{{- $allContainerPorts := values .Values.containerPorts -}} -{{- range $allContainerPorts -}} - {{/* We are exposing ports if there is at least one key in containerPorts that is not disabled (disabled = false or - omitted) - */}} - {{- if or (not (hasKey . "disabled")) (not .disabled) -}} - {{- $_ := set $hasInjectionTypes "exposePorts" true -}} - {{- end -}} -{{- end -}} -{{- $allSecrets := values .Values.secrets -}} -{{- range $allSecrets -}} - {{- if eq (index . "as") "volume" -}} - {{- $_ := set $hasInjectionTypes "hasVolume" true -}} - {{- else if eq (index . "as") "environment" -}} - {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}} - {{- else if eq (index . "as") "envFrom" }} - {{- $_ := set $hasInjectionTypes "hasEnvFrom" true -}} - {{- else if eq (index . "as") "none" -}} - {{- /* noop */ -}} - {{- else -}} - {{- fail printf "secrets config has unknown type: %s" (index . "as") -}} - {{- end -}} -{{- end -}} -{{- $allConfigMaps := values .Values.configMaps -}} -{{- range $allConfigMaps -}} - {{- if eq (index . 
"as") "volume" -}} - {{- $_ := set $hasInjectionTypes "hasVolume" true -}} - {{- else if eq (index . "as") "environment" -}} - {{- $_ := set $hasInjectionTypes "hasEnvVars" true -}} - {{- else if eq (index . "as") "envFrom" }} - {{- $_ := set $hasInjectionTypes "hasEnvFrom" true -}} - {{- else if eq (index . "as") "none" -}} - {{- /* noop */ -}} - {{- else -}} - {{- fail printf "configMaps config has unknown type: %s" (index . "as") -}} - {{- end -}} -{{- end -}} -{{- if gt (len .Values.persistentVolumes) 0 -}} - {{- $_ := set $hasInjectionTypes "hasVolume" true -}} -{{- end -}} -{{- if gt (len .Values.scratchPaths) 0 -}} - {{- $_ := set $hasInjectionTypes "hasVolume" true -}} -{{- end -}} -{{- if gt (len .Values.emptyDirs) 0 -}} - {{- $_ := set $hasInjectionTypes "hasVolume" true -}} -{{- end -}} -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ include "k8s-daemonset.fullname" . }}{{ if .isCanary }}-canary{{ end }} - labels: - # These labels are required by helm. You can read more about required labels in the chart best practices guide: - # https://docs.helm.sh/chart_best_practices/#standard-labels - helm.sh/chart: {{ include "k8s-daemonset.chart" . }} - app.kubernetes.io/name: {{ include "k8s-daemonset.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- range $key, $value := .Values.additionalDeploymentLabels }} - {{ $key }}: {{ $value }} - {{- end}} -{{- with .Values.deploymentAnnotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -spec: -{{- if .Values.deploymentStrategy.enabled }} - strategy: - type: {{ .Values.deploymentStrategy.type }} -{{- if and (eq .Values.deploymentStrategy.type "RollingUpdate") .Values.deploymentStrategy.rollingUpdate }} - rollingUpdate: -{{ toYaml .Values.deploymentStrategy.rollingUpdate | indent 6 }} -{{- end }} -{{- end }} - selector: - matchLabels: - app.kubernetes.io/name: {{ include "k8s-daemonset.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- if .isCanary }} - gruntwork.io/deployment-type: canary - {{- else }} - gruntwork.io/deployment-type: main - {{- end }} - template: - metadata: - labels: - app.kubernetes.io/name: {{ include "k8s-daemonset.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- if .isCanary }} - gruntwork.io/deployment-type: canary - {{- else }} - gruntwork.io/deployment-type: main - {{- end }} - {{- range $key, $value := .Values.additionalPodLabels }} - {{ $key }}: {{ $value }} - {{- end }} - - {{- with .Values.podAnnotations }} - annotations: -{{ toYaml . 
| indent 8 }} - {{- end }} - spec: - {{- if gt (len .Values.serviceAccount.name) 0 }} - serviceAccountName: "{{ .Values.serviceAccount.name }}" - {{- end }} - {{- if hasKey .Values.serviceAccount "automountServiceAccountToken" }} - automountServiceAccountToken : {{ .Values.serviceAccount.automountServiceAccountToken }} - {{- end }} - {{- if .Values.podSecurityContext }} - securityContext: -{{ toYaml .Values.podSecurityContext | indent 8 }} - {{- end}} - - containers: - {{- if .isCanary }} - - name: {{ .Values.applicationName }}-canary - {{- $repo := required ".Values.canary.containerImage.repository is required" .Values.canary.containerImage.repository }} - {{- $tag := required ".Values.canary.containerImage.tag is required" .Values.canary.containerImage.tag }} - image: "{{ $repo }}:{{ $tag }}" - imagePullPolicy: {{ .Values.canary.containerImage.pullPolicy | default "IfNotPresent" }} - {{- else }} - - name: {{ .Values.applicationName }} - {{- $repo := required ".Values.containerImage.repository is required" .Values.containerImage.repository }} - {{- $tag := required ".Values.containerImage.tag is required" .Values.containerImage.tag }} - image: "{{ $repo }}:{{ $tag }}" - imagePullPolicy: {{ .Values.containerImage.pullPolicy | default "IfNotPresent" }} - {{- end }} - {{- if .Values.containerCommand }} - command: -{{ toYaml .Values.containerCommand | indent 12 }} - {{- end }} - - {{- if index $hasInjectionTypes "exposePorts" }} - ports: - {{- /* - NOTE: we check for a disabled flag here so that users of the helm - chart can override the default containerPorts. Specifically, defining a new - containerPorts in values.yaml will be merged with the default provided by the - chart. For example, if the user provides: - - containerPorts: - app: - port: 8080 - protocol: TCP - - Then this is merged with the default and becomes: - - containerPorts: - app: - port: 8080 - protocol: TCP - http: - port: 80 - protocol: TCP - https: - port: 443 - protocol: TCP - - and so it becomes append as opposed to replace. To handle this, - we allow users to explicitly disable predefined ports. So if the user wants to - replace the ports with their own, they would provide the following values file: - - containerPorts: - app: - port: 8080 - protocol: TCP - http: - disabled: true - https: - disabled: true - */ -}} - {{- range $key, $portSpec := .Values.containerPorts }} - {{- if not $portSpec.disabled }} - - name: {{ $key }} - containerPort: {{ int $portSpec.port }} - protocol: {{ $portSpec.protocol }} - {{- end }} - {{- end }} - {{- end }} - - - {{- if .Values.securityContext }} - securityContext: -{{ toYaml .Values.securityContext | indent 12 }} - {{- end}} - resources: -{{ toYaml .Values.containerResources | indent 12 }} - - {{- if or .Values.lifecycleHooks.enabled (gt (int .Values.shutdownDelay) 0) }} - lifecycle: - {{- if and .Values.lifecycleHooks.enabled .Values.lifecycleHooks.postStart }} - postStart: -{{ toYaml .Values.lifecycleHooks.postStart | indent 14 }} - {{- end }} - - {{- if and .Values.lifecycleHooks.enabled .Values.lifecycleHooks.preStop }} - preStop: -{{ toYaml .Values.lifecycleHooks.preStop | indent 14 }} - {{- else if gt (int .Values.shutdownDelay) 0 }} - # Include a preStop hook with a shutdown delay for eventual consistency reasons. 
- # See https://blog.gruntwork.io/delaying-shutdown-to-wait-for-pod-deletion-propagation-445f779a8304 - preStop: - exec: - command: - - sleep - - "{{ int .Values.shutdownDelay }}" - {{- end }} - - {{- end }} - - {{- /* START ENV VAR LOGIC */ -}} - {{- if index $hasInjectionTypes "hasEnvVars" }} - env: - - name: SYSLOG_HOST - value: "sysloghost" - - name: SYSLOG_PORT - value: "514" - - name: SYSLOG_PROTOCOL - value: "udp" - {{- end }} - {{- range $key, $value := .Values.envVars }} - - name: {{ $key }} - value: {{ quote $value }} - {{- end }} - {{- if .Values.additionalContainerEnv }} -{{ toYaml .Values.additionalContainerEnv | indent 12 }} - {{- end }} - {{- range $name, $value := .Values.configMaps }} - {{- if eq $value.as "environment" }} - {{- range $configKey, $keyEnvVarConfig := $value.items }} - - name: {{ required "envVarName is required on configMaps items when using environment" $keyEnvVarConfig.envVarName | quote }} - valueFrom: - configMapKeyRef: - name: {{ $name }} - key: {{ $configKey }} - {{- end }} - {{- end }} - {{- end }} - {{- range $name, $value := .Values.secrets }} - {{- if eq $value.as "environment" }} - {{- range $secretKey, $keyEnvVarConfig := $value.items }} - - name: {{ required "envVarName is required on secrets items when using environment" $keyEnvVarConfig.envVarName | quote }} - valueFrom: - secretKeyRef: - name: {{ $name }} - key: {{ $secretKey }} - {{- end }} - {{- end }} - {{- end }} - {{- if index $hasInjectionTypes "hasEnvFrom" }} - envFrom: - {{- range $name, $value := .Values.configMaps }} - {{- if eq $value.as "envFrom" }} - - configMapRef: - name: {{ $name }} - {{- end }} - {{- end }} - {{- range $name, $value := .Values.secrets }} - {{- if eq $value.as "envFrom" }} - - secretRef: - name: {{ $name }} - {{- end }} - {{- end }} - {{- end }} - {{- /* END ENV VAR LOGIC */ -}} - - - {{- /* START VOLUME MOUNT LOGIC */ -}} - {{- if index $hasInjectionTypes "hasVolume" }} - volumeMounts: - {{- end }} - {{- range $name, $value := .Values.configMaps }} - {{- if eq $value.as "volume" }} - - name: {{ $name }}-volume - mountPath: {{ quote $value.mountPath }} - {{- if $value.subPath }} - subPath: {{ quote $value.subPath }} - {{- end }} - {{- end }} - {{- end }} - {{- range $name, $value := .Values.secrets }} - {{- if eq $value.as "volume" }} - - name: {{ $name }}-volume - mountPath: {{ quote $value.mountPath }} - {{- end }} - {{- end }} - {{- range $name, $value := .Values.persistentVolumes }} - - name: {{ $name }} - mountPath: {{ quote $value.mountPath }} - {{- end }} - {{- range $name, $value := .Values.scratchPaths }} - - name: {{ $name }} - mountPath: {{ quote $value }} - {{- end }} - {{- range $name, $value := .Values.emptyDirs }} - - name: {{ $name }} - mountPath: {{ quote $value }} - {{- end }} - {{- /* END VOLUME MOUNT LOGIC */ -}} - - {{- range $key, $value := .Values.sideCarContainers }} - - name: {{ $key }} -{{ toYaml $value | indent 10 }} - {{- end }} - - - {{- if gt (len .Values.initContainers) 0 }} - initContainers: - {{- range $key, $value := .Values.initContainers }} - - name: {{ $key }} -{{ toYaml $value | indent 10 }} - {{- end }} - {{- end }} - - {{- /* START IMAGE PULL SECRETS LOGIC */ -}} - {{- if gt (len .Values.imagePullSecrets) 0 }} - imagePullSecrets: - {{- range $secretName := .Values.imagePullSecrets }} - - name: {{ $secretName }} - {{- end }} - {{- end }} - {{- /* END IMAGE PULL SECRETS LOGIC */ -}} - - {{- /* START TERMINATION GRACE PERIOD LOGIC */ -}} - {{- if .Values.terminationGracePeriodSeconds }} - terminationGracePeriodSeconds: {{ 
.Values.terminationGracePeriodSeconds }} - {{- end}} - {{- /* END TERMINATION GRACE PERIOD LOGIC */ -}} - - {{- /* START VOLUME LOGIC */ -}} - {{- if index $hasInjectionTypes "hasVolume" }} - volumes: - {{- end }} - {{- range $name, $value := .Values.configMaps }} - {{- if eq $value.as "volume" }} - - name: {{ $name }}-volume - configMap: - name: {{ $name }} - {{- if $value.items }} - items: - {{- range $configKey, $keyMountConfig := $value.items }} - - key: {{ $configKey }} - path: {{ required "filePath is required for configMap items" $keyMountConfig.filePath | quote }} - {{- if $keyMountConfig.fileMode }} - mode: {{ include "k8s-daemonset.fileModeOctalToDecimal" $keyMountConfig.fileMode }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} - {{- range $name, $value := .Values.secrets }} - {{- if eq $value.as "volume" }} - - name: {{ $name }}-volume - secret: - secretName: {{ $name }} - {{- if $value.items }} - items: - {{- range $secretKey, $keyMountConfig := $value.items }} - - key: {{ $secretKey }} - path: {{ required "filePath is required for secrets items" $keyMountConfig.filePath | quote }} - {{- if $keyMountConfig.fileMode }} - mode: {{ include "k8s-daemonset.fileModeOctalToDecimal" $keyMountConfig.fileMode }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} - {{- range $name, $value := .Values.persistentVolumes }} - - name: {{ $name }} - persistentVolumeClaim: - claimName: {{ $value.claimName }} - {{- end }} - {{- range $name, $value := .Values.scratchPaths }} - - name: {{ $name }} - emptyDir: - medium: "Memory" - {{- end }} - {{- range $name, $value := .Values.emptyDirs }} - - name: {{ $name }} - emptyDir: {} - {{- end }} - {{- /* END VOLUME LOGIC */ -}} - - {{- with .Values.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} - {{- end }} - - {{- with .Values.affinity }} - affinity: -{{ toYaml . | indent 8 }} - {{- end }} - - {{- with .Values.tolerations }} - tolerations: -{{ toYaml . | indent 8 }} - {{- end }} -{{- end -}} diff --git a/charts/k8s-daemonset/templates/_helpers.tpl b/charts/k8s-daemonset/templates/_helpers.tpl deleted file mode 100644 index 11b35ed6..00000000 --- a/charts/k8s-daemonset/templates/_helpers.tpl +++ /dev/null @@ -1,73 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{/* -Expand the name of the chart. -*/}} -{{- define "k8s-daemonset.name" -}} - {{- .Values.applicationName | required "applicationName is required" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "k8s-daemonset.fullname" -}} - {{- $name := required "applicationName is required" .Values.applicationName -}} - {{- if .Values.fullnameOverride -}} - {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} - {{- else if contains $name .Release.Name -}} - {{- .Release.Name | trunc 63 | trimSuffix "-" -}} - {{- else -}} - {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} - {{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "k8s-daemonset.chart" -}} - {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Convert octal to decimal (e.g 644 => 420). For file permission modes, many people are more familiar with octal notation. 
-However, due to yaml/json limitations, all the Kubernetes resources require file modes to be reported in decimal. -*/}} -{{- define "k8s-daemonset.fileModeOctalToDecimal" -}} - {{- $digits := splitList "" (toString .) -}} - - {{/* Make sure there are exactly 3 digits */}} - {{- if ne (len $digits) 3 -}} - {{- fail (printf "File mode octal expects exactly 3 digits: %s" .) -}} - {{- end -}} - - {{/* Go Templates do not support variable updating, so we simulate it using dictionaries */}} - {{- $accumulator := dict "res" 0 -}} - {{- range $idx, $digit := $digits -}} - {{- $digitI := atoi $digit -}} - - {{/* atoi from sprig swallows conversion errors, so we double check to make sure it is a valid conversion */}} - {{- if and (eq $digitI 0) (ne $digit "0") -}} - {{- fail (printf "Digit %d of %s is not a number: %s" $idx . $digit) -}} - {{- end -}} - - {{/* Make sure each digit is less than 8 */}} - {{- if ge $digitI 8 -}} - {{- fail (printf "%s is not a valid octal digit" $digit) -}} - {{- end -}} - - {{/* Since we don't have math.Pow, we hard code */}} - {{- if eq $idx 0 -}} - {{/* 8^2 */}} - {{- $_ := set $accumulator "res" (add (index $accumulator "res") (mul $digitI 64)) -}} - {{- else if eq $idx 1 -}} - {{/* 8^1 */}} - {{- $_ := set $accumulator "res" (add (index $accumulator "res") (mul $digitI 8)) -}} - {{- else -}} - {{/* 8^0 */}} - {{- $_ := set $accumulator "res" (add (index $accumulator "res") (mul $digitI 1)) -}} - {{- end -}} - {{- end -}} - {{- "res" | index $accumulator | toString | printf -}} -{{- end -}} diff --git a/charts/k8s-daemonset/templates/daemonset.yaml b/charts/k8s-daemonset/templates/daemonset.yaml index 4921af2c..424eab3e 100644 --- a/charts/k8s-daemonset/templates/daemonset.yaml +++ b/charts/k8s-daemonset/templates/daemonset.yaml @@ -1,5 +1,54 @@ -{{- /* -The main Deployment Controller for the application being deployed. This resource manages the creation and replacement -of the Pods backing your application. 
-*/ -}} -{{ include "k8s-daemonset.daemonsetSpec" (dict "Values" .Values "isCanary" false "Release" .Release "Chart" .Chart) }} +{{- if .Values.DaemonSet.create }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ .Values.DaemonSet.name }} + namespace: {{ $.Release.Namespace }} + labels: + app: {{ .Values.DaemonSet.name }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: {{ .Values.DaemonSet.name }} + release: {{ .Release.Name }} + updateStrategy: + {{- if eq .Values.DaemonSet.updateStrategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.DaemonSet.maxUnavailable }} + {{- end }} + template: + metadata: + labels: + app: {{ .Values.DaemonSet.name }} + release: {{ .Release.Name }} + spec: + tolerations: + - key: {{ .Values.DaemonSet.tolerationKey }} + operator: {{ .Values.DaemonSet.tolerationOperator }} + effect: {{ .Values.DaemonSet.tolerationEffect }} + containers: + - name: {{ .Values.DaemonSet.name }} + image: {{ .Values.DaemonSet.containerImage }} + {{- if .Values.DaemonSet.volumeMountCreate }} + volumeMounts: + - name: {{ .Values.DaemonSet.volumeName }} + mountPath: {{ .Values.DaemonSet.volumeMountPath }} + {{- if .Values.DaemonSet.extraVolumeMountCreate }} + - name: {{ .Values.DaemonSet.extraVolumeName }} + mountPath: {{ .Values.DaemonSet.extraVolumeMountPath }} + readOnly: true + {{- end }} + volumes: + - name: {{ .Values.DaemonSet.volumeName }} + hostPath: + path: {{ .Values.DaemonSet.volumeMountPath }} + {{- end }} + {{- if .Values.DaemonSet.extraVolumeMountCreate }} + - name: {{ .Values.DaemonSet.extraVolumeName }} + hostPath: + path: {{ .Values.DaemonSet.extraVolumeMountPath }} + {{- end }} +{{- end }} + diff --git a/charts/k8s-daemonset/templates/gmc.yaml b/charts/k8s-daemonset/templates/gmc.yaml deleted file mode 100644 index 1553d50f..00000000 --- a/charts/k8s-daemonset/templates/gmc.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{{- /* -If the operator configures the google.managedCertificate input variable, then also create a ManagedCertificate resource -that will provision a Google managed SSL certificate. -*/ -}} -{{- if .Values.google.managedCertificate.enabled -}} -{{- /* -We declare some variables defined on the Values. These are reused in `with` and `range` blocks where the scoped variable -(`.`) is rebound within the block. -*/ -}} -{{- $domainName := .Values.google.managedCertificate.domainName -}} -{{- $certificateName := .Values.google.managedCertificate.name -}} -apiVersion: networking.gke.io/v1beta1 -kind: ManagedCertificate -metadata: - name: {{ $certificateName }} - labels: - gruntwork.io/app-name: {{ .Values.applicationName }} - # These labels are required by helm. You can read more about required labels in the chart best practices guide: - # https://docs.helm.sh/chart_best_practices/#standard-labels - app.kubernetes.io/name: {{ include "k8s-service.name" . }} - helm.sh/chart: {{ include "k8s-service.chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - domains: - - {{ $domainName }} -{{- end }} diff --git a/charts/k8s-daemonset/templates/ingress.yaml b/charts/k8s-daemonset/templates/ingress.yaml deleted file mode 100644 index 10f935f7..00000000 --- a/charts/k8s-daemonset/templates/ingress.yaml +++ /dev/null @@ -1,101 +0,0 @@ -{{- /* -If the operator configures the ingress input variable, then also create an Ingress resource that will route to the -service. 
Note that Ingress can only route to a Service, so the operator must also configure a Service. -*/ -}} -{{- if .Values.ingress.enabled -}} - -{{- /* -We declare some variables defined on the Values. These are reused in `with` and `range` blocks where the scoped variable -(`.`) is rebound within the block. -*/ -}} -{{- $fullName := include "k8s-service.fullname" . -}} -{{- $ingressAPIVersion := include "gruntwork.ingress.apiVersion" . -}} -{{- $ingressPath := .Values.ingress.path -}} -{{- $ingressPathType := .Values.ingress.pathType -}} -{{- $additionalPathsHigherPriority := .Values.ingress.additionalPathsHigherPriority }} -{{- $additionalPaths := .Values.ingress.additionalPaths }} -{{- $servicePort := .Values.ingress.servicePort -}} -{{- $baseVarsForBackend := dict "fullName" $fullName "ingressAPIVersion" $ingressAPIVersion -}} - -apiVersion: {{ $ingressAPIVersion }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - gruntwork.io/app-name: {{ .Values.applicationName }} - # These labels are required by helm. You can read more about required labels in the chart best practices guide: - # https://docs.helm.sh/chart_best_practices/#standard-labels - app.kubernetes.io/name: {{ include "k8s-service.name" . }} - helm.sh/chart: {{ include "k8s-service.chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- if .Values.ingress.annotations }} -{{- with .Values.ingress.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -{{- end }} -spec: -{{- if .Values.ingress.tls }} -{{- with .Values.ingress.tls }} - tls: -{{ toYaml . | indent 4}} -{{- end }} -{{- end }} - rules: - {{- if .Values.ingress.hosts }} - {{- range .Values.ingress.hosts }} - - host: {{ . | quote }} - http: - paths: - {{- range $additionalPathsHigherPriority }} - - path: {{ .path }} - {{- if and (eq $ingressAPIVersion "networking.k8s.io/v1") .pathType }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- include "gruntwork.ingress.backend" (merge . $baseVarsForBackend) }} - {{- end }} - - path: {{ $ingressPath }} - {{- if and (eq $ingressAPIVersion "networking.k8s.io/v1") $ingressPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - backend: - {{- include "gruntwork.ingress.backend" (dict "serviceName" $fullName "servicePort" $servicePort | merge $baseVarsForBackend) }} - {{- range $additionalPaths }} - - path: {{ .path }} - {{- if and (eq $ingressAPIVersion "networking.k8s.io/v1") .pathType }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- include "gruntwork.ingress.backend" (merge . $baseVarsForBackend) }} - {{- end }} - {{- end }} - {{- else }} - - http: - paths: - {{- range $additionalPathsHigherPriority }} - - path: {{ .path }} - {{- if and (eq $ingressAPIVersion "networking.k8s.io/v1") .pathType }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- include "gruntwork.ingress.backend" (merge . $baseVarsForBackend) }} - {{- end }} - - path: {{ $ingressPath }} - {{- if and (eq $ingressAPIVersion "networking.k8s.io/v1") $ingressPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - backend: - {{- include "gruntwork.ingress.backend" (dict "serviceName" $fullName "servicePort" $servicePort | merge $baseVarsForBackend) }} - {{- range $additionalPaths }} - - path: {{ .path }} - {{- if and (eq $ingressAPIVersion "networking.k8s.io/v1") .pathType }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- include "gruntwork.ingress.backend" (merge . 
$baseVarsForBackend) }} - {{- end }} - - {{- end }} -{{- end }} diff --git a/charts/k8s-daemonset/templates/pdb.yaml b/charts/k8s-daemonset/templates/pdb.yaml deleted file mode 100644 index 631e92cf..00000000 --- a/charts/k8s-daemonset/templates/pdb.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- /* -If there is a specification for minimum number of Pods that should be available, create a PodDisruptionBudget -*/ -}} -{{- if .Values.minPodsAvailable -}} -apiVersion: {{ include "gruntwork.pdb.apiVersion" . }} -kind: PodDisruptionBudget -metadata: - name: {{ include "k8s-service.fullname" . }} - labels: - gruntwork.io/app-name: {{ .Values.applicationName }} - # These labels are required by helm. You can read more about required labels in the chart best practices guide: - # https://docs.helm.sh/chart_best_practices/#standard-labels - app.kubernetes.io/name: {{ include "k8s-service.name" . }} - helm.sh/chart: {{ include "k8s-service.chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - minAvailable: {{ int .Values.minPodsAvailable }} - selector: - matchLabels: - app.kubernetes.io/name: {{ include "k8s-service.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} diff --git a/charts/k8s-daemonset/templates/service.yaml b/charts/k8s-daemonset/templates/service.yaml deleted file mode 100644 index f831547f..00000000 --- a/charts/k8s-daemonset/templates/service.yaml +++ /dev/null @@ -1,42 +0,0 @@ -{{- /* -If the operator configures the service input variable, then also create a Service resource that exposes the Pod as a -stable endpoint that can be routed within the Kubernetes cluster. -*/ -}} -{{- if .Values.service.enabled -}} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "k8s-daemonset.fullname" . }} - labels: - # These labels are required by helm. You can read more about required labels in the chart best practices guide: - # https://docs.helm.sh/chart_best_practices/#standard-labels - app.kubernetes.io/name: {{ include "k8s-daemonset.name" . }} - helm.sh/chart: {{ include "k8s-daemonset.chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- if .Values.service.annotations }} -{{- with .Values.service.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -{{- end }} -spec: - type: {{ .Values.service.type | default "ClusterIP" }} - ports: - {{- range $key, $value := .Values.service.ports }} - - name: {{ $key }} -{{ toYaml $value | indent 6 }} - {{- end }} - selector: - app.kubernetes.io/name: {{ include "k8s-daemonset.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - {{- if .Values.service.sessionAffinity }} - sessionAffinity: {{ .Values.service.sessionAffinity }} - {{- if .Values.service.sessionAffinityConfig }} - {{- with .Values.service.sessionAffinityConfig }} - sessionAffinityConfig: -{{ toYaml . | indent 4 }} - {{- end}} - {{- end}} - {{- end}} -{{- end }} diff --git a/charts/k8s-daemonset/templates/serviceaccount.yaml b/charts/k8s-daemonset/templates/serviceaccount.yaml deleted file mode 100644 index ba47e205..00000000 --- a/charts/k8s-daemonset/templates/serviceaccount.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.serviceAccount.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.serviceAccount.name }} - namespace: {{ $.Release.Namespace }} - labels: - app: {{ template "k8s-service.name" . 
}} - {{- if .Values.serviceAccount.labels }} - {{- toYaml .Values.serviceAccount.labels | nindent 4 }} - {{- end }} - {{- if .Values.serviceAccount.annotations }} - annotations: - {{ toYaml .Values.serviceAccount.annotations | indent 4 }} - {{- end }} -{{- if gt (len .Values.imagePullSecrets) 0 }} -imagePullSecrets: - {{- range $secretName := .Values.imagePullSecrets }} - - name: {{ $secretName }} - {{- end }} -{{- end }} -{{- end }} diff --git a/charts/k8s-daemonset/templates/servicemonitor.yaml b/charts/k8s-daemonset/templates/servicemonitor.yaml deleted file mode 100644 index 380a0a6b..00000000 --- a/charts/k8s-daemonset/templates/servicemonitor.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "k8s-service.fullname" . }} - {{- if .Values.serviceMonitor.namespace }} - namespace: {{ .Values.serviceMonitor.namespace }} - {{- end }} - labels: - chart: {{ template "k8s-service.chart" . }} - app: {{ template "k8s-service.name" . }} - heritage: "{{ .Release.Service }}" - {{- if .Values.serviceMonitor.labels }} - {{- toYaml .Values.serviceMonitor.labels | nindent 4 }} - {{- end }} -spec: - endpoints: - {{- values .Values.serviceMonitor.endpoints | toYaml | nindent 6 }} - selector: - matchLabels: - app.kubernetes.io/name: {{ template "k8s-service.name" . }} -{{- end }} diff --git a/charts/k8s-daemonset/values.yaml b/charts/k8s-daemonset/values.yaml index 41deb811..9053187b 100644 --- a/charts/k8s-daemonset/values.yaml +++ b/charts/k8s-daemonset/values.yaml @@ -1,713 +1,29 @@ -#---------------------------------------------------------------------------------------------------------------------- -# CHART PARAMETERS -# This file declares the configuration input values for the k8s-service Helm chart. -# This is a YAML-formatted file. -#---------------------------------------------------------------------------------------------------------------------- - -#---------------------------------------------------------------------------------------------------------------------- -# REQUIRED VALUES -# These values are expected to be defined and passed in by the operator when deploying this helm chart. -#---------------------------------------------------------------------------------------------------------------------- - -# containerImage is a map that describes the container image that should be used to serve the application managed by -# this chart. -# The expected keys are: -# - repository (string) (required) : The container image repository that should be used. -# E.g `nginx` ; `gcr.io/kubernetes-helm/tiller` -# - tag (string) (required) : The tag of the image (e.g `latest`) that should be used. We recommend using a -# fixed tag or the SHA of the image. Avoid using the tags `latest`, `head`, -# `canary`, or other tags that are designed to be “floating”. -# - pullPolicy (string) : The image pull policy to employ. Determines when the image will be pulled in. See -# the official Kubernetes docs for more info. If undefined, this will default to -# `IfNotPresent`. -# -# The following example deploys the `nginx:stable` image with a `IfNotPresent` image pull policy, which indicates that -# the image should only be pulled if it has not been pulled previously. -# -# EXAMPLE: -# -# containerImage: -# repository: nginx -# tag: stable -# pullPolicy: IfNotPresent - -# applicationName is a string that names the application. 
This is used to label the pod and to name the main application -# container in the pod spec. The label is keyed under "gruntwork.io/app-name" - - -#---------------------------------------------------------------------------------------------------------------------- -# OPTIONAL VALUES -# These values have defaults, but may be overridden by the operator -#---------------------------------------------------------------------------------------------------------------------- - -# containerCommand is a list of strings that indicate a custom command to run for the container in place of the default -# configured on the image. Omit to run the default command configured on the image. -# -# Example (run echo "Hello World"): -# -# containerCommand: -# - "echo" -# - "Hello World" -containerCommand: null - -# containerPorts is a map that specifies the ports to open on the container. This is a nested map: the first map lists -# the named ports, while the second layer lists the port spec. The named references can be used to refer to the specific -# port of the container in other resources, like Service. -# The expected keys of the port spec are: -# - port (int) (required) : The port in the container that should be exposed. -# - protocol (string) (required) : The network protocol (e.g TCP or UDP) that is exposed. -# - disabled (bool) : Whether or not this port is disabled. This defaults to false if unset. Provided as a -# convenience to override the default ports on the commandline. For example, to -# disable the default port, you can pass `--set containerPorts.http.disabled=true`. -# -# The default config exposes TCP port 80 and binds the name `http` to it. -containerPorts: - http: - port: 80 - protocol: TCP - -# livenessProbe is a map that specifies the liveness probe of the main application container. Liveness probes indicate -# when a container has reached a fatal state where it needs to be restarted to recover. When the liveness probe fails, -# the container is automatically recreated. You can read more about container liveness probes in the official docs: -# https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ -# NOTE: This variable is injected directly into the container spec. -# -# The following example specifies an http GET based liveness probe, that will base the probe on a http GET request to -# the port bound to name `http` (see description on `containerPorts`) on the path `/`. -# -# EXAMPLE: -# -# livenessProbe: -# httpGet: -# path: / -# port: http -#livenessProbe: {} - -# readinessProbe is a map that specifies the readiness probe of the main application container. Readiness probes -# indicate when a container is unable to serve traffic. When the readiness probe fails, the container is cycled out of -# the list of available containers to the `Service`. You can read more about readiness probes in the official docs: -# https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ -# NOTE: This variable is injected directly into the container spec. -# -# The following example specifies an http GET based readiness probe, that will base the probe on a http GET request to -# the port bound to name `http` (see description on `containerPorts`) on the path `/`. -# -# EXAMPLE: -# -# readinessProbe: -# httpGet: -# path: / -# port: http -#readinessProbe: {} - -# securityContext is a map that specified the privillege and access control settings for a Pod of Container. 
Security Context -# can be specified when the application requires additional access control permissions. More details on securityContext and supported -# settings can be found at https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -# similar to the podSecurityContext {} however, this sets security attributes at the container level rather than at the pod level scope. - -# -# EXAMPLE: -# 1) To run a container in privilleged mode -# securityContext: -# privilleged: true -# -# 2) To run a container as a specific user -# securityContext: -# runAsUser: 2000 -# securityContext: {} - -# podSecurityContext holds pod-level security access control settings. -# similar to the securityContext {} however, this sets security attributes at the pod level rather than at the container level scope. -# this allows certain attributes to be set that are not possible in the container level. For example 'fsGroup'. -# more details can be found at https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core - -# EXAMPLE: -# podSecurityContext: -# fsGroup: 2000 -podSecurityContext: {} - - -# shutdownDelay is the number of seconds to delay the shutdown sequence of the Pod by. This is implemented as a sleep -# call in the preStop hook. By default, this chart includes a preStop hook with a shutdown delay for eventual -# consistency reasons. You can read more about why you might want to do this in -# https://blog.gruntwork.io/delaying-shutdown-to-wait-for-pod-deletion-propagation-445f779a8304 -# You can disable this behavior by setting this value to 0. -# NOTE: this conflicts with lifecycleHooks.preStop -shutdownDelay: 5 - -# lifecycleHooks configures container lifecycle hooks on the Pod so you can run arbitrary commands after the -# container starts (postStart) or before the container stops. -# Refer to https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/ for more information on container -# lifecycles. -# -# EXAMPLE: -# -# lifecycleHooks: -# enabled: true -# postStart: -# exec: -# command: -# - echo -# - "Run after starting container" -# preStop: -# exec: -# command: -# - echo -# - "Run before stopping container" -# -# NOTE: the preStop hook conflicts with shutdownDelay -lifecycleHooks: - enabled: false - -# sideCarContainers specifies any additional containers that should be deployed as side cars to the main application -# container. This will be included in the Deployment container spec so that it will be included in the application Pod. -# This is a nested map, where the first map key is used to name the container, with the nested map being injected as the -# container spec. -# -# The following example specifies a data dog agent container as a side car with some environment variables, binding the -# name `datadog`: -# -# EXAMPLE: -# -# sideCarContainers: -# datadog: -# image: datadog/agent:latest -# env: -# - name: DD_API_KEY -# value: ASDF-1234 -# - name: SD_BACKEND -# value: docker -sideCarContainers: {} - -# initContainers specifies any additional containers that should be deployed as init containers to the main application -# container. This will be included in the Deployment container spec so that it will be included in the application Pod. -# This is a nested map, where the first map key is used to name the container, with the nested map being injected as the -# container spec. 
-# -# The following example specifies a flyway image as an init container with an environment variable, binding the -# name `flyway`: -# -# EXAMPLE: -# -# initContainers: -# flyway: -# image: flyway/flyway -# env: -# - name: FLYWAY_LOCATIONS -# value: 'filesystem:/flyway/migrations' -initContainers: {} - -# canary specifies test pod(s) that are deployed alongside your application's stable track pods. -# It is useful for testing a new release candidate in a production environment with minimal disruption and -# for allowing you to find any issues early. -# The expected keys of the canary spec are: -# - enabled (bool) (required) : Whether or not the canary deployment should be created. If false, no canary deployment will be created. -# - containerImage (map) (required) : A map that specifies the application container and tag to be managed by the canary deployment. -# This has the same structure as containerImage. -# - replicaCount (int) : The number of pods that should be managed by the canary deployment. Defaults to 1 if unset. -# -# The following example specifies a simple canary deployment: -# -# EXAMPLE: -# -# canary: -# enabled: true -# replicaCount: 1 -# containerImage: -# repository: nginx -# tag: 1.16.0 -# pullPolicy: IfNotPresent -canary: {} - - -# deploymentStrategy specifies the strategy used to replace old Pods by new ones. Type can be "RollingUpdate" or -# "Recreate". "RollingUpdate" is the default value. -# RollingUpdate: The Deployment updates Pods in a rolling update fashion. -# Recreate: All existing Pods are killed before new ones are created. -# -# RollingUpdate can be further refined by providing custom rollingUpdate options. -# The rollingUpdate variable is a map that is directly injected into the deployment spec and it has the following keys: -# - maxUnavailable (Optional) : Field that specifies the maximum number of Pods that can be unavailable -# during the update process. The value can be an absolute number -# (for example, 5) or a percentage of desired Pods (for example, 10%). -# The value cannot be 0 if rollingUpdate.maxSurge is 0. -# This option defaults to 25%. -# - maxSurge (Optional) : Field that specifies the maximum number of Pods that can be created over -# the desired number of Pods. The value can be an absolute number (for example, 5) -# or a percentage of desired Pods (for example, 10%). The value cannot be 0 if -# MaxUnavailable is 0. -# This option defaults to 25%. -# -# EXAMPLE: -# -# deploymentStrategy: -# enabled: false -# type: RollingUpdate -# rollingUpdate: -# maxSurge: 30% -# maxUnavailable: 30% -deploymentStrategy: - enabled: false - type: RollingUpdate - rollingUpdate: {} - -# deploymentAnnotations will add the provided map to the annotations for the Deployment resource created by this chart. -# The keys and values are free form, but subject to the limitations of Kubernetes resource annotations. -# NOTE: This variable is injected directly into the deployment spec. -deploymentAnnotations: {} - -# additionalDeploymentLabels will add the provided map to the labels for the Deployment resource created by this chart. -# this is in addition to the helm template related labels created by the chart -# The keys and values are free form, but subject to the limitations of Kubernetes labelling. -# NOTE: This variable is injected directly into the deployment spec. -additionalDeploymentLabels: {} - -# podAnnotations will add the provided map to the annotations for the Pod resource created by the Deployment. 
-# The keys and values are free form, but subject to the limitations of Kubernetes resource annotations. -# NOTE: This variable is injected directly into the pod spec. -podAnnotations: {} - -# additionalDeploymentLabels will add the provided map to the labels for the Pods created by the deployment resource. -# this is in addition to the helm template related labels created by the chart -# The keys and values are free form, but subject to the limitations of Kubernetes labelling. -# The match labels for the deployment aren't affected by these additional labels -# NOTE: This variable is injected directly into the deployment spec. -additionalPodLabels: {} - -# minPodsAvailable specifies the minimum number of pods that should be available at any given point in time. This is -# used to configure a PodDisruptionBudget for the included pod. See -# https://blog.gruntwork.io/avoiding-outages-in-your-kubernetes-cluster-using-poddisruptionbudgets-ef6a4baa5085 -# for an introduction to PodDisruptionBudgets. -# NOTE: setting this to 0 will skip creating the PodDisruptionBudget resource. -minPodsAvailable: 0 - -# service is a map that specifies the configuration for the Service resource that is created by the chart. -# The expected keys are: -# - enabled (bool) (required) : Whether or not the Service resource should be created. If false, no Service -# resource will be created. -# - type (string) : The Service type, as defined in Kubernetes -# (https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) -# Defaults to ClusterIP. -# - annotations (map) : Annotations that should be added to the Service resource. This is injected -# directly in to the resource yaml. -# - ports (map) (required) : A map that specifies the port bindings of the service against the Pods in the -# Deployment. This has the same structure as containerPorts, with the additional -# key of `targetPort` to indicate which port of the container the service port -# should route to. The `targetPort` can be a name defined in `containerPorts`. -# - sessionAffinity (string) : Used to maintain session affinity, as defined in Kubernetes - supports 'ClientIP' and 'None' -# (https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies) -# Kubernetes defaults to None. -# - sessionAffinityConfig (object) : Configuration for session affinity, as defined in Kubernetes -# (https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies) -# -# The following example uses the default config and enables client IP based session affinity with a maximum session -# sticky time of 3 hours. -# EXAMPLE: -# -# service: -# enabled: true -# ports: -# app: -# port: 80 -# targetPort: http -# protocol: TCP -# sessionAffinity: ClientIP -# sessionAffinityConfig: -# clientIP: -# timeoutSeconds: 10800 -# -# The default config configures a Service of type ClusterIP with no annotation, and binds port 80 of the pod to the -# port 80 of the service, and names the binding as `app`: -service: - enabled: true - ports: - app: - port: 80 - targetPort: http - protocol: TCP - -# servicemonitor is a map that can be used to configure a Service monitor for the operator. By default, service monitor is off. -# The expected keys are: -# - enabled (bool) (required) : Whether or not the Service Monitor resource should be created. If false, no -# Service Monitor resource will be created. -# - namespace (string) (required) : Namespace of Endpoints object. 
-# - endpoints (list[map]) (required) : An object used to discovers targets from listed endpoints of a service. -# For each endpoint address one target is discovered per port. -# If the endpoint is backed by a pod, all additional container ports of the pod, -# not bound to an endpoint port, are discovered as targets as well. -# -# The following example specifies a ServiceMonitor rule that describes the set of targets to be monitored by Prometheus. -# EXAMPLE: -# -# serviceMonitor: -# enabled: true -# namespace: monitoring -# endpoints: -# default: -# interval: 10s -# scrapeTimeout: 10s -# honorLabels: true -# path: /metrics -# port: http -# scheme: http -serviceMonitor: - enabled: false - namespace: monitoring - labels: {} - endpoints: {} - -# ingress is a map that can be used to configure an Ingress resource for this service. By default, turn off ingress. -# NOTE: if you enable Ingress, then Service must also be enabled. -# The expected keys are: -# - enabled (bool) (required) : Whether or not the Ingress resource should be created. If false, no -# Ingress resource will be created. -# - annotations (map) : Annotations that should be added to the Service resource. This is -# injected directly in to the resource yaml. -# - tls (list[map]) : Sets up TLS termination on the ingress rule. Each item is a separate TLS -# rule that maps to one or more hosts specified in this ingress rule. This -# is injected directly in to the resource yaml. -# - hosts (list[string]) : Sets up the host routes for the ingress resource. There will be a routing -# rule for each host defined in this list. If empty, will match all hosts. -# - path (string) (required) : The url path to match to route to the Service. -# - pathType (string) (required in k8s version 1.19+) -# : The path type to use for the ingress rule. Refer to -# https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types -# for more information. -# - servicePort (int|string) (required) : The port (as a number) or the name of the port on the Service to route -# to. -# - additionalPaths (list[map]) : Additional paths that should be added to the ingress which will be lower -# priority than the application service path. Each item corresponds to -# another path, and should define `path`, `pathType`, `serviceName`, and -# `servicePort`. -# - additionalPathsHigherPriority (list[map]) -# : Additional paths that should be added to the ingress which will be higher -# priority than the application service path. Each item corresponds to -# another path, and should define `path`, `pathType`, `serviceName`, and -# `servicePort`. -# -# The following example specifies an Ingress rule that routes chart-example.local/app to the Service port `app` with -# TLS configured using the certificate key pair in the Secret `chart-example-tls`: -# -# EXAMPLE: -# -# ingress: -# enabled: true -# annotations: -# kubernetes.io/ingress.class: nginx -# kubernetes.io/tls-acme: "true" -# path: /app -# servicePort: app -# hosts: -# - chart-example.local -# tls: -# - secretName: chart-example-tls -# hosts: -# - chart-example.local -ingress: - enabled: false - -# envVars is a map of strings to strings that specifies hard coded environment variables that should be set on the -# application container. The keys will be mapped to environment variable keys, with the values mapping to the -# environment variable values. -# -# NOTE: If you wish to set environment variables using Secrets, see the `secrets` setting in this file. 
-# -# The following example configures two environment variables, DB_HOST and DB_PORT: -# -# EXAMPLE: -# -# envVars: -# DB_HOST: "mysql.default.svc.cluster.local" -# DB_PORT: 3306 -envVars: {} - -# additionalContainerEnv is a list of additional environment variables -# definitions that will be inserted into the Container's environment YAML. -# -# Example: -# additionalContainerEnv: -# - name: DD_AGENT_HOST -# valueFrom: -# fieldRef: -# fieldPath: status.hostIP -# - name: DD_ENTITY_ID -# valueFrom: -# fieldRef: -# fieldPath: metadata.uid -additionalContainerEnv: {} - -# configMaps is a map that specifies the ConfigMap resources that should be exposed to the main application container. Each -# entry in the map represents a ConfigMap resource. The key refers to the name of the ConfigMap that should be exposed, -# with the value specifying how to expose the ConfigMap. The value is also a map and has the following attributes: -# - as (enum[volume,environment,envFrom,none]) (required) -# : ConfigMaps can be exposed to Pods as a volume mount, or as environment variables. This attribute is a string -# enum that is expected to be either "volume", "environment", or "envFrom", specifying that the ConfigMap should -# be exposed as a mounted volume, via environment variables, or loaded as environment variables respectively. -# This attribute can also be set to "none", which disables the `ConfigMap` on the container. -# - mountPath (string) -# : For ConfigMaps mounted as a volume, specify the mount path on the container file system where the config values -# will be available. Required when the ConfigMap is exposed as a volume. Ignored when the ConfigMap is exposed as -# environment variables. -# - subPath (string) -# : For ConfigMaps mounted as a volume, specify the sub path on the volume system where the config values will be -# available. Optional when the ConfigMap is exposed as a volume. Ignored when the ConfigMap is exposed as -# environment variables. -# - items (map[ConfigMapItem]) -# : Specify how each ConfigMap value should be made available. The keys are the key of the ConfigMap that you wish -# to configure, while the value is another map that controls how that key should be exposed. Required when the -# ConfigMap is exposed as environment variables. When the ConfigMap is exposed as a volume, this field is optional. -# If empty for volume ConfigMaps, all ConfigMpas will be mounted with the key as the file name relative to the -# mountPath. See below for expected attributes. -# The expected attributes of the `ConfigMapItem` map (the submap within `items`) are: -# - filePath (string) : The file path relative to the ConfigMap mountPath where the value of the ConfigMap keyed at -# the given key of the item should be mounted to in the container. Ignored when the ConfigMap -# is exposed as environment variables. -# - fileMode (string) : The permissions mode of the file when mounted in the container. Ignored when the ConfigMap is -# exposed as environment variables. Expected to be the octal (e.g 777, 644). Defaults to 644. -# - envVarName (string) : The name of the environment variable where the value of the ConfigMap keyed at the given key -# of the item should be stored. Ignored when the ConfigMap is exposed as a volume mount. -# -# NOTE: These config values are only automatically injected to the main application container. 
To add them to the side -# car containers, use the official Kubernetes Pod syntax: -# https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/ -# -# The following example exposes the ConfigMap `myconfig` as a volume mounted to `/etc/myconfig`, while it exposes the -# ConfigMap `myotherconfig` as an environment variable. Additionally, it automatically mounts all of the keys -# `anotherconfig` as environment variables using the `envFrom` keyword. -# -# EXAMPLE: -# -# configMaps: -# myconfig: -# as: volume -# mountPath: /etc/myconfig -# myotherconfig: -# as: environment -# items: -# foo: -# envVarName: CONFIG_FOO -# anotherconfig: -# as: envFrom -configMaps: {} - -# persistentVolumes is a map that specifies PersistentVolumes that should be mounted on the pod. Each entry represents a -# persistent volume which should already exist within your cluster. They Key is the name of the persistent volume. -# The value is also a map and has the following attributes: -# - mountPath (string) (required) -# : The path within the container upon which this volume should be mounted. -# - claimName (string) (required) -# : The name of the Persistent Volume Claim on which this Persistent Volume in bound. -# -# EXAMPLE: -# persistentVolumes: -# example-pv: -# mountPath: /mnt/myVol -# claimName: example-pv-claim -# example-pv-2: -# mountPath: /mnt/myOtherVol -# claimName: example-pv2-claim -# -# -persistentVolumes: {} - -# scratchPaths is a map of key value pairs that specifies which paths in the container should be setup as scratch space. -# Under the hood each entry in the map is converted to a tmpfs volume with the name set to the key and mounted into the -# container on the path provided as the value. -# -# EXAMPLE: -# scratchPaths: -# example: /mnt/scratch -scratchPaths: {} - -# emptyDirs is a map of key value pairs that specifies which paths in the container should be setup as an emptyDir volume. -# Under the hood each entry in the map is converted to a volume stored on whatever medium that backs the node -# (disk, SSD, network storage) and mounted into the container on the path provided as the value. -# -# EXAMPLE: -# emptyDirs: -# example: /mnt/example -emptyDirs: {} - -# secrets is a map that specifies the Secret resources that should be exposed to the main application container. Each entry in -# the map represents a Secret resource. The key refers to the name of the Secret that should be exposed, with the value -# specifying how to expose the Secret. The value is also a map and has the following attributes: -# - as (enum[volume,environment,envFrom,none]) (required) -# : Secrets can be exposed to Pods as a volume mount, or as environment variables. This attribute is a string enum -# that is expected to be either "volume", "environment", or "envFrom", specifying that the Secret should be -# exposed as a mounted volume, via environment variables, or loaded in its entirety as environment variables -# respectively. This attribute can also be set to "none", which disables the `Secret` on the container. -# - mountPath (string) -# : For Secrets mounted as a volume, specify the mount path on the container file system where the secrets will be -# available. Required when the Secret is exposed as a volume. Ignored when the Secret is exposed as environment -# variables. -# - items (map[SecretItem]) -# : Specify how each Secret value should be made available. 
The keys are the key of the Secret that you wish to -# configure, while the value is another map that controls how that key should be exposed. Required when the Secret -# is exposed as environment variables. When the Secret is exposed as a volume, this field is optional. If empty for -# volume Secrets, all Secrets will be mounted with the key as the file name relative to the mountPath. See below -# for expected attributes. -# The expected attributes of the `SecretItem` map (the submap within `items`) are: -# - filePath (string) : The file path relative to the Secret mountPath where the value of the Secret keyed at the -# given key of the item should be mounted to in the container. Ignored when the Secret is -# exposed as environment variables. -# - fileMode (string) : The permissions mode of the file when mounted in the container. Ignored when the Secret is -# exposed as environment variables. Expected to be the octal (e.g 777, 644). Defaults to 644. -# - envVarName (string) : The name of the environment variable where the value of the Secret keyed at the given key of -# the item should be stored. Ignored when the Secret is exposed as a volume mount. -# -# NOTE: These secrets are only automatically injected to the main application container. To add them to the side car -# containers, use the official Kubernetes Pod syntax: -# https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets -# -# The following example exposes the Secret `mysecret` as a volume mounted to `/etc/mysecret`, while it exposes the -# Secret `myothersecret` as an environment variable. Additionally, it automatically mounts all of the keys -# `anothersecret` as environment variables using the `envFrom` keyword. -# -# EXAMPLE: -# -# secrets: -# mysecret: -# as: volume -# mountPath: /etc/mysecret -# myothersecret: -# as: environment -# items: -# foo: -# envVarName: SECRET_FOO -# anothersecret: -# as: envFrom -secrets: {} - -# containerResources specifies the amount of resources the application container will require. Only specify if you have -# specific resource needs. -# NOTE: This variable is injected directly into the pod spec. See the official documentation for what this might look -# like: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ -containerResources: {} - -# nodeSelector and affinity specify restrictions on what node this pod should be scheduled on. -# NOTE: These variables are injected directly into the pod spec. See the official documentation for what this might look -# like: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ -nodeSelector: {} -affinity: {} - -# tolerations can be used to allow the pod to be scheduled on nodes with a specific taint. -# NOTE: This variable is injected directly into the pod spec. See the official documentation for what this might look -# like: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -tolerations: [] - -# imagePullSecrets lists the Secret resources that should be used for accessing private registries. Each item in the -# list is a string that corresponds to the Secret name. -imagePullSecrets: [] - -# terminationGracePeriodSeconds sets grace period Kubernetes will wait before terminating the pod. The timeout happens -# in parallel to preStop hook and the SIGTERM signal, Kubernetes does not wait for preStop to finish before beginning -# the grace period. 
-# -# EXAMPLE: -# terminationGracePeriodSeconds: 30 -terminationGracePeriodSeconds: {} - -# serviceAccount is a map that configures the ServiceAccount information for the Pod. -# The expected keys of serviceAccount are: -# - name (string) : The name of the ServiceAccount in the Namespace where the Pod is deployed -# that should be used. By default this is the default ServiceAccount of the -# Namespace. -# - automountServiceAccountToken (bool) : Whether or not to automatically mount the ServiceAccount token as a volume -# into the Pod. Note that this can be used to override the equivalent config -# on the ServiceAccount. -# - create (bool) : Whether or not to create a service account with the desired name -# - annotations (map) : Annotations will add the provided map to the annotations for the service -# account created -# - labels (map) : Labels will add the provided map to the annotations for the service -# account created -# -# The default config uses empty string to indicate that the default service account should be used and one shouldn't -# be created -serviceAccount: - name: "" - create: false - annotations: {} - labels: {} - -# horizontalPodAutoscaler is a map that configures the Horizontal Pod Autoscaler information for this pod -# The expected keys of hpa are: -# - enabled (bool) : Whether or not Horizontal Pod Autoscaler should be created, if false the -# Horizontal Pod Autoscaler will not be created -# - minReplicas (int) : The minimum amount of replicas allowed -# - maxReplicas (int) : The maximum amount of replicas allowed -# - avgCpuUtilization (int) : The target average CPU utilization to be used with the metrics -# - avgMemoryUtilization (int) : The target average Memory utilization to be used with the metrics -# -# The default config will not create the Horizontal Pod Autoscaler by setting enabled = false, the default values are -# set so if enabled is true the horizontalPodAutoscaler has valid values. -horizontalPodAutoscaler: - enabled: false - minReplicas: 1 - maxReplicas: 10 - -# customResources is a map that lets you define Kubernetes resources you want installed and configured as part of this chart. -# The expected keys of customResources are: -# - enabled (bool) : Whether or not the provided custom resource definitions should be created. -# - resources (map) : A map of custom Kubernetes resources you want to install during the installation of the chart. -# -# NOTE: By default enabled = false, and no custom resources will be created. If you provide any resources, be sure to -# provide them as quoted using "|", and set enabled: true. -# -# The following example creates a custom ConfigMap and a Secret. -# -# EXAMPLE: -# -# customResources: -# enabled: true -# resources: -# custom_configmap: | -# apiVersion: v1 -# kind: ConfigMap -# metadata: -# name: example -# data: -# key: value -# custom_secret: | -# apiVersion: v1 -# kind: Secret -# metadata: -# name: example -# type: Opaque -# data: -# key: dmFsdWU= -customResources: - enabled: false - resources: {} - -# fullnameOverride is a string that allows overriding the default fullname that appears as the -# application name and is used as the application name by kubernetes. -fullnameOverride: "" - -#---------------------------------------------------------------------------------------------------------------------- -# GOOGLE SPECIFIC VALUES -# google specifies Google (GKE) specific configuration to be set via arguments/env. 
variables
-#----------------------------------------------------------------------------------------------------------------------
-google:
-  # managedCertificate can be used to provision a Google Managed Certificate. Associate the ManagedCertificate object
-  # to an Ingress by adding an annotation 'networking.gke.io/managed-certificates' to the Ingress.
-  #
-  # The expected keys are:
-  #   - enabled     (bool)   (required) : Whether or not the ManagedCertificate resource should be created.
-  #   - domainName  (string)            : Specifies the domain that the SSL certificate will be created for
-  #   - name        (string)            : Specifies the name of the SSL certificate that you reference in Ingress with
-  #                                       networking.gke.io/managed-certificates: name
-  #
-  # The following example specifies a ManagedCertificate with a domain name 'api.acme.com' and name 'acme-cert':
-  #
-  # EXAMPLE:
-  #
-  # google:
-  #   managedCertificate:
-  #     enabled: true
-  #     name: acme-cert
-  #     domainName: api.acme.com
-  #
-  # NOTE: if you enable managedCertificate, then Ingress must also be enabled.
-  # Use a Google Managed Certificate. By default, turn off.
-  managedCertificate:
-    enabled: false
+# DaemonSet is a map that configures the Pods that this chart runs as a daemon on all (or some) nodes.
+# The expected keys of DaemonSet are:
+#   - name                   (string) : The name of the DaemonSet.
+#   - create                 (bool)   : Whether or not to create a DaemonSet with the desired name.
+#   - labels                 (map)    : Additional labels to add to the DaemonSet.
+#   - selectorLabels         (map)    : Additional labels used to match the Pods managed by this DaemonSet.
+#   - tolerationKey          (string) : Toleration key. Example: node-role.kubernetes.io/master
+#   - tolerationOperator     (string) : Toleration operator. Example: Exists
+#   - tolerationEffect       (string) : Toleration effect. Example: NoSchedule
+#   - containerImage         (string) : The container image (including tag) to run in the DaemonSet Pods.
+#   - volumeMountCreate      (bool)   : Whether or not to mount a hostPath volume into the container.
+#   - volumeName             (string) : Name of the volume that is mounted into the container.
+#   - volumeMountPath        (string) : Path at which the volume is mounted in the container (also used as the hostPath on the node).
+#   - extraVolumeMountCreate (bool)   : Whether or not to mount an additional, read-only hostPath volume.
+#   - extraVolumeName        (string) : Name of the additional volume.
+#   - extraVolumeMountPath   (string) : Path at which the additional volume is mounted (also used as the hostPath on the node).
+#   - updateStrategy         (string) : The DaemonSet update strategy (e.g. "RollingUpdate").
+#   - maxUnavailable         (number) : Only used when updateStrategy is set to "RollingUpdate".
+
+DaemonSet:
+  name: "fluentd"
+  create: true
+  tolerationKey: "node-role.kubernetes.io/master"
+  tolerationOperator: "Exists"
+  tolerationEffect: "NoSchedule"
+  containerImage: "quay.io/fluentd_elasticsearch/fluentd:v2.5.2"
+  volumeMountCreate: true
+  volumeName: "varlog"
+  volumeMountPath: "/var/log"
+  updateStrategy: "RollingUpdate"
+  maxUnavailable: 1
+  extraVolumeMountCreate: true
+  extraVolumeName: "varlibdockercontainers"
+  extraVolumeMountPath: "/var/lib/docker/containers"
diff --git a/examples/k8s-daemonset-fluentd/values.yaml b/examples/k8s-daemonset-fluentd/values.yaml
index 95187b1c..9053187b 100644
--- a/examples/k8s-daemonset-fluentd/values.yaml
+++ b/examples/k8s-daemonset-fluentd/values.yaml
@@ -1,69 +1,29 @@
-#----------------------------------------------------------------------------------------------------------------------
-# CHART PARAMETERS FOR NGINX EXAMPLE
-# This file declares the required values for the k8s-daemonset helm chart to deploy fluentd.
-# This is a YAML-formatted file.
-#----------------------------------------------------------------------------------------------------------------------
+# DaemonSet is a map that configures the Pods that this chart runs as a daemon on all (or some) nodes.
+# The expected keys of DaemonSet are:
+#   - name                   (string) : The name of the DaemonSet.
+#   - create                 (bool)   : Whether or not to create a DaemonSet with the desired name.
+#   - labels                 (map)    : Additional labels to add to the DaemonSet.
+#   - selectorLabels         (map)    : Additional labels used to match the Pods managed by this DaemonSet.
+#   - tolerationKey          (string) : Toleration key. Example: node-role.kubernetes.io/master
+#   - tolerationOperator     (string) : Toleration operator. Example: Exists
+#   - tolerationEffect       (string) : Toleration effect. Example: NoSchedule
+#   - containerImage         (string) : The container image (including tag) to run in the DaemonSet Pods.
+#   - volumeMountCreate      (bool)   : Whether or not to mount a hostPath volume into the container.
+#   - volumeName             (string) : Name of the volume that is mounted into the container.
+#   - volumeMountPath        (string) : Path at which the volume is mounted in the container (also used as the hostPath on the node).
+#   - extraVolumeMountCreate (bool)   : Whether or not to mount an additional, read-only hostPath volume.
+#   - extraVolumeName        (string) : Name of the additional volume.
+#   - extraVolumeMountPath   (string) : Path at which the additional volume is mounted (also used as the hostPath on the node).
+#   - updateStrategy         (string) : The DaemonSet update strategy (e.g. "RollingUpdate").
+#   - maxUnavailable         (number) : Only used when updateStrategy is set to "RollingUpdate".

-#----------------------------------------------------------------------------------------------------------------------
-# REQUIRED VALUES OF CHART
-# These are the required values defined by the k8s-daemonset chart. Here we will set them to deploy an fluentd container.
-#----------------------------------------------------------------------------------------------------------------------
-
-# containerImage is a map that describes the container image that should be used to serve the application managed by
-# the k8s-daemonset chart.
-# The expected keys are:
-#   - repository (string) (required) : The container image repository that should be used.
-#                                      E.g `fluentd` ; `gcr.io/kubernetes-helm/tiller`
-#   - tag        (string) (required) : The tag of the image (e.g `latest`) that should be used. We recommend using a
-#                                      fixed tag or the SHA of the image. Avoid using the tags `latest`, `head`,
-#                                      `canary`, or other tags that are designed to be “floating”.
-#   - pullPolicy (string)            : The image pull policy to employ. Determines when the image will be pulled in. See
-#                                      the official Kubernetes docs for more info. If undefined, this will default to
-#                                      `IfNotPresent`.
-#
-# The following example deploys the `fluentd:stable` image with a `IfNotPresent` image pull policy, which indicates that
-# the image should only be pulled if it has not been pulled previously. We deploy a specific, locked tag so that we
-# don't inadvertently upgrade fluentd during a deployment that changes some other unrelated input value.
-containerImage:
-  repository: fluent/fluentd-kubernetes-daemonset
-  tag: v1-debian-syslog
-  pullPolicy: IfNotPresent
-
-# applicationName is a string that names the application. This is used to label the pod and to name the main application
-# container in the pod spec. Here we use fluentd as the name since we are deploying fluentd.
-applicationName: "fluentd"
-
-#----------------------------------------------------------------------------------------------------------------------
-# OVERRIDE OPTIONAL VALUES
-# These values have defaults in the k8s-daemonset chart, but we override a few of them for the purposes of this demo.
-#----------------------------------------------------------------------------------------------------------------------
-
-# livenessProbe is a map that specifies the liveness probe of the main application container. Liveness probes indicate
-# when a container has reached a fatal state where it needs to be restarted to recover. When the liveness probe fails,
-# the container is automatically recreated. You can read more about container liveness probes in the official docs:
-# https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
-# NOTE: This variable is injected directly into the container spec.
-# -# The following example specifies an http GET based liveness probe, that will base the probe on a http GET request to -# the port bound to name `http` (port 80 in the default settings) on the path `/`. -livenessProbe: - httpGet: - path: / - port: http - -# readinessProbe is a map that specifies the readiness probe of the main application container. Readiness probes -# indicate when a container is unable to serve traffic. When the readiness probe fails, the container is cycled out of -# the list of available containers to the `Service`. You can read more about readiness probes in the official docs: -# https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ -# NOTE: This variable is injected directly into the container spec. -# -# The following example specifies an http GET based readiness probe, that will base the probe on a http GET request to -# the port bound to name `http` (see description on `containerPorts`) on the path `/`. -readinessProbe: - httpGet: - path: / - port: http - -# We override the service type to use NodePort so that we can access the Service from outside the Kubernetes cluster. -service: - type: NodePort +DaemonSet: + name: "fluentd" + create: true + tolerationKey: "node-role.kubernetes.io/master" + tolerationOperator: "Exists" + tolerationEffect: "NoSchedule" + containerImage: "quay.io/fluentd_elasticsearch/fluentd:v2.5.2" + volumeMountCreate: true + volumeName: "varlog" + volumeMountPath: "/var/log" + updateStrategy: "RollingUpdate" + maxUnavailable: 1 + extraVolumeMountCreate: true + extraVolumeName: "varlibdockercontainers" + extraVolumeMountPath: "/var/lib/docker/containers" From e8904b3e952cff2cafe2d749d94366dc67592192 Mon Sep 17 00:00:00 2001 From: Raghu Katti Date: Wed, 27 Apr 2022 14:17:39 -0400 Subject: [PATCH 6/7] Updated README.md --- charts/k8s-daemonset/README.md | 1178 +------------------------------- 1 file changed, 22 insertions(+), 1156 deletions(-) diff --git a/charts/k8s-daemonset/README.md b/charts/k8s-daemonset/README.md index 249010df..6dac59f4 100644 --- a/charts/k8s-daemonset/README.md +++ b/charts/k8s-daemonset/README.md @@ -7,13 +7,7 @@ microservice. The container will be packaged into [Pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) that are managed by the `DaemonSet` controller. -This Helm Chart can also be used to front the `Pods` of the `DaemonSet` resource with a -[Service](https://kubernetes.io/docs/concepts/services-networking/service/) to provide a stable endpoint to access the -`Pods`, as well as load balance traffic to them. The Helm Chart can also specify -[Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) rules to further configure complex routing -rules in front of the `Service`. - -If you're using the chart to deploy to [GKE](https://cloud.google.com/kubernetes-engine/), you can also use the chart to deploy a [Google Managed SSL Certificate](https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs) and associate it with the Ingress. +This Helm Chart can be used to deploy the pod under a `DaemonSet` resource onto your Kubernetes cluster. ## How to use this chart? 
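As a minimal usage sketch (not taken from the chart's own docs): assuming the chart lives at `charts/k8s-daemonset` and the example values file at `examples/k8s-daemonset-fluentd/values.yaml`, as added in this patch, and assuming Helm 3, deploying and checking the example DaemonSet might look roughly like this — the release name `fluentd` below is an arbitrary choice for the sketch:

```bash
# Install the chart using the fluentd example values.
$ helm install fluentd charts/k8s-daemonset -f examples/k8s-daemonset-fluentd/values.yaml

# The DaemonSet takes its name from .Values.DaemonSet.name ("fluentd" in the
# example values), and its Pods are labeled app=fluentd, so you should see one
# Pod per schedulable node:
$ kubectl get daemonset fluentd
$ kubectl get pods -l app=fluentd -o wide
```

The two hostPath mounts configured in the example values (`/var/log` and `/var/lib/docker/containers`) are what give the fluentd container access to the log files on each node.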
@@ -29,1167 +23,39 @@ back to [root README](/README.adoc#core-concepts) The following resources will be deployed with this Helm Chart, depending on which configuration values you use: -- `Deployment`: The main `DaemonSet` controller that will manage the application container image specified in the +- `DaemonSet`: The main `DaemonSet` controller that will manage the application container image specified in the `containerImage` input value. -- Secondary `DaemonSet` for use as canary: An optional `Deployment` controller that will manage a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of the application container image specified in the `canary.containerImage` input value. This is useful for testing a new application tag, in parallel to your stable tag, prior to rolling the new tag out. Created only if you configure the `canary.containerImage` values (and set `canary.enabled = true`). -- `Service`: The `Service` resource providing a stable endpoint that can be used to address to `Pods` created by the - `Deployment` controller. Created only if you configure the `service` input (and set - `service.enabled = true`). -- `ServiceMonitor`: The `ServiceMonitor` describes the set of targets to be monitored by Prometheus. Created only if you configure the service input and set `serviceMonitor.enabled = true`. -- `Ingress`: The `Ingress` resource providing host and path routing rules to the `Service` for the deployed `Ingress` - controller in the cluster. Created only if you configure the `ingress` input (and set - `ingress.enabled = true`). -- `ManagedCertificate`: The `ManagedCertificate` is a [GCP](https://cloud.google.com/) -specific resource that creates a Google Managed SSL certificate. Google-managed SSL certificates are provisioned, renewed, and managed for your domain names. Read more about Google-managed SSL certificates [here](https://cloud.google.com/load-balancing/docs/ssl-certificates#managed-certs). Created only if you configure the `google.managedCertificate` input (and set - `google.managedCertificate.enabled = true` and `google.managedCertificate.domainName = your.domain.name`). - -back to [root README](/README.adoc#core-concepts) - -## How do I deploy additional services not managed by the chart? - -You can create custom Kubernetes resources, that are not directly managed by the chart, within the `customResources` -key. You provide each resource manifest directly as a value under `customResources.resources` and set -`customResources.enabled` to `true`. For examples of custom resources, take a look at the examples in -[test/fixtures/custom_resources_values.yaml](../../test/fixtures/custom_resources_values.yaml) and -[test/fixtures/multiple_custom_resources_values.yaml](../../test/fixtures/multiple_custom_resources_values.yaml). - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I expose my application internally to the cluster? - -In general, `Pods` are considered ephemeral in Kubernetes. `Pods` can come and go at any point in time, either because -containers fail or the underlying instances crash. In either case, the dynamic nature of `Pods` make it difficult to -consistently access your application if you are individually addressing the `Pods` directly. - -Traditionally, this is solved using service discovery, where you have a stateful system that the `Pods` would register -to when they are available. Then, your other applications can query the system to find all the available `Pods` and -access one of the available ones. 
- -Kubernetes provides a built in mechanism for service discovery in the `Service` resource. `Services` are an abstraction -that groups a set of `Pods` behind a consistent, stable endpoint to address them. By creating a `Service` resource, you -can provide a single endpoint to other applications to connect to the `Pods` behind the `Service`, and not worry about -the dynamic nature of the `Pods`. - -You can read a more detailed description of `Services` in [the official -documentation](https://kubernetes.io/docs/concepts/services-networking/service/). Here we will cover just enough to -understand how to access your app. - -By default, this Helm Chart will deploy your application container in a `Pod` that exposes ports 80. These will -be exposed to the Kubernetes cluster behind the `Service` resource, which exposes port 80. You can modify this behavior -by overriding the `containerPorts` input value and the `service` input value. See the corresponding section in the -`values.yaml` file for more details. - -Once the `Service` is created, you can check what endpoint the `Service` provides by querying Kubernetes using -`kubectl`. First, retrieve the `Service` name that is outputted in the install summary when you first install the Helm -Chart. If you forget, you can get the same information at a later point using `helm status`. For example, if you had -previously installed this chart under the name `edge-service`, you can run the following command to see the created -resources: - -```bash -$ helm status edge-service -LAST DEPLOYED: Fri Feb 8 16:25:49 2019 -NAMESPACE: default -STATUS: DEPLOYED - -RESOURCES: -==> v1/Service -NAME AGE -edge-service-nginx 24m - -==> v1/Deployment -edge-service-nginx 24m - -==> v1/Pod(related) - -NAME READY STATUS RESTARTS AGE -edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 24m -edge-service-nginx-844c978df7-mln26 1/1 Running 0 24m -edge-service-nginx-844c978df7-rdsr8 1/1 Running 0 24m -``` - -This will show you some metadata about the release, the deployed resources, and any notes provided by the Helm Chart. In -this example, the service name is `edge-service-nginx` so we will use that to query the `Service`: - -```bash -$ kubectl get service edge-service-nginx -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -edge-service-nginx ClusterIP 172.20.186.176 80/TCP 27m -``` - -Here you can see basic information about the `Service`. The important piece of information is the `CLUSTER-IP` and -`PORT` fields, which tell you the available endpoint for the `Service`, and any exposed ports. Given that, any `Pod` in -your Kubernetes cluster can access the `Pods` of this application by hitting `{CLUSTER-IP}:{PORT}`. So for this example, -that will be `172.20.186.176:80`. - -But what if you want to automatically find a `Service` by name? The name of the `Service` created by this Helm Chart is -always `{RELEASE_NAME}-{applicationName}`, where `applicationName` is provided in the input value and `RELEASE_NAME` is -set when you install the Helm Chart. This means that the name is predictable, while the allocated IP address may not be. - -To address the `Service` by name, Kubernetes provides two ways: - -- environment variables -- DNS - -### Addressing Service by Environment Variables - -For each active `Service` that a `Pod` has access to, Kubernetes will automatically set a set of environment variables -in the container. 
These are `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` to get the host address (ip address) -and port respectively, where `SVCNAME` is the name of the `Service`. Note that `SVCNAME` will be the all caps version -with underscores of the `Service` name. - -Using the previous example where we installed this chart with a release name `edge-service` and `applicationName` -`nginx`, we get the `Service` name `edge-service-nginx`. Kubernetes will expose the following environment variables to -all containers that can access the `Service`: - -``` -EDGE_SERVICE_NGINX_SERVICE_HOST=172.20.186.176 -EDGE_SERVICE_NGINX_SERVICE_PORT=80 -``` - -Note that environment variables are set when the container first boots up. This means that if you already had `Pods` -deployed in your system before the `Service` was created, you will have to cycle the `Pods` in order to get the -environment variables. If you wish to avoid ordering issues, you can use the DNS method to address the `Service` -instead, if that is available. - -### Addressing Service by DNS - -If your Kubernetes cluster is deployed with the DNS add-on (this is automatically installed for EKS and GKE), then you -can rely on DNS to address your `Service`. Every `Service` in Kubernetes will register the domain -`{SVCNAME}.{NAMESPACE}.svc.cluster.local` to the DNS service of the cluster. This means that all your `Pods` in the -cluster can get the `Service` host by hitting that domain. - -The `NAMESPACE` in the domain refers to the `Namespace` where the `Service` was created. By default, all resources are -created in the `default` namespace. This is configurable at install time of the Helm Chart using the `--namespace` -option. - -In our example, we deployed the chart to the `default` `Namespace`, and the `Service` name is `edge-service-nginx`. So in -this case, the domain of the `Service` will be `edge-service-nginx.default.svc.cluster.local`. When any `Pod` addresses -that domain, it will get the address `172.20.186.176`. - -Note that DNS does not resolve ports, so in this case, you will have to know which port the `Service` uses. So in your -`Pod`, you will have to know that the `Service` exposes port `80` when you address it in your code for the container as -`edge-service-nginx.default.svc.cluster.local:80`. However, like the `Service` name, this should be predictable since it -is specified in the Helm Chart input value. - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I expose my application externally, outside of the cluster? - -Similar to the previous section ([How do I expose my application internally to the -cluster?](#how-do-i-expose-my-application-internally-to-the-cluster), you can use a `Service` resource to expose your -application externally. The primary service type that facilitates external access is the `NodePort` `Service` type. - -The `NodePort` `Service` type will expose the `Service` by binding an available port on the network interface of the -physical machines running the `Pod`. This is different from a network interface internal to Kubernetes, which is only -accessible within the cluster. Since the port is on the host machine network interface, you can access the `Service` by -hitting that port on the node. - -For example, suppose you had a 2 node Kubernetes cluster deployed on EC2. Suppose further that all your EC2 instances -have public IP addresses that you can access. 
For the sake of this example, we will assign random IP addresses to the -instances: - -- 54.219.117.250 -- 38.110.235.198 - -Now let's assume you deployed this helm chart using the `NodePort` `Service` type. You can do this by setting the -`service.type` input value to `NodePort`: - -```yaml -service: - enabled: true - type: NodePort - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP -``` - -When you install this helm chart with this input config, helm will deploy the `Service` as a `NodePort`, binding an -available port on the host machine to access the `Service`. You can confirm this by querying the `Service` using -`kubectl`: - -```bash -$ kubectl get service edge-service-nginx -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -edge-service-nginx NodePort 10.99.244.96 80:31035/TCP 33s -``` - -In this example, you can see that the `Service` type is `NodePort` as expected. Additionally, you can see that the there -is a port binding between port 80 and 31035. This port binding refers to the binding between the `Service` port (80 in -this case) and the host port (31035 in this case). - -One thing to be aware of about `NodePorts` is that the port binding will exist on all nodes in the cluster. This means -that, in our 2 node example, both nodes now have a port binding of 31035 on the host network interface that routes to -the `Service`, regardless of whether or not the node is running the `Pods` backing the `Service` endpoint. This means -that you can reach the `Service` on both of the following endpoints: - -- `54.219.117.250:31035` -- `38.110.235.198:31035` - -This means that no two `Service` can share the same `NodePort`, as the port binding is shared across the cluster. -Additionally, if you happen to hit a node that is not running a `Pod` backing the `Service`, Kubernetes will -automatically hop to one that is. - -You might use the `NodePort` if you do not wish to manage load balancers through Kubernetes, or if you are running -Kubernetes on prem where you do not have native support for managed load balancers. - -To summarize: - -- `NodePort` is the simplest way to expose your `Service` to externally to the cluster. -- You have a limit on the number of `NodePort` `Services` you can have in your cluster, imposed by the number of open ports - available on your host machines. -- You have potentially inefficient hopping if you happen to route to a node that is not running the `Pod` backing the - `Service`. - -Additionally, Kubernetes provides two mechanisms to manage an external load balancer that routes to the `NodePort` for -you. The two ways are: - -- [Using a `LoadBalancer` `Service` type](#loadbalancer-service-type) -- [Using `Ingress` resources with an `Ingress Controller`](#ingress-and-ingress-controllers) - -### LoadBalancer Service Type - -The `LoadBalancer` `Service` type will expose the `Service` by allocating a managed load balancer in the cloud that is -hosting the Kubernetes cluster. On AWS, this will be an ELB, while on GCP, this will be a Cloud Load Balancer. When the -`LoadBalancer` `Service` is created, Kubernetes will automatically create the underlying load balancer resource in the -cloud for you, and create all the target groups so that they route to the `Pods` backing the `Service`. 
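As an aside, you can sanity check a `NodePort` `Service` from outside the cluster by curling one of the node addresses on the allocated port. The IPs and port below come from the example above and are placeholders, not real endpoints:

```bash
# Any node in the cluster forwards the NodePort to the Service, regardless of where the Pods run.
curl http://54.219.117.250:31035
curl http://38.110.235.198:31035
```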
- -You can deploy this helm chart using the `LoadBalancer` `Service` type by setting the `service.type` input value to -`LoadBalancer`: - -```yaml -service: - enabled: true - type: LoadBalancer - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP -``` - -When you install this helm chart with this input config, helm will deploy the `Service` as a `LoadBalancer`, allocating -a managed load balancer in the cloud hosting your Kubernetes cluster. You can get the attached load balancer by querying -the `Service` using `kubectl`. In this example, we will assume we are using EKS: - -``` -$ kubectl get service edge-service-nginx -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -edge-service-nginx LoadBalancer 172.20.7.35 a02fef4d02e41... 80:32127/TCP 1m -``` - -Now, in this example, we have an entry in the `EXTERNAL-IP` field. This is truncated here, but you can get the actual -output when you describe the service: - -``` -$ kubectl describe service edge-service-nginx -Name: edge-service-nginx -Namespace: default -Labels: app.kubernetes.io/instance=edge-service - app.kubernetes.io/managed-by=helm - app.kubernetes.io/name=nginx - gruntwork.io/app-name=nginx - helm.sh/chart=k8s-service-0.1.0 -Annotations: -Selector: app.kubernetes.io/instance=edge-service,app.kubernetes.io/name=nginx,gruntwork.io/app-name=nginx -Type: LoadBalancer -IP: 172.20.7.35 -LoadBalancer Ingress: a02fef4d02e4111e9891806271fc7470-173030870.us-west-2.elb.amazonaws.com -Port: app 80/TCP -TargetPort: 80/TCP -NodePort: app 32127/TCP -Endpoints: 10.0.3.19:80 -Session Affinity: None -External Traffic Policy: Cluster -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal EnsuringLoadBalancer 2m service-controller Ensuring load balancer - Normal EnsuredLoadBalancer 2m service-controller Ensured load balancer -``` - -In the describe output, there is a field named `LoadBalancer Ingress`. When you have a `LoadBalancer` `Service` type, -this field contains the public DNS endpoint of the associated load balancer resource in the cloud provider. In this -case, we have an AWS ELB instance, so this endpoint is the public endpoint of the associated ELB resource. - -**Note:** Eagle eyed readers might also notice that there is an associated `NodePort` on the resource. This is because under the -hood, `LoadBalancer` `Services` utilize `NodePorts` to handle the connection between the managed load balancer of the -cloud provider and the Kubernetes `Pods`. This is because at this time, there is no portable way to ensure that the -network between the cloud load balancers and Kubernetes can be shared such that the load balancers can route to the -internal network of the Kubernetes cluster. Therefore, Kubernetes resorts to using `NodePort` as an abstraction layer to -connect the `LoadBalancer` to the `Pods` backing the `Service`. This means that `LoadBalancer` `Services` share the same -drawbacks as using a `NodePort` `Service`. - -To summarize: - -- `LoadBalancer` provides a way to set up a cloud load balancer resource that routes to the provisioned `NodePort` on - each node in your Kubernetes cluster. -- `LoadBalancer` can be used to provide a persistent endpoint that is robust to the ephemeral nature of nodes in your - cluster. E.g it is able to route to live nodes in the face of node failures. -- `LoadBalancer` does not support weighted balancing. This means that you cannot balance the traffic so that it prefers - nodes that have more instances of the `Pod` running. 
-- Note that under the hood, `LoadBalancer` utilizes a `NodePort` `Service`, and thus shares the same limits as `NodePort`. - -### Ingress and Ingress Controllers - -`Ingress` is a mechanism in Kubernetes that abstracts externally exposing a `Service` from the `Service` config itself. -`Ingress` resources support: - -- assigning an externally accessible URL to a `Service` -- perform hostname and path based routing of `Services` -- load balance traffic using customizable balancing rules -- terminate SSL - -You can read more about `Ingress` resources in [the official -documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/). Here, we will cover the basics to -understand how `Ingress` can be used to externally expose the `Service`. - -At a high level, the `Ingress` resource is used to specify the configuration for a particular `Service`. In turn, the -`Ingress Controller` is responsible for fulfilling those configurations in the cluster. This means that the first -decision to make in using `Ingress` resources, is selecting an appropriate `Ingress Controller` for your cluster. - -#### Choosing an Ingress Controller - -Before you can use an `Ingress` resource, you must install an `Ingress Controller` in your Kubernetes cluster. There are -many kinds of `Ingress Controllers` available, each with different properties. You can see [a few examples listed in the -official documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-controllers). - -When you use an external cloud `Ingress Controller` such as the [GCE Ingress -Controller](https://github.com/kubernetes/ingress-gce/blob/master/README.md) or [AWS ALB Ingress -Controller](https://github.com/kubernetes-sigs/aws-alb-ingress-controller), Kubernetes will allocate an externally -addressable load balancer (for GCE this will be a Cloud Load Balancer and for AWS this will be an ALB) that fulfills the -`Ingress` rules. This includes routing the domain names and paths to the right `Service` as configured by the `Ingress` -rules. Additionally, Kubernetes will manage the target groups of the load balancer so that they are up to date with -the latest `Ingress` configuration. However, in order for this to work, there needs to be some way for the load balancer -to connect to the `Pods` servicing the `Service`. Since the `Pods` are internal to the Kubernetes network and the load -balancers are external to the network, there must be a `NodePort` that links the two together. As such, like the -`LoadBalancer` `Service` type, these `Ingress Controllers` also require a `NodePort` under the hood. - - - -Alternatively, you can use an internal `Ingress Controller` that runs within Kubernetes as `Pods`. For example, the -official `nginx Ingress Controller` will launch `nginx` as `Pods` within your Kubernetes cluster. These `nginx` `Pods` -are then configured using `Ingress` resources, which then allows `nginx` to route to the right `Pods`. Since the `nginx` -`Pods` are internal to the Kubernetes network, there is no need for your `Services` to be `NodePorts` as they are -addressable within the network by the `Pods`. However, this means that you need some other mechanism to expose `nginx` -to the outside world, which will require a `NodePort`. The advantage of this approach, despite still requiring a -`NodePort`, is that you can have a single `NodePort` that routes to multiple services using hostnames or paths as -managed by `nginx`, as opposed to requiring a `NodePort` per `Service` you wish to expose. 
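If you are not sure which `Ingress Controllers` are already installed in a cluster, listing the `IngressClass` resources is a quick way to find out on recent Kubernetes versions. The output shown is illustrative:

```bash
# Each IngressClass corresponds to an installed Ingress Controller implementation.
kubectl get ingressclass

# Example output (illustrative):
# NAME    CONTROLLER             PARAMETERS   AGE
# nginx   k8s.io/ingress-nginx   <none>       12d
```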
- -Which `Ingress Controller` type you wish to use depends on your infrastructure needs. If you have relatively few -`Services`, and you want the simplicity of a managed cloud load balancer experience, you might opt for the external -`Ingress Controllers` such as GCE and AWS ALB controllers. On the other hand, if you have thousands of micro services -that push you to the limits of the available number of ports on a host machine, you might opt for an internal `Ingress -Controller` approach. Whichever approach you decide, be sure to document your decision where you install the particular -`Ingress Controller` so that others in your team know and understand the tradeoffs you made. - -#### Configuring Ingress for your Service - -Once you have an `Ingress Controller` installed and configured on your Kuberentes cluster, you can now start creating -`Ingress` resources to add routes to it. This helm chart supports configuring an `Ingress` resource to complement the -`Service` resource that is created in the chart. - -To add an `Ingress` resource, first make sure you have a `Service` enabled on the chart. Depending on the chosen -`Ingress Controller`, the `Service` type should be `NodePort` or `ClusterIP`. Here, we will create a `NodePort` -`Service` exposing port 80: - -```yaml -service: - enabled: true - type: NodePort - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP -``` - -Then, we will add the configuration for the `Ingress` resource by specifying the `ingress` input value. For this -example, we will assume that we want to route `/app` to our `Service`, with the domain hosted on `app.yourco.com`: - -```yaml -ingress: - enabled: true - path: /app - servicePort: 80 - hosts: - - app.yourco.com -``` - -This will configure the load balancer backing the `Ingress Controller` that will route any traffic with host and path -prefix `app.yourco.com/app` to the `Service` on port 80. If `app.yourco.com` is configured to point to the `Ingress -Controller` load balancer, then once you deploy the helm chart you should be able to start accessing your app on that -endpoint. - -#### Registering additional paths - -Sometimes you might want to add additional path rules beyond the main service rule that is injected to the `Ingress` -resource. For example, you might want a path that routes to the sidecar containers, or you might want to reuse a single -`Ingress` for multiple different `Service` endpoints because to share load balancers. For these situations, you can use -the `additionalPaths` and `additionalPathsHigherPriority` input values. - -Consider the following `Service`, where we have the `app` served on port 80, and the `sidecarMonitor` served on port -3000: - -```yaml -service: - enabled: true - type: NodePort - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP - sidecarMonitor: - port: 3000 - targetPort: 3000 - protocol: TCP -``` - -To route `/app` to the `app` service endpoint and `/sidecar` to the `sidecarMonitor` service endpoint, we will configure -the `app` service path rules as the main service route and the `sidecarMonitor` as an additional path rule: - -```yaml -ingress: - enabled: true - path: /app - servicePort: 80 - additionalPaths: - - path: /sidecar - servicePort: 3000 -``` - -Now suppose you had a sidecar service that will return a fixed response indicating server maintainance and you want to -temporarily route all requests to that endpoint without taking down the pod. 
You can do this by creating a route that -catches all paths as a higher priority path using the `additionalPathsHigherPriority` input value. - -Consider the following `Service`, where we have the `app` served on port 80, and the `sidecarFixedResponse` served on -port 3000: - -```yaml -service: - enabled: true - type: NodePort - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP - sidecarFixedResponse: - port: 3000 - targetPort: 3000 - protocol: TCP -``` - -To route all traffic to the fixed response port: - -```yaml -ingress: - enabled: true - path: /app - servicePort: 80 - additionalPathsHigherPriority: - - path: /* - servicePort: 3000 -``` - -The `/*` rule which routes to port 3000 will always be used even when accessing the path `/app` because it will be -evaluated first when routing requests. - -back to [root README](/README.adoc#day-to-day-operations) - -### How do I expose additional ports? - -By default, this Helm Chart will deploy your application container in a Pod that exposes ports 80. Sometimes you might -want to expose additional ports in your application - for example a separate port for Prometheus metrics. You can expose -additional ports for your application by overriding `containerPorts` and `service` input values: - -```yaml - -containerPorts: - http: - port: 80 - protocol: TCP - prometheus: - port: 2020 - protocol: TCP - -service: - enabled: true - type: NodePort - ports: - app: - port: 80 - targetPort: 80 - protocol: TCP - prometheus: - port: 2020 - targetPort: 2020 - protocol: TCP - -``` - - -## How do I deploy a worker service? - -Worker services typically do not have a RPC or web server interface to access it. Instead, worker services act on their -own and typically reach out to get the data they need. These services should be deployed without any ports exposed. -However, by default `k8s-service` will deploy an internally exposed service with port 80 open. - -To disable the default port, you can use the following `values.yaml` inputs: - -``` -containerPorts: - http: - disabled: true - -service: - enabled: false -``` - -This will override the default settings such that only the `Deployment` resource is created, with no ports exposed on -the container. - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I check the status of the rollout? - -This Helm Chart packages your application into a `Deployment` controller. The `Deployment` controller will be -responsible with managing the `Pods` of your application, ensuring that the Kubernetes cluster matches the desired state -configured by the chart inputs. - -When the Helm Chart installs, `helm` will mark the installation as successful when the resources are created. Under the -hood, the `Deployment` controller will do the work towards ensuring the desired number of `Pods` are up and running. - -For example, suppose you set the `replicaCount` variable to 3 when installing this chart. This will configure the -`Deployment` resource to maintain 3 replicas of the `Pod` at any given time, launching new ones if there is a deficit or -removing old ones if there is a surplus. - -To see the current status of the `Deployment`, you can query Kubernetes using `kubectl`. The `Deployment` resource of -the chart are labeled with the `applicationName` input value and the release name provided by helm. 
So for example, -suppose you deployed this chart using the following `values.yaml` file and command: - -```yaml -applicationName: nginx -containerImage: - repository: nginx - tag: stable -``` - -```bash -$ helm install -n edge-service gruntwork/k8s-service -``` - -In this example, the `applicationName` is set to `nginx`, while the release name is set to `edge-service`. This chart -will then install a `Deployment` resource in the default `Namespace` with the following labels that uniquely identifies -it: - -``` -app.kubernetes.io/name: nginx -app.kubernetes.io/instance: edge-service -``` - -So now you can query Kubernetes for that `Deployment` resource using these labels to see the state: - -```bash -$ kubectl get deployments -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -edge-service-nginx 3 3 3 1 24s -``` - -This includes a few useful information: - -- `DESIRED` lists the number of `Pods` that should be running in your cluster. -- `CURRENT` lists how many `Pods` are currently created in the cluster. -- `UP-TO-DATE` lists how many `Pods` are running the desired image. -- `AVAILABLE` lists how many `Pods` are currently ready to serve traffic, as defined by the `readinessProbe`. - -When all the numbers are in sync and equal, that means the `Deployment` was rolled out successfully and all the `Pods` -are passing the readiness healthchecks. - -In the example output above, note how the `Available` count is `1`, but the others are `3`. This means that all 3 `Pods` -were successfully created with the latest image, but only `1` of them successfully came up. You can dig deeper into the -individual `Pods` to check the status of the unavailable `Pods`. The `Pods` are labeled the same way, so you can pass in -the same label query to get the `Pods` managed by the deployment: - -```bash -$ kubectl get pods -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" -NAME READY STATUS RESTARTS AGE -edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 52s -edge-service-nginx-844c978df7-mln26 0/1 Pending 0 52s -edge-service-nginx-844c978df7-rdsr8 0/1 Pending 0 52s -``` - -This will show you the status of each individual `Pod` in your deployment. In this example output, there are 2 `Pods` -that are in the `Pending` status, meaning that they have not been scheduled yet. We can look into why the `Pod` failed -to schedule by getting detailed information about the `Pod` with the `describe` command. 
Unlike `get pods`, `describe -pod` requires a single `Pod` so we will grab the name of one of the failing `Pods` above and feed it to `describe pod`: -```bash -$ kubectl describe pod edge-service-nginx-844c978df7-mln26 -Name: edge-service-nginx-844c978df7-mln26 -Namespace: default -Priority: 0 -PriorityClassName: -Node: -Labels: app.kubernetes.io/instance=edge-service - app.kubernetes.io/name=nginx - gruntwork.io/app-name=nginx - pod-template-hash=4007534893 -Annotations: -Status: Pending -IP: -Controlled By: ReplicaSet/edge-service-nginx-844c978df7 -Containers: - nginx: - Image: nginx:stable - Ports: 80/TCP - Host Ports: 0/TCP - Environment: - Mounts: - /var/run/secrets/kubernetes.io/serviceaccount from default-token-mgkr9 (ro) -Conditions: - Type Status - PodScheduled False -Volumes: - default-token-mgkr9: - Type: Secret (a volume populated by a Secret) - SecretName: default-token-mgkr9 - Optional: false -QoS Class: BestEffort -Node-Selectors: -Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s - node.kubernetes.io/unreachable:NoExecute for 300s -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Warning FailedScheduling 1m (x25 over 3m) default-scheduler 0/2 nodes are available: 2 Insufficient pods. -``` -This will output detailed information about the `Pod`, including an event log. In this case, the roll out failed because -there is not enough capacity in the cluster to schedule the `Pod`. - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I set and share configurations with the application? - -While you can bake most application configuration values into the application container, you might need to inject -dynamic configuration variables into the container. These are typically values that change depending on the environment, -such as the MySQL database endpoint. Additionally, you might also want a way to securely share secrets with the -container such that they are not hard coded in plain text in the container or in the Helm Chart values yaml file. To -support these use cases, this Helm Chart provides three ways to share configuration values with the application -container: - -- [Directly setting environment variables](#directly-setting-environment-variables) -- [Using ConfigMaps](#using-configmaps) -- [Using Secrets](#using-secrets) - -### Directly setting environment variables - -The simplest way to set a configuration value for the container is to set an environment variable for the container -runtime. These variables are set by Kubernetes before the container application is booted, which can then be looked up -using the standard OS lookup functions for environment variables. - -You can use the `envVars` input value to set an environment variable at deploy time. For example, the following entry in -a `values.yaml` file will set the `DB_HOST` environment variable to `mysql.default.svc.cluster.local` and the `DB_PORT` -environment variable to `3306`: - -```yaml -envVars: - DB_HOST: "mysql.default.svc.cluster.local" - DB_PORT: 3306 -``` - -One thing to be aware of when using environment variables is that they are set at start time of the container. This -means that updating the environment variables require restarting the containers so that they propagate. - -### Using ConfigMaps - -While environment variables are an easy way to inject configuration values, what if you want to share the configuration -across multiple deployments? 
If you wish to use the direct environment variables approach, you would have no choice but -to copy paste the values across each deployment. When this value needs to change, you are now faced with going through -each deployment and updating the reference. - -For this situation, `ConfigMaps` would be a better option. `ConfigMaps` help decouple configuration values from the -`Deployment` and `Pod` config, allowing you to share the values across the deployments. `ConfigMaps` are dedicated -resources in Kubernetes that store configuration values as key value pairs. - -For example, suppose you had a `ConfigMap` to store the database information. You might store the information as two key -value pairs: one for the host (`dbhost`) and one for the port (`dbport`). You can create a `ConfigMap` directly using -`kubectl`, or by using a resource file. - -To directly create the `ConfigMap`: - -``` -kubectl create configmap my-config --from-literal=dbhost=mysql.default.svc.cluster.local --from-literal=dbport=3306 -``` - -Alternatively, you can manage the `ConfigMap` as code using a kubernetes resource config: - -```yaml -# my-config.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: my-config -data: - dbhost: mysql.default.svc.cluster.local - dbport: 3306 -``` - -You can then apply this resource file using `kubectl`: - -``` -kubectl apply -f my-config.yaml -``` - -`kubectl` supports multiple ways to seed the `ConfigMap`. You can read all the different ways to create a `ConfigMap` in -[the official -documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#create-a-configmap). - -Once the `ConfigMap` is created, you can access the `ConfigMap` within the `Pod` by configuring the access during -deployment. This Helm Chart provides the `configMaps` input value to configure what `ConfigMaps` should be shared with -the application container. There are two ways to inject the `ConfigMap`: - -- [Accessing the `ConfigMap` as Environment Variables](#accessing-the-configmap-as-environment-variables) -- [Accessing the `ConfigMap` as Files](#accessing-the-configmap-as-files) - -**NOTE**: It is generally not recommended to use `ConfigMaps` to store sensitive data. For those use cases, use -`Secrets` or an external secret store. - -##### Accessing the ConfigMap as Environment Variables - -You can set the values of the `ConfigMap` as environment variables in the application container. To do so, you set the -`as` attribute of the `configMaps` input value to `environment`. For example, to share the `my-config` `ConfigMap` above -using the same environment variables as the example in [Directly setting environment -variables](#directly-settings-environment-variables), you would set the `configMaps` as follows: - -```yaml -configMaps: - my-config: - as: environment - items: - dbhost: - envVarName: DB_HOST - dbport: - envVarName: DB_PORT -``` - -In this configuration for the Helm Chart, we specify that we want to share the `my-config` `ConfigMap` as environment -variables with the main application container. Additionally, we want to map the `dbhost` config value to the `DB_HOST` -environment variable, and similarly map the `dbport` config value to the `DB_PORT` environment variable. - -Note that like directly setting environment variables, these are set at container start time, and thus the containers -need to be restarted when the `ConfigMap` is updated for the new values to be propagated. You can use files instead if -you wish the `ConfigMap` changes to propagate immediately. 
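To confirm that the values landed where you expect, you can inspect the `ConfigMap` itself and the environment of one of the running containers. The pod name below is a placeholder taken from the earlier nginx example:

```bash
# Show the key value pairs stored in the ConfigMap.
kubectl get configmap my-config -o yaml

# Check that the container sees the values as environment variables.
kubectl exec edge-service-nginx-844c978df7-f5wc4 -- env | grep DB_
```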
- -##### Accessing the ConfigMap as Files - -You can mount the `ConfigMap` values as files on the container filesystem. To do so, you set the `as` attribute of the -`configMaps` input value to `volume`. - -For example, suppose you wanted to share the `my-config` `ConfigMap` above as the files `/etc/db/host` and -`/etc/db/port`. For this case, you would set the `configMaps` input value to: - -```yaml -configMaps: - my-config: - as: volume - mountPath: /etc/db - items: - dbhost: - filePath: host - dbport: - filePath: port -``` - -In the container, now the values for `dbhost` is stored as a text file at the path `/etc/db/host` and `dbport` is stored -at the path `/etc/db/port`. You can then read these files in in your application to get the values. - -Unlike environment variables, using files has the advantage of immediately reflecting changes to the `ConfigMap`. For -example, when you update `my-config`, the files at `/etc/db` are updated automatically with the new values, without -needing a redeployment to propagate the new values to the container. - -### Using Secrets - -In general, it is discouraged to store sensitive information such as passwords in `ConfigMaps`. Instead, Kubernetes -provides `Secrets` as an alternative resource to store sensitive data. Similar to `ConfigMaps`, `Secrets` are key value -pairs that store configuration values that can be managed independently of the `Pod` and containers. However, unlike -`ConfigMaps`, `Secrets` have the following properties: - -- A secret is only sent to a node if a pod on that node requires it. They are automatically garbage collected when there - are no more `Pods` referencing it on the node. -- A secret is stored in `tmpfs` on the node, so that it is only available in memory. -- Starting with Kubernetes 1.7, they can be encrypted at rest in `etcd` (note: this feature was in alpha state until - Kubernetes 1.13). - -You can read more about the protections and risks of using `Secrets` in [the official -documentation](https://kubernetes.io/docs/concepts/configuration/secret/#security-properties). - -Creating a `Secret` is very similar to creating a `ConfigMap`. For example, suppose you had a `Secret` to store the -database password. Like `ConfigMaps`, you can create a `Secret` directly using `kubectl`: - -``` -kubectl create secret generic my-secret --from-literal=password=1f2d1e2e67df -``` - -The `generic` keyword indicates the `Secret` type. Almost all use cases for your application should use this type. Other -types include `docker-registry` for specifying credentials for accessing a private docker registry, and `tls` for -specifying TLS certificates to access the Kubernetes API. - -You can also manage the `Secret` as code, although you may want to avoid this for `Secrets` to avoid leaking them in -unexpected locations (e.g source control). Unlike `ConfigMaps`, `Secrets` require values to be stored as base64 encoded -values when using resource files. So the configuration for the above example will be: - -```yaml -# my-secret.yaml -apiVersion: v1 -kind: Secret -type: Opaque -metadata: - name: my-secret -data: - password: MWYyZDFlMmU2N2Rm -``` - -Note that `MWYyZDFlMmU2N2Rm` is the base 64 encoded version of `1f2d1e2e67df`. You can then apply this resource config -using `kubectl`: - -``` -kubectl apply -f my-secret.yaml -``` - -Similar to `ConfigMaps`, this Helm Chart supports two ways to inject `Secrets` into the application container: as -environment variables, or as files. 
The syntax to share the values is very similar to the `configMaps` input value, only -you use the `secrets` input value. The properties of each approach is very similar to `ConfigMaps`. Refer to [the -previous section](#using-configmaps) for more details on each approach. Here, we show you examples of the input values -to use for each approach. - -**Mounting secrets as environment variables**: In this example, we mount the `my-secret` `Secret` created above as the -environment variable `DB_PASSWORD`. - -```yaml -secrets: - my-secret: - as: environment - items: - password: - envVarName: DB_PASSWORD -``` - -**Mounting secrets as files**: In this example, we mount the `my-secret` `Secret` as the file `/etc/db/password`. - -```yaml -secrets: - my-secret: - as: volume - mountPath: /etc/db - items: - password: - filePath: password -``` - -**NOTE**: The volumes are different between `secrets` and `configMaps`. This means that if you use the same `mountPath` -for different secrets and config maps, you can end up with only one. It is undefined which `Secret` or `ConfigMap` ends -up getting mounted. To be safe, use a different `mountPath` for each one. - -**NOTE**: If you want mount the volumes created with `secrets` or `configMaps` on your init or sidecar containers, you will -have to append `-volume` to the volume name in . In the example above, the resulting volume will be `my-secret-volume`. - -```yaml -sideCarContainers: - sidecar: - image: sidecar/container:latest - volumeMounts: - - name: my-secret-volume - mountPath: /etc/db -``` - -### Which configuration method should I use? - -Which configuration method you should use depends on your needs. Here is a summary of the pro and con of each -approach: - -##### Directly setting environment variables - -**Pro**: - -- Simple setup -- Manage configuration values directly with application deployment config -- Most application languages support looking up environment variables - -**Con**: - -- Tightly couple configuration settings with application deployment -- Requires redeployment to update values -- Must store in plain text, and easy to leak into VCS - -**Best for**: - -- Iterating different configuration values during development -- Sotring non-sensitive values that are unique to each environment / deployment - -##### Using ConfigMaps - -**Pro**: - -- Keep config DRY by sharing a common set of configurations -- Independently update config values from the application deployment -- Automatically propagate new values when stored as files - -**Con**: - -- More overhead to manage the configuration -- Stored in plain text -- Available on all nodes automatically - -**Best for**: - -- Storing non-sensitive common configuration that are shared across environments -- Storing non-sensitive dynamic configuration values that change frequently - -##### Using Secrets - -**Pro**: - -- All the benefits of using `ConfigMaps` -- Can be encrypted at rest -- Opaque by default when viewing the values (harder to remember base 64 encoded version of "admin") -- Only available to nodes that use it, and only in memory - -**Con**: - -- All the challenges of using `ConfigMaps` -- Configured in plain text, making it difficult to manage as code securely -- Less safe than using dedicated secrets manager / store like HashiCorp Vault. - -**Best for**: - -- Storing sensitive configuration values - -back to [root README](/README.adoc#day-to-day-operations) - -## How do you update the application to a new version? 
- -To update the application to a new version, you can upgrade the Helm Release using updated values. For example, suppose -you deployed `nginx` version 1.15.4 using this Helm Chart with the following values: - -```yaml -containerImage: - repository: nginx - tag: 1.15.4 - -applicationName: nginx -``` - -In this example, we will further assume that you deployed this chart with the above values using the release name -`edge-service`, using a command similar to below: - -```bash -$ helm install -f values.yaml --name edge-service gruntwork/k8s-service -``` - -Now let's try upgrading `nginx` to version 1.15.8. To do so, we will first update our values file: - -```yaml -containerImage: - repository: nginx - tag: 1.15.8 - -applicationName: nginx -``` - -The only difference here is the `tag` of the `containerImage`. - -Next, we will upgrade our release using the updated values. To do so, we will use the `helm upgrade` command: - -```bash -$ helm upgrade -f values.yaml edge-service gruntwork/k8s-service -``` - -This will update the created resources with the new values provided by the updated `values.yaml` file. For this example, -the only resource that will be updated is the `Deployment` resource, which will now have a new `Pod` spec that points to -`nginx:1.15.8` as opposed to `nginx:1.15.4`. This automatically triggers a rolling deployment internally to Kubernetes, -which will launch new `Pods` using the latest image, and shut down old `Pods` once those are ready. - -You can read more about how changes are rolled out on `Deployment` resources in [the official -documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment). - -Note that certain changes will lead to a replacement of the `Deployment` resource. For example, updating the -`applicationName` will cause the `Deployment` resource to be deleted, and then created. This can lead to down time -because the resources are replaced in an uncontrolled fashion. - -## How do I create a canary deployment? - -You may optionally configure a [canary deployment](https://martinfowler.com/bliki/CanaryRelease.html) of an arbitrary tag that will run as an individual deployment behind your configured service. This is useful for ensuring a new application tag runs without issues prior to fully rolling it out. - -To configure a canary deployment, set `canary.enabled = true` and define the `containerImage` values. Typically, you will want to specify the tag of your next release candidate: - -```yaml -canary: - enabled: true - containerImage: - repository: nginx - tag: 1.15.9 -``` -Once deployed, your service will route traffic across both your stable and canary deployments, allowing you to monitor for and catch any issues early. - -back to [root README](/README.adoc#major-changes) - -## How do I verify my canary deployment? 
- -Canary deployment pods have the same name as your stable deployment pods, with the additional `-canary` appended to the end, like so: - -```bash -$ kubectl get pods -l "app.kubernetes.io/name=nginx,app.kubernetes.io/instance=edge-service" -NAME READY STATUS RESTARTS AGE -edge-service-nginx-844c978df7-f5wc4 1/1 Running 0 52s -edge-service-nginx-844c978df7-mln26 0/1 Pending 0 52s -edge-service-nginx-844c978df7-rdsr8 0/1 Pending 0 52s -edge-service-nginx-canary-844c978df7-bsr8 0/1 Pending 0 52s -``` - -Therefore, in this example, you could monitor your canary by running `kubectl logs -f edge-service-nginx-canary-844c978df7-bsr8` - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I roll back a canary deployment? - -Update your values.yaml file, setting `canary.enabled = false` and then upgrade your helm installation: - -```bash -$ helm upgrade -f values.yaml edge-service gruntwork/k8s-service -``` -Following this update, Kubernetes will determine that your canary deployment is no longer desired and will delete it. - -back to [root README](/README.adoc#day-to-day-operations) - -## How do I ensure a minimum number of Pods are available across node maintenance? - -Sometimes, you may want to ensure that a specific number of `Pods` are always available during [voluntary -maintenance](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions). -This chart exposes an input value `minPodsAvailable` that can be used to specify a minimum number of `Pods` to maintain -during a voluntary maintenance activity. Under the hood, this chart will create a corresponding `PodDisruptionBudget` to -ensure that a certain number of `Pods` are up before attempting to terminate additional ones. - -You can read more about `PodDisruptionBudgets` in [our blog post covering the -topic](https://blog.gruntwork.io/avoiding-outages-in-your-kubernetes-cluster-using-poddisruptionbudgets-ef6a4baa5085) -and in [the official -documentation](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#how-disruption-budgets-work). - - -back to [root README](/README.adoc#major-changes) - -## Why does the Pod have a preStop hook with a Shutdown Delay? - -When a `Pod` is removed from a Kubernetes cluster, the control plane notifies all nodes to remove the `Pod` from -registered addresses. This includes removing the `Pod` from the list of available `Pods` to service a `Service` -endpoint. However, because Kubernetes is a distributed system, there is a delay between the shutdown sequence and the -`Pod` being removed from available addresses. As a result, the `Pod` could still get traffic despite it having already -been shutdown on the node it was running on. - -Since there is no way to guarantee that the deletion has propagated across the cluster, we address this eventual -consistency issue by adding an arbitrary delay between the `Pod` being deleted and the initiation of the `Pod` shutdown -sequence. This is accomplished by adding a `sleep` command in the `preStop` hook. - -You can control the length of time to delay with the `shutdownDelay` input value. You can also disable this behavior by -setting the `shutdownDelay` to 0. - -You can read more about this topic in [our blog post -"Delaying Shutdown to Wait for Pod Deletion -Propagation"](https://blog.gruntwork.io/delaying-shutdown-to-wait-for-pod-deletion-propagation-445f779a8304). - - -back to [root README](/README.adoc#day-to-day-operations) - -## What is a sidecar container? 
- -In Kubernetes, `Pods` are one or more tightly coupled containers that are deployed together. The containers in the `Pod` -share, amongst other things, the network stack, the IPC namespace, and in some cases the PID namespace. You can read -more about the resources that the containers in a `Pod` share in [the official -documentation](https://kubernetes.io/docs/concepts/workloads/pods/pod/#what-is-a-pod). - -Sidecar Containers are additional containers that you wish to deploy in the `Pod` housing your application container. -This helm chart supports deploying these containers by configuring the `sideCarContainers` input value. This input value -is a map between the side car container name and the values of the container spec. The spec is rendered directly into -the `Deployment` resource, with the `name` being set to the key. For example: - -```yaml -sideCarContainers: - datadog: - image: datadog/agent:latest - env: - - name: DD_API_KEY - value: ASDF-1234 - - name: SD_BACKEND - value: docker - nginx: - image: nginx:1.15.4 -``` - -This input will be rendered in the `DaemonSet` resource as: - -```yaml -apiVersion: apps/v1 -kind: DaemonSet -metadata: - ... Snipped for brevity ... -spec: - ... Snipped for brevity ... - template: - spec: - containers: - ... The first entry relates to the application ... - - name: datadog - image: datadog/agent:latest - env: - - name: DD_API_KEY - value: ASDF-1234 - - name: SD_BACKEND - value: docker - - name: nginx - image: nginx:1.15.4 -``` +back to [root README](/README.adoc#core-concepts) -In this config, the side car containers are rendered as additional containers to deploy alongside the main application -container configured by the `containerImage`, `ports`, `livenessProbe`, etc input values. Note that the -`sideCarContainers` variable directly renders the spec, meaning that the additional values for the side cars such as -`livenessProbe` should be rendered directly within the `sideCarContainers` input value. -back to [root README](/README.adoc#core-concepts) +## Useful helm commands: + 1. List helm charts -## How do I use a private registry? + ```bash -To pull container images from a private registry, the Kubernetes cluster needs to be able to authenticate to the docker -registry with a registry key. On managed Kubernetes clusters (e.g EKS, GKE, AKS), this is automated through the server -IAM roles that are assigned to the instance VMs. In most cases, if the instance VM IAM role has the permissions to -access the registry, the Kubernetes cluster will automatically be able to pull down images from the respective managed -registry (e.g ECR on EKS or GCR on GKE). + helm list -Alternatively, you can specify docker registry keys in the Kubernetes cluster as `Secret` resources. This is helpful in -situations where you do not have the ability to assign registry access IAM roles to the node itself, or if you are -pulling images off of a different registry (e.g accessing GCR from EKS cluster). + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + my-daemonset-chart default 1 2022-04-27 17:52:38.166977136 +0000 UTC deployed k8s-service-0.1.0 1.16.0 + + ``` -You can use `kubectl` to create a `Secret` in Kubernetes that can be used as a docker registry key: + 2. 
List daemonsets and pods -``` -kubectl create secret docker-registry NAME \ - --docker-server=DOCKER_REGISTRY_SERVER \ - --docker-username=DOCKER_USER \ - --docker-password=DOCKER_PASSWORD \ - --docker-email=DOCKER_EMAIL -``` + ```bash -This command will create a `Secret` resource named `NAME` that holds the specified docker registry credentials. You can -then specify the cluster to use this `Secret` when pulling down images for the service `Deployment` in this chart by -using the `imagePullSecrets` input value: + # List Daemonsets + kubectl get daemonset + NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE + fluentd 1 1 1 1 1 2m45s -``` -imagePullSecrets: - - NAME -``` + # List pods + kubectl get pods + NAME READY STATUS RESTARTS AGE + fluentd-l9g2s 1/1 Running 0 3m14s + ``` -You can learn more about using private registries with Kubernetes in [the official -documentation](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry). back to [root README](/README.adoc#day-to-day-operations) From e2540517b7c836f2786c7c45cc51e28d7e80b655 Mon Sep 17 00:00:00 2001 From: Raghu Katti Date: Wed, 27 Apr 2022 14:23:32 -0400 Subject: [PATCH 7/7] Cleanup test --- ...monset_template_render_helpers_for_test.go | 167 ------------------ 1 file changed, 167 deletions(-) delete mode 100644 test/k8s_daemonset_template_render_helpers_for_test.go diff --git a/test/k8s_daemonset_template_render_helpers_for_test.go b/test/k8s_daemonset_template_render_helpers_for_test.go deleted file mode 100644 index 441b0247..00000000 --- a/test/k8s_daemonset_template_render_helpers_for_test.go +++ /dev/null @@ -1,167 +0,0 @@ -//go:build all || tpl -// +build all tpl - -// NOTE: We use build flags to differentiate between template tests and integration tests so that you can conveniently -// run just the template tests. See the test README for more information. - -package test - -import ( - "path/filepath" - "testing" - - "github.com/gruntwork-io/terratest/modules/helm" - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - extv1beta1 "k8s.io/api/extensions/v1beta1" - networkingv1 "k8s.io/api/networking/v1" - - certapi "github.com/GoogleCloudPlatform/gke-managed-certs/pkg/apis/networking.gke.io/v1beta1" -) - -func renderK8SServiceDeploymentWithSetValues(t *testing.T, setValues map[string]string) appsv1.DaemonSet { - helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) - require.NoError(t, err) - - // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. - options := &helm.Options{ - ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, - SetValues: setValues, - } - // Render just the daemonset resource - out := helm.RenderTemplate(t, options, helmChartPath, "daemonset", []string{"templates/daemonset.yaml"}) - - // Parse the daemonset and return it - var daemonset appsv1.DaemonSet - helm.UnmarshalK8SYaml(t, out, &daemonset) - return daemonset -} - -func renderK8SServiceCanaryDeploymentWithSetValues(t *testing.T, setValues map[string]string) appsv1.DaemonSet { - helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) - require.NoError(t, err) - - // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. 
- options := &helm.Options{ - ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, - SetValues: setValues, - } - // Render just the canary daemonset resource - out := helm.RenderTemplate(t, options, helmChartPath, "canarydaemonset", []string{"templates/canarydaemonset.yaml"}) - - // Parse the canary daemonset and return it - var canarydaemonset appsv1.DaemonSet - helm.UnmarshalK8SYaml(t, out, &canarydaemonset) - return canarydaemonset -} - -func renderK8SServiceIngressWithSetValues(t *testing.T, setValues map[string]string) networkingv1.Ingress { - helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) - require.NoError(t, err) - - // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. - options := &helm.Options{ - ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, - SetValues: setValues, - } - // Render just the ingress resource - out := helm.RenderTemplate(t, options, helmChartPath, "ingress", []string{"templates/ingress.yaml"}) - - // Parse the ingress and return it - var ingress networkingv1.Ingress - helm.UnmarshalK8SYaml(t, out, &ingress) - return ingress -} - -func renderK8SServiceIngressWithValuesFile(t *testing.T, valuesFilePath string) networkingv1.Ingress { - helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) - require.NoError(t, err) - - // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. - options := &helm.Options{ - ValuesFiles: []string{ - filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml"), - valuesFilePath, - }, - } - // Render just the ingress resource - out := helm.RenderTemplate(t, options, helmChartPath, "ingress", []string{"templates/ingress.yaml"}) - - // Parse the ingress and return it - var ingress networkingv1.Ingress - helm.UnmarshalK8SYaml(t, out, &ingress) - return ingress -} - -func renderK8SServiceExtV1Beta1IngressWithSetValues(t *testing.T, setValues map[string]string) extv1beta1.Ingress { - helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) - require.NoError(t, err) - - // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. - options := &helm.Options{ - ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, - SetValues: setValues, - } - // Render just the ingress resource - out := helm.RenderTemplate(t, options, helmChartPath, "ingress", []string{"templates/ingress.yaml"}) - - // Parse the ingress and return it - var ingress extv1beta1.Ingress - helm.UnmarshalK8SYaml(t, out, &ingress) - return ingress -} - -func renderK8SServiceManagedCertificateWithSetValues(t *testing.T, setValues map[string]string) certapi.ManagedCertificate { - helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) - require.NoError(t, err) - - // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. 
- options := &helm.Options{ - ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, - SetValues: setValues, - } - // Render just the google managed certificate resource - out := helm.RenderTemplate(t, options, helmChartPath, "gmc", []string{"templates/gmc.yaml"}) - - // Parse the google managed certificate and return it - var cert certapi.ManagedCertificate - helm.UnmarshalK8SYaml(t, out, &cert) - return cert -} - -func renderK8SServiceAccountWithSetValues(t *testing.T, setValues map[string]string) corev1.ServiceAccount { - helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) - require.NoError(t, err) - - // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. - options := &helm.Options{ - ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, - SetValues: setValues, - } - // Render just the service account resource - out := helm.RenderTemplate(t, options, helmChartPath, "serviceaccount", []string{"templates/serviceaccount.yaml"}) - - // Parse the service account and return it - var serviceaccount corev1.ServiceAccount - helm.UnmarshalK8SYaml(t, out, &serviceaccount) - return serviceaccount -} - -func renderK8SServiceWithSetValues(t *testing.T, setValues map[string]string) corev1.Service { - helmChartPath, err := filepath.Abs(filepath.Join("..", "charts", "k8s-daemonset")) - require.NoError(t, err) - - // We make sure to pass in the linter_values.yaml values file, which we assume has all the required values defined. - options := &helm.Options{ - ValuesFiles: []string{filepath.Join("..", "charts", "k8s-daemonset", "linter_values.yaml")}, - SetValues: setValues, - } - // Render just the service resource - out := helm.RenderTemplate(t, options, helmChartPath, "service", []string{"templates/service.yaml"}) - - // Parse the service and return it - var service corev1.Service - helm.UnmarshalK8SYaml(t, out, &service) - return service -}