Skip to content

Commit

Permalink
Merge pull request #22 from moelsayed/job_based_addons
Browse files Browse the repository at this point in the history
System and user addons based on k8s jobs and configmaps
  • Loading branch information
galal-hussein authored Nov 18, 2017
2 parents 5dfe2a0 + 9e2c352 commit e53f7ad
Show file tree
Hide file tree
Showing 13 changed files with 523 additions and 127 deletions.
32 changes: 32 additions & 0 deletions addons/addons.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
package addons

// GetAddonsExcuteJob renders a batch/v1 Job manifest that deploys an addon:
// the Job runs a single pod pinned to nodeName (hostNetwork: true) using the
// given image, and executes
// `kubectl apply -f /etc/config/<addonName>.yaml`, where the addon YAML is
// mounted from a ConfigMap named addonName (key addonName -> <addonName>.yaml).
// restartPolicy is Never, so a failed apply is not retried by the Job pod.
//
// NOTE(review): "Excute" is a typo for "Execute", but the name is exported and
// renaming it would break callers (see cluster/addons.go).
// NOTE(review): the template's YAML indentation appears flattened in this
// view — verify the emitted manifest parses before relying on it.
func GetAddonsExcuteJob(addonName, nodeName, image string) string {
// The template below is a raw string; addonName/nodeName/image are spliced
// in via concatenation, so no escaping is applied to the inputs.
return `apiVersion: batch/v1
kind: Job
metadata:
name: ` + addonName + `-deploy-job
spec:
template:
metadata:
name: pi
spec:
hostNetwork: true
nodeName: ` + nodeName + `
containers:
- name: ` + addonName + `-pod
image: ` + image + `
command: [ "kubectl", "apply", "-f" , "/etc/config/` + addonName + `.yaml"]
volumeMounts:
- name: config-volume
mountPath: /etc/config
volumes:
- name: config-volume
configMap:
# Provide the name of the ConfigMap containing the files you want
# to add to the container
name: ` + addonName + `
items:
- key: ` + addonName + `
path: ` + addonName + `.yaml
restartPolicy: Never`
}
191 changes: 191 additions & 0 deletions addons/kubedns.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,191 @@
package addons

// GetKubeDNSManifest renders the KubeDNS system-addon manifest: a
// ServiceAccount, an extensions/v1beta1 Deployment with the three KubeDNS
// containers (kubedns, dnsmasq nanny, sidecar — all image tag 1.14.5), and a
// v1 Service in kube-system. clusterDomain is spliced into the kubedns
// --domain flag, the dnsmasq --server upstreams, and the sidecar probes;
// clusterDNSServer becomes the Service's clusterIP. The values are
// concatenated into the raw template with no escaping.
//
// NOTE(review): the template's YAML indentation appears flattened in this
// view — verify the emitted manifest parses before relying on it.
func GetKubeDNSManifest(clusterDNSServer, clusterDomain string) string {
return `
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain=` + clusterDomain + `.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --log-facility=-
- --server=/` + clusterDomain + `/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.` + clusterDomain + `,5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.` + clusterDomain + `,5,A
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: ` + clusterDNSServer + `
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
`
}
14 changes: 14 additions & 0 deletions cluster.yml
Original file line number Diff line number Diff line change
Expand Up @@ -44,3 +44,17 @@ services:
infra_container_image: gcr.io/google_containers/pause-amd64:3.0
kubeproxy:
image: quay.io/coreos/hyperkube:v1.7.5_coreos.0

addons: |-
---
apiVersion: v1
kind: Pod
metadata:
name: my-nginx
namespace: default
spec:
containers:
- name: my-nginx
image: nginx
ports:
- containerPort: 80
87 changes: 78 additions & 9 deletions cluster/addons.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,32 +2,101 @@ package cluster

import (
"fmt"
"time"

"github.com/rancher/rke/addons"
"github.com/rancher/rke/k8s"
"github.com/sirupsen/logrus"
)

// Names used by the addon deployment flow.
const (
// Environment-variable names for the cluster DNS server IP and cluster
// domain. NOTE(review): their consumers are not visible in this file —
// confirm where these env vars are read.
ClusterDNSServerIPEnvName = "RKE_DNS_SERVER"
ClusterDomainEnvName = "RKE_CLUSTER_DOMAIN"
// Resource names for the ConfigMap/Job pairs created by doAddonDeploy for
// the KubeDNS system addon and for user-supplied addons, respectively.
KubeDNSAddonResourceName = "rke-kubedns-addon"
UserAddonResourceName = "rke-user-addon"
)

// DeployK8sAddOns installs the built-in system addons; currently that is
// KubeDNS only.
func (c *Cluster) DeployK8sAddOns() error {
	return c.deployKubeDNS()
}

// DeployUserAddOns pushes the user-supplied addon YAML (c.Addons) into the
// cluster through the shared doAddonDeploy flow, under the
// UserAddonResourceName ConfigMap/Job pair. It is a no-op when no addons are
// configured.
func (c *Cluster) DeployUserAddOns() error {
	logrus.Infof("[addons] Setting up user addons..")
	if len(c.Addons) == 0 {
		logrus.Infof("[addons] No user addons configured..")
		return nil
	}
	err := c.doAddonDeploy(c.Addons, UserAddonResourceName)
	if err != nil {
		return err
	}
	logrus.Infof("[addons] User addon deployed successfully..")
	return nil
}

// deployKubeDNS renders the KubeDNS manifest for this cluster's DNS server IP
// and domain and deploys it through the shared doAddonDeploy flow.
//
// Fix: the pasted diff left the removed old-style log line
// (`[plugins] Setting up KubeDNS`) interleaved with the new one; only the
// `[addons]` message belongs here.
func (c *Cluster) deployKubeDNS() error {
	logrus.Infof("[addons] Setting up KubeDNS")
	kubeDNSYaml := addons.GetKubeDNSManifest(c.ClusterDNSServer, c.ClusterDomain)
	if err := c.doAddonDeploy(kubeDNSYaml, KubeDNSAddonResourceName); err != nil {
		return err
	}
	logrus.Infof("[addons] KubeDNS deployed successfully..")
	return nil
}

kubectlCmd := &KubectlCommand{
Cmd: []string{"apply -f /addons/kubedns*.yaml"},
func (c *Cluster) doAddonDeploy(addonYaml, resourceName string) error {

err := c.StoreAddonConfigMap(addonYaml, resourceName)
if err != nil {
return fmt.Errorf("Failed to save addon ConfigMap: %v", err)
}
logrus.Infof("[plugins] Executing the deploy command..")
err := c.RunKubectlCmd(kubectlCmd)

logrus.Infof("[addons] Executing deploy job..")

addonJob := addons.GetAddonsExcuteJob(resourceName, c.ControlPlaneHosts[0].AdvertisedHostname, c.Services.KubeAPI.Image)
err = c.ApplySystemAddonExcuteJob(addonJob)
if err != nil {
return fmt.Errorf("Failed to run kubectl command: %v", err)
return fmt.Errorf("Failed to deploy addon execute job: %v", err)
}
logrus.Infof("[plugins] kubeDNS deployed successfully..")
return nil
}

// StoreAddonConfigMap saves the addon YAML into a Kubernetes ConfigMap named
// addonName, retrying every 5 seconds until the API server accepts the update
// or the UpdateStateTimeout (seconds) deadline expires.
//
// Fixes over the original:
//   - the retry ran in a goroutine that kept retrying forever after the
//     timeout fired (goroutine leak); the loop now runs inline against a
//     deadline and stops when it passes.
//   - retry errors went to stdout via fmt.Println; they now go through the
//     file's logrus logger.
func (c *Cluster) StoreAddonConfigMap(addonYaml string, addonName string) error {
	logrus.Infof("[addons] Saving addon ConfigMap to Kubernetes")
	kubeClient, err := k8s.NewClient(c.LocalKubeConfigPath)
	if err != nil {
		return err
	}
	deadline := time.Now().Add(time.Second * UpdateStateTimeout)
	for time.Now().Before(deadline) {
		if err := k8s.UpdateConfigMap(kubeClient, []byte(addonYaml), addonName); err != nil {
			// API server likely not ready yet; log and retry until the deadline.
			logrus.Debugf("[addons] Failed to save addon ConfigMap: %v, retrying..", err)
			time.Sleep(time.Second * 5)
			continue
		}
		logrus.Infof("[addons] Successfully Saved addon to Kubernetes ConfigMap: %s", addonName)
		return nil
	}
	return fmt.Errorf("[addons] Timeout waiting for kubernetes to be ready")
}

// ApplySystemAddonExcuteJob submits the rendered addon deploy Job manifest to
// the cluster through the local kubeconfig.
//
// Fix: the original printed the error to stdout with fmt.Println and then
// returned it (log-and-return); the caller (doAddonDeploy) already wraps the
// error with context, so it is handled at exactly one layer now.
func (c *Cluster) ApplySystemAddonExcuteJob(addonJob string) error {
	return k8s.ApplyK8sSystemJob(addonJob, c.LocalKubeConfigPath)
}
Loading

0 comments on commit e53f7ad

Please sign in to comment.