EKS added from the first spike
fargozhu authored and satadruroy committed May 24, 2019
1 parent 76c2389 commit 719f79a
Showing 28 changed files with 699 additions and 0 deletions.
7 changes: 7 additions & 0 deletions eks/.gitignore
@@ -0,0 +1,7 @@
*.tfvars
*.pem
*.backup
*.tfstate
*.info
.terraform/
.pending/
33 changes: 33 additions & 0 deletions eks/README.md
@@ -0,0 +1,33 @@
This recipe automates the setup of an Elastic Kubernetes Service (EKS) cluster on AWS. It was created to simplify deployment of [SUSE Cloud Foundry](https://github.com/SUSE/scf), which uses Kubernetes as its foundation.

## Preparation

### Tools

1. [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
2. [jq](https://stedolan.github.io/jq/)
3. [aws-cli](https://aws.amazon.com/cli/)
4. [aws-iam-authenticator](https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html)
5. [terraform](https://www.terraform.io/)

### Permissions

Make sure your AWS user has the permissions described [here](https://github.com/SUSE/scf/wiki/IAM-Requirements-for-EKS).

### Configurations

1. Add an EC2 key pair that will be used to join the nodes to the k8s cluster
2. Make sure that you are using a user without admin rights

## Instructions

:warning: Please note the SCF-specific security groups in `eks/terraform/eks-worker.tf`.

1. Run `aws configure` to authenticate to AWS.
2. Copy `terraform.tfvars.template` to `terraform.tfvars` and edit the values (see the example after this list).
3. Run `terraform apply -var-file=<filename>.tfvars` to create the cluster in AWS.
4. Make sure you have the [latest `kubectl` ready](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
5. Make sure you have the [latest `helm` ready](https://github.com/helm/helm/releases).
6. Make sure you have the [`aws-iam-authenticator` binary ready](https://github.com/kubernetes-sigs/aws-iam-authenticator).
7. Check the health of your workers with `kubectl get nodes`.
8. (OPTIONAL) Have a look at [this guide](https://github.com/SUSE/scf/wiki/Deployment-on-Amazon-EKS) for setting up SUSE Cloud Application Platform on top of it.
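
An example `terraform.tfvars` might look like the sketch below. The variable names match the `var.*` references in `eks/modules.tf` (`cluster_name`, `location`, `subnet_count`, `keypair_name`); the values are only illustrative assumptions, so check `terraform.tfvars.template` for the authoritative list.

```hcl
# Illustrative values only -- adjust for your AWS account and region.
cluster_name = "my-eks-cluster"   # name used for the cluster and derived resources
location     = "eu-central-1"     # AWS region to deploy into
subnet_count = 3                  # number of subnets created by the network module
keypair_name = "my-ec2-keypair"   # existing EC2 key pair used for worker node access
```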
34 changes: 34 additions & 0 deletions eks/modules.tf
@@ -0,0 +1,34 @@
module "network" {
source = "./modules/network"

// pass variables from .tfvars
cluster_name = "${var.cluster_name}"
location = "${var.location}"
subnet_count = "${var.subnet_count}"
}

module "eks" {
source = "./modules/eks"

// pass variables from .tfvars
cluster_name = "${var.cluster_name}"
//accessing_computer_ip = "${var.accessing_computer_ip}"
location = "${var.location}"
keypair_name = "${var.keypair_name}"

// inputs from modules
vpc_id = "${module.network.vpc_id}"
app_subnet_ids = "${module.network.app_subnet_ids}"
}


/*
module "services" {
source = "./modules/services"
// pass variables from .tfvars
location = "${var.location}"
access_key = "${var.access_key}"
secret_key = "${var.secret_key}"
}
*/
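
For reference, a minimal sketch of the root-level variable declarations that `modules.tf` expects; the names are taken from the `var.*` references above, while the descriptions are assumptions and no defaults are implied.

```hcl
variable "cluster_name" {
  description = "Name of the EKS cluster"
}

variable "location" {
  description = "AWS region to deploy into"
}

variable "subnet_count" {
  description = "Number of subnets created by the network module"
}

variable "keypair_name" {
  description = "Existing EC2 key pair used to access the worker nodes"
}
```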
17 changes: 17 additions & 0 deletions eks/modules/eks/allow-workers.tf
@@ -0,0 +1,17 @@
# Map the worker node IAM role into the aws-auth ConfigMap so worker nodes can join the cluster.
resource "kubernetes_config_map" "aws_auth" {
metadata {
name = "aws-auth"
namespace = "kube-system"
}

data {
mapRoles = <<ROLES
- rolearn: ${aws_iam_role.eks-worker.arn}
username: system:node:{{EC2PrivateDNSName}}
groups:
- system:bootstrappers
- system:nodes
ROLES
}
depends_on = ["aws_eks_cluster.eks-cluster"]
}
24 changes: 24 additions & 0 deletions eks/modules/eks/eks-master.tf
@@ -0,0 +1,24 @@
# The actual EKS resource
resource "aws_eks_cluster" "eks-cluster" {
name = "${var.cluster_name}"
role_arn = "${aws_iam_role.eks-cluster.arn}"

vpc_config {
security_group_ids = ["${aws_security_group.eks-cluster.id}"]
//subnet_ids = ["${aws_subnet.main.*.id}"]
subnet_ids = ["${var.app_subnet_ids}"]
}

depends_on = [
"aws_iam_role_policy_attachment.eks-cluster-AmazonEKSClusterPolicy",
"aws_iam_role_policy_attachment.eks-cluster-AmazonEKSServicePolicy",
]
}

output "endpoint" {
value = "${aws_eks_cluster.eks-cluster.endpoint}"
}

output "kubeconfig-certificate-authority-data" {
value = "${aws_eks_cluster.eks-cluster.certificate_authority.0.data}"
}
10 changes: 10 additions & 0 deletions eks/modules/eks/helm.tf
@@ -0,0 +1,10 @@
provider "helm" {
version = "~> 0.9.0"

kubernetes {
}

service_account = "${kubernetes_service_account.tiller.metadata.0.name}"
namespace = "${kubernetes_service_account.tiller.metadata.0.namespace}"
tiller_image = "gcr.io/kubernetes-helm/tiller:v2.12.0"
}
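
With the provider above pointed at the Tiller service account, charts can be installed directly from Terraform. A minimal sketch, not part of this commit, where the release and chart names are illustrative assumptions:

```hcl
resource "helm_release" "nginx_ingress" {
  name      = "nginx-ingress"
  chart     = "stable/nginx-ingress"
  namespace = "kube-system"
}
```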
72 changes: 72 additions & 0 deletions eks/modules/eks/iam.tf
@@ -0,0 +1,72 @@

# IAM roles for the EKS cluster

resource "aws_iam_role" "eks-cluster" {
name = "${var.cluster_name}-cluster"

assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "eks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}

resource "aws_iam_role_policy_attachment" "eks-cluster-AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = "${aws_iam_role.eks-cluster.name}"
}

resource "aws_iam_role_policy_attachment" "eks-cluster-AmazonEKSServicePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = "${aws_iam_role.eks-cluster.name}"
}

# IAM roles for EKS workers

resource "aws_iam_role" "eks-worker" {
name = "${var.cluster_name}-worker"

assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}

resource "aws_iam_role_policy_attachment" "eks-worker-AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = "${aws_iam_role.eks-worker.name}"
}

resource "aws_iam_role_policy_attachment" "eks-worker-AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = "${aws_iam_role.eks-worker.name}"
}

resource "aws_iam_role_policy_attachment" "eks-worker-AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = "${aws_iam_role.eks-worker.name}"
}

resource "aws_iam_instance_profile" "eks-worker" {
name = "${var.cluster_name}-worker"
role = "${aws_iam_role.eks-worker.name}"
}
59 changes: 59 additions & 0 deletions eks/modules/eks/kubeconfig.tf
@@ -0,0 +1,59 @@
locals {
kubeconfig = <<KUBECONFIG
apiVersion: v1
clusters:
- cluster:
server: ${aws_eks_cluster.eks-cluster.endpoint}
certificate-authority-data: ${aws_eks_cluster.eks-cluster.certificate_authority.0.data}
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: aws
name: aws
current-context: aws
kind: Config
preferences: {}
users:
- name: aws
user:
exec:
apiVersion: client.authentication.k8s.io/v1alpha1
command: aws-iam-authenticator
args:
- "token"
- "-i"
- "${var.cluster_name}"
KUBECONFIG
}

locals {
config-map-auth = <<CONFIGMAPAWSAUTH
apiVersion: v1
kind: ConfigMap
metadata:
name: aws-auth
namespace: kube-system
data:
mapRoles: |
- rolearn: ${aws_iam_role.eks-worker.arn}
username: system:node:{{EC2PrivateDNSName}}
groups:
- system:bootstrappers
- system:nodes
CONFIGMAPAWSAUTH
}

output "config-map-auth" {
value = "${local.config-map-auth}"
}

output "kubeconfig" {
value = "${local.kubeconfig}"

depends_on = ["aws_eks_cluster.eks-cluster"]
}
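
The `kubeconfig` local is only exposed as an output here; a hedged sketch of how it could instead be written straight to disk with the `local_file` resource (not part of this commit, and the filename is an assumption):

```hcl
resource "local_file" "kubeconfig" {
  content  = "${local.kubeconfig}"
  filename = "${path.module}/kubeconfig"
}
```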
27 changes: 27 additions & 0 deletions eks/modules/eks/kubernetes-tiller.tf
@@ -0,0 +1,27 @@
resource "kubernetes_service_account" "tiller" {
metadata {
name = "tiller"
namespace = "kube-system"
}

automount_service_account_token = true

//depends_on = ["null_resource.post_processor"]
}

resource "kubernetes_cluster_role_binding" "tiller" {
metadata {
name = "tiller"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "cluster-admin"
}
subject {
kind = "ServiceAccount"
name = "tiller"
namespace = "kube-system"
}
depends_on = ["kubernetes_service_account.tiller"]
}
11 changes: 11 additions & 0 deletions eks/modules/eks/kubernetes.tf
@@ -0,0 +1,11 @@
# Obtain a cluster token via aws-iam-authenticator; jq extracts the .status object,
# whose "token" field is consumed by the kubernetes provider below.
data "external" "aws_iam_authenticator" {
program = ["sh", "-c", "aws-iam-authenticator token -i ${var.cluster_name} | jq -r -c .status"]
}

provider "kubernetes" {
version = "~> 1.5"
load_config_file = false
host = "${aws_eks_cluster.eks-cluster.endpoint}"
cluster_ca_certificate = "${base64decode(aws_eks_cluster.eks-cluster.certificate_authority.0.data)}"
token = "${data.external.aws_iam_authenticator.result.token}"
}
7 changes: 7 additions & 0 deletions eks/modules/eks/output.tf
@@ -0,0 +1,7 @@
output "eks_kubeconfig" {
  value = "${local.kubeconfig}"
  depends_on = [
    "aws_eks_cluster.eks-cluster"
  ]
}

17 changes: 17 additions & 0 deletions eks/modules/eks/sg-eks-master.tf
@@ -0,0 +1,17 @@
resource "aws_security_group" "eks-cluster" {
name = "${var.cluster_name}-cluster"
description = "Cluster communication with worker nodes"
//vpc_id = "${aws_vpc.main.id}"
vpc_id = "${var.vpc_id}"

egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}

tags {
Name = "${var.cluster_name}"
}
}
19 changes: 19 additions & 0 deletions eks/modules/eks/sg-eks-worker.tf
@@ -0,0 +1,19 @@
resource "aws_security_group" "eks-worker" {
name = "${var.cluster_name}-worker"
description = "Security group for all workers in the cluster"
vpc_id = "${var.vpc_id}"

egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}

tags = "${
map(
"Name", "${var.cluster_name}-worker",
"kubernetes.io/cluster/${var.cluster_name}", "owned",
)
}"
}
