diff --git a/infra-examples/aws/README.md b/infra-examples/aws/README.md deleted file mode 100644 index 88c7d15..0000000 --- a/infra-examples/aws/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# Reference Architecture for AWS - -This module includes OpenTofu modules to create AWS reference resources that are preconfigured to support Open edX as well as [Karpenter](https://karpenter.sh/) for management of [AWS EC2 spot-priced](https://aws.amazon.com/ec2/spot/) compute nodes and enhanced pod bin packing. - -## Virtual Private Cloud (VPC) - -There are no explicit requirements for Karpenter within this VPC defintion. However, there *are* several requirements for EKS which might vary from the VPC module defaults now or in the future. These include: - -- defined sets of subnets for both private and public networks -- a NAT gateway -- enabling DNS host names -- custom resource tags for public and private subnets -- explicit assignments of AWS region and availability zones - -See additional details here: [AWS VPC README](./vpc/README.rst) - -## Elastic Kubernetes Service (EKS) - -AWS EKS has grown more complex over time. This reference implementation is preconfigured as necessary to ensure that a.) you and others on your team can access the Kubernetes cluster both from the AWS Console as well as from kubectl, b.) it will work for an Open edX deployment, and c.) it will work with Karpenter. 
With these goals in mind, please note the following configuration details: - -- requirements detailed in the VPC section above are explicitly passed in to this module as inputs -- cluster endpoints for private and public access are enabled -- IAM Roles for Service Accounts (IRSA) is enabled -- Key Management Service (KMS) is enabled, encrypting all Kubernetes Secrets -- cluster access via aws-auth/configMap is enabled -- a karpenter.sh/discovery resource tag is added to the EKS instance -- various AWS EKS add-ons that are required by Open edX and/or Karpenter and/or its supporting systems (metrics-server, vpa) are included -- additional cluster node security configuration is added to allow node-to-node and pod-to-pod communication using internal DNS resolution -- a managed node group is added containing custom labels, IAM roles, and resource tags; all of which are required by Karpenter -- adds additional resources required by AWS EBS CSI Driver add-on, itself required by EKS since 1.22 -- additional EC2 security groups are added to enable pod shell access from kubectl - -See additional details here: [AWS EKS README](./k8s-cluster/README.rst) \ No newline at end of file diff --git a/infra-examples/aws/eks/provider.tf b/infra-examples/aws/eks/provider.tf deleted file mode 100644 index 8db34d4..0000000 --- a/infra-examples/aws/eks/provider.tf +++ /dev/null @@ -1,4 +0,0 @@ -provider "aws" { - region = var.aws_region - -} diff --git a/infra-examples/aws/eks/readme.md b/infra-examples/aws/eks/readme.md deleted file mode 100644 index 4512986..0000000 --- a/infra-examples/aws/eks/readme.md +++ /dev/null @@ -1,94 +0,0 @@ -# Terraform AWS VPC and EKS Cluster Deployment - -This guide provides a step-by-step process to deploy a Virtual Private Cloud (VPC) and an Elastic Kubernetes Service (EKS) cluster in AWS using Terraform. - -## Prerequisites - -Ensure the following tools and configurations are set up before proceeding: - -- **Terraform** installed (version 1.5.6+). 
-- **AWS CLI** installed and configured with the appropriate credentials and region. -- **SSH Key Pair** created in AWS (you’ll need the key pair name for `key_name`). -- Proper IAM permissions to create VPC, EC2, and EKS resources. - -## Steps for Deployment - -### 1. Clone the Repository - -```bash -git clone -cd -``` - -### 2. Initialize Terraform - -Run the following command to initialize Terraform and download the required providers and modules: - -``` bash -terraform init -``` - -### 3. Customize Variables - -Edit the `variables.tf` file or edit the `values.auto.tfvars.json` file to override default values as needed. Below is a table describing the available variables: - -| Variable | Description | Type | Default | -|------------------------------------ |---------------------------------------------------------------------------------|---------------|-------------------| -| `aws_region` | The AWS Region in which to deploy the resources | `string` | | -| `private_subnets` | List of private subnets | `list(string)`| `[]` | -| `public_subnets` | List of public subnets | `list(string)`| `[]` | -| `cidr` | CIDR block for the VPC | `string` | `10.0.0.0/16` | -| `azs` | List of availability zones to use | `list(string)`| `[]` | -| `vpc_name` | The VPC name | `string` | | -| `enable_nat_gateway` | Enable NAT Gateway | `bool` | `true` | -| `single_nat_gateway` | Use a single NAT Gateway | `bool` | `false` | -| `one_nat_gateway_per_az` | Deploy one NAT gateway per availability zone | `bool` | `true` | -| `instance_types` | EC2 Instance types for the Kubernetes nodes | `list(string)`| | -| `cluster_version` | Kubernetes version for the EKS cluster | `string` | `1.29` | -| `cluster_name` | Name of the EKS cluster | `string` | | -| `desired_size` | Desired number of nodes in the EKS cluster | `number` | `2` | -| `disk_size` | Disk size for the nodes (in GB) | `number` | `40` | -| `key_name` | Name of the SSH Key Pair | `string` | | -| `max_size` | Maximum number of nodes 
in the EKS cluster | `number` | `3` | -| `min_size` | Minimum number of nodes in the EKS cluster | `number` | `1` | -| `extra_ssh_cidrs` | List of additional IP blocks allowed SSH access | `list(string)`| `[]` | -| `registry_credentials` | Image registry credentials for the nodes | `string` | | -| `node_groups_tags` | A map of tags to add to all node group resources | `map(string)` | `{}` | -| `enable_cluster_autoscaler` | Enable cluster autoscaler for the EKS cluster | `bool` | `false` | -| `ubuntu_version` | Ubuntu version for the nodes (default: `jammy-22.04`) | `string` | `jammy-22.04` | -| `ami_id` | AMI ID for EKS nodes (optional) | `string` | `""` | -| `iam_role_use_name_prefix` | Use a name prefix for the IAM role associated with the cluster | `bool` | `true` | -| `iam_role_name` | IAM Role name for the cluster | `string` | `null` | -| `cluster_security_group_use_name_prefix`| Use a name prefix for the cluster security group | `bool` | `true` | -| `cluster_security_group_name` | Security group name for the cluster | `string` | `null` | -| `cluster_security_group_description`| Description of the cluster security group | `string` | `EKS cluster security group` | -| `node_group_subnets` | Subnets for node groups (typically private) | `list(string)`| `null` | -| `cluster_tags` | A map of tags to add to the cluster | `map(string)` | `{}` | -| `tags` | A map of tags to add to all resources | `map(string)` | `{}` | -| `node_group_name` | Name of the node group | `string` | `ubuntu_worker` | -| `capacity_type` | Type of capacity for EKS Node Group (options: `ON_DEMAND`, `SPOT`) | `string` | `ON_DEMAND` | -| `post_bootstrap_user_data` | Add post-bootstrap user data (optional) | `string` | `null` | - -### 4. Apply the Terraform Configuration - -Run the following command to deploy the infrastructure: - -```bash - terraform apply -``` - -### 5. 
Access the EKS Cluster - -Once the deployment is complete, you can configure your kubectl to access the EKS cluster: - -```bash - aws eks --region update-kubeconfig --name -``` - -### 6. Clean Up - -To destroy the infrastructure when you no longer need it, run: - -```bash - terraform destroy -``` diff --git a/infra-examples/aws/eks/values.auto.tfvars.json b/infra-examples/aws/eks/values.auto.tfvars.json deleted file mode 100644 index e4782d7..0000000 --- a/infra-examples/aws/eks/values.auto.tfvars.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "aws_region": "us-east-1", - "private_subnets": ["10.10.0.0/21", "10.10.8.0/21"], - "public_subnets": ["10.10.104.0/21", "10.10.120.0/21"], - "cidr": "10.10.0.0/16", - "azs": ["us-east-1a", "us-east-1b"], - "vpc_name": "your-vpc-name-here", - "enable_nat_gateway": true, - "single_nat_gateway": false, - "one_nat_gateway_per_az": true, - "instance_types": ["m6i.large"], - "cluster_name": "your-eks-cluster-name-here", - "cluster_version": "1.29", - "desired_size": 2, - "max_size": 3, - "min_size": 1, - "disk_size": 50, - "extra_ssh_cidrs": [], - "node_groups_tags": {}, - "enable_cluster_autoscaler": true, - "control_plane_subnet_ids": [], - "ubuntu_version": "jammy-22.04", - "ami_id": "", - "iam_role_use_name_prefix": true, - "iam_role_name": null, - "cluster_security_group_use_name_prefix": true, - "cluster_security_group_name": null, - "cluster_security_group_description": "EKS cluster security group", - "node_group_subnets": null, - "cluster_tags": {}, - "key_name": "atlas-stage-key", - "node_group_name": "ubuntu_worker", - "capacity_type": "ON_DEMAND", - "post_bootstrap_user_data": null, - "registry_credentials": "{\"auths\":{\"https://index.docker.io/v1/\":{\"auth\":\"your-docker-hub-token-here\"}}}", - "tags": { - "Name": "your-vpc-name-here" - } -} diff --git a/infra-examples/aws/eks/vpc.tf b/infra-examples/aws/eks/vpc.tf deleted file mode 100644 index 9df1d11..0000000 --- a/infra-examples/aws/eks/vpc.tf +++ /dev/null @@ -1,15 
+0,0 @@ -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 5.13" - name = var.vpc_name - cidr = var.cidr - azs = var.azs - private_subnets = var.private_subnets - public_subnets = var.public_subnets - - enable_nat_gateway = var.enable_nat_gateway - single_nat_gateway = var.single_nat_gateway - one_nat_gateway_per_az = var.one_nat_gateway_per_az - - tags = var.tags -} diff --git a/infra-examples/aws/k8s-cluster/README.rst b/infra-examples/aws/k8s-cluster/README.rst deleted file mode 100644 index 04676d3..0000000 --- a/infra-examples/aws/k8s-cluster/README.rst +++ /dev/null @@ -1,90 +0,0 @@ -Amazon Elastic Kubernetes Service (EKS) -======================================= - -Implements a `Kubernetes Cluster `_ via `AWS Elastic Kubernetes Service (EKS) `_. A Kubernetes cluster is a set of nodes that run containerized applications that are grouped in pods and organized with namespaces. Containerizing an application into a Docker container means packaging that app with its dependences and its required services into a single binary run-time file that can be downloaded directly from the Docker registry. -Our Kubernetes Cluster resides inside the VPC on a private subnet, meaning that it is generally not visible to the public. In order to be able to receive traffic from the outside world we implement `Kubernetes Ingress Controllers `_ which in turn implement a `Kubernetes Ingress `_ -for both an `AWS Classic Load Balancer `_ as well as our `Nginx proxy server `_. - -**NOTE:** THIS MODULE DEPENDS ON THE TERRAFORM MODULE 'vpc' contained in the parent folder of this module. - -Implementation Strategy ------------------------ - -Our goal is to, as much as possible, implement a plain vanilla Kubernetes Cluster, pre-configured to use Karpenter, that generally uses all default configuration values and that allows EC2 as well as Fargate compute nodes. 
- -This module uses the latest version of the community-supported `AWS EKS Terraform module `_ to create a fully configured Kubernetes Cluster within the custom VPC. -AWS EKS Terraform module is widely supported and adopted, with more than 300 open source code contributers, and more than 21 million downloads from the Terraform registry as of March, 2023. - -How it works ------------- - -Amazon Elastic Kubernetes Service (Amazon EKS) is a managed container service to run and scale Kubernetes applications in the cloud. It is a managed service, meaning that AWS is responsible for up-time, and they apply periodic system updates and security patches automatically. - -.. image:: doc/diagram-eks.png - :width: 100% - :alt: EKS Diagram - - -AWS Fargate Serverless compute for containers ---------------------------------------------- - -AWS Fargate is a serverless, pay-as-you-go computing alternative to traditional EC2 instance-based computing nodes. It is compatible with both `Amazon Elastic Container Service (ECS) `_ and `Amazon Elastic Kubernetes Service (EKS) `_. -There are two distinct benefits to using Fargate instead of EC2 instances. First is cost. Similar to AWS Lambda, you only pay for the compute cycles that you consume. Most Open edX installations provision server infrastructure based on peak load estimates, which in point of fact only occur occasionally, during isolated events like approaching homework due dates, mid-term exams and so on. This in turn leads to EC2 instances being under-utilized most of the time. -Second, related, is scaling. Fargate can absorb whatever workload you send to it, meaning that during peak usage periods of your Open edX platform you won't need to worry about provisioning additional EC2 server capacity. - - -- **Running at scale**. Use Fargate with Amazon ECS or Amazon EKS to easily run and scale your containerized data processing workloads. -- **Optimize Costs**. 
With AWS Fargate there are no upfront expenses, pay for only the resources used. Further optimize with `Compute Savings Plans `_ and `Fargate Spot `_, then use `Graviton2 `_ powered Fargate for up to 40% price performance improvements. -- Only pay for what you use. Fargate scales the compute to closely match your specified resource requirements. With Fargate, there is no over-provisioning and paying for additional servers. - -How to Manually Add More Kubernetes Admins ------------------------------------------- - -By default your AWS IAM user account will be the only user who can view, interact with and manage your new Kubernetes cluster. Other IAM users with admin permissions will still need to be explicitly added to the list of Kluster admins. -If you're new to Kubernetes then you'll find detailed technical how-to instructions in the AWS EKS documentation, `Enabling IAM user and role access to your cluster `_. -You'll need kubectl in order to modify the aws-auth pod in your Kubernets cluster. - -**Note that since June-2022 the AWS EKS Kubernetes cluster configuration excludes public api access. This means that kubectl is only accessible via the bastion, from inside of the AWS VPC on the private subnets. -The convenience script /scripts/bastion-config.sh installs all of the Ubuntu packages and additional software that you'll need to connect to the k8s cluster using kubectl and k9s. You'll also need to -configure aws cli with an IAM key and secret with the requisite admin permissions.** - -.. code-block:: bash - - kubectl edit -n kube-system configmap/aws-auth - -Following is an example aws-auth configMap with additional IAM user accounts added to the admin "masters" group. - -.. code-block:: yaml - - # Please edit the object below. Lines beginning with a '#' will be ignored, - # and an empty file will abort the edit. If an error occurs while saving this file will be - # reopened with the relevant failures. 
- # - apiVersion: v1 - data: - mapRoles: | - - groups: - - system:bootstrappers - - system:nodes - rolearn: arn:aws:iam::012345678942:role/service-eks-node-group-20220518182244174100000002 - username: system:node:{{EC2PrivateDNSName}} - mapUsers: | - - groups: - - system:masters - userarn: arn:aws:iam::012345678942:user/lawrence.mcdaniel - username: lawrence.mcdaniel - - groups: - - system:masters - userarn: arn:aws:iam::012345678942:user/ci - username: ci - - groups: - - system:masters - userarn: arn:aws:iam::012345678942:user/bob_marley - username: bob_marley - kind: ConfigMap - metadata: - creationTimestamp: "2022-05-18T18:38:29Z" - name: aws-auth - namespace: kube-system - resourceVersion: "499488" - uid: 52d6e7fd-01b7-4c80-b831-b971507e5228 diff --git a/infra-examples/aws/k8s-cluster/addon_ebs_csi_driver.tf b/infra-examples/aws/k8s-cluster/addon_ebs_csi_driver.tf deleted file mode 100644 index 868acdd..0000000 --- a/infra-examples/aws/k8s-cluster/addon_ebs_csi_driver.tf +++ /dev/null @@ -1,77 +0,0 @@ -#------------------------------------------------------------------------------ -# written by: Lawrence McDaniel -# https://lawrencemcdaniel.com/ -# -# date: Dec-2022 -# -# Create the Amazon EBS CSI driver IAM role for service accounts -# https://docs.aws.amazon.com/eks/latest/userguide/csi-iam-role.html -# -# Note: in late december 2022 the AWS EKS EBS CSI Add-on suddenly began -# inheriting its IAM role from the karpenter node group rather than using -# the role that is explicitly created and assigned here. no idea why. -# As a workaround, i'm also adding the AmazonEBSCSIDriverPolicy policy to the -# karpenter node group, which is assigned inside the eks module in main.tf. -#------------------------------------------------------------------------------ -resource "random_integer" "role_suffix" { - min = 10000 - max = 99999 -} - -data "aws_iam_policy" "AmazonEBSCSIDriverPolicy" { - arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy" -} - -# 2. 
Create the IAM role. -resource "aws_iam_role" "AmazonEKS_EBS_CSI_DriverRole" { - name = "AmazonEKS_EBS_CSI_DriverRole-${random_integer.role_suffix.result}" - assume_role_policy = jsonencode({ - "Version" : "2012-10-17", - "Statement" : [ - { - "Effect" : "Allow", - "Principal" : { - "Federated" : "arn:aws:iam::${var.account_id}:oidc-provider/${module.eks.oidc_provider}" - }, - "Action" : "sts:AssumeRoleWithWebIdentity", - "Condition" : { - "StringEquals" : { - "${module.eks.oidc_provider}:aud" : "sts.amazonaws.com", - "${module.eks.oidc_provider}:sub" : "system:serviceaccount:kube-system:ebs-csi-controller-sa" - } - } - } - ] - }) - tags = local.tags -} - -# 3. Attach the required AWS managed policy to the role -resource "aws_iam_role_policy_attachment" "aws_ebs_csi_driver" { - role = aws_iam_role.AmazonEKS_EBS_CSI_DriverRole.name - policy_arn = data.aws_iam_policy.AmazonEBSCSIDriverPolicy.arn -} - -# 5. Annotate the ebs-csi-controller-sa Kubernetes service account with the ARN of the IAM role -# 6. Restart the ebs-csi-controller deployment for the annotation to take effect -resource "null_resource" "annotate-ebs-csi-controller" { - provisioner "local-exec" { - command = <<-EOT - # 1. conifugre kubeconfig locally with the credentials data of the just-created - # kubernetes cluster. - # --------------------------------------- - aws eks --region ${var.aws_region} update-kubeconfig --name ${var.name} --alias ${var.name} - kubectl config use-context ${var.name} - kubectl config set-context --current --name=kube-system - - # 2. 
final install steps for EBS CSI Driver - # --------------------------------------- - kubectl annotate serviceaccount ebs-csi-controller-sa -n kube-system eks.amazonaws.com/role-arn=arn:aws:iam::${var.account_id}:role/${aws_iam_role.AmazonEKS_EBS_CSI_DriverRole.name} - kubectl rollout restart deployment ebs-csi-controller -n kube-system - EOT - } - - depends_on = [ - module.eks - ] -} diff --git a/infra-examples/aws/k8s-cluster/doc/aws-vpc-eks.png b/infra-examples/aws/k8s-cluster/doc/aws-vpc-eks.png deleted file mode 100644 index 74e1bf8..0000000 Binary files a/infra-examples/aws/k8s-cluster/doc/aws-vpc-eks.png and /dev/null differ diff --git a/infra-examples/aws/k8s-cluster/doc/diagram-eks.png b/infra-examples/aws/k8s-cluster/doc/diagram-eks.png deleted file mode 100644 index 0f79164..0000000 Binary files a/infra-examples/aws/k8s-cluster/doc/diagram-eks.png and /dev/null differ diff --git a/infra-examples/aws/k8s-cluster/doc/diagram-fargate.png b/infra-examples/aws/k8s-cluster/doc/diagram-fargate.png deleted file mode 100644 index 22e2d8a..0000000 Binary files a/infra-examples/aws/k8s-cluster/doc/diagram-fargate.png and /dev/null differ diff --git a/infra-examples/aws/k8s-cluster/doc/node_group-diagram.jpeg b/infra-examples/aws/k8s-cluster/doc/node_group-diagram.jpeg deleted file mode 100644 index 9219b97..0000000 Binary files a/infra-examples/aws/k8s-cluster/doc/node_group-diagram.jpeg and /dev/null differ diff --git a/infra-examples/aws/k8s-cluster/doc/node_security_group_additional_rules.png b/infra-examples/aws/k8s-cluster/doc/node_security_group_additional_rules.png deleted file mode 100644 index 3bc93e5..0000000 Binary files a/infra-examples/aws/k8s-cluster/doc/node_security_group_additional_rules.png and /dev/null differ diff --git a/infra-examples/aws/k8s-cluster/main.tf b/infra-examples/aws/k8s-cluster/main.tf deleted file mode 100644 index 6db6f74..0000000 --- a/infra-examples/aws/k8s-cluster/main.tf +++ /dev/null @@ -1,239 +0,0 @@ 
-#------------------------------------------------------------------------------ -# written by: Lawrence McDaniel -# https://lawrencemcdaniel.com/ -# -# date: Mar-2022 -# -# usage: create an EKS cluster with one managed node group for EC2 -# plus a Fargate profile for serverless computing. -# -# Technical documentation: -# - https://docs.aws.amazon.com/kubernetes -# - https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/ -# -#------------------------------------------------------------------------------ - -locals { - # Used by Karpenter config to determine correct partition (i.e. - `aws`, `aws-gov`, `aws-cn`, etc.) - partition = data.aws_partition.current.partition - - tags = { - "Name" = var.name - "openedx-k8s-harmony/name" = var.name - "openedx-k8s-harmony/region" = var.aws_region - "openedx-k8s-harmony/tofu" = "true" - } - -} - -module "eks" { - source = "terraform-aws-modules/eks/aws" - version = "~> 19.13" - cluster_name = var.name - cluster_version = var.kubernetes_cluster_version - cluster_endpoint_private_access = true - cluster_endpoint_public_access = true - vpc_id = data.aws_vpc.reference.id - subnet_ids = data.aws_subnets.private.ids - create_cloudwatch_log_group = false - enable_irsa = true - - # NOTE: - # by default Kubernetes secrets are encrypted with this key. Add your IAM - # user ARN to the owner list in order to be able to view secrets. - # AWS EKS KMS console: https://us-east-2.console.aws.amazon.com/kms/home - # - # audit your AWS EKS KMS key access by running: - # aws kms get-key-policy --key-id ADD-YOUR-KEY-ID-HERE --region us-east-2 --policy-name default --output text - create_kms_key = var.eks_create_kms_key - kms_key_owners = var.kms_key_owners - - # Add your IAM user ARN to aws_auth_users in order to gain access to the cluster itself. - # Note that alternatively, the cluster creator (presumably, you) can edit the manifest - # for kube-system/aws-auth configMap, adding additional users and roles as needed. 
- # see: - manage_aws_auth_configmap = true - aws_auth_users = var.map_users - - tags = merge( - local.tags, - # Tag node group resources for Karpenter auto-discovery - # NOTE - if creating multiple security groups with this module, only tag the - # security group that Karpenter should utilize with the following tag - { "karpenter.sh/discovery" = var.name } - ) - - # AWS EKS add-ons that are required in order to support persistent volume - # claims for ElasticSearch and Caddy (if you opt for this rather than nginx). - # Other addons are required by Karpenter and other optional supporting services. - # - # see: https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html - cluster_addons = { - # required to support internal networking between containers - vpc-cni = { - name = "vpc-cni" - } - # required to support internal DNS name resolution within the cluster - coredns = { - name = "coredns" - } - # required to maintain network rules on nodes and to enable internal - # network communication between pods. - kube-proxy = { - name = "kube-proxy" - } - # Required for release 1.22 and newer in order to support persistent volume - # claims for ElasticSearch and Caddy (if you opt for this rather than nginx). - aws-ebs-csi-driver = { - name = "aws-ebs-csi-driver" - service_account_role_arn = aws_iam_role.AmazonEKS_EBS_CSI_DriverRole.arn - } - } - - # to enable internal https network communication between nodes. 
- node_security_group_additional_rules = { - ingress_self_all = { - description = "openedx-k8s-harmony: Node to node all ports/protocols" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - cidr_blocks = [ - "172.16.0.0/12", - "192.168.0.0/16", - ] - } - port_8443 = { - description = "openedx-k8s-harmony: open port 8443 to vpc" - protocol = "-1" - from_port = 8443 - to_port = 8443 - type = "ingress" - source_node_security_group = true - } - egress_all = { - description = "openedx-k8s-harmony: Node all egress" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } - } - - eks_managed_node_groups = { - # This node group is managed by Karpenter. There must be at least one - # node in this group at all times in order for Karpenter to monitor - # load and act on metrics data. Karpenter's bin packing algorithms - # perform more effectively with larger instance types. The default - # instance type is t3.large (2 vCPU / 8 GiB). These instances, - # beyond the 1 permanent instance, are assumed to be short-lived - # (a few hours or less) as these are usually only instantiated during - # bursts of user activity such as at the start of a scheduled lecture or - # exam on a large mooc. 
- service = { - capacity_type = "ON_DEMAND" - enable_monitoring = false - desired_size = var.eks_service_group_desired_size - min_size = var.eks_service_group_min_size - max_size = var.eks_service_group_max_size - - # for node affinity - labels = { - node-group = "service" - } - - iam_role_additional_policies = { - # Required by Karpenter - AmazonSSMManagedInstanceCore = "arn:${local.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore" - - # Required by EBS CSI Add-on - AmazonEBSCSIDriverPolicy = data.aws_iam_policy.AmazonEBSCSIDriverPolicy.arn - } - - instance_types = ["${var.eks_service_group_instance_type}"] - tags = merge( - local.tags, - { Name = "eks-${var.shared_resource_identifier}" } - ) - } - - } -} - -#------------------------------------------------------------------------------ -# KARPENTER RESOURCES -#------------------------------------------------------------------------------ -# See more details in -# https://github.com/terraform-aws-modules/terraform-aws-eks/blob/v19.16.0/modules/karpenter/README.md#external-node-iam-role-default -module "karpenter" { - source = "terraform-aws-modules/eks/aws/modules/karpenter" - version = "~> 19.16" - - cluster_name = module.eks.cluster_name - - irsa_oidc_provider_arn = module.eks.oidc_provider_arn - irsa_namespace_service_accounts = ["karpenter:karpenter", "harmony:karpenter"] - - # Since Karpenter is running on an EKS Managed Node group, - # we can re-use the role that was created for the node group - create_iam_role = false - iam_role_arn = module.eks.eks_managed_node_groups["service"].iam_role_arn - - # Disable Spot termination - enable_spot_termination = false - - tags = local.tags -} - -#------------------------------------------------------------------------------ -# SUPPORTING RESOURCES -#------------------------------------------------------------------------------ - -# add an AWS IAM Role definition providing AWS console access to -# AWS EKS cluster instances. 
-resource "kubectl_manifest" "eks-console-full-access" { - yaml_body = templatefile("${path.module}/yml/eks-console-full-access.yaml", {}) -} - -# to enable shell access to nodes from kubectl -resource "aws_security_group" "worker_group_mgmt" { - name_prefix = "${var.name}-eks_hosting_group_mgmt" - description = "openedx-k8s-harmony: Ingress CLB worker group management" - vpc_id = data.aws_vpc.reference.id - - ingress { - description = "openedx-k8s-harmony: Ingress CLB" - from_port = 22 - to_port = 22 - protocol = "tcp" - - cidr_blocks = [ - "10.0.0.0/8", - ] - } - - tags = local.tags -} - -resource "aws_security_group" "all_worker_mgmt" { - name_prefix = "${var.name}-eks_all_worker_management" - description = "openedx-k8s-harmony: Ingress CLB worker management" - vpc_id = data.aws_vpc.reference.id - - ingress { - description = "openedx-k8s-harmony: Ingress CLB" - from_port = 22 - to_port = 22 - protocol = "tcp" - - cidr_blocks = [ - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", - ] - } - - tags = local.tags -} diff --git a/infra-examples/aws/k8s-cluster/outputs.tf b/infra-examples/aws/k8s-cluster/outputs.tf deleted file mode 100644 index 0bfb8ae..0000000 --- a/infra-examples/aws/k8s-cluster/outputs.tf +++ /dev/null @@ -1,205 +0,0 @@ -#------------------------------------------------------------------------------ -# written by: Lawrence McDaniel -# https://lawrencemcdaniel.com/ -# -# date: Mar-2022 -# -# usage: create an EKS cluster -#------------------------------------------------------------------------------ - -################################################################################ -# Cluster -################################################################################ - -output "cluster_arn" { - description = "The Amazon Resource Name (ARN) of the cluster" - value = module.eks.cluster_arn -} - -output "cluster_certificate_authority_data" { - description = "Base64 encoded certificate data required to communicate with the cluster" - value = 
module.eks.cluster_certificate_authority_data -} - -output "cluster_endpoint" { - description = "Endpoint for your Kubernetes API server" - value = module.eks.cluster_endpoint -} - -output "cluster_name" { - description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready" - value = module.eks.cluster_name -} - -output "cluster_oidc_issuer_url" { - description = "The URL on the EKS cluster for the OpenID Connect identity provider" - value = module.eks.cluster_oidc_issuer_url -} - -output "cluster_platform_version" { - description = "Platform version for the cluster" - value = module.eks.cluster_platform_version -} - -output "cluster_status" { - description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`" - value = module.eks.cluster_status -} - -output "cluster_primary_security_group_id" { - description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. 
Referred to as 'Cluster security group' in the EKS console" - value = module.eks.cluster_primary_security_group_id -} - -################################################################################ -# Karpenter -################################################################################ - -output "karpenter_irsa_role_arn" { - description = "IRSA role created by the Karpenter module" - value = module.karpenter.irsa_arn -} - -output "karpenter_instance_profile_name" { - description = "Instance profile created by the Karpenter module" - value = module.karpenter.instance_profile_name -} - -################################################################################ -# Security Group -################################################################################ - -output "cluster_security_group_arn" { - description = "Amazon Resource Name (ARN) of the cluster security group" - value = module.eks.cluster_security_group_arn -} - -output "cluster_security_group_id" { - description = "ID of the cluster security group" - value = module.eks.cluster_security_group_id -} - -################################################################################ -# Node Security Group -################################################################################ - -output "node_security_group_arn" { - description = "Amazon Resource Name (ARN) of the node shared security group" - value = module.eks.node_security_group_arn -} - -output "node_security_group_id" { - description = "ID of the node shared security group" - value = module.eks.node_security_group_id -} - -################################################################################ -# IRSA -################################################################################ - -output "oidc_provider" { - description = "The OpenID Connect identity provider (issuer URL without leading `https://`)" - value = module.eks.oidc_provider -} - -output "oidc_provider_arn" { - description = "The ARN of the OIDC Provider if 
`enable_irsa = true`" - value = module.eks.oidc_provider_arn -} - -################################################################################ -# IAM Role -################################################################################ - -output "cluster_iam_role_name" { - description = "IAM role name of the EKS cluster" - value = module.eks.cluster_iam_role_name -} - -output "cluster_iam_role_arn" { - description = "IAM role ARN of the EKS cluster" - value = module.eks.cluster_iam_role_arn -} - -output "cluster_iam_role_unique_id" { - description = "Stable and unique string identifying the IAM role" - value = module.eks.cluster_iam_role_unique_id -} - -################################################################################ -# EKS Addons -################################################################################ - -output "cluster_addons" { - description = "Map of attribute maps for all EKS cluster addons enabled" - value = module.eks.cluster_addons -} - -################################################################################ -# EKS Identity Provider -################################################################################ - -output "cluster_identity_providers" { - description = "Map of attribute maps for all EKS identity providers enabled" - value = module.eks.cluster_identity_providers -} - -################################################################################ -# CloudWatch Log Group -################################################################################ - -output "cloudwatch_log_group_name" { - description = "Name of cloudwatch log group created" - value = module.eks.cloudwatch_log_group_name -} - -output "cloudwatch_log_group_arn" { - description = "Arn of cloudwatch log group created" - value = module.eks.cloudwatch_log_group_arn -} - -################################################################################ -# Fargate Profile 
-################################################################################ - -output "fargate_profiles" { - description = "Map of attribute maps for all EKS Fargate Profiles created" - value = module.eks.fargate_profiles -} - -################################################################################ -# EKS Managed Node Groups -################################################################################ -output "service_node_group_iam_role_name" { - value = module.eks.eks_managed_node_groups["service"].iam_role_name -} - -output "service_node_group_iam_role_arn" { - value = module.eks.eks_managed_node_groups["service"].iam_role_arn -} -output "eks_managed_node_groups" { - description = "Map of attribute maps for all EKS managed node groups created" - value = module.eks.eks_managed_node_groups -} - -################################################################################ -# Self Managed Node Group -################################################################################ - -output "self_managed_node_groups" { - description = "Map of attribute maps for all self managed node groups created" - value = module.eks.self_managed_node_groups -} - -################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} - -################################################################################ -# ELB -################################################################################ diff --git a/infra-examples/aws/k8s-cluster/providers.tf b/infra-examples/aws/k8s-cluster/providers.tf deleted file mode 100644 index 23a1770..0000000 --- a/infra-examples/aws/k8s-cluster/providers.tf +++ /dev/null @@ -1,58 +0,0 @@ 
-#------------------------------------------------------------------------------ -# written by: Lawrence McDaniel -# https://lawrencemcdaniel.com/ -# -# date: Aug-2022 -# -# usage: all providers for Kubernetes and its sub-systems. The general strategy -# is to manage authentications via aws cli where possible, simply to limit -# the environment requirements in order to get this module to work. -# -# another alternative for each of the providers would be to rely on -# the local kubeconfig file. -#------------------------------------------------------------------------------ - -# Required by Karpenter -data "aws_partition" "current" {} - -# Configure the AWS Provider -provider "aws" { - region = var.aws_region -} - -provider "kubernetes" { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - } -} - -# Required by Karpenter and metrics-server -provider "kubectl" { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - } -} - -# Required by Karpenter and metrics-server -provider "helm" { - kubernetes { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - } - } -} diff --git a/infra-examples/aws/k8s-cluster/variables.tf b/infra-examples/aws/k8s-cluster/variables.tf deleted file mode 100644 index 2febbf8..0000000 --- a/infra-examples/aws/k8s-cluster/variables.tf +++ /dev/null @@ -1,118 +0,0 @@ 
-#------------------------------------------------------------------------------ -# written by: Lawrence McDaniel -# https://lawrencemcdaniel.com/ -# -# date: Mar-2022 -# -# usage: create an EKS cluster -#------------------------------------------------------------------------------ -variable "account_id" { - description = "a 12-digit AWS account id, all integers. example: 012345678999" - type = string -} - -variable "shared_resource_identifier" { - description = "a prefix to add to all resource names associated with this Kubernetes cluster instance" - type = string - default = "" -} - -variable "name" { - description = "a valid Kubernetes name definition" - type = string - default = "openedx-k8s-harmony" -} - -variable "aws_region" { - description = "the AWS region code. example: us-east-1. see https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html" - type = string - default = "us-east-1" -} - - -variable "enable_irsa" { - description = "true to create an OpenID Connect Provider for EKS to enable IRSA (IAM Roles for Service Accounts)." - type = bool - default = true -} - -variable "kubernetes_cluster_version" { - description = "the Kubernetes release for this cluster" - type = string - default = "1.27" -} - -variable "eks_create_kms_key" { - description = "true to create an AWS Key Management Service (KMS) key for encryption of all Kubernetes secrets in this cluster." 
- type = bool - default = true -} - -variable "eks_service_group_instance_type" { - description = "AWS EC2 instance type to deploy into the 'service' AWS EKS Managed Node Group" - type = string - default = "t3.large" -} - -variable "eks_service_group_min_size" { - description = "The minimum number of AWS EC2 instance nodes to run in the 'service' AWS EKS Managed Node Group" - type = number - default = 3 -} - -variable "eks_service_group_max_size" { - description = "The maximum number of AWS EC2 instance nodes to run in the 'service' AWS EKS Managed Node Group" - type = number - default = 3 -} - -variable "eks_service_group_desired_size" { - description = "Only read during cluster creation. The desired number of AWS EC2 instance nodes to run in the 'service' AWS EKS Managed Node Group" - type = number - default = 3 -} - -# sample data: -# ----------------------------------------------------------------------------- -# map_users = [ -# { -# userarn = "arn:aws:iam::012345678999:user/mcdaniel" -# username = "mcdaniel" -# groups = ["system:masters"] -# }, -# { -# userarn = "arn:aws:iam::012345678999:user/bob_marley" -# username = "bob_marley" -# groups = ["system:masters"] -# }, -#] -variable "map_users" { - description = "Additional IAM users to add to the aws-auth configmap." - type = list(object({ - userarn = string - username = string - groups = list(string) - })) - default = [] -} - -variable "map_roles" { - description = "Additional IAM roles to add to the aws-auth configmap." 
- type = list(object({ - userarn = string - username = string - groups = list(string) - })) - default = [] -} - -# sample data: -# ----------------------------------------------------------------------------- -# kms_key_owners = [ -# "arn:aws:iam::012345678999:user/mcdaniel", -# "arn:aws:iam::012345678999:user/bob_marley", -# ] -variable "kms_key_owners" { - type = list(any) - default = [] -} diff --git a/infra-examples/aws/k8s-cluster/versions.tf b/infra-examples/aws/k8s-cluster/versions.tf deleted file mode 100644 index 7b4ae0a..0000000 --- a/infra-examples/aws/k8s-cluster/versions.tf +++ /dev/null @@ -1,38 +0,0 @@ -#------------------------------------------------------------------------------ -# written by: Lawrence McDaniel -# https://lawrencemcdaniel.com/ -# -# date: Mar-2022 -# -# usage: create an EKS cluster -#------------------------------------------------------------------------------ -terraform { - required_version = "~> 1.3" - - required_providers { - local = { - source = "hashicorp/local" - version = "~> 2.4" - } - random = { - source = "hashicorp/random" - version = "~> 3.5" - } - aws = { - source = "hashicorp/aws" - version = "~> 4.65" - } - kubectl = { - source = "gavinbunney/kubectl" - version = "~> 1.14" - } - helm = { - source = "hashicorp/helm" - version = "~> 2.9" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = "~> 2.20" - } - } -} diff --git a/infra-examples/aws/k8s-cluster/vpc.tf b/infra-examples/aws/k8s-cluster/vpc.tf deleted file mode 100644 index 13ea2e9..0000000 --- a/infra-examples/aws/k8s-cluster/vpc.tf +++ /dev/null @@ -1,19 +0,0 @@ -data "aws_vpc" "reference" { - filter { - name = "tag:Name" - values = [var.name] - } -} - -data "aws_subnets" "private" { - - filter { - name = "vpc-id" - values = [data.aws_vpc.reference.id] - } - - filter { - name = "tag:Type" - values = ["private"] - } -} diff --git a/infra-examples/aws/k8s-cluster/yml/eks-console-full-access.yaml 
b/infra-examples/aws/k8s-cluster/yml/eks-console-full-access.yaml deleted file mode 100644 index 0f9e5a6..0000000 --- a/infra-examples/aws/k8s-cluster/yml/eks-console-full-access.yaml +++ /dev/null @@ -1,44 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: eks-console-dashboard-full-access-clusterrole -rules: - - apiGroups: - - "" - resources: - - nodes - - namespaces - - pods - verbs: - - get - - list - - apiGroups: - - apps - resources: - - deployments - - daemonsets - - statefulsets - - replicasets - verbs: - - get - - list - - apiGroups: - - batch - resources: - - jobs - verbs: - - get - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: eks-console-dashboard-full-access-binding -subjects: - - kind: Group - name: eks-console-dashboard-full-access-group - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: eks-console-dashboard-full-access-clusterrole - apiGroup: rbac.authorization.k8s.io diff --git a/infra-examples/aws/main.tf b/infra-examples/aws/main.tf new file mode 100644 index 0000000..b9d8b7c --- /dev/null +++ b/infra-examples/aws/main.tf @@ -0,0 +1,91 @@ +locals { + kubernetes_version = "1.30" + cluster_name = "${var.kubernetes_cluster_name}-${var.environment}" +} + +data "aws_caller_identity" "current" {} + +data "aws_availability_zones" "available" {} + +module "main_vpc" { + source = "../../terraform/modules/aws/vpc" + + environment = var.environment + availability_zones = data.aws_availability_zones.available.names + + single_nat_gateway = true + one_nat_gateway_per_az = false + + private_subnets = [ + "10.0.1.0/24", + "10.0.2.0/24", + "10.0.3.0/24", + ] + + public_subnets = [ + "10.0.101.0/24", + "10.0.102.0/24", + "10.0.103.0/24", + ] + + tags = { + "kubernetes.io/cluster/${local.cluster_name}" = "shared" + } + + public_subnet_tags = { + "Tier" = "Public" + "kubernetes.io/cluster/${local.cluster_name}" = "shared" + "kubernetes.io/role/elb" = 
"1" + } + private_subnet_tags = { + "Tier" = "Private" + "kubernetes.io/cluster/${local.cluster_name}" = "shared" + "kubernetes.io/role/internal-elb" = "1" + } +} + +module "kubernetes_cluster" { + source = "../../terraform/modules/aws/eks" + + environment = var.environment + vpc_id = module.main_vpc.vpc_id + + cluster_name = var.kubernetes_cluster_name + kubernetes_version = local.kubernetes_version + registry_credentials = var.docker_registry_credentials + + worker_node_ssh_key_name = var.worker_node_ssh_key_name + worker_node_instance_types = ["m6i.large"] +} + +module "bucket" { + source = "../../terraform/modules/aws/s3" + + environment = var.environment + bucket_prefix = "my-institute" +} + +module "mysql_database" { + source = "../../terraform/modules/aws/rds" + depends_on = [module.main_vpc] + + environment = var.environment + vpc_id = module.main_vpc.vpc_id + + database_cluster_name = "${module.kubernetes_cluster.cluster_name}-mysql" +} + +module "mongodb_database" { + source = "../../terraform/modules/aws/mongodb" + depends_on = [module.main_vpc] + + region = var.region + environment = var.environment + vpc_id = module.main_vpc.vpc_id + + aws_account_id = data.aws_caller_identity.current.account_id + mongodbatlas_project_id = var.mongodbatlas_project_id + mongodbatlas_cidr_block = var.mongodbatlas_cidr_block + + database_cluster_name = "${module.kubernetes_cluster.cluster_name}-mongodb" +} diff --git a/infra-examples/aws/eks/versions_override.tf b/infra-examples/aws/providers.tf similarity index 61% rename from infra-examples/aws/eks/versions_override.tf rename to infra-examples/aws/providers.tf index bff6a89..fa96f9c 100644 --- a/infra-examples/aws/eks/versions_override.tf +++ b/infra-examples/aws/providers.tf @@ -1,9 +1,11 @@ terraform { - required_version = "~> 1.9" required_providers { aws = { source = "hashicorp/aws" - version = "~> 5.67" } } } + +provider "aws" { + region = var.region +} diff --git a/infra-examples/aws/variables.tf 
b/infra-examples/aws/variables.tf new file mode 100644 index 0000000..0efea81 --- /dev/null +++ b/infra-examples/aws/variables.tf @@ -0,0 +1,35 @@ +variable "region" { + type = string + description = "The AWS Region in which to deploy the resources" +} + +variable "environment" { + type = string + description = "The AWS project environment. (for example: production, staging, development, etc.)" +} + +variable "docker_registry_credentials" { + type = string + description = "Image registry credentials to be added to the K8s worker nodes" + default = "" +} + +variable "kubernetes_cluster_name" { + type = string + description = "Name of the AWS EKS Kubernetes cluster to create." +} + +variable "worker_node_ssh_key_name" { + type = string + description = "Name of the SSH Key Pair used for the worker nodes" +} + +variable "mongodbatlas_project_id" { + type = string + description = "The ID of the MongoDB Atlas project" +} + +variable "mongodbatlas_cidr_block" { + type = string + description = "The CIDR block in MongoDB Atlas" +} diff --git a/infra-examples/aws/vpc/README.rst b/infra-examples/aws/vpc/README.rst deleted file mode 100644 index a5ef222..0000000 --- a/infra-examples/aws/vpc/README.rst +++ /dev/null @@ -1,20 +0,0 @@ -Reference Infrastructure for AWS Virtual Private Cloud (VPC) -============================================================ - -Implements an `AWS Virtual Private Cloud `_ this is preconfigured to support an AWS Elastic Kubernetes Cluster. Amazon Virtual Private Cloud (Amazon VPC) enables you to launch AWS resources into a virtual network that you've defined. This virtual network closely resembles a traditional network that you'd operate in your own data center, with the benefits of using the scalable infrastructure of AWS. - -Implementation Strategy ----------------------- - -Our goal is to, as much as possible, implement a plain vanilla VPC that pre-configured as necessary to support an AWS Elastic Kubernetes Service instance.
It generally uses all default configuration values. - -This module uses the latest version of the community-supported `AWS VPC Terraform module `_ to create a fully configured Virtual Private Cloud within your AWS account. -AWS VPC Terraform module is widely supported and adopted, with more than 100 open source code contributers, and more than 37 million downloads from the Terraform registry as of March, 2023. - -What it creates -~~~~~~~~~~~~~~~ - -.. image:: doc/aws-vpc-eks.png - :width: 100% - :alt: Virtual Private Cloud Diagram - diff --git a/infra-examples/aws/vpc/doc/aws-vpc-eks.png b/infra-examples/aws/vpc/doc/aws-vpc-eks.png deleted file mode 100644 index 74e1bf8..0000000 Binary files a/infra-examples/aws/vpc/doc/aws-vpc-eks.png and /dev/null differ diff --git a/infra-examples/aws/vpc/main.tf b/infra-examples/aws/vpc/main.tf deleted file mode 100644 index 511612a..0000000 --- a/infra-examples/aws/vpc/main.tf +++ /dev/null @@ -1,67 +0,0 @@ -#------------------------------------------------------------------------------ -# written by: Lawrence McDaniel -# https://lawrencemcdaniel.com -# -# date: mar-2022 -# -# usage: create a VPC to contain all Open edX backend resources. -# this VPC is configured to generally use all AWS defaults. -# Thus, you should get the same configuration here that you'd -# get by creating a new VPC from the AWS Console. -# -# There are a LOT of options in this module. -# see https://registry.terraform.io/modules/terraform-aws-modules/vpc/aws/latest -#------------------------------------------------------------------------------ -locals { - azs = ["${var.aws_region}a", "${var.aws_region}b", "${var.aws_region}c"] - - # a bit of foreshadowing: - # AWS EKS uses tags for identifying resources which it interacts. - # here we are tagging the public and private subnets with specially-named tags - # that EKS uses to know where its public and internal load balancers should be placed. 
- # - # these tags are required, regardless of whether we're using EKS with EC2 worker nodes - # or with a Fargate Compute Cluster. - public_subnet_tags = { - "Type" = "public" - "kubernetes.io/cluster/${var.name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "Type" = "private" - "kubernetes.io/cluster/${var.name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" - "karpenter.sh/discovery" = var.name - } - - tags = { - "Name" = var.name - "openedx-k8s-harmony/name" = var.name - "openedx-k8s-harmony/region" = var.aws_region - "openedx-k8s-harmony/tofu" = "true" - } - - -} - -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 4.0" - create_vpc = true - azs = local.azs - public_subnet_tags = local.public_subnet_tags - private_subnet_tags = local.private_subnet_tags - tags = local.tags - name = var.name - cidr = var.cidr - public_subnets = var.public_subnets - private_subnets = var.private_subnets - database_subnets = var.database_subnets - elasticache_subnets = var.elasticache_subnets - enable_ipv6 = var.enable_ipv6 - enable_dns_hostnames = var.enable_dns_hostnames - enable_nat_gateway = var.enable_nat_gateway - single_nat_gateway = var.single_nat_gateway - one_nat_gateway_per_az = var.one_nat_gateway_per_az -} diff --git a/infra-examples/aws/vpc/outputs.tf b/infra-examples/aws/vpc/outputs.tf deleted file mode 100644 index 1bc6ef7..0000000 --- a/infra-examples/aws/vpc/outputs.tf +++ /dev/null @@ -1,550 +0,0 @@ -#------------------------------------------------------------------------------ -# written by: Miguel Afonso -# https://www.linkedin.com/in/mmafonso/ -# -# date: Aug-2021 -# -# usage: create a VPC to contain all Open edX backend resources. 
-#------------------------------------------------------------------------------ - -output "vpc_id" { - description = "The ID of the VPC" - value = module.vpc.vpc_id -} - -output "vpc_arn" { - description = "The ARN of the VPC" - value = module.vpc.vpc_arn -} - -output "vpc_cidr_block" { - description = "The CIDR block of the VPC" - value = module.vpc.vpc_cidr_block -} - -output "default_security_group_id" { - description = "The ID of the security group created by default on VPC creation" - value = module.vpc.default_security_group_id -} - -output "default_network_acl_id" { - description = "The ID of the default network ACL" - value = module.vpc.default_network_acl_id -} - -output "default_route_table_id" { - description = "The ID of the default route table" - value = module.vpc.default_route_table_id -} - -output "vpc_instance_tenancy" { - description = "Tenancy of instances spin up within VPC" - value = module.vpc.vpc_instance_tenancy -} - -output "vpc_enable_dns_support" { - description = "Whether or not the VPC has DNS support" - value = module.vpc.vpc_enable_dns_support -} - -output "vpc_enable_dns_hostnames" { - description = "Whether or not the VPC has DNS hostname support" - value = module.vpc.vpc_enable_dns_hostnames -} - -output "vpc_main_route_table_id" { - description = "The ID of the main route table associated with this VPC" - value = module.vpc.vpc_main_route_table_id -} - -output "vpc_ipv6_association_id" { - description = "The association ID for the IPv6 CIDR block" - value = module.vpc.vpc_ipv6_association_id -} - -output "vpc_ipv6_cidr_block" { - description = "The IPv6 CIDR block" - value = module.vpc.vpc_ipv6_cidr_block -} - -output "vpc_secondary_cidr_blocks" { - description = "List of secondary CIDR blocks of the VPC" - value = module.vpc.vpc_secondary_cidr_blocks -} - -output "vpc_owner_id" { - description = "The ID of the AWS account that owns the VPC" - value = module.vpc.vpc_owner_id -} - -output "private_subnets" { - description = "List 
of IDs of private subnets" - value = module.vpc.private_subnets -} - -output "private_subnet_arns" { - description = "List of ARNs of private subnets" - value = module.vpc.private_subnet_arns -} - -output "private_subnets_cidr_blocks" { - description = "List of cidr_blocks of private subnets" - value = module.vpc.private_subnets_cidr_blocks -} - -output "private_subnets_ipv6_cidr_blocks" { - description = "List of IPv6 cidr_blocks of private subnets in an IPv6 enabled VPC" - value = module.vpc.private_subnets_ipv6_cidr_blocks -} - -output "public_subnets" { - description = "List of IDs of public subnets" - value = module.vpc.public_subnets -} - -output "public_subnet_arns" { - description = "List of ARNs of public subnets" - value = module.vpc.public_subnet_arns -} - -output "public_subnets_cidr_blocks" { - description = "List of cidr_blocks of public subnets" - value = module.vpc.public_subnets_cidr_blocks -} - -output "public_subnets_ipv6_cidr_blocks" { - description = "List of IPv6 cidr_blocks of public subnets in an IPv6 enabled VPC" - value = module.vpc.public_subnets_ipv6_cidr_blocks -} - -output "outpost_subnets" { - description = "List of IDs of outpost subnets" - value = module.vpc.outpost_subnets -} - -output "outpost_subnet_arns" { - description = "List of ARNs of outpost subnets" - value = module.vpc.outpost_subnet_arns -} - -output "outpost_subnets_cidr_blocks" { - description = "List of cidr_blocks of outpost subnets" - value = module.vpc.outpost_subnets_cidr_blocks -} - -output "outpost_subnets_ipv6_cidr_blocks" { - description = "List of IPv6 cidr_blocks of outpost subnets in an IPv6 enabled VPC" - value = module.vpc.outpost_subnets_ipv6_cidr_blocks -} - -output "database_subnets" { - description = "List of IDs of database subnets" - value = module.vpc.database_subnets -} - -output "database_subnet_arns" { - description = "List of ARNs of database subnets" - value = module.vpc.database_subnet_arns -} - -output "database_subnets_cidr_blocks" { - 
description = "List of cidr_blocks of database subnets" - value = module.vpc.database_subnets_cidr_blocks -} - -output "database_subnets_ipv6_cidr_blocks" { - description = "List of IPv6 cidr_blocks of database subnets in an IPv6 enabled VPC" - value = module.vpc.database_subnets_ipv6_cidr_blocks -} - -output "database_subnet_group" { - description = "ID of database subnet group" - value = module.vpc.database_subnet_group -} - -output "database_subnet_group_name" { - description = "Name of database subnet group" - value = module.vpc.database_subnet_group_name -} - -output "redshift_subnets" { - description = "List of IDs of redshift subnets" - value = module.vpc.redshift_subnets -} - -output "redshift_subnet_arns" { - description = "List of ARNs of redshift subnets" - value = module.vpc.redshift_subnet_arns -} - -output "redshift_subnets_cidr_blocks" { - description = "List of cidr_blocks of redshift subnets" - value = module.vpc.redshift_subnets_cidr_blocks -} - -output "redshift_subnets_ipv6_cidr_blocks" { - description = "List of IPv6 cidr_blocks of redshift subnets in an IPv6 enabled VPC" - value = module.vpc.redshift_subnets_ipv6_cidr_blocks -} - -output "redshift_subnet_group" { - description = "ID of redshift subnet group" - value = module.vpc.redshift_subnet_group -} - -output "elasticache_subnets" { - description = "List of IDs of elasticache subnets" - value = module.vpc.elasticache_subnets -} - -output "elasticache_subnet_arns" { - description = "List of ARNs of elasticache subnets" - value = module.vpc.elasticache_subnet_arns -} - -output "elasticache_subnets_cidr_blocks" { - description = "List of cidr_blocks of elasticache subnets" - value = module.vpc.elasticache_subnets_cidr_blocks -} - -output "elasticache_subnets_ipv6_cidr_blocks" { - description = "List of IPv6 cidr_blocks of elasticache subnets in an IPv6 enabled VPC" - value = module.vpc.elasticache_subnets_ipv6_cidr_blocks -} - -output "intra_subnets" { - description = "List of IDs of intra 
subnets" - value = module.vpc.intra_subnets -} - -output "intra_subnet_arns" { - description = "List of ARNs of intra subnets" - value = module.vpc.intra_subnet_arns -} - -output "intra_subnets_cidr_blocks" { - description = "List of cidr_blocks of intra subnets" - value = module.vpc.intra_subnets_cidr_blocks -} - -output "intra_subnets_ipv6_cidr_blocks" { - description = "List of IPv6 cidr_blocks of intra subnets in an IPv6 enabled VPC" - value = module.vpc.intra_subnets_ipv6_cidr_blocks -} - -output "elasticache_subnet_group" { - description = "ID of elasticache subnet group" - value = module.vpc.elasticache_subnet_group -} - -output "elasticache_subnet_group_name" { - description = "Name of elasticache subnet group" - value = module.vpc.elasticache_subnet_group_name -} - -output "public_route_table_ids" { - description = "List of IDs of public route tables" - value = module.vpc.public_route_table_ids -} - -output "private_route_table_ids" { - description = "List of IDs of private route tables" - value = module.vpc.private_route_table_ids -} - -output "database_route_table_ids" { - description = "List of IDs of database route tables" - value = module.vpc.database_route_table_ids -} - -output "redshift_route_table_ids" { - description = "List of IDs of redshift route tables" - value = module.vpc.redshift_route_table_ids -} - -output "elasticache_route_table_ids" { - description = "List of IDs of elasticache route tables" - value = module.vpc.elasticache_route_table_ids -} - -output "intra_route_table_ids" { - description = "List of IDs of intra route tables" - value = module.vpc.intra_route_table_ids -} - -output "public_internet_gateway_route_id" { - description = "ID of the internet gateway route" - value = module.vpc.public_internet_gateway_route_id -} - -output "public_internet_gateway_ipv6_route_id" { - description = "ID of the IPv6 internet gateway route" - value = module.vpc.public_internet_gateway_ipv6_route_id -} - -output 
"database_internet_gateway_route_id" { - description = "ID of the database internet gateway route" - value = module.vpc.database_internet_gateway_route_id -} - -output "database_nat_gateway_route_ids" { - description = "List of IDs of the database nat gateway route" - value = module.vpc.database_nat_gateway_route_ids -} - -output "database_ipv6_egress_route_id" { - description = "ID of the database IPv6 egress route" - value = module.vpc.database_ipv6_egress_route_id -} - -output "private_nat_gateway_route_ids" { - description = "List of IDs of the private nat gateway route" - value = module.vpc.private_nat_gateway_route_ids -} - -output "private_ipv6_egress_route_ids" { - description = "List of IDs of the ipv6 egress route" - value = module.vpc.private_ipv6_egress_route_ids -} - -output "private_route_table_association_ids" { - description = "List of IDs of the private route table association" - value = module.vpc.private_route_table_association_ids -} - -output "database_route_table_association_ids" { - description = "List of IDs of the database route table association" - value = module.vpc.database_route_table_association_ids -} - -output "redshift_route_table_association_ids" { - description = "List of IDs of the redshift route table association" - value = module.vpc.redshift_route_table_association_ids -} - -output "redshift_public_route_table_association_ids" { - description = "List of IDs of the public redshidt route table association" - value = module.vpc.redshift_public_route_table_association_ids -} - -output "elasticache_route_table_association_ids" { - description = "List of IDs of the elasticache route table association" - value = module.vpc.elasticache_route_table_association_ids -} - -output "intra_route_table_association_ids" { - description = "List of IDs of the intra route table association" - value = module.vpc.intra_route_table_association_ids -} - -output "public_route_table_association_ids" { - description = "List of IDs of the public route 
table association" - value = module.vpc.public_route_table_association_ids -} - -output "dhcp_options_id" { - description = "The ID of the DHCP options" - value = module.vpc.dhcp_options_id -} - -output "nat_ids" { - description = "List of allocation ID of Elastic IPs created for AWS NAT Gateway" - value = module.vpc.nat_ids -} - -output "nat_public_ips" { - description = "List of public Elastic IPs created for AWS NAT Gateway" - value = module.vpc.nat_public_ips -} - -output "natgw_ids" { - description = "List of NAT Gateway IDs" - value = module.vpc.natgw_ids -} - -output "igw_id" { - description = "The ID of the Internet Gateway" - value = module.vpc.igw_id -} - -output "igw_arn" { - description = "The ARN of the Internet Gateway" - value = module.vpc.igw_arn -} - -output "egress_only_internet_gateway_id" { - description = "The ID of the egress only Internet Gateway" - value = module.vpc.egress_only_internet_gateway_id -} - -output "cgw_ids" { - description = "List of IDs of Customer Gateway" - value = module.vpc.cgw_ids -} - -output "cgw_arns" { - description = "List of ARNs of Customer Gateway" - value = module.vpc.cgw_arns -} - -output "this_customer_gateway" { - description = "Map of Customer Gateway attributes" - value = module.vpc.this_customer_gateway -} - -output "vgw_id" { - description = "The ID of the VPN Gateway" - value = module.vpc.vgw_id -} - -output "vgw_arn" { - description = "The ARN of the VPN Gateway" - value = module.vpc.vgw_arn -} - -output "default_vpc_id" { - description = "The ID of the Default VPC" - value = module.vpc.default_vpc_id -} - -output "default_vpc_arn" { - description = "The ARN of the Default VPC" - value = module.vpc.default_vpc_arn -} - -output "default_vpc_cidr_block" { - description = "The CIDR block of the Default VPC" - value = module.vpc.default_vpc_cidr_block -} - -output "default_vpc_default_security_group_id" { - description = "The ID of the security group created by default on Default VPC creation" - value = 
module.vpc.default_vpc_default_security_group_id -} - -output "default_vpc_default_network_acl_id" { - description = "The ID of the default network ACL of the Default VPC" - value = module.vpc.default_vpc_default_network_acl_id -} - -output "default_vpc_default_route_table_id" { - description = "The ID of the default route table of the Default VPC" - value = module.vpc.default_vpc_default_route_table_id -} - -output "default_vpc_instance_tenancy" { - description = "Tenancy of instances spin up within Default VPC" - value = module.vpc.default_vpc_instance_tenancy -} - -output "default_vpc_enable_dns_support" { - description = "Whether or not the Default VPC has DNS support" - value = module.vpc.default_vpc_enable_dns_support -} - -output "default_vpc_enable_dns_hostnames" { - description = "Whether or not the Default VPC has DNS hostname support" - value = module.vpc.default_vpc_enable_dns_hostnames -} - -output "default_vpc_main_route_table_id" { - description = "The ID of the main route table associated with the Default VPC" - value = module.vpc.default_vpc_main_route_table_id -} - -output "public_network_acl_id" { - description = "ID of the public network ACL" - value = module.vpc.public_network_acl_id -} - -output "public_network_acl_arn" { - description = "ARN of the public network ACL" - value = module.vpc.public_network_acl_arn -} - -output "private_network_acl_id" { - description = "ID of the private network ACL" - value = module.vpc.private_network_acl_id -} - -output "private_network_acl_arn" { - description = "ARN of the private network ACL" - value = module.vpc.private_network_acl_arn -} - -output "outpost_network_acl_id" { - description = "ID of the outpost network ACL" - value = module.vpc.outpost_network_acl_id -} - -output "outpost_network_acl_arn" { - description = "ARN of the outpost network ACL" - value = module.vpc.outpost_network_acl_arn -} - -output "intra_network_acl_id" { - description = "ID of the intra network ACL" - value = 
module.vpc.intra_network_acl_id -} - -output "intra_network_acl_arn" { - description = "ARN of the intra network ACL" - value = module.vpc.intra_network_acl_arn -} - -output "database_network_acl_id" { - description = "ID of the database network ACL" - value = module.vpc.database_network_acl_id -} - -output "database_network_acl_arn" { - description = "ARN of the database network ACL" - value = module.vpc.database_network_acl_arn -} - -output "redshift_network_acl_id" { - description = "ID of the redshift network ACL" - value = module.vpc.redshift_network_acl_id -} - -output "redshift_network_acl_arn" { - description = "ARN of the redshift network ACL" - value = module.vpc.redshift_network_acl_arn -} - -output "elasticache_network_acl_id" { - description = "ID of the elasticache network ACL" - value = module.vpc.elasticache_network_acl_id -} - -output "elasticache_network_acl_arn" { - description = "ARN of the elasticache network ACL" - value = module.vpc.elasticache_network_acl_arn -} - -# VPC flow log -output "vpc_flow_log_id" { - description = "The ID of the Flow Log resource" - value = module.vpc.vpc_flow_log_id -} - -output "vpc_flow_log_destination_arn" { - description = "The ARN of the destination for VPC Flow Logs" - value = module.vpc.vpc_flow_log_destination_arn -} - -output "vpc_flow_log_destination_type" { - description = "The type of the destination for VPC Flow Logs" - value = module.vpc.vpc_flow_log_destination_type -} - -output "vpc_flow_log_cloudwatch_iam_role_arn" { - description = "The ARN of the IAM role used when pushing logs to Cloudwatch log group" - value = module.vpc.vpc_flow_log_cloudwatch_iam_role_arn -} - -# VPC endpoints -#output "vpc_endpoints" { -# description = "Array containing the full resource object and attributes for all endpoints created" -# value = module.vpc_endpoints.endpoints -#} diff --git a/infra-examples/aws/vpc/providers.tf b/infra-examples/aws/vpc/providers.tf deleted file mode 100644 index b67acf4..0000000 --- 
a/infra-examples/aws/vpc/providers.tf +++ /dev/null @@ -1,19 +0,0 @@ -#------------------------------------------------------------------------------ -# written by: Lawrence McDaniel -# https://lawrencemcdaniel.com/ -# -# date: Aug-2022 -# -# usage: all providers for Kubernetes and its sub-systems. The general strategy -# is to manage authentications via aws cli where possible, simply to limit -# the environment requirements in order to get this module to work. -# -# another alternative for each of the providers would be to rely on -# the local kubeconfig file. -#------------------------------------------------------------------------------ - -# Configure the AWS Provider -provider "aws" { - region = var.aws_region -} - diff --git a/infra-examples/aws/vpc/variables.tf b/infra-examples/aws/vpc/variables.tf deleted file mode 100644 index f7dca9e..0000000 --- a/infra-examples/aws/vpc/variables.tf +++ /dev/null @@ -1,85 +0,0 @@ -#------------------------------------------------------------------------------ -# written by: Miguel Afonso -# https://www.linkedin.com/in/mmafonso/ -# -# date: Aug-2021 -# -# usage: create a VPC to contain all Open edX backend resources. -#------------------------------------------------------------------------------ -variable "aws_region" { - description = "The region in which the origin S3 bucket was created." - type = string - default = "us-east-1" -} - -variable "cidr" { - description = "The CIDR block for the VPC. 
Default value is a valid CIDR, but not acceptable by AWS and should be overridden" - type = string - default = "192.168.0.0/20" -} - -variable "database_subnets" { - description = "A list of database subnets" - type = list(string) - default = ["192.168.8.0/24", "192.168.9.0/24"] -} - -variable "elasticache_subnets" { - description = "A list of elasticache subnets" - type = list(string) - default = ["192.168.10.0/24", "192.168.11.0/24"] -} - -variable "enable_ipv6" { - description = "Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for the VPC. You cannot specify the range of IP addresses, or the size of the CIDR block." - type = bool - default = false -} - -variable "enable_nat_gateway" { - description = "Should be true if you want to provision NAT Gateways for each of your private networks" - type = bool - default = true -} - -variable "one_nat_gateway_per_az" { - description = "Should be true if you want only one NAT Gateway per availability zone. Requires var.azs to be set, and the number of public_subnets created to be greater than or equal to the number of availability zones specified in var.azs" - type = bool - default = true -} - -variable "single_nat_gateway" { - description = "Should be true if you want to provision a single shared NAT Gateway across all of your private networks" - type = bool - default = false -} - -variable "enable_dns_hostnames" { - description = "Should be true to enable DNS hostnames in the VPC" - type = bool - default = false -} - -variable "name" { - description = "Name to be used on all the resources as identifier" - type = string - default = "openedx-k8s-harmony" -} - -variable "private_subnets" { - description = "A list of private subnets inside the VPC" - type = list(string) - default = ["192.168.4.0/24", "192.168.5.0/24", "192.168.6.0/24"] -} - -variable "public_subnets" { - description = "A list of public subnets inside the VPC" - type = list(string) - default = ["192.168.1.0/24", "192.168.2.0/24", 
"192.168.3.0/24"] -} - -variable "tags" { - description = "A map of tags to add to all resources" - type = map(string) - default = {} -} diff --git a/infra-examples/aws/vpc/versions.tf b/infra-examples/aws/vpc/versions.tf deleted file mode 100644 index ac1fc42..0000000 --- a/infra-examples/aws/vpc/versions.tf +++ /dev/null @@ -1,22 +0,0 @@ -#------------------------------------------------------------------------------ -# written by: Lawrence McDaniel -# https://lawrencemcdaniel.com/ -# -# date: March-2022 -# -# usage: build an EKS cluster load balancer that uses a Fargate Compute Cluster -#------------------------------------------------------------------------------ -terraform { - required_version = "~> 1.3" - - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 4.65" - } - local = { - source = "hashicorp/local" - version = "~> 2.4" - } - } -} diff --git a/terraform/modules/aws/eks/README.md b/terraform/modules/aws/eks/README.md new file mode 100644 index 0000000..09c949f --- /dev/null +++ b/terraform/modules/aws/eks/README.md @@ -0,0 +1,68 @@ +## Requirements + +No requirements. 
+ +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | n/a | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [cluster\_autoscaler\_irsa\_role](#module\_cluster\_autoscaler\_irsa\_role) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 5.47 | +| [ebs\_csi\_irsa\_role](#module\_ebs\_csi\_irsa\_role) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 5.47 | +| [eks](#module\_eks) | terraform-aws-modules/eks/aws | ~> 20.31 | + +## Resources + +| Name | Type | +|------|------| +| [aws_ami.latest_ubuntu_eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | +| [aws_subnets.main](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source | +| [aws_vpc.main](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [ami\_id](#input\_ami\_id) | EKS nodes AMI ID | `string` | `""` | no | +| [cluster\_name](#input\_cluster\_name) | The name of the Kubernetes cluster. 
| `string` | n/a | yes | +| [cluster\_security\_group\_description](#input\_cluster\_security\_group\_description) | Cluster security group description | `string` | `"EKS cluster security group"` | no | +| [cluster\_security\_group\_name](#input\_cluster\_security\_group\_name) | Cluster security group name | `string` | `null` | no | +| [cluster\_security\_group\_use\_name\_prefix](#input\_cluster\_security\_group\_use\_name\_prefix) | Determinate if it is necessary to create an security group prefix for the cluster | `bool` | `true` | no | +| [cluster\_tags](#input\_cluster\_tags) | A map of tags to add to the cluster | `map(string)` | `{}` | no | +| [enable\_cluster\_autoscaler](#input\_enable\_cluster\_autoscaler) | Determines whether to prepare the cluster to use cluster autoscaler | `bool` | `false` | no | +| [environment](#input\_environment) | The AWS project environment. (for example: production, staging, development, etc.) | `string` | n/a | yes | +| [iam\_role\_name](#input\_iam\_role\_name) | Cluster IAM role name | `string` | `null` | no | +| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determinate if it is necessary to create an iam role prefix for the cluster | `bool` | `true` | no | +| [kubernetes\_version](#input\_kubernetes\_version) | The supported Kubernetes version to install for the cluster. | `string` | n/a | yes | +| [max\_worker\_node\_count](#input\_max\_worker\_node\_count) | Maximum node count in the autoscaling group | `number` | `3` | no | +| [min\_worker\_node\_count](#input\_min\_worker\_node\_count) | Minimum node count in the autoscaling group | `number` | `1` | no | +| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | Allow to add post bootstrap user data | `string` | `null` | no | +| [private\_subnets](#input\_private\_subnets) | List of private subnets | `list(string)` |
[
"10.10.0.0/21",
"10.10.8.0/21"
]
| no | +| [registry\_credentials](#input\_registry\_credentials) | Image registry credentials to be added to the node | `string` | n/a | yes | +| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no | +| [ubuntu\_version](#input\_ubuntu\_version) | Ubuntu version to use (e.g. focal-20.04) when no ami\_id is provided | `string` | `"jammy-22.04"` | no | +| [vpc\_id](#input\_vpc\_id) | ID of the VPC to use for the Kubernetes cluster. | `string` | n/a | yes | +| [worker\_node\_capacity\_type](#input\_worker\_node\_capacity\_type) | Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT` | `string` | `"ON_DEMAND"` | no | +| [worker\_node\_count](#input\_worker\_node\_count) | Desired autoscaling node count | `number` | `2` | no | +| [worker\_node\_disk\_size](#input\_worker\_node\_disk\_size) | Node disk size | `number` | `40` | no | +| [worker\_node\_extra\_ssh\_cidrs](#input\_worker\_node\_extra\_ssh\_cidrs) | List of additional IP blocks with ssh access to the worker nodes | `list(string)` | `[]` | no | +| [worker\_node\_group\_name](#input\_worker\_node\_group\_name) | Name of the node group | `string` | `"ubuntu_worker"` | no | +| [worker\_node\_groups\_tags](#input\_worker\_node\_groups\_tags) | A map of tags to add to all node group resources | `map(string)` | `{}` | no | +| [worker\_node\_instance\_types](#input\_worker\_node\_instance\_types) | EC2 Instance type for the nodes | `list(string)` | n/a | yes | +| [worker\_node\_ssh\_key\_name](#input\_worker\_node\_ssh\_key\_name) | Name of the SSH Key Pair used for the worker nodes | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server | +| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | +| [cluster\_version](#output\_cluster\_version) | The Kubernetes version for the cluster | +| 
[vpc\_arn](#output\_vpc\_arn) | The ARN of the VPC | +| [vpc\_cidr\_block](#output\_vpc\_cidr\_block) | The CIDR block of the VPC | +| [vpc\_id](#output\_vpc\_id) | The ID of the VPC | diff --git a/infra-examples/aws/eks/eks.tf b/terraform/modules/aws/eks/main.tf similarity index 76% rename from infra-examples/aws/eks/eks.tf rename to terraform/modules/aws/eks/main.tf index 6bbb54a..01797e3 100644 --- a/infra-examples/aws/eks/eks.tf +++ b/terraform/modules/aws/eks/main.tf @@ -1,21 +1,30 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + } +} + locals { cluster_autoscaler_tags = var.enable_cluster_autoscaler ? { "k8s.io/cluster-autoscaler/${var.cluster_name}" = "owned" "k8s.io/cluster-autoscaler/enabled" = "true" } : {} + post_bootstrap_user_data = var.post_bootstrap_user_data != null ? var.post_bootstrap_user_data : templatefile( "${path.module}/templates/post_bootstrap_user_data.tpl", { registry_credentials = var.registry_credentials } ) + # Define the default IAM role additional policies for all the node groups # every element must define: # - A key for the policy. 
It can be any string # - A value which is the ARN of the policy to add # - An enable key which determines if the policy is added or not - node_group_defaults_iam_role_additional_policies = [ - ] + node_group_defaults_iam_role_additional_policies = [] } data "aws_ami" "latest_ubuntu_eks" { @@ -24,18 +33,33 @@ data "aws_ami" "latest_ubuntu_eks" { filter { name = "name" - values = ["ubuntu-eks/k8s_${var.cluster_version}/images/hvm-ssd/ubuntu-${var.ubuntu_version}-amd64-server-*"] + values = ["ubuntu-eks/k8s_${var.kubernetes_version}/images/hvm-ssd/ubuntu-${var.ubuntu_version}-amd64-server-*"] + } +} + +data "aws_vpc" "main" { + id = var.vpc_id +} + +data "aws_subnets" "main" { + filter { + name = "vpc-id" + values = [data.aws_vpc.main.id] + } + + tags = { + Tier = "Private" } } module "eks" { source = "terraform-aws-modules/eks/aws" - version = "~> 20.24" + version = "~> 20.31" cluster_name = var.cluster_name - cluster_version = var.cluster_version + cluster_version = var.kubernetes_version cluster_endpoint_public_access = true - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets + vpc_id = data.aws_vpc.main.id + subnet_ids = data.aws_subnets.main.ids enable_irsa = true cluster_tags = var.cluster_tags tags = var.tags @@ -68,7 +92,7 @@ module "eks" { from_port = 22 to_port = 22 type = "ingress" - cidr_blocks = concat([module.vpc.vpc_cidr_block], var.extra_ssh_cidrs) + cidr_blocks = concat([data.aws_vpc.main.cidr_block], var.worker_node_extra_ssh_cidrs) } } @@ -91,9 +115,9 @@ module "eks" { ubuntu_worker = { ami_id = var.ami_id != "" ? 
var.ami_id : data.aws_ami.latest_ubuntu_eks.id - key_name = var.key_name - name = var.node_group_name - subnet_ids = coalesce(var.node_group_subnets, module.vpc.private_subnets) + key_name = var.worker_node_ssh_key_name + name = var.worker_node_group_name + subnet_ids = data.aws_subnets.main.ids # This will ensure the boostrap user data is used to join the node # By default, EKS managed node groups will not append bootstrap script; @@ -101,11 +125,11 @@ module "eks" { # Note: this assumes the AMI provided is an EKS optimized AMI derivative enable_bootstrap_user_data = true - instance_types = var.instance_types - max_size = var.max_size - min_size = var.min_size - desired_size = var.desired_size - capacity_type = var.capacity_type + instance_types = var.worker_node_instance_types + max_size = var.max_worker_node_count + min_size = var.min_worker_node_count + desired_size = var.worker_node_count + capacity_type = var.worker_node_capacity_type create_security_group = false @@ -115,12 +139,12 @@ module "eks" { sda1 = { device_name = "/dev/sda1" ebs = { - volume_size = var.disk_size + volume_size = var.worker_node_disk_size } } } - tags = merge(var.node_groups_tags, local.cluster_autoscaler_tags) + tags = merge(var.worker_node_groups_tags, local.cluster_autoscaler_tags) } } } @@ -148,7 +172,7 @@ module "cluster_autoscaler_irsa_role" { count = var.enable_cluster_autoscaler ? 
1 : 0 - role_name = "cluster-autoscaler-${var.cluster_name}" + role_name = "cluster-autoscaler-${module.eks.cluster_name}" attach_cluster_autoscaler_policy = true cluster_autoscaler_cluster_ids = [module.eks.cluster_name] tags = var.tags diff --git a/terraform/modules/aws/eks/outputs.tf b/terraform/modules/aws/eks/outputs.tf new file mode 100644 index 0000000..48bf850 --- /dev/null +++ b/terraform/modules/aws/eks/outputs.tf @@ -0,0 +1,29 @@ +output "vpc_id" { + description = "The ID of the VPC" + value = data.aws_vpc.main.id +} + +output "vpc_arn" { + description = "The ARN of the VPC" + value = data.aws_vpc.main.arn +} + +output "vpc_cidr_block" { + description = "The CIDR block of the VPC" + value = data.aws_vpc.main.cidr_block +} + +output "cluster_name" { + description = "The name of the EKS cluster" + value = module.eks.cluster_name +} + +output "cluster_endpoint" { + description = "Endpoint for your Kubernetes API server" + value = module.eks.cluster_endpoint +} + +output "cluster_version" { + description = "The Kubernetes version for the cluster" + value = module.eks.cluster_version +} diff --git a/infra-examples/aws/eks/templates/post_bootstrap_user_data.tpl b/terraform/modules/aws/eks/templates/post_bootstrap_user_data.tpl similarity index 100% rename from infra-examples/aws/eks/templates/post_bootstrap_user_data.tpl rename to terraform/modules/aws/eks/templates/post_bootstrap_user_data.tpl diff --git a/infra-examples/aws/eks/variables.tf b/terraform/modules/aws/eks/variables.tf similarity index 63% rename from infra-examples/aws/eks/variables.tf rename to terraform/modules/aws/eks/variables.tf index ff5c45d..6f14e14 100644 --- a/infra-examples/aws/eks/variables.tf +++ b/terraform/modules/aws/eks/variables.tf @@ -1,102 +1,122 @@ -variable "aws_region" { - description = "The AWS Region in which to deploy the resources" +variable "environment" { type = string + description = "The AWS project environment. 
(for example: production, staging, development, etc.)" } -variable "private_subnets" { - description = "List of private subnets" - type = list(string) - default = [] -} -variable "public_subnets" { - description = "List of public subnets" - type = list(string) - default = [] -} -variable "cidr" { - description = "CIDR block for the VPC" - type = string - default = "10.0.0.0/16" -} -variable "azs" { - description = "List of availability zones to use" - type = list(string) - default = [] -} -variable "vpc_name" { - description = "The VPC name" +variable "cluster_name" { type = string + description = "The name of the Kubernetes cluster." } -variable "enable_nat_gateway" { - description = "Enable NAT Gateway" +variable "cluster_security_group_use_name_prefix" { + description = "Determinate if it is necessary to create an security group prefix for the cluster" type = bool default = true } -variable "single_nat_gateway" { - description = "Single NAT Gateway" - type = bool - default = false +variable "cluster_security_group_name" { + description = "Cluster security group name" + type = string + default = null } -variable "one_nat_gateway_per_az" { - description = "One NAT gateway per AZ" - type = bool - default = true +variable "cluster_security_group_description" { + description = "Cluster security group description" + type = string + default = "EKS cluster security group" } -variable "instance_types" { - type = list(string) - description = "EC2 Instance type for the nodes" +variable "cluster_tags" { + description = "A map of tags to add to the cluster" + type = map(string) + default = {} +} + +variable "tags" { + description = "A map of tags to add to all resources" + type = map(string) + default = {} } -variable "cluster_version" { - default = "1.29" + +variable "kubernetes_version" { type = string - description = "Kubernetes version" + description = "The supported Kubernetes version to install for the cluster." 
} -variable "cluster_name" { + +variable "vpc_id" { type = string - description = "Name of the cluster" + description = "ID of the VPC to use for the Kubernetes cluster." } -variable "desired_size" { - default = 2 - type = number - description = "Desired node count" + +variable "private_subnets" { + type = list(string) + default = ["10.10.0.0/21", "10.10.8.0/21"] + description = "List of private subnets" +} + +variable "worker_node_instance_types" { + type = list(string) + description = "EC2 Instance type for the nodes" } -variable "disk_size" { + +variable "worker_node_disk_size" { default = 40 type = number description = "Node disk size" } -variable "key_name" { - type = string - description = "Name of the SSH Key Pair" + +variable "worker_node_count" { + default = 2 + type = number + description = "Desired autoscaling node count" } -variable "max_size" { + +variable "max_worker_node_count" { default = 3 type = number - description = "Maximum node count" + description = "Maximum node count in the autoscaling group" } -variable "min_size" { + +variable "min_worker_node_count" { default = 1 type = number - description = "Minimum node count" + description = "Minimum node count in the autoscaling group" } -variable "extra_ssh_cidrs" { - default = [] - type = list(string) - description = "List of additional IP blocks with ssh access" -} -variable "registry_credentials" { + +variable "worker_node_ssh_key_name" { type = string - description = "Image registry credentials to be added to the node" + description = "Name of the SSH Key Pair used for the worker nodes" +} + +variable "worker_node_extra_ssh_cidrs" { + type = list(string) + description = "List of additional IP blocks with ssh access to the worker nodes" + default = [] } -variable "node_groups_tags" { + +variable "worker_node_groups_tags" { description = "A map of tags to add to all node group resources" type = map(string) default = {} } + +variable "worker_node_group_name" { + description = "Name of the node 
group" + type = string + default = "ubuntu_worker" +} + +variable "worker_node_capacity_type" { + description = "Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`" + type = string + default = "ON_DEMAND" +} + +variable "registry_credentials" { + type = string + description = "Image registry credentials to be added to the node" +} + variable "enable_cluster_autoscaler" { description = "Determines whether to prepare the cluster to use cluster autoscaler" type = bool @@ -112,62 +132,25 @@ variable "ubuntu_version" { error_message = "The value must be in format str-num.num (e.g. focal-20.04)." } } + variable "ami_id" { description = "EKS nodes AMI ID" type = string default = "" } -# Variables for migration from 17.x.x to 18.x.x - Cluster + variable "iam_role_use_name_prefix" { description = "Determinate if it is necessary to create an iam role prefix for the cluster" type = bool default = true } + variable "iam_role_name" { description = "Cluster IAM role name" type = string default = null } -variable "cluster_security_group_use_name_prefix" { - description = "Determinate if it is necessary to create an security group prefix for the cluster" - type = bool - default = true -} -variable "cluster_security_group_name" { - description = "Cluster security group name" - type = string - default = null -} -variable "cluster_security_group_description" { - description = "Cluster security group description" - type = string - default = "EKS cluster security group" -} -variable "node_group_subnets" { - description = "List of subnets where nodes groups are deployed. 
Normally these are private and the same as EKS" - type = list(string) - default = null -} -variable "cluster_tags" { - description = "A map of tags to add to the cluster" - type = map(string) - default = {} -} -variable "tags" { - description = "A map of tags to add to all resources" - type = map(string) - default = {} -} -variable "node_group_name" { - description = "Name of the node group" - type = string - default = "ubuntu_worker" -} -variable "capacity_type" { - description = "Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`" - type = string - default = "ON_DEMAND" -} + variable "post_bootstrap_user_data" { type = string default = null diff --git a/terraform/modules/aws/mongodb/README.md b/terraform/modules/aws/mongodb/README.md new file mode 100644 index 0000000..e0a6be8 --- /dev/null +++ b/terraform/modules/aws/mongodb/README.md @@ -0,0 +1,69 @@ +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | n/a | +| [mongodbatlas](#provider\_mongodbatlas) | n/a | +| [random](#provider\_random) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_route.peeraccess](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route) | resource | +| [aws_vpc_peering_connection_accepter.accept_mongo_peer](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_peering_connection_accepter) | resource | +| [mongodbatlas_cloud_backup_schedule.backup_schedule](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/cloud_backup_schedule) | resource | +| [mongodbatlas_cluster.cluster](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/cluster) | resource | +| [mongodbatlas_database_user.users](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/database_user) | resource | +| [mongodbatlas_network_container.cluster_network_container](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/network_container) | resource | +| [mongodbatlas_network_peering.cluster_network_peering](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/network_peering) | resource | +| [mongodbatlas_project_ip_access_list.cluster_access_list](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/project_ip_access_list) | resource | +| [random_password.user_passwords](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource | +| [aws_vpc.main](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_account\_id](#input\_aws\_account\_id) | AWS account ID | `string` | n/a | yes | +| [database\_analytics\_nodes](#input\_database\_analytics\_nodes) | The number of analytics nodes in the MongoDB cluster | `number` | `null` | no | +| 
[database\_autoscaling\_max\_instances](#input\_database\_autoscaling\_max\_instances) | The maximum number of instances to have in the database instance autoscaling group | `number` | `3` | no | +| [database\_autoscaling\_min\_instances](#input\_database\_autoscaling\_min\_instances) | The minimum number of instances to have in the database instance autoscaling group | `number` | `1` | no | +| [database\_backup\_retention\_period](#input\_database\_backup\_retention\_period) | The retention period for the database backups in days | `number` | `35` | no | +| [database\_cluster\_instance\_size](#input\_database\_cluster\_instance\_size) | Database instance size | `string` | `"M10"` | no | +| [database\_cluster\_name](#input\_database\_cluster\_name) | The name of the MongoDB cluster | `string` | n/a | yes | +| [database\_cluster\_type](#input\_database\_cluster\_type) | Type of the MongoDB cluster | `string` | `"REPLICASET"` | no | +| [database\_cluster\_version](#input\_database\_cluster\_version) | The version of the MongoDB cluster | `string` | `"7.0"` | no | +| [database\_electable\_nodes](#input\_database\_electable\_nodes) | The number of electable nodes in the MongoDB cluster | `number` | `3` | no | +| [database\_read\_only\_nodes](#input\_database\_read\_only\_nodes) | The number of read\_only nodes in the MongoDB cluster | `number` | `null` | no | +| [database\_shards](#input\_database\_shards) | Number of shards to configure for the database | `number` | `1` | no | +| [database\_storage\_ipos](#input\_database\_storage\_ipos) | The disk IOPS to have for the database instance | `number` | `null` | no | +| [database\_storage\_size](#input\_database\_storage\_size) | The storage assigned to the database instance | `number` | `null` | no | +| [database\_storage\_type](#input\_database\_storage\_type) | The storage type to use for the database instance | `string` | `null` | no | +| [database\_users](#input\_database\_users) | Map of overrides for the user and 
database names. |
map(object({
username = string
database = string
forum_database = string
}))
| `{}` | no | +| [environment](#input\_environment) | The AWS project environment. (for example: production, staging, development, etc.) | `string` | n/a | yes | +| [is\_database\_autoscaling\_compute\_enabled](#input\_is\_database\_autoscaling\_compute\_enabled) | Whether to enable autoscaling of database instances | `bool` | `false` | no | +| [is\_database\_autoscaling\_disk\_gb\_enabled](#input\_is\_database\_autoscaling\_disk\_gb\_enabled) | Whether to enable autoscaling disk size for the database instance | `bool` | `true` | no | +| [is\_database\_storage\_encrypted](#input\_is\_database\_storage\_encrypted) | Whether the database storage is encrypted in rest | `bool` | `true` | no | +| [mongodbatlas\_cidr\_block](#input\_mongodbatlas\_cidr\_block) | The CIDR block in MongoDB Atlas | `string` | n/a | yes | +| [mongodbatlas\_project\_id](#input\_mongodbatlas\_project\_id) | The ID of the MongoDB Atlas project | `string` | n/a | yes | +| [region](#input\_region) | The AWS Region in which to deploy the resources | `string` | n/a | yes | +| [vpc\_id](#input\_vpc\_id) | ID of the VPC to use for the MongoDB cluster | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [cluster\_address](#output\_cluster\_address) | The address of the database cluster | +| [cluster\_connection\_strings](#output\_cluster\_connection\_strings) | Connection strings for the database cluster | +| [database\_cluster\_cluster\_id](#output\_database\_cluster\_cluster\_id) | The cluster ID of the database cluster | +| [database\_cluster\_id](#output\_database\_cluster\_id) | The unique resource ID of the database cluster | +| [database\_user\_credentials](#output\_database\_user\_credentials) | List of database and user credentials mapping. 
| diff --git a/terraform/modules/aws/mongodb/main.tf b/terraform/modules/aws/mongodb/main.tf new file mode 100644 index 0000000..f2912a9 --- /dev/null +++ b/terraform/modules/aws/mongodb/main.tf @@ -0,0 +1,158 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + random = { + source = "hashicorp/random" + } + mongodbatlas = { + source = "mongodb/mongodbatlas" + } + } +} + +locals { + region = upper(replace(var.region, "-", "_")) +} + +data "aws_vpc" "main" { + id = var.vpc_id +} + +resource "mongodbatlas_cluster" "cluster" { + depends_on = [mongodbatlas_network_container.cluster_network_container] + + project_id = var.mongodbatlas_project_id + name = "${var.database_cluster_name}-${var.environment}" + mongo_db_major_version = var.database_cluster_version + + cluster_type = var.database_cluster_type + replication_specs { + num_shards = var.database_shards + + regions_config { + priority = 7 + region_name = local.region + electable_nodes = var.database_electable_nodes + read_only_nodes = var.database_read_only_nodes + analytics_nodes = var.database_analytics_nodes + } + } + + # Auto-Scaling Settings + auto_scaling_disk_gb_enabled = var.is_database_autoscaling_disk_gb_enabled + auto_scaling_compute_enabled = var.is_database_autoscaling_compute_enabled + auto_scaling_compute_scale_down_enabled = var.is_database_autoscaling_compute_enabled + provider_auto_scaling_compute_min_instance_size = var.database_autoscaling_min_instances + provider_auto_scaling_compute_max_instance_size = var.database_autoscaling_max_instances + + # Provider Settings + provider_name = "AWS" + provider_region_name = local.region + disk_size_gb = var.database_storage_size + provider_disk_iops = var.database_storage_ipos + provider_volume_type = var.database_storage_type + cloud_backup = true + provider_instance_size_name = var.database_cluster_instance_size + encryption_at_rest_provider = var.is_database_storage_encrypted ? 
"AWS" : "NONE" +} + +resource "mongodbatlas_cloud_backup_schedule" "backup_schedule" { + project_id = mongodbatlas_cluster.cluster.project_id + cluster_name = mongodbatlas_cluster.cluster.name + + restore_window_days = var.database_backup_retention_period + + policy_item_daily { + frequency_interval = 1 + retention_unit = "days" + retention_value = var.database_backup_retention_period + } +} + + +# Add the vpc CIDR block to the access list +resource "mongodbatlas_project_ip_access_list" "cluster_access_list" { + project_id = var.mongodbatlas_project_id + cidr_block = data.aws_vpc.main.cidr_block +} + +# Network container to define the MongoDB Atlas CIDR block +resource "mongodbatlas_network_container" "cluster_network_container" { + project_id = var.mongodbatlas_project_id + atlas_cidr_block = var.mongodbatlas_cidr_block + provider_name = "AWS" + region_name = local.region +} + +# Peering between MongoDB Atlas and VPC +resource "mongodbatlas_network_peering" "cluster_network_peering" { + project_id = var.mongodbatlas_project_id + container_id = mongodbatlas_network_container.cluster_network_container.id + accepter_region_name = var.region + provider_name = "AWS" + route_table_cidr_block = data.aws_vpc.main.cidr_block + vpc_id = var.vpc_id + aws_account_id = var.aws_account_id +} + +# Auto accept peering connection request +resource "aws_vpc_peering_connection_accepter" "accept_mongo_peer" { + vpc_peering_connection_id = mongodbatlas_network_peering.cluster_network_peering.connection_id + auto_accept = true +} + +# Add peering connection to private routing table +resource "aws_route" "peeraccess" { + route_table_id = data.aws_vpc.main.main_route_table_id + destination_cidr_block = var.mongodbatlas_cidr_block + vpc_peering_connection_id = mongodbatlas_network_peering.cluster_network_peering.connection_id + depends_on = [ + aws_vpc_peering_connection_accepter.accept_mongo_peer + ] +} + +resource "random_password" "user_passwords" { + for_each = toset([ + for _, user 
 in var.database_users : + user.username + ]) + + length = 16 + special = true + override_special = "$()-_[]{}<>" +} + +resource "mongodbatlas_database_user" "users" { + for_each = { + for _, user in var.database_users : + user.username => user + } + + project_id = var.mongodbatlas_project_id + auth_database_name = "admin" + + username = each.key + password = random_password.user_passwords[each.key].result + + roles { + role_name = "readAnyDatabase" + database_name = "admin" + } + + roles { + role_name = "readWrite" + database_name = each.value.database + } + + roles { + role_name = "readWrite" + database_name = each.value.forum_database + } + + scopes { + type = "CLUSTER" + name = mongodbatlas_cluster.cluster.name + } +} diff --git a/terraform/modules/aws/mongodb/outputs.tf b/terraform/modules/aws/mongodb/outputs.tf new file mode 100644 index 0000000..70d4a64 --- /dev/null +++ b/terraform/modules/aws/mongodb/outputs.tf @@ -0,0 +1,33 @@ +output "database_cluster_id" { + value = mongodbatlas_cluster.cluster.id + description = "The unique resource ID of the database cluster" +} + +output "database_cluster_cluster_id" { + value = mongodbatlas_cluster.cluster.cluster_id + description = "The cluster ID of the database cluster" +} + +output "cluster_address" { + value = mongodbatlas_cluster.cluster.srv_address + description = "The address of the database cluster" +} + +output "cluster_connection_strings" { + value = mongodbatlas_cluster.cluster.connection_strings + description = "Connection strings for the database cluster" +} + +output "database_user_credentials" { + value = { + for key, user in var.database_users : + key => { + username = user.username + password = try(mongodbatlas_database_user.users[user.username].password, "") + database = user.database + forum_database = user.forum_database + } + } + description = "List of database and user credentials mapping."
+ sensitive = true +} diff --git a/terraform/modules/aws/mongodb/variables.tf b/terraform/modules/aws/mongodb/variables.tf new file mode 100644 index 0000000..f098e83 --- /dev/null +++ b/terraform/modules/aws/mongodb/variables.tf @@ -0,0 +1,140 @@ +variable "region" { + type = string + description = "The AWS Region in which to deploy the resources" +} + +variable "aws_account_id" { + type = string + description = "AWS account ID" +} + +variable "environment" { + type = string + description = "The AWS project environment. (for example: production, staging, development, etc.)" +} + +variable "mongodbatlas_project_id" { + type = string + description = "The ID of the MongoDB Atlas project" +} + +variable "mongodbatlas_cidr_block" { + type = string + description = "The CIDR block in MongoDB Atlas" +} + +variable "database_cluster_name" { + type = string + description = "The name of the MongoDB cluster" +} + +variable "database_cluster_version" { + type = string + description = "The version of the MongoDB cluster" + default = "7.0" +} + +variable "database_cluster_type" { + type = string + description = "Type of the MongoDB cluster" + default = "REPLICASET" +} + +variable "database_cluster_instance_size" { + type = string + description = "Database instance size" + default = "M10" +} + +variable "database_shards" { + type = number + description = "Number of shards to configure for the database" + default = 1 +} + +variable "database_electable_nodes" { + type = number + description = "The number of electable nodes in the MongoDB cluster" + default = 3 +} + +variable "database_read_only_nodes" { + type = number + description = "The number of read_only nodes in the MongoDB cluster" + default = null +} + +variable "database_analytics_nodes" { + type = number + description = "The number of analytics nodes in the MongoDB cluster" + default = null +} + +variable "database_storage_size" { + type = number + description = "The storage assigned to the database instance" + default = 
null +} + +variable "database_storage_ipos" { + type = number + description = "The disk IOPS to have for the database instance" + default = null +} + +variable "database_storage_type" { + type = string + description = "The storage type to use for the database instance" + default = null +} + +variable "database_autoscaling_min_instances" { + type = number + description = "The minimum number of instances to have in the database instance autoscaling group" + default = 1 +} + +variable "database_autoscaling_max_instances" { + type = number + description = "The maximum number of instances to have in the database instance autoscaling group" + default = 3 +} + +variable "is_database_autoscaling_compute_enabled" { + type = bool + description = "Whether to enable autoscaling of database instances" + default = false +} + +variable "is_database_autoscaling_disk_gb_enabled" { + type = bool + description = "Whether to enable autoscaling disk size for the database instance" + default = true +} + +variable "database_backup_retention_period" { + type = number + description = "The retention period for the database backups in days" + default = 35 +} + +variable "is_database_storage_encrypted" { + type = bool + description = "Whether the database storage is encrypted in rest" + default = true +} + +variable "vpc_id" { + type = string + description = "ID of the VPC to use for the MongoDB cluster" +} + +variable "database_users" { + type = map(object({ + username = string + database = string + forum_database = string + })) + default = {} + description = "Map of overrides for the user and database names." +} diff --git a/terraform/modules/aws/rds/README.md b/terraform/modules/aws/rds/README.md new file mode 100644 index 0000000..5929867 --- /dev/null +++ b/terraform/modules/aws/rds/README.md @@ -0,0 +1,65 @@ +## Requirements + +No requirements. 
+ +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | n/a | +| [random](#provider\_random) | n/a | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [aws_cloudwatch_metric_alarm.rds_storage_alarm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_metric_alarm) | resource | +| [aws_db_instance.rds_instance](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_instance) | resource | +| [aws_db_subnet_group.rds_subnet_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_subnet_group) | resource | +| [aws_kms_key.rds_encryption](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | +| [aws_security_group.rds_security_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [random_password.rds_root_password](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password) | resource | +| [random_string.rds_final_snapshot_suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| [random_string.rds_root_username](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| [aws_subnets.main](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source | +| [aws_vpc.main](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [database\_backup\_retention\_period](#input\_database\_backup\_retention\_period) | The retention period for the database backups in days | `number` | `35` | no | +| [database\_ca\_cert\_identifier](#input\_database\_ca\_cert\_identifier) | The CA certificate identifier 
if any | `string` | `null` | no | +| [database\_cluster\_instance\_size](#input\_database\_cluster\_instance\_size) | Database instance size | `string` | `"db.t3.micro"` | no | +| [database\_cluster\_name](#input\_database\_cluster\_name) | The name of the database cluster | `string` | n/a | yes | +| [database\_engine](#input\_database\_engine) | Database engine name | `string` | `"mysql"` | no | +| [database\_engine\_version](#input\_database\_engine\_version) | Database engine version | `string` | `"8.0"` | no | +| [database\_max\_storage](#input\_database\_max\_storage) | The maximum storage assigned to the database instance | `number` | `30` | no | +| [database\_min\_storage](#input\_database\_min\_storage) | The minimum storage assigned to the database instance | `number` | `15` | no | +| [database\_storage\_alarm\_alarm\_actions](#input\_database\_storage\_alarm\_alarm\_actions) | List of ARNs of actions to execute when the RDS storage alarm is triggered | `list(string)` | `[]` | no | +| [database\_storage\_alarm\_evaluation\_periods](#input\_database\_storage\_alarm\_evaluation\_periods) | The number of periods that need to violate the threshold before alarming | `number` | `1` | no | +| [database\_storage\_alarm\_period](#input\_database\_storage\_alarm\_period) | Evaluation periods for the usage in seconds | `number` | `300` | no | +| [database\_storage\_alarm\_threshold](#input\_database\_storage\_alarm\_threshold) | The threshold for database storage usage that triggers the alarm in bytes | `number` | `1000000000` | no | +| [environment](#input\_environment) | The AWS project environment. (for example: production, staging, development, etc.) 
| `string` | n/a | yes | +| [is\_auto\_major\_version\_upgrade\_enabled](#input\_is\_auto\_major\_version\_upgrade\_enabled) | Whether automatic major version upgrades are enabled | `bool` | `false` | no | +| [is\_auto\_minor\_version\_upgrade\_enabled](#input\_is\_auto\_minor\_version\_upgrade\_enabled) | Whether automatic minor version upgrades are enabled | `bool` | `false` | no | +| [is\_database\_storage\_alarm\_enabled](#input\_is\_database\_storage\_alarm\_enabled) | Whether database storage alarms are enabled | `bool` | `true` | no | +| [is\_database\_storage\_encrypted](#input\_is\_database\_storage\_encrypted) | Whether the database storage is encrypted in rest | `bool` | `true` | no | +| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no | +| [vpc\_id](#input\_vpc\_id) | ID of the VPC to use for the RDS cluster | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [cluster\_connection\_endpoint](#output\_cluster\_connection\_endpoint) | The endpoint URL on which the database cluster is accessible | +| [cluster\_host](#output\_cluster\_host) | The hostname of the database cluster | +| [cluster\_port](#output\_cluster\_port) | The port on which the database cluster is waiting for client connections | +| [database\_cluster\_arn](#output\_database\_cluster\_arn) | The unique resource ID of the database cluster | +| [database\_cluster\_id](#output\_database\_cluster\_id) | The unique resource ID of the database cluster | +| [database\_cluster\_root\_password](#output\_database\_cluster\_root\_password) | Database root user password | +| [database\_cluster\_root\_user](#output\_database\_cluster\_root\_user) | Database root user | diff --git a/terraform/modules/aws/rds/main.tf b/terraform/modules/aws/rds/main.tf new file mode 100644 index 0000000..8745d36 --- /dev/null +++ b/terraform/modules/aws/rds/main.tf @@ -0,0 +1,129 @@ +terraform { + required_providers { + aws = { + source = 
"hashicorp/aws" + } + random = { + source = "hashicorp/random" + } + } +} + +data "aws_vpc" "main" { + id = var.vpc_id +} + +data "aws_subnets" "main" { + filter { + name = "vpc-id" + values = [data.aws_vpc.main.id] + } + + tags = { + Tier = "Private" + } +} + +resource "random_string" "rds_root_username" { + length = 16 + special = false + numeric = false +} + +resource "random_password" "rds_root_password" { + length = 32 + special = true + override_special = "!#%&*()-_=+[]{}<>:?" # removes @ from special char list +} + +resource "random_string" "rds_final_snapshot_suffix" { + length = 16 + special = false + numeric = false +} + +resource "aws_kms_key" "rds_encryption" { + count = var.is_database_storage_encrypted ? 1 : 0 + description = "${title(var.database_cluster_name)} RDS Encryption" +} + +resource "aws_db_subnet_group" "rds_subnet_group" { + name = "${var.database_cluster_name} rds subnet group" + subnet_ids = data.aws_subnets.main.ids + + tags = merge(var.tags, { + name = "${var.database_cluster_name} rds subnet group" + }) +} + +resource "aws_security_group" "rds_security_group" { + name = "${var.database_cluster_name} rds security group" + vpc_id = data.aws_vpc.main.id + + ingress { + from_port = 3306 + to_port = 3306 + protocol = "tcp" + cidr_blocks = [data.aws_vpc.main.cidr_block] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = var.tags +} + +resource "aws_db_instance" "rds_instance" { + lifecycle { + ignore_changes = [ + final_snapshot_identifier, + ] + } + + identifier = "${var.database_cluster_name}-${var.environment}" + allocated_storage = var.database_min_storage + max_allocated_storage = var.database_max_storage + engine = var.database_engine + engine_version = var.database_engine_version + ca_cert_identifier = var.database_ca_cert_identifier + instance_class = var.database_cluster_instance_size + username = random_string.rds_root_username.result + password = 
 random_password.rds_root_password.result + db_subnet_group_name = aws_db_subnet_group.rds_subnet_group.name + vpc_security_group_ids = [aws_security_group.rds_security_group.id] + + auto_minor_version_upgrade = var.is_auto_minor_version_upgrade_enabled + allow_major_version_upgrade = var.is_auto_major_version_upgrade_enabled + + backup_retention_period = var.database_backup_retention_period + + storage_encrypted = var.is_database_storage_encrypted + kms_key_id = var.is_database_storage_encrypted ? aws_kms_key.rds_encryption[0].arn : "" + + final_snapshot_identifier = "${var.database_cluster_name}-db-final-snapshot-${random_string.rds_final_snapshot_suffix.result}" + + tags = var.tags +} + +resource "aws_cloudwatch_metric_alarm" "rds_storage_alarm" { + count = var.is_database_storage_alarm_enabled ? 1 : 0 + + alarm_name = "${var.database_cluster_name}-db-storage-alarm" + comparison_operator = "LessThanThreshold" + metric_name = "FreeStorageSpace" + namespace = "AWS/RDS" + statistic = "Average" + + dimensions = { + DBInstanceIdentifier = aws_db_instance.rds_instance.identifier + } + + threshold = var.database_storage_alarm_threshold + period = var.database_storage_alarm_period + evaluation_periods = var.database_storage_alarm_evaluation_periods + alarm_actions = var.database_storage_alarm_alarm_actions +} diff --git a/terraform/modules/aws/rds/outputs.tf b/terraform/modules/aws/rds/outputs.tf new file mode 100644 index 0000000..1839083 --- /dev/null +++ b/terraform/modules/aws/rds/outputs.tf @@ -0,0 +1,36 @@ +output "database_cluster_id" { + value = aws_db_instance.rds_instance.id + description = "The unique resource ID of the database cluster" +} + +output "database_cluster_arn" { + value = aws_db_instance.rds_instance.arn + description = "The unique resource ID of the database cluster" +} + +output "database_cluster_root_user" { + value = random_string.rds_root_username.result + description = "Database root user" + sensitive = true +} + +output "database_cluster_root_password" {
+ value = random_password.rds_root_password.result + description = "Database root user password" + sensitive = true +} + +output "cluster_host" { + value = aws_db_instance.rds_instance.address + description = "The hostname of the database cluster" +} + +output "cluster_port" { + value = aws_db_instance.rds_instance.port + description = "The port on which the database cluster is waiting for client connections" +} + +output "cluster_connection_endpoint" { + value = aws_db_instance.rds_instance.endpoint + description = "The endpoint URL on which the database cluster is accessible" +} diff --git a/terraform/modules/aws/rds/variables.tf b/terraform/modules/aws/rds/variables.tf new file mode 100644 index 0000000..f35ed88 --- /dev/null +++ b/terraform/modules/aws/rds/variables.tf @@ -0,0 +1,110 @@ +variable "environment" { + type = string + description = "The AWS project environment. (for example: production, staging, development, etc.)" +} + +variable "database_cluster_name" { + type = string + description = "The name of the database cluster" +} + +variable "database_engine" { + type = string + description = "Database engine name" + default = "mysql" +} + +variable "database_engine_version" { + type = string + description = "Database engine version" + default = "8.0" +} + +variable "database_cluster_instance_size" { + type = string + description = "Database instance size" + default = "db.t3.micro" +} + +variable "database_min_storage" { + type = number + description = "The minimum storage assigned to the database instance" + default = 15 +} + +variable "database_max_storage" { + type = number + description = "The maximum storage assigned to the database instance" + default = 30 +} + +variable "database_backup_retention_period" { + type = number + description = "The retention period for the database backups in days" + default = 35 +} + +variable "database_ca_cert_identifier" { + type = string + description = "The CA certificate identifier if any" + default = null +} + 
+variable "database_storage_alarm_threshold" { + type = number + description = "The threshold for database storage usage that triggers the alarm in bytes" + default = 1000000000 +} + +variable "database_storage_alarm_period" { + type = number + description = "Evaluation periods for the usage in seconds" + default = 300 +} + +variable "database_storage_alarm_evaluation_periods" { + type = number + description = "The number of periods that need to violate the threshold before alarming" + default = 1 +} + +variable "database_storage_alarm_alarm_actions" { + type = list(string) + description = "List of ARNs of actions to execute when the RDS storage alarm is triggered" + default = [] +} + +variable "is_database_storage_encrypted" { + type = bool + description = "Whether the database storage is encrypted in rest" + default = true +} + +variable "is_auto_minor_version_upgrade_enabled" { + type = bool + description = "Whether automatic minor version upgrades are enabled" + default = false +} + +variable "is_auto_major_version_upgrade_enabled" { + type = bool + description = "Whether automatic major version upgrades are enabled" + default = false +} + +variable "is_database_storage_alarm_enabled" { + type = bool + description = "Whether database storage alarms are enabled" + default = true +} + +variable "vpc_id" { + type = string + description = "ID of the VPC to use for the RDS cluster" +} + +variable "tags" { + type = map(string) + description = "A map of tags to add to all resources" + default = {} +} diff --git a/terraform/modules/aws/s3/README.md b/terraform/modules/aws/s3/README.md new file mode 100644 index 0000000..331447f --- /dev/null +++ b/terraform/modules/aws/s3/README.md @@ -0,0 +1,49 @@ +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | n/a | +| [random](#provider\_random) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_kms_key.s3_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource | +| [aws_s3_bucket.s3_bucket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket) | resource | +| [aws_s3_bucket_acl.s3_acl](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_acl) | resource | +| [aws_s3_bucket_cors_configuration.s3_cors](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_cors_configuration) | resource | +| [aws_s3_bucket_lifecycle_configuration.s3_lifecycle](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_lifecycle_configuration) | resource | +| [aws_s3_bucket_ownership_controls.s3_bucket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_ownership_controls) | resource | +| [aws_s3_bucket_public_access_block.s3_public_access_block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_public_access_block) | resource | +| [aws_s3_bucket_server_side_encryption_configuration.s3_encryption](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_server_side_encryption_configuration) | resource | +| [aws_s3_bucket_versioning.s3_versioning](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_versioning) | resource | +| [random_id.bucket_suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/id) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [allowed\_cors\_origins](#input\_allowed\_cors\_origins) | Lists the CORS origins to allow CORS requests from. | `list(string)` |
[
"*"
]
| no | +| [bucket\_prefix](#input\_bucket\_prefix) | The prefix for the AWS s3 bucket for easier identification. | `string` | n/a | yes | +| [environment](#input\_environment) | The AWS project environment. (for example: production, staging, development, etc.) | `string` | n/a | yes | +| [is\_force\_destroy\_enabled](#input\_is\_force\_destroy\_enabled) | Determines if the AWS s3 bucket is force-destroyed or not upon deletion. | `bool` | `true` | no | +| [is\_public](#input\_is\_public) | Determines whether the AWS s3 bucket's root object is publicly available or not. | `bool` | `false` | no | +| [is\_versioning\_enabled](#input\_is\_versioning\_enabled) | Determines if versioning is allowed on the AWS s3 bucket or not. | `bool` | `true` | no | +| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [bucket\_arn](#output\_bucket\_arn) | The unique resource ID of the bucket. | +| [bucket\_domain\_name](#output\_bucket\_domain\_name) | The domain name of the bucket, including the generated prefix. | +| [bucket\_id](#output\_bucket\_id) | The ID of the bucket that is generated during creation. 
| diff --git a/terraform/modules/aws/s3/main.tf b/terraform/modules/aws/s3/main.tf new file mode 100644 index 0000000..d009a00 --- /dev/null +++ b/terraform/modules/aws/s3/main.tf @@ -0,0 +1,100 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + + random = { + source = "hashicorp/random" + } + } +} + +resource "random_id" "bucket_suffix" { + byte_length = 8 +} + +resource "aws_s3_bucket" "s3_bucket" { + bucket = substr("${var.bucket_prefix}-${var.environment}-${random_id.bucket_suffix.dec}", 0, 37) + force_destroy = var.is_force_destroy_enabled + tags = var.tags +} + +resource "aws_s3_bucket_ownership_controls" "s3_bucket" { + bucket = aws_s3_bucket.s3_bucket.id + + rule { + object_ownership = "BucketOwnerPreferred" + } +} + + +resource "aws_kms_key" "s3_key" { + description = "S3 encryption key for ${aws_s3_bucket.s3_bucket.id} bucket" + deletion_window_in_days = 10 +} + +resource "aws_s3_bucket_cors_configuration" "s3_cors" { + bucket = aws_s3_bucket.s3_bucket.id + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT", "POST", "GET"] + allowed_origins = var.allowed_cors_origins + } +} + +resource "aws_s3_bucket_public_access_block" "s3_public_access_block" { + bucket = aws_s3_bucket.s3_bucket.id + + block_public_acls = false + block_public_policy = false + ignore_public_acls = false + restrict_public_buckets = false +} + +resource "aws_s3_bucket_acl" "s3_acl" { + depends_on = [ + aws_s3_bucket_ownership_controls.s3_bucket, + aws_s3_bucket_public_access_block.s3_public_access_block, + ] + bucket = aws_s3_bucket.s3_bucket.id + acl = "private" +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "s3_encryption" { + bucket = aws_s3_bucket.s3_bucket.id + count = var.is_public ? 
0 : 1 + + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.s3_key.arn + sse_algorithm = "aws:kms" + } + } +} + +resource "aws_s3_bucket_versioning" "s3_versioning" { + bucket = aws_s3_bucket.s3_bucket.id + + versioning_configuration { + status = var.is_versioning_enabled ? "Enabled" : "Disabled" + } +} + +resource "aws_s3_bucket_lifecycle_configuration" "s3_lifecycle" { + bucket = aws_s3_bucket.s3_bucket.id + + rule { + id = "tutor-env-rule-1" + status = "Enabled" + + expiration { + expired_object_delete_marker = true + } + + noncurrent_version_expiration { + noncurrent_days = 30 + } + } +} diff --git a/terraform/modules/aws/s3/outputs.tf b/terraform/modules/aws/s3/outputs.tf new file mode 100644 index 0000000..9a15f01 --- /dev/null +++ b/terraform/modules/aws/s3/outputs.tf @@ -0,0 +1,14 @@ +output "bucket_id" { + value = aws_s3_bucket.s3_bucket.id + description = "The ID of the bucket that is generated during creation." +} + +output "bucket_arn" { + value = aws_s3_bucket.s3_bucket.arn + description = "The unique resource ID of the bucket." +} + +output "bucket_domain_name" { + value = aws_s3_bucket.s3_bucket.bucket_domain_name + description = "The domain name of the bucket, including the generated prefix." +} diff --git a/terraform/modules/aws/s3/variables.tf b/terraform/modules/aws/s3/variables.tf new file mode 100644 index 0000000..5caf1dc --- /dev/null +++ b/terraform/modules/aws/s3/variables.tf @@ -0,0 +1,39 @@ +variable "environment" { + type = string + description = "The AWS project environment. (for example: production, staging, development, etc.)" +} + +variable "bucket_prefix" { + type = string + description = "The prefix for the AWS s3 bucket for easier identification." +} + +variable "allowed_cors_origins" { + type = list(string) + default = ["*"] + description = "Lists the CORS origins to allow CORS requests from." 
+} + +variable "is_public" { + type = bool + default = false + description = "Determines whether the AWS s3 bucket's root object is publicly available or not." +} + +variable "is_force_destroy_enabled" { + type = bool + default = true + description = "Determines if the AWS s3 bucket is force-destroyed or not upon deletion." +} + +variable "is_versioning_enabled" { + type = bool + default = true + description = "Determines if versioning is allowed on the AWS s3 bucket or not." +} + +variable "tags" { + description = "A map of tags to add to all resources" + type = map(string) + default = {} +} diff --git a/terraform/modules/aws/vpc/README.md b/terraform/modules/aws/vpc/README.md new file mode 100644 index 0000000..4df967e --- /dev/null +++ b/terraform/modules/aws/vpc/README.md @@ -0,0 +1,65 @@ +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [random](#provider\_random) | n/a | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 5.17 | + +## Resources + +| Name | Type | +|------|------| +| [random_id.vpc_suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/id) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [availability\_zones](#input\_availability\_zones) | List of availability zones to use | `list(string)` | `[]` | no | +| [cidr](#input\_cidr) | CIDR block for the VPC | `string` | `"10.0.0.0/16"` | no | +| [enable\_nat\_gateway](#input\_enable\_nat\_gateway) | Enable NAT Gateway | `bool` | `true` | no | +| [environment](#input\_environment) | The AWS project environment. (for example: production, staging, development, etc.) 
| `string` | n/a | yes | +| [one\_nat\_gateway\_per\_az](#input\_one\_nat\_gateway\_per\_az) | One NAT gateway per AZ | `bool` | `true` | no | +| [private\_subnet\_tags](#input\_private\_subnet\_tags) | A map of tags to add to private subnet resources | `map(string)` | `{}` | no | +| [private\_subnets](#input\_private\_subnets) | List of private subnets | `list(string)` | `[]` | no | +| [public\_subnet\_tags](#input\_public\_subnet\_tags) | A map of tags to add to public subnet resources | `map(string)` | `{}` | no | +| [public\_subnets](#input\_public\_subnets) | List of public subnets | `list(string)` | `[]` | no | +| [single\_nat\_gateway](#input\_single\_nat\_gateway) | Single NAT Gateway | `bool` | `false` | no | +| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no | +| [vpc\_name](#input\_vpc\_name) | The VPC name | `string` | `""` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [database\_subnet\_arns](#output\_database\_subnet\_arns) | List of ARNs of database subnets | +| [database\_subnet\_group](#output\_database\_subnet\_group) | ID of database subnet group | +| [database\_subnet\_group\_name](#output\_database\_subnet\_group\_name) | Name of database subnet group | +| [database\_subnets](#output\_database\_subnets) | List of IDs of database subnets | +| [database\_subnets\_cidr\_blocks](#output\_database\_subnets\_cidr\_blocks) | List of cidr\_blocks of database subnets | +| [database\_subnets\_ipv6\_cidr\_blocks](#output\_database\_subnets\_ipv6\_cidr\_blocks) | List of IPv6 cidr\_blocks of database subnets in an IPv6 enabled VPC | +| [default\_vpc\_arn](#output\_default\_vpc\_arn) | The ARN of the Default VPC | +| [default\_vpc\_cidr\_block](#output\_default\_vpc\_cidr\_block) | The CIDR block of the Default VPC | +| [default\_vpc\_id](#output\_default\_vpc\_id) | The ID of the Default VPC | +| [private\_subnet\_arns](#output\_private\_subnet\_arns) | List of ARNs of private subnets | 
+| [private\_subnets](#output\_private\_subnets) | List of IDs of private subnets | +| [private\_subnets\_cidr\_blocks](#output\_private\_subnets\_cidr\_blocks) | List of cidr\_blocks of private subnets | +| [private\_subnets\_ipv6\_cidr\_blocks](#output\_private\_subnets\_ipv6\_cidr\_blocks) | List of IPv6 cidr\_blocks of private subnets in an IPv6 enabled VPC | +| [public\_subnet\_arns](#output\_public\_subnet\_arns) | List of ARNs of public subnets | +| [public\_subnets](#output\_public\_subnets) | List of IDs of public subnets | +| [public\_subnets\_cidr\_blocks](#output\_public\_subnets\_cidr\_blocks) | List of cidr\_blocks of public subnets | +| [public\_subnets\_ipv6\_cidr\_blocks](#output\_public\_subnets\_ipv6\_cidr\_blocks) | List of IPv6 cidr\_blocks of public subnets in an IPv6 enabled VPC | +| [vpc\_arn](#output\_vpc\_arn) | The ARN of the VPC | +| [vpc\_cidr\_block](#output\_vpc\_cidr\_block) | The CIDR block of the VPC | +| [vpc\_id](#output\_vpc\_id) | The ID of the VPC | +| [vpc\_ipv6\_cidr\_block](#output\_vpc\_ipv6\_cidr\_block) | The IPv6 CIDR block | +| [vpc\_secondary\_cidr\_blocks](#output\_vpc\_secondary\_cidr\_blocks) | List of secondary CIDR blocks of the VPC | diff --git a/terraform/modules/aws/vpc/main.tf b/terraform/modules/aws/vpc/main.tf new file mode 100644 index 0000000..8f6cc8a --- /dev/null +++ b/terraform/modules/aws/vpc/main.tf @@ -0,0 +1,40 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + + random = { + source = "hashicorp/random" + } + } +} + +resource "random_id" "vpc_suffix" { + count = var.vpc_name == "" ? 1 : 0 + byte_length = 8 +} + +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 5.17" + name = var.vpc_name == "" ? 
"open-edx-${var.environment}-vpc-${random_id.vpc_suffix[0].dec}" : var.vpc_name + cidr = var.cidr + azs = var.availability_zones + private_subnets = var.private_subnets + public_subnets = var.public_subnets + + # Your VPC must have DNS hostname and DNS resolution support. Otherwise, + # your nodes cannot register with your cluster. + # https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html + enable_dns_support = true + enable_dns_hostnames = true + + enable_nat_gateway = var.enable_nat_gateway + single_nat_gateway = var.single_nat_gateway + one_nat_gateway_per_az = var.one_nat_gateway_per_az + + tags = var.tags + public_subnet_tags = var.public_subnet_tags + private_subnet_tags = var.private_subnet_tags +} diff --git a/infra-examples/aws/eks/outputs.tf b/terraform/modules/aws/vpc/outputs.tf similarity index 58% rename from infra-examples/aws/eks/outputs.tf rename to terraform/modules/aws/vpc/outputs.tf index 04dda89..d812d46 100644 --- a/infra-examples/aws/eks/outputs.tf +++ b/terraform/modules/aws/vpc/outputs.tf @@ -13,16 +13,6 @@ output "vpc_cidr_block" { value = module.vpc.vpc_cidr_block } -output "default_security_group_id" { - description = "The ID of the security group created by default on VPC creation" - value = module.vpc.default_security_group_id -} - -output "vpc_ipv6_association_id" { - description = "The association ID for the IPv6 CIDR block" - value = module.vpc.vpc_ipv6_association_id -} - output "vpc_ipv6_cidr_block" { description = "The IPv6 CIDR block" value = module.vpc.vpc_ipv6_cidr_block @@ -33,11 +23,6 @@ output "vpc_secondary_cidr_blocks" { value = module.vpc.vpc_secondary_cidr_blocks } -output "vpc_owner_id" { - description = "The ID of the AWS account that owns the VPC" - value = module.vpc.vpc_owner_id -} - output "private_subnets" { description = "List of IDs of private subnets" value = module.vpc.private_subnets @@ -78,23 +63,47 @@ output "public_subnets_ipv6_cidr_blocks" { value = module.vpc.public_subnets_ipv6_cidr_blocks 
} -# Static values (arguments) -output "azs" { - description = "A list of availability zones specified as argument to this module" - value = var.azs +output "default_vpc_id" { + description = "The ID of the Default VPC" + value = module.vpc.default_vpc_id +} + +output "default_vpc_arn" { + description = "The ARN of the Default VPC" + value = module.vpc.default_vpc_arn +} + +output "default_vpc_cidr_block" { + description = "The CIDR block of the Default VPC" + value = module.vpc.default_vpc_cidr_block +} + +output "database_subnets" { + description = "List of IDs of database subnets" + value = module.vpc.database_subnets +} + +output "database_subnet_arns" { + description = "List of ARNs of database subnets" + value = module.vpc.database_subnet_arns +} + +output "database_subnets_cidr_blocks" { + description = "List of cidr_blocks of database subnets" + value = module.vpc.database_subnets_cidr_blocks } -output "cluster_name" { - description = "The name of the EKS cluster" - value = module.eks.cluster_name +output "database_subnets_ipv6_cidr_blocks" { + description = "List of IPv6 cidr_blocks of database subnets in an IPv6 enabled VPC" + value = module.vpc.database_subnets_ipv6_cidr_blocks } -output "cluster_endpoint" { - description = "Endpoint for your Kubernetes API server" - value = module.eks.cluster_endpoint +output "database_subnet_group" { + description = "ID of database subnet group" + value = module.vpc.database_subnet_group } -output "cluster_version" { - description = "The Kubernetes version for the cluster" - value = module.eks.cluster_version +output "database_subnet_group_name" { + description = "Name of database subnet group" + value = module.vpc.database_subnet_group_name } diff --git a/terraform/modules/aws/vpc/variables.tf b/terraform/modules/aws/vpc/variables.tf new file mode 100644 index 0000000..aaff0b9 --- /dev/null +++ b/terraform/modules/aws/vpc/variables.tf @@ -0,0 +1,70 @@ +variable "environment" { + type = string + description = "The AWS 
project environment. (for example: production, staging, development, etc.)" +} + +variable "vpc_name" { + description = "The VPC name" + type = string + default = "" +} + +variable "cidr" { + description = "CIDR block for the VPC" + type = string + default = "10.0.0.0/16" +} + +variable "availability_zones" { + description = "List of availability zones to use" + type = list(string) + default = [] +} + +variable "private_subnets" { + description = "List of private subnets" + type = list(string) + default = [] +} + +variable "public_subnets" { + description = "List of public subnets" + type = list(string) + default = [] +} + +variable "enable_nat_gateway" { + description = "Enable NAT Gateway" + type = bool + default = true +} + +variable "single_nat_gateway" { + description = "Single NAT Gateway" + type = bool + default = false +} + +variable "one_nat_gateway_per_az" { + description = "One NAT gateway per AZ" + type = bool + default = true +} + +variable "tags" { + description = "A map of tags to add to all resources" + type = map(string) + default = {} +} + +variable "private_subnet_tags" { + description = "A map of tags to add to private subnet resources" + type = map(string) + default = {} +} + +variable "public_subnet_tags" { + description = "A map of tags to add to public subnet resources" + type = map(string) + default = {} +}