Initial commit.

Patrick MARIE 2022-02-11 14:42:37 +01:00
commit 38ae6b41ac
17 changed files with 655 additions and 0 deletions

.gitignore
@@ -0,0 +1,10 @@
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# plans
*.plan
*.tfplan

README.md
@@ -0,0 +1,197 @@
# AWS - EKS
This is a POC deploying an EKS stack on AWS, plus a few apps in it.
It uses one Terraform configuration to build the EKS cluster (1 node only, to keep costs down), and another Terraform configuration to deploy a couple of nginx pods in the cluster.
## How?
### Before anything
Make sure you have a valid AWS account with the required permissions and policies.
Permissions required:
* AmazonEC2FullAccess
* IAMFullAccess
* AmazonEKSClusterPolicy
* AmazonVPCFullAccess
* AmazonEKSServicePolicy
Required policy:
```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "VisualEditor0",
      "Effect": "Allow",
      "Action": "eks:*",
      "Resource": "*"
    }
  ]
}
```
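One way to attach it as an inline policy with the AWS CLI, assuming the JSON above is saved as `eks-policy.json` and the IAM user is named `infra-test` (both names are placeholders):
```sh
$ aws iam put-user-policy \
    --user-name infra-test \
    --policy-name eks-full-access \
    --policy-document file://eks-policy.json
```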
Once your IAM user is created, configure a matching profile:
```sh
$ aws configure --profile infra-test
AWS Access Key ID [None]: AKxxxxxxxxxxxxxxxx
AWS Secret Access Key [None]: zWVxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Default region name [None]: eu-west-3
Default output format [None]: json
```
For all subsequent commands, make sure the `AWS_PROFILE` environment variable is set to your profile name:
```sh
$ export AWS_PROFILE=infra-test
```
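A quick sanity check that the profile resolves to the expected IAM user (values below are illustrative):
```sh
$ aws sts get-caller-identity
{
    "UserId": "AIDAxxxxxxxxxxxxxxxxx",
    "Account": "123456789012",
    "Arn": "arn:aws:iam::123456789012:user/infra-test"
}
```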
### First: EKS
Like any Terraform deployment:
```sh
$ cd eks
$ terraform init
$ terraform plan -var "aws_profile=$AWS_PROFILE" -out tf.plan
$ terraform apply tf.plan
...
aws_eks_cluster.eks_cluster: Creation complete after 9m33s [id=eks-cluster-prod]
...
Apply complete! Resources: 4 added, 0 changed, 2 destroyed.
Outputs:
cluster_name = "eks-cluster-prod"
region = "eu-west-3"
$
```
Note that creating the initial EKS cluster takes up to 20 minutes in total (about 10 minutes for the EKS control plane, 10 more to provision the nodes).
Once the cluster is built, make sure to configure your `.kube/config`:
```sh
$ terraform output
cluster_name = "eks-cluster-prod"
region = "eu-west-3"
$ aws eks --region $(terraform output -raw region) update-kubeconfig --name $(terraform output -raw cluster_name)
Added new context arn:aws:eks:eu-west-3:123456789012:cluster/eks-cluster-prod to /home/mycroft/.kube/config
$ kubectl get pods -A
NAMESPACE     NAME                      READY   STATUS    RESTARTS   AGE
kube-system   aws-node-689lb            1/1     Running   0          111s
kube-system   coredns-9b5d74bfb-b652h   1/1     Running   0          5m20s
kube-system   coredns-9b5d74bfb-z6p6v   1/1     Running   0          5m20s
kube-system   kube-proxy-xg5cp          1/1     Running   0          111s
```
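The node group's single worker node should also show up as `Ready` (name and version below are illustrative):
```sh
$ kubectl get nodes
NAME                                       STATUS   ROLES    AGE   VERSION
ip-10-0-1-xxx.eu-west-3.compute.internal   Ready    <none>   2m    v1.21.x
```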
### Second: Apps
Once EKS is deployed and kubectl is correctly configured, we can continue by deploying our apps.
```sh
$ cd ../k8s
$ terraform init
$ terraform plan -var enable_nginx=1 -out tf.plan
$ terraform apply tf.plan
...
Apply complete! Resources: 3 added, 0 changed, 1 destroyed.
```
Let's verify everything got deployed:
```sh
$ kubectl get pods --namespace testaroo
NAME                    READY   STATUS    RESTARTS   AGE
alpine                  1/1     Running   0          5m3s
nginx-98cf9b965-l785s   1/1     Running   0          5m3s
nginx-98cf9b965-smpkr   1/1     Running   0          5m3s
$ kubectl get deploy -n testaroo nginx -o wide
NAME    READY   UP-TO-DATE   AVAILABLE   AGE     CONTAINERS        IMAGES   SELECTOR
nginx   2/2     2            2           5m46s   nginx-container   nginx    app=Nginx
$ kubectl get svc -n testaroo -o wide
NAME    TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE    SELECTOR
nginx   NodePort   172.20.10.182   <none>        80:31234/TCP   6m8s   app=Nginx
```
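The service is exposed on NodePort 31234 on the node. If you would rather not open that port publicly, `kubectl port-forward` is a quick local alternative; a minimal sketch:
```sh
$ kubectl port-forward -n testaroo svc/nginx 8080:80 &
Forwarding from 127.0.0.1:8080 -> 80
$ curl http://localhost:8080/
```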
### Reaching the app
`terraform output` does not expose the provisioned nodes. However, their public IPs can be retrieved using the AWS CLI:
```sh
$ CLUSTER_IP=$(aws ec2 describe-instances \
    --filters "Name=tag:k8s.io/cluster-autoscaler/eks-cluster-prod,Values=owned" \
              "Name=instance-state-name,Values=running" \
    --query "Reservations[*].Instances[*].PublicIpAddress" \
    --output text | head -1)
$ echo ${CLUSTER_IP}
52.47.91.179
$ curl http://$CLUSTER_IP:31234/
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
...
```
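The `31234` port is the NodePort pinned in `k8s/nginx.tf` (and opened by the `public_in_http` rule in `eks/security.tf`); it can also be looked up dynamically instead of hardcoded:
```sh
$ NODE_PORT=$(kubectl get svc -n testaroo nginx -o jsonpath='{.spec.ports[0].nodePort}')
$ curl http://$CLUSTER_IP:$NODE_PORT/
```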
### Reaching a node's SSH port
Reusing the node IP retrieved above, just:
```sh
$ ssh -i ~/.ssh/ec2-terraform.pem -l ec2-user $CLUSTER_IP
Last login: Fri Feb 11 13:21:00 2022 from xxxx.wanadoo.fr
       __|  __|_  )
       _|  (     /   Amazon Linux 2 AMI
      ___|\___|___|
# docker ps|grep nginx
cc3aafd1a6ec nginx "/docker-entrypoint.…" 25 minutes ago Up 25 minutes k8s_nginx-container_nginx-98cf9b965-l785s_testaroo_e5ebf304-e156-4f6d-b00f-0f5dad0a9445_0
f4b998b0558e nginx "/docker-entrypoint.…" 25 minutes ago Up 25 minutes k8s_nginx-container_nginx-98cf9b965-smpkr_testaroo_eebe1868-fc5e-425e-948a-ce2cc2f2633e_0
14113cac359b 602401143452.dkr.ecr.eu-west-3.amazonaws.com/eks/pause:3.1-eksbuild.1 "/pause" 25 minutes ago Up 25 minutes k8s_POD_nginx-98cf9b965-l785s_testaroo_e5ebf304-e156-4f6d-b00f-0f5dad0a9445_0
c8c252673fbb 602401143452.dkr.ecr.eu-west-3.amazonaws.com/eks/pause:3.1-eksbuild.1 "/pause" 25 minutes ago Up 25 minutes k8s_POD_nginx-98cf9b965-smpkr_testaroo_eebe1868-fc5e-425e-948a-ce2cc2f2633e_0
```
### Going into a container
```sh
$ kubectl get pods -n testaroo alpine
NAME     READY   STATUS    RESTARTS   AGE
alpine   1/1     Running   0          29m
$ kubectl exec -ti -n testaroo alpine -- ps auxw
PID   USER     TIME  COMMAND
    1 root      0:00 sh -c while true; do sleep 3600; done
    7 root      0:00 sleep 3600
    8 root      0:00 ps auxw
$ kubectl exec -ti -n testaroo alpine -- sh
/ # echo "hello world"
hello world
/ #
```
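### Tearing down
The nginx deployment and service are gated by the `enable_nginx` variable (used as a `count`), so re-planning with it back at 0 removes just them. To drop everything, destroy the stacks in reverse order (a sketch; expect the EKS teardown to take several minutes):
```sh
$ cd k8s && terraform destroy
$ cd ../eks && terraform destroy -var "aws_profile=$AWS_PROFILE"
```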
## Todo:
* Move the IAM roles into a dedicated env;
## Notes
### Got an AWS error?
Decode it using `aws sts decode-authorization-message --encoded-message`.
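For example, with the encoded blob captured from the failing call's error output (`$ENCODED_MESSAGE` is a placeholder):
```sh
$ aws sts decode-authorization-message \
    --encoded-message "$ENCODED_MESSAGE" \
    --query DecodedMessage --output text | python -m json.tool
```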

eks/.terraform.lock.hcl
@@ -0,0 +1,57 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/aws" {
version = "3.74.1"
constraints = "~> 3.27"
hashes = [
"h1:0/ImOh63jJHOqiA165x+99N+MR0Rz7x4Nlcbnpys4Ww=",
"zh:2de9a8c19e07ea3b12c3fe5fe23ffa71354f90683d1f3ded41f2f318e8bad401",
"zh:3f651572f9ad067e119ed083d25455627ae121d36e737823f1d89445949f8ca0",
"zh:468c5954ea646e8edbf70c5a3dbce3d9591a47259f3cf3bdfb2c8728a5e3a083",
"zh:5b379f4803268d3a2cde0bd8a2b6b0a3752e0a22d2cb15a9a28c6a8852d17840",
"zh:5f1271620def1e199afad2377e37ab194f5d5ea51ff804c0e7d468fc4a48b741",
"zh:770783d8d743f28ecaeaf7485f9d602071d610278e33347a692ebb75ae690a8f",
"zh:aecfa7b52f39cbfb1ef53576935ad6cc05deebf82d0b8b6b82c10727469d1c85",
"zh:c905af45fc8cb64fe566c5b35241baf5e5850e137ebbd59a3298321648d05046",
"zh:d7dabb6a110073c8adaf34af288a485714b4be7185304d491f042827a77f9d5f",
"zh:e8ccc2ef2465164ce467f32d58e5ffad74da92cc3733551aef5e0d839532e3d4",
"zh:f1c2c9145383ab8675eab68398b53cf33edb2665d64ef2e48e0444771fa5849e",
]
}
provider "registry.terraform.io/hashicorp/local" {
version = "2.1.0"
hashes = [
"h1:EYZdckuGU3n6APs97nS2LxZm3dDtGqyM4qaIvsmac8o=",
"zh:0f1ec65101fa35050978d483d6e8916664b7556800348456ff3d09454ac1eae2",
"zh:36e42ac19f5d68467aacf07e6adcf83c7486f2e5b5f4339e9671f68525fc87ab",
"zh:6db9db2a1819e77b1642ec3b5e95042b202aee8151a0256d289f2e141bf3ceb3",
"zh:719dfd97bb9ddce99f7d741260b8ece2682b363735c764cac83303f02386075a",
"zh:7598bb86e0378fd97eaa04638c1a4c75f960f62f69d3662e6d80ffa5a89847fe",
"zh:ad0a188b52517fec9eca393f1e2c9daea362b33ae2eb38a857b6b09949a727c1",
"zh:c46846c8df66a13fee6eff7dc5d528a7f868ae0dcf92d79deaac73cc297ed20c",
"zh:dc1a20a2eec12095d04bf6da5321f535351a594a636912361db20eb2a707ccc4",
"zh:e57ab4771a9d999401f6badd8b018558357d3cbdf3d33cc0c4f83e818ca8e94b",
"zh:ebdcde208072b4b0f8d305ebf2bfdc62c926e0717599dcf8ec2fd8c5845031c3",
"zh:ef34c52b68933bedd0868a13ccfd59ff1c820f299760b3c02e008dc95e2ece91",
]
}
provider "registry.terraform.io/hashicorp/tls" {
version = "3.1.0"
hashes = [
"h1:fUJX8Zxx38e2kBln+zWr1Tl41X+OuiE++REjrEyiOM4=",
"zh:3d46616b41fea215566f4a957b6d3a1aa43f1f75c26776d72a98bdba79439db6",
"zh:623a203817a6dafa86f1b4141b645159e07ec418c82fe40acd4d2a27543cbaa2",
"zh:668217e78b210a6572e7b0ecb4134a6781cc4d738f4f5d09eb756085b082592e",
"zh:95354df03710691773c8f50a32e31fca25f124b7f3d6078265fdf3c4e1384dca",
"zh:9f97ab190380430d57392303e3f36f4f7835c74ea83276baa98d6b9a997c3698",
"zh:a16f0bab665f8d933e95ca055b9c8d5707f1a0dd8c8ecca6c13091f40dc1e99d",
"zh:be274d5008c24dc0d6540c19e22dbb31ee6bfdd0b2cddd4d97f3cd8a8d657841",
"zh:d5faa9dce0a5fc9d26b2463cea5be35f8586ab75030e7fa4d4920cd73ee26989",
"zh:e9b672210b7fb410780e7b429975adcc76dd557738ecc7c890ea18942eb321a5",
"zh:eb1f8368573d2370605d6dbf60f9aaa5b64e55741d96b5fb026dbfe91de67c0d",
"zh:fc1e12b713837b85daf6c3bb703d7795eaf1c5177aebae1afcf811dd7009f4b0",
]
}

eks/eks.tf
@@ -0,0 +1,38 @@
resource "aws_eks_cluster" "eks_cluster" {
name = "eks-cluster-${var.environment}"
role_arn = aws_iam_role.eks_role.arn
vpc_config {
subnet_ids = [for subnet in aws_subnet.subnets : subnet.id]
}
}
# https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
resource "aws_eks_node_group" "eks_cluster" {
cluster_name = aws_eks_cluster.eks_cluster.name
node_group_name = "eks_cluster-${var.environment}"
node_role_arn = aws_iam_role.eks_role.arn
subnet_ids = [for subnet in aws_subnet.subnets : subnet.id]
instance_types = ["t2.small"] # Free tiers
remote_access {
ec2_ssh_key = aws_key_pair.ssh.id
# TODO: define source_security_group_ids; Undefined but with a key, port 22 is opened WW.
}
scaling_config {
desired_size = 1
max_size = 1
min_size = 1
}
# Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling.
# Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces.
depends_on = [
aws_iam_role_policy_attachment.eks-AmazonEKSWorkerNodePolicy,
aws_iam_role_policy_attachment.eks-AmazonEKS_CNI_Policy,
aws_iam_role_policy_attachment.eks-AmazonEC2ContainerRegistryReadOnly,
]
}

eks/output.tf
@@ -0,0 +1,17 @@
output "region" {
description = "AWS region"
value = var.aws_region
}
output "cluster_name" {
description = "Kubernetes Cluster Name"
value = aws_eks_cluster.eks_cluster.name
}
output "vpc" {
value = aws_vpc.main
}
output "eks" {
value = aws_eks_cluster.eks_cluster
}

eks/provider.tf
@@ -0,0 +1,15 @@
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 3.27"
    }
  }

  required_version = ">= 0.14.9"
}

provider "aws" {
  profile = var.aws_profile
  region  = var.aws_region
}

eks/roles.tf
@@ -0,0 +1,45 @@
resource "aws_iam_role" "eks_role" {
name = "eks"
assume_role_policy = jsonencode({
Statement = [{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "eks.amazonaws.com"
}
},{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "ec2.amazonaws.com"
}
}]
Version = "2012-10-17"
})
}
resource "aws_iam_role_policy_attachment" "eks-AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = aws_iam_role.eks_role.name
}
resource "aws_iam_role_policy_attachment" "eks-AmazonEKSVPCResourceController" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
role = aws_iam_role.eks_role.name
}
resource "aws_iam_role_policy_attachment" "eks-AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = aws_iam_role.eks_role.name
}
resource "aws_iam_role_policy_attachment" "eks-AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = aws_iam_role.eks_role.name
}
resource "aws_iam_role_policy_attachment" "eks-AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = aws_iam_role.eks_role.name
}

eks/security.tf
@@ -0,0 +1,29 @@
# A basic security group for our nginx server (running on NodePort 31234)

# resource "aws_security_group_rule" "public_out" {
#   type        = "egress"
#   from_port   = 0
#   to_port     = 0
#   protocol    = "-1"
#   cidr_blocks = ["0.0.0.0/0"]
#
#   security_group_id = aws_eks_cluster.eks_cluster.vpc_config[0].cluster_security_group_id
# }

resource "aws_security_group_rule" "public_in_ssh" {
  type        = "ingress"
  from_port   = 22
  to_port     = 22
  protocol    = "tcp"
  cidr_blocks = ["0.0.0.0/0"]

  security_group_id = aws_eks_cluster.eks_cluster.vpc_config[0].cluster_security_group_id
}

resource "aws_security_group_rule" "public_in_http" {
  type        = "ingress"
  from_port   = 31234
  to_port     = 31234
  protocol    = "tcp"
  cidr_blocks = ["0.0.0.0/0"]

  security_group_id = aws_eks_cluster.eks_cluster.vpc_config[0].cluster_security_group_id
}

eks/sshkeys.tf
@@ -0,0 +1,19 @@
resource "tls_private_key" "ssh" {
algorithm = "RSA"
rsa_bits = 4096
}
resource "aws_key_pair" "ssh" {
key_name = "ec2-terraform"
public_key = tls_private_key.ssh.public_key_openssh
}
resource "local_file" "pem_file" {
filename = pathexpand("~/.ssh/${aws_key_pair.ssh.key_name}.pem")
file_permission = "400"
directory_permission = "700"
sensitive_content = tls_private_key.ssh.private_key_pem
}
# ssh -i ~/.ssh/ec2-terraform.pem -l ec2-user 52.47.91.179

eks/variables.tf
@@ -0,0 +1,33 @@
variable "aws_profile" {
type = string
default = "aws-infra"
}
variable "aws_region" {
type = string
default = "eu-west-3"
}
variable "environment" {
type = string
default = "prod"
}
# AZ can be seen using: aws ec2 describe-availability-zones --region eu-west-3
variable "vpc_subnets" {
type = map(object({
cidr_block = string
availability_zone = string
}))
default = {
"alpha" = {
cidr_block = "10.0.1.0/24"
availability_zone = "eu-west-3b"
}
"beta" = {
cidr_block = "10.0.2.0/24"
availability_zone = "eu-west-3c"
}
}
}

eks/vpc.tf
@@ -0,0 +1,44 @@
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
tags = {
Name = "vpc-${var.environment}"
Env = var.environment
}
}
resource "aws_subnet" "subnets" {
vpc_id = aws_vpc.main.id
for_each = var.vpc_subnets
cidr_block = each.value.cidr_block
availability_zone = each.value.availability_zone
map_public_ip_on_launch = true
tags = {
Name = "${each.key}-${var.environment}"
Env = var.environment
"kubernetes.io/cluster/eks-cluster-${var.environment}" = "shared"
}
}
resource "aws_internet_gateway" "nat_gateway" {
vpc_id = aws_vpc.main.id
}
resource "aws_route_table" "nat_gateway" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.nat_gateway.id
}
}
resource "aws_route_table_association" "nat_gateway" {
for_each = aws_subnet.subnets
subnet_id = each.value.id
route_table_id = aws_route_table.nat_gateway.id
}

k8s/.terraform.lock.hcl
@@ -0,0 +1,21 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/kubernetes" {
version = "2.8.0"
constraints = ">= 2.0.0"
hashes = [
"h1:UZCCMTH49ziz6YDV5oCCoOHypOxZWvzc59IfZxVdWeI=",
"zh:0cf42c17c05ae5f0f5eb4b2c375dd2068960b97392e50823e47b2cee7b5e01be",
"zh:29e3751eceae92c7400a17fe3a5394ed761627bcadfda66e7ac91d6485c37927",
"zh:2d95584504c651e1e2e49fbb5fae1736e32a505102c3dbd2c319b26884a7d3d5",
"zh:4a5f1d915c19e7c7b4f04d7d68f82db2c872dad75b9e6f33a6ddce43aa160405",
"zh:4b959187fd2c884a4c6606e1c4edc7b506ec4cadb2742831f37aca1463eb349d",
"zh:5e76a2b81c93d9904d50c2a703845f79d2b080c2f87c07ef8f168592033d638f",
"zh:c5aa21a7168f96afa4b4776cbd7eefd3e1f47d48430dce75c7f761f2d2fac77b",
"zh:d45e8bd98fc6752ea087e744efdafb209e7ec5a4224f9affee0a24fb51d26bb9",
"zh:d4739255076ed7f3ac2a06aef89e8e48a87667f3e470c514ce2185c0569cc1fb",
"zh:dbd2f11529a422ffd17040a70c0cc2802b7f1be2499e976dc22f1138d022b1b4",
"zh:dbd5357082b2485bb9978bce5b6d508d6b431d15c53bfa1fcc2781131826b5d8",
]
}

k8s/k8s.tf
@@ -0,0 +1,12 @@
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.0.0"
    }
  }
}

provider "kubernetes" {
  config_path = "~/.kube/config"
}

k8s/nginx.tf
@@ -0,0 +1,78 @@
resource "kubernetes_namespace" "testaroo" {
metadata {
name = "testaroo"
}
}
resource "kubernetes_pod" "basic-pod" {
metadata {
name = "alpine"
namespace = kubernetes_namespace.testaroo.metadata.0.name
}
spec {
container {
name = "alpine"
image = "alpine:3.15"
command = ["sh", "-c", "while true; do sleep 3600; done"]
}
}
}
resource "kubernetes_deployment" "testaroo" {
count = var.enable_nginx
metadata {
name = "nginx"
namespace = kubernetes_namespace.testaroo.metadata.0.name
}
spec {
replicas = 2
selector {
match_labels = {
app = "Nginx"
}
}
template {
metadata {
labels = {
app = "Nginx"
}
}
spec {
container {
image = "nginx"
name = "nginx-container"
port {
container_port = 80
}
}
}
}
}
}
resource "kubernetes_service" "testaroo" {
count = var.enable_nginx
metadata {
name = "nginx"
namespace = kubernetes_namespace.testaroo.metadata.0.name
}
spec {
selector = {
app = kubernetes_deployment.testaroo[0].spec.0.template.0.metadata.0.labels.app
}
type = "NodePort"
port {
node_port = 31234
port = 80
target_port = 80
}
}
}

k8s/samples/basic.yaml
@@ -0,0 +1,10 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: alpine
spec:
  containers:
    - name: alpine
      image: alpine:3.15
      command: ["sh", "-c", "while true; do sleep 3600; done"]

k8s/samples/nginx.yaml
@@ -0,0 +1,26 @@
# A nginx instance with a NodePort.
---
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  containers:
    - name: nginx
      image: nginx:1.21
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  ports:
    - port: 8123       # Port exposed in cluster
      targetPort: 80   # Port inside container
      protocol: TCP
      nodePort: 31234
  selector:
    app: nginx
  type: NodePort
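# To try this sample directly (assuming the testaroo namespace already exists):
#   kubectl apply -n testaroo -f k8s/samples/nginx.yaml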

k8s/variables.tf
@@ -0,0 +1,4 @@
variable "enable_nginx" {
type = number
default = 0
}