Minor updates.

Patrick MARIE 2022-12-09 18:08:49 +01:00
parent 621e04fb94
commit f94a81ada5
8 changed files with 77 additions and 14 deletions

View File

@ -4,6 +4,16 @@
Run `terraform` in the `init-state` directory to create the initial S3 bucket & DynamoDB table.
## Initial deployment
Because some dependencies do not exist yet on a fresh deployment, extra steps are required: first create the VPC, then the EKS cluster, then `eks-addon-csi-ebs`, then `eks-kube-prometheus-stack`, and finally the remaining components:
```sh
$ cd infra/vpc && terragrunt plan && terragrunt apply
...
$ cd ../../ && make apply
```
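Spelled out in full (the directory names under `infra/` are assumed here to match the dependency paths used elsewhere in this commit), the sequence would look like:
```sh
$ cd infra/vpc && terragrunt plan && terragrunt apply
$ cd ../eks && terragrunt plan && terragrunt apply
$ cd ../eks-addon-csi-ebs && terragrunt plan && terragrunt apply
$ cd ../eks-kube-prometheus-stack && terragrunt plan && terragrunt apply
$ cd ../../ && make apply
```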
## Validate, plan & apply
There is a `Makefile` for this:
@ -21,7 +31,6 @@ $ aws eks update-kubeconfig --name avx-pmarie-eks --region eu-west-3
$ kubectl config rename-context arn:aws:eks:eu-west-3:563042046245:cluster/avx-pmarie-eks avx-pmarie-eks
```
## Notes
### aws-auth
@ -33,4 +42,8 @@ $ cd infra/eks-aws-auth
$ terragrunt import kubernetes_config_map.aws_auth kube-system/aws-auth
...
```
### ebs-csi-driver
The driver is installed and managed as a cluster addon in `eks`. The service account must be annotated and the controller restarted so that the role created in `eks-addon-csi-ebs` is picked up; see the commands below.
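For example, using the commands noted in the `eks-addon-csi-ebs` module (the role ARN is the one created for this cluster):
```sh
$ kubectl annotate serviceaccount ebs-csi-controller-sa -n kube-system \
    eks.amazonaws.com/role-arn=arn:aws:iam::563042046245:role/EksCsiEbsRole-avx-pmarie-eks
$ kubectl rollout restart deployment ebs-csi-controller -n kube-system
```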

View File

@ -20,6 +20,32 @@ locals {
eks_cluster_version = "1.22"
enable_irsa = true
# EKS addons
cluster_addons = {
coredns = {
resolve_conflicts = "OVERWRITE"
addon_version = "v1.8.7-eksbuild.1"
}
kube-proxy = {
resolve_conflicts = "OVERWRITE"
addon_version = "v1.22.11-eksbuild.2"
}
vpc-cni = {
resolve_conflicts = "OVERWRITE"
addon_version = "v1.12.0-eksbuild.1"
}
aws-ebs-csi-driver = {
resolve_conflicts = "OVERWRITE"
addon_version = "v1.13.0-eksbuild.2"
}
}
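# Note: resolve_conflicts = "OVERWRITE" tells EKS to overwrite any
# self-managed configuration of these addons when installing or updating.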
# EKS addon CSI EBS; this is obsolete and should be removed.
eks_addon_versions = {
aws-ebs-csi-driver = "v1.13.0-eksbuild.2"
}
node_group_defaults = {
disk_size = 50
instance_types = ["t3.small"]
@ -92,11 +118,6 @@ locals {
groups = ["system:masters"]
}]
# EKS addon CSI EBS
eks_addon_versions = {
aws-ebs-csi-driver = "v1.7.0-eksbuild.0"
}
# AWS load balancer
aws_load_balancer_service_account_name = "aws-load-balancer"
aws_load_balancer_namespace = "aws-load-balancer"

View File

@ -19,6 +19,10 @@ dependency "eks" {
}
}
dependencies {
paths = ["../aws-load-balancer-controller"]
}
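# Note: unlike a `dependency` block, `dependencies` only orders runs
# (e.g. with run-all); it does not expose the other module's outputs.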
locals {
config_vars = read_terragrunt_config(find_in_parent_folders("config.hcl"))

View File

@ -6,6 +6,10 @@ terraform {
source = "${get_repo_root()}//modules/eks-cert-manager"
}
dependencies {
paths = ["../eks-kube-prometheus-stack"]
}
dependency "eks" {
config_path = "../eks"

View File

@ -6,6 +6,10 @@ terraform {
source = "${get_repo_root()}//modules/eks-external-secrets"
}
dependencies {
paths = ["../eks-kube-prometheus-stack"]
}
dependency "eks" {
config_path = "../eks"

View File

@ -31,6 +31,8 @@ inputs = {
enable_irsa = local.config_vars.locals.enable_irsa
cluster_addons = local.config_vars.locals.cluster_addons
eks_managed_node_group_defaults = local.config_vars.locals.node_group_defaults
eks_managed_node_groups = local.config_vars.locals.node_groups
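# cluster_addons (from config.hcl) now includes aws-ebs-csi-driver, so the
# addon is managed here rather than by the standalone eks-addon-csi-ebs module.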

View File

@ -6,6 +6,10 @@ terraform {
source = "${get_repo_root()}//modules/eks-ingress-controller"
}
dependencies {
paths = ["../eks-kube-prometheus-stack"]
}
dependency "vpc" {
config_path = "../vpc"
@ -15,6 +19,7 @@ dependency "vpc" {
mock_outputs = {
vpc_id = "fake-vpc-id"
private_subnets = ["fake-private-subnet-id-1", "fake-private-subnet-id-2"]
vpc_cidr_block = "10.88.0.0/24"
}
}
@ -37,10 +42,12 @@ dependency "public_dns" {
mock_outputs_allowed_terraform_commands = ["validate", "plan"]
mock_outputs = {
dns_zone = {
name = "xxx"
zone_id = "ZXXXXXXXXXXXXXXXXXXX"
}
sub_zones = {
"fake.sub.zone.com" = {
name = "xxx"
zone_id = "ZXXXXXXXXXXXXXXXXXXX"
}
}
@ -53,10 +60,12 @@ dependency "private_dns" {
mock_outputs_allowed_terraform_commands = ["validate", "plan"]
mock_outputs = {
dns_zone = {
name = "xxx"
zone_id = "ZXXXXXXXXXXXXXXXXXXX"
}
sub_zones = {
"fake.sub.zone.com" = {
name = "xxx"
zone_id = "ZXXXXXXXXXXXXXXXXXXX"
}
}

View File

@ -8,10 +8,16 @@ resource "aws_iam_role_policy_attachment" "self" {
role = aws_iam_role.self.name
}
resource "aws_eks_addon" "self" {
cluster_name = var.eks_cluster_id
addon_name = "aws-ebs-csi-driver"
addon_version = var.addon_version
service_account_role_arn = aws_iam_role.self.arn
}
# resource "aws_eks_addon" "self" {
# cluster_name = var.eks_cluster_id
# addon_name = "aws-ebs-csi-driver"
# addon_version = var.addon_version
#
# # Note: when installed using the cluster_addons, the service account role
# # is "inherited from node". In this case, it is still required to create an IAM.
# # It will be then required to annotate the service account with the arn of the created role:
# # > kubectl annotate serviceaccount ebs-csi-controller-sa -n kube-system eks.amazonaws.com/role-arn=arn:aws:iam::563042046245:role/EksCsiEbsRole-avx-pmarie-eks
# # Then to restart the ebs-csi-controller deployment to take effect:
# # > kubectl rollout restart deployment ebs-csi-controller -n kube-system
# service_account_role_arn = aws_iam_role.self.arn
# }