Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .changelog/5146.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
```release-note:bug
charts: fix Terraform script where all EKS acceptance test cases failed to provision infrastructure on AWS.
```
136 changes: 110 additions & 26 deletions charts/consul/test/terraform/eks/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -4,17 +4,24 @@
terraform {
required_providers {
aws = {
version = ">= 4.0.0"
version = "~> 5.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "~> 2.27.0"
}
}
}

provider "aws" {
region = var.region

assume_role {
role_arn = var.role_arn
duration = "2700s"
dynamic "assume_role" {
for_each = var.role_arn != "" ? [1] : []
content {
role_arn = var.role_arn
duration = "2700s"
}
}
}

Expand All @@ -35,7 +42,7 @@ resource "random_string" "suffix" {
module "vpc" {
count = var.cluster_count
source = "terraform-aws-modules/vpc/aws"
version = "4.0.0"
version = "5.0.0"

name = "consul-k8s-${random_id.suffix[count.index].dec}"
# The cidr range needs to be unique in each VPC to allow setting up a peering connection.
Expand All @@ -47,6 +54,11 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true

# Enable dual-stack (IPv4 + IPv6) support for acceptance tests
# enable_ipv6 = true
# public_subnet_ipv6_prefixes = [0, 1, 2]
# private_subnet_ipv6_prefixes = [3, 4, 5]

public_subnet_tags = {
"kubernetes.io/cluster/consul-k8s-${random_id.suffix[count.index].dec}" = "shared"
"kubernetes.io/role/elb" = "1"
Expand Down Expand Up @@ -91,6 +103,31 @@ module "eks" {
tags = var.tags
}

# Turn on EBS encryption-by-default for this account/region so every volume
# created for the test clusters (including those dynamically provisioned by
# the EBS CSI driver) is encrypted without per-volume configuration.
resource "aws_ebs_encryption_by_default" "enable" {
enabled = true
}

# Multi cluster setup.
# K8s Provider for the first cluster (cluster0)
# Kubernetes provider for cluster0, authenticated against the EKS API with
# the cluster endpoint, its CA bundle, and a short-lived token from
# data.aws_eks_cluster_auth. cluster0 always exists, so no conditional
# configuration is needed here (unlike the cluster1 provider).
provider "kubernetes" {
alias = "cluster0"
host = module.eks[0].cluster_endpoint
cluster_ca_certificate = base64decode(module.eks[0].cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.cluster[0].token
}

# Provider for second cluster (cluster1)
# Kubernetes provider for cluster1. Only meaningful when cluster_count > 1;
# every attribute is conditionally null so a single-cluster run does not
# reference module.eks[1], which would not exist.
provider "kubernetes" {
alias = "cluster1"

# Use null to disable the provider configuration if cluster_count is not > 1.
# This avoids errors from empty string credentials.
host = var.cluster_count > 1 ? module.eks[1].cluster_endpoint : null
cluster_ca_certificate = var.cluster_count > 1 ? base64decode(module.eks[1].cluster_certificate_authority_data) : null
token = var.cluster_count > 1 ? data.aws_eks_cluster_auth.cluster[1].token : null
}


resource "aws_iam_role" "csi-driver-role" {
count = var.cluster_count
assume_role_policy = jsonencode({
Expand Down Expand Up @@ -143,28 +180,77 @@ data "aws_eks_cluster_auth" "cluster" {
name = module.eks[count.index].cluster_id
}

# Add a default StorageClass for dynamic volume provisioning
# This is the primary fix for the "unbound PersistentVolumeClaims" issue
# as we do not specify storage class in default helm values.yaml for consul server.

# StorageClass for the first cluster (cluster0)
# Default gp3 StorageClass for cluster0, backed by the EBS CSI driver.
# The is-default-class annotation makes it the cluster default, so PVCs
# created without an explicit storageClassName (as in the default Consul
# server helm values) can bind instead of staying Pending.
resource "kubernetes_storage_class" "ebs_gp3_cluster0" {
provider = kubernetes.cluster0
# The provisioner is ebs.csi.aws.com, so wait for both the EKS cluster and
# the CSI driver addon before creating the class.
depends_on = [module.eks, aws_eks_addon.csi-driver[0]]

metadata {
name = "gp3"
annotations = {
"storageclass.kubernetes.io/is-default-class" = "true"
}
}
storage_provisioner = "ebs.csi.aws.com"
parameters = {
type = "gp3"
encrypted = "true"
}
reclaim_policy = "Delete"
# WaitForFirstConsumer delays volume creation until a pod is scheduled, so
# the EBS volume lands in the same availability zone as the consuming node.
volume_binding_mode = "WaitForFirstConsumer"
}

# StorageClass for second cluster (cluster1)
# Default gp3 StorageClass for cluster1, mirroring ebs_gp3_cluster0.
# Guarded by count so it is only created in multi-cluster runs, since the
# cluster1 provider is disabled (all-null) when cluster_count is 1.
resource "kubernetes_storage_class" "ebs_gp3_cluster1" {
count = var.cluster_count > 1 ? 1 : 0

provider = kubernetes.cluster1
# The provisioner is ebs.csi.aws.com, so wait for both the EKS cluster and
# the CSI driver addon before creating the class.
depends_on = [module.eks, aws_eks_addon.csi-driver[1]]
metadata {
name = "gp3"
annotations = {
"storageclass.kubernetes.io/is-default-class" = "true"
}
}
storage_provisioner = "ebs.csi.aws.com"
parameters = {
type = "gp3"
encrypted = "true"
}
reclaim_policy = "Delete"
# WaitForFirstConsumer delays volume creation until a pod is scheduled, so
# the EBS volume lands in the same availability zone as the consuming node.
volume_binding_mode = "WaitForFirstConsumer"
}

# The following resources are only applied when cluster_count=2 to set up vpc peering and the appropriate routes and
# security groups so traffic between VPCs is allowed. There is validation to ensure cluster_count can be 1 or 2.

# Each EKS cluster needs to allow ingress traffic from the other VPC.
# Each EKS cluster needs to allow ingress traffic from the other VPC's public subnets.
# Traffic routes via NAT Gateway through public subnets (no private peering routes),
# so source IPs arrive from the public subnet CIDRs of the remote VPC.
resource "aws_security_group_rule" "allowingressfrom1-0" {
count = var.cluster_count > 1 ? 1 : 0
count = var.cluster_count > 1 ? length(module.vpc[1].public_subnets_cidr_blocks) : 0
type = "ingress"
from_port = 0
to_port = 65535
protocol = "tcp"
cidr_blocks = [module.vpc[1].vpc_cidr_block]
security_group_id = module.eks[0].cluster_primary_security_group_id
to_port = 0
protocol = "-1"
cidr_blocks = [module.vpc[1].public_subnets_cidr_blocks[count.index]]
security_group_id = module.eks[0].worker_security_group_id
description = "Allow node traffic from cluster 1 public subnet ${count.index}"
}

resource "aws_security_group_rule" "allowingressfrom0-1" {
count = var.cluster_count > 1 ? 1 : 0
count = var.cluster_count > 1 ? length(module.vpc[0].public_subnets_cidr_blocks) : 0
type = "ingress"
from_port = 0
to_port = 65535
protocol = "tcp"
cidr_blocks = [module.vpc[0].vpc_cidr_block]
security_group_id = module.eks[1].cluster_primary_security_group_id
to_port = 0
protocol = "-1"
cidr_blocks = [module.vpc[0].public_subnets_cidr_blocks[count.index]]
security_group_id = module.eks[1].worker_security_group_id
description = "Allow node traffic from cluster 0 public subnet ${count.index}"
}

# Create a peering connection. This is the requester's side of the connection.
Expand Down Expand Up @@ -192,20 +278,18 @@ resource "aws_vpc_peering_connection_accepter" "peer" {
}
}

# Add routes so that traffic going from VPC 0 to VPC 1 is routed through the peering connection.
resource "aws_route" "peering0" {
# We have 2 route tables to add a route to, the public and private route tables.
count = var.cluster_count > 1 ? 2 : 0
route_table_id = [module.vpc[0].public_route_table_ids[0], module.vpc[0].private_route_table_ids[0]][count.index]
# Add routes to public route tables in VPC 0 to route traffic to VPC 1 through the peering connection.
resource "aws_route" "peering_public_0" {
count = var.cluster_count > 1 ? length(module.vpc[0].public_route_table_ids) : 0
route_table_id = module.vpc[0].public_route_table_ids[count.index]
destination_cidr_block = module.vpc[1].vpc_cidr_block
vpc_peering_connection_id = aws_vpc_peering_connection.peer[0].id
}

# Add routes so that traffic going from VPC 1 to VPC 0 is routed through the peering connection.
resource "aws_route" "peering1" {
# We have 2 route tables to add a route to, the public and private route tables.
count = var.cluster_count > 1 ? 2 : 0
route_table_id = [module.vpc[1].public_route_table_ids[0], module.vpc[1].private_route_table_ids[0]][count.index]
# Add routes to public route tables in VPC 1 to route traffic to VPC 0 through the peering connection.
resource "aws_route" "peering_public_1" {
count = var.cluster_count > 1 ? length(module.vpc[1].public_route_table_ids) : 0
route_table_id = module.vpc[1].public_route_table_ids[count.index]
destination_cidr_block = module.vpc[0].vpc_cidr_block
vpc_peering_connection_id = aws_vpc_peering_connection.peer[0].id
}
Loading