Hello,
I have an EKS cluster (Terraform code below) and followed the guide to set up the Load Balancer Controller (https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html). But when I deploy the service (Terraform code below) and try to expose it via "LoadBalancer", it stays in a pending state and no external address becomes available. The Load Balancer Controller logs the following error:
Log Error from eksckubectl logs pod/aws-load-balancer-controller-5b57cdc6cc-dtjbg -n kube-system
{"level":"error","ts":1640857282.2362676,"logger":"controller-runtime.manager.controller.service","msg":"Reconciler error","name":"terraform-example","namespace":"default","error":"AccessDenied: User: arn:aws:sts::009661972061:assumed-role/my-cluster2021123008214425030000000b/i-0a40de3c4e8541004 is not authorized to perform: elasticloadbalancing:CreateTargetGroup on resource: arn:aws:elasticloadbalancing:eu-central-1:009661972061:targetgroup/k8s-default-terrafor-630f67813d/* because no identity-based policy allows the elasticloadbalancing:CreateTargetGroup action\n\tstatus code: 403, request id: 2491099a-a6fd-4e6f-bab8-3c758eda0d0b"}
If I manually attach the AWSLoadBalancerControllerIAMPolicy to the my-cluster2021123008214425030000000b role, it works. But as far as I understand the documentation, the AWSLoadBalancerControllerIAMPolicy is meant for the controller in the kube-system namespace, not for the worker nodes.
Is there anything missing from the documentation? Or what is the intended way of solving this?
Best regards, Rene
Terraform EKS:
# Terraform settings for the EKS/cluster configuration: pin the AWS provider
# major version and the minimum Terraform CLI version.
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
# ~> 3.27 allows 3.x patch/minor upgrades but not 4.x breaking changes.
version = "~> 3.27"
}
}
required_version = ">= 0.14.9"
}
# AWS provider: credentials come from the local "default" profile
# (~/.aws/credentials); all resources are created in eu-central-1.
provider "aws" {
profile = "default"
region = "eu-central-1"
}
# Look up the cluster created by the eks module below, so the kubernetes
# provider can be configured from its endpoint and CA certificate.
data "aws_eks_cluster" "eks" {
name = module.eks.cluster_id
}
# Short-lived authentication token for the same cluster.
data "aws_eks_cluster_auth" "eks" {
name = module.eks.cluster_id
}
# Kubernetes provider wired to the EKS cluster via the data sources above:
# endpoint + cluster CA + a token from aws_eks_cluster_auth.
provider "kubernetes" {
host = data.aws_eks_cluster.eks.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks.certificate_authority[0].data)
token = data.aws_eks_cluster_auth.eks.token
}
# EKS cluster via the community terraform-aws-modules/eks module.
#
# The AWS Load Balancer Controller must authenticate through IAM Roles for
# Service Accounts (IRSA), NOT through the worker-node instance role. Without
# IRSA the controller pod falls back to the node role, which is why the
# reconciler fails with AccessDenied unless AWSLoadBalancerControllerIAMPolicy
# is (incorrectly) attached to the node role.
module "eks" {
  source          = "terraform-aws-modules/eks/aws"
  cluster_version = "1.21"
  cluster_name    = "my-cluster"
  vpc_id          = "vpc-xx"
  subnets         = ["subnet-xx", "subnet-xx", "subnet-xx"]

  # Create the cluster's IAM OIDC identity provider so the controller's
  # service account (kube-system/aws-load-balancer-controller) can assume a
  # dedicated IAM role carrying AWSLoadBalancerControllerIAMPolicy, as the
  # official installation guide requires.
  enable_irsa = true

  worker_groups = [
    {
      instance_type = "t3.medium"
      asg_max_size  = 5
      role_arn      = "arn:aws:iam::xxx:role/worker-node-example"
    }
  ]
}
Terraform service:
# Terraform settings for the service/application configuration: pins both the
# AWS and Kubernetes providers plus the minimum Terraform CLI version.
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 3.27"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.0.1"
}
}
required_version = ">= 0.14.9"
}
# Kubernetes provider using exec-based authentication: a fresh token is
# fetched on every run via `aws eks get-token`, so no static token is stored
# in state.
provider "kubernetes" {
host = "xxx"
cluster_ca_certificate = base64decode("xxx")
exec {
# NOTE(review): client.authentication.k8s.io/v1alpha1 is deprecated and was
# removed in newer Kubernetes releases — confirm whether v1beta1 is needed
# for your cluster/CLI versions.
api_version = "client.authentication.k8s.io/v1alpha1"
command = "aws"
args = [
"eks",
"get-token",
"--cluster-name",
"my-cluster"
]
}
}
# AWS provider for the application resources (SQS, RDS, security group);
# same profile/region as the cluster configuration.
provider "aws" {
profile = "default"
region = "eu-central-1"
}
# FIFO queue for GDPR events. Content-based deduplication lets SQS derive the
# deduplication ID from a SHA-256 of the message body; SSE uses SQS-managed
# keys.
resource "aws_sqs_queue" "gdpr_queue" {
name = "terraform-example-queue.fifo"
fifo_queue = true
content_based_deduplication = true
sqs_managed_sse_enabled = true
}
# FIFO queue for private-data events, with the same deduplication/SSE setup
# as the GDPR queue.
#
# Fix: the original name was "terraform-example-queue.fifo", identical to
# aws_sqs_queue.gdpr_queue — SQS queue names must be unique per account and
# region, so both resources cannot be created. Give this queue its own name
# (FIFO queues must keep the ".fifo" suffix).
resource "aws_sqs_queue" "private_data_queue" {
  name                        = "terraform-example-private-data-queue.fifo"
  fifo_queue                  = true
  content_based_deduplication = true
  sqs_managed_sse_enabled     = true
}
# PostgreSQL RDS instance for the service; skip_final_snapshot means the data
# is discarded on destroy (acceptable only for non-production).
resource "aws_db_instance" "database" {
allocated_storage = 10
engine = "postgres"
engine_version = "13.3"
instance_class = "db.t3.micro"
name = "mydb"
username = "foo"
# SECURITY(review): plaintext password in code and in Terraform state —
# move to a variable with `sensitive = true` or AWS Secrets Manager.
password = "foobarbaz"
skip_final_snapshot = true
vpc_security_group_ids = [aws_security_group.basic_security_group.id]
}
# Security group allowing inbound PostgreSQL traffic to the RDS instance.
#
# Fix: the original used protocol = "all" together with ports 5432/5432.
# In EC2, protocol "all" (-1) requires from_port and to_port to be 0, so the
# intended port restriction did not apply. Postgres runs over TCP.
resource "aws_security_group" "basic_security_group" {
  name        = "allow rds connection"
  description = "Allow rds traffic"
  vpc_id      = "vpc-xxx"

  ingress {
    description = "postgres"
    from_port   = 5432
    to_port     = 5432
    protocol    = "tcp"
    # NOTE(review): open to the whole internet — consider narrowing to the
    # VPC CIDR or the worker-node security group.
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }
}
# LoadBalancer Service for the gdpr-hub-service deployment. The annotations
# hand the Service to the AWS Load Balancer Controller ("external" type),
# which provisions an internet-facing NLB with IP targets.
#
# Fix: the annotations map mixed `=` and `:` assignment; normalized to `=`
# for consistency (both are valid HCL, but mixing them is confusing).
resource "kubernetes_service" "gdpr-hub-service" {
  metadata {
    name = "terraform-example"
    annotations = {
      "service.beta.kubernetes.io/aws-load-balancer-type"            = "external"
      "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type" = "ip"
      "service.beta.kubernetes.io/aws-load-balancer-scheme"          = "internet-facing"
    }
  }
  spec {
    # Route to pods carrying the deployment's App label.
    selector = {
      App = kubernetes_deployment.gdpr-hub-service-deployment.spec.0.template.0.metadata.0.labels.App
    }
    session_affinity = "ClientIP"
    port {
      port        = 80
      target_port = 8080
    }
    type = "LoadBalancer"
  }
}
# Deployment running two replicas of the gdpr-hub-service container.
# depends_on delays pod creation until the database and both queues exist,
# since the application presumably connects to them at startup.
resource "kubernetes_deployment" "gdpr-hub-service-deployment" {
depends_on = [
aws_db_instance.database,
aws_sqs_queue.gdpr_queue,
aws_sqs_queue.private_data_queue
]
metadata {
name = "gdpr-hub-service"
labels = {
App = "gdpr-hub-service"
}
}
spec {
replicas = 2
# Selector must match the pod template labels below.
selector {
match_labels = {
App = "gdpr-hub-service"
}
}
template {
metadata {
labels = {
App = "gdpr-hub-service"
}
}
spec {
container {
image = "xxxx"
name = "gdpr-hub-service"
# Must match the Service's target_port (8080).
port {
container_port = 8080
}
resources {
limits = {
cpu = "2"
memory = "1024Mi"
}
requests = {
cpu = "250m"
memory = "50Mi"
}
}
}
}
}
}
}