EKS cluster creation using Terraform

Hi AWS, I am creating an EKS cluster with node groups and add-ons using Terraform. Here are the relevant Terraform code snippets:

eks-cluster.tf

resource "aws_eks_cluster" "eks_cluster" {
  name     = var.cluster_name
  role_arn = aws_iam_role.service_role.arn

  vpc_config {
    subnet_ids              = [aws_subnet.private_subnet[1].id, aws_subnet.public_subnet[0].id]
    endpoint_private_access = true
    endpoint_public_access  = true
  }

  version = "1.28"

  # Ensure that IAM Role permissions are created before and deleted after EKS Cluster handling.
  # Otherwise, EKS will not be able to properly delete EKS managed EC2 infrastructure such as Security Groups.
  depends_on = [
    aws_iam_role_policy_attachment.service-role-AmazonEKSClusterPolicy,
    aws_iam_role_policy_attachment.service-role-AmazonEKSVPCResourceController,
    aws_cloudwatch_log_group.cw_log_group,
  ]

  enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]

}

resource "aws_eks_addon" "addons" {
  for_each          = { for addon in var.addons : addon.name => addon }
  cluster_name      = aws_eks_cluster.eks_cluster.id
  addon_name        = each.value.name
  addon_version     = each.value.version
  resolve_conflicts = "OVERWRITE"
}


output "endpoint" {
  value = aws_eks_cluster.eks_cluster.endpoint
}

output "kubeconfig-certificate-authority-data" {
  value = aws_eks_cluster.eks_cluster.certificate_authority[0].data
}
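
For context, the IAM service role referenced by role_arn and the depends_on block lives in a separate file; a simplified sketch of it looks like this (the CloudWatch log group is built from the cloudwatch_* variables in the same way):

resource "aws_iam_role" "service_role" {
  name = var.eks_service_role

  # Allow the EKS control plane to assume this role
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Principal = { Service = "eks.amazonaws.com" }
      Action    = "sts:AssumeRole"
    }]
  })
}

resource "aws_iam_role_policy_attachment" "service-role-AmazonEKSClusterPolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
  role       = aws_iam_role.service_role.name
}

resource "aws_iam_role_policy_attachment" "service-role-AmazonEKSVPCResourceController" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
  role       = aws_iam_role.service_role.name
}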

eks-worker-nodes.tf

resource "aws_eks_node_group" "aws_eks_workernodes" {
  cluster_name    = aws_eks_cluster.eks_cluster.name
  node_group_name = "${var.default_tags.project_name}-worker-node"
  node_role_arn   = aws_iam_role.eks_worker_node_role.arn
  subnet_ids      = [aws_subnet.private_subnet[0].id, aws_subnet.public_subnet[0].id]
  scaling_config {
    desired_size = 1
    max_size     = 2
    min_size     = 1
  }
  update_config {
    max_unavailable = 1
  }
  capacity_type = "SPOT"
  launch_template {
    id      = aws_launch_template.eks_launch_template.id
    version = aws_launch_template.eks_launch_template.latest_version
  }
  # Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling.
  # Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces.
  depends_on = [
    aws_iam_role_policy_attachment.worker-node-role-AmazonEKSWorkerNodePolicy,
    aws_iam_role_policy_attachment.worker-node-role-AmazonEKS_CNI_Policy,
    aws_iam_role_policy_attachment.worker-node-role-AmazonEC2ContainerRegistryReadOnly,
    data.aws_launch_template.cluster
  ]
}
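
The worker node role it depends on is defined the same way (again simplified):

resource "aws_iam_role" "eks_worker_node_role" {
  name = var.eks_workernode_role

  # Allow the EC2 worker instances to assume this role
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Principal = { Service = "ec2.amazonaws.com" }
      Action    = "sts:AssumeRole"
    }]
  })
}

resource "aws_iam_role_policy_attachment" "worker-node-role-AmazonEKSWorkerNodePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = aws_iam_role.eks_worker_node_role.name
}

resource "aws_iam_role_policy_attachment" "worker-node-role-AmazonEKS_CNI_Policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = aws_iam_role.eks_worker_node_role.name
}

resource "aws_iam_role_policy_attachment" "worker-node-role-AmazonEC2ContainerRegistryReadOnly" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = aws_iam_role.eks_worker_node_role.name
}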

variables.tf

variable "aws_region" {
  type        = string
  description = "AWS Region to be utilized"
}

variable "aws_access_key" {
  type        = string
  description = "AWS Access key for IAM user"
}

variable "aws_secret_access_key" {
  type        = string
  description = "AWS Secret access key for IAM user"
}

variable "cidr_block" {
  type        = string
  description = "VPC CIDR Block"
}

variable "default_tags" {
  type        = map(string)
  description = "Tagging used for AWS resource"
}

variable "public_subnet_count" {
  type        = number
  description = "Total number of public subnets to create"
}

variable "private_subnet_count" {
  type        = number
  description = "Total number of private subnets to create"
  default     = 2
}

variable "eks_service_role" {
  type        = string
  description = "EKS Service Role Name"
}

variable "eks_workernode_role" {
  type        = string
  description = "EKS Worker Node Role Name"
}

variable "cluster_name" {
  type        = string
  description = "EKS Cluster name"
}

variable "cloudwatch_log_group_name" {
  type        = string
  description = "EKS Cluster Log group name"
}

variable "cloudwatch_log_stream_name" {
  type        = string
  description = "EKS Cluster Log stream name"
}

variable "addons" {
  type = list(object({
    name    = string
    version = string
  }))
}

variable "launch_template_name" {
  type        = string
  description = "AWS EC2 Launch Template Name"
}

terraform.tfvars.json

{
    "aws_region": "us-east-1",
    "aws_access_key": "AKIATCXXXXXXXXXX",
    "aws_secret_access_key": "ny0oaSXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
    "default_tags": {
      "primary_owner": "Arjun Goel",
      "secondary_owner": "Harsh Goel",
      "project_name": "EKS-POC-PROJECT"
    },
    "cidr_block": "192.168.0.0/16",
    "public_subnet_count": 2,
    "private_subnet_count": 2,
    "cluster_name": "eks-cluster-poc",
    "addons": [
      {
        "name": "vpc-cni",
        "version": "v1.18.0-eksbuild.1"
      },
      {
        "name": "coredns",
        "version": "v1.10.1-eksbuild.7"
      },
      {
        "name": "kube-proxy",
        "version": "v1.28.6-eksbuild.2"
      }
    ],
    "cloudwatch_log_group_name": "eks_cluster_cw_log_group",
    "cloudwatch_log_stream_name": "eks_cluster_cw_log_stream",
    "eks_service_role": "eks_iam_service_role",
    "eks_workernode_role": "eks_iam_worker_node_role",
    "launch_template_name": "eks_worker_node_lt"
}
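
The provider block consuming the region and credential variables (not shown above) is just the standard wiring:

provider "aws" {
  region     = var.aws_region
  access_key = var.aws_access_key
  secret_key = var.aws_secret_access_key
}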

When I ran terraform apply, I got these errors:

│ Error: unexpected EKS Add-On (eks-cluster-poc:coredns) state returned during creation: timeout while waiting for state to become 'ACTIVE' (last state: 'DEGRADED', timeout: 20m0s)
│ [WARNING] Running terraform apply again will remove the kubernetes add-on and attempt to create it again effectively purging previous add-on configuration
│
│   with aws_eks_addon.addons["coredns"],
│   on eks-cluster.tf line 25, in resource "aws_eks_addon" "addons":
│   25: resource "aws_eks_addon" "addons" {
│
╵
╷
│ Error: error waiting for EKS Node Group (eks-cluster-poc:EKS-POC-PROJECT-worker-node) to create: unexpected state 'CREATE_FAILED', wanted target 'ACTIVE'. last error: 1 error occurred:
│       * eks-EKS-POC-PROJECT-worker-node-42c759ef-5a0e-6095-a8e3-401b054f33b4: AsgInstanceLaunchFailures: You've reached your quota for maximum Fleet Requests for this account. Launching EC2 instance failed.
│
│
│
│   with aws_eks_node_group.aws_eks_workernodes,
│   on eks-worker-nodes.tf line 1, in resource "aws_eks_node_group" "aws_eks_workernodes":
│    1: resource "aws_eks_node_group" "aws_eks_workernodes" {

So, in order to troubleshoot it, I removed this add-on entry from terraform.tfvars.json (since the file is JSON, which has no comment syntax, the block has to be deleted rather than commented out) and reran terraform apply:

{
  "name": "coredns",
  "version": "v1.10.1-eksbuild.7"
}
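
which leaves the addons list as:

"addons": [
  {
    "name": "vpc-cni",
    "version": "v1.18.0-eksbuild.1"
  },
  {
    "name": "kube-proxy",
    "version": "v1.28.6-eksbuild.2"
  }
],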

but I got this error:

│ Error: error waiting for EKS Node Group (eks-cluster-poc:EKS-POC-PROJECT-worker-node) to create: unexpected state 'CREATE_FAILED', wanted target 'ACTIVE'. last error: 1 error occurred:
│       * eks-EKS-POC-PROJECT-worker-node-f8c759fb-7433-ef33-31df-bf9938caacc2: AsgInstanceLaunchFailures: You've reached your quota for maximum Fleet Requests for this account. Launching EC2 instance failed.
│
│
│
│   with aws_eks_node_group.aws_eks_workernodes,
│   on eks-worker-nodes.tf line 1, in resource "aws_eks_node_group" "aws_eks_workernodes":
│    1: resource "aws_eks_node_group" "aws_eks_workernodes" {

Please help me resolve all of the errors above, as I don't want to build anything manually.

1 Answer

Hi there,

The error "AsgInstanceLaunchFailures: You've reached your quota for maximum Fleet Requests for this account. Launching EC2 instance failed" indicates that you are hitting a service quota, probably for EC2. There is a GitHub issue with others facing the same problem, and a few things to try: https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2149
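
Since you mention you don't want to build anything manually: you can inspect the quota and file the increase request from Terraform itself via the Service Quotas API. A rough sketch follows; note that the quota name below is an assumption on my part (your node group uses capacity_type = "SPOT", and on brand new accounts the Spot request quota is the usual suspect), so confirm the exact quota for your account in the Service Quotas console first:

# Look up the current value of the quota we suspect is exhausted.
# NOTE: the quota_name is an assumption - verify it for your account.
data "aws_servicequotas_service_quota" "spot_requests" {
  service_code = "ec2"
  quota_name   = "All Standard (A, C, D, H, I, M, R, T, Z) Spot Instance Requests"
}

output "current_spot_quota" {
  value = data.aws_servicequotas_service_quota.spot_requests.value
}

# Setting a value above the current one files a quota increase request with AWS.
resource "aws_servicequotas_service_quota" "spot_requests" {
  service_code = data.aws_servicequotas_service_quota.spot_requests.service_code
  quota_code   = data.aws_servicequotas_service_quota.spot_requests.quota_code
  value        = 32
}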

Is this a brand new account?

AWS
EXPERT
Matt-B
answered a month ago
  • Yes, it is a brand new AWS account.

  • Since it's a new account, try starting a free tier EC2 instance (t2.micro or t3.micro with an Amazon Linux AMI is fine), wait a little while, then try again.

  • I changed the instance type to t2.micro in the launch-template.tf file:

    resource "aws_launch_template" "eks_launch_template" {
      name          = var.launch_template_name
      instance_type = "t2.micro"
      image_id      = data.aws_ssm_parameter.cluster.value
      block_device_mappings {
        device_name = "/dev/xvda"
        ebs {
          volume_size = 20
          volume_type = "gp2"
        }
      }
      block_device_mappings {
        device_name = "/dev/xvdb"
        ebs {
          volume_size = 40
          volume_type = "gp2"
        }
      }
      tag_specifications {
        resource_type = "instance"
        tags = {
          Name = "eks-worker-node"
        }
      }
      tag_specifications {
        resource_type = "volume"
        tags = {
          Name = "eks-worker-node"
        }
      }
      user_data = base64encode(templatefile("userdata.tpl", { CLUSTER_NAME = aws_eks_cluster.eks_cluster.name, B64_CLUSTER_CA = aws_eks_cluster.eks_cluster.certificate_authority[0].data, API_SERVER_URL = aws_eks_cluster.eks_cluster.endpoint }))
      #   depends_on = [
      #     aws_eks_cluster.eks_cluster
      #   ]
    }
    

    but I got this error now:

    │ Error: error waiting for EKS Node Group (eks-cluster-poc:EKS-POC-PROJECT-worker-node) to create: unexpected state 'CREATE_FAILED', wanted target 'ACTIVE'. last error: 1 error occurred:
    │       * eks-EKS-POC-PROJECT-worker-node-c8c75e15-a0d6-2092-75b2-1423ea5429ff: AsgInstanceLaunchFailures: You've reached your quota for maximum Fleet Requests for this account. Launching EC2 instance failed.
    │
    │   with aws_eks_node_group.aws_eks_workernodes,

  • Please try starting a free tier EC2 instance (t2.micro or t3.micro with an Amazon Linux AMI is fine) from the AWS EC2 console (not from your Terraform script), wait a little while, then try again.
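
  • One more thing worth trying while the quota is being sorted out: take the node group off Spot capacity, since new accounts often start with a very low Spot quota. In eks-worker-nodes.tf that is a one-attribute change (sketch; ON_DEMAND draws on the regular On-Demand vCPU quota instead):

    resource "aws_eks_node_group" "aws_eks_workernodes" {
      # ... all other arguments unchanged ...
      capacity_type = "ON_DEMAND"
    }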
