Error while Deploying EKS cluster using CDK Pipeline

0

I am deploying an EKS cluster using a CDK pipeline in TypeScript. This is my Cluster Stack: import { PhysicalName, Stack, StackProps } from "aws-cdk-lib"; import * as ec2 from 'aws-cdk-lib/aws-ec2'; import { Vpc } from "aws-cdk-lib/aws-ec2"; import * as eks from 'aws-cdk-lib/aws-eks'; import { Cluster } from "aws-cdk-lib/aws-eks/lib/cluster"; import { AccountRootPrincipal,Role } from "aws-cdk-lib/aws-iam"; import { Construct } from "constructs";

export interface DevOpsClusterStackProps extends StackProps { cluster:Cluster, vpc:Vpc, } export class DevOpsClusterStack extends Stack { public readonly cluster: eks.Cluster; accountId = this.account; clusterName = "DevOpsCluster" Role: Role; /* Cluster Role Defined */

constructor(scope: Construct, id: string, props: DevOpsClusterStackProps) {
    super(scope, id, props);
    this.accountId = this.account;
    this.clusterName = "DevOpsCluster";

    const clusterAdmin = new Role(this, 'clusterAdmin', {
      assumedBy: new AccountRootPrincipal(),
      roleName: "clusterAdmin",
      });    
  
      /* Cluster Configuration */

    const cluster = new eks.Cluster(this, 'DevOpsCluster', {
  
      clusterName: "DevOpsCluster",
      version: eks.KubernetesVersion.V1_23,
      defaultCapacity: 3,  
      mastersRole: clusterAdmin,
      defaultCapacityInstance: ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.LARGE),
      vpc:props.vpc,
      vpcSubnets: [{  
            subnetType: ec2.SubnetType.PUBLIC 
        }], 
      });
      cluster.addAutoScalingGroupCapacity('spot-group', {
        instanceType: new ec2.InstanceType('m5.xlarge'),
      });
      if (Stack.of(this).region==this.region)
      this.Role = createDeployRole(this, `for-1st-region`, cluster);
      
    this.cluster = cluster;
}

} function createDeployRole(scope: Construct, id: string, cluster: eks.Cluster): Role { const role = new Role(scope, id, { roleName: PhysicalName.GENERATE_IF_NEEDED, assumedBy: new AccountRootPrincipal() }); cluster.awsAuth.addMastersRole(role);

return role; } export interface PipelineStack extends StackProps { Cluster: eks.Cluster, Role: Role, }

And this is my Pipeline Stack, which deploys this cluster through the pipeline: import { Stack, StackProps, Stage } from 'aws-cdk-lib'; import * as codecommit from 'aws-cdk-lib/aws-codecommit'; import { CodePipeline, CodePipelineSource } from 'aws-cdk-lib/pipelines'; import * as pipelines from 'aws-cdk-lib/pipelines'; import { Construct } from 'constructs'; import { VpcStack } from './vpc-stack'; import { Cluster } from 'aws-cdk-lib/aws-eks/lib/cluster'; import { DevOpsClusterStack } from '../lib/devops-cluster-stack';

class DevelopmentStage extends Stage { cluster: Cluster;

constructor(scope: Construct, id: string, props: StackProps) {
    super(scope, id, props);
    const vpcStack = new VpcStack(this, "VpcStack", {});
    const ClusterStack = new DevOpsClusterStack (this, 'DevOpsCluster',{vpc:vpcStack.vpc , cluster:this.cluster});   
    
}

} /**

  • Create a CI/CD pipelines for cluster deployment */ export class PipelineStack extends Stack { cluster: Cluster; static cluster: Cluster;

    constructor(scope: Construct, id: string, props?: StackProps) { super(scope, id, props);

/**

  • Here we provide pipeline start point as a Codecommit Soursecode to Create a CI/CD pipelines for cluster deployment */ const repository = codecommit.Repository.fromRepositoryName(this, 'Repository', 'CDK-Typescript-Project'); const source = CodePipelineSource.codeCommit(repository, "feature/create-eks-cluster") const pipeline = new CodePipeline(this, 'Pipeline', { pipelineName: 'EKS-CICD-Pipeline',

         synth: new pipelines.ShellStep('Synth', {
             input: source,
             installCommands: ['npm i -g npm@latest',"npm install -g typescript"],
             commands: [
                 'npm ci',
                 'npm run build',
                 'npx cdk synth',
             ]
         })
     });
    
     // Developemnt stage This could include things like EC2 instances and more, depending on the needs of the application being developed. 
     const devStage = new DevelopmentStage(this, "Development", {
    
     });
    
     pipeline.addStage(devStage);
    

    } }

Also, I have created a separate VPC Stack: import { App, Stack, StackProps } from "aws-cdk-lib"; import * as ec2 from 'aws-cdk-lib/aws-ec2'; import { IpAddresses } from "aws-cdk-lib/aws-ec2"; import { Construct } from "constructs";

/**
 * Create a VPC with one public and one private subnet group.
 *
 * The VPC spans at most two availability zones, uses the 10.1.0.0/16
 * CIDR range, and routes private-subnet egress through a single NAT
 * gateway.
 */
export class VpcStack extends Stack {
  public readonly vpc: ec2.Vpc;

  constructor(scope: Construct, id: string, props?: StackProps) {
    super(scope, id, props);

    // Two subnet groups per AZ: an internet-facing public group and a
    // private group whose outbound traffic goes via the NAT gateway.
    this.vpc = new ec2.Vpc(this, 'vpc', {
      natGateways: 1,
      ipAddresses: IpAddresses.cidr("10.1.0.0/16"),
      maxAzs: 2,
      subnetConfiguration: [
        { name: 'Public', subnetType: ec2.SubnetType.PUBLIC },
        { name: 'Private', subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS },
      ],
    });
  }
}

*I am receiving the following error while deploying the Cluster Stack.* The error is: instance Fails to Join Kubernetes Cluster DevOpsClusterNodegroupDefaultCapacity90B6204B CREATE_FAILED Resource handler returned message: "[Issue(Code=NodeCreationFailure, Message=Instances failed to join the kubernetes cluster, ResourceIds=[i-02c060ccb6d6e8c6f, i-048feaa20bfdca377, i-0a7a4184599e60cd2])] (Service: null, Status Code: 0, Request ID: null)" (RequestToken: e94890a6-5074-b4a3-a4e3-916cf510ef8a, HandlerErrorCode: GeneralServiceException)

1 réponse
0

The issue you're facing is that the created nodes are not able to join the cluster. Check the routing tables of the created subnets to ensure that the cluster is reachable. From your configuration, you have your subnet configuration as "ec2.SubnetType.PRIVATE_WITH_EGRESS", which is an IPv6 feature; if your intention is to use NAT, then use the option "PRIVATE_WITH_NAT". This will ensure that both your private subnet and your public subnet can access the cluster.

Also, you're using self-managed nodegroups, but from your configuration it looks like managed nodegroups will meet your needs, so I recommend using managed nodegroups via the "addNodegroupCapacity" API. In addition, when using managed or self-managed nodegroups, explicitly specify the VPC subnets; otherwise, all the subnets — including the public ones — will get added to the nodegroups.

AWS
Mahali
répondu il y a un an

Vous n'êtes pas connecté. Se connecter pour publier une réponse.

Une bonne réponse répond clairement à la question, contient des commentaires constructifs et encourage le développement professionnel de la personne qui pose la question.

Instructions pour répondre aux questions