Example #1
    def __create_eks_control_plane(self, vpc: ec2.Vpc) -> eks.Cluster:
        # This role is used to connect to the cluster with admin access.
        # It is associated with the system:masters Kubernetes RBAC group.
        masters_role = iam.Role(
            self,
            'eksClusterAdmin',
            role_name='eks-cluster-admin-'+self._config['stage'],
            assumed_by=iam.AccountRootPrincipal()
        )

        # Control plane role
        # It provides permissions for the Kubernetes control plane
        # to make calls to AWS API operations on your behalf.
        role = self.__create_eks_control_plane_role()

        eks_config = self._config['compute']['eks']
        self._cluster = eks.Cluster(
            scope=self,
            id="ControlPlane",
            cluster_name=self._config['name'],
            role=role,
            masters_role=masters_role,
            version=eks.KubernetesVersion.of(eks_config['version']),
            vpc=vpc,
            vpc_subnets=list(
                map(lambda group_name: ec2.SubnetSelection(subnet_group_name=group_name),
                    eks_config['subnetGroupNames'])
            ),
            default_capacity=0,
        )
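The __create_eks_control_plane_role helper called above is not part of the snippet. A minimal sketch, assuming it only creates the service role that the EKS control plane assumes (the role name and config keys are illustrative):

    def __create_eks_control_plane_role(self) -> iam.Role:
        # Hypothetical implementation: a role assumable by the EKS service, carrying
        # the managed policy the control plane needs to call AWS APIs.
        return iam.Role(
            self,
            'eksClusterRole',
            role_name='eks-cluster-role-' + self._config['stage'],
            assumed_by=iam.ServicePrincipal('eks.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name('AmazonEKSClusterPolicy')
            ]
        )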
Example #2
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(self, "EKS_Kafka_PocClusterVPC")

        private_subnets = [snet_id.subnet_id for snet_id in vpc.private_subnets]
        bngi = msk.CfnCluster.BrokerNodeGroupInfoProperty(instance_type="kafka.m5.large",
                                                          client_subnets=private_subnets)

        msk_cluster = msk.CfnCluster(self, "EKS_KafkaPocMSKCluster",
                                     broker_node_group_info=bngi,
                                     cluster_name="EKSKafkaPOCMKSCluster",
                                     kafka_version="2.3.1",
                                     number_of_broker_nodes=3)

        eks_admin_role = iam.Role(self, "EKS_Kafka_PocCluster-AdminRole",
                                  assumed_by=iam.AccountPrincipal(account_id=self.account))

        eks_cluster = eks.Cluster(self, "EKS_Kafka_PocEKSCluster",
                                  cluster_name="EKS_Kafka_PocCluster",
                                  masters_role=eks_admin_role,
                                  kubectl_enabled=True,
                                  version="1.15",
                                  vpc=vpc)
        eks_cluster.add_capacity("worker", instance_type=ec2.InstanceType("t3.large"),
                                 min_capacity=1, max_capacity=10)
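Workloads on the EKS cluster will eventually need the MSK bootstrap brokers, which CloudFormation does not expose directly. A small sketch that exports the cluster ARN (Ref on AWS::MSK::Cluster resolves to the ARN) so the brokers can be looked up afterwards:

        # Export the MSK cluster ARN; bootstrap brokers can then be fetched with
        # `aws kafka get-bootstrap-brokers --cluster-arn <arn>`.
        core.CfnOutput(self, "EKS_Kafka_PocMSKClusterArn", value=msk_cluster.ref)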
Example #3
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        cluster_admin = iam.Role(self,
                                 'ClusterAdmin',
                                 assumed_by=iam.AccountRootPrincipal(),
                                 role_name='eks_cdk_admin')

        # The code that defines your stack goes here
        example_cluster = eks.Cluster(self,
                                      'Example',
                                      version=eks.KubernetesVersion.V1_19,
                                      masters_role=cluster_admin)

        example_cluster.aws_auth.add_user_mapping(user=iam.User.from_user_name(
            self, 'K8SUser', 'k8s'),
                                                  groups=['system:masters'])

        example_cluster.add_fargate_profile(
            'ExampleFargate',
            selectors=[{
                'namespace': 'kube-system'
            }],
            fargate_profile_name='ExampleFargate')
Example #4
    def __init__(self, scope: core.Construct, construct_id: str, 
        my_service_details={}, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.my_service_details = my_service_details

        masters_role = iam.Role(
            self, "clusterAdmin",
            role_name="demo_EKS_cluster_role",
            assumed_by=iam.AccountRootPrincipal()
        )

        k8s_cluster = eks.Cluster(
            self, "defaultCluster", 
            cluster_name="DemoEKS",
            version=eks.KubernetesVersion.V1_19,
            default_capacity=1,
            default_capacity_type=eks.DefaultCapacityType.EC2,
            default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL),
            masters_role=masters_role
            )
        k8s_cluster.add_fargate_profile(
            "FargateEnabled", selectors=[
                eks.Selector(
                    namespace="eksdemo", 
                    labels={"fargate":"enabled"})
            ]
        )

        my_service = EksServices(self, "myService", eks_cluster=k8s_cluster, service=self.my_service_details)
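EksServices is a project-specific construct that is not reproduced here. A hypothetical minimal skeleton, assuming the service details simply carry Kubernetes manifests to apply to the cluster:

class EksServices(core.Construct):
    # Hypothetical skeleton of the construct used above; the "name" and "manifest"
    # keys of the service dict are assumptions.
    def __init__(self, scope: core.Construct, construct_id: str, *,
                 eks_cluster: eks.Cluster, service: dict, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        if service:
            eks_cluster.add_manifest(service.get("name", "service"), *service["manifest"])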
Example #5
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        cdk_id = self.cdk_id
        ng_cdk_id = self.ng_cdk_id
        if cdk_id not in EKS_Cluster_List:
            return
        cluster_config, worker_node_config = EKS_Cluster_List[cdk_id]
        cluster_name, k8s_version = cluster_config
        instance_type, min_size, max_size, desired_size, disk_size = worker_node_config

        cluster = aws_eks.Cluster(
            self,
            cdk_id,
            version=k8s_version,
            default_capacity=0,
            cluster_name=cluster_name,
        )
        cluster.add_nodegroup_capacity(
            ng_cdk_id,
            instance_types=[instance_type],
            min_size=min_size,
            max_size=max_size,
            desired_size=desired_size,
            disk_size=disk_size,
        )
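EKS_Cluster_List is defined outside the snippet. Based on how it is unpacked above, a hypothetical entry could look like this (all values, and the ec2 module alias, are assumptions):

# Maps a CDK construct id to ((cluster_name, k8s_version),
#                             (instance_type, min_size, max_size, desired_size, disk_size))
EKS_Cluster_List = {
    "DemoEksCluster": (
        ("demo-cluster", aws_eks.KubernetesVersion.V1_19),
        (ec2.InstanceType("t3.large"), 1, 4, 2, 50),
    ),
}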
Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # import default VPC
        #vpc = aws_ec2.Vpc.from_lookup(self, 'VPC', is_default=True)
        vpc = aws_ec2.Vpc(self, 'EKS-CDK-VPC', cidr='10.0.0.0/16', nat_gateways=1)

        # create an admin role
        eks_admin_role = aws_iam.Role(self, 'AdminRole',
                                      assumed_by=aws_iam.AccountPrincipal(
                                          account_id=self.account)
                                      )
        # create the cluster
        cluster = aws_eks.Cluster(self, 'cluster',
                                  masters_role=eks_admin_role,
                                  vpc=vpc,
                                  default_capacity=0,
                                  version='1.14',
                                  output_cluster_name=True
                                  )

        cluster.add_capacity('ondemand',
                             instance_type=aws_ec2.InstanceType('t3.large'),
                             max_capacity=1,
                             bootstrap_options=aws_eks.BootstrapOptions(
                                 kubelet_extra_args='--node-labels myCustomLabel=od'))

        cluster.add_capacity('spot',
                             instance_type=aws_ec2.InstanceType('t3.large'),
                             max_capacity=1,
                             spot_price='0.1094',
                             bootstrap_options=aws_eks.BootstrapOptions(
                                 kubelet_extra_args='--node-labels myCustomLabel=spot'))
Example #7
 def __init__(self, scope: core.Construct, id: str, cluster_configuration, **kwargs) -> None:
     super().__init__(scope, id, **kwargs)
     self.cluster_configuration = cluster_configuration    
         
     def determine_cluster_size(self):
         """
         return instance_size, node_count
         """
          if self.cluster_configuration['capacity_details'] == 'small':
              instance_details = aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3, aws_ec2.InstanceSize.SMALL)
              instance_count = 3
          elif self.cluster_configuration['capacity_details'] == 'medium':
              instance_details = aws_ec2.InstanceType.of(aws_ec2.InstanceClass.COMPUTE5, aws_ec2.InstanceSize.LARGE)
              instance_count = 3
          elif self.cluster_configuration['capacity_details'] == 'large':
              instance_details = aws_ec2.InstanceType.of(aws_ec2.InstanceClass.COMPUTE5, aws_ec2.InstanceSize.LARGE)
              instance_count = 6
          else:
              # For an unspecified capacity, default to a small burstable node group and enable Fargate
              instance_count = 2
              instance_details = aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3, aws_ec2.InstanceSize.SMALL)
              self.cluster_configuration['fargate_enabled'] = True
         
         return { 'default_capacity': instance_count, 'default_capacity_instance': instance_details }
      
     capacity_details = determine_cluster_size(self)
             
     # Create an EKS cluster with default nodegroup configuration        
     self.cluster = aws_eks.Cluster(
         self, "EKSCluster",
         version = self.cluster_configuration['eks_version'],
         cluster_name = self.cluster_configuration['cluster_name'],
         **capacity_details
     )
     
     # If fargate is enabled, create a fargate profile
     if self.cluster_configuration['fargate_enabled'] is True:
         self.cluster.add_fargate_profile(
             "FargateEnabled",
             selectors = [
                 aws_eks.Selector(
                     namespace = 'default',
                     labels = { 'fargate': 'enabled' }
                 )
             ]
         )
     
      # If Bottlerocket is enabled, build a self-managed nodegroup
     if self.cluster_configuration.get('bottlerocket_asg') is True:
         self.cluster.add_auto_scaling_group_capacity(
             "BottleRocketASG",
             instance_type=aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3, aws_ec2.InstanceSize.SMALL),
             machine_image_type=aws_eks.MachineImageType.BOTTLEROCKET,
             desired_capacity=3,
         )
Example #8
    def __init__(self, scope: core.Construct, name: str, vpc: ec2.IVpc, **kwargs) -> None:
        super().__init__(scope, name, **kwargs)

        cluster = eks.Cluster(
            self, 'jenkins-workshop-eks-control-plane',
            vpc=vpc,
            default_capacity=0
        )

        asg_worker_nodes = cluster.add_capacity(
            'worker-node',
            instance_type=ec2.InstanceType('t3.medium'),
            desired_capacity=2,
        )

        asg_jenkins_slave = cluster.add_capacity(
            'worker-node-jenkins-slave',
            instance_type=ec2.InstanceType('t3.medium'),
            desired_capacity=1,
            bootstrap_options=eks.BootstrapOptions(
                kubelet_extra_args='--node-labels jenkins=slave --register-with-taints jenkins=slave:NoSchedule',
                docker_config_json=read_docker_daemon_resource('kubernetes_resources/docker-daemon.json')
            )
        )
        asg_jenkins_slave.add_to_role_policy(iam.PolicyStatement(
            actions=[
                'ecr:CompleteLayerUpload',
                'ecr:InitiateLayerUpload',
                'ecr:PutImage',
                'ecr:UploadLayerPart'
                ],
            resources=["*"]
            )
        )

        asg_worker_nodes.connections.allow_from(
            asg_jenkins_slave,
            ec2.Port.all_traffic()
        )
        asg_jenkins_slave.connections.allow_from(
            asg_worker_nodes,
            ec2.Port.all_traffic()
        )

        eks_master_role = iam.Role(
            self, 'AdminRole',
            assumed_by=iam.ArnPrincipal(get_eks_admin_iam_username())
        )

        cluster.aws_auth.add_masters_role(eks_master_role)

        helm_tiller_rbac = eks.KubernetesResource(
            self, 'helm-tiller-rbac',
            cluster=cluster,
            manifest=read_k8s_resource('kubernetes_resources/helm-tiller-rbac.yaml')
        )
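read_docker_daemon_resource, read_k8s_resource and get_eks_admin_iam_username are project helpers that are not shown. Plausible sketches, assuming they only read local files and an environment variable (the variable name is an assumption):

import os

import yaml


def read_docker_daemon_resource(path: str) -> str:
    # Return the docker daemon config file verbatim for BootstrapOptions.
    with open(path) as f:
        return f.read()


def read_k8s_resource(path: str) -> list:
    # Parse one or more YAML documents into the dicts KubernetesResource expects.
    with open(path) as f:
        return list(yaml.safe_load_all(f))


def get_eks_admin_iam_username() -> str:
    # IAM principal ARN allowed to administer the cluster,
    # e.g. arn:aws:iam::123456789012:user/eks-admin
    return os.environ["EKS_ADMIN_USER_ARN"]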
Example #9
    def create_eks(self, vpc):
        # create eks cluster with amd nodegroup
        cluster = eks.Cluster(
            self,
            "EKS",
            vpc=vpc,
            version=eks.KubernetesVersion.V1_18,
            default_capacity_instance=ec2.InstanceType("m5.large"),
            default_capacity=1)
        # add arm/graviton nodegroup
        cluster.add_nodegroup_capacity(
            "graviton",
            desired_size=1,
            instance_type=ec2.InstanceType("m6g.large"),
            nodegroup_name="graviton",
            node_role=cluster.default_nodegroup.role)

        # add secret access to eks node role
        cluster.default_nodegroup.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "SecretsManagerReadWrite"))

        # create service account
        sa = cluster.add_service_account("LBControllerServiceAccount",
                                         name="aws-load-balancer-controller",
                                         namespace="kube-system")
        sa_annotated = self.add_helm_annotation(cluster, sa)

        # create policy for the service account
        statements = []
        with open('backend/iam_policy.json') as f:
            data = json.load(f)
            for s in data["Statement"]:
                statements.append(iam.PolicyStatement.from_json(s))
        policy = iam.Policy(self, "LBControllerPolicy", statements=statements)
        policy.attach_to_role(sa.role)

        # add helm charts
        ingress = cluster.add_helm_chart(
            "LBIngress",
            chart="aws-load-balancer-controller",
            release="aws-load-balancer-controller",
            repository="https://aws.github.io/eks-charts",
            namespace="kube-system",
            values={
                "clusterName": cluster.cluster_name,
                "serviceAccount.name": "aws-load-balancer-controller",
                "serviceAccount.create": "false"
            })
        ingress.node.add_dependency(sa_annotated)

        return cluster
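add_helm_annotation is a method of the surrounding class and is not shown. One plausible sketch, assuming it patches the CDK-created service account so the Helm release can adopt it (resource names mirror the chart defaults; everything else is an assumption):

    def add_helm_annotation(self, cluster, sa):
        # Hypothetical sketch: label/annotate the ServiceAccount so Helm treats it as
        # part of the aws-load-balancer-controller release instead of recreating it.
        patch = eks.KubernetesPatch(
            self, "LBControllerSAPatch",
            cluster=cluster,
            resource_name="serviceaccount/aws-load-balancer-controller",
            resource_namespace="kube-system",
            apply_patch={
                "metadata": {
                    "labels": {"app.kubernetes.io/managed-by": "Helm"},
                    "annotations": {
                        "meta.helm.sh/release-name": "aws-load-balancer-controller",
                        "meta.helm.sh/release-namespace": "kube-system",
                    },
                }
            },
            restore_patch={
                "metadata": {
                    "labels": {"app.kubernetes.io/managed-by": None},
                }
            },
        )
        patch.node.add_dependency(sa)
        return patch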
Example #10
    def provision(
        self,
        name: str,
        eks_version: eks.KubernetesVersion,
        private_api: bool,
        vpc: ec2.Vpc,
        bastion_sg: ec2.SecurityGroup,
    ):
        eks_sg = ec2.SecurityGroup(
            self.scope,
            "EKSSG",
            vpc=vpc,
            security_group_name=f"{name}-EKSSG",
            allow_all_outbound=False,
        )

        # Note: We can't tag the EKS cluster via CDK/CF: https://github.com/aws/aws-cdk/issues/4995
        cluster = eks.Cluster(
            self.scope,
            "eks",
            cluster_name=name,
            vpc=vpc,
            endpoint_access=eks.EndpointAccess.PRIVATE
            if private_api else None,
            vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)],
            version=eks_version,
            default_capacity=0,
            security_group=eks_sg,
        )

        if bastion_sg:
            cluster.cluster_security_group.add_ingress_rule(
                peer=bastion_sg,
                connection=ec2.Port(
                    protocol=ec2.Protocol.TCP,
                    string_representation="API Access",
                    from_port=443,
                    to_port=443,
                ),
            )

        cdk.CfnOutput(self.scope,
                      "eks_cluster_name",
                      value=cluster.cluster_name)
        cdk.CfnOutput(
            self.scope,
            "eks_kubeconfig_cmd",
            value=
            f"aws eks update-kubeconfig --name {cluster.cluster_name} --region {self.scope.region} --role-arn {cluster.kubectl_role.role_arn}",
        )

        return cluster
Example #11
    def __init__(
        self,
        scope: cdk.Construct,
        construct_id: str,
        vpc: ec2.IVpc,
        instance_type: str = "m5.xlarge",
        **kwargs,
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.cluster_name = "data-team"

        # EKS cluster
        self.cluster = eks.Cluster(
            self,
            "EksForSpark",
            cluster_name=self.cluster_name,
            version=eks.KubernetesVersion.V1_19,
            default_capacity=0,
            endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE,
            vpc=vpc,
            vpc_subnets=[
                ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)
            ],
        )

        # Default node group
        ng = self.cluster.add_nodegroup_capacity(
            "base-node-group",
            instance_types=[ec2.InstanceType(instance_type)],
            min_size=1,
            max_size=20,
            disk_size=50,
        )

        self.add_admin_role_to_cluster()
        self.add_cluster_admin()

        # Cluster AutoScaling FTW
        ClusterAutoscaler(self.cluster_name, self, self.cluster,
                          ng).enable_autoscaling()

        # We like to use the Kubernetes Dashboard
        self.enable_dashboard()

        # Install Airflow as well
        # TODO: Make this optional
        # self.enable_airflow()

        # This is emr-specific, but we have to do it here to prevent circular dependencies
        self.map_iam_to_eks()
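add_admin_role_to_cluster, add_cluster_admin, enable_dashboard and map_iam_to_eks are methods of the same stack that are not reproduced here. As an illustration, a minimal add_cluster_admin might map an admin role into system:masters (all names below are assumptions, and the iam import is assumed):

    def add_cluster_admin(self):
        # Hypothetical sketch: create an admin role assumable from this account and
        # register it in the cluster's aws-auth ConfigMap.
        admin_role = iam.Role(
            self,
            "ClusterAdminRole",
            assumed_by=iam.AccountRootPrincipal(),
        )
        self.cluster.aws_auth.add_masters_role(admin_role)
        cdk.CfnOutput(self, "ClusterAdminRoleArn", value=admin_role.role_arn)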
Example #12
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.encryptionKey = kms.Key(self, 'KubeClusterKey')
        self.cluster = eks.Cluster(
            self,
            'KubeCluster',
            version=eks.KubernetesVersion.V1_18,
            vpc=vpc,
            default_capacity=0,
            secrets_encryption_key=self.encryptionKey,
            endpoint_access=eks.EndpointAccess.PRIVATE,
            vpc_subnets=[ec2.SubnetSelection(subnet_group_name='Kubes')])
Example #13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.__vpc = ec2.Vpc(self, 'MyVpc', cidr='10.10.0.0/16')

        self.__eks = eks.Cluster(self,
                                 'WinCtrEksCluster',
                                 version=eks.KubernetesVersion.V1_18,
                                 default_capacity=0,
                                 vpc=self.__vpc,
                                 endpoint_access=eks.EndpointAccess.PUBLIC)

        self.__private_subnet_ids = []
        for net in self.__eks.vpc.private_subnets:
            self.__private_subnet_ids.append(net.subnet_id)
Example #14
    def __init__(self, scope: core.Construct, construct_id: str, vpc: ec2.Vpc,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        env_name = self.node.try_get_context("env")
        eks_role = iam.Role(
            self,
            "eksadmin",
            assumed_by=iam.ServicePrincipal(service='ec2.amazonaws.com'),
            role_name='eks-cluster-role',
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    managed_policy_name='AdministratorAccess')
            ])
        eks_instance_profile = iam.CfnInstanceProfile(
            self,
            'instanceprofile',
            roles=[eks_role.role_name],
            instance_profile_name='eks-cluster-role')

        cluster = eks.Cluster(
            self,
            'prod',
            cluster_name='ie-prod-snow-common',
            version=eks.KubernetesVersion.V1_19,
            vpc=vpc,
            vpc_subnets=[
                ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)
            ],
            default_capacity=0,
            masters_role=eks_role)

        nodegroup = cluster.add_nodegroup_capacity(
            'eks-nodegroup',
            instance_types=[
                ec2.InstanceType('t3.large'),
                ec2.InstanceType('m5.large'),
                ec2.InstanceType('c5.large')
            ],
            disk_size=50,
            min_size=2,
            max_size=2,
            desired_size=2,
            subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
            remote_access=eks.NodegroupRemoteAccess(
                ssh_key_name='ie-prod-snow-common'),
            capacity_type=eks.CapacityType.SPOT)
Example #15
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        
        vpc = ec2.Vpc.from_lookup(self,'vpc',
            vpc_id='vpc-082a9f3f7200f4513'
        )

        k8s_admin = iam.Role(self, "k8sadmin",
            assumed_by=iam.ServicePrincipal(service='ec2.amazonaws.com'),
            role_name='eks-master-role',
            managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name='AdministratorAccess')]
        )
        k8s_instance_profile = iam.CfnInstanceProfile(self, 'instanceprofile',
            roles=[k8s_admin.role_name],
            instance_profile_name='eks-master-role'
        )

        cluster = eks.Cluster(self, 'dev',
            cluster_name='eks-cdk-demo',
            version='1.15',
            vpc=vpc,
            vpc_subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)],
            default_capacity=0,
            kubectl_enabled=True,
            #security_group=k8s_sg,
            masters_role= k8s_admin

        )
        #cluster.aws_auth.add_user_mapping(adminuser, {groups['system:masters']})
        
        ng = cluster.add_nodegroup('eks-ng',
            nodegroup_name='eks-ng',
            instance_type=ec2.InstanceType('t3.medium'),
            disk_size=5,
            min_size=1,
            max_size=1,
            desired_size=1,
            subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
            remote_access=eks.NodegroupRemoteAccess(ssh_key_name='k8s-nodes')
            
        )

        
        
Example #16
    def __init__(self,
                 scope,
                 id,
                 *,
                 description=None,
                 env=None,
                 tags=None,
                 synthesizer=None):
        super().__init__(scope,
                         id,
                         description=description,
                         env=env,
                         tags=tags,
                         synthesizer=synthesizer)

        vpc = ec2.Vpc(
            self,
            f"kodexa-vpc-{id}",
            max_azs=2,
            cidr="10.10.0.0/16",
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="Public",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                        name="Private",
                                        cidr_mask=24)
            ],
            nat_gateways=1,
        )

        core.CfnOutput(self, "Output", value=vpc.vpc_id)

        # Create K8S cluster

        cluster_admin = iam.Role(self,
                                 f"kodexa-eks-adminrole-{id}",
                                 assumed_by=iam.AccountRootPrincipal())

        cluster = eks.Cluster(self,
                              id=f'kodexa-eks-cluster-{id}',
                              cluster_name=f'kodexa-eks-cluster-{id}',
                              version=eks.KubernetesVersion.V1_17,
                              vpc=vpc,
                              default_capacity=4,
                              masters_role=cluster_admin)
Example #17
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        cluster_admin = iam.Role(self,
                                 "AdminRole",
                                 assumed_by=iam.AccountRootPrincipal())

        vpc = ec2.Vpc(self, "EKSVpc", cidr="10.2.0.0/16")

        eksCluster = eks.Cluster(
            self,
            "fedcluster",
            vpc=vpc,
            cluster_name="awsfedcluster",
            kubectl_enabled=True,
            masters_role=cluster_admin,
            default_capacity=2,
            default_capacity_instance=ec2.InstanceType("t3.large"))
Example #18
    def __init__(self, scope: core.Construct, id: str, vpc, instance_type,
                 managed_worker_nodes_number, cluster_name,
                 unmanaged_worker_nodes_number, spot_price, key_pair,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        #Create key pair that will be used for the K8S worker nodes
        self.key = KeyPair(
            self,
            "EKSKey",
            name=key_pair,
            description="This is a Key Pair for EKS worker nodes")
        #Create KMS key for secrets encryption
        self.kms_eks = kms.Key(
            self,
            'kms_eks',
            alias='kms_eks',
        )
        #Get the IAM role which will be added to the aws_auth
        # masters_role = iam.Role.from_role_arn(
        #     self, 'MasterRole',
        #     role_arn = masters_role
        # )

        #Create EKS cluster with managed/unmanaged worker nodes
        self.eks_cluster = eks.Cluster(
            self,
            'eks',
            cluster_name=cluster_name,
            version=eks.KubernetesVersion.V1_18,
            # masters_role = masters_role,
            default_capacity=managed_worker_nodes_number,
            secrets_encryption_key=self.kms_eks,
            vpc=vpc)
        self.eks_role = self.eks_cluster.node.try_find_child('Role')
        self.eks_role.add_to_policy(statement=iam.PolicyStatement(
            actions=["ec2:DescribeVpcs"], resources=["*"]))
        if unmanaged_worker_nodes_number > 0:
            self.asg = self.eks_cluster.add_auto_scaling_group_capacity(
                "EKSAutoScalingGroup",
                instance_type=ec2.InstanceType(instance_type),
                spot_price=spot_price,
                desired_capacity=unmanaged_worker_nodes_number,
                key_name=self.key.name)
            self.asg.add_to_role_policy(
                iam.PolicyStatement(actions=["route53:*"], resources=["*"]))
Example #19
 def setup_eks_cluster(self):
     self.cluster = aws_eks.Cluster(
         self,
         self.config["eks_cluster_name"],
         version=aws_eks.KubernetesVersion.V1_21,
         default_capacity=self.config.get("eks_nodegroup_capacity", 2),
         default_capacity_instance=ec2.InstanceType.of(
             instance_class=ec2.InstanceClass.BURSTABLE2,
             instance_size=ec2.InstanceSize.SMALL),
         vpc=self.vpc,
         vpc_subnets=[ec2.SubnetSelection(subnets=self.vpc.private_subnets)],
         endpoint_access=aws_eks.EndpointAccess.PUBLIC)
     # add worker task roles to masters group in the EKS cluster so that workers can launch pods
     self.cluster.aws_auth.add_role_mapping(
         self.worker_service.task_definition.task_role,
         groups=["system:masters"])
     # add any needed policies to give permissions to EKS nodes
     self.cluster.default_nodegroup.role.add_to_policy(
         self.athena_access_policy())
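athena_access_policy is another method on this class. A hedged sketch of what it might return (the action list and the aws_iam alias are assumptions):

 def athena_access_policy(self):
     # Hypothetical sketch: allow worker nodes to start and read Athena queries.
     return aws_iam.PolicyStatement(
         actions=[
             "athena:StartQueryExecution",
             "athena:GetQueryExecution",
             "athena:GetQueryResults",
         ],
         resources=["*"],
     )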
Example #20
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        
        # Create SecurityGroup for the Control Plane ENIs
        eks_security_group = ec2.SecurityGroup(
            self, "EKSSecurityGroup",
            vpc=vpc,
            allow_all_outbound=True
        )
        
        eks_security_group.add_ingress_rule(
            ec2.Peer.ipv4('10.0.0.0/16'),
            ec2.Port.all_traffic()
        )  
        eks_security_group.add_ingress_rule(
            ec2.Peer.ipv4(c9_ip),
            ec2.Port.all_traffic()
        )  

        
        self.cluster = eks.Cluster(self, "EKSGraviton2",
            version=eks.KubernetesVersion.V1_18,
            default_capacity=0,
            endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE,
            vpc=vpc,
            security_group=eks_security_group

        )

        self.ng_x86 = self.cluster.add_nodegroup_capacity("x86-node-group",
            instance_types=[ec2.InstanceType("m5.large")],
            desired_size=2,
            min_size=1,
            max_size=3
        )
        
        self.ng_arm64 = self.cluster.add_nodegroup_capacity("arm64-node-group",
            instance_types=[ec2.InstanceType("m6g.large")],
            desired_size=2,
            min_size=1,
            max_size=3
        )
Example #21
    def __init__(self, scope, id, *, description=None, env=None, tags=None, synthesizer=None, iam_user=None,
                 vpc_id=None, default_capacity=4, default_instance_type='t3a.large'):
        super().__init__(scope, id, description=description, env=env, tags=tags,
                         synthesizer=synthesizer)

        if vpc_id:
            vpc = ec2.Vpc.from_lookup(self, "VPC",
                                      vpc_id=vpc_id)
        else:
            vpc = ec2.Vpc(self, f"kodexa-vpc-{id}",
                          max_azs=2,
                          cidr="10.10.0.0/16",
                          subnet_configuration=[ec2.SubnetConfiguration(
                              subnet_type=ec2.SubnetType.PUBLIC,
                              name="Public",
                              cidr_mask=24
                          ), ec2.SubnetConfiguration(
                              subnet_type=ec2.SubnetType.PRIVATE,
                              name="Private",
                              cidr_mask=24
                          )],
                          nat_gateways=1,
                          )

            core.CfnOutput(self, "Output",
                           value=vpc.vpc_id)

        # Create K8S cluster

        cluster_admin = iam.Role(self, f"kodexa-eks-adminrole-{id}", assumed_by=iam.AccountRootPrincipal())

        cluster = eks.Cluster(self, id=f'kodexa-eks-cluster-{id}', cluster_name=f'kodexa-eks-cluster-{id}',
                              version=eks.KubernetesVersion.V1_17,
                              vpc=vpc,
                              default_capacity_instance=ec2.InstanceType(default_instance_type),
                              default_capacity=default_capacity,
                              masters_role=cluster_admin)

        if iam_user:
            admin_user = iam.User.from_user_name(id='cluster-admin-iam-user', user_name=iam_user, scope=self)
            cluster.aws_auth.add_user_mapping(admin_user, groups=['system:masters'])
Example #22
    def create_eks(self, vpc):
        # create eks cluster with amd nodegroup
        cluster = eks.Cluster(
            self,
            "EKS",
            vpc=vpc,
            version=eks.KubernetesVersion.V1_18,
            default_capacity_instance=ec2.InstanceType("m5.large"),
            default_capacity=1)
        # add arm/graviton nodegroup
        cluster.add_nodegroup_capacity(
            "graviton",
            desired_size=1,
            instance_type=ec2.InstanceType("m6g.large"),
            nodegroup_name="graviton",
            node_role=cluster.default_nodegroup.role)

        # add secret access to eks node role
        cluster.default_nodegroup.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "SecretsManagerReadWrite"))

        # create service account
        sa = self.add_service_account(cluster=cluster,
                                      name="aws-load-balancer-controller",
                                      namespace="kube-system")

        # add helm charts
        ingress = cluster.add_helm_chart(
            "LBIngress",
            chart="aws-load-balancer-controller",
            release="aws-load-balancer-controller",
            repository="https://aws.github.io/eks-charts",
            namespace="kube-system",
            values={
                "clusterName": cluster.cluster_name,
                "serviceAccount.name": "aws-load-balancer-controller",
                "serviceAccount.create": "false"
            })

        return cluster
Example #23
    def __init__(self,scope: core.Construct, id:str, eksname: str, eksvpc: ec2.IVpc, noderole: IRole, eks_adminrole: IRole, region:str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # 1.Create EKS cluster without node group
        self._my_cluster = eks.Cluster(self,'EKS',
                vpc= eksvpc,
                cluster_name=eksname,
                masters_role=eks_adminrole,
                output_cluster_name=True,
                version= eks.KubernetesVersion.V1_19,
                endpoint_access= eks.EndpointAccess.PUBLIC_AND_PRIVATE,
                default_capacity=0
        )

        # 2.Add Managed NodeGroup to EKS, compute resource to run Spark jobs
        _managed_node = self._my_cluster.add_nodegroup_capacity('onDemand-mn',
            nodegroup_name = 'etl-ondemand',
            node_role = noderole,
            desired_size = 1,
            max_size = 5,
            disk_size = 50,
            instance_types = [ec2.InstanceType('m5.xlarge')],
            labels = {'app':'spark', 'lifecycle':'OnDemand'},
            subnets = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE,one_per_az=True),
            tags = {'Name':'OnDemand-'+eksname,'k8s.io/cluster-autoscaler/enabled': 'true', 'k8s.io/cluster-autoscaler/'+eksname: 'owned'}
        )  
    

        # 3. Add Spot managed NodeGroup to EKS (run Spark executors on Spot)
        _spot_node = self._my_cluster.add_nodegroup_capacity('spot-mn',
            nodegroup_name = 'etl-spot',
            node_role = noderole,
            desired_size = 1,
            max_size = 30,
            disk_size = 50,
            instance_types=[ec2.InstanceType("r5.xlarge"),ec2.InstanceType("r4.xlarge"),ec2.InstanceType("r3.xlarge")],
            labels = {'app':'spark', 'lifecycle':'Ec2Spot'},
            capacity_type=eks.CapacityType.SPOT,
            tags = {'Name':'Spot-'+eksname, 'k8s.io/cluster-autoscaler/enabled': 'true', 'k8s.io/cluster-autoscaler/'+eksname: 'owned'}
        )
Example #24
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 params: typing.Optional[Stack_Parameter_Group] = None,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        cluster_vpc = None
        cluster_id = id + '-cluster'
        cluster_subnets = []

        if params is not None:
            cluster_vpc = ec2.Vpc.from_lookup(self,
                                              'VPC',
                                              vpc_id=params.vpc_id)
            cluster_id = params.cluster_id
            cluster_subnets = params.cluster_subnets

            self._cluster_subnets = cluster_subnets

        self._kip_cluster = eks.Cluster(
            self,
            id=cluster_id,
            default_capacity=0,
            kubectl_enabled=True,
            cluster_name=cluster_id,
            core_dns_compute_type=None,
            # masters_role=None,
            output_cluster_name=False,
            output_config_command=True,
            output_masters_role_arn=False,
            # role=None,
            # security_group=None,
            # version=None,
            vpc=cluster_vpc,
            vpc_subnets=[ec2.SubnetSelection(subnets=cluster_subnets)])
Example #25
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc.from_lookup(self, "DefaultVpc", is_default=True)

        eks_cluster = eks.Cluster(self, "EksCluster",
                                  cluster_name="eks-cluster",
                                  # managed by ASG
                                  default_capacity=0,
                                  kubectl_enabled=True,
                                  version=eks.KubernetesVersion.V1_17,
                                  vpc=vpc,
                                  # default vpc only has public subnets
                                  vpc_subnets=[ec2.SubnetSelection(
                                      subnet_type=ec2.SubnetType.PUBLIC, one_per_az=True)]
                                  )

        asg = autoscaling.AutoScalingGroup(self, "EksASG",
                                           min_capacity=1,
                                           max_capacity=2,
                                           instance_type=ec2.InstanceType("t3.small"),
                                           machine_image=eks.EksOptimizedImage(
                                               kubernetes_version="1.17"
                                           ),
                                           update_type=autoscaling.UpdateType.ROLLING_UPDATE,
                                           vpc=vpc
                                           )

        node_group = eks.Nodegroup(self, "NodeGroup",
                                   nodegroup_name="eks-worker",
                                   cluster=eks_cluster,
                                   instance_type=ec2.InstanceType("t3.small"),
                                   min_size=1,
                                   max_size=2,
                                   tags={"Name": "eks-worker"}
                                   )
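Note that the AutoScalingGroup above is created alongside the cluster but never joined to it. In CDK versions that provide the method, connect_auto_scaling_group_capacity can attach such self-managed capacity; a sketch (not part of the original example):

        # Attach the self-managed ASG to the cluster: bootstrap the nodes and map the
        # instance role into aws-auth so the kubelets can register.
        eks_cluster.connect_auto_scaling_group_capacity(
            asg,
            map_role=True
        )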
Example #26
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc,
                 eks_version=aws_eks.KubernetesVersion.V1_18,
                 cluster_name=None,
                 capacity_details='small',
                 fargate_enabled=False,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.vpc = vpc

        self.eks_version = eks_version
        self.cluster_name = cluster_name
        self.capacity_details = capacity_details
        self.fargate_enabled = fargate_enabled

        self.cluster_config = {
            'eks_version': self.eks_version,
            'cluster_name': self.cluster_name,
            'capacity_details': self.capacity_details,
            'fargate_enabled': self.fargate_enabled
        }

        def determine_cluster_size(self):
            if self.cluster_config['capacity_details'] == 'small':
                instance_details = aws_ec2.InstanceType.of(
                    aws_ec2.InstanceClass.BURSTABLE3,
                    aws_ec2.InstanceSize.SMALL)
                instance_count = 3
            elif self.cluster_config['capacity_details'] == 'medium':
                instance_details = aws_ec2.InstanceType.of(
                    aws_ec2.InstanceClass.COMPUTE5, aws_ec2.InstanceSize.LARGE)
                instance_count = 3
            elif self.cluster_config['capacity_details'] == 'large':
                instance_details = aws_ec2.InstanceType.of(
                    aws_ec2.InstanceClass.COMPUTE5, aws_ec2.InstanceSize.LARGE)
                instance_count = 6
            else:
                instance_details = aws_ec2.InstanceType.of(
                    aws_ec2.InstanceClass.BURSTABLE3,
                    aws_ec2.InstanceSize.SMALL)
                instance_count = 1
            return {
                'default_capacity': instance_count,
                'default_capacity_instance': instance_details
            }

        capacity_details = determine_cluster_size(self)

        self.cluster = aws_eks.Cluster(
            self,
            "EKSCluster",
            version=self.cluster_config['eks_version'],
            cluster_name=self.cluster_config['cluster_name'],
            vpc=vpc,
            vpc_subnets=[aws_ec2.SubnetSelection(subnets=vpc.isolated_subnets)],
            **capacity_details)

        # If fargate is enabled, create a fargate profile
        if self.cluster_config['fargate_enabled'] is True:
            self.cluster.add_fargate_profile(
                "FargateEnabled",
                selectors=[
                    aws_eks.Selector(namespace='default',
                                     labels={'fargate': 'enabled'})
                ])
Example #27
 def _create_resource(self):
     self.cluster = eks.Cluster(
         scope=self,
         id=self._cluster_name,
         version=eks.KubernetesVersion.V1_18
     )
Example #28
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
                 runnerrole: iam.IRole, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        clusterAdmin = iam.Role(self,
                                "AdminRole",
                                assumed_by=iam.AccountRootPrincipal())

        cluster = eks.Cluster(self, 'ekscdkdemo', vpc=vpc, default_capacity=0)

        asg_worker_nodes = cluster.add_capacity(
            'eksspot-cdkdemo',
            spot_price="0.0544",
            instance_type=ec2.InstanceType('t3.medium'),
            desired_capacity=2,
            bootstrap_options=eks.BootstrapOptions(
                docker_config_json=read_docker_daemon_resource(
                    'eksbaseresource/docker-daemon.json')))

        alb_rbac = eks.KubernetesResource(
            self,
            'alb-rbac',
            cluster=cluster,
            manifest=read_k8s_resource('eksbaseresource/alb-rbac.yml'))

        asg_worker_nodes.add_to_role_policy(iampolicy)
        cluster.aws_auth.add_masters_role(clusterAdmin)
        cluster.aws_auth.add_masters_role(runnerrole)

        service_account = cluster.add_service_account("external-dns-sa",
                                                      name='external-dns-sa')

        wellnessuser_irsa = cluster.add_service_account("wellnessuser",
                                                        name='wellnessuser')

        service_account.add_to_principal_policy(dnspolicy)

        deployment = {
            "apiVersion": "apps/v1",
            "kind": "Deployment",
            "metadata": {
                "labels": {
                    "app.kubernetes.io/name": "alb-ingress-controller"
                },
                "name": "alb-ingress-controller",
                "namespace": "kube-system"
            },
            "spec": {
                "selector": {
                    "matchLabels": {
                        "app.kubernetes.io/name": "alb-ingress-controller"
                    }
                },
                "template": {
                    "metadata": {
                        "labels": {
                            "app.kubernetes.io/name": "alb-ingress-controller"
                        }
                    },
                    "spec": {
                        "containers": [{
                            "name":
                            "alb-ingress-controller",
                            "args": [
                                "--ingress-class=alb",
                                "--cluster-name=" + cluster.cluster_name
                            ],
                            "image":
                            "docker.io/amazon/aws-alb-ingress-controller:v1.1.8"
                        }],
                        "serviceAccountName":
                        "alb-ingress-controller"
                    }
                }
            }
        }
        alb_service = cluster.add_resource('alb-ingress-controller',
                                           deployment)
        external_dns = eks.KubernetesResource(
            self,
            'external-dns',
            cluster=cluster,
            manifest=read_k8s_resource('eksbaseresource/external-dns.yml'))
        alb_service.node.add_dependency(alb_rbac)
        external_dns.node.add_dependency(service_account)
        core.CfnOutput(self,
                       'ClusterAdmin_Role_ARN',
                       value=clusterAdmin.role_arn)
        core.CfnOutput(
            self,
            'Getupdateeks',
            value="aws eks update-kubeconfig --name " + cluster.cluster_name +
            " --region ap-northeast-1 --role-arn " + clusterAdmin.role_arn)

        wellness_kns_stream = kinesis.Stream(
            self,
            'WellnessKnsStream',
            retention_period=core.Duration.hours(24),
            shard_count=1,
            stream_name='event.member.appointment.devInfo')

        wellness_kns_stream.grant_read_write(wellnessuser_irsa)

        core.CfnOutput(self,
                       'kinesis_stream_arn',
                       value=wellness_kns_stream.stream_arn)

        core.CfnOutput(self,
                       'kinesis_stream_name',
                       value=wellness_kns_stream.stream_name)
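iampolicy and dnspolicy are referenced above but defined earlier in the original stack. Illustrative stand-ins, declared before first use, might look like the following (the action lists are assumptions; the real ALB ingress controller and external-dns policies are considerably broader):

        iampolicy = iam.PolicyStatement(
            actions=["elasticloadbalancing:*", "ec2:Describe*"],
            resources=["*"])

        dnspolicy = iam.PolicyStatement(
            actions=["route53:ChangeResourceRecordSets",
                     "route53:ListHostedZones",
                     "route53:ListResourceRecordSets"],
            resources=["*"])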
Example #29
    def __init__(self, scope: core.Construct, id: str, vpc, config,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")
        name = config['ec2']['name']
        key = config['ec2']['ssh_key']

        eks_cluster = eks.Cluster(
            self,
            'eks-cluster',
            version=eks.KubernetesVersion.V1_19,
            cluster_name=prj_name + env_name + '-eks-cluster',
            vpc=vpc,
            vpc_subnets=[
                ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)
            ],
            output_cluster_name=True,
            default_capacity=0)

        # This code block will provision worker nodes with launch configuration
        eks_cluster.add_auto_scaling_group_capacity(
            'spot-asg-az-a',
            auto_scaling_group_name=prj_name + env_name + '-spot-az-a',
            min_capacity=1,
            max_capacity=1,
            desired_capacity=1,
            key_name=f"{key}",
            instance_type=ec2.InstanceType('t3.small'),
            vpc_subnets=ec2.SubnetSelection(
                availability_zones=["ap-southeast-1a"],
                subnet_type=ec2.SubnetType.PRIVATE),
            bootstrap_options={
                "kubelet_extra_args":
                "--node-labels=node.kubernetes.io/lifecycle=spot,daemonset=active,app=general --eviction-hard imagefs.available<15% --feature-gates=CSINodeInfo=true,CSIDriverRegistry=true,CSIBlockVolume=true,ExpandCSIVolumes=true"
            })

        # This code block will provision worker nodes with launch templates
        eks_cluster.add_nodegroup_capacity(
            'spot-nodegroup-az-a',
            nodegroup_name=prj_name + env_name + '-spot-az-a',
            instance_types=[
                ec2.InstanceType('t3a.small'),
                ec2.InstanceType('t3.small')
            ],
            disk_size=100,
            min_size=1,
            max_size=1,
            desired_size=1,
            capacity_type=eks.CapacityType.SPOT,
            subnets=ec2.SubnetSelection(availability_zones=["ap-southeast-1a"],
                                        subnet_type=ec2.SubnetType.PRIVATE),
        )

        eks_cluster.add_nodegroup_capacity(
            'spot-nodegroup-az-b',
            nodegroup_name=prj_name + env_name + '-spot-az-b',
            instance_types=[
                ec2.InstanceType('t3a.small'),
                ec2.InstanceType('t3.small')
            ],
            min_size=1,
            max_size=1,
            desired_size=1,
            capacity_type=eks.CapacityType.SPOT,
            subnets=ec2.SubnetSelection(availability_zones=["ap-southeast-1b"],
                                        subnet_type=ec2.SubnetType.PRIVATE))

        eks_cluster.add_nodegroup_capacity(
            'spot-nodegroup-az-c',
            nodegroup_name=prj_name + env_name + '-spot-az-c',
            instance_types=[
                ec2.InstanceType('t3a.small'),
                ec2.InstanceType('t3.small')
            ],
            min_size=1,
            max_size=1,
            desired_size=1,
            capacity_type=eks.CapacityType.SPOT,
            subnets=ec2.SubnetSelection(availability_zones=["ap-southeast-1c"],
                                        subnet_type=ec2.SubnetType.PRIVATE))
Example #30
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 stack_log_level, vpc, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create EKS Cluster Role
        # https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html

        self._eks_cluster_svc_role = _iam.Role(
            self,
            "c_SvcRole",
            assumed_by=_iam.ServicePrincipal("eks.amazonaws.com"),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEKSClusterPolicy"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEKS_CNI_Policy"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEKSVPCResourceController")
            ])

        self._eks_node_role = _iam.Role(
            self,
            "c_NodeRole",
            assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com"),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEKSWorkerNodePolicy"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEC2ContainerRegistryReadOnly"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEKS_CNI_Policy"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSSMManagedInstanceCore"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonS3FullAccess"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSQSFullAccess")
                # Yes, yes...I know. :)
            ])

        c_admin_role = _iam.Role(
            self,
            "c_AdminRole",
            assumed_by=_iam.CompositePrincipal(
                _iam.AccountRootPrincipal(),
                _iam.ServicePrincipal("ec2.amazonaws.com")))
        c_admin_role.add_to_policy(
            _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                                 actions=["eks:DescribeCluster"],
                                 resources=["*"]))

        # Create Security Group for EKS Cluster SG
        self.eks_cluster_sg = _ec2.SecurityGroup(
            self,
            "eksClusterSG",
            vpc=vpc,
            description="EKS Cluster security group",
            allow_all_outbound=True,
        )
        cdk.Tags.of(self.eks_cluster_sg).add("Name", "eks_cluster_sg")

        # https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
        self.eks_cluster_sg.add_ingress_rule(
            peer=self.eks_cluster_sg,
            connection=_ec2.Port.all_traffic(),
            description="Allow incoming within SG")

        clust_name = "c_1_event_processor"

        self.eks_cluster_1 = _eks.Cluster(
            self,
            f"{clust_name}",
            cluster_name=f"{clust_name}",
            version=_eks.KubernetesVersion.V1_18,
            vpc=vpc,
            vpc_subnets=[
                _ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PUBLIC),
                _ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PRIVATE)
            ],
            default_capacity=0,
            masters_role=c_admin_role,
            role=self._eks_cluster_svc_role,
            security_group=self.eks_cluster_sg,
            endpoint_access=_eks.EndpointAccess.PUBLIC
            # endpoint_access=_eks.EndpointAccess.PUBLIC_AND_PRIVATE
        )

        node_grp_1 = self.eks_cluster_1.add_nodegroup_capacity(
            f"n_g_{clust_name}",
            nodegroup_name=f"{clust_name}_n_g",
            instance_types=[
                _ec2.InstanceType("t3.medium"),
                _ec2.InstanceType("t3.large"),
            ],
            disk_size=20,
            min_size=1,
            max_size=6,
            desired_size=2,
            labels={
                "app": "miztiik_ng",
                "lifecycle": "on_demand",
                "compute_provider": "ec2"
            },
            subnets=_ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PUBLIC),
            ami_type=_eks.NodegroupAmiType.AL2_X86_64,
            # remote_access=_eks.NodegroupRemoteAccess(ssh_key_name="eks-ssh-keypair"),
            capacity_type=_eks.CapacityType.ON_DEMAND,
            node_role=self._eks_node_role
            # bootstrap_options={"kubelet_extra_args": "--node-labels=node.kubernetes.io/lifecycle=spot,daemonset=active,app=general --eviction-hard imagefs.available<15% --feature-gates=CSINodeInfo=true,CSIDriverRegistry=true,CSIBlockVolume=true,ExpandCSIVolumes=true"}
        )

        # This code block will provision worker nodes with Fargate Profile configuration
        fargate_n_g_3 = self.eks_cluster_1.add_fargate_profile(
            "FargateEnabled",
            fargate_profile_name="miztiik_n_g_fargate",
            selectors=[
                _eks.Selector(namespace="default",
                              labels={"fargate": "enabled"})
            ])

        self.add_cluster_admin()
        # We like to use the Kubernetes Dashboard
        self.enable_dashboard()

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_1 = cdk.CfnOutput(self,
                                 "eksClusterAdminRole",
                                 value=f"{c_admin_role.role_name}",
                                 description="EKS Cluster Admin Role")

        output_2 = cdk.CfnOutput(
            self,
            "eksClusterSvcRole",
            value=f"{self._eks_cluster_svc_role.role_name}",
            description="EKS Cluster Service Role")