def create_template(self):
    """Create template (main function called by Stacker).

    Builds the EKS control-plane stack: a security group used for
    cluster <-> worker-node communication, the IAM service role that the
    EKS service assumes, and the cluster itself. The security group,
    cluster name/endpoint, VPC id and subnet list are exported so
    downstream (worker-node) stacks can import them.
    """
    template = self.template
    variables = self.get_variables()
    template.add_version("2010-09-09")
    template.add_description("Kubernetes Master via EKS - V1.0.0")

    # Resources
    # Security group attached to the control-plane ENIs; worker-node
    # stacks reference it via the exported output below.
    ccpsecuritygroup = template.add_resource(
        ec2.SecurityGroup(
            "ClusterControlPlaneSecurityGroup",
            GroupDescription="Cluster communication with worker nodes",
            Tags=[
                # kubernetes.io/cluster/<name>=owned lets EKS/Kubernetes
                # discover and manage this security group.
                {
                    "Key": Sub("kubernetes.io/cluster/${EksClusterName}"),
                    "Value": "owned",
                },
                {"Key": "Product", "Value": "Kubernetes"},
                {"Key": "Project", "Value": "eks"},
                {
                    "Key": "Name",
                    "Value": Sub("${EksClusterName}-sg-worker-nodes"),
                },
            ],
            VpcId=variables["VPC"].ref,
        ))
    template.add_output(
        Output(
            ccpsecuritygroup.title,
            Description="Cluster communication with worker nodes",
            Export=Export(
                Sub("${AWS::StackName}-ControlPlaneSecurityGroup")),
            Value=ccpsecuritygroup.ref(),
        ))

    # Role assumed by the EKS service to manage cluster infrastructure.
    eksservicerole = template.add_resource(
        iam.Role(
            "EksServiceRole",
            AssumeRolePolicyDocument=make_simple_assume_policy(
                "eks.amazonaws.com"),
            # Fix: AmazonEKSServicePolicy was missing here; the EKS
            # service role requires it alongside AmazonEKSClusterPolicy
            # (matches the sibling create_template in this file).
            ManagedPolicyArns=[
                IAM_POLICY_ARN_PREFIX + "AmazonEKSClusterPolicy",
                IAM_POLICY_ARN_PREFIX + "AmazonEKSServicePolicy",
            ],
            Policies=[
                iam.Policy(
                    PolicyName="EksServiceRolePolicy",
                    PolicyDocument=PolicyDocument(Statement=[
                        # Allow EKS to create/configure the ELB
                        # service-linked role the first time a load
                        # balancer is provisioned for the cluster.
                        Statement(
                            Action=[
                                awacs.iam.CreateServiceLinkedRole,
                                awacs.iam.PutRolePolicy,
                            ],
                            Condition=Condition(
                                StringLike(
                                    "iam:AWSServiceName",
                                    "elasticloadbalancing.amazonaws.com",
                                )),
                            Effect=Allow,
                            Resource=[
                                Sub("arn:aws:iam::${AWS::AccountId}:role/"
                                    "aws-service-role/"
                                    "elasticloadbalancing.amazonaws.com/"
                                    "AWSServiceRoleForElasticLoadBalancing*"
                                    )
                            ],
                        )
                    ]),
                )
            ],
        ))

    ekscluster = template.add_resource(
        eks.Cluster(
            "EksCluster",
            Name=variables["EksClusterName"].ref,
            Version=variables["EksVersion"].ref,
            RoleArn=eksservicerole.get_att("Arn"),
            ResourcesVpcConfig=eks.ResourcesVpcConfig(
                SecurityGroupIds=[ccpsecuritygroup.ref()],
                SubnetIds=variables["EksSubnets"].ref,
            ),
        ))
    template.add_output(
        Output(
            "%sName" % ekscluster.title,
            Description="EKS Cluster Name",
            Export=Export(
                Sub("${AWS::StackName}-%sName" % ekscluster.title)),
            Value=ekscluster.ref(),
        ))
    template.add_output(
        Output(
            "%sEndpoint" % ekscluster.title,
            Description="EKS Cluster Endpoint",
            Export=Export(
                Sub("${AWS::StackName}-%sEndpoint" % ekscluster.title)),
            Value=ekscluster.get_att("Endpoint"),
        ))

    # Additional Outputs
    template.add_output(
        Output(
            "VpcId",
            Description="EKS Cluster VPC Id",
            Export=Export(Sub("${AWS::StackName}-VpcId")),
            Value=variables["VPC"].ref,
        ))
    template.add_output(
        Output(
            "Subnets",
            Description="EKS Cluster Subnets",
            Export=Export(Sub("${AWS::StackName}-Subnets")),
            Value=Join(",", variables["EksSubnets"].ref),
        ))
template=template, GroupDescription="EKS control plane security group.", VpcId=Ref(vpc), Tags=Tags(Name=Join("-", [Ref("AWS::StackName"), "eks-cluster"]), ), ) cluster = eks.Cluster( "EksCluster", template=template, # Unlike most other resources in the stack, we hard-code the cluster name # both so it's easy to find and so it cannot be accidentally recreated # (for example if the ResourcesVpcConfig is changed). Name=Sub("${AWS::StackName}-cluster"), ResourcesVpcConfig=eks.ResourcesVpcConfig( SubnetIds=[ # For load balancers Ref(public_subnet_a), Ref(public_subnet_b), # For worker nodes Ref(private_subnet_a), Ref(private_subnet_b), ], SecurityGroupIds=[Ref(eks_security_group)], ), RoleArn=GetAtt(eks_service_role, "Arn"), ) eks.Nodegroup( # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-nodegroup.html "Nodegroup", template=template, # For some reason, CloudFormation doesn't figure out that it needs to create
def create_template(self):
    """Create template (main function called by Stacker).

    Defines the EKS control-plane resources (control-plane security
    group, EKS service role, cluster) and exports the identifiers that
    downstream stacks import: security group, cluster name/endpoint,
    VPC id and subnet list.
    """
    template = self.template
    variables = self.get_variables()

    template.add_version('2010-09-09')
    template.add_description('Kubernetes Master via EKS - V1.0.0')

    # Resources
    # Tags for the control-plane SG; the kubernetes.io/cluster tag
    # marks the group as owned by this cluster.
    control_plane_sg_tags = [
        {'Key': Sub('kubernetes.io/cluster/${EksClusterName}'),
         'Value': 'owned'},
        {'Key': 'Product', 'Value': 'Kubernetes'},
        {'Key': 'Project', 'Value': 'eks'},
        {'Key': 'Name', 'Value': Sub('${EksClusterName}-sg-worker-nodes')},
    ]
    ccpsecuritygroup = template.add_resource(ec2.SecurityGroup(
        'ClusterControlPlaneSecurityGroup',
        GroupDescription='Cluster communication with worker nodes',
        Tags=control_plane_sg_tags,
        VpcId=variables['VPC'].ref,
    ))
    template.add_output(Output(
        ccpsecuritygroup.title,
        Description='Cluster communication with worker nodes',
        Export=Export(Sub('${AWS::StackName}-ControlPlaneSecurityGroup')),
        Value=ccpsecuritygroup.ref(),
    ))

    # Inline statement letting EKS create the ELB service-linked role.
    elb_slr_statement = Statement(
        Action=[awacs.iam.CreateServiceLinkedRole,
                awacs.iam.PutRolePolicy],
        Condition=Condition(
            StringLike(
                'iam:AWSServiceName',
                'elasticloadbalancing.amazonaws.com'  # noqa
            )
        ),
        Effect=Allow,
        Resource=[
            Sub('arn:aws:iam::${AWS::AccountId}:role/'  # noqa
                'aws-service-role/'
                'elasticloadbalancing.amazonaws.com/'  # noqa
                'AWSServiceRoleForElasticLoadBalancing*')  # noqa
        ],
    )
    # Role assumed by the EKS service itself.
    eksservicerole = template.add_resource(iam.Role(
        'EksServiceRole',
        AssumeRolePolicyDocument=make_simple_assume_policy(
            'eks.amazonaws.com'
        ),
        ManagedPolicyArns=[
            IAM_POLICY_ARN_PREFIX + 'AmazonEKSClusterPolicy',
            IAM_POLICY_ARN_PREFIX + 'AmazonEKSServicePolicy',
        ],
        Policies=[iam.Policy(
            PolicyName='EksServiceRolePolicy',
            PolicyDocument=PolicyDocument(Statement=[elb_slr_statement]),
        )],
    ))

    ekscluster = template.add_resource(eks.Cluster(
        'EksCluster',
        Name=variables['EksClusterName'].ref,
        Version=variables['EksVersion'].ref,
        RoleArn=eksservicerole.get_att('Arn'),
        ResourcesVpcConfig=eks.ResourcesVpcConfig(
            SecurityGroupIds=[ccpsecuritygroup.ref()],
            SubnetIds=variables['EksSubnets'].ref,
        ),
    ))
    cluster_title = ekscluster.title
    template.add_output(Output(
        "%sName" % cluster_title,
        Description='EKS Cluster Name',
        Export=Export(Sub("${AWS::StackName}-%sName" % cluster_title)),
        Value=ekscluster.ref(),
    ))
    template.add_output(Output(
        "%sEndpoint" % cluster_title,
        Description='EKS Cluster Endpoint',
        Export=Export(Sub("${AWS::StackName}-%sEndpoint" % cluster_title)),
        Value=ekscluster.get_att('Endpoint'),
    ))

    # Additional Outputs
    template.add_output(Output(
        'VpcId',
        Description='EKS Cluster VPC Id',
        Export=Export(Sub('${AWS::StackName}-VpcId')),
        Value=variables['VPC'].ref,
    ))
    template.add_output(Output(
        'Subnets',
        Description='EKS Cluster Subnets',
        Export=Export(Sub('${AWS::StackName}-Subnets')),
        Value=Join(',', variables['EksSubnets'].ref),
    ))
def add_resources(self):
    """Add all EKS resources to the template.

    Creates the control-plane SG, cluster IAM role, the EKS cluster,
    worker-node SG and ingress/egress rules, the worker instance
    role/profile, launch configuration and Auto Scaling group.
    Each resource is stored as an attribute on ``self`` so later code
    can reference it. Relies on parameters/attributes defined elsewhere
    on this class (VPC/subnet refs, environment_parameters, base_tags,
    ASG sizing parameters) — not visible in this block.
    """
    # Security group attached to the EKS control-plane ENIs.
    self.EKSControlPlaneSG = self.template.add_resource(
        ec2.SecurityGroup(
            "EKSControlPlaneSG",
            GroupDescription=
            "Allow communication between WorkerNodes and EKS",
            VpcId=Ref(self.SharedServicesVpcId),
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-SS-EksControlPlane-SG"),
        ))
    # IAM role the EKS service assumes to manage the cluster.
    self.EKSClusterRole = self.template.add_resource(
        Role(
            "EKSClusterRole",
            AssumeRolePolicyDocument=Policy(Statement=[
                Statement(Effect=Allow,
                          Action=[AssumeRole],
                          Principal=Principal("Service",
                                              ["eks.amazonaws.com"]))
            ]),
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
                "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
            ],
        ))
    # The EKS cluster itself, placed in the shared-services public
    # subnets and guarded by the control-plane SG above.
    self.EKSCluster = self.template.add_resource(
        eks.Cluster(
            "EKSCluster",
            DependsOn=["EKSControlPlaneSG", "EKSClusterRole"],
            Name=self.environment_parameters["ClientEnvironmentKey"] +
            "-SS-EKS",
            RoleArn=GetAtt(self.EKSClusterRole, "Arn"),
            Version=Ref(self.EksClusterVersion),
            ResourcesVpcConfig=eks.ResourcesVpcConfig(
                SecurityGroupIds=[Ref(self.EKSControlPlaneSG)],
                SubnetIds=[
                    Ref(self.SharedServicesPubSubnet1),
                    Ref(self.SharedServicesPubSubnet2)
                ],
            ),
        ))
    # Security group for worker-node EC2 instances; the
    # kubernetes.io/cluster/<name>=owned tag marks it as managed by
    # this cluster.
    self.WorkerNodeEc2SG = self.template.add_resource(
        ec2.SecurityGroup(
            "WorkerNodeEc2SG",
            DependsOn=["EKSCluster"],
            GroupDescription=
            "Allow communication between WorkerNodes and EKS",
            VpcId=Ref(self.SharedServicesVpcId),
            Tags=self.base_tags +
            Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                 "-SS-EksWorkerNodes-Ec2SG") +
            Tags({
                "kubernetes.io/cluster/" +
                self.environment_parameters["ClientEnvironmentKey"] +
                "-SS-EKS":
                "owned"
            }),
        ))
    # Instance role for worker nodes: node, CNI and ECR read-only
    # managed policies.
    self.WorkerNodeInstanceRole = self.template.add_resource(
        Role(
            "WorkerNodeInstanceRole",
            AssumeRolePolicyDocument=Policy(Statement=[
                Statement(Effect=Allow,
                          Action=[AssumeRole],
                          Principal=Principal("Service",
                                              ["ec2.amazonaws.com"]))
            ]),
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
                "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
                "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
            ],
        ))
    self.WorkerNodeInstanceProfile = self.template.add_resource(
        InstanceProfile(
            "WorkerNodeInstanceProfile",
            Path="/",
            Roles=[Ref(self.WorkerNodeInstanceRole)],
        ))
    # Node-to-node: allow all traffic between worker nodes.
    self.WorkerNodeEc2SGIngress = self.template.add_resource(
        ec2.SecurityGroupIngress(
            "WorkerNodeEc2SGIngress",
            DependsOn=["WorkerNodeEc2SG"],
            GroupId=Ref(self.WorkerNodeEc2SG),
            IpProtocol="-1",
            FromPort=0,
            ToPort=65535,
            SourceSecurityGroupId=Ref(self.WorkerNodeEc2SG),
        ))
    # Control plane -> worker kubelet/high ports (1025-65535).
    self.WorkerNodeEc2SGIngressFromEKSControlPlane = self.template.add_resource(
        ec2.SecurityGroupIngress(
            "WorkerNodeEc2SGIngressFromEKSControlPlane",
            DependsOn=["WorkerNodeEc2SG"],
            GroupId=Ref(self.WorkerNodeEc2SG),
            IpProtocol="tcp",
            FromPort=1025,
            ToPort=65535,
            SourceSecurityGroupId=Ref(self.EKSControlPlaneSG),
        ))
    # Matching egress from the control plane to the workers.
    self.EksControlPlaneEgressToWorkerNodes = self.template.add_resource(
        ec2.SecurityGroupEgress(
            "EksControlPlaneEgressToWorkerNodes",
            DependsOn=["WorkerNodeEc2SG"],
            GroupId=Ref(self.EKSControlPlaneSG),
            IpProtocol="tcp",
            FromPort=1025,
            ToPort=65535,
            DestinationSecurityGroupId=Ref(self.WorkerNodeEc2SG),
        ))
    # HTTPS (443) in both directions between control plane and workers.
    self.WorkerNodeEc2SG443IngressFromEKSControlPlane = self.template.add_resource(
        ec2.SecurityGroupIngress(
            "WorkerNodeEc2SG443IngressFromEKSControlPlane",
            DependsOn=["WorkerNodeEc2SG"],
            GroupId=Ref(self.WorkerNodeEc2SG),
            IpProtocol="tcp",
            FromPort=443,
            ToPort=443,
            SourceSecurityGroupId=Ref(self.EKSControlPlaneSG),
        ))
    self.EKSControlPlaneSG443IngressFromWorkerNode = self.template.add_resource(
        ec2.SecurityGroupIngress(
            "EKSControlPlaneSG443IngressFromWorkerNode",
            DependsOn=["WorkerNodeEc2SG"],
            GroupId=Ref(self.EKSControlPlaneSG),
            IpProtocol="tcp",
            FromPort=443,
            ToPort=443,
            SourceSecurityGroupId=Ref(self.WorkerNodeEc2SG),
        ))
    self.EksControlPlane443EgressToWorkerNodes = self.template.add_resource(
        ec2.SecurityGroupEgress(
            "EksControlPlane443EgressToWorkerNodes",
            DependsOn=["WorkerNodeEc2SG"],
            GroupId=Ref(self.EKSControlPlaneSG),
            IpProtocol="tcp",
            FromPort=443,
            ToPort=443,
            DestinationSecurityGroupId=Ref(self.WorkerNodeEc2SG),
        ))
    # Launch configuration for worker nodes; the user-data script runs
    # the EKS bootstrap script so instances join the cluster on boot.
    self.WorkerNodeLaunchConfiguration = self.template.add_resource(
        LaunchConfiguration(
            "WorkerNodeLaunchConfiguration",
            ImageId=Ref(self.WorkerNodeImageId),
            InstanceType=Ref(self.WorkerNodeInstanceType),
            IamInstanceProfile=Ref(self.WorkerNodeInstanceProfile),
            KeyName=Ref(self.WorkerNodeKeyName),
            SecurityGroups=[Ref(self.WorkerNodeEc2SG)],
            UserData=Base64(
                Join('', [
                    "#!/bin/bash \n",
                    # NOTE(review): no comma after the next literal, so
                    # it is implicitly concatenated with the ClusterName
                    # line. Harmless because Join('') uses an empty
                    # delimiter, but confirm it was intentional.
                    "set -o xtrace \n"
                    "ClusterName=\"" +
                    self.environment_parameters["ClientEnvironmentKey"] +
                    "-SS-EKS" + "\" \n",
                    # Adjacent literals: renders BootstrapArguments="".
                    "BootstrapArguments=\""
                    "\" \n",
                    "/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments} \n"
                ]))))
    # Worker ASG spanning the private subnets; the final True on each
    # AutoScalingTag propagates the tag to launched instances.
    self.WorkerNodeAutoScalingGroup = self.template.add_resource(
        AutoScalingGroup(
            "WorkerNodeAutoscalingGroup",
            AutoScalingGroupName=self.
            environment_parameters["ClientEnvironmentKey"] +
            "-SS-EksWorkerNodeAutoScalingGroup",
            LaunchConfigurationName=Ref(
                self.WorkerNodeLaunchConfiguration),
            MaxSize=Ref(self.WorkerNodeASGGroupMaxSize),
            MinSize=Ref(self.WorkerNodeASGGroupMinSize),
            DesiredCapacity=Ref(self.WorkerNodeASGGroupDesiredSize),
            HealthCheckType=Ref(self.WorkerNodeASGHealthCheckType),
            HealthCheckGracePeriod=Ref(
                self.WorkerNodeASGHealthCheckGracePeriod),
            Cooldown=Ref(self.WorkerNodeASGCoolDown),
            VPCZoneIdentifier=[
                Ref(self.SharedServicesPrivSubnet1),
                Ref(self.SharedServicesPrivSubnet2)
            ],
            Tags=[
                AutoScalingTag(
                    "Name",
                    self.environment_parameters["ClientEnvironmentKey"] +
                    "-SS-EKS" + "-WorkerNodeGroup-Node", True),
                AutoScalingTag(
                    "kubernetes.io/cluster/" +
                    self.environment_parameters["ClientEnvironmentKey"] +
                    "-SS-EKS", "owned", True),
                AutoScalingTag(
                    "Environment",
                    self.environment_parameters["EnvironmentName"], True),
                AutoScalingTag(
                    "ResourceOwner",
                    self.environment_parameters["ResourceOwner"], True),
                AutoScalingTag(
                    "ClientCode",
                    self.environment_parameters["ClientEnvironmentKey"],
                    True),
            ],
        ))