def getTemplate(self):
    """Add an AutoScalingGroup to self.template and return the template.

    Builds an ELB-attached ASG ("ELBASG", HealthCheckType "ELB") when
    self.elb is set, otherwise a standalone ASG ("NoELBASG",
    HealthCheckType "EC2").  Sizing and rolling-update settings come from
    the self.asgConfig mapping.  The two variants previously duplicated
    every keyword argument; the shared settings are now built once.
    """
    cfg = self.asgConfig
    # Keyword arguments common to both the ELB and non-ELB variants.
    common = dict(
        # NOTE(review): 'DesiredCapactiy' is misspelled but must match the
        # key actually present in the config source -- confirm before
        # renaming it here and in the config.
        DesiredCapacity=cfg['DesiredCapactiy'],
        MinSize=cfg['MinSize'],
        MaxSize=cfg['MaxSize'],
        LaunchConfigurationName=self.launchConfigName,
        HealthCheckGracePeriod=cfg['HealthCheckGracePeriod'],
        AvailabilityZones=[self.availZone],
        VPCZoneIdentifier=[self.subnet],
        Tags=[
            Tag("Name", Join("-", [self.friendlyName, self.branch]), True)
        ],
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                MaxBatchSize=cfg['MaxBatchSize'],
                MinInstancesInService=cfg['MinInstancesInService'],
                PauseTime=cfg['PauseTime'],
                WaitOnResourceSignals=cfg['WaitOnResourceSignals'])))
    if self.elb is not None:
        # ELB variant: health checks delegated to the load balancer.
        self.template.add_resource(
            AutoScalingGroup(
                "ELBASG",
                HealthCheckType="ELB",
                LoadBalancerNames=[self.elb],
                **common))
    else:
        # No load balancer: plain EC2 status-check health checking.
        self.template.add_resource(
            AutoScalingGroup(
                "NoELBASG",
                HealthCheckType="EC2",
                **common))
    return self.template
def test_json(self):
    """UpdatePolicy round-trips its rolling-update settings via to_dict()."""
    policy = UpdatePolicy(
        AutoScalingRollingUpdate=AutoScalingRollingUpdate(
            MaxBatchSize=2,
            MinInstancesInService=1,
            PauseTime='PT90S',
            WaitOnResourceSignals=True,
        )
    )
    rolling = policy.to_dict()['AutoScalingRollingUpdate']
    self.assertEqual(rolling['MaxBatchSize'], 2)
    self.assertEqual(rolling['MinInstancesInService'], 1)
    self.assertEqual(rolling['PauseTime'], 'PT90S')
    self.assertTrue(rolling['WaitOnResourceSignals'])
def test_json(self):
    """Serialized UpdatePolicy exposes the values it was constructed with."""
    rolling_update = AutoScalingRollingUpdate(
        MaxBatchSize=2,
        MinInstancesInService=1,
        PauseTime="PT90S",
        WaitOnResourceSignals=True,
    )
    serialized = UpdatePolicy(
        AutoScalingRollingUpdate=rolling_update).to_dict()
    inner = serialized["AutoScalingRollingUpdate"]
    self.assertEqual(inner["MaxBatchSize"], 2)
    self.assertEqual(inner["MinInstancesInService"], 1)
    self.assertEqual(inner["PauseTime"], "PT90S")
    self.assertTrue(inner["WaitOnResourceSignals"])
def test_json(self):
    """to_dict() preserves every AutoScalingRollingUpdate property."""
    policy = UpdatePolicy(
        AutoScalingRollingUpdate=AutoScalingRollingUpdate(
            MaxBatchSize=2,
            MinInstancesInService=1,
            PauseTime='PT90S',
            WaitOnResourceSignals=True))
    data = policy.to_dict()['AutoScalingRollingUpdate']
    # Compare the scalar properties via a single expected mapping.
    expected = {
        'MaxBatchSize': 2,
        'MinInstancesInService': 1,
        'PauseTime': 'PT90S',
    }
    for key, value in expected.items():
        self.assertEqual(data[key], value)
    self.assertTrue(data['WaitOnResourceSignals'])
def _default_update_policy():
    """Return the default rolling-update policy: keep one instance in
    service during the update and wait on resource signals."""
    rolling = AutoScalingRollingUpdate(
        MinInstancesInService=1,
        WaitOnResourceSignals=True,
    )
    return UpdatePolicy(AutoScalingRollingUpdate=rolling)
def create_autoscaling_resource(name, launchConfig):
    """Build a simple AutoScalingGroup resource.

    Arguments:
        name {String} -- logical-name prefix and ASG name
        launchConfig {LaunchConfiguration} -- launch configuration to reference

    Returns:
        troposphere resource -- the AutoScalingGroup
    """
    # Replace the whole group on change, otherwise roll one instance at a
    # time while keeping one in service.
    update_policy = UpdatePolicy(
        AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
            WillReplace=True,
        ),
        AutoScalingRollingUpdate=AutoScalingRollingUpdate(
            PauseTime='PT5M',
            MinInstancesInService="1",
            MaxBatchSize='1',
            WaitOnResourceSignals=True,
        ),
    )
    return AutoScalingGroup(
        name + "AutoScalingGroup",
        AutoScalingGroupName=name,
        AvailabilityZones=[Ref(zone1), Ref(zone2)],
        DesiredCapacity=10,
        HealthCheckType="EC2",
        LaunchConfigurationName=Ref(launchConfig),
        MinSize=9,
        MaxSize=10,
        Tags=Tags(Application=Ref('AWS::StackId')),
        UpdatePolicy=update_policy,
    )
def _get_autoscaling_group_parameters(self, chain_context, launch_config_name):
    """Assemble keyword arguments for the AutoScalingGroup resource.

    Sizing and subnet values come from template parameters; a target group
    is attached when present in the chain metadata, and a replacing +
    rolling update policy is added when self.use_update_policy is set.
    """
    params = {
        # Not really required in this case (yet)
        'AvailabilityZones': Ref("AvailabilityZones"),
        'LaunchConfigurationName': Ref(launch_config_name),
        'MinSize': Ref("MinSize"),
        'MaxSize': Ref("MaxSize"),
        'VPCZoneIdentifier': Ref("PrivateSubnets"),
        'Tags': [ASTag('Name', self.name, True)],
    }

    if META_TARGET_GROUP_NAME in chain_context.metadata:
        params['TargetGroupARNs'] = [
            Ref(chain_context.metadata[META_TARGET_GROUP_NAME])
        ]

    if self.use_update_policy:
        params['UpdatePolicy'] = UpdatePolicy(
            AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
                WillReplace=True,
            ),
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                PauseTime='PT5M',
                MinInstancesInService="1",
                MaxBatchSize='1',
                WaitOnResourceSignals=True,
            ),
        )

    return params
def test_AutoScalingRollingUpdate_all_defaults(self):
    """A rolling-update policy with no properties set must still validate."""
    default_policy = UpdatePolicy(
        AutoScalingRollingUpdate=AutoScalingRollingUpdate())
    asg = AutoScalingGroup(
        'mygroup',
        AvailabilityZones=['eu-west-1a', 'eu-west-1b'],
        LaunchConfigurationName="I'm a test",
        MinSize="1",
        MaxSize="1",
        UpdatePolicy=default_policy)
    self.assertTrue(asg.validate())
def test_works(self):
    """Attribute access on UpdatePolicy reflects constructor arguments."""
    rolling = AutoScalingRollingUpdate(
        MaxBatchSize=2,
        MinInstancesInService=1,
        PauseTime='PT90S',
        WaitOnResourceSignals=True)
    policy = UpdatePolicy(AutoScalingRollingUpdate=rolling)
    update = policy.AutoScalingRollingUpdate
    self.assertEqual(update.MaxBatchSize, 2)
    self.assertEqual(update.MinInstancesInService, 1)
    self.assertEqual(update.PauseTime, 'PT90S')
    self.assertTrue(update.WaitOnResourceSignals)
def test_AutoScalingRollingUpdate_all_defaults(self):
    """validate() returns None for an all-defaults rolling-update policy."""
    policy = UpdatePolicy(
        AutoScalingRollingUpdate=AutoScalingRollingUpdate())
    asg = AutoScalingGroup(
        "mygroup",
        AvailabilityZones=["eu-west-1a", "eu-west-1b"],
        LaunchConfigurationName="I'm a test",
        MinSize="1",
        MaxSize="1",
        UpdatePolicy=policy,
    )
    self.assertIsNone(asg.validate())
def test_mininstances(self):
    """MinInstancesInService equal to MaxSize must fail validation."""
    policy = UpdatePolicy(
        AutoScalingRollingUpdate=AutoScalingRollingUpdate(
            PauseTime='PT1M5S',
            MinInstancesInService='1',
            MaxBatchSize='1'))
    asg = AutoScalingGroup(
        'mygroup',
        LaunchConfigurationName="I'm a test",
        MinSize=1,
        MaxSize=1,
        UpdatePolicy=policy)
    with self.assertRaises(ValueError):
        self.assertTrue(asg.validate())
def test_mininstances_maxsize_is_ref(self):
    """The MinInstancesInService check is skipped when MaxSize is a Ref."""
    max_size_param = Parameter("ParamMaxSize", Type="String")
    policy = UpdatePolicy(
        AutoScalingRollingUpdate=AutoScalingRollingUpdate(
            PauseTime='PT1M5S',
            MinInstancesInService='2',
            MaxBatchSize="1"))
    asg = AutoScalingGroup(
        'mygroup',
        AvailabilityZones=['eu-west-1a', 'eu-west-1b'],
        LaunchConfigurationName="I'm a test",
        MinSize="2",
        MaxSize=Ref(max_size_param),
        UpdatePolicy=policy)
    self.assertTrue(asg.validate())
def test_AutoScalingRollingUpdate_validation(self):
    """MinInstancesInService >= MaxSize raises ValueError on validate()."""
    bad_policy = UpdatePolicy(
        AutoScalingRollingUpdate=AutoScalingRollingUpdate(
            MinInstancesInService="2",
            MaxBatchSize='1'))
    asg = AutoScalingGroup(
        'mygroup',
        AvailabilityZones=['eu-west-1a', 'eu-west-1b'],
        LaunchConfigurationName="I'm a test",
        MinSize="1",
        MaxSize="2",
        UpdatePolicy=bad_policy)
    with self.assertRaises(ValueError):
        self.assertTrue(asg.validate())
def test_size_if(self):
    """An If() expression for MaxSize is accepted by validate()."""
    rolling = AutoScalingRollingUpdate(
        PauseTime='PT5M',
        MinInstancesInService="1",
        MaxBatchSize='1',
        WaitOnResourceSignals=True)
    asg = AutoScalingGroup(
        'mygroup',
        AvailabilityZones=['eu-west-1a', 'eu-west-1b'],
        LaunchConfigurationName="I'm a test",
        MinSize="1",
        MaxSize=If("isstage", "1", "5"),
        UpdatePolicy=UpdatePolicy(AutoScalingRollingUpdate=rolling))
    self.assertTrue(asg.validate())
def test_AutoScalingRollingUpdate_validation(self):
    """MinInstancesInService must be below MaxSize, else ValueError."""
    policy = UpdatePolicy(
        AutoScalingRollingUpdate=AutoScalingRollingUpdate(
            MinInstancesInService="2",
            MaxBatchSize="1"))
    asg = AutoScalingGroup(
        "mygroup",
        AvailabilityZones=["eu-west-1a", "eu-west-1b"],
        LaunchConfigurationName="I'm a test",
        MinSize="1",
        MaxSize="2",
        UpdatePolicy=policy,
    )
    with self.assertRaises(ValueError):
        asg.validate()
def test_size_if(self):
    """MaxSize given as an If() must not break validation (returns None)."""
    policy = UpdatePolicy(
        AutoScalingRollingUpdate=AutoScalingRollingUpdate(
            PauseTime="PT5M",
            MinInstancesInService="1",
            MaxBatchSize="1",
            WaitOnResourceSignals=True,
        ))
    asg = AutoScalingGroup(
        "mygroup",
        AvailabilityZones=["eu-west-1a", "eu-west-1b"],
        LaunchConfigurationName="I'm a test",
        MinSize="1",
        MaxSize=If("isstage", "1", "5"),
        UpdatePolicy=policy,
    )
    self.assertIsNone(asg.validate())
def test_mininstances_mininstancesinservice_is_ref(self):
    """A Ref for MinInstancesInService is accepted (check skipped)."""
    min_in_service = Parameter("ParamMinInstancesInService",
                               Type="String")
    asg = AutoScalingGroup(
        "mygroup",
        AvailabilityZones=["eu-west-1a", "eu-west-1b"],
        LaunchConfigurationName="I'm a test",
        MinSize="2",
        MaxSize="4",
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                PauseTime="PT1M5S",
                MinInstancesInService=Ref(min_in_service),
                MaxBatchSize="2",
            )),
    )
    self.assertIsNone(asg.validate())
def test_helperfn_as_AutoScalingRollingUpdate(self):
    """If() helpers are allowed for both the rolling update and the policy."""
    rolling_or_novalue = If(
        'RollingUpdate',
        AutoScalingRollingUpdate(
            PauseTime='PT5M',
            MinInstancesInService="1",
            MaxBatchSize='1',
            WaitOnResourceSignals=True),
        Ref("AWS::NoValue"),
    )
    update_policy = UpdatePolicy(
        AutoScalingRollingUpdate=rolling_or_novalue)
    asg = AutoScalingGroup(
        'mygroup',
        AvailabilityZones=['eu-west-1a', 'eu-west-1b'],
        LaunchConfigurationName="I'm a test",
        MinSize="1",
        MaxSize=If("isstage", "1", "5"),
        UpdatePolicy=If("UseUpdatePolicy", update_policy,
                        Ref("AWS::NoValue")))
    self.assertTrue(asg.validate())
def test_helperfn_as_updatepolicy(self):
    """An UpdatePolicy wrapped in an If() must still validate."""
    policy = UpdatePolicy(
        AutoScalingRollingUpdate=AutoScalingRollingUpdate(
            PauseTime="PT5M",
            MinInstancesInService="1",
            MaxBatchSize="1",
            WaitOnResourceSignals=True,
        ))
    conditional_policy = If("UseUpdatePolicy", policy, Ref("AWS::NoValue"))
    asg = AutoScalingGroup(
        "mygroup",
        AvailabilityZones=["eu-west-1a", "eu-west-1b"],
        LaunchConfigurationName="I'm a test",
        MinSize="1",
        MaxSize=If("isstage", "1", "5"),
        UpdatePolicy=conditional_policy,
    )
    self.assertTrue(asg.validate())
def add_autoscaling_group(self, name, launch_configuration_name, subnets,
                          desired_size=2, min_size=1, max_size=2,
                          health_check_type='EC2', target_group_arns=None):
    """
    Create Autoscaling Group

    :param name: Name of the ASG
    :param launch_configuration_name: Which launch configuration to use to create instances
    :param subnets: Subnet to spin up instances in
    :param desired_size: Desired number of instances
    :param min_size: Minimum number of instances
    :param max_size: Maximum number of instances
    :param health_check_type: Health check type
    :param target_group_arns: ARN of the target group(s), if any
    """
    # BUGFIX: the default used to be a mutable literal [] shared across
    # every call to this method; use the None sentinel instead.
    if target_group_arns is None:
        target_group_arns = []
    auto_scaling_group = AutoScalingGroup(
        name,
        DesiredCapacity=desired_size,
        Tags=[{
            'Key': 'Name',
            'Value': name,
            'PropagateAtLaunch': True
        }],
        LaunchConfigurationName=Ref(launch_configuration_name),
        MinSize=min_size,
        MaxSize=max_size,
        VPCZoneIdentifier=subnets,
        HealthCheckType=health_check_type,
        HealthCheckGracePeriod=60,
        TargetGroupARNs=target_group_arns,
        # Roll one instance at a time, keeping min_size in service, and
        # wait up to an hour for each batch to signal success.
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                PauseTime='PT1H',
                MinInstancesInService=min_size,
                MaxBatchSize='1',
                WaitOnResourceSignals=True)))
    self.template.add_resource(auto_scaling_group)
def test_mininstances_mininstancesinservice_is_ref(self):
    """Ref-valued MinInstancesInService passes validation."""
    min_in_service_param = Parameter(
        "ParamMinInstancesInService",
        Type="String"
    )
    rolling = AutoScalingRollingUpdate(
        PauseTime='PT1M5S',
        MinInstancesInService=Ref(min_in_service_param),
        MaxBatchSize="2",
    )
    asg = AutoScalingGroup(
        'mygroup',
        LaunchConfigurationName="I'm a test",
        MinSize="2",
        MaxSize="4",
        UpdatePolicy=UpdatePolicy(AutoScalingRollingUpdate=rolling)
    )
    self.assertTrue(asg.validate())
def _auto_scaling_group(self):
    """Build the environment's AutoScalingGroup, wired to the launch
    configuration and load balancer, with a one-at-a-time rolling update."""
    env = self.config['env']
    tags = [
        Tag("Name", "duy-%s" % env, True),
        Tag("Environment", env, True),
        Tag("PropagateAtLaunch", "true", True)
    ]
    rolling_update = AutoScalingRollingUpdate(
        PauseTime='PT5M',
        MinInstancesInService="1",
        MaxBatchSize='1',
        WaitOnResourceSignals=True)
    return AutoScalingGroup(
        "duy%sAutoscalingGroup" % env,
        DesiredCapacity=self.config['scale_desire'],
        Tags=tags,
        LaunchConfigurationName=Ref(self.launchConfiguration),
        MinSize=self.config['scale_min'],
        MaxSize=self.config['scale_max'],
        VPCZoneIdentifier=self.config['private_subnet'],
        LoadBalancerNames=[Ref(self.loadBalancer)],
        HealthCheckType="EC2",
        HealthCheckGracePeriod="300",
        TerminationPolicies=["OldestInstance", "Default"],
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=rolling_update))
def add_auto_scaling_group(self):
    '''Add the instance AutoScalingGroup (2-4 instances across the two
    private subnets) to the template and return the template.'''
    self.cfn_template.add_resource(
        AutoScalingGroup(
            title=constants.INST_ASG,
            AvailabilityZones=[
                ImportValue(Sub('${Environment}-PRIVATE-SUBNET-1-AZ')),
                ImportValue(Sub('${Environment}-PRIVATE-SUBNET-2-AZ')),
            ],
            # Cleanup: these values were written as int('150') / int('1')
            # round-trips; plain integer literals are equivalent.
            HealthCheckGracePeriod=150,
            LaunchConfigurationName=Ref(constants.INST_LC),
            MaxSize='4',
            MinSize='2',
            VPCZoneIdentifier=[
                ImportValue(Sub('${Environment}-PRIVATE-SUBNET-1')),
                ImportValue(Sub('${Environment}-PRIVATE-SUBNET-2')),
            ],
            # Stack creation waits for 2 instance signals (30 min max).
            CreationPolicy=CreationPolicy(
                ResourceSignal=ResourceSignal(
                    Count='2',
                    Timeout='PT30M'
                )
            ),
            UpdatePolicy=UpdatePolicy(
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    MaxBatchSize=1,
                    MinInstancesInService=1,
                    PauseTime='PT10M',
                    # Was the string 'true'; troposphere's boolean
                    # validator maps 'true' -> True, so the rendered
                    # template is unchanged.
                    WaitOnResourceSignals=True
                )
            )
        )
    )
    return self.cfn_template
def create_auto_scaling_group(self, asg_args):
    '''
    Method creates an auto scaling group and adds it to the resources list
    @param asg_args [dict] collection of keyword arguments for the asg
    '''
    rolling_update = AutoScalingRollingUpdate(
        PauseTime='PT5M',
        MinInstancesInService=asg_args['min_capacity'],
        MaxBatchSize='1',
        WaitOnResourceSignals=True)
    asg = AutoScalingGroup(
        "AutoscalingGroup",
        DesiredCapacity=asg_args['desired_capacity'],
        Tags=[Tag("Name", "CloudformationLab", True)],
        LaunchConfigurationName=Ref(self.launch_configuration),
        MinSize=asg_args['min_capacity'],
        MaxSize=asg_args['max_capacity'],
        VPCZoneIdentifier=self.subnets,
        LoadBalancerNames=[Ref(self.load_balancer)],
        AvailabilityZones=self.availability_zones,
        HealthCheckType="ELB",
        HealthCheckGracePeriod=60,
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=rolling_update))
    self.add_resource(asg)
def elb_asg_lc_template(app, env, nameSGRDS, rdsPort, instanceType, ami,
                        subnets, elbPort, elbCidrBlock, ec2Port,
                        desiredCapacity, minSize, maxSize, region,
                        nameBucket, officeIP):
    """Build the full ELB + LaunchConfiguration + AutoScalingGroup stack
    for one app/environment and return it rendered as a JSON string.

    Creates: the ELB and EC2 security groups, an RDS ingress rule from the
    EC2 group, the launch configuration, an S3 bucket policy for ELB
    access logs, the classic load balancer, and the auto scaling group.
    """
    template = Template()

    sgELB = template.add_resource(
        SecurityGroup(
            "SecurityGroupELB" + app + env,
            GroupDescription="Security group for " + app + "-" + env,
            VpcId=ImportValue("VPC" + env),
            SecurityGroupIngress=[
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=elbPort,
                    ToPort=elbPort,
                    CidrIp=elbCidrBlock,
                )
            ],
            SecurityGroupEgress=[
                # BUGFIX: FromPort/ToPort were swapped (FromPort=65535,
                # ToPort=0); the intent is "all traffic out", so use the
                # full ascending port range.
                SecurityGroupRule(IpProtocol="-1",
                                  FromPort=0,
                                  ToPort=65535,
                                  CidrIp="0.0.0.0/0")
            ],
            Tags=Tags(
                env=env,
                Name="sg-ELB" + app + "-" + env,
                app=app,
            ),
        ))

    sgEC2 = template.add_resource(
        SecurityGroup(
            "SecurityGroupEC2" + app + env,
            GroupDescription="Security group for EC2 " + app + "-" + env,
            VpcId=ImportValue("VPC" + env),
            DependsOn="SecurityGroupELB" + app + env,
            SecurityGroupIngress=[
                # App traffic only from the ELB; SSH only from the office.
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=ec2Port,
                    ToPort=ec2Port,
                    SourceSecurityGroupId=Ref(sgELB),
                ),
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=22,
                    ToPort=22,
                    CidrIp=officeIP,
                ),
            ],
            SecurityGroupEgress=[
                # BUGFIX: same swapped-port defect as the ELB group above.
                SecurityGroupRule(IpProtocol="-1",
                                  FromPort=0,
                                  ToPort=65535,
                                  CidrIp="0.0.0.0/0")
            ],
            Tags=Tags(
                env=env,
                Name="sg-EC2-" + app + "-" + env,
                app=app,
            ),
        ))

    # Let the EC2 instances reach the (externally created) RDS instance.
    addIngressRDS = template.add_resource(
        SecurityGroupIngress(
            "ingressSGRDS" + app + env,
            SourceSecurityGroupId=Ref(sgEC2),
            Description="From EC2 instances",
            GroupId=ImportValue("SG-" + nameSGRDS + "-" + app + "-" + env),
            IpProtocol="tcp",
            FromPort=rdsPort,
            ToPort=rdsPort,
            DependsOn="SecurityGroupEC2" + app + env,
        ))

    launchConfig = template.add_resource(
        LaunchConfiguration(
            "LaunchConfiguration" + app + env,
            InstanceType=instanceType,
            ImageId=ami,
            SecurityGroups=[Ref(sgEC2)],
            IamInstanceProfile=ImportValue("Role-" + app + "-" + env)))

    # Allow the (region-specific) ELB log-delivery account to write access
    # logs into the bucket.
    bucketPolicy = template.add_resource(
        BucketPolicy(
            "BucketPolicy" + nameBucket + app + env,
            Bucket=ImportValue("Bucket" + nameBucket + app + env),
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Action": ["s3:PutObject"],
                    "Effect": "Allow",
                    "Resource": Join("", [
                        "arn:aws:s3:::",
                        ImportValue("Bucket" + nameBucket + app + env),
                        "/AWSLogs/",
                        Ref("AWS::AccountId"), "/*"
                    ]),
                    "Principal": {
                        "AWS": ["156460612806"]
                    }
                }]
            }))

    lb = template.add_resource(
        LoadBalancer(
            "LoadBalancer" + app + env,
            ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                Enabled=True,
                Timeout=120,
            ),
            Subnets=subnets,
            HealthCheck=elb.HealthCheck(
                "HealthCheck",
                Target="TCP:" + str(ec2Port),
                HealthyThreshold="5",
                UnhealthyThreshold="5",
                Interval="30",
                Timeout="15",
            ),
            Listeners=[
                elb.Listener(
                    LoadBalancerPort=elbPort,
                    InstancePort=ec2Port,
                    Protocol="HTTP",
                    InstanceProtocol="HTTP",
                ),
            ],
            CrossZone=True,
            SecurityGroups=[Ref(sgELB)],
            LoadBalancerName="lb-" + app + "-" + env,
            Scheme="internet-facing",
            AccessLoggingPolicy=AccessLoggingPolicy(
                "LoggingELB" + app + env,
                EmitInterval=5,
                Enabled=True,
                S3BucketName=ImportValue("Bucket" + nameBucket + app + env),
            )))

    asg = template.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup" + app + env,
            DesiredCapacity=desiredCapacity,
            Tags=[Tag("Environment", env, True)],
            LaunchConfigurationName=Ref(launchConfig),
            MinSize=minSize,
            MaxSize=maxSize,
            LoadBalancerNames=[Ref(lb)],
            AvailabilityZones=GetAZs(region),
            VPCZoneIdentifier=subnets,
            HealthCheckType="ELB",
            HealthCheckGracePeriod=300,
            UpdatePolicy=UpdatePolicy(
                AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
                    WillReplace=True,
                ),
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    PauseTime='PT5M',
                    MinInstancesInService="1",
                    MaxBatchSize='1',
                    WaitOnResourceSignals=True,
                ))))

    return (template.to_json())
HealthCheckGracePeriod=300, DesiredCapacity=DesiredCapacity, MinSize=MinSize, MaxSize=MaxSize, Tags=[ Tag("Name", environmentString + "AutoscalingGroup" + f, True) ], LaunchConfigurationName=Ref(LaunchConfig), VPCZoneIdentifier=subnetsList, # LoadBalancerNames=[Ref(LoadBalancer)], #AvailabilityZones=subnetsList, HealthCheckType="EC2", UpdatePolicy=UpdatePolicy( AutoScalingReplacingUpdate=AutoScalingReplacingUpdate( WillReplace=True, ), AutoScalingRollingUpdate=AutoScalingRollingUpdate( PauseTime='PT5M', MinInstancesInService="1", MaxBatchSize='1', WaitOnResourceSignals=True)))) ScalePolicyUp = template.add_resource( ScalingPolicy("HTTPRequestScalingPolicyUp" + f, AutoScalingGroupName=Ref(AutoscalingGroupX), AdjustmentType="ChangeInCapacity", Cooldown="300", ScalingAdjustment="1")) ScalePolicyDown = template.add_resource( ScalingPolicy("HTTPRequestScalingPolicyDown" + f, AutoScalingGroupName=Ref(AutoscalingGroupX), AdjustmentType="ChangeInCapacity",
def create_template(self) -> None:
    """Create template (main function called by Stacker).

    Builds the EKS worker-node stack: console parameter-group metadata,
    spot-price / desired-count / keypair conditions, the node security
    group with its control-plane ingress/egress rules, the node
    LaunchConfiguration, and the worker AutoScalingGroup with a rolling
    update policy.
    """
    template = self.template
    template.add_version("2010-09-09")
    template.add_description(
        "Kubernetes workers via EKS - V1.0.0 "
        "- compatible with amazon-eks-node-v23+"
    )

    # Metadata: groups the stack's parameters into labelled sections in
    # the CloudFormation console.
    template.add_metadata(
        {
            "AWS::CloudFormation::Interface": {
                "ParameterGroups": [
                    {
                        "Label": {"default": "EKS Cluster"},
                        "Parameters": [
                            self.variables[i].name
                            for i in [
                                "ClusterName",
                                "ClusterControlPlaneSecurityGroup",
                            ]
                        ],
                    },
                    {
                        "Label": {"default": "Worker Node Configuration"},
                        "Parameters": [
                            self.variables[i].name
                            for i in [
                                "NodeGroupName",
                                "NodeAutoScalingGroupMinSize",
                                "NodeAutoScalingGroupMaxSize",
                                "UseDesiredInstanceCount",
                                "NodeInstanceType",
                                "NodeInstanceProfile",
                                "NodeImageId",
                                "NodeVolumeSize",
                                "KeyName",
                                "UseSpotInstances",
                                "SpotBidPrice",
                                "BootstrapArguments",
                            ]
                        ],
                    },
                    {
                        "Label": {"default": "Worker Network Configuration"},
                        "Parameters": [
                            self.variables[i].name
                            for i in ["VpcId", "Subnets"]
                        ],
                    },
                ]
            }
        }
    )

    # Conditions
    template.add_condition(
        "SetSpotPrice", Equals(self.variables["UseSpotInstances"].ref, "yes")
    )
    template.add_condition(
        "DesiredInstanceCountSpecified",
        Equals(self.variables["UseDesiredInstanceCount"].ref, "true"),
    )
    template.add_condition(
        "KeyNameSpecified", Not(Equals(self.variables["KeyName"].ref, ""))
    )

    # Resources
    nodesecuritygroup = template.add_resource(
        ec2.SecurityGroup(
            "NodeSecurityGroup",
            GroupDescription="Security group for all nodes in the cluster",
            # The kubernetes.io/cluster/<name>=owned tag marks the SG as
            # belonging to this cluster.
            Tags=[
                {
                    "Key": Sub("kubernetes.io/cluster/${ClusterName}"),
                    "Value": "owned",
                },
            ],
            VpcId=self.variables["VpcId"].ref,
        )
    )
    template.add_output(
        Output(
            "NodeSecurityGroup",
            Description="Security group for all nodes in the cluster",
            Value=nodesecuritygroup.ref(),
        )
    )
    # Node-to-node: unrestricted traffic within the node security group.
    template.add_resource(
        ec2.SecurityGroupIngress(
            "NodeSecurityGroupIngress",
            Description="Allow node to communicate with each other",
            GroupId=nodesecuritygroup.ref(),
            SourceSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol="-1",
            FromPort=0,
            ToPort=65535,
        )
    )
    # Control plane -> kubelet/pods on the high ports.
    template.add_resource(
        ec2.SecurityGroupIngress(
            "NodeSecurityGroupFromControlPlaneIngress",
            Description="Allow worker Kubelets and pods to receive "
            "communication from the cluster control plane",
            GroupId=nodesecuritygroup.ref(),
            SourceSecurityGroupId=self.variables[
                "ClusterControlPlaneSecurityGroup"
            ].ref,  # noqa
            IpProtocol="tcp",
            FromPort=1025,
            ToPort=65535,
        )
    )
    template.add_resource(
        ec2.SecurityGroupEgress(
            "ControlPlaneEgressToNodeSecurityGroup",
            Description="Allow the cluster control plane to communicate "
            "with worker Kubelet and pods",
            GroupId=self.variables["ClusterControlPlaneSecurityGroup"].ref,
            DestinationSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol="tcp",
            FromPort=1025,
            ToPort=65535,
        )
    )
    # Port 443 in both directions for extension API servers on the nodes.
    template.add_resource(
        ec2.SecurityGroupIngress(
            "NodeSecurityGroupFromControlPlaneOn443Ingress",
            Description="Allow pods running extension API servers on port "
            "443 to receive communication from cluster "
            "control plane",
            GroupId=nodesecuritygroup.ref(),
            SourceSecurityGroupId=self.variables[
                "ClusterControlPlaneSecurityGroup"
            ].ref,  # noqa
            IpProtocol="tcp",
            FromPort=443,
            ToPort=443,
        )
    )
    template.add_resource(
        ec2.SecurityGroupEgress(
            "ControlPlaneEgressToNodeSecurityGroupOn443",
            Description="Allow the cluster control plane to communicate "
            "with pods running extension API servers on port "
            "443",
            GroupId=self.variables["ClusterControlPlaneSecurityGroup"].ref,
            DestinationSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol="tcp",
            FromPort=443,
            ToPort=443,
        )
    )
    # Nodes -> cluster API server.
    template.add_resource(
        ec2.SecurityGroupIngress(
            "ClusterControlPlaneSecurityGroupIngress",
            Description="Allow pods to communicate with the cluster API "
            "Server",
            GroupId=self.variables["ClusterControlPlaneSecurityGroup"].ref,
            SourceSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol="tcp",
            FromPort=443,
            ToPort=443,
        )
    )

    nodelaunchconfig = template.add_resource(
        autoscaling.LaunchConfiguration(
            "NodeLaunchConfig",
            AssociatePublicIpAddress=True,
            IamInstanceProfile=self.variables["NodeInstanceProfile"].ref,
            ImageId=self.variables["NodeImageId"].ref,
            InstanceType=self.variables["NodeInstanceType"].ref,
            # Keypair and spot price are only set when their respective
            # conditions hold; otherwise the property is omitted entirely.
            KeyName=If("KeyNameSpecified",
                       self.variables["KeyName"].ref,
                       NoValue),
            SecurityGroups=[nodesecuritygroup.ref()],
            SpotPrice=If(
                "SetSpotPrice", self.variables["SpotBidPrice"].ref, NoValue
            ),
            BlockDeviceMappings=[
                autoscaling.BlockDeviceMapping(
                    DeviceName="/dev/xvda",
                    Ebs=autoscaling.EBSBlockDevice(
                        VolumeSize=self.variables["NodeVolumeSize"].ref,
                        VolumeType="gp2",
                        DeleteOnTermination=True,
                    ),
                )
            ],
            # Bootstrap the node into the cluster, then signal the ASG's
            # rolling-update policy via cfn-signal.
            UserData=Base64(
                Sub(
                    "\n".join(
                        [
                            "#!/bin/bash",
                            "set -o xtrace",
                            "/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}",
                            "/opt/aws/bin/cfn-signal --exit-code $? \\",
                            "--stack ${AWS::StackName} \\",
                            "--resource NodeGroup \\",
                            "--region ${AWS::Region}",
                        ]
                    )
                )
            ),
        )
    )

    template.add_resource(
        autoscaling.AutoScalingGroup(
            "NodeGroup",
            # NOTE(review): when a desired count is requested this uses
            # the *MaxSize* parameter as the desired capacity -- looks
            # intentional (scale to max) but confirm against the
            # blueprint's variable definitions.
            DesiredCapacity=If(
                "DesiredInstanceCountSpecified",
                self.variables["NodeAutoScalingGroupMaxSize"].ref,
                NoValue,
            ),
            LaunchConfigurationName=nodelaunchconfig.ref(),
            MinSize=self.variables["NodeAutoScalingGroupMinSize"].ref,
            MaxSize=self.variables["NodeAutoScalingGroupMaxSize"].ref,
            VPCZoneIdentifier=self.variables["Subnets"].ref,
            Tags=[
                autoscaling.Tag(
                    "Name", Sub("${ClusterName}-${NodeGroupName}-Node"), True
                ),
                autoscaling.Tag(
                    Sub("kubernetes.io/cluster/${ClusterName}"), "owned", True
                ),
            ],
            # Roll nodes one at a time, always keeping one in service.
            UpdatePolicy=UpdatePolicy(
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    MinInstancesInService="1", MaxBatchSize="1"
                )
            ),
        )
    )
def generate_cloudformation_template():
    """Build the app's Auto Scaling Group template from command-line args.

    argv[1]: 'True'/'False' -- whether to create an ELB (plus DNS record,
             access-log policy and ELB health checks).
    argv[2]: Python-literal list of scaling-policy dicts.
    argv[3]: Python-literal list of CloudWatch alarm dicts.
    argv[4]: Python-literal list of ELB listener dicts.

    Returns the populated troposphere Template.
    """
    enable_elb = sys.argv[1]
    input_scaling_policies = ast.literal_eval(sys.argv[2])
    input_alarms = ast.literal_eval(sys.argv[3])

    # Normalize the flag from its string form to a real bool.
    enable_elb = enable_elb == 'True'
    elb_listeners = ast.literal_eval(sys.argv[4])

    template = Template()
    template.add_description("""\
Configures Auto Scaling Group for the app""")

    # --- Common parameters -------------------------------------------------
    project_name = template.add_parameter(
        Parameter(
            "Name",
            Type="String",
            Description="Instances will be tagged with this name",
        ))
    scalecapacity = template.add_parameter(
        Parameter(
            "ScaleCapacity",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))
    minsize = template.add_parameter(
        Parameter(
            "MinScale",
            Type="String",
            Description="Minimum number of servers to keep in the ASG",
        ))
    maxsize = template.add_parameter(
        Parameter(
            "MaxScale",
            Type="String",
            Description="Maximum number of servers to keep in the ASG",
        ))
    # Creation/update signalling knobs (used by CreationPolicy and the
    # rolling UpdatePolicy below).
    signalcount = template.add_parameter(
        Parameter(
            "SignalCount",
            Default="1",
            Type="String",
            Description=
            "No. of signals CF must receive before it sets the status as CREATE_COMPLETE",
        ))
    signaltimeout = template.add_parameter(
        Parameter(
            "SignalTimeout",
            Default="PT5M",
            Type="String",
            Description=
            "Time that CF waits for the number of signals that was specified in Count ",
        ))
    minsuccessfulinstancespercent = template.add_parameter(
        Parameter(
            "MinSuccessfulInstancesPercent",
            Default="100",
            Type="String",
            Description=
            "% instances in a rolling update that must signal success for CF to succeed",
        ))
    environment = template.add_parameter(
        Parameter(
            "Environment",
            Type="String",
            Description="The environment being deployed into",
        ))
    subnet = template.add_parameter(
        Parameter(
            "Subnets",
            Type="CommaDelimitedList",
        ))
    launchconfigurationname = template.add_parameter(
        Parameter(
            "LaunchConfigurationName",
            Type="String",
        ))
    health_check_grace_period = template.add_parameter(
        Parameter(
            "HealthCheckGracePeriod",
            Type="String",
            Default="300",
        ))

    # --- ELB-only parameters and resources (built only when enabled) ------
    if enable_elb:
        elb_subnets = template.add_parameter(
            Parameter(
                "LoadBalancerSubnets",
                Type="CommaDelimitedList",
            ))
        elb_bucket_name = template.add_parameter(
            Parameter("LoadBalancerBucketName",
                      Type="String",
                      Description="S3 Bucket for the ELB access logs"))
        # Access logging is enabled only when a bucket name was provided.
        template.add_condition("ElbLoggingCondition",
                               Not(Equals(Ref(elb_bucket_name), "")))
        elb_schema = template.add_parameter(
            Parameter(
                "LoadBalancerSchema",
                Type="String",
            ))
        health_check_interval = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckInterval",
                Type="String",
            ))
        health_check_timeout = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckTimeout",
                Type="String",
            ))
        healthy_threshold = template.add_parameter(
            Parameter(
                "LoadBalancerHealthyThreshold",
                Type="String",
            ))
        unhealthy_threshold = template.add_parameter(
            Parameter(
                "LoadBalancerUnHealthyThreshold",
                Type="String",
            ))
        enable_connection_draining = template.add_parameter(
            Parameter(
                "LoadBalancerEnableConnectionDraining",
                Type="String",
                Default="True",
            ))
        connection_draining_timeout = template.add_parameter(
            Parameter(
                "LoadBalancerConnectionDrainingTimeout",
                Type="String",
                Default="30",
            ))
        loadbalancersecuritygroup = template.add_parameter(
            Parameter(
                "LoadBalancerSecurityGroup",
                Type="CommaDelimitedList",
                Description="Security group for api app load balancer.",
            ))
        hostedzone = template.add_parameter(
            Parameter(
                "HostedZoneName",
                Description=
                "The DNS name of an existing Amazon Route 53 hosted zone",
                Type="String",
            ))
        dns_record = template.add_parameter(
            Parameter(
                "DNSRecord",
                Type="String",
            ))
        dns_ttl = template.add_parameter(
            Parameter(
                "DNSTTL",
                Default="300",
                Type="String",
            ))
        new_weight = template.add_parameter(
            Parameter(
                "NewDnsWeight",
                Type="String",
                Default="100",
            ))
        health_check_protocol = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckProtocol",
                Type="String",
            ))
        # TCP health checks take no URL path (see the Target Join below).
        template.add_condition("ElbTCPProtocolCondition",
                               Equals(Ref(health_check_protocol), "TCP"))
        health_check_port = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckPort",
                Type="String",
            ))
        health_check_path = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckPath",
                Type="String",
            ))

        # One elb.Listener per input listener dict.
        load_balancer_listeners = []
        for listener in elb_listeners:
            load_balancer_listeners.append(
                elb.Listener(
                    LoadBalancerPort=listener['load_balancer_port'],
                    InstancePort=listener['instance_port'],
                    Protocol=listener['protocol'],
                    InstanceProtocol=Ref(health_check_protocol),
                ))

        loadbalancer = template.add_resource(
            elb.LoadBalancer(
                "LoadBalancer",
                AccessLoggingPolicy=If(
                    "ElbLoggingCondition",
                    elb.AccessLoggingPolicy(EmitInterval=60,
                                            Enabled=True,
                                            S3BucketName=Ref(elb_bucket_name),
                                            S3BucketPrefix="ELBLogs"),
                    Ref("AWS::NoValue")),
                ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                    Enabled=Ref(enable_connection_draining),
                    Timeout=Ref(connection_draining_timeout),
                ),
                Subnets=Ref(elb_subnets),
                # Target is "<PROTO>:<port>[<path>]" -- the path is dropped
                # for TCP health checks.
                HealthCheck=elb.HealthCheck(
                    Target=Join("", [
                        Ref(health_check_protocol), ":",
                        Ref(health_check_port),
                        If("ElbTCPProtocolCondition", Ref("AWS::NoValue"),
                           Ref(health_check_path))
                    ]),
                    HealthyThreshold=Ref(healthy_threshold),
                    UnhealthyThreshold=Ref(unhealthy_threshold),
                    Interval=Ref(health_check_interval),
                    Timeout=Ref(health_check_timeout),
                ),
                Listeners=load_balancer_listeners,
                CrossZone=True,
                SecurityGroups=Ref(loadbalancersecuritygroup),
                Scheme=Ref(elb_schema)))

        # Weighted CNAME pointing at the ELB for this project.
        route53record = template.add_resource(
            RecordSetType(
                "DNS",
                HostedZoneName=Join("", [Ref(hostedzone), "."]),
                Name=Join("", [Ref(dns_record), ".", Ref(hostedzone), "."]),
                ResourceRecords=[GetAtt(loadbalancer, "DNSName")],
                SetIdentifier=Ref(project_name),
                TTL=Ref(dns_ttl),
                Type="CNAME",
                Weight=Ref(new_weight),
            ))

    # --- Auto Scaling Group ------------------------------------------------
    autoscalinggroup = template.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup",
            Tags=[
                Tag("Name", Ref(project_name), True),
                Tag("Environment", Ref(environment), True)
            ],
            LaunchConfigurationName=Ref(launchconfigurationname),
            MinSize=Ref(minsize),
            MaxSize=Ref(maxsize),
            DesiredCapacity=Ref(scalecapacity),
            VPCZoneIdentifier=Ref(subnet),
            HealthCheckGracePeriod=Ref(health_check_grace_period),
            # Wait for the configured number of cfn-signals during both
            # creation and rolling updates.
            CreationPolicy=CreationPolicy(
                ResourceSignal=ResourceSignal(Count=Ref(signalcount),
                                              Timeout=Ref(signaltimeout)),
                AutoScalingCreationPolicy=AutoScalingCreationPolicy(
                    MinSuccessfulInstancesPercent=Ref(
                        minsuccessfulinstancespercent))),
            UpdatePolicy=UpdatePolicy(
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    MaxBatchSize='1',
                    MinInstancesInService='1',
                    MinSuccessfulInstancesPercent=Ref(
                        minsuccessfulinstancespercent),
                    PauseTime=Ref(signaltimeout),
                    WaitOnResourceSignals=True))))

    # Health-check type defaults to EC2 and is upgraded to ELB when a load
    # balancer is attached.
    autoscalinggroup.HealthCheckType = 'EC2'
    if enable_elb:
        autoscalinggroup.LoadBalancerNames = [Ref(loadbalancer)]
        autoscalinggroup.HealthCheckType = 'ELB'

    # --- Scaling policies --------------------------------------------------
    # Optional keys are only passed through when valid for the policy type.
    created_scaling_policies = dict()
    for scaling_policy in input_scaling_policies:
        policy_properties = {
            'AdjustmentType': scaling_policy['adjustment_type'],
            'AutoScalingGroupName': Ref(autoscalinggroup),
            'Cooldown': scaling_policy['cooldown'],
            'PolicyType': scaling_policy['policy_type'],
            'ScalingAdjustment': scaling_policy['scaling_adjustment'],
        }
        if scaling_policy['policy_type'] != "SimpleScaling" \
                and 'estimated_instance_warmup' in scaling_policy:
            policy_properties['EstimatedInstanceWarmup'] = \
                scaling_policy['estimated_instance_warmup']
        if scaling_policy['policy_type'] != "SimpleScaling" \
                and 'metric_aggregation_type' in scaling_policy:
            policy_properties['MetricAggregationType'] = scaling_policy[
                'metric_aggregation_type']
        if scaling_policy['adjustment_type'] == "PercentChangeInCapacity" \
                and 'min_adjustment_magnitude' in scaling_policy:
            policy_properties['MinAdjustmentMagnitude'] = scaling_policy[
                'min_adjustment_magnitude']
        if 'step_adjustments' in scaling_policy:
            policy_properties['StepAdjustments'] = scaling_policy[
                'step_adjustments']
        created_scaling_policies[
            scaling_policy['name']] = template.add_resource(
                ScalingPolicy(scaling_policy['name'], **policy_properties))

    # --- CloudWatch alarms wired to the scaling policies -------------------
    for alarm in input_alarms:
        template.add_resource(
            Alarm(
                alarm['name'],
                ActionsEnabled=True,
                AlarmActions=[
                    Ref(created_scaling_policies[alarm['scaling_policy_name']])
                ],
                AlarmDescription=alarm['description'],
                ComparisonOperator=alarm['comparison'],
                Dimensions=[
                    MetricDimension(Name="AutoScalingGroupName",
                                    Value=Ref(autoscalinggroup)),
                ],
                EvaluationPeriods=alarm['evaluation_periods'],
                InsufficientDataActions=[],
                MetricName=alarm['metric'],
                Namespace=alarm['namespace'],
                OKActions=[],
                Period=alarm['period'],
                Statistic=alarm['statistics'],
                Threshold=str(alarm['threshold']),
                Unit=alarm['unit'],
            ))

    # --- Outputs -----------------------------------------------------------
    template.add_output(
        Output("StackName", Value=Ref(project_name), Description="Stack Name"))
    if enable_elb:
        template.add_output(
            Output("DomainName",
                   Value=Ref(route53record),
                   Description="DNS to access the service"))
        template.add_output(
            Output("LoadBalancer",
                   Value=GetAtt(loadbalancer, "DNSName"),
                   Description="ELB dns"))
    template.add_output(
        Output("AutoScalingGroup",
               Value=Ref(autoscalinggroup),
               Description="Auto Scaling Group"))
    template.add_output(
        Output("LaunchConfiguration",
               Value=Ref(launchconfigurationname),
               Description="LaunchConfiguration for this deploy"))

    return template
def build_template(sierrafile):
    """Build the full CloudFormation template for the sierrafile.

    Declares, in order: stack parameters, a two-subnet public VPC, a network
    load balancer, an ECS cluster backed by an auto-scaling group of EC2
    hosts, and — per service — a task definition, target group, listener,
    ECS service, and (optionally) a CodePipeline with a GitHub webhook.

    :param sierrafile: parsed configuration object; this function reads
        ``extra_params``, ``services`` and ``env_vars`` from it.
    :returns: a ``troposphere.Template`` ready to be serialized.
    """
    template = Template()
    template.add_version('2010-09-09')
    template.add_metadata(build_interface(sierrafile.extra_params))

    parameters = AttrDict(

        # Network Parameters
        vpc_cidr=template.add_parameter(Parameter(
            'VpcCidr',
            Type='String',
            Default='192.172.0.0/16',
        )),
        subnet1_cidr=template.add_parameter(Parameter(
            'Subnet1Cidr',
            Type='String',
            Default='192.172.1.0/24',
        )),
        subnet2_cidr=template.add_parameter(Parameter(
            'Subnet2Cidr',
            Type='String',
            Default='192.172.2.0/24',
        )),

        # ECS Parameters
        cluster_size=template.add_parameter(Parameter(
            'ClusterSize',
            Type='Number',
            Default=2,
        )),
        instance_type=template.add_parameter(Parameter(
            'InstanceType',
            Type='String',
            Default='t2.medium'
        )),
        key_name=template.add_parameter(Parameter(
            'KeyName',
            Type='AWS::EC2::KeyPair::KeyName',
        )),
        image_id=template.add_parameter(Parameter(
            'ImageId',
            Type='AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>',
            Default=(
                '/aws/service/ecs/optimized-ami'
                '/amazon-linux/recommended/image_id'
            ),
            Description=(
                'An SSM parameter that resolves to a valid AMI ID.'
                ' This is the AMI that will be used to create ECS hosts.'
                ' The default is the current recommended ECS-optimized AMI.'
            )
        )),

        # Other Parameters
        github_token=template.add_parameter(Parameter(
            'GitHubToken',
            Type='String',
            NoEcho=True,
        )),
    )

    # Environment Variable Parameters
    # (the second tuple element — the variable's runtime name — is consumed
    # elsewhere; only the parameter name is needed here)
    for env_var_param, env_var_name in sierrafile.extra_params:
        template.add_parameter(Parameter(
            env_var_param,
            Type='String',
            NoEcho=True,
        ))

    # Resource Declarations

    # # Network

    network_vpc = template.add_resource(VPC(
        'NetworkVpc',
        CidrBlock=Ref(parameters.vpc_cidr),
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    network_ig = template.add_resource(InternetGateway(
        'NetworkInternetGateway',
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    vpc_attach = template.add_resource(VPCGatewayAttachment(
        'NetworkInternetGatewayAttachment',
        InternetGatewayId=Ref(network_ig),
        VpcId=Ref(network_vpc),
    ))

    route_table = template.add_resource(RouteTable(
        'NetworkRouteTable',
        VpcId=Ref(network_vpc),
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    # Default route must wait for the IGW attachment, otherwise stack
    # creation can fail with "route table contains unattached gateway".
    template.add_resource(Route(
        'NetworkDefaultRoute',
        DependsOn=[vpc_attach.title],
        RouteTableId=Ref(route_table),
        DestinationCidrBlock='0.0.0.0/0',
        GatewayId=Ref(network_ig),
    ))

    subnet1 = template.add_resource(Subnet(
        'NetworkSubnet1',
        VpcId=Ref(network_vpc),
        AvailabilityZone=Select(0, GetAZs()),
        MapPublicIpOnLaunch=True,
        CidrBlock=Ref(parameters.subnet1_cidr),
        Tags=Tags(Name=Sub('${AWS::StackName} (Public)')),
    ))

    subnet2 = template.add_resource(Subnet(
        'NetworkSubnet2',
        VpcId=Ref(network_vpc),
        AvailabilityZone=Select(1, GetAZs()),
        MapPublicIpOnLaunch=True,
        CidrBlock=Ref(parameters.subnet2_cidr),
        Tags=Tags(Name=Sub('${AWS::StackName} (Public)')),
    ))

    template.add_resource(SubnetRouteTableAssociation(
        'NetworkSubnet1RouteTableAssociation',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet1),
    ))

    template.add_resource(SubnetRouteTableAssociation(
        'NetworkSubnet2RouteTableAssociation',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet2),
    ))

    elb = template.add_resource(LoadBalancer(
        ELB_NAME,
        Name=Sub('${AWS::StackName}-elb'),
        Type='network',
        Subnets=[Ref(subnet1), Ref(subnet2)],
    ))

    # # Cluster

    ecs_host_role = template.add_resource(Role(
        'EcsHostRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal('Service', 'ec2.amazonaws.com'),
                Action=[awacs.sts.AssumeRole]
            )],
        ),
        ManagedPolicyArns=[
            'arn:aws:iam::aws:policy/'
            'service-role/AmazonEC2ContainerServiceforEC2Role'
        ]
    ))

    ecs_host_profile = template.add_resource(InstanceProfile(
        'EcsHostInstanceProfile',
        Roles=[Ref(ecs_host_role)]
    ))

    ecs_host_sg = template.add_resource(SecurityGroup(
        'EcsHostSecurityGroup',
        GroupDescription=Sub('${AWS::StackName}-hosts'),
        VpcId=Ref(network_vpc),
        SecurityGroupIngress=[SecurityGroupRule(
            CidrIp='0.0.0.0/0',
            IpProtocol='-1'
        )]
    ))

    cluster = template.add_resource(Cluster(
        'EcsCluster',
        ClusterName=Ref('AWS::StackName')
    ))

    autoscaling_name = 'EcsHostAutoScalingGroup'
    launch_conf_name = 'EcsHostLaunchConfiguration'

    launch_conf = template.add_resource(LaunchConfiguration(
        launch_conf_name,
        ImageId=Ref(parameters.image_id),
        InstanceType=Ref(parameters.instance_type),
        IamInstanceProfile=Ref(ecs_host_profile),
        KeyName=Ref(parameters.key_name),
        SecurityGroups=[Ref(ecs_host_sg)],
        UserData=Base64(Sub(
            '#!/bin/bash\n'
            'yum install -y aws-cfn-bootstrap\n'
            '/opt/aws/bin/cfn-init -v'
            ' --region ${AWS::Region}'
            ' --stack ${AWS::StackName}'
            f' --resource {launch_conf_name}\n'
            '/opt/aws/bin/cfn-signal -e $?'
            ' --region ${AWS::Region}'
            ' --stack ${AWS::StackName}'
            f' --resource {autoscaling_name}\n'
        )),
        Metadata={
            'AWS::CloudFormation::Init': {
                'config': {
                    'commands': {
                        '01_add_instance_to_cluster': {
                            'command': Sub(
                                f'echo ECS_CLUSTER=${{{cluster.title}}}'
                                f' > /etc/ecs/ecs.config'
                            ),
                        }
                    },
                    'files': {
                        '/etc/cfn/cfn-hup.conf': {
                            'mode': 0o400,
                            'owner': 'root',
                            'group': 'root',
                            'content': Sub(
                                '[main]\n'
                                'stack=${AWS::StackId}\n'
                                'region=${AWS::Region}\n'
                            ),
                        },
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf': {
                            'content': Sub(
                                '[cfn-auto-reloader-hook]\n'
                                'triggers=post.update\n'
                                # BUG FIX: the hook previously watched
                                # 'Resources.ContainerInstances.Metadata',
                                # but no resource named ContainerInstances
                                # exists in this template — cfn-hup never
                                # fired on updates. Watch the actual launch
                                # configuration resource instead.
                                f'path=Resources.{launch_conf_name}.Metadata'
                                '.AWS::CloudFormation::Init\n'
                                'action=/opt/aws/bin/cfn-init -v'
                                ' --region ${AWS::Region}'
                                ' --stack ${AWS::StackName}'
                                f' --resource {launch_conf_name}\n'
                            ),
                        },
                    },
                    'services': {
                        'sysvinit': {
                            'cfn-hup': {
                                'enabled': True,
                                'ensureRunning': True,
                                'files': [
                                    '/etc/cfn/cfn-hup.conf',
                                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                                ]
                            }
                        }
                    }
                }
            }
        }
    ))

    autoscaling_group = template.add_resource(AutoScalingGroup(
        autoscaling_name,
        VPCZoneIdentifier=[Ref(subnet1), Ref(subnet2)],
        LaunchConfigurationName=Ref(launch_conf),
        DesiredCapacity=Ref(parameters.cluster_size),
        MinSize=Ref(parameters.cluster_size),
        MaxSize=Ref(parameters.cluster_size),
        Tags=[{
            'Key': 'Name',
            'Value': Sub('${AWS::StackName} - ECS Host'),
            'PropagateAtLaunch': True,
        }],
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M'),
        ),
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                MinInstancesInService=1,
                MaxBatchSize=1,
                PauseTime='PT5M',
                WaitOnResourceSignals=True,
            ),
        ),
    ))

    # # Services

    task_role = template.add_resource(Role(
        'TaskExecutionRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal('Service', 'ecs-tasks.amazonaws.com'),
                Action=[awacs.sts.AssumeRole],
            )],
        ),
        ManagedPolicyArns=[
            'arn:aws:iam::aws:policy/'
            'service-role/AmazonECSTaskExecutionRolePolicy'
        ],
    ))

    artifact_bucket = template.add_resource(Bucket(
        'ArtifactBucket',
        DeletionPolicy='Retain',
    ))

    codebuild_role = template.add_resource(Role(
        'CodeBuildServiceRole',
        Path='/',
        AssumeRolePolicyDocument=PolicyDocument(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal(
                        'Service', 'codebuild.amazonaws.com'
                    ),
                    Action=[
                        awacs.sts.AssumeRole,
                    ],
                ),
            ],
        ),
        Policies=[Policy(
            PolicyName='root',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.ssm.GetParameters,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.s3.GetObject,
                            awacs.s3.PutObject,
                            awacs.s3.GetObjectVersion,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.logs.CreateLogGroup,
                            awacs.logs.CreateLogStream,
                            awacs.logs.PutLogEvents,
                        ],
                    ),
                ],
            ),
        )],
    ))

    codepipeline_role = template.add_resource(Role(
        'CodePipelineServiceRole',
        Path='/',
        AssumeRolePolicyDocument=PolicyDocument(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal(
                        'Service', 'codepipeline.amazonaws.com'
                    ),
                    Action=[
                        awacs.sts.AssumeRole,
                    ],
                ),
            ],
        ),
        Policies=[Policy(
            PolicyName='root',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Resource=[
                            Sub(f'${{{artifact_bucket.title}.Arn}}/*')
                        ],
                        Effect=Allow,
                        Action=[
                            awacs.s3.GetBucketVersioning,
                            awacs.s3.GetObject,
                            awacs.s3.GetObjectVersion,
                            awacs.s3.PutObject,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.ecs.DescribeServices,
                            awacs.ecs.DescribeTaskDefinition,
                            awacs.ecs.DescribeTasks,
                            awacs.ecs.ListTasks,
                            awacs.ecs.RegisterTaskDefinition,
                            awacs.ecs.UpdateService,
                            awacs.codebuild.StartBuild,
                            awacs.codebuild.BatchGetBuilds,
                            awacs.iam.PassRole,
                        ],
                    ),
                ],
            ),
        )],
    ))

    log_group = template.add_resource(LogGroup(
        'LogGroup',
        LogGroupName=Sub('/ecs/${AWS::StackName}'),
    ))

    # One shared CodeBuild project serves every pipeline-enabled service,
    # so create it only if at least one service opts in.
    if any(conf.pipeline.enable for conf in sierrafile.services.values()):
        project = template.add_resource(Project(
            'CodeBuildProject',
            Name=Sub('${AWS::StackName}-build'),
            ServiceRole=Ref(codebuild_role),
            Artifacts=Artifacts(Type='CODEPIPELINE'),
            Source=Source(Type='CODEPIPELINE'),
            Environment=Environment(
                ComputeType='BUILD_GENERAL1_SMALL',
                Image='aws/codebuild/docker:17.09.0',
                Type='LINUX_CONTAINER',
            ),
        ))

    for name, settings in sierrafile.services.items():
        task_definition = template.add_resource(TaskDefinition(
            f'{name}TaskDefinition',
            RequiresCompatibilities=['EC2'],
            Cpu=str(settings.container.cpu),
            Memory=str(settings.container.memory),
            NetworkMode='bridge',
            ExecutionRoleArn=Ref(task_role.title),
            ContainerDefinitions=[
                ContainerDefinition(
                    Name=f'{name}',
                    Image=settings.container.image,
                    Memory=str(settings.container.memory),
                    Essential=True,
                    PortMappings=[
                        PortMapping(
                            ContainerPort=settings.container.port,
                            Protocol='tcp',
                        ),
                    ],
                    # Only inject the env vars this service declares.
                    Environment=[
                        troposphere.ecs.Environment(Name=k, Value=v)
                        for k, v in sierrafile.env_vars.items()
                        if k in settings.get('environment', [])
                    ],
                    LogConfiguration=LogConfiguration(
                        LogDriver='awslogs',
                        Options={
                            'awslogs-region': Ref('AWS::Region'),
                            'awslogs-group': Ref(log_group.title),
                            'awslogs-stream-prefix': Ref('AWS::StackName'),
                        },
                    ),
                ),
            ],
        ))

        target_group = template.add_resource(TargetGroup(
            f'{name}TargetGroup',
            Port=settings.container.port,
            Protocol='TCP',
            VpcId=Ref(network_vpc),
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-{name}')),
        ))

        listener = template.add_resource(Listener(
            f'{name}ElbListener',
            LoadBalancerArn=Ref(elb),
            Port=settings.container.port,
            Protocol='TCP',
            DefaultActions=[
                Action(TargetGroupArn=Ref(target_group), Type='forward')
            ],
        ))

        service = template.add_resource(Service(
            f'{name}Service',
            Cluster=Ref(cluster),
            ServiceName=f'{name}-service',
            DependsOn=[autoscaling_group.title, listener.title],
            DesiredCount=settings.container.count,
            TaskDefinition=Ref(task_definition),
            LaunchType='EC2',
            LoadBalancers=[
                troposphere.ecs.LoadBalancer(
                    ContainerName=f'{name}',
                    ContainerPort=settings.container.port,
                    TargetGroupArn=Ref(target_group),
                ),
            ],
        ))

        if settings.pipeline.enable:
            # `project` is guaranteed to exist here: pipeline.enable for this
            # service implies the any(...) guard above was true.
            pipeline = template.add_resource(Pipeline(
                f'{name}Pipeline',
                RoleArn=GetAtt(codepipeline_role, 'Arn'),
                ArtifactStore=ArtifactStore(
                    Type='S3',
                    Location=Ref(artifact_bucket),
                ),
                Stages=[
                    Stages(
                        Name='Source',
                        Actions=[Actions(
                            Name='Source',
                            ActionTypeId=ActionTypeId(
                                Category='Source',
                                Owner='ThirdParty',
                                Version='1',
                                Provider='GitHub',
                            ),
                            OutputArtifacts=[
                                OutputArtifacts(Name=f'{name}Source'),
                            ],
                            RunOrder='1',
                            Configuration={
                                'Owner': settings.pipeline.user,
                                'Repo': settings.pipeline.repo,
                                'Branch': settings.pipeline.branch,
                                'OAuthToken': Ref(parameters.github_token),
                            },
                        )],
                    ),
                    Stages(
                        Name='Build',
                        Actions=[Actions(
                            Name='Build',
                            ActionTypeId=ActionTypeId(
                                Category='Build',
                                Owner='AWS',
                                Version='1',
                                Provider='CodeBuild',
                            ),
                            InputArtifacts=[
                                InputArtifacts(Name=f'{name}Source'),
                            ],
                            OutputArtifacts=[
                                OutputArtifacts(Name=f'{name}Build'),
                            ],
                            RunOrder='1',
                            Configuration={
                                'ProjectName': Ref(project),
                            },
                        )],
                    ),
                    Stages(
                        Name='Deploy',
                        Actions=[Actions(
                            Name='Deploy',
                            ActionTypeId=ActionTypeId(
                                Category='Deploy',
                                Owner='AWS',
                                Version='1',
                                Provider='ECS',
                            ),
                            InputArtifacts=[
                                InputArtifacts(Name=f'{name}Build')
                            ],
                            RunOrder='1',
                            Configuration={
                                'ClusterName': Ref(cluster),
                                'ServiceName': Ref(service),
                                'FileName': 'image.json',
                            },
                        )],
                    ),
                ],
            ))

            template.add_resource(Webhook(
                f'{name}CodePipelineWebhook',
                Name=Sub(f'${{AWS::StackName}}-{name}-webhook'),
                Authentication='GITHUB_HMAC',
                AuthenticationConfiguration=AuthenticationConfiguration(
                    SecretToken=Ref(parameters.github_token),
                ),
                Filters=[FilterRule(
                    JsonPath='$.ref',
                    MatchEquals=f'refs/heads/{settings.pipeline.branch}'
                )],
                TargetAction='Source',
                TargetPipeline=Ref(pipeline),
                TargetPipelineVersion=1,
                RegisterWithThirdParty=True,
            ))

    return template
def _create_cfn_template(self):
    """Build the CloudFormation template for an EKS node group.

    Populates ``self.tpl`` with: an instance profile (wrapping either a
    caller-provided role or a newly declared one), a node security group plus
    the node<->control-plane ingress/egress rules, and an auto-scaling group
    with its launch configuration. Also imports an EC2 key pair when none was
    supplied (a live AWS side effect, not a template resource), and records
    provisioning status in ``self.resources``.
    """
    self.tpl = Template()
    self.tpl.add_version('2010-09-09')
    self.tpl.add_description('CFN template to create an EKS node group and affiliated resources.')
    # Tag required by Kubernetes for resources belonging to this cluster.
    eks_tag = 'kubernetes.io/cluster/{}'.format(self.cluster.name)
    # Bookkeeping record for the node-group role resource.
    r = self.resources.get(self.RESOURCE_NG_ROLE.name)
    if self.role:
        # Caller supplied an existing IAM role: only wrap it in a profile and
        # reconstruct its ARN via STS (the role itself is not templated).
        profile = InstanceProfile(
            self.RESOURCE_NG_PROFILE.name,
            InstanceProfileName=self.tag_name,
            Path='/',
            Roles=[self.role])
        account_id = boto3.session.Session().client('sts').get_caller_identity().get('Account')
        role_arn = 'arn:aws:iam::{}:role/{}'.format(account_id, self.role)
        self.tpl.add_output(
            Output(self.RESOURCE_NG_ROLE.name, Value=role_arn, Description='Node group role'))
        r.status = Status.provided
        r.resource_id = role_arn
    else:
        # No role supplied: declare one with the standard EKS worker policies.
        role = Role(
            self.RESOURCE_NG_ROLE.name,
            RoleName=self.tag_name,
            AssumeRolePolicyDocument=Policy(Statement=[
                Statement(Effect=Allow, Action=[AssumeRole],
                          Principal=Principal('Service', ['ec2.amazonaws.com'])),
            ], ),
            ManagedPolicyArns=['arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy',
                               'arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy',
                               'arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly']
        )
        self.tpl.add_resource(role)
        profile = InstanceProfile(
            self.RESOURCE_NG_PROFILE.name,
            InstanceProfileName=self.tag_name,
            Path='/',
            Roles=[Ref(role)])
        self.tpl.add_output(
            Output(self.RESOURCE_NG_ROLE.name, Value=GetAtt(role, 'Arn'),
                   Description='Node group role'))
    self.tpl.add_resource(profile)
    # Node security group; custom ingress rules replace the default
    # (otherwise only the cross-reference rules added below apply).
    # NOTE: the comprehension's `r` is scoped to the comprehension and does
    # not clobber the outer resource record `r` (Python 3 scoping).
    if self.sg_igresses:
        sg = SecurityGroup(
            self.RESOURCE_NG_SG.name,
            VpcId=self.cluster.vpc,
            Tags=Tags({'Name': self.tag_name, eks_tag: 'owned'}),
            GroupDescription='Security Group applied to the EKS node group',
            SecurityGroupIngress=[SecurityGroupRule(IpProtocol=r.protocol, FromPort=r.from_port,
                                                    ToPort=r.to_port, CidrIp=r.cidr)
                                  for r in self.sg_igresses]
        )
    else:
        sg = SecurityGroup(
            self.RESOURCE_NG_SG.name,
            VpcId=self.cluster.vpc,
            Tags=Tags({'Name': self.tag_name, eks_tag: 'owned'}),
            GroupDescription='Security Group applied to the EKS node group',
        )
    self.tpl.add_resource(sg)
    # NOTE(review): DependsOn is passed the troposphere object rather than
    # its title string; newer troposphere accepts this — confirm against the
    # pinned troposphere version.
    self.tpl.add_resource(SecurityGroupIngress(
        self.RESOURCE_NG_SG_INGRESS.name,
        DependsOn=sg,
        Description='Allow node to communicate with each other',
        GroupId=Ref(sg),
        SourceSecurityGroupId=Ref(sg),
        IpProtocol='-1',
        FromPort=0,
        ToPort=65535))
    self.tpl.add_resource(SecurityGroupIngress(
        self.RESOURCE_NG_SG_CP_INGRESS.name,
        DependsOn=sg,
        Description='Allow kubelet and pods on the nodes to receive communication from the cluster control plane',
        GroupId=Ref(sg),
        SourceSecurityGroupId=self.cluster.sg,
        IpProtocol='tcp',
        FromPort=1025,
        ToPort=65535))
    self.tpl.add_resource(SecurityGroupEgress(
        self.RESOURCE_CP_EGRESS_TO_NG.name,
        DependsOn=sg,
        Description='Allow the cluster control plane to communicate with nodes kubelet and pods',
        GroupId=self.cluster.sg,
        DestinationSecurityGroupId=Ref(sg),
        IpProtocol='tcp',
        FromPort=1025,
        ToPort=65535))
    self.tpl.add_resource(SecurityGroupIngress(
        self.RESOURCE_CP_SG_INGRESS.name,
        DependsOn=sg,
        Description='Allow pods to communicate with the cluster API Server',
        GroupId=self.cluster.sg,
        SourceSecurityGroupId=Ref(sg),
        IpProtocol='tcp',
        FromPort=443,
        ToPort=443))
    # keypair
    # Side effect: when no key pair was given, import the local public key
    # into EC2 under a random 'eksXXXXX' name and remember it for cleanup.
    ec2 = boto3.session.Session().resource('ec2')
    r = self.resources.get(self.RESOURCE_NG_KEYPAIR.name)
    if not self.keypair:
        keyname = 'eks{}'.format(''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(5)))
        with open(self.ssh_public_key, 'rb') as f:
            ec2.import_key_pair(KeyName=keyname, PublicKeyMaterial=f.read())
        self.keypair = keyname
        self.keypair_imported = True
        # NOTE(review): typo 'kaypair' in the output description (user-facing
        # string left unchanged here).
        self.tpl.add_output(Output(self.OUTPUT_KEYNAME, Value=self.keypair, Description='Imported kaypair name'))
        r.status = Status.created
    else:
        r.status = Status.provided
        r.resource_id = self.keypair
    # auto-scaling group and launch configuration
    if self.no_user_data:
        # Plain launch configuration without a bootstrap script.
        lc = LaunchConfiguration(
            self.RESOURCE_NG_ASG_LC.name,
            AssociatePublicIpAddress=self.use_public_ip,
            IamInstanceProfile=Ref(profile),
            ImageId=self.ami,
            InstanceType=self.instance,
            KeyName=self.keypair,
            SecurityGroups=[Ref(sg)])
    else:
        # Render the Jinja USER_DATA template line-by-line and join so that
        # troposphere emits one Fn::Join over the whole script.
        user_data = Base64(
            Join('', [line + '\n' for line in Environment().from_string(self.USER_DATA).render(
                ci=self.cluster, ng_asg=self.RESOURCE_NG_ASG.name, stack_name=self.stack_name,
                max_pods=self.MAX_PODS.get(self.instance), region=self.region).split('\n')]))
        lc = LaunchConfiguration(
            self.RESOURCE_NG_ASG_LC.name,
            AssociatePublicIpAddress=self.use_public_ip,
            IamInstanceProfile=Ref(profile),
            ImageId=self.ami,
            InstanceType=self.instance,
            KeyName=self.keypair,
            SecurityGroups=[Ref(sg)],
            UserData=user_data)
    self.tpl.add_resource(lc)
    self.tpl.add_resource(AutoScalingGroup(
        self.RESOURCE_NG_ASG.name,
        DesiredCapacity=self.desired,
        MinSize=self.min,
        MaxSize=self.max,
        LaunchConfigurationName=Ref(lc),
        VPCZoneIdentifier=self.subnets,
        Tags=[Tag('Name', self.tag_name, True), Tag(eks_tag, 'owned', True)],
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(MinInstancesInService=1,
                                                              MaxBatchSize=1))))
def __init__(self, name='HaCluster', ami_name='amazonLinuxAmiId',
             user_data='', env_vars=None, min_size=1, max_size=1,
             desired_capacity=DEFAULT_TO_MIN_SIZE, instance_type='t2.micro',
             subnet_layer=None, elb_scheme=SCHEME_INTERNET_FACING,
             elb_listeners=None, elb_health_check_port=None,
             elb_health_check_protocol='TCP', elb_health_check_path='',
             elb_idle_timeout=None, update_policy_PauseTime='PT1M',
             update_policy_MinInstancesInService=0,
             update_policy_MaxBatchSize=1, cname='', custom_tags=None,
             elb_custom_tags=None, scaling_policies=None,
             creation_policy_timeout=None, allow_default_ingress=True):
    """Configure an HA cluster: an ELB fronting an auto-scaling group.

    :param name: base name used in resource names and descriptions.
    :param ami_name: key identifying the AMI in the ami_cache.json file.
    :param user_data: contents of the userdata script as a string.
    :param env_vars: dict of environment variables injected into instances
        (default: empty dict).
    :param min_size: lower boundary of the autoscaling group.
    :param max_size: upper boundary of the autoscaling group.
    :param desired_capacity: initial capacity; defaults to tracking min_size.
    :param instance_type: EC2 instance type for the autoscaling group.
    :param subnet_layer: subnet layer the ASG is in (public, private, ...).
    :param elb_scheme: internet-facing gets a publicly accessible DNS,
        internal is only accessible to the VPC.
    :param elb_listeners: list of dicts defining each ELB listener; each may
        contain elb_port [required], elb_protocol, instance_port,
        instance_protocol, ssl_cert_name (default: one HTTP:80 listener).
    :param elb_health_check_port: health check port for the cluster.
    :param elb_health_check_protocol: HTTP, HTTPS, TCP, or SSL.
    :param elb_health_check_path: health check path (HTTP/HTTPS only).
    :param elb_idle_timeout: how long a connection may stay idle before the
        ELB terminates it.
    :param update_policy_PauseTime: rolling-update pause time.
    :param update_policy_MinInstancesInService: rolling-update floor.
    :param update_policy_MaxBatchSize: rolling-update batch size.
    :param cname: optional FQDN for a CNAME in a private hosted zone.
    :param custom_tags: dict translated to autoscaling Tags (default: {}).
    :param elb_custom_tags: tags passed through to add_cluster_elb.
    :param scaling_policies: list of scaling-policy dicts passed to add_asg.
    :param creation_policy_timeout: minutes for the creation-policy resource
        signal; no CreationPolicy is attached when falsy.
    :param allow_default_ingress: whether to add type-appropriate ingress
        rules to the ELB (PUBLIC_ACCESS_CIDR for internet-facing ELBs, the
        VPC CIDR for internal ones).
    """
    # BUG FIX: the previous signature used mutable defaults ({} and [...]),
    # which are shared across all calls and instances; None sentinels give
    # each instance a fresh object while keeping the call interface intact.
    if env_vars is None:
        env_vars = {}
    if elb_listeners is None:
        elb_listeners = [{'elb_protocol': 'HTTP', 'elb_port': 80}]
    if custom_tags is None:
        custom_tags = {}
    if elb_custom_tags is None:
        elb_custom_tags = {}

    # This will be the name used in resource names and descriptions
    self.name = name

    # This is the name used to identify the AMI from the ami_cache.json file
    self.ami_name = ami_name

    # This is the contents of the userdata script as a string
    self.user_data = user_data

    # This is a dictionary of environment variables to inject into the instances
    self.env_vars = env_vars

    # These define the lower and upper boundaries of the autoscaling group
    self.min_size = min_size
    self.max_size = max_size
    self.desired_capacity = desired_capacity

    # The type of instance for the autoscaling group
    self.instance_type = instance_type

    # This is the subnet layer that the ASG is in (public, private, ...)
    self.subnet_layer = subnet_layer

    # This is the type of ELB: internet-facing gets a publicly accessible
    # DNS, while internal is only accessible to the VPC
    self.elb_scheme = elb_scheme

    # This should be a list of dictionaries defining each listener for the ELB
    # Each dictionary can contain elb_port [required], elb_protocol,
    # instance_port, instance_protocol, ssl_cert_name
    self.elb_listeners = elb_listeners

    # This is the health check port for the cluster
    self.elb_health_check_port = elb_health_check_port

    # The ELB health check protocol for the cluster (HTTP, HTTPS, TCP, SSL)
    self.elb_health_check_protocol = elb_health_check_protocol

    # The ELB health check path for the cluster (Only for HTTP and HTTPS)
    self.elb_health_check_path = elb_health_check_path

    # Add a creation policy with a custom timeout if one was specified
    if creation_policy_timeout:
        self.creation_policy = CreationPolicy(
            ResourceSignal=ResourceSignal(
                Timeout='PT' + str(creation_policy_timeout) + 'M'))
    else:
        self.creation_policy = None

    # Add update policy
    self.update_policy = UpdatePolicy(
        AutoScalingRollingUpdate=AutoScalingRollingUpdate(
            PauseTime=update_policy_PauseTime,
            MinInstancesInService=update_policy_MinInstancesInService,
            MaxBatchSize=update_policy_MaxBatchSize,
            # WaitOnResourceSignals=True
        ))

    # The Idle Timeout for the ELB (how long your connection can stay idle
    # before being terminated)
    self.elb_idle_timeout = elb_idle_timeout

    # This is an optional fully qualified DNS name to create a CNAME in a
    # private hosted zone
    self.cname = cname

    # Translate the custom_tags dict to a list of autoscaling Tags.
    # BUG FIX: dict.iteritems() is Python-2-only and raises AttributeError
    # on Python 3; items() behaves identically here.
    self.custom_tags = []
    for key, value in custom_tags.items():
        self.custom_tags.append(autoscaling.Tag(key, value, True))

    ## Save ELB tags for add_cluster_elb
    self.elb_custom_tags = elb_custom_tags

    # A list of dictionaries describing scaling policies to be passed to add_asg
    self.scaling_policies = scaling_policies

    # Indicates whether ingress rules should be added to the ELB for
    # type-appropriate CIDR ranges
    # Internet facing ELBs would allow ingress from PUBLIC_ACCESS_CIDR and
    # private ELBs will allow ingress from the VPC CIDR
    self.allow_default_ingress = allow_default_ingress

    super(HaCluster, self).__init__(template_name=self.name)