def add_nat_asg(self):
    """Create a single-instance, self-healing NAT auto-scaling group.

    Builds a LaunchConfiguration (NAT AMI looked up per-region, user data
    assembled from bundled scripts) and a MinSize=MaxSize=1 ASG placed in
    the first public subnet layer. The instance signals the ASG via
    cfn-signal so the CreationPolicy below can wait on it.

    Returns:
        The AutoScalingGroup resource that was added to the template.
    """
    user_data = [resources.get_resource('nat_takeover.sh')]
    if self.enable_ntp:
        user_data.append(resources.get_resource('ntp_takeover.sh'))
    if self.extra_user_data:
        # Context manager ensures the file handle is closed even if
        # read() raises (the original leaked the handle).
        with open(self.extra_user_data) as extra:
            user_data.append(extra.read())

    nat_asg_name = "Nat%sASG" % str(self.subnet_index)
    # Append the cfn-signal invocation so the booted instance reports
    # success to the ASG's CreationPolicy.
    user_data.extend([
        "\n",
        "cfn-signal -s true",
        " --resource ", nat_asg_name,
        " --stack ", {"Ref": "AWS::StackName"},
        " --region ", {"Ref": "AWS::Region"}
    ])

    nat_launch_config = self.add_resource(
        LaunchConfiguration(
            "Nat%sLaunchConfig" % str(self.subnet_index),
            UserData=Base64(Join('', user_data)),
            ImageId=FindInMap('RegionMap', Ref('AWS::Region'), 'natAmiId'),
            KeyName=Ref('ec2Key'),
            SecurityGroups=[Ref(self.sg)],
            EbsOptimized=False,
            IamInstanceProfile=Ref(self.instance_profile),
            InstanceType=self.instance_type,
            AssociatePublicIpAddress=True))

    # Create the NAT in a public subnet.
    # dict.keys()[0] only works on Python 2; next(iter(...)) picks the
    # same first key on both Python 2 and 3.
    subnet_layer = next(iter(self._subnets['public']))

    nat_asg = self.add_resource(
        AutoScalingGroup(
            nat_asg_name,
            DesiredCapacity=1,
            Tags=[
                Tag("Name", Join("-", ["NAT", self.subnet_index]), True),
                Tag("isNat", "true", True)
            ],
            MinSize=1,
            MaxSize=1,
            Cooldown="30",
            LaunchConfigurationName=Ref(nat_launch_config),
            HealthCheckGracePeriod=30,
            HealthCheckType="EC2",
            VPCZoneIdentifier=[
                self._subnets['public'][subnet_layer][self.subnet_index]
            ],
            # Stack creation blocks until one cfn-signal arrives (15 min max).
            CreationPolicy=CreationPolicy(
                ResourceSignal=ResourceSignal(Count=1, Timeout='PT15M'))))
    return nat_asg
def getTemplate(self):
    """Add one AutoScalingGroup to self.template and return the template.

    When self.elb is set, the group is named "ELBASG", uses ELB health
    checks and is attached to the load balancer; otherwise it is named
    "NoELBASG" with plain EC2 health checks. All other settings are
    identical, so they are built once and shared (the original duplicated
    the whole keyword list in both branches).
    """
    asgConfigJson = self.asgConfig
    # NOTE: 'DesiredCapactiy' is misspelled, but it is the key the config
    # producer emits — deliberately preserved, do not "fix" it here alone.
    common = dict(
        DesiredCapacity=asgConfigJson['DesiredCapactiy'],
        MinSize=asgConfigJson['MinSize'],
        MaxSize=asgConfigJson['MaxSize'],
        LaunchConfigurationName=self.launchConfigName,
        HealthCheckGracePeriod=asgConfigJson['HealthCheckGracePeriod'],
        AvailabilityZones=[self.availZone],
        VPCZoneIdentifier=[self.subnet],
        Tags=[
            Tag("Name", Join("-", [self.friendlyName, self.branch]), True)
        ],
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                MaxBatchSize=asgConfigJson['MaxBatchSize'],
                MinInstancesInService=asgConfigJson['MinInstancesInService'],
                PauseTime=asgConfigJson['PauseTime'],
                WaitOnResourceSignals=asgConfigJson['WaitOnResourceSignals'])))

    if self.elb is not None:
        self.template.add_resource(
            AutoScalingGroup("ELBASG",
                             HealthCheckType="ELB",
                             LoadBalancerNames=[self.elb],
                             **common))
    else:
        self.template.add_resource(
            AutoScalingGroup("NoELBASG",
                             HealthCheckType="EC2",
                             **common))
    return self.template
def define_web_auto_scaling(template, alb_target_group_80, alb_target_group_9090, sg):
    """Add the web tier's LaunchConfiguration and AutoScalingGroup.

    The instance bootstrap script (user data) exports the Rails env,
    downloads app_update.sh from S3 and runs it as the deploy user. The
    ASG is fixed at one instance and registered on both ALB target groups.

    Returns a dict with keys "launch_config" and "autoscaling_group".
    """
    bootstrap_lines = [
        '#!/bin/bash\n',
        'set -x\n',
        'exec > >(tee /tmp/user-data.log|logger -t user-data ) 2>&1\n',
        f'sudo su - deploy -c "echo \\"export RAILS_ENV={env}\\" >> ~/.bashrc"\n',
        f'sudo su - deploy -c "cd ~/app/current; wget http://taxweb-deploy.s3.amazonaws.com/{app_name}/app_update.sh -O app_update.sh >/dev/null 2>&1"\n',
        f'sudo su - deploy -c "cd ~/app/current && chmod 755 app_update.sh && ./app_update.sh {env} web {app_name}"\n',
    ]

    launch_configuration = LaunchConfiguration(
        stack_name_strict + "WebLC",
        UserData=Base64(Join('', bootstrap_lines)),
        ImageId=Ref(base_ami),
        InstanceType=Ref(instance_type),
        KeyName="taxweb-AWS-US-West",
        SecurityGroups=[Ref(sg)])
    template.add_resource(launch_configuration)

    scaling_group = AutoScalingGroup(
        stack_name_strict + "WebASG",
        Tags=[
            Tag("Name", stack_name + "-web", True),
            Tag("Custo", app_name, True),
            Tag("Env", env, True),
            Tag("Role", "web", True),
        ],
        LaunchConfigurationName=Ref(launch_configuration),
        MinSize=1,
        MaxSize=1,
        DesiredCapacity=1,
        VPCZoneIdentifier=[Ref(subnet_id1), Ref(subnet_id2)],
        TargetGroupARNs=[Ref(alb_target_group_80), Ref(alb_target_group_9090)],
        HealthCheckType="ELB",
        HealthCheckGracePeriod="300",
    )
    template.add_resource(scaling_group)

    return {
        "launch_config": launch_configuration,
        "autoscaling_group": scaling_group,
    }
def add_nat_asg(self):
    """Create a single-instance NAT auto-scaling group.

    Builds a LaunchConfiguration from the bundled takeover scripts (plus
    optional NTP and user-supplied user data) and a MinSize=MaxSize=1 ASG
    in this subnet index's public subnet.

    Returns:
        The AutoScalingGroup resource that was added to the template.
    """
    user_data = [resources.get_resource('nat_takeover.sh')]
    if self.enable_ntp:
        user_data.append(resources.get_resource('ntp_takeover.sh'))
    if self.extra_user_data:
        # Context manager closes the file deterministically; the original
        # open(...).read() leaked the handle.
        with open(self.extra_user_data) as extra:
            user_data.append(extra.read())

    nat_launch_config = self.add_resource(
        LaunchConfiguration(
            "Nat%sLaunchConfig" % str(self.subnet_index),
            UserData=Base64(Join('\n', user_data)),
            ImageId=FindInMap('RegionMap', Ref('AWS::Region'), 'natAmiId'),
            KeyName=Ref('ec2Key'),
            SecurityGroups=[Ref(self.sg)],
            EbsOptimized=False,
            IamInstanceProfile=Ref(self.instance_profile),
            InstanceType=self.instance_type,
            AssociatePublicIpAddress=True))

    nat_asg = self.add_resource(
        AutoScalingGroup(
            "Nat%sASG" % str(self.subnet_index),
            DesiredCapacity=1,
            Tags=[
                Tag("Name", Join("-", [Ref(self.vpc_id), "NAT"]), True),
                Tag("isNat", "true", True)
            ],
            MinSize=1,
            MaxSize=1,
            Cooldown="30",
            LaunchConfigurationName=Ref(nat_launch_config),
            HealthCheckGracePeriod=30,
            HealthCheckType="EC2",
            VPCZoneIdentifier=[
                Ref(self.subnets['public'][self.subnet_index])
            ]))
    return nat_asg
def wrapper(*args, **kwargs):
    """Decorator inner: rebuild kwargs['Tags'] before calling *func*.

    Merges tags from three sources, in order:
      1. tags found on each subnet referenced in kwargs['VPCZoneIdentifier']
         (args[-1] appears to be the template/stack object exposing
         .resources — TODO confirm against the decorator's call sites),
         deduplicated by tag key against what is already collected;
      2. entries of a baseTags instance passed as kwargs['Tags'];
      3. a Tags instance passed as kwargs['Tags'] is appended wholesale.
    """
    result = []
    if 'VPCZoneIdentifier' in kwargs:
        for ref in kwargs['VPCZoneIdentifier']:
            if isinstance(ref, Ref):
                # Resolve the Ref back to the subnet resource it names.
                subnet = args[-1].resources[ref.data['Ref']]
                if 'Tags' in subnet.properties:
                    # NOTE(review): 'append' is reset once per subnet rather
                    # than once per tag, so after the first duplicate key no
                    # further tags from this subnet are copied — confirm
                    # this is intentional.
                    append = True
                    for tag in subnet.properties['Tags'].tags:
                        for r_tag in result:
                            if r_tag.data['Key'] == tag['Key']:
                                append = False
                        if append is True:
                            result.append(Tag(
                                tag['Key'],
                                tag['Value'],
                                True)
                            )
    if 'Tags' in kwargs and isinstance(kwargs['Tags'], baseTags):
        # Copy each raw tag dict into a propagate-at-launch Tag.
        for tag in kwargs['Tags'].tags:
            result.append(Tag(tag['Key'], tag['Value'], True))
    if 'Tags' in kwargs and isinstance(kwargs['Tags'], Tags):
        # A Tags object is kept as a single element of the result list.
        result.append(kwargs['Tags'])
    kwargs['Tags'] = result
    return func(*args, **kwargs)
def _auto_scaling_group(self):
    """Build the environment's AutoScalingGroup resource.

    Sizing, environment name and private subnets all come from
    self.config; updates roll one instance at a time while keeping one
    instance in service.
    """
    cfg = self.config
    environment = cfg['env']
    rolling_update = AutoScalingRollingUpdate(
        PauseTime='PT5M',
        MinInstancesInService="1",
        MaxBatchSize='1',
        WaitOnResourceSignals=True)
    return AutoScalingGroup(
        f"duy{environment}AutoscalingGroup",
        DesiredCapacity=cfg['scale_desire'],
        Tags=[
            Tag("Name", f"duy-{environment}", True),
            Tag("Environment", environment, True),
            Tag("PropagateAtLaunch", "true", True)
        ],
        LaunchConfigurationName=Ref(self.launchConfiguration),
        MinSize=cfg['scale_min'],
        MaxSize=cfg['scale_max'],
        VPCZoneIdentifier=cfg['private_subnet'],
        LoadBalancerNames=[Ref(self.loadBalancer)],
        HealthCheckType="EC2",
        HealthCheckGracePeriod="300",
        TerminationPolicies=["OldestInstance", "Default"],
        UpdatePolicy=UpdatePolicy(AutoScalingRollingUpdate=rolling_update))
def create_auto_scaling_group(self, asg_args):
    '''
    Method creates an auto scaling group, adds it to the resources list
    and returns it.

    @param asg_args [dict] collection of keyword arguments for the asg;
        must contain 'desired_capacity', 'min_capacity' and 'max_capacity'
    @return the AutoScalingGroup resource that was added
    '''
    autoscaling_group = self.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup",
            DesiredCapacity=asg_args['desired_capacity'],
            Tags=[Tag("Name", "CloudformationLab", True)],
            LaunchConfigurationName=Ref(self.launch_configuration),
            MinSize=asg_args['min_capacity'],
            MaxSize=asg_args['max_capacity'],
            VPCZoneIdentifier=self.subnets,
            LoadBalancerNames=[Ref(self.load_balancer)],
            AvailabilityZones=self.availability_zones,
            HealthCheckType="ELB",
            HealthCheckGracePeriod=60,
            UpdatePolicy=UpdatePolicy(
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    PauseTime='PT5M',
                    MinInstancesInService=asg_args['min_capacity'],
                    MaxBatchSize='1',
                    WaitOnResourceSignals=True))))
    # Return the resource so callers can reference the group (the
    # original assigned it to a local and discarded it).
    return autoscaling_group
def test_ensuring_an_ec2_instance_is_always_running(self):
    """Integration test: a 1/1/1 auto-scaling group keeps exactly one
    instance running and replaces it after it is terminated.

    Talks to real AWS (CloudFormation + EC2) and sleeps 3 minutes while
    the replacement instance boots.
    """
    test_stack_name = "TestEnsureOneInstance"
    init_cf_env(test_stack_name)
    ###
    subnet_ids = [get_first_subnet(), get_subnet(index=1)]
    t = Template()
    sg = ts_add_security_group(t)
    launch_config = t.add_resource(
        LaunchConfiguration(
            "MyLaunchConfiguration",
            ImageId=get_linux2_image_id(),
            InstanceType='t2.micro',
            KeyName=KEY,
            SecurityGroups=[Ref(sg)],
            AssociatePublicIpAddress=True,
            # Controls whether instances in this group are launched with
            # detailed (true) or basic (false) monitoring.
            InstanceMonitoring=False,
        ))
    auto_scaling_group = t.add_resource(
        AutoScalingGroup(
            "MyAutoScalingGroup",
            LaunchConfigurationName=Ref(launch_config),
            # Min = Max = Desired = 1 pins the group at exactly one instance.
            DesiredCapacity=1,
            MinSize=1,
            MaxSize=1,
            VPCZoneIdentifier=subnet_ids,
            # Seconds EC2 Auto Scaling waits before checking the health
            # status of an instance that has come into service.
            HealthCheckGracePeriod=600,
            # Use the internal EC2 health check to discover issues with the vm.
            HealthCheckType='EC2',
            Tags=[
                # 'True' propagates the tag to the instances the group starts.
                Tag("Name", test_stack_name, True)
            ]))
    dump_template(t, True)
    create_stack(test_stack_name, t)
    resp = ec2_client.describe_instances(
        Filters=[{
            "Name": "tag:Name",
            'Values': [test_stack_name]
        }])
    all_instances = list(
        chain.from_iterable([
            reservation['Instances'] for reservation in resp['Reservations']
        ]))
    # Whole-dict comparison: only matches while both 'Code' and 'Name'
    # report the running state (code 16).
    running_instances = [
        ins for ins in all_instances if ins['State'] == {
            'Code': 16,
            'Name': 'running'
        }
    ]
    self.assertEqual(len(running_instances), 1)
    instance = running_instances[0]
    public_ip = instance['PublicIpAddress']  # fetched but not asserted on
    subnet_id = instance['SubnetId']
    instance_id = instance['InstanceId']
    self.assertIn(subnet_id, subnet_ids)
    # Kill the instance; the ASG should detect it and launch a replacement.
    run(f'aws ec2 terminate-instances --instance-ids {instance_id}')
    time.sleep(180)  # wait for new instance starting up
    resp = ec2_client.describe_instances(
        Filters=[{
            "Name": "tag:Name",
            'Values': [test_stack_name]
        }])
    all_instances = list(
        chain.from_iterable([
            reservation['Instances'] for reservation in resp['Reservations']
        ]))
    running_instances = [
        ins for ins in all_instances if ins['State'] == {
            'Code': 16,
            'Name': 'running'
        }
    ]
    # Exactly one running instance again, in one of our subnets, but with
    # a different instance id — i.e. it was actually replaced.
    self.assertEqual(len(running_instances), 1)
    instance = running_instances[0]
    subnet_id = instance['SubnetId']
    self.assertIn(subnet_id, subnet_ids)
    instance_id2 = instance['InstanceId']
    self.assertNotEqual(instance_id2, instance_id)
def main():
    """Build an ECS-on-EC2 cluster CloudFormation template and write it
    to ecs-ec2-cluster-cf.yaml.

    Resources: a LaunchConfiguration that joins instances to the cluster,
    the ECS Cluster itself, an AutoScalingGroup with rolling/replacing
    update policies, CPU-based step-scaling policies with their CloudWatch
    alarms, and the instance security group.
    """
    t = Template()
    t.add_version('2010-09-09')
    t.set_description("AWS CloudFormation ECS example")

    # Add the Parameters.
    # NOTE(review): ClusterType, RollingUpdate and VpcCidr are declared but
    # not referenced by any resource below — confirm whether they are
    # consumed by the stack's callers or are dead parameters.
    AMI = t.add_parameter(Parameter(
        "AMI",
        Type="String",
    ))
    ClusterSize = t.add_parameter(Parameter(
        "ClusterSize",
        Type="String",
    ))
    ClusterType = t.add_parameter(Parameter(
        "ClusterType",
        Type="String",
    ))
    InstanceType = t.add_parameter(Parameter(
        "InstanceType",
        Type="String",
    ))
    IamInstanceProfile = t.add_parameter(
        Parameter(
            "IamInstanceProfile",
            Type="String",
        ))
    KeyName = t.add_parameter(
        Parameter(
            "KeyName",
            Type="AWS::EC2::KeyPair::KeyName",
        ))
    MaxClusterSize = t.add_parameter(
        Parameter(
            "MaxClusterSize",
            Type="String",
        ))
    RollingUpdate = t.add_parameter(Parameter(
        "RollingUpdate",
        Type="String",
    ))
    Stage = t.add_parameter(Parameter(
        "Stage",
        Type="String",
    ))
    Subnets = t.add_parameter(
        Parameter(
            "Subnets",
            Type="List<AWS::EC2::Subnet::Id>",
        ))
    VpcCidr = t.add_parameter(Parameter(
        "VpcCidr",
        Type="String",
    ))
    VpcId = t.add_parameter(Parameter(
        "VpcId",
        Type="AWS::EC2::VPC::Id",
    ))

    # Launch configuration: user data writes the cluster name into
    # /etc/ecs/ecs.config, enables the ECS agent, then reports boot status
    # to the ASG via cfn-signal.
    ContainerInstances = t.add_resource(
        LaunchConfiguration(
            'ContainerInstances',
            UserData=Base64(
                Join('', [
                    '#!/bin/bash -xe\n',
                    'echo ECS_CLUSTER=',
                    Ref('AWS::StackName'),
                    '>> /etc/ecs/ecs.config\n',
                    'systemctl enable [email protected]\n',
                    'systemctl start [email protected]\n',
                    '/usr/bin/cfn-signal -e $? ',
                    ' --stack ',
                    Ref('AWS::StackName'),
                    ' --resource ECSAutoScalingGroup ',
                    ' --region ',
                    Ref('AWS::Region'), '\n'
                ])),
            ImageId=Ref(AMI),
            KeyName=Ref(KeyName),
            SecurityGroups=[Ref('EcsSecurityGroup')],
            IamInstanceProfile=Ref(IamInstanceProfile),
            InstanceType=Ref(InstanceType)))
    ECSCluster = t.add_resource(
        Cluster('EcsCluster', ClusterName=Ref('AWS::StackName')))
    ECSAutoScalingGroup = t.add_resource(
        AutoScalingGroup(
            'ECSAutoScalingGroup',
            DesiredCapacity=Ref(ClusterSize),
            MinSize=Ref(ClusterSize),
            MaxSize=Ref(MaxClusterSize),
            VPCZoneIdentifier=Ref(Subnets),
            LaunchConfigurationName=Ref('ContainerInstances'),
            HealthCheckType="EC2",
            UpdatePolicy=UpdatePolicy(
                AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
                    WillReplace=True, ),
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    PauseTime='PT5M',
                    MinInstancesInService=Ref(ClusterSize),
                    MaxBatchSize='1',
                    WaitOnResourceSignals=True)),
            Tags=[
                Tag("Project", "demo", True),
                Tag("Stage", Ref(Stage), True),
                Tag("Name", "home-ecs", True),
            ]))
    # Step-scaling policies (percent change), driven by the two CPU alarms
    # defined below.
    t.add_resource(
        ScalingPolicy("EcsAsgScaleDown",
                      AdjustmentType="PercentChangeInCapacity",
                      AutoScalingGroupName=Ref("ECSAutoScalingGroup"),
                      MetricAggregationType="Average",
                      MinAdjustmentMagnitude="1",
                      PolicyType="StepScaling",
                      StepAdjustments=[
                          StepAdjustments(MetricIntervalLowerBound="-10",
                                          MetricIntervalUpperBound="0",
                                          ScalingAdjustment="-10"),
                          StepAdjustments(MetricIntervalUpperBound="-10",
                                          ScalingAdjustment="-20")
                      ]))
    t.add_resource(
        ScalingPolicy('EcsScaleUp',
                      AdjustmentType="PercentChangeInCapacity",
                      AutoScalingGroupName=Ref("ECSAutoScalingGroup"),
                      EstimatedInstanceWarmup="300",
                      MetricAggregationType="Average",
                      MinAdjustmentMagnitude="1",
                      PolicyType="StepScaling",
                      StepAdjustments=[
                          StepAdjustments(MetricIntervalLowerBound="0",
                                          MetricIntervalUpperBound="10",
                                          ScalingAdjustment="10"),
                          StepAdjustments(MetricIntervalLowerBound="10",
                                          ScalingAdjustment="20")
                      ]))
    # Scale down after 6 minutes below 25% average CPU.
    t.add_resource(
        cloudwatch.Alarm("EcsScaleDownAlarm",
                         ActionsEnabled="True",
                         MetricName="CPUUtilization",
                         AlarmActions=[Ref("EcsAsgScaleDown")],
                         AlarmDescription="Scale down ECS Instances",
                         Namespace="AWS/EC2",
                         Statistic="Average",
                         Period="60",
                         EvaluationPeriods="6",
                         Threshold="25",
                         ComparisonOperator="LessThanThreshold",
                         Dimensions=[
                             cloudwatch.MetricDimension(
                                 Name="AutoScalingGroupName",
                                 Value=Ref("ECSAutoScalingGroup"))
                         ]))
    # Scale up after 3 minutes above 65% average CPU.
    t.add_resource(
        cloudwatch.Alarm("EcsAsgScaleUpAlarm",
                         ActionsEnabled="True",
                         MetricName="CPUUtilization",
                         AlarmActions=[Ref("EcsScaleUp")],
                         AlarmDescription="Scale up ECS Instances",
                         Namespace="AWS/EC2",
                         Statistic="Average",
                         Period="60",
                         EvaluationPeriods="3",
                         Threshold="65",
                         ComparisonOperator="GreaterThanThreshold",
                         Dimensions=[
                             cloudwatch.MetricDimension(
                                 Name="AutoScalingGroupName",
                                 Value=Ref("ECSAutoScalingGroup"))
                         ]))
    # SSH plus the ECS dynamic host port range (31000-61000), open to the
    # world.
    EC2SecurityGroup = t.add_resource(
        ec2.SecurityGroup('EcsSecurityGroup',
                          GroupDescription='ECS InstanceSecurityGroup',
                          SecurityGroupIngress=[
                              ec2.SecurityGroupRule(IpProtocol='tcp',
                                                    FromPort='22',
                                                    ToPort='22',
                                                    CidrIp='0.0.0.0/0'),
                              ec2.SecurityGroupRule(IpProtocol='tcp',
                                                    FromPort='31000',
                                                    ToPort='61000',
                                                    CidrIp='0.0.0.0/0')
                          ],
                          VpcId=Ref(VpcId)))
    with open("ecs-ec2-cluster-cf.yaml", "w") as yamlout:
        yamlout.write(t.to_yaml())
def template():
    """Build the worker-tier CloudFormation template and return it as JSON.

    Declares the stack parameters, a LaunchConfiguration whose cfn-init
    metadata installs the nginx/php/supervisor stack and writes their
    config files, and a fixed-size AutoScalingGroup across two subnets.
    """
    t = Template()
    keyname_param = t.add_parameter(
        Parameter(
            "KeyName",
            Description=
            "Name of an existing EC2 KeyPair to enable SSH access to the instance",
            Type="String"))
    # NOTE(review): image_id_param and instance_type_param are referenced
    # below by name ("ImageId"/"InstanceType") rather than via these
    # variables; the bindings themselves are unused.
    image_id_param = t.add_parameter(
        Parameter("ImageId",
                  Description="ImageId of the EC2 instance",
                  Type="String"))
    instance_type_param = t.add_parameter(
        Parameter("InstanceType",
                  Description="Type of the EC2 instance",
                  Type="String"))
    ScaleCapacity = t.add_parameter(
        Parameter(
            "ScaleCapacity",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))
    VPCAvailabilityZone2 = t.add_parameter(
        Parameter(
            "VPCAvailabilityZone2",
            MinLength="1",
            Type="String",
            Description="Second availability zone",
            MaxLength="255",
        ))
    VPCAvailabilityZone1 = t.add_parameter(
        Parameter(
            "VPCAvailabilityZone1",
            MinLength="1",
            Type="String",
            Description="First availability zone",
            MaxLength="255",
        ))
    SecurityGroup = t.add_parameter(
        Parameter(
            "SecurityGroup",
            Type="String",
            Description="Security group.",
        ))
    RootStackName = t.add_parameter(
        Parameter(
            "RootStackName",
            Type="String",
            Description="The root stack name",
        ))
    ApiSubnet2 = t.add_parameter(
        Parameter(
            "ApiSubnet2",
            Type="String",
            Description="Second private VPC subnet ID for the api app.",
        ))
    ApiSubnet1 = t.add_parameter(
        Parameter(
            "ApiSubnet1",
            Type="String",
            Description="First private VPC subnet ID for the api app.",
        ))

    #####################################################
    # Launch Configuration
    #####################################################
    # cfn-init metadata: the 'Install' config installs the apt packages
    # and writes the nginx site, supervisord and php-fpm config files;
    # user data below bootstraps cfn-init itself and then runs it.
    LaunchConfig = t.add_resource(
        LaunchConfiguration(
            "LaunchConfiguration",
            Metadata=autoscaling.Metadata(
                cloudformation.Init(
                    cloudformation.InitConfigSets(InstallAndRun=['Install']),
                    Install=cloudformation.InitConfig(
                        packages={
                            "apt": {
                                "curl": [],
                                "zip": [],
                                "unzip": [],
                                "git": [],
                                "supervisor": [],
                                "sqlite3": [],
                                "nginx": [],
                                "php7.2-fpm": [],
                                "php7.2-cli": [],
                                "php7.2-pgsql": [],
                                "php7.2-sqlite3": [],
                                "php7.2-gd": [],
                                "php7.2-curl": [],
                                "php7.2-memcached": [],
                                "php7.2-imap": [],
                                "php7.2-mysql": [],
                                "php7.2-mbstring": [],
                                "php7.2-xml": [],
                                "php7.2-zip": [],
                                "php7.2-bcmath": [],
                                "php7.2-soap": [],
                                "php7.2-intl": [],
                                "php7.2-readline": [],
                                "php-msgpack": [],
                                "php-igbinary": []
                            }
                        },
                        files=cloudformation.InitFiles({
                            # nginx default site: serve the Laravel-style
                            # public/ dir, route everything through index.php.
                            "/etc/nginx/sites-available/default":
                            cloudformation.InitFile(content=Join('', [
                                "server {\n",
                                " listen 80 default_server;\n",
                                " root /var/www/html/public;\n",
                                " index index.html index.htm index.php;\n",
                                " server_name _;\n",
                                " charset utf-8;\n",
                                " location = /favicon.ico { log_not_found off; access_log off; }\n",
                                " location = /robots.txt { log_not_found off; access_log off; }\n",
                                " location / {\n",
                                " try_files $uri $uri/ /index.php$is_args$args;\n",
                                " }\n",
                                " location ~ \.php$ {\n",
                                " include snippets/fastcgi-php.conf;\n",
                                " fastcgi_pass unix:/run/php/php7.2-fpm.sock;\n",
                                " }\n",
                                " error_page 404 /index.php;\n",
                                "}\n"
                            ])),
                            # supervisord runs nginx, php-fpm and the
                            # horizon queue worker in the foreground.
                            "/etc/supervisor/conf.d/supervisord.conf":
                            cloudformation.InitFile(content=Join('', [
                                "[supervisord]\n",
                                "nodaemon=true\n",
                                "[program:nginx]\n",
                                "command=nginx\n",
                                "stdout_logfile=/dev/stdout\n",
                                "stdout_logfile_maxbytes=0\n",
                                "stderr_logfile=/dev/stderr\n",
                                "stderr_logfile_maxbytes=0\n",
                                "[program:php-fpm]\n",
                                "command=php-fpm7.2\n",
                                "stdout_logfile=/dev/stdout\n",
                                "stdout_logfile_maxbytes=0\n",
                                "stderr_logfile=/dev/stderr\n",
                                "stderr_logfile_maxbytes=0\n",
                                "[program:horizon]\n",
                                "process_name=%(program_name)s\n",
                                "command=php /var/www/html/artisan horizon\n",
                                "autostart=true\n",
                                "autorestart=true\n",
                                "user=root\n",
                                "redirect_stderr=true\n",
                                "stdout_logfile=/var/www/html/storage/logs/horizon.log\n",
                            ])),
                            "/etc/php/7.2/fpm/php-fpm.conf":
                            cloudformation.InitFile(content=Join('', [
                                "[global]\n",
                                "pid = /run/php/php7.2-fpm.pid\n",
                                "error_log = /proc/self/fd/2\n",
                                "include=/etc/php/7.2/fpm/pool.d/*.conf\n"
                            ]))
                        }))),
            ),
            # User data: install the CodeDeploy agent and the cfn bootstrap
            # tools, then run cfn-init with the metadata above.
            UserData=Base64(
                Join('', [
                    "#!/bin/bash -xe\n",
                    "apt-get update -y\n",
                    "apt-get install -y language-pack-en-base\n",
                    "export LC_ALL=en_US.UTF-8\n",
                    "export LANG=en_US.UTF-8\n",
                    "apt-get install -y ruby\n",
                    "wget https://aws-codedeploy-ap-south-1.s3.amazonaws.com/latest/install\n",
                    "chmod +x ./install\n",
                    "./install auto\n",
                    "service codedeploy-agent start\n",
                    "apt-get install -y software-properties-common python-software-properties\n",
                    "add-apt-repository -y ppa:ondrej/php\n",
                    "apt-get update -y\n",
                    "apt-get install -y python-setuptools\n",
                    "mkdir -p /opt/aws/bin\n",
                    "wget https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
                    "easy_install --script-dir /opt/aws/bin aws-cfn-bootstrap-latest.tar.gz\n",
                    "# Install the files and packages from the metadata\n",
                    "/opt/aws/bin/cfn-init -v ",
                    " --stack ",
                    Ref("AWS::StackName"),
                    " --resource LaunchConfiguration",
                    " --configsets InstallAndRun ",
                    " --region ",
                    Ref("AWS::Region"), "\n"
                ])),
            ImageId=Ref("ImageId"),
            KeyName=Ref(keyname_param),
            BlockDeviceMappings=[
                ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                       Ebs=ec2.EBSBlockDevice(VolumeSize="8")),
            ],
            InstanceType=Ref("InstanceType"),
            # Hard-coded instance profile name from the CodeDeploy demo setup.
            IamInstanceProfile="CodeDeployDemo-EC2-Instance-Profile",
            SecurityGroups=[Ref(SecurityGroup)]))

    #####################################################
    # AutoScaling Groups
    #####################################################
    # Min = Max = Desired = ScaleCapacity: a fixed-size group; rolling
    # updates replace one instance at a time, keeping one in service.
    AutoscalingGroup = t.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup",
            DesiredCapacity=Ref(ScaleCapacity),
            Tags=[
                Tag("App", "cc-worker", True),
                Tag("Name", "cc-worker", True)
            ],
            LaunchConfigurationName=Ref(LaunchConfig),
            MinSize=Ref(ScaleCapacity),
            MaxSize=Ref(ScaleCapacity),
            VPCZoneIdentifier=[Ref(ApiSubnet1), Ref(ApiSubnet2)],
            AvailabilityZones=[
                Ref(VPCAvailabilityZone1),
                Ref(VPCAvailabilityZone2)
            ],
            HealthCheckType="EC2",
            UpdatePolicy=UpdatePolicy(
                AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
                    WillReplace=True, ),
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    PauseTime='PT5M',
                    MinInstancesInService="1",
                    MaxBatchSize='1',
                    WaitOnResourceSignals=True))))
    return t.to_json()
ipPrivateList).replace("[label]", f)), BlockDeviceMappings=[ ec2.BlockDeviceMapping(DeviceName="/dev/xvda", Ebs=ec2.EBSBlockDevice(VolumeSize="8")) ])) AutoscalingGroupX = template.add_resource( AutoScalingGroup( "AutoscalingGroup" + f, Cooldown=300, HealthCheckGracePeriod=300, DesiredCapacity=DesiredCapacity, MinSize=MinSize, MaxSize=MaxSize, Tags=[ Tag("Name", environmentString + "AutoscalingGroup" + f, True) ], LaunchConfigurationName=Ref(LaunchConfig), VPCZoneIdentifier=subnetsList, # LoadBalancerNames=[Ref(LoadBalancer)], #AvailabilityZones=subnetsList, HealthCheckType="EC2", UpdatePolicy=UpdatePolicy( AutoScalingReplacingUpdate=AutoScalingReplacingUpdate( WillReplace=True, ), AutoScalingRollingUpdate=AutoScalingRollingUpdate( PauseTime='PT5M', MinInstancesInService="1", MaxBatchSize='1', WaitOnResourceSignals=True))))
InstanceType=Ref(InstanceType), AssociatePublicIpAddress="true", )) AdditionalNodes = t.add_resource( AutoScalingGroup( "AdditionalNodes", DesiredCapacity=Ref(AdditionalInstanceCount), MinSize=0, MaxSize=Ref(AdditionalInstanceCount), VPCZoneIdentifier=[Ref(SubnetId)], LaunchConfigurationName=Ref(AdditionalNodeLaunchConfig), DependsOn="AmbariNode", CreationPolicy=CreationPolicy(ResourceSignal=ResourceSignal( Count=Ref(AdditionalInstanceCount), Timeout="PT30M"), ), Tags=[Tag("Name", ref_stack_name, True)], )) t.add_output([ Output("IIAN", Description="Instance ID of additional nodes", Value=Ref('AdditionalNodes')), Output( "AmbariURL", Description="URL of Ambari UI", Value=Join("", ["http://", GetAtt('AmbariNode', 'PublicDnsName'), ":8080"]), ), Output( "AmbariSSH",
autoscaling_group_resource = t.add_resource( AutoScalingGroup("myAutoScalingGroup", DesiredCapacity=Ref(desInstances_param), MinSize=Ref(minInstances_param), MaxSize=Ref(maxInstances_param), Cooldown="300", LoadBalancerNames=FindInMap("RegionMap", {"Ref": "AWS::Region"}, "ELBName"), AvailabilityZones=availability_zones, LaunchConfigurationName=Ref(launchconfig_resource), VPCZoneIdentifier=FindInMap("RegionMap", {"Ref": "AWS::Region"}, "SNETid"), Tags=[ Tag("Name", "MyInstance", True), Tag("Project", "MyProject", True), Tag("Team", "MyTeam", True) ])) autoscaling_up_resource = t.add_resource( ScalingPolicy("myScalingUp", AdjustmentType="ChangeInCapacity", ScalingAdjustment="1", Cooldown="300", AutoScalingGroupName=Ref(autoscaling_group_resource))) autoscaling_down_resource = t.add_resource( ScalingPolicy("myScalingDown", AdjustmentType="ChangeInCapacity", ScalingAdjustment="-1",
def generate_cloudformation_template():
    """Build the app's auto-scaling stack template.

    Contains a classic ELB (parameterized health check and scheme), an
    AutoScalingGroup attached to it, and a Route53 CNAME pointing at the
    ELB. Returns the troposphere Template object.
    """
    template = Template()

    template.add_description("""\
Configures Auto Scaling Group for the app""")

    # --- Parameters ------------------------------------------------------
    hostedzone = template.add_parameter(
        Parameter(
            "HostedZoneName",
            Description=
            "The DNS name of an existing Amazon Route 53 hosted zone",
            Type="String",
        ))
    dnsRecord = template.add_parameter(Parameter(
        "DNSRecord",
        Type="String",
    ))
    loadbalancersecuritygroup = template.add_parameter(
        Parameter(
            "LoadBalancerSecurityGroup",
            Type="String",
            Description="Security group for api app load balancer.",
        ))
    scalecapacity = template.add_parameter(
        Parameter(
            "ScaleCapacity",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))
    minsize = template.add_parameter(
        Parameter(
            "MinScale",
            Type="String",
            Description="Minimum number of servers to keep in the ASG",
        ))
    maxsize = template.add_parameter(
        Parameter(
            "MaxScale",
            Type="String",
            Description="Maximum number of servers to keep in the ASG",
        ))
    environment = template.add_parameter(
        Parameter(
            "Environment",
            Type="String",
            Description="The environment being deployed into",
        ))
    subnet = template.add_parameter(
        Parameter(
            "Subnets",
            Type="CommaDelimitedList",
        ))
    loadbalancername = template.add_parameter(
        Parameter(
            "LoadBalancerName",
            Type="String",
        ))
    elbSchema = template.add_parameter(
        Parameter(
            "LoadBalancerSchema",
            Type="String",
        ))
    healthCheckTarget = template.add_parameter(
        Parameter(
            "LoadBalancerHealthCheckTarget",
            Type="String",
        ))
    healthCheckInterval = template.add_parameter(
        Parameter(
            "LoadBalancerHealthCheckInterval",
            Type="String",
        ))
    healthCheckTimeout = template.add_parameter(
        Parameter(
            "LoadBalancerHealthCheckTimeout",
            Type="String",
        ))
    healthyThreshold = template.add_parameter(
        Parameter(
            "LoadBalancerHealthyThreshold",
            Type="String",
        ))
    unhealthyThreshold = template.add_parameter(
        Parameter(
            "LoadBalancerUnHealthyThreshold",
            Type="String",
        ))
    launchconfigurationname = template.add_parameter(
        Parameter(
            "LaunchConfigurationName",
            Type="String",
        ))

    # --- Resources -------------------------------------------------------
    # Classic ELB: HTTP on 80 front and back, cross-zone, draining with a
    # 30 s timeout; health-check settings are fully parameterized.
    loadbalancer = template.add_resource(
        elb.LoadBalancer(
            "LoadBalancer",
            ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                Enabled=True,
                Timeout=30,
            ),
            Subnets=Ref(subnet),
            HealthCheck=elb.HealthCheck(
                Target=Ref(healthCheckTarget),
                HealthyThreshold=Ref(healthyThreshold),
                UnhealthyThreshold=Ref(unhealthyThreshold),
                Interval=Ref(healthCheckInterval),
                Timeout=Ref(healthCheckTimeout),
            ),
            Listeners=[
                elb.Listener(
                    LoadBalancerPort="80",
                    InstancePort="80",
                    Protocol="HTTP",
                    InstanceProtocol="HTTP",
                ),
            ],
            CrossZone=True,
            SecurityGroups=[Ref(loadbalancersecuritygroup)],
            LoadBalancerName=Ref(loadbalancername),
            Scheme=Ref(elbSchema),
        ))

    # ASG attached to the ELB above; rolling updates replace one instance
    # at a time while keeping at least one in service.
    autoscalinggroup = template.add_resource(
        AutoScalingGroup("AutoscalingGroup",
                         Tags=[Tag("Environment", Ref(environment), True)],
                         LaunchConfigurationName=Ref(launchconfigurationname),
                         MinSize=Ref(minsize),
                         MaxSize=Ref(maxsize),
                         DesiredCapacity=Ref(scalecapacity),
                         LoadBalancerNames=[Ref(loadbalancer)],
                         VPCZoneIdentifier=Ref(subnet),
                         HealthCheckType='ELB',
                         HealthCheckGracePeriod=30,
                         UpdatePolicy=UpdatePolicy(
                             AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                                 PauseTime='PT1M',
                                 MinInstancesInService="1",
                                 MaxBatchSize='1'))))

    # CNAME <DNSRecord>.<HostedZoneName> -> ELB DNS name.
    route53record = template.add_resource(
        RecordSetType(
            "myDNSRecord",
            HostedZoneName=Join("", [Ref(hostedzone), "."]),
            Name=Join("", [Ref(dnsRecord), ".", Ref(hostedzone), "."]),
            Type="CNAME",
            TTL="300",
            ResourceRecords=[GetAtt(loadbalancer, "DNSName")],
        ))

    # --- Outputs ---------------------------------------------------------
    template.add_output(
        Output("DomainName",
               Value=Ref(route53record),
               Description="DNS to access the service"))
    template.add_output(
        Output("LoadBalancer",
               Value=GetAtt(loadbalancer, "DNSName"),
               Description="ELB dns"))
    template.add_output(
        Output("AutoScalingGroup",
               Value=Ref(autoscalinggroup),
               Description="Created Auto Scaling Group"))
    template.add_output(
        Output("LaunchConfiguration",
               Value=Ref(launchconfigurationname),
               Description="LaunchConfiguration for this deploy"))
    return template
Ebs=ec2.EBSBlockDevice( VolumeSize="30", VolumeType="gp2" ) ), ], )) AutoScalingGroupSplunk = t.add_resource(AutoScalingGroup( "autoScalingGroupSplunk", DesiredCapacity=1, MinSize=1, MaxSize=3, VPCZoneIdentifier=subnetids_output_id_list, Tags=[ Tag("Name", "Splunk", True) ], LaunchConfigurationName=Ref("LaunchConfiguration"), UpdatePolicy=UpdatePolicy( AutoScalingRollingUpdate=AutoScalingRollingUpdate( PauseTime="PT15M", WaitOnResourceSignals=True, MinInstancesInService="1", ) ) )) LaunchConfigBoomi = t.add_resource(LaunchConfiguration( "LaunchConfigurationBoomi", UserData=Base64(Join('', [ "#!/bin/bash\n",
def create_wordpress_environment(self):
    """Build the CloudFormation template for a WordPress environment.

    Creates security groups, S3 buckets (code + media), an RDS instance,
    a CloudFront distribution for media assets, a spot "write" node, and
    an auto-scaled fleet of "read" nodes behind an ALB. The rendered
    template is written to ``modules/template_wordpress.yaml``.

    Returns:
        None. The template is written to disk as a side effect.
    """
    template = Template()
    template.add_version('2010-09-09')

    # Wordpress preparation: format vpc name and split private and public
    # subnets in two lists (subnets are imported from another stack by name).
    vpc_name_formatted = ''.join(
        e for e in self.private_vpc_name if e.isalnum()).capitalize()
    filter_private_subnets = filter(lambda x: x["type"] == "private", self.private_vpc_subnets)
    filter_public_subnets = filter(lambda x: x["type"] == "public", self.private_vpc_subnets)

    private_subnets = []
    for subnet in filter_private_subnets:
        subnet_name_formatted = ''.join(e for e in subnet["name"] if e.isalnum()).capitalize()
        private_subnets.append(ImportValue("{}{}{}SubnetId".format(
            self.stage, vpc_name_formatted, subnet_name_formatted)))

    public_subnets = []
    for subnet in filter_public_subnets:
        subnet_name_formatted = ''.join(e for e in subnet["name"] if e.isalnum()).capitalize()
        public_subnets.append(ImportValue("{}{}{}SubnetId".format(
            self.stage, vpc_name_formatted, subnet_name_formatted)))

    # Instances Security Groups
    web_dmz_security_group = template.add_resource(
        SecurityGroup(
            "{}WebDMZSecurityGroup".format(self.stage),
            GroupName="{}webdmz-sg".format(self.stage),
            VpcId=ImportValue("{}{}VpcId".format(self.stage, vpc_name_formatted)),
            GroupDescription="Enables external http access to EC2 instance(s) that host the webpages",
            SecurityGroupIngress=[
                # HTTP open to the world; SSH only from the bastion host SG.
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                ),
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    SourceSecurityGroupId=ImportValue("{}BastionHostSecurityGroupID".format(self.stage))
                )
            ]
        )
    )

    rds_private_security_group = template.add_resource(
        SecurityGroup(
            "{}RdsPrivateSecurityGroup".format(self.stage),
            GroupName="{}rds-private-sg".format(self.stage),
            VpcId=ImportValue("{}{}VpcId".format(self.stage, vpc_name_formatted)),
            GroupDescription="Allow access to the mysql port from the webservers",
            SecurityGroupIngress=[
                # DB port reachable only from the web tier.
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=self.database_port,
                    ToPort=self.database_port,
                    SourceSecurityGroupId=Ref(web_dmz_security_group)
                )
            ]
        )
    )

    # S3 Buckets for wordpress content
    bucket_wordpress_code = template.add_resource(
        Bucket(
            "{}BucketWordpressCode".format(self.stage),
            BucketName="{}-wordpress-code".format(self.stage),
            AccessControl=Private
        )
    )
    bucket_wordpress_media_assets = template.add_resource(
        Bucket(
            "{}BucketWordpressMediaAssets".format(self.stage),
            BucketName="{}-wordpress-media-assets".format(self.stage),
            AccessControl=Private
        )
    )

    # Database Instance to store wordpress data
    rds_subnet_group = template.add_resource(
        DBSubnetGroup(
            "{}PrivateRDSSubnetGroup".format(self.stage),
            DBSubnetGroupName="{}private-rds-subnet-group".format(self.stage),
            DBSubnetGroupDescription="Subnets available for the RDS DB Instance",
            SubnetIds=private_subnets
        )
    )
    template.add_resource(
        DBInstance(
            "{}RdsInstance".format(self.stage),
            DBInstanceIdentifier="{}RdsInstance".format(self.stage),
            DBName=self.database_name,
            AllocatedStorage="20",
            DBInstanceClass=self.database_instance_class,
            Engine=self.database_engine,
            EngineVersion=self.database_engine_version,
            MasterUsername=self.database_username,
            MasterUserPassword=self.database_password,
            Port=self.database_port,
            BackupRetentionPeriod=0,  # no automated backups
            MultiAZ=self.database_multiaz,
            DBSubnetGroupName=Ref(rds_subnet_group),
            VPCSecurityGroups=[Ref(rds_private_security_group)],
            Tags=Tags(
                Name=self.database_name_tag
            )
        )
    )

    # Cloudfront Distribution to load images
    cloudfront_origin_access_identity = template.add_resource(
        CloudFrontOriginAccessIdentity(
            "{}CloudfrontOriginAccessIdentity".format(self.stage),
            CloudFrontOriginAccessIdentityConfig=CloudFrontOriginAccessIdentityConfig(
                "{}CloudFrontOriginAccessIdentityConfig".format(self.stage),
                Comment="WordPress Origin Access Identity"
            )
        )
    )
    # Bucket policy that lets only the CloudFront OAI read the media bucket.
    template.add_resource(BucketPolicy(
        "{}BucketWordpressMediaAssetsPolicy".format(self.stage),
        Bucket=Ref(bucket_wordpress_media_assets),
        PolicyDocument={
            "Version": "2008-10-17",
            "Id": "PolicyForCloudFrontPrivateContent",
            "Statement": [
                {
                    "Sid": "1",
                    "Effect": "Allow",
                    "Principal": {
                        "CanonicalUser": GetAtt(cloudfront_origin_access_identity, 'S3CanonicalUserId')
                    },
                    "Action": "s3:GetObject",
                    "Resource": "arn:aws:s3:::{}-wordpress-media-assets/*".format(self.stage)
                }
            ]
        }
    ))
    cloudfront_distribution = template.add_resource(
        Distribution(
            "{}CloudfrontDistribution".format(self.stage),
            DistributionConfig=DistributionConfig(
                Origins=[
                    Origin(
                        Id="MediaAssetsOrigin",
                        DomainName=GetAtt(bucket_wordpress_media_assets, 'DomainName'),
                        S3OriginConfig=S3Origin(
                            OriginAccessIdentity=Join("", [
                                "origin-access-identity/cloudfront/",
                                Ref(cloudfront_origin_access_identity)
                            ])
                        )
                    )
                ],
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="MediaAssetsOrigin",
                    ForwardedValues=ForwardedValues(
                        QueryString=False
                    ),
                    ViewerProtocolPolicy="allow-all"
                ),
                Enabled=True,
                HttpVersion='http2'
            )
        )
    )

    # Wordpress EC2 Instances
    # EC2 Instances types:
    #   Write node = To make changes to your blog. E.g: add new posts
    #   Read Nodes = Instances open to the internet for blog reading
    wordpress_ec2_role = template.add_resource(
        Role(
            "{}WordPressEC2InstanceRole".format(self.stage),
            RoleName="{}WordPressEC2InstanceRole".format(self.stage),
            Path="/",
            AssumeRolePolicyDocument={"Statement": [{
                "Effect": "Allow",
                "Principal": {
                    "Service": ["ec2.amazonaws.com"]
                },
                "Action": ["sts:AssumeRole"]
            }]},
            Policies=[
                # NOTE(review): s3:* on "*" is broader than the two buckets
                # the instances actually sync — consider narrowing.
                Policy(
                    PolicyName="S3FullAccess",
                    PolicyDocument={
                        "Statement": [{
                            "Effect": "Allow",
                            "Action": "s3:*",
                            "Resource": "*"
                        }],
                    }
                )
            ]
        )
    )
    spotfleetrole = template.add_resource(
        Role(
            "{}spotfleetrole".format(self.stage),
            AssumeRolePolicyDocument={
                "Statement": [
                    {
                        "Action": "sts:AssumeRole",
                        "Principal": {
                            "Service": "spotfleet.amazonaws.com"
                        },
                        "Effect": "Allow",
                        "Sid": ""
                    }
                ],
                "Version": "2012-10-17"
            },
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetRole"
            ]
        )
    )
    ec2_instance_profile = template.add_resource(
        InstanceProfile(
            "{}WriteWordpressEc2InstanceProfile".format(self.stage),
            Roles=[Ref(wordpress_ec2_role)]
        )
    )

    # Write node: single spot instance that installs WordPress and pushes
    # code/uploads to S3 every minute via cron.
    template.add_resource(
        SpotFleet(
            "{}WriteWordpressEc2Instance".format(self.stage),
            SpotFleetRequestConfigData=SpotFleetRequestConfigData(
                AllocationStrategy="lowestPrice",
                IamFleetRole=GetAtt(spotfleetrole, "Arn"),
                LaunchSpecifications=[LaunchSpecifications(
                    IamInstanceProfile=IamInstanceProfile(
                        Arn=GetAtt(ec2_instance_profile, "Arn")
                    ),
                    ImageId=self.write_instance_image_id,
                    InstanceType=self.write_instance_type,
                    KeyName=self.write_instance_key_name,
                    SecurityGroups=[SecurityGroups(GroupId=Ref(web_dmz_security_group))],
                    SubnetId=next(iter(public_subnets)),
                    UserData=Base64(
                        Join("", [
                            """
#!/bin/bash
yum install httpd php php-mysql -y
cd /var/www/html
echo \"healthy\" > healthy.html
wget https://wordpress.org/latest.tar.gz
tar -xzf latest.tar.gz
cp -r wordpress/* /var/www/html/
rm -rf wordpress
rm -rf latest.tar.gz
chmod -R 755 wp-content
chown -R apache:apache wp-content
echo -e 'Options +FollowSymlinks \nRewriteEngine on \nrewriterule ^wp-content/uploads/(.*)$ http://""",
                            GetAtt(cloudfront_distribution, 'DomainName'),
                            """/$1 [r=301,nc]' > .htaccess
chkconfig httpd on
cd /var/www
sudo chown -R apache /var/www/html
cd html/
sudo find . -type d -exec chmod 0755 {} \;
sudo find . -type f -exec chmod 0644 {} \;
sed -i 's/AllowOverride None/AllowOverride All/g' /etc/httpd/conf/httpd.conf
sed -i 's/AllowOverride none/AllowOverride All/g' /etc/httpd/conf/httpd.conf
echo -e "*/1 * * * * root aws s3 sync --delete /var/www/html s3://""",
                            Ref(bucket_wordpress_code),
                            """">> /etc/crontab
echo -e "*/1 * * * * root aws s3 sync --delete /var/www/html/wp-content/uploads s3://""",
                            Ref(bucket_wordpress_media_assets),
                            """">> /etc/crontab
service httpd start
"""
                        ])
                    )
                )],
                TargetCapacity=1,
                Type="request"
            )
        )
    )

    # Read nodes: launch configuration that pulls code/uploads from S3.
    template.add_resource(
        LaunchConfiguration(
            "{}WordPressReadLaunchConfiguration".format(self.stage),
            InstanceType=self.read_instance_type,
            ImageId=self.read_instance_image_id,
            KeyName=self.read_instance_key_name,
            LaunchConfigurationName="{}-wordpress-launch-config".format(self.stage),
            SecurityGroups=[Ref(web_dmz_security_group)],
            IamInstanceProfile=Ref(ec2_instance_profile),
            SpotPrice="0.5",
            UserData=Base64(
                Join("", [
                    """
#!/bin/bash
yum install httpd php php-mysql -y
cd /var/www/html
echo \"healthy\" > healthy.html
wget https://wordpress.org/latest.tar.gz
tar -xzf latest.tar.gz
cp -r wordpress/* /var/www/html/
rm -rf wordpress
rm -rf latest.tar.gz
chmod -R 755 wp-content
chown -R apache:apache wp-content
echo -e 'Options +FollowSymlinks \nRewriteEngine on \nrewriterule ^wp-content/uploads/(.*)$ http://""",
                    GetAtt(cloudfront_distribution, 'DomainName'),
                    """/$1 [r=301,nc]' > .htaccess
chkconfig httpd on
cd /var/www
sudo chown -R apache /var/www/html
cd html/
sudo find . -type d -exec chmod 0755 {} \;
sudo find . -type f -exec chmod 0644 {} \;
sed -i 's/AllowOverride None/AllowOverride All/g' /etc/httpd/conf/httpd.conf
sed -i 's/AllowOverride none/AllowOverride All/g' /etc/httpd/conf/httpd.conf
echo -e "*/1 * * * * root aws s3 sync --delete s3://""",
                    Ref(bucket_wordpress_code),
                    """ /var/www/html">> /etc/crontab
echo -e "*/1 * * * * root aws s3 sync --delete s3://""",
                    Ref(bucket_wordpress_media_assets),
                    # FIX: a space is required between the S3 source and the
                    # local destination (the original concatenated them into
                    # one path, mirroring line above which has the space).
                    """ /var/www/html/wp-content/uploads">> /etc/crontab
service httpd start
"""
                ])
            )
        )
    )

    alb = template.add_resource(
        LoadBalancer(
            "{}ApplicationLoadBalancer".format(self.stage),
            Name="{}-wordpress-alb".format(self.stage),
            SecurityGroups=[Ref(web_dmz_security_group)],
            Subnets=public_subnets,
            Type="application"
        )
    )
    target_group = template.add_resource(
        TargetGroup(
            "{}TargetGroup".format(self.stage),
            Name="{}-wordpress-target-group".format(self.stage),
            Port=80,
            Protocol="HTTP",
            VpcId=ImportValue("{}{}VpcId".format(self.stage, vpc_name_formatted)),
            # NOTE(review): instances serve on port 80 but health checks hit
            # 8080 — confirm this is intentional.
            HealthCheckPort=8080
        )
    )
    template.add_resource(
        AutoScalingGroup(
            "{}AutoScalingGroup".format(self.stage),
            DependsOn="{}WordPressReadLaunchConfiguration".format(self.stage),
            AutoScalingGroupName="{}-wordpress-auto-scaling".format(self.stage),
            LaunchConfigurationName="{}-wordpress-launch-config".format(self.stage),
            TargetGroupARNs=[Ref(target_group)],
            MaxSize="3",
            MinSize="1",
            VPCZoneIdentifier=public_subnets,
            Tags=[
                Tag("Name", "{}-wordpress-read-node".format(self.stage), True)
            ]
        )
    )
    template.add_resource(
        Listener(
            "ALBListener",
            DefaultActions=[
                Action(
                    TargetGroupArn=Ref(target_group),
                    Type="forward"
                )
            ],
            LoadBalancerArn=Ref(alb),
            Port=80,
            Protocol="HTTP"
        )
    )

    # FIX: use a context manager so the file handle is flushed and closed
    # (the original leaked the handle opened with open()).
    with open("modules/template_wordpress.yaml", 'w') as f:
        print(template.to_yaml(), file=f)
Timeout=300), CrossZone=True, Tags=Tags(Environment=Ref("environment"), Service=role))) loadbalancer = [Ref(elb)] identifier = "" if rolemap[role]["instance"]["subnet"] in public_prefixes: identifier = "pubsub" + rolemap[role]["instance"]["subnet"].upper() # Dont name this resource since named resources need full stack destroy. t.add_resource( AutoScalingGroup("autoscaling" + role.upper(), VPCZoneIdentifier=Split(",", Ref(identifier)), LaunchConfigurationName=Ref(launchConfig), LoadBalancerNames=loadbalancer, TargetGroupARNs=targetgroup, MinSize=rolemap[role]["autoscaling"]["min"], MaxSize=rolemap[role]["autoscaling"]["max"], Tags=[ Tag("Environment", Ref("environment"), "true"), Tag("Name", Join("-", [Ref("stackName"), role]), "true"), Tag("Service", role, "true"), Tag("Role", role, "true"), Tag("pp_role", rolemap[role]["instance"]["pp_role"], "true") ])) print(t.to_json())
KeyName=Ref(param_keyname), SecurityGroups=[Ref(load_balancer_security_group)] ) ) autoscaling_group = template.add_resource(AutoScalingGroup( 'WebAuthorAutoscalingGroup', DesiredCapacity=2, HealthCheckGracePeriod=300, HealthCheckType='EC2', LaunchConfigurationName=Ref(launch_configuration), LoadBalancerNames=[Ref(load_balancer)], MaxSize=2, MinSize=2, Tags=[ Tag('Purpose', 'WebAuthor', True), Tag('Environment', Ref(param_environment), True) ], VPCZoneIdentifier=FindInMap('EnvironmentAttributeMap', Ref(param_environment), 'PublicSubnetArray') ) ) #---Outputs-------------------------------------------------------------------- template.add_output(Output( 'DNSName', Description='The DNS name for the load balancer.', Value=GetAtt(load_balancer, 'DNSName') ) )
def add_resources(self):
    """Add all GitLab-runner resources to ``self.template``.

    Creates, in order: an IAM role + instance profile for SSM access, a
    LaunchConfiguration whose UserData installs SSM agent, Docker and the
    GitLab runner (and registers the runner), and the AutoScalingGroup
    that runs the instances. Stores each resource on ``self`` for later use.
    """
    # IAM role allowing the instances to be managed via AWS SSM.
    self.runner_ssm_role = self.template.add_resource(
        Role(
            "RunnerSsmRole",
            Path="/",
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM"
            ],
            AssumeRolePolicyDocument=aws.Policy(Statement=[
                Statement(
                    Action=[sts.AssumeRole],
                    Effect=Allow,
                    Principal=Principal("Service", ["ec2.amazonaws.com"]),
                )
            ]),
        ))
    self.runner_ssm_instanceprofile = self.template.add_resource(
        InstanceProfile("RunnerSsmInstanceProfile",
                        Path="/",
                        Roles=[Ref(self.runner_ssm_role)]))
    # Bootstrap script: install SSM agent + Docker + gitlab-runner, register
    # the runner against the configured GitLab URL, and drop an
    # /unregister.sh helper for teardown.
    self.runner_launch_config = self.template.add_resource(
        LaunchConfiguration(
            "RunnerLaunchConfiguration",
            UserData=Base64(
                Join(
                    "",
                    [
                        "#!/bin/bash\n",
                        "#####install ssm######\n",
                        "yum install -y amazon-ssm-agent\n",
                        "systemctl enable amazon-ssm-agent\n",
                        "systemctl start amazon-ssm-agent\n",
                        "####install docker####\n",
                        "yum install -y docker\n",
                        "systemctl enable docker\n",
                        "systemctl start docker\n",
                        "####install runner####\n",
                        "yum install -y wget\n",
                        "wget -O /usr/local/bin/gitlab-runner ",
                        "https://gitlab-runner-downloads.s3.amazonaws.com/v",
                        Ref(self.runner_version),
                        "/binaries/gitlab-runner-linux-amd64\n",
                        "ln -s /usr/local/bin/gitlab-runner ",
                        "/usr/bin/gitlab-runner\n",
                        "chmod +x /usr/local/bin/gitlab-runner\n",
                        "useradd --comment 'GitLab Runner' ",
                        "--create-home gitlab-runner --shell /bin/bash\n",
                        "/usr/local/bin/gitlab-runner install ",
                        "--user=gitlab-runner "
                        "--working-directory=/home/gitlab-runner\n",
                        "systemctl enable gitlab-runner\n",
                        "systemctl start gitlab-runner\n",
                        "####register runner####\n",
                        "gitlab-runner register ",
                        "--config=/etc/gitlab-runner/config.toml ",
                        "--request-concurrency=",
                        Ref(self.runner_job_concurrency),
                        " ",
                        "--tag-list=",
                        Ref(self.runner_tag_list),
                        " ",
                        "--non-interactive ",
                        "--registration-token=",
                        Ref(self.runner_register_token),
                        " ",
                        "--run-untagged=true ",
                        "--locked=false ",
                        "--url=",
                        Ref(self.runner_gitlab_url),
                        " ",
                        "--executor=docker ",
                        "--docker-image=alpine:latest ",
                        "--docker-privileged=true\n",
                        "####create unregister script####\n",
                        # Scrape token/URL back out of `gitlab-runner list`
                        # so the instance can unregister itself later.
                        "TOKEN=$(gitlab-runner list 2>&1 | grep Executor | ",
                        "awk '{ print $4 }' | awk -F= '{ print $2 }')\n",
                        "URL=$(gitlab-runner list 2>&1 | grep Executor | ",
                        "awk '{ print $5 }' | awk -F= '{ print $2 }')\n",
                        "echo gitlab-runner unregister ",
                        "--url $URL --token $TOKEN > /unregister.sh\n",
                        "chmod +x /unregister.sh",
                    ],
                )),
            ImageId=Ref(self.runner_ami_id),
            KeyName=Ref(self.runner_key_pair),
            BlockDeviceMappings=[
                BlockDeviceMapping(
                    DeviceName="/dev/xvda",
                    Ebs=EBSBlockDevice(
                        VolumeSize=Ref(self.runner_volume_size)),
                )
            ],
            SecurityGroups=[Ref(self.runner_security_group)],
            InstanceType=Ref(self.runner_server_instance_type),
            IamInstanceProfile=GetAtt(self.runner_ssm_instanceprofile, "Arn"),
        ))
    # ASG sized by the template's min/max/desired parameters; subnets arrive
    # as a single comma-separated parameter, hence the Split.
    self.runner_autoscaling_group = self.template.add_resource(
        AutoScalingGroup(
            "RunnerAutoscalingGroup",
            DesiredCapacity=Ref(self.runner_desired_count),
            LaunchConfigurationName=Ref(self.runner_launch_config),
            MinSize=Ref(self.runner_min_count),
            MaxSize=Ref(self.runner_max_count),
            VPCZoneIdentifier=Split(",", Ref(self.runner_subnets)),
            Tags=[Tag("Name", "gitlab-runner-created-by-asg", True)],
        ))
)) # Autoscaling Group for Zookeeper autoscaling_group = template.add_resource( AutoScalingGroup( "ZkAutoscalingGroup", LaunchConfigurationName=Ref(launch_config), MinSize=Ref(num_hosts), MaxSize=Ref(num_hosts), DesiredCapacity=Ref(num_hosts), AvailabilityZones=GetAZs(""), VPCZoneIdentifier=[Ref(subnetid_a), Ref(subnetid_b), Ref(subnetid_c)], Tags=[ Tag("Environment", Ref(environment), True), Tag("Cluster", "zookeeper", True), Tag("Name", "zookeeper", True) ], UpdatePolicy=UpdatePolicy( AutoScalingRollingUpdate=AutoScalingRollingUpdate( PauseTime='PT5M', MinInstancesInService="1", MaxBatchSize='1', WaitOnResourceSignals=True)))) # LogGroup log_group = template.add_resource( LogGroup( "ZkLogGroup", LogGroupName="/zookeeper/instances",
'ScaleCapacity': 'Number of api servers to run', 'Subnets': 'ASG Subnets' } for p in params.keys(): vars()[p] = t.add_parameter( Parameter(p, Type="String", Description=params[p])) LaunchConfig = t.add_resource( LaunchConfiguration("LaunchConfiguration", ImageId=Ref(AmiId), SecurityGroups=Split(',', Ref(SecurityGroups)), KeyName=Ref(KeyName), InstanceType=Ref(InstanceType))) t.add_resource( AutoScalingGroup( "AutoscalingGroup", Tags=[ Tag("Environment", Ref(EnvType), True), Tag("Name", Ref(InstanceName), True) ], DesiredCapacity=Ref(ScaleCapacity), LaunchConfigurationName=Ref(LaunchConfig), MinSize=Ref(ScaleCapacity), MaxSize=Ref(ScaleCapacity), VPCZoneIdentifier=Split(',', Ref(Subnets)), )) print(t.to_json())
def create_asg(self, title, network_config, load_balancers, asg_config):
    """
    Creates an autoscaling group object
    AWS Cloud Formation: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-group.html
    Troposphere link: https://github.com/cloudtools/troposphere/blob/master/troposphere/autoscaling.py
    :param title: Title of the autoscaling application
    :param load_balancers: list of load balancers to associate autoscaling group with
    :param asg_config: object containing asg related variables
    :param network_config: object containing network related variables

    The created ASG is stored on ``self.trop_asg`` (nothing is returned);
    several properties are attached after construction, in a deliberate
    order — see inline comments.
    """
    availability_zones = network_config.availability_zones
    self.trop_asg = self.template.add_resource(AutoScalingGroup(
        title,
        MinSize=asg_config.minsize,
        MaxSize=asg_config.maxsize,
        VPCZoneIdentifier=network_config.private_subnets,
        AvailabilityZones=availability_zones,
        LoadBalancerNames=[Ref(load_balancer) for load_balancer in load_balancers],
        HealthCheckGracePeriod=asg_config.health_check_grace_period,
        HealthCheckType=asg_config.health_check_type,
        Tags=[
            Tag('Name', Join('', [Ref('AWS::StackName'), '-', title]), True),
            Tag('owner', asg_config.owner, True)],
    ))
    # DependsOn is only attached when the network config declares upstream
    # dependencies (e.g. NAT/route resources that must exist first).
    if network_config.get_depends_on():
        self.trop_asg.DependsOn = network_config.get_depends_on()
    # Set cloud formation update policy to update
    # (written into the raw resource dict — UpdatePolicy is a template
    # attribute, not a regular ASG property).
    self.trop_asg.resource['UpdatePolicy'] = UpdatePolicy(
        AutoScalingRollingUpdate=AutoScalingRollingUpdate(
            MinInstancesInService=asg_config.minsize,
            PauseTime='PT{0}M'.format(asg_config.pausetime)
        )
    )
    # Publish lifecycle events (launch/terminate and their errors) to SNS.
    self.trop_asg.NotificationConfigurations = [
        NotificationConfigurations(TopicARN=network_config.sns_topic,
                                   NotificationTypes=['autoscaling:EC2_INSTANCE_LAUNCH',
                                                      'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
                                                      'autoscaling:EC2_INSTANCE_TERMINATE',
                                                      'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'])]
    # if there are any scaling policies specified, create and associated with ASG
    if asg_config.simple_scaling_policy_config is not None:
        for scaling_policy_config in asg_config.simple_scaling_policy_config:
            self.create_simple_scaling_policy(scaling_policy_config=scaling_policy_config)
    # Launch configuration is created last and wired in by Ref.
    self.trop_asg.LaunchConfigurationName = Ref(self.create_launch_config(
        title=title,
        asg_config=asg_config,
        network_config=network_config
    ))
    # scale down auto scaling group outside work hours with Scheduled Actions
    if asg_config.ec2_scheduled_shutdown:
        # Recurrence tag uses Cron syntax: https://en.wikipedia.org/wiki/Cron
        # scheduled action for turning off instances (max=0)
        self.template.add_resource(ScheduledAction(
            title=title + 'SchedActOFF',
            AutoScalingGroupName=Ref(self.trop_asg),
            MaxSize=0,
            MinSize=0,
            Recurrence="0 09 * * *"  # 0900 UTC = 2000 AEDT
        ))
        # scheduled action for turning on instances (max=maxsize)
        self.template.add_resource(ScheduledAction(
            title=title + 'SchedActON',
            AutoScalingGroupName=Ref(self.trop_asg),
            MaxSize=asg_config.maxsize,
            MinSize=asg_config.minsize,
            Recurrence="0 19 * * 0,1,2,3,4"  # 1900 UTC (previous day) = 0600 AEDT
        ))
def _create_cfn_template(self):
    """Assemble the CloudFormation template for an EKS node group.

    Builds (or reuses, when already provided on ``self``): the node IAM
    role + instance profile, the node security group and its control-plane
    ingress/egress rules, an EC2 key pair (imported via boto3 if missing),
    a LaunchConfiguration (with optional templated UserData) and the
    AutoScalingGroup. Mutates ``self.tpl`` and ``self.resources`` status
    entries as a side effect.
    """
    self.tpl = Template()
    self.tpl.add_version('2010-09-09')
    self.tpl.add_description('CFN template to create an EKS node group and affiliated resources.')
    # Tag required by Kubernetes so the cluster recognises owned resources.
    eks_tag = 'kubernetes.io/cluster/{}'.format(self.cluster.name)
    r = self.resources.get(self.RESOURCE_NG_ROLE.name)
    if self.role:
        # A pre-existing role name was supplied: reference it instead of
        # creating one, and resolve its ARN via the caller's account id.
        profile = InstanceProfile(
            self.RESOURCE_NG_PROFILE.name,
            InstanceProfileName=self.tag_name,
            Path='/',
            Roles=[self.role])
        account_id = boto3.session.Session().client('sts').get_caller_identity().get('Account')
        role_arn = 'arn:aws:iam::{}:role/{}'.format(account_id, self.role)
        self.tpl.add_output(
            Output(self.RESOURCE_NG_ROLE.name, Value=role_arn, Description='Node group role'))
        r.status = Status.provided
        r.resource_id = role_arn
    else:
        # No role supplied: create one with the standard EKS worker policies.
        role = Role(
            self.RESOURCE_NG_ROLE.name,
            RoleName=self.tag_name,
            AssumeRolePolicyDocument=Policy(Statement=[
                Statement(Effect=Allow, Action=[AssumeRole],
                          Principal=Principal('Service', ['ec2.amazonaws.com'])),
            ], ),
            ManagedPolicyArns=['arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy',
                               'arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy',
                               'arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly']
        )
        self.tpl.add_resource(role)
        profile = InstanceProfile(
            self.RESOURCE_NG_PROFILE.name,
            InstanceProfileName=self.tag_name,
            Path='/',
            Roles=[Ref(role)])
        self.tpl.add_output(
            Output(self.RESOURCE_NG_ROLE.name, Value=GetAtt(role, 'Arn'), Description='Node group role'))
    self.tpl.add_resource(profile)
    # Node security group; custom ingress rules only when configured.
    # (The comprehension's `r` is scoped to the comprehension and does not
    # clobber the outer `r` in Python 3.)
    if self.sg_igresses:
        sg = SecurityGroup(
            self.RESOURCE_NG_SG.name,
            VpcId=self.cluster.vpc,
            Tags=Tags({'Name': self.tag_name, eks_tag: 'owned'}),
            GroupDescription='Security Group applied to the EKS node group',
            SecurityGroupIngress=[SecurityGroupRule(IpProtocol=r.protocol, FromPort=r.from_port,
                                                    ToPort=r.to_port, CidrIp=r.cidr)
                                  for r in self.sg_igresses]
        )
    else:
        sg = SecurityGroup(
            self.RESOURCE_NG_SG.name,
            VpcId=self.cluster.vpc,
            Tags=Tags({'Name': self.tag_name, eks_tag: 'owned'}),
            GroupDescription='Security Group applied to the EKS node group',
        )
    self.tpl.add_resource(sg)
    # Standard EKS node-group wiring: node<->node, control-plane<->node.
    self.tpl.add_resource(SecurityGroupIngress(
        self.RESOURCE_NG_SG_INGRESS.name,
        DependsOn=sg,
        Description='Allow node to communicate with each other',
        GroupId=Ref(sg),
        SourceSecurityGroupId=Ref(sg),
        IpProtocol='-1',
        FromPort=0,
        ToPort=65535
    ))
    self.tpl.add_resource(SecurityGroupIngress(
        self.RESOURCE_NG_SG_CP_INGRESS.name,
        DependsOn=sg,
        Description='Allow kubelet and pods on the nodes to receive communication from the cluster control plane',
        GroupId=Ref(sg),
        SourceSecurityGroupId=self.cluster.sg,
        IpProtocol='tcp',
        FromPort=1025,
        ToPort=65535
    ))
    self.tpl.add_resource(SecurityGroupEgress(
        self.RESOURCE_CP_EGRESS_TO_NG.name,
        DependsOn=sg,
        Description='Allow the cluster control plane to communicate with nodes kubelet and pods',
        GroupId=self.cluster.sg,
        DestinationSecurityGroupId=Ref(sg),
        IpProtocol='tcp',
        FromPort=1025,
        ToPort=65535
    ))
    self.tpl.add_resource(SecurityGroupIngress(
        self.RESOURCE_CP_SG_INGRESS.name,
        DependsOn=sg,
        Description='Allow pods to communicate with the cluster API Server',
        GroupId=self.cluster.sg,
        SourceSecurityGroupId=Ref(sg),
        IpProtocol='tcp',
        FromPort=443,
        ToPort=443
    ))
    # keypair — imported into EC2 immediately (boto3 side effect, not CFN)
    # when none was provided; a random 'eksXXXXX' name is generated.
    ec2 = boto3.session.Session().resource('ec2')
    r = self.resources.get(self.RESOURCE_NG_KEYPAIR.name)
    if not self.keypair:
        keyname = 'eks{}'.format(''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(5)))
        with open(self.ssh_public_key, 'rb') as f:
            ec2.import_key_pair(KeyName=keyname, PublicKeyMaterial=f.read())
        self.keypair = keyname
        self.keypair_imported = True
        # NOTE(review): "kaypair" typo in the output description below.
        self.tpl.add_output(Output(self.OUTPUT_KEYNAME, Value=self.keypair, Description='Imported kaypair name'))
        r.status = Status.created
    else:
        r.status = Status.provided
        r.resource_id = self.keypair
    # auto-scaling group and launch configuration
    if self.no_user_data:
        lc = LaunchConfiguration(
            self.RESOURCE_NG_ASG_LC.name,
            AssociatePublicIpAddress=self.use_public_ip,
            IamInstanceProfile=Ref(profile),
            ImageId=self.ami,
            InstanceType=self.instance,
            KeyName=self.keypair,
            SecurityGroups=[Ref(sg)])
    else:
        # UserData is a Jinja2-rendered bootstrap script, re-joined with
        # explicit newlines for Base64 encoding.
        user_data = Base64(
            Join('', [line + '\n' for line in Environment().from_string(self.USER_DATA).render(
                ci=self.cluster,
                ng_asg=self.RESOURCE_NG_ASG.name,
                stack_name=self.stack_name,
                max_pods=self.MAX_PODS.get(self.instance),
                region=self.region).split('\n')]))
        lc = LaunchConfiguration(
            self.RESOURCE_NG_ASG_LC.name,
            AssociatePublicIpAddress=self.use_public_ip,
            IamInstanceProfile=Ref(profile),
            ImageId=self.ami,
            InstanceType=self.instance,
            KeyName=self.keypair,
            SecurityGroups=[Ref(sg)],
            UserData=user_data)
    self.tpl.add_resource(lc)
    self.tpl.add_resource(AutoScalingGroup(
        self.RESOURCE_NG_ASG.name,
        DesiredCapacity=self.desired,
        MinSize=self.min,
        MaxSize=self.max,
        LaunchConfigurationName=Ref(lc),
        VPCZoneIdentifier=self.subnets,
        Tags=[Tag('Name', self.tag_name, True), Tag(eks_tag, 'owned', True)],
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(MinInstancesInService=1,
                                                              MaxBatchSize=1))))
'#!/bin/bash\n', 'cfn-signal -e 0', ' --resource AutoscalingGroup', ' --stack ', Ref('AWS::StackName'), ' --region ', Ref('AWS::Region'), '\n' ])), ImageId='ami-25681456', InstanceType='t2.micro', KeyName='dev-ec2', SecurityGroups=["sg-8eec36e8"] # hard coded to glomex default sg )) as_group = t.add_resource( AutoScalingGroup( 'AutoscalingGroup', DesiredCapacity=1, Tags=[Tag('Name', 'gcdt-test-autoscaling-ebs-tagging', True)], LaunchConfigurationName=Ref(LaunchConfiguration), MinSize='1', MaxSize='1', VPCZoneIdentifier=['subnet-b6eaa5d2'], AvailabilityZones=['eu-west-1a'], HealthCheckType='EC2')) t.add_output( Output( "AutoScalingGroupName", Description="Name of the autoscaling group", Value=Ref(as_group), ), ) def generate_template():
def generate_cloudformation_template():
    """Build the app's Auto Scaling Group CloudFormation template.

    Reads its configuration from the command line:
      sys.argv[1] — 'True'/'False': create an ELB + Route53 record
      sys.argv[2] — python-literal list of scaling-policy dicts
      sys.argv[3] — python-literal list of CloudWatch alarm dicts
      sys.argv[4] — python-literal list of ELB listener dicts

    Returns the troposphere Template (caller serialises it).
    """
    enable_elb = sys.argv[1]
    input_scaling_policies = ast.literal_eval(sys.argv[2])
    input_alarms = ast.literal_eval(sys.argv[3])
    # Convert the string flag to a real boolean.
    enable_elb = enable_elb == 'True'
    elb_listeners = ast.literal_eval(sys.argv[4])
    template = Template()
    template.add_description("""\
Configures Auto Scaling Group for the app""")
    # --- Parameters common to both ELB and non-ELB stacks ---
    project_name = template.add_parameter(
        Parameter(
            "Name",
            Type="String",
            Description="Instances will be tagged with this name",
        ))
    scalecapacity = template.add_parameter(
        Parameter(
            "ScaleCapacity",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))
    minsize = template.add_parameter(
        Parameter(
            "MinScale",
            Type="String",
            Description="Minimum number of servers to keep in the ASG",
        ))
    maxsize = template.add_parameter(
        Parameter(
            "MaxScale",
            Type="String",
            Description="Maximum number of servers to keep in the ASG",
        ))
    signalcount = template.add_parameter(
        Parameter(
            "SignalCount",
            Default="1",
            Type="String",
            Description=
            "No. of signals CF must receive before it sets the status as CREATE_COMPLETE",
        ))
    signaltimeout = template.add_parameter(
        Parameter(
            "SignalTimeout",
            Default="PT5M",
            Type="String",
            Description=
            "Time that CF waits for the number of signals that was specified in Count ",
        ))
    minsuccessfulinstancespercent = template.add_parameter(
        Parameter(
            "MinSuccessfulInstancesPercent",
            Default="100",
            Type="String",
            Description=
            "% instances in a rolling update that must signal success for CF to succeed",
        ))
    environment = template.add_parameter(
        Parameter(
            "Environment",
            Type="String",
            Description="The environment being deployed into",
        ))
    subnet = template.add_parameter(
        Parameter(
            "Subnets",
            Type="CommaDelimitedList",
        ))
    launchconfigurationname = template.add_parameter(
        Parameter(
            "LaunchConfigurationName",
            Type="String",
        ))
    health_check_grace_period = template.add_parameter(
        Parameter(
            "HealthCheckGracePeriod",
            Type="String",
            Default="300",
        ))
    # --- ELB-only parameters and resources; the names below are only bound
    # when enable_elb is True (later code guards on the same flag) ---
    if enable_elb:
        elb_subnets = template.add_parameter(
            Parameter(
                "LoadBalancerSubnets",
                Type="CommaDelimitedList",
            ))
        elb_bucket_name = template.add_parameter(
            Parameter("LoadBalancerBucketName",
                      Type="String",
                      Description="S3 Bucket for the ELB access logs"))
        # Access logging is enabled only when a bucket name is supplied.
        template.add_condition("ElbLoggingCondition",
                               Not(Equals(Ref(elb_bucket_name), "")))
        elb_schema = template.add_parameter(
            Parameter(
                "LoadBalancerSchema",
                Type="String",
            ))
        health_check_interval = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckInterval",
                Type="String",
            ))
        health_check_timeout = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckTimeout",
                Type="String",
            ))
        healthy_threshold = template.add_parameter(
            Parameter(
                "LoadBalancerHealthyThreshold",
                Type="String",
            ))
        unhealthy_threshold = template.add_parameter(
            Parameter(
                "LoadBalancerUnHealthyThreshold",
                Type="String",
            ))
        enable_connection_draining = template.add_parameter(
            Parameter(
                "LoadBalancerEnableConnectionDraining",
                Type="String",
                Default="True",
            ))
        connection_draining_timeout = template.add_parameter(
            Parameter(
                "LoadBalancerConnectionDrainingTimeout",
                Type="String",
                Default="30",
            ))
        loadbalancersecuritygroup = template.add_parameter(
            Parameter(
                "LoadBalancerSecurityGroup",
                Type="CommaDelimitedList",
                Description="Security group for api app load balancer.",
            ))
        hostedzone = template.add_parameter(
            Parameter(
                "HostedZoneName",
                Description=
                "The DNS name of an existing Amazon Route 53 hosted zone",
                Type="String",
            ))
        dns_record = template.add_parameter(
            Parameter(
                "DNSRecord",
                Type="String",
            ))
        dns_ttl = template.add_parameter(
            Parameter(
                "DNSTTL",
                Default="300",
                Type="String",
            ))
        new_weight = template.add_parameter(
            Parameter(
                "NewDnsWeight",
                Type="String",
                Default="100",
            ))
        health_check_protocol = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckProtocol",
                Type="String",
            ))
        # TCP health checks take no URL path (see the Target Join below).
        template.add_condition("ElbTCPProtocolCondition",
                               Equals(Ref(health_check_protocol), "TCP"))
        health_check_port = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckPort",
                Type="String",
            ))
        health_check_path = template.add_parameter(
            Parameter(
                "LoadBalancerHealthCheckPath",
                Type="String",
            ))
        # One classic-ELB Listener per entry from the CLI configuration.
        load_balancer_listeners = []
        for listener in elb_listeners:
            load_balancer_listeners.append(
                elb.Listener(
                    LoadBalancerPort=listener['load_balancer_port'],
                    InstancePort=listener['instance_port'],
                    Protocol=listener['protocol'],
                    InstanceProtocol=Ref(health_check_protocol),
                ))
        loadbalancer = template.add_resource(
            elb.LoadBalancer(
                "LoadBalancer",
                AccessLoggingPolicy=If(
                    "ElbLoggingCondition",
                    elb.AccessLoggingPolicy(EmitInterval=60,
                                            Enabled=True,
                                            S3BucketName=Ref(elb_bucket_name),
                                            S3BucketPrefix="ELBLogs"),
                    Ref("AWS::NoValue")),
                ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                    Enabled=Ref(enable_connection_draining),
                    Timeout=Ref(connection_draining_timeout),
                ),
                Subnets=Ref(elb_subnets),
                HealthCheck=elb.HealthCheck(
                    # Target is "<PROTO>:<PORT><PATH>"; the path is dropped
                    # for TCP checks via the condition.
                    Target=Join("", [
                        Ref(health_check_protocol), ":",
                        Ref(health_check_port),
                        If("ElbTCPProtocolCondition", Ref("AWS::NoValue"),
                           Ref(health_check_path))
                    ]),
                    HealthyThreshold=Ref(healthy_threshold),
                    UnhealthyThreshold=Ref(unhealthy_threshold),
                    Interval=Ref(health_check_interval),
                    Timeout=Ref(health_check_timeout),
                ),
                Listeners=load_balancer_listeners,
                CrossZone=True,
                SecurityGroups=Ref(loadbalancersecuritygroup),
                Scheme=Ref(elb_schema)))
        # Weighted CNAME pointing at the ELB (supports gradual cut-over).
        route53record = template.add_resource(
            RecordSetType(
                "DNS",
                HostedZoneName=Join("", [Ref(hostedzone), "."]),
                Name=Join("", [Ref(dns_record), ".", Ref(hostedzone), "."]),
                ResourceRecords=[GetAtt(loadbalancer, "DNSName")],
                SetIdentifier=Ref(project_name),
                TTL=Ref(dns_ttl),
                Type="CNAME",
                Weight=Ref(new_weight),
            ))
    autoscalinggroup = template.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup",
            Tags=[
                Tag("Name", Ref(project_name), True),
                Tag("Environment", Ref(environment), True)
            ],
            LaunchConfigurationName=Ref(launchconfigurationname),
            MinSize=Ref(minsize),
            MaxSize=Ref(maxsize),
            DesiredCapacity=Ref(scalecapacity),
            VPCZoneIdentifier=Ref(subnet),
            HealthCheckGracePeriod=Ref(health_check_grace_period),
            CreationPolicy=CreationPolicy(
                ResourceSignal=ResourceSignal(Count=Ref(signalcount),
                                              Timeout=Ref(signaltimeout)),
                AutoScalingCreationPolicy=AutoScalingCreationPolicy(
                    MinSuccessfulInstancesPercent=Ref(
                        minsuccessfulinstancespercent))),
            UpdatePolicy=UpdatePolicy(
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    MaxBatchSize='1',
                    MinInstancesInService='1',
                    MinSuccessfulInstancesPercent=Ref(
                        minsuccessfulinstancespercent),
                    PauseTime=Ref(signaltimeout),
                    WaitOnResourceSignals=True))))
    # Default to EC2 health checks; switch to ELB checks when one exists.
    autoscalinggroup.HealthCheckType = 'EC2'
    if enable_elb:
        autoscalinggroup.LoadBalancerNames = [Ref(loadbalancer)]
        autoscalinggroup.HealthCheckType = 'ELB'
    # Scaling policies: optional keys are only forwarded when present and
    # valid for the policy type (CFN rejects mismatched combinations).
    created_scaling_policies = dict()
    for scaling_policy in input_scaling_policies:
        policy_properties = {
            'AdjustmentType': scaling_policy['adjustment_type'],
            'AutoScalingGroupName': Ref(autoscalinggroup),
            'Cooldown': scaling_policy['cooldown'],
            'PolicyType': scaling_policy['policy_type'],
            'ScalingAdjustment': scaling_policy['scaling_adjustment'],
        }
        if scaling_policy['policy_type'] != "SimpleScaling" \
                and 'estimated_instance_warmup' in scaling_policy:
            policy_properties['EstimatedInstanceWarmup'] = \
                scaling_policy['estimated_instance_warmup']
        if scaling_policy['policy_type'] != "SimpleScaling" \
                and 'metric_aggregation_type' in scaling_policy:
            policy_properties['MetricAggregationType'] = scaling_policy[
                'metric_aggregation_type']
        if scaling_policy['adjustment_type'] == "PercentChangeInCapacity" \
                and 'min_adjustment_magnitude' in scaling_policy:
            policy_properties['MinAdjustmentMagnitude'] = scaling_policy[
                'min_adjustment_magnitude']
        if 'step_adjustments' in scaling_policy:
            policy_properties['StepAdjustments'] = scaling_policy[
                'step_adjustments']
        created_scaling_policies[
            scaling_policy['name']] = template.add_resource(
                ScalingPolicy(scaling_policy['name'], **policy_properties))
    # CloudWatch alarms that trigger the scaling policies created above.
    for alarm in input_alarms:
        template.add_resource(
            Alarm(
                alarm['name'],
                ActionsEnabled=True,
                AlarmActions=[
                    Ref(created_scaling_policies[alarm['scaling_policy_name']])
                ],
                AlarmDescription=alarm['description'],
                ComparisonOperator=alarm['comparison'],
                Dimensions=[
                    MetricDimension(Name="AutoScalingGroupName",
                                    Value=Ref(autoscalinggroup)),
                ],
                EvaluationPeriods=alarm['evaluation_periods'],
                InsufficientDataActions=[],
                MetricName=alarm['metric'],
                Namespace=alarm['namespace'],
                OKActions=[],
                Period=alarm['period'],
                Statistic=alarm['statistics'],
                Threshold=str(alarm['threshold']),
                Unit=alarm['unit'],
            ))
    template.add_output(
        Output("StackName", Value=Ref(project_name), Description="Stack Name"))
    if enable_elb:
        template.add_output(
            Output("DomainName",
                   Value=Ref(route53record),
                   Description="DNS to access the service"))
        template.add_output(
            Output("LoadBalancer",
                   Value=GetAtt(loadbalancer, "DNSName"),
                   Description="ELB dns"))
    template.add_output(
        Output("AutoScalingGroup",
               Value=Ref(autoscalinggroup),
               Description="Auto Scaling Group"))
    template.add_output(
        Output("LaunchConfiguration",
               Value=Ref(launchconfigurationname),
               Description="LaunchConfiguration for this deploy"))
    return template
def test_using_load_balancer(self):
    """End-to-end test: stand up an ALB in front of a 2-instance ASG and
    verify the load balancer distributes requests across both backends.

    Creates, via a CloudFormation stack: security groups for the ALB and
    the instances, an internet-facing application load balancer across two
    subnets, a target group with an HTTP health check on /index.html, an
    HTTP:80 listener forwarding to that group, a launch configuration whose
    cfn-init metadata installs httpd and writes each instance's private IP
    into index.html, and a fixed-size (2) auto-scaling group registered
    with the target group.  The test then curls the ALB URL ten times and
    asserts exactly two distinct private IPs were seen.

    NOTE(review): this talks to real AWS (create_stack / run) — assumes
    credentials and a default VPC with at least two subnets; confirm the
    helper contracts (init_cf_env, get_subnet, ...) in the test utilities.
    """
    test_stack_name = "TestALB"
    init_cf_env(test_stack_name)

    t = Template()
    load_balancer_sg = ts_add_security_group(t,
                                             name="LoadBalancerSecurityGroup")
    instance_sg = ts_add_security_group(t)
    load_balancer = t.add_resource(
        LoadBalancer(
            "MyLoadBalancer",
            SecurityGroups=[Ref(load_balancer_sg)],
            # The ALB is publicly accessible.
            # (use `internal` instead of `internet-facing` to define a load
            # balancer reachable from the private network only)
            Scheme='internet-facing',
            Subnets=[get_subnet(index=0),
                     get_subnet(index=1)],  # Attaches the ALB to the subnets
            Type='application'))
    target_group = t.add_resource(
        TargetGroup(
            "MyTargetGroup",
            HealthCheckIntervalSeconds=10,
            HealthCheckProtocol='HTTP',
            HealthCheckPath='/index.html',
            HealthCheckTimeoutSeconds=5,
            HealthyThresholdCount=3,
            UnhealthyThresholdCount=2,
            # If the HTTP status code is 2XX, the backend is considered healthy.
            Matcher=Matcher(HttpCode='200-299'),
            Port=80,  # The web server on the EC2 instances listens on port 80.
            Protocol='HTTP',
            VpcId=get_default_vpc(),
        ))
    listener = t.add_resource(
        Listener(
            "MyListener",
            LoadBalancerArn=Ref(load_balancer),
            Port=80,
            # The load balancer listens on port 80 for HTTP requests.
            Protocol='HTTP',
            DefaultActions=[
                Action(
                    Type='forward',
                    # TargetGroupArn is the connection between the ALB and
                    # the auto-scaling group's instances.
                    TargetGroupArn=Ref(target_group),
                )
            ]))
    launch_config = t.add_resource(
        LaunchConfiguration(
            "MyLaunchConfiguration",
            ImageId=get_linux2_image_id(),
            InstanceType='m4.xlarge',
            KeyName=KEY,
            SecurityGroups=[Ref(instance_sg)],
            AssociatePublicIpAddress=True,
            InstanceMonitoring=False,
            # Boot script: run cfn-init so the Metadata below is applied at
            # first boot.
            UserData=Base64(
                Join('', [
                    '#!/bin/bash -xe\n',
                    '/opt/aws/bin/cfn-init -v --stack ',
                    Ref('AWS::StackName'),
                    ' --resource MyLaunchConfiguration ',
                    ' --region ',
                    Ref('AWS::Region'), '\n'
                ])),
            Metadata=Metadata(
                Init({
                    'config':
                    InitConfig(
                        # Install Apache via yum.
                        packages={'yum': {
                            'httpd': []
                        }},
                        files={
                            # Script that publishes this instance's private
                            # IP as the index page (read back by the test).
                            '/tmp/config': {
                                'content':
                                Join('\n', [
                                    '#!/bin/bash -ex',
                                    'PRIVATE_IP=`curl -s http://169.254.169.254/latest/meta-data/local-ipv4`',
                                    'echo "$PRIVATE_IP" > index.html',
                                ]),
                                'mode': '000500',
                                'owner': 'root',
                                'group': 'root',
                            }
                        },
                        commands={
                            '01_config': {
                                'command': "/tmp/config",
                                'cwd': '/var/www/html'
                            }
                        },
                        services={
                            'sysvinit': {
                                'httpd': {
                                    'enabled': True,
                                    'ensureRunning': True
                                }
                            }
                        })
                }))))
    auto_scaling_group = t.add_resource(
        AutoScalingGroup(
            "MyAutoScalingGroup",
            LaunchConfigurationName=Ref(launch_config),
            DesiredCapacity=2,
            MinSize=2,
            MaxSize=2,
            VPCZoneIdentifier=[get_subnet(index=0),
                               get_subnet(index=1)],
            # Registers new EC2 instances with the default target group.
            TargetGroupARNs=[Ref(target_group)],
            Tags=[
                # 'True' means: attach the same tag to the virtual machines
                # started by this auto-scaling group.
                Tag("Name", test_stack_name, True)
            ]))
    t.add_output([
        Output("URL", Value=Sub('http://${MyLoadBalancer.DNSName}')),
    ])
    dump_template(t, True)
    create_stack(test_stack_name, t)
    outputs = get_stack_outputs(test_stack_name)
    lb_url = get_output_value(outputs, 'URL')
    # Hit the ALB repeatedly; with 2 healthy targets we expect exactly 2
    # distinct backend private IPs across 10 requests.
    private_ips = set()
    for i in range(10):
        private_ips.add(run(f'curl {lb_url}', True))
    self.assertEqual(len(private_ips), 2)
Protocol="HTTPS", InstanceProtocol="HTTP", SSLCertificateId=Ref(SSLCertificateId), ), ], CrossZone=True, SecurityGroups=[Ref(LoadBalancerSecurityGroup)], LoadBalancerName="api-lb", Scheme="internet-facing", )) AutoscalingGroup = t.add_resource( AutoScalingGroup( "AutoscalingGroup", DesiredCapacity=Ref(ScaleCapacity), Tags=[Tag("Environment", Ref(EnvType), True)], LaunchConfigurationName=Ref(LaunchConfig), MinSize=Ref(ScaleCapacity), MaxSize=Ref(ScaleCapacity), VPCZoneIdentifier=[Ref(ApiSubnet1), Ref(ApiSubnet2)], LoadBalancerNames=[Ref(LoadBalancer)], AvailabilityZones=[ Ref(VPCAvailabilityZone1), Ref(VPCAvailabilityZone2) ], HealthCheckType="EC2", UpdatePolicy=UpdatePolicy( AutoScalingReplacingUpdate=AutoScalingReplacingUpdate( WillReplace=True, ), AutoScalingRollingUpdate=AutoScalingRollingUpdate( PauseTime="PT5M",
def generate_cloudformation_template():
    """Assemble the CloudFormation template for the app's Auto Scaling Group.

    Declares the sizing/environment/network parameters, a single ASG with a
    conservative rolling-update policy, and outputs exposing the created
    group and the launch configuration it uses.

    Returns:
        troposphere.Template: the assembled template object.
    """
    template = Template()
    template.add_description("Configures Auto Scaling Group for the app")

    # ---- Input parameters -------------------------------------------------
    desired_param = template.add_parameter(
        Parameter(
            "ScaleCapacity",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))
    min_param = template.add_parameter(
        Parameter(
            "MinScale",
            Type="String",
            Description="Minimum number of servers to keep in the ASG",
        ))
    max_param = template.add_parameter(
        Parameter(
            "MaxScale",
            Type="String",
            Description="Maximum number of servers to keep in the ASG",
        ))
    env_param = template.add_parameter(
        Parameter(
            "Environment",
            Type="String",
            Description="The environment being deployed into",
        ))
    subnets_param = template.add_parameter(
        Parameter(
            "Subnets",
            Type="CommaDelimitedList",
        ))
    lc_name_param = template.add_parameter(
        Parameter(
            "LaunchConfigurationName",
            Type="String",
        ))

    # ---- Auto Scaling Group ----------------------------------------------
    # Rolling update: replace one instance per batch, keep at least one in
    # service, and pause one minute between batches.
    asg = template.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup",
            Tags=[Tag("Environment", Ref(env_param), True)],
            LaunchConfigurationName=Ref(lc_name_param),
            MinSize=Ref(min_param),
            MaxSize=Ref(max_param),
            DesiredCapacity=Ref(desired_param),
            VPCZoneIdentifier=Ref(subnets_param),
            HealthCheckType='EC2',
            HealthCheckGracePeriod=30,
            UpdatePolicy=UpdatePolicy(
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    PauseTime='PT1M',
                    MinInstancesInService="1",
                    MaxBatchSize='1'))))

    # ---- Outputs ----------------------------------------------------------
    template.add_output(
        Output("AutoScalingGroup",
               Value=Ref(asg),
               Description="Created Auto Scaling Group"))
    template.add_output(
        Output("LaunchConfiguration",
               Value=Ref(lc_name_param),
               Description="LaunchConfiguration for this deploy"))
    return template
def elb_asg_lc_template(app, env, nameSGRDS, rdsPort, instanceType, ami,
                        subnets, elbPort, elbCidrBlock, ec2Port,
                        desiredCapacity, minSize, maxSize, region, nameBucket,
                        officeIP):
    """Build a full app stack: security groups, a classic ELB with access
    logging, an RDS ingress rule, and an auto-scaling group behind the ELB.

    Args:
        app, env: name fragments used for resource titles and ImportValue keys.
        nameSGRDS: logical-name fragment of the exported RDS security group.
        rdsPort: database port opened from the EC2 instances to RDS.
        instanceType, ami: launch-configuration instance settings.
        subnets: subnet ids attached to both the ELB and the ASG.
        elbPort, elbCidrBlock: public listener port and allowed source CIDR.
        ec2Port: backend port the instances listen on (also the health check).
        desiredCapacity, minSize, maxSize: ASG sizing.
        region: AWS region, used to enumerate availability zones.
        nameBucket: logical-name fragment of the exported access-log bucket.
        officeIP: CIDR allowed to SSH (port 22) to the instances.

    Returns:
        str: the rendered CloudFormation template as JSON.
    """
    template = Template()

    # Security group for the load balancer: public ingress on elbPort only.
    sgELB = template.add_resource(
        SecurityGroup(
            "SecurityGroupELB" + app + env,
            GroupDescription="Security group for " + app + "-" + env,
            VpcId=ImportValue("VPC" + env),
            SecurityGroupIngress=[
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=elbPort,
                    ToPort=elbPort,
                    CidrIp=elbCidrBlock,
                )
            ],
            # Allow all outbound.  BUGFIX: FromPort/ToPort were swapped
            # (FromPort=65535, ToPort=0); a port range must run low-to-high.
            SecurityGroupEgress=[
                SecurityGroupRule(IpProtocol="-1",
                                  FromPort=0,
                                  ToPort=65535,
                                  CidrIp="0.0.0.0/0")
            ],
            Tags=Tags(
                env=env,
                Name="sg-ELB" + app + "-" + env,
                app=app,
            ),
        ))

    # Security group for the instances: app traffic only from the ELB, SSH
    # only from the office CIDR.
    sgEC2 = template.add_resource(
        SecurityGroup(
            "SecurityGroupEC2" + app + env,
            GroupDescription="Security group for EC2 " + app + "-" + env,
            VpcId=ImportValue("VPC" + env),
            DependsOn="SecurityGroupELB" + app + env,
            SecurityGroupIngress=[
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=ec2Port,
                    ToPort=ec2Port,
                    SourceSecurityGroupId=Ref(sgELB),
                ),
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=22,
                    ToPort=22,
                    CidrIp=officeIP,
                ),
            ],
            # Allow all outbound (same FromPort/ToPort fix as above).
            SecurityGroupEgress=[
                SecurityGroupRule(IpProtocol="-1",
                                  FromPort=0,
                                  ToPort=65535,
                                  CidrIp="0.0.0.0/0")
            ],
            Tags=Tags(
                env=env,
                Name="sg-EC2-" + app + "-" + env,
                app=app,
            ),
        ))

    # Open the RDS port on the (separately exported) RDS security group for
    # traffic originating from the instances.
    addIngressRDS = template.add_resource(
        SecurityGroupIngress(
            "ingressSGRDS" + app + env,
            SourceSecurityGroupId=Ref(sgEC2),
            Description="From EC2 instances",
            GroupId=ImportValue("SG-" + nameSGRDS + "-" + app + "-" + env),
            IpProtocol="tcp",
            FromPort=rdsPort,
            ToPort=rdsPort,
            DependsOn="SecurityGroupEC2" + app + env,
        ))

    launchConfig = template.add_resource(
        LaunchConfiguration(
            "LaunchConfiguration" + app + env,
            InstanceType=instanceType,
            ImageId=ami,
            SecurityGroups=[Ref(sgEC2)],
            IamInstanceProfile=ImportValue("Role-" + app + "-" + env)))

    # Let the ELB service write its access logs into the imported bucket.
    # NOTE(review): 156460612806 looks like the ELB log-delivery account for
    # a specific region (us-east-1) — confirm it matches `region`.
    bucketPolicy = template.add_resource(
        BucketPolicy(
            "BucketPolicy" + nameBucket + app + env,
            Bucket=ImportValue("Bucket" + nameBucket + app + env),
            PolicyDocument={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Action": ["s3:PutObject"],
                    "Effect": "Allow",
                    "Resource": Join("", [
                        "arn:aws:s3:::",
                        ImportValue("Bucket" + nameBucket + app + env),
                        "/AWSLogs/",
                        Ref("AWS::AccountId"), "/*"
                    ]),
                    "Principal": {
                        "AWS": ["156460612806"]
                    }
                }]
            }))

    # Internet-facing classic ELB with draining, TCP health check on the
    # backend port, and 5-minute access-log delivery.
    lb = template.add_resource(
        LoadBalancer(
            "LoadBalancer" + app + env,
            ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                Enabled=True,
                Timeout=120,
            ),
            Subnets=subnets,
            HealthCheck=elb.HealthCheck(
                "HealthCheck",
                Target="TCP:" + str(ec2Port),
                HealthyThreshold="5",
                UnhealthyThreshold="5",
                Interval="30",
                Timeout="15",
            ),
            Listeners=[
                elb.Listener(
                    LoadBalancerPort=elbPort,
                    InstancePort=ec2Port,
                    Protocol="HTTP",
                    InstanceProtocol="HTTP",
                ),
            ],
            CrossZone=True,
            SecurityGroups=[Ref(sgELB)],
            LoadBalancerName="lb-" + app + "-" + env,
            Scheme="internet-facing",
            AccessLoggingPolicy=AccessLoggingPolicy(
                "LoggingELB" + app + env,
                EmitInterval=5,
                Enabled=True,
                S3BucketName=ImportValue("Bucket" + nameBucket + app + env),
            )))

    # ASG behind the ELB; replacing update plus one-at-a-time rolling update
    # that waits for resource signals.
    asg = template.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup" + app + env,
            DesiredCapacity=desiredCapacity,
            Tags=[Tag("Environment", env, True)],
            LaunchConfigurationName=Ref(launchConfig),
            MinSize=minSize,
            MaxSize=maxSize,
            LoadBalancerNames=[Ref(lb)],
            AvailabilityZones=GetAZs(region),
            VPCZoneIdentifier=subnets,
            HealthCheckType="ELB",
            HealthCheckGracePeriod=300,
            UpdatePolicy=UpdatePolicy(
                AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
                    WillReplace=True, ),
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    PauseTime='PT5M',
                    MinInstancesInService="1",
                    MaxBatchSize='1',
                    WaitOnResourceSignals=True,
                ))))

    return template.to_json()
def add_kippo_sensors(self):
    """Add the kippo sensor tier to the stack.

    Creates an internet-facing ELB (HTTP on 80, raw TCP pass-through on
    443), an output exposing its DNS name, a launch configuration that
    bootstraps each sensor via cloud-config, and an auto-scaling group
    pinned to the KippoSensorCount parameter.
    """
    # Load balancer: 443 is forwarded as plain TCP so TLS terminates on
    # the sensor instances themselves.
    self.template.add_resource(
        LoadBalancer(
            'Elb',
            Listeners=[
                Listener(InstancePort=80,
                         LoadBalancerPort=80,
                         Protocol='http'),
                # Plain TCP forwarding for HTTPS/SSL
                Listener(InstancePort=443,
                         LoadBalancerPort=443,
                         Protocol='tcp'),
            ],
            CrossZone=True,
            Subnets=Ref('ElbSubnetIdList'),
            SecurityGroups=[Ref('ElbSecurityGroup')],
            Scheme='internet-facing',
            HealthCheck=HealthCheck(
                Target='HTTP:80/kippo-graph/',
                HealthyThreshold=2,
                UnhealthyThreshold=5,
                Interval=120,
                Timeout=60,
            ),
        ))

    self.template.add_output(
        Output(
            'ElbEndpoint',
            Description='ELB endpoint address',
            Value=GetAtt('Elb', 'DNSName'),
        ))

    # cloud-config user data: apply security updates, download the sensor
    # bootstrap script, and run it with the RDS endpoint/credentials.
    bootstrap_line = Join(
        '',
        [
            ' - "bash /tmp/configure_kippo_sensor.sh',
            ' ',
            GetAtt('RdsInstance', 'Endpoint.Address'),
            ' ',
            Ref('RdsRootPassword'),
            ' ',
            Ref('RealSshPort'),
            '"',
        ],
    )
    cloud_config = Join('\n', [
        '#cloud-config',
        'repo_upgrade: security',
        'runcmd:',
        ' - "/usr/bin/wget -O /tmp/configure_kippo_sensor.sh https://raw.githubusercontent.com/cdodd/aws-kippo-cluster/master/bootstrap/configure_kippo_sensor.sh"',
        bootstrap_line,
    ])

    self.template.add_resource(
        LaunchConfiguration(
            'LaunchConfiguration',
            KeyName=Ref('KeyName'),
            ImageId=FindInMap('Ec2AmiMap', Ref('AWS::Region'), 'AmiId'),
            InstanceType=Ref('Ec2InstanceType'),
            SecurityGroups=[Ref('Ec2SecurityGroup')],
            AssociatePublicIpAddress=True,
            UserData=Base64(cloud_config),
        ))

    # Min == max == desired pins the group to exactly KippoSensorCount.
    self.template.add_resource(
        AutoScalingGroup(
            'Asg',
            DesiredCapacity=Ref('KippoSensorCount'),
            HealthCheckGracePeriod=1800,
            HealthCheckType='ELB',
            LaunchConfigurationName=Ref('LaunchConfiguration'),
            LoadBalancerNames=[Ref('Elb')],
            MaxSize=Ref('KippoSensorCount'),
            MinSize=Ref('KippoSensorCount'),
            Tags=[Tag('Name', 'kippo-sensor', 'true')],
            VPCZoneIdentifier=Ref('Ec2SubnetIdList'),
        ))