def add_ec2_launch_configuration(self, name, security_groups, keypair, image_id='ami-14913f63', instance_type='t2.micro', metadata=None, userdata=None):
    """
    Create a Launch Configuration and register it on the template.

    :param name: Name of the Launch Config
    :param security_groups: SG to assign to the ASG
    :param keypair: Key pair to use to launch the instances in the ASG
    :param image_id: AMI ID to spin up the instances from
    :param instance_type: Instance type
    :param metadata: Any metadata, eg. files, packages etc.
    :param userdata: Any userdata
    """
    lc = LaunchConfiguration(
        name,
        ImageId=image_id,
        SecurityGroups=security_groups,
        InstanceType=instance_type,
        KeyName=keypair,
    )
    # Every launch configuration gets this builder's default block-device
    # layout.
    lc.BlockDeviceMappings = self.block_device_default
    # Optional attributes are attached only when a truthy value was given.
    if metadata:
        lc.Metadata = metadata
    if userdata:
        lc.UserData = userdata
    self.template.add_resource(lc)
def add_nat_asg(self):
    """
    Create the NAT launch configuration and its single-instance ASG.

    The instance boots with the NAT takeover script (plus optional NTP
    takeover and user-supplied extra user data) and finishes by running
    cfn-signal, which the ASG's creation policy waits on.

    :return: the AutoScalingGroup resource added to the template
    """
    user_data = [resources.get_resource('nat_takeover.sh')]
    if self.enable_ntp:
        user_data.append(resources.get_resource('ntp_takeover.sh'))
    if self.extra_user_data:
        # FIX: close the file deterministically instead of leaking the
        # handle until garbage collection.
        with open(self.extra_user_data) as extra:
            user_data.append(extra.read())
    nat_asg_name = "Nat%sASG" % str(self.subnet_index)
    # Signal the ASG resource so CloudFormation knows the NAT came up.
    user_data.extend([
        "\n",
        "cfn-signal -s true",
        " --resource ", nat_asg_name,
        " --stack ", {"Ref": "AWS::StackName"},
        " --region ", {"Ref": "AWS::Region"}
    ])
    nat_launch_config = self.add_resource(
        LaunchConfiguration(
            "Nat%sLaunchConfig" % str(self.subnet_index),
            UserData=Base64(Join('', user_data)),
            ImageId=FindInMap('RegionMap', Ref('AWS::Region'), 'natAmiId'),
            KeyName=Ref('ec2Key'),
            SecurityGroups=[Ref(self.sg)],
            EbsOptimized=False,
            IamInstanceProfile=Ref(self.instance_profile),
            InstanceType=self.instance_type,
            AssociatePublicIpAddress=True))
    # Create the NAT in a public subnet.
    # BUG FIX: dict.keys() is not subscriptable on Python 3; take the first
    # key via the iterator protocol instead of .keys()[0].
    subnet_layer = next(iter(self._subnets['public']))
    nat_asg = self.add_resource(
        AutoScalingGroup(
            nat_asg_name,
            DesiredCapacity=1,
            Tags=[
                Tag("Name", Join("-", ["NAT", self.subnet_index, ]), True),
                Tag("isNat", "true", True)
            ],
            MinSize=1,
            MaxSize=1,
            Cooldown="30",
            LaunchConfigurationName=Ref(nat_launch_config),
            HealthCheckGracePeriod=30,
            HealthCheckType="EC2",
            VPCZoneIdentifier=[
                self._subnets['public'][subnet_layer][self.subnet_index]
            ],
            CreationPolicy=CreationPolicy(
                ResourceSignal=ResourceSignal(Count=1, Timeout='PT15M'))))
    return nat_asg
def add_launch_config(self):
    '''Register the autoscaling launch configuration on the template.

    :return: the CFN template the resource was added to
    '''
    # Single 100 GiB gp2 root volume, deleted with the instance.
    root_volume = BlockDeviceMapping(
        DeviceName='/dev/sda1',
        Ebs=EBSBlockDevice(
            DeleteOnTermination=True,
            VolumeSize=100,
            VolumeType='gp2',
        ),
    )
    launch_config = LaunchConfiguration(
        title=constants.INST_LC,
        AssociatePublicIpAddress=False,
        BlockDeviceMappings=[root_volume],
        IamInstanceProfile=Ref(constants.INST_PROFILE),
        ImageId=Ref('AmiId'),
        InstanceType=Ref('InstanceType'),
        SecurityGroups=[
            Ref(constants.SSH_SG),
            ImportValue(Sub('${Environment}-AppSecurityGroup')),
        ],
        UserData=Base64(Sub(constants.USERDATA)),
    )
    self.cfn_template.add_resource(launch_config)
    return self.cfn_template
def _create_test_document(self):
    """Build a minimal template holding one launch configuration and
    return it parsed back from its JSON rendering."""
    tpl = Template()
    # Metadata pulls its cfn-init config from a remote URL.
    init_metadata = Metadata(
        Init({'config': InitConfigFromHTTP(url='http://www.example.com')}))
    LaunchConfiguration(
        "launchconfig",
        tpl,
        ImageId='ami-809f84e6',
        InstanceType='t2.micro',
        Metadata=init_metadata)
    return json.loads(tpl.to_json())
def define_web_auto_scaling(template, alb_target_group_80, alb_target_group_9090, sg):
    """Add the web-tier launch configuration and autoscaling group.

    :param template: template to add the resources to
    :param alb_target_group_80: ALB target group for port 80
    :param alb_target_group_9090: ALB target group for port 9090
    :param sg: security group for the instances
    :return: dict with the ``launch_config`` and ``autoscaling_group``
    """
    # Bootstrap script: log all user-data output, then pull and run the
    # application update script as the deploy user.
    bootstrap_lines = [
        '#!/bin/bash\n',
        'set -x\n',
        'exec > >(tee /tmp/user-data.log|logger -t user-data ) 2>&1\n',
        'sudo su - deploy -c "echo \\"export RAILS_ENV=' + env + '\\" >> ~/.bashrc"\n',
        'sudo su - deploy -c "cd ~/app/current; wget http://taxweb-deploy.s3.amazonaws.com/' + app_name + '/app_update.sh -O app_update.sh >/dev/null 2>&1"\n',
        'sudo su - deploy -c "cd ~/app/current && chmod 755 app_update.sh && ./app_update.sh ' + env + ' web ' + app_name + '"\n',
    ]
    launch_config = LaunchConfiguration(
        stack_name_strict + "WebLC",
        UserData=Base64(Join('', bootstrap_lines)),
        ImageId=Ref(base_ami),
        InstanceType=Ref(instance_type),
        KeyName="taxweb-AWS-US-West",
        SecurityGroups=[Ref(sg)])
    template.add_resource(launch_config)

    scaling_group = AutoScalingGroup(
        stack_name_strict + "WebASG",
        Tags=[
            Tag("Name", stack_name + "-web", True),
            Tag("Custo", app_name, True),
            Tag("Env", env, True),
            Tag("Role", "web", True),
        ],
        LaunchConfigurationName=Ref(launch_config),
        MinSize=1,
        MaxSize=1,
        DesiredCapacity=1,
        VPCZoneIdentifier=[Ref(subnet_id1), Ref(subnet_id2)],
        TargetGroupARNs=[Ref(alb_target_group_80), Ref(alb_target_group_9090)],
        HealthCheckType="ELB",
        HealthCheckGracePeriod="300",
    )
    template.add_resource(scaling_group)

    return {
        "launch_config": launch_config,
        "autoscaling_group": scaling_group
    }
def create_launch_configuration_resource(name, securityGroups):
    """Create a simple LaunchConfiguration resource.

    Arguments:
        name {str} -- logical name of the resource
        securityGroups {List[str]} -- list of security group refs

    Returns:
        [troposphere.resource] -- [LaunchConfiguration]
    """
    # BUG FIX: AWS::AutoScaling::LaunchConfiguration has no ``Tags``
    # property, and troposphere rejects unknown properties at construction
    # time — the previous ``Tags=Tags(...)`` argument raised an error.
    return LaunchConfiguration(
        name,
        ImageId=MOCK_IMAGE_ID,
        KeyName=MOCK_KEY_PAIR_NAME,
        InstanceType="m1.small",
        SecurityGroups=securityGroups)
def create_launch_config(stack, name, ami, security_group, instance_type, profile, block_devices=None, user_data=""):
    """Add EC2 LaunchConfiguration Resource.

    :param stack: stack wrapper exposing ``stack`` (template), ``env`` and
        ``ssh_key_param``
    :param name: logical name fragment; underscores are stripped
    :param ami: AMI ID for the instances
    :param security_group: security groups for the instances
    :param instance_type: EC2 instance type
    :param profile: IAM instance profile
    :param block_devices: optional list of block device mappings
        (FIX: default changed from a shared mutable ``[]`` to ``None``;
        behavior for callers is unchanged)
    :param user_data: raw user-data string, Base64-encoded here
    :return: the LaunchConfiguration resource added to the stack
    """
    return stack.stack.add_resource(
        LaunchConfiguration(
            '{0}{1}LC'.format(stack.env, name.replace('_', '')),
            ImageId=ami,
            KeyName=Ref(stack.ssh_key_param),
            SecurityGroups=security_group,
            InstanceType=instance_type,
            IamInstanceProfile=profile,
            UserData=Base64(user_data),
            BlockDeviceMappings=block_devices if block_devices is not None else []))
def add_nat_asg(self):
    """
    Create the NAT launch configuration and its single-instance ASG.

    User data is the NAT takeover script, optionally followed by the NTP
    takeover script and any extra user data supplied by the caller.

    :return: the AutoScalingGroup resource added to the template
    """
    user_data = [resources.get_resource('nat_takeover.sh')]
    if self.enable_ntp:
        user_data.append(resources.get_resource('ntp_takeover.sh'))
    if self.extra_user_data:
        # FIX: close the file deterministically instead of leaking the
        # handle until garbage collection.
        with open(self.extra_user_data) as extra:
            user_data.append(extra.read())
    nat_launch_config = self.add_resource(
        LaunchConfiguration(
            "Nat%sLaunchConfig" % str(self.subnet_index),
            UserData=Base64(Join('\n', user_data)),
            ImageId=FindInMap('RegionMap', Ref('AWS::Region'), 'natAmiId'),
            KeyName=Ref('ec2Key'),
            SecurityGroups=[Ref(self.sg)],
            EbsOptimized=False,
            IamInstanceProfile=Ref(self.instance_profile),
            InstanceType=self.instance_type,
            AssociatePublicIpAddress=True))
    # Exactly one NAT instance, pinned to this subnet index.
    nat_asg = self.add_resource(
        AutoScalingGroup(
            "Nat%sASG" % str(self.subnet_index),
            DesiredCapacity=1,
            Tags=[
                Tag("Name", Join("-", [Ref(self.vpc_id), "NAT"]), True),
                Tag("isNat", "true", True)
            ],
            MinSize=1,
            MaxSize=1,
            Cooldown="30",
            LaunchConfigurationName=Ref(nat_launch_config),
            HealthCheckGracePeriod=30,
            HealthCheckType="EC2",
            VPCZoneIdentifier=[
                Ref(self.subnets['public'][self.subnet_index])
            ]))
    return nat_asg
def create_launch_config(self, title, asg_config, network_config):
    """
    Add a launch configuration resource to the cloud formation document.

    AWS Cloud Formation links:
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig.html
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-launchconfig-blockdev-mapping.html

    Troposphere links:
    https://github.com/cloudtools/troposphere/blob/master/troposphere/autoscaling.py
    https://github.com/cloudtools/troposphere/blob/master/troposphere/ec2.py

    :param title: Title of the autoscaling application
    :param asg_config: object holding asg related variables
    :param network_config: object holding network related variables
    :return: string representing Launch Configuration name
    """
    lc_title = title + 'Lc'
    launch_config = LaunchConfiguration(
        lc_title,
        AssociatePublicIpAddress=False,
        ImageId=asg_config.image_id,
        InstanceMonitoring=False,
        InstanceType=asg_config.instance_type,
        KeyName=network_config.keypair,
        SecurityGroups=[self.security_group],
    )
    self.lc = self.template.add_resource(launch_config)

    # Attach the instance profile only when one was configured.
    if asg_config.iam_instance_profile_arn is not None:
        self.lc.IamInstanceProfile = asg_config.iam_instance_profile_arn

    # UserData must always be a valid string; real user data is wrapped
    # in Base64, absent user data becomes the empty string.
    self.lc.UserData = '' if asg_config.userdata is None \
        else Base64(asg_config.userdata)

    # Attach block device mappings when they were configured.
    if asg_config.block_devices_config is not None:
        self.lc.BlockDeviceMappings = Bdm(
            lc_title, asg_config.block_devices_config).block_device_mappings

    return lc_title
def create_launch_configuration(self, asg_args):
    '''
    Create a Launch Configuration and add it to the resources list.

    @param asg_args [dict] collection of keyword arguments for the
        launch configuration
    '''
    # Signal the ASG first, then run the caller-supplied user data.
    bootstrap = Join('', [
        "#!/bin/bash\n",
        "cfn-signal -e 0",
        " --resource AutoscalingGroup",
        " --stack ", Ref("AWS::StackName"),
        " --region ", Ref("AWS::Region"),
        "\n",
        asg_args['user_data']
    ])
    self.launch_configuration = self.add_resource(
        LaunchConfiguration(
            "LaunchConfiguration",
            UserData=Base64(bootstrap),
            ImageId=asg_args['ami_id'],
            KeyName=asg_args['key_pair_name'],
            SecurityGroups=[Ref(self.instance_security_group)],
            InstanceType=asg_args['instance_type'],
            AssociatePublicIpAddress=True,
        ))
def add_auto_scaling_group(self, asg):
    """Add an auto scaling group (and its launch configuration) to the stack.

    :param asg: dict describing the group (name, instance_type,
        desired_instances, min_size, max_size)
    """
    asg_name = sanitize_cfn_resource_name(asg.get('name'))

    # Always include the incoming SG and the cluster's DB badge SG, then
    # any extra groups configured on the cluster.
    security_groups = [
        SecurityGroups(GroupId=Ref('IncomingSg')),
        SecurityGroups(GroupId=ImportValue(
            '{}-cluster:DBBadgeSg'.format(self.cluster.get('name')))),
    ]
    security_groups.extend(
        SecurityGroups(GroupId=group)
        for group in self.cluster.get('security_groups', []))

    launch_config = LaunchConfiguration(
        "ASGLaunchConfig{}".format(asg_name),
        IamInstanceProfile=self.instance_role,
        ImageId=self.ami,
        InstanceType=asg.get('instance_type'),
        KeyName=self.keypair,
        SecurityGroups=security_groups,
        BlockDeviceMappings=self.block_devices,
        UserData=Base64(Sub(self.open_userdata()))
    )
    self.template.add_resource(launch_config)

    # When the stack's Status parameter is not 'active', MinSize drops to 0.
    self.template.add_condition('IsActive', Equals(Ref('Status'), 'active'))

    self.template.add_resource(AutoScalingGroup(
        "ASG{}".format(asg_name),
        VPCZoneIdentifier=self.cluster.get('subnets'),
        Cooldown=300,
        DesiredCapacity=asg.get('desired_instances', 1),
        HealthCheckType='EC2',
        HealthCheckGracePeriod=60,
        LaunchConfigurationName=Ref(launch_config),
        MinSize=If('IsActive', asg.get('min_size', 1), 0),
        MaxSize=asg.get('max_size', 1)
    ))
def build_template(sierrafile):
    """Build the full CloudFormation template for a deployment.

    Declares, in order: template parameters, the VPC/networking layer, an
    ECS cluster backed by an autoscaling group of container hosts, and one
    ECS service per entry in ``sierrafile.services`` — with an optional
    CodePipeline/CodeBuild CI pipeline and GitHub webhook per service.

    :param sierrafile: configuration object; provides ``extra_params``,
        ``env_vars`` and ``services`` (exact schema defined by the caller —
        assumed here, confirm against the sierrafile loader).
    :return: the populated troposphere ``Template``
    """
    template = Template()
    template.add_version('2010-09-09')
    template.add_metadata(build_interface(sierrafile.extra_params))

    parameters = AttrDict(

        # Network Parameters

        vpc_cidr=template.add_parameter(Parameter(
            'VpcCidr',
            Type='String',
            Default='192.172.0.0/16',
        )),
        subnet1_cidr=template.add_parameter(Parameter(
            'Subnet1Cidr',
            Type='String',
            Default='192.172.1.0/24',
        )),
        subnet2_cidr=template.add_parameter(Parameter(
            'Subnet2Cidr',
            Type='String',
            Default='192.172.2.0/24',
        )),

        # ECS Parameters

        cluster_size=template.add_parameter(Parameter(
            'ClusterSize',
            Type='Number',
            Default=2,
        )),
        instance_type=template.add_parameter(Parameter(
            'InstanceType',
            Type='String',
            Default='t2.medium'
        )),
        key_name=template.add_parameter(Parameter(
            'KeyName',
            Type='AWS::EC2::KeyPair::KeyName',
        )),
        image_id=template.add_parameter(Parameter(
            'ImageId',
            Type='AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>',
            Default=(
                '/aws/service/ecs/optimized-ami'
                '/amazon-linux/recommended/image_id'
            ),
            Description=(
                'An SSM parameter that resolves to a valid AMI ID.'
                ' This is the AMI that will be used to create ECS hosts.'
                ' The default is the current recommended ECS-optimized AMI.'
            )
        )),

        # Other Parameters

        github_token=template.add_parameter(Parameter(
            'GitHubToken',
            Type='String',
            NoEcho=True,
        )),
    )

    # Environment Variable Parameters
    # One NoEcho String parameter per configured environment variable.
    for env_var_param, env_var_name in sierrafile.extra_params:
        template.add_parameter(Parameter(
            env_var_param,
            Type='String',
            NoEcho=True,
        ))

    # Resource Declarations

    # # Network

    network_vpc = template.add_resource(VPC(
        'NetworkVpc',
        CidrBlock=Ref(parameters.vpc_cidr),
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    network_ig = template.add_resource(InternetGateway(
        'NetworkInternetGateway',
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    vpc_attach = template.add_resource(VPCGatewayAttachment(
        'NetworkInternetGatewayAttachment',
        InternetGatewayId=Ref(network_ig),
        VpcId=Ref(network_vpc),
    ))

    route_table = template.add_resource(RouteTable(
        'NetworkRouteTable',
        VpcId=Ref(network_vpc),
        Tags=Tags(Name=Ref('AWS::StackName')),
    ))

    # Default route to the internet; must wait for the gateway attachment.
    template.add_resource(Route(
        'NetworkDefaultRoute',
        DependsOn=[vpc_attach.title],
        RouteTableId=Ref(route_table),
        DestinationCidrBlock='0.0.0.0/0',
        GatewayId=Ref(network_ig),
    ))

    # Two public subnets in the first two AZs of the region.
    subnet1 = template.add_resource(Subnet(
        'NetworkSubnet1',
        VpcId=Ref(network_vpc),
        AvailabilityZone=Select(0, GetAZs()),
        MapPublicIpOnLaunch=True,
        CidrBlock=Ref(parameters.subnet1_cidr),
        Tags=Tags(Name=Sub('${AWS::StackName} (Public)')),
    ))

    subnet2 = template.add_resource(Subnet(
        'NetworkSubnet2',
        VpcId=Ref(network_vpc),
        AvailabilityZone=Select(1, GetAZs()),
        MapPublicIpOnLaunch=True,
        CidrBlock=Ref(parameters.subnet2_cidr),
        Tags=Tags(Name=Sub('${AWS::StackName} (Public)')),
    ))

    template.add_resource(SubnetRouteTableAssociation(
        'NetworkSubnet1RouteTableAssociation',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet1),
    ))

    template.add_resource(SubnetRouteTableAssociation(
        'NetworkSubnet2RouteTableAssociation',
        RouteTableId=Ref(route_table),
        SubnetId=Ref(subnet2),
    ))

    # Network load balancer fronting all services.
    elb = template.add_resource(LoadBalancer(
        ELB_NAME,
        Name=Sub('${AWS::StackName}-elb'),
        Type='network',
        Subnets=[Ref(subnet1), Ref(subnet2)],
    ))

    # # Cluster

    ecs_host_role = template.add_resource(Role(
        'EcsHostRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal('Service', 'ec2.amazonaws.com'),
                Action=[awacs.sts.AssumeRole]
            )],
        ),
        ManagedPolicyArns=[
            'arn:aws:iam::aws:policy/'
            'service-role/AmazonEC2ContainerServiceforEC2Role'
        ]
    ))

    ecs_host_profile = template.add_resource(InstanceProfile(
        'EcsHostInstanceProfile',
        Roles=[Ref(ecs_host_role)]
    ))

    # NOTE(review): ingress is wide open (all protocols from 0.0.0.0/0);
    # presumably acceptable for this deployment — confirm.
    ecs_host_sg = template.add_resource(SecurityGroup(
        'EcsHostSecurityGroup',
        GroupDescription=Sub('${AWS::StackName}-hosts'),
        VpcId=Ref(network_vpc),
        SecurityGroupIngress=[SecurityGroupRule(
            CidrIp='0.0.0.0/0',
            IpProtocol='-1'
        )]
    ))

    cluster = template.add_resource(Cluster(
        'EcsCluster',
        ClusterName=Ref('AWS::StackName')
    ))

    autoscaling_name = 'EcsHostAutoScalingGroup'
    launch_conf_name = 'EcsHostLaunchConfiguration'

    # ECS host launch configuration: user data runs cfn-init (which applies
    # the AWS::CloudFormation::Init metadata below) and then cfn-signal.
    launch_conf = template.add_resource(LaunchConfiguration(
        launch_conf_name,
        ImageId=Ref(parameters.image_id),
        InstanceType=Ref(parameters.instance_type),
        IamInstanceProfile=Ref(ecs_host_profile),
        KeyName=Ref(parameters.key_name),
        SecurityGroups=[Ref(ecs_host_sg)],
        UserData=Base64(Sub(
            '#!/bin/bash\n'
            'yum install -y aws-cfn-bootstrap\n'
            '/opt/aws/bin/cfn-init -v'
            ' --region ${AWS::Region}'
            ' --stack ${AWS::StackName}'
            f' --resource {launch_conf_name}\n'
            '/opt/aws/bin/cfn-signal -e $?'
            ' --region ${AWS::Region}'
            ' --stack ${AWS::StackName}'
            f' --resource {autoscaling_name}\n'
        )),
        Metadata={
            'AWS::CloudFormation::Init': {
                'config': {
                    'commands': {
                        # Join the host to this stack's ECS cluster.
                        '01_add_instance_to_cluster': {
                            'command': Sub(
                                f'echo ECS_CLUSTER=${{{cluster.title}}}'
                                f' > /etc/ecs/ecs.config'
                            ),
                        }
                    },
                    'files': {
                        '/etc/cfn/cfn-hup.conf': {
                            'mode': 0o400,
                            'owner': 'root',
                            'group': 'root',
                            'content': Sub(
                                '[main]\n'
                                'stack=${AWS::StackId}\n'
                                'region=${AWS::Region}\n'
                            ),
                        },
                        # Re-run cfn-init when the launch config metadata
                        # changes in a stack update.
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf': {
                            'content': Sub(
                                '[cfn-auto-reloader-hook]\n'
                                'triggers=post.update\n'
                                'path=Resources.ContainerInstances.Metadata'
                                '.AWS::CloudFormation::Init\n'
                                'action=/opt/aws/bin/cfn-init -v'
                                ' --region ${AWS::Region}'
                                ' --stack ${AWS::StackName}'
                                f' --resource {launch_conf_name}\n'
                            ),
                        },
                    },
                    'services': {
                        'sysvinit': {
                            'cfn-hup': {
                                'enabled': True,
                                'ensureRunning': True,
                                'files': [
                                    '/etc/cfn/cfn-hup.conf',
                                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                                ]
                            }
                        }
                    }
                }
            }
        }
    ))

    # Fixed-size host group; rolling updates wait for cfn-signal.
    autoscaling_group = template.add_resource(AutoScalingGroup(
        autoscaling_name,
        VPCZoneIdentifier=[Ref(subnet1), Ref(subnet2)],
        LaunchConfigurationName=Ref(launch_conf),
        DesiredCapacity=Ref(parameters.cluster_size),
        MinSize=Ref(parameters.cluster_size),
        MaxSize=Ref(parameters.cluster_size),
        Tags=[{
            'Key': 'Name',
            'Value': Sub('${AWS::StackName} - ECS Host'),
            'PropagateAtLaunch': True,
        }],
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M'),
        ),
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                MinInstancesInService=1,
                MaxBatchSize=1,
                PauseTime='PT5M',
                WaitOnResourceSignals=True,
            ),
        ),
    ))

    # # Services

    task_role = template.add_resource(Role(
        'TaskExecutionRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal('Service', 'ecs-tasks.amazonaws.com'),
                Action=[awacs.sts.AssumeRole],
            )],
        ),
        ManagedPolicyArns=[
            'arn:aws:iam::aws:policy/'
            'service-role/AmazonECSTaskExecutionRolePolicy'
        ],
    ))

    # Pipeline artifacts survive stack deletion.
    artifact_bucket = template.add_resource(Bucket(
        'ArtifactBucket',
        DeletionPolicy='Retain',
    ))

    codebuild_role = template.add_resource(Role(
        'CodeBuildServiceRole',
        Path='/',
        AssumeRolePolicyDocument=PolicyDocument(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal(
                        'Service', 'codebuild.amazonaws.com'
                    ),
                    Action=[
                        awacs.sts.AssumeRole,
                    ],
                ),
            ],
        ),
        Policies=[Policy(
            PolicyName='root',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.ssm.GetParameters,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.s3.GetObject,
                            awacs.s3.PutObject,
                            awacs.s3.GetObjectVersion,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.logs.CreateLogGroup,
                            awacs.logs.CreateLogStream,
                            awacs.logs.PutLogEvents,
                        ],
                    ),
                ],
            ),
        )],
    ))

    codepipeline_role = template.add_resource(Role(
        'CodePipelineServiceRole',
        Path='/',
        AssumeRolePolicyDocument=PolicyDocument(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Effect=Allow,
                    Principal=Principal(
                        'Service', 'codepipeline.amazonaws.com'
                    ),
                    Action=[
                        awacs.sts.AssumeRole,
                    ],
                ),
            ],
        ),
        Policies=[Policy(
            PolicyName='root',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Resource=[
                            Sub(f'${{{artifact_bucket.title}.Arn}}/*')
                        ],
                        Effect=Allow,
                        Action=[
                            awacs.s3.GetBucketVersioning,
                            awacs.s3.GetObject,
                            awacs.s3.GetObjectVersion,
                            awacs.s3.PutObject,
                        ],
                    ),
                    Statement(
                        Resource=['*'],
                        Effect=Allow,
                        Action=[
                            awacs.ecs.DescribeServices,
                            awacs.ecs.DescribeTaskDefinition,
                            awacs.ecs.DescribeTasks,
                            awacs.ecs.ListTasks,
                            awacs.ecs.RegisterTaskDefinition,
                            awacs.ecs.UpdateService,
                            awacs.codebuild.StartBuild,
                            awacs.codebuild.BatchGetBuilds,
                            awacs.iam.PassRole,
                        ],
                    ),
                ],
            ),
        )],
    ))

    log_group = template.add_resource(LogGroup(
        'LogGroup',
        LogGroupName=Sub('/ecs/${AWS::StackName}'),
    ))

    # One shared CodeBuild project, created only if any service has its
    # pipeline enabled.
    if any(conf.pipeline.enable for conf in sierrafile.services.values()):
        project = template.add_resource(Project(
            'CodeBuildProject',
            Name=Sub('${AWS::StackName}-build'),
            ServiceRole=Ref(codebuild_role),
            Artifacts=Artifacts(Type='CODEPIPELINE'),
            Source=Source(Type='CODEPIPELINE'),
            Environment=Environment(
                ComputeType='BUILD_GENERAL1_SMALL',
                Image='aws/codebuild/docker:17.09.0',
                Type='LINUX_CONTAINER',
            ),
        ))

    # Per-service task definition, NLB listener/target group, ECS service,
    # and (optionally) a CodePipeline with a GitHub webhook.
    for name, settings in sierrafile.services.items():
        task_definition = template.add_resource(TaskDefinition(
            f'{name}TaskDefinition',
            RequiresCompatibilities=['EC2'],
            Cpu=str(settings.container.cpu),
            Memory=str(settings.container.memory),
            NetworkMode='bridge',
            ExecutionRoleArn=Ref(task_role.title),
            ContainerDefinitions=[
                ContainerDefinition(
                    Name=f'{name}',
                    Image=settings.container.image,
                    Memory=str(settings.container.memory),
                    Essential=True,
                    PortMappings=[
                        PortMapping(
                            ContainerPort=settings.container.port,
                            Protocol='tcp',
                        ),
                    ],
                    # Only env vars the service declares are passed in.
                    Environment=[
                        troposphere.ecs.Environment(Name=k, Value=v)
                        for k, v in sierrafile.env_vars.items()
                        if k in settings.get('environment', [])
                    ],
                    LogConfiguration=LogConfiguration(
                        LogDriver='awslogs',
                        Options={
                            'awslogs-region': Ref('AWS::Region'),
                            'awslogs-group': Ref(log_group.title),
                            'awslogs-stream-prefix': Ref('AWS::StackName'),
                        },
                    ),
                ),
            ],
        ))

        target_group = template.add_resource(TargetGroup(
            f'{name}TargetGroup',
            Port=settings.container.port,
            Protocol='TCP',
            VpcId=Ref(network_vpc),
            Tags=Tags(Name=Sub(f'${{AWS::StackName}}-{name}')),
        ))

        listener = template.add_resource(Listener(
            f'{name}ElbListener',
            LoadBalancerArn=Ref(elb),
            Port=settings.container.port,
            Protocol='TCP',
            DefaultActions=[
                Action(TargetGroupArn=Ref(target_group), Type='forward')
            ],
        ))

        service = template.add_resource(Service(
            f'{name}Service',
            Cluster=Ref(cluster),
            ServiceName=f'{name}-service',
            DependsOn=[autoscaling_group.title, listener.title],
            DesiredCount=settings.container.count,
            TaskDefinition=Ref(task_definition),
            LaunchType='EC2',
            LoadBalancers=[
                troposphere.ecs.LoadBalancer(
                    ContainerName=f'{name}',
                    ContainerPort=settings.container.port,
                    TargetGroupArn=Ref(target_group),
                ),
            ],
        ))

        if settings.pipeline.enable:
            # Source (GitHub) -> Build (CodeBuild) -> Deploy (ECS).
            pipeline = template.add_resource(Pipeline(
                f'{name}Pipeline',
                RoleArn=GetAtt(codepipeline_role, 'Arn'),
                ArtifactStore=ArtifactStore(
                    Type='S3',
                    Location=Ref(artifact_bucket),
                ),
                Stages=[
                    Stages(
                        Name='Source',
                        Actions=[Actions(
                            Name='Source',
                            ActionTypeId=ActionTypeId(
                                Category='Source',
                                Owner='ThirdParty',
                                Version='1',
                                Provider='GitHub',
                            ),
                            OutputArtifacts=[
                                OutputArtifacts(Name=f'{name}Source'),
                            ],
                            RunOrder='1',
                            Configuration={
                                'Owner': settings.pipeline.user,
                                'Repo': settings.pipeline.repo,
                                'Branch': settings.pipeline.branch,
                                'OAuthToken': Ref(parameters.github_token),
                            },
                        )],
                    ),
                    Stages(
                        Name='Build',
                        Actions=[Actions(
                            Name='Build',
                            ActionTypeId=ActionTypeId(
                                Category='Build',
                                Owner='AWS',
                                Version='1',
                                Provider='CodeBuild',
                            ),
                            InputArtifacts=[
                                InputArtifacts(Name=f'{name}Source'),
                            ],
                            OutputArtifacts=[
                                OutputArtifacts(Name=f'{name}Build'),
                            ],
                            RunOrder='1',
                            Configuration={
                                'ProjectName': Ref(project),
                            },
                        )],
                    ),
                    Stages(
                        Name='Deploy',
                        Actions=[Actions(
                            Name='Deploy',
                            ActionTypeId=ActionTypeId(
                                Category='Deploy',
                                Owner='AWS',
                                Version='1',
                                Provider='ECS',
                            ),
                            InputArtifacts=[
                                InputArtifacts(Name=f'{name}Build')
                            ],
                            RunOrder='1',
                            Configuration={
                                'ClusterName': Ref(cluster),
                                'ServiceName': Ref(service),
                                'FileName': 'image.json',
                            },
                        )],
                    ),
                ],
            ))

            # GitHub push webhook that triggers the Source stage.
            template.add_resource(Webhook(
                f'{name}CodePipelineWebhook',
                Name=Sub(f'${{AWS::StackName}}-{name}-webhook'),
                Authentication='GITHUB_HMAC',
                AuthenticationConfiguration=AuthenticationConfiguration(
                    SecretToken=Ref(parameters.github_token),
                ),
                Filters=[FilterRule(
                    JsonPath='$.ref',
                    MatchEquals=f'refs/heads/{settings.pipeline.branch}'
                )],
                TargetAction='Source',
                TargetPipeline=Ref(pipeline),
                TargetPipelineVersion=1,
                RegisterWithThirdParty=True,
            ))

    return template
LaunchConfiguration( "myLaunchConfig", ImageId=FindInMap("RegionMap", {"Ref": "AWS::Region"}, "AMIid"), SecurityGroups=FindInMap("RegionMap", {"Ref": "AWS::Region"}, "SGid"), KeyName=Ref(keyname_param), InstanceType=Ref(instanceType_param), IamInstanceProfile=Ref(iam_instanceprofile_resource), UserData=Base64( Join("", [ "#!/bin/bash\n", "yum clean all\n", "yum update -y\n", "yum install pystache python-daemon -y\n", "/bin/rpm -U https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.amzn1.noarch.rpm\n", "/opt/aws/bin/cfn-init ", " --stack ", { "Ref": "AWS::StackName" }, " --resource myLaunchConfig", " --configsets InstallandRun", " --region ", { "Ref": "AWS::Region" }, "\n" ])), Metadata=Metadata( cf.Init({ "configsets": cf.InitConfigSets(InstallandRun=["install", "config"]), "install": cf.InitConfig(packages={"yum": { "git": [], "wget": [] }}), "config": cf.InitConfig(files=cf.InitFiles({ "/tmp/example.txt": cf.InitFile(content=Join('', [ "This is a file example.\n", "See another examples in:\n", "https://github.com/rabeloo/cf-templates\n" ]), owner="root", group="root", mode="000600") }), ), }))))
def _create_cfn_template(self):
    """Build the CloudFormation template for an EKS node group.

    Populates ``self.tpl`` with: the node-group IAM role/instance profile
    (reusing ``self.role`` when provided), the node security group plus the
    cross-SG rules linking nodes and the cluster control plane, an imported
    SSH key pair when none was supplied, and finally the launch
    configuration and autoscaling group for the nodes.
    """
    self.tpl = Template()
    self.tpl.add_version('2010-09-09')
    self.tpl.add_description('CFN template to create an EKS node group and affiliated resources.')
    # Tag key EKS uses to recognise resources owned by this cluster.
    eks_tag = 'kubernetes.io/cluster/{}'.format(self.cluster.name)

    # --- IAM role / instance profile -----------------------------------
    r = self.resources.get(self.RESOURCE_NG_ROLE.name)
    if self.role:
        # A role name was supplied: wrap it in a profile and only report
        # its ARN as an output; nothing new is created for the role.
        profile = InstanceProfile(
            self.RESOURCE_NG_PROFILE.name,
            InstanceProfileName=self.tag_name,
            Path='/',
            Roles=[self.role])
        account_id = boto3.session.Session().client('sts').get_caller_identity().get('Account')
        role_arn = 'arn:aws:iam::{}:role/{}'.format(account_id, self.role)
        self.tpl.add_output(
            Output(self.RESOURCE_NG_ROLE.name,
                   Value=role_arn,
                   Description='Node group role'))
        r.status = Status.provided
        r.resource_id = role_arn
    else:
        # No role supplied: create one with the standard EKS worker
        # policies and let EC2 assume it.
        role = Role(
            self.RESOURCE_NG_ROLE.name,
            RoleName=self.tag_name,
            AssumeRolePolicyDocument=Policy(Statement=[
                Statement(Effect=Allow, Action=[AssumeRole],
                          Principal=Principal('Service', ['ec2.amazonaws.com'])),
            ], ),
            ManagedPolicyArns=['arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy',
                               'arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy',
                               'arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly']
        )
        self.tpl.add_resource(role)
        profile = InstanceProfile(
            self.RESOURCE_NG_PROFILE.name,
            InstanceProfileName=self.tag_name,
            Path='/',
            Roles=[Ref(role)])
        self.tpl.add_output(
            Output(self.RESOURCE_NG_ROLE.name,
                   Value=GetAtt(role, 'Arn'),
                   Description='Node group role'))
    self.tpl.add_resource(profile)

    # --- Node security group -------------------------------------------
    # Optional caller-supplied ingress rules; otherwise no extra ingress.
    if self.sg_igresses:
        sg = SecurityGroup(
            self.RESOURCE_NG_SG.name,
            VpcId=self.cluster.vpc,
            Tags=Tags({'Name': self.tag_name, eks_tag: 'owned'}),
            GroupDescription='Security Group applied to the EKS node group',
            SecurityGroupIngress=[SecurityGroupRule(IpProtocol=r.protocol,
                                                    FromPort=r.from_port,
                                                    ToPort=r.to_port,
                                                    CidrIp=r.cidr)
                                  for r in self.sg_igresses]
        )
    else:
        sg = SecurityGroup(
            self.RESOURCE_NG_SG.name,
            VpcId=self.cluster.vpc,
            Tags=Tags({'Name': self.tag_name, eks_tag: 'owned'}),
            GroupDescription='Security Group applied to the EKS node group',
        )
    self.tpl.add_resource(sg)

    # Node-to-node: allow all traffic within the group.
    self.tpl.add_resource(SecurityGroupIngress(
        self.RESOURCE_NG_SG_INGRESS.name,
        DependsOn=sg,
        Description='Allow node to communicate with each other',
        GroupId=Ref(sg),
        SourceSecurityGroupId=Ref(sg),
        IpProtocol='-1',
        FromPort=0,
        ToPort=65535
    ))
    # Control plane -> kubelet/pods on the nodes.
    self.tpl.add_resource(SecurityGroupIngress(
        self.RESOURCE_NG_SG_CP_INGRESS.name,
        DependsOn=sg,
        Description='Allow kubelet and pods on the nodes to receive communication from the cluster control plane',
        GroupId=Ref(sg),
        SourceSecurityGroupId=self.cluster.sg,
        IpProtocol='tcp',
        FromPort=1025,
        ToPort=65535
    ))
    self.tpl.add_resource(SecurityGroupEgress(
        self.RESOURCE_CP_EGRESS_TO_NG.name,
        DependsOn=sg,
        Description='Allow the cluster control plane to communicate with nodes kubelet and pods',
        GroupId=self.cluster.sg,
        DestinationSecurityGroupId=Ref(sg),
        IpProtocol='tcp',
        FromPort=1025,
        ToPort=65535
    ))
    # Pods -> cluster API server (HTTPS).
    self.tpl.add_resource(SecurityGroupIngress(
        self.RESOURCE_CP_SG_INGRESS.name,
        DependsOn=sg,
        Description='Allow pods to communicate with the cluster API Server',
        GroupId=self.cluster.sg,
        SourceSecurityGroupId=Ref(sg),
        IpProtocol='tcp',
        FromPort=443,
        ToPort=443
    ))

    # --- Key pair -------------------------------------------------------
    # Import the local public key under a random name when no key pair was
    # supplied; otherwise record the provided one.
    ec2 = boto3.session.Session().resource('ec2')
    r = self.resources.get(self.RESOURCE_NG_KEYPAIR.name)
    if not self.keypair:
        keyname = 'eks{}'.format(''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(5)))
        with open(self.ssh_public_key, 'rb') as f:
            ec2.import_key_pair(KeyName=keyname, PublicKeyMaterial=f.read())
        self.keypair = keyname
        self.keypair_imported = True
        self.tpl.add_output(Output(self.OUTPUT_KEYNAME, Value=self.keypair,
                                   Description='Imported kaypair name'))
        r.status = Status.created
    else:
        r.status = Status.provided
        r.resource_id = self.keypair

    # --- Auto-scaling group and launch configuration -------------------
    if self.no_user_data:
        lc = LaunchConfiguration(
            self.RESOURCE_NG_ASG_LC.name,
            AssociatePublicIpAddress=self.use_public_ip,
            IamInstanceProfile=Ref(profile),
            ImageId=self.ami,
            InstanceType=self.instance,
            KeyName=self.keypair,
            SecurityGroups=[Ref(sg)])
    else:
        # Render the Jinja user-data template line by line.
        user_data = Base64(
            Join('', [line + '\n' for line in Environment().from_string(self.USER_DATA).render(
                ci=self.cluster,
                ng_asg=self.RESOURCE_NG_ASG.name,
                stack_name=self.stack_name,
                max_pods=self.MAX_PODS.get(self.instance),
                region=self.region).split('\n')]))
        lc = LaunchConfiguration(
            self.RESOURCE_NG_ASG_LC.name,
            AssociatePublicIpAddress=self.use_public_ip,
            IamInstanceProfile=Ref(profile),
            ImageId=self.ami,
            InstanceType=self.instance,
            KeyName=self.keypair,
            SecurityGroups=[Ref(sg)],
            UserData=user_data)
    self.tpl.add_resource(lc)
    self.tpl.add_resource(AutoScalingGroup(
        self.RESOURCE_NG_ASG.name,
        DesiredCapacity=self.desired,
        MinSize=self.min,
        MaxSize=self.max,
        LaunchConfigurationName=Ref(lc),
        VPCZoneIdentifier=self.subnets,
        Tags=[Tag('Name', self.tag_name, True), Tag(eks_tag, 'owned', True)],
        UpdatePolicy=UpdatePolicy(
            AutoScalingRollingUpdate=AutoScalingRollingUpdate(MinInstancesInService=1,
                                                              MaxBatchSize=1))))
Action=[Action("s3", "*")], Resource=["*"]), Statement( Effect=Allow, Action=[Action("logs", "*")], Resource=["*"]) ] ), Roles=[Ref("Role")] )) t.add_resource(LaunchConfiguration( "LaunchConfiguration", UserData=ud, ImageId="ami-97785bed", KeyName=Ref("KeyPair"), SecurityGroups=[Ref("SecurityGroup")], InstanceType=Ref("InstanceType"), IamInstanceProfile=Ref("InstanceProfile"), )) t.add_resource(AutoScalingGroup( "AutoscalingGroup", DesiredCapacity=Ref("ScaleCapacity"), LaunchConfigurationName=Ref("LaunchConfiguration"), MinSize=1, MaxSize=1, LoadBalancerNames=[Ref("LoadBalancer")], VPCZoneIdentifier="subnet-dac1f391", ))
init_config = {"config": InitConfig({})} LaunchConfig = t.add_resource( LaunchConfiguration( "LaunchConfig", Metadata=Init(init_config), UserData=Base64( Join("", [ "#!/bin/bash -xe\n", "yum update -y aws-cfn-bootstrap\n", "# Install the files and packages from the metadata\n", "/opt/aws/bin/cfn-init -v ", " --stack ", Ref("AWS::StackName"), " --resource LaunchConfig ", " --region ", Ref("AWS::Region"), "\n", "# Signal the status from cfn-init\n", "/opt/aws/bin/cfn-signal -e $? ", " --stack ", Ref("AWS::StackName"), " --resource WebServerGroup ", " --region ", Ref("AWS::Region"), "\n" ])), KeyName=Ref(KeyName), SecurityGroups=[Ref(WebServerSecurityGroup)], InstanceType=Ref(InstanceType), ImageId=FindInMap( "AWSRegionArch2AMI", Ref("AWS::Region"), FindInMap("AWSInstanceType2Arch", Ref(InstanceType), "Arch")), )) # @alias component @app:@db_ec2_sg to DBEC2SecurityGroup DBEC2SecurityGroup = t.add_resource( SecurityGroup(
SecurityGroupIngress=[ SecurityGroupRule(IpProtocol='tcp', FromPort='22', ToPort='22', CidrIp='10.10.0.0/32'), SecurityGroupRule(IpProtocol='tcp', FromPort='80', ToPort='80', CidrIp='0.0.0.0/0') ], VpcId=Ref(vpc))) launch_config = t.add_resource( LaunchConfiguration("TestLaunchConfiguration", ImageId='ami-0c276975654214bf3', KeyName='InnfisKey', SecurityGroups=[Ref(instanceSecurityGroup)], InstanceType="t2.small")) as_group = t.add_resource( AutoScalingGroup("TestAutoscalingGroup", DesiredCapacity=5, MinSize=3, MaxSize=10, LaunchConfigurationName=Ref(launch_config), VPCZoneIdentifier=[Ref(subnet1), Ref(subnet2)], AvailabilityZones=['ap-northeast-2a', 'ap-northeast-2b'], HealthCheckType="EC2", UpdatePolicy=UpdatePolicy( AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
InstanceProfile( 'EC2InstanceProfile', Roles=[Ref('EcsClusterRole')], )) t.add_resource( LaunchConfiguration( 'ContainerInstances', UserData=Base64( Join('', [ "#!/bin/bash -xe\n", "echo ECS_CLUSTER=", Ref('ECSCluster'), " >> /etc/ecs/ecs.config\n", "yum install -y aws-cfn-bootstrap\n", "/opt/aws/bin/cfn-signal -e $? ", " --stack ", Ref('AWS::StackName'), " --resource ECSAutoScalingGroup ", " --region ", Ref('AWS::Region'), "\n" ])), ImageId='ami-030c18dab55018cb3', KeyName=Ref("KeyPair"), SecurityGroups=[Ref("SecurityGroup")], IamInstanceProfile=Ref('EC2InstanceProfile'), InstanceType='t2.micro', AssociatePublicIpAddress='true', )) t.add_resource( AutoScalingGroup( 'ECSAutoScalingGroup', DesiredCapacity='1', MinSize='1',
def elb_asg_lc_template(app, env, nameSGRDS, rdsPort, instanceType, ami,
                        subnets, elbPort, elbCidrBlock, ec2Port,
                        desiredCapacity, minSize, maxSize, region,
                        nameBucket, officeIP):
    """Build the CloudFormation template for one app/env: an internet-facing
    classic ELB (with S3 access logs), an EC2 launch configuration, and an
    auto-scaling group behind the ELB.

    :param app: application name, used in logical ids and tags.
    :param env: environment name; VPC/bucket/role values are ImportValue'd
        from stacks exported under this name.
    :param nameSGRDS: name fragment of the exported RDS security group that
        is opened to the EC2 instances on rdsPort.
    :param rdsPort: RDS port opened from the EC2 security group.
    :param instanceType: EC2 instance type for the launch configuration.
    :param ami: AMI id for the launch configuration.
    :param subnets: subnet ids used by both the ELB and the ASG.
    :param elbPort: ELB listener port.
    :param elbCidrBlock: CIDR allowed to reach the ELB listener.
    :param ec2Port: instance port the ELB forwards to and health-checks.
    :param desiredCapacity: ASG desired capacity.
    :param minSize: ASG minimum size.
    :param maxSize: ASG maximum size.
    :param region: region whose AZs the ASG spans.
    :param nameBucket: name fragment of the exported access-log bucket.
    :param officeIP: CIDR allowed to SSH to the instances.
    :returns: the template rendered as a JSON string.
    """
    template = Template()

    # ELB security group: only elbCidrBlock may reach the listener port.
    sgELB = template.add_resource(
        SecurityGroup(
            "SecurityGroupELB" + app + env,
            GroupDescription="Security group for " + app + "-" + env,
            VpcId=ImportValue("VPC" + env),
            SecurityGroupIngress=[
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=elbPort,
                    ToPort=elbPort,
                    CidrIp=elbCidrBlock,
                )
            ],
            # All outbound traffic allowed.  BUG FIX: the port range was
            # inverted (FromPort=65535, ToPort=0); now 0-65535.
            SecurityGroupEgress=[
                SecurityGroupRule(IpProtocol="-1",
                                  FromPort=0,
                                  ToPort=65535,
                                  CidrIp="0.0.0.0/0")
            ],
            Tags=Tags(
                env=env,
                Name="sg-ELB" + app + "-" + env,
                app=app,
            ),
        ))

    # EC2 security group: service port only from the ELB SG; SSH only from
    # the office IP.
    sgEC2 = template.add_resource(
        SecurityGroup(
            "SecurityGroupEC2" + app + env,
            GroupDescription="Security group for EC2 " + app + "-" + env,
            VpcId=ImportValue("VPC" + env),
            DependsOn="SecurityGroupELB" + app + env,
            SecurityGroupIngress=[
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=ec2Port,
                    ToPort=ec2Port,
                    SourceSecurityGroupId=Ref(sgELB),
                ),
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=22,
                    ToPort=22,
                    CidrIp=officeIP,
                ),
            ],
            # BUG FIX: same inverted port range as above; now 0-65535.
            SecurityGroupEgress=[
                SecurityGroupRule(IpProtocol="-1",
                                  FromPort=0,
                                  ToPort=65535,
                                  CidrIp="0.0.0.0/0")
            ],
            Tags=Tags(
                env=env,
                Name="sg-EC2-" + app + "-" + env,
                app=app,
            ),
        ))

    # Open the externally-exported RDS security group to the instances.
    addIngressRDS = template.add_resource(
        SecurityGroupIngress(
            "ingressSGRDS" + app + env,
            SourceSecurityGroupId=Ref(sgEC2),
            Description="From EC2 instances",
            GroupId=ImportValue("SG-" + nameSGRDS + "-" + app + "-" + env),
            IpProtocol="tcp",
            FromPort=rdsPort,
            ToPort=rdsPort,
            DependsOn="SecurityGroupEC2" + app + env,
        ))

    launchConfig = template.add_resource(
        LaunchConfiguration(
            "LaunchConfiguration" + app + env,
            InstanceType=instanceType,
            ImageId=ami,
            SecurityGroups=[Ref(sgEC2)],
            IamInstanceProfile=ImportValue("Role-" + app + "-" + env)))

    # Allow the ELB service account to write access logs into the bucket.
    # NOTE(review): 156460612806 is a regional ELB log-delivery account id;
    # confirm it matches `region`.
    bucketPolicy = template.add_resource(
        BucketPolicy(
            "BucketPolicy" + nameBucket + app + env,
            Bucket=ImportValue("Bucket" + nameBucket + app + env),
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Action": ["s3:PutObject"],
                    "Effect": "Allow",
                    "Resource": Join("", [
                        "arn:aws:s3:::",
                        ImportValue("Bucket" + nameBucket + app + env),
                        "/AWSLogs/",
                        Ref("AWS::AccountId"), "/*"
                    ]),
                    "Principal": {
                        "AWS": ["156460612806"]
                    }
                }]
            }))

    # Internet-facing classic ELB with a TCP health check on the instance
    # port and access logs shipped to the bucket every 5 minutes.
    lb = template.add_resource(
        LoadBalancer(
            "LoadBalancer" + app + env,
            ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                Enabled=True,
                Timeout=120,
            ),
            Subnets=subnets,
            HealthCheck=elb.HealthCheck(
                "HealthCheck",
                Target="TCP:" + str(ec2Port),
                HealthyThreshold="5",
                UnhealthyThreshold="5",
                Interval="30",
                Timeout="15",
            ),
            Listeners=[
                elb.Listener(
                    LoadBalancerPort=elbPort,
                    InstancePort=ec2Port,
                    Protocol="HTTP",
                    InstanceProtocol="HTTP",
                ),
            ],
            CrossZone=True,
            SecurityGroups=[Ref(sgELB)],
            LoadBalancerName="lb-" + app + "-" + env,
            Scheme="internet-facing",
            AccessLoggingPolicy=AccessLoggingPolicy(
                "LoggingELB" + app + env,
                EmitInterval=5,
                Enabled=True,
                S3BucketName=ImportValue("Bucket" + nameBucket + app + env),
            )))

    # ASG behind the ELB; rolling updates keep one instance in service.
    # NOTE(review): WaitOnResourceSignals=True requires instances to call
    # cfn-signal, but the launch configuration ships no user data doing so
    # -- confirm signalling happens elsewhere or updates will time out.
    asg = template.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup" + app + env,
            DesiredCapacity=desiredCapacity,
            Tags=[Tag("Environment", env, True)],
            LaunchConfigurationName=Ref(launchConfig),
            MinSize=minSize,
            MaxSize=maxSize,
            LoadBalancerNames=[Ref(lb)],
            AvailabilityZones=GetAZs(region),
            VPCZoneIdentifier=subnets,
            HealthCheckType="ELB",
            HealthCheckGracePeriod=300,
            UpdatePolicy=UpdatePolicy(
                AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
                    WillReplace=True,
                ),
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    PauseTime='PT5M',
                    MinInstancesInService="1",
                    MaxBatchSize='1',
                    WaitOnResourceSignals=True,
                ))))

    return template.to_json()
# NOTE(review): bare LaunchConfiguration(...) expression -- presumably the
# argument of an add_resource(...) call begun above this excerpt; the
# trailing extra ")" closes that outer call.
LaunchConfiguration(
    "LaunchConfiguration",
    # cfn-init metadata: fetch an rsyslog drop-in from S3 (authenticated
    # via DeployUserAuth below) and keep rsyslog enabled/running.
    Metadata=autoscaling.Metadata(
        cloudformation.Init({
            "config":
            cloudformation.InitConfig(
                files=cloudformation.InitFiles({
                    "/etc/rsyslog.d/20-somethin.conf":
                    cloudformation.InitFile(
                        source=Join(
                            "",
                            [
                                "http://",
                                Ref(DeployBucket),
                                ".s3.amazonaws.com/stacks/",
                                Ref(RootStackName),
                                "/env/etc/rsyslog.d/20-somethin.conf",
                            ],
                        ),
                        mode="000644",
                        owner="root",
                        group="root",
                        authentication="DeployUserAuth",
                    )
                }),
                services={
                    "sysvinit":
                    cloudformation.InitServices({
                        "rsyslog":
                        cloudformation.InitService(
                            enabled=True,
                            ensureRunning=True,
                            files=["/etc/rsyslog.d/20-somethin.conf"],
                        )
                    })
                },
            )
        }),
        # S3 credentials used by the InitFile "source" fetch above.
        cloudformation.Authentication({
            "DeployUserAuth":
            cloudformation.AuthenticationBlock(
                type="S3",
                accessKeyId=Ref(DeployUserAccessKey),
                secretKey=Ref(DeployUserSecretKey),
            )
        }),
    ),
    # Unconditionally signals success (-e 0) to AutoscalingGroup.
    UserData=Base64(
        Join(
            "",
            [
                "#!/bin/bash\n",
                "cfn-signal -e 0",
                " --resource AutoscalingGroup",
                " --stack ",
                Ref("AWS::StackName"),
                " --region ",
                Ref("AWS::Region"),
                "\n",
            ],
        )),
    ImageId=Ref(AmiId),
    KeyName=Ref(KeyName),
    BlockDeviceMappings=[
        ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                               Ebs=ec2.EBSBlockDevice(VolumeSize="8")),
    ],
    SecurityGroups=[Ref(SecurityGroup)],
    InstanceType="m1.small",
))
def ec2(self):
    """Build the EC2 portion of the stack from self.data["ec2"].

    Returns a list of troposphere resources: one SecurityGroup plus one
    SecurityGroupIngress per configured rule, a launch configuration
    (with optional MIME-packed user data), and the scaling group.
    """
    # LOAD STACK TEMPLATE
    data = self.data["ec2"]
    resources = []
    groups = []

    for group_name, rule_specs in data["security_groups"].items():
        group = SecurityGroup(group_name,
                              VpcId=Ref("VPC"),
                              GroupDescription="BaseHost Security Group")
        groups.append(group)
        resources.append(group)

        # Rules are emitted as separate SecurityGroupIngress resources so a
        # rule may reference the very group it belongs to (for example,
        # allow all members to reach each other on 9300 for Elasticsearch
        # clustering).  The yaml for this case is:
        #
        # security_groups:
        #   EScluster:
        #     - FromPort: 9300
        #     - ToPort: 9300
        #     - SourceSecurityGroupId: { Ref: EScluster }
        for index, spec in enumerate(rule_specs):
            # Convert { Ref: "x"} to Ref("x")
            spec = self._convert_ref_dict_to_objects(spec)
            resources.append(
                SecurityGroupIngress("{}Rule{}".format(group_name, index),
                                     GroupId=Ref(group),
                                     **spec))

    mappings = []
    try:
        for entry in data["block_devices"]:
            mappings.append(
                BlockDeviceMapping(
                    DeviceName=entry["DeviceName"],
                    Ebs=EBSBlockDevice(VolumeSize=entry["VolumeSize"])))
    except KeyError:
        # No (or incomplete) block_devices config: default 20 GB root disk.
        mappings.append(
            BlockDeviceMapping(DeviceName="/dev/sda1",
                               Ebs=EBSBlockDevice(VolumeSize=20)))

    launch_config = LaunchConfiguration(
        "BaseHostLaunchConfig",
        KeyName=data["parameters"]["KeyName"],
        SecurityGroups=[Ref(group) for group in groups],
        InstanceType=data["parameters"]["InstanceType"],
        AssociatePublicIpAddress=True,
        IamInstanceProfile=Ref("InstanceProfile"),
        ImageId=FindInMap("AWSRegion2AMI", Ref("AWS::Region"), "AMI"),
        BlockDeviceMappings=mappings,
    )

    user_data = self.get_ec2_userdata()
    if user_data:
        packed = mime_packer.pack(user_data)
        launch_config.UserData = Base64(packed)
    resources.append(launch_config)

    # Allow deprecation of tags
    deprecated_tags = ["Env"]
    ec2_tags = []
    for key, value in data["tags"].items():
        if key in deprecated_tags:
            logging.warning("config: Tag '%s' is deprecated.." % (key))
        else:
            ec2_tags.append(Tag(key, value, True))

    scaling_group = AutoScalingGroup(
        "ScalingGroup",
        VPCZoneIdentifier=[Ref("SubnetA"), Ref("SubnetB"), Ref("SubnetC")],
        MinSize=data["auto_scaling"]["min"],
        MaxSize=data["auto_scaling"]["max"],
        DesiredCapacity=data["auto_scaling"]["desired"],
        AvailabilityZones=GetAZs(),
        Tags=ec2_tags,
        LaunchConfigurationName=Ref(launch_config),
    )
    resources.append(scaling_group)
    return resources
# NOTE(review): the first line closes a statement begun above this excerpt.
), ], ))

# Worker-node launch configuration: Ambari bootstrap plus EBS-or-ephemeral
# disks chosen by the UseEBSBool condition.
WorkerNodeLaunchConfig = t.add_resource(
    LaunchConfiguration(
        "WorkerNodeLaunchConfig",
        UserData=Base64(
            Join(
                "",
                my_bootstrap_script('WorkerNodes', 'true', 'false',
                                    ref_ambariserver))),
        ImageId=FindInMap("RHEL66", Ref("AWS::Region"), "AMI"),
        BlockDeviceMappings=If(
            "UseEBSBool",
            my_block_device_mappings_ebs(ref_disk_worker_ebs_diskcount,
                                         "/dev/sd",
                                         ref_disk_worker_ebs_volumesize,
                                         "gp2"),
            my_block_device_mappings_ephemeral(24, "/dev/sd")),
        KeyName=Ref(KeyName),
        SecurityGroups=[Ref("DefaultSecurityGroup")],
        IamInstanceProfile=Ref("NodeInstanceProfile"),
        InstanceType=Ref(InstanceType),
        AssociatePublicIpAddress="true",
    ))

# NOTE(review): this statement continues past the end of this excerpt.
WorkerNodes = t.add_resource(
    AutoScalingGroup(
        "WorkerNodes",
        DesiredCapacity=Ref(WorkerInstanceCount),
# ELB for the app: port-80 listener across the app subnets.
load_balancer = LoadBalancer(config['name'] + "Elb")
load_balancer.CrossZone = True
load_balancer.Listeners = [elb_listener_80]
load_balancer.Subnets = [Ref(subnet.title) for subnet in app_subnets]
load_balancer.SecurityGroups = [Ref(elb_sg)]
t.add_resource(load_balancer)

# launch configuration for consul server
consul_block_device = EBSBlockDevice(config['name'] + 'Ebs')
consul_block_device.DeleteOnTermination = config['consul_launch_config']['block_device']['delete_on_termination']
consul_block_device_mapping = BlockDeviceMapping(config['name'] + 'ConsulBlockDeviceMapping')
consul_block_device_mapping.DeviceName = '/dev/sda1'  # root volume
consul_block_device_mapping.Ebs = consul_block_device
consul_launch_config = LaunchConfiguration(config['name'] + 'ConsulLaunchConfig')
consul_launch_config.AssociatePublicIpAddress = True
consul_launch_config.EbsOptimized = config['consul_launch_config']['ebs_optimized']
consul_launch_config.ImageId = config['consul_launch_config']['image_id']
consul_launch_config.KeyName = config['consul_launch_config']['key_name']
consul_launch_config.InstanceType = config['consul_launch_config']['instance_type']
consul_launch_config.BlockDeviceMappings = [consul_block_device_mapping]
# Instances carry both the "homeSsh" SG and the consul SG.
consul_launch_config.SecurityGroups = [Ref(config['name'] + 'homeSsh'), Ref(consul_sg)]
t.add_resource(consul_launch_config)

# auto scale group for consul server
# NOTE(review): further consul_asg attributes are presumably set past the
# end of this excerpt (it is not yet added to the template here).
consul_asg = AutoScalingGroup(config['name'] + 'ConsulAsg')
consul_asg.AvailabilityZones = config['consul_asg']['availability_zones']
consul_asg.LaunchConfigurationName = Ref(consul_launch_config)
consul_asg.MaxSize = config['consul_asg']['max_size']
consul_asg.MinSize = config['consul_asg']['min_size']
# NOTE(review): the first lines close a Role(...) statement begun above
# this excerpt.
"EC2Principal")),
         Path="/",
         Policies=[
             products_bucket_access, poll_messages,
             publish_notifications, get_parameters
         ]))

instance_profile = t.add_resource(
    InstanceProfile("HyP3WorkerInstanceProfile", Path="/",
                    Roles=[Ref(role)]))

# Spot-priced worker launch configuration; DependsOn the gateway attachment
# so instances have network connectivity on first boot.
launch_config = t.add_resource(
    LaunchConfiguration("HyP3LaunchConfiguration",
                        ImageId=FindInMap("Region2AMI",
                                          Ref("AWS::Region"), "AMIId"),
                        KeyName=Ref(keyname),
                        SecurityGroups=[Ref(security_group)],
                        InstanceType=Ref(instance_type),
                        UserData=user_data,
                        IamInstanceProfile=Ref(instance_profile),
                        DependsOn=net_gw_vpc_attachment,
                        SpotPrice=Ref(spot_price)))

# NOTE(review): this statement continues past the end of this excerpt.
processing_group = t.add_resource(
    AutoScalingGroup(
        "HyP3AutoscalingGroup",
        LaunchConfigurationName=Ref(launch_config),
        MinSize=0,
        MaxSize=Ref(max_instances),
        VPCZoneIdentifier=[Ref(subnet) for subnet in get_public_subnets()],
        HealthCheckType="EC2",
        Tags=Tags(Maturity=environment.maturity,
                  Project="hyp3-in-a-box",
def emit_configuration():
    """Register the VPN resources for this cloud/environment.

    Adds a VPN security group, IAM role + instance profile, launch
    configuration and a single-instance auto-scaling group to the
    module-level template.  Operates purely by side effect.
    """
    vpc = cfn.vpcs[0]
    region = Ref("AWS::Region")

    # Instance size is operator-selectable, constrained to usable types.
    vpn_instance_class = template.add_parameter(
        Parameter(
            'VPNInstanceType',
            Type='String',
            Default='m3.medium',
            Description='VPN instance type',
            AllowedValues=cfn.usable_instances(),
            ConstraintDescription='Instance size must be a valid instance type'
        ))

    # Ingress: SSH (tcp/22) and OpenVPN (udp/1194) from the default route.
    vpn_ingress_rules = []
    for protocol, port in [('tcp', 22), ('udp', 1194)]:
        vpn_ingress_rules.append(
            SecurityGroupRule(IpProtocol=protocol,
                              CidrIp=DEFAULT_ROUTE,
                              FromPort=port,
                              ToPort=port))

    vpn_sg = template.add_resource(
        SecurityGroup("VPN",
                      GroupDescription="Security Group for VPN ingress.",
                      VpcId=Ref(vpc),
                      SecurityGroupIngress=vpn_ingress_rules,
                      DependsOn=vpc.title))

    # IAM role for vpn: the shared default policy plus the VPN policy,
    # both rendered from Jinja templates.
    vpn_policy = json.loads(
        cfn.load_template("vpn_policy.json.j2", {
            "env": CLOUDENV,
            "cloud": CLOUDNAME,
            "region": "us-west-2"
        }))

    default_policy = json.loads(
        cfn.load_template("default_policy.json.j2", {
            "env": CLOUDENV,
            "cloud": CLOUDNAME,
            "region": "us-west-2"
        }))

    vpn_role_name = '.'.join(['vpn', CLOUDNAME, CLOUDENV])  # currently unused
    vpn_iam_role = template.add_resource(
        Role("VPNIamRole",
             AssumeRolePolicyDocument=ASSUME_ROLE_POLICY,
             Path="/",
             Policies=[
                 Policy(PolicyName="VPNDefaultPolicy",
                        PolicyDocument=default_policy),
                 Policy(PolicyName="VPNPolicy", PolicyDocument=vpn_policy)
             ],
             DependsOn=vpc.title))

    vpn_instance_profile = template.add_resource(
        InstanceProfile("vpnInstanceProfile",
                        Path="/",
                        Roles=[Ref(vpn_iam_role)],
                        DependsOn=vpn_iam_role.title))

    # Rendered bootstrap script shipped as the instances' user data.
    vpn_user_data = cfn.load_template("default-init.bash.j2", {
        "env": CLOUDENV,
        "cloud": CLOUDNAME,
        "deploy": "vpn"
    })

    # Launch Configuration for vpns
    vpn_launchcfg = template.add_resource(
        LaunchConfiguration(
            "VPNLaunchConfiguration",
            ImageId=FindInMap('RegionMap', region, int(cfn.Amis.INSTANCE)),
            InstanceType=Ref(vpn_instance_class),
            IamInstanceProfile=Ref(vpn_instance_profile),
            KeyName=Ref(cfn.keyname),
            SecurityGroups=[Ref(vpn_sg)],
            DependsOn=[vpn_instance_profile.title, vpn_sg.title],
            AssociatePublicIpAddress=True,
            UserData=Base64(vpn_user_data)))

    # Create the babysitter autoscaling group: exactly one VPN instance,
    # with scaling-event notifications delivered to the alert topic.
    vpn_asg_name = '.'.join(['vpn', CLOUDNAME, CLOUDENV])  # currently unused
    vpn_asg = template.add_resource(
        AutoScalingGroup(
            "VPNASG",
            AvailabilityZones=cfn.get_asg_azs(),
            DesiredCapacity="1",
            LaunchConfigurationName=Ref(vpn_launchcfg),
            MinSize="1",
            MaxSize="1",
            NotificationConfiguration=NotificationConfiguration(
                TopicARN=Ref(cfn.alert_topic),
                NotificationTypes=[
                    EC2_INSTANCE_TERMINATE, EC2_INSTANCE_LAUNCH,
                    EC2_INSTANCE_LAUNCH_ERROR, EC2_INSTANCE_TERMINATE_ERROR
                ]),
            VPCZoneIdentifier=[
                Ref(sn)
                for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.VPN)
            ],
            DependsOn=[
                sn.title
                for sn in cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.VPN)
            ]))
# NOTE(review): the first lines close a resource (apparently an EC2
# instance with a CreationPolicy) begun above this excerpt.
ResourceSignal=ResourceSignal(Count=1, Timeout="PT15M")),
        Tags=Tags(Name=ref_stack_name, ),
        KeyName=Ref(KeyName),
        InstanceType=Ref(InstanceType),
        SubnetId=Ref(SubnetId),
        SecurityGroupIds=Ref(SecurityGroups),
    ))

# Additional-node launch configuration: ephemeral disks only (contrast the
# worker nodes, which may use EBS).
AdditionalNodeLaunchConfig = t.add_resource(
    LaunchConfiguration(
        "AdditionalNodeLaunchConfig",
        UserData=Base64(
            Join(
                "",
                my_bootstrap_script('AdditionalNodes', 'true', 'false',
                                    ref_ambariserver))),
        ImageId=FindInMap(ref_os, Ref("AWS::Region"), "AMI"),
        BlockDeviceMappings=my_block_device_mappings_ephemeral(24, "/dev/sd"),
        KeyName=Ref(KeyName),
        SecurityGroups=Ref(SecurityGroups),
        InstanceType=Ref(InstanceType),
        AssociatePublicIpAddress="true",
    ))

# NOTE(review): this statement continues past the end of this excerpt.
AdditionalNodes = t.add_resource(
    AutoScalingGroup(
        "AdditionalNodes",
        DesiredCapacity=Ref(AdditionalInstanceCount),
        MinSize=0,
        MaxSize=Ref(AdditionalInstanceCount),
        VPCZoneIdentifier=[Ref(SubnetId)],
        LaunchConfigurationName=Ref(AdditionalNodeLaunchConfig),
def create_microservice_asg(template, name, ami, key_name, instance_profile,
                            instance_type, vpc_id, instance_port=8080,
                            subnets=None, security_groups=None,
                            availability_zones=None, region='us-east-1',
                            load_balancer=None,
                            load_balancer_security_group=None,
                            min_size=1, max_size=1, desired_capacity=None,
                            creation_policy=None, update_policy=None,
                            depends_on=None, metadata=None, tags=None):
    """Add a launch configuration and auto-scaling group for a Spring Boot
    microservice to *template*, and return the created resources.

    :param template: troposphere Template the resources are added to.
    :param name: logical-name suffix; also keys the per-region mapping.
    :param ami: AMI id recorded in the template mapping.
    :param key_name: EC2 key pair name.
    :param instance_profile: IAM instance profile referenced by the LC.
    :param instance_type: instance type recorded in the template mapping.
    :param vpc_id: VPC the instance security group (and subnets) belong to.
    :param instance_port: port the service's /health endpoint listens on.
    :param subnets: subnet ids; discovered from vpc_id when omitted.
    :param security_groups: extra SGs for the instances; a fresh list is
        used when omitted.
    :param load_balancer: optional dict with an 'elb' entry; when given, an
        ingress SG from the ELB is added and the ASG registers with it.
    :param tags: troposphere Tag list for the ASG (empty when omitted).
    :returns: dict with 'asg', 'lc' and 'security_groups'.
    """
    # BUG FIX: security_groups and tags previously defaulted to the mutable
    # literal []; security_groups is appended to below, so security groups
    # created for one call leaked into every subsequent call that relied on
    # the default.  None sentinels give each call a fresh list.
    if security_groups is None:
        security_groups = []
    if tags is None:
        tags = []

    # Per-region mapping consumed by the FindInMap calls below.
    template.mappings[name] = {
        region: {'instanceType': instance_type,
                 'ami': ami,
                 'profile': instance_profile}
    }

    if not availability_zones:
        availability_zones = _all_az(region)

    if load_balancer:
        # Only the ELB may reach the service port on the instances.
        security_groups.append(template.add_resource(ec2.SecurityGroup(
            "InstanceSecurityGroup" + name,
            GroupDescription="Enable access from ELB",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol='tcp',
                    FromPort=load_balancer['elb'].Listeners[0].InstancePort,
                    ToPort=load_balancer['elb'].Listeners[0].InstancePort,
                    SourceSecurityGroupId=Ref(load_balancer_security_group)
                ),
            ],
            VpcId=vpc_id
        )))

    if not creation_policy:
        creation_policy = _default_creation_policy(name)
    if not update_policy:
        update_policy = _default_update_policy()

    security_group_refs = [Ref(sg) for sg in security_groups]
    asg_name = "AutoscalingGroup" + name
    lc_name = "LaunchConfiguration" + name

    # Boot sequence: run cfn-init, restart the service under supervisor,
    # poll /health until it answers, then signal the ASG.
    lc = LaunchConfiguration(
        lc_name,
        UserData=Base64(Join('', [
            "#!/bin/bash -ex\n",
            "# redirect output to syslog\n",
            "exec 1> >(logger -s -t user-data) 2>&1\n",
            "# running cfn-init\n",
            "/usr/local/bin/cfn-init --stack ", Ref("AWS::StackName"),
            " --resource {}".format(lc_name),
            " --region ", Ref("AWS::Region"), "\n",
            "echo \"cfn-init finished\"\n",
            "printf '%b\n' \"$(cat /home/ubuntu/application.properties)\"\n",
            "# restart services\n",
            "service supervisor restart\n",
            "echo \"restarting services\"\n",
            "# wait until microservice is ready\n",
            "until $(curl --output /dev/null --silent --head --fail http://localhost:{}/health); do\n".format(
                instance_port),
            " printf '.'\n",
            " sleep 5\n",
            # These were previously implicit string concatenations (missing
            # commas); split into explicit list items -- the Join('', ...)
            # output is byte-identical.
            "done\n",
            "echo \"springboot is up\"\n",
            "# signal asg\n",
            "cfn-signal -e 0",
            " --resource {}".format(asg_name),
            " --stack ", Ref("AWS::StackName"),
            " --region ", Ref("AWS::Region"), "\n"
        ])),
        ImageId=FindInMap(name, Ref("AWS::Region"), 'ami'),
        KeyName=key_name,
        SecurityGroups=security_group_refs,
        InstanceType=FindInMap(name, Ref("AWS::Region"), 'instanceType'),
        IamInstanceProfile=Ref(instance_profile)
    )
    if metadata:
        lc.Metadata = metadata
    lc = template.add_resource(lc)

    if not desired_capacity:
        desired_capacity = max_size
    if not subnets:
        subnets = _get_vpc_subnets(vpc_id, region)

    asg = AutoScalingGroup(
        asg_name,
        DesiredCapacity=desired_capacity,
        Tags=tags,
        LaunchConfigurationName=Ref(lc),
        MinSize=min_size,
        MaxSize=max_size,
        # Health-check through the ELB when one is attached.
        LoadBalancerNames=[Ref(load_balancer['elb'])] if load_balancer else None,
        HealthCheckGracePeriod=60,
        AvailabilityZones=availability_zones,
        HealthCheckType="EC2" if not load_balancer else "ELB",
        VPCZoneIdentifier=subnets,
        CreationPolicy=creation_policy,
        UpdatePolicy=update_policy
    )
    if depends_on:
        asg.DependsOn = depends_on
    asg = template.add_resource(asg)

    return {
        'asg': asg,
        'lc': lc,
        'security_groups': security_groups
    }
# NOTE(review): the first lines close a SecurityGroup(...) statement begun
# above this excerpt.
SecurityGroupIngress=securityGroupIngressWorker,
        SecurityGroupEgress=securityGroupEgressWorker,
        VpcId=Ref(VPC),
        Tags=Tags(Name=environmentString + "CustomSecurityGroupIngressWorker",
                  Stack=Ref("AWS::StackName"))))

# One launch configuration + auto-scaling group per worker label.
for f in labels:
    LaunchConfig = template.add_resource(
        LaunchConfiguration(
            "LaunchConfiguration" + f,
            ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
            InstanceType=instanceTypeWorker,
            KeyName=Ref(keyPar_param),
            IamInstanceProfile=Ref(rootInstanceProfile),
            SecurityGroups=[Ref(instanceSecurityWorkerGroup)],
            # User data is plain text with placeholders substituted; it is
            # not a Join, so it cannot carry intrinsic references.
            UserData=Base64(
                USER_DATA_WORKER.replace("[ipPrivateList]",
                                         ipPrivateList).replace("[label]",
                                                                f)),
            BlockDeviceMappings=[
                ec2.BlockDeviceMapping(DeviceName="/dev/xvda",
                                       Ebs=ec2.EBSBlockDevice(VolumeSize="8"))
            ]))

    # NOTE(review): this statement continues past the end of this excerpt.
    AutoscalingGroupX = template.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup" + f,
            Cooldown=300,
            HealthCheckGracePeriod=300,
            DesiredCapacity=DesiredCapacity,
            MinSize=MinSize,
            MaxSize=MaxSize,
# NOTE(review): bare LaunchConfiguration(...) expression -- presumably the
# argument of an add_resource(...) call begun above this excerpt; the
# trailing extra ")" closes that outer call.
LaunchConfiguration(
    "CraftLaunchConfig",
    # Boot script: export project/role/env/SNS settings, install pip via
    # apt or yum/EPEL (whichever exists), upgrade awscli, then fetch and
    # run the role bootstrap script (craft.sh) from the tools bucket.
    UserData=Base64(
        Join("", [
            "#!/usr/bin/env bash\n",
            "export PROJECT=", "sheltermutual", "\n",
            "export ROLE=", "craft",
            "\nexport ENV=", Ref(Environment), "\n",
            "export SNS_TOPIC=", Ref(SnsTopic), "\n",
            "if [ $(which apt-get) ] ; then", "\n",
            "export DEBIAN_FRONTEND=noninteractive", "\n",
            "apt-get update", "\n",
            "apt-get -y install python-pip", "\n",
            "elif [ $(which yum) ] ; then", "\n",
            "if [ -e /etc/redhat-release ]; then", "\n",
            "yum -y install wget", "\n",
            "RELEASE=$(cut -d ' ' -f 7 /etc/redhat-release | cut -d '.' -f 1)", "\n",
            "wget https://dl.fedoraproject.org/pub/epel/epel-release-latest-${RELEASE}.noarch.rpm", "\n",
            "rpm -Uvh epel-release*.rpm", "\n",
            "else", "\n",
            "yum -y install epel-release", "\n",
            "fi", "\n",
            "yum makecache fast", "\n",
            "yum -y install python-pip", "\n",
            "fi", "\n",
            "pip install --upgrade awscli", "\n",
            "aws s3 cp s3://", "shelter-mutual-aws-tools-us-east-1/bootstrap",
            "/", "craft", ".sh",
            " /usr/local/bin/", "craft", ".sh",
            " --region ", Ref("AWS::Region"), " \n",
            "chmod 770 /usr/local/bin/", "craft", ".sh", " \n",
            "/usr/local/bin/", "craft", ".sh", "\n"
        ])),
    ImageId=Ref(AmiId),
    BlockDeviceMappings=[{
        "DeviceName": "/dev/sda1",
        "Ebs": {
            "DeleteOnTermination": "true",
            "VolumeType": "gp2",
            "VolumeSize": 10
        }
    }],
    KeyName="common-us-east-1",
    SecurityGroups=[Ref(InstanceSecurityGroups)],
    IamInstanceProfile=Ref(IamInstanceProfile),
    InstanceType=Ref(InstanceType),
    AssociatePublicIpAddress="false",
))
# ECS container-instance launch configuration.  cfn-init (run from the user
# data below) applies the Metadata: registers the instance with the ECS
# cluster, writes the cfn-hup configuration, and keeps cfn-hup running so
# future updates to this Metadata are re-applied automatically.
container_instance_configuration = LaunchConfiguration(
    container_instance_configuration_name,
    template=template,
    KeyName=Ref(secret_key),
    Metadata=Metadata(
        cloudformation.Init(
            dict(config=cloudformation.InitConfig(
                commands=dict(register_cluster=dict(command=Join(
                    "",
                    [
                        "#!/bin/bash\n",
                        # Register the cluster
                        "echo ECS_CLUSTER=",
                        Ref(main_cluster),
                        " >> /etc/ecs/ecs.config\n",
                        # Enable CloudWatch docker logging
                        'echo \'ECS_AVAILABLE_LOGGING_DRIVERS=',
                        '["json-file","awslogs"]\'',
                        " >> /etc/ecs/ecs.config\n",
                    ]))),
                files=cloudformation.InitFiles({
                    "/etc/cfn/cfn-hup.conf":
                    cloudformation.InitFile(
                        # BUG FIX: cfn-hup's [main] section requires a
                        # "stack=" key; this previously wrote "template=",
                        # which cfn-hup does not recognise.
                        content=Join("", [
                            "[main]\n",
                            "stack=",
                            Ref(AWS_STACK_ID),
                            "\n",
                            "region=",
                            Ref(AWS_REGION),
                            "\n",
                        ]),
                        mode="000400",
                        owner="root",
                        group="root",
                    ),
                    # BUG FIX: this hook file was created as
                    # "cfn-auto-reload.conf" while the cfn-hup service
                    # below watched "cfn-auto-reloader.conf", so the hook
                    # never fired; the names now match.
                    "/etc/cfn/hooks.d/cfn-auto-reloader.conf":
                    cloudformation.InitFile(
                        content=Join("", [
                            "[cfn-auto-reloader-hook]\n",
                            "triggers=post.update\n",
                            "path=Resources.%s." %
                            container_instance_configuration_name,
                            "Metadata.AWS::CloudFormation::Init\n",
                            "action=/opt/aws/bin/cfn-init -v ",
                            # BUG FIX: "--stack" previously lacked its
                            # trailing space, producing "--stack<name>".
                            " --stack ",
                            Ref(AWS_STACK_NAME),
                            " --resource %s" %
                            container_instance_configuration_name,
                            " --region ",
                            Ref("AWS::Region"),
                            "\n",
                            "runas=root\n",
                        ]))
                }),
                services=dict(sysvinit=cloudformation.InitServices({
                    'cfn-hup':
                    cloudformation.InitService(
                        enabled=True,
                        ensureRunning=True,
                        files=[
                            "/etc/cfn/cfn-hup.conf",
                            "/etc/cfn/hooks.d/cfn-auto-reloader.conf",
                        ]),
                })))))),
    SecurityGroups=[Ref(instance_security_group)],
    AssociatePublicIpAddress=True,
    InstanceType=instance_type,
    ImageId=FindInMap("ECSRegionMap", Ref(AWS_REGION), "AMI"),
    IamInstanceProfile=Ref(container_instance_profile),
    # First boot: install the cfn helper scripts and apply the Metadata
    # above via cfn-init.
    UserData=Base64(
        Join('', [
            "#!/bin/bash -xe\n",
            "yum install -y aws-cfn-bootstrap\n",
            "/opt/aws/bin/cfn-init -v ",
            " --stack ", Ref(AWS_STACK_NAME),
            " --resource %s " % container_instance_configuration_name,
            " --region ", Ref(AWS_REGION), "\n",
        ])))
# NOTE(review): bare LaunchConfiguration(...) expression -- presumably the
# argument of an add_resource(...) call begun above this excerpt; the
# trailing extra ")" closes that outer call.
LaunchConfiguration(
    "BastionLaunchConfig",
    # Boot script: install awscli via EPEL pip, export project/env/SNS
    # settings, then fetch and run bastion.sh from the tools bucket.
    UserData=Base64(
        Join("", [
            "#!/usr/bin/env bash\n",
            "yum -y install epel-release\n",
            "yum -y --enablerepo=epel install python-pip\n",
            "pip install --upgrade awscli\n",
            "export PROJECT=", "sheltermutual", "\n",
            "export ENV=", "test", "\n",
            "export SNS_TOPIC=", Ref(SnsTopic), "\n",
            "aws s3 cp s3://", "shelter-mutual-aws-tools-us-east-1/bootstrap",
            "/bastion.sh",
            " /usr/local/bin/bastion.sh",
            " --region ", Ref("AWS::Region"), " \n",
            "chmod 770 /usr/local/bin/bastion.sh", " \n",
            "/usr/local/bin/bastion.sh", " \n"
        ])),
    ImageId=Ref(AmiId),
    BlockDeviceMappings=[{
        "DeviceName": "/dev/xvda",
        "Ebs": {
            "DeleteOnTermination": "true",
            "VolumeType": "gp2",
            "VolumeSize": "10"
        }
    }],
    KeyName="bastion-us-east-1",
    SecurityGroups=[Ref(InstanceSecurityGroups)],
    IamInstanceProfile=Ref(IamInstanceProfile),
    InstanceType="t2.micro",
    AssociatePublicIpAddress="true",
))
# NOTE(review): the first lines close an IAM policy statement begun above
# this excerpt (logs/events/firehose write access on all resources).
Action("logs", "Create*"),
            Action("logs", "Put*"),
            Action("logs", "Describe*"),
            Action("events", "Put*"),
            Action("firehose", "Put*"),
        ],
                  Resource=["*"])
    ]),
        Roles=[Ref("Role")]))

t.add_resource(
    LaunchConfiguration(
        "LaunchConfiguration",
        UserData=ud,
        # Hard-coded AMI -- confirm it is still current for this region.
        ImageId="ami-a4c7edb2",
        KeyName=Ref("KeyPair"),
        SecurityGroups=[Ref("SecurityGroup")],
        InstanceType=Ref("InstanceType"),
        IamInstanceProfile=Ref("InstanceProfile"),
    ))

t.add_resource(
    AutoScalingGroup(
        "AutoscalingGroup",
        DesiredCapacity=Ref("ScaleCapacity"),
        LaunchConfigurationName=Ref("LaunchConfiguration"),
        MinSize=2,
        MaxSize=5,
        LoadBalancerNames=[Ref("LoadBalancer")],
        # NOTE(review): VPCZoneIdentifier is a single Ref rather than a
        # list -- presumably "PublicSubnet" is a List-typed parameter;
        # confirm.
        VPCZoneIdentifier=Ref("PublicSubnet"),
    ))
# NOTE(review): bare LaunchConfiguration(...) expression -- presumably the
# argument of an add_resource(...) call begun above this excerpt; the
# trailing extra ")" closes that outer call.
LaunchConfiguration(
    'ContainerInstances',
    # cfn-init metadata: cfn-hup config + reload hook (so Metadata updates
    # re-run cfn-init), ECS cluster registration, and SSM agent install.
    Metadata=Metadata(
        Init({
            'config':
            InitConfig(
                files=InitFiles(
                    {
                        '/etc/cfn/cfn-hup.conf':
                        InitFile(
                            content=Join(
                                '', [
                                    '[main]\n',
                                    'stack=',
                                    Ref('AWS::StackId'),
                                    '\n',
                                    'region=',
                                    Ref('AWS::Region'),
                                    '\n'
                                ]),
                            mode='000400',
                            owner='root',
                            group='root'),
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf':
                        InitFile(
                            content=Join(
                                '', [
                                    '[cfn-auto-reloader-hook]\n',
                                    'triggers=post.update\n',
                                    'path=Resources.ContainerInstances.Metadata.AWS::CloudFormation::Init\n',
                                    'action=/opt/aws/bin/cfn-init -v ',
                                    '--stack ',
                                    Ref('AWS::StackName'),
                                    ' --resource ContainerInstances ',
                                    ' --region ',
                                    Ref('AWS::Region'),
                                    '\n',
                                    'runas=root\n'
                                ]),
                            mode='000400',
                            owner='root',
                            group='root')
                    },
                ),
                services=InitServices({
                    'cfn-hup':
                    InitService(
                        ensureRunning='true',
                        enabled='true',
                        files=[
                            '/etc/cfn/cfn-hup.conf',
                            '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                        ])
                }),
                commands={
                    '01_add_instance_to_cluster': {
                        'command':
                        Join(
                            '', [
                                '#!/bin/bash\n',
                                'echo ECS_CLUSTER=',
                                Ref('ECSCluster'),
                                ' >> /etc/ecs/ecs.config'
                            ])
                    },
                    '02_install_ssm_agent': {
                        'command':
                        Join(
                            '', [
                                '#!/bin/bash\n',
                                'yum -y update\n',
                                'curl https://amazon-ssm-eu-west-1.s3.amazonaws.com/latest/linux_amd64/amazon-ssm-agent.rpm -o amazon-ssm-agent.rpm\n',
                                'yum install -y amazon-ssm-agent.rpm'
                            ])
                    }
                })
        }),
    ),
    # First boot: install cfn helpers, apply the Metadata via cfn-init,
    # then report the result to ECSAutoScalingGroup with cfn-signal.
    UserData=Base64(
        Join('', [
            '#!/bin/bash -xe\n',
            'yum install -y aws-cfn-bootstrap\n',
            '/opt/aws/bin/cfn-init -v ',
            ' --stack ',
            Ref('AWS::StackName'),
            ' --resource ContainerInstances ',
            ' --region ',
            Ref('AWS::Region'),
            '\n',
            '/opt/aws/bin/cfn-signal -e $? ',
            ' --stack ',
            Ref('AWS::StackName'),
            ' --resource ECSAutoScalingGroup ',
            ' --region ',
            Ref('AWS::Region'),
            '\n'
        ])),
    ImageId='ami-0499a641a2a0e5da9',
    KeyName='devops-demo',
    SecurityGroups=[Ref('ContainerSecurityGroup')],
    IamInstanceProfile=Ref('EC2InstanceProfile'),
    InstanceType='t2.micro',
    AssociatePublicIpAddress='true',
))