def create_autoscaling_group(self):
    t = self.template
    t.add_resource(
        autoscaling.LaunchConfiguration(
            'EmpireControllerLaunchConfig',
            IamInstanceProfile=GetAtt("EmpireControllerProfile", "Arn"),
            ImageId=FindInMap('AmiMap', Ref("AWS::Region"), Ref("ImageName")),
            BlockDeviceMappings=self.build_block_device(),
            InstanceType=Ref("InstanceType"),
            KeyName=Ref("SshKeyName"),
            UserData=self.generate_user_data(),
            SecurityGroups=[Ref("DefaultSG"), Ref(CLUSTER_SG_NAME)]))
    t.add_resource(
        autoscaling.AutoScalingGroup(
            'EmpireControllerAutoscalingGroup',
            AvailabilityZones=Ref("AvailabilityZones"),
            LaunchConfigurationName=Ref("EmpireControllerLaunchConfig"),
            MinSize=Ref("MinHosts"),
            MaxSize=Ref("MaxHosts"),
            VPCZoneIdentifier=Ref("PrivateSubnets"),
            LoadBalancerNames=[Ref("EmpireControllerLoadBalancer")],
            Tags=[ASTag('Name', 'empire_controller', True)]))
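# The snippets below assume troposphere imports along these lines; a minimal
# sketch only - the aliases (autoscaling vs. asg) and helper constants such as
# CLUSTER_SG_NAME or ELB_NAME vary per snippet and the values here are
# assumptions, not taken from any one source file.
from troposphere import (
    Base64, Equals, FindInMap, GetAtt, GetAZs, If, Join, Not, Output, Ref,
    Sub, Tags, Template,
)
from troposphere import autoscaling
from troposphere import autoscaling as asg
from troposphere.autoscaling import Tag as ASTag

CLUSTER_SG_NAME = 'EmpireControllerSG'  # hypothetical; defined elsewhere in the real module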
def AS_Autoscaling(key):
    LoadBalancers = []
    TargetGroups = []
    for n in cfg.LoadBalancer:
        if cfg.LoadBalancerType == "Classic":
            LoadBalancers.append(Ref(f"LoadBalancerClassic{n}"))
        if cfg.LoadBalancerType == "Application":
            TargetGroups.append(Ref(f"TargetGroup{n}"))
        if cfg.LoadBalancerType == "Network":
            for k in cfg.Listeners:
                TargetGroups.append(Ref(f"TargetGroupListeners{k}{n}"))

    # Resources
    LaunchTemplateTags = AS_LaunchTemplate()

    R_ASG = asg.AutoScalingGroup(
        "AutoScalingGroupBase",
        LoadBalancerNames=LoadBalancers,
        TargetGroupARNs=TargetGroups,
    )
    auto_get_props(R_ASG)
    R_ASG.Tags += LaunchTemplateTags

    add_obj([R_ASG])
def create_auto_scaling_resources(self, worker_security_group, worker_lb):
    worker_launch_config_name = 'lcWorker'
    worker_launch_config = self.add_resource(
        asg.LaunchConfiguration(
            worker_launch_config_name,
            EbsOptimized=True,
            ImageId=Ref(self.worker_ami),
            IamInstanceProfile=Ref(self.worker_instance_profile),
            InstanceType=Ref(self.worker_instance_type),
            KeyName=Ref(self.keyname),
            SecurityGroups=[Ref(worker_security_group)],
            UserData=Base64(Join('', self.get_cloud_config()))))

    worker_auto_scaling_group_name = 'asgWorker'
    worker_asg = self.add_resource(
        asg.AutoScalingGroup(
            worker_auto_scaling_group_name,
            AvailabilityZones=Ref(self.availability_zones),
            Cooldown=300,
            DesiredCapacity=Ref(self.worker_auto_scaling_desired),
            HealthCheckGracePeriod=600,
            HealthCheckType='ELB',
            LaunchConfigurationName=Ref(worker_launch_config),
            LoadBalancerNames=[Ref(worker_lb)],
            MaxSize=Ref(self.worker_auto_scaling_max),
            MinSize=Ref(self.worker_auto_scaling_min),
            NotificationConfigurations=[
                asg.NotificationConfigurations(
                    TopicARN=Ref(self.notification_topic_arn),
                    NotificationTypes=[
                        asg.EC2_INSTANCE_LAUNCH,
                        asg.EC2_INSTANCE_LAUNCH_ERROR,
                        asg.EC2_INSTANCE_TERMINATE,
                        asg.EC2_INSTANCE_TERMINATE_ERROR
                    ])
            ],
            VPCZoneIdentifier=Ref(self.private_subnets),
            Tags=[asg.Tag('Name', 'Worker', True)]))

    self.add_resource(
        asg.ScheduledAction(
            'schedWorkerAutoScalingStart',
            AutoScalingGroupName=Ref(worker_asg),
            DesiredCapacity=Ref(
                self.worker_auto_scaling_schedule_start_capacity),
            Recurrence=Ref(
                self.worker_auto_scaling_schedule_start_recurrence)))

    self.add_resource(
        asg.ScheduledAction(
            'schedWorkerAutoScalingEnd',
            AutoScalingGroupName=Ref(worker_asg),
            DesiredCapacity=Ref(
                self.worker_auto_scaling_schedule_end_capacity),
            Recurrence=Ref(
                self.worker_auto_scaling_schedule_end_recurrence)))

    return worker_asg
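# For reference, a ScheduledAction like the two above can also be written with
# literal values; a minimal, self-contained sketch (the names and cron
# expression are hypothetical, not from the snippet):
from troposphere import Template
from troposphere import autoscaling as asg

t = Template()
t.add_resource(asg.ScheduledAction(
    'schedWorkerScaleUp',
    AutoScalingGroupName='worker-asg',   # normally Ref(worker_asg)
    DesiredCapacity=4,
    Recurrence='0 8 * * MON-FRI'))       # cron syntax, evaluated in UTC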
def handle(self, chain_context):
    template = chain_context.template
    template.add_resource(autoscaling.AutoScalingGroup(
        self.name,
        **self._get_autoscaling_group_parameters(
            chain_context=chain_context,
            launch_config_name=LAUNCH_CONFIG_NAME)))
def handle(self, chain_context):
    template = chain_context.template
    name = "Asg%s" % chain_context.instance_name
    template.add_resource(
        autoscaling.AutoScalingGroup(
            name,
            **self._get_autoscaling_group_parameters(
                chain_context=chain_context,
                launch_config_name='Lc%s' % chain_context.instance_name)))
def create_autoscaling_group_resource(template, api_instance_count_parameter,
                                      launch_template_resource):
    return template.add_resource(
        autoscaling.AutoScalingGroup(
            'AutoScalingGroup',
            DesiredCapacity=Ref(api_instance_count_parameter),
            MinSize=Ref(api_instance_count_parameter),
            MaxSize=Ref(api_instance_count_parameter),
            LaunchTemplate=autoscaling.LaunchTemplateSpecification(
                LaunchTemplateId=Ref(launch_template_resource),
                Version=GetAtt(launch_template_resource,
                               'LatestVersionNumber')),
            AvailabilityZones=['eu-west-1a', 'eu-west-1b', 'eu-west-1c']))
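# A usage sketch for the helper above; the parameter and launch-template names
# are hypothetical, and the minimal LaunchTemplateData is only there so the
# rendered template validates.
from troposphere import Parameter, Template
from troposphere import ec2

template = Template()
count = template.add_parameter(
    Parameter('ApiInstanceCount', Type='Number', Default='2'))
launch_template = template.add_resource(
    ec2.LaunchTemplate(
        'ApiLaunchTemplate',
        LaunchTemplateData=ec2.LaunchTemplateData(
            ImageId='ami-12345678',      # hypothetical AMI
            InstanceType='t3.micro')))
create_autoscaling_group_resource(template, count, launch_template)
print(template.to_json())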
def create_autoscaling_group(self):
    name = "%sASG" % self.name
    launch_config = "%sLaunchConfig" % name
    elb_name = ELB_NAME % self.name
    t = self.template
    t.add_resource(autoscaling.LaunchConfiguration(
        launch_config,
        **self.get_launch_configuration_parameters()))
    self.ASG = t.add_resource(autoscaling.AutoScalingGroup(
        name,
        **self.get_autoscaling_group_parameters(launch_config, elb_name)))
def create_auto_scaling_resources(self, tile_server_security_group,
                                  tile_server_lb):
    tile_server_launch_config_name = 'lcTileServer'
    tile_server_launch_config = self.add_resource(
        asg.LaunchConfiguration(
            tile_server_launch_config_name,
            ImageId=Ref(self.tile_server_ami),
            IamInstanceProfile=Ref(self.tile_server_instance_profile),
            InstanceType=Ref(self.tile_server_instance_type),
            KeyName=Ref(self.keyname),
            SecurityGroups=[Ref(tile_server_security_group)],
            UserData=Base64(Join('', self.get_cloud_config()))))

    tile_server_auto_scaling_group_name = 'asgTileServer'
    self.add_resource(
        asg.AutoScalingGroup(
            tile_server_auto_scaling_group_name,
            AvailabilityZones=Ref(self.availability_zones),
            Cooldown=300,
            DesiredCapacity=Ref(self.tile_server_auto_scaling_desired),
            HealthCheckGracePeriod=600,
            HealthCheckType='ELB',
            LaunchConfigurationName=Ref(tile_server_launch_config),
            LoadBalancerNames=[Ref(tile_server_lb)],
            MaxSize=Ref(self.tile_server_auto_scaling_max),
            MinSize=Ref(self.tile_server_auto_scaling_min),
            NotificationConfigurations=[
                asg.NotificationConfigurations(
                    TopicARN=Ref(self.notification_topic_arn),
                    NotificationTypes=[
                        asg.EC2_INSTANCE_LAUNCH,
                        asg.EC2_INSTANCE_LAUNCH_ERROR,
                        asg.EC2_INSTANCE_TERMINATE,
                        asg.EC2_INSTANCE_TERMINATE_ERROR
                    ])
            ],
            VPCZoneIdentifier=Ref(self.private_subnets),
            Tags=[asg.Tag('Name', 'TileServer', True)]))
def add_autoscaling_ondemand(self):
    self.AutoscalingGroupOnDemand = self.template.add_resource(
        autoscaling.AutoScalingGroup(
            "AutoscalingGroupOnDemand",
            DesiredCapacity=self.sceptre_user_data["desired_capacity_ondemand"],
            LaunchConfigurationName=Ref(self.launchconfig_ondemand),
            MinSize=self.sceptre_user_data["minimum_capacity_ondemand"],
            MaxSize=self.sceptre_user_data["maximum_capacity_ondemand"],
            VPCZoneIdentifier=self.subnets,
            LoadBalancerNames=[Ref(self.LoadBalancer)],
            AvailabilityZones=GetAZs(""),
            HealthCheckType="ELB",
            HealthCheckGracePeriod=10,
            Tags=[
                autoscaling.Tag("Name", "web-server-ondemand", True),
                autoscaling.Tag("service", self.service_tag, True),
                autoscaling.Tag("lifecycle", "ondemand", True)
            ]))
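# The sceptre_user_data keys consumed above come from the stack's sceptre
# config; a hypothetical example of the expected shape (values assumed):
sceptre_user_data = {
    'desired_capacity_ondemand': '2',
    'minimum_capacity_ondemand': '1',
    'maximum_capacity_ondemand': '4',
}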
def create_autoscaling_group(self):
    t = self.template
    t.add_resource(
        autoscaling.LaunchConfiguration(
            'BastionLaunchConfig',
            AssociatePublicIpAddress=True,
            ImageId=FindInMap('AmiMap', Ref("AWS::Region"), Ref("ImageName")),
            InstanceType=Ref("InstanceType"),
            KeyName=Ref("SshKeyName"),
            UserData=self.generate_user_data(),
            SecurityGroups=[Ref("DefaultSG"), Ref(CLUSTER_SG_NAME)]))
    t.add_resource(
        autoscaling.AutoScalingGroup(
            'BastionAutoscalingGroup',
            AvailabilityZones=Ref("AvailabilityZones"),
            LaunchConfigurationName=Ref("BastionLaunchConfig"),
            MinSize=Ref("MinSize"),
            MaxSize=Ref("MaxSize"),
            VPCZoneIdentifier=Ref("PublicSubnets"),
            Tags=[ASTag('Name', 'bastion', True)]))
def create_autoscaling_group(self):
    name = "%sASG" % self.name
    sg_name = CLUSTER_SG_NAME % self.name
    launch_config = "%sLaunchConfig" % name
    elb_name = ELB_NAME % self.name
    t = self.template
    t.add_resource(autoscaling.LaunchConfiguration(
        launch_config,
        ImageId=FindInMap('AmiMap', Ref("AWS::Region"), Ref('ImageName')),
        InstanceType=Ref("InstanceType"),
        KeyName=Ref("SshKeyName"),
        SecurityGroups=[Ref("DefaultSG"), Ref(sg_name)]))
    t.add_resource(autoscaling.AutoScalingGroup(
        name,
        AvailabilityZones=Ref("AvailabilityZones"),
        LaunchConfigurationName=Ref(launch_config),
        MinSize=Ref("MinSize"),
        MaxSize=Ref("MaxSize"),
        VPCZoneIdentifier=Ref("PrivateSubnets"),
        LoadBalancerNames=If("CreateELB", [Ref(elb_name)], []),
        Tags=[ASTag('Name', self.name, True)]))
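# The CreateELB condition consumed by the If() above must be declared on the
# same template; a minimal sketch (the UseELB parameter name is an assumption):
from troposphere import Equals, Ref, Template

t = Template()
t.add_condition('CreateELB', Equals(Ref('UseELB'), 'true'))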
def add_resources_and_outputs(self):
    """Add resources to template."""
    template = self.template
    variables = self.get_variables()

    autoscalinggroup = template.add_resource(
        autoscaling.AutoScalingGroup(
            'AutoScalingGroup',
            # This doesn't work - even though it's in troposphere
            # and the AWS CloudFormation docs.
            # error:
            # AttributeError: AWS::AutoScaling::AutoScalingGroup
            # object does not support attribute AutoScalingGroupName
            # AutoScalingGroupName=variables['AutoScalingGroupName'].ref,
            Cooldown=variables['Cooldown'].ref,
            DesiredCapacity=variables['DesiredCapacity'].ref,
            HealthCheckGracePeriod=variables['HealthCheckGracePeriod'].ref,
            HealthCheckType=variables['HealthCheckType'].ref,
            LaunchConfigurationName=variables['LaunchConfigurationName'].ref,
            MaxSize=variables['MaxSize'].ref,
            MinSize=variables['MinSize'].ref,
            Tags=Tags(
                Application=variables['ApplicationName'].ref,
                Environment=variables['EnvironmentName'].ref,
                Name=variables['InstanceName'].ref
            ),
            VPCZoneIdentifier=variables['SubnetIdList'].ref,
        )
    )

    template.add_output(
        Output(
            "{}Name".format(autoscalinggroup.title),
            Description="Name of the Auto Scaling Group",
            Value=Ref(autoscalinggroup)
        )
    )
def create_auto_scaling_resources(self, app_server_security_group,
                                  app_server_lb,
                                  backward_compat_app_server_lb):
    self.add_condition('BlueCondition', Equals('Blue', Ref(self.color)))
    self.add_condition('GreenCondition', Equals('Green', Ref(self.color)))

    blue_app_server_launch_config = self.add_resource(
        asg.LaunchConfiguration(
            'lcAppServerBlue',
            Condition='BlueCondition',
            ImageId=Ref(self.app_server_ami),
            IamInstanceProfile=Ref(self.app_server_instance_profile),
            InstanceType=Ref(self.app_server_instance_type),
            KeyName=Ref(self.keyname),
            SecurityGroups=[Ref(app_server_security_group)],
            UserData=Base64(
                Join('', self.get_cloud_config(
                    self.blue_tile_distribution_endpoint)))))

    self.add_resource(
        asg.AutoScalingGroup(
            'asgAppServerBlue',
            AvailabilityZones=Ref(self.availability_zones),
            Condition='BlueCondition',
            Cooldown=300,
            DesiredCapacity=Ref(self.app_server_auto_scaling_desired),
            HealthCheckGracePeriod=600,
            HealthCheckType='ELB',
            LaunchConfigurationName=Ref(blue_app_server_launch_config),
            LoadBalancerNames=[
                Ref(app_server_lb),
                Ref(backward_compat_app_server_lb)
            ],
            MaxSize=Ref(self.app_server_auto_scaling_max),
            MinSize=Ref(self.app_server_auto_scaling_min),
            NotificationConfigurations=[
                asg.NotificationConfigurations(
                    TopicARN=Ref(self.notification_topic_arn),
                    NotificationTypes=[
                        asg.EC2_INSTANCE_LAUNCH,
                        asg.EC2_INSTANCE_LAUNCH_ERROR,
                        asg.EC2_INSTANCE_TERMINATE,
                        asg.EC2_INSTANCE_TERMINATE_ERROR
                    ])
            ],
            VPCZoneIdentifier=Ref(self.private_subnets),
            Tags=[asg.Tag('Name', 'AppServer', True)]))

    green_app_server_launch_config = self.add_resource(
        asg.LaunchConfiguration(
            'lcAppServerGreen',
            Condition='GreenCondition',
            ImageId=Ref(self.app_server_ami),
            IamInstanceProfile=Ref(self.app_server_instance_profile),
            InstanceType=Ref(self.app_server_instance_type),
            KeyName=Ref(self.keyname),
            SecurityGroups=[Ref(app_server_security_group)],
            UserData=Base64(
                Join('', self.get_cloud_config(
                    self.green_tile_distribution_endpoint)))))

    self.add_resource(
        asg.AutoScalingGroup(
            'asgAppServerGreen',
            AvailabilityZones=Ref(self.availability_zones),
            Condition='GreenCondition',
            Cooldown=300,
            DesiredCapacity=Ref(self.app_server_auto_scaling_desired),
            HealthCheckGracePeriod=600,
            HealthCheckType='ELB',
            LaunchConfigurationName=Ref(green_app_server_launch_config),
            LoadBalancerNames=[
                Ref(app_server_lb),
                Ref(backward_compat_app_server_lb)
            ],
            MaxSize=Ref(self.app_server_auto_scaling_max),
            MinSize=Ref(self.app_server_auto_scaling_min),
            NotificationConfigurations=[
                asg.NotificationConfigurations(
                    TopicARN=Ref(self.notification_topic_arn),
                    NotificationTypes=[
                        asg.EC2_INSTANCE_LAUNCH,
                        asg.EC2_INSTANCE_LAUNCH_ERROR,
                        asg.EC2_INSTANCE_TERMINATE,
                        asg.EC2_INSTANCE_TERMINATE_ERROR
                    ])
            ],
            VPCZoneIdentifier=Ref(self.private_subnets),
            Tags=[asg.Tag('Name', 'AppServer', True)]))
r_script_runner_lc = stack.add_resource(autoscaling.LaunchConfiguration(
    'RScriptRunnerLC',
    IamInstanceProfile=Ref(instance_profile),
    ImageId=parameters.r_processing_ami,
    InstanceType='m4.large',
    # b64encode works on bytes; encode/decode so the template gets a str
    UserData=base64.b64encode(
        ('#cloud-config\n'
         + yaml.safe_dump(r_server_user_data)).encode()).decode(),
    KeyName='bvanzant',
    SecurityGroups=[
        parameters.database_client_sg,
        GetAtt(default_instance_sg, 'GroupId'),
    ]))

r_script_runner_asg = stack.add_resource(autoscaling.AutoScalingGroup(
    'RScriptRunnerASG',
    LaunchConfigurationName=Ref(r_script_runner_lc),
    MaxSize=2,
    MinSize=1,
    Tags=autoscaling.Tags(Name='RScriptRunner'),
    # list() so dict views work on Python 3
    VPCZoneIdentifier=list(parameters.private_subnets.values()),
))

ourelb = stack.add_resource(elb.LoadBalancer(
    "ApplicationElasticLB",
    Name="ApplicationElasticLB",
    Scheme="internet-facing",
    Subnets=list(parameters.public_subnets.values()),
    SecurityGroups=[GetAtt(elb_sg, 'GroupId')],
))

webserver_target_group = stack.add_resource(elb.TargetGroup(
    "WebserverTarget",
def configure(self):
    """Returns a VPN template"""
    self.defaults = {'instance_type': 't2.small'}
    self.service = 'vpn'
    self.add_description('Sets up VPNs')
    self.get_eni_policies()
    self.get_default_security_groups()
    self.get_standard_parameters()
    self.get_standard_policies()

    self.ami = self.add_parameter(
        Parameter('AMI',
                  Type='String',
                  Description='AMI ID for instances',
                  Default=get_latest_ami_id(
                      self.region,
                      'amzn2-ami-hvm-2.0.????????-x86_64-gp2',
                      'amazon')))

    # Custom config per VPN
    for vpn in constants.ENVIRONMENTS[self.env]['vpn']:
        if not vpn['active']:
            continue
        _vpn_name = vpn['name']
        _vpn_subnet = self.get_subnets('public', _preferred_only=True)[0]
        _role = 'vpn-{}'.format(_vpn_name)

        _vpn_security_group = self.add_resource(
            ec2.SecurityGroup(
                self.cfn_name('VPNSecurityGroup', _vpn_name),
                VpcId=self.vpc_id,
                GroupDescription='Security Group for VPN {}'.format(_vpn_name),
                SecurityGroupIngress=[
                    # ESP and AH from the remote peer
                    {"IpProtocol": "50", "FromPort": "-1", "ToPort": "-1",
                     "CidrIp": vpn['remote_ip'] + '/32'},
                    {"IpProtocol": "51", "FromPort": "-1", "ToPort": "-1",
                     "CidrIp": vpn['remote_ip'] + '/32'},
                    # IKE and NAT-T
                    {"IpProtocol": "udp", "FromPort": "500", "ToPort": "500",
                     "CidrIp": vpn['remote_ip'] + '/32'},
                    {"IpProtocol": "udp", "FromPort": "4500", "ToPort": "4500",
                     "CidrIp": vpn['remote_ip'] + '/32'},
                    {"IpProtocol": "icmp", "FromPort": "-1", "ToPort": "-1",
                     "CidrIp": "0.0.0.0/0"},
                    {"IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1",
                     "CidrIp": constants.SUPERNET}],
                SecurityGroupEgress=[
                    {"IpProtocol": "50", "FromPort": "-1", "ToPort": "-1",
                     "CidrIp": vpn['remote_ip'] + '/32'},
                    {"IpProtocol": "51", "FromPort": "-1", "ToPort": "-1",
                     "CidrIp": vpn['remote_ip'] + '/32'},
                    {"IpProtocol": "udp", "FromPort": "500", "ToPort": "500",
                     "CidrIp": vpn['remote_ip'] + '/32'},
                    {"IpProtocol": "udp", "FromPort": "4500", "ToPort": "4500",
                     "CidrIp": vpn['remote_ip'] + '/32'},
                    {"IpProtocol": "tcp", "FromPort": "80", "ToPort": "80",
                     "CidrIp": "0.0.0.0/0"},
                    {"IpProtocol": "tcp", "FromPort": "443", "ToPort": "443",
                     "CidrIp": "0.0.0.0/0"},
                    {"IpProtocol": "udp", "FromPort": "123", "ToPort": "123",
                     "CidrIp": "0.0.0.0/0"},
                    {"IpProtocol": "icmp", "FromPort": "-1", "ToPort": "-1",
                     "CidrIp": "0.0.0.0/0"},
                    {"IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1",
                     "CidrIp": constants.SUPERNET}]))

        _vpn_eip = self.add_resource(
            ec2.EIP(self.cfn_name('VPNInstanceEIP', _vpn_name), Domain='vpc'))

        _vpn_eni = self.add_resource(
            ec2.NetworkInterface(
                self.cfn_name('VPNInstanceENI', _vpn_name),
                SubnetId=_vpn_subnet['SubnetId'],
                Description='ENI for VPN - {}'.format(_vpn_name),
                GroupSet=[Ref(_vpn_security_group)] + self.security_groups,
                SourceDestCheck=False,
                Tags=self.get_tags(role_override=_role)))

        self.add_resource(
            ec2.EIPAssociation(
                self.cfn_name('AssociateVPNInstanceENI', _vpn_name),
                AllocationId=GetAtt(_vpn_eip, "AllocationId"),
                NetworkInterfaceId=Ref(_vpn_eni)))

        # Set up Routes from all VPC subnets to the ENI
        _vpc_route_tables = self.ec2_conn.describe_route_tables(
            Filters=[{'Name': 'vpc-id',
                      'Values': [self.vpc_id]}])['RouteTables']

        _local_subnets = iter(
            map(lambda x: constants.ENVIRONMENTS[x]['vpc']['cidrblock'],
                filter(lambda z: z in vpn.get('local_envs', []),
                       constants.ENVIRONMENTS.keys())))
        _local_subnets = list(
            itertools.chain(_local_subnets, [self.vpc_metadata['cidrblock']]))

        # append remote vpc subnets
        _remote_subnets = iter(
            map(lambda x: constants.ENVIRONMENTS[x]['vpc']['cidrblock'],
                filter(lambda z: z in vpn.get('remote_envs', []),
                       constants.ENVIRONMENTS.keys())))
        _remote_subnets = list(
            itertools.chain(_remote_subnets, vpn.get('remote_subnets', [])))

        for remote_subnet in _remote_subnets:
            for route_table in _vpc_route_tables:
                self.add_resource(
                    ec2.Route(
                        self.cfn_name(_vpn_name, "VPNRoute", remote_subnet,
                                      route_table['RouteTableId']),
                        RouteTableId=route_table['RouteTableId'],
                        DestinationCidrBlock=remote_subnet,
                        NetworkInterfaceId=Ref(_vpn_eni)))

        _user_data_template = self.get_cloudinit_template(replacements=(
            ('__PROMPT_COLOR__', self.prompt_color()),
            ('__LOCAL_SUBNETS__', ','.join(sorted(_local_subnets))),
            ('__REMOTE_IP__', vpn['remote_ip']),
            ('__REMOTE_SUBNETS__', ','.join(sorted(_remote_subnets))),
            ('__SECRET__', vpn['secret']),
            ('__IKE__', vpn.get('ike', 'aes256-sha1-modp1536')),
            ('__IKE_LIFETIME__', vpn.get('ikelifetime', '28800s')),
            ('__ESP__', vpn.get('esp', 'aes256-sha1')),
            ('__KEYLIFE__', vpn.get('keylife', '1800s')),
            ('__IPTABLES_RULES__', '\n'.join(vpn.get('iptables_rules', ''))),
            ('__SERVICE__', self.service),
            ('__VPN_NAME__', _vpn_name),
            ('__TAG__', _vpn_name.lower()),
            ('__VPC_ID__', self.vpc_id)))

        _user_data = Sub(
            _user_data_template
            .replace('${', '${!')   # Escape bash ${...} into CFN ${!...} style
            .replace('{#', '${'),   # Turn rain-style {#...} escapes into real CFN Sub brackets
            {
                'CFN_EIP_ADDR': Ref(_vpn_eip),
                'CFN_ENI_ID': Ref(_vpn_eni),
            })

        _vpn_launch_configuration = self.add_resource(
            autoscaling.LaunchConfiguration(
                self.cfn_name('VPNLaunchConfiguration', _vpn_name),
                AssociatePublicIpAddress=True,
                KeyName=Ref(self.keypair_name),
                ImageId=Ref(self.ami),
                InstanceType=Ref(self.instance_type),
                InstanceMonitoring=False,
                IamInstanceProfile=Ref(self.instance_profile),
                UserData=Base64(_user_data)))

        self.add_resource(
            autoscaling.AutoScalingGroup(
                self.cfn_name('VPNASGroup', _vpn_name),
                AvailabilityZones=[_vpn_subnet['AvailabilityZone']],
                HealthCheckType='EC2',
                LaunchConfigurationName=Ref(_vpn_launch_configuration),
                MinSize=1,
                MaxSize=1,
                DesiredCapacity=1,
                VPCZoneIdentifier=[_vpn_subnet['SubnetId']],
                Tags=self.get_autoscaling_tags(role_override=_role) +
                     [autoscaling.Tag('Name', _role, True)]))
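# A self-contained illustration of the escaping applied to the cloud-init
# template above: bash's ${VAR} must become ${!VAR} inside Fn::Sub, while the
# rain-style {#NAME} placeholders become real Sub variables.
tpl = 'echo ${HOME}; eip={#CFN_EIP_ADDR}'
escaped = tpl.replace('${', '${!').replace('{#', '${')
assert escaped == 'echo ${!HOME}; eip=${CFN_EIP_ADDR}'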
def emit_configuration():
    # Parameters here
    jenkins_instance_class = template.add_parameter(
        Parameter(
            'JenkinsInstanceType',
            Type='String',
            Default='t2.micro',
            Description='Chef jenkins instance type',
            AllowedValues=cfn.usable_instances(),
            ConstraintDescription='Instance size must be a valid instance type'))

    # jenkins IAM role
    jenkins_role_name = '.'.join(['jenkins', CLOUDNAME, CLOUDENV])
    jenkins_iam_role = template.add_resource(
        iam.Role(
            'JenkinsIamRole',
            AssumeRolePolicyDocument=ASSUME_ROLE_POLICY,
            Path="/",
            Policies=[
                iam.Policy(
                    PolicyName='JenkinsPolicy',
                    PolicyDocument=json.loads(
                        cfn.load_template("jenkins_policy.json.j2", {
                            "env": CLOUDENV,
                            "cloud": CLOUDNAME,
                            "region": "us-east-1"
                        }))),
                iam.Policy(
                    PolicyName='JenkinsDefaultPolicy',
                    PolicyDocument=json.loads(
                        cfn.load_template("default_policy.json.j2", {
                            "env": CLOUDENV,
                            "cloud": CLOUDNAME,
                            "region": "us-east-1"
                        })))
            ],
            DependsOn=cfn.vpcs[0].title))

    jenkins_instance_profile = template.add_resource(
        iam.InstanceProfile(
            "JenkinsInstanceProfile",
            Path="/",
            Roles=[Ref(jenkins_iam_role)],
            DependsOn=jenkins_iam_role.title))

    jenkins_user_data = cfn.load_template("default-init.bash.j2", {
        "env": CLOUDENV,
        "cloud": CLOUDNAME,
        "deploy": "jenkins"
    })

    ingress_rules = [
        ec2.SecurityGroupRule(
            IpProtocol=p[0], CidrIp=DEFAULT_ROUTE, FromPort=p[1], ToPort=p[1])
        for p in [('tcp', 22), ('tcp', 80), ('tcp', 443)]
    ]

    security_group = template.add_resource(
        ec2.SecurityGroup(
            "JenkinsSecurityGroup",
            GroupDescription='Security Group for jenkins instances',
            VpcId=Ref(cfn.vpcs[0]),
            SecurityGroupIngress=ingress_rules,
            DependsOn=cfn.vpcs[0].title,
            Tags=Tags(Name='.'.join(['jenkins-sg', CLOUDNAME, CLOUDENV]))))

    launch_cfg = template.add_resource(
        autoscaling.LaunchConfiguration(
            "JenkinsLaunchConfiguration",
            ImageId=FindInMap('RegionMap', Ref("AWS::Region"),
                              int(cfn.Amis.EBS)),
            InstanceType=Ref(jenkins_instance_class),
            IamInstanceProfile=Ref(jenkins_instance_profile),
            AssociatePublicIpAddress=not USE_PRIVATE_SUBNETS,
            BlockDeviceMappings=[
                ec2.BlockDeviceMapping(
                    DeviceName='/dev/sda1',
                    Ebs=ec2.EBSBlockDevice(DeleteOnTermination=True))
            ],
            KeyName=Ref(cfn.keyname),
            SecurityGroups=[Ref(security_group)],
            DependsOn=[jenkins_instance_profile.title, security_group.title],
            UserData=Base64(jenkins_user_data)))

    asg_name = '.'.join(['jenkins', CLOUDNAME, CLOUDENV])
    asg = template.add_resource(
        autoscaling.AutoScalingGroup(
            "JenkinsASG",
            AvailabilityZones=cfn.get_asg_azs(),
            DesiredCapacity="1",
            LaunchConfigurationName=Ref(launch_cfg),
            MinSize="1",
            MaxSize="1",
            NotificationConfiguration=autoscaling.NotificationConfiguration(
                TopicARN=Ref(cfn.alert_topic),
                NotificationTypes=[
                    EC2_INSTANCE_TERMINATE, EC2_INSTANCE_TERMINATE_ERROR,
                    EC2_INSTANCE_LAUNCH, EC2_INSTANCE_LAUNCH_ERROR
                ]),
            VPCZoneIdentifier=[
                Ref(sn) for sn in cfn.get_vpc_subnets(
                    cfn.vpcs[0], cfn.SubnetTypes.PLATFORM)
            ]))
" --resource %s " % container_instance_configuration_name, " --region ", Ref(AWS_REGION), "\n", "/opt/aws/bin/cfn-signal -e $? ", " --stack ", Ref(AWS_STACK_NAME), " --resource %s " % container_instance_configuration_name, " --region ", Ref(AWS_REGION), "\n", ])), ) autoscaling_group = autoscaling.AutoScalingGroup( autoscaling_group_name, template=template, VPCZoneIdentifier=[Ref(container_a_subnet), Ref(container_b_subnet)], MinSize=desired_container_instances, MaxSize=max_container_instances, DesiredCapacity=desired_container_instances, LaunchConfigurationName=Ref(container_instance_configuration), HealthCheckType="EC2", HealthCheckGracePeriod=300, ) app_service_role = iam.Role( "AppServiceRole", template=template, AssumeRolePolicyDocument=dict(Statement=[dict( Effect="Allow", Principal=dict(Service=["ecs.amazonaws.com"]), Action=["sts:AssumeRole"], )]),
def setup_vpn(config, template):
    stack = config['stack']
    region = config['region']
    vpc_name = config['vpc']
    public_subnets = []
    private_subnets = []
    customer_gateways = []
    nat_ec2_instances = []

    if region is None:
        print_err('%(stack)s: missing region\n' % locals())
        sys.exit(1)

    vpcs_file = read_yaml_file('configuration/vpcs.yaml')
    vpcs = vpcs_file['vpcs']
    connections = vpcs_file['connections']
    eips = read_yaml_file('configuration/eips.yaml')

    # NOTE: we look for the base VPC in 'vpcs' and in eips.
    # EIPs are allocated per VPC, since it's easier to manage.
    if vpc_name not in vpcs:
        print_err('%(vpc_name)s: not found in vpcs\n' % locals())
        sys.exit(1)
    if vpc_name not in eips:
        print_err('%(stack)s: not found in eips; execute '
                  '"scripts/manage-eips"\n' % locals())
        sys.exit(1)

    vpc_id = get_vpc_id(vpc_name, region)

    incoming_connections = [
        list(x.keys())[0] if isinstance(x, dict) else x
        for x in itertools.chain.from_iterable(
            c['from'] for c in connections.values()
            if 'to' in c and vpc_name in c['to'])]
    outgoing_connections = [
        list(x.keys())[0] if isinstance(x, dict) else x
        for x in itertools.chain.from_iterable(
            c['to'] for c in connections.values()
            if 'from' in c and vpc_name in c['from'])]

    # if we expect incoming VPN connections then setup a VPN gateway
    if incoming_connections:
        vpn_gateway = template.add_resource(
            ec2.VPNGateway(
                'VpnGateway',
                Type='ipsec.1',
                Tags=Tags(Name=stack, VPC=vpc_name)))
        vpn_gateway_attachment = template.add_resource(
            ec2.VPCGatewayAttachment(
                'VpcGatewayAttachment',
                VpcId=vpc_id,
                VpnGatewayId=Ref(vpn_gateway)))
        vpn_gateway_route_propagation = template.add_resource(
            ec2.VPNGatewayRoutePropagation(
                'VpnGatewayRoutePropagation',
                RouteTableIds=get_route_table_ids(vpc_id, region),
                VpnGatewayId=Ref(vpn_gateway),
                DependsOn=Name(vpn_gateway_attachment)))

        for index, connection_from in enumerate(incoming_connections, 1):
            if connection_from not in vpcs:
                print_err('%(stack)s: vpn from "%(connection_from)s" not '
                          'found in vpcs\n' % locals())
                sys.exit(1)
            if connection_from not in eips:
                print_err('%(stack)s: vpn from "%(connection_from)s" not '
                          'found in eips\n' % locals())
                sys.exit(1)

            alphanumeric_id = ''.join(
                [y.title() for y in connection_from.split('-')])

            customer_gateway = template.add_resource(
                ec2.CustomerGateway(
                    alphanumeric_id + 'CGW',
                    BgpAsn=vpcs[connection_from]['bgp_asn'],
                    IpAddress=eips[connection_from]['public_ip'],
                    Type='ipsec.1',
                    Tags=Tags(
                        Name='%(connection_from)s to %(stack)s' % locals(),
                        VPC=vpc_name)))

            vpn_connection = template.add_resource(
                ec2.VPNConnection(
                    alphanumeric_id + 'VPNConnection',
                    # We want this to always be 'False', for BGP
                    StaticRoutesOnly=config['static_routing'],
                    Type='ipsec.1',
                    VpnGatewayId=Ref(vpn_gateway),
                    CustomerGatewayId=Ref(customer_gateway),
                    Tags=Tags(
                        Name='%s CGW: IP %s' % (
                            connection_from,
                            eips[connection_from]['public_ip']),
                        # The Tag 'RemoteVPC' is queried by the
                        # configuration process on the remote VPC's NAT
                        # instance to identify the Virtual Connection it
                        # should connect to.
                        # It refers to the VPC stack name, not the WAN stack name
                        RemoteVPC=connection_from,
                        RemoteIp=eips[connection_from]['public_ip'],
                        VPC=vpc_name)))

            # Add static routes to the subnets behind each incoming VPN
            # connection.
            # NOTE: Can't be used when StaticRoutesOnly is False (which is
            # required when using BGP)
            if config['static_routing']:
                vpn_connection_static_route = template.add_resource(
                    ec2.VPNConnectionRoute(
                        # Logical IDs must be alphanumeric
                        alphanumeric_id + 'StaticRoute',
                        VpnConnectionId=Ref(vpn_connection),
                        DestinationCidrBlock=vpcs[connection_from]['cidr']))

            customer_gateways.append(customer_gateway)
    else:
        vpn_gateway = None

    if outgoing_connections:
        if region not in config['nat']['ami_id']:
            print_err('AMI ID not configured for region "%(region)s"\n'
                      % locals())
            sys.exit(1)

        nat_sg = template.add_resource(
            ec2.SecurityGroup(
                'NatSg',
                VpcId=vpc_id,
                GroupDescription='%(stack)s router Security Group' % locals(),
                SecurityGroupEgress=[
                    ec2.SecurityGroupRule(
                        CidrIp='0.0.0.0/0',
                        IpProtocol='-1',
                        FromPort='-1',
                        ToPort='-1')
                ],
                SecurityGroupIngress=(
                    # Allow all traffic from internal networks
                    [ec2.SecurityGroupRule(CidrIp=cidr,
                                           IpProtocol='-1',
                                           FromPort='-1',
                                           ToPort='-1')
                     for cidr in ['10.0.0.0/8', '172.16.0.0/12',
                                  '192.168.0.0/16']] +
                    # Allow all traffic from all other locations on our WAN
                    [ec2.SecurityGroupRule(
                        CidrIp=eips[eip]['public_ip'] + '/32',
                        IpProtocol='-1',
                        FromPort='-1',
                        ToPort='-1')
                     for eip in eips.keys()] +
                    # Optional extra traffic sources
                    [ec2.SecurityGroupRule(CidrIp=cidr,
                                           IpProtocol='-1',
                                           FromPort='-1',
                                           ToPort='-1')
                     for cidr in (config['nat']['extra_ingress_sources']
                                  or {})]),
                Tags=Tags(Name='%(stack)s router' % locals())))

        if 'openvpn_server' in config and config['openvpn_server']:
            nat_sg.SecurityGroupIngress.append(
                ec2.SecurityGroupRule(
                    CidrIp='0.0.0.0/0',
                    IpProtocol='udp',
                    FromPort='1194',
                    ToPort='1194'))
            if 'external_tld' in config:
                template.add_resource(
                    route53.RecordSetType(
                        'OpenVpnDnsRecord',
                        Comment='%(stack)s OpenVPN server' % locals(),
                        HostedZoneName=config['external_tld'] + '.',
                        Name='%s.%s.' % (vpc_name, config['external_tld']),
                        ResourceRecords=[eips[vpc_name]['public_ip']],
                        TTL='900',
                        Type='A'))

        assume_role_policy_statement = awacs.aws.Policy(Statement=[
            awacs.aws.Statement(
                Effect=awacs.aws.Allow,
                Principal=awacs.aws.Principal(
                    principal='Service', resources=['ec2.amazonaws.com']),
                Action=[awacs.sts.AssumeRole])
        ])

        root_role = template.add_resource(
            iam.Role(
                'RootRole',
                AssumeRolePolicyDocument=assume_role_policy_statement,
                Path='/'))
        root_role_policy = template.add_resource(
            iam.PolicyType(
                'RootRolePolicy',
                PolicyName='AllowAllPolicy',
                PolicyDocument={
                    'Version': '2012-10-17',
                    'Statement': [{
                        'Action': '*',
                        'Effect': 'Allow',
                        'Resource': '*',
                    }]
                },
                Roles=[Ref(root_role)]))
        root_instance_profile = template.add_resource(
            iam.InstanceProfile(
                'RootInstanceProfile',
                Path='/',
                Roles=[Ref(root_role)]))

        for index, egress_config in enumerate(
                config['nat']['sg_egress_rules'], 1):
            template.add_resource(
                ec2.SecurityGroupEgress(
                    'NatSgEgressRule%d' % index,
                    ToPort=egress_config['port'],
                    FromPort=egress_config['port'],
                    IpProtocol=egress_config['protocol'],
                    CidrIp=egress_config['cidr'],
                    GroupId=Ref(nat_sg)))

        launch_configuration = template.add_resource(
            autoscaling.LaunchConfiguration(
                'Ec2NatLaunchConfiguration',
                AssociatePublicIpAddress=True,
                SecurityGroups=[Ref(nat_sg)],
                IamInstanceProfile=Ref(root_instance_profile),
                ImageId=config['nat']['ami_id'][region],
                KeyName=config['nat']['key_name'],
                InstanceType=config['nat']['instance_type'],
                UserData=build_user_data(stack)))

        AutoScalingGroup = template.add_resource(
            autoscaling.AutoScalingGroup(
                'AutoScalingGroup',
                VPCZoneIdentifier=get_public_subnet_ids(vpc_id, region),
                TerminationPolicies=['ClosestToNextInstanceHour'],
                MinSize=1,
                MaxSize=2,
                #####
                # TODO: Have to find a way for VyOS to send the signal
                # without having access to cfn-signal script (old python
                # version). That's also the reason we allow one instance -
                # since ha-nat can't send the signal
                ####
                # CreationPolicy=policies.CreationPolicy(
                #     ResourceSignal=policies.ResourceSignal(
                #         Count=2,
                #         Timeout='PT10M',
                #     ),
                # ),
                LaunchConfigurationName=Ref(launch_configuration),
                HealthCheckType='EC2',
                UpdatePolicy=policies.UpdatePolicy(
                    AutoScalingRollingUpdate=policies.AutoScalingRollingUpdate(
                        MaxBatchSize=1,
                        MinInstancesInService=1,
                        PauseTime='PT2M',
                        # TODO: switch to 'True' when we teach VyOS to send
                        # the signal
                        WaitOnResourceSignals=False)),
                Tags=[
                    autoscaling.Tag('Name', stack + ' router', True),
                    autoscaling.Tag('VPC', vpc_name, True),
                    # Just has to be unique for this provisioning run; could
                    # be any unique string
                    autoscaling.Tag(
                        'Version',
                        datetime.datetime.utcnow().strftime(
                            '%Y-%m-%d %H:%M:%S.%f'),
                        True),
                ]))
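# The rolling-update policy used above, shown standalone with the same values;
# a minimal sketch for reference:
from troposphere import policies

update_policy = policies.UpdatePolicy(
    AutoScalingRollingUpdate=policies.AutoScalingRollingUpdate(
        MaxBatchSize=1,
        MinInstancesInService=1,
        PauseTime='PT2M',
        WaitOnResourceSignals=False))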
def create_template(self):
    """Create template (main function called by Stacker)."""
    template = self.template
    variables = self.get_variables()
    template.add_version('2010-09-09')
    template.add_description('Kubernetes workers via EKS - V1.0.0 '
                             '- compatible with amazon-eks-node-v23+')

    # Metadata
    template.add_metadata({
        'AWS::CloudFormation::Interface': {
            'ParameterGroups': [
                {'Label': {'default': 'EKS Cluster'},
                 'Parameters': [variables[i].name
                                for i in ['ClusterName',
                                          'ClusterControlPlaneSecurityGroup']]},
                {'Label': {'default': 'Worker Node Configuration'},
                 'Parameters': [variables[i].name
                                for i in ['NodeGroupName',
                                          'NodeAutoScalingGroupMinSize',
                                          'NodeAutoScalingGroupMaxSize',
                                          'UseDesiredInstanceCount',
                                          'NodeInstanceType',
                                          'NodeInstanceProfile',
                                          'NodeImageId',
                                          'NodeVolumeSize',
                                          'KeyName',
                                          'UseSpotInstances',
                                          'SpotBidPrice',
                                          'BootstrapArguments']]},
                {'Label': {'default': 'Worker Network Configuration'},
                 'Parameters': [variables[i].name
                                for i in ['VpcId', 'Subnets']]}
            ]
        }
    })

    # Conditions
    template.add_condition(
        'SetSpotPrice',
        Equals(variables['UseSpotInstances'].ref, 'yes')
    )
    template.add_condition(
        'DesiredInstanceCountSpecified',
        Equals(variables['UseDesiredInstanceCount'].ref, 'true')
    )
    template.add_condition(
        'KeyNameSpecified',
        Not(Equals(variables['KeyName'].ref, ''))
    )

    # Resources
    nodesecuritygroup = template.add_resource(
        ec2.SecurityGroup(
            'NodeSecurityGroup',
            GroupDescription='Security group for all nodes in the cluster',
            Tags=[
                {'Key': Sub('kubernetes.io/cluster/${ClusterName}'),
                 'Value': 'owned'},
            ],
            VpcId=variables['VpcId'].ref
        )
    )
    template.add_output(
        Output(
            'NodeSecurityGroup',
            Description='Security group for all nodes in the cluster',
            Value=nodesecuritygroup.ref()
        )
    )
    template.add_resource(
        ec2.SecurityGroupIngress(
            'NodeSecurityGroupIngress',
            Description='Allow node to communicate with each other',
            GroupId=nodesecuritygroup.ref(),
            SourceSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol='-1',
            FromPort=0,
            ToPort=65535
        )
    )
    template.add_resource(
        ec2.SecurityGroupIngress(
            'NodeSecurityGroupFromControlPlaneIngress',
            Description='Allow worker Kubelets and pods to receive '
                        'communication from the cluster control plane',
            GroupId=nodesecuritygroup.ref(),
            SourceSecurityGroupId=variables['ClusterControlPlaneSecurityGroup'].ref,  # noqa
            IpProtocol='tcp',
            FromPort=1025,
            ToPort=65535
        )
    )
    template.add_resource(
        ec2.SecurityGroupEgress(
            'ControlPlaneEgressToNodeSecurityGroup',
            Description='Allow the cluster control plane to communicate '
                        'with worker Kubelet and pods',
            GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
            DestinationSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol='tcp',
            FromPort=1025,
            ToPort=65535
        )
    )
    template.add_resource(
        ec2.SecurityGroupIngress(
            'NodeSecurityGroupFromControlPlaneOn443Ingress',
            Description='Allow pods running extension API servers on port '
                        '443 to receive communication from cluster '
                        'control plane',
            GroupId=nodesecuritygroup.ref(),
            SourceSecurityGroupId=variables['ClusterControlPlaneSecurityGroup'].ref,  # noqa
            IpProtocol='tcp',
            FromPort=443,
            ToPort=443
        )
    )
    template.add_resource(
        ec2.SecurityGroupEgress(
            'ControlPlaneEgressToNodeSecurityGroupOn443',
            Description='Allow the cluster control plane to communicate '
                        'with pods running extension API servers on port '
                        '443',
            GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
            DestinationSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol='tcp',
            FromPort=443,
            ToPort=443
        )
    )
    template.add_resource(
        ec2.SecurityGroupIngress(
            'ClusterControlPlaneSecurityGroupIngress',
            Description='Allow pods to communicate with the cluster API '
                        'Server',
            GroupId=variables['ClusterControlPlaneSecurityGroup'].ref,
            SourceSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol='tcp',
            FromPort=443,
            ToPort=443
        )
    )

    nodelaunchconfig = template.add_resource(
        autoscaling.LaunchConfiguration(
            'NodeLaunchConfig',
            AssociatePublicIpAddress=True,
            IamInstanceProfile=variables['NodeInstanceProfile'].ref,
            ImageId=variables['NodeImageId'].ref,
            InstanceType=variables['NodeInstanceType'].ref,
            KeyName=If('KeyNameSpecified',
                       variables['KeyName'].ref,
                       NoValue),
            SecurityGroups=[nodesecuritygroup.ref()],
            SpotPrice=If('SetSpotPrice',
                         variables['SpotBidPrice'].ref,
                         NoValue),
            BlockDeviceMappings=[autoscaling.BlockDeviceMapping(
                DeviceName='/dev/xvda',
                Ebs=autoscaling.EBSBlockDevice(
                    VolumeSize=variables['NodeVolumeSize'].ref,
                    VolumeType='gp2',
                    DeleteOnTermination=True
                )
            )],
            UserData=Base64(
                Sub('\n'.join([
                    '#!/bin/bash',
                    'set -o xtrace',
                    '/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}',  # noqa
                    '/opt/aws/bin/cfn-signal --exit-code $? \\',
                    '--stack ${AWS::StackName} \\',
                    '--resource NodeGroup \\',
                    '--region ${AWS::Region}'
                ]))
            )
        )
    )
    template.add_resource(
        autoscaling.AutoScalingGroup(
            'NodeGroup',
            DesiredCapacity=If(
                'DesiredInstanceCountSpecified',
                variables['NodeAutoScalingGroupMaxSize'].ref,
                NoValue
            ),
            LaunchConfigurationName=nodelaunchconfig.ref(),
            MinSize=variables['NodeAutoScalingGroupMinSize'].ref,
            MaxSize=variables['NodeAutoScalingGroupMaxSize'].ref,
            VPCZoneIdentifier=variables['Subnets'].ref,
            Tags=[
                autoscaling.Tag(
                    'Name',
                    Sub('${ClusterName}-${NodeGroupName}-Node'),
                    True),
                autoscaling.Tag(
                    Sub('kubernetes.io/cluster/${ClusterName}'),
                    'owned',
                    True)
            ],
            UpdatePolicy=UpdatePolicy(
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    MinInstancesInService='1',
                    MaxBatchSize='1'
                )
            )
        )
    )
" --resource %s " % container_instance_configuration_name, " --region ", Ref(AWS_REGION), "\n", ])), ) autoscaling_group = autoscaling.AutoScalingGroup( autoscaling_group_name, template=template, VPCZoneIdentifier=[Ref(container_a_subnet), Ref(container_b_subnet)], MinSize=desired_container_instances, MaxSize=max_container_instances, DesiredCapacity=desired_container_instances, LaunchConfigurationName=Ref(container_instance_configuration), LoadBalancerNames=[Ref(load_balancer)], # Since one instance within the group is a reserved slot # for rolling ECS service upgrade, it's not possible to rely # on a "dockerized" `ELB` health-check, else this reserved # instance will be flagged as `unhealthy` and won't stop respawning' HealthCheckType="EC2", HealthCheckGracePeriod=300, ) # ECS task web_task_definition = TaskDefinition( "WebTask", template=template, Condition=deploy_condition, ContainerDefinitions=[
def create_template(self) -> None:
    """Create template (main function called by Stacker)."""
    template = self.template
    template.add_version("2010-09-09")
    template.add_description(
        "Kubernetes workers via EKS - V1.0.0 "
        "- compatible with amazon-eks-node-v23+"
    )

    # Metadata
    template.add_metadata(
        {
            "AWS::CloudFormation::Interface": {
                "ParameterGroups": [
                    {
                        "Label": {"default": "EKS Cluster"},
                        "Parameters": [
                            self.variables[i].name
                            for i in [
                                "ClusterName",
                                "ClusterControlPlaneSecurityGroup",
                            ]
                        ],
                    },
                    {
                        "Label": {"default": "Worker Node Configuration"},
                        "Parameters": [
                            self.variables[i].name
                            for i in [
                                "NodeGroupName",
                                "NodeAutoScalingGroupMinSize",
                                "NodeAutoScalingGroupMaxSize",
                                "UseDesiredInstanceCount",
                                "NodeInstanceType",
                                "NodeInstanceProfile",
                                "NodeImageId",
                                "NodeVolumeSize",
                                "KeyName",
                                "UseSpotInstances",
                                "SpotBidPrice",
                                "BootstrapArguments",
                            ]
                        ],
                    },
                    {
                        "Label": {"default": "Worker Network Configuration"},
                        "Parameters": [
                            self.variables[i].name for i in ["VpcId", "Subnets"]
                        ],
                    },
                ]
            }
        }
    )

    # Conditions
    template.add_condition(
        "SetSpotPrice", Equals(self.variables["UseSpotInstances"].ref, "yes")
    )
    template.add_condition(
        "DesiredInstanceCountSpecified",
        Equals(self.variables["UseDesiredInstanceCount"].ref, "true"),
    )
    template.add_condition(
        "KeyNameSpecified", Not(Equals(self.variables["KeyName"].ref, ""))
    )

    # Resources
    nodesecuritygroup = template.add_resource(
        ec2.SecurityGroup(
            "NodeSecurityGroup",
            GroupDescription="Security group for all nodes in the cluster",
            Tags=[
                {
                    "Key": Sub("kubernetes.io/cluster/${ClusterName}"),
                    "Value": "owned",
                },
            ],
            VpcId=self.variables["VpcId"].ref,
        )
    )
    template.add_output(
        Output(
            "NodeSecurityGroup",
            Description="Security group for all nodes in the cluster",
            Value=nodesecuritygroup.ref(),
        )
    )
    template.add_resource(
        ec2.SecurityGroupIngress(
            "NodeSecurityGroupIngress",
            Description="Allow node to communicate with each other",
            GroupId=nodesecuritygroup.ref(),
            SourceSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol="-1",
            FromPort=0,
            ToPort=65535,
        )
    )
    template.add_resource(
        ec2.SecurityGroupIngress(
            "NodeSecurityGroupFromControlPlaneIngress",
            Description="Allow worker Kubelets and pods to receive "
            "communication from the cluster control plane",
            GroupId=nodesecuritygroup.ref(),
            SourceSecurityGroupId=self.variables[
                "ClusterControlPlaneSecurityGroup"
            ].ref,
            IpProtocol="tcp",
            FromPort=1025,
            ToPort=65535,
        )
    )
    template.add_resource(
        ec2.SecurityGroupEgress(
            "ControlPlaneEgressToNodeSecurityGroup",
            Description="Allow the cluster control plane to communicate "
            "with worker Kubelet and pods",
            GroupId=self.variables["ClusterControlPlaneSecurityGroup"].ref,
            DestinationSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol="tcp",
            FromPort=1025,
            ToPort=65535,
        )
    )
    template.add_resource(
        ec2.SecurityGroupIngress(
            "NodeSecurityGroupFromControlPlaneOn443Ingress",
            Description="Allow pods running extension API servers on port "
            "443 to receive communication from cluster "
            "control plane",
            GroupId=nodesecuritygroup.ref(),
            SourceSecurityGroupId=self.variables[
                "ClusterControlPlaneSecurityGroup"
            ].ref,
            IpProtocol="tcp",
            FromPort=443,
            ToPort=443,
        )
    )
    template.add_resource(
        ec2.SecurityGroupEgress(
            "ControlPlaneEgressToNodeSecurityGroupOn443",
            Description="Allow the cluster control plane to communicate "
            "with pods running extension API servers on port "
            "443",
            GroupId=self.variables["ClusterControlPlaneSecurityGroup"].ref,
            DestinationSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol="tcp",
            FromPort=443,
            ToPort=443,
        )
    )
    template.add_resource(
        ec2.SecurityGroupIngress(
            "ClusterControlPlaneSecurityGroupIngress",
            Description="Allow pods to communicate with the cluster API "
            "Server",
            GroupId=self.variables["ClusterControlPlaneSecurityGroup"].ref,
            SourceSecurityGroupId=nodesecuritygroup.ref(),
            IpProtocol="tcp",
            FromPort=443,
            ToPort=443,
        )
    )

    nodelaunchconfig = template.add_resource(
        autoscaling.LaunchConfiguration(
            "NodeLaunchConfig",
            AssociatePublicIpAddress=True,
            IamInstanceProfile=self.variables["NodeInstanceProfile"].ref,
            ImageId=self.variables["NodeImageId"].ref,
            InstanceType=self.variables["NodeInstanceType"].ref,
            KeyName=If(
                "KeyNameSpecified", self.variables["KeyName"].ref, NoValue
            ),
            SecurityGroups=[nodesecuritygroup.ref()],
            SpotPrice=If(
                "SetSpotPrice", self.variables["SpotBidPrice"].ref, NoValue
            ),
            BlockDeviceMappings=[
                autoscaling.BlockDeviceMapping(
                    DeviceName="/dev/xvda",
                    Ebs=autoscaling.EBSBlockDevice(
                        VolumeSize=self.variables["NodeVolumeSize"].ref,
                        VolumeType="gp2",
                        DeleteOnTermination=True,
                    ),
                )
            ],
            UserData=Base64(
                Sub(
                    "\n".join(
                        [
                            "#!/bin/bash",
                            "set -o xtrace",
                            "/etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}",
                            "/opt/aws/bin/cfn-signal --exit-code $? \\",
                            "--stack ${AWS::StackName} \\",
                            "--resource NodeGroup \\",
                            "--region ${AWS::Region}",
                        ]
                    )
                )
            ),
        )
    )
    template.add_resource(
        autoscaling.AutoScalingGroup(
            "NodeGroup",
            DesiredCapacity=If(
                "DesiredInstanceCountSpecified",
                self.variables["NodeAutoScalingGroupMaxSize"].ref,
                NoValue,
            ),
            LaunchConfigurationName=nodelaunchconfig.ref(),
            MinSize=self.variables["NodeAutoScalingGroupMinSize"].ref,
            MaxSize=self.variables["NodeAutoScalingGroupMaxSize"].ref,
            VPCZoneIdentifier=self.variables["Subnets"].ref,
            Tags=[
                autoscaling.Tag(
                    "Name", Sub("${ClusterName}-${NodeGroupName}-Node"), True
                ),
                autoscaling.Tag(
                    Sub("kubernetes.io/cluster/${ClusterName}"), "owned", True
                ),
            ],
            UpdatePolicy=UpdatePolicy(
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    MinInstancesInService="1", MaxBatchSize="1"
                )
            ),
        )
    )
def create_asg(self,
               layer_name,
               instance_profile,
               instance_type=None,
               ami_name='ubuntu1404LtsAmiId',
               ec2_key=None,
               user_data=None,
               default_instance_type=None,
               security_groups=None,
               min_size=1,
               max_size=1,
               root_volume_size=None,
               root_volume_type=None,
               include_ephemerals=True,
               number_ephemeral_vols=2,
               ebs_data_volumes=None,  # e.g. [{'size': '100', 'type': 'gp2', 'delete_on_termination': True, 'iops': 4000, 'volume_type': 'io1'}]
               custom_tags=None,
               load_balancer=None,
               instance_monitoring=False,
               subnet_type='private',
               launch_config_metadata=None,
               creation_policy=None,
               update_policy=None,
               depends_on=None):
    '''
    Wrapper method used to create an EC2 Launch Configuration and Auto
    Scaling group.

    @param layer_name [string] friendly name of the set of instances being
        created - will be set as the name for instances deployed
    @param instance_profile [Troposphere.iam.InstanceProfile] IAM Instance
        Profile object to be applied to instances launched within this Auto
        Scaling group
    @param instance_type [Troposphere.Parameter | string] Reference to the
        AWS EC2 Instance Type to deploy
    @param ami_name [string] Name of the AMI to deploy as defined within the
        RegionMap lookup for the deployed region
    @param ec2_key [Troposphere.Parameter | Troposphere.Ref(Troposphere.Parameter)]
        Input parameter used to gather the name of the EC2 key to use to
        secure access to instances launched within this Auto Scaling group
    @param user_data [string[]] Array of strings (lines of bash script) to
        be set as the user data as a bootstrap script for instances launched
        within this Auto Scaling group
    @param default_instance_type [string - AWS Instance Type] AWS instance
        type to set as the default for the input parameter defining the
        instance type for this layer_name
    @param security_groups [Troposphere.ec2.SecurityGroup[]] array of
        security groups to be applied to instances within this Auto Scaling
        group
    @param min_size [int] value to set as the minimum number of instances
        for the Auto Scaling group
    @param max_size [int] value to set as the maximum number of instances
        for the Auto Scaling group
    @param root_volume_size [int] size (in GiB) to assign to the root
        volume of the launched instance
    @param include_ephemerals [Boolean] indicates that ephemeral volumes
        should be included in the block device mapping of the Launch
        Configuration
    @param number_ephemeral_vols [int] number of ephemeral volumes to attach
        within the block device mapping Launch Configuration
    @param ebs_data_volumes [list] dictionary pair of size and type data
        properties in a list used to create ebs volume attachments
    @param custom_tags [Troposphere.autoscaling.Tag[]] Collection of Auto
        Scaling tags to be assigned to the Auto Scaling Group
    @param load_balancer [Troposphere.elasticloadbalancing.LoadBalancer]
        Object reference to an ELB to be assigned to this auto scaling group
    @param instance_monitoring [Boolean] indicates that detailed monitoring
        should be turned on for all instances launched within this Auto
        Scaling group
    @param subnet_type [string {'public', 'private'}] string indicating
        which type of subnet (public or private) instances should be
        launched into
    '''
    if subnet_type not in ['public', 'private']:
        raise RuntimeError(
            'Unable to determine which type of subnet instances should be '
            'launched into. ' + str(subnet_type) +
            ' is not one of ["public", "private"].')

    if ec2_key is not None and not isinstance(ec2_key, Ref):
        ec2_key = Ref(ec2_key)
    elif ec2_key is None:
        ec2_key = Ref(self.template.parameters['ec2Key'])

    if default_instance_type is None:
        default_instance_type = 'm1.small'

    if not isinstance(instance_type, str):
        instance_type = Ref(instance_type)

    sg_list = []
    for sg in security_groups:
        if isinstance(sg, Ref):
            sg_list.append(sg)
        else:
            sg_list.append(Ref(sg))

    launch_config_obj = autoscaling.LaunchConfiguration(
        layer_name + 'LaunchConfiguration',
        IamInstanceProfile=Ref(instance_profile),
        ImageId=FindInMap('RegionMap', Ref('AWS::Region'), ami_name),
        InstanceType=instance_type,
        SecurityGroups=sg_list,
        KeyName=ec2_key,
        Metadata=(launch_config_metadata or None),
        InstanceMonitoring=instance_monitoring)

    if user_data is not None:
        launch_config_obj.UserData = user_data

    block_devices = []
    if root_volume_type is not None and root_volume_size is not None:
        ebs_device = ec2.EBSBlockDevice(VolumeSize=root_volume_size)
        if root_volume_type is not None:
            ebs_device.VolumeType = root_volume_type
        block_devices.append(
            ec2.BlockDeviceMapping(DeviceName='/dev/sda1', Ebs=ebs_device))

    device_names = ['/dev/sd%s' % c for c in 'bcdefghijklmnopqrstuvwxyz']

    if ebs_data_volumes is not None and len(ebs_data_volumes) > 0:
        for ebs_volume in ebs_data_volumes:
            device_name = device_names.pop()
            ebs_block_device = ec2.EBSBlockDevice(
                DeleteOnTermination=ebs_volume.get('delete_on_termination',
                                                   True),
                VolumeSize=ebs_volume.get('size', '100'),
                VolumeType=ebs_volume.get('type', 'gp2'))
            if 'iops' in ebs_volume:
                ebs_block_device.Iops = int(ebs_volume.get('iops'))
            if 'snapshot_id' in ebs_volume:
                ebs_block_device.SnapshotId = ebs_volume.get('snapshot_id')
            block_devices.append(
                ec2.BlockDeviceMapping(DeviceName=device_name,
                                       Ebs=ebs_block_device))

    if include_ephemerals and number_ephemeral_vols > 0:
        device_names.reverse()
        for x in range(0, number_ephemeral_vols):
            device_name = device_names.pop()
            block_devices.append(
                ec2.BlockDeviceMapping(DeviceName=device_name,
                                       VirtualName='ephemeral' + str(x)))

    if len(block_devices) > 0:
        launch_config_obj.BlockDeviceMappings = block_devices

    launch_config = self.template.add_resource(launch_config_obj)

    if depends_on:
        auto_scaling_obj = autoscaling.AutoScalingGroup(
            layer_name + 'AutoScalingGroup',
            AvailabilityZones=self.azs,
            LaunchConfigurationName=Ref(launch_config),
            MaxSize=max_size,
            MinSize=min_size,
            DesiredCapacity=min(min_size, max_size),
            VPCZoneIdentifier=self.subnets[subnet_type.lower()],
            TerminationPolicies=['OldestLaunchConfiguration',
                                 'ClosestToNextInstanceHour', 'Default'],
            DependsOn=depends_on)
    else:
        auto_scaling_obj = autoscaling.AutoScalingGroup(
            layer_name + 'AutoScalingGroup',
            AvailabilityZones=self.azs,
            LaunchConfigurationName=Ref(launch_config),
            MaxSize=max_size,
            MinSize=min_size,
            DesiredCapacity=min(min_size, max_size),
            VPCZoneIdentifier=self.subnets[subnet_type.lower()],
            TerminationPolicies=['OldestLaunchConfiguration',
                                 'ClosestToNextInstanceHour', 'Default'])

    lb_tmp = []
    if load_balancer is not None:
        try:
            if type(load_balancer) is dict:
                for lb in load_balancer:
                    lb_tmp.append(Ref(load_balancer[lb]))
            elif type(load_balancer) is not Ref:
                for lb in load_balancer:
                    lb_tmp.append(Ref(lb))
            else:
                lb_tmp.append(load_balancer)
        except TypeError:
            lb_tmp.append(Ref(load_balancer))
    else:
        lb_tmp = None

    if lb_tmp is not None and len(lb_tmp) > 0:
        auto_scaling_obj.LoadBalancerNames = lb_tmp

    if creation_policy is not None:
        auto_scaling_obj.resource['CreationPolicy'] = creation_policy
    if update_policy is not None:
        auto_scaling_obj.resource['UpdatePolicy'] = update_policy

    if custom_tags:
        if not isinstance(custom_tags, list):
            custom_tags = [custom_tags]
        auto_scaling_obj.Tags = custom_tags
    else:
        auto_scaling_obj.Tags = []
    auto_scaling_obj.Tags.append(autoscaling.Tag('Name', layer_name, True))

    return self.template.add_resource(auto_scaling_obj)
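# A self-contained trace of the ephemeral-volume loop above for
# number_ephemeral_vols=2 and no EBS data volumes: after reverse(), pop()
# hands out /dev/sdb first, so ephemeral0 -> /dev/sdb, ephemeral1 -> /dev/sdc.
from troposphere import ec2

device_names = ['/dev/sd%s' % c for c in 'bcdefghijklmnopqrstuvwxyz']
device_names.reverse()
block_devices = []
for x in range(0, 2):
    block_devices.append(
        ec2.BlockDeviceMapping(DeviceName=device_names.pop(),
                               VirtualName='ephemeral' + str(x)))
assert block_devices[0].DeviceName == '/dev/sdb'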
def set_up_stack(self): """Sets up the stack""" if not self.INPUTS or not self.STACK_NAME_PREFIX or not self.HEALTH_ENDPOINT: raise MKInputError( 'Must define INPUTS, STACK_NAME_PREFIX, and HEALTH_ENDPOINT') super(AppServerStack, self).set_up_stack() tags = self.get_input('Tags').copy() self.add_description('{} App Server Stack for Cac'.format( self.STACK_NAME_PREFIX)) assert isinstance(tags, dict), 'tags must be a dictionary' self.availability_zones = get_availability_zones() tags.update({'StackType': 'AppServer'}) self.default_tags = tags self.app_server_instance_type_parameter = self.add_parameter( Parameter( 'AppServerInstanceType', Type='String', Default='t2.medium', Description='NAT EC2 instance type', AllowedValues=EC2_INSTANCE_TYPES, ConstraintDescription='must be a valid EC2 instance type.'), source='AppServerInstanceType') self.param_app_server_iam_profile = self.add_parameter( Parameter('AppServerIAMProfile', Type='String', Description='IAM Profile for instances'), source='AppServerIAMProfile') self.app_server_ami = self.add_parameter(Parameter( 'AppServerAMI', Type='String', Description='{} Server EC2 AMI'.format(self.STACK_NAME_PREFIX)), source='AppServerAMI') self.keyname_parameter = self.add_parameter(Parameter( 'KeyName', Type='String', Default='cac', Description='Name of an existing EC2 key pair'), source='KeyName') self.param_color = self.add_parameter(Parameter( 'StackColor', Type='String', Description='Stack color', AllowedValues=['Blue', 'Green', 'Orange']), source='StackColor') self.param_stacktype = self.add_parameter(Parameter( 'StackType', Type='String', Description='Stack type', AllowedValues=['Development', 'Staging', 'Production']), source='StackType') self.param_public_hosted_zone_name = self.add_parameter( Parameter('PublicHostedZoneName', Type='String', Description='Public hosted zone name'), source='PublicHostedZoneName') self.param_vpc = self.add_parameter(Parameter( 'VpcId', Type='String', Description='Name of an existing VPC'), source='VpcId') self.param_notification_arn = self.add_parameter( Parameter( 'GlobalNotificationsARN', Type='String', Description='Physical resource ID on an AWS::SNS::Topic for ' 'notifications'), source='GlobalNotificationsARN') self.param_ssl_certificate_arn = self.add_parameter( Parameter('SSLCertificateARN', Type='String', Description= 'Physical resource ID on an AWS::IAM::ServerCertificate ' 'for the application server load balancer'), source='SSLCertificateARN') self.param_public_subnets = self.add_parameter( Parameter('PublicSubnets', Type='CommaDelimitedList', Description='A list of public subnets'), source='AppServerPublicSubnets') self.param_private_subnets = self.add_parameter( Parameter('PrivateSubnets', Type='CommaDelimitedList', Description='A list of private subnets'), source='AppServerPrivateSubnets') self.param_bastion_security_group = self.add_parameter( Parameter('BastionSecurityGroup', Type='String', Description='The ID of the bastion security group'), source='BastionSecurityGroup') self.param_database_security_group = self.add_parameter( Parameter('DatabaseSecurityGroup', Type='String', Description='The ID of the database security group'), source='DatabaseSecurityGroup') self.param_nat_security_group = self.add_parameter( Parameter('NATSecurityGroup', Type='String', Description='The ID of the NAT security group'), source='NATSecurityGroup') self.param_min_size = self.add_parameter(Parameter( 'ASGMinSize', Type='Number', Default='1', Description='Min size of ASG'), source='ASGMinSize') self.param_max_size = 
self.param_max_size = self.add_parameter(Parameter(
    'ASGMaxSize',
    Type='Number',
    Default='1',
    Description='Max size of ASG'),
    source='ASGMaxSize')
self.param_desired_capacity = self.add_parameter(Parameter(
    'ASGDesiredCapacity',
    Type='Number',
    Default='1',
    Description='Desired capacity of ASG'),
    source='ASGDesiredCapacity')

#
# Security Group
#
app_server_load_balancer_security_group = self.add_resource(
    ec2.SecurityGroup(
        'sgAppServerLoadBalancer',
        GroupDescription='Enables access to app servers via a load balancer',
        VpcId=Ref(self.param_vpc),
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(IpProtocol='tcp',
                                  CidrIp=ALLOW_ALL_CIDR,
                                  FromPort=p,
                                  ToPort=p)
            for p in [80, 443]
        ],
        Tags=Tags(Name='sgAppServerLoadBalancer',
                  Color=Ref(self.param_color))))
app_server_security_group = self.add_resource(
    ec2.SecurityGroup(
        'sgAppServer',
        GroupDescription='Enables access to App Servers',
        VpcId=Ref(self.param_vpc),
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(IpProtocol='tcp',
                                  CidrIp=VPC_CIDR,
                                  FromPort=p,
                                  ToPort=p)
            for p in [22, 80, 443]
        ] + [
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                SourceSecurityGroupId=Ref(
                    app_server_load_balancer_security_group),
                FromPort=p,
                ToPort=p)
            for p in [80, 443]
        ],
        SecurityGroupEgress=[
            ec2.SecurityGroupRule(IpProtocol='tcp',
                                  CidrIp=ALLOW_ALL_CIDR,
                                  FromPort=p,
                                  ToPort=p)
            for p in [80, 443, PAPERTRAIL_PORT]
        ],
        Tags=Tags(Name='sgAppServer', Color=Ref(self.param_color))))

# ELB to App Server
self.add_resource(
    ec2.SecurityGroupEgress(
        'sgEgressELBtoAppHTTP',
        GroupId=Ref(app_server_load_balancer_security_group),
        DestinationSecurityGroupId=Ref(app_server_security_group),
        IpProtocol='tcp',
        FromPort=80,
        ToPort=80))
self.add_resource(
    ec2.SecurityGroupEgress(
        'sgEgressELBtoAppHTTPS',
        GroupId=Ref(app_server_load_balancer_security_group),
        DestinationSecurityGroupId=Ref(app_server_security_group),
        IpProtocol='tcp',
        FromPort=443,
        ToPort=443))

# Bastion to app server, app server to database, app server to internet
rules = [(self.param_bastion_security_group,
          app_server_security_group,
          [80, 443, 22]),
         (app_server_security_group,
          self.param_database_security_group,
          [POSTGRES]),
         (app_server_security_group,
          self.param_nat_security_group,
          [80, 443, 22, 587, PAPERTRAIL_PORT])]
for num, (srcsg, destsg, ports) in enumerate(rules):
    for port in ports:
        self.add_resource(
            ec2.SecurityGroupEgress(
                'sgEgress{}p{}'.format(num, port),
                GroupId=Ref(srcsg),
                DestinationSecurityGroupId=Ref(destsg),
                IpProtocol='tcp',
                FromPort=port,
                ToPort=port))
        self.add_resource(
            ec2.SecurityGroupIngress(
                'sgIngress{}p{}'.format(num, port),
                GroupId=Ref(destsg),
                SourceSecurityGroupId=Ref(srcsg),
                IpProtocol='tcp',
                FromPort=port,
                ToPort=port))

#
# ELB
#
app_server_load_balancer = self.add_resource(
    elb.LoadBalancer(
        'elbAppServer',
        ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
            Enabled=True, Timeout=300),
        CrossZone=True,
        SecurityGroups=[Ref(app_server_load_balancer_security_group)],
        Listeners=[
            elb.Listener(LoadBalancerPort='80',
                         Protocol='HTTP',
                         InstancePort='80',
                         InstanceProtocol='HTTP'),
            elb.Listener(LoadBalancerPort='443',
                         Protocol='HTTPS',
                         InstancePort='443',
                         InstanceProtocol='HTTP',
                         SSLCertificateId=Ref(
                             self.param_ssl_certificate_arn))
        ],
        HealthCheck=elb.HealthCheck(
            Target=self.HEALTH_ENDPOINT,
            HealthyThreshold='3',
            UnhealthyThreshold='2',
            Interval='30',
            Timeout='5',
        ),
        Subnets=Ref(self.param_public_subnets),
        Tags=Tags(Name='elbAppServer', Color=Ref(self.param_color))))

# Alert on sustained backend errors reported by the ELB
self.add_resource(
    cw.Alarm('alarmAppServerBackend4xx',
             AlarmActions=[Ref(self.param_notification_arn)],
             Statistic='Sum',
             Period=300,
             Threshold='5',
             EvaluationPeriods=1,
             ComparisonOperator='GreaterThanThreshold',
             MetricName='HTTPCode_Backend_4XX',
             Namespace='AWS/ELB',
             Dimensions=[
                 cw.MetricDimension(
                     'metricLoadBalancerName',
                     Name='LoadBalancerName',
                     Value=Ref(app_server_load_balancer))
             ]))
self.add_resource(
    cw.Alarm('alarmAppServerBackend5xx',
             AlarmActions=[Ref(self.param_notification_arn)],
             Statistic='Sum',
             Period=60,
             Threshold='0',
             EvaluationPeriods=1,
             ComparisonOperator='GreaterThanThreshold',
             MetricName='HTTPCode_Backend_5XX',
             Namespace='AWS/ELB',
             Dimensions=[
                 cw.MetricDimension(
                     'metricLoadBalancerName',
                     Name='LoadBalancerName',
                     Value=Ref(app_server_load_balancer))
             ]))

#
# ASG
#
app_server_launch_config = self.add_resource(
    asg.LaunchConfiguration(
        'lcAppServer',
        ImageId=Ref(self.app_server_ami),
        IamInstanceProfile=Ref(self.param_app_server_iam_profile),
        InstanceType=Ref(self.app_server_instance_type_parameter),
        KeyName=Ref(self.keyname_parameter),
        SecurityGroups=[Ref(app_server_security_group)]))
autoscaling_group = self.add_resource(
    asg.AutoScalingGroup(
        'asgAppServer',
        AvailabilityZones=self.get_input(
            'AppServerAvailabilityZones').split(','),
        Cooldown=300,
        DesiredCapacity=Ref(self.param_desired_capacity),
        HealthCheckGracePeriod=600,
        HealthCheckType='ELB',
        LaunchConfigurationName=Ref(app_server_launch_config),
        LoadBalancerNames=[Ref(app_server_load_balancer)],
        MaxSize=Ref(self.param_max_size),
        MinSize=Ref(self.param_min_size),
        NotificationConfigurations=[
            asg.NotificationConfigurations(
                TopicARN=Ref(self.param_notification_arn),
                NotificationTypes=[
                    asg.EC2_INSTANCE_LAUNCH,
                    asg.EC2_INSTANCE_LAUNCH_ERROR,
                    asg.EC2_INSTANCE_TERMINATE,
                    asg.EC2_INSTANCE_TERMINATE_ERROR
                ])
        ],
        VPCZoneIdentifier=Ref(self.param_private_subnets),
        Tags=[
            asg.Tag('Name',
                    '{}Server'.format(self.STACK_NAME_PREFIX),
                    True),
            asg.Tag('Color', Ref(self.param_color), True)
        ]))

# autoscaling policies
autoscaling_policy_add = self.add_resource(
    asg.ScalingPolicy('scalingPolicyAddAppServer',
                      AdjustmentType='ChangeInCapacity',
                      AutoScalingGroupName=Ref(autoscaling_group),
                      Cooldown=600,
                      ScalingAdjustment='1'))
autoscaling_policy_remove = self.add_resource(
    asg.ScalingPolicy('scalingPolicyRemoveAppServer',
                      AdjustmentType='ChangeInCapacity',
                      AutoScalingGroupName=Ref(autoscaling_group),
                      Cooldown=600,
                      ScalingAdjustment='-1'))

if self.STACK_NAME_PREFIX == 'Otp':
    # trigger scale down if CPU avg usage < 10% for 3 consecutive 5 min periods
    self.add_resource(
        cw.Alarm('alarmAppServerLowCPU',
                 AlarmActions=[Ref(autoscaling_policy_remove)],
                 Statistic='Average',
                 Period=300,
                 Threshold='10',
                 EvaluationPeriods=3,
                 ComparisonOperator='LessThanThreshold',
                 MetricName='CPUUtilization',
                 Namespace='AWS/EC2',
                 Dimensions=[
                     cw.MetricDimension('metricAutoScalingGroupName',
                                        Name='AutoScalingGroupName',
                                        Value=Ref(autoscaling_group))
                 ]))
    # trigger scale up if CPU avg usage >= 30% for a 5 min period
    self.add_resource(
        cw.Alarm('alarmAppServerHighCPU',
                 AlarmActions=[
                     Ref(self.param_notification_arn),
                     Ref(autoscaling_policy_add)
                 ],
                 Statistic='Average',
                 Period=300,
                 Threshold='30',
                 EvaluationPeriods=1,
                 ComparisonOperator='GreaterThanOrEqualToThreshold',
                 MetricName='CPUUtilization',
                 Namespace='AWS/EC2',
                 Dimensions=[
                     cw.MetricDimension('metricAutoScalingGroupName',
                                        Name='AutoScalingGroupName',
                                        Value=Ref(autoscaling_group))
                 ]))
else:
    # scale web servers based on network usage
    self.add_resource(
        cw.Alarm('alarmAppServerLowNetworkUsage',
                 AlarmActions=[Ref(autoscaling_policy_remove)],
                 Statistic='Average',
                 Period=300,
                 Threshold='500000',
                 EvaluationPeriods=3,
                 ComparisonOperator='LessThanThreshold',
                 MetricName='NetworkOut',
                 Namespace='AWS/EC2',
                 Dimensions=[
                     cw.MetricDimension('metricAutoScalingGroupName',
                                        Name='AutoScalingGroupName',
                                        Value=Ref(autoscaling_group))
                 ]))
    self.add_resource(
        cw.Alarm('alarmAppServerHighNetworkUsage',
                 AlarmActions=[
                     Ref(self.param_notification_arn),
                     Ref(autoscaling_policy_add)
                 ],
                 Statistic='Average',
                 Period=300,
                 Threshold='10000000',
                 EvaluationPeriods=1,
                 ComparisonOperator='GreaterThanOrEqualToThreshold',
                 MetricName='NetworkOut',
                 Namespace='AWS/EC2',
                 Dimensions=[
                     cw.MetricDimension('metricAutoScalingGroupName',
                                        Name='AutoScalingGroupName',
                                        Value=Ref(autoscaling_group))
                 ]))

#
# DNS name
#
self.add_resource(
    route53.RecordSetType(
        'dnsName',
        Name=Join('.', [
            Ref(self.param_color),
            Ref(self.param_stacktype),
            self.STACK_NAME_PREFIX,
            Ref(self.param_public_hosted_zone_name)
        ]),
        Type='A',
        AliasTarget=route53.AliasTarget(
            GetAtt(app_server_load_balancer,
                   'CanonicalHostedZoneNameID'),
            GetAtt(app_server_load_balancer, 'DNSName')),
        HostedZoneName=Ref(self.param_public_hosted_zone_name)))

self.add_output([
    Output('{}ServerLoadBalancerEndpoint'.format(self.STACK_NAME_PREFIX),
           Description='Application server endpoint',
           Value=GetAtt(app_server_load_balancer, 'DNSName')),
    Output('{}ServerLoadBalancerHostedZoneNameID'.format(
        self.STACK_NAME_PREFIX),
           Description='ID of canonical hosted zone name for ELB',
           Value=GetAtt(app_server_load_balancer,
                        'CanonicalHostedZoneNameID'))
])
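# A minimal alternative sketch to the paired ScalingPolicy/cw.Alarm wiring
# above: a single target-tracking policy keeps average CPU near a set point
# and manages its own CloudWatch alarms. This assumes the same `asg`
# (troposphere.autoscaling) alias and the `autoscaling_group` resource defined
# above, plus a troposphere version that ships TargetTrackingConfiguration;
# the policy name and 30% target are illustrative, matching the scale-up
# threshold used by the CPU alarms above.
self.add_resource(
    asg.ScalingPolicy(
        'scalingPolicyAppServerCpuTarget',
        AutoScalingGroupName=Ref(autoscaling_group),
        PolicyType='TargetTrackingScaling',
        TargetTrackingConfiguration=asg.TargetTrackingConfiguration(
            PredefinedMetricSpecification=asg.PredefinedMetricSpecification(
                PredefinedMetricType='ASGAverageCPUUtilization'),
            # Add/remove instances automatically to hold average CPU near 30%
            TargetValue=30.0)))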
def add_resources(self):
    metadata = {
        "AWS::CloudFormation::Init": {
            "configSets": {
                "wordpress_install": ["install_wordpress"]
            },
            "install_wordpress": {
                "packages": {
                    "apt": {
                        "apache2": [],
                        "php": [],
                        "php-mysql": [],
                        "php7.0": [],
                        "php7.0-mysql": [],
                        "libapache2-mod-php7.0": [],
                        "php7.0-cli": [],
                        "php7.0-cgi": [],
                        "php7.0-gd": [],
                        "mysql-client": [],
                        "sendmail": []
                    }
                },
                "sources": {
                    "/var/www/html": "http://wordpress.org/latest.tar.gz"
                },
                "files": {
                    "/tmp/create-wp-config": {
                        "content": {
                            "Fn::Join": ["", [
                                "#!/bin/bash\n",
                                "cp /var/www/html/wordpress/wp-config-sample.php /var/www/html/wordpress/wp-config.php\n",
                                "sed -i \"s/'database_name_here'/'", Ref(self.DBName), "'/g\" wp-config.php\n",
                                "sed -i \"s/'username_here'/'", Ref(self.DBUser), "'/g\" wp-config.php\n",
                                "sed -i \"s/'password_here'/'", Ref(self.DBPass), "'/g\" wp-config.php\n",
                                "sed -i \"s/'localhost'/'", Ref(self.RDSEndpoint), "'/g\" wp-config.php\n"
                            ]]
                        },
                        "mode": "000500",
                        "owner": "root",
                        "group": "root"
                    }
                },
                "commands": {
                    "01_configure_wordpress": {
                        "command": "/tmp/create-wp-config",
                        "cwd": "/var/www/html/wordpress"
                    }
                }
            }
        }
    }
    self.WaitHandle = self.template.add_resource(cloudformation.WaitConditionHandle(
        "WaitHandle",
    ))
    self.WaitCondition = self.template.add_resource(cloudformation.WaitCondition(
        "WaitCondition",
        Handle=Ref(self.WaitHandle),
        Timeout="600",
        DependsOn="WebServerAutoScalingGroup",
    ))
    self.WebServerLaunchConfiguration = self.template.add_resource(autoscaling.LaunchConfiguration(
        "WebServerLaunchConfiguration",
        Metadata=metadata,
        UserData=Base64(Join("", [
            "#!/bin/bash -x\n",
            "apt-get update\n",
            "apt-get install python-pip nfs-common -y \n",
            "mkdir -p /var/www/html/\n",
            "EC2_AZ=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)\n",
            "echo \"$EC2_AZ.", Ref(self.FileSystemID), ".efs.", Ref("AWS::Region"),
            ".amazonaws.com:/ /var/www/html/ nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 0 0\" >> /etc/fstab\n",
            "mount -a\n",
            "pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
            # "exec > /tmp/userdata.log 2>&1\n",
            "/usr/local/bin/cfn-init -v --stack ", Ref("AWS::StackName"),
            " --resource WebServerLaunchConfiguration ",
            " --configsets wordpress_install ",
            " --region ", Ref("AWS::Region"), "\n",
            "/bin/mv /var/www/html/wordpress/* /var/www/html/\n",
            "/bin/rm -f /var/www/html/index.html\n",
            "/bin/rm -rf /var/www/html/wordpress/\n",
            "chown www-data:www-data /var/www/html/* -R\n",
            "/usr/sbin/service apache2 restart\n",
            "/usr/bin/curl -O https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar\n",
            "/bin/chmod +x wp-cli.phar\n",
            "/bin/mv wp-cli.phar /usr/local/bin/wp\n",
            "cd /var/www/html/\n",
            "if ! $(sudo -u www-data /usr/local/bin/wp core is-installed); then\n",
            "sudo -u www-data /usr/local/bin/wp core install ",
            "--url='", Ref(self.Hostname), ".", Ref(self.Domain), "' ",
            "--title='Cloudreach Meetup - ", Ref(self.Environment), "' ",
            "--admin_user='******' ",
            "--admin_password='******' ",
            "--admin_email='*****@*****.**'\n",
            "wget https://s3-eu-west-1.amazonaws.com/sceptre-meetup-munich/header.jpg -O /var/www/html/wp-content/themes/twentyseventeen/assets/images/header.jpg\n",
            "chown www-data:www-data /var/www/html/wp-content/themes/twentyseventeen/assets/images/header.jpg\n",
            "fi\n",
            "/usr/local/bin/cfn-signal -e $? --stack ", Ref("AWS::StackName"),
            " -r \"Webserver setup complete\" '", Ref(self.WaitHandle), "'\n"
        ])),
        ImageId=FindInMap("AWSRegion2AMI", Ref("AWS::Region"), "AMI"),
        KeyName=Ref(self.KeyName),
        SecurityGroups=[Ref(self.WebSecurityGroup)],
        InstanceType=Ref(self.InstanceType),
        AssociatePublicIpAddress=True,
    ))
    self.WebServerAutoScalingGroup = self.template.add_resource(autoscaling.AutoScalingGroup(
        "WebServerAutoScalingGroup",
        MinSize=Ref(self.WebServerCapacity),
        DesiredCapacity=Ref(self.WebServerCapacity),
        MaxSize=Ref(self.WebServerCapacity),
        VPCZoneIdentifier=[Ref(self.Subnet1), Ref(self.Subnet2)],
        AvailabilityZones=[Ref(self.AvailabilityZone1), Ref(self.AvailabilityZone2)],
        Tags=autoscaling.Tags(
            Name=Join("-", [Ref(self.Project), "web", "asg"]),
            Environment=Ref(self.Environment),
            Project=Ref(self.Project),
        ),
        LoadBalancerNames=[Ref(self.ElasticLoadBalancer)],
        LaunchConfigurationName=Ref(self.WebServerLaunchConfiguration),
    ))
    self.WebServerScaleUpPolicy = self.template.add_resource(autoscaling.ScalingPolicy(
        "WebServerScaleUpPolicy",
        ScalingAdjustment="1",
        Cooldown="60",
        AutoScalingGroupName=Ref(self.WebServerAutoScalingGroup),
        AdjustmentType="ChangeInCapacity",
    ))
    self.WebServerScaleDownPolicy = self.template.add_resource(autoscaling.ScalingPolicy(
        "WebServerScaleDownPolicy",
        ScalingAdjustment="-1",
        Cooldown="60",
        AutoScalingGroupName=Ref(self.WebServerAutoScalingGroup),
        AdjustmentType="ChangeInCapacity",
    ))
    # Note: the low alarm (< 70%) and high alarm (> 50%) overlap between 50%
    # and 70% CPU, where both policies can fire and the group may thrash;
    # widening the gap between the two thresholds avoids this.
    self.CPUAlarmLow = self.template.add_resource(cloudwatch.Alarm(
        "CPUAlarmLow",
        EvaluationPeriods="2",
        Dimensions=[
            cloudwatch.MetricDimension(
                Name="AutoScalingGroupName",
                Value=Ref(self.WebServerAutoScalingGroup)
            ),
        ],
        AlarmActions=[Ref(self.WebServerScaleDownPolicy)],
        AlarmDescription="Scale-down if CPU < 70% for 2 minutes",
        Namespace="AWS/EC2",
        Period="60",
        ComparisonOperator="LessThanThreshold",
        Statistic="Average",
        Threshold="70",
        MetricName="CPUUtilization",
    ))
    self.CPUAlarmHigh = self.template.add_resource(cloudwatch.Alarm(
        "CPUAlarmHigh",
        EvaluationPeriods="2",
        Dimensions=[
            cloudwatch.MetricDimension(
                Name="AutoScalingGroupName",
                Value=Ref(self.WebServerAutoScalingGroup)
            ),
        ],
        AlarmActions=[Ref(self.WebServerScaleUpPolicy)],
        AlarmDescription="Scale-up if CPU > 50% for 2 minutes",
        Namespace="AWS/EC2",
        Period="60",
        ComparisonOperator="GreaterThanThreshold",
        Statistic="Average",
        Threshold="50",
        MetricName="CPUUtilization",
    ))
)), ], KeyName=Ref(key_name), ) autoscaling_group = autoscaling.AutoScalingGroup( autoscaling_group_name, template=template, VPCZoneIdentifier=[Ref(private_subnet_a), Ref(private_subnet_b)], MinSize=desired_container_instances, MaxSize=max_container_instances, DesiredCapacity=desired_container_instances, LaunchConfigurationName=Ref(container_instance_configuration), LoadBalancerNames=[Ref(load_balancer)], HealthCheckType="EC2", HealthCheckGracePeriod=300, Tags=[ { "Key": "Name", "Value": Join("-", [Ref(AWS_STACK_NAME), "web_worker"]), "PropagateAtLaunch": True, }, { "Key": "aws-web-stacks:role", "Value": "worker", "PropagateAtLaunch": True, }, ], )
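# The raw tag dicts above are valid, but troposphere also ships an
# autoscaling.Tags helper (used by the WordPress snippet above) that emits the
# same Key/Value/PropagateAtLaunch structure with PropagateAtLaunch defaulting
# to true. A small equivalence sketch, assuming the same imports as the
# surrounding snippet; the ** dict form is needed for the colon in the key:
from troposphere import AWS_STACK_NAME, Join, Ref, autoscaling

worker_tags = autoscaling.Tags(
    Name=Join("-", [Ref(AWS_STACK_NAME), "web_worker"]),
    **{"aws-web-stacks:role": "worker"}
)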
# Launch config launch_config = template.add_resource(auto.LaunchConfiguration('MyLaunchConfig', ImageId = FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), InstanceType = Ref(instance_type), KeyName = Ref(keyname), SecurityGroups = [Ref(sg)], IamInstanceProfile = Ref(instance_profile) )) # Autoscaling Group asg = template.add_resource(auto.AutoScalingGroup('MyASG', AvailabilityZones = GetAZs(''), Cooldown = 120, LaunchConfigurationName = Ref(launch_config), MaxSize = '1', MinSize = '1', Tags = [ {'Key': 'Name', 'Value': 'Eureka', 'PropagateAtLaunch': 'true'} ] )) # Add generic output template.add_output(Output( 'Eureka', Description = 'Please go to the EC2 page in the AWS Web Console', Value = 'Look for the instance named Eureka and assign it an Elastic IP' )) # Print template print(template.to_json())
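# With MinSize and MaxSize pinned to '1' as in 'MyASG' above, changing the
# launch configuration does not replace the running instance by default. A
# sketch of one way to opt in to replacement on stack update, reusing the
# `auto` alias, `launch_config`, and `GetAZs` from the snippet above together
# with the troposphere.policies module used elsewhere in this collection:
from troposphere import policies

asg_replacing = template.add_resource(auto.AutoScalingGroup(
    'MyASGReplacingOnUpdate',
    AvailabilityZones=GetAZs(''),
    Cooldown=120,
    LaunchConfigurationName=Ref(launch_config),
    MaxSize='1',
    MinSize='1',
    # Replace the whole group (and its single instance) on qualifying updates
    UpdatePolicy=policies.UpdatePolicy(
        AutoScalingReplacingUpdate=policies.AutoScalingReplacingUpdate(
            WillReplace=True)),
))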
def configure(self): """ Returns a Pritunl template """ self.defaults = {'instance_type': 't3.large'} self.service = 'pritunl' self.set_description('Sets up Pritunl servers') self.get_default_security_groups() self.get_standard_parameters() self.get_standard_policies() _vpn_config = constants.ENVIRONMENTS[self.env]['pritunl'] _global_config = constants.ENVIRONMENTS[self.env] _bootstrap_mode = _vpn_config.get('bootstrap_mode', False) _bootstrap_ami = get_latest_ami_id( self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2', 'amazon') _ivy_ami = get_latest_ami_id(self.region, 'ivy-base', _global_config.get('ami_owner', 'self')) self.ami = self.add_parameter( Parameter('AMI', Type='String', Description='AMI ID for instances', Default=_bootstrap_ami if _bootstrap_mode else _ivy_ami)) _public_dns = _vpn_config['public_dns'] _vpn_name = '{}Pritunl'.format(self.env) # We want the preferred subnet only. _vpn_subnet = self.get_subnets('public', _preferred_only=True)[0] # Add our security group _vpn_security_group = self.add_resource( ec2.SecurityGroup( '{}SecurityGroup'.format(_vpn_name), VpcId=self.vpc_id, GroupDescription='Security Group for Pritunl {}'.format( _vpn_name), SecurityGroupIngress=[ { "IpProtocol": "icmp", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0" }, # Ping { "IpProtocol": "tcp", "FromPort": "80", "ToPort": "80", "CidrIp": "0.0.0.0/0" }, # HTTP { "IpProtocol": "tcp", "FromPort": "443", "ToPort": "443", "CidrIp": "0.0.0.0/0" }, # HTTPS { "IpProtocol": "tcp", "FromPort": "22", "ToPort": "22", "CidrIp": "0.0.0.0/0" }, # SSH { "IpProtocol": "udp", "FromPort": "10000", "ToPort": "20000", "CidrIp": "0.0.0.0/0" }, # HTTPS/OVPN { "IpProtocol": "tcp", "FromPort": "27017", "ToPort": "27017", "CidrIp": constants.SUPERNET }, # mongodb master { "IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": constants.SUPERNET } # Replies from local VPC ], SecurityGroupEgress=[{ "IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0" }])) # Add EBS volume if local mongo used _data_volume = None if _vpn_config.get('local_mongo', False): self.add_iam_policy( iam.Policy( PolicyName='AttachVolume', PolicyDocument={ 'Statement': [{ 'Effect': 'Allow', 'Resource': '*', 'Action': [ 'ec2:AttachVolume', 'ec2:DeleteSnapshot', 'ec2:DescribeTags', 'ec2:DescribeVolumeAttribute', 'ec2:DescribeVolumeStatus', 'ec2:DescribeVolumes', 'ec2:DetachVolume' ] }] })) _data_volume = ec2.Volume( '{}DataVolume'.format(_vpn_name), Size=_vpn_config.get('data_volume_size', 20), VolumeType='gp2', AvailabilityZone=_vpn_subnet['AvailabilityZone'], DeletionPolicy='Retain', Tags=self.get_tags(service_override=self.service, role_override=_vpn_name) + [ec2.Tag('Name', _vpn_name + "-datavol")]) self.add_resource(_data_volume) # Add the elastic IP and the ENI for it, then attach it. 
_vpn_eip = self.add_resource( ec2.EIP('{}InstanceEIP'.format(_vpn_name), Domain='vpc')) _vpn_eni = self.add_resource( ec2.NetworkInterface( '{}InstanceENI'.format(_vpn_name), SubnetId=_vpn_subnet['SubnetId'], Description='ENI for {}'.format(_vpn_name), GroupSet=[Ref(_vpn_security_group)] + self.security_groups, SourceDestCheck=False, Tags=self.get_tags(service_override=self.service, role_override=_vpn_name))) self.get_eni_policies() self.add_resource( ec2.EIPAssociation('{}AssociateVPNInstanceENI'.format(_vpn_name), AllocationId=GetAtt(_vpn_eip, "AllocationId"), NetworkInterfaceId=Ref(_vpn_eni))) # Add a route53 DNS name if self.get_partition() != 'aws-us-gov': self.add_resource( route53.RecordSetGroup('{}Route53'.format(_vpn_name), HostedZoneName=constants.ENVIRONMENTS[ self.env]['route53_zone'], RecordSets=[ route53.RecordSet( Name=_public_dns, ResourceRecords=[Ref(_vpn_eip)], Type='A', TTL=600) ])) # Get all route tables in the VPC _vpc_route_tables = self.ec2_conn.describe_route_tables( Filters=[{ 'Name': 'vpc-id', 'Values': [self.vpc_id] }])['RouteTables'] # Set up the routing table for the VPC # Allow for changing client subnets in constants.py for client_subnet in _vpn_config['client_subnets']: for route_table in _vpc_route_tables: self.add_resource( ec2.Route('{}Route{}{}'.format( _vpn_name, client_subnet.translate({ ord("."): "", ord("/"): "" }), route_table['RouteTableId'].replace('-', '')), RouteTableId=route_table['RouteTableId'], DestinationCidrBlock=client_subnet, NetworkInterfaceId=Ref(_vpn_eni))) _mongodb = _vpn_config.get('mongodb') _server_id = _vpn_config['server_id'] _userdata_template = self.get_cloudinit_template( _tpl_name="pritunl_bootstrap" if _bootstrap_mode else None, replacements=(('__PROMPT_COLOR__', self.prompt_color()), ('__SERVER_ID__', _server_id), ('__SERVICE__', self.service), ('__MONGODB__', _mongodb if _mongodb else ''))) _userdata = Sub( _userdata_template.replace( '${', '${!') # Replace bash brackets with CFN escaped style .replace( '{#', '${' ), # Replace rain-style CFN escapes with proper CFN brackets { 'CFN_ENI_ID': Ref(_vpn_eni), 'CFN_EBS_ID': Ref(_data_volume) if _data_volume else '' }) _vpn_launch_configuration = self.add_resource( autoscaling.LaunchConfiguration( '{}LaunchConfiguration'.format(_vpn_name), AssociatePublicIpAddress=True, KeyName=Ref(self.keypair_name), ImageId=Ref(self.ami), InstanceType=Ref(self.instance_type), InstanceMonitoring=False, IamInstanceProfile=Ref(self.instance_profile), UserData=Base64(_userdata))) self.add_resource( autoscaling.AutoScalingGroup( '{}ASGroup'.format(_vpn_name), AvailabilityZones=[_vpn_subnet['AvailabilityZone']], HealthCheckType='EC2', LaunchConfigurationName=Ref(_vpn_launch_configuration), MinSize=0, MaxSize=1, VPCZoneIdentifier=[_vpn_subnet['SubnetId']], Tags=self.get_autoscaling_tags(service_override=self.service, role_override=_vpn_name) + [autoscaling.Tag('Name', _vpn_name, True)]))
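# A worked example (toy strings, not from the real cloud-init template) of the
# double .replace() escaping applied to _userdata_template above: bash's own
# ${VAR} expansions are escaped to ${!VAR}, which Fn::Sub renders back as a
# literal ${VAR} at deploy time, while rain-style {#NAME} markers become real
# Sub variables.
_toy = 'ip link set ${IFACE} up\nattach-volume {#CFN_EBS_ID}\n'
_escaped = _toy.replace('${', '${!').replace('{#', '${')
assert _escaped == 'ip link set ${!IFACE} up\nattach-volume ${CFN_EBS_ID}\n'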
DeploymentConfiguration=ecs.DeploymentConfiguration( MinimumHealthyPercent=0, MaximumPercent=100 ), DesiredCount=Ref(scheduler_task_count), LaunchType='EC2' ) ) autoscaling_group = template.add_resource( autoscaling.AutoScalingGroup( 'AutoScalingGroup', DesiredCapacity=Ref(api_instance_count), MinSize=Ref(api_instance_count), MaxSize=Ref(api_instance_count), LaunchTemplate=autoscaling.LaunchTemplateSpecification( LaunchTemplateId=Ref(launch_template), Version=GetAtt(launch_template, 'LatestVersionNumber') ), AvailabilityZones=['eu-west-1a', 'eu-west-1b', 'eu-west-1c'] ) ) # Create the users. api_user = template.add_resource( iam.User( 'ApiUser', UserName=Ref(api_user_name), Policies=[ iam.Policy( PolicyName='ApiUserPolicy',
def main(): t = Template("A template to create a load balanced autoscaled Web flask deployment using ansible.") addMapping(t) ### VPC CONFIGURATION ### vpc = ec2.VPC( "MainVPC", CidrBlock="10.1.0.0/16" ) t.add_resource(vpc) vpc_id = Ref(vpc) subnet_1 = ec2.Subnet( "WebAppSubnet1", t, AvailabilityZone="us-east-1a", CidrBlock="10.1.0.0/24", MapPublicIpOnLaunch=True, VpcId=vpc_id, ) subnet_1_id = Ref(subnet_1) subnet_2 = ec2.Subnet( "WebAppSubnet2", t, AvailabilityZone="us-east-1b", CidrBlock="10.1.1.0/24", MapPublicIpOnLaunch=True, VpcId=vpc_id, ) subnet_2_id = Ref(subnet_2) ### NETWORKING ### igw = ec2.InternetGateway("internetGateway", t) gateway_to_internet = ec2.VPCGatewayAttachment( "GatewayToInternet", t, VpcId=vpc_id, InternetGatewayId=Ref(igw) ) route_table = ec2.RouteTable( "subnetRouteTable", t, VpcId=vpc_id ) route_table_id = Ref(route_table) internet_route = ec2.Route( "routeToInternet", t, DependsOn=gateway_to_internet, DestinationCidrBlock="0.0.0.0/0", GatewayId=Ref(igw), RouteTableId=route_table_id ) subnet_1_route_assoc = ec2.SubnetRouteTableAssociation( "Subnet1RouteAssociation", t, RouteTableId=route_table_id, SubnetId=Ref(subnet_1) ) subnet_2_route_assoc = ec2.SubnetRouteTableAssociation( "Subnet2RouteAssociation", t, RouteTableId=route_table_id, SubnetId=Ref(subnet_2) ) http_ingress = { "CidrIp": "0.0.0.0/0", "Description": "Allow HTTP traffic in from internet.", "IpProtocol": "tcp", "FromPort": 80, "ToPort": 80, } ssh_ingress = { "CidrIp": "0.0.0.0/0", "Description": "Allow SSH traffic in from internet.", "IpProtocol": "tcp", "FromPort": 22, "ToPort": 22, } elb_sg = ec2.SecurityGroup( "elbSecurityGroup", t, GroupName="WebGroup", GroupDescription="Allow web traffic in from internet to ELB", VpcId=vpc_id, SecurityGroupIngress=[ http_ingress ]) ssh_sg = ec2.SecurityGroup( "sshSecurityGroup", t, GroupName="SSHGroup", GroupDescription="Allow SSH traffic in from internet", VpcId=vpc_id, SecurityGroupIngress=[ ssh_ingress ] ) elb_sg_id = Ref(elb_sg) ssh_sg_id = Ref(ssh_sg) autoscale_ingress = { "SourceSecurityGroupId": elb_sg_id, "Description": "Allow web traffic in from ELB", "IpProtocol": "tcp", "FromPort": 80, "ToPort": 80 } autoscale_sg = ec2.SecurityGroup( "WebAutoscaleSG", t, GroupName="AutoscaleGroup", GroupDescription="Allow web traffic in from elb on port 80", VpcId=vpc_id, SecurityGroupIngress=[ autoscale_ingress ] ) autoscale_sg_id = Ref(autoscale_sg) # BUCKETS app_bucket = s3.Bucket( "CodeDeployApplicationBucket", t, ) ### LOAD BALANCING ### Web_elb = elb.LoadBalancer( "WebElb", t, Name="WebElb", # TODO: Fix for name conflict Subnets=[subnet_1_id, subnet_2_id], SecurityGroups=[elb_sg_id] ) Web_target_group = elb.TargetGroup( "WebTargetGroup", t, DependsOn=Web_elb, HealthCheckPath="/health", HealthCheckPort=80, HealthCheckProtocol="HTTP", Matcher=elb.Matcher(HttpCode="200"), Name="NginxTargetGroup", Port=80, Protocol="HTTP", VpcId=vpc_id ) Web_listener = elb.Listener( "WebListener", t, LoadBalancerArn=Ref(Web_elb), DefaultActions=[ elb.Action("forwardAction", TargetGroupArn=Ref(Web_target_group), Type="forward" ) ], Port=80, Protocol="HTTP" ) ### AUTOSCALING ### # Everything after sudo -u ubuntu is one command # The sudo command is required to properly set file permissions when # running the ansible script as it assumes running from non root user lc_user_data = Base64(Join("\n", [ "#!/bin/bash", "apt-add-repository -y ppa:ansible/ansible", "apt-get update && sudo apt-get -y upgrade", "apt-get -y install git", "apt-get -y install ansible", "cd /home/ubuntu/", "sudo -H 
-u ubuntu bash -c '" "export LC_ALL=C.UTF-8 && " "export LANG=C.UTF-8 && " "ansible-pull -U https://github.com/DameonSmith/aws-meetup-ansible.git --extra-vars \"user=ubuntu\"'" ])) web_instance_role = iam.Role( "webInstanceCodeDeployRole", t, AssumeRolePolicyDocument={ 'Statement': [{ 'Effect': 'Allow', 'Principal': { 'Service': 'ec2.amazonaws.com' }, 'Action': 'sts:AssumeRole' }] }, Policies=[ iam.Policy( PolicyName="CodeDeployS3Policy", PolicyDocument=aws.Policy( Version='2012-10-17', Statement=[ aws.Statement( Sid='CodeDeployS3', Effect=aws.Allow, Action=[ aws_s3.PutObject, aws_s3.GetObject, aws_s3.GetObjectVersion, aws_s3.DeleteObject, aws_s3.ListObjects, aws_s3.ListBucket, aws_s3.ListBucketVersions, aws_s3.ListAllMyBuckets, aws_s3.ListMultipartUploadParts, aws_s3.ListBucketMultipartUploads, aws_s3.ListBucketByTags, ], Resource=[ GetAtt(app_bucket, 'Arn'), Join('', [ GetAtt(app_bucket, 'Arn'), '/*', ]), "arn:aws:s3:::aws-codedeploy-us-east-2/*", "arn:aws:s3:::aws-codedeploy-us-east-1/*", "arn:aws:s3:::aws-codedeploy-us-west-1/*", "arn:aws:s3:::aws-codedeploy-us-west-2/*", "arn:aws:s3:::aws-codedeploy-ca-central-1/*", "arn:aws:s3:::aws-codedeploy-eu-west-1/*", "arn:aws:s3:::aws-codedeploy-eu-west-2/*", "arn:aws:s3:::aws-codedeploy-eu-west-3/*", "arn:aws:s3:::aws-codedeploy-eu-central-1/*", "arn:aws:s3:::aws-codedeploy-ap-northeast-1/*", "arn:aws:s3:::aws-codedeploy-ap-northeast-2/*", "arn:aws:s3:::aws-codedeploy-ap-southeast-1/*", "arn:aws:s3:::aws-codedeploy-ap-southeast-2/*", "arn:aws:s3:::aws-codedeploy-ap-south-1/*", "arn:aws:s3:::aws-codedeploy-sa-east-1/*", ] ) ] ) ) ] ) web_instance_profile = iam.InstanceProfile( "webInstanceProfile", t, Path='/', Roles=[Ref(web_instance_role)], ) Web_launch_config = autoscaling.LaunchConfiguration( "webLaunchConfig", t, ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), # TODO: Remove magic string SecurityGroups=[ssh_sg_id, autoscale_sg_id], IamInstanceProfile=Ref(web_instance_profile), InstanceType="t2.micro", BlockDeviceMappings= [{ "DeviceName": "/dev/sdk", "Ebs": {"VolumeSize": "10"} }], UserData= lc_user_data, KeyName="advanced-cfn", ) Web_autoscaler = autoscaling.AutoScalingGroup( "WebAutoScaler", t, LaunchConfigurationName=Ref(Web_launch_config), MinSize="2", # TODO: Change to parameter MaxSize="2", VPCZoneIdentifier=[subnet_2_id, subnet_1_id], TargetGroupARNs= [Ref(Web_target_group)] ) t.add_output([ Output( "ALBDNS", Description="The DNS name for the application load balancer.", Value=GetAtt(Web_elb, "DNSName") ) ]) # DEVTOOLS CONFIG codebuild_service_role = iam.Role( "CMSCodeBuildServiceRole", t, AssumeRolePolicyDocument={ 'Statement': [{ 'Effect': 'Allow', 'Principal': { 'Service': ['codebuild.amazonaws.com'] }, 'Action': ['sts:AssumeRole'] }] }, Policies=[ iam.Policy( PolicyName="CloudWatchLogsPolicy", PolicyDocument=aws.Policy( Version="2012-10-17", Statement=[ aws.Statement( Sid='logs', Effect=aws.Allow, Action=[ aws_logs.CreateLogGroup, aws_logs.CreateLogStream, aws_logs.PutLogEvents ], Resource=['*'] ) ] ) ), iam.Policy( PolicyName="s3AccessPolicy", PolicyDocument=aws.Policy( Version="2012-10-17", Statement=[ aws.Statement( Sid='codebuilder', Effect=aws.Allow, Action=[ aws_s3.PutObject, aws_s3.GetObject, aws_s3.GetObjectVersion, aws_s3.DeleteObject ], Resource=[ GetAtt(app_bucket, 'Arn'), Join('', [ GetAtt(app_bucket, 'Arn'), '/*', ]) ] ) ] ) ) ] ) github_repo = Parameter( "GithubRepoLink", Description="Name of the repository you wish to connect to codebuild.", Type="String" ) artifact_key = Parameter( "ArtifactKey", 
Description="The key for the artifact that codebuild creates.", Type="String" ) t.add_parameter(github_repo) t.add_parameter(artifact_key) cms_code_build_project = codebuild.Project( "CMSBuild", t, Name="CMS-Build", Artifacts=codebuild.Artifacts( Location=Ref(app_bucket), Name=Ref(artifact_key), NamespaceType="BUILD_ID", Type="S3", Packaging="ZIP" ), Description="Code build for CMS", Environment=codebuild.Environment( ComputeType="BUILD_GENERAL1_SMALL", Image="aws/codebuild/python:3.6.5", Type="LINUX_CONTAINER", ), ServiceRole=GetAtt(codebuild_service_role, 'Arn'), Source=codebuild.Source( "CMSSourceCode", Auth=codebuild.SourceAuth( "GitHubAuth", Type="OAUTH" ), Location=Ref(github_repo), Type="GITHUB" ), Triggers=codebuild.ProjectTriggers( Webhook=True ) ) codedeploy_service_role = iam.Role( "CMSDeploymentGroupServiceRole", t, AssumeRolePolicyDocument={ 'Statement': [{ 'Effect': 'Allow', 'Principal': { 'Service': ['codedeploy.amazonaws.com'] }, 'Action': ['sts:AssumeRole'] }] }, Policies=[ iam.Policy( PolicyName="CloudWatchLogsPolicy", PolicyDocument=aws.Policy( Version="2012-10-17", Statement=[ aws.Statement( Sid='logs', Effect=aws.Allow, Action=[ aws_logs.CreateLogGroup, aws_logs.CreateLogStream, aws_logs.PutLogEvents ], Resource=['*'] ) ] ) ), iam.Policy( PolicyName="s3AccessPolicy", PolicyDocument=aws.Policy( Version="2012-10-17", Statement=[ aws.Statement( Sid='codebuilder', Effect=aws.Allow, Action=[ aws_s3.PutObject, aws_s3.GetObject, aws_s3.GetObjectVersion, aws_s3.DeleteObject ], Resource=[ GetAtt(app_bucket, 'Arn'), Join('', [ GetAtt(app_bucket, 'Arn'), '/*' ]) ] ) ] ) ), iam.Policy( PolicyName="autoscalingAccess", PolicyDocument=aws.Policy( Version="2012-10-17", Statement=[ aws.Statement( Sid='codebuilder', Effect=aws.Allow, Action=[ aws.Action('autoscaling', '*'), aws.Action('elasticloadbalancing', '*') ], Resource=[ '*' ] ) ] ) ) ] ) cms_codedeploy_application = codedeploy.Application( "CMSCodeDeployApplication", t, ) cms_deployment_group = codedeploy.DeploymentGroup( "CMSDeploymentGroup", t, DependsOn=[cms_codedeploy_application], ApplicationName=Ref(cms_codedeploy_application), AutoScalingGroups=[Ref(Web_autoscaler)], LoadBalancerInfo=codedeploy.LoadBalancerInfo( "CodeDeployLBInfo", TargetGroupInfoList=[ codedeploy.TargetGroupInfoList( "WebTargetGroup", Name=GetAtt(Web_target_group, "TargetGroupName") ) ] ), ServiceRoleArn=GetAtt(codedeploy_service_role, 'Arn') ) print(t.to_yaml())
' - /kube2consul\n', ' - -consul-agent=http://127.0.0.1:8500\n', ' - -kube_master_url=http://127.0.0.1:8080\n', ])), )) AUTO_SCALING_GROUP = TEMPLATE.add_resource( autoscaling.AutoScalingGroup( 'AutoScalingGroup', DesiredCapacity='1', Tags=[autoscaling.Tag('Name', 'Kubernetes Master', True)], LaunchConfigurationName=Ref(LAUNCH_CONFIGURATION), LoadBalancerNames=[ Ref(API_SERVER_LOAD_BALANCER), Ref(CONSUL_HTTP_API_LOAD_BALANCER) ], MinSize='1', MaxSize='3', VPCZoneIdentifier=[Ref(SUBNET)], UpdatePolicy=policies.UpdatePolicy( AutoScalingRollingUpdate=policies.AutoScalingRollingUpdate( MinInstancesInService='1', MaxBatchSize='1', ), ), )) TEMPLATE.add_output( Output( 'APIServer', Value=Join('', ['https://', GetAtt(API_SERVER_LOAD_BALANCER, 'DNSName')]),