def configure(self):
    """
    Returns a vpn template
    """
    self.defaults = {'instance_type': 't2.small'}
    self.service = 'vpn'
    self.add_description('Sets up VPNs')
    self.get_eni_policies()
    self.get_default_security_groups()
    self.get_standard_parameters()
    self.get_standard_policies()
    self.ami = self.add_parameter(
        Parameter('AMI',
                  Type='String',
                  Description='AMI ID for instances',
                  Default=get_latest_ami_id(
                      self.region,
                      'amzn2-ami-hvm-2.0.????????-x86_64-gp2',
                      'amazon')))

    # Custom config per VPN
    for vpn in constants.ENVIRONMENTS[self.env]['vpn']:
        if not vpn['active']:
            continue

        _vpn_name = vpn['name']
        _vpn_subnet = self.get_subnets('public', _preferred_only=True)[0]
        _role = 'vpn-{}'.format(_vpn_name)

        # Allow IPsec (ESP/AH plus IKE and NAT-T) to/from the remote peer,
        # ICMP from anywhere, and all traffic within our supernet.
        _vpn_security_group = self.add_resource(
            ec2.SecurityGroup(
                self.cfn_name('VPNSecurityGroup', _vpn_name),
                VpcId=self.vpc_id,
                GroupDescription='Security Group for VPN {}'.format(_vpn_name),
                SecurityGroupIngress=[
                    {"IpProtocol": "50", "FromPort": "-1", "ToPort": "-1", "CidrIp": vpn['remote_ip'] + '/32'},  # ESP
                    {"IpProtocol": "51", "FromPort": "-1", "ToPort": "-1", "CidrIp": vpn['remote_ip'] + '/32'},  # AH
                    {"IpProtocol": "udp", "FromPort": "500", "ToPort": "500", "CidrIp": vpn['remote_ip'] + '/32'},  # IKE
                    {"IpProtocol": "udp", "FromPort": "4500", "ToPort": "4500", "CidrIp": vpn['remote_ip'] + '/32'},  # IPsec NAT-T
                    {"IpProtocol": "icmp", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0"},
                    {"IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": constants.SUPERNET}
                ],
                SecurityGroupEgress=[
                    {"IpProtocol": "50", "FromPort": "-1", "ToPort": "-1", "CidrIp": vpn['remote_ip'] + '/32'},  # ESP
                    {"IpProtocol": "51", "FromPort": "-1", "ToPort": "-1", "CidrIp": vpn['remote_ip'] + '/32'},  # AH
                    {"IpProtocol": "udp", "FromPort": "500", "ToPort": "500", "CidrIp": vpn['remote_ip'] + '/32'},  # IKE
                    {"IpProtocol": "udp", "FromPort": "4500", "ToPort": "4500", "CidrIp": vpn['remote_ip'] + '/32'},  # IPsec NAT-T
                    {"IpProtocol": "tcp", "FromPort": "80", "ToPort": "80", "CidrIp": "0.0.0.0/0"},  # HTTP
                    {"IpProtocol": "tcp", "FromPort": "443", "ToPort": "443", "CidrIp": "0.0.0.0/0"},  # HTTPS
                    {"IpProtocol": "udp", "FromPort": "123", "ToPort": "123", "CidrIp": "0.0.0.0/0"},  # NTP
                    {"IpProtocol": "icmp", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0"},
                    {"IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": constants.SUPERNET}
                ]))

        _vpn_eip = self.add_resource(
            ec2.EIP(self.cfn_name('VPNInstanceEIP', _vpn_name), Domain='vpc'))
        _vpn_eni = self.add_resource(
            ec2.NetworkInterface(
                self.cfn_name('VPNInstanceENI', _vpn_name),
                SubnetId=_vpn_subnet['SubnetId'],
                Description='ENI for VPN - {}'.format(_vpn_name),
                GroupSet=[Ref(_vpn_security_group)] + self.security_groups,
                SourceDestCheck=False,  # this instance forwards traffic for other hosts
                Tags=self.get_tags(role_override=_role)))

        self.add_resource(
            ec2.EIPAssociation(
                self.cfn_name('AssociateVPNInstanceENI', _vpn_name),
                AllocationId=GetAtt(_vpn_eip, "AllocationId"),
                NetworkInterfaceId=Ref(_vpn_eni)))

        # Set up Routes from all VPC subnets to the ENI
        _vpc_route_tables = self.ec2_conn.describe_route_tables(
            Filters=[{'Name': 'vpc-id', 'Values': [self.vpc_id]}])['RouteTables']

        _local_subnets = iter(
            map(lambda x: constants.ENVIRONMENTS[x]['vpc']['cidrblock'],
                filter(lambda z: z in vpn.get('local_envs', []),
                       constants.ENVIRONMENTS.keys())))
        _local_subnets = list(
            itertools.chain(_local_subnets, [self.vpc_metadata['cidrblock'], ]))

        # append remote vpc subnets
        _remote_subnets = iter(
            map(lambda x: constants.ENVIRONMENTS[x]['vpc']['cidrblock'],
                filter(lambda z: z in vpn.get('remote_envs', []),
                       constants.ENVIRONMENTS.keys())))
        _remote_subnets = list(
            itertools.chain(_remote_subnets, vpn.get('remote_subnets', [])))

        for remote_subnet in _remote_subnets:
            for route_table in _vpc_route_tables:
                self.add_resource(
                    ec2.Route(
                        self.cfn_name(_vpn_name, "VPNRoute", remote_subnet,
                                      route_table['RouteTableId']),
                        RouteTableId=route_table['RouteTableId'],
                        DestinationCidrBlock=remote_subnet,
                        NetworkInterfaceId=Ref(_vpn_eni)))

        _user_data_template = self.get_cloudinit_template(replacements=(
            ('__PROMPT_COLOR__', self.prompt_color()),
            ('__LOCAL_SUBNETS__', ','.join(sorted(_local_subnets))),
            ('__REMOTE_IP__', vpn['remote_ip']),
            ('__REMOTE_SUBNETS__', ','.join(sorted(_remote_subnets))),
            ('__SECRET__', vpn['secret']),
            ('__IKE__', vpn.get('ike', 'aes256-sha1-modp1536')),
            ('__IKE_LIFETIME__', vpn.get('ikelifetime', '28800s')),
            ('__ESP__', vpn.get('esp', 'aes256-sha1')),
            ('__KEYLIFE__', vpn.get('keylife', '1800s')),
            ('__IPTABLES_RULES__', '\n'.join(vpn.get('iptables_rules', []))),
            ('__SERVICE__', self.service),
            ('__VPN_NAME__', _vpn_name),
            ('__TAG__', _vpn_name.lower()),
            ('__VPC_ID__', self.vpc_id)))

        _user_data = Sub(
            _user_data_template
            .replace('${', '${!')  # Replace bash brackets with CFN escaped style
            .replace('{#', '${'),  # Replace rain-style CFN escapes with proper CFN brackets
            {
                'CFN_EIP_ADDR': Ref(_vpn_eip),
                'CFN_ENI_ID': Ref(_vpn_eni),
            })

        _vpn_launch_configuration = self.add_resource(
            autoscaling.LaunchConfiguration(
                self.cfn_name('VPNLaunchConfiguration', _vpn_name),
                AssociatePublicIpAddress=True,
                KeyName=Ref(self.keypair_name),
                ImageId=Ref(self.ami),
                InstanceType=Ref(self.instance_type),
                InstanceMonitoring=False,
                IamInstanceProfile=Ref(self.instance_profile),
                UserData=Base64(_user_data)))

        self.add_resource(
            autoscaling.AutoScalingGroup(
                self.cfn_name('VPNASGroup', _vpn_name),
                AvailabilityZones=[_vpn_subnet['AvailabilityZone']],
                HealthCheckType='EC2',
                LaunchConfigurationName=Ref(_vpn_launch_configuration),
                MinSize=1,
                MaxSize=1,
                DesiredCapacity=1,
                VPCZoneIdentifier=[_vpn_subnet['SubnetId']],
                Tags=self.get_autoscaling_tags(role_override=_role) +
                     [autoscaling.Tag('Name', _role, True)]))
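# For reference, the shape of a constants.ENVIRONMENTS[env]['vpn'] entry this
# loop consumes, inferred from the lookups above. The keys are the ones the
# code reads; every value here is hypothetical:
#
#   {'name': 'office', 'active': True, 'remote_ip': '198.51.100.10',
#    'secret': 'example-psk', 'local_envs': [], 'remote_envs': [],
#    'remote_subnets': ['10.20.0.0/16'], 'iptables_rules': [],
#    'ike': 'aes256-sha1-modp1536', 'esp': 'aes256-sha1'}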
def configure(self):
    """
    This template creates a mesos-master per subnet in the VPC
    """
    config = constants.ENVIRONMENTS[self.env]['mesos']['master']
    self.defaults = {
        'instance_type': config.get('instance_type', 't3.large')
    }

    self.add_description('Sets up Mesos Masters in all Zones')
    self.get_eni_policies()
    self.get_default_security_groups()
    self.get_standard_parameters()
    self.get_standard_policies()

    _global_config = constants.ENVIRONMENTS[self.env]
    self.ami = self.add_parameter(
        Parameter('AMI',
                  Type='String',
                  Description='AMI ID for instances',
                  Default=get_latest_ami_id(
                      self.region, 'ivy-mesos',
                      _global_config.get('ami_owner', 'self'))))

    _mesos_master_security_group = self.add_resource(
        ec2.SecurityGroup(
            'MesosMasterSecurityGroup',
            VpcId=self.vpc_id,
            GroupDescription='Security Group for MesosMaster Instances',
            SecurityGroupIngress=[
                {'IpProtocol': 'tcp', 'FromPort': 2181, 'ToPort': 2181, 'CidrIp': self.vpc_cidr},  # zk
                {'IpProtocol': 'tcp', 'FromPort': 4400, 'ToPort': 4400, 'CidrIp': self.vpc_cidr},  # chronos
                {'IpProtocol': 'tcp', 'FromPort': 5050, 'ToPort': 5051, 'CidrIp': self.vpc_cidr},  # mesos
                {'IpProtocol': 'tcp', 'FromPort': 8080, 'ToPort': 8080, 'CidrIp': self.vpc_cidr},  # marathon
                {'IpProtocol': 'tcp', 'FromPort': 8500, 'ToPort': 8500, 'CidrIp': self.vpc_cidr},  # consul ui
                {'IpProtocol': 'tcp', 'FromPort': 8300, 'ToPort': 8301, 'CidrIp': self.vpc_cidr},  # consul rpc/lan serf
                {'IpProtocol': 'tcp', 'FromPort': 8302, 'ToPort': 8302, 'CidrIp': constants.SUPERNET},  # consul wan serf
                {'IpProtocol': 'udp', 'FromPort': 8300, 'ToPort': 8301, 'CidrIp': self.vpc_cidr},  # consul rpc/lan serf (udp)
                {'IpProtocol': 'udp', 'FromPort': 8302, 'ToPort': 8302, 'CidrIp': constants.SUPERNET},  # consul wan serf (udp)
            ],
            SecurityGroupEgress=[
                {'IpProtocol': '-1', 'FromPort': 0, 'ToPort': 65535, 'CidrIp': '0.0.0.0/0'}
            ]))

    self.add_resource(
        ec2.SecurityGroupIngress(
            'MesosMasterIngressSecurityGroup',
            GroupId=Ref(_mesos_master_security_group),
            IpProtocol='-1',
            FromPort=-1,
            ToPort=-1,
            SourceSecurityGroupId=Ref(_mesos_master_security_group)
            # this allows members all traffic (for replication)
        ))
    self.add_security_group(Ref(_mesos_master_security_group))

    masters = [(index, ip) for index, ip in enumerate(config['masters'], 1)]
    subnets = self.get_subnets('private')

    for master in masters:
        zone_index, master_ip = master
        # Pick the private subnet whose CIDR contains this master's IP
        subnet = [
            s for s in subnets
            if netaddr.IPAddress(master_ip) in netaddr.IPNetwork(s['CidrBlock'])
        ][0]

        _mesos_master_eni = ec2.NetworkInterface(
            'MesosMasterInstanceENI{}'.format(subnet['AvailabilityZone'][-1]),
            Description='ENI for Mesos Master ENV: {0} PrivateSubnet {1}'.format(
                self.env, subnet['SubnetId']),
            GroupSet=self.security_groups,
            PrivateIpAddress=master_ip,
            SourceDestCheck=True,
            SubnetId=subnet['SubnetId'],
            Tags=self.get_tags(service_override="Mesos",
                               role_override='MesosMaster-{}'.format(
                                   subnet['AvailabilityZone'])))
        self.add_resource(_mesos_master_eni)

        _user_data_template = self.get_cloudinit_template(replacements=(
            ('__PROMPT_COLOR__', self.prompt_color()),
            ('__ENI_IP__', master_ip),
            ('__ZK_SERVER_ID__', str(zone_index)),
            ('__HOSTS_ENTRIES__', '\n'.join([
                '{0} mesos-master-{1}.node.{2}.{3} mesos-master-{1}'.format(
                    ip, index, self.env, constants.TAG)
                for index, ip in masters
            ])),
            ('__ZK_CONNECT__', ','.join(['{}:2181'.format(z[1]) for z in masters])),
            ('__ZK_PEERS__', '\n'.join([
                'server.{0}={1}:2888:3888'.format(index, ip)
                for index, ip in masters
            ]))))

        _user_data = Sub(
            _user_data_template
            .replace('${', '${!')  # Replace bash brackets with CFN escaped style
            .replace('{#', '${'),  # Replace rain-style CFN escapes with proper CFN brackets
            {
                'CFN_ENI_ID': Ref(_mesos_master_eni),
            })

        _mesos_master_launch_configuration = self.add_resource(
            autoscaling.LaunchConfiguration(
                'MesosMasterLaunchConfiguration{}'.format(
                    subnet['AvailabilityZone'][-1]),
                AssociatePublicIpAddress=False,
                BlockDeviceMappings=get_block_device_mapping(
                    self.parameters['InstanceType'].resource['Default']),
                SecurityGroups=self.security_groups,
                KeyName=Ref(self.keypair_name),
                ImageId=Ref(self.ami),
                InstanceType=Ref(self.instance_type),
                InstanceMonitoring=False,
                IamInstanceProfile=Ref(self.instance_profile),
                UserData=Base64(_user_data)))

        self.add_resource(
            autoscaling.AutoScalingGroup(
                'MesosMasterASGroup{}'.format(subnet['AvailabilityZone'][-1]),
                AvailabilityZones=[subnet['AvailabilityZone']],
                HealthCheckType='EC2',
                LaunchConfigurationName=Ref(_mesos_master_launch_configuration),
                MinSize=0,
                MaxSize=1,
                # DesiredCapacity=1,
                VPCZoneIdentifier=[subnet['SubnetId']],
                Tags=self.get_autoscaling_tags(
                    service_override="MesosMaster",
                    role_override='MesosMaster-{}'.format(
                        subnet['AvailabilityZone'])) + [
                    autoscaling.Tag(
                        'Name',
                        '{}Mesos-Master-{}'.format(self.env,
                                                   subnet['AvailabilityZone']),
                        True),
                    # tag to allow consul to discover the hosts
                    # autoscaling.Tag('{}:consul_master'.format(constants.TAG), self.env, True)
                ]))
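# Worked example of the substitutions in configure() above: with a hypothetical
# config['masters'] == ['10.0.1.10', '10.0.2.10', '10.0.3.10'], the rendered
# values are
#   __ZK_CONNECT__ -> '10.0.1.10:2181,10.0.2.10:2181,10.0.3.10:2181'
#   __ZK_PEERS__   -> 'server.1=10.0.1.10:2888:3888' (one line per master)
# so the 1-based enumerate() index doubles as the ZooKeeper server id.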
def configure(self):
    """
    Returns a Pritunl template
    """
    self.defaults = {'instance_type': 't3.large'}
    self.service = 'pritunl'
    self.set_description('Sets up Pritunl servers')
    self.get_default_security_groups()
    self.get_standard_parameters()
    self.get_standard_policies()

    _vpn_config = constants.ENVIRONMENTS[self.env]['pritunl']
    _global_config = constants.ENVIRONMENTS[self.env]
    _bootstrap_mode = _vpn_config.get('bootstrap_mode', False)
    _bootstrap_ami = get_latest_ami_id(
        self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2', 'amazon')
    _ivy_ami = get_latest_ami_id(self.region, 'ivy-base',
                                 _global_config.get('ami_owner', 'self'))

    self.ami = self.add_parameter(
        Parameter('AMI',
                  Type='String',
                  Description='AMI ID for instances',
                  Default=_bootstrap_ami if _bootstrap_mode else _ivy_ami))

    _public_dns = _vpn_config['public_dns']
    _vpn_name = '{}Pritunl'.format(self.env)

    # We want the preferred subnet only.
    _vpn_subnet = self.get_subnets('public', _preferred_only=True)[0]

    # Add our security group
    _vpn_security_group = self.add_resource(
        ec2.SecurityGroup(
            '{}SecurityGroup'.format(_vpn_name),
            VpcId=self.vpc_id,
            GroupDescription='Security Group for Pritunl {}'.format(_vpn_name),
            SecurityGroupIngress=[
                {"IpProtocol": "icmp", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0"},  # Ping
                {"IpProtocol": "tcp", "FromPort": "80", "ToPort": "80", "CidrIp": "0.0.0.0/0"},  # HTTP
                {"IpProtocol": "tcp", "FromPort": "443", "ToPort": "443", "CidrIp": "0.0.0.0/0"},  # HTTPS
                {"IpProtocol": "tcp", "FromPort": "22", "ToPort": "22", "CidrIp": "0.0.0.0/0"},  # SSH
                {"IpProtocol": "udp", "FromPort": "10000", "ToPort": "20000", "CidrIp": "0.0.0.0/0"},  # HTTPS/OVPN
                {"IpProtocol": "tcp", "FromPort": "27017", "ToPort": "27017", "CidrIp": constants.SUPERNET},  # mongodb master
                {"IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": constants.SUPERNET}  # Replies from local VPC
            ],
            SecurityGroupEgress=[
                {"IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0"}
            ]))

    # Add EBS volume if local mongo used
    _data_volume = None
    if _vpn_config.get('local_mongo', False):
        self.add_iam_policy(
            iam.Policy(
                PolicyName='AttachVolume',
                PolicyDocument={
                    'Statement': [{
                        'Effect': 'Allow',
                        'Resource': '*',
                        'Action': [
                            'ec2:AttachVolume',
                            'ec2:DeleteSnapshot',
                            'ec2:DescribeTags',
                            'ec2:DescribeVolumeAttribute',
                            'ec2:DescribeVolumeStatus',
                            'ec2:DescribeVolumes',
                            'ec2:DetachVolume'
                        ]
                    }]
                }))
        _data_volume = ec2.Volume(
            '{}DataVolume'.format(_vpn_name),
            Size=_vpn_config.get('data_volume_size', 20),
            VolumeType='gp2',
            AvailabilityZone=_vpn_subnet['AvailabilityZone'],
            DeletionPolicy='Retain',
            Tags=self.get_tags(service_override=self.service,
                               role_override=_vpn_name) +
                 [ec2.Tag('Name', _vpn_name + "-datavol")])
        self.add_resource(_data_volume)

    # Add the elastic IP and the ENI for it, then attach it.
    _vpn_eip = self.add_resource(
        ec2.EIP('{}InstanceEIP'.format(_vpn_name), Domain='vpc'))
    _vpn_eni = self.add_resource(
        ec2.NetworkInterface(
            '{}InstanceENI'.format(_vpn_name),
            SubnetId=_vpn_subnet['SubnetId'],
            Description='ENI for {}'.format(_vpn_name),
            GroupSet=[Ref(_vpn_security_group)] + self.security_groups,
            SourceDestCheck=False,
            Tags=self.get_tags(service_override=self.service,
                               role_override=_vpn_name)))
    self.get_eni_policies()

    self.add_resource(
        ec2.EIPAssociation('{}AssociateVPNInstanceENI'.format(_vpn_name),
                           AllocationId=GetAtt(_vpn_eip, "AllocationId"),
                           NetworkInterfaceId=Ref(_vpn_eni)))

    # Add a route53 DNS name
    if self.get_partition() != 'aws-us-gov':
        self.add_resource(
            route53.RecordSetGroup(
                '{}Route53'.format(_vpn_name),
                HostedZoneName=constants.ENVIRONMENTS[self.env]['route53_zone'],
                RecordSets=[
                    route53.RecordSet(Name=_public_dns,
                                      ResourceRecords=[Ref(_vpn_eip)],
                                      Type='A',
                                      TTL=600)
                ]))

    # Get all route tables in the VPC
    _vpc_route_tables = self.ec2_conn.describe_route_tables(
        Filters=[{'Name': 'vpc-id', 'Values': [self.vpc_id]}])['RouteTables']

    # Set up the routing table for the VPC
    # Allow for changing client subnets in constants.py
    for client_subnet in _vpn_config['client_subnets']:
        for route_table in _vpc_route_tables:
            self.add_resource(
                ec2.Route(
                    '{}Route{}{}'.format(
                        _vpn_name,
                        client_subnet.translate({ord("."): "", ord("/"): ""}),
                        route_table['RouteTableId'].replace('-', '')),
                    RouteTableId=route_table['RouteTableId'],
                    DestinationCidrBlock=client_subnet,
                    NetworkInterfaceId=Ref(_vpn_eni)))

    _mongodb = _vpn_config.get('mongodb')
    _server_id = _vpn_config['server_id']

    _userdata_template = self.get_cloudinit_template(
        _tpl_name="pritunl_bootstrap" if _bootstrap_mode else None,
        replacements=(('__PROMPT_COLOR__', self.prompt_color()),
                      ('__SERVER_ID__', _server_id),
                      ('__SERVICE__', self.service),
                      ('__MONGODB__', _mongodb if _mongodb else '')))

    _userdata = Sub(
        _userdata_template
        .replace('${', '${!')  # Replace bash brackets with CFN escaped style
        .replace('{#', '${'),  # Replace rain-style CFN escapes with proper CFN brackets
        {
            'CFN_ENI_ID': Ref(_vpn_eni),
            'CFN_EBS_ID': Ref(_data_volume) if _data_volume else ''
        })

    _vpn_launch_configuration = self.add_resource(
        autoscaling.LaunchConfiguration(
            '{}LaunchConfiguration'.format(_vpn_name),
            AssociatePublicIpAddress=True,
            KeyName=Ref(self.keypair_name),
            ImageId=Ref(self.ami),
            InstanceType=Ref(self.instance_type),
            InstanceMonitoring=False,
            IamInstanceProfile=Ref(self.instance_profile),
            UserData=Base64(_userdata)))

    self.add_resource(
        autoscaling.AutoScalingGroup(
            '{}ASGroup'.format(_vpn_name),
            AvailabilityZones=[_vpn_subnet['AvailabilityZone']],
            HealthCheckType='EC2',
            LaunchConfigurationName=Ref(_vpn_launch_configuration),
            MinSize=0,
            MaxSize=1,
            VPCZoneIdentifier=[_vpn_subnet['SubnetId']],
            Tags=self.get_autoscaling_tags(service_override=self.service,
                                           role_override=_vpn_name) +
                 [autoscaling.Tag('Name', _vpn_name, True)]))
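# A note on the userdata escaping used above (and in the other templates here):
# the cloud-init templates are bash, so literal ${VAR} must survive Fn::Sub.
#   .replace('${', '${!')  turns bash ${HOSTNAME} into ${!HOSTNAME}, which
#                          Fn::Sub emits verbatim as ${HOSTNAME};
#   .replace('{#', '${')   turns the rain-style escape {#CFN_ENI_ID} into
#                          ${CFN_ENI_ID}, which Fn::Sub then substitutes from
#                          the mapping passed as its second argument.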
def configure(self):
    config = constants.ENVIRONMENTS[self.env]['mesos']['agent']
    self.defaults = {
        'instance_type': config.get('instance_type', 'r5.xlarge')
    }

    self.add_description('Sets up Mesos Agents in all Zones')
    self.get_standard_parameters()
    self.get_standard_policies()
    self.get_default_security_groups()

    _global_config = constants.ENVIRONMENTS[self.env]
    self.ami = self.add_parameter(
        Parameter(
            'AMI',
            Type='String',
            Description='AMI ID for instances',
            Default=get_latest_ami_id(self.region, 'ivy-mesos',
                                      _global_config.get('ami_owner', 'self'))
        )
    )

    # Mesos Agent Security Group
    self.mesos_agent_security_group = self.add_resource(
        ec2.SecurityGroup(
            'MesosAgentSecurityGroup',
            VpcId=self.vpc_id,
            GroupDescription='Security Group for MesosAgent Instances',
            SecurityGroupIngress=[
                # public http via ELB
                {'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80, 'CidrIp': self.vpc_cidr},
                # internal service SSL direct
                {'IpProtocol': 'tcp', 'FromPort': 443, 'ToPort': 443, 'CidrIp': self.vpc_cidr},
                # host-network services (tcp)
                {'IpProtocol': 'tcp', 'FromPort': 5000, 'ToPort': 5049, 'CidrIp': self.vpc_cidr},
                # host-network services (udp)
                {'IpProtocol': 'udp', 'FromPort': 5000, 'ToPort': 5049, 'CidrIp': self.vpc_cidr},
                # mesos agent api
                {'IpProtocol': 'tcp', 'FromPort': 5050, 'ToPort': 5051, 'CidrIp': self.vpc_cidr},
                # internal http-alt direct
                {'IpProtocol': 'tcp', 'FromPort': 8000, 'ToPort': 8000, 'CidrIp': self.vpc_cidr},
                # internal http via ELB
                {'IpProtocol': 'tcp', 'FromPort': 8080, 'ToPort': 8080, 'CidrIp': self.vpc_cidr},
                # internal http-alt direct
                {'IpProtocol': 'tcp', 'FromPort': 9090, 'ToPort': 9090, 'CidrIp': self.vpc_cidr},
                # mesos tasks (udp)
                {'IpProtocol': 'udp', 'FromPort': 31000, 'ToPort': 32000, 'CidrIp': self.vpc_cidr},
                # mesos tasks (tcp)
                {'IpProtocol': 'tcp', 'FromPort': 31000, 'ToPort': 32000, 'CidrIp': self.vpc_cidr}
            ]
        )
    )
    self.add_resource(
        ec2.SecurityGroupIngress(
            'MesosAgentIngressSecurityGroup',
            GroupId=Ref(self.mesos_agent_security_group),
            IpProtocol='-1',
            FromPort=-1,
            ToPort=-1,
            SourceSecurityGroupId=Ref(self.mesos_agent_security_group)
            # All Mesos agents can access all ports on each other
        )
    )
    self.add_security_group(Ref(self.mesos_agent_security_group))

    # Security group for the internet-facing (external) ELBs - not added to the mesos agents themselves
    self.elb_external_security_group = self.add_resource(
        ec2.SecurityGroup(
            'MesosAgentELBExternalSecurityGroup',
            VpcId=self.vpc_id,
            GroupDescription='External Security Group for MesosAgent ELB Instances',
            SecurityGroupIngress=[
                {'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80, 'CidrIp': '0.0.0.0/0'},  # http
                {'IpProtocol': 'tcp', 'FromPort': 443, 'ToPort': 443, 'CidrIp': '0.0.0.0/0'},  # https
                {'IpProtocol': 'tcp', 'FromPort': 8443, 'ToPort': 8443, 'CidrIp': '0.0.0.0/0'},  # https-alt
                {'IpProtocol': 'icmp', 'FromPort': -1, 'ToPort': -1, 'CidrIp': '0.0.0.0/0'}  # ping (health checks)
            ]
        )
    )

    #
    # Docker roles
    #

    # Allow assume /docker roles by ec2metaproxy
    self.add_iam_policy(
        iam.Policy(
            PolicyName='AssumeDockerRoles',
            PolicyDocument={
                'Statement': [
                    {
                        'Effect': 'Allow',
                        'Action': ["sts:AssumeRole"],
                        "Resource": {
                            "Fn::Join": [
                                "",
                                ["arn:{}:iam::".format(self.get_partition()),
                                 {"Ref": "AWS::AccountId"},
                                 ":role/docker/*"]
                            ]
                        },
                    }
                ]
            }
        )
    )
    # Add docker roles to assumable roles list
    for r in self.generate_docker_roles():
        self.add_resource(r)

    #
    # Load Balancers
    #
    lb_type = config.get('lb_type', 'classic')
    elb_log_bucket = config.get('log_bucket',
                                '{}-{}-logs'.format(constants.TAG, self.env))

    if lb_type == 'classic':
        internal_elb = self.add_resource(
            self.generate_load_balancer(
                "{}MesosAgentInternalELB".format(self.env),
                "internal",
                8080,
                constants.SSL_CERTIFICATES[config['private_elb_cert']]['Arn'],
                elb_log_bucket
            )
        )
        external_elb = self.add_resource(
            self.generate_load_balancer(
                "{}MesosAgentExternalELB".format(self.env),
                "internet-facing",
                80,
                constants.SSL_CERTIFICATES[config['public_elb_cert']]['Arn'],
                elb_log_bucket
            )
        )
    elif lb_type == 'application':
        internal_elb, internal_target_group = self.generate_app_load_balancer(
            "{}MesosAgentInternalALB".format(self.env),
            "internal",
            8080,
            constants.SSL_CERTIFICATES[config['private_elb_cert']]['Arn'],
            elb_log_bucket
        )
        self.add_resource(internal_elb)
        self.add_resource(internal_target_group)

        external_elb, external_target_group = self.generate_app_load_balancer(
            "{}MesosAgentExternalALB".format(self.env),
            "internet-facing",
            80,
            constants.SSL_CERTIFICATES[config['public_elb_cert']]['Arn'],
            elb_log_bucket
        )
        self.add_resource(external_elb)
        self.add_resource(external_target_group)

    # extra public load balancers (for SSL termination, ELB doesn't do SNI)
    extra_public_load_balancers = []
    for lb_config in config.get('extra_public_load_balancers', []):
        if lb_type == 'classic':
            extra_public_load_balancers.append(Ref(self.add_resource(
                self.generate_load_balancer(
                    "{}{}MesosAgentExternalELB".format(self.env, lb_config['name']),
                    "internet-facing",
                    80,
                    constants.SSL_CERTIFICATES[lb_config['cert']]['Arn'],
                    elb_log_bucket
                )
            )))
        elif lb_type == 'application':
            _extra_public_lb, _extra_external_tg = self.generate_app_load_balancer(
                "{}{}MesosAgentExternalALB".format(self.env, lb_config['name']),
                "internet-facing",
                80,
                constants.SSL_CERTIFICATES[lb_config['cert']]['Arn'],
                elb_log_bucket
            )
            self.add_resource(_extra_public_lb)
            extra_public_load_balancers.append(Ref(self.add_resource(_extra_external_tg)))

    #
    # Instances
    #

    # Add docker volume
    block_device_mapping = get_block_device_mapping(
        self.parameters['InstanceType'].resource['Default'])
    block_device_mapping.extend([
        ec2.BlockDeviceMapping(
            DeviceName="/dev/xvda",  # rootfs
            Ebs=ec2.EBSBlockDevice(
                DeleteOnTermination=True,
                VolumeSize=config.get('rootfs_size', 50),
                VolumeType="gp2"
            )
        ),
        ec2.BlockDeviceMapping(
            DeviceName="/dev/xvdb",
            Ebs=ec2.EBSBlockDevice(
                DeleteOnTermination=True,
                VolumeSize=config.get('dockervol_size', 100),
                VolumeType=config.get('dockervol_type', 'gp2')
            )
        )
    ])

    # Launch configurations
    preferred_only = config.get('preferred_placement', False)

    if lb_type == 'classic':
        # Private ASG
        self.generate_asg("private",
                          count=config['count'].get('private', 2),
                          block_mapping=block_device_mapping,
                          load_balancers=[Ref(internal_elb), Ref(external_elb)] + extra_public_load_balancers,
                          preferred_subnets_only=preferred_only)
        # Public ASG
        self.generate_asg("public",
                          count=config['count'].get('public', 0),
                          block_mapping=block_device_mapping,
                          load_balancers=[Ref(internal_elb), Ref(external_elb)] + extra_public_load_balancers,
                          preferred_subnets_only=preferred_only)
    elif lb_type == 'application':
        # Private ASG
        self.generate_asg("private",
                          count=config['count'].get('private', 2),
                          block_mapping=block_device_mapping,
                          target_group_arns=[Ref(internal_target_group), Ref(external_target_group)] + extra_public_load_balancers,
                          preferred_subnets_only=preferred_only)
        # Public ASG
        self.generate_asg("public",
                          count=config['count'].get('public', 0),
                          block_mapping=block_device_mapping,
                          target_group_arns=[Ref(internal_target_group), Ref(external_target_group)] + extra_public_load_balancers,
                          preferred_subnets_only=preferred_only)

    #
    # DNS Records
    #
    if self.get_partition() != 'aws-us-gov':
        zone = constants.ENVIRONMENTS[self.env]['route53_zone']
        self.add_resource(
            route53.RecordSetGroup(
                'ELBRoute53',
                HostedZoneName=zone,
                RecordSets=[
                    route53.RecordSet(
                        Name='internal.{}'.format(zone)[:-1],
                        ResourceRecords=[GetAtt(internal_elb, 'DNSName')],
                        Type='CNAME',
                        TTL=300
                    ),
                    route53.RecordSet(
                        Name='external.{}'.format(zone)[:-1],
                        ResourceRecords=[GetAtt(external_elb, 'DNSName')],
                        Type='CNAME',
                        TTL=300
                    )
                ]
            )
        )
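# For reference, a hypothetical mesos agent config block (keys inferred from
# the config.get() and config[...] lookups above; all values invented):
#
#   {'instance_type': 'r5.xlarge', 'lb_type': 'application',
#    'private_elb_cert': 'internal-cert', 'public_elb_cert': 'public-cert',
#    'extra_public_load_balancers': [{'name': 'Alt', 'cert': 'alt-cert'}],
#    'count': {'private': 2, 'public': 0}, 'rootfs_size': 50,
#    'dockervol_size': 100, 'dockervol_type': 'gp2',
#    'preferred_placement': False}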
def configure(self):
    """
    Returns a Nexus template
    """
    self.defaults = {'instance_type': 't3.xlarge'}
    self.service = 'nexus'
    self.set_description('Sets up Nexus repository manager servers')
    self.get_default_security_groups()
    self.get_standard_parameters()
    self.get_standard_policies()

    self.ami = self.add_parameter(
        Parameter('AMI',
                  Type='String',
                  Description='AMI ID for instances',
                  Default=get_latest_ami_id(
                      self.region,
                      'amzn2-ami-hvm-2.0.????????-x86_64-gp2',
                      'amazon')))

    config = constants.ENVIRONMENTS[self.env][self.service]

    # We want the preferred subnet only.
    subnet = self.get_subnets('private', _preferred_only=True)[0]

    # Add our security group
    security_group = self.add_resource(
        ec2.SecurityGroup(
            '{}SecurityGroup'.format(self.name),
            VpcId=self.vpc_id,
            GroupDescription='Security Group for {}'.format(self.name),
            SecurityGroupIngress=[
                {"IpProtocol": "tcp", "FromPort": "80", "ToPort": "80", "CidrIp": constants.SUPERNET},  # HTTP
                {"IpProtocol": "tcp", "FromPort": "443", "ToPort": "443", "CidrIp": constants.SUPERNET},  # HTTPS
                # {"IpProtocol": "tcp", "FromPort": "8081", "ToPort": "8081", "CidrIp": constants.SUPERNET},  # NexusRM Direct (disabled!)
            ],
            SecurityGroupEgress=[
                {"IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0"}
            ]))

    # Add our EBS data volume
    data_volume = ec2.Volume(
        '{}DataVolume'.format(self.name),
        Size=config.get('data_volume_size', 20),
        VolumeType='gp2',
        AvailabilityZone=subnet['AvailabilityZone'],
        DeletionPolicy='Retain',
        Tags=self.get_tags(service_override=self.service,
                           role_override=self.name) +
             [ec2.Tag('Name', self.name + "-datavol")])
    self.add_resource(data_volume)

    self.add_iam_policy(
        iam.Policy(PolicyName='AttachVolume',
                   PolicyDocument={
                       'Statement': [{
                           'Effect': 'Allow',
                           'Resource': '*',
                           'Action': [
                               'ec2:AttachVolume',
                               'ec2:DeleteSnapshot',
                               'ec2:DescribeTags',
                               'ec2:DescribeVolumeAttribute',
                               'ec2:DescribeVolumeStatus',
                               'ec2:DescribeVolumes',
                               'ec2:DetachVolume'
                           ]
                       }]
                   }))

    # Add an ENI for a static IP address
    eni = self.add_resource(
        ec2.NetworkInterface(
            '{}InstanceENI'.format(self.name),
            SubnetId=subnet['SubnetId'],
            Description='ENI for {}'.format(self.name),
            GroupSet=[Ref(security_group)] + self.security_groups,
            SourceDestCheck=True,
            Tags=self.get_tags(service_override=self.service,
                               role_override=self.name)))
    self.get_eni_policies()

    # Add a route53 A record for the main Nexus host
    route53_zone = constants.ENVIRONMENTS[self.env]['route53_zone']
    private_dns = config.get('private_dns', 'nexus.{}'.format(route53_zone))
    self.add_resource(
        route53.RecordSetGroup(
            '{}Route53'.format(self.name),
            HostedZoneName=route53_zone,
            RecordSets=[
                route53.RecordSet(
                    Name=private_dns,
                    ResourceRecords=[GetAtt(eni, 'PrimaryPrivateIpAddress')],
                    Type='A',
                    TTL=600)
            ]))

    # Add CNAME records for each repository, pointing to the main record
    for repository in config['repositories']:
        self.add_resource(
            route53.RecordSetGroup(
                '{}{}Route53'.format(self.name, self.cfn_name(repository)),
                HostedZoneName=route53_zone,
                RecordSets=[
                    route53.RecordSet(
                        Name='{}.{}'.format(repository, route53_zone),
                        ResourceRecords=[private_dns],
                        Type='CNAME',
                        TTL=600)
                ]))

    # Add S3 IAM role for nexus blobstore access
    self.add_iam_policy(
        iam.Policy(
            PolicyName='S3Access',
            PolicyDocument={
                'Statement': [{
                    "Effect": "Allow",
                    "Action": [
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:ListBucketMultipartUploads",
                        "s3:ListBucketVersions",
                        "s3:GetBucketAcl",
                        "s3:GetLifecycleConfiguration",
                        "s3:PutLifecycleConfiguration"
                    ],
                    "Resource": [
                        'arn:{}:s3:::{}'.format(self.get_partition(),
                                                config['s3_bucket'])
                    ]
                }, {
                    "Effect": "Allow",
                    "Action": [
                        "s3:GetObject",
                        "s3:PutObject",
                        "s3:DeleteObject",
                        "s3:AbortMultipartUpload",
                        "s3:ListMultipartUploadParts",
                        "s3:GetObjectTagging",
                        "s3:PutObjectTagging",
                        "s3:DeleteObjectTagging"
                    ],
                    "Resource": [
                        'arn:{}:s3:::{}/*'.format(self.get_partition(),
                                                  config['s3_bucket'])
                    ]
                }]
            }))

    # Substitute the userdata template and feed it to CFN
    userdata_template = self.get_cloudinit_template(replacements=(
        ('__PROMPT_COLOR__', self.prompt_color()),
        ('__SERVICE__', self.service),
        ('__DEFAULT_DOMAIN__', route53_zone[:-1]),  # route53_zone has a trailing '.', strip it
        ('__TOP_DOMAIN__', constants.ROOT_ROUTE53_ZONE),
        # ('__REPOSITORIES__', " ".join(['"{}"'.format(x) for x in config['repositories']]))  # '"abc" "def" "ghi"'
    ))

    userdata = Sub(
        userdata_template
        .replace('${', '${!')  # Replace bash brackets with CFN escaped style
        .replace('{#', '${'),  # Replace rain-style CFN escapes with proper CFN brackets
        {
            'CFN_ENI_ID': Ref(eni),
            'CFN_EBS_ID': Ref(data_volume)
        })

    launch_configuration = self.add_resource(
        autoscaling.LaunchConfiguration(
            '{}LaunchConfiguration'.format(self.name),
            AssociatePublicIpAddress=False,
            KeyName=Ref(self.keypair_name),
            ImageId=Ref(self.ami),
            InstanceType=Ref(self.instance_type),
            InstanceMonitoring=False,
            IamInstanceProfile=Ref(self.instance_profile),
            UserData=Base64(userdata)))

    self.add_resource(
        autoscaling.AutoScalingGroup(
            '{}ASGroup'.format(self.name),
            AvailabilityZones=[subnet['AvailabilityZone']],
            HealthCheckType='EC2',
            LaunchConfigurationName=Ref(launch_configuration),
            MinSize=0,
            MaxSize=1,
            DesiredCapacity=0,
            VPCZoneIdentifier=[subnet['SubnetId']],
            Tags=self.get_autoscaling_tags(service_override=self.service,
                                           role_override=self.name) +
                 [autoscaling.Tag('Name', self.name, True)]))
def configure(self):
    """
    Returns a BIND template
    """
    self.defaults = {'instance_type': 't3.micro'}
    self.service = 'bind'
    self.set_description('Sets up BIND DNS servers')
    self.get_default_security_groups()
    self.get_standard_parameters()
    self.get_standard_policies()

    self.ami = self.add_parameter(
        Parameter('AMI',
                  Type='String',
                  Description='AMI ID for instances',
                  Default=get_latest_ami_id(
                      self.region,
                      'amzn2-ami-hvm-2.0.????????-x86_64-gp2',
                      'amazon')))

    config = constants.ENVIRONMENTS[self.env][self.service]

    # All subnets in public get a DNS server
    subnets = self.get_subnets('public')

    # Add our security group
    security_group = self.add_resource(
        ec2.SecurityGroup(
            '{}SecurityGroup'.format(self.name),
            VpcId=self.vpc_id,
            GroupDescription='Security Group for {}'.format(self.name),
            SecurityGroupIngress=[
                {"IpProtocol": "tcp", "FromPort": "53", "ToPort": "53", "CidrIp": "0.0.0.0/0"},  # DNS TCP
                {"IpProtocol": "udp", "FromPort": "53", "ToPort": "53", "CidrIp": "0.0.0.0/0"},  # DNS UDP
            ],
            SecurityGroupEgress=[
                {"IpProtocol": "-1", "FromPort": "-1", "ToPort": "-1", "CidrIp": "0.0.0.0/0"}
            ]))

    route53_zone = constants.ENVIRONMENTS[self.env]['route53_zone']

    zonefile = ''
    for zone in config['forwarders']:
        zonefile += "\n" + self.make_bind_zone(zone)

    for subnet in subnets:
        subnet_name = subnet['AvailabilityZone']
        role = '{}-{}-{}'.format(self.env, self.service, subnet_name)  # myenv-bind-us-west-2a

        # Add the elastic IP and the ENI for it, then attach it.
        eip = self.add_resource(
            ec2.EIP('{}InstanceEIP'.format(self.cfn_name(role)), Domain='vpc'))
        eni = self.add_resource(
            ec2.NetworkInterface(
                '{}InstanceENI'.format(self.cfn_name(role)),
                SubnetId=subnet['SubnetId'],
                Description='ENI for {}'.format(role),
                GroupSet=[Ref(security_group)] + self.security_groups,
                SourceDestCheck=True,
                Tags=self.get_tags(service_override=self.service,
                                   role_override=role)))
        self.get_eni_policies()

        self.add_resource(
            ec2.EIPAssociation(
                '{}AssociateVPNInstanceENI'.format(self.cfn_name(role)),
                AllocationId=GetAtt(eip, "AllocationId"),
                NetworkInterfaceId=Ref(eni)))

        # Add a route53 DNS name
        self.add_resource(
            route53.RecordSetGroup(
                '{}Route53'.format(self.cfn_name(role)),
                HostedZoneName=route53_zone,
                RecordSets=[
                    route53.RecordSet(Name="{}.{}".format(role, route53_zone),
                                      ResourceRecords=[Ref(eip)],
                                      Type='A',
                                      TTL=600)
                ]))

        # Substitute the userdata template and feed it to CFN
        userdata_template = self.get_cloudinit_template(
            replacements=(('__PROMPT_COLOR__', self.prompt_color()),
                          ('__SERVICE__', self.service),
                          ('__BIND_ZONEFILE__', zonefile)))

        userdata = Sub(
            userdata_template
            .replace('${', '${!')  # Replace bash brackets with CFN escaped style
            .replace('{#', '${'),  # Replace rain-style CFN escapes with proper CFN brackets
            {'CFN_ENI_ID': Ref(eni)})

        launch_configuration = self.add_resource(
            autoscaling.LaunchConfiguration(
                '{}LaunchConfiguration'.format(self.cfn_name(role)),
                AssociatePublicIpAddress=True,
                KeyName=Ref(self.keypair_name),
                ImageId=Ref(self.ami),
                InstanceType=Ref(self.instance_type),
                InstanceMonitoring=False,
                IamInstanceProfile=Ref(self.instance_profile),
                UserData=Base64(userdata)))

        self.add_resource(
            autoscaling.AutoScalingGroup(
                '{}ASGroup'.format(self.cfn_name(role)),
                AvailabilityZones=[subnet['AvailabilityZone']],
                HealthCheckType='EC2',
                LaunchConfigurationName=Ref(launch_configuration),
                MinSize=0,
                MaxSize=1,
                DesiredCapacity=0,
                VPCZoneIdentifier=[subnet['SubnetId']],
                Tags=self.get_autoscaling_tags(service_override=self.service,
                                               role_override=role) +
                     [autoscaling.Tag('Name', role, True)]))
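# Naming example for the loop above, with a hypothetical env 'myenv' and zone
# 'myenv.example.com.': the us-west-2a subnet gets role 'myenv-bind-us-west-2a',
# an EIP-backed ENI, and an A record 'myenv-bind-us-west-2a.myenv.example.com.'.
# Every server renders the same __BIND_ZONEFILE__ built from config['forwarders'].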
def configure(self):
    """
    Returns a cassandra template with seed nodes
    """
    self.add_description('Sets up Cassandra in all Zones')
    self.get_eni_policies()
    self.get_default_security_groups()
    self.get_standard_parameters()
    self.get_standard_policies()

    _global_config = constants.ENVIRONMENTS[self.env]
    self.ami = self.add_parameter(
        Parameter('AMI',
                  Type='String',
                  Description='AMI ID for instances',
                  Default=get_latest_ami_id(
                      self.region, 'ivy-cassandra',
                      _global_config.get('ami_owner', 'self'))))

    _cassandra_security_group = self.add_resource(
        ec2.SecurityGroup(
            '{}SecurityGroup'.format(self.name),
            VpcId=self.vpc_id,
            GroupDescription='Security Group for {} Instances'.format(self.name),
            SecurityGroupIngress=[
                {'IpProtocol': 'tcp', 'FromPort': 7000, 'ToPort': 7001, 'CidrIp': self.vpc_cidr},  # inter-node
                {'IpProtocol': 'tcp', 'FromPort': 7199, 'ToPort': 7199, 'CidrIp': self.vpc_cidr},  # jmx
                {'IpProtocol': 'tcp', 'FromPort': 9042, 'ToPort': 9042, 'CidrIp': self.vpc_cidr},  # client port
                {'IpProtocol': 'tcp', 'FromPort': 9160, 'ToPort': 9160, 'CidrIp': self.vpc_cidr},  # client (thrift)
            ]))
    self.add_resource(
        ec2.SecurityGroupIngress(
            '{}IngressSecurityGroup'.format(self.name),
            GroupId=Ref(_cassandra_security_group),
            IpProtocol='-1',
            FromPort=-1,
            ToPort=-1,
            SourceSecurityGroupId=Ref(_cassandra_security_group)  # this allows members all traffic
        ))
    self.add_security_group(Ref(_cassandra_security_group))

    # Add support for creating EBS snapshots and tagging them
    self.add_iam_policy(
        iam.Policy(PolicyName='CassandraBackups',
                   PolicyDocument={
                       'Statement': [{
                           'Effect': 'Allow',
                           'Resource': '*',
                           'Action': [
                               'ec2:AttachVolume',
                               'ec2:CreateSnapshot',
                               'ec2:CreateTags',
                               'ec2:DeleteSnapshot',
                               'ec2:DescribeInstances',
                               'ec2:DescribeSnapshots',
                               'ec2:DescribeTags',
                               'ec2:DescribeVolumeAttribute',
                               'ec2:DescribeVolumeStatus',
                               'ec2:DescribeVolumes',
                               'ec2:DetachVolume'
                           ]
                       }]
                   }))

    for cluster in constants.ENVIRONMENTS[self.env]['cassandra']['clusters']:
        for _instance in cluster['instances']:
            subnet = [
                s for s in self.get_subnets('private')
                if netaddr.IPAddress(_instance['ip']) in netaddr.IPNetwork(s['CidrBlock'])
            ][0]

            service = 'cassandra-{}'.format(cluster['name'])
            role = '-'.join([
                self.name,
                cluster['name'],
                subnet['AvailabilityZone'],
                _instance['ip']
            ])
            tags = self.get_tags(service_override=service, role_override=role)

            # Create ENI for this server, and hold onto a Ref for it so we can
            # feed it into the userdata
            uniq_id = hashlib.md5(role.encode('utf-8')).hexdigest()[:10]
            eni = ec2.NetworkInterface(
                self.name + cluster['name'] + "ENI" + uniq_id,
                Description='Cassandra: Cluster: {} ENV: {} PrivateSubnet {}'.format(
                    cluster['name'], self.env, subnet['SubnetId']),
                GroupSet=self.security_groups,
                PrivateIpAddress=_instance['ip'],
                SourceDestCheck=True,
                SubnetId=subnet['SubnetId'],
                Tags=tags,
            )
            self.add_resource(eni)

            # Add the rootfs
            _block_device_mapping = get_block_device_mapping(
                self.parameters['InstanceType'].resource['Default'])
            _block_device_mapping += [
                ec2.BlockDeviceMapping(
                    DeviceName="/dev/xvda",
                    Ebs=ec2.EBSBlockDevice(
                        DeleteOnTermination=True,
                        VolumeSize=cluster.get('rootfs_size', 20),
                        VolumeType="gp2",
                    ))
            ]

            # Seed the cluster from one node in the remote DC, plus three nodes in this DC
            # We want to avoid making too many nodes into seeds
            if cluster.get('remote_seed'):
                remote_env_name = cluster['remote_seed']['datacenter']
                remote_cluster_name = cluster['remote_seed']['cluster']
                remote_clusters = constants.ENVIRONMENTS[remote_env_name]['cassandra']['clusters']
                # filter to just the remote cluster in the remote DC and return that one only
                remote_cluster = list(
                    filter(lambda x: x['name'] == remote_cluster_name,
                           remote_clusters))[0]
                remote_seeds = [i['ip'] for i in remote_cluster['instances']][:1]
                local_seeds = [i['ip'] for i in cluster['instances']][:3]
                seeds = ','.join(remote_seeds + local_seeds)
            else:
                # Use the first three cassandra nodes as seeds
                seeds = ','.join([i['ip'] for i in cluster['instances']][:3])

            if cluster.get('data_volume_size'):
                # Create the EBS volume
                data_volume = ec2.Volume(
                    '{}{}DataVolume{}'.format(self.name, cluster['name'], uniq_id),
                    # something like 'envnameCassandraappDataVolumec47145e176'
                    Size=cluster.get('data_volume_size', 20),
                    VolumeType='gp2',
                    AvailabilityZone=subnet['AvailabilityZone'],
                    DeletionPolicy='Retain',
                    Tags=tags + [ec2.Tag('Name', role + "-datavol")])
                self.add_resource(data_volume)
            else:
                data_volume = None

            # Create the user data in two phases
            # Phase 1: substitute from constants in Rain
            user_data_template = self.get_cloudinit_template(
                cluster['cassandra_template'],
                replacements=(('__PROMPT_COLOR__', self.prompt_color()),
                              ('__CASSANDRA_CLUSTER__', cluster['name']),
                              ('__CASSANDRA_CLUSTER_OVERRIDE__',
                               cluster.get('cluster_name_override', "")),
                              ('__CASSANDRA_SEEDS__', seeds),
                              ('__SERVICE__', service)))
            # Phase 2: Allow AWS Cloudformation to further substitute Ref()'s in the userdata
            userdata = Base64(
                Sub(
                    user_data_template
                    .replace('${', '${!')  # Replace bash brackets with CFN escaped style
                    .replace('{#', '${'),  # Replace rain-style CFN escapes with proper CFN brackets
                    {
                        'CFN_ENI_ID': Ref(eni),
                        'CFN_DATA_EBS_VOLUME_ID': Ref(data_volume) if data_volume else ""
                    }))

            # Create the Launch Configuration / ASG
            _instance_type = cluster.get('instance_type', Ref(self.instance_type))
            launch_configuration = self.add_resource(
                autoscaling.LaunchConfiguration(
                    '{}{}LaunchConfiguration{}'.format(
                        self.name, cluster['name'], uniq_id),
                    AssociatePublicIpAddress=False,
                    BlockDeviceMappings=_block_device_mapping,
                    EbsOptimized=True if _instance_type in EBS_OPTIMIZED_INSTANCES else False,
                    ImageId=Ref(self.ami),
                    InstanceType=_instance_type,
                    InstanceMonitoring=False,
                    IamInstanceProfile=Ref(self.instance_profile),
                    KeyName=Ref(self.keypair_name),
                    SecurityGroups=self.security_groups,
                    UserData=userdata))

            self.add_resource(
                autoscaling.AutoScalingGroup(
                    '{}{}ASGroup{}'.format(self.name, cluster['name'], uniq_id),
                    AvailabilityZones=[subnet['AvailabilityZone']],
                    HealthCheckType='EC2',
                    LaunchConfigurationName=Ref(launch_configuration),
                    MinSize=1,
                    MaxSize=1,
                    VPCZoneIdentifier=[subnet['SubnetId']],
                    Tags=self.get_autoscaling_tags(service_override=service,
                                                   role_override=role) +
                         [autoscaling.Tag('Name', role, True)]))
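# Seed selection above, worked through with hypothetical addresses: a cluster
# whose instances are at 10.0.1.5, 10.0.2.5, 10.0.3.5, 10.0.1.6 yields
#   seeds == '10.0.1.5,10.0.2.5,10.0.3.5'
# and with remote_seed pointing at a cluster whose first instance is 10.9.0.5,
#   seeds == '10.9.0.5,10.0.1.5,10.0.2.5,10.0.3.5'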
def configure(self):
    """
    Returns a Kafka template
    """
    self.add_description('Configures Kafka in each AZ per config')
    self.service = 'kafka'
    self.get_default_security_groups()
    self.get_standard_parameters()
    self.get_standard_policies()

    _global_config = constants.ENVIRONMENTS[self.env]
    self.ami = self.add_parameter(
        Parameter(
            'AMI',
            Type='String',
            Description='AMI ID for instances',
            Default=get_latest_ami_id(self.region, "ivy-" + self.service,
                                      _global_config.get('ami_owner', 'self'))
        )
    )

    for cluster in constants.ENVIRONMENTS[self.env][self.service]:
        _cluster_name = "{}-{}".format(self.service, cluster['name'])  # {service}-app

        _security_group = self.add_resource(
            ec2.SecurityGroup(
                self.cfn_name(_cluster_name, 'SecurityGroup'),
                VpcId=self.vpc_id,
                GroupDescription='Security Group for {} Instances'.format(self.service),
                SecurityGroupIngress=[
                    {'IpProtocol': 'tcp', 'FromPort': 9091, 'ToPort': 9093, 'CidrIp': self.vpc_cidr},  # Kafka Standard
                    {'IpProtocol': 'tcp', 'FromPort': 9999, 'ToPort': 9999, 'CidrIp': self.vpc_cidr}  # JMX
                ]
            )
        )
        self.add_resource(
            ec2.SecurityGroupIngress(
                self.cfn_name(_cluster_name, 'IngressSecurityGroup'),
                GroupId=Ref(_security_group),
                IpProtocol='-1',
                FromPort=-1,
                ToPort=-1,
                SourceSecurityGroupId=Ref(_security_group)  # this allows members all traffic
            )
        )
        self.add_security_group(Ref(_security_group))

        _block_device_mapping = get_block_device_mapping(
            self.parameters['InstanceType'].resource['Default'])
        _block_device_mapping += [
            ec2.BlockDeviceMapping(
                DeviceName="/dev/xvda",
                Ebs=ec2.EBSBlockDevice(
                    DeleteOnTermination=True,
                    VolumeSize=cluster.get('volume_size', 20),
                    VolumeType="gp2",
                )
            )
        ]

        _userdata = self.get_cloudinit_template(replacements=(
            ('__PROMPT_COLOR__', self.prompt_color()),
            ('__CLUSTER_NAME__', _cluster_name),
        ))

        _launch_configuration = self.add_resource(
            autoscaling.LaunchConfiguration(
                self.cfn_name(_cluster_name, 'LaunchConfiguration'),
                AssociatePublicIpAddress=False,
                BlockDeviceMappings=_block_device_mapping,
                ImageId=Ref(self.ami),
                InstanceType=cluster.get('instance_type', 't2.nano'),
                EbsOptimized=True if cluster.get('instance_type', 't2.nano') in EBS_OPTIMIZED_INSTANCES else False,
                InstanceMonitoring=False,
                IamInstanceProfile=Ref(self.instance_profile),
                KeyName=Ref(self.keypair_name),
                SecurityGroups=self.security_groups,
                UserData=Base64(_userdata)
            )
        )
        self.add_resource(
            autoscaling.AutoScalingGroup(
                self.cfn_name(_cluster_name, 'ASGroup'),
                HealthCheckType='EC2',
                LaunchConfigurationName=Ref(_launch_configuration),
                MinSize=cluster.get('count', 3),
                MaxSize=cluster.get('count', 3),
                VPCZoneIdentifier=[subnet['SubnetId'] for subnet in self.get_subnets('private')],
                Tags=self.get_autoscaling_tags(service_override=_cluster_name,
                                               role_override=self.service) + [
                    autoscaling.Tag('Name', "{}{}".format(self.env, _cluster_name), True)
                ]
            )
        )
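# For reference, the shape of a constants.ENVIRONMENTS[env]['kafka'] cluster
# entry consumed above (keys from the cluster.get() calls; values invented):
#
#   {'name': 'app', 'count': 3, 'instance_type': 'm5.large', 'volume_size': 100}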