def _build_volume(self, template, vpc):
    """Add this blueprint's EBS volume to *template* and return it.

    The volume is pinned to the availability zone selected by
    ``self.az_index`` from the VPC's exported AZ list; the AZ is exposed
    as a template parameter (declared once and reused on later calls),
    and the volume id is published as a stack output.
    """
    zone_name = vpc.output_azs()[self.az_index]
    # Reuse the AZ parameter if a previous call already registered it,
    # otherwise declare it now.
    existing_param = template.parameters.get(zone_name)
    if existing_param is None:
        zone_ref = Ref(template.add_parameter(Parameter(zone_name, Type='String')))
    else:
        zone_ref = Ref(existing_param)
    volume = ec2.Volume(
        '{}EBSVolume'.format(self.name),
        Size=self.volume_size,
        VolumeType=self.volume_type,
        AvailabilityZone=zone_ref,
        Tags=Tags(Name="{} EBS Volume".format(self.name)))
    template.add_resource(volume)
    template.add_output(
        Output('{}Volume'.format(self.name),
               Value=Ref(volume),
               Description="{} EBS Volume".format(self.name)))
    return volume
def render_ext_volume(context, context_ext, template, actual_ec2_instances, node=1):
    """Render the external EBS volume for *node*, attaching it only when
    the node's EC2 instance actually exists.

    For existing instances the AZ is taken from the instance itself;
    otherwise odd nodes go to the primary AZ and even nodes to the
    redundant one.
    """
    instance_exists = node in actual_ec2_instances
    if instance_exists:
        zone = GetAtt(EC2_TITLE_NODE % node, "AvailabilityZone")
    elif node % 2 == 1:
        zone = context['aws']['availability-zone']
    else:
        zone = context['aws']['redundant-availability-zone']

    volume = ec2.Volume(
        EXT_TITLE % node,
        Size=str(context_ext['size']),  # TODO: change
        AvailabilityZone=zone,
        VolumeType=context_ext.get('type', 'standard'),
        Tags=instance_tags(context, node),
    )
    template.add_resource(volume)

    # Attachment only makes sense when there is an instance to attach to.
    if instance_exists:
        template.add_resource(
            ec2.VolumeAttachment(
                EXT_MP_TITLE % node,
                InstanceId=Ref(EC2_TITLE_NODE % node),
                VolumeId=Ref(volume),
                Device=context_ext.get('device'),
            ))
def InstanceVolumeTemplate(self):
    """Integration test: launch a stack with two EC2 instances, each with
    an attached 8 GiB EBS volume, wait for CREATE_COMPLETE, then delete
    the stack.
    """
    # Unique stack name per run so repeated runs don't collide.
    self.stack_name = "volumeTest{0}".format(int(time.time()))
    template = Template()
    keyname_param = template.add_parameter(
        Parameter(
            "KeyName",
            Description="Name of an existing EC2 KeyPair "
            "to enable SSH access to the instance",
            Type="String",
        ))
    # Single-entry region map pointing at the image under test.
    # NOTE(review): the mapping key is the empty string — presumably this
    # is what Ref("AWS::Region") resolves to in this test environment;
    # confirm against the tester setup.
    template.add_mapping('RegionMap', {"": {
        "AMI": self.tester.get_emi().id
    }})
    for i in xrange(2):  # two instance/volume/attachment triples
        ec2_instance = template.add_resource(
            ec2.Instance("Instance{0}".format(i),
                         ImageId=FindInMap("RegionMap", Ref("AWS::Region"),
                                           "AMI"),
                         InstanceType="t1.micro",
                         KeyName=Ref(keyname_param),
                         SecurityGroups=[self.group.name],
                         UserData=Base64("80")))
        # Volume is placed in whatever AZ the instance lands in.
        vol = template.add_resource(
            ec2.Volume("Volume{0}".format(i),
                       Size="8",
                       AvailabilityZone=GetAtt("Instance{0}".format(i),
                                               "AvailabilityZone")))
        mount = template.add_resource(
            ec2.VolumeAttachment("MountPt{0}".format(i),
                                 InstanceId=Ref("Instance{0}".format(i)),
                                 VolumeId=Ref("Volume{0}".format(i)),
                                 Device="/dev/vdc"))
    stack = self.tester.create_stack(self.stack_name, template.to_json(),
                                     parameters=[("KeyName",
                                                  self.keypair.name)])

    def stack_completed():
        # Poll helper: True once CloudFormation reports CREATE_COMPLETE.
        return self.tester.cloudformation.describe_stacks(
            self.stack_name).status == "CREATE_COMPLETE"

    self.tester.wait_for_result(stack_completed, True, timeout=600)
    self.tester.delete_stack(self.stack_name)
def ext_volume(context, template):
    """Add the external EBS volume and its attachment (to the primary EC2
    instance) to *template*.

    ``context['ext']`` supplies size, type and device; the volume is
    created in the same AZ as the instance.
    """
    context_ext = context['ext']
    vtype = context_ext.get('type', 'standard')
    # Friendly alias: callers say 'ssd', CloudFormation wants 'gp2'.
    if vtype == 'ssd':
        vtype = 'gp2'

    ec2v = ec2.Volume(
        EXT_TITLE,
        Size=str(context_ext['size']),
        AvailabilityZone=GetAtt(EC2_TITLE, "AvailabilityZone"),
        VolumeType=vtype)

    ec2va = ec2.VolumeAttachment(
        EXT_MP_TITLE,
        InstanceId=Ref(EC2_TITLE),
        VolumeId=Ref(ec2v),
        Device=context_ext['device'])

    # BUG FIX: the previous `map(template.add_resource, [ec2v, ec2va])`
    # returns a lazy iterator on Python 3, so neither resource was ever
    # added to the template. Add them explicitly instead.
    template.add_resource(ec2v)
    template.add_resource(ec2va)
def configure(self):
    """Build the Pritunl VPN stack: security group, optional local-mongo
    EBS volume, EIP + ENI pair, Route53 record, VPC client-subnet routes,
    and a single-instance autoscaling group running the Pritunl userdata.
    """
    self.defaults = {'instance_type': 't3.large'}
    self.service = 'pritunl'
    self.set_description('Sets up Pritunl servers')
    self.get_default_security_groups()
    self.get_standard_parameters()
    self.get_standard_policies()
    _vpn_config = constants.ENVIRONMENTS[self.env]['pritunl']
    _global_config = constants.ENVIRONMENTS[self.env]
    # Bootstrap mode boots a stock Amazon Linux 2 AMI instead of the
    # pre-baked ivy-base image (used for first-time setup).
    _bootstrap_mode = _vpn_config.get('bootstrap_mode', False)
    _bootstrap_ami = get_latest_ami_id(
        self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2', 'amazon')
    _ivy_ami = get_latest_ami_id(self.region, 'ivy-base',
                                 _global_config.get('ami_owner', 'self'))
    self.ami = self.add_parameter(
        Parameter('AMI',
                  Type='String',
                  Description='AMI ID for instances',
                  Default=_bootstrap_ami if _bootstrap_mode else _ivy_ami))
    _public_dns = _vpn_config['public_dns']
    _vpn_name = '{}Pritunl'.format(self.env)
    # We want the preferred subnet only.
    _vpn_subnet = self.get_subnets('public', _preferred_only=True)[0]
    # Add our security group
    _vpn_security_group = self.add_resource(
        ec2.SecurityGroup(
            '{}SecurityGroup'.format(_vpn_name),
            VpcId=self.vpc_id,
            GroupDescription='Security Group for Pritunl {}'.format(
                _vpn_name),
            SecurityGroupIngress=[
                {
                    "IpProtocol": "icmp",
                    "FromPort": "-1",
                    "ToPort": "-1",
                    "CidrIp": "0.0.0.0/0"
                },  # Ping
                {
                    "IpProtocol": "tcp",
                    "FromPort": "80",
                    "ToPort": "80",
                    "CidrIp": "0.0.0.0/0"
                },  # HTTP
                {
                    "IpProtocol": "tcp",
                    "FromPort": "443",
                    "ToPort": "443",
                    "CidrIp": "0.0.0.0/0"
                },  # HTTPS
                {
                    "IpProtocol": "tcp",
                    "FromPort": "22",
                    "ToPort": "22",
                    "CidrIp": "0.0.0.0/0"
                },  # SSH
                {
                    "IpProtocol": "udp",
                    "FromPort": "10000",
                    "ToPort": "20000",
                    "CidrIp": "0.0.0.0/0"
                },  # HTTPS/OVPN
                {
                    "IpProtocol": "tcp",
                    "FromPort": "27017",
                    "ToPort": "27017",
                    "CidrIp": constants.SUPERNET
                },  # mongodb master
                {
                    "IpProtocol": "-1",
                    "FromPort": "-1",
                    "ToPort": "-1",
                    "CidrIp": constants.SUPERNET
                }  # Replies from local VPC
            ],
            SecurityGroupEgress=[{
                "IpProtocol": "-1",
                "FromPort": "-1",
                "ToPort": "-1",
                "CidrIp": "0.0.0.0/0"
            }]))
    # Add EBS volume if local mongo used
    _data_volume = None
    if _vpn_config.get('local_mongo', False):
        # Instance needs permission to self-attach the data volume at boot.
        self.add_iam_policy(
            iam.Policy(
                PolicyName='AttachVolume',
                PolicyDocument={
                    'Statement': [{
                        'Effect': 'Allow',
                        'Resource': '*',
                        'Action': [
                            'ec2:AttachVolume', 'ec2:DeleteSnapshot',
                            'ec2:DescribeTags',
                            'ec2:DescribeVolumeAttribute',
                            'ec2:DescribeVolumeStatus',
                            'ec2:DescribeVolumes', 'ec2:DetachVolume'
                        ]
                    }]
                }))
        # Retained on stack delete so the mongo data survives teardown.
        _data_volume = ec2.Volume(
            '{}DataVolume'.format(_vpn_name),
            Size=_vpn_config.get('data_volume_size', 20),
            VolumeType='gp2',
            AvailabilityZone=_vpn_subnet['AvailabilityZone'],
            DeletionPolicy='Retain',
            Tags=self.get_tags(service_override=self.service,
                               role_override=_vpn_name) +
            [ec2.Tag('Name', _vpn_name + "-datavol")])
        self.add_resource(_data_volume)
    # Add the elastic IP and the ENI for it, then attach it.
    _vpn_eip = self.add_resource(
        ec2.EIP('{}InstanceEIP'.format(_vpn_name), Domain='vpc'))
    # SourceDestCheck is off because the VPN instance routes traffic for
    # the client subnets (see the ec2.Route resources below).
    _vpn_eni = self.add_resource(
        ec2.NetworkInterface(
            '{}InstanceENI'.format(_vpn_name),
            SubnetId=_vpn_subnet['SubnetId'],
            Description='ENI for {}'.format(_vpn_name),
            GroupSet=[Ref(_vpn_security_group)] + self.security_groups,
            SourceDestCheck=False,
            Tags=self.get_tags(service_override=self.service,
                               role_override=_vpn_name)))
    self.get_eni_policies()
    self.add_resource(
        ec2.EIPAssociation('{}AssociateVPNInstanceENI'.format(_vpn_name),
                           AllocationId=GetAtt(_vpn_eip, "AllocationId"),
                           NetworkInterfaceId=Ref(_vpn_eni)))
    # Add a route53 DNS name
    if self.get_partition() != 'aws-us-gov':
        self.add_resource(
            route53.RecordSetGroup('{}Route53'.format(_vpn_name),
                                   HostedZoneName=constants.ENVIRONMENTS[
                                       self.env]['route53_zone'],
                                   RecordSets=[
                                       route53.RecordSet(
                                           Name=_public_dns,
                                           ResourceRecords=[Ref(_vpn_eip)],
                                           Type='A',
                                           TTL=600)
                                   ]))
    # Get all route tables in the VPC
    _vpc_route_tables = self.ec2_conn.describe_route_tables(
        Filters=[{
            'Name': 'vpc-id',
            'Values': [self.vpc_id]
        }])['RouteTables']
    # Set up the routing table for the VPC
    # Allow for changing client subnets in constants.py
    for client_subnet in _vpn_config['client_subnets']:
        for route_table in _vpc_route_tables:
            # Logical ID must be alphanumeric, so strip '.', '/' and '-'.
            self.add_resource(
                ec2.Route('{}Route{}{}'.format(
                    _vpn_name,
                    client_subnet.translate({
                        ord("."): "",
                        ord("/"): ""
                    }), route_table['RouteTableId'].replace('-', '')),
                          RouteTableId=route_table['RouteTableId'],
                          DestinationCidrBlock=client_subnet,
                          NetworkInterfaceId=Ref(_vpn_eni)))
    _mongodb = _vpn_config.get('mongodb')
    _server_id = _vpn_config['server_id']
    _userdata_template = self.get_cloudinit_template(
        _tpl_name="pritunl_bootstrap" if _bootstrap_mode else None,
        replacements=(('__PROMPT_COLOR__', self.prompt_color()),
                      ('__SERVER_ID__', _server_id),
                      ('__SERVICE__', self.service),
                      ('__MONGODB__', _mongodb if _mongodb else '')))
    _userdata = Sub(
        _userdata_template.replace(
            '${', '${!')  # Replace bash brackets with CFN escaped style
        .replace(
            '{#', '${'
        ),  # Replace rain-style CFN escapes with proper CFN brackets
        {
            'CFN_ENI_ID': Ref(_vpn_eni),
            'CFN_EBS_ID': Ref(_data_volume) if _data_volume else ''
        })
    _vpn_launch_configuration = self.add_resource(
        autoscaling.LaunchConfiguration(
            '{}LaunchConfiguration'.format(_vpn_name),
            AssociatePublicIpAddress=True,
            KeyName=Ref(self.keypair_name),
            ImageId=Ref(self.ami),
            InstanceType=Ref(self.instance_type),
            InstanceMonitoring=False,
            IamInstanceProfile=Ref(self.instance_profile),
            UserData=Base64(_userdata)))
    # Min 0 / Max 1: a single self-healing instance, never more.
    self.add_resource(
        autoscaling.AutoScalingGroup(
            '{}ASGroup'.format(_vpn_name),
            AvailabilityZones=[_vpn_subnet['AvailabilityZone']],
            HealthCheckType='EC2',
            LaunchConfigurationName=Ref(_vpn_launch_configuration),
            MinSize=0,
            MaxSize=1,
            VPCZoneIdentifier=[_vpn_subnet['SubnetId']],
            Tags=self.get_autoscaling_tags(service_override=self.service,
                                           role_override=_vpn_name) +
            [autoscaling.Tag('Name', _vpn_name, True)]))
def buildInstance(t, args):
    """Add the OpenEMR Express Plus webserver and its supporting
    resources to template *t*: security group + SSH/HTTP/HTTPS ingress,
    IAM policy/role/profile for S3 backups and KMS, an encrypted Docker
    EBS volume, and a cfn-init bootstrapped EC2 instance.

    Returns the mutated template.
    """
    t.add_resource(
        ec2.SecurityGroup('WebserverSG',
                          GroupDescription='Global Webserver Access',
                          VpcId=Ref('VPC'),
                          Tags=Tags(Name='Global Webserver Access')))
    # World-open SSH (22), HTTP (80) and HTTPS (443) ingress.
    t.add_resource(
        ec2.SecurityGroupIngress('WebserverSGIngress1',
                                 GroupId=Ref('WebserverSG'),
                                 IpProtocol='tcp',
                                 CidrIp='0.0.0.0/0',
                                 FromPort='22',
                                 ToPort='22'))
    t.add_resource(
        ec2.SecurityGroupIngress('WebserverSGIngress2',
                                 GroupId=Ref('WebserverSG'),
                                 IpProtocol='tcp',
                                 CidrIp='0.0.0.0/0',
                                 FromPort='80',
                                 ToPort='80'))
    t.add_resource(
        ec2.SecurityGroupIngress('WebserverSGIngress3',
                                 GroupId=Ref('WebserverSG'),
                                 IpProtocol='tcp',
                                 CidrIp='0.0.0.0/0',
                                 FromPort='443',
                                 ToPort='443'))
    # Instance role: list/read/write the backup prefix of the S3 bucket,
    # plus KMS usage for the backup encryption key.
    rolePolicyStatements = [{
        "Sid": "Stmt1500699052003",
        "Effect": "Allow",
        "Action": ["s3:ListBucket"],
        "Resource": [Join("", ["arn:aws:s3:::", Ref('S3Bucket')])]
    }, {
        "Sid": "Stmt1500699052000",
        "Effect": "Allow",
        "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject"],
        "Resource":
        [Join("", ["arn:aws:s3:::", Ref('S3Bucket'), '/Backup/*'])]
    }, {
        "Sid": "Stmt1500612724002",
        "Effect": "Allow",
        "Action": ["kms:Encrypt", "kms:Decrypt", "kms:GenerateDataKey*"],
        "Resource": [OpenEMRKeyARN]
    }]
    t.add_resource(
        iam.ManagedPolicy('WebserverPolicy',
                          Description='Policy for webserver instance',
                          PolicyDocument={
                              "Version": "2012-10-17",
                              "Statement": rolePolicyStatements
                          }))
    t.add_resource(
        iam.Role('WebserverRole',
                 AssumeRolePolicyDocument={
                     "Version": "2012-10-17",
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 },
                 Path='/',
                 ManagedPolicyArns=[Ref('WebserverPolicy')]))
    t.add_resource(
        iam.InstanceProfile('WebserverInstanceProfile',
                            Path='/',
                            Roles=[Ref('WebserverRole')]))
    # Encrypted gp2 data volume for /var/lib/docker; kept as a snapshot
    # on stack delete unless this is a dev stack.
    t.add_resource(
        ec2.Volume('DockerVolume',
                   DeletionPolicy='Delete' if args.dev else 'Snapshot',
                   Size=Ref('PracticeStorage'),
                   AvailabilityZone=Select("0", GetAZs("")),
                   VolumeType='gp2',
                   Encrypted=True,
                   KmsKeyId=OpenEMRKeyID,
                   Tags=Tags(Name="OpenEMR Practice")))
    # User data: install the cfn bootstrap tooling, run the Setup
    # configset, then signal CFN with the result.
    bootstrapScript = [
        "#!/bin/bash -x\n", "exec > /tmp/part-001.log 2>&1\n",
        "apt-get -y update\n", "apt-get -y install python-pip\n",
        "pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
        "cfn-init -v ", " --stack ", ref_stack_name,
        " --resource WebserverInstance ", " --configsets Setup ",
        " --region ", ref_region, "\n", "cfn-signal -e $? ", " --stack ",
        ref_stack_name, " --resource WebserverInstance ", " --region ",
        ref_region, "\n"
    ]
    # cfn-init payload: format/mount the Docker volume, record S3/KMS
    # ids for backups, generate + upload the backup passphrase, then run
    # the upstream OpenEMR launcher.
    setupScript = [
        "#!/bin/bash -xe\n", "exec > /tmp/cloud-setup.log 2>&1\n",
        "DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade -y -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" --force-yes\n",
        "mkfs -t ext4 /dev/xvdd\n", "mkdir /mnt/docker\n",
        "cat /root/fstab.append >> /etc/fstab\n", "mount /mnt/docker\n",
        "ln -s /mnt/docker /var/lib/docker\n",
        "apt-get -y install python-boto awscli\n", "S3=", Ref('S3Bucket'),
        "\n", "KMS=", OpenEMRKeyID, "\n",
        "touch /root/cloud-backups-enabled\n",
        "echo $S3 > /root/.cloud-s3.txt\n",
        "echo $KMS > /root/.cloud-kms.txt\n", "touch /tmp/mypass\n",
        "chmod 500 /tmp/mypass\n",
        "openssl rand -base64 32 >> /tmp/mypass\n",
        "aws s3 cp /tmp/mypass s3://$S3/Backup/passphrase.txt --sse aws:kms --sse-kms-key-id $KMS\n",
        "rm /tmp/mypass\n",
        "curl -L https://raw.githubusercontent.com/openemr/openemr-devops/master/packages/lightsail/launch.sh > /root/launch.sh\n",
        "chmod +x /root/launch.sh && /root/launch.sh -s 0\n"
    ]
    # nofail: instance still boots if the volume is missing/detached.
    fstabFile = ["/dev/xvdd /mnt/docker ext4 defaults,nofail 0 0\n"]
    bootstrapInstall = cloudformation.InitConfig(
        files={
            "/root/cloud-setup.sh": {
                "content": Join("", setupScript),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            },
            "/root/fstab.append": {
                "content": Join("", fstabFile),
                "mode": "000400",
                "owner": "root",
                "group": "root"
            }
        },
        commands={"01_setup": {
            "command": "/root/cloud-setup.sh"
        }})
    bootstrapMetadata = cloudformation.Metadata(
        cloudformation.Init(cloudformation.InitConfigSets(Setup=['Install']),
                            Install=bootstrapInstall))
    t.add_resource(
        ec2.Instance('WebserverInstance',
                     Metadata=bootstrapMetadata,
                     ImageId=FindInMap('RegionData', ref_region,
                                       'UbuntuAMI'),
                     InstanceType=Ref('InstanceSize'),
                     NetworkInterfaces=[
                         ec2.NetworkInterfaceProperty(
                             AssociatePublicIpAddress=True,
                             DeviceIndex="0",
                             GroupSet=[Ref('WebserverSG')],
                             SubnetId=Ref('PublicSubnet1'))
                     ],
                     KeyName=Ref('EC2KeyPair'),
                     IamInstanceProfile=Ref('WebserverInstanceProfile'),
                     Volumes=[{
                         "Device": "/dev/sdd",
                         "VolumeId": Ref('DockerVolume')
                     }],
                     Tags=Tags(Name='OpenEMR Express Plus'),
                     InstanceInitiatedShutdownBehavior='stop',
                     UserData=Base64(Join('', bootstrapScript)),
                     # Stack creation waits up to 25 min for cfn-signal.
                     CreationPolicy={"ResourceSignal": {
                         "Timeout": "PT25M"
                     }}))
    return t
def main(args):
    """Generate a CloudFormation template that conditionally creates up to
    five EBS volumes for a RAID set and writes it as JSON to
    ``args.target_path``.

    The single ``RAIDOptions`` comma-delimited parameter drives
    everything: index 0 (shared_dir) gates the first volume, index 2
    (num_of_vols) gates the rest, and indexes 3-7 control
    type/size/IOPS/encryption/KMS key.
    """
    number_of_vol = 5
    t = Template()
    availability_zone = t.add_parameter(
        Parameter(
            "AvailabilityZone",
            Type="String",
            Description="Availability Zone the cluster will launch into. "
            "THIS IS REQUIRED",
        ))
    raid_options = t.add_parameter(
        Parameter(
            "RAIDOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of RAID related options, "
            "8 parameters in total, "
            "["
            "0 shared_dir,"
            "1 raid_type,"
            "2 num_of_vols,"
            "3 vol_type,"
            "4 vol_size,"
            "5 vol_IOPS,"
            "6 encrypted, "
            "7 ebs_kms_key]",
        ))
    use_vol = [None] * number_of_vol
    v = [None] * number_of_vol
    for i in range(number_of_vol):
        # UseVol1 <=> shared_dir != NONE; UseVolN (N>1) <=> num_of_vols
        # exceeds N-1 AND all earlier volumes are in use (chained).
        if i == 0:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                Not(Equals(Select("0", Ref(raid_options)), "NONE")))
        else:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                And(Not(Equals(Select("2", Ref(raid_options)), str(i))),
                    Condition(use_vol[i - 1])),
            )
        # Iops is only valid for io1 volumes.
        use_ebs_iops = t.add_condition(
            "Vol%s_UseEBSPIOPS" % (i + 1),
            Equals(Select("3", Ref(raid_options)), "io1"))
        use_volume_size = t.add_condition(
            "Vol%s_UseVolumeSize" % (i + 1),
            Not(Equals(Select("4", Ref(raid_options)), "NONE")))
        use_volume_type = t.add_condition(
            "Vol%s_UseVolumeType" % (i + 1),
            Not(Equals(Select("3", Ref(raid_options)), "NONE")))
        use_ebs_encryption = t.add_condition(
            "Vol%s_UseEBSEncryption" % (i + 1),
            Equals(Select("6", Ref(raid_options)), "true"))
        # A KMS key is only applied when encryption is on AND a key is given.
        use_ebs_kms_key = t.add_condition(
            "Vol%s_UseEBSKMSKey" % (i + 1),
            And(Condition(use_ebs_encryption),
                Not(Equals(Select("7", Ref(raid_options)), "NONE"))),
        )
        v[i] = t.add_resource(
            ec2.Volume(
                "Volume%s" % (i + 1),
                AvailabilityZone=Ref(availability_zone),
                VolumeType=If(use_volume_type,
                              Select("3", Ref(raid_options)), "gp2"),
                Size=If(use_volume_size, Select("4", Ref(raid_options)),
                        20),
                Iops=If(use_ebs_iops, Select("5", Ref(raid_options)),
                        NoValue),
                Encrypted=If(use_ebs_encryption,
                             Select("6", Ref(raid_options)), NoValue),
                KmsKeyId=If(use_ebs_kms_key, Select("7", Ref(raid_options)),
                            NoValue),
                Condition=use_vol[i],
            ))
    # Output: comma-joined ids of the volumes actually created; each step
    # falls back to the previous list when its volume is skipped.
    outputs = [None] * number_of_vol
    vol_to_return = [None] * number_of_vol
    for i in range(number_of_vol):
        vol_to_return[i] = Ref(v[i])
        if i == 0:
            outputs[i] = If(use_vol[i], vol_to_return[i], "NONE")
        else:
            outputs[i] = If(use_vol[i], Join(",", vol_to_return[:(i + 1)]),
                            outputs[i - 1])
    t.add_output(
        Output("Volumeids",
               Description="Volume IDs of the resulted RAID EBS volumes",
               Value=outputs[number_of_vol - 1]))
    # FIX: use a context manager so the file handle is closed even if
    # template serialization raises (the original open/write/close left
    # the handle open on error).
    with open(args.target_path, "w") as output_file:
        output_file.write(t.to_json())
def buildInstance(t, args):
    """Add the OpenEMR Cloud Standard webserver and its supporting
    resources to template *t*: web/sysadmin security groups, IAM
    policy/role/profile for S3 backups + KMS (extended for recovery
    stacks), an encrypted Docker EBS volume, and a cfn-init bootstrapped
    EC2 instance. Returns the mutated template.

    FIX: the MYSQL_ROOT_PASS / MYSQL_PASS / OE_PASS entries of the
    docker-compose payload had been mangled (credential scrubbing replaced
    the `", Ref(` tokens with asterisks, leaving invalid Python); they are
    reconstructed as Ref('RDSPassword') / Ref('AdminPassword'), matching
    the MYSQL_HOST GetAtt pattern in the same list. The compose YAML
    nesting (collapsed to single spaces in the damaged source) is also
    restored to standard 2-space indentation — confirm against the
    upstream openemr-devops compose file.
    """
    t.add_resource(
        ec2.SecurityGroup('WebserverIngressSG',
                          GroupDescription='Global Webserver Access',
                          VpcId=Ref('VPC'),
                          Tags=Tags(Name='Global Webserver Access')))
    # World-open HTTP/HTTPS ingress.
    t.add_resource(
        ec2.SecurityGroupIngress('WebserverIngressSG80',
                                 GroupId=Ref('WebserverIngressSG'),
                                 IpProtocol='tcp',
                                 CidrIp='0.0.0.0/0',
                                 FromPort='80',
                                 ToPort='80'))
    t.add_resource(
        ec2.SecurityGroupIngress('WebserverIngress443',
                                 GroupId=Ref('WebserverIngressSG'),
                                 IpProtocol='tcp',
                                 CidrIp='0.0.0.0/0',
                                 FromPort='443',
                                 ToPort='443'))
    t.add_resource(
        ec2.SecurityGroup('SysAdminAccessSG',
                          GroupDescription='System Administrator Access',
                          VpcId=Ref('VPC'),
                          Tags=Tags(Name='System Administrator Access')))
    # SSH is only opened on dev stacks.
    if (args.dev):
        t.add_resource(
            ec2.SecurityGroupIngress('DevSysadminIngress22',
                                     GroupId=Ref('SysAdminAccessSG'),
                                     IpProtocol='tcp',
                                     CidrIp='0.0.0.0/0',
                                     FromPort='22',
                                     ToPort='22'))
    # Instance role: backup bucket access + KMS usage.
    rolePolicyStatements = [{
        "Sid": "Stmt1500699052003",
        "Effect": "Allow",
        "Action": ["s3:ListBucket"],
        "Resource": [Join("", ["arn:aws:s3:::", Ref('S3Bucket')])]
    }, {
        "Sid": "Stmt1500699052000",
        "Effect": "Allow",
        "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject"],
        "Resource":
        [Join("", ["arn:aws:s3:::", Ref('S3Bucket'), '/Backup/*'])]
    }, {
        "Sid": "Stmt1500612724002",
        "Effect": "Allow",
        "Action": ["kms:Encrypt", "kms:Decrypt", "kms:GenerateDataKey*"],
        "Resource": [OpenEMRKeyARN]
    }]
    # Recovery stacks additionally read the old stack's backup bucket.
    if (args.recovery):
        rolePolicyStatements.extend([
            {
                "Sid": "Stmt1500699052004",
                "Effect": "Allow",
                "Action": ["s3:ListBucket"],
                "Resource":
                [Join("", ["arn:aws:s3:::",
                           Ref('RecoveryS3Bucket')])]
            },
            {
                "Sid": "Stmt1500699052005",
                "Effect": "Allow",
                "Action": [
                    "s3:GetObject",
                ],
                "Resource": [
                    Join("", [
                        "arn:aws:s3:::",
                        Ref('RecoveryS3Bucket'), '/Backup/*'
                    ])
                ]
            },
        ])
    t.add_resource(
        iam.ManagedPolicy('WebserverPolicy',
                          Description='Policy for webserver instance',
                          PolicyDocument={
                              "Version": "2012-10-17",
                              "Statement": rolePolicyStatements
                          }))
    t.add_resource(
        iam.Role('WebserverRole',
                 AssumeRolePolicyDocument={
                     "Version": "2012-10-17",
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 },
                 Path='/',
                 ManagedPolicyArns=[Ref('WebserverPolicy')]))
    t.add_resource(
        iam.InstanceProfile('WebserverInstanceProfile',
                            Path='/',
                            Roles=[Ref('WebserverRole')]))
    # Encrypted gp2 data volume for Docker; snapshot on delete in prod.
    t.add_resource(
        ec2.Volume('DockerVolume',
                   DeletionPolicy='Delete' if args.dev else 'Snapshot',
                   Size=Ref('PracticeStorage'),
                   AvailabilityZone=Select("0", GetAZs("")),
                   VolumeType='gp2',
                   Encrypted=True,
                   KmsKeyId=OpenEMRKeyID,
                   Tags=Tags(Name="OpenEMR Practice")))
    # User data: run the Setup configset, then signal CFN with the result.
    bootstrapScript = [
        "#!/bin/bash -x\n", "exec > /var/log/openemr-cfn-bootstrap 2>&1\n",
        "cfn-init -v ", " --stack ", ref_stack_name,
        " --resource WebserverInstance ", " --configsets Setup ",
        " --region ", ref_region, "\n", "cfn-signal -e $? ", " --stack ",
        ref_stack_name, " --resource WebserverInstance ", " --region ",
        ref_region, "\n"
    ]
    setupScript = [
        "#!/bin/bash -xe\n", "exec > /tmp/cloud-setup.log 2>&1\n",
        "/root/openemr-devops/packages/standard/ami/ami-configure.sh\n"
    ]
    # Stack settings handed to the AMI's configure script via a file.
    stackPassthroughFile = [
        "S3=", Ref('S3Bucket'), "\n", "KMS=", OpenEMRKeyID, "\n"
    ]
    if (args.recovery):
        stackPassthroughFile.extend([
            "RECOVERYS3=",
            Ref('RecoveryS3Bucket'),
            "\n",
            "RECOVERY_NEWRDS=",
            GetAtt('RDSInstance', 'Endpoint.Address'),
            "\n",
        ])
    if (args.recovery):
        # Recovery: container comes up in MANUAL_SETUP so the restored
        # site/database can be wired in by the recovery tooling.
        dockerComposeFile = [
            "version: '3.1'\n",
            "services:\n",
            "  openemr:\n",
            "    restart: always\n",
            "    image: openemr/openemr", docker_version, "\n",
            "    ports:\n",
            "    - 80:80\n",
            "    - 443:443\n",
            "    volumes:\n",
            "    - logvolume01:/var/log\n",
            "    - sitevolume:/var/www/localhost/htdocs/openemr/sites\n",
            "    environment:\n",
            "      MANUAL_SETUP: 1\n",
            "volumes:\n",
            "  logvolume01: {}\n",
            "  sitevolume: {}\n"
        ]
    else:
        # Fresh install: point the container at the stack's RDS instance.
        dockerComposeFile = [
            "version: '3.1'\n",
            "services:\n",
            "  openemr:\n",
            "    restart: always\n",
            "    image: openemr/openemr", docker_version, "\n",
            "    ports:\n",
            "    - 80:80\n",
            "    - 443:443\n",
            "    volumes:\n",
            "    - logvolume01:/var/log\n",
            "    - sitevolume:/var/www/localhost/htdocs/openemr/sites\n",
            "    environment:\n",
            "      MYSQL_HOST: '", GetAtt('RDSInstance', 'Endpoint.Address'), "'\n",
            "      MYSQL_ROOT_USER: openemr\n",
            "      MYSQL_ROOT_PASS: '", Ref('RDSPassword'), "'\n",
            "      MYSQL_USER: openemr\n",
            "      MYSQL_PASS: '", Ref('RDSPassword'), "'\n",
            "      OE_USER: admin\n",
            "      OE_PASS: '", Ref('AdminPassword'), "'\n",
            "volumes:\n",
            "  logvolume01: {}\n",
            "  sitevolume: {}\n"
        ]
    bootstrapInstall = cloudformation.InitConfig(
        files={
            "/root/cloud-setup.sh": {
                "content": Join("", setupScript),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            },
            "/root/cloud-variables": {
                "content": Join("", stackPassthroughFile),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            },
            "/root/openemr-devops/packages/standard/docker-compose.yaml": {
                "content": Join("", dockerComposeFile),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            }
        },
        commands={"01_setup": {
            "command": "/root/cloud-setup.sh"
        }})
    bootstrapMetadata = cloudformation.Metadata(
        cloudformation.Init(cloudformation.InitConfigSets(Setup=['Install']),
                            Install=bootstrapInstall))
    t.add_resource(
        ec2.Instance('WebserverInstance',
                     Metadata=bootstrapMetadata,
                     ImageId=FindInMap('RegionData', ref_region,
                                       'OpenEMRMktPlaceAMI'),
                     InstanceType=Ref('WebserverInstanceSize'),
                     NetworkInterfaces=[
                         ec2.NetworkInterfaceProperty(
                             AssociatePublicIpAddress=True,
                             DeviceIndex="0",
                             GroupSet=[
                                 Ref('ApplicationSecurityGroup'),
                                 Ref('WebserverIngressSG'),
                                 Ref('SysAdminAccessSG')
                             ],
                             SubnetId=Ref('PublicSubnet1'))
                     ],
                     KeyName=Ref('EC2KeyPair'),
                     IamInstanceProfile=Ref('WebserverInstanceProfile'),
                     Volumes=[{
                         "Device": "/dev/sdd",
                         "VolumeId": Ref('DockerVolume')
                     }],
                     Tags=Tags(Name='OpenEMR Cloud Standard'),
                     InstanceInitiatedShutdownBehavior='stop',
                     UserData=Base64(Join('', bootstrapScript)),
                     # Stack creation waits up to 15 min for cfn-signal.
                     CreationPolicy={"ResourceSignal": {
                         "Timeout": "PT15M"
                     }}))
    return t
def configure(self):
    """Build the Nexus repository manager stack: security group, retained
    EBS data volume, static ENI, Route53 A + per-repository CNAME
    records, S3 blobstore IAM policy, and a single-instance autoscaling
    group.
    """
    self.defaults = {'instance_type': 't3.xlarge'}
    self.service = 'nexus'
    self.set_description('Sets up Nexus repository manager servers')
    self.get_default_security_groups()
    self.get_standard_parameters()
    self.get_standard_policies()
    self.ami = self.add_parameter(
        Parameter('AMI',
                  Type='String',
                  Description='AMI ID for instances',
                  Default=get_latest_ami_id(
                      self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2',
                      'amazon')))
    config = constants.ENVIRONMENTS[self.env][self.service]
    # We want the preferred subnet only.
    subnet = self.get_subnets('private', _preferred_only=True)[0]
    # Add our security group
    security_group = self.add_resource(
        ec2.SecurityGroup(
            '{}SecurityGroup'.format(self.name),
            VpcId=self.vpc_id,
            GroupDescription='Security Group for {}'.format(self.name),
            SecurityGroupIngress=[
                {
                    "IpProtocol": "tcp",
                    "FromPort": "80",
                    "ToPort": "80",
                    "CidrIp": constants.SUPERNET
                },  # HTTP
                {
                    "IpProtocol": "tcp",
                    "FromPort": "443",
                    "ToPort": "443",
                    "CidrIp": constants.SUPERNET
                },  # HTTPS
                # {"IpProtocol": "tcp", "FromPort": "8081", "ToPort": "8081", "CidrIp": constants.SUPERNET},  # NexusRM Direct (disabled!)
            ],
            SecurityGroupEgress=[{
                "IpProtocol": "-1",
                "FromPort": "-1",
                "ToPort": "-1",
                "CidrIp": "0.0.0.0/0"
            }]))
    # Add our EBS data volume; retained on stack delete so repository
    # data survives teardown.
    data_volume = ec2.Volume(
        '{}DataVolume'.format(self.name),
        Size=config.get('data_volume_size', 20),
        VolumeType='gp2',
        AvailabilityZone=subnet['AvailabilityZone'],
        DeletionPolicy='Retain',
        Tags=self.get_tags(service_override=self.service,
                           role_override=self.name) +
        [ec2.Tag('Name', self.name + "-datavol")])
    self.add_resource(data_volume)
    # Instance needs permission to self-attach the data volume at boot.
    self.add_iam_policy(
        iam.Policy(PolicyName='AttachVolume',
                   PolicyDocument={
                       'Statement': [{
                           'Effect': 'Allow',
                           'Resource': '*',
                           'Action': [
                               'ec2:AttachVolume', 'ec2:DeleteSnapshot',
                               'ec2:DescribeTags',
                               'ec2:DescribeVolumeAttribute',
                               'ec2:DescribeVolumeStatus',
                               'ec2:DescribeVolumes', 'ec2:DetachVolume'
                           ]
                       }]
                   }))
    # Add a ENI for static IP address
    eni = self.add_resource(
        ec2.NetworkInterface(
            '{}InstanceENI'.format(self.name),
            SubnetId=subnet['SubnetId'],
            Description='ENI for {}'.format(self.name),
            GroupSet=[Ref(security_group)] + self.security_groups,
            SourceDestCheck=True,
            Tags=self.get_tags(service_override=self.service,
                               role_override=self.name)))
    self.get_eni_policies()
    # Add a route53 A record for the main Nexus host
    route53_zone = constants.ENVIRONMENTS[self.env]['route53_zone']
    private_dns = config.get('private_dns',
                             'nexus.{}'.format(route53_zone))
    self.add_resource(
        route53.RecordSetGroup(
            '{}Route53'.format(self.name),
            HostedZoneName=route53_zone,
            RecordSets=[
                route53.RecordSet(Name=private_dns,
                                  ResourceRecords=[
                                      GetAtt(eni,
                                             'PrimaryPrivateIpAddress')
                                  ],
                                  Type='A',
                                  TTL=600)
            ]))
    # Add CNAME records for each repository, pointing to the main
    for repository in config['repositories']:
        self.add_resource(
            route53.RecordSetGroup(
                '{}{}Route53'.format(self.name,
                                     self.cfn_name(repository)),
                HostedZoneName=route53_zone,
                RecordSets=[
                    route53.RecordSet(Name='{}.{}'.format(
                        repository, route53_zone),
                                      ResourceRecords=[private_dns],
                                      Type='CNAME',
                                      TTL=600)
                ]))
    # Add S3 IAM role for nexus blobstore access
    self.add_iam_policy(
        iam.Policy(
            PolicyName='S3Access',
            PolicyDocument={
                'Statement': [{
                    "Effect": "Allow",
                    "Action": [
                        "s3:ListBucket", "s3:GetBucketLocation",
                        "s3:ListBucketMultipartUploads",
                        "s3:ListBucketVersions", "s3:GetBucketAcl",
                        "s3:GetLifecycleConfiguration",
                        "s3:PutLifecycleConfiguration"
                    ],
                    "Resource": [
                        'arn:{}:s3:::{}'.format(self.get_partition(),
                                                config['s3_bucket'])
                    ]
                }, {
                    "Effect": "Allow",
                    "Action": [
                        "s3:GetObject", "s3:PutObject", "s3:DeleteObject",
                        "s3:AbortMultipartUpload",
                        "s3:ListMultipartUploadParts",
                        "s3:GetObjectTagging", "s3:PutObjectTagging",
                        "s3:GetObjectTagging", "s3:DeleteObjectTagging"
                    ],
                    "Resource": [
                        'arn:{}:s3:::{}/*'.format(self.get_partition(),
                                                  config['s3_bucket'])
                    ]
                }]
            }))
    # Substitute the userdata template and feed it to CFN
    userdata_template = self.get_cloudinit_template(replacements=(
        ('__PROMPT_COLOR__', self.prompt_color()),
        ('__SERVICE__', self.service),
        ('__DEFAULT_DOMAIN__',
         route53_zone[:-1]),  # route53_zone has a trailing '.', strip it
        ('__TOP_DOMAIN__', constants.ROOT_ROUTE53_ZONE),
        # ('__REPOSITORIES__', " ".join(['"{}"'.format(x) for x in config['repositories']]))  # '"abc" "def" "ghi"'
    ))
    userdata = Sub(
        userdata_template.replace(
            '${', '${!')  # Replace bash brackets with CFN escaped style
        .replace(
            '{#', '${'
        ),  # Replace rain-style CFN escapes with proper CFN brackets
        {
            'CFN_ENI_ID': Ref(eni),
            'CFN_EBS_ID': Ref(data_volume)
        })
    launch_configuration = self.add_resource(
        autoscaling.LaunchConfiguration(
            '{}LaunchConfiguration'.format(self.name),
            AssociatePublicIpAddress=False,
            KeyName=Ref(self.keypair_name),
            ImageId=Ref(self.ami),
            InstanceType=Ref(self.instance_type),
            InstanceMonitoring=False,
            IamInstanceProfile=Ref(self.instance_profile),
            UserData=Base64(userdata)))
    # Min/Desired 0, Max 1: capacity is raised manually when needed.
    self.add_resource(
        autoscaling.AutoScalingGroup(
            '{}ASGroup'.format(self.name),
            AvailabilityZones=[subnet['AvailabilityZone']],
            HealthCheckType='EC2',
            LaunchConfigurationName=Ref(launch_configuration),
            MinSize=0,
            MaxSize=1,
            DesiredCapacity=0,
            VPCZoneIdentifier=[subnet['SubnetId']],
            Tags=self.get_autoscaling_tags(service_override=self.service,
                                           role_override=self.name) +
            [autoscaling.Tag('Name', self.name, True)]))
# NOTE(review): fragment — the first two lines below close a
# `template.add_parameter(Parameter(` call whose opening is outside the
# visible chunk; `template` and `vpc_availibility_zone` are also defined
# elsewhere.
        Type="AWS::EC2::AvailabilityZone::Name",
        Default=""))
#
# ┌───────────────────────────────────────────────────────────────────────────┐
# │                            EBS Volume Parameter                           │
# └───────────────────────────────────────────────────────────────────────────┘
#
# Size (GiB) of the second volume for the Sensu aggregator instance.
# (The "Aggrefator" typo is preserved — it is baked into the CFN logical
# IDs and renaming would replace the resources.)
sensuaggregatorebsvolumesize = template.add_parameter(
    Parameter("SensuAggrefatorEc2VolSizeParam",
              Description="Second Volume size for for Monitoring Instance",
              Type="Number",
              Default=30))
sensuaggregatorvolume = template.add_resource(
    ec2.Volume("SensuAggrefatorinstancevolume",
               AvailabilityZone=Ref(vpc_availibility_zone),
               DeletionPolicy="Delete",
               Size=Ref(sensuaggregatorebsvolumesize)))
# Same parameter/volume pair for the Sensu generator instance.
sensuebsvolumesize = template.add_parameter(
    Parameter("SensuGeneratorEc2VolSizeParam",
              Description="Second Volume size for for Monitoring Instance",
              Type="Number",
              Default=30))
sensuvolume = template.add_resource(
    ec2.Volume("SensuGeneratorinstancevolume",
               AvailabilityZone=Ref(vpc_availibility_zone),
               DeletionPolicy="Delete",
               Size=Ref(sensuebsvolumesize)))
#
# ┌───────────────────────────────────────────────────────────────────────────┐
# NOTE(review): fragment — the keyword arguments below close an
# `ec2.Instance(...)` call (bound to `aws_linux_instance`) whose opening,
# along with `t` and the referenced parameters/conditions, is outside the
# visible chunk.
        CreationPolicy=CreationPolicy(ResourceSignal=ResourceSignal(
            Timeout='PT5M')),
        EbsOptimized=If('DefaultEbsOptimizationCondition', Ref(AWS_NO_VALUE),
                        Ref(param_ebs_optimized)),
        Monitoring=Ref(param_detailed_monitoring),
        Tags=Tags(Name=Ref(AWS_STACK_NAME)),
    ))
# Optional data volume + attachment, created only when the template
# condition 'Volume1Condition' holds.
volume1 = t.add_resource(
    ec2.Volume(
        'Volume1',
        Condition='Volume1Condition',
        # DeletionPolicy=Retain,
        AvailabilityZone=GetAtt(aws_linux_instance, 'AvailabilityZone'),
        VolumeType=Ref(param_volume1_type),
        Size=Ref(param_volume1_size),
        # Iops only applies under the provisioned-IOPS condition.
        Iops=If('Volume1IopsOptimizedCondition', Ref(param_volume1_iops),
                Ref(AWS_NO_VALUE)),
        Encrypted=If('VolumeEncryptedCondition', 'true', 'false'),
        KmsKeyId=If('VolumeEncryptedCondition', Ref(param_volume_key),
                    Ref(AWS_NO_VALUE)),
        Tags=Tags(Name=Ref(AWS_STACK_NAME)),
    ))
volume1_attachment = t.add_resource(
    ec2.VolumeAttachment(
        'Volume1Attachment',
        Condition='Volume1Condition',
        Device=Ref(param_volume1_device),
        InstanceId=Ref(aws_linux_instance),
        VolumeId=Ref(volume1),
    ))
# NOTE(review): fragment — this code sits inside a `for i in range(...)`
# loop whose header, along with the parameters it references
# (EBSSnapshotId, EBSVolumeId, AvailabilityZone, VolumeType, VolumeSize,
# VolumeIOPS, EBSEncryption, EBSKMSKeyId) and the conditions
# (UseVolumeType, UseVolumeSize, UseEBSPIOPS, UseEBSEncryption,
# UseEBSKMSKey, CreateVol) plus `numberOfVol`, lies outside the visible
# chunk; the trailing `if i == 0:` is also cut off mid-statement.
UseEBSSnapshot = t.add_condition(
    "Vol%s_UseEBSSnapshot" % (i + 1),
    Not(Equals(Select(str(i), Ref(EBSSnapshotId)), "NONE")))
UseExistingEBSVolume[i] = t.add_condition(
    "Vol%s_UseExistingEBSVolume" % (i + 1),
    Not(Equals(Select(str(i), Ref(EBSVolumeId)), "NONE")))
v[i] = t.add_resource(
    ec2.Volume("Volume%s" % (i + 1),
               AvailabilityZone=Ref(AvailabilityZone),
               VolumeType=If(UseVolumeType, Select(str(i), Ref(VolumeType)),
                             "gp2"),
               # Size must be omitted when restoring from a snapshot.
               Size=If(
                   UseEBSSnapshot, NoValue,
                   If(UseVolumeSize, Select(str(i), Ref(VolumeSize)),
                      "20")),
               SnapshotId=If(UseEBSSnapshot,
                             Select(str(i), Ref(EBSSnapshotId)), NoValue),
               Iops=If(UseEBSPIOPS, Select(str(i), Ref(VolumeIOPS)),
                       NoValue),
               Encrypted=If(UseEBSEncryption,
                            Select(str(i), Ref(EBSEncryption)), NoValue),
               KmsKeyId=If(UseEBSKMSKey, Select(str(i), Ref(EBSKMSKeyId)),
                           NoValue),
               Condition=CreateVol))
outputs = [None] * numberOfVol
volToReturn = [None] * numberOfVol
for i in range(numberOfVol):
    # Pre-existing volume ids pass straight through; created volumes
    # resolve via Ref.
    volToReturn[i] = If(UseExistingEBSVolume[i],
                        Select(str(i), Ref(EBSVolumeId)), Ref(v[i]))
    if i == 0:
# NOTE(review): fragment — the first line below closes a
# `t.add_resource(iam.` call begun outside the visible chunk; `t` and
# `ud` (userdata) are also defined elsewhere.
    InstanceProfile("InstanceProfile", Path="/", Roles=[Ref("Role")]))
t.add_resource(
    ec2.Instance(
        "Instance",
        # NOTE(review): hard-coded AMI id — region-specific; confirm.
        ImageId="ami-ed838091",
        InstanceType="t2.micro",
        SecurityGroups=[Ref("SecurityGroup")],
        KeyName=Ref("KeyPair"),
        UserData=ud,
        IamInstanceProfile=Ref("InstanceProfile"),
    ))
# 100 GiB volume in the same AZ as the instance, attached at /dev/sdh.
t.add_resource(
    ec2.Volume("Volume",
               Size="100",
               AvailabilityZone=GetAtt("Instance", "AvailabilityZone")))
t.add_resource(
    ec2.VolumeAttachment("VolumeAttachment",
                         InstanceId=Ref("Instance"),
                         VolumeId=Ref("Volume"),
                         Device="/dev/sdh"))
t.add_output(
    Output(
        "InstancePublicIp",
        Description="Public IP of our instance.",
        Value=GetAtt("Instance", "PublicIp"),
    ))
def main(args):
    """Generate a CloudFormation template that provisions up to 5 EBS volumes.

    For each volume slot the template either creates a new ``AWS::EC2::Volume``
    (optionally from a snapshot, encrypted, and/or with provisioned IOPS) or
    reuses an existing volume id supplied by the user.  All decisions are made
    at *stack* time via CloudFormation Conditions, so every parameter is a
    CommaDelimitedList with one entry per volume slot and "NONE" as the
    "not provided" sentinel.

    The rendered template JSON is written to ``args.target_path``.

    :param args: parsed CLI arguments; only ``args.target_path`` (output file
        path) is used.
    :returns: None (side effect: writes the template file).
    """
    number_of_vol = 5

    t = Template()
    availability_zone = t.add_parameter(
        Parameter(
            "AvailabilityZone",
            Type="String",
            Description=
            "Availability Zone the cluster will launch into. THIS IS REQUIRED",
        ))
    volume_size = t.add_parameter(
        Parameter(
            "VolumeSize",
            Type="CommaDelimitedList",
            Description="Size of EBS volume in GB, if creating a new one"))
    volume_type = t.add_parameter(
        Parameter(
            "VolumeType",
            Type="CommaDelimitedList",
            Description="Type of volume to create either new or from snapshot")
    )
    volume_iops = t.add_parameter(
        Parameter(
            "VolumeIOPS",
            Type="CommaDelimitedList",
            Description=
            "Number of IOPS for volume type io1. Not used for other volume types.",
        ))
    ebs_encryption = t.add_parameter(
        Parameter(
            "EBSEncryption",
            Type="CommaDelimitedList",
            Description="Boolean flag to use EBS encryption for /shared volume. "
            "(Not to be used for snapshots)",
        ))
    ebs_kms_id = t.add_parameter(
        Parameter(
            "EBSKMSKeyId",
            Type="CommaDelimitedList",
            Description=
            "KMS ARN for customer created master key, will be used for EBS encryption",
        ))
    ebs_volume_id = t.add_parameter(
        Parameter("EBSVolumeId",
                  Type="CommaDelimitedList",
                  Description="Existing EBS volume Id"))
    ebs_snapshot_id = t.add_parameter(
        Parameter(
            "EBSSnapshotId",
            Type="CommaDelimitedList",
            Description=
            "Id of EBS snapshot if using snapshot as source for volume",
        ))
    ebs_vol_num = t.add_parameter(
        Parameter(
            "NumberOfEBSVol",
            Type="Number",
            Description="Number of EBS Volumes the user requested, up to %s" %
            number_of_vol,
        ))

    # Per-slot bookkeeping: condition handles and created Volume resources.
    use_vol = [None] * number_of_vol
    use_existing_ebs_volume = [None] * number_of_vol
    v = [None] * number_of_vol

    for i in range(number_of_vol):
        # "UseVolN" chains: slot i is in use when the requested count exceeds i
        # AND slot i-1 is also in use.  Slot 0 is always in use, so it gets no
        # UseVol condition; a new volume is created only when no existing
        # volume id ("NONE" sentinel) was supplied for the slot.
        if i == 0:
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                Equals(Select(str(i), Ref(ebs_volume_id)), "NONE"))
        elif i == 1:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                Not(Equals(Ref(ebs_vol_num), str(i))))
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                And(Condition(use_vol[i]),
                    Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")),
            )
        else:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                And(Not(Equals(Ref(ebs_vol_num), str(i))),
                    Condition(use_vol[i - 1])))
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                And(Condition(use_vol[i]),
                    Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")),
            )

        # Optional feature flags for this slot, all driven by the "NONE"
        # sentinel (or literal values) in the CommaDelimitedList parameters.
        use_ebs_iops = t.add_condition(
            "Vol%s_UseEBSPIOPS" % (i + 1),
            Equals(Select(str(i), Ref(volume_type)), "io1"))
        use_vol_size = t.add_condition(
            "Vol%s_UseVolumeSize" % (i + 1),
            Not(Equals(Select(str(i), Ref(volume_size)), "NONE")))
        use_vol_type = t.add_condition(
            "Vol%s_UseVolumeType" % (i + 1),
            Not(Equals(Select(str(i), Ref(volume_type)), "NONE")))
        use_ebs_encryption = t.add_condition(
            "Vol%s_UseEBSEncryption" % (i + 1),
            Equals(Select(str(i), Ref(ebs_encryption)), "true"))
        use_ebs_kms_key = t.add_condition(
            "Vol%s_UseEBSKMSKey" % (i + 1),
            And(Condition(use_ebs_encryption),
                Not(Equals(Select(str(i), Ref(ebs_kms_id)), "NONE"))),
        )
        use_ebs_snapshot = t.add_condition(
            "Vol%s_UseEBSSnapshot" % (i + 1),
            Not(Equals(Select(str(i), Ref(ebs_snapshot_id)), "NONE")))
        use_existing_ebs_volume[i] = t.add_condition(
            "Vol%s_UseExistingEBSVolume" % (i + 1),
            Not(Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")))

        # The Volume resource itself; Size is omitted when restoring from a
        # snapshot (the snapshot dictates it), defaults: 20 GB, gp2.
        v[i] = t.add_resource(
            ec2.Volume(
                "Volume%s" % (i + 1),
                AvailabilityZone=Ref(availability_zone),
                VolumeType=If(use_vol_type,
                              Select(str(i), Ref(volume_type)), "gp2"),
                Size=If(
                    use_ebs_snapshot, NoValue,
                    If(use_vol_size, Select(str(i), Ref(volume_size)), "20")),
                SnapshotId=If(use_ebs_snapshot,
                              Select(str(i), Ref(ebs_snapshot_id)), NoValue),
                Iops=If(use_ebs_iops, Select(str(i), Ref(volume_iops)),
                        NoValue),
                Encrypted=If(use_ebs_encryption,
                             Select(str(i), Ref(ebs_encryption)), NoValue),
                KmsKeyId=If(use_ebs_kms_key, Select(str(i), Ref(ebs_kms_id)),
                            NoValue),
                Condition=create_vol,
            ))

    # Build a single comma-joined output of the effective volume ids:
    # the supplied existing id when present, otherwise the created volume.
    outputs = [None] * number_of_vol
    vol_to_return = [None] * number_of_vol
    for i in range(number_of_vol):
        vol_to_return[i] = If(use_existing_ebs_volume[i],
                              Select(str(i), Ref(ebs_volume_id)),
                              Ref(v[i]))
        if i == 0:
            outputs[i] = vol_to_return[i]
        else:
            # Each step either extends the joined list (slot in use) or
            # falls back to the previous slot's output.
            outputs[i] = If(use_vol[i],
                            Join(",", vol_to_return[:(i + 1)]),
                            outputs[i - 1])

    t.add_output(
        Output("Volumeids",
               Description="Volume IDs of the resulted EBS volumes",
               Value=outputs[number_of_vol - 1]))

    # Use a context manager so the file is closed even if rendering fails
    # (the original open/write/close leaked the handle on exception).
    json_file_path = args.target_path
    with open(json_file_path, "w") as output_file:
        output_file.write(t.to_json())
    def configure(self):
        """
        Returns a cassandra template with seed nodes

        Builds, per environment: a Cassandra security group (+ intra-group
        all-traffic rule), an IAM policy for EBS snapshot backups, and — for
        every instance of every configured cluster — a fixed-IP ENI, an
        optional retained data EBS volume, and a single-node ASG pinned to
        that instance's subnet/AZ.
        """
        self.add_description('Sets up Cassandra in all Zones')
        # Standard scaffolding shared with the other stack types.
        self.get_eni_policies()
        self.get_default_security_groups()
        self.get_standard_parameters()
        self.get_standard_policies()
        _global_config = constants.ENVIRONMENTS[self.env]

        # AMI parameter defaults to the newest 'ivy-cassandra' image for this
        # region/owner.
        self.ami = self.add_parameter(
            Parameter('AMI',
                      Type='String',
                      Description='AMI ID for instances',
                      Default=get_latest_ami_id(
                          self.region, 'ivy-cassandra',
                          _global_config.get('ami_owner', 'self'))))

        # Cassandra ports, open to the whole VPC CIDR.
        _cassandra_security_group = self.add_resource(
            ec2.SecurityGroup(
                '{}SecurityGroup'.format(self.name),
                VpcId=self.vpc_id,
                GroupDescription='Security Group for {} Instances'.format(
                    self.name),
                SecurityGroupIngress=[
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 7000,
                        'ToPort': 7001,
                        'CidrIp': self.vpc_cidr
                    },  # inter-node
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 7199,
                        'ToPort': 7199,
                        'CidrIp': self.vpc_cidr
                    },  # jmx
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 9042,
                        'ToPort': 9042,
                        'CidrIp': self.vpc_cidr
                    },  # client port
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 9160,
                        'ToPort': 9160,
                        'CidrIp': self.vpc_cidr
                    },  # client (thrift)
                ]))
        # Self-referencing rule: members of the group may send each other
        # any traffic on any port.
        self.add_resource(
            ec2.SecurityGroupIngress(
                '{}IngressSecurityGroup'.format(self.name),
                GroupId=Ref(_cassandra_security_group),
                IpProtocol='-1',
                FromPort=-1,
                ToPort=-1,
                SourceSecurityGroupId=Ref(_cassandra_security_group
                                          )  # this allows members all traffic
            ))
        self.add_security_group(Ref(_cassandra_security_group))

        # Add support for creating EBS snapshots and tagging them
        self.add_iam_policy(
            iam.Policy(PolicyName='CassandraBackups',
                       PolicyDocument={
                           'Statement': [{
                               'Effect': 'Allow',
                               'Resource': '*',
                               'Action': [
                                   'ec2:AttachVolume', 'ec2:CreateSnapshot',
                                   'ec2:CreateTags', 'ec2:DeleteSnapshot',
                                   'ec2:DescribeInstances',
                                   'ec2:DescribeSnapshots',
                                   'ec2:DescribeTags',
                                   'ec2:DescribeVolumeAttribute',
                                   'ec2:DescribeVolumeStatus',
                                   'ec2:DescribeVolumes', 'ec2:DetachVolume'
                               ]
                           }]
                       }))

        for cluster in constants.ENVIRONMENTS[
                self.env]['cassandra']['clusters']:
            for _instance in cluster['instances']:
                # Find the private subnet whose CIDR contains this instance's
                # static IP — the instance must land in that subnet/AZ.
                subnet = [
                    s for s in self.get_subnets('private')
                    if netaddr.IPAddress(_instance['ip']) in netaddr.IPNetwork(
                        s['CidrBlock'])
                ][0]
                service = 'cassandra-{}'.format(cluster['name'])
                role = '-'.join([
                    self.name, cluster['name'], subnet['AvailabilityZone'],
                    _instance['ip']
                ])
                tags = self.get_tags(service_override=service,
                                     role_override=role)

                # Create ENI for this server, and hold onto a Ref for it so we can feed it into the userdata
                # uniq_id keeps resource names stable yet unique per role.
                uniq_id = hashlib.md5(role.encode('utf-8')).hexdigest()[:10]
                eni = ec2.NetworkInterface(
                    self.name + cluster['name'] + "ENI" + uniq_id,
                    Description=
                    'Cassandra: Cluster: {} ENV: {} PrivateSubnet {}'.format(
                        cluster['name'], self.env, subnet['SubnetId']),
                    GroupSet=self.security_groups,
                    PrivateIpAddress=_instance['ip'],
                    SourceDestCheck=True,
                    SubnetId=subnet['SubnetId'],
                    Tags=tags,
                )
                self.add_resource(eni)

                # Add the rootfs
                _block_device_mapping = get_block_device_mapping(
                    self.parameters['InstanceType'].resource['Default'])
                _block_device_mapping += {
                    ec2.BlockDeviceMapping(DeviceName="/dev/xvda",
                                           Ebs=ec2.EBSBlockDevice(
                                               DeleteOnTermination=True,
                                               VolumeSize=cluster.get(
                                                   'rootfs_size', 20),
                                               VolumeType="gp2",
                                           ))
                }

                # Seed the cluster from one node in the remote DC, plus three nodes in this DC
                # We want to avoid making too many nodes into seeds
                if cluster.get('remote_seed'):
                    remote_env_name = cluster['remote_seed']['datacenter']
                    remote_cluster_name = cluster['remote_seed']['cluster']
                    remote_clusters = constants.ENVIRONMENTS[remote_env_name][
                        'cassandra']['clusters']
                    # filter to just the remote cluster in the remote DC and return that one only
                    remote_cluster = list(
                        filter(lambda x: x['name'] == remote_cluster_name,
                               remote_clusters))[0]
                    remote_seeds = [
                        i['ip'] for i in remote_cluster['instances']
                    ][:1]
                    local_seeds = [i['ip'] for i in cluster['instances']][:3]
                    seeds = ','.join(remote_seeds + local_seeds)
                else:
                    # Use the first three cassandra nodes as seeds
                    seeds = ','.join([i['ip'] for i in cluster['instances']][:3])

                if cluster.get('data_volume_size'):
                    # Create the EBS volume
                    # Retained on stack deletion so cluster data survives.
                    data_volume = ec2.Volume(
                        '{}{}DataVolume{}'.format(
                            self.name, cluster['name'], uniq_id
                        ),  # something like 'envnameCassandraappDataVolumec47145e176'
                        Size=cluster.get('data_volume_size', 20),
                        VolumeType='gp2',
                        AvailabilityZone=subnet['AvailabilityZone'],
                        DeletionPolicy='Retain',
                        Tags=tags + [ec2.Tag('Name', role + "-datavol")])
                    self.add_resource(data_volume)
                else:
                    data_volume = None

                # Create the user data in two phases
                # Phase 1: substitute from constants in Rain
                user_data_template = self.get_cloudinit_template(
                    cluster['cassandra_template'],
                    replacements=(('__PROMPT_COLOR__', self.prompt_color()),
                                  ('__CASSANDRA_CLUSTER__', cluster['name']),
                                  ('__CASSANDRA_CLUSTER_OVERRIDE__',
                                   cluster.get('cluster_name_override', "")),
                                  ('__CASSANDRA_SEEDS__', seeds),
                                  ('__SERVICE__', service)))
                # Phase 2: Allow AWS Cloudformation to further substitute Ref()'s in the userdata
                userdata = Base64(
                    Sub(
                        user_data_template.replace(
                            '${', '${!'
                        )  # Replace bash brackets with CFN escaped style
                        .replace(
                            '{#', '${'
                        ),  # Replace rain-style CFN escapes with proper CFN brackets
                        {
                            'CFN_ENI_ID': Ref(eni),
                            'CFN_DATA_EBS_VOLUME_ID':
                            Ref(data_volume) if data_volume else ""
                        }))

                # Create the Launch Configuration / ASG
                # instance_type may be a per-cluster literal or the stack-wide
                # parameter Ref.
                _instance_type = cluster.get('instance_type',
                                             Ref(self.instance_type))
                launch_configuration = self.add_resource(
                    autoscaling.LaunchConfiguration(
                        '{}{}LaunchConfiguration{}'.format(
                            self.name, cluster['name'], uniq_id),
                        AssociatePublicIpAddress=False,
                        BlockDeviceMappings=_block_device_mapping,
                        EbsOptimized=True
                        if _instance_type in EBS_OPTIMIZED_INSTANCES else
                        False,
                        ImageId=Ref(self.ami),
                        InstanceType=_instance_type,
                        InstanceMonitoring=False,
                        IamInstanceProfile=Ref(self.instance_profile),
                        KeyName=Ref(self.keypair_name),
                        SecurityGroups=self.security_groups,
                        UserData=userdata))
                # MinSize == MaxSize == 1: the ASG is a supervisor for exactly
                # one node, pinned to this instance's AZ/subnet.
                self.add_resource(
                    autoscaling.AutoScalingGroup(
                        '{}{}ASGroup{}'.format(self.name, cluster['name'],
                                               uniq_id),
                        AvailabilityZones=[subnet['AvailabilityZone']],
                        HealthCheckType='EC2',
                        LaunchConfigurationName=Ref(launch_configuration),
                        MinSize=1,
                        MaxSize=1,
                        VPCZoneIdentifier=[subnet['SubnetId']],
                        Tags=self.get_autoscaling_tags(
                            service_override=service, role_override=role) +
                        [autoscaling.Tag('Name', role, True)]))