def gen_postgis_function():
    """Build the Lambda Function resource that provisions PostGIS.

    The function code is pulled from an S3 bucket/key given as stack
    parameters and the Lambda is attached to the first two private subnets
    exported by the network stack.
    """

    def _private_subnet(index):
        # One element of the comma-separated private-subnet export.
        return Select(
            index,
            Split(
                ",",
                ImportValue(Sub("${NetworkName}-network-vpc-PrivateSubnets")),
            ),
        )

    return Function(
        "PostGisProvisionerFunction",
        Code=Code(
            S3Bucket=Ref("BucketName"),
            S3Key=Ref("BucketKey"),
        ),
        FunctionName=Sub("${AWS::StackName}-PostGisProvisioner"),
        Handler="postgis_provisioner.lambda_handler",
        Role=GetAtt("PostgisProvisionerExecutionRole", "Arn"),
        Timeout="60",
        Runtime="python3.6",
        VpcConfig=VPCConfig(
            SecurityGroupIds=[Ref("PostGisProvisionerSg")],
            SubnetIds=[_private_subnet(0), _private_subnet(1)],
        ),
    )
def add_dns_entries(self, template):
    """
    Method to add CloudMap service and record for DNS resolution.

    Registers an SdService named after the first label of the virtual
    service name, then a fake SdInstance pointing at a link-local address
    so the record resolves inside the mesh.

    :param template: troposphere Template the resources are added to.
    """
    sd_entry = SdService(
        f"{self.title.title()}ServiceDiscovery",
        template=template,
        DependsOn=[self.service.title],
        Description=Sub(
            f"Record for VirtualService {self.title} in mesh ${{{self.service.title}.MeshName}}"
        ),
        NamespaceId=Ref(PRIVATE_DNS_ZONE_ID),
        DnsConfig=SdDnsConfig(
            RoutingPolicy="MULTIVALUE",
            NamespaceId=Ref(AWS_NO_VALUE),
            DnsRecords=[SdDnsRecord(TTL="30", Type="A")],
        ),
        # Only the first dot-separated label of the virtual service name.
        Name=Select(0, Split(".", GetAtt(self.service, "VirtualServiceName"))),
    )
    SdInstance(
        f"{self.title.title()}ServiceDiscoveryFakeInstance",
        template=template,
        # Fix: constant string — the original used an f-string with no
        # placeholders (flake8 F541). Runtime value is unchanged.
        InstanceAttributes={"AWS_INSTANCE_IPV4": "169.254.255.254"},
        ServiceId=Ref(sd_entry),
    )
def update_from_vpc(self, vpc_stack, settings=None):
    """
    Override for EFS to update settings from VPC Stack

    :param ecs_composex.vpc.vpc_stack.XStack vpc_stack:
    :param ecs_composex.common.settings.ComposeXSettings settings:
    :return:
    """
    subnets_params = self.subnets_param
    if self.subnets_override:
        # Find the AZ/subnet group whose title matches the requested override.
        for subnet_az in vpc_stack.vpc_resource.azs:
            if subnet_az.title == self.subnets_override:
                subnets_params = subnet_az
                break
        else:
            # for/else: loop completed without a match — the override name
            # does not exist in the x-vpc definition.
            raise KeyError(
                f"{self.module.res_key}.{self.name} - "
                f"Override subnet name {self.subnets_override} is not defined in x-vpc",
                list(vpc_stack.vpc_resource.azs.keys()),
            )
    # One EFS mount target per AZ of the selected subnet group, each placed
    # into the matching storage subnet by index.
    for count, az in enumerate(vpc_stack.vpc_resource.azs[subnets_params]):
        self.stack.stack_template.add_resource(
            MountTarget(
                # NOTE(review): az is used as a string here (str.title()),
                # while subnet_az above exposes a .title attribute — confirm
                # the element types of vpc_resource.azs before changing this.
                f"{self.logical_name}MountPoint{az.title().strip().split('-')[-1]}",
                FileSystemId=Ref(self.cfn_resource),
                SecurityGroups=[GetAtt(self.db_sg, "GroupId")],
                SubnetId=Select(count, Ref(STORAGE_SUBNETS)),
            ))
def availability_zones(self):
    """Return Select() references for the configured AZ count and export
    them as a comma-joined ``AvailabilityZones`` output."""
    zone_refs = [
        Select(index, GetAZs(''))
        for index in range(self.variables['AZCount'])
    ]
    self.template.add_output(
        Output('AvailabilityZones', Value=Join(",", zone_refs)))
    return zone_refs
def dump_yaml(cfn_file):
    """Build a minimal VPC + single-subnet template and write it to
    *cfn_file* as YAML."""
    template = Template()
    vpc_cidr = template.add_parameter(
        Parameter(
            "vpcCidrParam",
            Description="string of vpc cidr block to use",
            Type="String",
        ))
    subnet_cidr = template.add_parameter(
        Parameter(
            "subnetCidrParam",
            Description="string of subnet cidr block to use",
            Type="String",
        ))
    # Shared tag set applied to both the VPC and the subnet.
    common_tags = Tags(
        Name=Sub("${AWS::StackName}"),
        user="******",
        stelligent_u_lesson='lesson-4-1',
        stelligent_u_lab='lab-1',
    )
    vpc = template.add_resource(
        ec2.VPC(
            "Vpc",
            CidrBlock=Ref(vpc_cidr),
            EnableDnsSupport=True,
            EnableDnsHostnames=True,
            InstanceTenancy="default",
            Tags=common_tags,
        ))
    subnet = template.add_resource(
        ec2.Subnet(
            "Subnet",
            VpcId=Ref(vpc),
            CidrBlock=Ref(subnet_cidr),
            MapPublicIpOnLaunch=False,
            AvailabilityZone=Select(0, GetAZs()),
            Tags=common_tags,
        ))
    template.add_output([
        Output(
            "vpcId",
            Description="InstanceId of the newly created EC2 instance",
            Value=Ref(vpc),
        ),
        Output(
            "SubnetId",
            Description="InstanceId of the newly created EC2 instance",
            Value=Ref(subnet),
        ),
    ])
    with open(cfn_file, 'w') as handle:
        handle.write(template.to_yaml())
def add_private_subnet2(self):
    ''' Add a private subnet 2 '''
    # Placed in the second AZ ('1') of the current region.
    private_subnet_two = Subnet(
        title=constants.PRIV_SUBNET2,
        VpcId=Ref(constants.VPC),
        CidrBlock=Ref('PrivateSubnet2CIDRBlock'),
        AvailabilityZone=Select('1', GetAZs()),
    )
    self.cfn_template.add_resource(private_subnet_two)
    return self.cfn_template
def _wait_condition_data_to_s3_url(condition: cloudformation.WaitCondition, artifacts_bucket: s3.Bucket) -> Sub:
    """Build a CloudFormation ``Sub`` structure that resolves to the S3 key reported to a wait condition.

    :param condition: Wait condition to reference
    :param artifacts_bucket: Bucket to reference
    """
    # The wait-condition Data attribute is a JSON blob; the 4th
    # double-quote-delimited token is the reported key.
    reported_key = Select(3, Split('"', condition.get_att("Data")))
    return Sub(
        f"https://${{{artifacts_bucket.title}.DomainName}}/${{key}}",
        {"key": reported_key},
    )
def add_public_subnet2(self):
    ''' Add a public subnet 2 '''
    # Placed in the second AZ ('1'); instances get public IPs on launch.
    public_subnet_two = Subnet(
        title=constants.PUB_SUBNET2,
        VpcId=Ref(constants.VPC),
        CidrBlock=Ref('PublicSubnet2CIDRBlock'),
        MapPublicIpOnLaunch="true",
        AvailabilityZone=Select('1', GetAZs()),
    )
    self.cfn_template.add_resource(public_subnet_two)
    return self.cfn_template
def create_composite_alarm(alarm: Alarm, alarms: list[Alarm]) -> None:
    """
    Function to create the composite alarms

    :param alarm: alarm to turn into a CloudWatch CompositeAlarm.
    :param alarms: alarms the composite expression may reference.
    :raises KeyError: when neither Properties.AlarmRule nor
        MacroParameters.CompositeExpression is set on *alarm*.
    """
    # The alarm rule can come from raw CFN Properties or macro parameters.
    if alarm.properties and keyisset("AlarmRule", alarm.properties):
        eval_expression = alarm.properties["AlarmRule"]
    elif alarm.parameters and keyisset("CompositeExpression", alarm.parameters):
        eval_expression = alarm.parameters["CompositeExpression"]
    else:
        raise KeyError(
            "Either Properties.AlarmRule or MacroParameters.CompositeExpression must be set",
            alarm.properties,
            alarm.parameters,
        )
    mapping = map_expression_to_alarms(eval_expression, alarms)
    composite_expression = create_composite_alarm_expression(
        mapping, eval_expression)
    # Derive a short stack-unique token from the stack id ARN:
    # take the <uuid> of stack/<name>/<uuid>, then its 5th dash-separated part.
    stack_id = Select(4, Split("-", Select(2, Split("/", Ref(AWS_STACK_ID)))))
    alarm_name = f"${{{AWS_REGION}}}-${{StackId}}-CompositeAlarmFor" + "".join(
        [a.title for a in mapping.values()])
    # Truncate, presumably reserving 12 chars for the StackId substitution
    # within CloudWatch's alarm-name length limit — confirm before changing.
    alarm_name = (alarm_name[:(254 - 12)]
                  if len(alarm_name) > (254 - 12) else alarm_name)
    if alarm.properties:
        props = import_record_properties(alarm.properties, CompositeAlarm)
        props.update({
            "AlarmRule": composite_expression,
            "AlarmName": Sub(alarm_name, StackId=stack_id),
        })
    else:
        props = {
            "AlarmRule": composite_expression,
            "AlarmName": Sub(alarm_name, StackId=stack_id),
            "ActionsEnabled": True,
        }
    alarm.properties = props
    alarm.cfn_resource = CompositeAlarm(
        alarm.logical_name,
        # Ensure the referenced alarms exist before the composite is created.
        DependsOn=[a.title for a in mapping.values()],
        **props,
    )
def configure_for_follower(instance, counter):
    """Configure a follower StorReduce host instance in place.

    Sets the creation dependency chain, subnet placement, cfn-init metadata
    that downloads and runs connect-srr.sh against the base instance, and
    the Name tag.

    :param instance: troposphere ec2.Instance to mutate.
    :param int counter: 1-based-ish index of this follower in the cluster.
    """
    # Spread followers round-robin across the available subnets/AZs.
    subnet_index = counter % NUM_AZS
    if counter == 2:
        # The first follower waits on the monitor instance.
        instance.DependsOn = "MonitorInstance"
    else:
        # Later followers chain on the previously created follower.
        instance.DependsOn = BASE_NAME + str(counter - 1)  #base_instance.title
    instance.SubnetId = Select(str(subnet_index), Ref(PrivateSubnetsToSpanParam))
    # instance.AvailabilityZone = Select(str(subnet_index), Ref(AvailabilityZonesParam))
    instance.Metadata = cloudformation.Metadata(
        # S3 auth so cfn-init can fetch the script from the quick-start bucket.
        cloudformation.Authentication({
            "S3AccessCreds": cloudformation.AuthenticationBlock(
                type="S3",
                roleName=Ref(StorReduceHostRole),  #Ref(HostRoleParam),
                buckets=[Ref(QSS3BucketNameParam)])
        }),
        cloudformation.Init({
            "config": cloudformation.InitConfig(
                files=cloudformation.InitFiles({
                    # GovCloud uses a different S3 endpoint prefix.
                    "/home/ec2-user/connect-srr.sh": cloudformation.InitFile(
                        source=Sub(
                            "https://${" + QSS3BucketNameParam.title +
                            "}.${QSS3Region}.amazonaws.com/${" +
                            QSS3KeyPrefixParam.title +
                            "}scripts/connect-srr.sh",
                            **{
                                "QSS3Region": If("GovCloudCondition",
                                                 "s3-us-gov-west-1", "s3")
                            }),
                        mode="000550",
                        owner="root",
                        group="root")
                }),
                commands={
                    # Join the follower to the cluster led by the base instance.
                    "connect-srr": {
                        "command": Join("", [
                            "/home/ec2-user/connect-srr.sh \"",
                            GetAtt(base_instance, "PrivateDnsName"), "\" \'",
                            Ref(StorReducePasswordParam), "\' ",
                            "\"", Ref(ShardsNumParam), "\" ",
                            "\"", Ref(ReplicaShardsNumParam), "\" ",
                            "\"", Ref(elasticLB), "\" ",
                            "\"", Ref("AWS::Region"), "\" ",
                            "\"", GetAtt("Eth0", "PrimaryPrivateIpAddress"), "\" ",
                            "\"", Ref(NumSRRHostsParam), "\""
                        ])
                    }
                })
        }))
    instance.Tags = [{"Key": "Name", "Value": "StorReduce-QS-Host"}]
def add_subnets(self):
    """Create one subnet per tier per availability zone and store each
    subnet's Ref back on its tier dict under ``ID<n>``."""
    template = self.template
    for tier in self.subnets:
        for index in range(self.numAz):
            zone = Select(index, GetAZs())
            zone_number = str(index + 1)
            subnet_title = tier['tier'] + 'Az' + zone_number + 'Subnet'
            cidr = tier['az' + zone_number] + tier['suffix']
            subnet = self.build_subnet(template, subnet_title, zone, cidr)
            tier['ID' + zone_number] = Ref(subnet)
def add_subnets(self):
    """Create tiered subnets across the configured AZs from sceptre user
    data and record their Refs in ``self.subnet_ids``. Returns 0."""
    template = self.template
    user_data = self.sceptre_user_data
    for tier_config in user_data['subnets']:
        for index in range(user_data['num_az']):
            zone = Select(index, GetAZs())
            zone_label = str(index + 1)
            subnet_title = (user_data['application'] + '-' +
                            tier_config['tier'] + '-az' + zone_label)
            cidr = tier_config['az' + zone_label] + tier_config['suffix']
            subnet = self.build_subnet(template, subnet_title, zone, cidr)
            self.subnet_ids[tier_config['tier'] + '-az' + zone_label] = Ref(subnet)
    return 0
def define_public_mapping(eips, azs):
    """Function to get the public mapping for NLB

    :param eips: list of EIPSs
    :type eips: list(troposphere.ec2.EIP)
    :param azs: list of AZs to created EIPs into
    :type azs: list
    :return: list
    """
    # EIPs take precedence: one mapping per EIP with its allocation id.
    if eips:
        return [
            SubnetMapping(
                AllocationId=GetAtt(eip, "AllocationId"),
                SubnetId=Select(index, Ref(PUBLIC_SUBNETS)),
            )
            for index, eip in enumerate(eips)
        ]
    # Otherwise one plain subnet mapping per AZ.
    if azs:
        return [
            SubnetMapping(SubnetId=Select(index, Ref(PUBLIC_SUBNETS)))
            for index in range(len(azs))
        ]
    return []
def gen_rds_db(service_name):
    """Build the RDS DBInstance and its DBSubnetGroup for *service_name*.

    Returns ``[db, db_subnet_group]``.
    """

    def _private_subnet(index):
        # One element of the comma-separated private-subnet export.
        return Select(
            index,
            Split(
                ",",
                ImportValue(Sub("${NetworkName}-network-vpc-PrivateSubnets")),
            ),
        )

    db_subnet_group = DBSubnetGroup(
        "DBSubnetGroup",
        DBSubnetGroupDescription="Subnets available for the RDS DB Instance",
        SubnetIds=[_private_subnet(0), _private_subnet(1)],
    )
    db = DBInstance(
        "DB",
        DBName=Ref(parameters['DBName']),
        AllocatedStorage=Ref(parameters['DBStorage']),
        DBInstanceClass=Ref(parameters['DBClass']),
        DBInstanceIdentifier=service_name,
        VPCSecurityGroups=[Ref('DBSecurityGroup')],
        Engine=Ref(parameters['DBEngine']),
        EngineVersion=Ref(parameters['DBEngineVersion']),
        StorageType=Ref(parameters['DBStorageType']),
        Iops=Ref(parameters['Iops']),
        MasterUsername=Ref(parameters['Username']),
        MasterUserPassword=Ref(parameters['Password']),
        MultiAZ=Ref(parameters['MultiAZ']),
        PubliclyAccessible=Ref(parameters['PubliclyAccessible']),
        DBSubnetGroupName=Ref("DBSubnetGroup"),
        Tags=gen_tags(service_name),
    )
    return [db, db_subnet_group]
def create_elasticsearch_resource(template, elasticsearch_domain_name_variable,
                                  elasticsearch_instance_count_parameter,
                                  elasticsearch_instance_class_parameter,
                                  elasticsearch_security_group_resource,
                                  subnets_parameter,
                                  elasticsearch_log_group_resource):
    """Add an Elasticsearch 7.9 domain to *template* and return it.

    The domain lives in the first subnet of *subnets_parameter*, uses gp2
    EBS storage, and grants the ES service permission to write to the
    given log group.
    """
    # Any AWS principal may call es:* on the domain; the ES service itself
    # may write log streams/events to the configured log group.
    access_policies = {
        'Version': '2012-10-17',
        'Statement': [
            {
                'Effect': 'Allow',
                'Principal': {'AWS': '*'},
                'Action': 'es:*',
                'Resource': Sub(
                    'arn:aws:es:${AWS::Region}:${AWS::AccountId}:domain/${DomainName}/*',
                    DomainName=elasticsearch_domain_name_variable),
            },
            {
                "Effect": "Allow",
                "Principal": {"Service": "es.amazonaws.com"},
                "Action": ["logs:PutLogEvents", "logs:CreateLogStream"],
                "Resource": GetAtt(elasticsearch_log_group_resource, 'Arn'),
            },
        ],
    }
    cluster_config = elasticsearch.ElasticsearchClusterConfig(
        InstanceCount=Ref(elasticsearch_instance_count_parameter),
        InstanceType=Ref(elasticsearch_instance_class_parameter),
    )
    vpc_options = elasticsearch.VPCOptions(
        SecurityGroupIds=[
            GetAtt(elasticsearch_security_group_resource, 'GroupId')
        ],
        SubnetIds=[Select(0, Ref(subnets_parameter))],
    )
    domain = elasticsearch.Domain(
        'Elasticsearch',
        AccessPolicies=access_policies,
        DomainName=elasticsearch_domain_name_variable,
        EBSOptions=elasticsearch.EBSOptions(EBSEnabled=True,
                                            VolumeSize=10,
                                            VolumeType='gp2'),
        ElasticsearchClusterConfig=cluster_config,
        ElasticsearchVersion='7.9',
        VPCOptions=vpc_options,
    )
    return template.add_resource(domain)
def subnet_adder(self, subnet_list, name_ref, route_table_name):
    """Create a Subnet plus a route-table association for every CIDR in
    *subnet_list*, naming each by ``name_ref`` + its index."""
    for position, cidr_block in enumerate(subnet_list):
        subnet_title = name_ref + str(position)
        # Subnets are spread across AZs by index. With a fixed count of 3
        # pub/priv subnets this fails in regions with fewer than 3 AZs;
        # ideally the distribution would be computed from the AZs actually
        # available rather than assumed.
        self.template.add_resource(
            Subnet(
                subnet_title,
                CidrBlock=str(cidr_block),
                VpcId=Ref(self.vpc),
                AvailabilityZone=Select(position, self.azs),
            ))
        self.template.add_resource(
            SubnetRouteTableAssociation(
                route_table_name + str(position),
                SubnetId=Ref(subnet_title),
                RouteTableId=Ref(route_table_name),
            ))
def set_subnet_mappings(self, vpc_stack):
    """
    For NLB, defines the EC2 EIP and Subnets Mappings to use.

    Determines the number of EIP to produce from the VPC Settings.
    ALBs don't use subnet mappings, so AWS::NoValue is returned for them.
    """
    if self.is_alb():
        return Ref(AWS_NO_VALUE)
    # Public NLBs without EIPs yet get them allocated first.
    if not self.lb_eips and self.lb_is_public:
        self.set_eips(vpc_stack)
    subnets = self.define_override_subnets(PUBLIC_SUBNETS.title, vpc_stack)
    return [
        SubnetMapping(
            AllocationId=GetAtt(eip, "AllocationId"),
            SubnetId=Select(index, Ref(subnets)),
        )
        for index, eip in enumerate(self.lb_eips)
    ]
def create_bastion(self):
    """Create the bastion security group and the bastion EC2 instance.

    Ingress: SSH from the configured access CIDR. Egress: Postgres/Redis/SSH
    within the VPC, plus HTTP/HTTPS anywhere. Returns the instance resource.
    """
    sg_title = 'sgBastion'
    ingress_rules = [
        ec2.SecurityGroupRule(IpProtocol='tcp',
                              CidrIp=Ref(self.ip_access),
                              FromPort=port,
                              ToPort=port) for port in [SSH]
    ]
    egress_rules = [
        ec2.SecurityGroupRule(IpProtocol='tcp',
                              CidrIp=VPC_CIDR,
                              FromPort=port,
                              ToPort=port)
        for port in [POSTGRESQL, REDIS, SSH]
    ] + [
        ec2.SecurityGroupRule(IpProtocol='tcp',
                              CidrIp=ALLOW_ALL_CIDR,
                              FromPort=port,
                              ToPort=port) for port in [HTTP, HTTPS]
    ]
    bastion_sg = self.add_resource(
        ec2.SecurityGroup(
            sg_title,
            GroupDescription='Enables access to the BastionHost',
            VpcId=Ref(self.vpc_id),
            SecurityGroupIngress=ingress_rules,
            SecurityGroupEgress=egress_rules,
            Tags=self.get_tags(Name=sg_title),
        ))
    host_title = 'BastionHost'
    # Single public-facing ENI in the first public subnet.
    bastion_eni = ec2.NetworkInterfaceProperty(
        Description='ENI for BastionHost',
        GroupSet=[Ref(bastion_sg)],
        SubnetId=Select('0', Ref(self.public_subnets)),
        AssociatePublicIpAddress=True,
        DeviceIndex=0,
        DeleteOnTermination=True,
    )
    return self.add_resource(
        ec2.Instance(
            host_title,
            InstanceType=Ref(self.bastion_instance_type),
            KeyName=Ref(self.keyname),
            ImageId=Ref(self.bastion_host_ami),
            NetworkInterfaces=[bastion_eni],
            Tags=self.get_tags(Name=host_title),
        ))
def link_cluster_to_service(
    cluster: CacheCluster, cluster_mappings: dict, mapping_name: str
):
    """
    Function to go over each service defined in the DB and assign found DB settings to service

    :param ecs_composex.elasticache.elasticache_stack.CacheCluster cluster:
    :param dict cluster_mappings:
    :param str mapping_name:
    :return:
    """
    for target in cluster.families_targets:
        family = target[0]
        family.template.add_mapping(mapping_name, cluster_mappings)
        # Open the cluster SG to the service family on the cluster port,
        # both resolved through the shared mapping.
        add_security_group_ingress(
            family.stack,
            cluster.logical_name,
            sg_id=Select(
                0,
                FindInMap(mapping_name, cluster.logical_name, CLUSTER_SG.title),
            ),
            port=FindInMap(mapping_name, cluster.logical_name,
                           cluster.port_attr.title),
        )
def gen_public_subnet_az(self, az_index, cidr_block):
    """Create one public subnet in AZ *az_index* with *cidr_block*, export
    its id, and associate it with the public route table and the NACL."""
    subnet_title = f"PublicSubnet{az_index}"
    public_subnet = self.template.add_resource(
        ec2.Subnet(
            subnet_title,
            VpcId=Ref(self.vpc),
            CidrBlock=cidr_block,
            AvailabilityZone=Select(str(az_index), GetAZs(Ref("AWS::Region"))),
        ))
    self.export_value(Ref(public_subnet), subnet_title)
    self.template.add_resource(
        ec2.SubnetRouteTableAssociation(
            f"{subnet_title}RouteTableAssociation",
            SubnetId=Ref(public_subnet),
            RouteTableId=Ref(self.public_route_table),
        ))
    self.template.add_resource(
        ec2.SubnetNetworkAclAssociation(
            f"{subnet_title}NetworkAclAssociation",
            SubnetId=Ref(public_subnet),
            NetworkAclId=Ref(self.network_acl),
        ))
Statement(Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["ecs.amazonaws.com"])) ]), Path="/", ManagedPolicyArns=[ 'arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole' ])) t.add_resource( ecs.Service( "service", Cluster=ImportValue( Join( "-", [Select(0, Split("-", Ref("AWS::StackName"))), "cluster-id"])), DesiredCount=1, TaskDefinition=Ref("task"), LoadBalancers=[ ecs.LoadBalancer( ContainerName="helloworld", ContainerPort=3000, TargetGroupArn=ImportValue( Join("-", [ Select(0, Split("-", Ref("AWS::StackName"))), "alb-target-group" ]), ), ) ], Role=Ref("ServiceRole")))
# --- VPC, subnets, and internet gateway (top-level template assembly) -------
# NOTE(review): this assignment rebinds the name ``VPC`` from the troposphere
# class to the created resource instance (shadowing). Any later use of the
# VPC *class* in this module would break; consider renaming the variable.
VPC = template.add_resource(
    VPC('VPC',
        CidrBlock=vpc.CidrBlock,
        EnableDnsSupport=True,
        EnableDnsHostnames=True,
        Tags=Tags(Name=environmentString + "VPC",
                  Stack=Ref("AWS::StackName"))))
vpc.instance = VPC
# One Subnet resource per subnet declared on the vpc config object; the
# created resource is stored back on the config entry for later reference.
for f in vpc.subnets:
    subnet = template.add_resource(
        Subnet(f.name,
               CidrBlock=f.CidrBlock,
               VpcId=Ref(VPC),
               MapPublicIpOnLaunch=f.MapPublicIpOnLaunch,
               AvailabilityZone=Select(f.AvailabilityZone, GetAZs()),
               Tags=Tags(Name=environmentString + f.name,
                         Stack=Ref("AWS::StackName"))))
    f.instance = subnet
############################# ADD GATEWAY AND ROLE ###########################
internetGateway = template.add_resource(
    InternetGateway('InternetGateway',
                    Tags=Tags(Name=environmentString + "Internet-Gateway",
                              Stack=Ref("AWS::StackName"))))
gatewayAttachment = template.add_resource(
    VPCGatewayAttachment('AttachGateway',
                         VpcId=Ref(VPC),
                         InternetGatewayId=Ref(internetGateway)))
def create_network(self):
    """Create the full public/private subnet layout for the stack.

    For each AZ (up to the ``AZCount`` variable) this creates a public and a
    private subnet, a route table + association per subnet, a default route
    (IGW for public, NAT for private), and NAT instances/gateways. Finally it
    emits outputs for every subnet and availability zone.
    """
    t = self.template
    variables = self.get_variables()
    self.create_gateway()
    t.add_resource(ec2.NetworkAcl('DefaultACL', VpcId=VPC_ID))
    self.create_nat_security_groups()
    subnets = {'public': [], 'private': []}
    net_types = subnets.keys()
    zones = []
    for i in range(variables["AZCount"]):
        az = Select(i, GetAZs(""))
        zones.append(az)
        name_suffix = i
        for net_type in net_types:
            name_prefix = net_type.capitalize()
            subnet_name = "%sSubnet%s" % (name_prefix, name_suffix)
            subnets[net_type].append(subnet_name)
            t.add_resource(
                ec2.Subnet(subnet_name,
                           AvailabilityZone=az,
                           VpcId=VPC_ID,
                           # Subnets must wait for the gateway attachment.
                           DependsOn=GW_ATTACH,
                           CidrBlock=variables.get("%sSubnets" %
                                                   name_prefix)[i],
                           Tags=Tags(type=net_type)))
            route_table_name = "%sRouteTable%s" % (name_prefix, name_suffix)
            t.add_resource(
                ec2.RouteTable(route_table_name,
                               VpcId=VPC_ID,
                               Tags=[ec2.Tag('type', net_type)]))
            t.add_resource(
                ec2.SubnetRouteTableAssociation(
                    "%sRouteTableAssociation%s" % (name_prefix, name_suffix),
                    SubnetId=Ref(subnet_name),
                    RouteTableId=Ref(route_table_name)))
            route_name = '%sRoute%s' % (name_prefix, name_suffix)
            if net_type == 'public':
                # the public subnets are where the NAT instances live,
                # so their default route needs to go to the AWS
                # Internet Gateway
                t.add_resource(
                    ec2.Route(route_name,
                              RouteTableId=Ref(route_table_name),
                              DestinationCidrBlock="0.0.0.0/0",
                              GatewayId=Ref(GATEWAY)))
                self.create_nat_instance(i, subnet_name)
            else:
                # Private subnets are where actual instances will live
                # so their gateway needs to be through the nat instances
                route = ec2.Route(
                    route_name,
                    RouteTableId=Ref(route_table_name),
                    DestinationCidrBlock='0.0.0.0/0',
                )
                # NAT gateway vs. NAT instance is a stack-level choice.
                if variables["UseNatGateway"]:
                    route.NatGatewayId = Ref(NAT_GATEWAY_NAME % name_suffix)
                else:
                    route.InstanceId = Ref(NAT_INSTANCE_NAME % name_suffix)
                t.add_resource(route)
    # Outputs: one comma-joined list per tier plus one output per subnet.
    for net_type in net_types:
        t.add_output(
            Output("%sSubnets" % net_type.capitalize(),
                   Value=Join(",", [Ref(sn) for sn in subnets[net_type]])))
        for i, sn in enumerate(subnets[net_type]):
            t.add_output(
                Output("%sSubnet%d" % (net_type.capitalize(), i),
                       Value=Ref(sn)))
    self.template.add_output(
        Output("AvailabilityZones", Value=Join(",", zones)))
    for i, az in enumerate(zones):
        t.add_output(Output("AvailabilityZone%d" % (i), Value=az))
Action=[AssumeRole], Principal=Principal("Service", ["ecs.amazonaws.com"]) ) ] ), Path="/", ManagedPolicyArns=[ 'arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole'] )) t.add_resource(ecs.Service( "service", Cluster=ImportValue( Join( "-", [Select(0, Split("-", Ref("AWS::StackName"))), "cluster-id"] ) ), DesiredCount=1, TaskDefinition=Ref("task"), LoadBalancers=[ecs.LoadBalancer( ContainerName="helloworld", ContainerPort=3000, TargetGroupArn=ImportValue( Join( "-", [Select(0, Split("-", Ref("AWS::StackName"))), "alb-target-group"] ), ),
def __init__(self):
    """Declare a 172.1.0.0/16 VPC with two private and two public subnets
    (one of each per AZ), route tables, an S3 VPC endpoint, a private NACL,
    and an internet gateway with a default public route.

    Resources are built as attributes only; adding them to a template is
    done elsewhere.
    """
    super(VPC, self).__init__()
    self.vpc = ec2.VPC(
        "VPC",
        CidrBlock="172.1.0.0/16",
        InstanceTenancy="default",
        EnableDnsSupport=True,
        EnableDnsHostnames=True,
        Tags=Tags(Name=Ref("AWS::StackName")),
    )
    self.internet_gateway = ec2.InternetGateway(
        "InternetGateway",
        Tags=Tags(Name=Join(
            "", [Ref("AWS::StackName"), "-internet-gateway"]), ),
    )
    self.internet_gateway_attachment = ec2.VPCGatewayAttachment(
        "InternetGatewayAttachment",
        InternetGatewayId=Ref(self.internet_gateway),
        VpcId=Ref(self.vpc),
    )
    self.public_route_table = ec2.RouteTable(
        "PublicRouteTable",
        VpcId=Ref(self.vpc),
        Tags=Tags(Name=Join(
            "-", [Ref("AWS::StackName"), "public-route-table"]), ),
    )
    self.private_route_table = ec2.RouteTable(
        "PrivateRouteTable",
        VpcId=Ref(self.vpc),
        Tags=Tags(Name=Join(
            "-", [Ref("AWS::StackName"), "private-route-table"]), ),
    )
    # Gateway endpoint so both tiers reach S3 without internet egress.
    self.vpc_s3_endpoint = ec2.VPCEndpoint(
        "VPCS3Endpoint",
        ServiceName=Join(
            "", ["com.amazonaws.", Ref("AWS::Region"), ".s3"]),
        VpcId=Ref(self.vpc),
        RouteTableIds=[
            Ref(self.public_route_table),
            Ref(self.private_route_table)
        ],
    )
    # Default route for the public tier; must wait for the IGW attachment.
    self.route_to_internet = ec2.Route(
        "RouteToInternet",
        DestinationCidrBlock="0.0.0.0/0",
        GatewayId=Ref(self.internet_gateway),
        RouteTableId=Ref(self.public_route_table),
        DependsOn=self.internet_gateway_attachment.title,
    )
    # private subnets
    self.private_subnet_1 = ec2.Subnet(
        "PrivateSubnet1",
        AvailabilityZone=Select(0, GetAZs()),
        CidrBlock="172.1.1.0/24",
        MapPublicIpOnLaunch=False,
        Tags=Tags(Name=Join(
            "", [Ref("AWS::StackName"), "-private-subnet-1"]), ),
        VpcId=Ref(self.vpc),
    )
    self.private_subnet_1_route_table_association = ec2.SubnetRouteTableAssociation(
        "PrivateSubnet1RouteTableAssociation",
        RouteTableId=Ref(self.private_route_table),
        SubnetId=Ref(self.private_subnet_1),
    )
    self.private_subnet_2 = ec2.Subnet(
        "PrivateSubnet2",
        AvailabilityZone=Select(1, GetAZs()),
        CidrBlock="172.1.2.0/24",
        MapPublicIpOnLaunch=False,
        Tags=Tags(Name=Join(
            "", [Ref("AWS::StackName"), "-private-subnet-2"]), ),
        VpcId=Ref(self.vpc),
    )
    self.private_subnet_2_route_table_association = ec2.SubnetRouteTableAssociation(
        "PrivateSubnet2RouteTableAssociation",
        RouteTableId=Ref(self.private_route_table),
        SubnetId=Ref(self.private_subnet_2),
    )
    self.private_network_aCL = ec2.NetworkAcl(
        "PrivateNetworkACL",
        VpcId=Ref(self.vpc),
        Tags=Tags(Name=Join("", [Ref("AWS::StackName"), "-private-nacl"]), ),
    )
    self.private_subnet_1_network_acl_association = ec2.SubnetNetworkAclAssociation(
        "PrivateSubnet1NetworkAclAssociation",
        SubnetId=Ref(self.private_subnet_1),
        NetworkAclId=Ref(self.private_network_aCL),
    )
    self.private_subnet_2_network_acl_association = ec2.SubnetNetworkAclAssociation(
        "PrivateSubnet2NetworkAclAssociation",
        SubnetId=Ref(self.private_subnet_2),
        NetworkAclId=Ref(self.private_network_aCL),
    )
    # NACL entries: allow all protocols, but only intra-VPC traffic.
    self.private_network_acl_entry_in = ec2.NetworkAclEntry(
        "PrivateNetworkAclEntryIn",
        CidrBlock="172.1.0.0/16",
        Egress=False,
        NetworkAclId=Ref(self.private_network_aCL),
        Protocol=-1,
        RuleAction="allow",
        RuleNumber=200,
    )
    self.private_network_acl_entry_out = ec2.NetworkAclEntry(
        "PrivateNetworkAclEntryOut",
        CidrBlock="172.1.0.0/16",
        Egress=True,
        NetworkAclId=Ref(self.private_network_aCL),
        Protocol=-1,
        RuleAction="allow",
        RuleNumber=200,
    )
    # public subnets
    self.public_subnet_1 = ec2.Subnet(
        "PublicSubnet1",
        AvailabilityZone=Select(0, GetAZs()),
        CidrBlock="172.1.128.0/24",
        MapPublicIpOnLaunch=True,
        Tags=Tags(Name=Join(
            "", [Ref("AWS::StackName"), "-public-subnet-1"]), ),
        VpcId=Ref(self.vpc),
    )
    self.public_subnet_1_route_table_association = ec2.SubnetRouteTableAssociation(
        "PublicSubnet1RouteTableAssociation",
        RouteTableId=Ref(self.public_route_table),
        SubnetId=Ref(self.public_subnet_1),
    )
    self.public_subnet_2 = ec2.Subnet(
        "PublicSubnet2",
        AvailabilityZone=Select(1, GetAZs()),
        CidrBlock="172.1.129.0/24",
        MapPublicIpOnLaunch=True,
        Tags=Tags(Name=Join(
            "", [Ref("AWS::StackName"), "-public-subnet-2"]), ),
        VpcId=Ref(self.vpc),
    )
    self.public_subnet_2_route_table_association = ec2.SubnetRouteTableAssociation(
        "PublicSubnet2RouteTableAssociation",
        RouteTableId=Ref(self.public_route_table),
        SubnetId=Ref(self.public_subnet_2),
    )
def flocker_docker_template(cluster_size, client_ami_map, node_ami_map):
    """
    :param int cluster_size: The number of nodes to create in the Flocker
        cluster (including control service node).
    :param dict client_ami_map: A map between AWS region name and AWS AMI ID
        for the client.
    :param dict node_ami_map: A map between AWS region name and AWS AMI ID
        for the node.
    :returns: a CloudFormation template for a Flocker + Docker +
        Docker Swarm cluster.
    """
    # Base JSON template.
    template = Template()
    # Keys corresponding to CloudFormation user Inputs.
    access_key_id_param = template.add_parameter(
        Parameter(
            "AmazonAccessKeyID",
            Description="Required: Your Amazon AWS access key ID",
            Type="String",
            NoEcho=True,
            AllowedPattern="[\w]+",
            MinLength="16",
            MaxLength="32",
        ))
    secret_access_key_param = template.add_parameter(
        Parameter(
            "AmazonSecretAccessKey",
            Description="Required: Your Amazon AWS secret access key",
            Type="String",
            NoEcho=True,
            MinLength="1",
        ))
    keyname_param = template.add_parameter(
        Parameter(
            "EC2KeyPair",
            Description=
            "Required: Name of an existing EC2 KeyPair to enable SSH "
            "access to the instance",
            Type="AWS::EC2::KeyPair::KeyName",
        ))
    # Parameter is only used as a user acknowledgement; the Ref is never read.
    template.add_parameter(
        Parameter(
            "S3AccessPolicy",
            Description="Required: Is current IAM user allowed to access S3? "
            "S3 access is required to distribute Flocker and Docker "
            "configuration amongst stack nodes. Reference: "
            "http://docs.aws.amazon.com/IAM/latest/UserGuide/"
            "access_permissions.html Stack creation will fail if user "
            "cannot access S3",
            Type="String",
            AllowedValues=["Yes"],
        ))
    volumehub_token = template.add_parameter(
        Parameter(
            "VolumeHubToken",
            Description=("Optional: Your Volume Hub token. "
                         "You'll find the token at "
                         "https://volumehub.clusterhq.com/v1/token."),
            Type="String",
            Default="",
        ))
    template.add_mapping('RegionMapClient',
                         {k: {
                             "AMI": v
                         } for k, v in client_ami_map.items()})
    template.add_mapping('RegionMapNode',
                         {k: {
                             "AMI": v
                         } for k, v in node_ami_map.items()})
    # Select a random AvailabilityZone within given AWS Region.
    # NOTE(review): Select(0, ...) always picks the *first* AZ, not a random
    # one — the original comment overstates this.
    zone = Select(0, GetAZs(""))
    # S3 bucket to hold {Flocker, Docker, Swarm} configuration for distribution
    # between nodes.
    s3bucket = Bucket('ClusterConfig', DeletionPolicy='Retain')
    template.add_resource(s3bucket)
    # Create SecurityGroup for cluster instances.
    instance_sg = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription=(
                "Enable ingress access on all protocols and ports."),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol=protocol,
                    FromPort="0",
                    ToPort="65535",
                    CidrIp="0.0.0.0/0",
                ) for protocol in ('tcp', 'udp')
            ]))
    # Base for post-boot {Flocker, Docker, Swarm} configuration on the nodes.
    # These shell variable assignments are prepended to every node's UserData.
    base_user_data = [
        '#!/bin/bash\n',
        'aws_region="', Ref("AWS::Region"), '"\n',
        'aws_zone="', zone, '"\n',
        'access_key_id="', Ref(access_key_id_param), '"\n',
        'secret_access_key="', Ref(secret_access_key_param), '"\n',
        's3_bucket="', Ref(s3bucket), '"\n',
        'stack_name="', Ref("AWS::StackName"), '"\n',
        'volumehub_token="', Ref(volumehub_token), '"\n',
        'node_count="{}"\n'.format(cluster_size),
        'apt-get update\n',
    ]
    # XXX Flocker agents are indexed from 1 while the nodes overall are indexed
    # from 0.
    flocker_agent_number = 1
    # Gather WaitConditions
    wait_condition_names = []
    for i in range(cluster_size):
        # Node 0 is the control-service node; the rest are agents.
        if i == 0:
            node_name = CONTROL_NODE_NAME
        else:
            node_name = AGENT_NODE_NAME_TEMPLATE.format(index=i)
        # Create an EC2 instance for the {Agent, Control} Node.
        ec2_instance = ec2.Instance(node_name,
                                    ImageId=FindInMap("RegionMapNode",
                                                      Ref("AWS::Region"),
                                                      "AMI"),
                                    InstanceType="m3.large",
                                    KeyName=Ref(keyname_param),
                                    SecurityGroups=[Ref(instance_sg)],
                                    AvailabilityZone=zone,
                                    Tags=Tags(Name=node_name))
        # WaitCondition and corresponding Handler to signal completion
        # of {Flocker, Docker, Swarm} configuration on the node.
        wait_condition_handle = WaitConditionHandle(
            INFRA_WAIT_HANDLE_TEMPLATE.format(node=node_name))
        template.add_resource(wait_condition_handle)
        wait_condition = WaitCondition(
            INFRA_WAIT_CONDITION_TEMPLATE.format(node=node_name),
            Handle=Ref(wait_condition_handle),
            Timeout=NODE_CONFIGURATION_TIMEOUT,
        )
        template.add_resource(wait_condition)
        # Gather WaitConditions
        wait_condition_names.append(wait_condition.name)
        # Copy the shared preamble so per-node lines don't accumulate.
        user_data = base_user_data[:]
        user_data += [
            'node_number="{}"\n'.format(i),
            'node_name="{}"\n'.format(node_name),
            'wait_condition_handle="',
            Ref(wait_condition_handle),
            '"\n',
        ]
        # Setup S3 utilities to push/pull node-specific data to/from S3 bucket.
        user_data += _sibling_lines(S3_SETUP)
        if i == 0:
            # Control Node configuration.
            control_service_instance = ec2_instance
            user_data += ['flocker_node_type="control"\n']
            user_data += _sibling_lines(FLOCKER_CONFIGURATION_GENERATOR)
            user_data += _sibling_lines(DOCKER_SWARM_CA_SETUP)
            user_data += _sibling_lines(DOCKER_SETUP)
            # Setup Swarm 1.0.1
            user_data += _sibling_lines(SWARM_MANAGER_SETUP)
            template.add_output([
                Output(
                    "ControlNodeIP",
                    Description="Public IP of Flocker Control and "
                    "Swarm Manager.",
                    Value=GetAtt(ec2_instance, "PublicIp"),
                )
            ])
        else:
            # Agent Node configuration.
            # Agents boot only after the control node exists.
            ec2_instance.DependsOn = control_service_instance.name
            user_data += [
                'flocker_node_type="agent"\n',
                'flocker_agent_number="{}"\n'.format(flocker_agent_number)
            ]
            flocker_agent_number += 1
            user_data += _sibling_lines(DOCKER_SETUP)
            # Setup Swarm 1.0.1
            user_data += _sibling_lines(SWARM_NODE_SETUP)
            template.add_output([
                Output(
                    "AgentNode{}IP".format(i),
                    Description=(
                        "Public IP of Agent Node for Flocker and Swarm."),
                    Value=GetAtt(ec2_instance, "PublicIp"),
                )
            ])
        user_data += _sibling_lines(FLOCKER_CONFIGURATION_GETTER)
        user_data += _sibling_lines(VOLUMEHUB_SETUP)
        user_data += _sibling_lines(SIGNAL_CONFIG_COMPLETION)
        ec2_instance.UserData = Base64(Join("", user_data))
        template.add_resource(ec2_instance)
    # Client Node creation.
    client_instance = ec2.Instance(CLIENT_NODE_NAME,
                                   ImageId=FindInMap("RegionMapClient",
                                                     Ref("AWS::Region"),
                                                     "AMI"),
                                   InstanceType="m3.medium",
                                   KeyName=Ref(keyname_param),
                                   SecurityGroups=[Ref(instance_sg)],
                                   AvailabilityZone=zone,
                                   Tags=Tags(Name=CLIENT_NODE_NAME))
    wait_condition_handle = WaitConditionHandle(CLIENT_WAIT_HANDLE)
    template.add_resource(wait_condition_handle)
    wait_condition = WaitCondition(
        CLIENT_WAIT_CONDITION,
        Handle=Ref(wait_condition_handle),
        Timeout=NODE_CONFIGURATION_TIMEOUT,
    )
    template.add_resource(wait_condition)
    # Client Node {Flockerctl, Docker-compose} configuration.
    user_data = base_user_data[:]
    user_data += [
        'wait_condition_handle="',
        Ref(wait_condition_handle),
        '"\n',
        # Client is marked with sentinel node number -1.
        'node_number="{}"\n'.format("-1"),
    ]
    user_data += _sibling_lines(S3_SETUP)
    user_data += _sibling_lines(CLIENT_SETUP)
    user_data += _sibling_lines(SIGNAL_CONFIG_COMPLETION)
    client_instance.UserData = Base64(Join("", user_data))
    # Start Client Node after Control Node and Agent Nodes are
    # up and running Flocker, Docker, Swarm stack.
    client_instance.DependsOn = wait_condition_names
    template.add_resource(client_instance)
    # List of Output fields upon successful creation of the stack.
    template.add_output([
        Output(
            "ClientNodeIP",
            Description="Public IP address of the client node.",
            Value=GetAtt(client_instance, "PublicIp"),
        )
    ])
    template.add_output(
        Output(
            "ClientConfigDockerSwarmHost",
            Value=Join("", [
                "export DOCKER_HOST=tcp://",
                GetAtt(control_service_instance, "PublicIp"), ":2376"
            ]),
            Description="Client config: Swarm Manager's DOCKER_HOST setting."))
    template.add_output(
        Output("ClientConfigDockerTLS",
               Value="export DOCKER_TLS_VERIFY=1",
               Description="Client config: Enable TLS client for Swarm."))
    return template.to_json()
def sg_subnet_vpc(self, template, provision_refs):
    """Resolve (or create) the VPC, subnets, and security groups for provisioning.

    If ``aws_vpc_id`` is present in the app's ``provision`` config, the
    user-supplied VPC/subnet/security-group IDs are used verbatim.  Otherwise a
    self-contained network is created in *template*: a 10.0.0.0/16 VPC with two
    public subnets (one per AZ), an internet gateway with a default route, a
    network ACL, an ALB security group (port 46658 only) and an instance
    security group (SSH, tendermint ports 46656/46658, ICMP).

    :param template: troposphere ``Template`` the resources are added to.
    :param provision_refs: object collecting the resolved references; this
        method sets ``vpc``, ``security_group_ec2``, ``security_group_alb``
        and appends both subnets to ``subnets``.
    :return: None — results are communicated through *provision_refs*.
    """
    ref_stack_id = Ref('AWS::StackId')
    if 'aws_vpc_id' in self.app.config['provision']:
        # User supplied an existing network: trust their IDs as-is.
        vpc = self.app.config['provision']['aws_vpc_id']
        use_subnet = self.app.config['provision']['aws_subnet_id']
        use_subnet2 = self.app.config['provision']['aws_subnet2_id']
        use_sg = self.app.config['provision']['aws_sg_id']
        use_alb_sg = self.app.config['provision']['alb_sg_id']
        self.app.log.info(
            'Using your AWS subnet, make sure the routes and ports are configured correctly'
        )
    else:
        # Build a fresh VPC with two subnets in distinct AZs so an ALB can span them.
        vpc = Ref(
            template.add_resource(
                ec2.VPC('VPC',
                        CidrBlock='10.0.0.0/16',
                        Tags=Tags(Application=ref_stack_id))))
        internet_gateway = template.add_resource(
            ec2.InternetGateway('InternetGateway',
                                Tags=Tags(Application=ref_stack_id)))
        template.add_resource(
            ec2.VPCGatewayAttachment(
                'AttachGateway',
                VpcId=vpc,
                InternetGatewayId=Ref(internet_gateway)))
        route_table = template.add_resource(
            ec2.RouteTable('RouteTable',
                           VpcId=vpc,
                           Tags=Tags(Application=ref_stack_id)))
        subnet = template.add_resource(
            ec2.Subnet('Subnet',
                       CidrBlock='10.0.0.0/24',
                       VpcId=vpc,
                       AvailabilityZone=Select(0, GetAZs("")),
                       Tags=Tags(Application=ref_stack_id)))
        subnet2 = template.add_resource(
            ec2.Subnet('Subnet2',
                       CidrBlock='10.0.1.0/24',
                       VpcId=vpc,
                       AvailabilityZone=Select(1, GetAZs("")),
                       Tags=Tags(Application=ref_stack_id)))
        # Default route to the internet through the gateway.  Reference the
        # resource object (not the logical-name string) so the reference stays
        # correct if the logical name ever changes.
        template.add_resource(
            ec2.Route(
                'Route',
                DependsOn='AttachGateway',
                GatewayId=Ref(internet_gateway),
                DestinationCidrBlock='0.0.0.0/0',
                RouteTableId=Ref(route_table),
            ))
        template.add_resource(
            ec2.SubnetRouteTableAssociation(
                'SubnetRouteTableAssociation',
                SubnetId=Ref(subnet),
                RouteTableId=Ref(route_table),
            ))
        template.add_resource(
            ec2.SubnetRouteTableAssociation(
                'Subnet2RouteTableAssociation',
                SubnetId=Ref(subnet2),
                RouteTableId=Ref(route_table),
            ))
        network_acl = template.add_resource(
            ec2.NetworkAcl(
                'NetworkAcl',
                VpcId=vpc,
                Tags=Tags(Application=ref_stack_id),
            ))
        # Inbound: SSH, ephemeral response ports, ICMP, and HTTPS.
        template.add_resource(
            ec2.NetworkAclEntry(
                'InboundSSHNetworkAclEntry',
                NetworkAclId=Ref(network_acl),
                RuleNumber='100',
                Protocol='6',
                PortRange=ec2.PortRange(From='22', To='22'),
                Egress='false',
                RuleAction='allow',
                CidrBlock='0.0.0.0/0',
            ))
        template.add_resource(
            ec2.NetworkAclEntry(
                'InboundResponsePortsNetworkAclEntry',
                NetworkAclId=Ref(network_acl),
                RuleNumber='101',
                Protocol='6',
                PortRange=ec2.PortRange(From='1024', To='65535'),
                Egress='false',
                RuleAction='allow',
                CidrBlock='0.0.0.0/0',
            ))
        template.add_resource(
            ec2.NetworkAclEntry(
                'InboundICMPNetworkAclEntry',
                NetworkAclId=Ref(network_acl),
                RuleNumber='102',
                Protocol='1',
                Icmp=ec2.ICMP(Code=-1, Type=-1),
                Egress='false',
                RuleAction='allow',
                CidrBlock='0.0.0.0/0',
            ))
        # Only used when Blockscout is deployed
        template.add_resource(
            ec2.NetworkAclEntry(
                'InboundHttpsNetworkAclEntry',
                NetworkAclId=Ref(network_acl),
                RuleNumber='103',
                Protocol='6',
                PortRange=ec2.PortRange(From='443', To='443'),
                Egress='false',
                RuleAction='allow',
                CidrBlock='0.0.0.0/0',
            ))
        # Outbound: HTTP/HTTPS for package fetches, ephemeral ports, ICMP.
        template.add_resource(
            ec2.NetworkAclEntry(
                'OutBoundHTTPNetworkAclEntry',
                NetworkAclId=Ref(network_acl),
                RuleNumber='100',
                Protocol='6',
                PortRange=ec2.PortRange(From='80', To='80'),
                Egress='true',
                RuleAction='allow',
                CidrBlock='0.0.0.0/0',
            ))
        template.add_resource(
            ec2.NetworkAclEntry(
                'OutBoundHTTPSNetworkAclEntry',
                NetworkAclId=Ref(network_acl),
                RuleNumber='101',
                Protocol='6',
                PortRange=ec2.PortRange(From='443', To='443'),
                Egress='true',
                RuleAction='allow',
                CidrBlock='0.0.0.0/0',
            ))
        template.add_resource(
            ec2.NetworkAclEntry(
                'OutBoundResponsePortsNetworkAclEntry',
                NetworkAclId=Ref(network_acl),
                RuleNumber='102',
                Protocol='6',
                PortRange=ec2.PortRange(From='1024', To='65535'),
                Egress='true',
                RuleAction='allow',
                CidrBlock='0.0.0.0/0',
            ))
        template.add_resource(
            ec2.NetworkAclEntry(
                'OutboundICMPNetworkAclEntry',
                NetworkAclId=Ref(network_acl),
                RuleNumber='103',
                Protocol='1',
                Icmp=ec2.ICMP(Code=-1, Type=-1),
                Egress='true',
                RuleAction='allow',
                CidrBlock='0.0.0.0/0',
            ))
        template.add_resource(
            ec2.SubnetNetworkAclAssociation(
                'SubnetNetworkAclAssociation',
                SubnetId=Ref(subnet),
                NetworkAclId=Ref(network_acl),
            ))
        template.add_resource(
            ec2.SubnetNetworkAclAssociation(
                'Subnet2NetworkAclAssociation',
                SubnetId=Ref(subnet2),
                NetworkAclId=Ref(network_acl),
            ))
        use_subnet = Ref(subnet)
        use_subnet2 = Ref(subnet2)
        # ALB terminates SSL, so it only needs the app port open to the world.
        alb_security_group = template.add_resource(
            ec2.SecurityGroup(
                'ALBSecurityGroup',
                GroupDescription=
                'ALB allows traffic from public, is used to terminate SSL',
                SecurityGroupIngress=[
                    ec2.SecurityGroupRule(IpProtocol='tcp',
                                          FromPort='46658',
                                          ToPort='46658',
                                          CidrIp='0.0.0.0/0'),
                ],
                VpcId=vpc,
            ))
        use_alb_sg = Ref(alb_security_group)
        instance_security_group = template.add_resource(
            ec2.SecurityGroup(
                'InstanceSecurityGroup',
                GroupDescription='Enable tendermint and SSH for all nodes',
                SecurityGroupIngress=[
                    ec2.SecurityGroupRule(IpProtocol='tcp',
                                          FromPort='22',
                                          ToPort='22',
                                          CidrIp='0.0.0.0/0'),
                    ec2.SecurityGroupRule(IpProtocol='tcp',
                                          FromPort='46656',
                                          ToPort='46656',
                                          CidrIp='0.0.0.0/0'),
                    ec2.SecurityGroupRule(IpProtocol='tcp',
                                          FromPort='46658',
                                          ToPort='46658',
                                          CidrIp='0.0.0.0/0'),
                    ec2.SecurityGroupRule(IpProtocol='icmp',
                                          FromPort='-1',
                                          ToPort='-1',
                                          CidrIp='0.0.0.0/0'),
                ],
                VpcId=vpc,
            ))
        use_sg = Ref(instance_security_group)
    # Publish the resolved references for the rest of the provisioner.
    provision_refs.vpc = vpc
    provision_refs.security_group_ec2 = use_sg
    provision_refs.security_group_alb = use_alb_sg
    provision_refs.subnets.append(use_subnet)
    provision_refs.subnets.append(use_subnet2)
VPC( "VPC", EnableDnsSupport=True, CidrBlock=Ref(VpcCIDR), EnableDnsHostnames=True, Tags=Tags(Name=Ref(EnvironmentName), ), )) Private2 = template.add_resource( Subnet( "Private2", Tags=Tags(Name=Sub("${EnvironmentName} Private Subnet (AZ2)"), ), VpcId=Ref(VPC), CidrBlock=Ref(Private2CIDR), MapPublicIpOnLaunch=False, AvailabilityZone=Select(1, GetAZs("")), )) Private1 = template.add_resource( Subnet( "Private1", Tags=Tags(Name=Sub("${EnvironmentName} Private Subnet (AZ1)"), ), VpcId=Ref(VPC), CidrBlock=Ref(Private1CIDR), MapPublicIpOnLaunch=False, AvailabilityZone=Select(0, GetAZs("")), )) IgwSubnet2 = template.add_resource( Subnet( "IgwSubnet2",
def main(args):
    """Build the FSx-for-Lustre CloudFormation template and write it as JSON.

    The template takes a comma-delimited ``FSXOptions`` parameter of 8 fields
    (see the index map below); conditions gate each optional field so ``NONE``
    entries fall back to ``AWS::NoValue``.  The filesystem is only created when
    a shared dir is given (index 0) and no existing filesystem id (index 1).

    :param args: parsed CLI args; only ``args.target_path`` (output JSON file
        path) is used.
    """
    t = Template()
    # ================= Parameters =================
    #           0          1         2                 3               4                        5           6
    # [shared_dir,fsx_fs_id,storage_capacity,fsx_kms_key_id,imported_file_chunk_size,export_path,import_path,
    #           7
    # weekly_maintenance_start_time]
    fsx_options = t.add_parameter(
        Parameter(
            "FSXOptions",
            Type="CommaDelimitedList",
            Description=
            "Comma separated list of fsx related options, 8 parameters in total, [shared_dir,fsx_fs_id,"
            "storage_capacity,fsx_kms_key_id,imported_file_chunk_size,export_path,import_path,"
            "weekly_maintenance_start_time]",
        ))

    compute_security_group = t.add_parameter(
        Parameter("ComputeSecurityGroup",
                  Type="String",
                  Description="SecurityGroup for FSx filesystem"))

    subnet_id = t.add_parameter(
        Parameter("SubnetId",
                  Type="String",
                  Description="SubnetId for FSx filesystem"))

    # ================= Conditions =================
    # Create only when a shared_dir is set AND no pre-existing fsx_fs_id is given.
    create_fsx = t.add_condition(
        "CreateFSX",
        And(Not(Equals(Select(str(0), Ref(fsx_options)), "NONE")),
            Equals(Select(str(1), Ref(fsx_options)), "NONE")),
    )
    use_storage_capacity = t.add_condition(
        "UseStorageCap", Not(Equals(Select(str(2), Ref(fsx_options)), "NONE")))
    use_fsx_kms_key = t.add_condition(
        "UseFSXKMSKey", Not(Equals(Select(str(3), Ref(fsx_options)), "NONE")))
    use_imported_file_chunk_size = t.add_condition(
        "UseImportedFileChunkSize",
        Not(Equals(Select(str(4), Ref(fsx_options)), "NONE")))
    use_export_path = t.add_condition(
        "UseExportPath", Not(Equals(Select(str(5), Ref(fsx_options)), "NONE")))
    use_import_path = t.add_condition(
        "UseImportPath", Not(Equals(Select(str(6), Ref(fsx_options)), "NONE")))
    use_weekly_mainenance_start_time = t.add_condition(
        "UseWeeklyMaintenanceStartTime",
        Not(Equals(Select(str(7), Ref(fsx_options)), "NONE")))

    # ================= Resources =================
    # Each optional field resolves to AWS::NoValue when its option is "NONE".
    fs = t.add_resource(
        FileSystem(
            "FileSystem",
            FileSystemType="LUSTRE",
            SubnetIds=[Ref(subnet_id)],
            SecurityGroupIds=[Ref(compute_security_group)],
            KmsKeyId=If(use_fsx_kms_key, Select(str(3), Ref(fsx_options)),
                        NoValue),
            StorageCapacity=If(use_storage_capacity,
                               Select(str(2), Ref(fsx_options)), NoValue),
            LustreConfiguration=LustreConfiguration(
                ImportedFileChunkSize=If(use_imported_file_chunk_size,
                                         Select(str(4), Ref(fsx_options)),
                                         NoValue),
                ExportPath=If(use_export_path, Select(str(5),
                                                      Ref(fsx_options)),
                              NoValue),
                ImportPath=If(use_import_path, Select(str(6),
                                                      Ref(fsx_options)),
                              NoValue),
                WeeklyMaintenanceStartTime=If(
                    use_weekly_mainenance_start_time,
                    Select(str(7), Ref(fsx_options)), NoValue),
            ),
            Condition=create_fsx,
        ))

    # ================= Outputs =================
    # When the stack did not create the filesystem, echo back the supplied id.
    t.add_output(
        Output(
            "FileSystemId",
            Description="ID of the FileSystem",
            Value=If(create_fsx, Ref(fs), Select("1", Ref(fsx_options))),
        ))

    # Write the rendered template; the context manager guarantees the handle
    # is closed even if the write fails (the original leaked it on error).
    json_file_path = args.target_path
    with open(json_file_path, "w") as output_file:
        output_file.write(t.to_json())
def dump_base_yaml(cfn_file, priv_subnet_cidr="10.10.1.0/24"):
    """Build a base VPC template and write it to *cfn_file*.

    Creates a VPC (CIDR from the ``vpcCidrParam`` parameter) with one public
    subnet (``subnetCidrParam``) routed through an internet gateway, and one
    private subnet routed through a NAT gateway placed in the public subnet.
    Exports VpcId / SubnetId / PrivSubnetId for cross-stack use.

    :param cfn_file: destination path handed to ``template_out_yaml``.
    :param priv_subnet_cidr: CIDR block of the private subnet (previously
        hard-coded); defaults to the original value for compatibility.
    """
    template = Template()

    vpc_cidr_param = template.add_parameter(
        Parameter(
            "vpcCidrParam",
            Description="string of vpc cidr block to use",
            Type="String",
        ))
    subnet_cidr_param = template.add_parameter(
        Parameter(
            "subnetCidrParam",
            Description="string of subnet cidr block to use",
            Type="String",
        ))

    igw = template.add_resource(
        ec2.InternetGateway(
            "Igw",
            Tags=resource_tags,
        ))
    vpc = template.add_resource(
        ec2.VPC(
            "Vpc",
            CidrBlock=Ref(vpc_cidr_param),
            EnableDnsSupport=True,
            EnableDnsHostnames=True,
            InstanceTenancy="default",
            Tags=resource_tags,
        ))
    # Unused local bindings (igwa, default_route, duplicate route_tbl_asoc,
    # private_out_route) removed — resources are only added to the template.
    template.add_resource(
        ec2.VPCGatewayAttachment(
            "IgwA",
            VpcId=Ref(vpc),
            InternetGatewayId=Ref(igw),
        ))
    route_tbl = template.add_resource(
        ec2.RouteTable(
            "RouteTable",
            VpcId=Ref(vpc),
            Tags=resource_tags,
        ))
    template.add_resource(
        ec2.Route("defaultRoute",
                  DestinationCidrBlock="0.0.0.0/0",
                  GatewayId=Ref(igw),
                  RouteTableId=Ref(route_tbl)))
    # Public subnet: auto-assign public IPs, default route via the IGW.
    subnet = template.add_resource(
        ec2.Subnet(
            "Subnet",
            VpcId=Ref(vpc),
            CidrBlock=Ref(subnet_cidr_param),
            MapPublicIpOnLaunch=True,
            AvailabilityZone=Select(0, GetAZs()),
            Tags=resource_tags,
        ))
    template.add_resource(
        ec2.SubnetRouteTableAssociation("RouteTblSubnetAsoc",
                                        RouteTableId=Ref(route_tbl),
                                        SubnetId=Ref(subnet)))

    priv_route_tbl = template.add_resource(
        ec2.RouteTable(
            "PrivRouteTable",
            VpcId=Ref(vpc),
            Tags=resource_tags,
        ))
    # Private subnet: no public IPs; egress goes through the NAT gateway below.
    priv_subnet = template.add_resource(
        ec2.Subnet(
            "PrivSubnet",
            VpcId=Ref(vpc),
            CidrBlock=priv_subnet_cidr,
            MapPublicIpOnLaunch=False,
            AvailabilityZone=Select(0, GetAZs()),
            Tags=resource_tags,
        ))
    template.add_resource(
        ec2.SubnetRouteTableAssociation("RouteTblPrivSubnetAsoc",
                                        RouteTableId=Ref(priv_route_tbl),
                                        SubnetId=Ref(priv_subnet)))
    ngw_elastic_ip = template.add_resource(
        ec2.EIP(
            "MyNgwEip",
            Tags=resource_tags,
        ))
    # NAT gateway lives in the PUBLIC subnet so the private subnet can egress.
    nat_gateway = template.add_resource(
        ec2.NatGateway(
            "MyNatGateway",
            AllocationId=GetAtt(ngw_elastic_ip, "AllocationId"),
            SubnetId=Ref(subnet),
        ))
    template.add_resource(
        ec2.Route("privateOutRoute",
                  DestinationCidrBlock="0.0.0.0/0",
                  NatGatewayId=Ref(nat_gateway),
                  RouteTableId=Ref(priv_route_tbl)))

    template.add_output([
        Output(
            "VpcId",
            Description="InstanceId of the newly created EC2 instance",
            Value=Ref(vpc),
            Export=Export("VpcId-jdix"),
        ),
        Output(
            "SubnetId",
            Description="InstanceId of the newly created EC2 instance",
            Value=Ref(subnet),
            Export=Export("SubnetId-jdix"),
        ),
        Output(
            "PrivSubnetId",
            Description="InstanceId of the newly created EC2 instance",
            Value=Ref(priv_subnet),
            Export=Export("PrivSubnetId-jdix"),
        ),
    ])
    template_out_yaml(cfn_file, template)