def define_names_from_lookup(self, session):
    """
    Look up the secret in AWS Secrets Manager (via tags / name) and set the
    resource's name, ARN, KMS key and CFN mapping attributes from the result.

    :param boto3.session.Session session: session used for the lookup API calls.
    """
    lookup_info = self.definition[self.x_key]["Lookup"]
    # An explicit Name in the definition narrows the lookup.
    if keyisset("Name", self.definition[self.x_key]):
        lookup_info["Name"] = self.definition[self.x_key]["Name"]
    secret_config = lookup_secret_config(self.logical_name, lookup_info, session)
    # secret_config[self.logical_name] holds the secret ARN.
    self.aws_name = get_secret_name_from_arn(secret_config[self.logical_name])
    self.arn = secret_config[self.logical_name]
    self.iam_arn = secret_config[self.logical_name]
    # Only import a real KMS key id/ARN; a key *alias* cannot be used here.
    if keyisset("KmsKeyId", secret_config) and not secret_config[
        "KmsKeyId"
    ].startswith("alias"):
        self.kms_key = secret_config["KmsKeyId"]
    elif keyisset("KmsKeyId", secret_config) and secret_config[
        "KmsKeyId"
    ].startswith("alias"):
        LOG.warning(
            f"secrets.{self.name} - The KMS Key retrieved is a KMS Key Alias, not importing."
        )
    self.mapping = {
        self.map_arn_name: secret_config[self.logical_name],
        self.map_name_name: secret_config[self.map_name_name],
    }
    # NOTE(review): assumes self.kms_key pre-exists (e.g. set to None in
    # __init__) when no KmsKeyId was returned — confirm against the class init.
    if self.kms_key:
        self.mapping[self.map_kms_name] = self.kms_key
        self.kms_key_arn = FindInMap(
            self.map_name, self.logical_name, self.map_kms_name
        )
    # self.arn is deliberately re-assigned: from here on, consumers resolve the
    # ARN through the CFN mapping rather than the raw lookup string.
    self.arn = FindInMap(self.map_name, self.logical_name, self.map_arn_name)
    self.ecs_secret = [EcsSecret(Name=self.name, ValueFrom=self.arn)]
def handle_import_cognito_pool(the_pool, listener_stack, settings):
    """
    Resolve a Cognito User Pool into values usable by the listener stack.

    Two mutually exclusive cases:
    * pool created in this deployment (cfn_resource set): expose it to the
      listener stack through new String parameters and return Refs to them;
    * pool discovered via lookup (mappings set): merge the mapping into the
      listener stack template and return FindInMap imports.

    :param the_pool: the Cognito pool resource wrapper
    :param listener_stack: stack hosting the listener using the pool
    :param settings: execution settings holding the lookup mappings
    :return: (pool id, pool ARN[, pool domain]) intrinsic values, or None if
        neither branch matches.
    """
    if the_pool.cfn_resource and not the_pool.mappings:
        pool_id_param = Parameter(
            f"{the_pool.logical_name}{USERPOOL_ID.title}", Type="String")
        pool_arn = Parameter(f"{the_pool.logical_name}{USERPOOL_ARN.title}",
                             Type="String")
        add_parameters(listener_stack.stack_template, [pool_id_param, pool_arn])
        listener_stack.Parameters.update({
            pool_id_param.title: Ref(the_pool.cfn_resource),
            # NOTE(review): Ref(pool_arn) points the parameter at itself rather
            # than at the pool's ARN (e.g. GetAtt on the pool) — verify intent.
            pool_arn.title: Ref(pool_arn),
        })
        return Ref(pool_id_param), Ref(pool_arn)
    elif the_pool.mappings and not the_pool.cfn_resource:
        add_update_mapping(
            listener_stack.stack_template,
            the_pool.module.mapping_key,
            settings.mappings[the_pool.module.mapping_key],
        )
        # NOTE(review): this branch returns 3 values (incl. domain) while the
        # branch above returns 2, and mixes .title with .return_value for the
        # ARN key — confirm callers and the mapping keys agree.
        return (
            FindInMap(COGNITO_MAP, the_pool.logical_name, USERPOOL_ID.title),
            FindInMap(COGNITO_MAP, the_pool.logical_name,
                      USERPOOL_ARN.return_value),
            FindInMap(COGNITO_MAP, the_pool.logical_name,
                      USERPOOL_DOMAIN.title),
        )
def __init__(self, arg_dict):
    '''
    Init method wires up all the required networking resources to deploy this
    set of infrastructure: AZ/CIDR mappings, the VPC and subnets, a utility S3
    bucket, and a common security group.

    @param arg_dict [dict] collection of keyword arguments for this class
        implementation ('network', 'template' and 'boto' sub-dicts are read).
    '''
    network_config = arg_dict.get('network', {})
    template_config = arg_dict.get('template', {})
    EnvironmentBase.__init__(self, arg_dict)
    self.vpc = None
    self.azs = []          # FindInMap references to per-index AZ names
    self.local_subnets = {}
    self.stack_outputs = {}
    # AZ count must cover the larger of the public/private subnet counts.
    self.add_vpc_az_mapping(
        boto_config=arg_dict.get('boto', {}),
        az_count=max(network_config.get('public_subnet_count', 2),
                     network_config.get('private_subnet_count', 2)))
    self.add_network_cidr_mapping(network_config=network_config)
    self.create_network(network_config=network_config)
    self.utility_bucket = self.add_utility_bucket(
        name=template_config.get('s3_utility_bucket', 'demo'))
    # Common SG: open egress for HTTP/HTTPS/NTP, SSH ingress restricted to the
    # VPC CIDR taken from the 'networkAddresses' mapping.
    self.common_sg = self.template.add_resource(
        ec2.SecurityGroup(
            'commonSecurityGroup',
            GroupDescription=
            'Security Group allows ingress and egress for common usage patterns throughout this deployed infrastructure.',
            VpcId=Ref(self.vpc),
            SecurityGroupEgress=[
                ec2.SecurityGroupRule(FromPort='80',
                                      ToPort='80',
                                      IpProtocol='tcp',
                                      CidrIp='0.0.0.0/0'),
                ec2.SecurityGroupRule(FromPort='443',
                                      ToPort='443',
                                      IpProtocol='tcp',
                                      CidrIp='0.0.0.0/0'),
                ec2.SecurityGroupRule(FromPort='123',
                                      ToPort='123',
                                      IpProtocol='udp',
                                      CidrIp='0.0.0.0/0')
            ],
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(FromPort='22',
                                      ToPort='22',
                                      IpProtocol='tcp',
                                      CidrIp=FindInMap(
                                          'networkAddresses', 'vpcBase',
                                          'cidr'))
            ]))
    # Resolve each AZ name through the RegionMap mapping for this region.
    for x in range(
            0,
            max(int(network_config.get('public_subnet_count', 2)),
                int(network_config.get('private_subnet_count', 2)))):
        self.azs.append(
            FindInMap('RegionMap', Ref('AWS::Region'),
                      'az' + str(x) + 'Name'))
def set_vpc_params_from_vpc_stack_import(self, vpc_stack):
    """
    Set this stack's VPC/subnet parameters from the "Network" mapping when we
    are importing an existing VPC instead of creating one.

    :param vpc_stack: stack wrapper whose vpc_resource carries subnet parameters
    """
    subnet_params = vpc_stack.vpc_resource.subnets_parameters
    add_parameters(self.stack_template, subnet_params)
    add_parameters(self.stack_template, [VPC_ID])
    # Build all the parameter values first, then update once.
    values = {VPC_ID.title: FindInMap("Network", VPC_ID.title, VPC_ID.title)}
    for param in subnet_params:
        values[param.title] = Join(
            ",", FindInMap("Network", param.title, "Ids"))
    self.Parameters.update(values)
def construct_network(self):
    """
    Main function to construct VPC, subnets, security groups, NAT instances,
    etc.  Reads 'network' and 'boto' sections from self.config, builds the AZ
    and CIDR mappings, the network components, and a shared security group.
    """
    network_config = self.config.get('network', {})
    self.azs = []  # FindInMap references to per-index AZ names
    az_count = int(network_config.get('az_count', '2'))
    self.stack_outputs = {}
    self.add_vpc_az_mapping(boto_config=self.config.get('boto', {}),
                            az_count=az_count)
    self.add_network_cidr_mapping(network_config=network_config)
    self.create_network_components(network_config=network_config)
    # Common SG: open egress for HTTP/HTTPS/NTP, SSH ingress restricted to the
    # VPC CIDR taken from the 'networkAddresses' mapping.
    self.template.common_security_group = self.template.add_resource(
        ec2.SecurityGroup(
            'commonSecurityGroup',
            GroupDescription=
            'Security Group allows ingress and egress for common usage patterns throughout this deployed infrastructure.',
            VpcId=Ref(self.template.vpc_id),
            SecurityGroupEgress=[
                ec2.SecurityGroupRule(FromPort='80',
                                      ToPort='80',
                                      IpProtocol='tcp',
                                      CidrIp='0.0.0.0/0'),
                ec2.SecurityGroupRule(FromPort='443',
                                      ToPort='443',
                                      IpProtocol='tcp',
                                      CidrIp='0.0.0.0/0'),
                ec2.SecurityGroupRule(FromPort='123',
                                      ToPort='123',
                                      IpProtocol='udp',
                                      CidrIp='0.0.0.0/0')
            ],
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(FromPort='22',
                                      ToPort='22',
                                      IpProtocol='tcp',
                                      CidrIp=FindInMap(
                                          'networkAddresses', 'vpcBase',
                                          'cidr'))
            ]))
    # Resolve each AZ name through the RegionMap mapping for this region.
    for x in range(0, az_count):
        self.azs.append(
            FindInMap('RegionMap', Ref('AWS::Region'),
                      'az' + str(x) + 'Name'))
def create_nat_instance(self, zone_id, subnet_name):
    """
    Add both a NAT instance and a NAT gateway for one AZ; CloudFormation
    conditions ("UseNatInstances"/"UseNatGateway") select which one is
    actually created at deploy time.

    :param zone_id: AZ suffix used to name the per-zone resources
    :param subnet_name: logical name of the subnet hosting the NAT
    :return: the (conditional) NAT instance resource
    """
    t = self.template
    suffix = zone_id
    nat_instance = t.add_resource(
        ec2.Instance(NAT_INSTANCE_NAME % suffix,
                     Condition="UseNatInstances",
                     ImageId=FindInMap('AmiMap', Ref("AWS::Region"),
                                       Ref("ImageName")),
                     SecurityGroupIds=[Ref(DEFAULT_SG), Ref(NAT_SG)],
                     SubnetId=Ref(subnet_name),
                     InstanceType=Ref('InstanceType'),
                     # NAT boxes must forward traffic not addressed to them.
                     SourceDestCheck=False,
                     KeyName=Ref('SshKeyName'),
                     Tags=[ec2.Tag('Name', 'nat-gw%s' % suffix)],
                     DependsOn=GW_ATTACH))
    # The EIP is attached to the instance only when NAT instances are in use;
    # otherwise the NAT gateway below consumes its AllocationId.
    eip = t.add_resource(
        ec2.EIP('NATExternalIp%s' % suffix,
                Domain='vpc',
                InstanceId=If("UseNatInstances", Ref(nat_instance),
                              Ref("AWS::NoValue")),
                DependsOn=GW_ATTACH))
    t.add_resource(
        ec2.NatGateway(
            NAT_GATEWAY_NAME % suffix,
            Condition="UseNatGateway",
            AllocationId=GetAtt(eip, 'AllocationId'),
            SubnetId=Ref(subnet_name),
        ))
    return nat_instance
def addSearchGuard(template, role, subnet, keyname, secgroup, profilename):
    """
    Add a Search Guard EC2 node (m4.xlarge) with its instance profile to the
    template.

    :param template: troposphere Template to add resources to
    :param role: IAM role attached via the new instance profile
    :param subnet: subnet for the primary network interface
    :param keyname: SSH key pair parameter/resource
    :param secgroup: security group for the primary interface
    :param profilename: suffix used for resource names and node tags
    :return: the created Instance resource
    """
    profile = InstanceProfile("sgprofile" + profilename,
                              Path="/",
                              Roles=[Ref(role)])
    template.add_resource(profile)
    instance = Instance(
        "sg" + profilename,
        InstanceType="m4.xlarge",
        # AMI comes from the RegionToAmi mapping, "stable" channel.
        ImageId=FindInMap("RegionToAmi", Ref("AWS::Region"), "stable"),
        DisableApiTermination=False,
        IamInstanceProfile=Ref(profile),
        KeyName=Ref(keyname),
        Monitoring=False,
        InstanceInitiatedShutdownBehavior="stop",
        # Bootstrap script is injected as user data at launch.
        UserData=userdata.from_file("src/bootstrap.sh"),
        NetworkInterfaces=[
            NetworkInterfaceProperty(DeviceIndex=0,
                                     Description="Primary network interface",
                                     SubnetId=Ref(subnet),
                                     DeleteOnTermination=True,
                                     AssociatePublicIpAddress=True,
                                     GroupSet=[Ref(secgroup)])
        ],
        Tags=[
            Tag("Name", "Search Guard " + profilename),
            Tag("sgnodetag", profilename)
        ],
        EbsOptimized=False,
        BlockDeviceMappings=[
            BlockDeviceMapping(DeviceName="/dev/sda1",
                               Ebs=EBSBlockDevice(VolumeSize=25))
        ])
    template.add_resource(instance)
    return instance
def add_resource_and_output(self):
    """Add the PA EC2 instance resource and its exported ID output.

    Builds one ec2.NetworkInterfaceProperty per (device index, interface id)
    pair from the 'NetworkInterfaces' variable, creates the instance, then
    exports its physical ID as '<StackName>-<Title>Id'.
    """
    template = self.template
    variables = self.get_variables()
    networkinterfaces = []
    # Fixed: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; .items() behaves identically here.
    for di, netintid in variables['NetworkInterfaces'].items():
        networkinterfaces.append(
            ec2.NetworkInterfaceProperty(DeviceIndex=di,
                                         NetworkInterfaceId=netintid))
    ec2instance = template.add_resource(
        ec2.Instance(
            'PaEc2Instance',
            DisableApiTermination='true',  # s4f
            # IamInstanceProfile=variables['InstanceProfile'].ref,
            ImageId=FindInMap('PaAmiRegionMap', Ref('AWS::Region'), 'AMI'),
            InstanceType=variables['InstanceType'].ref,
            KeyName=variables['KeyName'].ref,
            Monitoring=variables['DetailedMonitoring'].ref,
            NetworkInterfaces=networkinterfaces,
            Tags=Tags(variables['Tags']),
            Tenancy=variables['PlacementTenancy'].ref,
        ))
    template.add_output(
        Output('{}Id'.format(ec2instance.title),
               Description='ID of EC2 Instance created',
               Export=Export(
                   Sub('${AWS::StackName}-%sId' % ec2instance.title)),
               Value=Ref(ec2instance)))
def imap(self, mapping, key_lvl1, key_lvl2, value):
    """
    Store *value* under mappings[mapping][key_lvl1][key_lvl2], creating the
    intermediate dicts on demand, and return the matching FindInMap.
    """
    first_level = self.mappings.setdefault(mapping, {})
    second_level = first_level.setdefault(key_lvl1, {})
    second_level[key_lvl2] = value
    return FindInMap(mapping, key_lvl1, key_lvl2)
def build(self):
    """
    Build a minimal template: a KeyName parameter, one EC2 instance whose AMI
    comes from the RegionMap mapping, and an InstanceId output.
    """
    keyname_param = self.template.add_parameter(
        Parameter(
            "KeyName",
            Description=
            "Name of an existing EC2 KeyPair to enable SSH access to the instance",
            Type="String",
        ))
    ec2_instance = self.template.add_resource(
        ec2.Instance(
            "Ec2Instance",
            ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
            InstanceType=self.InstanceType,
            KeyName=Ref(keyname_param),
            SecurityGroups=["default"],
            # NOTE(review): user data is the literal string "80" — presumably
            # a placeholder/port value; confirm intent.
            UserData=Base64("80"),
        ))
    self.template.add_output([
        Output(
            "InstanceId",
            Description="InstanceId of the newly created EC2 instance",
            Value=Ref(ec2_instance),
        )
    ])
def add_nat_asg(self):
    """
    Create a single-instance AutoScalingGroup running a NAT box in the first
    public subnet layer, with takeover user-data scripts and a cfn-signal
    handshake tied to the ASG's CreationPolicy.

    :return: the AutoScalingGroup resource
    """
    user_data = [resources.get_resource('nat_takeover.sh')]
    if self.enable_ntp:
        user_data.append(resources.get_resource('ntp_takeover.sh'))
    if self.extra_user_data:
        # Fixed: use a context manager so the file handle is always closed.
        with open(self.extra_user_data) as extra_fh:
            user_data.append(extra_fh.read())
    nat_asg_name = "Nat%sASG" % str(self.subnet_index)
    # Signal the ASG resource so the CreationPolicy below can succeed.
    user_data.extend([
        "\n", "cfn-signal -s true", " --resource ", nat_asg_name,
        " --stack ", {
            "Ref": "AWS::StackName"
        }, " --region ", {
            "Ref": "AWS::Region"
        }
    ])
    nat_launch_config = self.add_resource(
        LaunchConfiguration("Nat%sLaunchConfig" % str(self.subnet_index),
                            UserData=Base64(Join('', user_data)),
                            ImageId=FindInMap('RegionMap',
                                              Ref('AWS::Region'),
                                              'natAmiId'),
                            KeyName=Ref('ec2Key'),
                            SecurityGroups=[Ref(self.sg)],
                            EbsOptimized=False,
                            IamInstanceProfile=Ref(self.instance_profile),
                            InstanceType=self.instance_type,
                            AssociatePublicIpAddress=True))
    # Create the NAT in a public subnet.
    # Fixed: dict.keys()[0] is Python 2 only — dict views are not
    # subscriptable on Python 3; next(iter(...)) picks the same first key.
    subnet_layer = next(iter(self._subnets['public']))
    nat_asg = self.add_resource(
        AutoScalingGroup(
            nat_asg_name,
            DesiredCapacity=1,
            Tags=[
                Tag("Name", Join("-", [
                    "NAT",
                    self.subnet_index,
                ]), True),
                Tag("isNat", "true", True)
            ],
            MinSize=1,
            MaxSize=1,
            Cooldown="30",
            LaunchConfigurationName=Ref(nat_launch_config),
            HealthCheckGracePeriod=30,
            HealthCheckType="EC2",
            VPCZoneIdentifier=[
                self._subnets['public'][subnet_layer][self.subnet_index]
            ],
            CreationPolicy=CreationPolicy(
                ResourceSignal=ResourceSignal(Count=1, Timeout='PT15M'))))
    return nat_asg
def create_autoscaling_group(self):
    """
    Add the Empire controller LaunchConfiguration and AutoScalingGroup to the
    template; size, AMI, key and subnets are all driven by stack parameters.
    """
    t = self.template
    t.add_resource(
        autoscaling.LaunchConfiguration(
            'EmpireControllerLaunchConfig',
            IamInstanceProfile=GetAtt("EmpireControllerProfile", "Arn"),
            ImageId=FindInMap('AmiMap', Ref("AWS::Region"),
                              Ref("ImageName")),
            BlockDeviceMappings=self.build_block_device(),
            InstanceType=Ref("InstanceType"),
            KeyName=Ref("SshKeyName"),
            UserData=self.generate_user_data(),
            SecurityGroups=[Ref("DefaultSG"), Ref(CLUSTER_SG_NAME)]))
    t.add_resource(
        autoscaling.AutoScalingGroup(
            'EmpireControllerAutoscalingGroup',
            AvailabilityZones=Ref("AvailabilityZones"),
            LaunchConfigurationName=Ref("EmpireControllerLaunchConfig"),
            MinSize=Ref("MinHosts"),
            MaxSize=Ref("MaxHosts"),
            VPCZoneIdentifier=Ref("PrivateSubnets"),
            LoadBalancerNames=[
                Ref("EmpireControllerLoadBalancer"),
            ],
            # Name tag propagates to launched instances (third arg True).
            Tags=[ASTag('Name', 'empire_controller', True)]))
def define_names_from_import(self):
    """
    Define the names from docker-compose file content.

    Accepts either a full secret ARN or a plain secret name; in both cases
    the ARN/IAM ARN attributes are rebuilt as Sub expressions over the CFN
    mapping so the template stays portable across partitions/accounts.

    :raises KeyError: when the name key is missing from the definition.
    """
    if not keyisset(self.map_name_name, self.definition[self.x_key]):
        raise KeyError(
            f"Missing {self.map_name_name} when doing non-lookup import for {self.name}"
        )
    name_input = self.definition[self.x_key][self.map_name_name]
    if name_input.startswith("arn:"):
        # Full ARN given: derive the friendly name and keep both in the map.
        self.aws_name = get_secret_name_from_arn(
            self.definition[self.x_key][self.map_name_name]
        )
        self.mapping = {
            self.map_arn_name: name_input,
            self.map_name_name: self.aws_name,
        }
    else:
        self.aws_name = name_input
        self.mapping = {self.map_name_name: self.aws_name}
    # ARN is rebuilt from the mapping; trailing '*' on iam_arn covers the
    # random suffix Secrets Manager appends to secret ARNs.
    self.arn = Sub(
        f"arn:${{{AWS_PARTITION}}}:secretsmanager:${{{AWS_REGION}}}:${{{AWS_ACCOUNT_ID}}}:"
        "secret:${SecretName}",
        SecretName=FindInMap(
            self.map_name, self.logical_name, self.map_name_name
        ),
    )
    self.iam_arn = Sub(
        f"arn:${{{AWS_PARTITION}}}:secretsmanager:${{{AWS_REGION}}}:${{{AWS_ACCOUNT_ID}}}:"
        "secret:${SecretName}*",
        SecretName=FindInMap(
            self.map_name, self.logical_name, self.map_name_name
        ),
    )
    self.ecs_secret = [EcsSecret(Name=self.name, ValueFrom=self.arn)]
    # NOTE(review): the KMS key is read from self.definition directly, while
    # the name above is read from self.definition[self.x_key] — confirm the
    # intended nesting level for the KMS key.
    if keyisset(self.map_kms_name, self.definition):
        if not self.definition[self.map_kms_name].startswith(
            "arn:"
        ) or not KMS_KEY_ARN_RE.match(self.definition[self.map_kms_name]):
            LOG.error(
                f"secrets.{self.name} - When specifying {self.map_kms_name} you must specify the full ARN"
            )
        else:
            self.mapping[self.map_kms_name] = self.definition[self.map_kms_name]
            self.kms_key_arn = FindInMap(
                self.map_name, self.logical_name, self.map_kms_name
            )
def lookup_cluster(self, session):
    """
    Define the ECS Cluster properties and definitions from ECS API.

    Supports Lookup as a plain cluster name (str) or a dict that may carry a
    RoleArn to assume before the lookup calls.

    :param boto3.session.Session session: Boto3 session to make API calls.
    :return: The cluster details
    :rtype: dict
    :raises TypeError: if Lookup is neither str nor dict.
    :raises LookupError: if the cluster name is not found in the account.
    """
    if not isinstance(self.lookup, (str, dict)):
        raise TypeError("The value for Lookup must be", str, dict, "Got",
                        type(self.lookup))
    ecs_session = session
    if isinstance(self.lookup, dict):
        # Cross-account lookup: assume the given role for the ECS API calls.
        if keyisset("RoleArn", self.lookup):
            ecs_session = get_assume_role_session(
                session,
                self.lookup["RoleArn"],
                session_name="EcsClusterLookup@ComposeX",
            )
        cluster_name = self.lookup["ClusterName"]
    else:
        cluster_name = self.lookup
    try:
        clusters = list_all_ecs_clusters(session=ecs_session)
        # Friendly names extracted from the ARNs, used for error reporting.
        cluster_names = [
            CLUSTER_NAME_FROM_ARN.match(c_name).group("name")
            for c_name in clusters
        ]
        clusters_config = describe_all_ecs_clusters_from_ccapi(
            clusters,
            return_as_map=True,
            use_cluster_name=True,
            session=ecs_session)
        if cluster_name not in clusters_config.keys():
            raise LookupError(
                f"Failed to find {cluster_name}. Available clusters are",
                cluster_names,
            )
        the_cluster = clusters_config[cluster_name]
        LOG.info(
            f"x-cluster.{cluster_name} found. Setting {CLUSTER_NAME.title} accordingly."
        )
        self.mappings = {
            CLUSTER_NAME.title: {
                "Name": the_cluster["ClusterName"]
            }
        }
        self.set_cluster_mappings(the_cluster)
        # Derive capacity providers / default strategy / Fargate override
        # from the live cluster configuration.
        self.capacity_providers = evaluate_capacity_providers(the_cluster)
        if self.capacity_providers:
            self.default_strategy_providers = get_default_capacity_strategy(
                the_cluster)
        self.platform_override = evaluate_fargate_is_set(
            self.capacity_providers, the_cluster)
        self.cluster_identifier = FindInMap(self.mappings_key,
                                            CLUSTER_NAME.title, "Name")
    except ClientError as error:
        LOG.error(error)
        raise
def set_attributes_from_mapping(self, attribute_parameter):
    """
    Build the FindInMap pointing at a looked-up resource attribute.

    When the parameter defines a return value, the mapping key is that value
    stripped of non-alphanumeric characters; otherwise the parameter title
    is used directly.

    :param attribute_parameter: The parameter mapped to the resource attribute
    :type attribute_parameter: ecs_composex.common.cfn_params.Parameter
    :return: The FindInMap setting for mapped resource
    """
    map_key = (
        NONALPHANUM.sub("", attribute_parameter.return_value)
        if attribute_parameter.return_value
        else attribute_parameter.title
    )
    return FindInMap(self.module.mapping_key, self.logical_name, map_key)
def get_launch_configuration_parameters(self):
    """Return the keyword arguments common to this launch configuration."""
    parameters = {}
    parameters['ImageId'] = FindInMap('AmiMap', Ref("AWS::Region"),
                                      Ref('ImageName'))
    parameters['InstanceType'] = Ref("InstanceType")
    parameters['KeyName'] = Ref("SshKeyName")
    parameters['SecurityGroups'] = self.get_launch_configuration_security_groups()
    return parameters
def vpc_id(self):
    """
    Give the VPC ID: a GetAtt on this stack's output when the VPC is created
    here, a FindInMap on the "Network" mapping when it is imported, else None.

    :return: intrinsic function resolving the VPC ID, or None
    """
    if self.vpc_resource and not self.is_void:
        return GetAtt(self.title, f"Outputs.{VPC_ID.title}")
    if self.is_void and self.vpc_resource.mappings:
        return FindInMap("Network", VPC_ID.title, VPC_ID.title)
    return None
def create_template(self):
    """
    Create S3 buckets from the "Buckets" variable, emitting Id/Arn/DomainName
    outputs per bucket, optional website endpoint wiring, and read-write /
    read-only IAM policies over all buckets for the given roles.
    """
    t = self.template
    variables = self.get_variables()
    self.bucket_ids = []  # Refs to every bucket, reused by the IAM policies
    for title, attrs in variables["Buckets"].items():
        bucket_id = Ref(title)
        t.add_resource(s3.Bucket.from_dict(title, attrs))
        t.add_output(Output(title + "BucketId", Value=bucket_id))
        t.add_output(Output(title + "BucketArn", Value=s3_arn(bucket_id)))
        t.add_output(
            Output(title + "BucketDomainName",
                   Value=GetAtt(title, "DomainName")))
        if "WebsiteConfiguration" in attrs:
            # Website buckets additionally need a public-read policy and the
            # per-region website endpoint mapping.
            t.add_mapping("WebsiteEndpoints", S3_WEBSITE_ENDPOINTS)
            t.add_resource(
                s3.BucketPolicy(
                    title + "BucketPolicy",
                    Bucket=bucket_id,
                    PolicyDocument=static_website_bucket_policy(bucket_id),
                ))
            t.add_output(
                Output(title + "WebsiteUrl",
                       Value=GetAtt(title, "WebsiteURL")))
            t.add_output(
                Output(title + "WebsiteEndpoint",
                       Value=FindInMap("WebsiteEndpoints", Region,
                                       "endpoint")))
        self.bucket_ids.append(bucket_id)
    read_write_roles = variables["ReadWriteRoles"]
    if read_write_roles:
        t.add_resource(
            iam.PolicyType(
                "ReadWritePolicy",
                PolicyName=Sub("${AWS::StackName}ReadWritePolicy"),
                PolicyDocument=read_write_s3_bucket_policy(
                    self.bucket_ids),
                Roles=read_write_roles,
            ))
    read_only_roles = variables["ReadRoles"]
    if read_only_roles:
        t.add_resource(
            iam.PolicyType(
                "ReadPolicy",
                PolicyName=Sub("${AWS::StackName}ReadPolicy"),
                PolicyDocument=read_only_s3_bucket_policy(self.bucket_ids),
                Roles=read_only_roles,
            ))
def buildMySQL(t, args):
    """
    Add the MySQL RDS tier: DB security group + ingress from the application
    SG, a subnet group over the two private subnets, and either a
    snapshot-restored or a fresh encrypted MySQL instance.

    :param t: troposphere Template to add resources to
    :param args: parsed CLI args; args.recovery selects snapshot restore,
        args.dev selects single-AZ + Delete deletion policy
    :return: the template, for chaining
    """
    t.add_resource(
        ec2.SecurityGroup('DBSecurityGroup',
                          GroupDescription='Patient Records',
                          VpcId=Ref('VPC'),
                          Tags=Tags(Name='MySQL Access')))
    # All protocols/ports, but only from the application security group.
    t.add_resource(
        ec2.SecurityGroupIngress(
            'DBSGIngress',
            GroupId=Ref('DBSecurityGroup'),
            IpProtocol='-1',
            SourceSecurityGroupId=Ref('ApplicationSecurityGroup')))
    t.add_resource(
        rds.DBSubnetGroup(
            'RDSSubnetGroup',
            DBSubnetGroupDescription='MySQL node locations',
            SubnetIds=[Ref('PrivateSubnet1'),
                       Ref('PrivateSubnet2')]))
    if (args.recovery):
        # Recovery path: restore from the provided snapshot ARN; engine and
        # credentials come from the snapshot itself.
        t.add_resource(
            rds.DBInstance('RDSInstance',
                           DeletionPolicy='Delete' if args.dev else 'Snapshot',
                           DBSnapshotIdentifier=Ref('RecoveryRDSSnapshotARN'),
                           DBInstanceClass=Ref('RDSInstanceSize'),
                           PubliclyAccessible=False,
                           DBSubnetGroupName=Ref('RDSSubnetGroup'),
                           VPCSecurityGroups=[Ref('DBSecurityGroup')],
                           MultiAZ=not args.dev,
                           Tags=Tags(Name='Patient Records')))
    else:
        # Fresh instance path: encrypted MySQL with version taken from the
        # RegionData mapping.  NOTE(review): ref_region and OpenEMRKeyID are
        # module-level globals defined outside this view.
        t.add_resource(
            rds.DBInstance('RDSInstance',
                           DeletionPolicy='Delete' if args.dev else 'Snapshot',
                           DBName='openemr',
                           AllocatedStorage=Ref('PatientRecords'),
                           DBInstanceClass=Ref('RDSInstanceSize'),
                           Engine='MySQL',
                           EngineVersion=FindInMap('RegionData', ref_region,
                                                   'MySQLVersion'),
                           MasterUsername='******',
                           MasterUserPassword=Ref('RDSPassword'),
                           PubliclyAccessible=False,
                           DBSubnetGroupName=Ref('RDSSubnetGroup'),
                           VPCSecurityGroups=[Ref('DBSecurityGroup')],
                           KmsKeyId=OpenEMRKeyID,
                           StorageEncrypted=True,
                           MultiAZ=not args.dev,
                           Tags=Tags(Name='Patient Records')))
    return t
def _get_launch_configuration_parameters(self, chain_context):
    """
    Build the launch configuration keyword arguments, pulling the ASG
    security group from the chain context metadata.

    :param chain_context: context carrying metadata for this chain
    :return: dict of LaunchConfiguration keyword arguments
    """
    security_groups = [chain_context.metadata[META_SECURITY_GROUP_REF]]
    return {
        'ImageId': FindInMap('AmiMap', Ref("AWS::Region"),
                             Ref('ImageName')),
        'InstanceType': Ref("InstanceType"),
        'KeyName': Ref("SshKeyName"),
        'SecurityGroups': security_groups,
    }
def set_from_definition(self, root_stack, session, settings):
    """
    Configure the cluster from one of three mutually exclusive sources:
    an AWS lookup, a re-used cluster name (Use), or explicit Properties.
    """
    if self.lookup:
        self.lookup_cluster(session)
        add_update_mapping(root_stack.stack_template, self.mappings_key,
                           self.mappings)
        return
    if self.definition and self.use:
        self.mappings = {CLUSTER_NAME.title: {"Name": self.use}}
        add_update_mapping(root_stack.stack_template, self.mappings_key,
                           self.mappings)
        self.cluster_identifier = FindInMap(self.mappings_key,
                                            CLUSTER_NAME.title, "Name")
        return
    if self.properties:
        self.define_cluster(root_stack, settings)
def link_cluster_to_service(
    cluster: CacheCluster, cluster_mappings: dict, mapping_name: str
):
    """
    Go over each service family targeting the cache cluster and wire the
    cluster's mapping plus a security-group ingress rule into it.

    :param ecs_composex.elasticache.elasticache_stack.CacheCluster cluster:
    :param dict cluster_mappings:
    :param str mapping_name:
    :return:
    """
    for target in cluster.families_targets:
        family = target[0]
        family.template.add_mapping(mapping_name, cluster_mappings)
        sg_value = Select(
            0,
            FindInMap(mapping_name, cluster.logical_name, CLUSTER_SG.title),
        )
        port_value = FindInMap(
            mapping_name, cluster.logical_name, cluster.port_attr.title
        )
        add_security_group_ingress(
            family.stack,
            cluster.logical_name,
            sg_id=sg_value,
            port=port_value,
        )
def create_nat_instance(self, zone_id, subnet_name):
    """
    Create NAT for one AZ, as either a NAT Gateway or a NAT instance
    depending on the "UseNatGateway" variable, plus the associated EIP.

    :param zone_id: AZ suffix used to name the per-zone resources
    :param subnet_name: logical name of the subnet hosting the NAT
    """
    t = self.template
    variables = self.get_variables()
    suffix = zone_id
    eip_name = "NATExternalIp%s" % suffix
    if variables["UseNatGateway"]:
        gateway_name = NAT_GATEWAY_NAME % suffix
        t.add_resource(
            ec2.NatGateway(
                gateway_name,
                AllocationId=GetAtt(eip_name, 'AllocationId'),
                SubnetId=Ref(subnet_name),
            ))
        t.add_output(Output(gateway_name + "Id", Value=Ref(gateway_name)))
        # Using NAT Gateways, leave the EIP unattached - it gets allocated
        # to the NAT Gateway in that resource above
        eip_instance_id = Ref("AWS::NoValue")
    else:
        image_id = FindInMap('AmiMap', Ref("AWS::Region"),
                             Ref("ImageName"))
        instance_name = NAT_INSTANCE_NAME % suffix
        t.add_resource(
            ec2.Instance(instance_name,
                         Condition="UseNatInstances",
                         ImageId=image_id,
                         SecurityGroupIds=[Ref(DEFAULT_SG),
                                           Ref(NAT_SG)],
                         SubnetId=Ref(subnet_name),
                         InstanceType=variables["InstanceType"],
                         # NAT boxes must forward traffic not addressed to them.
                         SourceDestCheck=False,
                         KeyName=variables["SshKeyName"],
                         Tags=[ec2.Tag('Name', 'nat-gw%s' % suffix)],
                         DependsOn=GW_ATTACH))
        t.add_output(
            Output(instance_name + "PublicHostname",
                   Value=GetAtt(instance_name, "PublicDnsName")))
        t.add_output(
            Output(instance_name + "InstanceId", Value=Ref(instance_name)))
        # Since we're using NAT instances, go ahead and attach the EIP
        # to the NAT instance
        eip_instance_id = Ref(instance_name)
    t.add_resource(
        ec2.EIP(eip_name,
                Domain='vpc',
                InstanceId=eip_instance_id,
                DependsOn=GW_ATTACH))
def define_secret(self, secret_name, json_key) -> EcsSecret:
    """
    Build an EcsSecret whose ValueFrom targets one JSON key inside the
    secret, handling the three possible forms of self.arn (plain string,
    Sub over the mapping, FindInMap into the mapping).

    :param secret_name: name exposed to the container
    :param json_key: JSON key inside the secret to extract
    :raises TypeError: if self.arn is none of str/Sub/FindInMap.
    """
    if isinstance(self.arn, str):
        # ECS JSON-key syntax: <arn>:<json-key>:<version-stage>:<version-id>
        secret = EcsSecret(Name=secret_name, ValueFrom=f"{self.arn}:{json_key}::")
    elif isinstance(self.arn, Sub):
        # ARN was built from the secret *name*: rebuild the full ARN inline.
        secret = EcsSecret(
            Name=secret_name,
            ValueFrom=Sub(
                f"arn:${{{AWS_PARTITION}}}:secretsmanager:${{{AWS_REGION}}}:${{{AWS_ACCOUNT_ID}}}:"
                f"secret:${{SecretName}}:{json_key}::",
                SecretName=FindInMap(
                    self.map_name,
                    self.logical_name,
                    self.map_name_name,
                ),
            ),
        )
    elif isinstance(self.arn, FindInMap):
        # ARN is stored in the mapping: append the JSON-key suffix to it.
        secret = EcsSecret(
            Name=secret_name,
            ValueFrom=Sub(
                f"${{SecretArn}}:{json_key}::",
                SecretArn=FindInMap(
                    self.map_name,
                    self.logical_name,
                    self.map_arn_name,
                ),
            ),
        )
    else:
        raise TypeError(
            f"secrets.{self.name} - ARN is",
            type(self.arn),
            "must be one of",
            str,
            Sub,
            FindInMap,
        )
    return secret
def import_log_config(self, exec_config):
    """
    Set the S3 bucket and CloudWatch log group used for ECS Execute Command
    from the cluster's ExecuteCommandConfiguration, if present.

    :param dict exec_config:
    :return:
    """
    if not keyisset("LogConfiguration", exec_config):
        return
    log_settings = exec_config["LogConfiguration"]
    if keyisset("CloudWatchLogGroupName", log_settings):
        self.mappings[CLUSTER_NAME.title]["CloudWatchLogGroupName"] = (
            log_settings["CloudWatchLogGroupName"]
        )
        self.log_group = FindInMap(
            self.mappings_key,
            CLUSTER_NAME.title,
            "CloudWatchLogGroupName",
        )
    if keyisset("S3BucketName", log_settings):
        self.mappings[CLUSTER_NAME.title]["S3BucketName"] = (
            log_settings["S3BucketName"]
        )
        self.log_bucket = FindInMap(
            self.mappings_key, CLUSTER_NAME.title, "S3BucketName"
        )
def add_nodes(t, nodes, prefix):
    """
    Add *nodes* QF2 cluster EC2 instances named '<prefix>Node<i>' and the
    outputs needed by the QF2 cluster-creation UI (private IPs, admin
    console URL, first node's instance id).

    :param t: troposphere Template to add resources to
    :param nodes: number of nodes to create
    :param prefix: logical-name prefix for the node resources
    """
    nodes_list = []
    for x in range(0, nodes):
        node_name = prefix + "Node" + str((x + 1))
        t.add_resource(
            ec2.Instance(node_name,
                         ImageId=FindInMap("RegionMap", Ref("AWS::Region"),
                                           "AMI"),
                         InstanceType=Ref("InstanceType"),
                         KeyName=Ref("KeyName"),
                         NetworkInterfaces=[
                             ec2.NetworkInterfaceProperty(
                                 AssociatePublicIpAddress=False,
                                 GroupSet=[Ref("QumuloSecurityGroup")],
                                 DeviceIndex=0,
                                 DeleteOnTermination=True,
                                 SubnetId=Ref("SubnetId"),
                             )
                         ]))
        nodes_list.append(node_name)
    # Create a list containing the Private IPs of all nodes.
    output_ips = []
    for i in nodes_list:
        output_ips.append(GetAtt(i, "PrivateIp"))
    t.add_output(
        Output(
            "ClusterPrivateIPs",
            Description=
            "Copy and paste this list into the QF2 Cluster Creation Screen",
            Value=Join(", ", output_ips),
        ))
    # Admin console is served over HTTPS from the first node's private IP.
    t.add_output(
        Output(
            "LinkToManagement",
            Description="Click to launch the QF2 Admin Console",
            Value=Join(
                "", ["https://", GetAtt(nodes_list[0], "PrivateIp")]),
        ))
    t.add_output(
        Output(
            "InstanceId",
            Description=
            "Copy and paste this instance ID into the QF2 Cluster Creation Screen.",
            Value=Ref(prefix + "Node1"),
        ))
def add_instance(template,
                 instance_name,
                 instance_type,
                 security_group='default'):
    """
    Ensure the template has an instance named *instance_name*, creating it
    (and the KeyName parameter) if the instance metadata is not set yet.

    NOTE(review): the security_group argument is currently unused — kept for
    interface compatibility.

    :return: the (possibly updated) template
    """
    template = add_keyname_parameter(template)
    if template.get_instance_metadata(instance_name):
        # Instance already registered; nothing to add.
        return template
    created = Instance(
        instance_name,
        ImageId=FindInMap(regionmap_name, Ref('AWS::Region'), regionmap_key),
        InstanceType=instance_type,
        KeyName=Ref(template.get_parameter_metadata(keyname_name)),
    )
    template.set_instance_metadata(instance_name, created)
    template.add_resource(created)
    return template
def set_cluster_mappings(self, cluster_api_def):
    """
    From the API info on the cluster, evaluate whether config is needed to
    enable ECS Execute Command: import the KMS key id if set, then delegate
    the log configuration to import_log_config().

    :param dict cluster_api_def:
    """
    if not keyisset("Configuration", cluster_api_def):
        return
    configuration = cluster_api_def["Configuration"]
    if not keyisset("ExecuteCommandConfiguration", configuration):
        return
    exec_config = configuration["ExecuteCommandConfiguration"]
    if keyisset("KmsKeyId", exec_config):
        self.mappings[CLUSTER_NAME.title]["KmsKeyId"] = exec_config["KmsKeyId"]
        self.log_key = FindInMap(
            self.mappings_key, CLUSTER_NAME.title, "KmsKeyId"
        )
    self.import_log_config(exec_config)
def add_compose_families(settings: ComposeXSettings) -> None:
    """
    Using existing ComposeFamily in settings, creates the ServiceStack and
    template: initializes each family, wires its IAM role parameters and
    cluster/platform parameters, adds the family stack to the root stack,
    then resolves inter-family dependencies.

    :param ecs_composex.common.settings.ComposeXSettings settings:
    """
    for family_name, family in settings.families.items():
        family.init_family()
        initialize_family_services(settings, family)
        # Task/exec role ARN and name are passed down as stack parameters.
        add_parameters(
            family.template,
            [
                family.iam_manager.task_role.arn_param,
                family.iam_manager.task_role.name_param,
                family.iam_manager.exec_role.arn_param,
                family.iam_manager.exec_role.name_param,
            ],
        )
        family.stack.Parameters.update({
            ecs_params.CLUSTER_NAME.title:
            settings.ecs_cluster.cluster_identifier,
            # Fargate platform version comes from the ComposeXDefaults mapping.
            ecs_params.FARGATE_VERSION.title:
            FindInMap("ComposeXDefaults", "ECS", "PlatformVersion"),
            family.iam_manager.task_role.arn_param.title:
            family.iam_manager.task_role.output_arn,
            family.iam_manager.task_role.name_param.title:
            family.iam_manager.task_role.output_name,
            family.iam_manager.exec_role.arn_param.title:
            family.iam_manager.exec_role.output_arn,
            family.iam_manager.exec_role.name_param.title:
            family.iam_manager.exec_role.output_name,
            ecs_params.SERVICE_HOSTNAME.title:
            family.family_hostname,
        })
        family.template.metadata.update(metadata)
        settings.root_stack.stack_template.add_resource(family.stack)
        family.validate_compute_configuration_for_task(settings)
    # Only resolve dependencies between families that produced real stacks.
    families_stacks = [
        family for family in settings.root_stack.stack_template.resources
        if (family in settings.families
            and isinstance(settings.families[family].stack, ServiceStack))
    ]
    handle_families_dependencies(settings, families_stacks)
def construct_network(self):
    """
    Main function to construct VPC, subnets, security groups, NAT instances,
    etc.  Uses the pre-computed network/NAT config and subnet layout on self,
    then adds a common security group and exposes it as a stack output.
    """
    network_config = self.network_config
    nat_config = self.nat_config
    az_count = self.az_count
    self.add_network_cidr_mapping(network_config=network_config)
    self._prepare_subnets(self._subnet_configs)
    self.create_network_components(network_config=network_config,
                                   nat_config=nat_config)
    # Common SG: open egress for HTTP/HTTPS/NTP, SSH ingress restricted to
    # the VPC CIDR taken from the 'networkAddresses' mapping.
    self._common_security_group = self.add_resource(
        ec2.SecurityGroup(
            'commonSecurityGroup',
            GroupDescription=
            'Security Group allows ingress and egress for common usage patterns throughout this deployed infrastructure.',
            VpcId=self.vpc_id,
            SecurityGroupEgress=[
                ec2.SecurityGroupRule(FromPort='80',
                                      ToPort='80',
                                      IpProtocol='tcp',
                                      CidrIp='0.0.0.0/0'),
                ec2.SecurityGroupRule(FromPort='443',
                                      ToPort='443',
                                      IpProtocol='tcp',
                                      CidrIp='0.0.0.0/0'),
                ec2.SecurityGroupRule(FromPort='123',
                                      ToPort='123',
                                      IpProtocol='udp',
                                      CidrIp='0.0.0.0/0')
            ],
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(FromPort='22',
                                      ToPort='22',
                                      IpProtocol='tcp',
                                      CidrIp=FindInMap(
                                          'networkAddresses', 'vpcBase',
                                          'cidr'))
            ]))
    self.add_output(
        Output('commonSecurityGroup', Value=self.common_security_group))