def validate_services(self):
    """Mark this LB as single-service when all its services share one name."""
    unique_names = {svc["name"] for svc in self.services}
    if len(unique_names) == 1:
        LOG.info(
            f"LB {self.name} only has a unique service. LB will be deployed with the service stack."
        )
        self.unique_service_lb = True
def create_bucket(bucket_name, session):
    """
    Function that checks if the S3 bucket exists and if not attempts to create it.

    :param bucket_name: name of the s3 bucket
    :type bucket_name: str
    :param session: boto3 session to use if wanted to override settings.
    :type session: boto3.session.Session
    :returns: None. Pre-existing or already-owned buckets are tolerated;
        any other ClientError is logged and re-raised.
    :rtype: None
    """
    client = session.client("s3")
    region = session.region_name
    location = {"LocationConstraint": region}
    try:
        # Object lock must be enabled at creation time; it cannot be added later.
        client.create_bucket(
            ACL="private",
            Bucket=bucket_name,
            ObjectLockEnabledForBucket=True,
            CreateBucketConfiguration=location,
        )
        LOG.info(f"Bucket {bucket_name} successfully created.")
    except client.exceptions.BucketAlreadyExists:
        # Name taken (possibly by another account): warn but do not fail.
        LOG.warning(f"Bucket {bucket_name} already exists.")
    except client.exceptions.BucketAlreadyOwnedByYou:
        LOG.info(f"You already own the bucket {bucket_name}")
    except ClientError as error:
        LOG.error("Error whilst creating the bucket")
        LOG.error(error)
        raise
def define_default_actions(self, template):
    """
    Set the listener default actions.

    If DefaultTarget is set it will set it if not a service, otherwise at the
    service level. If not defined, and there is more than one service, it will
    fail. If not defined and there is only one service defined, it will skip.
    """
    # Nothing at all to work with: warn and bail out.
    if not self.default_actions and not self.services:
        warnings.warn(
            f"{self.name} - There are no actions defined or services for listener {self.title}. Skipping"
        )
        return
    if self.default_actions:
        handle_default_actions(self)
    elif self.services and len(self.services) == 1:
        # Single service: everything defaults to it.
        LOG.info(
            f"{self.title} has no defined DefaultActions and only 1 service. Default all to service."
        )
        self.DefaultActions = define_actions(self, self.services[0])
    elif self.services and len(self.services) > 1:
        LOG.warning(
            f"{self.title} - "
            "No default actions defined and more than one service defined. "
            "If one of the access path is / it will be used as default")
        for listener_rule in handle_non_default_services(self, self.services):
            template.add_resource(listener_rule)
    else:
        raise ValueError(
            f"Failed to determine any default action for {self.title}")
def resolve_lookup(
    lookup_resources: list[Certificate],
    settings: ComposeXSettings,
    module: XResourceModule,
) -> None:
    """
    Lookup the ACM certificates in AWS and creates the CFN mappings for them

    :param list[Certificate] lookup_resources: List of resources to lookup
    :param ecs_composex.common.settings.ComposeXSettings settings:
    :param ecs_composex.mod_manager.XResourceModule module:
    """
    if not keyisset(module.mapping_key, settings.mappings):
        settings.mappings[module.mapping_key] = {}
    module_mappings = settings.mappings[module.mapping_key]
    for certificate in lookup_resources:
        certificate.lookup_resource(
            ACM_ARN_RE,
            get_cert_config,
            CfnAcmCertificate.resource_type,
            "acm:certificate",
        )
        certificate.init_outputs()
        certificate.generate_cfn_mappings_from_lookup_properties()
        certificate.generate_outputs()
        LOG.info(
            f"{certificate.module.res_key}.{certificate.name}"
            f" - Matched certificate {certificate.arn}"
        )
        module_mappings[certificate.logical_name] = certificate.mappings
def import_parameters_into_config_file(parameters_file, config_file):
    """
    Imports parameter file and outputs it into a CFN Template config file

    :param parameters_file: path to the parameters file
    :type parameters_file: str
    :param config_file: path to the config file.
    :type config_file: str
    """
    with open(parameters_file, "r") as params_fd:
        parameters = json.loads(params_fd.read())
    try:
        with open(config_file, "r") as config_fd:
            try:
                config = json.loads(config_fd.read())
            except json.decoder.JSONDecodeError:
                # Empty/corrupt config file: start from a clean structure.
                config = {"Parameters": {}}
            if not keyisset("Parameters", config):
                config["Parameters"] = {}
    except FileNotFoundError:
        # No config file yet: initialize one.
        config = {"Parameters": {}}
    # Fixed: was a stray print() debug statement leaking to stdout.
    LOG.debug(config)
    new_config = build_config_template_file(config, parameters)
    LOG.info(new_config)
    with open(config_file, "w") as config_fd:
        config_fd.write(json.dumps(new_config, indent=4))
def __init__(self, family: ComposeFamily):
    """
    Initialize network settings for the family ServiceConfig

    :param ecs_composex.ecs.ecs_family.ComposeFamily family:
    """
    self.family = family
    self._network_mode = "awsvpc"
    if family.service_compute.launch_type == "EXTERNAL":
        # EXTERNAL (ECS Anywhere) does not support awsvpc networking.
        LOG.warning(
            f"{family.name} - External mode cannot use awsvpc mode. Falling back to bridge"
        )
        self.network_mode = "bridge"
    self.ports = []
    self.networks = {}
    self.merge_services_ports()
    self.merge_networks()
    self.definition = merge_family_services_networking(family)
    self.ingress_from_self = False
    if any([svc.expose_ports for svc in family.services]):
        self.ingress_from_self = True
        LOG.info(
            f"{family.name} - services have export ports, allowing internal ingress"
        )
    self._security_group = None
    self.extra_security_groups = []
    self._subnets = Ref(APP_SUBNETS)
    self.cloudmap_config = (
        merge_cloudmap_settings(family, self.ports) if self.ports else {}
    )
    self.ingress = Ingress(self.definition[Ingress.master_key], self.ports)
    # Fixed: this assignment used to overwrite the expose-ports based value
    # computed above; keep ingress enabled when either condition holds.
    self.ingress_from_self = self.ingress_from_self or keyisset(
        self.self_key, self.definition
    )
def define_vpc_settings(settings: ComposeXSettings,
                        vpc_module: XResourceModule,
                        vpc_stack: ComposeXStack):
    """
    Function to deal with vpc stack settings

    :param settings: execution settings; decides whether a VPC is required
    :param vpc_module: the x-vpc module definition
    :param vpc_stack: the VPC stack to configure and/or add to the root stack
    """
    # Case 1: no VPC defined at all but services/x-resources need one:
    # create the default VPC and add its stack to the root template.
    if settings.requires_vpc() and not vpc_stack.vpc_resource:
        LOG.info(
            f"{settings.name} - Services or x-Resources need a VPC to function. Creating default one"
        )
        vpc_stack.create_new_default_vpc("vpc", vpc_module, settings)
        settings.root_stack.stack_template.add_resource(vpc_stack)
        vpc_stack.vpc_resource.generate_outputs()
    # Case 2: looked-up (existing) VPC: only expose it through a CFN mapping,
    # no stack resource is created.
    elif (vpc_stack.is_void and vpc_stack.vpc_resource
          and vpc_stack.vpc_resource.mappings):
        vpc_stack.vpc_resource.generate_outputs()
        add_update_mapping(
            settings.root_stack.stack_template,
            "Network",
            vpc_stack.vpc_resource.mappings,
        )
    # Case 3: new VPC explicitly defined via x-vpc: add its stack once
    # (guard against double-add in the root template resources).
    elif (vpc_stack.vpc_resource and vpc_stack.vpc_resource.cfn_resource
          and vpc_stack.title
          not in settings.root_stack.stack_template.resources.keys()):
        settings.root_stack.stack_template.add_resource(vpc_stack)
        LOG.info(
            f"{settings.name}.x-vpc - VPC stack added. A new VPC will be created."
        )
        vpc_stack.vpc_resource.generate_outputs()
def scan_poll_and_wait(registry, repository_name, image, image_url, ecr_session=None):
    """
    Function to pull the scans results until no longer in progress

    :param registry: registry ID the image belongs to
    :param repository_name: name of the ECR repository
    :param image: imageId structure passed to the API call
    :param image_url: image URL, used for log messages only
    :param boto3.session.Session ecr_session: session to use for the API calls
    :return: The scan report
    :rtype: dict
    """
    client = ecr_session.client("ecr")
    # Poll until the scan leaves IN_PROGRESS; throttling also triggers a wait.
    while True:
        try:
            image_scan_r = client.describe_image_scan_findings(
                registryId=registry,
                repositoryName=repository_name,
                imageId=image,
            )
            if image_scan_r["imageScanStatus"]["status"] == "IN_PROGRESS":
                LOG.info(f"{image_url} - Scan in progress - waiting 10 seconds")
                sleep(10)
            else:
                return image_scan_r
        except client.exceptions.LimitExceededException:
            # Fixed: Logger.warn is a deprecated alias of Logger.warning.
            LOG.warning(f"{image_url} - Exceeding API Calls quota. Waiting 10 seconds")
            sleep(10)
def set_services_targets_from_list(self, settings):
    """
    Override method to map services and families targets of the services
    defined specifically for events

    TargetStructure: (family, family_wide, services[], access)

    :param ecs_composex.common.settings.ComposeXSettings settings:
    :return:
    """
    if not self.services:
        LOG.info(f"No services defined for {self.name}")
        return
    for service in self.services:
        service_name = service["name"]
        # Computed once per iteration (it grows as targets are appended),
        # instead of twice per iteration as before.
        known_families = [f[0].name for f in self.families_targets]
        if service_name in settings.families and service_name not in known_families:
            self.families_targets.append(
                (
                    settings.families[service_name],
                    True,
                    settings.families[service_name].services,
                    service["TaskCount"],
                    service,
                )
            )
        elif service_name in settings.families and service_name in known_families:
            LOG.warning(
                f"The family {service_name} has already been added. Skipping"
            )
        elif service_name in [s.name for s in settings.services]:
            # Not a family name: expand to the families owning that service.
            self.handle_families_targets_expansion(service, settings)
def lookup_vpc_id(session, vpc_id):
    """
    Confirm a VPC exists, from a plain VPC ID or a full VPC ARN.

    :param session: boto3 session
    :param vpc_id: VPC ID
    :return: the confirmed VPC ID
    :raises ValueError: invalid ARN or no matching VPC found
    """
    arn_regexp = r"(^arn:(aws|aws-cn|aws-us-gov):ec2:([a-z]{2}-[\w]{2,6}-[0-9]{1}):([0-9]{12}):vpc\/(vpc-[a-z0-9]+)$)"
    arn_re = re.compile(arn_regexp)
    args = {"VpcIds": [vpc_id]}
    if vpc_id.startswith("arn:"):
        arn_parts = arn_re.match(vpc_id)
        if not arn_parts:
            raise ValueError(
                "Vpc ARN is not valid. Got", vpc_id, "Valid ARN Regexp", arn_regexp
            )
        LOG.debug(arn_re.findall(vpc_id))
        # Groups: 4 = account id, 5 = vpc id; filter by owner for safety.
        re_vpc_id = arn_parts.group(5)
        args = {
            "VpcIds": [re_vpc_id],
            "Filters": [{"Name": "owner-id", "Values": [arn_parts.group(4)]}],
        }
        vpc_id = re_vpc_id
    client = session.client("ec2")
    vpcs_r = client.describe_vpcs(**args)
    LOG.debug(vpcs_r)
    LOG.debug(vpcs_r["Vpcs"][0]["VpcId"])
    if keyisset("Vpcs", vpcs_r) and vpcs_r["Vpcs"][0]["VpcId"] == vpc_id:
        LOG.info(f"VPC Found and confirmed: {vpcs_r['Vpcs'][0]['VpcId']}")
        return vpcs_r["Vpcs"][0]["VpcId"]
    raise ValueError("No VPC found with ID", args["VpcIds"][0])
def determine_resource_type(db_name, properties):
    """
    Function to determine if the properties are the ones of a DB Cluster or DB Instance.
    By default it will assume Cluster if cannot conclude that it is a DB Instance

    :param str db_name:
    :param dict properties:
    :return: DBCluster, DBInstance, or None when neither matches
    """
    # Precedence: (engine set AND aurora*) OR (all props valid for DBCluster).
    if (
        keyisset(DB_ENGINE_NAME.title, properties)
        and properties[DB_ENGINE_NAME.title].startswith("aurora")
        or all(
            property_name in DBCluster.props.keys()
            for property_name in properties.keys()
        )
    ):
        LOG.info(f"Identified {db_name} to be a RDS Aurora Cluster")
        return DBCluster
    elif all(
        property_name in DBInstance.props.keys()
        for property_name in properties.keys()
    ):
        LOG.info(f"Identified {db_name} to be a RDS Instance")
        return DBInstance
    # NOTE(review): the message says "Setting to Cluster" but the function
    # returns None, not DBCluster — confirm callers handle None here.
    LOG.error(
        "From the properties defined, we cannot determine whether this is a RDS Cluster or RDS Instance."
        " Setting to Cluster"
    )
    return None
def lookup_ecs_cluster(session, cluster_lookup):
    """
    Function to find the ECS Cluster.

    :param boto3.session.Session session: Boto3 session to make API calls.
    :param cluster_lookup: Cluster lookup definition.
    :return: the cluster name when found, else a default cluster config
    :raises TypeError: when cluster_lookup is not a string
    """
    if not isinstance(cluster_lookup, str):
        raise TypeError("The value for Lookup must be", str, "Got", type(cluster_lookup))
    client = session.client("ecs")
    try:
        cluster_r = client.describe_clusters(clusters=[cluster_lookup])
        if not keyisset("clusters", cluster_r):
            # Not found: fall back to creating a default cluster.
            LOG.warning(
                f"No cluster named {cluster_lookup} found. Creating one with default settings"
            )
            return get_default_cluster_config()
        elif (keyisset("clusters", cluster_r)
              and cluster_r["clusters"][0]["clusterName"] == cluster_lookup):
            LOG.info(
                f"Found ECS Cluster {cluster_lookup}. Setting {CLUSTER_NAME_T} accordingly."
            )
            return cluster_r["clusters"][0]["clusterName"]
        # NOTE(review): when clusters are returned but the first name does not
        # match, the function falls through and implicitly returns None —
        # confirm callers handle that case.
    except ClientError as error:
        LOG.error(error)
        raise
def generate_cluster(root_stack, settings):
    """
    Function to create the ECS Cluster.

    :param ecs_composex.common.stacks.ComposeXStack root_stack:
    :param ecs_composex.common.settings.ComposeXSettings settings:
    :return: the cluster (default config, looked-up/used name, or new definition)
    """
    cluster = get_default_cluster_config()
    # No x-cluster section: keep the default cluster config.
    if not keyisset(RES_KEY, settings.compose_content):
        return cluster
    # Lookup takes precedence over all other definitions.
    if keyisset(RES_KEY, settings.compose_content) and keyisset(
            "Lookup", settings.compose_content[RES_KEY]):
        cluster = lookup_ecs_cluster(
            settings.session, settings.compose_content[RES_KEY]["Lookup"])
    # Plain string: use it verbatim as the cluster name.
    elif keyisset(RES_KEY, settings.compose_content) and isinstance(
            settings.compose_content[RES_KEY], str):
        cluster = settings.compose_content[RES_KEY]
        LOG.info(f"Using cluster {settings.compose_content[RES_KEY]}")
    # Use: point at an existing cluster name without lookup validation.
    elif isinstance(settings.compose_content[RES_KEY], dict) and keyisset(
            "Use", settings.compose_content[RES_KEY]):
        cluster = settings.compose_content[RES_KEY]["Use"]
        LOG.info(f"Using cluster {settings.compose_content[RES_KEY]['Use']}")
    # Anything else (e.g. Properties): define a new cluster resource.
    elif keyisset(RES_KEY, settings.compose_content) and not keyisset(
            "Lookup", settings.compose_content[RES_KEY]):
        cluster = define_cluster(root_stack, settings.compose_content[RES_KEY])
    return cluster
def determine_resource_type(name, properties):
    """
    Function to determine if the properties are the ones of a CacheCluster or
    a ReplicationGroup. Returns None when neither matches.

    :param str name:
    :param dict properties:
    :return: CacheCluster, ReplicationGroup, or None
    """
    if all(
        property_name in CacheCluster.props.keys()
        for property_name in properties.keys()
    ):
        LOG.info(f"Identified {name} to be {CacheCluster.resource_type}")
        return CacheCluster
    elif all(
        property_name in ReplicationGroup.props.keys()
        for property_name in properties.keys()
    ):
        LOG.info(f"Identified {name} to be {ReplicationGroup.resource_type}")
        return ReplicationGroup
    # Fixed: the error message was copy-pasted from the RDS module and wrongly
    # referred to RDS Cluster/Instance instead of the ElastiCache types.
    LOG.error(
        "From the properties defined, we cannot determine whether this is a"
        " CacheCluster or a ReplicationGroup."
    )
    return None
def __init__(self, name, definition):
    """Initialize the volume from its compose-file definition."""
    self.name = name
    self.volume_name = name
    self.autogenerated = False
    self.definition = deepcopy(definition)
    self.is_shared = False
    self.services = []
    self.parameters = {}
    self.device = None
    self.cfn_volume = None
    self.efs_definition = {}
    self.use = {}
    self.lookup = {}
    self.type = "volume"
    self.driver = "local"
    self.external = False
    self.efs_definition = evaluate_plugin_efs_properties(
        self.definition, self.driver_opts_key
    )
    if self.efs_definition:
        # Docker plugin (EFS) driver options present on the volume.
        LOG.info(
            f"volumes.{self.name} - Identified properties as defined by Docker Plugin"
        )
        self.type = "bind"
        self.driver = "nfs"
    else:
        names_an_fs = (
            keyisset("external", self.definition)
            and keyisset("name", self.definition)
            and FS_REGEXP.match(self.definition["name"])
        )
        if names_an_fs:
            # External volume whose name is an EFS filesystem ID.
            LOG.warning(f"volumes.{self.name} - Identified a EFS to use")
            self.efs_definition = {"Use": self.definition["name"]}
            self.use = self.definition["name"]
        else:
            self.import_volume_from_definition()
def define_service_target_group_definition(
    resource,
    family,
    service,
    target_def,
    resources_root_stack,
):
    """
    Function to create the new service TGT Group

    :param ecs_composex.elbv2.elbv2_stack.Elbv2 resource:
    :param service:
    :param ecs_composex.ecs.ecs_family.ComposeFamily family:
    :param dict target_def:
    :param ecs_composex.common.stacks.ComposeXStack resources_root_stack:
    :return: Ref to the created target group
    """
    # NOTE(review): the membership test uses resource.logical_name but the
    # value appended is resources_root_stack.title — confirm these are meant
    # to differ; otherwise duplicate DependsOn entries are possible.
    if resource.logical_name not in family.stack.DependsOn:
        family.stack.DependsOn.append(resources_root_stack.title)
        LOG.info(
            f"{resource.module.res_key}.{resource.name} - Adding {family.logical_name} {service.name}"
        )
    service_tgt_group = define_service_target_group(
        resource,
        family,
        service,
        resources_root_stack,
        target_def,
    )
    return Ref(service_tgt_group)
def resolve_lookup(lookup_resources: list[Queue],
                   settings: ComposeXSettings,
                   module: XResourceModule) -> None:
    """
    Lookup AWS Resource

    :param list[Queue] lookup_resources:
    :param ecs_composex.common.settings.ComposeXSettings settings:
    :param XResourceModule module:
    """
    if not keyisset(module.mapping_key, settings.mappings):
        settings.mappings[module.mapping_key] = {}
    for resource in lookup_resources:
        resource.lookup_resource(
            SQS_QUEUE_ARN_RE,
            get_queue_config,
            CfnQueue.resource_type,
            TAGGING_API_ID,
        )
        settings.mappings[module.mapping_key].update(
            {resource.logical_name: resource.mappings})
        LOG.info(
            f"{module.res_key}.{resource.name} - Matched AWS Resource {resource.arn}"
        )
        if keyisset(SQS_KMS_KEY, resource.lookup_properties):
            # Fixed: second string lacked the f prefix, so the literal
            # placeholder text was logged instead of the KMS key value.
            LOG.info(f"{module.res_key}.{resource.name} - Identified CMK"
                     f" - {resource.lookup_properties[SQS_KMS_KEY]}")
def ephemeral_storage(self):
    """
    Evaluate the ``ecs.ephemeral.storage`` deploy label.

    Returns 0 when at/below the 21GB baseline, caps the value at 200GB,
    otherwise returns the requested size in GB.
    """
    storage_key = "ecs.ephemeral.storage"
    labels = set_else_none("labels", self.deploy, alt_value={})
    raw_value = set_else_none(storage_key, labels, alt_value=0)
    if isinstance(raw_value, (int, float)):
        storage_gb = int(raw_value)
    elif isinstance(raw_value, str):
        # String values carry units; normalize MB -> GB.
        storage_gb = int(set_memory_to_mb(raw_value) / 1024)
    else:
        raise TypeError(
            f"The value for {storage_key} is of type",
            type(raw_value),
            "Expected one of",
            [int, float, str],
        )
    if storage_gb <= 21:
        return 0
    if storage_gb > 200:
        return 200
    LOG.info(f"{self.name} - {storage_key} set to {storage_gb}")
    return int(storage_gb)
def evaluate_docker_configs(settings):
    """
    Function to go over the services settings and evaluate x-docker

    :param ecs_composex.common.settings.ComposeXSettings settings: The settings for the execution
    :return:
    """
    # Fixed: the character class was `[a-z-Z0-9]`, which matched only a-z,
    # a literal '-', 'Z' and digits; digests/tags can contain any letter.
    image_tag_re = re.compile(
        r"(?P<tag>(?:\@sha[\d]+:[a-zA-Z0-9]+$)|(?::[\S]+$))")
    for family in settings.families.values():
        for service in family.services:
            if not keyisset("x-docker_opts", service.definition):
                continue
            docker_config = service.definition["x-docker_opts"]
            if SCANS_POSSIBLE:
                if keyisset("InterpolateWithDigest", docker_config):
                    if not invalidate_image_from_ecr(service, mute=True):
                        # Fixed: Logger.warn is deprecated; use warning().
                        LOG.warning(
                            "You set InterpolateWithDigest to true for x-docker for an image in AWS ECR."
                            "Please refer to x-ecr")
                        continue
            else:
                warnings.warn(
                    "Run pip install ecs_composex[ecrscan] to use x-ecr features"
                )
            service.retrieve_image_digest()
            if service.image_digest:
                # Replace the tag/digest suffix with the resolved digest.
                service.image = image_tag_re.sub(
                    f"@{service.image_digest}", service.image)
                LOG.info(
                    f"Successfully retrieved digest for {service.name}.")
                LOG.info(f"{service.name} - {service.image}")
def add_vpc_to_root(root_stack, settings):
    """
    Function to figure whether to create the VPC Stack and if not, set the parameters.

    :param root_stack:
    :param settings:
    :return: vpc_stack
    :rtype: VpcStack
    """
    vpc_stack = None
    vpc_xkey = f"{X_KEY}{RES_KEY}"
    if keyisset(vpc_xkey, settings.compose_content):
        # Lookup wins over any other x-vpc definition.
        if keyisset("Lookup", settings.compose_content[vpc_xkey]):
            x_settings = lookup_x_vpc_settings(
                settings.session, settings.compose_content[vpc_xkey]["Lookup"])
            apply_vpc_settings(x_settings, root_stack)
        elif keyisset("Use", settings.compose_content[vpc_xkey]):
            x_settings = import_vpc_settings(
                settings.compose_content[vpc_xkey]["Use"])
            apply_vpc_settings(x_settings, root_stack)
        else:
            # NOTE(review): Lookup was already handled by the first branch, so
            # this Create+Lookup condition appears unreachable — confirm.
            if keyisset("Create", settings.compose_content[vpc_xkey]) and keyisset(
                    "Lookup", settings.compose_content[vpc_xkey]):
                LOG.warning("We have both Create and Lookup set for x-vpc."
                            "Creating a new VPC")
            vpc_stack = create_new_vpc(vpc_xkey, settings)
    else:
        # No x-vpc section at all: create a default VPC.
        LOG.info(f"No {vpc_xkey} detected. Creating a new VPC.")
        vpc_stack = create_new_vpc(vpc_xkey, settings, default=True)
    if isinstance(vpc_stack, VpcStack):
        root_stack.stack_template.add_resource(vpc_stack)
    return vpc_stack
def find_mesh_in_list(mesh_name, client, next_token=None):
    """
    Function to recursively go through meshes in case the mesh exists but we don't know the account Id

    :param mesh_name: Name of the mesh
    :param client: boto3 appmesh client
    :param next_token: token for next api call
    :return: mesh details when found, {} when no meshes, None when exhausted
    """
    if next_token is not None:
        # Fixed: the parameter was misspelled `nexToken`, which raises a
        # ParamValidationError on every paginated call.
        mesh_r = client.list_meshes(nextToken=next_token)
    else:
        mesh_r = client.list_meshes()
    if not keyisset("meshes", mesh_r):
        return {}
    for mesh in mesh_r["meshes"]:
        if mesh["meshName"] == mesh_name:
            mesh_info = {
                MESH_NAME.title: mesh["meshName"],
                MESH_OWNER_ID.title: mesh["meshOwner"],
            }
            LOG.info(
                f"Found shared mesh {mesh_name} owned by {mesh_info[MESH_OWNER_ID.title]}"
            )
            return mesh_info
    # Not in this page: recurse with the pagination token.
    if keyisset("nextToken", mesh_r):
        return find_mesh_in_list(mesh_name, client, mesh_r["nextToken"])
def evaluate_ecr_configs(settings) -> int:
    """
    Function to go over each service of each family in its final state and
    evaluate the ECR Image validity.

    :param ecs_composex.common.settings.ComposeXSettings settings: The settings for the execution
    :return: 1 when any scanned image has vulnerabilities, 0 otherwise
    """
    result = 0
    if not SCANS_POSSIBLE:
        return result
    for family in settings.families.values():
        for service in family.services:
            if not isinstance(service.image, str):
                continue
            if not keyisset("x-ecr", service.definition) or invalidate_image_from_ecr(
                    service, True):
                continue
            service_image = define_service_image(service, settings)
            if (service.ecr_config
                    and keyisset("InterpolateWithDigest", service.ecr_config)
                    and keyisset("imageDigest", service_image)):
                # Pin the image to its digest for reproducible deployments.
                service.image = interpolate_ecr_uri_tag_with_digest(
                    service.image, service_image["imageDigest"])
                LOG.info(
                    f"Update service {family.name}.{service.name} image to {service.image}"
                )
            if scan_service_image(service, settings, service_image):
                # Fixed: Logger.warn is a deprecated alias of Logger.warning.
                LOG.warning(
                    f"{family.name}.{service.name} - vulnerabilities found")
                result = 1
            else:
                LOG.info(
                    f"{family.name}.{service.name} - ECR Evaluation Passed.")
    return result
def lookup_mesh_by_name(session, mesh_name, mesh_owner=None):
    """
    Figure out whether the mesh exists or not.

    :param boto3.session.Session session:
    :param str mesh_name:
    :param str mesh_owner:
    :return: mesh name/owner details, or the shared-mesh search result
    """
    request_args = {"meshName": mesh_name}
    if mesh_owner is not None:
        request_args["meshOwner"] = mesh_owner
    client = session.client("appmesh")
    try:
        description = client.describe_mesh(**request_args)["mesh"]
    except client.exceptions.NotFoundException:
        # Not directly visible: it may still be shared from another account.
        LOG.info(
            f"No mesh {mesh_name} found owned with current details. Looking for shared meshes."
        )
        return find_mesh_in_list(mesh_name, client)
    mesh_info = {
        MESH_NAME.title: description["meshName"],
        MESH_OWNER_ID.title: description["metadata"]["meshOwner"],
    }
    LOG.info(
        f"Found mesh {mesh_name} owned by {mesh_info[MESH_OWNER_ID.title]}"
    )
    return mesh_info
def set_content(self, kwargs, content=None, fully_load=True):
    """
    Method to initialize the compose content

    :param dict kwargs:
    :param dict content:
    :param bool fully_load:
    """
    if keyisset(self.input_file_arg, kwargs):
        files = kwargs[self.input_file_arg]
    else:
        files = []
    content_def = ComposeDefinition(files, content)
    self.compose_content = content_def.definition
    # Validate the merged definition against the bundled compose spec.
    source = pkg_files("ecs_composex").joinpath("specs/compose-spec.json")
    LOG.info(f"Validating against input schema {source}")
    resolver = jsonschema.RefResolver(
        f"file://{path.abspath(path.dirname(source))}/", None
    )
    jsonschema.validate(
        content_def.definition,
        loads(source.read_text()),
        resolver=resolver,
    )
    if not fully_load:
        return
    self.set_secrets()
    self.set_volumes()
    self.set_services()
    self.set_families()
    self.set_efs()
def get_mod_function(module_name, function_name):
    """
    Function to get function in a given module name from function_name

    :param str module_name: the name of the module in ecs_composex to find and try to import
    :param str function_name: name of the function to try to get
    :return: function, if found, from the module; None otherwise
    :rtype: function
    """
    composex_module_name = f"ecs_composex.{module_name}"
    LOG.debug(composex_module_name)
    try:
        res_module = import_module(composex_module_name)
    except ImportError as error:
        LOG.error(f"Failure to process the module {composex_module_name}")
        LOG.error(error)
        return None
    LOG.debug(res_module)
    try:
        return getattr(res_module, function_name)
    except AttributeError:
        # Module imported fine but does not expose the requested function.
        LOG.info(f"No {function_name} function found - skipping")
        return None
def validate_vpc_input(args):
    """
    Function to validate the VPC arguments are all present

    :param dict args: Parser arguments
    :raise: KeyError if missing argument when not creating VPC
    """
    nocreate_requirements = [
        PUBLIC_SUBNETS_T,
        APP_SUBNETS_T,
        STORAGE_SUBNETS_T,
        VPC_ID_T,
        VPC_MAP_ID_T,
    ]
    creating_vpc = keyisset("CreateVpc", args)
    for key in nocreate_requirements:
        if not creating_vpc and not keyisset(key, args):
            # Reusing an existing VPC: every network parameter is expected.
            warnings.warn(
                f"{key} was not provided. Not adding to the parameters file",
                UserWarning,
            )
        elif creating_vpc and keyisset(key, args):
            # Creating a VPC: any provided network parameter is ignored.
            LOG.info(args[key])
            warnings.warn(
                f"Creating VPC is set. Ignoring value for {key}", UserWarning
            )
def match_volumes_services_config(service: ComposeService, vol_config: dict, volumes: list):
    """
    Function to map volume config in services and top-level volumes

    :param service:
    :param vol_config:
    :param volumes:
    :raises LookupError: when the volume source matches no top-level volume
    """
    # Host-path bind mount (source starts with /): no top-level volume.
    if keyisset("source", vol_config) and vol_config["source"].startswith(r"/"):
        vol_config["volume"] = None
        service.volumes.append(vol_config)
        LOG.info(f"volumes.{vol_config['source']} - Mapped to {service.name}")
        return
    else:
        for volume in volumes:
            # NOTE(review): keyisset("volume", volume) is evaluated against
            # the volume object rather than vol_config — confirm intentional.
            if not keyisset("source", vol_config) and not keyisset(
                    "volume", volume):
                LOG.error(f"volumes - Failure to process {volume}")
                continue
            if volume.name == vol_config["source"]:
                # Cross-link the service and the top-level volume.
                volume.services.append(service)
                vol_config["volume"] = volume
                service.volumes.append(vol_config)
                LOG.info(f"volumes.{volume.name} - Mapped to {service.name}")
                return
    raise LookupError(
        f"Volume {vol_config['source']} was not found in {[vol.name for vol in volumes]}"
    )
def lookup_cluster(self, session):
    """
    Define the ECS Cluster properties and definitions from ECS API.

    :param boto3.session.Session session: Boto3 session to make API calls.
    :return: The cluster details
    :rtype: dict
    :raises TypeError: when Lookup is neither a string nor a mapping
    :raises LookupError: when the cluster name is not found in the account
    """
    if not isinstance(self.lookup, (str, dict)):
        raise TypeError("The value for Lookup must be", str, dict, "Got", type(self.lookup))
    ecs_session = session
    if isinstance(self.lookup, dict):
        # Optional cross-account lookup via an IAM role to assume.
        if keyisset("RoleArn", self.lookup):
            ecs_session = get_assume_role_session(
                session,
                self.lookup["RoleArn"],
                session_name="EcsClusterLookup@ComposeX",
            )
        cluster_name = self.lookup["ClusterName"]
    else:
        cluster_name = self.lookup
    try:
        clusters = list_all_ecs_clusters(session=ecs_session)
        # Cluster ARNs -> plain names, used for the error message below.
        cluster_names = [
            CLUSTER_NAME_FROM_ARN.match(c_name).group("name")
            for c_name in clusters
        ]
        clusters_config = describe_all_ecs_clusters_from_ccapi(
            clusters, return_as_map=True, use_cluster_name=True,
            session=ecs_session)
        if cluster_name not in clusters_config.keys():
            raise LookupError(
                f"Failed to find {cluster_name}. Available clusters are",
                cluster_names,
            )
        the_cluster = clusters_config[cluster_name]
        LOG.info(
            f"x-cluster.{cluster_name} found. Setting {CLUSTER_NAME.title} accordingly."
        )
        self.mappings = {
            CLUSTER_NAME.title: {
                "Name": the_cluster["ClusterName"]
            }
        }
        self.set_cluster_mappings(the_cluster)
        # Capacity providers drive the default strategy and platform override.
        self.capacity_providers = evaluate_capacity_providers(the_cluster)
        if self.capacity_providers:
            self.default_strategy_providers = get_default_capacity_strategy(
                the_cluster)
        self.platform_override = evaluate_fargate_is_set(
            self.capacity_providers, the_cluster)
        self.cluster_identifier = FindInMap(self.mappings_key,
                                            CLUSTER_NAME.title, "Name")
    except ClientError as error:
        LOG.error(error)
        raise
def lookup_resource(
    self,
    arn_re,
    native_lookup_function,
    cfn_resource_type,
    tagging_api_id,
    subattribute_key=None,
):
    """
    Method to self-identify properties.

    It will try to use AWS Cloud Control API if possible, otherwise fallback
    to using boto3 descriptions functions to create a mapping of the attributes.

    :param arn_re: compiled regex the ARN must match, with ``id`` and
        ``accountid`` named groups
    :param native_lookup_function: boto3-based fallback describe function
    :param cfn_resource_type: CFN resource type, for Cloud Control lookups
    :param tagging_api_id: service id for the Resource Groups Tagging API
    :param subattribute_key: when set, use self.lookup[subattribute_key]
        as the lookup definition instead of self.lookup
    :raises KeyError: when neither Arn nor Tags are set, or the ARN is invalid
    :raises LookupError: when the tags search yielded no ARN
    """
    self.init_outputs()
    lookup_attributes = self.lookup
    if subattribute_key is not None:
        lookup_attributes = self.lookup[subattribute_key]
    if keyisset("Arn", lookup_attributes):
        # Direct lookup: validate and parse the user-provided ARN.
        LOG.info(f"{self.module.res_key}.{self.name} - Lookup via ARN")
        LOG.debug(
            f"{self.module.res_key}.{self.name} - ARN is {lookup_attributes['Arn']}"
        )
        arn_parts = arn_re.match(lookup_attributes["Arn"])
        if not arn_parts:
            raise KeyError(
                f"{self.module.res_key}.{self.name} - ARN {lookup_attributes['Arn']} is not valid. Must match",
                arn_re.pattern,
            )
        self.arn = lookup_attributes["Arn"]
        resource_id = arn_parts.group("id")
        account_id = arn_parts.group("accountid")
    elif keyisset("Tags", lookup_attributes):
        # Tag-based lookup through the Resource Groups Tagging API.
        LOG.info(f"{self.module.res_key}.{self.name} - Lookup via Tags")
        LOG.debug(
            f"{self.module.res_key}.{self.name} - Lookup tags -> {lookup_attributes}"
        )
        self.arn = find_aws_resource_arn_from_tags_api(
            lookup_attributes, self.lookup_session, tagging_api_id)
        arn_parts = arn_re.match(self.arn)
        resource_id = arn_parts.group("id")
        account_id = arn_parts.group("accountid")
    else:
        raise KeyError(
            f"{self.module.res_key}.{self.name} - You must specify Arn or Tags to identify existing resource"
        )
    if not self.arn:
        raise LookupError(
            f"{self.module.res_key}.{self.name} - Failed to find the AWS Resource with given tags"
        )
    props = {}
    # Cloud Control lookups only work for resources in our own account.
    _account_id = get_account_id(self.lookup_session)
    if _account_id == account_id and self.cloud_control_attributes_mapping:
        props = self.cloud_control_attributes_mapping_lookup(
            cfn_resource_type, resource_id)
    if not props:
        # Fallback: boto3 native describe call mapping.
        props = self.native_attributes_mapping_lookup(
            account_id, resource_id, native_lookup_function)
    self.lookup_properties = props
    self.generate_cfn_mappings_from_lookup_properties()
    self.generate_outputs()
def eip_assign(self):
    """Return ENABLED when any ordered service requests a public IP, else DISABLED."""
    for svc in self.family.ordered_services:
        if svc.eip_auto_assign:
            LOG.info(
                f"{self.family.name} - networking - "
                "At least one service in definition has AssignPublicIp set to True."
            )
            return "ENABLED"
    return "DISABLED"