def generate_resource_envvars(resource_name, resource, attribute, arn=None):
    """
    Function to generate environment variables that can be added to a container definition
    shall the ecs_service need to know about the Queue

    :param str resource_name: The name of the resource
    :param dict resource: The resource definition as defined in docker-compose file.
    :param str attribute: the attribute of the resource we are using for Import
    :param str arn: The ARN of the resource if already looked up.
    :return: environment key/pairs
    :rtype: list<troposphere.ecs.Environment>
    """
    # Prefer the pre-resolved ARN when given, otherwise build the export string.
    value = arn if arn else generate_export_strings(resource_name, attribute)
    if keyisset("Settings", resource) and keyisset("EnvNames", resource["Settings"]):
        declared_names = resource["Settings"]["EnvNames"]
        env_vars = [Environment(Name=name, Value=value) for name in declared_names]
        # Always expose the resource under its own name as well.
        if resource_name not in declared_names:
            env_vars.append(Environment(Name=resource_name, Value=value))
    else:
        env_vars = [Environment(Name=resource_name, Value=value)]
    return env_vars
def generate_key(key_name, res_name, key_def):
    """
    Function to create the KMS Key

    :param str key_name: friendly name of the key as set in the compose file
    :param str res_name: logical resource name for the troposphere Key
    :param dict key_def: the key definition (may carry a Properties override)
    :return: key
    :rtype: troposphere.kms.Key
    """
    if keyisset("Properties", key_def):
        properties = key_def["Properties"]
    else:
        # Sensible defaults when the user did not define Properties.
        properties = {
            "Description": Sub(f"{key_name} created in ${{{ROOT_STACK_NAME.title}}}"),
            "Enabled": True,
            "EnableKeyRotation": True,
            "KeyUsage": "ENCRYPT_DECRYPT",
            "PendingWindowInDays": 7,
        }
    if not keyisset("KeyPolicy", properties):
        properties["KeyPolicy"] = define_default_key_policy()
    properties["Metadata"] = metadata
    LOG.debug(properties)
    return Key(res_name, **properties)
def define_queue(queue_name, queue_def, queues, mono_template=True):
    """
    Function to parse the queue definition and generate the queue accordingly.
    Creates the redrive policy if necessary.

    :param str queue_name: name of the queue
    :param dict queue_def: queue definition as found in composex file
    :param dict queues: the queues defined in x-sqs
    :param bool mono_template: whether or not there are so many outputs we need to split.
    :return: queue
    :rtype: troposphere.sqs.Queue
    """
    redrive_policy = None
    if keypresent("Properties", queue_def):
        # deepcopy so we never mutate the user-provided definition.
        properties = deepcopy(queue_def)["Properties"]
        properties["Metadata"] = metadata
    else:
        properties = {"Metadata": metadata}
    has_redrive = keyisset("RedrivePolicy", properties) and keyisset(
        "deadLetterTargetArn", properties["RedrivePolicy"]
    )
    if has_redrive:
        redrive_target = properties["RedrivePolicy"]["deadLetterTargetArn"]
        if redrive_target not in queues:
            raise KeyError(
                f"Queue {redrive_target} defined as DLQ for {queue_name} but is not defined"
            )
        # Default to 5 receive attempts before messages go to the DLQ.
        if keyisset("maxReceiveCount", properties["RedrivePolicy"]):
            retries = int(properties["RedrivePolicy"]["maxReceiveCount"])
        else:
            retries = 5
        redrive_policy = define_redrive_policy(redrive_target, retries, mono_template)
    queue = set_queue(queue_name, properties, redrive_policy)
    LOG.debug(queue.title)
    return queue
def handle_key_settings(template, key, key_def):
    """
    Function to add to the template for additional KMS key related resources.

    :param troposphere.Template template:
    :param key: the KMS key
    :param dict key_def:
    :return:
    """
    if not (keyisset("Settings", key_def) and keyisset("Alias", key_def["Settings"])):
        return
    alias_name = key_def["Settings"]["Alias"]
    if not (alias_name.startswith("alias/") or alias_name.startswith("aws")):
        # Bare alias names get prefixed with the stack name.
        alias_name = If(
            USE_STACK_NAME_CON_T,
            Sub(f"alias/${{AWS::StackName}}/{alias_name}"),
            Sub(f"alias/${{{ROOT_STACK_NAME.title}}}/{alias_name}"),
        )
    elif alias_name.startswith("alias/aws") or alias_name.startswith("aws"):
        # AWS-reserved alias prefixes are rejected.
        raise ValueError(f"Alias {alias_name} cannot start with alias/aws.")
    Alias(
        f"{key.title}Alias",
        template=template,
        AliasName=alias_name,
        TargetKeyId=Ref(key),
        Metadata=metadata,
    )
def generate_sns_templates(settings):
    """
    Entrypoint function to generate the SNS topics templates

    :param settings:
    :type settings: ecs_composex.common.settings.ComposeXSettings
    :return:
    """
    allowed_keys = [TOPICS_KEY, SUBSCRIPTIONS_KEY]
    res_content = settings.compose_content[RES_KEY]
    # Reject any key other than Topics/Subscriptions.
    if not set(res_content).issubset(allowed_keys):
        raise KeyError(
            "SNS Only supports two types of resources",
            allowed_keys,
            "provided",
            res_content.keys(),
        )
    template = build_template("SNS Root Template")
    res_count = define_resources(res_content)
    if keyisset(TOPICS_KEY, res_content):
        add_sns_topics(template, settings.compose_content, res_count)
    if keyisset(SUBSCRIPTIONS_KEY, res_content):
        # Standalone subscriptions are not implemented yet.
        pass
    add_topics_outputs(template)
    return template
def find_mesh_in_list(mesh_name, client, next_token=None):
    """
    Function to recursively go through meshes in case the mesh exists but we don't know the account Id

    :param str mesh_name: Name of the mesh
    :param client: boto3 AppMesh client used for the list_meshes calls
    :param str next_token: token for next api call
    :return: mesh name/owner info when found, empty dict otherwise
    :rtype: dict
    """
    # Bug fix: the pagination parameter is ``nextToken`` — it was misspelled
    # ``nexToken``, which makes boto3 raise ParamValidationError on every
    # paginated (recursive) call.
    if next_token is not None:
        mesh_r = client.list_meshes(nextToken=next_token)
    else:
        mesh_r = client.list_meshes()
    if not keyisset("meshes", mesh_r):
        return {}
    for mesh in mesh_r["meshes"]:
        if mesh["meshName"] == mesh_name:
            mesh_info = {
                MESH_NAME.title: mesh["meshName"],
                MESH_OWNER_ID.title: mesh["meshOwner"],
            }
            LOG.info(
                f"Found shared mesh {mesh_name} owned by {mesh_info[MESH_OWNER_ID.title]}"
            )
            return mesh_info
    if keyisset("nextToken", mesh_r):
        return find_mesh_in_list(mesh_name, client, mesh_r["nextToken"])
    # Keep the return type consistent with the "no meshes" case (was implicit None).
    return {}
def define_projection(projection_def):
    """
    Function to build the DynamoDB index projection from its definition.

    :param dict projection_def: the projection settings from the table definition
    :return: the projection object
    :rtype: troposphere.dynamodb.Projection
    """
    projection = dynamodb.Projection()
    # Copy over only the attributes that were actually set.
    for attr_name in ("NonKeyAttributes", "ProjectionType"):
        if keyisset(attr_name, projection_def):
            setattr(projection, attr_name, projection_def[attr_name])
    return projection
def define_cluster(root_stack, cluster_def):
    """
    Function to create the cluster from provided properties.

    :param dict cluster_def:
    :param ecs_composex.common.stacks.ComposeXStack root_stack:
    :return: cluster
    :rtype: troposphere.ecs.Cluster
    """
    if not keyisset("Properties", cluster_def):
        return get_default_cluster_config()
    props = cluster_def["Properties"]
    if keyisset("ClusterName", props):
        root_stack.Parameters.update({CLUSTER_NAME_T: props["ClusterName"]})
    if keyisset("CapacityProviders", props):
        providers = props["CapacityProviders"]
    else:
        LOG.warning("No capacity providers defined. Setting it to default.")
        providers = DEFAULT_PROVIDERS
    if keyisset("DefaultCapacityProviderStrategy", props):
        strategy = import_capacity_strategy(props["DefaultCapacityProviderStrategy"])
    else:
        LOG.warning("No Default Strategy set. Setting to default.")
        strategy = DEFAULT_STRATEGY
    cluster_params = {
        "CapacityProviders": providers,
        "DefaultCapacityProviderStrategy": strategy,
        "Metadata": metadata,
        # Use the stack name when no explicit cluster name was provided.
        "ClusterName": If(
            GENERATED_CLUSTER_NAME_CON_T, Ref(AWS_STACK_NAME), Ref(CLUSTER_NAME_T)
        ),
    }
    return Cluster(CLUSTER_T, **cluster_params)
def lookup_ecs_cluster(session, cluster_lookup):
    """
    Function to find the ECS Cluster.

    :param boto3.session.Session session: Boto3 session to make API calls.
    :param cluster_lookup: Cluster lookup definition.
    :return:
    """
    if not isinstance(cluster_lookup, str):
        raise TypeError(
            "The value for Lookup must be", str, "Got", type(cluster_lookup)
        )
    client = session.client("ecs")
    try:
        cluster_r = client.describe_clusters(clusters=[cluster_lookup])
        if not keyisset("clusters", cluster_r):
            # Nothing matched: fall back to a default cluster definition.
            LOG.warning(
                f"No cluster named {cluster_lookup} found. Creating one with default settings"
            )
            return get_default_cluster_config()
        found_name = cluster_r["clusters"][0]["clusterName"]
        if found_name == cluster_lookup:
            LOG.info(
                f"Found ECS Cluster {cluster_lookup}. Setting {CLUSTER_NAME_T} accordingly."
            )
            return found_name
    except ClientError as error:
        LOG.error(error)
        raise
def validate_vpc_input(args):
    """
    Function to validate the VPC arguments are all present

    :param args: Parser arguments
    :type args: dict
    :raise: KeyError if missing argument when not creating VPC
    """
    nocreate_requirements = [
        PUBLIC_SUBNETS_T,
        APP_SUBNETS_T,
        STORAGE_SUBNETS_T,
        VPC_ID_T,
        VPC_MAP_ID_T,
    ]
    creating_vpc = keyisset("CreateVpc", args)
    for key in nocreate_requirements:
        if not creating_vpc and not keyisset(key, args):
            # Re-using an existing VPC: each missing setting is only warned about.
            warnings.warn(
                f"{key} was not provided. Not adding to the parameters file",
                UserWarning,
            )
        elif creating_vpc and keyisset(key, args):
            # Creating a VPC: any provided network setting is ignored.
            LOG.info(args[key])
            warnings.warn(
                f"Creating VPC is set. Ignoring value for {key}", UserWarning
            )
def define_sse_spec(properties):
    """
    Function to define the server-side encryption specification for the table.

    :param dict properties: the Table properties from the compose definition
    :return: SSE specification, enabled only when explicitly set in properties
    :rtype: troposphere.dynamodb.SSESpecification
    """
    # bool() replaces the redundant ``True if ... else False`` ternary.
    return dynamodb.SSESpecification(
        SSEEnabled=bool(
            keyisset("SSESpecification", properties)
            and keyisset("SSEEnabled", properties["SSESpecification"])
        )
    )
def add_public_security_group_ingress(self, security_group):
    """
    Method to add ingress rules from external sources to a given Security Group (ie. ALB Security Group).
    If a list of IPs is found in the config['ext_sources'] part of the network section of configs for the
    service, then it will use that. If no IPv4 source is indicated, it will by default allow traffic from
    0.0.0.0/0

    :param security_group: security group (object or title string) to add the rules to
    :type security_group: str or troposphere.ec2.SecurityGroup
    """
    if not self.config.ext_sources:
        # Default to allowing any source when nothing was configured.
        self.config.ext_sources = [
            {"ipv4": "0.0.0.0/0", "protocol": -1, "source_name": "ANY"}
        ]
    for allowed_source in self.config.ext_sources:
        if not keyisset("ipv4", allowed_source) and not keyisset(
            "ipv6", allowed_source
        ):
            # Fix: Logger.warn() is deprecated in the stdlib logging module.
            LOG.warning("No IPv4 or IPv6 set. Skipping")
            continue
        props = generate_security_group_props(allowed_source, self.service_name)
        if props:
            LOG.debug(f"Adding {allowed_source} for ingress")
            self.create_lb_ingress_rule(allowed_source, security_group, **props)
def add_vpc_to_root(root_stack, settings):
    """
    Function to figure whether to create the VPC Stack and if not, set the parameters.

    :param root_stack:
    :param settings:
    :return: vpc_stack
    :rtype: VpcStack
    """
    vpc_stack = None
    vpc_xkey = f"{X_KEY}{RES_KEY}"
    if not keyisset(vpc_xkey, settings.compose_content):
        LOG.info(f"No {vpc_xkey} detected. Creating a new VPC.")
        vpc_stack = create_new_vpc(vpc_xkey, settings, default=True)
    else:
        vpc_section = settings.compose_content[vpc_xkey]
        if keyisset("Lookup", vpc_section):
            x_settings = lookup_x_vpc_settings(settings.session, vpc_section["Lookup"])
            apply_vpc_settings(x_settings, root_stack)
        elif keyisset("Use", vpc_section):
            x_settings = import_vpc_settings(vpc_section["Use"])
            apply_vpc_settings(x_settings, root_stack)
        else:
            # NOTE(review): Lookup is already consumed by the first branch, so
            # this combined Create+Lookup warning looks unreachable — confirm.
            if keyisset("Create", vpc_section) and keyisset("Lookup", vpc_section):
                LOG.warning(
                    "We have both Create and Lookup set for x-vpc.Creating a new VPC"
                )
            vpc_stack = create_new_vpc(vpc_xkey, settings)
    if isinstance(vpc_stack, VpcStack):
        root_stack.stack_template.add_resource(vpc_stack)
    return vpc_stack
def generate_security_group_props(allowed_source, service_name):
    """
    Function to parse the allowed source and create the SG Opening options accordingly.

    :param dict allowed_source: The allowed source defined in configs
    :param str service_name:
    :return: security group ingress properties
    :rtype: dict
    :raises ValueError: when the ipv4 value is not a valid CIDR notation
    """
    props = {
        "CidrIp": (
            allowed_source["ipv4"]
            if keyisset("ipv4", allowed_source)
            else Ref(AWS_NO_VALUE)
        ),
        "CidrIpv6": (
            allowed_source["ipv6"]
            if keyisset("ipv6", allowed_source)
            else Ref(AWS_NO_VALUE)
        ),
    }
    # Only validate when an actual string was provided (not the NoValue Ref).
    if (
        keyisset("CidrIp", props)
        and isinstance(props["CidrIp"], str)
        and not CIDR_PAT.match(props["CidrIp"])
    ):
        # Fixed typo in the log message ("Falty" -> "Faulty").
        LOG.error(f"Faulty IP Address: {allowed_source} - ecs_service {service_name}")
        raise ValueError(
            "Not a valid IPv4 CIDR notation",
            props["CidrIp"],
            "Expected",
            CIDR_REG,
        )
    return props
def get_deploy_labels(service_definition):
    """
    Function to get the deploy labels of a service definition

    :param dict service_definition: The service definition as defined in compose file
    :return: labels if any
    :rtype: dict
    """
    deploy_key = "deploy"
    labels_key = "labels"
    labels = {}
    svc_labels = {}
    if keyisset(deploy_key, service_definition) and keyisset(
        labels_key, service_definition[deploy_key]
    ):
        svc_labels = service_definition[deploy_key][labels_key]
        LOG.debug(f"labels: {svc_labels}")
    if not svc_labels:
        return labels
    if isinstance(svc_labels, dict):
        return svc_labels
    if isinstance(svc_labels, list):
        # Validate every entry before parsing any of them.
        for item in svc_labels:
            if not isinstance(item, str):
                raise TypeError(
                    f"When using a list for deploy labels, all labels must be of type string"
                )
        parse_string_labels(labels, svc_labels)
    return labels
def import_secrets(template, definition, container, settings):
    """
    Function to import secrets from composex mapping to AWS Secrets in Secrets Manager

    :param troposphere.Template template:
    :param dict definition:
    :param troposhere.ecs.ContainerDefinition container:
    :param ecs_composex.common.settings.ComposeXSettings settings:
    :return:
    """
    # Nothing to do when the service declares no list of secrets.
    if not (
        keyisset("secrets", definition) and isinstance(definition["secrets"], list)
    ):
        return
    # Nothing to map against when the top-level secrets section is absent.
    if not keyisset("secrets", settings.compose_content):
        return
    secrets = definition["secrets"]
    settings_secrets = settings.compose_content["secrets"]
    for secret in secrets:
        if isinstance(secret, str):
            if secret in settings_secrets and keyisset(
                "ComposeSecret", settings_secrets[secret]
            ):
                settings_secrets[secret]["ComposeSecret"].assign_to_task_definition(
                    template, container
                )
        elif isinstance(secret, dict) and keyisset("source", secret):
            secret_name = secret["source"]
            if keyisset("ComposeSecret", settings_secrets[secret_name]):
                settings_secrets[secret_name][
                    "ComposeSecret"
                ].assign_to_task_definition(template, container)
def build_cert_params(cert_def):
    """
    Function to build the certificate parameters

    :param dict cert_def:
    :return: cert_params
    :rtype: dict
    """
    cert_params = {
        CERT_ALT_NAMES_T: Ref(CERT_ALT_NAMES)
        if keyisset("SubjectAlternativeNames", cert_def)
        else Ref(AWS_NO_VALUE),
        CERT_CN_T: cert_def["DomainName"],
        CERT_VALIDATION_METHOD.title: cert_def["ValidationMethod"]
        if keyisset("ValidationMethod", cert_def)
        else CERT_VALIDATION_METHOD.Default,
    }
    if keyisset("DomainValidationOptions", cert_def):
        options = cert_def["DomainValidationOptions"]
        if len(options) > 1:
            # Typo fixed in the warning message ("methond" -> "method").
            warn(
                ValueError(
                    "For now we are going to support only just the one validation method."
                )
            )
        option = options[0]
        cert_params[VALIDATION_DOMAIN_ZONE_ID_T] = (
            option["HostedZoneId"]
            if keyisset("HostedZoneId", option)
            else VALIDATION_DOMAIN_ZONE_ID.Default
        )
        # Bug fix: the value was read from cert_def["ValidationDomain"] while the
        # presence check was done on option, raising KeyError whenever the
        # validation domain was only set inside the DomainValidationOptions entry.
        cert_params[VALIDATION_DOMAIN_NAME_T] = (
            option["ValidationDomain"]
            if keyisset("ValidationDomain", option)
            else VALIDATION_DOMAIN_NAME.Default
        )
    return cert_params
def set_healthcheck(definition):
    """
    Function to set the healthcheck configuration

    :return: the HealthCheck object, or None when no healthcheck is defined
    """
    section_key = "healthcheck"
    valid_keys = ["test", "interval", "timeout", "retries", "start_period"]
    attr_mappings = {
        "test": "Command",
        "interval": "Interval",
        "timeout": "Timeout",
        "retries": "Retries",
        "start_period": "StartPeriod",
    }
    required_keys = ["test"]
    if not keyisset(section_key, definition):
        return None
    healthcheck = definition[section_key]
    validate_healthcheck(healthcheck, valid_keys, required_keys)
    # Map compose keys onto the troposphere HealthCheck attribute names.
    params = {attr_mappings[name]: value for name, value in healthcheck.items()}
    if isinstance(params["Command"], str):
        params["Command"] = [healthcheck["test"]]
    if keyisset("Interval", params) and isinstance(params["Interval"], str):
        params["Interval"] = int(healthcheck["interval"])
    return HealthCheck(**params)
def set_xray(self, definition):
    """
    Function to set the xray
    """
    has_master = keyisset(self.master_key, definition)
    if has_master and keyisset("use_xray", definition[self.master_key]):
        self.use_xray = True
def __init__(self, settings):
    """
    Initializes the ComposeXConfig class

    :param ComposeXSettings settings: The execution settings
    """
    self.composex_config = {}
    content = settings.compose_content
    # Only pick up the config when both the master and composex sections exist.
    if keyisset(self.master_key, content) and keyisset(
        self.composex_key, content[self.master_key]
    ):
        self.composex_config = content[self.master_key][self.composex_key]
def set_from_kwargs(self, **kwargs):
    """
    Method to set internal settings based on kwargs keys

    :param kwargs: unordered parameters
    :type kwargs: dict
    """
    if keyisset(DIR_DEST, kwargs):
        self.output_dir = path.abspath(kwargs[DIR_DEST])
    if not keyisset("BucketName", kwargs):
        return
    # A bucket name implies uploads are possible.
    self.bucket = kwargs["BucketName"]
    self.can_upload = True
def define_pit_spec(properties):
    """
    Function to define the Point-In-Time Recovery specification for the table.

    :param dict properties: the Table properties from the compose definition
    :return: the PITR specification, enabled only when explicitly set
    :rtype: troposphere.dynamodb.PointInTimeRecoverySpecification
    """
    # bool() replaces the redundant ``True if ... else False`` ternary.
    pit_recover = bool(
        keyisset("PointInTimeRecoverySpecification", properties)
        and keyisset(
            "PointInTimeRecoveryEnabled",
            properties["PointInTimeRecoverySpecification"],
        )
    )
    return dynamodb.PointInTimeRecoverySpecification(
        PointInTimeRecoveryEnabled=pit_recover
    )
def validate_cluster_input(args):
    """Function to validate the cluster arguments

    :param args: Parser arguments
    :raise: KeyError
    """
    has_create = keyisset("CreateCluster", args)
    has_name = keyisset(CLUSTER_NAME_T, args)
    if not has_create and not has_name:
        warnings.warn(
            f"You must provide an ECS Cluster name if you do not want ECS ComposeX to create one for you",
            UserWarning,
        )
def define_provisioned_throughput(properties):
    """
    Function to define the provisioned throughput for the DynamoDB table.

    :param dict properties: the Table properties from the compose definition
    :return: the throughput settings or AWS::NoValue when not provisioned
    """
    if not keyisset("ProvisionedThroughput", properties):
        return Ref(AWS_NO_VALUE)
    props = properties["ProvisionedThroughput"]
    read_units = (
        int(props["ReadCapacityUnits"])
        if keyisset("ReadCapacityUnits", props)
        else Ref(AWS_NO_VALUE)
    )
    write_units = (
        int(props["WriteCapacityUnits"])
        if keyisset("WriteCapacityUnits", props)
        else Ref(AWS_NO_VALUE)
    )
    return dynamodb.ProvisionedThroughput(
        ReadCapacityUnits=read_units, WriteCapacityUnits=write_units
    )
def define_resources(res_content):
    """
    Function to determine how many resources are going to be created.

    :return:
    """
    res_count = 0
    if keyisset(TOPICS_KEY, res_content):
        for topic in res_content[TOPICS_KEY]:
            # One resource per topic, plus one per inline subscription.
            res_count += 1
            if keyisset("Subscription", topic):
                res_count += len(topic["Subscription"])
    if keyisset(SUBSCRIPTIONS_KEY, res_content):
        res_count += len(res_content[SUBSCRIPTIONS_KEY])
    return res_count
def define_stream_spec(properties):
    """
    Function to define Table stream specs

    :param dict properties:
    :return: the stream specification or AWS::NoValue
    """
    if not (
        keyisset("StreamSpecification", properties)
        and keyisset("StreamViewType", properties["StreamSpecification"])
    ):
        return Ref(AWS_NO_VALUE)
    return dynamodb.StreamSpecification(
        StreamViewType=properties["StreamSpecification"]["StreamViewType"]
    )
def update_families(families, labels, service_name):
    """
    Function to update families info from labels

    :param dict families: registry of applications families
    :param dict labels: the list of labels from a service
    :param str service_name: name of the service for which we get these labels
    """
    for label, value in labels.items():
        if label != ECS_TASK_FAMILY_LABEL:
            continue
        # Append to an existing family, otherwise start a new one.
        if keyisset(value, families):
            families[value].append(service_name)
        else:
            families[value] = [service_name]
def set_service_deploy(self, definition):
    """
    Function to setup the service configuration from the deploy section of the service in compose file.
    """
    if not keyisset("deploy", definition):
        return
    deployment = definition["deploy"]
    if keyisset("resources", deployment):
        self.set_compute_resources(deployment["resources"])
    self.set_deployment_settings(deployment)
    if not (
        keyisset("labels", deployment)
        and keyisset("ecs.depends.condition", deployment["labels"])
    ):
        return
    condition = deployment["labels"]["ecs.depends.condition"]
    allowed_values = ["START", "COMPLETE", "SUCCESS", "HEALTHY"]
    if condition not in allowed_values:
        raise ValueError(
            "Attribute ecs.depends.condition is invalid. Must be one of",
            allowed_values,
        )
    self.container_start_condition = condition
def db_secrets_names(db_name, db_def):
    """
    Function to return the list of env vars set for the DB to use as env vars for the Secret.

    :param str db_name: name of the DB as defined in the compose file
    :param dict db_def: Definition of the DB
    :return: list of names to use.
    :rtype: list
    """
    if keyisset("Settings", db_def) and keyisset("EnvNames", db_def["Settings"]):
        names = db_def["Settings"]["EnvNames"]
    else:
        names = []
    # The DB's own name is always part of the list.
    if db_name not in names:
        names.append(db_name)
    return names
def set_output_settings(self, kwargs):
    """
    Method to set the output settings based on kwargs
    """
    # Only accept an explicitly allowed format; otherwise use the default.
    self.format = self.default_format
    if (
        keyisset(self.format_arg, kwargs)
        and kwargs[self.format_arg] in self.allowed_formats
    ):
        self.format = kwargs[self.format_arg]
    if keyisset(self.output_dir_arg, kwargs):
        self.output_dir = kwargs[self.output_dir_arg]
    else:
        self.output_dir = self.default_output_dir