def handle_defined_x_aws_autoscaling(configs, service):
    """
    Reconciles ``deploy.x-aws-autoscaling`` with ``x-scaling`` for a service.

    When both are set, x-scaling wins and a warning is logged. Otherwise the
    x-aws-autoscaling min/max/cpu values are translated into an x-scaling dict.

    :param list configs: accumulator the resolved scaling config is appended to
    :param ecs_composex.common.compose_services.ComposeService service:
    :return:
    """
    deploy_section = service.definition.get("deploy") or {}
    autoscaling_def = deploy_section.get("x-aws-autoscaling") or {}
    if not autoscaling_def:
        # No x-aws-autoscaling: keep whatever x-scaling says, if anything.
        if service.x_scaling:
            LOG.debug("No x-aws-autoscaling detected, proceeding as usual")
            configs.append(service.x_scaling)
        return
    # Defaults to 1 when min/max are not present.
    lower = 1 if not keypresent("min", autoscaling_def) else int(autoscaling_def["min"])
    upper = 1 if not keypresent("max", autoscaling_def) else int(autoscaling_def["max"])
    if service.x_scaling:
        LOG.warning(
            f"Detected both x-aws-autoscaling and x-scaling for {service.name}. Priority goes to x-scaling"
        )
    else:
        service.x_scaling = {"Range": f"{lower}-{upper}"}
        if keyisset("cpu", autoscaling_def):
            service.x_scaling.update(
                {"TargetScaling": {"CpuTarget": int(autoscaling_def["cpu"])}}
            )
    configs.append(service.x_scaling)
def set_service_update_config(family) -> dict:
    """
    Determines the ECS deployment (update) configuration for the family.

    When a family has multiple containers, the merged percentages and rollback
    decision apply to all tasks of the family.

    :param family: the ComposeFamily whose services are inspected
    :return: the deployment configuration mapping
    :rtype: dict
    """
    min_percents = []
    max_percents = []
    for svc in family.services:
        if keypresent("x-aws-min_percent", svc.definition):
            min_percents.append(int(svc.definition["x-aws-min_percent"]))
        if keypresent("x-aws-max_percent", svc.definition):
            max_percents.append(int(svc.definition["x-aws-max_percent"]))
    family_min_percent = define_family_deploy_percents(min_percents, 100)
    family_max_percent = define_family_deploy_percents(max_percents, 200)
    # Rollback stays enabled only if no service opted for a different failure_action.
    rollback = not any(
        svc.update_config["failure_action"] != "rollback"
        for svc in family.services
        if svc.update_config and keyisset("failure_action", svc.update_config)
    )
    return {
        "MinimumHealthyPercent": family_min_percent,
        "MaximumPercent": family_max_percent,
        "RollBack": rollback,
    }
def update_networking(service_name, service_def):
    """
    Strips the AWS-specific ``UseCloudmap`` and ``IsPublic`` keys from the
    service ``x-network`` definition, when x-network is set at all.

    :param str service_name: name of the service (unused here, kept for interface)
    :param dict service_def: the service definition, mutated in place
    """
    if not keyisset("x-network", service_def):
        return
    network_def = service_def["x-network"]
    for legacy_key in ("UseCloudmap", "IsPublic"):
        if keypresent(legacy_key, network_def):
            del network_def[legacy_key]
def import_record_properties(
    properties,
    top_class,
    set_to_novalue=False,
    ignore_missing_required=True,
    ignore_missing_sub_required=False,
):
    """
    Generic function importing the RecordSet properties.
    If the property was not defined, it is either left empty or set to AWS::NoValue
    For inner recursive, we enforce check on required properties.

    :param dict properties: user-provided properties to import
    :param top_class: The class we are going to import properties for
    :param bool set_to_novalue: Instead of skipping the property, actively set to AWS::NoValue
    :param bool ignore_missing_required: Whether raise an error when missing an essential key.
    :param bool ignore_missing_sub_required: Whether raise an error when missing an essential key in sub properties
    :return: The properties for the RecordSet
    :rtype: dict
    """
    props = {}
    # top_class.props maps prop name -> (expected type or [type], required: bool),
    # the troposphere class property descriptor convention.
    for prop_name in top_class.props:
        if not keypresent(prop_name, properties) and not top_class.props[prop_name][1]:
            # Absent and optional: skip entirely.
            continue
        elif (
            not keypresent(prop_name, properties)
            and top_class.props[prop_name][1]
            and not ignore_missing_required
        ):
            # Absent but required, and the caller asked for strict checking.
            raise KeyError(
                f"Property {prop_name} is required for the definition of {top_class}"
            )
        elif keyisset(prop_name, properties) and isinstance(
            top_class.props[prop_name][0], list
        ):
            # List-typed property: import each element via handle_list.
            props[prop_name] = handle_list(
                properties[prop_name], top_class.props[prop_name][0][0]
            )
        elif keypresent(prop_name, properties) and isfunction(
            top_class.props[prop_name][0]
        ):
            # Validator-function typed property: pass the raw value through.
            props[prop_name] = properties[prop_name]
        elif keypresent(prop_name, properties) and not isfunction(properties[prop_name]):
            # NOTE(review): this branch checks isfunction on the *value*, unlike
            # the branch above which checks the class descriptor — confirm intended.
            import_non_functions(
                props,
                prop_name,
                top_class,
                properties,
                set_to_novalue,
                ignore_missing_sub_required,
            )
        elif keypresent(prop_name, properties):
            props[prop_name] = properties[prop_name]
    return props
def handle_awslogs_options(service: ComposeService, logging_def: dict) -> LogConfiguration:
    """
    Builds the awslogs LogConfiguration for a service from its compose logging
    definition, applying sensible fallbacks for unset options.

    :param ComposeService service:
    :param dict logging_def: the compose ``logging`` definition
    :return: the awslogs log configuration
    :rtype: LogConfiguration
    """
    options_def = set_else_none("options", logging_def)
    # Option name -> fallback when the user did not set it.
    fallbacks = {
        "awslogs-group": service.logical_name,
        "awslogs-region": Region,
        "awslogs-stream-prefix": service.name,
        "awslogs-endpoint": NoValue,
        "awslogs-datetime-format": NoValue,
        "awslogs-multiline-pattern": NoValue,
        "mode": NoValue,
        "max-buffer-size": NoValue,
    }
    options = {
        option_name: set_else_none(option_name, options_def, alt_value=fallback)
        for option_name, fallback in fallbacks.items()
    }
    # awslogs-create-group accepts a bool or a yes/true string.
    if keypresent("awslogs-create-group", options_def):
        create_group = options_def["awslogs-create-group"]
        if isinstance(create_group, bool):
            options["awslogs-create-group"] = keyisset("awslogs-create-group", options_def)
        elif isinstance(create_group, str):
            options["awslogs-create-group"] = create_group in [
                "yes",
                "true",
                "Yes",
                "True",
            ]
    return LogConfiguration(
        LogDriver="awslogs",
        Options=options,
    )
def define_new_config(config, key, new_config):
    """
    Merges target-scaling properties from ``new_config`` into ``config[key]``.

    Properties already present on both sides are reconciled via
    handle_defined_target_scaling_props; properties only present in
    ``new_config`` are copied over.

    :param dict config: existing configuration, mutated in place
    :param str key: the sub-key of config to merge into
    :param dict new_config: the incoming configuration
    """
    valid_keys = (
        "CpuTarget",
        "MemoryTarget",
        "DisableScaleIn",
        "TgtTargetsCount",
        "ScaleInCooldown",
        "ScaleOutCooldown",
    )
    for prop in valid_keys:
        if not keypresent(prop, new_config):
            continue
        if keypresent(prop, config[key]):
            handle_defined_target_scaling_props(prop, config, key, new_config)
        else:
            config[key][prop] = new_config[prop]
def remove_env_settings(resource_definition):
    """
    Removes the legacy ``EnvNames`` entry from ``Settings``, then drops
    ``Settings`` altogether when it is present but empty/falsy.

    :param dict resource_definition: the resource definition, mutated in place
    """
    if keyisset("Settings", resource_definition):
        settings = resource_definition["Settings"]
        if keyisset("EnvNames", settings):
            del settings["EnvNames"]
    # Settings key present but now (or already) empty: remove it entirely.
    if keypresent("Settings", resource_definition) and not keyisset(
        "Settings", resource_definition
    ):
        del resource_definition["Settings"]
def set_db_cluster(template, db, secret, sgs):
    """
    Function to parse and transform yaml definition to Troposphere

    :param troposphere.Template template:
    :param ecs_composex.docdb.docdb_stack.DocDb db:
    :param troposphere.secretsmanager.Secret secret:
    :param list sgs:
    """
    props = import_record_properties(db.properties, docdb.DBCluster)
    # Encrypt storage by default unless the user set StorageEncrypted explicitly.
    if not keypresent("StorageEncrypted", props):
        props["StorageEncrypted"] = True
    props.update({
        "VpcSecurityGroupIds": sgs,
        # CFN dynamic references resolve credentials from Secrets Manager at deploy time.
        "MasterUsername":
        Sub(f"{{{{resolve:secretsmanager:${{{secret.title}}}:SecretString:username}}}}"
            ),
        "MasterUserPassword":
        Sub(f"{{{{resolve:secretsmanager:${{{secret.title}}}:SecretString:password}}}}"
            ),
        "DBSubnetGroupName": Ref(db.db_subnets_group),
    })
    # Optional cluster parameter group, created only when requested via MacroParameters.
    if db.parameters and keyisset("DBClusterParameterGroup", db.parameters):
        parameter_group = template.add_resource(add_parameters_group(db))
        props["DBClusterParameterGroupName"] = Ref(parameter_group)
    db.cfn_resource = docdb.DBCluster(db.logical_name, **props)
    template.add_resource(db.cfn_resource)
def merge_capacity_providers(service_compute):
    """
    Merge capacity providers set on the services of the task service_compute.family
    if service is not sidecar

    Bug fix: ``name`` is now always derived from the current provider. Previously
    it was only assigned inside the ``not in task_config`` branch, so when a
    provider had already been registered (e.g. by another service of the family),
    ``name`` kept its value from a prior iteration and the Base/Weight values
    were appended to the wrong provider's lists.

    :param service_compute: the family compute settings, mutated in place
    """
    task_config = {}
    for svc in service_compute.family.ordered_services:
        # Sidecars and services without a strategy do not contribute.
        if not svc.capacity_provider_strategy or svc.is_aws_sidecar:
            continue
        for provider in svc.capacity_provider_strategy:
            name = provider["CapacityProvider"]
            if name not in task_config:
                task_config[name] = {
                    "Base": [],
                    "Weight": [],
                    "CapacityProvider": name,
                }
            task_config[name]["Base"].append(
                set_else_none("Base", provider, alt_value=0)
            )
            task_config[name]["Weight"].append(
                set_else_none("Weight", provider, alt_value=0)
            )
    # Per the warning below, only one provider may carry a Base value:
    # keep it on the first provider, strip it from the others.
    for count, provider in enumerate(task_config.values()):
        if count == 0:
            provider["Base"] = int(max(provider["Base"]))
        elif count > 0 and keypresent("Base", provider):
            del provider["Base"]
            LOG.warning(
                f"{service_compute.family.name}.x-ecs Only one capacity provider can have a base value. "
                f"Deleting Base for {provider['CapacityProvider']}"
            )
        provider["Weight"] = int(max(provider["Weight"]))
    service_compute.ecs_capacity_providers = [
        CapacityProviderStrategyItem(**config) for config in task_config.values()
    ]
def deploy_labels(self, value: dict):
    """
    Setter for the service ``deploy.labels``.

    Bug fixes:
    - merges *value* into ``self.deploy["labels"]`` instead of calling
      ``self.deploy.update(value)``, which injected the label keys at the
      top level of the deploy section;
    - when deploy exists without a ``labels`` key, the labels are now set
      (previously the value was silently dropped);
    - returns early after creating deploy, avoiding re-processing the value.

    :param dict value: labels to set or merge into deploy.labels
    """
    if not self.deploy:
        self.deploy: dict = {"labels": value}
        return
    if keypresent("labels", self.deploy) and not keyisset("labels", self.deploy):
        # labels key present but empty/None: replace wholesale.
        self.deploy["labels"] = value
    elif keyisset("labels", self.deploy):
        self.deploy["labels"].update(value)
    else:
        self.deploy["labels"] = value
def no_value_if_not_set(props, key, is_bool=False):
    """
    Function to simplify setting value if the key is in the dict and
    else Ref(AWS_NO_VALUE) for resource properties

    With ``is_bool`` set, presence alone (keypresent) is enough, so an
    explicit False value is returned as-is; otherwise the value must also
    be truthy (keyisset).

    :param dict props:
    :param str key:
    :param bool is_bool:
    :return:
    """
    checker = keypresent if is_bool else keyisset
    if checker(key, props):
        return props[key]
    return Ref(AWS_NO_VALUE)
def define_host_volumes(family):
    """
    Goes over all volumes of all services; a volume whose ``volume`` key is
    None (or absent) and whose ``source`` starts with ``/`` is a host volume.

    :return: list of volumes
    :rtype: list[dict]
    """
    host_volumes = []
    for service in family.services:
        for volume in service.volumes:
            named_volume_unset = (
                keypresent("volume", volume) and volume["volume"] is None
            ) or not keyisset("volume", volume)
            if not named_volume_unset:
                continue
            if keyisset("source", volume) and volume["source"].startswith("/"):
                host_volumes.append(volume)
    return host_volumes
def define_db_prefix(db, mappings_definition):
    """
    Computes the secret-key prefix from the ``PrefixWithDbName`` setting.

    A bool True prefixes with the db name, a string prefixes with that string,
    anything else raises. No setting means no prefix.

    :param db: the database resource
    :param dict mappings_definition: the mapping definition to inspect
    :return: the prefix, possibly empty
    :rtype: str
    :raises TypeError: when PrefixWithDbName is neither bool nor str
    """
    if not keypresent("PrefixWithDbName", mappings_definition):
        return ""
    prefix_setting = mappings_definition["PrefixWithDbName"]
    if isinstance(prefix_setting, bool):
        return f"{db.name}_" if prefix_setting else ""
    if isinstance(prefix_setting, str):
        return f"{prefix_setting}_"
    raise TypeError(
        "PrefixWithDbName can only be one of",
        str,
        bool,
        "Got",
        type(prefix_setting),
    )
def set_db_cluster(template, db, sgs):
    """
    Function to parse and transform yaml definition to Troposphere

    :param troposphere.Template template:
    :param ecs_composex.docdb_stack.DocDb db:
    :param list sgs:
    """
    props = import_record_properties(db.properties, DBCluster)
    # Encrypt storage unless the user explicitly set StorageEncrypted.
    props.setdefault("StorageEncrypted", True)
    props["VpcSecurityGroupIds"] = sgs
    if db.parameters and keyisset("DBClusterParameterGroup", db.parameters):
        parameter_group = template.add_resource(add_parameters_group(db))
        props["DBClusterParameterGroupName"] = Ref(parameter_group)
    db.cfn_resource = DBCluster(db.logical_name, **props)
    template.add_resource(db.cfn_resource)
def correcting_required_settings(domain, props):
    """
    Enforces the OpenSearch settings required when advanced security is on:
    node-to-node encryption, encryption at rest, and HTTPS enforcement.

    :param ecs_composex.opensearch.opensearch_stack.OpenSearchDomain domain:
    :param dict props: the domain properties, mutated in place
    :return:
    """
    # Node-to-node encryption: enable when unset, or force-enable (with a
    # warning) when the user explicitly disabled it in the parameters.
    if not keyisset("NodeToNodeEncryptionOptions", props):
        props[
            "NodeToNodeEncryptionOptions"] = opensearchservice.NodeToNodeEncryptionOptions(
                Enabled=True)
    elif (keypresent("NodeToNodeEncryptionOptions", domain.parameters)
          and not domain.parameters["NodeToNodeEncryptionOptions"]):
        LOG.warn(
            "You have Advanced Security options enabled but NodeToNodeEncryptionOptions is disabled. Enabling"
        )
        props[
            "NodeToNodeEncryptionOptions"] = opensearchservice.NodeToNodeEncryptionOptions(
                Enabled=True)
    # Encryption at rest: flip Enabled to True if explicitly disabled, create otherwise.
    if keyisset("EncryptionAtRestOptions", props):
        crypt_options = props["EncryptionAtRestOptions"]
        if hasattr(crypt_options, "Enabled") and crypt_options.Enabled is False:
            LOG.warn(
                f"{domain.name} - With Advanced Security options, Encryption at rest must be enabled. Enabling"
            )
            setattr(crypt_options, "Enabled", True)
    else:
        props[
            "EncryptionAtRestOptions"] = opensearchservice.EncryptionAtRestOptions(
                Enabled=True)
    # Always enforce HTTPS on the domain endpoint.
    if keyisset("DomainEndpointOptions", props):
        settings = props["DomainEndpointOptions"]
        setattr(settings, "EnforceHTTPS", True)
    else:
        props[
            "DomainEndpointOptions"] = opensearchservice.DomainEndpointOptions(
                EnforceHTTPS=True,
            )
def parse_attributes_settings(self):
    """
    Method to parse pre-defined settings for shortcuts

    Each entry is (setting name, expected type, handler, applies-to-this-LB-type).
    :return: the lb attributes mappings
    :rtype: list
    """
    valid_settings = [
        ("timeout_seconds", int, handle_timeout_seconds, self.is_alb()),
        (
            "desync_mitigation_mode",
            str,
            handle_desync_mitigation_mode,
            self.is_alb(),
        ),
        (
            "drop_invalid_header_fields",
            bool,
            handle_drop_invalid_headers,
            self.is_alb(),
        ),
        ("http2", bool, handle_http2, self.is_alb()),
        ("cross_zone", bool, handle_cross_zone, self.is_nlb()),
    ]
    mappings = []
    for name, expected_type, handler, applies in valid_settings:
        if not applies or not keypresent(name, self.parameters):
            continue
        value = self.parameters[name]
        if not isinstance(value, expected_type):
            continue
        if handler:
            mappings.append(handler(value))
        else:
            mappings.append(
                LoadBalancerAttributes(
                    Key=name,
                    Value=str(value),
                )
            )
    return mappings
def set_compose_services_ingress(root_stack, dst_family: ComposeFamily,
                                 families: list,
                                 settings: ComposeXSettings) -> None:
    """
    Function to crate SG Ingress between two families / services.
    Presently, the ingress rules are set after all services have been created

    :param ecs_composex.common.stacks.ComposeXStack root_stack:
    :param ecs_composex.ecs.ecs_family.ComposeFamily dst_family:
    :param list families: The list of family names.
    :param ecs_composex.common.settings.ComposeXSettings settings:
    :raises KeyError: when a referenced service is not part of the deployment
    """
    for service in dst_family.service_networking.ingress.services:
        service_name = service["Name"]
        if service_name not in families:
            raise KeyError(
                f"The service {service_name} is not among the services created together. Valid services are",
                families,
            )
        if not keypresent("DependsOn", service):
            # No explicit dependency requested: plain standalone ingress rule.
            add_independent_rules(dst_family, service_name, root_stack)
        else:
            # The source family stack must wait for the destination family stack,
            # and receives the destination SG id as a stack parameter.
            src_family = settings.families[service_name]
            if dst_family.stack.title not in src_family.stack.DependsOn:
                src_family.stack.DependsOn.append(dst_family.stack.title)
            dst_family_sg_param = Parameter(f"{dst_family.stack.title}GroupId",
                                            Type=SG_ID_TYPE)
            add_parameters(src_family.template, [dst_family_sg_param])
            src_family.stack.Parameters.update({
                dst_family_sg_param.title:
                GetAtt(
                    dst_family.stack.title,
                    f"Outputs.{dst_family.logical_name}GroupId",
                ),
            })
            add_dependant_ingress_rules(dst_family, dst_family_sg_param,
                                        src_family)
def define_queue(queue, queues, mono_template=True):
    """
    Function to parse the queue definition and generate the queue accordingly.
    Created the redrive policy if necessary

    :param ecs_composex.common.compose_resources.Queue queue: name of the queue
    :param list[ecs_composex.sqs.sqs_stack.Queue] queues: the queues defined in x-sqs
    :param bool mono_template: whether or not there are so many outputs we need to split.
    :return: queue
    :rtype: troposphere.sqs.Queue
    """
    redrive_policy = None
    if keypresent("Properties", queue.definition):
        # Deep copy so mutating Properties does not alter the original definition.
        props = deepcopy(queue.definition)
        properties = props["Properties"]
        properties.update({"Metadata": metadata})
    else:
        properties = {"Metadata": metadata}
    if keyisset("RedrivePolicy", properties) and keyisset(
            "deadLetterTargetArn", properties["RedrivePolicy"]):
        redrive_target = properties["RedrivePolicy"]["deadLetterTargetArn"]
        # for/else: the else only runs when no x-sqs queue matched the DLQ name.
        for _queue in queues:
            if redrive_target == _queue.name:
                break
        else:
            raise KeyError(
                f"Queue {redrive_target} defined as DLQ for {queue.name} but is not defined"
            )
        if keyisset("maxReceiveCount", properties["RedrivePolicy"]):
            retries = int(properties["RedrivePolicy"]["maxReceiveCount"])
        else:
            # Default receive attempts before messages land in the DLQ.
            retries = 5
        # _queue here is the matched DLQ from the loop above.
        redrive_policy = define_redrive_policy(_queue, retries, mono_template)
    queue.cfn_resource = set_queue(queue, properties, redrive_policy)
    LOG.debug(queue.cfn_resource.title, queue.logical_name)
    return queue
def replace_awslogs_with_firelens_configuration(
        service: ComposeService,
        awslogs_config: LogConfiguration) -> LogConfiguration:
    """
    Remaps the awslogs driver options into the fluentbit options

    :param ComposeService service:
    :param LogConfiguration awslogs_config:
    :return:
    """
    # awslogs option name -> fluentbit cloudwatch output option name.
    option_mapping = {
        "awslogs-group": "log_group_name",
        "awslogs-stream-prefix": "log_stream_prefix",
        "awslogs-endpoint": "endpoint",
        "awslogs-region": "region",
        "awslogs-create-group": "auto_create_group",
    }
    source_options = awslogs_config.Options
    fluent_bit_options: dict = {"Name": "cloudwatch"}
    for source_key, target_key in option_mapping.items():
        if not keyisset(source_key, source_options):
            continue
        value = source_options[source_key]
        # Skip Ref(AWS::NoValue) placeholders, carry anything else truthy over.
        if isinstance(value, Ref) and value == NoValue:
            continue
        if value:
            fluent_bit_options[target_key] = value
    if not keyisset("log_group_name", fluent_bit_options):
        fluent_bit_options["log_group_name"] = f"ecs/svc/{service.logical_name}"
    if not keyisset("log_stream_prefix", fluent_bit_options):
        fluent_bit_options["log_stream_prefix"] = service.name
    if not keypresent("auto_create_group", fluent_bit_options):
        fluent_bit_options["auto_create_group"] = True
    return LogConfiguration(LogDriver="awsfirelens", Options=fluent_bit_options)
def __init__(
    self,
    name: str,
    definition: dict,
    module: XResourceModule,
    settings: ComposeXSettings,
):
    """
    Base initialisation of an x-resource from its compose definition.

    :param str name: Name of the resource in the template
    :param dict definition: The definition of the resource as-is
    :param ecs_composex.common.settings.ComposeXSettings settings:
    :raises TypeError: when module is not an XResourceModule
    """
    if not isinstance(module, XResourceModule):
        raise TypeError(name, "module must be", XResourceModule, "Got",
                        module, type(module))
    self.module = module
    # Validate the definition against the module schema before using it.
    self.validate_schema(name, definition, module.mod_key)
    self.name = name
    self.requires_vpc = False
    self.arn = None
    self.iam_manager = None
    self.cloud_control_attributes_mapping = {}
    self.native_attributes_mapping = {}
    # Deep copy so later mutations never alter the caller's definition.
    self.definition = deepcopy(definition)
    self.env_names = []
    self.env_vars = []
    self.validators = []
    # Logical name: the resource name stripped of non-alphanumeric characters.
    self.logical_name = NONALPHANUM.sub("", self.name)
    self.settings = (None if not keyisset("Settings", self.definition) else
                     self.definition["Settings"])
    self.use = (None if not keyisset("Use", self.definition) else
                self.definition["Use"])
    self.lookup = (None if not keyisset("Lookup", self.definition) else
                   self.definition["Lookup"])
    # Lookup resources may use a dedicated IAM role/session for discovery.
    if self.lookup:
        self.lookup_session = define_lookup_role_from_info(
            self.lookup, settings.session)
    else:
        self.lookup_session = settings.session
    # Properties: the dict when provided (and not a Lookup resource),
    # {} when the key exists but is empty, None when absent entirely.
    if keyisset("Properties", self.definition) and not self.lookup:
        self.properties = self.definition["Properties"]
    elif not keyisset("Properties", self.definition) and keypresent(
            "Properties", self.definition):
        self.properties = {}
    else:
        self.properties = None
    self.parameters = ({} if not keyisset("MacroParameters", self.definition)
                       else self.definition["MacroParameters"])
    # Resource falls back to defaults when none of the definition modes are used.
    self.uses_default = not any(
        [self.lookup, self.parameters, self.use, self.properties])
    self.scaling = set_else_none("Scaling", self.definition)
    self.scaling_target = None
    self.cfn_resource = None
    self.output_properties = {}
    self.outputs = []
    self.attributes_outputs = {}
    self.is_nested = False
    self.stack = None
    self.ref_parameter = None
    self.lookup_properties = {}
    self.mappings = {}
    self.default_tags = {
        "compose-x::module": self.module.mod_key,
        "compose-x::resource_name": self.name,
        "compose-x::logical_name": self.logical_name,
    }
    # NOTE(review): self.settings may be None here — assumes set_else_none
    # tolerates a None container; confirm against its implementation.
    self.cloudmap_settings = set_else_none("x-cloudmap", self.settings, {})
    self.default_cloudmap_settings = {}
    self.cloudmap_dns_supported = False
    self.policies_scaffolds = get_access_types(module.mod_key)
def no_value_if_not_set(props, key, is_bool=False):
    """
    Returns props[key] when set, else Ref(AWS_NO_VALUE), for resource properties.

    With ``is_bool`` set, presence alone (keypresent) is enough so an explicit
    False is returned as-is; otherwise the value must also be truthy (keyisset).

    NOTE(review): duplicate of another no_value_if_not_set defined earlier in
    this codebase — consider consolidating into one shared helper.

    :param dict props:
    :param str key:
    :param bool is_bool:
    :return:
    """
    if not is_bool:
        return Ref(AWS_NO_VALUE) if not keyisset(key, props) else props[key]
    else:
        return Ref(AWS_NO_VALUE) if not keypresent(key, props) else props[key]