Example #1
def handle_json_validation(resource: SsmParameter, value: str,
                           file_path: str) -> str:
    """
    Function to evaluate the JSON content

    :param SsmParameter resource:
    :param str value: Value read from file
    :param str file_path:
    :return:
    """
    try:
        payload = json.loads(value)
        if keyisset("MinimizeJson", resource.parameters):
            return json.dumps(payload, separators=(",", ":"))
        return value
    except json.decoder.JSONDecodeError:
        if keyisset("IgnoreInvalidJson", resource.parameters):
            LOG.warning(
                f"{resource.name} - The content of {file_path} "
                "did not pass JSON validation. Skipping due to IgnoreInvalidJson"
            )
            return value
        else:
            LOG.error(f"{resource.name} - The content of {file_path} "
                      "did not pass JSON validation.")
            raise
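The MinimizeJson branch relies on json.dumps' separators argument to strip whitespace. A minimal standalone sketch of that behaviour, using only the standard library:

import json

raw = '{\n  "key": "value",\n  "list": [1, 2, 3]\n}'
payload = json.loads(raw)
# separators=(",", ":") drops the default spaces after "," and ":"
print(json.dumps(payload, separators=(",", ":")))  # {"key":"value","list":[1,2,3]}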
Example #2
def handle_yaml_validation(resource: SsmParameter, value: str,
                           file_path: str) -> str:
    """
    Function to evaluate the YAML content

    :param SsmParameter resource:
    :param str value: Value read from file
    :param str file_path:
    :return:
    """
    try:
        payload = yaml.load(value, Loader=Loader)
        if keyisset("RenderToJson", resource.parameters):
            return json.dumps(payload, separators=(",", ":"))
        return value
    except yaml.YAMLError:
        if keyisset("IgnoreInvalidYaml", resource.parameters):
            LOG.warning(
                f"{resource.name} - The content of {file_path} "
                "did not pass YAML validation. Skipping due to IgnoreInvalidYaml"
            )
            return value
        else:
            LOG.error(f"{resource.name} - The content of {file_path} "
                      "did not pass YAML validation.")
            raise
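The RenderToJson branch round-trips YAML into minimized JSON. A standalone sketch using yaml.safe_load (the function above uses whichever Loader the module imported):

import json
import yaml

document = "key: value\nlist:\n  - 1\n  - 2\n"
payload = yaml.safe_load(document)
print(json.dumps(payload, separators=(",", ":")))  # {"key":"value","list":[1,2]}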
Example #3
    def generate_ref_env_var(self, target) -> list:
        """
        Method to define all the env vars of a resource based on its own defined output attributes
        """
        if not self.ref_parameter:
            LOG.error(
                f"{self.module.res_key}.{self.name} - Default ref_parameter not set. Skipping env_vars"
            )
            return []
        env_var_name = ENV_VAR_NAME.sub("", self.name.upper().replace("-", "_"))
        if self.cfn_resource and self.attributes_outputs and self.ref_parameter:
            ref_env_var = Environment(
                Name=env_var_name,
                Value=Ref(
                    self.attributes_outputs[self.ref_parameter]["ImportParameter"]
                ),
            )
            ref_param_settings = get_parameter_settings(self, self.ref_parameter)
            add_parameters(target[0].template, [ref_param_settings[1]])
            target[0].stack.Parameters.update(
                {ref_param_settings[0]: ref_param_settings[2]}
            )
        elif self.lookup_properties and self.ref_parameter:
            ref_env_var = Environment(
                Name=env_var_name,
                Value=self.attributes_outputs[self.ref_parameter]["ImportValue"],
            )
        else:
            raise ValueError(
                f"{self.module.res_key}.{self.name} - Unable to set the default env var"
            )
        return [ref_env_var]
Example #4
def create_new_stream(stream: DeliveryStream) -> None:
    """
    Imports the settings from the CFN definition and defines the CFN resource from properties

    :param DeliveryStream stream:
    """
    props = import_record_properties(
        stream.properties,
        CfnDeliveryStream,
        ignore_missing_required=True,
        ignore_missing_sub_required=True,
    )
    stream.cfn_resource = CfnDeliveryStream(stream.logical_name, **props)
    stream.log_group = LogGroup(
        f"{stream.logical_name}LogGroup",
        LogGroupName=Sub(f"firehose/${{STACK_ID}}/{stream.name}",
                         STACK_ID=STACK_ID_SHORT),
    )
    if (stream.cfn_resource.DeliveryStreamType == "KinesisStreamAsSource"
            and stream.cfn_resource.DeliveryStreamEncryptionConfigurationInput
            != NoValue):
        LOG.error(
            f"{stream.module.res_key}.{stream.name} -"
            " You can only have ServerSide encryption with DirectPut DeliveryStream. Removing."
        )
        stream.cfn_resource.DeliveryStreamEncryptionConfigurationInput = NoValue
    set_replace_iam_role(stream)
    values_validation(stream)
    stream.init_outputs()
    stream.generate_outputs()
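The LogGroupName above pairs a template string with a substitution variable. A small sketch of how troposphere's Sub renders keyword substitutions into Fn::Sub (the literal STACK_ID value here is only illustrative; the module passes the STACK_ID_SHORT expression):

from troposphere import Sub

expr = Sub("firehose/${STACK_ID}/my-stream", STACK_ID="abcd1234")
print(expr.to_dict())
# {'Fn::Sub': ['firehose/${STACK_ID}/my-stream', {'STACK_ID': 'abcd1234'}]}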
Example #5
def get_key_config(key, account_id: str, resource_id: str) -> dict | None:
    """

    :param KmsKey key:
    :param str account_id: unused
    :param str resource_id: unused
    :return:
    """
    key_attributes_mappings = {
        KMS_KEY_ARN: "KeyMetadata::Arn",
        KMS_KEY_ID: "KeyMetadata::KeyId",
    }
    client = key.lookup_session.client("kms")
    try:
        key_desc = client.describe_key(KeyId=key.arn)
        key_attributes = attributes_to_mapping(key_desc,
                                               key_attributes_mappings)
        key.manager = key_desc["KeyMetadata"]["KeyManager"]
        try:
            aliases_r = client.list_aliases(KeyId=key_attributes[KMS_KEY_ID])
            if aliases_r["Aliases"]:
                key_attributes[KMS_KEY_ALIAS_NAME] = aliases_r["Aliases"][0][
                    "AliasName"]
        except client.exceptions.NotFoundException:
            LOG.debug(f"{key.module.res_key}.{key.name} - No KMS Key Alias.")
        return key_attributes
    # KMS raises NotFoundException, not an SQS exception, when the key is gone
    except client.exceptions.NotFoundException:
        return None
    except ClientError as error:
        LOG.error(error)
        raise
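attributes_to_mapping is a compose-x helper not shown here; judging from the "KeyMetadata::Arn" style keys, it walks the API response along "::"-separated paths. A hypothetical equivalent, for illustration only:

def attributes_to_mapping_sketch(api_response: dict, mappings: dict) -> dict:
    # Resolve each '::'-separated path against the nested response dict
    result = {}
    for key, path in mappings.items():
        value = api_response
        for part in path.split("::"):
            value = value[part]
        result[key] = value
    return result

print(attributes_to_mapping_sketch(
    {"KeyMetadata": {"Arn": "arn:aws:kms:eu-west-1:123456789012:key/abcd"}},
    {"KMS_KEY_ARN": "KeyMetadata::Arn"},
))  # {'KMS_KEY_ARN': 'arn:aws:kms:eu-west-1:123456789012:key/abcd'}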
Example #6
    def validate_schema(self,
                        name,
                        definition,
                        module_name,
                        module_schema: str = None) -> None:
        """
        JSON schema validation of the module's resource definition
        """
        if not self.module.json_schema and not module_schema:
            return
        resolver_source = pkg_files("ecs_composex").joinpath(
            "specs/compose-spec.json")
        LOG.debug(f"Validating against input schema {resolver_source}")
        resolver = jsonschema.RefResolver(
            base_uri=f"file://{path.abspath(path.dirname(resolver_source))}/",
            referrer=self.module.json_schema,
        )

        try:
            jsonschema.validate(
                definition,
                module_schema if module_schema else self.module.json_schema,
                resolver=resolver,
            )
        except jsonschema.exceptions.ValidationError:
            LOG.error(
                f"{module_name}.{name} - Definition is not conform to schema.")
            raise
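For reference, a self-contained sketch of jsonschema.validate raising ValidationError, without the file-based RefResolver (which jsonschema has since deprecated in favour of the referencing library):

import jsonschema

schema = {"type": "object", "additionalProperties": False}
jsonschema.validate({}, schema)  # passes
try:
    jsonschema.validate({"Unknown": True}, schema)
except jsonschema.exceptions.ValidationError as error:
    print(error.message)  # Additional properties are not allowed ('Unknown' was unexpected)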
Example #7
def get_family_from_engine_version(engine_name,
                                   engine_version,
                                   session=None,
                                   client=None):
    """
    Function to get the engine family from engine name and version
    :param client: override client for boto3 call
    :type client: boto3.client
    :param session: override session for boto3 client
    :type session: boto3.session.Session
    :param engine_name: engine name, ie. aurora-mysql
    :type engine_name: str
    :param engine_version: engine version, ie. 5.7.12
    :type engine_version: str
    :return: engine_family
    :rtype: str
    """
    if not client:
        if not session:
            session = boto3.session.Session()
        client = session.client("rds")
    try:
        req = client.describe_db_engine_versions(Engine=engine_name,
                                                 EngineVersion=engine_version)
    except ClientError as error:
        LOG.error(error)
        return None

    db_family = req["DBEngineVersions"][0]["DBParameterGroupFamily"]
    return db_family
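Usage is then a one-liner; with no overrides the function builds its own session and RDS client (the engine values below are illustrative):

family = get_family_from_engine_version("aurora-mysql", "5.7.mysql_aurora.2.11.2")
print(family)  # e.g. "aurora-mysql5.7", or None if the describe call failed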
Example #8
    def validate(self, settings):
        """
        Method to validate the CloudFormation template, either via URL once uploaded to S3 or via TemplateBody
        """
        try:
            if not settings.no_upload and self.url:
                validate_wrapper(settings.session, url=self.url)
            elif settings.no_upload or not self.url:
                if not self.file_path:
                    self.write(settings)
                LOG.debug(f"No upload - Validating template body - {self.file_path}")
                if len(self.body) >= 51200:
                    LOG.warning(
                        f"Template body for {self.file_name} is too big for local validation."
                        " No upload is True, so skipping."
                    )
                else:
                    validate_wrapper(settings.session, body=self.body)
            LOG.debug(f"Template {self.file_name} was validated successfully by CFN")
        except ClientError as error:
            LOG.error(error)
            with open(f"/tmp/{settings.name}.{settings.format}", "w") as failed_file_fd:
                failed_file_fd.write(self.body)
                LOG.error(
                    f"Failed validation template written at /tmp/{settings.name}.{settings.format}"
                )
                raise
Example #9
def apply_extra_parameters(settings, db, db_stack) -> None:
    """
    Function to add extra parameters set in MacroParameters post creation of the DB resource from properties

    :param ecs_composex.common.settings.ComposeXSettings settings:
    :param ecs_composex.rds.rds_stack.Rds db:
    :param ecs_composex.rds.rds_template.RdsDbStack db_stack:
    """
    if not db.parameters:
        return
    permissions_boundary = Ref(AWS_NO_VALUE)
    if keyisset("PermissionsBoundary", db.parameters):
        permissions_boundary = define_iam_policy(
            db.parameters["PermissionsBoundary"])
    extra_parameters = {"RdsFeatures": (list, add_rds_features)}
    for name, config in extra_parameters.items():
        if not keyisset(name, db.parameters):
            LOG.debug(
                f"Feature {name} has not been set in compose file. {db.parameters}"
            )
        if (keyisset(name, db.parameters)
                and isinstance(db.parameters[name], config[0]) and config[1]):
            config[1](
                settings,
                db,
                db_stack,
                db.parameters[name],
                permissions_boundary,
            )
        elif keyisset(name, db.parameters) and not isinstance(
                db.parameters[name], config[0]):
            LOG.error(
                f"The property {name} is of type {type(db.parameters[name])}. Expected {config[0]}. Skipping"
            )
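extra_parameters acts as a small dispatch table: each MacroParameter name maps to an (expected_type, handler) tuple, so supporting a new parameter only means adding an entry. A stripped-down sketch of the same pattern:

def handle_rds_features(value):
    print("handling RdsFeatures", value)

DISPATCH = {"RdsFeatures": (list, handle_rds_features)}
parameters = {"RdsFeatures": ["s3Import"]}

for name, (expected_type, handler) in DISPATCH.items():
    value = parameters.get(name)
    if isinstance(value, expected_type):
        handler(value)  # handling RdsFeatures ['s3Import']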
Example #10
def determine_resource_type(db_name, properties):
    """
    Function to determine whether the properties are those of a DB Cluster or a DB Instance.
    Returns None if the resource type cannot be determined from the properties.

    :param str db_name:
    :param dict properties:
    :return:
    """
    if (
        keyisset(DB_ENGINE_NAME.title, properties)
        and properties[DB_ENGINE_NAME.title].startswith("aurora")
        or all(
            property_name in DBCluster.props.keys()
            for property_name in properties.keys()
        )
    ):
        LOG.info(f"Identified {db_name} to be a RDS Aurora Cluster")
        return DBCluster
    elif all(
        property_name in DBInstance.props.keys() for property_name in properties.keys()
    ):
        LOG.info(f"Identified {db_name} to be a RDS Instance")
        return DBInstance
    LOG.error(
        "From the properties defined, we cannot determine whether this is a RDS Cluster or a RDS Instance."
    )
    return None
Example #11
def lookup_ecs_cluster(session, cluster_lookup):
    """
    Function to find the ECS Cluster.

    :param boto3.session.Session session: Boto3 session to make API calls.
    :param cluster_lookup: Cluster lookup definition.
    :return:
    """
    if not isinstance(cluster_lookup, str):
        raise TypeError("The value for Lookup must be", str, "Got",
                        type(cluster_lookup))
    client = session.client("ecs")
    try:
        cluster_r = client.describe_clusters(clusters=[cluster_lookup])
        if not keyisset("clusters", cluster_r):
            LOG.warning(
                f"No cluster named {cluster_lookup} found. Creating one with default settings"
            )
            return get_default_cluster_config()
        elif (keyisset("clusters", cluster_r)
              and cluster_r["clusters"][0]["clusterName"] == cluster_lookup):
            LOG.info(
                f"Found ECS Cluster {cluster_lookup}. Setting {CLUSTER_NAME_T} accordingly."
            )
            return cluster_r["clusters"][0]["clusterName"]
    except ClientError as error:
        LOG.error(error)
        raise
Example #12
def determine_resource_type(name, properties):
    """
    Function to determine whether the properties are those of a CacheCluster or a ReplicationGroup.
    Returns None if the resource type cannot be determined from the properties.

    :param str name:
    :param dict properties:
    :return:
    """
    if all(
        property_name in CacheCluster.props.keys()
        for property_name in properties.keys()
    ):
        LOG.info(f"Identified {name} to be {CacheCluster.resource_type}")
        return CacheCluster
    elif all(
        property_name in ReplicationGroup.props.keys()
        for property_name in properties.keys()
    ):
        LOG.info(f"Identified {name} to be {ReplicationGroup.resource_type}")
        return ReplicationGroup
    LOG.error(
        "From the properties defined, we cannot determine whether this is a CacheCluster or a ReplicationGroup."
    )
    return None
Example #13
def create_bucket(bucket_name, session):
    """
    Function that attempts to create the S3 bucket, handling the cases where it already exists.

    :param bucket_name: name of the s3 bucket
    :type bucket_name: str
    :param session: boto3 session to use if wanted to override settings.
    :type session: boto3.session.Session
    """
    client = session.client("s3")
    region = session.region_name
    location = {"LocationConstraint": region}
    try:
        client.create_bucket(
            ACL="private",
            Bucket=bucket_name,
            ObjectLockEnabledForBucket=True,
            CreateBucketConfiguration=location,
        )
        LOG.info(f"Bucket {bucket_name} successfully created.")
    except client.exceptions.BucketAlreadyExists:
        LOG.warning(f"Bucket {bucket_name} already exists.")
    except client.exceptions.BucketAlreadyOwnedByYou:
        LOG.info(f"You already own the bucket {bucket_name}")
    except ClientError as error:
        LOG.error("Error whilst creating the bucket")
        LOG.error(error)
        raise
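One caveat with this call: in us-east-1, CreateBucketConfiguration must be omitted entirely, because LocationConstraint does not accept us-east-1. A hedged sketch of the usual guard:

import boto3

def create_bucket_args(bucket_name: str, region: str) -> dict:
    args = {"ACL": "private", "Bucket": bucket_name}
    if region != "us-east-1":  # us-east-1 rejects an explicit LocationConstraint
        args["CreateBucketConfiguration"] = {"LocationConstraint": region}
    return args

session = boto3.session.Session()
print(create_bucket_args("my-bucket", session.region_name or "us-east-1"))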
Example #14
def generate_security_group_props(allowed_source, service_name):
    """
    Function to parse the allowed source and create the SG Opening options accordingly.

    :param dict allowed_source: The allowed source defined in configs
    :param str service_name:
    :return: security group ingress properties
    :rtype: dict
    """
    props = {
        "CidrIp": (allowed_source["ipv4"]
                   if keyisset("ipv4", allowed_source) else Ref(AWS_NO_VALUE)),
        "CidrIpv6": (allowed_source["ipv6"] if keyisset(
            "ipv6", allowed_source) else Ref(AWS_NO_VALUE)),
    }

    if (keyisset("CidrIp", props) and isinstance(props["CidrIp"], str)
            and not CIDR_PAT.match(props["CidrIp"])):
        LOG.error(
            f"Faulty IP address: {allowed_source} - ecs_service {service_name}")
        raise ValueError(
            "Not a valid IPv4 CIDR notation",
            props["CidrIp"],
            "Expected",
            CIDR_REG,
        )
    return props
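CIDR_PAT and CIDR_REG are module constants that are not shown here. A plausible, purely hypothetical definition the validation above would work with:

import re

# Hypothetical pattern: four dotted octets plus a /0-32 prefix length.
# The real module constant may be stricter.
CIDR_REG = r"^(?:\d{1,3}\.){3}\d{1,3}/\d{1,2}$"
CIDR_PAT = re.compile(CIDR_REG)

print(bool(CIDR_PAT.match("10.0.0.0/24")))  # True
print(bool(CIDR_PAT.match("10.0.0.0")))     # False - missing the prefix length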
Example #15
def handle_predefined_policies(bucket: Bucket, param_key: str,
                               managed_policies_key: str,
                               statement: list) -> None:
    """
    Function to configure and add statements for bucket policy based on predefined Bucket Policies

    :param bucket:
    :param str param_key:
    :param str managed_policies_key:
    :param list statement:
    """
    unique_policies = list(
        set(bucket.parameters[param_key]["PredefinedBucketPolicies"]))
    for policy_name in unique_policies:
        if policy_name not in bucket.module.iam_policies[
                managed_policies_key].keys():
            LOG.error(
                f"Policy {policy_name} is not defined as part of possible permissions set"
            )
            continue
        policies = generate_resource_permissions(
            bucket.logical_name,
            bucket.module.iam_policies[managed_policies_key],
            Sub(f"arn:${{{AWS_PARTITION}}}:s3:::${{{bucket.cfn_resource.title}}}"),
        )
        statement += policies[policy_name].PolicyDocument["Statement"]
Example #16
    def __init__(self, service, definition: dict, family, settings: ComposeXSettings):
        self.service = service
        self._definition = copy.deepcopy(definition)
        self.family = family
        self.source_file = set_else_none("SourceFile", self.definition)
        self._parser_files = set_else_none("ParserFiles", self.definition, alt_value=[])
        self._env_vars = set_else_none("EnvironmentVariables", self.definition)
        self.managed_destinations = []
        self.extra_env_vars = set_else_none(
            "EnvironmentVariables", self.definition, alt_value={}
        )

        if keyisset("ComposeXManagedAwsDestinations", self.definition):
            for destination_definition in self.definition[
                "ComposeXManagedAwsDestinations"
            ]:
                if keyisset("log_group_name", destination_definition):
                    self.managed_destinations.append(
                        FireLensCloudWatchManagedDestination(
                            destination_definition, self, settings
                        )
                    )
                elif keyisset("delivery_stream", destination_definition):
                    self.managed_destinations.append(
                        FireLensFirehoseManagedDestination(
                            destination_definition, self, settings
                        )
                    )
                else:
                    LOG.error("Invalid definition for ComposeXManagedAwsDestinations")
                    LOG.error(destination_definition)
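set_else_none is a compose-x convenience helper; from its use above it returns the value of a key when it is set and an alternative (None by default) otherwise. A hypothetical equivalent:

def set_else_none_sketch(key, definition, alt_value=None):
    # Return definition[key] when present and truthy, else the alternative
    return definition[key] if definition.get(key) else alt_value

print(set_else_none_sketch("SourceFile", {"SourceFile": "fluent.conf"}))  # fluent.conf
print(set_else_none_sketch("ParserFiles", {}, alt_value=[]))              # []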
Example #17
def get_topic_config(topic: Topic, account_id: str, resource_id: str) -> dict | None:
    """
    Function to create the mapping definition for SNS topics
    """

    topic_config = {TOPIC_NAME: resource_id}
    client = topic.lookup_session.client("sns")
    attributes_mapping = {
        TOPIC_ARN: "Attributes::TopicArn",
        TOPIC_KMS_KEY: "Attributes::KmsMasterKeyId",
    }
    try:
        topic_r = client.get_topic_attributes(TopicArn=topic.arn)
        attributes = attributes_to_mapping(topic_r, attributes_mapping)
        if keyisset(TOPIC_KMS_KEY, attributes) and not attributes[
            TOPIC_KMS_KEY
        ].startswith("arn:aws"):
            if attributes[TOPIC_KMS_KEY].startswith("alias/aws"):
                LOG.warning(
                    f"{topic.module.res_key}.{topic.name} - Topic uses the default AWS CMK."
                )
            else:
                LOG.warning(
                    f"{topic.module.res_key}.{topic.name} - KMS Key provided is not a valid ARN."
                )
            del attributes[TOPIC_KMS_KEY]
        topic_config.update(attributes)
        return topic_config
    # SNS raises NotFoundException, not an SQS exception, when the topic is gone
    except client.exceptions.NotFoundException:
        return None
    except ClientError as error:
        LOG.error(error)
        raise
Example #18
def lookup_rds_secret(rds_resource, secret_lookup):
    """
    Lookup RDS DB Secret specified

    :param ecs_composex.compose.x_resources.network_x_resources.DatabaseXResource rds_resource:
    :param secret_lookup:
    :return:
    """
    if keyisset("Arn", secret_lookup):
        client = rds_resource.lookup_session.client("secretsmanager")
        try:
            secret_arn = client.describe_secret(
                SecretId=secret_lookup["Arn"])["ARN"]

        except client.exceptions.ResourceNotFoundException:
            LOG.error(f"{rds_resource.module.res_key}.{rds_resource.name}"
                      f" - Secret {secret_lookup['Arn']} not found")
            raise
        except ClientError as error:
            LOG.error(error)
            raise
    elif keyisset("Tags", secret_lookup):
        secret_arn = find_aws_resource_arn_from_tags_api(
            rds_resource.lookup["secret"],
            rds_resource.lookup_session,
            "secretsmanager:secret",
        )
    else:
        raise LookupError(f"{rds_resource.module.res_key}.{rds_resource.name}"
                          " - Failed to find the DB Secret")
    if secret_arn:
        rds_resource.lookup_properties[
            rds_resource.db_secret_arn_parameter] = secret_arn
Example #19
def match_volumes_services_config(service: ComposeService, vol_config: dict,
                                  volumes: list):
    """
    Function to map volume config in services and top-level volumes

    :param service:
    :param vol_config:
    :param volumes:
    :raises LookupError:
    """
    if keyisset("source",
                vol_config) and vol_config["source"].startswith(r"/"):
        vol_config["volume"] = None
        service.volumes.append(vol_config)
        LOG.info(f"volumes.{vol_config['source']} - Mapped to {service.name}")
        return
    else:
        for volume in volumes:
            if not keyisset("source", vol_config) and not keyisset(
                    "volume", volume):
                LOG.error(f"volumes - Failure to process {volume}")
                continue
            if volume.name == vol_config["source"]:
                volume.services.append(service)
                vol_config["volume"] = volume
                service.volumes.append(vol_config)
                LOG.info(f"volumes.{volume.name} - Mapped to {service.name}")
                return
    raise LookupError(
        f"Volume {vol_config['source']} was not found in {[vol.name for vol in volumes]}"
    )
Example #20
def get_replica_group_config(resource, cluster_name, session):
    """
    Function to get the ElastiCache replication group endpoints and security group ID for the mappings

    :param resource: the x-elasticache resource being looked up
    :param str cluster_name: the ReplicationGroupId to describe
    :param boto3.session.Session session: session to make API calls with
    """
    client = session.client("elasticache")
    try:
        cluster_r = client.describe_replication_groups(ReplicationGroupId=cluster_name)
        cluster = cluster_r["ReplicationGroups"][0]
        node_r = client.describe_cache_clusters(
            CacheClusterId=cluster["MemberClusters"][0]
        )
        sg_id = node_r["CacheClusters"][0]["SecurityGroups"][0]["SecurityGroupId"]
        resource.port_attr = elasticache_params.REPLICA_PRIMARY_PORT
        return {
            elasticache_params.REPLICA_PRIMARY_ADDRESS.title: cluster["NodeGroups"][0][
                "PrimaryEndpoint"
            ]["Address"],
            elasticache_params.REPLICA_PRIMARY_PORT.title: cluster["NodeGroups"][0][
                "PrimaryEndpoint"
            ]["Port"],
            elasticache_params.REPLICA_READ_ENDPOINT_ADDRESSES.title: [
                cluster["NodeGroups"][0]["ReaderEndpoint"]["Address"]
            ],
            elasticache_params.REPLICA_READ_ENDPOINT_PORTS.title: [
                cluster["NodeGroups"][0]["ReaderEndpoint"]["Port"]
            ],
            elasticache_params.CLUSTER_SG.title: [sg_id],
        }
    except client.exceptions.ReplicationGroupNotFoundFault as error:
        LOG.error(f"Could not fetch information about {cluster_name}")
        LOG.error(error)
        return None
Example #21
def expand_launch_template_tags_specs(lt, tags):
    """
    Function to expand the LaunchTemplate TagSpecifications with defined x-tags.

    :param lt: the LaunchTemplate object
    :type lt: troposphere.ec2.LaunchTemplate
    :param tags: the Tags as built from x-tags
    :type tags: troposphere.Tags
    """
    LOG.debug("Setting tags to LaunchTemplate")
    try:
        launch_data = getattr(lt, "LaunchTemplateData")
        if hasattr(launch_data, "TagSpecifications"):
            tags_specs = getattr(launch_data, "TagSpecifications")
            if isinstance(tags_specs, list) and tags_specs:
                for tag_spec in tags_specs:
                    if not isinstance(tag_spec, TagSpecifications):
                        continue
                    original_tags = getattr(tag_spec, "Tags")
                    new_tags = original_tags + tags
                    setattr(tag_spec, "Tags", new_tags)
                setattr(launch_data, "TagSpecifications", tags_specs)
    except AttributeError:
        LOG.error("Failed to get the launch template data")
    except Exception as error:
        LOG.error(error)
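The original_tags + tags concatenation works because troposphere's Tags class implements __add__, joining the two tag lists. A short demonstration:

from troposphere import Tags

existing = Tags(Name="cluster-node")
merged = existing + Tags(Environment="dev")
print(merged.to_dict())
# [{'Key': 'Name', 'Value': 'cluster-node'}, {'Key': 'Environment', 'Value': 'dev'}]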
Example #22
def get_mod_function(module_name, function_name):
    """
    Function to get a function from a given ecs_composex module by name

    :param module_name: the name of the module in ecs_composex to find and try to import
    :type module_name: str
    :param function_name: name of the function to try to get
    :type function_name: str

    :return: function, if found, from the module
    :rtype: function
    """
    composex_module_name = f"ecs_composex.{module_name}"
    LOG.debug(composex_module_name)
    function = None
    try:
        res_module = import_module(composex_module_name)
        LOG.debug(res_module)
        try:
            function = getattr(res_module, function_name)
            return function
        except AttributeError:
            LOG.info(f"No {function_name} function found - skipping")
    except ImportError as error:
        LOG.error(f"Failure to process the module {composex_module_name}")
        LOG.error(error)
    return function
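Usage then turns a pair of strings into a callable, falling back to None when either lookup fails (the module and function names below are purely illustrative):

function = get_mod_function("sqs", "create_sqs_template")
if function:
    print(f"Resolved {function.__name__}")
else:
    print("Module or function not found - skipping")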
Example #23
def get_db_cluster_engine_parameter_group_defaults(engine_family):
    """
    Returns a dict of all the parameter group parameters and default values

    :param str engine_family: Engine family we are getting the cluster settings for, i.e. aurora-mysql5.7
    """

    client = boto3.client("rds")
    try:
        req = client.describe_engine_default_cluster_parameters(
            DBParameterGroupFamily=engine_family)
    except ClientError as error:
        LOG.error(error)
        return None
    params_return = {}
    if "EngineDefaults" in req.keys():
        params = req["EngineDefaults"]["Parameters"]
        for param in params:
            if ("ParameterValue" in param.keys()
                    and "{" not in param["ParameterValue"]
                    and "IsModifiable" in param.keys()
                    and param["IsModifiable"] is True
                    and not param["ParameterName"].startswith("rds.")):
                params_return[param["ParameterName"]] = param["ParameterValue"]
            if param["ParameterName"] == "binlog_format":
                params_return[param["ParameterName"]] = "MIXED"
    return params_return
Example #24
def set_from_x_s3(settings, db, db_stack, bucket_name):
    """
    Function to link the RDS DB to a Bucket defined in x-s3

    :param settings:
    :param db:
    :param db_stack:
    :param str bucket_name:
    :return:
    """
    resource = None
    if not keyisset(S3_KEY, settings.compose_content):
        raise KeyError(
            f"No Buckets defined in the Compose file under {S3_KEY}.",
            settings.compose_content.keys(),
        )
    # str.strip() removes characters, not a prefix; removeprefix (Python 3.9+) is safe
    bucket_name = bucket_name.removeprefix("x-s3::")
    buckets = settings.compose_content[S3_KEY]
    if bucket_name not in [res.name for res in buckets.values()]:
        LOG.error(
            f"No bucket {bucket_name} in x-s3. Buckets defined: {[res.name for res in buckets.values()]}"
        )
        return
    for resource in buckets.values():
        if bucket_name == resource.name:
            break
    if not resource:
        return
    if resource.cfn_resource:
        return get_s3_bucket_arn_from_resource(db_stack, resource)
    elif resource.lookup and keyisset("s3", settings.mappings):
        add_update_mapping(db_stack.stack_template, "s3",
                           settings.mappings["s3"])
        return resource.lookup_properties[S3_BUCKET_ARN]
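A note on the prefix handling: str.strip("x-s3::") would not remove the literal prefix; it strips any of the characters x, -, s, 3 and : from both ends, which can eat into the bucket name itself:

name = "x-s3::s3-logs-bucket"
print(name.strip("x-s3::"))         # logs-bucket - the bucket's own "s3-" got eaten
print(name.removeprefix("x-s3::"))  # s3-logs-bucket - removes the prefix only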
Example #25
    def lookup_cluster(self, session):
        """
        Define the ECS Cluster properties and definitions from ECS API.

        :param boto3.session.Session session: Boto3 session to make API calls.
        :return: The cluster details
        :rtype: dict
        """
        if not isinstance(self.lookup, (str, dict)):
            raise TypeError("The value for Lookup must be", str, dict, "Got",
                            type(self.lookup))
        ecs_session = session
        if isinstance(self.lookup, dict):
            if keyisset("RoleArn", self.lookup):
                ecs_session = get_assume_role_session(
                    session,
                    self.lookup["RoleArn"],
                    session_name="EcsClusterLookup@ComposeX",
                )
            cluster_name = self.lookup["ClusterName"]
        else:
            cluster_name = self.lookup
        try:
            clusters = list_all_ecs_clusters(session=ecs_session)
            cluster_names = [
                CLUSTER_NAME_FROM_ARN.match(c_name).group("name")
                for c_name in clusters
            ]
            clusters_config = describe_all_ecs_clusters_from_ccapi(
                clusters,
                return_as_map=True,
                use_cluster_name=True,
                session=ecs_session)
            if cluster_name not in clusters_config.keys():
                raise LookupError(
                    f"Failed to find {cluster_name}. Available clusters are",
                    cluster_names,
                )
            the_cluster = clusters_config[cluster_name]
            LOG.info(
                f"x-cluster.{cluster_name} found. Setting {CLUSTER_NAME.title} accordingly."
            )
            self.mappings = {CLUSTER_NAME.title: {"Name": the_cluster["ClusterName"]}}
            self.set_cluster_mappings(the_cluster)
            self.capacity_providers = evaluate_capacity_providers(the_cluster)
            if self.capacity_providers:
                self.default_strategy_providers = get_default_capacity_strategy(
                    the_cluster)
            self.platform_override = evaluate_fargate_is_set(
                self.capacity_providers, the_cluster)
            self.cluster_identifier = FindInMap(self.mappings_key,
                                                CLUSTER_NAME.title, "Name")
        except ClientError as error:
            LOG.error(error)
            raise
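cluster_identifier above ends up as a FindInMap into the mappings just registered. A small sketch of how troposphere renders it (the map and key names are illustrative):

from troposphere import FindInMap

expr = FindInMap("EcsCluster", "ClusterName", "Name")
print(expr.to_dict())
# {'Fn::FindInMap': ['EcsCluster', 'ClusterName', 'Name']}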
Example #26
def set_healthcheck_definition(props, target_definition):
    """

    :param dict props:
    :param dict target_definition:
    :return:
    """
    healthcheck_props = {
        "HealthCheckEnabled": Ref(AWS_NO_VALUE),
        "HealthCheckIntervalSeconds": Ref(AWS_NO_VALUE),
        "HealthCheckPath": Ref(AWS_NO_VALUE),
        "HealthCheckPort": Ref(AWS_NO_VALUE),
        "HealthCheckProtocol": Ref(AWS_NO_VALUE),
        "HealthCheckTimeoutSeconds": Ref(AWS_NO_VALUE),
        "HealthyThresholdCount": Ref(AWS_NO_VALUE),
    }
    required_mapping = (
        "HealthCheckPort",
        "HealthCheckProtocol",
    )
    required_rex = re.compile(
        r"^([\d]{2,5}):(HTTPS|HTTP|TCP_UDP|TCP|TLS|UDP)$")
    healthcheck_reg = re.compile(
        r"(^(?:[\d]{2,5}):(?:HTTPS|HTTP|TCP_UDP|TCP|TLS|UDP)):?"
        r"((?:[\d]{1}|10):(?:[\d]{1}|10):[\d]{1,3}:[\d]{1,3})?:"
        r"?((?:/[\S][^:]+.$)|(?:/[\S]+)(?::)(?:(?:[\d]{1,4},?){1,}.$)|(?:(?:[\d]{1,4},?){1,}.$))?"
    )
    healthcheck_definition = set_else_none("healthcheck", target_definition)
    if isinstance(healthcheck_definition, str):
        groups = healthcheck_reg.search(healthcheck_definition).groups()
        if not groups[0]:
            raise ValueError(
                "You need to define at least the Protocol and port for healthcheck"
            )
        for count, value in enumerate(required_rex.match(groups[0]).groups()):
            healthcheck_props[required_mapping[count]] = value
        if groups[1]:
            handle_ping_settings(healthcheck_props, groups[1])
        if groups[2]:
            try:
                handle_path_settings(healthcheck_props, groups[2])
            except ValueError:
                LOG.error(target_definition["name"],
                          target_definition["healthcheck"])
                raise
    elif isinstance(healthcheck_definition, dict):
        healthcheck_props.update(healthcheck_definition)
        if keyisset("Matcher", healthcheck_definition):
            healthcheck_props["Matcher"] = Matcher(
                **healthcheck_definition["Matcher"])
    else:
        raise TypeError(
            healthcheck_definition,
            type(healthcheck_definition),
            "must be one of",
            (str, dict),
        )
    props.update(healthcheck_props)
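Reading healthcheck_reg, the string form is roughly port:protocol[:healthy:unhealthy:interval:timeout][:path[:success-codes]] - inferred from the pattern itself, not from separate documentation. A sketch running the same regex against a sample value:

import re

healthcheck_reg = re.compile(
    r"(^(?:[\d]{2,5}):(?:HTTPS|HTTP|TCP_UDP|TCP|TLS|UDP)):?"
    r"((?:[\d]{1}|10):(?:[\d]{1}|10):[\d]{1,3}:[\d]{1,3})?:"
    r"?((?:/[\S][^:]+.$)|(?:/[\S]+)(?::)(?:(?:[\d]{1,4},?){1,}.$)|(?:(?:[\d]{1,4},?){1,}.$))?"
)
print(healthcheck_reg.search("5000:HTTP:2:2:15:3:/health:200,201").groups())
# ('5000:HTTP', '2:2:15:3', '/health:200,201')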
Example #27
def s3_to_firehose(
    resource: Bucket,
    dest_resource: DeliveryStream,
    dest_resource_stack,
    settings: ComposeXSettings,
) -> None:
    """
    Updates
    :param Bucket resource:
    :param DeliveryStream dest_resource:
    :param dest_resource_stack:
    :param settings:
    :return:
    """
    if not dest_resource.cfn_resource:
        LOG.error(
            f"{dest_resource.module.res_key}.{dest_resource.name} - Not a new resource"
        )
        # Nothing to update on a looked-up resource; proceeding would fail on None
        return
    for prop_path, bucket_param in FIREHOSE_PROPERTIES.items():
        prop_attr = get_dest_resource_nested_property(
            prop_path, dest_resource.cfn_resource)
        if skip_if(resource, prop_attr):
            continue
        bucket_id = resource.attributes_outputs[bucket_param]
        if resource.cfn_resource:
            add_parameters(dest_resource_stack.stack_template,
                           [bucket_id["ImportParameter"]])
            setattr(
                prop_attr[0],
                prop_attr[1],
                Ref(bucket_id["ImportParameter"]),
            )
            dest_resource.stack.Parameters.update(
                {bucket_id["ImportParameter"].title: bucket_id["ImportValue"]})
            arn_pointer = Ref(bucket_id["ImportParameter"])
        elif not resource.cfn_resource and resource.mappings:
            add_update_mapping(
                dest_resource.stack.stack_template,
                resource.module.mapping_key,
                settings.mappings[resource.module.mapping_key],
            )
            setattr(prop_attr[0], prop_attr[1], bucket_id["ImportValue"])
            arn_pointer = bucket_id["ImportValue"]
        else:
            raise ValueError(
                resource.module.mapping_key,
                resource.name,
                "Unable to determine if new or lookup",
            )
        map_x_resource_perms_to_resource(
            dest_resource,
            arn_value=arn_pointer,
            access_definition="s3destination",
            access_subkey="kinesis_firehose",
            resource_policies=get_access_types(resource.module.mod_key),
            resource_mapping_key=resource.module.mapping_key,
        )
        dest_resource.ensure_iam_policies_dependencies()
Example #28
def handle_cross_account_permissions(
    family: ComposeFamily,
    service: ComposeService,
    settings: ComposeXSettings,
    parameter_name: str,
    config_value: str,
):
    """
    Function to automatically add cross-account role access for FireHose to the specified role ARN
    :param family:
    :param service:
    :param settings:
    :param parameter_name:
    :param config_value:
    :return:
    """
    try:
        validate_iam_role_arn(config_value)
    except ValueError:
        LOG.error(
            f"{family.name}.{service.name} - FireLens config for firehose role_arn is invalid"
        )
        raise
    policy_title = (
        f"{family.logical_name}{service.logical_name}LoggingFirehoseCrossAccount"
    )
    if policy_title in family.template.resources:
        policy = family.template.resources[policy_title]
        resource = policy.PolicyDocument["Statement"][0]["Resource"]
        if isinstance(resource, str):
            resource = [resource]
        if config_value not in resource:
            policy.PolicyDocument["Statement"][0]["Resource"].append(
                config_value)
    else:
        policy = PolicyType(
            policy_title,
            PolicyName=Sub(
                f"{family.logical_name}{service.logical_name}FireHoseCrossAccountAccess${{STACK_ID}}",
                STACK_ID=STACK_ID_SHORT,
            ),
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Sid": "LoggingFirehoseCrossAccount",
                        "Effect": "Allow",
                        "Action": ["sts:AssumeRole"],
                        "Resource": [config_value],
                    }
                ],
            },
            Roles=family.iam_manager.task_role.name,
        )
        add_resource(family.template, policy)
    return config_value
Example #29
def lookup_service_discovery_namespace(zone: PrivateNamespace,
                                       session: Session,
                                       ns_id: str = None) -> dict:
    """
    Function to find and get the PrivateDnsNamespace properties needed by other resources

    :param PrivateNamespace zone:
    :param boto3.session.Session session:
    :param str ns_id:
    :return: The properties we need
    :rtype: dict
    """
    client = session.client("servicediscovery")
    try:
        namespaces = get_all_dns_namespaces(session)
        if zone.zone_name not in [z["Name"] for z in namespaces]:
            raise LookupError("No private namespace found for zone", zone.name,
                              zone.zone_name)
        zone_r = None
        if not ns_id:
            for l_zone in namespaces:
                if zone.zone_name == l_zone["Name"]:
                    the_zone = l_zone
                    zone_r = client.get_namespace(Id=the_zone["Id"])
                    break
        else:
            zone_r = client.get_namespace(Id=ns_id)
        if not zone_r:
            raise LookupError(
                f"{zone.module.res_key}.{zone.name} - Failed to lookup {zone.zone_name}"
            )
        properties = zone_r["Namespace"]["Properties"]
        if zone_r["Namespace"]["Type"] == "HTTP":
            raise TypeError(
                "Unsupported CloudMap namespace HTTP. "
                "Only DNS namespaces, private or public, are supported")
        return {
            PRIVATE_DNS_ZONE_ID: properties["DnsProperties"]["HostedZoneId"],
            PRIVATE_DNS_ZONE_NAME: LAST_DOT_RE.sub(
                "", properties["HttpProperties"]["HttpName"]
            ),
            PRIVATE_NAMESPACE_ID: zone_r["Namespace"]["Id"],
        }
    except client.exceptions.NamespaceNotFound:
        LOG.error(f"Namespace not found for {zone.name}")
        raise
    except client.exceptions.InvalidInput:
        LOG.error("Failed to retrieve the zone info")
        raise
Example #30
    def upload(self):
        """
        Method to handle uploading the files to S3.
        """
        if not self.can_upload and not self.no_upload:
            LOG.error("BucketName was not specified, not attempting upload")
        elif self.no_upload:
            LOG.debug("No Upload is true. Not uploading")
        else:
            self.url = upload_template(
                self.body, self.bucket, self.file_name, mime=self.mime, validate=False
            )
            LOG.info(f"{self.file_name} uploaded successfully to {self.url}")