def scan_poll_and_wait(registry, repository_name, image, image_url, ecr_session=None):
    """
    Polls the ECR image scan findings until the scan is no longer in progress.

    Sleeps 10 seconds between attempts and backs off on API quota errors.
    NOTE: loops indefinitely if ECR never reports a terminal scan status.

    :param str registry: The ECR registry (AWS account ID)
    :param str repository_name: Name of the repository the image belongs to
    :param dict image: The imageId structure (imageDigest / imageTag)
    :param str image_url: Full image URL, used for logging only
    :param boto3.session.Session ecr_session: Session to use. Defaults to a new Session.
    :return: The scan report
    :rtype: dict
    """
    if ecr_session is None:
        # Consistency fix with initial_scan_retrieval: the default value of
        # None previously crashed on ecr_session.client().
        ecr_session = Session()
    client = ecr_session.client("ecr")
    while True:
        try:
            image_scan_r = client.describe_image_scan_findings(
                registryId=registry,
                repositoryName=repository_name,
                imageId=image,
            )
            if image_scan_r["imageScanStatus"]["status"] == "IN_PROGRESS":
                LOG.info(f"{image_url} - Scan in progress - waiting 10 seconds")
                sleep(10)
            else:
                return image_scan_r
        except client.exceptions.LimitExceededException:
            LOG.warn(f"{image_url} - Exceeding API Calls quota. Waiting 10 seconds")
            sleep(10)
def find_closest_fargate_configuration(cpu, ram, as_param_string=False):
    """
    Returns the closest valid Fargate CPU/RAM configuration for a requested
    CPU and RAM combination.

    :param int cpu: CPU count for the Task Definition
    :param int ram: RAM in MB for the Task Definition
    :param bool as_param_string: Returns the value as a CFN Fargate Configuration.
    :return: the (cpu, ram) configuration, or its CFN string form
    """
    valid_cpus = sorted(FARGATE_MODES.keys())
    fargate_cpu = clpow2(cpu)
    if fargate_cpu < cpu:
        # Rounding down undershot the request: round up to the next power of 2.
        fargate_cpu = nxtpow2(cpu)
    if fargate_cpu not in valid_cpus:
        LOG.warn(
            f"Value {cpu} is not valid for Fargate. Valid modes: {valid_cpus}"
        )
    # Clamp to the supported Fargate CPU range.
    fargate_cpu = min(max(fargate_cpu, valid_cpus[0]), valid_cpus[-1])
    fargate_ram = find_closest_ram_config(ram, FARGATE_MODES[fargate_cpu])
    if as_param_string:
        return f"{fargate_cpu}!{fargate_ram}"
    return fargate_cpu, fargate_ram
def rds_to_ecs(rdsdbs, services_stack, services_families, rds_root_stack, settings):
    """
    Applies the RDS databases settings onto the existing ECS service templates.

    :param dict rdsdbs: the x-rds databases definitions
    :param services_stack:
    :param services_families: Families definition
    :param rds_root_stack:
    :param settings:
    :raises KeyError: when a database is not present in the RDS root template
    :return:
    """
    for db_name, db_def in rdsdbs.items():
        if db_name not in rds_root_stack.stack_template.resources:
            raise KeyError(f"DB {db_name} not defined in RDS Root template")
        if not keyisset("Services", db_def):
            LOG.warn(f"DB {db_name} has no services defined.")
            continue
        secret_import = define_db_secret_import(db_name)
        for service in db_def["Services"]:
            handle_db_to_service_settings(
                db_name,
                db_def,
                secret_import,
                service,
                services_families,
                services_stack,
                rds_root_stack,
            )
def add_public_security_group_ingress(self, security_group):
    """
    Adds ingress rules from external sources to a given Security Group
    (ie. ALB Security Group).

    Sources come from config['ext_sources'] in the network section of the
    configs for the service; when no IPv4 source is indicated, traffic is
    allowed from 0.0.0.0/0 by default.

    :param security_group: security group (object or title string) to add the rules to
    :type security_group: str or troposphere.ec2.SecurityGroup
    """
    if not self.config.ext_sources:
        # Default: any IPv4 source, all protocols.
        self.config.ext_sources = [
            {"ipv4": "0.0.0.0/0", "protocol": -1, "source_name": "ANY"}
        ]
    for allowed_source in self.config.ext_sources:
        has_source = keyisset("ipv4", allowed_source) or keyisset(
            "ipv6", allowed_source
        )
        if not has_source:
            LOG.warn("No IPv4 or IPv6 set. Skipping")
            continue
        props = generate_security_group_props(allowed_source, self.service_name)
        if not props:
            continue
        LOG.debug(f"Adding {allowed_source} for ingress")
        self.create_lb_ingress_rule(allowed_source, security_group, **props)
def evaluate_ecr_configs(settings) -> int:
    """
    Evaluates the ECR image validity for each service of each family in its
    final state.

    :param ecs_composex.common.settings.ComposeXSettings settings: The settings for the execution
    :return: 1 when vulnerabilities were found on any image, 0 otherwise
    """
    result = 0
    if not SCANS_POSSIBLE:
        return result
    for family in settings.families.values():
        for service in family.services:
            if not isinstance(service.image, str):
                continue
            if not keyisset("x-ecr", service.definition) or invalidate_image_from_ecr(
                service, True
            ):
                continue
            service_image = define_service_image(service, settings)
            wants_digest = (
                service.ecr_config
                and keyisset("InterpolateWithDigest", service.ecr_config)
                and keyisset("imageDigest", service_image)
            )
            if wants_digest:
                # Pin the image to its digest rather than its mutable tag.
                service.image = interpolate_ecr_uri_tag_with_digest(
                    service.image, service_image["imageDigest"]
                )
                LOG.info(
                    f"Update service {family.name}.{service.name} image to {service.image}"
                )
            if scan_service_image(service, settings, service_image):
                LOG.warn(f"{family.name}.{service.name} - vulnerabilities found")
                result = 1
            else:
                LOG.info(f"{family.name}.{service.name} - ECR Evaluation Passed.")
    return result
def evaluate_docker_configs(settings):
    """
    Goes over the services settings and evaluates x-docker_opts.

    When InterpolateWithDigest is set (and the image is not in AWS ECR, which
    is handled by x-ecr), replaces the image tag with the image digest.

    :param ecs_composex.common.settings.ComposeXSettings settings: The settings for the execution
    :return:
    """
    # Matches a trailing "@sha<N>:<digest>" or a trailing ":<tag>".
    # Bug fix: the class was written [a-z-Z0-9], which parses as a-z, literal
    # "-", literal "Z", 0-9 and silently omits A-Y. Intended: [a-zA-Z0-9].
    image_tag_re = re.compile(r"(?P<tag>(?:@sha\d+:[a-zA-Z0-9]+$)|(?::[\S]+$))")
    for family in settings.families.values():
        for service in family.services:
            if not keyisset("x-docker_opts", service.definition):
                continue
            docker_config = service.definition["x-docker_opts"]
            if SCANS_POSSIBLE:
                if keyisset("InterpolateWithDigest", docker_config):
                    if not invalidate_image_from_ecr(service, mute=True):
                        # ECR-hosted images must use x-ecr for digest interpolation.
                        LOG.warn(
                            "You set InterpolateWithDigest to true for x-docker for an image in AWS ECR."
                            "Please refer to x-ecr"
                        )
                        continue
            else:
                warnings.warn(
                    "Run pip install ecs_composex[ecrscan] to use x-ecr features"
                )
            service.retrieve_image_digest()
            if service.image_digest:
                service.image = image_tag_re.sub(
                    f"@{service.image_digest}", service.image
                )
                LOG.info(f"Successfully retrieved digest for {service.name}.")
                LOG.info(f"{service.name} - {service.image}")
def handle_json_validation(resource: SsmParameter, value: str, file_path: str) -> str:
    """
    Validates (and optionally minimizes) the JSON content read from a file.

    :param SsmParameter resource:
    :param str value: Value read from file
    :param str file_path: Path the value came from, used for logging
    :return: the value, minimized when MinimizeJson is set
    :raises json.decoder.JSONDecodeError: when invalid and IgnoreInvalidJson is not set
    """
    try:
        payload = json.loads(value)
    except json.decoder.JSONDecodeError:
        if not keyisset("IgnoreInvalidJson", resource.parameters):
            LOG.error(f"{resource.name} - The content of {file_path} "
                      "did not pass JSON validation.")
            raise
        LOG.warn(
            f"{resource.name} - The content of {file_path} "
            "did not pass JSON validation. Skipping due to IgnoreInvalidJson"
        )
        return value
    if keyisset("MinimizeJson", resource.parameters):
        # Re-serialize without whitespace to minimize the payload.
        return json.dumps(payload, separators=(",", ":"))
    return value
def handle_yaml_validation(resource: SsmParameter, value: str, file_path: str) -> str:
    """
    Validates the YAML content read from a file, optionally rendering it to JSON.

    :param SsmParameter resource:
    :param str value: Value read from file
    :param str file_path: Path the value came from, used for logging
    :return: the value, rendered as minimized JSON when RenderToJson is set
    :raises yaml.YAMLError: when invalid and IgnoreInvalidYaml is not set
    """
    try:
        payload = yaml.load(value, Loader=Loader)
    except yaml.YAMLError:
        if not keyisset("IgnoreInvalidYaml", resource.parameters):
            LOG.error(f"{resource.name} - The content of {file_path} "
                      "did not pass YAML validation.")
            raise
        LOG.warn(
            f"{resource.name} - The content of {file_path} "
            "did not pass YAML validation. Skipping due to IgnoreInvalidYaml"
        )
        return value
    if keyisset("RenderToJson", resource.parameters):
        # Render the parsed YAML as compact JSON.
        return json.dumps(payload, separators=(",", ":"))
    return value
def render_new_parameters(
    new_resources: list[SsmParameter], root_stack: ComposeXStack
) -> None:
    """
    Renders the new SSM Parameters and adds them to the root stack template.

    :param list[SsmParameter] new_resources:
    :param ecs_composex.common.stacks.ComposeXStack root_stack:
    :raises ValueError: for SecureString parameters (not supported by CFN), or
        when no value could be determined.
    """
    for new_res in new_resources:
        if (
            keyisset("Type", new_res.definition)
            and new_res.definition["Type"] == "SecureString"
        ):
            raise ValueError(f"{new_res.name} AWS CFN does not support SecureString.")
        value = None
        if new_res.parameters and keyisset("FromFile", new_res.parameters):
            value = import_value_from_file(new_res)
        if keyisset("Value", new_res.properties):
            if value:
                # Properties.Value takes precedence over FromFile.
                LOG.warn(
                    "Both Value and FromFile properties were set. Using Value from Properties"
                )
            value = new_res.properties["Value"]
        if not value:
            raise ValueError(f"{new_res.name} - Failed to determine the value")
        if keyisset("EncodeToBase64", new_res.parameters):
            value = Base64(value)
        new_res.properties.update({"Value": value})
        param_props = import_record_properties(
            new_res.properties, CfnSsmParameter, ignore_missing_required=False
        )
        new_res.cfn_resource = CfnSsmParameter(new_res.logical_name, **param_props)
        root_stack.stack_template.add_resource(new_res.cfn_resource)
        new_res.init_outputs()
        new_res.generate_outputs()
        add_outputs(root_stack.stack_template, new_res.outputs)
def set_replace_iam_role(resource: DeliveryStream) -> None:
    """
    Sets, updates, or leaves untouched the RoleARN of

    * "S3DestinationConfiguration"
    * "RedshiftDestinationConfiguration"
    * "KinesisStreamSourceConfiguration"
    * "ExtendedS3DestinationConfiguration"
    * "ElasticsearchDestinationConfiguration"
    * "AmazonopensearchserviceDestinationConfiguration"

    DoNotOverrideIamRole may be boolean true (never override any destination)
    or a list of destination properties that must not be overridden.

    :param DeliveryStream resource:
    """
    dont_override = set_else_none(
        "DoNotOverrideIamRole", resource.parameters, eval_bool=True
    )
    # Bug fix: the previous `if dont_override: return` also returned for list
    # values, which made the per-destination list handling below unreachable.
    # Only a plain boolean true skips everything.
    if dont_override and isinstance(dont_override, bool):
        LOG.info(
            f"{resource.module.res_key}.{resource.name}"
            " - Not overriding any RoleARN defined for delivery destinations"
        )
        return
    to_evaluate_role_arn = [
        "AmazonopensearchserviceDestinationConfiguration",
        "S3DestinationConfiguration",
        "KinesisStreamSourceConfiguration",
        "ElasticsearchDestinationConfiguration",
        "ExtendedS3DestinationConfiguration",
        "RedshiftDestinationConfiguration",
    ]
    for dest_prop in to_evaluate_role_arn:
        if not hasattr(resource.cfn_resource, dest_prop):
            LOG.debug(
                f"{resource.module.res_key}.{resource.name} - No {dest_prop} set"
            )
        elif (
            dont_override
            and isinstance(dont_override, list)
            and dest_prop in dont_override
        ):
            # Fixed stray "f" prefix that was embedded inside the f-string.
            LOG.warn(
                f"{resource.module.res_key}.{resource.name} - {dest_prop} not overriding with new IAM Role"
            )
        else:
            LOG.debug(
                f"{resource.module.res_key}.{resource.name} - {dest_prop} overriding with new IAM Role"
            )
            dest_config = getattr(resource.cfn_resource, dest_prop)
            setattr(
                dest_config,
                "RoleARN",
                GetAtt(resource.iam_manager.service_linked_role, "Arn"),
            )
            set_replace_s3_backup_config(resource, dest_config)
def handle_kbytes(value):
    """
    Handles a value expressed in KBytes, converting it to MBytes.

    :param value: the string value, with its unit suffix
    :return: the amount in MB, clamped to the minimum supported by Docker
    :rtype: int
    """
    amount = float(re.sub(NUMBERS_REG, "", value))
    unit = "KBytes"
    # NOTE(review): the message mentions 512MB but the threshold is
    # MINIMUM_SUPPORTED * 1024 (KB) — confirm the constant matches the message
    # (the sibling handle_bytes message implies MINIMUM_SUPPORTED is 4 MB).
    if amount < (MINIMUM_SUPPORTED * 1024):
        LOG.warn(
            f"You set unit to {unit} and value is lower than 512MB. Setting to minimum supported by Docker"
        )
        return MINIMUM_SUPPORTED
    return int(amount / 1024)
def define_new_namespace(new_namespaces, stack_template):
    """
    Creates new AWS CloudMap namespaces and associates it with the stack template

    :param list[PrivateNamespace] new_namespaces: list of PrivateNamespace to process
    :param troposphere.Template stack_template: The template to add the new resources to
    :raises ValueError: when Properties.Name is set and differs from ZoneName
    :raises AttributeError: when no CFN resource could be created for a namespace
    """
    for namespace in new_namespaces:
        if namespace.properties:
            # Name, when present in Properties, must agree with ZoneName.
            if (
                keyisset("Name", namespace.properties)
                and namespace.zone_name != namespace.properties["Name"]
            ):
                raise ValueError(
                    f"{namespace.module.res_key}.{namespace.name} - "
                    "ZoneName and Properties.Name must be the same value when set."
                )
            elif not keyisset("Name", namespace.properties):
                namespace.properties["Name"] = namespace.zone_name
            # NOTE(review): this branch builds the CFN resource from
            # PrivateNamespace while the default branch uses PrivateDnsNamespace
            # — confirm both classes are the intended troposphere resources.
            namespace_props = import_record_properties(
                namespace.properties, PrivateNamespace
            )
            if keyisset("Vpc", namespace_props):
                LOG.warn(
                    f"{namespace.module.res_key}.{namespace.name} - "
                    "Vpc property was set. Overriding to compose-x x-vpc defined for execution."
                )
            # The VPC always resolves to this execution's x-vpc.
            namespace_props["Vpc"] = f"x-vpc::{VPC_ID.title}"
            namespace.cfn_resource = PrivateNamespace(
                namespace.logical_name, **namespace_props
            )
        elif namespace.uses_default:
            # Default: minimal namespace from zone name + execution VPC.
            namespace_props = import_record_properties(
                {"Name": namespace.zone_name, "Vpc": f"x-vpc::{VPC_ID.title}"},
                PrivateDnsNamespace,
            )
            namespace.cfn_resource = PrivateDnsNamespace(
                namespace.logical_name, **namespace_props
            )
        if not namespace.cfn_resource:
            raise AttributeError(
                f"{namespace.module.res_key}.{namespace.name} - "
                "Failed to create PrivateNamespace from Properties/MacroParameters"
            )
        stack_template.add_resource(namespace.cfn_resource)
        namespace.init_outputs()
        namespace.generate_outputs()
def handle_bytes(value):
    """
    Handles a value expressed in Bytes, converting it to MBytes.

    :param value: the string value, with its unit suffix
    :return: the amount in MB, clamped to the minimum supported by Docker
    :rtype: int or Ref(AWS_NO_VALUE)
    """
    amount = float(re.sub(NUMBERS_REG, "", value))
    unit = "Bytes"
    if amount < (MINIMUM_SUPPORTED * 1024 * 1024):
        LOG.warn(
            f"You set unit to {unit} and value is lower than 4MB. Setting to minimum supported by Docker"
        )
        return MINIMUM_SUPPORTED
    # Cast to int to honour the documented return type and stay consistent
    # with handle_kbytes, which also returns an int amount of MB.
    return int((amount / 1024) / 1024)
def add_role_boundaries(iam_role, policy):
    """
    Sets a permissions boundary onto an IAM role.

    :param iam_role: the IAM Role to add the boundary to
    :type iam_role: troposphere.iam.Role
    :param policy: the name or ARN of the policy
    :type policy: str
    :raises TypeError: when iam_role is not a troposphere.iam.Role
    """
    if not isinstance(iam_role, Role):
        # Single formatted message instead of the previous multi-arg
        # TypeError, which rendered as a tuple.
        raise TypeError(f"{iam_role} is of type {type(iam_role)}, expected {Role}")
    policy = define_iam_policy(policy)
    if hasattr(iam_role, "PermissionsBoundary"):
        LOG.warn(
            f"IAM Role {iam_role.title} already has PermissionsBoundary set. Overriding"
        )
    setattr(iam_role, "PermissionsBoundary", policy)
def scan_service_image(service, settings, the_image=None):
    """
    Reviews the service definition and evaluates the ECR scan when the
    VulnerabilitiesScan properties are defined.

    :param ecs_composex.common.compose_services.ComposeService service:
    :param ecs_composex.common.settings.ComposeXSettings settings: The settings for the execution
    :param the_image: The image to use for scanning references.
    :return: the scan result evaluated against the thresholds, or None
    """
    if validate_input(service):
        return
    vulnerability_config = service.ecr_config["VulnerabilitiesScan"]
    if keyisset("Thresholds", vulnerability_config):
        # User thresholds override the defaults, key by key.
        thresholds = dict(DEFAULT_THRESHOLDS)
        thresholds.update(vulnerability_config["Thresholds"])
    else:
        LOG.warn(f"No thresholds defined. Using defaults {DEFAULT_THRESHOLDS}")
        thresholds = DEFAULT_THRESHOLDS
    validate_the_image_input(the_image)
    parts = service.image.private_ecr
    repo_name = parts.group("repo_name")
    account_id = parts.group("account_id")
    region = parts.group("region")
    session = define_ecr_session(
        account_id,
        repo_name,
        region,
        settings,
        # Bug fix: the value is read from service.ecr_config, so the presence
        # check must be on ecr_config too (it previously checked
        # vulnerability_config, so a set RoleArn raised KeyError).
        role_arn=service.ecr_config["RoleArn"]
        if keyisset("RoleArn", service.ecr_config)
        else None,
    )
    security_findings = wait_for_scan_report(
        registry=account_id,
        repository_name=repo_name,
        image=the_image,
        image_url=service.image,
        ecr_session=session,
    )
    return define_result(
        service.image, security_findings, thresholds, vulnerability_config
    )
def render_final_template(root_template):
    """
    Recursively renders all ComposeX stacks found in a template.

    Sub stacks are rendered depth-first before their parent.

    :param root_template: the root template to iterate over the resources.
    :type root_template: troposphere.Template
    """
    for resource_name, resource in root_template.resources.items():
        if isinstance(resource, (XModuleStack, ComposeXStack)):
            LOG.debug(resource)
            LOG.debug(resource.TemplateURL)
            render_final_template(resource.stack_template)
            resource.render()
        elif isinstance(resource, Stack):
            # Plain troposphere Stack resources are not managed here.
            LOG.warn(resource_name)
            LOG.warn(resource)
def acm_to_ecs(acms, services_stack, services_families, acm_root_stack, settings):
    """
    Applies ACM certificates settings to the ECS Services.

    :param acms: the x-acm certificates definitions
    :param services_stack:
    :param services_families:
    :param acm_root_stack:
    :param settings:
    :raises KeyError: when a certificate is not in the ACM root template
    """
    for cert_name in acms:
        cert_def = acms[cert_name]
        if cert_name not in acm_root_stack.stack_template.resources:
            # Fixed messages copy-pasted from rds_to_ecs ("DB ... RDS Root").
            raise KeyError(f"Certificate {cert_name} not defined in ACM Root template")
        if not keyisset("Services", cert_def):
            LOG.warn(f"Certificate {cert_name} has no services defined.")
            continue
        cert_import = get_import_value(cert_name, acm_params.CERT_CN_T)
        apply_to_ecs(
            cert_import, cert_def, services_families, services_stack, acm_root_stack
        )
def set_replace_cw_logs_config(
    resource: DeliveryStream,
    dest_prop: str,
    dest_config,
    template: troposphere.Template,
) -> None:
    """
    Points the destination's CloudWatchLoggingOptions at the resource LogGroup.

    Creates one LogStream per destination property, unless logging was
    explicitly disabled on an existing configuration.

    :param DeliveryStream resource:
    :param str dest_prop: The destination property name being configured
    :param dest_config: The destination configuration object of the stream
    :param troposphere.Template template: The template to add the LogStream to
    """

    def new_log_stream() -> LogStream:
        # Factored out: both branches previously duplicated this block.
        return template.add_resource(
            LogStream(
                f"{resource.logical_name}{dest_prop}LogStream",
                LogGroupName=Ref(resource.log_group),
                LogStreamName=dest_prop,
            )
        )

    if not hasattr(dest_config, "CloudWatchLoggingOptions"):
        setattr(
            dest_config,
            "CloudWatchLoggingOptions",
            CloudWatchLoggingOptions(
                Enabled=True,
                LogGroupName=Ref(resource.log_group),
                LogStreamName=Ref(new_log_stream()),
            ),
        )
        return
    cw_config = getattr(dest_config, "CloudWatchLoggingOptions")
    if hasattr(cw_config, "Enabled") and cw_config.Enabled is False:
        # User explicitly disabled logging: leave the configuration alone.
        LOG.warn(
            f"{resource.module.res_key}.{resource.name}.{dest_prop} - CW Logging explicitly disabled"
        )
        return
    setattr(cw_config, "LogGroupName", Ref(resource.log_group))
    setattr(cw_config, "LogStreamName", Ref(new_log_stream()))
def add_routes(self, nodes):
    """
    Registers the routes onto the virtual router.

    :param nodes: the mesh nodes the routes point to
    :raises ValueError: when a route protocol does not match the router protocol
    """
    for route_protocol in self.raw_routes.keys():
        if route_protocol != self.protocol:
            # Fixed typo: "he virtual router" -> "The virtual router".
            raise ValueError(
                f"The virtual router is configured for {self.protocol} "
                f"but a route for protocol {route_protocol} has been found. This is not supported."
            )
        if route_protocol == "http" or route_protocol == "http2":
            self.handle_http_route(
                self.raw_routes[route_protocol],
                self.router,
                nodes,
                # Direct comparison replaces the previous needless eval() call.
                route_protocol == "http2",
            )
        elif route_protocol == "tcp":
            self.handle_tcp_route(self.raw_routes[route_protocol], self.router, nodes)
        elif route_protocol == "grpc":
            # Fixed typo "grcp": the warning could never fire for gRPC routes.
            LOG.warn("gRPC is not yet supported. Sorry.")
def initial_scan_retrieval(
    registry, repository_name, image, image_url, trigger_scan, ecr_session=None
):
    """
    Retrieves the scan findings from ECR and, if none exist, optionally
    triggers a new scan.

    :param str registry:
    :param str repository_name:
    :param dict image:
    :param str image_url:
    :param bool trigger_scan: whether a scan may be triggered when no report exists
    :param boto3.session.Session ecr_session:
    :return: The scan report, or None when no report was found
    :rtype: dict
    """
    session = ecr_session if ecr_session is not None else Session()
    client = session.client("ecr")
    try:
        return client.describe_image_scan_findings(
            registryId=registry, repositoryName=repository_name, imageId=image
        )
    except client.exceptions.ScanNotFoundException:
        LOG.error(f"No scan report found for {image_url}")
    # Only reached when no report was found.
    if trigger_scan:
        LOG.info(f"Triggering scan for {image_url}, trigger_scan={trigger_scan}")
        trigger_images_scan(
            repo_name=repository_name,
            images_to_scan=[image],
            ecr_session=session,
        )
    else:
        LOG.warn(
            f"No scan was available and scanning not requested for {image_url}. Skipping"
        )
    return None
def retrieve_services(
    settings: ComposeXSettings, services: dict, x_stack: ComposeXStack
) -> list[tuple]:
    """
    Maps the requested service families to their ECS service name outputs and
    wires these outputs as parameters of the given stack.

    :param ecs_composex.common.settings.ComposeXSettings settings:
    :param dict services:
    :param ecs_composex.common.stacks.ComposeXStack x_stack:
    :return: list of (family stack title, Parameter) tuples
    """
    services_params = []
    families_original_names = [f.name for f in settings.families.values()]
    for name in services:
        if name not in families_original_names:
            LOG.warn(f"Service family {name} is not defined. Skipping")
            continue
        family = get_family_from_name(settings, name)
        if family is None:
            LOG.warn(
                f"Could not identify the {name} family in {families_original_names}"
            )
            continue
        s_param = Parameter(f"{family.stack.title}{SERVICE_T}Name", Type="String")
        if SERVICE_T not in family.template.outputs:
            # Expose the ECS service name from the family stack.
            add_outputs(
                family.template,
                [Output(s_param.title, Value=GetAtt(SERVICE_T, "Name"))],
            )
        x_stack.Parameters.update(
            {s_param.title: GetAtt(family.stack.title, f"Outputs.{s_param.title}")}
        )
        services_params.append((family.stack.title, s_param))
    add_parameters(x_stack.stack_template, [param for _, param in services_params])
    return services_params
def process_stacks(root_stack, settings):
    """
    Recursively processes all ComposeX stacks of a template, passing the root
    stack name to every nested stack, then renders the root stack.

    :param root_stack: the root template to iterate over the resources.
    :type root_stack: ecs_composex.common.stacks.ComposeXStack
    :param settings: The settings for execution
    :type settings: ecs_composex.common.settings.ComposeXSettings
    """
    for resource_name, resource in root_stack.stack_template.resources.items():
        # isinstance already covers subclasses of ComposeXStack.
        if isinstance(resource, ComposeXStack):
            LOG.debug(resource)
            LOG.debug(resource.title)
            process_stacks(resource, settings)
            resource.Parameters.update(cfn_conditions.pass_root_stack_name())
        elif isinstance(resource, Stack):
            # Plain troposphere Stack resources are not managed by compose-x.
            LOG.warn(resource_name)
            LOG.warn(resource)
    root_stack.render(settings)