def main():
    """
    CLI entry point.

    Parses arguments, builds the execution settings, validates docker/ECR
    configurations, renders the templates, then optionally deploys or plans.

    :return: status code (0 on success, 1 when ECR scan findings block execution)
    """
    cli_parser = main_parser()
    # Invoked with no arguments at all: show usage and stop.
    if len(sys.argv) == 1:
        cli_parser.print_help()
        sys.exit()
    cli_args = cli_parser.parse_args()
    LOG.debug(cli_args)
    exec_settings = ComposeXSettings(**vars(cli_args))
    exec_settings.set_bucket_name_from_account_id()
    LOG.debug(exec_settings)
    # Deploying without uploading the templates is impossible: downgrade to render-only.
    if exec_settings.deploy and not exec_settings.upload:
        LOG.warning(
            "You must update the templates in order to deploy. We won't be deploying."
        )
        exec_settings.deploy = False
    evaluate_docker_configs(exec_settings)
    scan_results = evaluate_ecr_configs(exec_settings)
    # ECR findings abort the run unless the user opted to ignore them.
    if scan_results and not exec_settings.ignore_ecr_findings:
        warnings.warn("SCAN Images failed for instructed images. Failure")
        return 1
    root_stack = generate_full_template(exec_settings)
    process_stacks(root_stack, exec_settings)
    if exec_settings.deploy:
        deploy(exec_settings, root_stack)
    elif exec_settings.plan:
        plan(exec_settings, root_stack)
    return 0
def test_lookup(existing_cluster, nonexisting_cluster):
    """
    Function to test the ECS cluster lookup.

    With an existing cluster, the lookup succeeds and no new cluster resource
    is created (``add_ecs_cluster`` returns False). With a non-existing
    cluster, a new one must be created (returns True).

    :param existing_cluster: compose content pointing to a cluster that exists in AWS
    :param nonexisting_cluster: compose content pointing to a cluster that does not exist
    """
    here = path.abspath(path.dirname(__file__))
    session = boto3.session.Session()
    pill = placebo.attach(session, data_path=f"{here}/x_ecs")
    pill.playback()

    template = Template()
    stack = ComposeXStack("test", stack_template=template)
    settings = ComposeXSettings(
        content=existing_cluster,
        session=session,
        **{
            ComposeXSettings.name_arg: "test",
            ComposeXSettings.command_arg: ComposeXSettings.render_arg,
            ComposeXSettings.format_arg: "yaml",
        },
    )
    cluster = add_ecs_cluster(settings, stack)
    # Cluster found in AWS: nothing new is created.
    assert cluster is False

    template = Template()
    stack = ComposeXStack("test", stack_template=template)
    settings = ComposeXSettings(
        # Fix: exercise the non-existing cluster fixture; it was previously
        # unused and the second case re-ran with `existing_cluster`.
        content=nonexisting_cluster,
        session=session,
        **{
            ComposeXSettings.name_arg: "test",
            ComposeXSettings.command_arg: ComposeXSettings.render_arg,
            ComposeXSettings.format_arg: "yaml",
        },
    )
    cluster = add_ecs_cluster(settings, stack)
    # Lookup fails, so a new cluster resource must be created.
    assert cluster is True
def test_iam_role_arn():
    """
    Validates IAM role ARN handling in settings: a well-formed role ARN is
    accepted, a malformed ARN raises ValueError, and a role that cannot be
    assumed raises ClientError.
    """
    case_path = "settings/role_arn"
    here = path.abspath(path.dirname(__file__))
    session = boto3.session.Session()
    pill = placebo.attach(session, data_path=f"{here}/{case_path}")
    pill.playback()

    def build_settings(role_arn):
        # Shared settings body; only the role ARN varies per test case.
        return ComposeXSettings(
            content=get_basic_content(),
            session=session,
            **{
                ComposeXSettings.name_arg: "test",
                ComposeXSettings.command_arg: ComposeXSettings.render_arg,
                ComposeXSettings.input_file_arg: path.abspath(
                    f"{here}/../../uses-cases/blog.yml"
                ),
                ComposeXSettings.format_arg: "yaml",
                ComposeXSettings.arn_arg: role_arn,
            },
        )

    settings = build_settings("arn:aws:iam::012345678912:role/testx")
    print(settings.secrets_mappings)
    with raises(ValueError):
        build_settings("arn:aws:iam::012345678912:roleX/testx")
    with raises(ClientError):
        build_settings("arn:aws:iam::012345678912:role/test")
def set_log_bucket(
    self,
    cluster_name,
    settings: ComposeXSettings,
    log_configuration,
):
    """
    Defines the S3 bucket and settings to log ECS Execution commands

    :param str cluster_name:
    :param ecs_composex.common.settings.ComposeXSettings settings:
    :param dict log_configuration:
    :return:
    """
    # Private-by-default bucket with the secure-transport-only predefined policy.
    public_access_block = {
        "BlockPublicAcls": True,
        "BlockPublicPolicy": True,
        "IgnorePublicAcls": True,
        "RestrictPublicBuckets": True,
    }
    bucket_config = {
        "Properties": {
            "AccessControl": "BucketOwnerFullControl",
            "PublicAccessBlockConfiguration": public_access_block,
        },
        "MacroParameters": {
            "ExpandRegionToBucket": True,
            "ExpandAccountIdToBucket": True,
            "BucketPolicy": {
                "PredefinedBucketPolicies": ["enforceSecureConnection"]
            },
        },
    }
    # When the managed cluster encryption key is declared under x-kms, use it
    # for SSE-KMS on the bucket (with bucket keys to reduce KMS calls).
    uses_managed_key = keyisset("x-kms", settings.compose_content) and keyisset(
        "ecs-cluster-encryption-key", settings.compose_content["x-kms"]
    )
    if uses_managed_key:
        bucket_config["Properties"]["BucketEncryption"] = {
            "ServerSideEncryptionConfiguration": [
                {
                    "BucketKeyEnabled": True,
                    "ServerSideEncryptionByDefault": {
                        "SSEAlgorithm": "aws:kms",
                        "KMSMasterKeyID": f"x-kms::{MANAGED_KMS_KEY_NAME}",
                    },
                }
            ]
        }
    # Register the managed bucket under x-s3, creating the section if needed.
    if keyisset("x-s3", settings.compose_content):
        settings.compose_content["x-s3"][MANAGED_S3_BUCKET_NAME] = bucket_config
    else:
        settings.compose_content["x-s3"] = {MANAGED_S3_BUCKET_NAME: bucket_config}
    log_configuration["S3BucketName"] = f"x-s3::{MANAGED_S3_BUCKET_NAME}"
    log_configuration["S3KeyPrefix"] = Sub(
        "ecs/execute-logs/${CLUSTER_NAME}/", CLUSTER_NAME=cluster_name
    )
    log_configuration["S3EncryptionEnabled"] = True
def define_vpc_settings(
    settings: ComposeXSettings, vpc_module: XResourceModule, vpc_stack: ComposeXStack
):
    """
    Function to deal with vpc stack settings

    Either creates a default VPC when one is required but absent, maps an
    existing (looked-up) VPC into the root template, or adds the newly
    defined VPC stack to the root stack.
    """
    if settings.requires_vpc() and not vpc_stack.vpc_resource:
        # Services / x-resources need networking but no VPC was declared: build one.
        LOG.info(
            f"{settings.name} - Services or x-Resources need a VPC to function. Creating default one"
        )
        vpc_stack.create_new_default_vpc("vpc", vpc_module, settings)
        settings.root_stack.stack_template.add_resource(vpc_stack)
        vpc_stack.vpc_resource.generate_outputs()
        return
    looked_up = (
        vpc_stack.is_void
        and vpc_stack.vpc_resource
        and vpc_stack.vpc_resource.mappings
    )
    if looked_up:
        # Existing VPC resolved via Lookup: expose it through the Network mapping.
        vpc_stack.vpc_resource.generate_outputs()
        add_update_mapping(
            settings.root_stack.stack_template,
            "Network",
            vpc_stack.vpc_resource.mappings,
        )
    elif (
        vpc_stack.vpc_resource
        and vpc_stack.vpc_resource.cfn_resource
        and vpc_stack.title not in settings.root_stack.stack_template.resources.keys()
    ):
        # New VPC declared in compose files, not yet attached to the root stack.
        settings.root_stack.stack_template.add_resource(vpc_stack)
        LOG.info(
            f"{settings.name}.x-vpc - VPC stack added. A new VPC will be created."
        )
        vpc_stack.vpc_resource.generate_outputs()
def handle_x_dependencies(
    self, settings: ComposeXSettings, root_stack: ComposeXStack
) -> None:
    """
    Updates other resources and replace the values for `x-kinesis` wherever applicable.

    :param settings:
    :param root_stack:
    :return:
    """
    # (resource type, handler) pairs that know how to rewire this resource.
    handlers = [(DeliveryStream, kinesis_to_firehose)]
    for x_resource in settings.get_x_resources(include_mappings=False):
        if not x_resource.cfn_resource:
            continue
        if not x_resource.stack:
            LOG.debug(
                f"resource {x_resource.name} has no `stack` attribute defined. Skipping"
            )
            continue
        for resource_type, handler in handlers:
            if isinstance(x_resource, resource_type) or issubclass(
                type(x_resource), resource_type
            ):
                handler(self, x_resource, x_resource.stack, settings)
def handle_x_kinesis_firehose(
    family: ComposeFamily,
    service: ComposeService,
    settings: ComposeXSettings,
    parameter_name: str,
    config_value: str,
):
    """
    Detects if delivery_stream is x-kinesis_firehose and interpolates the stream name

    :param family:
    :param service:
    :param parameter_name:
    :param config_value:
    :param settings:
    :return: The pointer to kinesis stream
    """
    if not config_value.startswith("x-kinesis_firehose::"):
        # Not an x-resource reference: hand the value back untouched.
        return config_value
    delivery_stream = settings.find_resource(config_value)
    pointer = add_firehose_delivery_stream_for_firelens(
        delivery_stream, {}, family, settings
    )
    if keyisset("region", service.logging.log_options):
        return pointer
    # Region not set on log options: derive it from how the stream is referenced.
    if isinstance(pointer, Ref):
        # Stream created in this template: it lives in the deployment region.
        service.logging.log_options.update({"region": Region})
    elif isinstance(pointer, FindInMap):
        # Looked-up stream: pull the region out of its ARN.
        _arn = delivery_stream.mappings[FIREHOSE_ARN.title]
        service.logging.log_options.update(
            {"region": KINESIS_FIREHOSE_ARN_RE.match(_arn).group("region")}
        )
    return pointer
def handle_x_dependencies(
    self, settings: ComposeXSettings, root_stack: ComposeXStack
) -> None:
    """
    :param settings:
    :param root_stack:
    :return:
    """
    handle_ecs_cluster(settings, bucket=self)
    # (resource type, handler) pairs that know how to link this bucket in.
    handlers = [(DeliveryStream, s3_to_firehose)]
    for x_resource in settings.get_x_resources(include_mappings=False):
        if not x_resource.cfn_resource:
            continue
        if not x_resource.stack:
            LOG.debug(
                f"resource {x_resource.name} has no `stack` attribute defined. Skipping"
            )
            continue
        for resource_type, handler in handlers:
            if isinstance(x_resource, resource_type) or issubclass(
                type(x_resource), resource_type
            ):
                handler(self, x_resource, x_resource.stack, settings)
def main():
    """
    Main entry point for CLI

    :return: status code
    """
    arg_parser = main_parser()
    # No CLI arguments at all: print usage and exit.
    if len(sys.argv) == 1:
        arg_parser.print_help()
        sys.exit()
    parsed = arg_parser.parse_args()
    LOG.debug(parsed)
    run_settings = ComposeXSettings(**vars(parsed))
    run_settings.set_bucket_name_from_account_id()
    run_settings.set_azs_from_api()
    LOG.debug(run_settings)
    # Deploy without upload is impossible: fall back to render-only.
    if run_settings.deploy and not run_settings.upload:
        LOG.warning(
            "You must update the templates in order to deploy. We won't be deploying."
        )
        run_settings.deploy = False
    root_stack = generate_full_template(run_settings)
    process_stacks(root_stack, run_settings)
    if run_settings.deploy:
        deploy(run_settings, root_stack)
    return 0
def map_resource_return_value_to_services_command(
    family: ComposeFamily, settings: ComposeXSettings
) -> None:
    """
    Rewrites ``x-<res_key>::<name>::<return_value>`` tokens found in service
    Command definitions into references to the resolved resource attribute.
    """
    from itertools import chain

    attribute_pattern = re.compile(
        r"^(?P<res_key>x-[\S]+)::(?P<res_name>[\S]+)::(?P<return_value>[\S]+)$"
    )
    for service in chain(family.managed_sidecars, family.ordered_services):
        if not hasattr(service.container_definition, "Command"):
            continue
        command = getattr(service.container_definition, "Command")
        if command == NoValue:
            continue
        rendered_command = []
        for token in command:
            match = attribute_pattern.match(token)
            if not match:
                # Plain command token: keep as-is.
                rendered_command.append(token)
                continue
            resource = settings.find_resource(
                f"{match.group('res_key')}::{match.group('res_name')}"
            )
            return_value = match.group("return_value")
            if return_value not in resource.property_to_parameter_mapping:
                raise KeyError(
                    return_value,
                    "not a valid return value for",
                    resource.module.res_key,
                    resource.name,
                    resource.property_to_parameter_mapping.keys(),
                )
            parameter = resource.property_to_parameter_mapping[return_value]
            res_param_id = resource.add_parameter_to_family_stack(
                family, settings, parameter
            )
            # Resource lives in the family stack itself: reference it directly.
            if res_param_id is resource:
                rendered_command.append(Ref(resource.cfn_resource))
            # New resource in another stack: reference the import parameter.
            elif res_param_id is not resource and resource.cfn_resource:
                rendered_command.append(Ref(res_param_id["ImportParameter"]))
            # Looked-up resource: use the imported value directly.
            else:
                rendered_command.append(res_param_id["ImportValue"])
        service.command = rendered_command
def step_impl(context):
    """
    Loads the compose files listed on the scenario context into ComposeXSettings.
    """
    input_files = [
        path.abspath(f"{here()}/../../../{file_name}") for file_name in context.files
    ]
    print(input_files)
    context.settings = ComposeXSettings(
        # Use the scenario's AWS profile when one was set, otherwise default.
        profile_name=getattr(context, "profile_name", None),
        **{
            ComposeXSettings.name_arg: "test",
            ComposeXSettings.command_arg: ComposeXSettings.render_arg,
            ComposeXSettings.input_file_arg: input_files,
            ComposeXSettings.format_arg: "yaml",
        },
    )
    context.settings.set_bucket_name_from_account_id()
def create_settings(updated_content, case_path):
    """
    Builds ComposeXSettings replaying recorded AWS API calls from ``case_path``.

    :param dict updated_content: compose content for the settings
    :param str case_path: placebo recordings directory, relative to this file
    :return: the initialized settings
    """
    here = path.abspath(path.dirname(__file__))
    session = boto3.session.Session()
    # Replay pre-recorded AWS responses instead of hitting the API.
    placebo.attach(session, data_path=f"{here}/{case_path}").playback()
    return ComposeXSettings(
        content=updated_content,
        session=session,
        **{
            ComposeXSettings.name_arg: "test",
            ComposeXSettings.command_arg: ComposeXSettings.render_arg,
            ComposeXSettings.input_file_arg: path.abspath(
                f"{here}/../features/use-cases/vpc/vpc_from_tags.yml"
            ),
            ComposeXSettings.format_arg: "yaml",
        },
    )
def step_impl(context, file_path):
    """
    Function to import the Docker file from use-cases.

    :param context:
    :param str file_path:
    :return:
    """
    case_file = path.abspath(f"{here()}/../{file_path}")
    context.settings = ComposeXSettings(
        # Use the scenario's AWS profile when one was set, otherwise default.
        profile_name=getattr(context, "profile_name", None),
        **{
            ComposeXSettings.name_arg: "test",
            ComposeXSettings.command_arg: ComposeXSettings.render_arg,
            ComposeXSettings.input_file_arg: [case_file],
            ComposeXSettings.format_arg: "yaml",
        },
    )
    context.settings.set_azs_from_api()
    context.settings.set_bucket_name_from_account_id()
def main():
    """
    Main Function

    :return:
    """
    cli_args = main_parser().parse_args()
    run_settings = ComposeXSettings(**vars(cli_args))
    run_settings.set_bucket_name_from_account_id()
    run_settings.set_azs_from_api()
    vpc_stack = VpcStack(RES_KEY, run_settings)
    process_stacks(vpc_stack, run_settings)
    if run_settings.deploy:
        deploy(run_settings, vpc_stack)
    return 0
def __init__(
    self,
    definition: dict,
    advanced_config: FireLensServiceManagedConfiguration,
    settings: ComposeXSettings,
):
    """
    Initializes the option set and, when delivery_stream points at an
    x-kinesis_firehose resource, wires the managed Firehose stream in.
    """
    self._definition = definition
    self._managed_firehose = None
    self.parent = advanced_config
    stream_id = self._definition["delivery_stream"]
    if stream_id.startswith("x-kinesis_firehose"):
        # delivery_stream refers to a compose-defined Firehose resource.
        self._managed_firehose = settings.find_resource(stream_id)
        add_firehose_delivery_stream_for_firelens(
            self._managed_firehose,
            self.parent.extra_env_vars,
            self.parent.family,
            settings,
        )
    self.process_all_options(self.parent.family, self.parent.service, settings)
def create_settings(updated_content, case_path):
    """
    Builds ComposeXSettings, replaying AWS calls from /tmp/ when recordings
    exist and recording live calls otherwise.

    :param dict updated_content: compose content for the settings
    :param str case_path: unused here; kept for signature parity with callers
    :return: the initialized settings
    """
    here = path.abspath(path.dirname(__file__))
    session = boto3.session.Session()
    pill = placebo.attach(session, data_path="/tmp/")
    try:
        pill.playback()
    except OSError:
        # No recordings available yet: capture the live calls instead.
        pill.record()
    return ComposeXSettings(
        content=updated_content,
        session=session,
        **{
            ComposeXSettings.name_arg: "test",
            ComposeXSettings.command_arg: ComposeXSettings.render_arg,
            ComposeXSettings.input_file_arg: path.abspath(
                f"{here}/../use-cases/blog.yml"
            ),
            ComposeXSettings.format_arg: "yaml",
        },
    )
def test_secrets_import():
    """
    Function to test secrets import
    """
    case_path = "settings/secrets"
    here = path.abspath(path.dirname(__file__))
    session = boto3.session.Session()
    # Replay pre-recorded AWS responses for the secrets lookup.
    placebo.attach(session, data_path=f"{here}/{case_path}").playback()
    # Building the settings performs the secrets import; no further assertions.
    ComposeXSettings(
        content=get_secrets_content(),
        session=session,
        **{
            ComposeXSettings.name_arg: "test",
            ComposeXSettings.command_arg: ComposeXSettings.render_arg,
            ComposeXSettings.input_file_arg: path.abspath(
                f"{here}/../../uses-cases/blog.features.yml"
            ),
            ComposeXSettings.format_arg: "yaml",
        },
    )
def generate_full_template(settings: ComposeXSettings):
    """
    Function to generate the root template and associate services, x-resources to each other.

    * Checks that the docker images and settings are correct before proceeding further
    * Create the root template / stack
    * Create/Find ECS Cluster
    * Create IAM Stack (services Roles and some policies)
    * Create/Find x-resources
    * Link services and x-resources
    * Associates services/family to root stack

    NOTE(review): the statement order below is load-bearing — root stack and
    cluster before modules, VPC wiring before network config, IAM before the
    per-family execute-command permissions.

    :param ecs_composex.common.settings.ComposeXSettings settings: The settings for the execution
    :return root_template: Template, params
    :rtype: root_template, list
    """
    deprecation_warning(settings)
    LOG.info(
        f"Service families to process {[family.name for family in settings.families.values()]}"
    )
    # Root stack, ECS cluster and module manager must exist before anything attaches.
    settings.root_stack = create_root_stack(settings)
    add_ecs_cluster(settings)
    settings.mod_manager = ModManager(settings)
    settings.mod_manager.modules_repr()
    iam_stack = settings.root_stack.stack_template.add_resource(
        IamStack("iam", settings))
    add_x_resources(settings)
    add_compose_families(settings)
    # VPC: create default / map looked-up / add new, per x-vpc settings.
    vpc_module = settings.mod_manager.add_module("x-vpc")
    vpc_stack = VpcStack("vpc", settings, vpc_module)
    define_vpc_settings(settings, vpc_module, vpc_stack)
    if vpc_stack.vpc_resource and (vpc_stack.vpc_resource.cfn_resource
                                   or vpc_stack.vpc_resource.mappings):
        settings.set_networks(vpc_stack)
    # if settings.use_appmesh:
    #     from ecs_composex.appmesh.appmesh_mesh import Mesh
    #
    #     mesh = Mesh(
    #         settings.compose_content["x-appmesh"],
    #         root_stack,
    #         settings,
    #     )
    #     mesh.render_mesh_template(root_stack, settings)
    x_cloud_lookup_and_new_vpc(settings, vpc_stack)
    # First family pass: networking only.
    for family in settings.families.values():
        family.init_network_settings(settings, vpc_stack)
    handle_families_cross_dependencies(settings, settings.root_stack)
    update_network_resources_vpc_config(settings, vpc_stack)
    set_families_ecs_service(settings)
    apply_x_resource_to_x(settings,
                          settings.root_stack,
                          vpc_stack,
                          env_resources_only=True)
    # Second family pass: IAM, execute-command, sidecars and logging.
    for family in settings.families.values():
        add_iam_dependency(iam_stack, family)
        family.set_enable_execute_command()
        if family.enable_execute_command:
            family.apply_ecs_execute_command_permissions(settings)
        family.import_all_sidecars()
        family.handle_logging(settings)
    apply_x_configs_to_ecs(settings,
                           settings.root_stack,
                           modules=settings.mod_manager)
    apply_x_resource_to_x(settings, settings.root_stack, vpc_stack)
    # Final family pass: settings freeze, command interpolation, reporting.
    for family in settings.families.values():
        family.finalize_family_settings()
        map_resource_return_value_to_services_command(family, settings)
        family.state_facts()
    set_ecs_cluster_identifier(settings.root_stack, settings)
    add_all_tags(settings.root_stack.stack_template, settings)
    set_all_mappings_to_root_stack(settings.root_stack, settings)
    return settings.root_stack
def set_kms_key(
    self,
    cluster_name,
    settings: ComposeXSettings,
    log_settings,
    log_configuration,
):
    """
    Defines the KMS Key created to encrypt ECS Execute commands

    :param str cluster_name:
    :param ecs_composex.common.settings.ComposeXSettings settings:
    :param dict log_settings:
    :param dict log_configuration:
    """
    action = [
        "kms:Encrypt*",
        "kms:Decrypt*",
        "kms:ReEncrypt*",
        "kms:GenerateDataKey*",
        "kms:Describe*",
    ]
    statement = [
        {
            "Sid": "Allow direct access to key metadata to the account",
            "Effect": "Allow",
            "Principal": {
                "AWS": Sub(f"arn:${{{AWS_PARTITION}}}:iam::${{{AWS_ACCOUNT_ID}}}:root"
                           )
            },
            "Action": ["kms:*"],
            "Resource": "*",
            "Condition": {
                "StringEquals": {
                    "kms:CallerAccount": Ref(AWS_ACCOUNT_ID)
                }
            },
        },
        {
            "Sid": "Allows SSM to use the KMS key to encrypt/decrypt messages",
            "Effect": "Allow",
            "Principal": {
                "Service": Sub(f"ssm.${{{AWS_URL_SUFFIX}}}")
            },
            "Action": action,
            "Resource": "*",
        },
    ]
    # CloudWatch Logs needs key access when we create the exec log group.
    # Scope the encryption context to the exec log group unless key reuse
    # across log groups was explicitly allowed.
    if keyisset("CreateExecLoggingLogGroup", self.parameters):
        statement.append({
            "Sid": "Allow aws logs to encrypt decrypt messages",
            "Effect": "Allow",
            "Principal": {
                "Service": Sub(f"logs.${{{AWS_REGION}}}.${{{AWS_URL_SUFFIX}}}")
            },
            "Action": action,
            "Resource": "*",
            "Condition": {
                "ArnLike": {
                    "kms:EncryptionContext:aws:logs:arn":
                    Sub(f"arn:${{{AWS_PARTITION}}}:logs:${{{AWS_REGION}}}:${{{AWS_ACCOUNT_ID}}}:"
                        "log-group:*")
                    if keyisset("AllowKmsKeyReuse", self.parameters) else Sub(
                        f"arn:${{{AWS_PARTITION}}}:logs:${{{AWS_REGION}}}:${{{AWS_ACCOUNT_ID}}}:"
                        "log-group:/ecs/execute-logs/${CLUSTER_NAME}*",
                        CLUSTER_NAME=cluster_name,
                    )
                }
            },
        })
    elif keyisset("AllowKmsKeyReuse", self.parameters):
        statement.append({
            "Sid": "Allow aws logs to encrypt decrypt messages",
            "Effect": "Allow",
            "Principal": {
                "Service": Sub(f"logs.${{{AWS_REGION}}}.${{{AWS_URL_SUFFIX}}}")
            },
            "Action": action,
            "Resource": "*",
            "Condition": {
                "ArnLike": {
                    "kms:EncryptionContext:aws:logs:arn":
                    Sub(f"arn:${{{AWS_PARTITION}}}:logs:${{{AWS_REGION}}}:${{{AWS_ACCOUNT_ID}}}:"
                        "log-group:*")
                }
            },
        })
    key_config = {
        "Properties": {
            # Fix: was misspelled "EnableKeyRotationg", which is not a valid
            # AWS::KMS::Key property and would be rejected by CloudFormation.
            "EnableKeyRotation": True,
            "Enabled": True,
            "Description":
            Sub(f"ECS Cluster {cluster_name} execute logging encryption key"
                ),
            "KeyPolicy": {
                "Version": "2012-10-17",
                "Id": "ecscluster-logging",
                "Statement": statement,
            },
        },
        "Settings": {
            "Alias": Sub(
                "alias/ecs/execute-logs/${CLUSTER_NAME}",
                CLUSTER_NAME=cluster_name,
            )
        },
    }
    # Register the managed key under x-kms, creating the section if needed.
    if not keyisset("x-kms", settings.compose_content):
        settings.compose_content["x-kms"] = {
            MANAGED_KMS_KEY_NAME: key_config
        }
    else:
        settings.compose_content["x-kms"][
            MANAGED_KMS_KEY_NAME] = key_config
    log_settings["KmsKeyId"] = f"x-kms::{MANAGED_KMS_KEY_NAME}"
    log_configuration["CloudWatchEncryptionEnabled"] = True