def test_condition_equality(self):
    """Conditions are equal (with equal hashes) only when operator, key and
    values all match; differing values or a differing operator break equality."""
    def like(prefix):
        return Condition(StringLike("s3:prefix", [prefix]))

    def equals(prefix):
        return Condition(StringEquals("s3:prefix", [prefix]))

    home = "home/${aws:username}/*"
    other = "other/${aws:username}/*"
    # Same operator, same key, same values -> equal and hash-equal.
    self.assertEqualWithHash(like(home), like(home))
    # Same operator, different values -> unequal.
    self.assertNotEqualWithHash(like(home), like(other))
    # Different operator, same values -> unequal.
    self.assertNotEqualWithHash(like(home), equals(home))
def statement_deny_remove_boundary_policy(self) -> Statement:
    """Build a Deny statement that blocks removing this permissions boundary.

    Denies iam:DeleteRolePermissionsBoundary / iam:DeleteUserPermissionsBoundary
    on any resource, but only when the boundary in question is ``self.policy_arn``.
    """
    boundary_actions = [
        awacs.iam.DeleteRolePermissionsBoundary,
        awacs.iam.DeleteUserPermissionsBoundary,
    ]
    # Scope the deny to this specific boundary policy.
    boundary_condition = Condition(
        StringEquals({"iam:PermissionsBoundary": self.policy_arn})
    )
    return Statement(
        Sid="DenyRemovalOfBoundaryFromUserOrRole",
        Effect=Deny,
        Action=boundary_actions,
        Resource=["*"],
        Condition=boundary_condition,
    )
def kms_key_statements(key_arn, bucket_arn, bucket_prefix):
    """Key-policy statements allowing S3 (SSE-KMS) use of a KMS key.

    Args:
        key_arn: ARN (or intrinsic ref) of the KMS key.
        bucket_arn: ARN of the S3 bucket whose objects use the key.
        bucket_prefix: Object-key prefix the grant is scoped to.

    Returns:
        list: a single Allow Statement granting kms:Decrypt and
        kms:GenerateDataKey, conditioned so the key may only be used via
        S3 in this region for objects under ``bucket_prefix``.
    """
    # BUG FIX: the original joined ["s3.", REGION, "amazonaws.com"], producing
    # e.g. "s3.us-east-1amazonaws.com" (no dot before "amazonaws.com"), which
    # can never match the kms:ViaService value "s3.<region>.amazonaws.com".
    s3_endpoint = Join('', ["s3.", REGION, ".amazonaws.com"])
    return [
        Statement(
            Effect=Allow,
            Action=[
                awacs.kms.Decrypt,
                awacs.kms.GenerateDataKey,
            ],
            Resource=[key_arn],
            Condition=Condition([
                # Only when the request comes through S3 in this region...
                StringEquals("kms:ViaService", s3_endpoint),
                # ...and the S3 encryption context matches our bucket/prefix.
                StringLike("kms:EncryptionContext:aws:s3:arn",
                           Join('', [bucket_arn, bucket_prefix, "*"]))
            ])
        )
    ]
def create_role(self):
    """Add the Firehose IAM role (and a 'Role' output) to the template.

    The trust policy lets the Firehose service assume the role only when
    sts:ExternalId equals this account's ID.
    """
    template = self.template
    assume_statement = Statement(
        Effect=Allow,
        Action=[sts.AssumeRole],
        Principal=Principal('Service', ['firehose.amazonaws.com']),
        Condition=Condition(
            StringEquals('sts:ExternalId', Ref('AWS::AccountId')),
        ),
    )
    template.add_resource(
        iam.Role(
            FIREHOSE_ROLE,
            AssumeRolePolicyDocument=Policy(Statement=[assume_statement]),
            Path='/',
            Policies=self.generate_iam_policies(),
        ),
    )
    template.add_output(Output('Role', Value=Ref(FIREHOSE_ROLE)))
def generate_cf_template():
    """
    Return a complete CloudFormation Template (troposphere) for Lambda Chat:
    two parameters (Google OAuth client ID, website bucket name), an SNS
    topic for chat messages, an IAM role/policy the website assumes via
    Google web identity federation to publish to the topic, and a public
    static-website S3 bucket with its bucket policy. Outputs expose the
    topic, role ARN, and bucket.
    """
    # Header of CloudFormation template
    t = Template()
    t.add_version("2010-09-09")
    t.add_description("Lambda Chat AWS Resources")

    # Parameters
    description = "should match [0-9]+-[a-z0-9]+.apps.googleusercontent.com"
    google_oauth_client_id = t.add_parameter(Parameter(
        "GoogleOAuthClientID",
        AllowedPattern="[0-9]+-[a-z0-9]+.apps.googleusercontent.com",
        Type="String",
        Description="The Client ID of your Google project",
        ConstraintDescription=description
    ))
    # NOTE(review): "\-" is an invalid escape sequence in a non-raw string
    # (SyntaxWarning on Python 3.12+); prefer r"[a-zA-Z0-9\-]*". Left
    # unchanged here.
    website_s3_bucket_name = t.add_parameter(Parameter(
        "WebsiteS3BucketName",
        AllowedPattern="[a-zA-Z0-9\-]*",
        Type="String",
        Description="Name of S3 bucket to store the website in",
        ConstraintDescription="can contain only alphanumeric characters and dashes.",
    ))

    # The SNS topic the website will publish chat messages to
    website_sns_topic = t.add_resource(sns.Topic(
        'WebsiteSnsTopic',
        TopicName='lambda-chat',
        DisplayName='Lambda Chat'
    ))
    t.add_output(Output(
        "WebsiteSnsTopic",
        Description="sns_topic_arn",
        Value=Ref(website_sns_topic),
    ))

    # The IAM Role and Policy the website will assume to publish to SNS.
    # Trust policy: only Google-federated identities whose audience matches
    # the configured OAuth client ID may assume the role.
    website_role = t.add_resource(iam.Role(
        "WebsiteRole",
        Path="/",
        AssumeRolePolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[Action("sts", "AssumeRoleWithWebIdentity")],
                    Principal=Principal("Federated", "accounts.google.com"),
                    Condition=Condition(
                        StringEquals(
                            "accounts.google.com:aud",
                            Ref(google_oauth_client_id)
                        )
                    ),
                ),
            ],
        ),
    ))
    # The role may only publish to the chat topic.
    t.add_resource(iam.PolicyType(
        "WebsitePolicy",
        PolicyName="lambda-chat-website-policy",
        Roles=[Ref(website_role)],
        PolicyDocument=Policy(
            Version="2012-10-17",
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[Action("sns", "Publish")],
                    Resource=[
                        Ref(website_sns_topic)
                    ],
                ),
            ],
        )
    ))
    t.add_output(Output(
        "WebsiteRole",
        Description="website_iam_role_arn",
        Value=GetAtt(website_role, "Arn"),
    ))

    # Static-website bucket (index/error documents).
    website_bucket = t.add_resource(s3.Bucket(
        'WebsiteS3Bucket',
        BucketName=Ref(website_s3_bucket_name),
        WebsiteConfiguration=s3.WebsiteConfiguration(
            ErrorDocument="error.html",
            IndexDocument="index.html"
        )
    ))
    t.add_output(Output(
        "S3Bucket",
        Description="s3_bucket",
        Value=Ref(website_bucket),
    ))
    # Public-read bucket policy (raw JSON dict rather than awacs objects).
    t.add_resource(s3.BucketPolicy(
        'WebsiteS3BucketPolicy',
        Bucket=Ref(website_bucket),
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Sid": "PublicAccess",
                    "Effect": "Allow",
                    "Principal": "*",
                    "Action": ["s3:GetObject"],
                    "Resource": [{
                        "Fn::Join": [
                            "",
                            [
                                "arn:aws:s3:::",
                                {
                                    "Ref": "WebsiteS3Bucket",
                                },
                                "/*"
                            ]
                        ]
                    }]
                }
            ]
        }
    ))

    return t
def script_manager_ecr_deploy(self, ecr_deploy_group, asg_dict, asg_config, template):
    """Grant the ASG instance role the IAM permissions needed to run ECR
    deploy release phases, and tag the ASG with the target ECS cluster and
    service for each release-phase command.

    Side effects: mutates ``asg_dict['Tags']`` in place and adds an
    ``ECSReleasePhase`` ManagedPolicy (attached to
    ``self.instance_iam_role_name``) to ``template``.
    ``asg_config`` is accepted but not read here.
    """
    policy_statements = []
    for ecr_deploy_name in ecr_deploy_group.keys():
        ecr_deploy = ecr_deploy_group[ecr_deploy_name]
        # NOTE(review): `== None` should be `is None` (identity comparison).
        if ecr_deploy == None:
            continue
        if ecr_deploy and len(ecr_deploy.release_phase.ecs) > 0:
            # Collect source (pull) and destination (push) repository ARNs
            # by resolving each repo ref to its environment and account.
            pull_repos = []
            push_repos = []
            for repository in ecr_deploy.repositories:
                source_ecr_obj = get_model_obj_from_ref(repository.source_repo, self.paco_ctx.project)
                source_env = get_parent_by_interface(source_ecr_obj, schemas.IEnvironmentRegion)
                source_account_id = self.paco_ctx.get_ref(source_env.network.aws_account+".id")
                dest_ecr_obj = get_model_obj_from_ref(repository.dest_repo, self.paco_ctx.project)
                dest_env = get_parent_by_interface(dest_ecr_obj, schemas.IEnvironmentRegion)
                dest_account_id = self.paco_ctx.get_ref(dest_env.network.aws_account+".id")
                pull_repo_arn = f'arn:aws:ecr:{source_env.region}:{source_account_id}:repository/{source_ecr_obj.repository_name}'
                push_repo_arn = f'arn:aws:ecr:{dest_env.region}:{dest_account_id}:repository/{dest_ecr_obj.repository_name}'
                pull_repos.append(pull_repo_arn)
                push_repos.append(push_repo_arn)
            # Read-only pull access on the source repositories.
            # NOTE(review): the f-string Sids below have no placeholders; a
            # plain string would do.
            policy_statements.append(
                Statement(
                    Sid=f'ScriptManagerECRDeployPull',
                    Effect=Allow,
                    Action=[
                        Action('ecr', 'GetDownloadUrlForLayer'),
                        Action('ecr', 'BatchGetImage'),
                    ],
                    Resource=pull_repos
                )
            )
            # Push (layer upload + image put) access on the destinations.
            policy_statements.append(
                Statement(
                    Sid=f'ScriptManagerECRDeployPush',
                    Effect=Allow,
                    Action=[
                        Action('ecr', 'GetDownloadUrlForLayer'),
                        Action('ecr', 'BatchCheckLayerAvailability'),
                        Action('ecr', 'PutImage'),
                        Action('ecr', 'InitiateLayerUpload'),
                        Action('ecr', 'UploadLayerPart'),
                        Action('ecr', 'CompleteLayerUpload'),
                    ],
                    Resource=push_repos
                )
            )
            # One set of cluster-scoped statements per distinct cluster;
            # iam_cluster_cache de-duplicates.
            iam_cluster_cache = []
            idx = 0
            for command in ecr_deploy.release_phase.ecs:
                service_obj = get_model_obj_from_ref(command.service, self.paco_ctx.project)
                ecs_services_obj = get_parent_by_interface(service_obj, schemas.IECSServices)
                ecs_release_phase_cluster_arn_param = self.create_cfn_parameter(
                    param_type='String',
                    name=f'ECSReleasePhaseClusterArn{idx}',
                    description=f'ECS Release Phase Cluster Arn {idx}',
                    value=ecs_services_obj.cluster + '.arn'
                )
                ecs_release_phase_cluster_name_param = self.create_cfn_parameter(
                    param_type='String',
                    name=f'ECSReleasePhaseClusterName{idx}',
                    description=f'ECS Release Phase Cluster Name {idx}',
                    value=ecs_services_obj.cluster + '.name'
                )
                # NOTE(review): description below says "Cluster Name" but this
                # is the *service* name parameter (copy-paste slip).
                ecs_release_phase_service_name_param = self.create_cfn_parameter(
                    param_type='String',
                    name=f'ECSReleasePhaseServiceName{idx}',
                    description=f'ECS Release Phase Cluster Name {idx}',
                    value=command.service + '.name'
                )
                # Expose cluster/service ids to instances via ASG tags.
                ecs_cluster_asg_tag = troposphere.autoscaling.Tag(
                    f'PACO_CB_RP_ECS_CLUSTER_ID_{idx}',
                    troposphere.Ref(ecs_release_phase_cluster_name_param),
                    True
                )
                ecs_service_asg_tag = troposphere.autoscaling.Tag(
                    f'PACO_CB_RP_ECS_SERVICE_ID_{idx}',
                    troposphere.Ref(ecs_release_phase_service_name_param),
                    True
                )
                asg_dict['Tags'].append(ecs_cluster_asg_tag)
                asg_dict['Tags'].append(ecs_service_asg_tag)
                if ecs_services_obj.cluster not in iam_cluster_cache:
                    # SSM SendCommand restricted by the Paco-ECSCluster-Name
                    # resource tag on target instances.
                    policy_statements.append(
                        Statement(
                            Sid=f'ECSReleasePhaseSSMSendCommand{idx}',
                            Effect=Allow,
                            Action=[
                                Action('ssm', 'SendCommand'),
                            ],
                            Resource=[
                                'arn:aws:ec2:*:*:instance/*'
                            ],
                            Condition=Condition(
                                StringLike({
                                    'ssm:resourceTag/Paco-ECSCluster-Name': troposphere.Ref(ecs_release_phase_cluster_name_param)
                                })
                            )
                        )
                    )
                    # ECS task/service access scoped to this cluster.
                    # NOTE(review): Sid is misspelled "Relase" — it is a
                    # runtime string, so left unchanged here.
                    policy_statements.append(
                        Statement(
                            Sid=f'ECSRelasePhaseClusterAccess{idx}',
                            Effect=Allow,
                            Action=[
                                Action('ecs', 'DescribeServices'),
                                Action('ecs', 'RunTask'),
                                Action('ecs', 'StopTask'),
                                Action('ecs', 'DescribeContainerInstances'),
                                Action('ecs', 'ListTasks'),
                                Action('ecs', 'DescribeTasks'),
                            ],
                            Resource=[
                                '*'
                            ],
                            Condition=Condition(
                                StringEquals({
                                    'ecs:cluster': troposphere.Ref(ecs_release_phase_cluster_arn_param)
                                })
                            )
                        )
                    )
                    iam_cluster_cache.append(ecs_services_obj.cluster)
                idx += 1
    # Statements below are appended once, regardless of how many deploy
    # groups were processed.
    # NOTE(review): this Resource ARN has empty region/account fields and no
    # trailing wildcard — confirm it matches the intended automation
    # definitions.
    policy_statements.append(
        Statement(
            Sid='ECSReleasePhaseSSMAutomationExecution',
            Effect=Allow,
            Action=[
                Action('ssm', 'StartAutomationExecution'),
                Action('ssm', 'StopAutomationExecution'),
                Action('ssm', 'GetAutomationExecution'),
            ],
            Resource=[
                'arn:aws:ssm:::automation-definition/'
            ]
        )
    )
    # ECS Policies
    policy_statements.append(
        Statement(
            Sid='ECSRelasePhaseECS',
            Effect=Allow,
            Action=[
                Action('ecs', 'DescribeTaskDefinition'),
                Action('ecs', 'DeregisterTaskDefinition'),
                Action('ecs', 'RegisterTaskDefinition'),
                Action('ecs', 'ListTagsForResource'),
                Action('ecr', 'DescribeImages')
            ],
            Resource=[
                '*'
            ]
        )
    )
    # Allow running the paco docker-exec SSM document in this account/region.
    policy_statements.append(
        Statement(
            Sid=f'ECSReleasePhaseSSMSendCommandDocument',
            Effect=Allow,
            Action=[
                Action('ssm', 'SendCommand'),
            ],
            Resource=[
                f'arn:aws:ssm:{self.aws_region}:{self.account_ctx.get_id()}:document/paco_ecs_docker_exec'
            ]
        )
    )
    # General SSM/ECR/EC2 read and command-management permissions.
    policy_statements.append(
        Statement(
            Sid='ECSReleasePhaseSSMCore',
            Effect=Allow,
            Action=[
                Action('ssm', 'ListDocuments'),
                Action('ssm', 'ListDocumentVersions'),
                Action('ssm', 'DescribeDocument'),
                Action('ssm', 'GetDocument'),
                Action('ssm', 'DescribeInstanceInformation'),
                Action('ssm', 'DescribeDocumentParameters'),
                Action('ssm', 'CancelCommand'),
                Action('ssm', 'ListCommands'),
                Action('ssm', 'ListCommandInvocations'),
                Action('ssm', 'DescribeAutomationExecutions'),
                Action('ssm', 'DescribeInstanceProperties'),
                Action('ssm', 'GetCommandInvocation'),
                Action('ec2', 'DescribeInstanceStatus'),
                Action('ecr', 'GetAuthorizationToken')
            ],
            Resource=[
                '*'
            ]
        )
    )
    # NOTE(review): unscoped iam:PassRole on '*' is broad; consider limiting
    # Resource to the task/execution roles actually passed.
    policy_statements.append(
        Statement(
            Sid='IAMPassRole',
            Effect=Allow,
            Action=[
                Action('iam', 'passrole')
            ],
            Resource=[
                '*'
            ]
        )
    )
    ecs_release_phase_project_policy_res = troposphere.iam.ManagedPolicy(
        title='ECSReleasePhase',
        PolicyDocument=PolicyDocument(
            Version="2012-10-17",
            Statement=policy_statements
        ),
        Roles=[self.instance_iam_role_name]
    )
    template.add_resource(ecs_release_phase_project_policy_res)
def script_manager_ecs(self, ecs_group, asg_dict, asg_config, template):
    """Grant the ASG instance role ECS access for script-manager entries and
    tag the ASG with each entry's cluster ARN.

    Args:
        ecs_group: mapping of name -> ECS script-manager config; ``None``
            values are skipped.
        asg_dict: ASG cfn export dict; its ``'Tags'`` list is mutated in place.
        asg_config: accepted for signature parity; not read here.
        template: troposphere template the ``ScriptManagerECS`` ManagedPolicy
            (attached to ``self.instance_iam_role_name``) is added to.
    """
    idx = 0
    policy_statements = []
    for ecs_name in ecs_group.keys():
        ecs = ecs_group[ecs_name]
        # Fixed: was `ecs == None`; identity comparison is the Python idiom.
        if ecs is None:
            continue
        # Parameter resolving to this entry's cluster ARN.
        ecs_script_manager_cluster_arn_param = self.create_cfn_parameter(
            param_type='String',
            name=f'ECSScriptManagerClusterArn{idx}',
            description=f'ECS Script Manager Cluster Arn {idx}',
            value=ecs.cluster + '.arn'
        )
        # Expose the cluster ARN to instances via an ASG tag.
        ecs_cluster_tag = troposphere.autoscaling.Tag(
            f'paco:script_manager:ecs:{ecs_name}:cluster:arn',
            troposphere.Ref(ecs_script_manager_cluster_arn_param),
            True
        )
        asg_dict['Tags'].append(ecs_cluster_tag)
        # ECS service/task access, scoped to this cluster via ecs:cluster.
        policy_statements.append(
            Statement(
                Sid=f'ECSScriptManagerClusterAccess{idx}',
                Effect=Allow,
                Action=[
                    Action('ecs', 'UpdateService'),
                    Action('ecs', 'DescribeServices'),
                    Action('ecs', 'ListServices'),
                    Action('ecs', 'RunTask'),
                    Action('ecs', 'StopTask'),
                    Action('ecs', 'DescribeContainerInstances'),
                    Action('ecs', 'ListTasks'),
                    Action('ecs', 'DescribeTasks'),
                    Action('ec2', 'DescribeInstances'),
                ],
                Resource=[
                    '*'
                ],
                Condition=Condition(
                    StringEquals({
                        'ecs:cluster': troposphere.Ref(ecs_script_manager_cluster_arn_param)
                    })
                )
            )
        )
        # Separate unconditioned statement for ec2:DescribeInstances.
        policy_statements.append(
            Statement(
                Sid=f'ECSScriptManagerClusterEC2Access{idx}',
                Effect=Allow,
                Action=[
                    Action('ec2', 'DescribeInstances'),
                ],
                Resource=[
                    '*'
                ]
            )
        )
        idx += 1
    script_manager_ecs_policy_res = troposphere.iam.ManagedPolicy(
        title='ScriptManagerECS',
        PolicyDocument=PolicyDocument(
            Version="2012-10-17",
            Statement=policy_statements
        ),
        Roles=[self.instance_iam_role_name]
    )
    template.add_resource(script_manager_ecs_policy_res)
def __init__(self, stack, paco_ctx):
    """Build the CloudFormation template for a Cognito Identity Pool.

    Creates the IdentityPool (with its Cognito User Pool identity providers
    resolved to CFN parameters), optional authenticated/unauthenticated IAM
    roles with web-identity trust policies, and an
    IdentityPoolRoleAttachment linking the roles to the pool.
    """
    cip = stack.resource
    super().__init__(stack, paco_ctx, iam_capabilities=["CAPABILITY_IAM"])
    self.set_aws_name('CIP', self.resource_group_name, self.resource.name)
    self.init_template('Cognito Identity Pool')
    # Disabled resources get an (empty) template but no resources.
    if not cip.is_enabled():
        return

    # Cognito Identity Pool
    cfn_export_dict = cip.cfn_export_dict
    if len(cip.identity_providers) > 0:
        idps = []
        # Caches keyed by ref so a client/userpool referenced by several
        # identity providers yields a single CFN parameter.
        up_client_params = {}
        up_params = {}
        for idp in cip.identity_providers:
            # replace <region> and <account> for refs in Services
            up_client_ref = Reference(idp.userpool_client)
            up_client_ref.set_account_name(self.account_ctx.get_name())
            up_client_ref.set_region(self.aws_region)
            userpool_client = up_client_ref.get_model_obj(self.paco_ctx.project)
            if up_client_ref.ref not in up_client_params:
                # md5sum suffix keeps logical ids unique across refs.
                up_client_name = self.create_cfn_logical_id(f'UserPoolClient{userpool_client.name}' + md5sum(str_data=up_client_ref.ref))
                value = f'paco.ref {up_client_ref.ref }.id'
                up_client_params[up_client_ref.ref] = self.create_cfn_parameter(
                    param_type='String',
                    name=up_client_name,
                    description=f'UserPool Client Id for {userpool_client.name}',
                    value=value,
                )
            # Walk up from the client to its owning User Pool.
            userpool = get_parent_by_interface(userpool_client, ICognitoUserPool)
            userpool_ref = userpool.paco_ref
            if userpool_ref not in up_params:
                up_name = self.create_cfn_logical_id(f'UserPool{userpool.name}' + md5sum(str_data=userpool_ref))
                up_params[userpool_ref] = self.create_cfn_parameter(
                    param_type='String',
                    name=up_name,
                    description=f'UserPool ProviderName for {userpool.name}',
                    value=userpool_ref + '.providername',
                )
            idps.append({
                "ClientId": troposphere.Ref(up_client_params[up_client_ref.ref]),
                "ProviderName": troposphere.Ref(up_params[userpool_ref]),
                "ServerSideTokenCheck": idp.serverside_token_check,
            })
        cfn_export_dict['CognitoIdentityProviders'] = idps
    cip_resource = troposphere.cognito.IdentityPool.from_dict(
        'CognitoIdentityPool',
        cfn_export_dict
    )
    self.template.add_resource(cip_resource)

    # Outputs
    self.create_output(
        title=cip_resource.title + 'Id',
        description="Cognito Identity Pool Id",
        value=troposphere.Ref(cip_resource),
        ref=[cip.paco_ref_parts, cip.paco_ref_parts + ".id"],
    )

    # Roles
    roles_dict = {}
    # Trust policy: Cognito-federated, this pool's id as audience, and the
    # "unauthenticated" amr claim.
    unauthenticated_assume_role_policy = PolicyDocument(
        Statement=[
            Statement(
                Effect=Allow,
                Principal=Principal('Federated', "cognito-identity.amazonaws.com"),
                Action=[Action('sts', 'AssumeRoleWithWebIdentity')],
                Condition=Condition([
                    StringEquals({"cognito-identity.amazonaws.com:aud": troposphere.Ref(cip_resource)}),
                    ForAnyValueStringLike({"cognito-identity.amazonaws.com:amr": "unauthenticated"})
                ]),
            ),
        ],
    )
    unauthenticated_role_resource = role_to_troposphere(
        cip.unauthenticated_role,
        'UnauthenticatedRole',
        assume_role_policy=unauthenticated_assume_role_policy,
    )
    # NOTE(review): `!= None` should be `is not None`; left unchanged here.
    if unauthenticated_role_resource != None:
        self.template.add_resource(unauthenticated_role_resource)
        roles_dict['unauthenticated'] = troposphere.GetAtt(unauthenticated_role_resource, "Arn")
    # Same trust policy shape, but for the "authenticated" amr claim.
    authenticated_assume_role_policy = PolicyDocument(
        Statement=[
            Statement(
                Effect=Allow,
                Principal=Principal('Federated', "cognito-identity.amazonaws.com"),
                Action=[Action('sts', 'AssumeRoleWithWebIdentity')],
                Condition=Condition([
                    StringEquals({"cognito-identity.amazonaws.com:aud": troposphere.Ref(cip_resource)}),
                    ForAnyValueStringLike({"cognito-identity.amazonaws.com:amr": "authenticated"})
                ]),
            ),
        ],
    )
    authenticated_role_resource = role_to_troposphere(
        cip.authenticated_role,
        'AuthenticatedRole',
        assume_role_policy=authenticated_assume_role_policy
    )
    if authenticated_role_resource != None:
        self.template.add_resource(authenticated_role_resource)
        roles_dict['authenticated'] = troposphere.GetAtt(authenticated_role_resource, "Arn")

    # Identity Pool Role Attachment - only when at least one role exists.
    if roles_dict:
        iproleattachment_resource = troposphere.cognito.IdentityPoolRoleAttachment(
            title='IdentityPoolRoleAttachment',
            IdentityPoolId=troposphere.Ref(cip_resource),
            Roles=roles_dict,
        )
        self.template.add_resource(iproleattachment_resource)
def test_attaching_role_to_instance(self):
    '''
    Shows how to attach an IAM role to an EC2 instance via an instance
    profile. The role allows ec2:StopInstances only on resources tagged
    with this stack's ID; the test then ssh-es into the instance and uses
    the AWS CLI (with role-provided credentials, no configured access
    keys) to stop the instance itself.
    '''
    test_stack_name = 'TestAttachingRole2Instance'
    init_cf_env(test_stack_name)
    ###
    t = Template()
    security_group = ts_add_security_group(t)
    role = t.add_resource(
        Role(
            "MyRole",
            AssumeRolePolicyDocument=Policy(  # allow the EC2 service to trust this role
                Statement=[
                    Statement(Effect=Allow,
                              Action=[AssumeRole],
                              Principal=Principal("Service", ["ec2.amazonaws.com"]))
                ]),
            Policies=[
                troposphere.iam.Policy(
                    PolicyName="my_ec2_policy",
                    PolicyDocument=PolicyDocument(Statement=[
                        Statement(
                            Effect=Allow,
                            Action=[
                                Action("ec2", "StopInstances"),
                            ],
                            Resource=["*"],  # only allow if tagged with the stack ID.
                            Condition=Condition(
                                StringEquals({
                                    'ec2:ResourceTag/aws:cloudformation:stack-id': Ref('AWS::StackId')
                                })),
                        )
                    ])),
            ]))
    # An instance profile is the container that lets EC2 pick up the role.
    instance_profile = t.add_resource(
        InstanceProfile("MyInstanceProfile", Roles=[Ref(role)]))
    instance = ts_add_instance_with_public_ip(t, Ref(security_group),
                                              tag='role-attachment')
    instance.IamInstanceProfile = Ref(instance_profile)
    t.add_output([
        Output("InstanceId", Value=Ref(instance)),
        Output(
            "PublicIP",
            Value=GetAtt(instance, "PublicIp"),
        ),
        Output("Region", Value=Ref('AWS::Region')),
    ])
    dump_template(t, True)
    # IAM resources require an explicit capability acknowledgement.
    cf_client.create_stack(
        StackName=test_stack_name,
        TemplateBody=t.to_yaml(),
        Capabilities=['CAPABILITY_NAMED_IAM'],
    )
    cf_client.get_waiter('stack_create_complete').wait(
        StackName=test_stack_name)
    # Give sshd on the fresh instance a moment to come up.
    time.sleep(10)
    # Pull the stack outputs needed to reach and stop the instance.
    public_ip = key_find(
        cf_client.describe_stacks(
            StackName=test_stack_name)['Stacks'][0]['Outputs'],
        'OutputKey', 'PublicIP')['OutputValue']
    instance_id = key_find(
        cf_client.describe_stacks(
            StackName=test_stack_name)['Stacks'][0]['Outputs'],
        'OutputKey', 'InstanceId')['OutputValue']
    region = key_find(
        cf_client.describe_stacks(
            StackName=test_stack_name)['Stacks'][0]['Outputs'],
        'OutputKey', 'Region')['OutputValue']
    # When using an IAM role, your access keys are injected into your EC2
    # instance automatically.
    run(f"ssh -o 'StrictHostKeyChecking no' ec2-user@{public_ip} aws ec2 stop-instances --instance-ids {instance_id} --region {region}"
        )
def __init__(self, stack, paco_ctx):
    """Build the CloudFormation template for an RDS Aurora cluster.

    Creates (as configured): DB subnet group, cluster parameter group,
    default/instance parameter groups, enhanced-monitoring role, optional
    KMS CMK encryption, master credentials (Secrets Manager or parameter),
    the DBCluster plus its DBInstances, event subscriptions, outputs, and
    Route53 CNAMEs for the writer/reader endpoints.

    BUG FIX: the ``need_db_pg`` branch previously read
    ``rds_config.engine`` / ``rds_config.engine_version`` — ``rds_config``
    is undefined in this scope (NameError whenever a default DB parameter
    group was needed); it now uses ``rds_aurora`` like the rest of the
    method.
    """
    rds_aurora = stack.resource
    super().__init__(stack, paco_ctx, iam_capabilities=["CAPABILITY_IAM"])
    self.set_aws_name('RDSAurora', self.resource_group_name, self.resource.name)
    self.init_template('RDSAurora')
    if not rds_aurora.is_enabled():
        return
    rds_cluster_logical_id = 'DBCluster'
    db_cluster_dict = rds_aurora.cfn_export_dict
    self.notification_groups = {}

    # DB Subnet Group
    db_subnet_id_list_param = self.create_cfn_parameter(
        param_type='List<AWS::EC2::Subnet::Id>',
        name='DBSubnetIdList',
        description='The list of subnet IDs where this database will be provisioned.',
        value=rds_aurora.segment + '.subnet_id_list',
    )
    db_subnet_group_resource = troposphere.rds.DBSubnetGroup(
        title='DBSubnetGroup',
        template=self.template,
        DBSubnetGroupDescription=troposphere.Ref('AWS::StackName'),
        SubnetIds=troposphere.Ref(db_subnet_id_list_param),
    )
    db_cluster_dict['DBSubnetGroupName'] = troposphere.Ref(db_subnet_group_resource)

    # DB Cluster Parameter Group
    if rds_aurora.cluster_parameter_group is None:
        # If no Cluster Parameter Group supplied then create one
        param_group_family = gen_vocabulary.rds_engine_versions[rds_aurora.engine][rds_aurora.engine_version]['param_group_family']
        cluster_parameter_group_ref = troposphere.rds.DBClusterParameterGroup(
            "DBClusterParameterGroup",
            template=self.template,
            Family=param_group_family,
            Description=troposphere.Ref('AWS::StackName')
        )
    else:
        # Use existing Parameter Group
        cluster_parameter_group_ref = self.create_cfn_parameter(
            name='DBClusterParameterGroupName',
            param_type='String',
            description='DB Cluster Parameter Group Name',
            value=rds_aurora.cluster_parameter_group + '.name',
        )
    db_cluster_dict['DBClusterParameterGroupName'] = troposphere.Ref(cluster_parameter_group_ref)

    # Default DB Parameter Group - needed when any instance has no
    # parameter group of its own and no default-instance override.
    need_db_pg = False
    default_instance = rds_aurora.default_instance
    for db_instance in rds_aurora.db_instances.values():
        if default_instance.parameter_group is None and db_instance.parameter_group is None:
            need_db_pg = True
    if need_db_pg:
        # create default DB Parameter Group
        # (fixed: was rds_config.engine / rds_config.engine_version)
        param_group_family = gen_vocabulary.rds_engine_versions[rds_aurora.engine][rds_aurora.engine_version]['param_group_family']
        default_dbparametergroup_resource = troposphere.rds.DBParameterGroup(
            "DBParameterGroup",
            template=self.template,
            Family=param_group_family,
            Description=troposphere.Ref('AWS::StackName')
        )

    # Enhanced Monitoring Role - only when any instance enables it.
    need_monitoring_role = False
    for db_instance in rds_aurora.db_instances.values():
        enhanced_monitoring_interval = db_instance.get_value_or_default('enhanced_monitoring_interval_in_seconds')
        if enhanced_monitoring_interval != 0:
            need_monitoring_role = True
    if need_monitoring_role:
        enhanced_monitoring_role_resource = troposphere.iam.Role(
            title='MonitoringIAMRole',
            template=self.template,
            AssumeRolePolicyDocument=PolicyDocument(
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[Action("sts", "AssumeRole")],
                        Principal=Principal("Service", "monitoring.rds.amazonaws.com")
                    )
                ]
            ),
            ManagedPolicyArns=["arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole"],
            Path="/",
        )

    # DB Snapshot Identifier
    if rds_aurora.db_snapshot_identifier == '' or rds_aurora.db_snapshot_identifier is None:
        db_snapshot_id_enabled = False
    else:
        db_snapshot_id_enabled = True
    if db_snapshot_id_enabled:
        db_cluster_dict['SnapshotIdentifier'] = rds_aurora.db_snapshot_identifier

    # KMS-CMK key encryption - not applicable when restoring from a
    # snapshot (the snapshot determines encryption).
    if rds_aurora.enable_kms_encryption and not db_snapshot_id_enabled:
        key_policy = Policy(
            Version='2012-10-17',
            Statement=[
                # Account root retains full administration of the key.
                Statement(
                    Effect=Allow,
                    Action=[Action('kms', '*'),],
                    Principal=Principal("AWS", [f'arn:aws:iam::{self.stack.account_ctx.id}:root']),
                    Resource=['*'],
                ),
                # Usage restricted to RDS in this account/region.
                Statement(
                    Effect=Allow,
                    Action=[
                        awacs.kms.Encrypt,
                        awacs.kms.Decrypt,
                        Action('kms', 'ReEncrypt*'),
                        Action('kms', 'GenerateDataKey*'),
                        awacs.kms.CreateGrant,
                        awacs.kms.ListGrants,
                        awacs.kms.DescribeKey,
                    ],
                    Principal=Principal('AWS', ['*']),
                    Resource=['*'],
                    Condition=Condition([
                        StringEquals({
                            'kms:CallerAccount': f'{self.stack.account_ctx.id}',
                            'kms:ViaService': f'rds.{self.stack.aws_region}.amazonaws.com'
                        })
                    ]),
                ),
            ],
        )
        kms_key_resource = troposphere.kms.Key(
            title='AuroraKMSCMK',
            template=self.template,
            KeyPolicy=key_policy,
        )
        db_cluster_dict['StorageEncrypted'] = True
        db_cluster_dict['KmsKeyId'] = troposphere.Ref(kms_key_resource)
        kms_key_alias_resource = troposphere.kms.Alias(
            title="AuroraKMSCMKAlias",
            template=self.template,
            AliasName=troposphere.Sub('alias/${' + rds_cluster_logical_id + '}'),
            TargetKeyId=troposphere.Ref(kms_key_resource),
        )
        kms_key_alias_resource.DependsOn = rds_cluster_logical_id

    # Username and Password - only if there is no DB Snapshot Identifier
    if not db_snapshot_id_enabled:
        db_cluster_dict['MasterUsername'] = rds_aurora.master_username
        if rds_aurora.secrets_password:
            # Password from Secrets Manager via a dynamic reference.
            sta_logical_id = 'SecretTargetAttachmentRDS'
            secret_arn_param = self.create_cfn_parameter(
                param_type='String',
                name='RDSSecretARN',
                description='The ARN for the secret for the RDS master password.',
                value=rds_aurora.secrets_password + '.arn',
            )
            secret_target_attachment_resource = troposphere.secretsmanager.SecretTargetAttachment(
                title=sta_logical_id,
                template=self.template,
                SecretId=troposphere.Ref(secret_arn_param),
                TargetId=troposphere.Ref(rds_cluster_logical_id),
                TargetType='AWS::RDS::DBCluster'
            )
            secret_target_attachment_resource.DependsOn = rds_cluster_logical_id
            db_cluster_dict['MasterUserPassword'] = troposphere.Join(
                '',
                ['{{resolve:secretsmanager:', troposphere.Ref(secret_arn_param), ':SecretString:password}}']
            )
        else:
            # Plain (NoEcho) parameter fallback.
            master_password_param = self.create_cfn_parameter(
                param_type='String',
                name='MasterUserPassword',
                description='The master user password.',
                value=rds_aurora.master_user_password,
                noecho=True,
            )
            db_cluster_dict['MasterUserPassword'] = troposphere.Ref(master_password_param)
    db_cluster_res = troposphere.rds.DBCluster.from_dict(
        rds_cluster_logical_id,
        db_cluster_dict
    )
    self.template.add_resource(db_cluster_res)

    # Cluster Event Notifications
    if hasattr(rds_aurora, 'cluster_event_notifications'):
        for group in rds_aurora.cluster_event_notifications.groups:
            notif_param = self.create_notification_param(group)
            event_subscription_resource = troposphere.rds.EventSubscription(
                title=self.create_cfn_logical_id(f"ClusterEventSubscription{group}"),
                template=self.template,
                EventCategories=rds_aurora.cluster_event_notifications.event_categories,
                SourceIds=[troposphere.Ref(db_cluster_res)],
                SnsTopicArn=troposphere.Ref(notif_param),
                SourceType='db-cluster',
            )

    # DB Instance(s)
    for db_instance in rds_aurora.db_instances.values():
        logical_name = self.create_cfn_logical_id(db_instance.name)
        db_instance_dict = {
            'DBClusterIdentifier': troposphere.Ref(db_cluster_res),
            'DBInstanceClass': db_instance.get_value_or_default('db_instance_type'),
            'DBSubnetGroupName': troposphere.Ref(db_subnet_group_resource),
            'EnablePerformanceInsights': db_instance.get_value_or_default('enable_performance_insights'),
            'Engine': rds_aurora.engine,
            'PubliclyAccessible': db_instance.get_value_or_default('publicly_accessible'),
            'AllowMajorVersionUpgrade': db_instance.get_value_or_default('allow_major_version_upgrade'),
            'AutoMinorVersionUpgrade': db_instance.get_value_or_default('auto_minor_version_upgrade'),
        }
        enhanced_monitoring_interval = db_instance.get_value_or_default('enhanced_monitoring_interval_in_seconds')
        if enhanced_monitoring_interval != 0:
            db_instance_dict['MonitoringInterval'] = enhanced_monitoring_interval
            db_instance_dict['MonitoringRoleArn'] = troposphere.GetAtt(enhanced_monitoring_role_resource, "Arn")
        if db_instance.availability_zone is not None:
            subnet_id_ref = f'{rds_aurora.segment}.az{db_instance.availability_zone}.availability_zone'
            db_instance_subnet_param = self.create_cfn_parameter(
                param_type='String',
                name=f'DBInstanceAZ{logical_name}',
                description=f'Subnet where DB Instance {logical_name} is provisioned',
                value=subnet_id_ref,
            )
            db_instance_dict['AvailabilityZone'] = troposphere.Ref(db_instance_subnet_param)

        # DB Parameter Group: default resource, instance-specific, or
        # default-instance override (in that order of precedence).
        if default_instance.parameter_group is None and db_instance.parameter_group is None:
            dbparametergroup_resource = default_dbparametergroup_resource
        elif db_instance.parameter_group is not None:
            # Use instance-specific DB Parameter Group
            dbparametergroup_resource = self.create_cfn_parameter(
                name=f'DBParameterGroupName{logical_name}',
                param_type='String',
                description='DB Parameter Group Name',
                value=db_instance.parameter_group + '.name',
            )
        else:
            # Use default DB Parameter Group
            dbparametergroup_resource = self.create_cfn_parameter(
                name=f'DBParameterGroupName{logical_name}',
                param_type='String',
                description='DB Parameter Group Name',
                value=default_instance.parameter_group + '.name',
            )
        db_instance_dict['DBParameterGroupName'] = troposphere.Ref(dbparametergroup_resource)

        db_instance_resource = troposphere.rds.DBInstance.from_dict(
            f'DBInstance{logical_name}',
            db_instance_dict
        )
        self.template.add_resource(db_instance_resource)

        # DB Event Notifications
        event_notifications = db_instance.get_value_or_default('event_notifications')
        if event_notifications is not None:
            for group in event_notifications.groups:
                notif_param = self.create_notification_param(group)
                event_subscription_resource = troposphere.rds.EventSubscription(
                    title=self.create_cfn_logical_id(f"DBEventSubscription{logical_name}{group}"),
                    template=self.template,
                    EventCategories=event_notifications.event_categories,
                    SourceIds=[troposphere.Ref(db_instance_resource)],
                    SnsTopicArn=troposphere.Ref(notif_param),
                    SourceType='db-instance',
                )

        # DB Instance Outputs
        self.create_output(
            title=f'DBInstanceName{logical_name}',
            description=f'DB Instance Name for {logical_name}',
            value=troposphere.Ref(db_instance_resource),
            ref=db_instance.paco_ref_parts + ".name",
        )

    # DB Cluster Outputs
    self.create_output(
        title='DBClusterName',
        description='DB Cluster Name',
        value=troposphere.Ref(db_cluster_res),
        ref=self.resource.paco_ref_parts + ".name",
    )
    self.create_output(
        title='ClusterEndpointAddress',
        description='Cluster Endpoint Address',
        value=troposphere.GetAtt(db_cluster_res, 'Endpoint.Address'),
        ref=self.resource.paco_ref_parts + ".endpoint.address",
    )
    self.create_output(
        title='ClusterEndpointPort',
        description='Cluster Endpoint Port',
        value=troposphere.GetAtt(db_cluster_res, 'Endpoint.Port'),
        ref=self.resource.paco_ref_parts + ".endpoint.port",
    )
    self.create_output(
        title='ClusterReadEndpointAddress',
        description='Cluster ReadEndpoint Address',
        value=troposphere.GetAtt(db_cluster_res, 'ReadEndpoint.Address'),
        ref=self.resource.paco_ref_parts + ".readendpoint.address",
    )

    # DNS - Route53 Record Set (CNAMEs for writer and reader endpoints)
    if rds_aurora.is_dns_enabled():
        route53_ctl = self.paco_ctx.get_controller('route53')
        for dns in rds_aurora.dns:
            route53_ctl.add_record_set(
                self.account_ctx,
                self.aws_region,
                rds_aurora,
                enabled=rds_aurora.is_enabled(),
                dns=dns,
                record_set_type='CNAME',
                resource_records=[rds_aurora.paco_ref + '.endpoint.address'],
                stack_group=self.stack.stack_group,
            )
        for read_dns in rds_aurora.read_dns:
            route53_ctl.add_record_set(
                self.account_ctx,
                self.aws_region,
                rds_aurora,
                enabled=rds_aurora.is_enabled(),
                dns=read_dns,
                record_set_type='CNAME',
                resource_records=[rds_aurora.paco_ref + '.readendpoint.address'],
                stack_group=self.stack.stack_group,
            )
def test_role_to_troposphere(self):
    """Exercise role_to_troposphere() for Cognito, Backup, AWS-principal and
    Condition roles.

    BUG FIX: the original assertions used ``assert a, b`` — which only
    checks that ``a`` is truthy and uses ``b`` as the failure message — so
    none of them compared anything. They are rewritten as real comparisons.
    JSON fixtures are compared via ``json.loads(...) == resource.to_dict()``
    so formatting and key order do not matter.
    """
    # unauthenticated cognito role
    role = create_role_from_yaml(unauth_cognito_role_yaml)
    unauthenticated_assume_role_policy = PolicyDocument(Statement=[
        Statement(
            Effect=Allow,
            Principal=Principal('Federated', "cognito-identity.amazonaws.com"),
            Action=[Action('sts', 'AssumeRoleWithWebIdentity')],
            Condition=Condition([
                StringEquals({
                    "cognito-identity.amazonaws.com:aud": 'some-resource'
                }),
                ForAnyValueStringLike({
                    "cognito-identity.amazonaws.com:amr": "unauthenticated"
                })
            ]),
        ),
    ],
    )
    resource = role_to_troposphere(
        role,
        'UnauthenticatedRole',
        assume_role_policy=unauthenticated_assume_role_policy,
    )
    assert isinstance(resource, troposphere.iam.Role)
    policy = resource.properties['Policies'][0].properties
    assert policy['PolicyName'] == 'CognitoSyncAll'
    assert policy['PolicyDocument'].properties['Statement'][0].properties[
        'Resource'] == ['*']

    # authenticated cognito role
    role = create_role_from_yaml(auth_cognito_role_yaml)
    authenticated_assume_role_policy = PolicyDocument(Statement=[
        Statement(
            Effect=Allow,
            Principal=Principal('Federated', "cognito-identity.amazonaws.com"),
            Action=[Action('sts', 'AssumeRoleWithWebIdentity')],
            Condition=Condition([
                StringEquals({
                    "cognito-identity.amazonaws.com:aud": 'some-resource'
                }),
                ForAnyValueStringLike({
                    "cognito-identity.amazonaws.com:amr": "authenticated"
                })
            ]),
        ),
    ],
    )
    resource = role_to_troposphere(
        role,
        'AuthenticatedRole',
        assume_role_policy=authenticated_assume_role_policy)
    assert json.loads(auth_cognito_role_json) == resource.to_dict()

    # AWS Backup Role
    policy_arns = [
        'arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup',
        'arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForRestores'
    ]
    role_dict = {
        'enabled': True,
        'path': '/',
        'role_name': 'Backup',
        'managed_policy_arns': policy_arns,
        'assume_role_policy': {
            'effect': 'Allow',
            'service': ['backup.amazonaws.com']
        }
    }
    role = paco.models.iam.Role('Backup', None)
    role.apply_config(role_dict)
    resource = role_to_troposphere(role, 'Backup')
    assert json.loads(backup_role_json) == resource.to_dict()

    # Simple AWS Principal Role
    role = create_role_from_yaml(aws_principal_role_yaml)
    resource = role_to_troposphere(role, 'SimpleAWS')
    # TODO(review): the original compared (vacuously) against
    # backup_role_json here, which looks copy-pasted from the Backup case;
    # no aws_principal_role_json fixture is visible, so only a smoke check
    # is performed until the right fixture exists.
    assert resource is not None

    # Role with a Condition
    role = create_role_from_yaml(aws_condition_role_yaml)
    resource = role_to_troposphere(role, 'Condition')
    assert json.loads(condition_role_json) == resource.to_dict()
def create_codebuild_cfn(self, template, pipeline_config, action_config, config_ref):
    """Add a CodeBuild Project, its IAM Role/Policies and supporting CFN
    Parameters to `template` for one pipeline build action.

    :param template: troposphere Template the resources are added to.
    :param pipeline_config: pipeline model; ``configuration.disable_codepipeline``
        controls whether artifact-bucket/KMS wiring is emitted.
    :param action_config: build-action model (compute type, image, buildspec,
        release phase, secrets, user policies, ...).
    :param config_ref: paco ref prefix used for the stack output.
    :returns: the troposphere CodeBuild Project resource.
    """
    # CodeBuild
    compute_type_param = self.create_cfn_parameter(
        param_type='String',
        name='CodeBuildComputeType',
        description='The type of compute environment. This determines the number of CPU cores and memory the build environment uses.',
        value=action_config.codebuild_compute_type,
    )
    image_param = self.create_cfn_parameter(
        param_type='String',
        name='CodeBuildImage',
        description='The image tag or image digest that identifies the Docker image to use for this build project.',
        value=action_config.codebuild_image,
    )
    deploy_env_name_param = self.create_cfn_parameter(
        param_type='String',
        name='DeploymentEnvironmentName',
        description='The name of the environment codebuild will be deploying into.',
        value=action_config.deployment_environment,
    )
    # If ECS Release Phase, then create the needed parameters
    # (one ClusterArn/ClusterName/ServiceArn parameter triple per command)
    release_phase = action_config.release_phase
    ecs_release_phase_cluster_arn_param = []
    ecs_release_phase_cluster_name_param = []
    ecs_release_phase_service_arn_param = []
    if release_phase != None and release_phase.ecs != None:
        idx = 0
        for command in release_phase.ecs:
            service_obj = get_model_obj_from_ref(command.service, self.paco_ctx.project)
            service_obj = get_parent_by_interface(service_obj, schemas.IECSServices)
            cluster_arn_param = self.create_cfn_parameter(
                param_type='String',
                name=f'ReleasePhaseECSClusterArn{idx}',
                description='ECS Cluster Arn',
                value=service_obj.cluster + '.arn',
            )
            ecs_release_phase_cluster_arn_param.append(cluster_arn_param)
            cluster_arn_param = self.create_cfn_parameter(
                param_type='String',
                name=f'ReleasePhaseECSClusterName{idx}',
                description='ECS Cluster Name',
                value=service_obj.cluster + '.name',
            )
            ecs_release_phase_cluster_name_param.append(cluster_arn_param)
            service_arn_param = self.create_cfn_parameter(
                param_type='String',
                name=f'ReleasePhaseECSServiceArn{idx}',
                description='ECS Service Arn',
                value=command.service + '.arn',
            )
            ecs_release_phase_service_arn_param.append(service_arn_param)
            idx += 1
    self.project_role_name = self.create_iam_resource_name(
        name_list=[self.res_name_prefix, 'CodeBuild-Project'],
        filter_id='IAM.Role.RoleName')
    # codecommit_repo_users ManagedPolicies
    # Attach each configured CodeCommit user's policy ARN (resolved via a
    # CFN Parameter) as a managed policy on the project role.
    managed_policy_arns = []
    for user_ref in action_config.codecommit_repo_users:
        user = get_model_obj_from_ref(user_ref, self.paco_ctx.project)
        # codecommit_stack = user.__parent__.__parent__.__parent__.stack
        user_logical_id = self.gen_cf_logical_name(user.username)
        codecommit_user_policy_param = self.create_cfn_parameter(
            param_type='String',
            name='CodeCommitUserPolicy' + user_logical_id,
            description='The CodeCommit User Policy for ' + user.username,
            value=user_ref + '.policy.arn',
        )
        managed_policy_arns.append(
            troposphere.Ref(codecommit_user_policy_param))
    project_role_res = troposphere.iam.Role(
        title='CodeBuildProjectRole',
        template=template,
        RoleName=self.project_role_name,
        ManagedPolicyArns=managed_policy_arns,
        AssumeRolePolicyDocument=PolicyDocument(
            Version="2012-10-17",
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[AssumeRole],
                    Principal=Principal("Service", ['codebuild.amazonaws.com']),
                )
            ]))
    project_policy_name = self.create_iam_resource_name(
        name_list=[self.res_name_prefix, 'CodeBuild-Project'],
        filter_id='IAM.Policy.PolicyName')
    # Project Policy
    policy_statements = []
    # Artifact bucket + CMK access only applies when CodePipeline is in use.
    if pipeline_config.configuration.disable_codepipeline == False:
        policy_statements.extend([
            Statement(
                Sid='S3Access',
                Effect=Allow,
                Action=[
                    Action('s3', 'PutObject'),
                    Action('s3', 'PutObjectAcl'),
                    Action('s3', 'GetObject'),
                    Action('s3', 'GetObjectAcl'),
                    Action('s3', 'ListBucket'),
                    Action('s3', 'DeleteObject'),
                    Action('s3', 'GetBucketPolicy'),
                    Action('s3', 'HeadObject'),
                ],
                Resource=[
                    troposphere.Sub('arn:aws:s3:::${ArtifactsBucketName}'),
                    troposphere.Sub('arn:aws:s3:::${ArtifactsBucketName}/*'),
                ]),
            Statement(Sid='KMSCMK',
                      Effect=Allow,
                      Action=[Action('kms', '*')],
                      Resource=[troposphere.Ref(self.cmk_arn_param)])
        ])
    policy_statements.append(
        Statement(Sid='CloudWatchLogsAccess',
                  Effect=Allow,
                  Action=[
                      Action('logs', 'CreateLogGroup'),
                      Action('logs', 'CreateLogStream'),
                      Action('logs', 'PutLogEvents'),
                  ],
                  Resource=['arn:aws:logs:*:*:*']))
    release_phase = action_config.release_phase
    if release_phase != None and release_phase.ecs != None:
        # NOTE(review): ssm_doc is looked up but never used below — presumably
        # it validates that the 'paco_ecs_docker_exec' document exists; confirm.
        ssm_doc = self.paco_ctx.project['resource']['ssm'].ssm_documents[
            'paco_ecs_docker_exec']
        # SSM Exec Document
        policy_statements.append(
            Statement(Sid='ECSReleasePhaseSSMCore',
                      Effect=Allow,
                      Action=[
                          Action('ssm', 'ListDocuments'),
                          Action('ssm', 'ListDocumentVersions'),
                          Action('ssm', 'DescribeDocument'),
                          Action('ssm', 'GetDocument'),
                          Action('ssm', 'DescribeInstanceInformation'),
                          Action('ssm', 'DescribeDocumentParameters'),
                          Action('ssm', 'CancelCommand'),
                          Action('ssm', 'ListCommands'),
                          Action('ssm', 'ListCommandInvocations'),
                          Action('ssm', 'DescribeAutomationExecutions'),
                          Action('ssm', 'DescribeInstanceProperties'),
                          Action('ssm', 'GetCommandInvocation'),
                          Action('ec2', 'DescribeInstanceStatus'),
                      ],
                      Resource=['*']))
        policy_statements.append(
            Statement(
                Sid=f'ECSReleasePhaseSSMSendCommandDocument',
                Effect=Allow,
                Action=[
                    Action('ssm', 'SendCommand'),
                ],
                Resource=[
                    f'arn:aws:ssm:{self.aws_region}:{self.account_ctx.get_id()}:document/paco_ecs_docker_exec'
                ]))
        idx = 0
        for command in release_phase.ecs:
            # SendCommand is restricted to instances tagged with the target
            # ECS cluster's name; cluster access is keyed on ecs:cluster.
            policy_statements.append(
                Statement(
                    Sid=f'ECSReleasePhaseSSMSendCommand{idx}',
                    Effect=Allow,
                    Action=[
                        Action('ssm', 'SendCommand'),
                    ],
                    Resource=[f'arn:aws:ec2:*:*:instance/*'],
                    Condition=Condition(
                        StringLike({
                            'ssm:resourceTag/Paco-ECSCluster-Name':
                            troposphere.Ref(
                                ecs_release_phase_cluster_name_param[idx])
                        }))))
            # NOTE(review): Sid 'ECSRelasePhaseClusterAccess' has a typo
            # ('Relase') — kept as-is since Sids are emitted into templates.
            policy_statements.append(
                Statement(
                    Sid=f'ECSRelasePhaseClusterAccess{idx}',
                    Effect=Allow,
                    Action=[
                        Action('ecs', 'DescribeServices'),
                        Action('ecs', 'RunTask'),
                        Action('ecs', 'StopTask'),
                        Action('ecs', 'DescribeContainerInstances'),
                        Action('ecs', 'ListTasks'),
                        Action('ecs', 'DescribeTasks'),
                    ],
                    Resource=['*'],
                    Condition=Condition(
                        StringEquals({
                            'ecs:cluster':
                            troposphere.Ref(
                                ecs_release_phase_cluster_arn_param[idx])
                        }))))
            idx += 1
        policy_statements.append(
            Statement(Sid='ECSReleasePhaseSSMAutomationExecution',
                      Effect=Allow,
                      Action=[
                          Action('ssm', 'StartAutomationExecution'),
                          Action('ssm', 'StopAutomationExecution'),
                          Action('ssm', 'GetAutomationExecution'),
                      ],
                      Resource=['arn:aws:ssm:::automation-definition/']))
        # ECS Policies
        policy_statements.append(
            Statement(Sid='ECSRelasePhaseECS',
                      Effect=Allow,
                      Action=[
                          Action('ecs', 'DescribeTaskDefinition'),
                          Action('ecs', 'DeregisterTaskDefinition'),
                          Action('ecs', 'RegisterTaskDefinition'),
                          Action('ecs', 'ListTagsForResource'),
                          Action('ecr', 'DescribeImages')
                      ],
                      Resource=['*']))
        # IAM Pass Role
        policy_statements.append(
            Statement(Sid='IAMPassRole',
                      Effect=Allow,
                      Action=[Action('iam', 'passrole')],
                      Resource=['*']))
    # Expose configured Secrets Manager secrets to the build.
    if len(action_config.secrets) > 0:
        secrets_arn_list = []
        for secret_ref in action_config.secrets:
            # Hash the ref so the parameter name is a valid, stable logical id.
            name_hash = md5sum(str_data=secret_ref)
            secret_arn_param = self.create_cfn_parameter(
                param_type='String',
                name='SecretsArn' + name_hash,
                description='Secrets Manager Secret Arn to expose access to',
                value=secret_ref + '.arn')
            secrets_arn_list.append(troposphere.Ref(secret_arn_param))
        policy_statements.append(
            Statement(Sid='SecretsManager',
                      Effect=Allow,
                      Action=[
                          Action('secretsmanager', 'GetSecretValue'),
                      ],
                      Resource=secrets_arn_list))
    project_policy_res = troposphere.iam.PolicyType(
        title='CodeBuildProjectPolicy',
        PolicyName=project_policy_name,
        PolicyDocument=PolicyDocument(Statement=policy_statements),
        Roles=[troposphere.Ref(project_role_res)])
    template.add_resource(project_policy_res)
    # User defined policies
    for policy in action_config.role_policies:
        policy_name = self.create_resource_name_join(
            name_list=[
                self.res_name_prefix, 'CodeBuild-Project', policy.name
            ],
            separator='-',
            filter_id='IAM.Policy.PolicyName',
            hash_long_names=True,
            camel_case=True)
        statement_list = []
        for statement in policy.statement:
            action_list = []
            for action in statement.action:
                # actions are configured as 'service:ActionName' strings
                action_parts = action.split(':')
                action_list.append(Action(action_parts[0], action_parts[1]))
            statement_list.append(
                Statement(Effect=statement.effect,
                          Action=action_list,
                          Resource=statement.resource))
        troposphere.iam.PolicyType(
            title=self.create_cfn_logical_id('CodeBuildProjectPolicy' + policy.name, camel_case=True),
            template=template,
            PolicyName=policy_name,
            PolicyDocument=PolicyDocument(Statement=statement_list, ),
            Roles=[troposphere.Ref(project_role_res)])
    # ECR Permission Policies
    self.set_ecr_repositories_statements(
        action_config.ecr_repositories, template,
        f'{self.res_name_prefix}-CodeBuild-Project',
        [troposphere.Ref(project_role_res)])
    # CodeBuild Project Resource
    timeout_mins_param = self.create_cfn_parameter(
        param_type='String',
        name='TimeoutInMinutes',
        description='How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before timing out any related build that did not get marked as completed.',
        value=action_config.timeout_mins,
    )
    # Environment Variables
    codebuild_env_vars = [{
        'Name': 'DeploymentEnvironmentName',
        'Value': troposphere.Ref(deploy_env_name_param)
    }]
    if pipeline_config.configuration.disable_codepipeline == False:
        codebuild_env_vars.extend([{
            'Name': 'ArtifactsBucket',
            'Value': troposphere.Ref(self.artifacts_bucket_name_param),
        }, {
            'Name': 'KMSKey',
            'Value': troposphere.Ref(self.cmk_arn_param)
        }])
    # If ECS Release Phase, then add the config to the environment
    release_phase = action_config.release_phase
    if release_phase != None and release_phase.ecs != None:
        idx = 0
        for command in release_phase.ecs:
            codebuild_env_vars.append({
                'Name': f'PACO_CB_RP_ECS_CLUSTER_ID_{idx}',
                'Value': troposphere.Ref(ecs_release_phase_cluster_arn_param[idx])
            })
            codebuild_env_vars.append({
                'Name': f'PACO_CB_RP_ECS_SERVICE_ID_{idx}',
                'Value': troposphere.Ref(ecs_release_phase_service_arn_param[idx])
            })
            idx += 1
    # CodeBuild: Environment
    # NOTE(review): `source` is built here but never referenced afterwards —
    # the project's Source is assembled in project_dict below; confirm this
    # local is dead code.
    source = troposphere.codebuild.Source(Type='CODEPIPELINE', )
    if action_config.buildspec != None and action_config.buildspec != '':
        source = troposphere.codebuild.Source(
            Type='CODEPIPELINE',
            BuildSpec=action_config.buildspec,
        )
    project_dict = {
        'Name': troposphere.Ref(self.resource_name_prefix_param),
        'Artifacts': {
            'Type': 'NO_ARTIFACTS'
        },
        'Description': troposphere.Ref('AWS::StackName'),
        'ServiceRole': troposphere.GetAtt('CodeBuildProjectRole', 'Arn'),
        'Environment': {
            'Type': 'LINUX_CONTAINER',
            'ComputeType': troposphere.Ref(compute_type_param),
            'Image': troposphere.Ref(image_param),
            'EnvironmentVariables': codebuild_env_vars,
            'PrivilegedMode': action_config.privileged_mode
        },
        'Source': {
            'Type': 'NO_SOURCE'
        },
        'TimeoutInMinutes': troposphere.Ref(timeout_mins_param),
        'Tags': troposphere.codebuild.Tags(
            Name=troposphere.Ref(self.resource_name_prefix_param))
    }
    if action_config.buildspec:
        project_dict['Source']['BuildSpec'] = action_config.buildspec
    # Source selection: CodePipeline when enabled, otherwise GitHub is required.
    if pipeline_config.configuration.disable_codepipeline == False:
        project_dict['EncryptionKey'] = troposphere.Ref(self.cmk_arn_param)
        project_dict['Artifacts'] = {'Type': 'CODEPIPELINE'}
        project_dict['Source']['Type'] = 'CODEPIPELINE'
    elif action_config.source.github != None:
        project_dict['Source']['Type'] = 'GITHUB'
        project_dict['Source']['Location'] = action_config.source.github.location
        project_dict['Source']['ReportBuildStatus'] = action_config.source.github.report_build_status
    else:
        raise PacoException(
            "CodeBuild source must be configured when Codepipeline is disabled."
        )
    if action_config.concurrent_build_limit > 0:
        project_dict['ConcurrentBuildLimit'] = action_config.concurrent_build_limit
    project_res = troposphere.codebuild.Project.from_dict(
        'CodeBuildProject', project_dict)
    self.template.add_resource(project_res)
    self.create_output(title='ProjectArn',
                       value=troposphere.GetAtt(project_res, 'Arn'),
                       description='CodeBuild Project Arn',
                       ref=config_ref + '.project.arn')
    return project_res
def __init__(
    self,
    stack,
    paco_ctx,
    grp_id=None,
    topics=None,
):
    """Build an SNS Topics/Subscriptions CloudFormation template.

    :param stack: paco Stack whose ``resource`` is either a single topic
        (application resource) or a container of topics (global resources).
    :param paco_ctx: paco context object.
    :param grp_id: optional resource-group id; when None a single-topic
        naming scheme is used.
    :param topics: optional explicit list of topic models; when None the
        topics are derived from ``stack.resource``.
    """
    enabled_topics = False
    config = stack.resource
    # this template is used as both SNSTopics by global resources and a
    # single SNSTopic for an application resource.
    if topics == None:
        if grp_id == None:
            topics = [stack.resource]
            enabled_topics = stack.resource.is_enabled()
        else:
            topics = config.values()
            for topic in topics:
                if topic.is_enabled():
                    enabled_topics = True
    else:
        if len(topics) > 0:
            enabled_topics = True
    super().__init__(
        stack,
        paco_ctx,
        enabled=enabled_topics,
    )
    if grp_id == None:
        self.set_aws_name('SNS', self.resource_group_name, self.resource_name)
    else:
        self.set_aws_name('SNS', grp_id)
    # Troposphere Template Initialization
    self.init_template('SNS Topics and Subscriptions')
    template = self.template
    # Topic Resources and Outputs
    topics_ref_cross_list = []
    for topic in topics:
        if not topic.is_enabled():
            continue
        topic_logical_id = self.create_cfn_logical_id(topic.name)
        # Do not specify a TopicName, as then updates cannot be performed that require
        # replacement of this resource.
        cfn_export_dict = {}
        if topic.display_name:
            cfn_export_dict['DisplayName'] = topic.display_name
        # Topic Resource
        topic_resource = troposphere.sns.Topic.from_dict(
            'Topic' + topic_logical_id,
            cfn_export_dict
        )
        if topic.cross_account_access:
            topics_ref_cross_list.append(troposphere.Ref(topic_resource))
        topic.topic_resource = topic_resource
        template.add_resource(topic_resource)
        # Subscriptions
        idx = 0
        for subscription in topic.subscriptions:
            sub_dict = {
                'TopicArn': troposphere.Ref(topic_resource)
            }
            # paco refs are resolved through a CFN Parameter; literal
            # endpoints are passed straight through.
            if references.is_ref(subscription.endpoint):
                param_name = f'Endpoint{topic_logical_id}{idx}'
                parameter = self.create_cfn_parameter(
                    param_type = 'String',
                    name = param_name,
                    description = 'Subscription Endpoint',
                    value = subscription.endpoint,
                )
                endpoint = parameter
            else:
                endpoint = subscription.endpoint
            sub_dict['Endpoint'] = endpoint
            sub_dict['Protocol'] = subscription.protocol
            if subscription.filter_policy:
                # filter_policy is stored as a JSON string in the model
                sub_dict['FilterPolicy'] = json.loads(subscription.filter_policy)
            subscription_logical_id = f"Subscription{topic_logical_id}{idx}"
            sub_resource = troposphere.sns.SubscriptionResource.from_dict(
                subscription_logical_id,
                sub_dict
            )
            template.add_resource(sub_resource)
            idx += 1
        # Topic Outputs
        if grp_id == None:
            output_ref = stack.resource.paco_ref_parts
        else:
            output_ref = '.'.join([stack.resource.paco_ref_parts, topic.name])
        self.create_output(
            title='SNSTopicArn' + topic_logical_id,
            value=troposphere.Ref(topic_resource),
            ref=f'{output_ref}.arn'
        )
        self.create_output(
            title='SNSTopicName' + topic_logical_id,
            value=troposphere.GetAtt(topic_resource, "TopicName"),
            ref=f'{output_ref}.name',
        )
    # Cross-account access policy: allow Publish/Subscribe from every
    # account in the paco project.
    if len(topics_ref_cross_list) > 0:
        account_id_list = [
            account.account_id for account in self.paco_ctx.project.accounts.values()
        ]
        topic_policy_resource = troposphere.sns.TopicPolicy(
            'TopicPolicyCrossAccountPacoProject',
            Topics = topics_ref_cross_list,
            PolicyDocument = Policy(
                Version = '2012-10-17',
                Id = "CrossAccountPublish",
                Statement=[
                    Statement(
                        Effect = Allow,
                        Principal = Principal("AWS", "*"),
                        Action = [
                            awacs.sns.Publish,
                            awacs.sns.Subscribe
                        ],
                        # NOTE(review): this references only the topic_resource
                        # left over from the last loop iteration, not
                        # topics_ref_cross_list — looks like it should cover
                        # every cross-account topic; confirm before changing.
                        Resource = [troposphere.Ref(topic_resource)],
                        Condition = Condition(
                            StringEquals({
                                'AWS:SourceOwner': account_id_list,
                            })
                        )
                    )
                ]
            )
        )
        template.add_resource(topic_policy_resource)
def __init__(
    self,
    stack,
    paco_ctx,
    bucket_context,
    bucket_policy_only,
):
    """Build an S3 Bucket (and/or BucketPolicy) CloudFormation template.

    :param stack: paco Stack for this template.
    :param paco_ctx: paco context object.
    :param bucket_context: dict with at least 'config' (the bucket model)
        and 'group_id' keys.
    :param bucket_policy_only: when True, only the BucketPolicy is
        provisioned (the bucket itself is managed elsewhere).
    """
    bucket = bucket_context['config']
    config_ref = bucket.paco_ref_parts
    # Application Group
    aws_name_list = []
    if bucket_context['group_id'] != None:
        aws_name_list.append(bucket_context['group_id'])
    # Bucket Name
    if schemas.IResource.providedBy(bucket.__parent__) == True:
        aws_name_list.extend([bucket.__parent__.name, bucket.name])
        cfn_logical_id_prefix = self.create_cfn_logical_id_join([bucket.__parent__.name, bucket.name], True)
    else:
        aws_name_list.append(bucket.name)
        cfn_logical_id_prefix = self.create_cfn_logical_id_join([bucket.name], True)
    # Policy
    if bucket_policy_only == True:
        aws_name_list.append('policy')
    super().__init__(
        stack,
        paco_ctx,
        enabled=bucket_context['config'].is_enabled(),
        iam_capabilities=["CAPABILITY_NAMED_IAM"],
    )
    self.set_aws_name('S3', aws_name_list)
    self.s3_context_id = config_ref
    self.bucket_context = bucket_context
    s3_ctl = self.paco_ctx.get_controller('S3')
    bucket_name = s3_ctl.get_bucket_name(self.s3_context_id)
    # Init Troposphere template
    self.init_template(bucket.title_or_name)
    template = self.template
    # Resources
    if bucket_policy_only == False:
        s3_logical_id = cfn_logical_id_prefix + 'Bucket'
        cfn_export_dict = bucket.cfn_export_dict
        cfn_export_dict['BucketName'] = bucket_name
        # notification configuration
        if hasattr(bucket, 'notifications'):
            cfn_export_dict['NotificationConfiguration'] = {}
            if hasattr(bucket.notifications, 'lambdas'):
                lambda_notifs = []
                params = {}
                for lambda_notif in bucket.notifications.lambdas:
                    # [9:] strips the 'paco.ref ' prefix from the function ref
                    # — presumably; confirm against the model's ref format.
                    param_name = self.create_cfn_logical_id('LambdaNotif' + lambda_notif.function[9:])
                    # de-duplicate Parameters when several events target the
                    # same Lambda function
                    if param_name not in params:
                        lambda_arn_param = self.create_cfn_parameter(
                            name=param_name,
                            param_type='String',
                            description='Lambda ARN parameter.',
                            value=lambda_notif.function + '.arn',
                        )
                        params[param_name] = lambda_arn_param
                    lambda_notifs.append({
                        'Event': lambda_notif.event,
                        'Function': troposphere.Ref(param_name)
                    })
                cfn_export_dict['NotificationConfiguration']["LambdaConfigurations"] = lambda_notifs
        s3_resource = troposphere.s3.Bucket.from_dict(s3_logical_id, cfn_export_dict)
        # We always retain. Bucket cleanup is handled by Stack hooks.
        s3_resource.DeletionPolicy = 'Retain'
        template.add_resource(s3_resource)
        # Output
        self.create_output(
            title=s3_logical_id + 'Name',
            value=troposphere.Ref(s3_resource),
            ref=config_ref + '.name'
        )
    # Bucket Policy
    policy_statements = []
    if bucket.cloudfront_origin == True:
        # CloudFront OriginAccessIdentity resource
        cloudfront_origin_resource = troposphere.cloudfront.CloudFrontOriginAccessIdentity.from_dict(
            'CloudFrontOriginAccessIdentity',
            {'CloudFrontOriginAccessIdentityConfig': {'Comment': self.s3_context_id}},
        )
        template.add_resource(cloudfront_origin_resource)
        # Grant the OAI read access to all objects in the bucket
        policy_statements.append(
            Statement(
                Effect = Allow,
                Principal = Principal('CanonicalUser', troposphere.GetAtt('CloudFrontOriginAccessIdentity', 'S3CanonicalUserId')),
                Action = [awacs.s3.GetObject],
                Resource = ['arn:aws:s3:::{}/*'.format(bucket_name)],
            )
        )
        # S3 BucketPolicy resource
        #policy = Policy(
        #    Version='2012-10-17',
        #    Statement=[
        #        Statement(
        #            Effect = Allow,
        #            Principal = Principal('CanonicalUser',troposphere.GetAtt('CloudFrontOriginAccessIdentity','S3CanonicalUserId')),
        #            Action = [awacs.s3.GetObject],
        #            Resource = ['arn:aws:s3:::{}/*'.format(bucket_name)],
        #        )
        #    ]
        #)
        #bucket_policy_resource = troposphere.s3.BucketPolicy(
        #    'CloudFrontBucketPolicy',
        #    Bucket = bucket_name,
        #    PolicyDocument = policy,
        #)
        #bucket_policy_resource.DependsOn = [
        #    'CloudFrontOriginAccessIdentity',
        #    s3_logical_id
        #]
        #template.add_resource(bucket_policy_resource)
        # Output CloudFrontOriginAccessIdentity
        self.create_output(
            title='CloudFrontOriginAccessIdentity',
            value=troposphere.Ref(cloudfront_origin_resource),
            ref=config_ref + '.origin_id',
        )
    if len(bucket.policy) > 0:
        # Bucket Policy
        # ToDo: allow mixing CloudFront Origin policies and other bucket policies together
        # Statement
        for policy_statement in bucket.policy:
            # XXX: Disabled: Bucket policies are overwritten when updated with a new stack.
            #      This means we want all of the policies previously provisioned.
            #if policy_statement.processed == True:
            #    continue
            statement_dict = {
                'Effect': policy_statement.effect,
                'Action': [
                    Action(*action.split(':')) for action in policy_statement.action
                ],
            }
            # Sid
            if policy_statement.sid != None and len(policy_statement.sid) > 0:
                statement_dict['Sid'] = policy_statement.sid
            # Principal
            if policy_statement.principal != None and len(policy_statement.principal) > 0:
                # ToDo: awacs only allows one type of Principal ... is there a use-case where
                # multiple principal types are needed?
                for key, value in policy_statement.principal.items():
                    statement_dict['Principal'] = Principal(key, value)
            elif policy_statement.aws != None and len(policy_statement.aws) > 0:
                statement_dict['Principal'] = Principal('AWS', policy_statement.aws)
            # Condition
            if policy_statement.condition != {}:
                # ToDo: support all conditions!
                # currently only invoked by ctl_cloudtrail.py
                conditions = []
                for condition_key, condition_value in policy_statement.condition.items():
                    if condition_key == 'StringEquals':
                        conditions.append(StringEquals(condition_value))
                    else:
                        raise StackException(
                            PacoErrorCode.Unknown,
                            message="Only StringEquals is a supported condition (*fix-me!*). Bucket name: {}".format(bucket_name)
                        )
                statement_dict['Condition'] = Condition(conditions)
            # Resource
            bucket_arn = s3_ctl.get_bucket_arn(self.s3_context_id)
            if policy_statement.resource_suffix and len(policy_statement.resource_suffix) > 0:
                statement_dict['Resource'] = [
                    bucket_arn + res_suffix for res_suffix in policy_statement.resource_suffix
                ]
            else:
                statement_dict['Resource'] = [bucket_arn]
            policy_statements.append(
                Statement(**statement_dict)
            )
    if len(policy_statements) > 0:
        bucket_policy_resource = troposphere.s3.BucketPolicy(
            cfn_logical_id_prefix + 'BucketPolicy',
            template = template,
            Bucket = bucket_name,
            PolicyDocument = Policy(
                Version = '2012-10-17',
                Statement = policy_statements,
            )
        )
        # The policy must wait for the bucket (and OAI, when used) to exist.
        depends_on = []
        if bucket_policy_only == False:
            depends_on.append(s3_resource)
        if bucket.cloudfront_origin == True:
            depends_on.append('CloudFrontOriginAccessIdentity')
        bucket_policy_resource.DependsOn = depends_on
    # Generate the Template
    self.set_template()
Resource=[Join("", ["arn:aws:s3:::", Ref(bucket)])]), Statement( Sid="AWSCloudTrailWrite", Effect=Allow, Action=[Action("s3", "PutObject")], Principal=Principal("Service", ["cloudtrail.amazonaws.com"]), Resource=[ Join("", [ "arn:aws:s3:::", Ref(bucket), "/AWSLogs/", Ref("AWS::AccountId"), "/*" ]) ], Condition=Condition( StringEquals('s3:x-amz-acl', 'bucket-owner-full-control'))) ]))) lambda_role = t.add_resource( Role( "LambdaRole", AssumeRolePolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["lambda.amazonaws.com"])) ]), Policies=[ IAMPolicy( "LambdaPolicy", PolicyName="LambdaCloudtrailPolicy", PolicyDocument=Policy(Statement=[
def generate(account_list=None, region_list=None, file_location=None, output_keys=False, dry_run=False):
    """CloudFormation template generator for use in creating the resources
    required to capture logs in a centrally managed account per UCSD standards.

    :param account_list: account IDs allowed to deliver CloudTrail logs and
        subscribe to the log destination (a tuple is converted to a list).
    :param region_list: regions to target; defaults to the four US regions.
    :param file_location: where to write the rendered JSON template;
        defaults to ``<log_aggregation_cf>/log_targets.json``.
    :param output_keys: when True, also create an IAM access key for the
        Splunk user and output its credentials.
    :param dry_run: when True, print the template instead of writing it.

    NOTE(review): names such as ``AccountId``, ``Region``, ``GetBucketAcl``,
    ``PutObject``, ``IAMPassRole`` and the module aliases (``k``, ``fh``,
    ``as3``, ``akinesis``, ``asqs``, ``cwl``) come from this module's import
    header, which is outside this view.
    """
    if type(account_list) == tuple:
        account_list = list(account_list)
    parameter_groups = []
    region_list = region_list if region_list else ['us-west-1', 'us-west-2', 'us-east-1', 'us-east-2']
    t = Template()
    t.add_version("2010-09-09")
    t.add_description("UCSD Log Target AWS CloudFormation Template - this CFn template configures a given account to receive logs from other accounts so as to aggregate and then optionally forward those logs on to the UCSD Splunk installation.")

    # Create Kinesis and IAM Roles
    log_stream_shard_count = t.add_parameter(Parameter("LogStreamShardCount",
        Description="Number of shards to create within the AWS Kinesis stream created to handle CloudWatch Logs.",
        Type="Number",
        MinValue=1,
        MaxValue=64,
        Default=1))
    log_stream_retention_period = t.add_parameter(Parameter("LogStreamRetentionPeriod",
        Description = "Number of hours to retain logs in the Kinesis stream.",
        Type="Number",
        MinValue=24,
        MaxValue=120,
        Default=24))
    parameter_groups.append({'Label': {'default': 'Log Stream Inputs'},
                             'Parameters': [log_stream_shard_count.name, log_stream_retention_period.name]})
    log_stream = t.add_resource(k.Stream("LogStream",
        RetentionPeriodHours=Ref(log_stream_retention_period),
        ShardCount=Ref(log_stream_shard_count)))
    firehose_bucket = t.add_resource(s3.Bucket('LogS3DeliveryBucket'))
    # Firehose assumes this role; ExternalId must match this account's id.
    firehose_delivery_role = t.add_resource(iam.Role('LogS3DeliveryRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[AssumeRole],
                Principal=Principal('Service', 'firehose.amazonaws.com'),
                Condition=Condition(StringEquals('sts:ExternalId', AccountId)))])))
    log_s3_delivery_policy = t.add_resource(iam.PolicyType('LogS3DeliveryPolicy',
        Roles=[Ref(firehose_delivery_role)],
        PolicyName='LogS3DeliveryPolicy',
        PolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[as3.AbortMultipartUpload,
                        as3.GetBucketLocation,
                        as3.GetObject,
                        as3.ListBucket,
                        as3.ListBucketMultipartUploads,
                        as3.PutObject],
                Resource=[
                    Join('', ['arn:aws:s3:::', Ref(firehose_bucket)]),
                    Join('', ['arn:aws:s3:::', Ref(firehose_bucket), '*'])]),
            Statement(
                Effect=Allow,
                Action=[akinesis.Action('Get*'), akinesis.DescribeStream, akinesis.ListStreams],
                Resource=[GetAtt(log_stream, 'Arn')])])))
    # Firehose: Kinesis stream -> S3 delivery bucket
    s3_firehose = t.add_resource(fh.DeliveryStream('LogToS3DeliveryStream',
        DependsOn=[log_s3_delivery_policy.name],
        DeliveryStreamName='LogToS3DeliveryStream',
        DeliveryStreamType='KinesisStreamAsSource',
        KinesisStreamSourceConfiguration=fh.KinesisStreamSourceConfiguration(
            KinesisStreamARN=GetAtt(log_stream, 'Arn'),
            RoleARN=GetAtt(firehose_delivery_role, 'Arn')
        ),
        S3DestinationConfiguration=fh.S3DestinationConfiguration(
            BucketARN=GetAtt(firehose_bucket, 'Arn'),
            BufferingHints=fh.BufferingHints(
                IntervalInSeconds=300,
                SizeInMBs=50
            ),
            CompressionFormat='UNCOMPRESSED',
            Prefix='firehose/',
            RoleARN=GetAtt(firehose_delivery_role, 'Arn'),
        )))
    t.add_output(Output('SplunkKinesisLogStream',
                        Value=GetAtt(log_stream, 'Arn'),
                        Description='ARN of the kinesis stream for log aggregation.'))

    # Generate Bucket with Lifecycle Policies
    ct_s3_key_prefix = t.add_parameter(Parameter('CloudTrailKeyPrefix',
        Type='String',
        Default='',
        Description='Key name prefix for logs being sent to S3'))
    bucket_name = t.add_parameter(Parameter("BucketName",
        Description="Name to assign to the central logging retention bucket",
        Type="String",
        AllowedPattern="([a-z]|[0-9])+",
        MinLength=2,
        MaxLength=64))
    glacier_migration_days = t.add_parameter(Parameter("LogMoveToGlacierInDays",
        Description="Number of days until logs are expired from S3 and transitioned to Glacier",
        Type="Number",
        Default=365))
    glacier_deletion_days = t.add_parameter(Parameter("LogDeleteFromGlacierInDays",
        Description="Number of days until logs are expired from Glacier and deleted",
        Type="Number",
        Default=365*7))
    parameter_groups.append({'Label': {'default': 'S3 Log Destination Parameters'},
                             'Parameters': [bucket_name.name, ct_s3_key_prefix.name, glacier_migration_days.name, glacier_deletion_days.name]})
    dead_letter_queue = t.add_resource(sqs.Queue('deadLetterQueue'))
    queue = t.add_resource(sqs.Queue('s3DeliveryQueue',
        MessageRetentionPeriod=14*24*60*60,  # 14 d * 24 h * 60 m * 60 s
        # 5 m * 60 s per Splunk docs here: http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWS#Configure_SQS
        VisibilityTimeout=5*60,
        RedrivePolicy=sqs.RedrivePolicy(
            deadLetterTargetArn=GetAtt(dead_letter_queue, 'Arn'),
            maxReceiveCount=10
        )))
    t.add_output(Output('SplunkS3Queue',
                        Value=GetAtt(queue, 'Arn'),
                        Description='Queue for Splunk SQS S3 ingest'))
    t.add_output(Output('SplunkS3DeadLetterQueue',
                        Value=GetAtt(dead_letter_queue, 'Arn'),
                        Description="Dead letter queue for Splunk SQS S3 ingest"))
    # Allow S3 (any bucket matching the name in any account/region) to send
    # object-created notifications into the queue.
    t.add_resource(sqs.QueuePolicy('s3DeliveryQueuePolicy',
        PolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal("AWS", "*"),
                Action=[asqs.SendMessage],
                Resource=[GetAtt(queue, 'Arn')],
                Condition=Condition(ArnLike("aws:SourceArn", Join('', ["arn:aws:s3:*:*:", Ref(bucket_name)]))))]),
        Queues=[Ref(queue)]))
    bucket = t.add_resource(s3.Bucket("LogDeliveryBucket",
        DependsOn=[log_stream.name, queue.name],
        BucketName=Ref(bucket_name),
        AccessControl="LogDeliveryWrite",
        NotificationConfiguration=s3.NotificationConfiguration(
            QueueConfigurations=[s3.QueueConfigurations(
                Event="s3:ObjectCreated:*",
                Queue=GetAtt(queue, 'Arn'))]),
        LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
            s3.LifecycleRule(
                Id="S3ToGlacierTransition",
                Status="Enabled",
                ExpirationInDays=Ref(glacier_deletion_days),
                Transition=s3.LifecycleRuleTransition(
                    StorageClass="Glacier",
                    TransitionInDays=Ref(glacier_migration_days)))])))
    # CloudTrail delivery: standard GetBucketAcl + conditional PutObject per
    # source account.
    bucket_policy = t.add_resource(s3.BucketPolicy("LogDeliveryBucketPolicy",
        Bucket=Ref(bucket),
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect="Allow",
                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                    Action=[GetBucketAcl],
                    Resource=[GetAtt(bucket, 'Arn')]),
                Statement(
                    Effect="Allow",
                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                    Action=[PutObject],
                    Condition=Condition(StringEquals({"s3:x-amz-acl": "bucket-owner-full-control"})),
                    Resource=[Join('', [GetAtt(bucket, "Arn"), Ref(ct_s3_key_prefix), "/AWSLogs/", acct_id, "/*"]) for acct_id in account_list])])))
    splunk_sqs_s3_user = t.add_resource(iam.User('splunkS3SQSUser',
        Path='/',
        UserName='******'))
    splunk_user_policy = t.add_resource(_generate_splunk_policy(users=[Ref(splunk_sqs_s3_user)]))
    t.add_output(Output('BucketName',
                        Description="Name of the bucket for CloudTrail log delivery",
                        Value=Ref(bucket)))

    # Log destination setup
    cwl_to_kinesis_role = t.add_resource(iam.Role('CWLtoKinesisRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[AssumeRole],
                Principal=Principal("Service", Join('', ["logs.", Region, ".amazonaws.com"])))])))
    cwl_to_kinesis_policy_link = t.add_resource(iam.PolicyType('CWLtoKinesisPolicy',
        PolicyName='CWLtoKinesisPolicy',
        Roles=[Ref(cwl_to_kinesis_role)],
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Resource=[GetAtt(log_stream, 'Arn')],
                    Action=[akinesis.PutRecord]),
                Statement(
                    Effect=Allow,
                    Resource=[GetAtt(cwl_to_kinesis_role, 'Arn')],
                    Action=[IAMPassRole])])))
    log_destination = t.add_resource(cwl.Destination('CWLtoKinesisDestination',
        DependsOn=[cwl_to_kinesis_policy_link.name],
        DestinationName='CWLtoKinesisDestination',
        DestinationPolicy=_generate_log_destination_policy_test('CWLtoKinesisDestination', account_list),
        RoleArn=GetAtt(cwl_to_kinesis_role, 'Arn'),
        TargetArn=GetAtt(log_stream, 'Arn')))
    t.add_output(Output('childAccountLogDeliveryDestinationArn',
                        Value=GetAtt(log_destination, 'Arn'),
                        Description='Log Destination to specify when deploying the source cloudformation template in other accounts.'))
    if output_keys:
        splunk_user_creds = t.add_resource(iam.AccessKey('splunkAccountUserCreds',
            UserName=Ref(splunk_sqs_s3_user)))
        t.add_output(Output('splunkUserAccessKey',
                            Description='AWS Access Key for the user created for splunk to use when accessing logs',
                            Value=Ref(splunk_user_creds)))
        t.add_output(Output('splunkUserSecretKey',
                            Description='AWS Secret Access Key ID for the user created for splunk to use when accessing logs',
                            Value=GetAtt(splunk_user_creds, 'SecretAccessKey')))
    t.add_output(Output('splunkCWLRegion',
                        Description="The AWS region that contains the data. In aws_cloudwatch_logs_tasks.conf, enter the region ID.",
                        Value=Region))
    t.add_output(Output("DeploymentAccount",
                        Value=AccountId,
                        Description="Convenience Output for referencing AccountID of the log aggregation account"))
    t.add_metadata({"AWS::CloudFormation::Interface": {"ParameterGroups": parameter_groups}})
    if dry_run:
        print(t.to_json())
    else:
        save_path = file_location if file_location else os.path.join(log_aggregation_cf, 'log_targets.json')
        with open(save_path, 'w') as f:
            f.write(t.to_json())
def create_codebuild_cfn(self, template, pipeline_config, action_config, config_ref):
    """Build the CloudFormation resources for a CodeBuild build action.

    Adds to ``template``: the CodeBuild project role and its project policy,
    optional user-defined policies, the CodeBuild Project itself and (when
    batch builds are enabled) a batch service role and policy.  Registers a
    ``ProjectArn`` output and returns the troposphere Project resource.

    :param template: troposphere Template the resources are added to.
    :param pipeline_config: pipeline model; ``configuration.disable_codepipeline``
        switches between CodePipeline-driven and standalone project wiring.
    :param action_config: the CodeBuild action model (compute type, image,
        release phase, secrets, vpc_config, artifacts, batch config, ...).
    :param config_ref: paco ref prefix used for the ``ProjectArn`` output.
    :return: the ``troposphere.codebuild.Project`` resource.
    """
    # CodeBuild
    compute_type_param = self.create_cfn_parameter(
        param_type='String',
        name='CodeBuildComputeType',
        description='The type of compute environment. This determines the number of CPU cores and memory the build environment uses.',
        value=action_config.codebuild_compute_type,
    )
    image_param = self.create_cfn_parameter(
        param_type='String',
        name='CodeBuildImage',
        description='The image tag or image digest that identifies the Docker image to use for this build project.',
        value=action_config.codebuild_image,
    )
    deploy_env_name_param = self.create_cfn_parameter(
        param_type='String',
        name='DeploymentEnvironmentName',
        description='The name of the environment codebuild will be deploying into.',
        value=action_config.deployment_environment,
    )
    # If ECS Release Phase, then create the needed parameters
    # One (ClusterArn, ClusterName, ServiceArn) parameter triple per release
    # phase command, indexed by position so later loops can line up on idx.
    release_phase = action_config.release_phase
    ecs_release_phase_cluster_arn_param = []
    ecs_release_phase_cluster_name_param = []
    ecs_release_phase_service_arn_param = []
    if release_phase != None and release_phase.ecs != None:
        idx = 0
        for command in release_phase.ecs:
            service_obj = get_model_obj_from_ref(command.service, self.paco_ctx.project)
            # Walk up from the service to its owning IECSServices container
            # so the cluster ref can be resolved.
            service_obj = get_parent_by_interface(service_obj, schemas.IECSServices)
            cluster_arn_param = self.create_cfn_parameter(
                param_type='String',
                name=f'ReleasePhaseECSClusterArn{idx}',
                description='ECS Cluster Arn',
                value=service_obj.cluster + '.arn',
            )
            ecs_release_phase_cluster_arn_param.append(cluster_arn_param)
            cluster_arn_param = self.create_cfn_parameter(
                param_type='String',
                name=f'ReleasePhaseECSClusterName{idx}',
                description='ECS Cluster Name',
                value=service_obj.cluster + '.name',
            )
            ecs_release_phase_cluster_name_param.append(cluster_arn_param)
            service_arn_param = self.create_cfn_parameter(
                param_type='String',
                name=f'ReleasePhaseECSServiceArn{idx}',
                description='ECS Service Arn',
                value=command.service + '.arn',
            )
            ecs_release_phase_service_arn_param.append(service_arn_param)
            idx += 1
    self.project_role_name = self.create_iam_resource_name(
        name_list=[self.res_name_prefix, 'CodeBuild-Project'],
        filter_id='IAM.Role.RoleName')
    # codecommit_repo_users ManagedPolicies
    # Each configured CodeCommit user contributes its pre-built user policy
    # ARN (resolved via parameter) as a managed policy on the project role.
    managed_policy_arns = []
    for user_ref in action_config.codecommit_repo_users:
        user = get_model_obj_from_ref(user_ref, self.paco_ctx.project)
        # codecommit_stack = user.__parent__.__parent__.__parent__.stack
        user_logical_id = self.gen_cf_logical_name(user.username)
        codecommit_user_policy_param = self.create_cfn_parameter(
            param_type='String',
            name='CodeCommitUserPolicy' + user_logical_id,
            description='The CodeCommit User Policy for ' + user.username,
            value=user_ref + '.policy.arn',
        )
        managed_policy_arns.append(
            troposphere.Ref(codecommit_user_policy_param))
    # Service role assumed by codebuild.amazonaws.com.
    project_role_res = troposphere.iam.Role(
        title='CodeBuildProjectRole',
        template=template,
        RoleName=self.project_role_name,
        ManagedPolicyArns=managed_policy_arns,
        AssumeRolePolicyDocument=PolicyDocument(
            Version="2012-10-17",
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[AssumeRole],
                    Principal=Principal("Service", ['codebuild.amazonaws.com']),
                )
            ]))
    project_policy_name = self.create_iam_resource_name(
        name_list=[self.res_name_prefix, 'CodeBuild-Project'],
        filter_id='IAM.Policy.PolicyName')
    # Project Policy
    # NOTE: this list is also appended to AFTER project_policy_res is built
    # (VPC Config Permissions below); the PolicyDocument keeps a reference to
    # this same list object, so those later appends still land in the policy.
    policy_statements = []
    if self.enable_artifacts_bucket:
        policy_statements.append(
            Statement(
                Sid='S3Access',
                Effect=Allow,
                Action=[
                    Action('s3', 'PutObject'),
                    Action('s3', 'PutObjectAcl'),
                    Action('s3', 'GetObject'),
                    Action('s3', 'GetObjectAcl'),
                    Action('s3', 'ListBucket'),
                    Action('s3', 'DeleteObject'),
                    Action('s3', 'GetBucketPolicy'),
                    Action('s3', 'HeadObject'),
                ],
                Resource=[
                    troposphere.Sub('arn:aws:s3:::${ArtifactsBucketName}'),
                    troposphere.Sub(
                        'arn:aws:s3:::${ArtifactsBucketName}/*'),
                ]))
    if pipeline_config.configuration.disable_codepipeline == False:
        # CodePipeline mode: builds must use the pipeline's KMS CMK.
        policy_statements.append(
            Statement(Sid='KMSCMK',
                      Effect=Allow,
                      Action=[Action('kms', '*')],
                      Resource=[troposphere.Ref(self.cmk_arn_param)]))
    policy_statements.append(
        Statement(Sid='CloudWatchLogsAccess',
                  Effect=Allow,
                  Action=[
                      Action('logs', 'CreateLogGroup'),
                      Action('logs', 'CreateLogStream'),
                      Action('logs', 'PutLogEvents'),
                  ],
                  Resource=['arn:aws:logs:*:*:*']))
    release_phase = action_config.release_phase
    if release_phase != None and release_phase.ecs != None:
        # NOTE(review): ssm_doc is never used after this lookup — presumably
        # kept for its side effect of validating that the paco_ecs_docker_exec
        # SSM document exists in the project; confirm before removing.
        ssm_doc = self.paco_ctx.project['resource']['ssm'].ssm_documents[
            'paco_ecs_docker_exec']
        # SSM Exec Document
        policy_statements.append(
            Statement(Sid='ECSReleasePhaseSSMCore',
                      Effect=Allow,
                      Action=[
                          Action('ssm', 'ListDocuments'),
                          Action('ssm', 'ListDocumentVersions'),
                          Action('ssm', 'DescribeDocument'),
                          Action('ssm', 'GetDocument'),
                          Action('ssm', 'DescribeInstanceInformation'),
                          Action('ssm', 'DescribeDocumentParameters'),
                          Action('ssm', 'CancelCommand'),
                          Action('ssm', 'ListCommands'),
                          Action('ssm', 'ListCommandInvocations'),
                          Action('ssm', 'DescribeAutomationExecutions'),
                          Action('ssm', 'DescribeInstanceProperties'),
                          Action('ssm', 'GetCommandInvocation'),
                          Action('ec2', 'DescribeInstanceStatus'),
                      ],
                      Resource=['*']))
        policy_statements.append(
            Statement(
                Sid=f'ECSReleasePhaseSSMSendCommandDocument',
                Effect=Allow,
                Action=[
                    Action('ssm', 'SendCommand'),
                ],
                Resource=[
                    f'arn:aws:ssm:{self.aws_region}:{self.account_ctx.get_id()}:document/paco_ecs_docker_exec'
                ]))
        idx = 0
        for command in release_phase.ecs:
            # Restrict SendCommand to instances tagged with this command's
            # ECS cluster name (tag set by the ECS cluster templates).
            policy_statements.append(
                Statement(
                    Sid=f'ECSReleasePhaseSSMSendCommand{idx}',
                    Effect=Allow,
                    Action=[
                        Action('ssm', 'SendCommand'),
                    ],
                    Resource=[f'arn:aws:ec2:*:*:instance/*'],
                    Condition=Condition(
                        StringLike({
                            'ssm:resourceTag/Paco-ECSCluster-Name':
                            troposphere.Ref(
                                ecs_release_phase_cluster_name_param[idx])
                        }))))
            # NOTE(review): Sid spelling 'ECSRelasePhase...' is a typo kept
            # as-is; renaming would change the deployed policy document.
            policy_statements.append(
                Statement(
                    Sid=f'ECSRelasePhaseClusterAccess{idx}',
                    Effect=Allow,
                    Action=[
                        Action('ecs', 'DescribeServices'),
                        Action('ecs', 'RunTask'),
                        Action('ecs', 'StopTask'),
                        Action('ecs', 'DescribeContainerInstances'),
                        Action('ecs', 'ListTasks'),
                        Action('ecs', 'DescribeTasks'),
                    ],
                    Resource=['*'],
                    Condition=Condition(
                        StringEquals({
                            'ecs:cluster':
                            troposphere.Ref(
                                ecs_release_phase_cluster_arn_param[idx])
                        }))))
            idx += 1
        # NOTE(review): this automation-definition ARN has empty region and
        # account fields and no document name — looks malformed; confirm the
        # intended resource before tightening.
        policy_statements.append(
            Statement(Sid='ECSReleasePhaseSSMAutomationExecution',
                      Effect=Allow,
                      Action=[
                          Action('ssm', 'StartAutomationExecution'),
                          Action('ssm', 'StopAutomationExecution'),
                          Action('ssm', 'GetAutomationExecution'),
                      ],
                      Resource=['arn:aws:ssm:::automation-definition/']))
        # ECS Policies
        policy_statements.append(
            Statement(Sid='ECSRelasePhaseECS',
                      Effect=Allow,
                      Action=[
                          Action('ecs', 'DescribeTaskDefinition'),
                          Action('ecs', 'DeregisterTaskDefinition'),
                          Action('ecs', 'RegisterTaskDefinition'),
                          Action('ecs', 'ListTagsForResource'),
                          Action('ecr', 'DescribeImages')
                      ],
                      Resource=['*']))
        # IAM Pass Role
        policy_statements.append(
            Statement(Sid='IAMPassRole',
                      Effect=Allow,
                      Action=[Action('iam', 'passrole')],
                      Resource=['*']))
    if len(action_config.secrets) > 0:
        # Expose the configured Secrets Manager secrets to the build;
        # parameter names are md5-hashed to stay within logical-id rules.
        secrets_arn_list = []
        for secret_ref in action_config.secrets:
            name_hash = md5sum(str_data=secret_ref)
            secret_arn_param = self.create_cfn_parameter(
                param_type='String',
                name='SecretsArn' + name_hash,
                description='Secrets Manager Secret Arn to expose access to',
                value=secret_ref + '.arn')
            secrets_arn_list.append(troposphere.Ref(secret_arn_param))
        policy_statements.append(
            Statement(Sid='SecretsManager',
                      Effect=Allow,
                      Action=[
                          Action('secretsmanager', 'GetSecretValue'),
                      ],
                      Resource=secrets_arn_list))
    project_policy_res = troposphere.iam.PolicyType(
        title='CodeBuildProjectPolicy',
        PolicyName=project_policy_name,
        PolicyDocument=PolicyDocument(Statement=policy_statements),
        Roles=[troposphere.Ref(project_role_res)])
    # Policy must exist after the role it attaches to.
    project_policy_res.DependsOn = project_role_res
    template.add_resource(project_policy_res)
    # User defined policies
    for policy in action_config.role_policies:
        policy_name = self.create_resource_name_join(
            name_list=[
                self.res_name_prefix, 'CodeBuild-Project', policy.name
            ],
            separator='-',
            filter_id='IAM.Policy.PolicyName',
            hash_long_names=True,
            camel_case=True)
        statement_list = []
        for statement in policy.statement:
            action_list = []
            for action in statement.action:
                # Model actions are '<service>:<action>' strings.
                action_parts = action.split(':')
                action_list.append(Action(action_parts[0], action_parts[1]))
            statement_list.append(
                Statement(Effect=statement.effect,
                          Action=action_list,
                          Resource=statement.resource))
        # Added to the template via template=; the return value is unused.
        troposphere.iam.PolicyType(
            title=self.create_cfn_logical_id('CodeBuildProjectPolicy' +
                                             policy.name,
                                             camel_case=True),
            template=template,
            PolicyName=policy_name,
            PolicyDocument=PolicyDocument(Statement=statement_list, ),
            Roles=[troposphere.Ref(project_role_res)])
    # ECR Permission Policies
    self.set_ecr_repositories_statements(
        action_config.ecr_repositories, template,
        f'{self.res_name_prefix}-CodeBuild-Project',
        [troposphere.Ref(project_role_res)])
    # CodeBuild Project Resource
    timeout_mins_param = self.create_cfn_parameter(
        param_type='String',
        name='TimeoutInMinutes',
        description='How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before timing out any related build that did not get marked as completed.',
        value=action_config.timeout_mins,
    )
    # Environment Variables
    codebuild_env_vars = [{
        'Name': 'DeploymentEnvironmentName',
        'Value': troposphere.Ref(deploy_env_name_param)
    }]
    if pipeline_config.configuration.disable_codepipeline == False:
        codebuild_env_vars.append({
            'Name': 'KMSKey',
            'Value': troposphere.Ref(self.cmk_arn_param)
        })
    if self.enable_artifacts_bucket:
        codebuild_env_vars.append({
            'Name': 'ArtifactsBucket',
            'Value': troposphere.Ref(self.artifacts_bucket_name_param),
        })
    # If ECS Release Phase, then add the config to the environment
    release_phase = action_config.release_phase
    if release_phase != None and release_phase.ecs != None:
        idx = 0
        for command in release_phase.ecs:
            codebuild_env_vars.append({
                'Name': f'PACO_CB_RP_ECS_CLUSTER_ID_{idx}',
                'Value':
                troposphere.Ref(ecs_release_phase_cluster_arn_param[idx])
            })
            codebuild_env_vars.append({
                'Name': f'PACO_CB_RP_ECS_SERVICE_ID_{idx}',
                'Value':
                troposphere.Ref(ecs_release_phase_service_arn_param[idx])
            })
            idx += 1
    # CodeBuild: Environment
    # Defaults assume no pipeline (NO_ARTIFACTS / NO_SOURCE); the branches
    # below rewrite Artifacts/Source when CodePipeline is enabled.
    project_dict = {
        'Name': troposphere.Ref(self.resource_name_prefix_param),
        'Artifacts': {
            'Type': 'NO_ARTIFACTS'
        },
        'Description': troposphere.Ref('AWS::StackName'),
        'ServiceRole': troposphere.GetAtt('CodeBuildProjectRole', 'Arn'),
        'Environment': {
            'Type': 'LINUX_CONTAINER',
            'ComputeType': troposphere.Ref(compute_type_param),
            'Image': troposphere.Ref(image_param),
            'EnvironmentVariables': codebuild_env_vars,
            'PrivilegedMode': action_config.privileged_mode
        },
        'Source': {
            'Type': 'NO_SOURCE'
        },
        'TimeoutInMinutes': troposphere.Ref(timeout_mins_param),
        'Tags': troposphere.codebuild.Tags(
            Name=troposphere.Ref(self.resource_name_prefix_param))
    }
    if action_config.buildspec:
        project_dict['Source']['BuildSpec'] = action_config.buildspec
    if pipeline_config.configuration.disable_codepipeline == False:
        # Pipeline-driven project: artifacts and source come from CodePipeline.
        project_dict['EncryptionKey'] = troposphere.Ref(self.cmk_arn_param)
        project_dict['Artifacts'] = {'Type': 'CODEPIPELINE'}
        project_dict['Source']['Type'] = 'CODEPIPELINE'
    else:
        # Standalone project: artifacts and a GitHub source must be supplied.
        if action_config.artifacts == None or action_config.artifacts.type == 'NO_ARTIFACTS':
            project_dict['Artifacts'] = {
                'Type': 'NO_ARTIFACTS',
            }
        else:
            project_dict['Artifacts'] = {
                'Type': action_config.artifacts.type,
                'Location': troposphere.Ref(self.artifacts_bucket_name_param),
                'NamespaceType': action_config.artifacts.namespace_type,
                'Packaging': action_config.artifacts.packaging,
                'Name': action_config.artifacts.name
            }
            if action_config.artifacts.path != None:
                project_dict['Artifacts']['Path'] = action_config.artifacts.path
        if action_config.source.github != None:
            github_config = action_config.source.github
            project_dict['Source']['Type'] = 'GITHUB'
            location = f'https://github.com/{github_config.github_owner}/{github_config.github_repository}.git'
            project_dict['Source']['Location'] = location
            project_dict['Source']['ReportBuildStatus'] = github_config.report_build_status
            if github_config.deployment_branch_name != None:
                project_dict['SourceVersion'] = github_config.deployment_branch_name
        else:
            raise PacoException(
                "CodeBuild source must be configured when Codepipeline is disabled."
            )
    if action_config.concurrent_build_limit > 0:
        project_dict['ConcurrentBuildLimit'] = action_config.concurrent_build_limit
    if action_config.vpc_config != None:
        vpc_config = action_config.vpc_config
        vpc_id_param = self.create_cfn_parameter(
            name='VPC',
            param_type='AWS::EC2::VPC::Id',
            description='The VPC Id',
            value='paco.ref netenv.{}.<environment>.<region>.network.vpc.id'.format(self.env_ctx.netenv.name),
        )
        security_group_list = []
        for sg_ref in vpc_config.security_groups:
            ref = Reference(sg_ref)
            # Last two ref parts give a unique-enough logical name per SG.
            sg_param_name = self.gen_cf_logical_name('SecurityGroupId' +
                                                     ref.parts[-2] +
                                                     ref.parts[-1])
            sg_param = self.create_cfn_parameter(
                name=sg_param_name,
                param_type='String',
                description='Security Group Id',
                value=sg_ref + '.id',
            )
            security_group_list.append(troposphere.Ref(sg_param))
        # security_group_list_param = self.create_cfn_ref_list_param(
        #     param_type='List<AWS::EC2::SecurityGroup::Id>',
        #     name='SecurityGroupList',
        #     description='List of security group ids to attach to CodeBuild.',
        #     value=vpc_config.security_groups,
        #     ref_attribute='id',
        # )
        subnet_id_list = []
        subnet_arn_list = []
        az_size = self.env_ctx.netenv[self.account_ctx.name][
            self.aws_region].network.availability_zones
        for segment_ref in vpc_config.segments:
            # One subnet id + arn parameter per (segment, AZ) pair.
            for az_idx in range(1, az_size + 1):
                # Subnet Ids
                segment_name = self.create_cfn_logical_id(
                    f"Segment{segment_ref.split('.')[-1]}AZ{az_idx}")
                subnet_id_param = self.create_cfn_parameter(
                    name=segment_name,
                    param_type='AWS::EC2::Subnet::Id',
                    description=f'VPC Subnet Id in AZ{az_idx} for CodeBuild VPC Config',
                    value=segment_ref + f'.az{az_idx}.subnet_id')
                subnet_id_list.append(troposphere.Ref(subnet_id_param))
                # Subnet Arns
                subnet_arn_param = self.create_cfn_parameter(
                    name=segment_name + 'Arn',
                    param_type='String',
                    description=f'VPC Subnet Id ARN in AZ{az_idx} for CodeBuild VPC Config',
                    value=segment_ref + f'.az{az_idx}.subnet_id.arn')
                subnet_arn_list.append(troposphere.Ref(subnet_arn_param))
        if len(subnet_id_list) == 0:
            raise PacoException(
                "CodeBuild VPC Config must have at least one segment defined."
            )
        # VPC Config Permissions
        # These append into the same policy_statements list already attached
        # to project_policy_res above.
        policy_statements.append(
            Statement(Sid='VpcConfigPermissions',
                      Effect=Allow,
                      Action=[
                          Action('ec2', 'CreateNetworkInterface'),
                          Action('ec2', 'DescribeDhcpOptions'),
                          Action('ec2', 'DescribeNetworkInterfaces'),
                          Action('ec2', 'DeleteNetworkInterface'),
                          Action('ec2', 'DescribeSubnets'),
                          Action('ec2', 'DescribeSecurityGroups'),
                          Action('ec2', 'DescribeVpcs'),
                      ],
                      Resource=['*']))
        policy_statements.append(
            Statement(
                Sid='VpcConfigNetworkInterface',
                Effect=Allow,
                Action=[
                    Action('ec2', 'CreateNetworkInterfacePermission'),
                ],
                Resource=[
                    f'arn:aws:ec2:{self.aws_region}:{self.account_ctx.id}:network-interface/*'
                ],
                Condition=Condition([
                    StringEquals({
                        "ec2:AuthorizedService": "codebuild.amazonaws.com"
                    }),
                    ArnEquals({"ec2:Subnet": subnet_arn_list})
                ])))
        project_dict['VpcConfig'] = {
            'VpcId': troposphere.Ref(vpc_id_param),
            'SecurityGroupIds': security_group_list,
            'Subnets': subnet_id_list
        }
    # Batch Build Config
    batch_service_role_res = None
    if action_config.build_batch_config != None and action_config.build_batch_config.is_enabled():
        batch_config = action_config.build_batch_config
        batch_service_role_name = self.create_iam_resource_name(
            name_list=[
                self.res_name_prefix, 'CodeBuild-BuildBatch-ServiceRole'
            ],
            filter_id='IAM.Role.RoleName')
        batch_service_role_res = troposphere.iam.Role(
            title='CodeBuildBuildBatchConfigServiceRole',
            template=template,
            RoleName=batch_service_role_name,
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[AssumeRole],
                        Principal=Principal("Service",
                                            ['codebuild.amazonaws.com']),
                    )
                ]))
        project_dict['BuildBatchConfig'] = {
            'BatchReportMode': batch_config.batch_report_mode,
            'CombineArtifacts': batch_config.combine_artifacts,
            'TimeoutInMins': batch_config.timeout_in_mins,
            'ServiceRole': troposphere.GetAtt(batch_service_role_res, 'Arn'),
            'Restrictions': {
                'ComputeTypesAllowed':
                batch_config.restrictions.compute_types_allowed,
                'MaximumBuildsAllowed':
                batch_config.restrictions.maximum_builds_allowed
            }
        }
    project_res = troposphere.codebuild.Project.from_dict(
        'CodeBuildProject', project_dict)
    # NOTE(review): DependsOn is assigned, not appended — when batch config is
    # enabled the batch role DEPENDENCY REPLACES the project-policy dependency;
    # confirm that is intended.
    project_res.DependsOn = project_policy_res
    if action_config.build_batch_config != None and action_config.build_batch_config.is_enabled():
        project_res.DependsOn = batch_service_role_res
    self.template.add_resource(project_res)
    if batch_service_role_res != None:
        # Batch service role policy: allow the batch role to drive builds of
        # this project.
        build_batch_policy_statements = []
        build_batch_policy_statements.append(
            Statement(Sid='BatchServiceRole',
                      Effect=Allow,
                      Action=[
                          Action('codebuild', 'StartBuild'),
                          Action('codebuild', 'StopBuild'),
                          Action('codebuild', 'RetryBuild')
                      ],
                      Resource=[troposphere.GetAtt(project_res, 'Arn')]))
        batch_policy_name = self.create_iam_resource_name(
            name_list=[self.res_name_prefix, 'CodeBuild-BatchPolicy'],
            filter_id='IAM.Policy.PolicyName')
        batch_policy_res = troposphere.iam.PolicyType(
            title='CodeBuildBuildBatchPolicy',
            template=template,
            PolicyName=batch_policy_name,
            PolicyDocument=PolicyDocument(
                Statement=build_batch_policy_statements),
            Roles=[troposphere.Ref(batch_service_role_res)])
        batch_policy_res.DependsOn = project_res
    self.create_output(title='ProjectArn',
                       value=troposphere.GetAtt(project_res, 'Arn'),
                       description='CodeBuild Project Arn',
                       ref=config_ref + '.project.arn')
    return project_res
def __init__(self, paco_ctx, account_ctx, aws_region, stack_group,
             stack_tags, grp_id, res_id, config, res_config_ref):
    """Build a CloudFormation template containing one SNS Topic per enabled
    topic in ``config``, with subscriptions, per-topic Arn/Name outputs and an
    optional cross-account publish TopicPolicy.

    :param config: iterable of SNS topic model objects (each supports
        ``is_enabled()``, ``name``, ``display_name``, ``subscriptions``,
        ``cross_account_access``).
    :param res_config_ref: paco ref prefix used to build output refs.
    """
    # The whole template is disabled unless at least one topic is enabled.
    enabled_topics = False
    for topic in config:
        if topic.is_enabled():
            enabled_topics = True
    super().__init__(
        paco_ctx,
        account_ctx,
        aws_region,
        config_ref=res_config_ref,
        stack_group=stack_group,
        stack_tags=stack_tags,
        enabled=enabled_topics,
    )
    self.set_aws_name('SNSTopics', grp_id, res_id)
    self.config = config

    # Troposphere Template Initialization
    self.init_template('SNS Topics')
    template = self.template

    # Topic Resources and Outputs
    any_topic_enabled = False
    topics_ref_cross_list = []  # Refs of topics that allow cross-account publish
    for topic in self.config:
        if not topic.is_enabled():
            continue
        any_topic_enabled = True
        topic_logical_id = self.create_cfn_logical_id(topic.name)

        # Do not specify a TopicName, as then updates cannot be performed that require
        # replacement of this resource.
        cfn_export_dict = {}
        if topic.display_name:
            cfn_export_dict['DisplayName'] = topic.display_name

        # Subscriptions
        if len(topic.subscriptions) > 0:
            cfn_export_dict['Subscription'] = []
            for subscription in topic.subscriptions:
                sub_dict = {}
                if references.is_ref(subscription.endpoint):
                    # Endpoint is a paco.ref: resolve it through a template
                    # parameter.
                    # NOTE(review): the parameter name is derived only from the
                    # topic, so multiple ref-endpoints on one topic would reuse
                    # the same name — confirm create_cfn_parameter handles that.
                    param_name = 'Endpoint{}'.format(topic_logical_id)
                    parameter = self.create_cfn_parameter(
                        param_type='String',
                        name=param_name,
                        description='SNSTopic Endpoint value',
                        value=subscription.endpoint,
                    )
                    endpoint = parameter
                else:
                    endpoint = subscription.endpoint
                sub_dict['Endpoint'] = endpoint
                sub_dict['Protocol'] = subscription.protocol
                cfn_export_dict['Subscription'].append(sub_dict)

        topic_resource = troposphere.sns.Topic.from_dict(
            'Topic' + topic_logical_id, cfn_export_dict)
        if topic.cross_account_access:
            topics_ref_cross_list.append(troposphere.Ref(topic_resource))
        # Stash the resource on the model object for other templates to use.
        topic.topic_resource = topic_resource
        template.add_resource(topic_resource)

        # Topic Outputs
        output_ref = '.'.join([res_config_ref, topic.name])
        self.create_output(title='SNSTopicArn' + topic_logical_id,
                           value=troposphere.Ref(topic_resource),
                           ref=output_ref + '.arn')
        self.create_output(
            title='SNSTopicName' + topic_logical_id,
            value=troposphere.GetAtt(topic_resource, "TopicName"),
            ref=output_ref + '.name',
        )

    # Cross-account access policy
    # Allow every account in the paco project to publish to the opted-in topics.
    if len(topics_ref_cross_list) > 0:
        account_id_list = [
            account.account_id
            for account in self.paco_ctx.project.accounts.values()
        ]
        topic_policy_resource = troposphere.sns.TopicPolicy(
            'TopicPolicyCrossAccountPacoProject',
            Topics=topics_ref_cross_list,
            PolicyDocument=Policy(Version='2012-10-17',
                                  Id="CrossAccountPublish",
                                  Statement=[
                                      Statement(
                                          Effect=Allow,
                                          Principal=Principal("AWS", "*"),
                                          Action=[awacs.sns.Publish],
                                          Resource=topics_ref_cross_list,
                                          Condition=Condition(
                                              StringEquals({
                                                  'AWS:SourceOwner':
                                                  account_id_list,
                                              })))
                                  ]))
        template.add_resource(topic_policy_resource)

    self.enabled = any_topic_enabled

    # Generate the Template
    self.set_template()
def __init__(self, stack, paco_ctx):
    """Build the CloudFormation template for a Cognito User Pool resource.

    Creates (as configured on the resource model): an SMS-sending IAM role
    when MFA is not 'off', the UserPool itself with optional Lambda triggers
    plus the lambda:InvokeFunction Permissions those triggers need, per-client
    UserPoolClient (and UserPoolDomain) resources, outputs for the pool and
    clients, and a post create/update stack hook for UI customizations.

    :param stack: the paco Stack whose ``resource`` is the user pool model.
    :param paco_ctx: the paco context.
    """
    cup = stack.resource
    super().__init__(stack, paco_ctx, iam_capabilities=["CAPABILITY_IAM"])
    self.set_aws_name('CUP', self.resource_group_name, self.resource.name)
    self.init_template('Cognito User Pool')
    if not cup.is_enabled():
        return
    cfn_export_dict = cup.cfn_export_dict

    # SNS Role for SMS
    if cup.mfa != 'off':
        # CloudFormation requires an SMS Role even if only software tokens are used
        sms_role_resource = troposphere.iam.Role(
            'CognitoSMSRole',
            AssumeRolePolicyDocument=PolicyDocument(
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal('Service', "cognito-idp.amazonaws.com"),
                        Action=[Action('sts', 'AssumeRole')],
                        # ExternalId scopes the trust to this specific user pool config
                        Condition=Condition([
                            StringEquals({"sts:ExternalId": cup.paco_ref_parts}),
                        ]),
                    ),
                ],
            ),
            Policies=[
                troposphere.iam.Policy(
                    PolicyName="AllowSMS",
                    PolicyDocument=Policy(
                        Version='2012-10-17',
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[awacs.sns.Publish],
                                Resource=['*'],
                            )
                        ]
                    )
                )
            ],
        )
        self.template.add_resource(sms_role_resource)
        cfn_export_dict['SmsConfiguration'] = {
            'ExternalId': cup.paco_ref_parts,
            'SnsCallerArn': troposphere.GetAtt(sms_role_resource, "Arn")
        }

    # Lambda Triggers
    # (model attribute name, CloudFormation LambdaConfig key)
    lambda_trigger_mapping = [
        ('create_auth_challenge', 'CreateAuthChallenge'),
        ('custom_message', 'CustomMessage'),
        ('define_auth_challenge', 'DefineAuthChallenge'),
        ('post_authentication', 'PostAuthentication'),
        ('post_confirmation', 'PostConfirmation'),
        ('pre_authentication', 'PreAuthentication'),
        ('pre_sign_up', 'PreSignUp'),
        ('pre_token_generation', 'PreTokenGeneration'),
        ('user_migration', 'UserMigration'),
        ('verify_auth_challenge_response', 'VerifyAuthChallengeResponse'),
    ]
    # One Parameter per distinct Lambda ref, shared across trigger slots.
    self.lambda_trigger_params = {}
    if cup.lambda_triggers != None:
        triggers = {}
        for name, cfn_key in lambda_trigger_mapping:
            lambda_ref = getattr(cup.lambda_triggers, name, None)
            if lambda_ref != None:
                if lambda_ref not in self.lambda_trigger_params:
                    self.lambda_trigger_params[lambda_ref] = self.create_cfn_parameter(
                        param_type='String',
                        name='LambdaTrigger' + md5sum(str_data=lambda_ref),
                        description=f'LambdaTrigger for Lambda {lambda_ref}',
                        value=lambda_ref + '.arn',
                    )
                triggers[cfn_key] = troposphere.Ref(self.lambda_trigger_params[lambda_ref])
        cfn_export_dict['LambdaConfig'] = triggers

    # Cognito User Pool
    cup_resource = troposphere.cognito.UserPool.from_dict(
        'CognitoUserPool',
        cfn_export_dict
    )
    self.template.add_resource(cup_resource)

    # Add Lambda Permissions for Lambda Triggers
    # Need to do this after the cup_resource is created
    lambda_permissions = {}
    if cup.lambda_triggers != None:
        for name, cfn_key in lambda_trigger_mapping:
            lambda_ref = getattr(cup.lambda_triggers, name, None)
            if lambda_ref != None:
                # Lambda Permission: one per distinct Lambda function
                if lambda_ref not in lambda_permissions:
                    lambda_permissions[lambda_ref] = True
                    troposphere.awslambda.Permission(
                        # BUGFIX: the title must be keyed on lambda_ref (as the
                        # dedupe dict above is), not on cup.paco_ref_parts —
                        # otherwise two distinct trigger functions produce the
                        # same logical id and only one Permission survives.
                        title='LambdaPermission' + md5sum(str_data=lambda_ref),
                        template=self.template,
                        Action="lambda:InvokeFunction",
                        FunctionName=troposphere.Ref(self.lambda_trigger_params[lambda_ref]),
                        Principal='cognito-idp.amazonaws.com',
                        SourceArn=troposphere.GetAtt(cup_resource, "Arn"),
                    )

    # Outputs
    self.create_output(
        title=cup_resource.title + 'Id',
        description="Cognito UserPool Id",
        value=troposphere.Ref(cup_resource),
        ref=[cup.paco_ref_parts, cup.paco_ref_parts + ".id"],
    )
    self.create_output(
        title=cup_resource.title + 'Arn',
        description="Cognito UserPool Arn",
        value=troposphere.GetAtt(cup_resource, "Arn"),
        ref=cup.paco_ref_parts + ".arn"
    )
    self.create_output(
        title=cup_resource.title + 'ProviderName',
        description="Cognito UserPool ProviderName",
        value=troposphere.GetAtt(cup_resource, "ProviderName"),
        ref=[cup.paco_ref_parts + ".name", cup.paco_ref_parts + ".providername"],
    )
    self.create_output(
        title=cup_resource.title + 'Url',
        description="Cognito UserPool ProviderURL",
        value=troposphere.GetAtt(cup_resource, "ProviderURL"),
        ref=[cup.paco_ref_parts + ".url", cup.paco_ref_parts + ".providerurl"],
    )

    # Cognito User Pool Clients
    for client in cup.app_clients.values():
        cfn_export_dict = client.cfn_export_dict
        cfn_export_dict['UserPoolId'] = troposphere.Ref(cup_resource)
        client_logical_id = self.create_cfn_logical_id(f"{client.name}CognitoUserPoolClient")
        cupclient_resource = troposphere.cognito.UserPoolClient.from_dict(
            client_logical_id,
            cfn_export_dict
        )
        self.template.add_resource(cupclient_resource)
        self.create_output(
            title=cupclient_resource.title + 'Id',
            description="Cognito UserPoolClient Id",
            value=troposphere.Ref(cupclient_resource),
            ref=client.paco_ref_parts + ".id",
        )
        if client.domain_name:
            # ToDo: add support for custom domains
            up_domain_name = self.create_cfn_logical_id(f"{client.name}UserPoolDomain")
            domain_resource = troposphere.cognito.UserPoolDomain(
                up_domain_name,
                Domain=client.domain_name,
                UserPoolId=troposphere.Ref(cup_resource)
            )
            self.template.add_resource(domain_resource)

    # UI Customizations
    if cup.ui_customizations != None:
        if cup.ui_customizations.logo_file != None or cup.ui_customizations.css_file != None:
            # Add a Hook to set UI Customizations
            # CloudFormation doesn't support the Logo customization
            # Paco also uses the hook for CSS (this could be migration to the CloudFormation ~shrug~)
            stack_hooks = StackHooks()
            stack_hooks.add(
                name='SetCognitoUICustomizations',
                stack_action=['create', 'update'],
                stack_timing='post',
                hook_method=self.add_ui_customizations_hook,
                cache_method=self.add_ui_customizations_cache,
                hook_arg=cup,
            )
            stack.add_hooks(stack_hooks)
pd = Policy(Statement=[ Statement( Action=[s3.ListAllMyBuckets, s3.GetBucketLocation], Effect=Allow, Resource=[ s3.S3_ARN("*"), ], ), Statement( Action=[s3.ListBucket], Effect=Allow, Resource=[s3.S3_ARN("myBucket")], Condition=Condition( StringEquals({ 's3:prefix': ['', 'home/'], 's3:delimiter': ['/'], }), ), ), Statement( Action=[s3.ListBucket], Effect=Allow, Resource=[s3.S3_ARN("myBucket")], Condition=Condition(StringLike("s3:prefix", ["home/${aws:username}/*"])), ), Statement( Action=[Action("s3", "*")], Effect=Allow, Resource=[ s3.S3_ARN("myBucket/home/${aws:username}"), s3.S3_ARN("myBucket/home/${aws:username}/*"),
Statement=[ Statement( Action=[s3.ListAllMyBuckets, s3.GetBucketLocation], Effect=Allow, Resource=[ s3.ARN("*"), ], ), Statement( Action=[s3.ListBucket], Effect=Allow, Resource=[s3.ARN("myBucket")], Condition=Condition( StringEquals( { "s3:prefix": ["", "home/"], "s3:delimiter": ["/"], } ), ), ), Statement( Action=[s3.ListBucket], Effect=Allow, Resource=[s3.ARN("myBucket")], Condition=Condition(StringLike("s3:prefix", ["home/${aws:username}/*"])), ), Statement( Action=[Action("s3", "*")], Effect=Allow, Resource=[ s3.ARN("myBucket/home/${aws:username}"),