def add_module_bucket(self: Template):
    """Add a private, encrypted S3 bucket for Terraform modules, plus a
    bucket policy that denies s3:GetObject over non-TLS transport.

    Fix: the original policy emitted the exact same Deny statement twice;
    the redundant duplicate is removed (identical Effect/Action/Principal/
    Resource/Condition, so the policy semantics are unchanged).
    """
    self._bucket = self.add_resource(
        s3.Bucket(
            'TerraformModules',
            AccessControl='Private',
            # Default server-side encryption with S3-managed keys (SSE-S3).
            BucketEncryption=s3.BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    s3.ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
                            SSEAlgorithm='AES256'))
                ]),
            # Block every form of public access to the bucket.
            PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True)))
    self.add_resource(
        s3.BucketPolicy(
            'TerraformModulesBucketPolicy',
            Bucket=Ref(self._bucket),
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    # Deny object reads when the request is not made over TLS.
                    Statement(
                        Effect=Deny,
                        Action=[Action('s3', 'GetObject')],
                        Principal=Principal('*'),
                        Resource=[
                            Join('',
                                 ['arn:aws:s3:::',
                                  Ref(self._bucket), '/*'])
                        ],
                        Condition=Condition(
                            Bool({'aws:SecureTransport': False})))
                ]),
        ))
def user_delegate_role_and_policies(self, user, permissions_list):
    "Create and add an account delegate Role to template"
    # ARN of the IAM user in the master account who may assume this role.
    delegate_user_arn = f'arn:aws:iam::{self.master_account_id}:user/{user.username}'
    role_name_suffix = self.create_resource_name(
        user.name, filter_id='IAM.Role.RoleName')
    # Trust policy: only the named user, and only with MFA present.
    delegate_role = troposphere.iam.Role(
        "UserAccountDelegateRole",
        RoleName=f"IAM-User-Account-Delegate-Role-{role_name_suffix}",
        AssumeRolePolicyDocument=PolicyDocument(
            Version="2012-10-17",
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[AssumeRole],
                    Principal=Principal("AWS", [delegate_user_arn]),
                    Condition=Condition(
                        [AWACSBool({MultiFactorAuthPresent: True})]))
            ]))
    # Iterate over permissions and create a delegate role and policies:
    # each permission type dispatches to an init_<type>_permission method.
    for permission_config in permissions_list:
        handler_name = "init_{}_permission".format(
            permission_config.type.lower())
        getattr(self, handler_name)(permission_config, delegate_role)
    self.template.add_resource(delegate_role)
    # Convenience output: console switch-role URL for this role.
    self.template.add_output(
        troposphere.Output(
            title='SigninUrl',
            Value=troposphere.Sub(
                'https://signin.aws.amazon.com/switchrole?account=${AWS::AccountId}&roleName=${UserAccountDelegateRole}'
            )))
def queue_policy(sns_arn, sqs_arns):
    """Policy allowing SQS SendMessage to the given queues, restricted to
    messages originating from the given SNS topic ARN."""
    source_is_topic = Condition(ArnEquals({"aws:SourceArn": sns_arn}))
    send_statement = Statement(
        Effect="Allow",
        Principal=Principal("*"),
        Action=[awacs.sqs.SendMessage],
        Resource=sqs_arns,
        Condition=source_is_topic,
    )
    return Policy(Statement=[send_statement])
def init_systemsmanagersession_permission(self, permission_config, assume_role_res):
    """Attach a ManagedPolicy granting SSM Session Manager access to the
    delegate role, scoped by Paco-Application-Group-Name resource tags.

    Does nothing when none of the configured resources is a ResourceGroup.
    """
    # Ensure the role has a ManagedPolicyArns list to attach to.
    if 'ManagedPolicyArns' not in assume_role_res.properties.keys():
        assume_role_res.properties['ManagedPolicyArns'] = []
    # Build one StringLike tag condition per referenced ResourceGroup.
    resource_group_condition_list = []
    for resource in permission_config.resources:
        resource_ref = Reference(resource)
        # Initialize The network environments that we need access into
        resource_obj = resource_ref.get_model_obj(self.paco_ctx.project)
        if schemas.IResourceGroup.providedBy(resource_obj):
            resource_group_condition_list.append(
                StringLike({
                    'ssm:resourceTag/Paco-Application-Group-Name':
                    resource_obj.name
                }))
    if len(resource_group_condition_list) == 0:
        return
    statement_list = []
    # StartSession on instances carrying an allowed application-group tag.
    # NOTE(review): the port-forwarding document ARN also appears here under
    # the resourceTag condition — SSM documents do not carry Paco tags, so
    # this entry may never match; confirm whether it belongs only in the
    # unconditional statement below.
    statement_list.append(
        Statement(
            Sid='SessionManagerStartSession',
            Effect=Allow,
            Action=[
                Action('ssm', 'StartSession'),
            ],
            Resource=[
                'arn:aws:ec2:*:*:instance/*',
                'arn:aws:ssm:*::document/AWS-StartPortForwardingSession'
            ],
            Condition=Condition(resource_group_condition_list)))
    # Unconditional StartSession on the port-forwarding document.
    statement_list.append(
        Statement(
            Sid='SessionManagerPortForward',
            Effect=Allow,
            Action=[
                Action('ssm', 'StartSession'),
            ],
            Resource=[
                'arn:aws:ssm:*::document/AWS-StartPortForwardingSession'
            ]))
    # Users may only terminate/resume their own sessions (${aws:username}).
    statement_list.append(
        Statement(Sid='SessionManagerTerminateSession',
                  Effect=Allow,
                  Action=[
                      Action('ssm', 'TerminateSession'),
                      Action('ssm', 'ResumeSession'),
                  ],
                  Resource=['arn:aws:ssm:*:*:session/${aws:username}-*']))
    managed_policy_res = troposphere.iam.ManagedPolicy(
        title=self.create_cfn_logical_id_join(["SystemsManagerSession"]),
        PolicyDocument=PolicyDocument(Version="2012-10-17",
                                      Statement=statement_list),
        Roles=[troposphere.Ref(assume_role_res)])
    self.template.add_resource(managed_policy_res)
def sns_to_sqs_policy(topic):
    """Policy letting the given SNS topic (by source ARN) send to SQS."""
    allow_topic_publish = Statement(
        Effect=Allow,
        Principal=Principal('*'),
        Action=[sqs.SendMessage],
        Resource=["*"],
        # Only requests whose source ARN is the topic are allowed.
        Condition=Condition(ArnEquals(SourceArn, topic)),
    )
    return Policy(Statement=[allow_topic_publish])
def jenkins(name):
    """Read-only EC2 describe policy for Jenkins, limited to Cisco CIDRs."""
    describe_actions = [
        Action('ec2', api)
        for api in ('DescribeInstances', 'DescribeImages',
                    'DescribeTags', 'DescribeSnapshots')
    ]
    # Requests must originate from a Cisco source IP range.
    from_cisco_only = Condition(
        IpAddress({awacs.aws.SourceIp: ALL_CISCO_CIDRS}))
    read_only_statement = statement(actions=describe_actions,
                                    resource=['*'],
                                    effect=Allow,
                                    condition=from_cisco_only)
    return policy(name=name, statements=[read_only_statement])
def queue_policy(sns_arn, sqs_arns):
    """Policy with one Allow-SendMessage statement per queue ARN, each
    restricted to messages originating from ``sns_arn``."""
    return Policy(Statement=[
        Statement(
            Effect="Allow",
            Principal=Principal("*"),
            Action=[awacs.sqs.SendMessage],
            Resource=[queue_arn],
            Condition=Condition(ArnEquals({"aws:SourceArn": sns_arn})),
        )
        for queue_arn in sqs_arns
    ])
def statement_deny_remove_boundary_policy(self) -> Statement:
    """Statement to deny the removal of the boundary policy."""
    # Only applies where the boundary currently in place is ours.
    boundary_is_ours = StringEquals(
        {"iam:PermissionsBoundary": self.policy_arn})
    return Statement(
        Sid="DenyRemovalOfBoundaryFromUserOrRole",
        Effect=Deny,
        Action=[
            awacs.iam.DeleteRolePermissionsBoundary,
            awacs.iam.DeleteUserPermissionsBoundary,
        ],
        Resource=["*"],
        Condition=Condition(boundary_is_ours),
    )
def add_resources_and_outputs(self):
    """Create template (main function called by Stacker).

    Adds an IAM role assumable via federated SAML and exports its name
    and ARN as stack outputs.
    """
    template = self.template
    variables = self.get_variables()

    # Resources
    # Managed policy ARNs to attach to the role (copy, don't alias).
    managed_policy_arns = list(variables['ManagedPolicyArns'])

    iamrole = template.add_resource(
        iam.Role(
            'IamRole',
            RoleName=variables['RoleName'].ref,
            AssumeRolePolicyDocument=Policy(
                Version='2012-10-17',
                Statement=[
                    # Trust relationship: federated SAML authentication
                    # from the configured IdP, with the AWS sign-in page
                    # as the required SAML audience.
                    Statement(
                        Action=[
                            Action('sts', 'AssumeRoleWithSAML'),
                        ],
                        Effect=Allow,
                        Condition=Condition(
                            StringEquals(
                                'SAML:aud',
                                ['https://signin.aws.amazon.com/saml'])),
                        Principal=Principal(
                            'Federated',
                            variables['FederatedArn'].ref))
                ]),
            ManagedPolicyArns=managed_policy_arns))

    # Outputs
    template.add_output(
        Output('IamRoleName',
               Description='IAM role name',
               Value=Ref(iamrole)))
    # Fix: the original wrote "IamRoleArn".format(iamrole.title) — a no-op
    # .format() call (the literal has no placeholders); the output key is
    # simply 'IamRoleArn', unchanged.
    template.add_output(
        Output('IamRoleArn',
               Description='IAM role ARN',
               Value=GetAtt(iamrole, "Arn")))
def kms_key_statements(key_arn, bucket_arn, bucket_prefix):
    """Statements allowing KMS Decrypt/GenerateDataKey on ``key_arn`` only
    when the call is made via S3 in REGION, for objects under
    ``bucket_arn`` + ``bucket_prefix``.
    """
    # Fix: the original joined ["s3.", REGION, "amazonaws.com"], producing
    # e.g. "s3.us-east-1amazonaws.com" (missing dot), so the
    # kms:ViaService condition could never match the real S3 endpoint
    # "s3.<region>.amazonaws.com".
    s3_endpoint = Join('', ["s3.", REGION, ".amazonaws.com"])
    return [
        Statement(Effect=Allow,
                  Action=[
                      awacs.kms.Decrypt,
                      awacs.kms.GenerateDataKey,
                  ],
                  Resource=[key_arn],
                  Condition=Condition([
                      StringEquals("kms:ViaService", s3_endpoint),
                      StringLike("kms:EncryptionContext:aws:s3:arn",
                                 Join('', [bucket_arn, bucket_prefix, "*"]))
                  ]))
    ]
def statement_deny_create_without_boundary(self) -> Statement:
    """Statement to deny creation of role or user without approved boundary."""
    # Triggered whenever the supplied boundary is not one of the approved set.
    boundary_not_approved = StringNotEquals({
        "iam:PermissionsBoundary": self.approved_boundary_policies
    })
    account_roles = Sub(
        "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*")
    account_users = Sub(
        "arn:${AWS::Partition}:iam::${AWS::AccountId}:user/*")
    return Statement(
        Sid="DenyCreateWithoutBoundary",
        Effect=Deny,
        Action=[awacs.iam.CreateRole, awacs.iam.CreateUser],
        Resource=[account_roles, account_users],
        Condition=Condition(boundary_not_approved),
    )
def get_access_policy(self):
    """Return a Policy allowing the configured actions from each trusted
    network, or None when no trusted networks are configured."""
    variables = self.get_variables()
    statements = [
        Statement(Effect=Allow,
                  Action=self.get_allowed_actions(),
                  # Restrict each statement to one trusted CIDR.
                  Condition=Condition(IpAddress({SourceIp: network})),
                  Principal=Principal(Everybody))
        for network in variables["TrustedNetworks"]
    ]
    if not statements:
        return None
    return Policy(Statement=statements)
def test_condition_equality(self):
    """Conditions are equal only when operator, key, and values all match."""
    home_prefix = Condition(
        StringLike("s3:prefix", ["home/${aws:username}/*"]))
    # Identical construction compares (and hashes) equal.
    self.assertEqualWithHash(
        home_prefix,
        Condition(StringLike("s3:prefix", ["home/${aws:username}/*"])))
    # A different value makes them unequal.
    self.assertNotEqualWithHash(
        home_prefix,
        Condition(StringLike("s3:prefix", ["other/${aws:username}/*"])))
    # A different operator (StringEquals vs StringLike) makes them unequal.
    self.assertNotEqualWithHash(
        home_prefix,
        Condition(StringEquals("s3:prefix", ["home/${aws:username}/*"])))
def statement_deny_put_boundary(self) -> Statement:
    """Statement to deny putting unapproved boundaries."""
    # Triggered whenever the boundary being put is not an approved one.
    boundary_not_approved = StringNotEquals({
        "iam:PermissionsBoundary": self.approved_boundary_policies
    })
    return Statement(
        Sid="DenyPutUnapprovedBoundary",
        Effect=Deny,
        Action=[
            awacs.iam.PutRolePermissionsBoundary,
            awacs.iam.PutUserPermissionsBoundary,
        ],
        Resource=[
            Sub("arn:${AWS::Partition}:iam::${AWS::AccountId}:role/*"),
            Sub("arn:${AWS::Partition}:iam::${AWS::AccountId}:user/*"),
        ],
        Condition=Condition(boundary_not_approved),
    )
def create_role(self):
    """Add the Firehose service role and export its name as 'Role'."""
    t = self.template
    # Trust policy: the Firehose service may assume this role, but only
    # when it presents this account's ID as the sts:ExternalId.
    firehose_trust = Statement(
        Principal=Principal('Service', ['firehose.amazonaws.com']),
        Effect=Allow,
        Action=[sts.AssumeRole],
        Condition=Condition(
            StringEquals('sts:ExternalId', Ref('AWS::AccountId')),
        ),
    )
    t.add_resource(
        iam.Role(
            FIREHOSE_ROLE,
            AssumeRolePolicyDocument=Policy(Statement=[firehose_trust]),
            Path='/',
            Policies=self.generate_iam_policies(),
        ),
    )
    t.add_output(Output('Role', Value=Ref(FIREHOSE_ROLE)))
pd = Policy(Statement=[ Statement( Action=[s3.ListAllMyBuckets, s3.GetBucketLocation], Effect=Allow, Resource=[ s3.S3_ARN("*"), ], ), Statement( Action=[s3.ListBucket], Effect=Allow, Resource=[s3.S3_ARN("myBucket")], Condition=Condition( StringEquals({ 's3:prefix': ['', 'home/'], 's3:delimiter': ['/'], }), ), ), Statement( Action=[s3.ListBucket], Effect=Allow, Resource=[s3.S3_ARN("myBucket")], Condition=Condition(StringLike("s3:prefix", ["home/${aws:username}/*"])), ), Statement( Action=[Action("s3", "*")], Effect=Allow, Resource=[ s3.S3_ARN("myBucket/home/${aws:username}"),
def test_attaching_role_to_instance(self):
    '''
    The following example will show you how to use an IAM role for an EC2 instance.
    The following snippet shows a one-liner terminating an EC2 instance after 1 minute.

    Integration test: creates a real stack, then SSHes into the instance
    and stops it using the role's injected credentials.
    '''
    test_stack_name = 'TestAttachingRole2Instance'
    init_cf_env(test_stack_name)
    ###
    t = Template()
    security_group = ts_add_security_group(t)
    role = t.add_resource(
        Role(
            "MyRole",
            AssumeRolePolicyDocument=
            Policy(  # allow the EC2 service to trust this role
                Statement=[
                    Statement(Effect=Allow,
                              Action=[AssumeRole],
                              Principal=Principal("Service",
                                                  ["ec2.amazonaws.com"]))
                ]),
            # Inline policy: may stop instances, but only ones tagged
            # with this stack's ID.
            Policies=[
                troposphere.iam.Policy(
                    PolicyName="my_ec2_policy",
                    PolicyDocument=PolicyDocument(Statement=[
                        Statement(
                            Effect=Allow,
                            Action=[
                                Action("ec2", "StopInstances"),
                            ],
                            Resource=["*"],
                            # only allow if tagged with the stack ID.
                            Condition=Condition(
                                StringEquals({
                                    'ec2:ResourceTag/aws:cloudformation:stack-id':
                                    Ref('AWS::StackId')
                                })),
                        )
                    ])),
            ]))
    # The instance profile is the container that attaches the role to EC2.
    instance_profile = t.add_resource(
        InstanceProfile("MyInstanceProfile", Roles=[Ref(role)]))
    instance = ts_add_instance_with_public_ip(t, Ref(security_group),
                                              tag='role-attachment')
    instance.IamInstanceProfile = Ref(instance_profile)
    t.add_output([
        Output("InstanceId", Value=Ref(instance)),
        Output(
            "PublicIP",
            Value=GetAtt(instance, "PublicIp"),
        ),
        Output("Region", Value=Ref('AWS::Region')),
    ])
    dump_template(t, True)
    cf_client.create_stack(
        StackName=test_stack_name,
        TemplateBody=t.to_yaml(),
        # Required because the template creates named IAM resources.
        Capabilities=['CAPABILITY_NAMED_IAM'],
    )
    cf_client.get_waiter('stack_create_complete').wait(
        StackName=test_stack_name)
    # Brief pause after stack completion before connecting over SSH.
    time.sleep(10)
    # Pull the stack outputs needed to drive the SSH command below.
    public_ip = key_find(
        cf_client.describe_stacks(
            StackName=test_stack_name)['Stacks'][0]['Outputs'],
        'OutputKey', 'PublicIP')['OutputValue']
    instance_id = key_find(
        cf_client.describe_stacks(
            StackName=test_stack_name)['Stacks'][0]['Outputs'],
        'OutputKey', 'InstanceId')['OutputValue']
    region = key_find(
        cf_client.describe_stacks(
            StackName=test_stack_name)['Stacks'][0]['Outputs'],
        'OutputKey', 'Region')['OutputValue']
    # When using an IAM role, your access keys are injected into your EC2 instance automatically.
    run(f"ssh -o 'StrictHostKeyChecking no' ec2-user@{public_ip} aws ec2 stop-instances --instance-ids {instance_id} --region {region}"
        )
def __init__(self, paco_ctx, account_ctx, aws_region, stack_group,
             stack_tags, grp_id, res_id, config, res_config_ref):
    """Build an SNS Topics template: one Topic (with optional
    subscriptions) per enabled config entry, per-topic Arn/Name outputs,
    and an optional cross-account publish TopicPolicy.
    """
    # The stack is enabled if at least one configured topic is enabled.
    enabled_topics = False
    for topic in config:
        if topic.is_enabled():
            enabled_topics = True
    super().__init__(
        paco_ctx,
        account_ctx,
        aws_region,
        config_ref=res_config_ref,
        stack_group=stack_group,
        stack_tags=stack_tags,
        enabled=enabled_topics,
    )
    self.set_aws_name('SNSTopics', grp_id, res_id)
    self.config = config

    # Troposphere Template Initialization
    self.init_template('SNS Topics')
    template = self.template

    # Topic Resources and Outputs
    any_topic_enabled = False
    topics_ref_cross_list = []  # Refs of topics needing cross-account access
    for topic in self.config:
        if not topic.is_enabled():
            continue
        any_topic_enabled = True
        topic_logical_id = self.create_cfn_logical_id(topic.name)

        # Do not specify a TopicName, as then updates cannot be performed that require
        # replacement of this resource.
        cfn_export_dict = {}
        if topic.display_name:
            cfn_export_dict['DisplayName'] = topic.display_name

        # Subscriptions
        if len(topic.subscriptions) > 0:
            cfn_export_dict['Subscription'] = []
            for subscription in topic.subscriptions:
                sub_dict = {}
                if references.is_ref(subscription.endpoint):
                    # Paco-reference endpoints are resolved via a Parameter.
                    # NOTE(review): the Parameter name depends only on the
                    # topic, so two ref-endpoint subscriptions on one topic
                    # would collide — confirm whether that case can occur.
                    param_name = 'Endpoint{}'.format(topic_logical_id)
                    parameter = self.create_cfn_parameter(
                        param_type='String',
                        name=param_name,
                        description='SNSTopic Endpoint value',
                        value=subscription.endpoint,
                    )
                    endpoint = parameter
                else:
                    endpoint = subscription.endpoint
                sub_dict['Endpoint'] = endpoint
                sub_dict['Protocol'] = subscription.protocol
                cfn_export_dict['Subscription'].append(sub_dict)

        topic_resource = troposphere.sns.Topic.from_dict(
            'Topic' + topic_logical_id, cfn_export_dict)
        if topic.cross_account_access:
            topics_ref_cross_list.append(troposphere.Ref(topic_resource))
        # Keep a handle to the resource on the model object.
        topic.topic_resource = topic_resource
        template.add_resource(topic_resource)

        # Topic Outputs
        output_ref = '.'.join([res_config_ref, topic.name])
        self.create_output(title='SNSTopicArn' + topic_logical_id,
                           value=troposphere.Ref(topic_resource),
                           ref=output_ref + '.arn')
        self.create_output(
            title='SNSTopicName' + topic_logical_id,
            value=troposphere.GetAtt(topic_resource, "TopicName"),
            ref=output_ref + '.name',
        )

    # Cross-account access policy: allow every account in the project to
    # publish to the topics flagged for cross-account access.
    if len(topics_ref_cross_list) > 0:
        account_id_list = [
            account.account_id
            for account in self.paco_ctx.project.accounts.values()
        ]
        topic_policy_resource = troposphere.sns.TopicPolicy(
            'TopicPolicyCrossAccountPacoProject',
            Topics=topics_ref_cross_list,
            PolicyDocument=Policy(
                Version='2012-10-17',
                Id="CrossAccountPublish",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("AWS", "*"),
                        Action=[awacs.sns.Publish],
                        Resource=topics_ref_cross_list,
                        Condition=Condition(
                            StringEquals({
                                'AWS:SourceOwner': account_id_list,
                            })))
                ]))
        template.add_resource(topic_policy_resource)

    self.enabled = any_topic_enabled
    # Generate the Template
    self.set_template()
def __init__(self, stack, paco_ctx,):
    """Build the RDS Aurora template: DB cluster, subnet group, parameter
    groups, optional KMS CMK, optional enhanced-monitoring role, per-DB
    instances, event subscriptions, outputs, and Route53 records.

    Fix: the default DBParameterGroup branch referenced the undefined name
    ``rds_config`` (a NameError whenever no instance-level or default
    parameter group was configured); it now uses ``rds_aurora`` like the
    rest of the method.
    """
    rds_aurora = stack.resource
    super().__init__(stack, paco_ctx, iam_capabilities=["CAPABILITY_IAM"])
    self.set_aws_name('RDSAurora', self.resource_group_name, self.resource.name)
    self.init_template('RDSAurora')
    if not rds_aurora.is_enabled():
        return

    rds_cluster_logical_id = 'DBCluster'
    db_cluster_dict = rds_aurora.cfn_export_dict
    self.notification_groups = {}

    # DB Subnet Group
    db_subnet_id_list_param = self.create_cfn_parameter(
        param_type='List<AWS::EC2::Subnet::Id>',
        name='DBSubnetIdList',
        description='The list of subnet IDs where this database will be provisioned.',
        value=rds_aurora.segment + '.subnet_id_list',
    )
    db_subnet_group_resource = troposphere.rds.DBSubnetGroup(
        title='DBSubnetGroup',
        template=self.template,
        DBSubnetGroupDescription=troposphere.Ref('AWS::StackName'),
        SubnetIds=troposphere.Ref(db_subnet_id_list_param),
    )
    db_cluster_dict['DBSubnetGroupName'] = troposphere.Ref(db_subnet_group_resource)

    # DB Cluster Parameter Group
    if rds_aurora.cluster_parameter_group == None:
        # If no Cluster Parameter Group supplied then create one
        param_group_family = gen_vocabulary.rds_engine_versions[rds_aurora.engine][rds_aurora.engine_version]['param_group_family']
        cluster_parameter_group_ref = troposphere.rds.DBClusterParameterGroup(
            "DBClusterParameterGroup",
            template=self.template,
            Family=param_group_family,
            Description=troposphere.Ref('AWS::StackName')
        )
    else:
        # Use existing Parameter Group
        cluster_parameter_group_ref = self.create_cfn_parameter(
            name='DBClusterParameterGroupName',
            param_type='String',
            description='DB Cluster Parameter Group Name',
            value=rds_aurora.cluster_parameter_group + '.name',
        )
    db_cluster_dict['DBClusterParameterGroupName'] = troposphere.Ref(cluster_parameter_group_ref)

    # Default DB Parameter Group: needed when any instance has neither an
    # instance-level nor a default parameter group configured.
    need_db_pg = False
    default_instance = rds_aurora.default_instance
    for db_instance in rds_aurora.db_instances.values():
        if default_instance.parameter_group == None and db_instance.parameter_group == None:
            need_db_pg = True
    if need_db_pg:
        # create default DB Parameter Group
        # (was: rds_config.engine / rds_config.engine_version — undefined name)
        param_group_family = gen_vocabulary.rds_engine_versions[rds_aurora.engine][rds_aurora.engine_version]['param_group_family']
        default_dbparametergroup_resource = troposphere.rds.DBParameterGroup(
            "DBParameterGroup",
            template=self.template,
            Family=param_group_family,
            Description=troposphere.Ref('AWS::StackName')
        )

    # Enhanced Monitoring Role: only when some instance enables it.
    need_monitoring_role = False
    for db_instance in rds_aurora.db_instances.values():
        enhanced_monitoring_interval = db_instance.get_value_or_default('enhanced_monitoring_interval_in_seconds')
        if enhanced_monitoring_interval != 0:
            need_monitoring_role = True
    if need_monitoring_role:
        enhanced_monitoring_role_resource = troposphere.iam.Role(
            title='MonitoringIAMRole',
            template=self.template,
            AssumeRolePolicyDocument=PolicyDocument(
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[Action("sts", "AssumeRole")],
                        Principal=Principal("Service", "monitoring.rds.amazonaws.com")
                    )
                ]
            ),
            ManagedPolicyArns=["arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole"],
            Path="/",
        )

    # DB Snapshot Identifier
    if rds_aurora.db_snapshot_identifier == '' or rds_aurora.db_snapshot_identifier == None:
        db_snapshot_id_enabled = False
    else:
        db_snapshot_id_enabled = True
    if db_snapshot_id_enabled == True:
        db_cluster_dict['SnapshotIdentifier'] = rds_aurora.db_snapshot_identifier

    # KMS-CMK key encryption (not applicable when restoring from snapshot)
    if rds_aurora.enable_kms_encryption == True and db_snapshot_id_enabled == False:
        key_policy = Policy(
            Version='2012-10-17',
            Statement=[
                # Account root retains full key administration.
                Statement(
                    Effect=Allow,
                    Action=[Action('kms', '*'),],
                    Principal=Principal("AWS", [f'arn:aws:iam::{self.stack.account_ctx.id}:root']),
                    Resource=['*'],
                ),
                # Key use restricted to this account via the RDS service.
                Statement(
                    Effect=Allow,
                    Action=[
                        awacs.kms.Encrypt,
                        awacs.kms.Decrypt,
                        Action('kms', 'ReEncrypt*'),
                        Action('kms', 'GenerateDataKey*'),
                        awacs.kms.CreateGrant,
                        awacs.kms.ListGrants,
                        awacs.kms.DescribeKey,
                    ],
                    Principal=Principal('AWS', ['*']),
                    Resource=['*'],
                    Condition=Condition([
                        StringEquals({
                            'kms:CallerAccount': f'{self.stack.account_ctx.id}',
                            'kms:ViaService': f'rds.{self.stack.aws_region}.amazonaws.com'
                        })
                    ]),
                ),
            ],
        )
        kms_key_resource = troposphere.kms.Key(
            title='AuroraKMSCMK',
            template=self.template,
            KeyPolicy=key_policy,
        )
        db_cluster_dict['StorageEncrypted'] = True
        db_cluster_dict['KmsKeyId'] = troposphere.Ref(kms_key_resource)
        kms_key_alias_resource = troposphere.kms.Alias(
            title="AuroraKMSCMKAlias",
            template=self.template,
            AliasName=troposphere.Sub('alias/${' + rds_cluster_logical_id + '}'),
            TargetKeyId=troposphere.Ref(kms_key_resource),
        )
        kms_key_alias_resource.DependsOn = rds_cluster_logical_id

    # Username and Password - only if there is no DB Snapshot Identifier
    if db_snapshot_id_enabled == False:
        db_cluster_dict['MasterUsername'] = rds_aurora.master_username
        if rds_aurora.secrets_password:
            # Password from Secrets Manager
            sta_logical_id = 'SecretTargetAttachmentRDS'
            secret_arn_param = self.create_cfn_parameter(
                param_type='String',
                name='RDSSecretARN',
                description='The ARN for the secret for the RDS master password.',
                value=rds_aurora.secrets_password + '.arn',
            )
            secret_target_attachment_resource = troposphere.secretsmanager.SecretTargetAttachment(
                title=sta_logical_id,
                template=self.template,
                SecretId=troposphere.Ref(secret_arn_param),
                TargetId=troposphere.Ref(rds_cluster_logical_id),
                TargetType='AWS::RDS::DBCluster'
            )
            secret_target_attachment_resource.DependsOn = rds_cluster_logical_id
            db_cluster_dict['MasterUserPassword'] = troposphere.Join(
                '',
                ['{{resolve:secretsmanager:', troposphere.Ref(secret_arn_param), ':SecretString:password}}']
            )
        else:
            master_password_param = self.create_cfn_parameter(
                param_type='String',
                name='MasterUserPassword',
                description='The master user password.',
                value=rds_aurora.master_user_password,
                noecho=True,
            )
            db_cluster_dict['MasterUserPassword'] = troposphere.Ref(master_password_param)

    db_cluster_res = troposphere.rds.DBCluster.from_dict(
        rds_cluster_logical_id,
        db_cluster_dict
    )
    self.template.add_resource(db_cluster_res)

    # Cluster Event Notifications
    if hasattr(rds_aurora, 'cluster_event_notifications'):
        for group in rds_aurora.cluster_event_notifications.groups:
            notif_param = self.create_notification_param(group)
            event_subscription_resource = troposphere.rds.EventSubscription(
                title=self.create_cfn_logical_id(f"ClusterEventSubscription{group}"),
                template=self.template,
                EventCategories=rds_aurora.cluster_event_notifications.event_categories,
                SourceIds=[troposphere.Ref(db_cluster_res)],
                SnsTopicArn=troposphere.Ref(notif_param),
                SourceType='db-cluster',
            )

    # DB Instance(s)
    for db_instance in rds_aurora.db_instances.values():
        logical_name = self.create_cfn_logical_id(db_instance.name)
        db_instance_dict = {
            'DBClusterIdentifier': troposphere.Ref(db_cluster_res),
            'DBInstanceClass': db_instance.get_value_or_default('db_instance_type'),
            'DBSubnetGroupName': troposphere.Ref(db_subnet_group_resource),
            'EnablePerformanceInsights': db_instance.get_value_or_default('enable_performance_insights'),
            'Engine': rds_aurora.engine,
            'PubliclyAccessible': db_instance.get_value_or_default('publicly_accessible'),
            'AllowMajorVersionUpgrade': db_instance.get_value_or_default('allow_major_version_upgrade'),
            'AutoMinorVersionUpgrade': db_instance.get_value_or_default('auto_minor_version_upgrade'),
        }
        enhanced_monitoring_interval = db_instance.get_value_or_default('enhanced_monitoring_interval_in_seconds')
        if enhanced_monitoring_interval != 0:
            db_instance_dict['MonitoringInterval'] = enhanced_monitoring_interval
            db_instance_dict['MonitoringRoleArn'] = troposphere.GetAtt(enhanced_monitoring_role_resource, "Arn")
        if db_instance.availability_zone != None:
            subnet_id_ref = f'{rds_aurora.segment}.az{db_instance.availability_zone}.availability_zone'
            db_instance_subnet_param = self.create_cfn_parameter(
                param_type='String',
                name=f'DBInstanceAZ{logical_name}',
                description=f'Subnet where DB Instance {logical_name} is provisioned',
                value=subnet_id_ref,
            )
            db_instance_dict['AvailabilityZone'] = troposphere.Ref(db_instance_subnet_param)

        # DB Parameter Group
        if default_instance.parameter_group == None and db_instance.parameter_group == None:
            dbparametergroup_resource = default_dbparametergroup_resource
        elif db_instance.parameter_group != None:
            # Use instance-specific DB Parameter Group
            dbparametergroup_resource = self.create_cfn_parameter(
                name=f'DBParameterGroupName{logical_name}',
                param_type='String',
                description='DB Parameter Group Name',
                value=db_instance.parameter_group + '.name',
            )
        else:
            # Use default DB Parameter Group
            dbparametergroup_resource = self.create_cfn_parameter(
                name=f'DBParameterGroupName{logical_name}',
                param_type='String',
                description='DB Parameter Group Name',
                value=default_instance.parameter_group + '.name',
            )
        db_instance_dict['DBParameterGroupName'] = troposphere.Ref(dbparametergroup_resource)

        db_instance_resource = troposphere.rds.DBInstance.from_dict(
            f'DBInstance{logical_name}',
            db_instance_dict
        )
        self.template.add_resource(db_instance_resource)

        # DB Event Notifications
        event_notifications = db_instance.get_value_or_default('event_notifications')
        if event_notifications != None:
            for group in event_notifications.groups:
                notif_param = self.create_notification_param(group)
                event_subscription_resource = troposphere.rds.EventSubscription(
                    title=self.create_cfn_logical_id(f"DBEventSubscription{logical_name}{group}"),
                    template=self.template,
                    EventCategories=event_notifications.event_categories,
                    SourceIds=[troposphere.Ref(db_instance_resource)],
                    SnsTopicArn=troposphere.Ref(notif_param),
                    SourceType='db-instance',
                )

        # DB Instance Outputs
        self.create_output(
            title=f'DBInstanceName{logical_name}',
            description=f'DB Instance Name for {logical_name}',
            value=troposphere.Ref(db_instance_resource),
            ref=db_instance.paco_ref_parts + ".name",
        )

    # DB Cluster Outputs
    self.create_output(
        title='DBClusterName',
        description='DB Cluster Name',
        value=troposphere.Ref(db_cluster_res),
        ref=self.resource.paco_ref_parts + ".name",
    )
    self.create_output(
        title='ClusterEndpointAddress',
        description='Cluster Endpoint Address',
        value=troposphere.GetAtt(db_cluster_res, 'Endpoint.Address'),
        ref=self.resource.paco_ref_parts + ".endpoint.address",
    )
    self.create_output(
        title='ClusterEndpointPort',
        description='Cluster Endpoint Port',
        value=troposphere.GetAtt(db_cluster_res, 'Endpoint.Port'),
        ref=self.resource.paco_ref_parts + ".endpoint.port",
    )
    self.create_output(
        title='ClusterReadEndpointAddress',
        description='Cluster ReadEndpoint Address',
        value=troposphere.GetAtt(db_cluster_res, 'ReadEndpoint.Address'),
        ref=self.resource.paco_ref_parts + ".readendpoint.address",
    )

    # DNS - Route53 Record Set
    if rds_aurora.is_dns_enabled() == True:
        route53_ctl = self.paco_ctx.get_controller('route53')
        for dns in rds_aurora.dns:
            route53_ctl.add_record_set(
                self.account_ctx,
                self.aws_region,
                rds_aurora,
                enabled=rds_aurora.is_enabled(),
                dns=dns,
                record_set_type='CNAME',
                resource_records=[rds_aurora.paco_ref + '.endpoint.address'],
                stack_group=self.stack.stack_group,
            )
        for read_dns in rds_aurora.read_dns:
            route53_ctl.add_record_set(
                self.account_ctx,
                self.aws_region,
                rds_aurora,
                enabled=rds_aurora.is_enabled(),
                dns=read_dns,
                record_set_type='CNAME',
                resource_records=[rds_aurora.paco_ref + '.readendpoint.address'],
                stack_group=self.stack.stack_group,
            )
def generate(account_list=None, region_list=None, file_location=None, output_keys=False, dry_run=False):
    """CloudFormation template generator for use in creating the resources
    required to capture logs in a centrally managed account per UCSD standards.

    Builds a Kinesis stream + Firehose-to-S3 pipeline, an SQS-notified
    CloudTrail delivery bucket with Glacier lifecycle, a Splunk ingest user,
    and a CloudWatch Logs destination. Prints JSON when dry_run, else writes
    it to file_location (or the default log_targets.json path).
    """
    # Click passes multi-value options as tuples; normalize to list.
    if type(account_list) == tuple:
        account_list = list(account_list)
    parameter_groups = []
    # region_list is accepted for interface parity; only the default is set here.
    region_list = region_list if region_list else ['us-west-1', 'us-west-2', 'us-east-1', 'us-east-2']
    t = Template()
    t.add_version("2010-09-09")
    t.add_description("UCSD Log Target AWS CloudFormation Template - this CFn template configures a given account to receive logs from other accounts so as to aggregate and then optionally forward those logs on to the UCSD Splunk installation.")

    # Create Kinesis and IAM Roles
    log_stream_shard_count = t.add_parameter(Parameter("LogStreamShardCount",
        Description="Number of shards to create within the AWS Kinesis stream created to handle CloudWatch Logs.",
        Type="Number", MinValue=1, MaxValue=64, Default=1))
    log_stream_retention_period = t.add_parameter(Parameter("LogStreamRetentionPeriod",
        Description="Number of hours to retain logs in the Kinesis stream.",
        Type="Number", MinValue=24, MaxValue=120, Default=24))
    parameter_groups.append({'Label': {'default': 'Log Stream Inputs'},
                             'Parameters': [log_stream_shard_count.name, log_stream_retention_period.name]})
    log_stream = t.add_resource(k.Stream("LogStream",
        RetentionPeriodHours=Ref(log_stream_retention_period),
        ShardCount=Ref(log_stream_shard_count)))
    firehose_bucket = t.add_resource(s3.Bucket('LogS3DeliveryBucket'))
    # Firehose assumes this role; external ID must be this account's ID.
    firehose_delivery_role = t.add_resource(iam.Role('LogS3DeliveryRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[AssumeRole],
                Principal=Principal('Service', 'firehose.amazonaws.com'),
                Condition=Condition(StringEquals('sts:ExternalId', AccountId)))])))
    # NOTE(review): the object-level resource below is bucket ARN + '*'
    # (no '/') — this works since '*' matches '/', but also matches any
    # bucket whose name merely starts with this bucket's name; confirm
    # whether '/*' was intended.
    log_s3_delivery_policy = t.add_resource(iam.PolicyType('LogS3DeliveryPolicy',
        Roles=[Ref(firehose_delivery_role)],
        PolicyName='LogS3DeliveryPolicy',
        PolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[as3.AbortMultipartUpload,
                        as3.GetBucketLocation,
                        as3.GetObject,
                        as3.ListBucket,
                        as3.ListBucketMultipartUploads,
                        as3.PutObject],
                Resource=[
                    Join('', ['arn:aws:s3:::', Ref(firehose_bucket)]),
                    Join('', ['arn:aws:s3:::', Ref(firehose_bucket), '*'])]),
                Statement(
                    Effect=Allow,
                    Action=[akinesis.Action('Get*'), akinesis.DescribeStream, akinesis.ListStreams],
                    Resource=[GetAtt(log_stream, 'Arn')])])))
    # Firehose delivery stream: Kinesis source -> S3 destination.
    s3_firehose = t.add_resource(fh.DeliveryStream('LogToS3DeliveryStream',
        DependsOn=[log_s3_delivery_policy.name],
        DeliveryStreamName='LogToS3DeliveryStream',
        DeliveryStreamType='KinesisStreamAsSource',
        KinesisStreamSourceConfiguration=fh.KinesisStreamSourceConfiguration(
            KinesisStreamARN=GetAtt(log_stream, 'Arn'),
            RoleARN=GetAtt(firehose_delivery_role, 'Arn')
        ),
        S3DestinationConfiguration=fh.S3DestinationConfiguration(
            BucketARN=GetAtt(firehose_bucket, 'Arn'),
            BufferingHints=fh.BufferingHints(
                IntervalInSeconds=300,
                SizeInMBs=50
            ),
            CompressionFormat='UNCOMPRESSED',
            Prefix='firehose/',
            RoleARN=GetAtt(firehose_delivery_role, 'Arn'),
        )))
    t.add_output(Output('SplunkKinesisLogStream',
                        Value=GetAtt(log_stream, 'Arn'),
                        Description='ARN of the kinesis stream for log aggregation.'))

    # Generate Bucket with Lifecycle Policies
    ct_s3_key_prefix = t.add_parameter(Parameter('CloudTrailKeyPrefix',
        Type='String', Default='',
        Description='Key name prefix for logs being sent to S3'))
    bucket_name = t.add_parameter(Parameter("BucketName",
        Description="Name to assign to the central logging retention bucket",
        Type="String", AllowedPattern="([a-z]|[0-9])+", MinLength=2, MaxLength=64))
    glacier_migration_days = t.add_parameter(Parameter("LogMoveToGlacierInDays",
        Description="Number of days until logs are expired from S3 and transitioned to Glacier",
        Type="Number", Default=365))
    glacier_deletion_days = t.add_parameter(Parameter("LogDeleteFromGlacierInDays",
        Description="Number of days until logs are expired from Glacier and deleted",
        Type="Number", Default=365*7))
    parameter_groups.append({'Label': {'default': 'S3 Log Destination Parameters'},
                             'Parameters': [bucket_name.name, ct_s3_key_prefix.name,
                                            glacier_migration_days.name, glacier_deletion_days.name]})
    dead_letter_queue = t.add_resource(sqs.Queue('deadLetterQueue'))
    queue = t.add_resource(sqs.Queue('s3DeliveryQueue',
        MessageRetentionPeriod=14*24*60*60,  # 14 d * 24 h * 60 m * 60 s
        VisibilityTimeout=5*60,  # 5 m * 60 s per Splunk docs here: http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWS#Configure_SQS
        RedrivePolicy=sqs.RedrivePolicy(
            deadLetterTargetArn=GetAtt(dead_letter_queue, 'Arn'),
            maxReceiveCount=10
        )))
    t.add_output(Output('SplunkS3Queue',
                        Value=GetAtt(queue, 'Arn'),
                        Description='Queue for Splunk SQS S3 ingest'))
    t.add_output(Output('SplunkS3DeadLetterQueue',
                        Value=GetAtt(dead_letter_queue, 'Arn'),
                        Description="Dead letter queue for Splunk SQS S3 ingest"))
    # Let S3 (the log bucket) send object-created notifications to the queue.
    t.add_resource(sqs.QueuePolicy('s3DeliveryQueuePolicy',
        PolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal("AWS", "*"),
                Action=[asqs.SendMessage],
                Resource=[GetAtt(queue, 'Arn')],
                Condition=Condition(ArnLike("aws:SourceArn",
                    Join('', ["arn:aws:s3:*:*:", Ref(bucket_name)]))))]),
        Queues=[Ref(queue)]))
    bucket = t.add_resource(s3.Bucket("LogDeliveryBucket",
        DependsOn=[log_stream.name, queue.name],
        BucketName=Ref(bucket_name),
        AccessControl="LogDeliveryWrite",
        NotificationConfiguration=s3.NotificationConfiguration(
            QueueConfigurations=[s3.QueueConfigurations(
                Event="s3:ObjectCreated:*",
                Queue=GetAtt(queue, 'Arn'))]),
        LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
            s3.LifecycleRule(
                Id="S3ToGlacierTransition",
                Status="Enabled",
                ExpirationInDays=Ref(glacier_deletion_days),
                Transition=s3.LifecycleRuleTransition(
                    StorageClass="Glacier",
                    TransitionInDays=Ref(glacier_migration_days)))])))
    # Standard CloudTrail delivery policy: ACL check + per-account PutObject.
    bucket_policy = t.add_resource(s3.BucketPolicy("LogDeliveryBucketPolicy",
        Bucket=Ref(bucket),
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect="Allow",
                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                    Action=[GetBucketAcl],
                    Resource=[GetAtt(bucket, 'Arn')]),
                Statement(
                    Effect="Allow",
                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                    Action=[PutObject],
                    Condition=Condition(StringEquals({"s3:x-amz-acl": "bucket-owner-full-control"})),
                    Resource=[Join('', [GetAtt(bucket, "Arn"), Ref(ct_s3_key_prefix), "/AWSLogs/", acct_id, "/*"]) for acct_id in account_list])])))
    splunk_sqs_s3_user = t.add_resource(iam.User('splunkS3SQSUser', Path='/', UserName='******'))
    splunk_user_policy = t.add_resource(_generate_splunk_policy(users=[Ref(splunk_sqs_s3_user)]))
    t.add_output(Output('BucketName',
                        Description="Name of the bucket for CloudTrail log delivery",
                        Value=Ref(bucket)))

    # Log destination setup
    # CloudWatch Logs (regional service principal) assumes this role to
    # write into the Kinesis stream.
    cwl_to_kinesis_role = t.add_resource(iam.Role('CWLtoKinesisRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[AssumeRole],
                Principal=Principal("Service", Join('', ["logs.", Region, ".amazonaws.com"])))])))
    cwl_to_kinesis_policy_link = t.add_resource(iam.PolicyType('CWLtoKinesisPolicy',
        PolicyName='CWLtoKinesisPolicy',
        Roles=[Ref(cwl_to_kinesis_role)],
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Resource=[GetAtt(log_stream, 'Arn')],
                    Action=[akinesis.PutRecord]),
                Statement(
                    Effect=Allow,
                    Resource=[GetAtt(cwl_to_kinesis_role, 'Arn')],
                    Action=[IAMPassRole])])))
    log_destination = t.add_resource(cwl.Destination('CWLtoKinesisDestination',
        DependsOn=[cwl_to_kinesis_policy_link.name],
        DestinationName='CWLtoKinesisDestination',
        DestinationPolicy=_generate_log_destination_policy_test('CWLtoKinesisDestination', account_list),
        RoleArn=GetAtt(cwl_to_kinesis_role, 'Arn'),
        TargetArn=GetAtt(log_stream, 'Arn')))
    t.add_output(Output('childAccountLogDeliveryDestinationArn',
                        Value=GetAtt(log_destination, 'Arn'),
                        Description='Log Destination to specify when deploying the source cloudformation template in other accounts.'))

    # Optionally emit an access key pair for the Splunk user.
    if output_keys:
        splunk_user_creds = t.add_resource(iam.AccessKey('splunkAccountUserCreds',
                                                         UserName=Ref(splunk_sqs_s3_user)))
        t.add_output(Output('splunkUserAccessKey',
                            Description='AWS Access Key for the user created for splunk to use when accessing logs',
                            Value=Ref(splunk_user_creds)))
        t.add_output(Output('splunkUserSecretKey',
                            Description='AWS Secret Access Key ID for the user created for splunk to use when accessing logs',
                            Value=GetAtt(splunk_user_creds, 'SecretAccessKey')))
    t.add_output(Output('splunkCWLRegion',
                        Description="The AWS region that contains the data. In aws_cloudwatch_logs_tasks.conf, enter the region ID.",
                        Value=Region))
    t.add_output(Output("DeploymentAccount",
                        Value=AccountId,
                        Description="Convenience Output for referencing AccountID of the log aggregation account"))
    t.add_metadata({"AWS::CloudFormation::Interface": {"ParameterGroups": parameter_groups}})
    if dry_run:
        print(t.to_json())
    else:
        save_path = file_location if file_location else os.path.join(log_aggregation_cf, 'log_targets.json')
        with open(save_path, 'w') as f:
            f.write(t.to_json())
def create_template(self):
    """Create template (main function called by Stacker).

    Builds an EKS control plane: the control-plane/worker security
    group, the EKS service role, the EKS cluster itself, and exported
    outputs for consumption by downstream stacks.
    """
    template = self.template
    variables = self.get_variables()
    template.add_version('2010-09-09')
    template.add_description('Kubernetes Master via EKS - V1.0.0')

    def add_exported_output(title, description, export_name, value):
        # Every output in this template is exported under a
        # "${AWS::StackName}-" prefixed name for cross-stack use.
        template.add_output(
            Output(
                title,
                Description=description,
                Export=Export(Sub(export_name)),
                Value=value
            )
        )

    # Resources
    # Security group for control-plane <-> worker-node communication;
    # the kubernetes.io/cluster/<name>=owned tag lets Kubernetes claim it.
    sg_tags = [
        {'Key': Sub('kubernetes.io/cluster/${EksClusterName}'),
         'Value': 'owned'},
        {'Key': 'Product', 'Value': 'Kubernetes'},
        {'Key': 'Project', 'Value': 'eks'},
        {'Key': 'Name', 'Value': Sub('${EksClusterName}-sg-worker-nodes')}
    ]
    ccpsecuritygroup = template.add_resource(
        ec2.SecurityGroup(
            'ClusterControlPlaneSecurityGroup',
            GroupDescription='Cluster communication with worker nodes',
            Tags=sg_tags,
            VpcId=variables['VPC'].ref
        )
    )
    add_exported_output(
        ccpsecuritygroup.title,
        'Cluster communication with worker nodes',
        '${AWS::StackName}-ControlPlaneSecurityGroup',
        ccpsecuritygroup.ref()
    )

    # Service role assumed by EKS; the inline policy lets EKS manage
    # the ELB service-linked role on our behalf.
    elb_slr_statement = Statement(
        Action=[awacs.iam.CreateServiceLinkedRole,
                awacs.iam.PutRolePolicy],
        Condition=Condition(
            StringLike(
                'iam:AWSServiceName',
                'elasticloadbalancing.amazonaws.com'
            )
        ),
        Effect=Allow,
        Resource=[
            Sub('arn:aws:iam::${AWS::AccountId}:role/'
                'aws-service-role/'
                'elasticloadbalancing.amazonaws.com/'
                'AWSServiceRoleForElasticLoadBalancing*')
        ]
    )
    eksservicerole = template.add_resource(
        iam.Role(
            'EksServiceRole',
            AssumeRolePolicyDocument=make_simple_assume_policy(
                'eks.amazonaws.com'
            ),
            ManagedPolicyArns=[
                IAM_POLICY_ARN_PREFIX + 'AmazonEKSClusterPolicy',
                IAM_POLICY_ARN_PREFIX + 'AmazonEKSServicePolicy'
            ],
            Policies=[
                iam.Policy(
                    PolicyName='EksServiceRolePolicy',
                    PolicyDocument=PolicyDocument(
                        Statement=[elb_slr_statement]
                    )
                )
            ]
        )
    )

    ekscluster = template.add_resource(
        eks.Cluster(
            'EksCluster',
            Name=variables['EksClusterName'].ref,
            Version=variables['EksVersion'].ref,
            RoleArn=eksservicerole.get_att('Arn'),
            ResourcesVpcConfig=eks.ResourcesVpcConfig(
                SecurityGroupIds=[ccpsecuritygroup.ref()],
                SubnetIds=variables['EksSubnets'].ref
            )
        )
    )
    add_exported_output(
        f'{ekscluster.title}Name',
        'EKS Cluster Name',
        f'${{AWS::StackName}}-{ekscluster.title}Name',
        ekscluster.ref()
    )
    add_exported_output(
        f'{ekscluster.title}Endpoint',
        'EKS Cluster Endpoint',
        f'${{AWS::StackName}}-{ekscluster.title}Endpoint',
        ekscluster.get_att('Endpoint')
    )

    # Additional Outputs
    add_exported_output(
        'VpcId',
        'EKS Cluster VPC Id',
        '${AWS::StackName}-VpcId',
        variables['VPC'].ref
    )
    add_exported_output(
        'Subnets',
        'EKS Cluster Subnets',
        '${AWS::StackName}-Subnets',
        Join(',', variables['EksSubnets'].ref)
    )
def create_template(self):
    """Create template (main function called by Stacker).

    Builds an EKS control plane: the control-plane/worker security
    group, the EKS service role, the EKS cluster itself, and exported
    outputs for consumption by downstream stacks.
    """
    template = self.template
    variables = self.get_variables()
    template.add_version("2010-09-09")
    template.add_description("Kubernetes Master via EKS - V1.0.0")

    # Resources
    # Security group for control-plane <-> worker-node communication;
    # the kubernetes.io/cluster/<name>=owned tag lets Kubernetes claim it.
    ccpsecuritygroup = template.add_resource(
        ec2.SecurityGroup(
            "ClusterControlPlaneSecurityGroup",
            GroupDescription="Cluster communication with worker nodes",
            Tags=[
                {
                    "Key": Sub("kubernetes.io/cluster/${EksClusterName}"),
                    "Value": "owned",
                },
                {
                    "Key": "Product",
                    "Value": "Kubernetes"
                },
                {
                    "Key": "Project",
                    "Value": "eks"
                },
                {
                    "Key": "Name",
                    "Value": Sub("${EksClusterName}-sg-worker-nodes")
                },
            ],
            VpcId=variables["VPC"].ref,
        ))
    template.add_output(
        Output(
            ccpsecuritygroup.title,
            Description="Cluster communication with worker nodes",
            Export=Export(
                Sub("${AWS::StackName}-ControlPlaneSecurityGroup")),
            Value=ccpsecuritygroup.ref(),
        ))

    # Role assumed by the EKS service itself.
    # NOTE(review): only AmazonEKSClusterPolicy is attached here, while
    # the sibling variant of this template also attaches
    # AmazonEKSServicePolicy — confirm the target EKS platform version
    # no longer requires it.
    eksservicerole = template.add_resource(
        iam.Role(
            "EksServiceRole",
            AssumeRolePolicyDocument=make_simple_assume_policy(
                "eks.amazonaws.com"),
            ManagedPolicyArns=[
                IAM_POLICY_ARN_PREFIX + "AmazonEKSClusterPolicy"
            ],
            Policies=[
                iam.Policy(
                    PolicyName="EksServiceRolePolicy",
                    # Allows EKS to create/maintain the ELB
                    # service-linked role in this account.
                    PolicyDocument=PolicyDocument(Statement=[
                        Statement(
                            Action=[
                                awacs.iam.CreateServiceLinkedRole,
                                awacs.iam.PutRolePolicy,
                            ],
                            Condition=Condition(
                                StringLike(
                                    "iam:AWSServiceName",
                                    "elasticloadbalancing.amazonaws.com",
                                )),
                            Effect=Allow,
                            Resource=[
                                Sub("arn:aws:iam::${AWS::AccountId}:role/"
                                    "aws-service-role/"
                                    "elasticloadbalancing.amazonaws.com/"
                                    "AWSServiceRoleForElasticLoadBalancing*"
                                    )
                            ],
                        )
                    ]),
                )
            ],
        ))
    ekscluster = template.add_resource(
        eks.Cluster(
            "EksCluster",
            Name=variables["EksClusterName"].ref,
            Version=variables["EksVersion"].ref,
            RoleArn=eksservicerole.get_att("Arn"),
            ResourcesVpcConfig=eks.ResourcesVpcConfig(
                SecurityGroupIds=[ccpsecuritygroup.ref()],
                SubnetIds=variables["EksSubnets"].ref,
            ),
        ))

    # Exported outputs describing the cluster for downstream stacks.
    template.add_output(
        Output(
            "%sName" % ekscluster.title,
            Description="EKS Cluster Name",
            Export=Export(
                Sub("${AWS::StackName}-%sName" % ekscluster.title)),
            Value=ekscluster.ref(),
        ))
    template.add_output(
        Output(
            "%sEndpoint" % ekscluster.title,
            Description="EKS Cluster Endpoint",
            Export=Export(
                Sub("${AWS::StackName}-%sEndpoint" % ekscluster.title)),
            Value=ekscluster.get_att("Endpoint"),
        ))

    # Additional Outputs
    template.add_output(
        Output(
            "VpcId",
            Description="EKS Cluster VPC Id",
            Export=Export(Sub("${AWS::StackName}-VpcId")),
            Value=variables["VPC"].ref,
        ))
    template.add_output(
        Output(
            "Subnets",
            Description="EKS Cluster Subnets",
            Export=Export(Sub("${AWS::StackName}-Subnets")),
            Value=Join(",", variables["EksSubnets"].ref),
        ))
def kms_key_policy(key_use_arns, key_admin_arns):
    """Build the key policy document for a KMS key.

    key_use_arns is a list of principal ARNs allowed to use the key for
    cryptographic operations and to manage grants for AWS resources.
    key_admin_arns is a list of principal ARNs granted administrative
    (management-plane) control over the key.
    """
    account_root_arn = Join(
        ":", ["arn:aws:iam:", Ref("AWS::AccountId"), "root"])
    # Administrators get management-plane actions only (no crypto use).
    admin_actions = [
        Action("kms", pattern)
        for pattern in (
            "Create*", "Describe*", "Enable*", "List*", "Put*",
            "Update*", "Revoke*", "Disable*", "Get*", "Delete*",
            "ScheduleKeyDeletion", "CancelKeyDeletion",
        )
    ]
    statements = [
        # The account root always retains full control so the key can
        # never become unmanageable.
        Statement(
            Sid="Enable IAM User Permissions",
            Effect=Allow,
            Principal=AWSPrincipal(account_root_arn),
            Action=[Action("kms", "*")],
            Resource=["*"]),
        Statement(
            Sid="Allow use of the key",
            Effect=Allow,
            Principal=AWSPrincipal(key_use_arns),
            Action=[
                awacs.kms.Encrypt,
                awacs.kms.Decrypt,
                awacs.kms.ReEncrypt,
                awacs.kms.GenerateDataKey,
                awacs.kms.GenerateDataKeyWithoutPlaintext,
                awacs.kms.DescribeKey,
            ],
            Resource=["*"]),
        # Grant management is limited to grants created on behalf of
        # AWS resources via the kms:GrantIsForAWSResource condition.
        Statement(
            Sid="Allow attachment of persistent resources",
            Effect=Allow,
            Principal=AWSPrincipal(key_use_arns),
            Action=[
                awacs.kms.CreateGrant,
                awacs.kms.ListGrants,
                awacs.kms.RevokeGrant,
            ],
            Resource=["*"],
            Condition=Condition(
                Bool("kms:GrantIsForAWSResource", True))),
        Statement(
            Sid="Allow access for Key Administrators",
            Effect=Allow,
            Principal=AWSPrincipal(key_admin_arns),
            Action=admin_actions,
            Resource=["*"]),
    ]
    return Policy(
        Version="2012-10-17", Id="key-default-1", Statement=statements)
def generate_cf_template():
    """Build the Lambda Chat CloudFormation stack.

    Returns a troposphere Template containing: the SNS topic the
    website publishes chat messages to, the Google-federated IAM role
    and policy the browser assumes to publish, the public S3 website
    bucket with its bucket policy, and stack outputs for each.
    """
    # Header of CloudFormation template
    t = Template()
    t.add_version("2010-09-09")
    t.add_description("Lambda Chat AWS Resources")

    # Parameters
    # NOTE(review): the '.' characters in these AllowedPattern regexes
    # are unescaped and therefore match any character — confirm whether
    # literal dots were intended before tightening them.
    description = "should match [0-9]+-[a-z0-9]+.apps.googleusercontent.com"
    google_oauth_client_id = t.add_parameter(Parameter(
        "GoogleOAuthClientID",
        AllowedPattern="[0-9]+-[a-z0-9]+.apps.googleusercontent.com",
        Type="String",
        Description="The Client ID of your Google project",
        ConstraintDescription=description
    ))
    website_s3_bucket_name = t.add_parameter(Parameter(
        "WebsiteS3BucketName",
        # Raw string: '\-' in a non-raw literal is an invalid escape
        # sequence (SyntaxWarning/DeprecationWarning on modern Python).
        # The runtime value is unchanged.
        AllowedPattern=r"[a-zA-Z0-9\-]*",
        Type="String",
        Description="Name of S3 bucket to store the website in",
        ConstraintDescription="can contain only alphanumeric characters and dashes.",
    ))

    # The SNS topic the website will publish chat messages to
    website_sns_topic = t.add_resource(sns.Topic(
        'WebsiteSnsTopic',
        TopicName='lambda-chat',
        DisplayName='Lambda Chat'
    ))
    t.add_output(Output(
        "WebsiteSnsTopic",
        Description="sns_topic_arn",
        Value=Ref(website_sns_topic),
    ))

    # The IAM Role and Policy the website will assume to publish to SNS.
    # Web identity federation: only browsers authenticated against the
    # configured Google OAuth client may assume this role.
    website_role = t.add_resource(iam.Role(
        "WebsiteRole",
        Path="/",
        AssumeRolePolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[Action("sts", "AssumeRoleWithWebIdentity")],
                    Principal=Principal("Federated", "accounts.google.com"),
                    Condition=Condition(
                        StringEquals(
                            "accounts.google.com:aud",
                            Ref(google_oauth_client_id)
                        )
                    ),
                ),
            ],
        ),
    ))
    t.add_resource(iam.PolicyType(
        "WebsitePolicy",
        PolicyName="lambda-chat-website-policy",
        Roles=[Ref(website_role)],
        PolicyDocument=Policy(
            Version="2012-10-17",
            Statement=[
                # Publishing to the chat topic is the role's only right.
                Statement(
                    Effect=Allow,
                    Action=[Action("sns", "Publish")],
                    Resource=[
                        Ref(website_sns_topic)
                    ],
                ),
            ],
        )
    ))
    t.add_output(Output(
        "WebsiteRole",
        Description="website_iam_role_arn",
        Value=GetAtt(website_role, "Arn"),
    ))

    # Public static-website bucket.
    website_bucket = t.add_resource(s3.Bucket(
        'WebsiteS3Bucket',
        BucketName=Ref(website_s3_bucket_name),
        WebsiteConfiguration=s3.WebsiteConfiguration(
            ErrorDocument="error.html",
            IndexDocument="index.html"
        )
    ))
    t.add_output(Output(
        "S3Bucket",
        Description="s3_bucket",
        Value=Ref(website_bucket),
    ))
    # Bucket policy granting anonymous read of the website objects.
    t.add_resource(s3.BucketPolicy(
        'WebsiteS3BucketPolicy',
        Bucket=Ref(website_bucket),
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Sid": "PublicAccess",
                    "Effect": "Allow",
                    "Principal": "*",
                    "Action": ["s3:GetObject"],
                    "Resource": [{
                        "Fn::Join": [
                            "",
                            [
                                "arn:aws:s3:::",
                                {
                                    "Ref": "WebsiteS3Bucket",
                                },
                                "/*"
                            ]
                        ]
                    }]
                }
            ]
        }
    ))

    return t
def __init__(self, stack, paco_ctx):
    """Template for a Cognito Identity Pool.

    Creates the IdentityPool (wiring UserPool clients in as identity
    providers via CloudFormation Parameters), optional authenticated and
    unauthenticated Roles, and an IdentityPoolRoleAttachment binding the
    roles to the pool.
    """
    cip = stack.resource
    super().__init__(stack, paco_ctx, iam_capabilities=["CAPABILITY_IAM"])
    self.set_aws_name('CIP', self.resource_group_name, self.resource.name)
    self.init_template('Cognito Identity Pool')
    # Nothing to provision for a disabled resource.
    if not cip.is_enabled():
        return

    # Cognito Identity Pool
    cfn_export_dict = cip.cfn_export_dict
    if len(cip.identity_providers) > 0:
        idps = []
        # Parameters are de-duplicated per ref so a UserPool (or client)
        # shared by several identity providers is only parameterized once.
        up_client_params = {}
        up_params = {}
        for idp in cip.identity_providers:
            # replace <region> and <account> for refs in Services
            up_client_ref = Reference(idp.userpool_client)
            up_client_ref.set_account_name(self.account_ctx.get_name())
            up_client_ref.set_region(self.aws_region)
            userpool_client = up_client_ref.get_model_obj(self.paco_ctx.project)
            if up_client_ref.ref not in up_client_params:
                # md5sum keeps the logical id unique even when two
                # clients share a display name.
                up_client_name = self.create_cfn_logical_id(f'UserPoolClient{userpool_client.name}' + md5sum(str_data=up_client_ref.ref))
                value = f'paco.ref {up_client_ref.ref }.id'
                up_client_params[up_client_ref.ref] = self.create_cfn_parameter(
                    param_type='String',
                    name=up_client_name,
                    description=f'UserPool Client Id for {userpool_client.name}',
                    value=value,
                )
            # Resolve the owning UserPool to supply its ProviderName.
            userpool = get_parent_by_interface(userpool_client, ICognitoUserPool)
            userpool_ref = userpool.paco_ref
            if userpool_ref not in up_params:
                up_name = self.create_cfn_logical_id(f'UserPool{userpool.name}' + md5sum(str_data=userpool_ref))
                up_params[userpool_ref] = self.create_cfn_parameter(
                    param_type='String',
                    name=up_name,
                    description=f'UserPool ProviderName for {userpool.name}',
                    value=userpool_ref + '.providername',
                )
            idps.append({
                "ClientId" : troposphere.Ref(up_client_params[up_client_ref.ref]),
                "ProviderName" : troposphere.Ref(up_params[userpool_ref]),
                "ServerSideTokenCheck" : idp.serverside_token_check,
            })
        cfn_export_dict['CognitoIdentityProviders'] = idps

    cip_resource = troposphere.cognito.IdentityPool.from_dict(
        'CognitoIdentityPool',
        cfn_export_dict
    )
    self.template.add_resource(cip_resource)

    # Outputs
    self.create_output(
        title=cip_resource.title + 'Id',
        description="Cognito Identity Pool Id",
        value=troposphere.Ref(cip_resource),
        ref=[cip.paco_ref_parts, cip.paco_ref_parts + ".id"],
    )

    # Roles
    roles_dict = {}
    # The two trust policies below are identical except for the "amr"
    # value ("unauthenticated" vs "authenticated"): only web identities
    # federated through this specific pool may assume each role.
    unauthenticated_assume_role_policy = PolicyDocument(
        Statement=[
            Statement(
                Effect=Allow,
                Principal=Principal('Federated',"cognito-identity.amazonaws.com"),
                Action=[Action('sts', 'AssumeRoleWithWebIdentity')],
                Condition=Condition([
                    StringEquals({"cognito-identity.amazonaws.com:aud": troposphere.Ref(cip_resource)}),
                    ForAnyValueStringLike({"cognito-identity.amazonaws.com:amr": "unauthenticated"})
                ]),
            ),
        ],
    )
    unauthenticated_role_resource = role_to_troposphere(
        cip.unauthenticated_role,
        'UnauthenticatedRole',
        assume_role_policy=unauthenticated_assume_role_policy,
    )
    # role_to_troposphere may return None (role absent/disabled); only
    # attach roles that actually materialized.
    if unauthenticated_role_resource != None:
        self.template.add_resource(unauthenticated_role_resource)
        roles_dict['unauthenticated'] = troposphere.GetAtt(unauthenticated_role_resource, "Arn")

    authenticated_assume_role_policy = PolicyDocument(
        Statement=[
            Statement(
                Effect=Allow,
                Principal=Principal('Federated',"cognito-identity.amazonaws.com"),
                Action=[Action('sts', 'AssumeRoleWithWebIdentity')],
                Condition=Condition([
                    StringEquals({"cognito-identity.amazonaws.com:aud": troposphere.Ref(cip_resource)}),
                    ForAnyValueStringLike({"cognito-identity.amazonaws.com:amr": "authenticated"})
                ]),
            ),
        ],
    )
    authenticated_role_resource = role_to_troposphere(
        cip.authenticated_role,
        'AuthenticatedRole',
        assume_role_policy=authenticated_assume_role_policy
    )
    if authenticated_role_resource != None:
        self.template.add_resource(authenticated_role_resource)
        roles_dict['authenticated'] = troposphere.GetAtt(authenticated_role_resource, "Arn")

    # Identity Pool Role Attachment
    if roles_dict:
        iproleattachment_resource = troposphere.cognito.IdentityPoolRoleAttachment(
            title='IdentityPoolRoleAttachment',
            IdentityPoolId=troposphere.Ref(cip_resource),
            Roles=roles_dict,
        )
        self.template.add_resource(iproleattachment_resource)
def script_manager_ecr_deploy(self, ecr_deploy_group, asg_dict, asg_config, template):
    """Add ECR-deploy release-phase support for script_manager.

    For each enabled ecr_deploy in ``ecr_deploy_group`` with ECS
    release-phase commands: grants pull access to the source ECR
    repositories and push access to the destination repositories, tags
    the ASG dict with the release-phase ECS cluster/service parameter
    refs, and grants the SSM/ECS/IAM permissions the release phase
    needs. All statements are collected into one ManagedPolicy attached
    to ``self.instance_iam_role_name``.
    """
    policy_statements = []
    for ecr_deploy_name in ecr_deploy_group.keys():
        ecr_deploy = ecr_deploy_group[ecr_deploy_name]
        if ecr_deploy == None:
            continue
        if ecr_deploy and len(ecr_deploy.release_phase.ecs) > 0:
            # Resolve each repository pair to concrete ARNs; source and
            # destination may live in different accounts/regions.
            pull_repos = []
            push_repos = []
            for repository in ecr_deploy.repositories:
                source_ecr_obj = get_model_obj_from_ref(repository.source_repo, self.paco_ctx.project)
                source_env = get_parent_by_interface(source_ecr_obj, schemas.IEnvironmentRegion)
                source_account_id = self.paco_ctx.get_ref(source_env.network.aws_account+".id")
                dest_ecr_obj = get_model_obj_from_ref(repository.dest_repo, self.paco_ctx.project)
                dest_env = get_parent_by_interface(dest_ecr_obj, schemas.IEnvironmentRegion)
                dest_account_id = self.paco_ctx.get_ref(dest_env.network.aws_account+".id")
                pull_repo_arn = f'arn:aws:ecr:{source_env.region}:{source_account_id}:repository/{source_ecr_obj.repository_name}'
                push_repo_arn = f'arn:aws:ecr:{dest_env.region}:{dest_account_id}:repository/{dest_ecr_obj.repository_name}'
                pull_repos.append(pull_repo_arn)
                push_repos.append(push_repo_arn)
            # NOTE(review): these Sids (and the idx counter below) are
            # re-created for every qualifying ecr_deploy, so a group
            # with two qualifying entries would emit duplicate Sids and
            # parameter names — verify only one entry is expected.
            # Read access on sources, write access on destinations.
            policy_statements.append(
                Statement(
                    Sid=f'ScriptManagerECRDeployPull',
                    Effect=Allow,
                    Action=[
                        Action('ecr', 'GetDownloadUrlForLayer'),
                        Action('ecr', 'BatchGetImage'),
                    ],
                    Resource=pull_repos
                )
            )
            policy_statements.append(
                Statement(
                    Sid=f'ScriptManagerECRDeployPush',
                    Effect=Allow,
                    Action=[
                        Action('ecr', 'GetDownloadUrlForLayer'),
                        Action('ecr', 'BatchCheckLayerAvailability'),
                        Action('ecr', 'PutImage'),
                        Action('ecr', 'InitiateLayerUpload'),
                        Action('ecr', 'UploadLayerPart'),
                        Action('ecr', 'CompleteLayerUpload'),
                    ],
                    Resource=push_repos
                )
            )
            # Cluster-scoped statements are emitted once per distinct
            # ECS cluster (tracked in iam_cluster_cache); ASG tags are
            # emitted per release-phase command.
            iam_cluster_cache = []
            idx = 0
            for command in ecr_deploy.release_phase.ecs:
                service_obj = get_model_obj_from_ref(command.service, self.paco_ctx.project)
                ecs_services_obj = get_parent_by_interface(service_obj, schemas.IECSServices)
                ecs_release_phase_cluster_arn_param = self.create_cfn_parameter(
                    param_type='String',
                    name=f'ECSReleasePhaseClusterArn{idx}',
                    description=f'ECS Release Phase Cluster Arn {idx}',
                    value=ecs_services_obj.cluster + '.arn'
                )
                ecs_release_phase_cluster_name_param = self.create_cfn_parameter(
                    param_type='String',
                    name=f'ECSReleasePhaseClusterName{idx}',
                    description=f'ECS Release Phase Cluster Name {idx}',
                    value=ecs_services_obj.cluster + '.name'
                )
                # NOTE(review): this description says "Cluster Name" but
                # the parameter carries the service name — looks like a
                # copy/paste slip in the description text only.
                ecs_release_phase_service_name_param = self.create_cfn_parameter(
                    param_type='String',
                    name=f'ECSReleasePhaseServiceName{idx}',
                    description=f'ECS Release Phase Cluster Name {idx}',
                    value=command.service + '.name'
                )
                # Tags surface the cluster/service identifiers on the
                # ASG instances for the release-phase tooling to read.
                ecs_cluster_asg_tag = troposphere.autoscaling.Tag(
                    f'PACO_CB_RP_ECS_CLUSTER_ID_{idx}',
                    troposphere.Ref(ecs_release_phase_cluster_name_param),
                    True
                )
                ecs_service_asg_tag = troposphere.autoscaling.Tag(
                    f'PACO_CB_RP_ECS_SERVICE_ID_{idx}',
                    troposphere.Ref(ecs_release_phase_service_name_param),
                    True
                )
                asg_dict['Tags'].append(ecs_cluster_asg_tag)
                asg_dict['Tags'].append(ecs_service_asg_tag)
                if ecs_services_obj.cluster not in iam_cluster_cache:
                    # ssm:SendCommand limited to instances tagged with
                    # this cluster's name.
                    policy_statements.append(
                        Statement(
                            Sid=f'ECSReleasePhaseSSMSendCommand{idx}',
                            Effect=Allow,
                            Action=[
                                Action('ssm', 'SendCommand'),
                            ],
                            Resource=['arn:aws:ec2:*:*:instance/*'],
                            Condition=Condition(
                                StringLike({
                                    'ssm:resourceTag/Paco-ECSCluster-Name': troposphere.Ref(ecs_release_phase_cluster_name_param)
                                })
                            )
                        )
                    )
                    # NOTE(review): "ECSRelasePhase..." (sic) is
                    # misspelled here and below; harmless (Sids are just
                    # identifiers) but fixing it changes the policy.
                    policy_statements.append(
                        Statement(
                            Sid=f'ECSRelasePhaseClusterAccess{idx}',
                            Effect=Allow,
                            Action=[
                                Action('ecs', 'DescribeServices'),
                                Action('ecs', 'RunTask'),
                                Action('ecs', 'StopTask'),
                                Action('ecs', 'DescribeContainerInstances'),
                                Action('ecs', 'ListTasks'),
                                Action('ecs', 'DescribeTasks'),
                            ],
                            Resource=['*'],
                            Condition=Condition(
                                StringEquals({
                                    'ecs:cluster': troposphere.Ref(ecs_release_phase_cluster_arn_param)
                                })
                            )
                        )
                    )
                    iam_cluster_cache.append(ecs_services_obj.cluster)
                idx += 1
            # NOTE(review): this ARN has empty region/account fields and
            # no document name or wildcard — confirm it matches a real
            # SSM automation-definition ARN before relying on it.
            policy_statements.append(
                Statement(
                    Sid='ECSReleasePhaseSSMAutomationExecution',
                    Effect=Allow,
                    Action=[
                        Action('ssm', 'StartAutomationExecution'),
                        Action('ssm', 'StopAutomationExecution'),
                        Action('ssm', 'GetAutomationExecution'),
                    ],
                    Resource=['arn:aws:ssm:::automation-definition/']
                )
            )
            # ECS Policies
            policy_statements.append(
                Statement(
                    Sid='ECSRelasePhaseECS',
                    Effect=Allow,
                    Action=[
                        Action('ecs', 'DescribeTaskDefinition'),
                        Action('ecs', 'DeregisterTaskDefinition'),
                        Action('ecs', 'RegisterTaskDefinition'),
                        Action('ecs', 'ListTagsForResource'),
                        Action('ecr', 'DescribeImages')
                    ],
                    Resource=['*']
                )
            )
            # Allow invoking the paco docker-exec SSM document in this
            # account/region only.
            policy_statements.append(
                Statement(
                    Sid=f'ECSReleasePhaseSSMSendCommandDocument',
                    Effect=Allow,
                    Action=[
                        Action('ssm', 'SendCommand'),
                    ],
                    Resource=[
                        f'arn:aws:ssm:{self.aws_region}:{self.account_ctx.get_id()}:document/paco_ecs_docker_exec'
                    ]
                )
            )
            policy_statements.append(
                Statement(
                    Sid='ECSReleasePhaseSSMCore',
                    Effect=Allow,
                    Action=[
                        Action('ssm', 'ListDocuments'),
                        Action('ssm', 'ListDocumentVersions'),
                        Action('ssm', 'DescribeDocument'),
                        Action('ssm', 'GetDocument'),
                        Action('ssm', 'DescribeInstanceInformation'),
                        Action('ssm', 'DescribeDocumentParameters'),
                        Action('ssm', 'CancelCommand'),
                        Action('ssm', 'ListCommands'),
                        Action('ssm', 'ListCommandInvocations'),
                        Action('ssm', 'DescribeAutomationExecutions'),
                        Action('ssm', 'DescribeInstanceProperties'),
                        Action('ssm', 'GetCommandInvocation'),
                        Action('ec2', 'DescribeInstanceStatus'),
                        Action('ecr', 'GetAuthorizationToken')
                    ],
                    Resource=['*']
                )
            )
            # NOTE(review): IAM action names are case-insensitive so
            # 'passrole' works, but 'PassRole' is the conventional form.
            policy_statements.append(
                Statement(
                    Sid='IAMPassRole',
                    Effect=Allow,
                    Action=[
                        Action('iam', 'passrole')
                    ],
                    Resource=['*']
                )
            )
    ecs_release_phase_project_policy_res = troposphere.iam.ManagedPolicy(
        title='ECSReleasePhase',
        PolicyDocument=PolicyDocument(
            Version="2012-10-17",
            Statement=policy_statements
        ),
        Roles=[self.instance_iam_role_name]
    )
    template.add_resource(ecs_release_phase_project_policy_res)
from awacs.aws import Allow, AWSPrincipal, Condition
from awacs.aws import Policy, Statement
from awacs.aws import DateGreaterThan, DateLessThan, IpAddress
import awacs.sqs as sqs

region = 'us-east-1'
account = '444455556666'

# Time-boxed, source-IP-restricted SendMessage grant for queue1.
send_window = Condition([
    DateGreaterThan("aws:CurrentTime", "2010-08-16T12:00:00Z"),
    DateLessThan("aws:CurrentTime", "2010-08-16T15:00:00Z"),
    IpAddress("aws:SourceIp", ["192.0.2.0/24", "203.0.113.0/24"]),
])

queue_policy = Policy(
    Id="Queue1_Policy_UUID",
    Statement=[
        Statement(
            Sid="Queue1_SendMessage",
            Effect=Allow,
            Principal=[AWSPrincipal("111122223333")],
            Action=[sqs.SendMessage],
            Resource=[sqs.SQS_ARN(region, account, "queue1")],
            Condition=send_window,
        ),
    ],
)

print(queue_policy.to_json())
def __init__(self, stack, paco_ctx):
    """Template for a Cognito User Pool.

    Creates the UserPool (with an SMS Role when MFA is enabled and
    Lambda trigger wiring when configured), Lambda invoke Permissions
    for the triggers, stack outputs, UserPool Clients with optional
    domains, and an optional post-deploy hook for UI customizations.
    """
    cup = stack.resource
    super().__init__(stack, paco_ctx, iam_capabilities=["CAPABILITY_IAM"])
    self.set_aws_name('CUP', self.resource_group_name, self.resource.name)
    self.init_template('Cognito User Pool')
    if not cup.is_enabled():
        return
    cfn_export_dict = cup.cfn_export_dict

    # SNS Role for SMS
    if cup.mfa != 'off':
        # CloudFormation requires an SMS Role even if only software tokens are used
        sms_role_resource = troposphere.iam.Role(
            'CognitoSMSRole',
            AssumeRolePolicyDocument=PolicyDocument(
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal('Service',"cognito-idp.amazonaws.com"),
                        Action=[Action('sts', 'AssumeRole')],
                        # ExternalId guards the role against the
                        # confused-deputy problem when Cognito assumes it.
                        Condition=Condition([
                            StringEquals({"sts:ExternalId": cup.paco_ref_parts}),
                        ]),
                    ),
                ],
            ),
            Policies=[
                troposphere.iam.Policy(
                    PolicyName="AllowSMS",
                    PolicyDocument=Policy(
                        Version='2012-10-17',
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[awacs.sns.Publish],
                                Resource=['*'],
                            )
                        ]
                    )
                )
            ],
        )
        self.template.add_resource(sms_role_resource)
        cfn_export_dict['SmsConfiguration'] = {
            'ExternalId': cup.paco_ref_parts,
            'SnsCallerArn': troposphere.GetAtt(sms_role_resource, "Arn")
        }

    # Lambda Triggers
    # Maps paco model attribute names to CloudFormation LambdaConfig keys.
    lambda_trigger_mapping = [
        ('create_auth_challenge', 'CreateAuthChallenge'),
        ('custom_message', 'CustomMessage'),
        ('define_auth_challenge', 'DefineAuthChallenge'),
        ('post_authentication', 'PostAuthentication'),
        ('post_confirmation', 'PostConfirmation'),
        ('pre_authentication', 'PreAuthentication'),
        ('pre_sign_up', 'PreSignUp'),
        ('pre_token_generation', 'PreTokenGeneration'),
        ('user_migration', 'UserMigration'),
        ('verify_auth_challenge_response', 'VerifyAuthChallengeResponse'),
    ]
    self.lambda_trigger_params = {}
    if cup.lambda_triggers != None:
        triggers = {}
        for name, cfn_key in lambda_trigger_mapping:
            lambda_ref = getattr(cup.lambda_triggers, name, None)
            if lambda_ref != None:
                # One Parameter per distinct Lambda, even when the same
                # Lambda backs several triggers.
                if lambda_ref not in self.lambda_trigger_params:
                    self.lambda_trigger_params[lambda_ref] = self.create_cfn_parameter(
                        param_type='String',
                        name='LambdaTrigger' + md5sum(str_data=lambda_ref),
                        description=f'LambdaTrigger for Lambda {lambda_ref}',
                        value=lambda_ref + '.arn',
                    )
                triggers[cfn_key] = troposphere.Ref(self.lambda_trigger_params[lambda_ref])
        cfn_export_dict['LambdaConfig'] = triggers

    # Cognito User Pool
    cup_resource = troposphere.cognito.UserPool.from_dict(
        'CognitoUserPool',
        cfn_export_dict
    )
    self.template.add_resource(cup_resource)

    # Add Lambda Permissions for Lambda Triggers
    # Need to do this after the cup_resource is created
    lambda_permissions = {}
    if cup.lambda_triggers != None:
        for name, cfn_key in lambda_trigger_mapping:
            lambda_ref = getattr(cup.lambda_triggers, name, None)
            if lambda_ref != None:
                # Lambda Permission
                if lambda_ref not in lambda_permissions:
                    lambda_permissions[lambda_ref] = True
                    # NOTE(review): the title hashes cup.paco_ref_parts
                    # rather than lambda_ref, so two distinct trigger
                    # Lambdas would yield the same logical id — verify
                    # this is intended.
                    troposphere.awslambda.Permission(
                        title='LambdaPermission' + md5sum(str_data=cup.paco_ref_parts),
                        template=self.template,
                        Action="lambda:InvokeFunction",
                        FunctionName=troposphere.Ref(self.lambda_trigger_params[lambda_ref]),
                        Principal='cognito-idp.amazonaws.com',
                        SourceArn=troposphere.GetAtt(cup_resource, "Arn"),
                    )

    # Outputs
    self.create_output(
        title=cup_resource.title + 'Id',
        description="Cognito UserPool Id",
        value=troposphere.Ref(cup_resource),
        ref=[cup.paco_ref_parts, cup.paco_ref_parts + ".id"],
    )
    self.create_output(
        title=cup_resource.title + 'Arn',
        description="Cognito UserPool Arn",
        value=troposphere.GetAtt(cup_resource, "Arn"),
        ref=cup.paco_ref_parts + ".arn"
    )
    self.create_output(
        title=cup_resource.title + 'ProviderName',
        description="Cognito UserPool ProviderName",
        value=troposphere.GetAtt(cup_resource, "ProviderName"),
        ref=[cup.paco_ref_parts + ".name", cup.paco_ref_parts + ".providername"],
    )
    self.create_output(
        title=cup_resource.title + 'Url',
        description="Cognito UserPool ProviderURL",
        value=troposphere.GetAtt(cup_resource, "ProviderURL"),
        ref=[cup.paco_ref_parts + ".url", cup.paco_ref_parts + ".providerurl"],
    )

    # Cognito User Pool Clients
    for client in cup.app_clients.values():
        cfn_export_dict = client.cfn_export_dict
        cfn_export_dict['UserPoolId'] = troposphere.Ref(cup_resource)
        client_logical_id = self.create_cfn_logical_id(f"{client.name}CognitoUserPoolClient")
        cupclient_resource = troposphere.cognito.UserPoolClient.from_dict(
            client_logical_id,
            cfn_export_dict
        )
        self.template.add_resource(cupclient_resource)
        self.create_output(
            title=cupclient_resource.title + 'Id',
            description="Cognito UserPoolClient Id",
            value=troposphere.Ref(cupclient_resource),
            ref=client.paco_ref_parts + ".id",
        )
        if client.domain_name:
            # ToDo: add support for custom domains
            up_domain_name = self.create_cfn_logical_id(f"{client.name}UserPoolDomain")
            domain_resource = troposphere.cognito.UserPoolDomain(
                up_domain_name,
                Domain=client.domain_name,
                UserPoolId=troposphere.Ref(cup_resource)
            )
            self.template.add_resource(domain_resource)

    # UI Customizations
    if cup.ui_customizations != None:
        if cup.ui_customizations.logo_file != None or cup.ui_customizations.css_file != None:
            # Add a Hook to set UI Customizations
            # CloudFormation doesn't support the Logo customization
            # Paco also uses the hook for CSS (this could be migration to the CloudFormation ~shrug~)
            stack_hooks = StackHooks()
            stack_hooks.add(
                name='SetCognitoUICustomizations',
                stack_action=['create','update'],
                stack_timing='post',
                hook_method=self.add_ui_customizations_hook,
                cache_method=self.add_ui_customizations_cache,
                hook_arg=cup,
            )
            stack.add_hooks(stack_hooks)
def script_manager_ecs(self, ecs_group, asg_dict, asg_config, template):
    """Grant the ASG instance role access to the ECS clusters used by
    script_manager and tag the ASG dict with each cluster's ARN
    parameter. All statements land in one ManagedPolicy attached to
    ``self.instance_iam_role_name``."""
    statements = []
    idx = 0
    for ecs_name, ecs in ecs_group.items():
        # Skip unset entries without advancing idx.
        if ecs == None:
            continue
        cluster_arn_param = self.create_cfn_parameter(
            param_type='String',
            name=f'ECSScriptManagerClusterArn{idx}',
            description=f'ECS Script Manager Cluster Arn {idx}',
            value=ecs.cluster + '.arn'
        )
        # Surface the cluster ARN on the ASG as an instance tag.
        asg_dict['Tags'].append(
            troposphere.autoscaling.Tag(
                f'paco:script_manager:ecs:{ecs_name}:cluster:arn',
                troposphere.Ref(cluster_arn_param),
                True
            )
        )
        # ECS access constrained to this cluster via ecs:cluster.
        statements.append(
            Statement(
                Sid=f'ECSScriptManagerClusterAccess{idx}',
                Effect=Allow,
                Action=[
                    Action('ecs', 'UpdateService'),
                    Action('ecs', 'DescribeServices'),
                    Action('ecs', 'ListServices'),
                    Action('ecs', 'RunTask'),
                    Action('ecs', 'StopTask'),
                    Action('ecs', 'DescribeContainerInstances'),
                    Action('ecs', 'ListTasks'),
                    Action('ecs', 'DescribeTasks'),
                    Action('ec2', 'DescribeInstances'),
                ],
                Resource=['*'],
                Condition=Condition(
                    StringEquals({
                        'ecs:cluster': troposphere.Ref(cluster_arn_param)
                    })
                )
            )
        )
        # ec2:DescribeInstances cannot be cluster-scoped, so it gets
        # its own unconditioned statement.
        statements.append(
            Statement(
                Sid=f'ECSScriptManagerClusterEC2Access{idx}',
                Effect=Allow,
                Action=[
                    Action('ec2', 'DescribeInstances'),
                ],
                Resource=['*']
            )
        )
        idx += 1
    managed_policy_res = troposphere.iam.ManagedPolicy(
        title='ScriptManagerECS',
        PolicyDocument=PolicyDocument(
            Version="2012-10-17",
            Statement=statements
        ),
        Roles=[self.instance_iam_role_name]
    )
    template.add_resource(managed_policy_res)
Queue( "JsonNotificationDLQ", QueueName=Sub("${LambdaEnv}-json-notification-inbound-dlq"), )) t.add_resource( QueuePolicy( "JsonNotificationReceiveQueuePolicy", Queues=[Ref("JsonNotificationReceiveQueue")], PolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Action=[SendMessage], Resource=[GetAtt("JsonNotificationReceiveQueue", "Arn")], Principal=AWSPrincipal("*"), Condition=Condition([ ArnLike("aws:SourceArn", Ref("GalileoBabelTopicArn")) ])) ]))) t.add_resource( Bucket("NotificationsToBeIngested", BucketName=Sub("${LambdaEnv}-editorial-search-galileo-babel"), DeletionPolicy="Retain", NotificationConfiguration=NotificationConfiguration( TopicConfigurations=[ TopicConfigurations(Event="s3:ObjectCreated:*", Topic=ImportValue( Sub("${LambdaEnv}-JsonTopicArn"))) ]))) t.add_resource(