def get_att(self, parameter, as_reference=True):
    """Fetch an output attribute from an existing stack.

    :param parameter: The output parameter which should be retrieved
    :param as_reference: Treat ``parameter`` as a reference (default)
        rather than a plain string
    :return: Value of parameter to retrieve
    """
    # Wrap in a Ref only when the caller asked for reference semantics.
    attr = troposphere.Ref(parameter) if as_reference else parameter
    return troposphere.GetAtt(self.__custom_stack_obj, attr)
def add_log_group(self, loggroup_name, logical_name=None):
    "Add a LogGroup resource to the template"
    logical_name = logical_name or loggroup_name
    # Backfill a default so model objects without the attribute behave as
    # if retention was never configured (attribute is read below and may
    # be read by other callers).
    if not hasattr(self.awslambda, 'expire_events_after_days'):
        self.awslambda.expire_events_after_days = 'Never'

    export_dict = {
        'LogGroupName': loggroup_name,
    }
    retention = self.awslambda.expire_events_after_days
    if retention not in ('Never', ''):
        export_dict['RetentionInDays'] = int(retention)

    group_id = self.create_cfn_logical_id('LogGroup' + logical_name)
    group_res = troposphere.logs.LogGroup.from_dict(group_id, export_dict)
    # Ensure the Lambda exists before its LogGroup is created.
    group_res.DependsOn = self.awslambda_resource
    self.template.add_resource(group_res)

    # LogGroup Output
    self.register_stack_output_config(
        '{}.log_groups.{}.arn'.format(self.awslambda.paco_ref_parts, logical_name),
        group_id + 'Arn',
    )
    self.template.add_output(
        troposphere.Output(
            group_id + 'Arn',
            Value=troposphere.GetAtt(group_res, "Arn"),
        )
    )
    return group_res
def profile_role_statements(self):
    """Role statements layered over the parent's: full SQS access to the
    s3-events worker queue and the deferral worker queue."""
    queue_arns = [
        ts.GetAtt(self.s3e_worker.queue, 'Arn'),
        ts.GetAtt(self.def_worker.queue, 'Arn'),
    ]
    overrides = {
        # poll s3 events queue
        'sqs': awacs.aws.Statement(
            Effect='Allow',
            Action=[awacs.sqs.Action('*')],
            Resource=queue_arns,
        ),
    }
    return collections.ChainMap(overrides, super().profile_role_statements)
def register_resources_template(self, template):
    """Register the CloudWatch Events Rule and, for every target lambda,
    the Permission that lets events.amazonaws.com invoke it.

    :param template: troposphere Template the resources are added to
    """
    targets, target_lambdas = [], []
    for name, target in six.iteritems(self.settings.get('targets', {})):
        target_lambdas.append(target['lambda'])
        targets.append(
            events.Target(
                Arn=self.get_destination_arn(target['lambda']),
                Id=self.get_function_name(target['lambda']),
                Input=target.get('input', ''),
                InputPath=target.get('input_path', ''),
            )
        )

    # Either an event pattern or a schedule expression may be set; the
    # missing one is suppressed with AWS::NoValue.
    rule = events.Rule(
        utils.valid_cloudformation_name(self.name, "Rule"),
        Description=self.settings.get('description', ''),
        EventPattern=self.settings.get(
            'event_pattern', troposphere.Ref(troposphere.AWS_NO_VALUE)),
        ScheduleExpression=self.settings.get(
            'schedule_expression', troposphere.Ref(troposphere.AWS_NO_VALUE)),
        State=self.get_enabled(),
        Targets=targets,
    )
    template.add_resource(rule)

    for lambda_ in target_lambdas:
        # Include the lambda identifier in the logical name: the previous
        # fixed name produced duplicate resource titles whenever a rule
        # had more than one target lambda.
        template.add_resource(
            troposphere.awslambda.Permission(
                utils.valid_cloudformation_name(
                    self.name, 'rule', lambda_, 'permission'),
                Action="lambda:InvokeFunction",
                FunctionName=self.get_destination_arn(lambda_),
                Principal="events.amazonaws.com",
                SourceArn=troposphere.GetAtt(rule, 'Arn'),
            )
        )
def get_or_create_resource(self, path, api, template):
    """Return the ID of the Resource ``path`` in ``api``.

    If the resource doesn't exist, create a new one, add it to
    ``template`` and cache it; parent resources are created recursively
    as needed.

    :param path: resource path, normalized to ``/segment/...`` form
    :param api: the RestApi the resource belongs to
    :param template: troposphere Template new resources are added to
    :return: a Ref to the resource (GetAtt RootResourceId for ``/``)
    """
    # Normalize: single leading slash, no trailing slash, '/' for empty.
    if path and path[0] != '/':
        path = '/{}'.format(path)
    if path and path[-1] == '/':
        path = path[:-1]
    if not path:
        path = '/'

    # The API root is built in; it is addressed via GetAtt, not cached.
    if path == '/':
        return troposphere.GetAtt(api, 'RootResourceId')

    if path in self._resources:
        return self._resources[path]

    # Create the parent chain first, then this path segment.
    parent_path, path_part = path.rsplit('/', 1)
    parent_id = self.get_or_create_resource(parent_path, api, template)
    resource = Resource(
        utils.valid_cloudformation_name(
            self.name, 'Resource', *path.split('/')),
        ParentId=parent_id,
        PathPart=path_part,
        RestApiId=troposphere.Ref(api),
    )
    template.add_resource(resource)
    self._resources[path] = troposphere.Ref(resource)
    return self._resources[path]
def profile_role_statements(self):
    """Worker role statements layered over the parent's: full SQS access,
    access to the deferral queue, and read access to all of S3."""
    statements = {
        # send trigger messages
        'access-all-sqs': awacs.aws.Statement(
            Effect='Allow',
            Action=[awacs.sqs.Action('*')],
            Resource=['*'],
        ),
        # send deferred api calls
        'access-deferral-queue': awacs.aws.Statement(
            Effect='Allow',
            Action=[awacs.sqs.Action('*')],
            Resource=[ts.GetAtt(self.env.worker.def_worker.queue, 'Arn')],
        ),
        # backfill data
        'read-all-s3': awacs.aws.Statement(
            Effect='Allow',
            Action=[
                awacs.s3.Action('List*'),
                awacs.s3.Action('Get*'),
            ],
            Resource=['*'],
        ),
    }
    return collections.ChainMap(statements, super().profile_role_statements)
def add_github_webhook(self, pipeline_res, stage, action):
    "Add a CodePipeline WebHook"
    logical_id = f'Webhook{stage.name}{action.name}'
    github_access_token = Reference(action.github_access_token).ref
    webhook_dict = {
        'Authentication': 'GITHUB_HMAC',
        'AuthenticationConfiguration': {
            # The secret token is resolved from Secrets Manager at deploy time.
            'SecretToken': "{{resolve:secretsmanager:%s}}" % github_access_token,
        },
        # Fire only for pushes to the pipeline's configured branch.
        'Filters': [{
            'JsonPath': "$.ref",
            'MatchEquals': 'refs/heads/{Branch}'
        }],
        'TargetAction': f'GitHub{stage.name}{action.name}',
        'RegisterWithThirdParty': True,
        'TargetPipeline': troposphere.Ref(pipeline_res),
        'TargetPipelineVersion': troposphere.GetAtt(pipeline_res, 'Version'),
    }
    webhook_res = troposphere.codepipeline.Webhook.from_dict(
        logical_id,
        webhook_dict,
    )
    self.template.add_resource(webhook_res)
def __init__(self, stack, paco_ctx):
    """Template with one DynamoDB Table resource per configured table,
    each with Name and Arn outputs."""
    dynamodb = stack.resource
    super().__init__(stack, paco_ctx)
    self.set_aws_name('DynamoDB', self.resource_group_name, self.resource.name)
    self.init_template('DynamoDB Table(s)')
    # Disabled resource: leave an empty placeholder template.
    if not dynamodb.is_enabled():
        return

    # Parameters
    # DynamoDB Tables
    for table in dynamodb.tables.values():
        logical_id = self.create_cfn_logical_id(table.name + 'DynamoDBTable')
        table_res = troposphere.dynamodb.Table.from_dict(
            logical_id,
            table.cfn_export_dict,
        )
        self.template.add_resource(table_res)
        self.create_output(
            title=table_res.title + 'Name',
            description="DynamoDB Table Name",
            value=troposphere.Ref(table_res),
            ref=f"{table.paco_ref_parts}.name",
        )
        self.create_output(
            title=table_res.title + 'Arn',
            description="DynamoDB Table Arn",
            value=troposphere.GetAtt(table_res, "Arn"),
            ref=f"{table.paco_ref_parts}.arn",
        )
def record_set_group(self):
    """Route53 RecordSetGroup holding a single A-record alias that points
    at the load balancer's DNS name."""
    alias = ts.route53.AliasTarget(
        EvaluateTargetHealth=False,
        DNSName=ts.GetAtt(self.elb, 'DNSName'),
        HostedZoneId=ts.GetAtt(self.elb, 'CanonicalHostedZoneID'),
    )
    record = ts.route53.RecordSet(
        Name=self.record_set_name,
        Type='A',
        AliasTarget=alias,
    )
    return ts.route53.RecordSetGroup(
        self._get_logical_id('RecordSetGroup'),
        HostedZoneName=self.hosted_zone_name,
        RecordSets=[record],
    )
def add_apigateway_resource(self, resource):
    """Add an ApiGateway Resource to the template.

    Wires the resource's ParentId (REST API root or a parent Resource),
    registers an output for its id, and — when CORS is enabled — adds a
    MOCK-integration OPTIONS method to the REST API's method config.
    """
    # Logical id is salted with an md5 of the paco ref so distinct
    # resources with the same name don't collide.
    resource_logical_id = 'ApiGatewayResource' + self.create_cfn_logical_id(resource.name + md5sum(str_data=resource.paco_ref_parts))
    cfn_export_dict = resource.cfn_export_dict
    parent_resource = resource.__parent__.__parent__
    # root resource
    if schemas.IApiGatewayRestApi.providedBy(parent_resource):
        cfn_export_dict["ParentId"] = troposphere.GetAtt(self.restapi_resource, "RootResourceId")
    # child resource
    else:
        cfn_export_dict["ParentId"] = troposphere.Ref(parent_resource.resource)
    cfn_export_dict["RestApiId"] = troposphere.Ref(self.restapi_resource)
    resource_resource = troposphere.apigateway.Resource.from_dict(resource_logical_id, cfn_export_dict)
    # Stash the troposphere resource on the model so children can Ref it.
    resource.resource = resource_resource
    self.template.add_resource(resource_resource)
    self.create_output(
        title=self.create_cfn_logical_id(f'ApiGatewayRestApiResource{resource.name}' + md5sum(str_data=resource.paco_ref_parts)),
        value=troposphere.Ref(resource_resource),
        ref=resource.paco_ref_parts + '.id',
    )
    # Add an OPTIONS method if CORS is enabled
    if resource.enable_cors == True:
        # CORS pre-flight: MOCK integration returning 200 with the
        # Allow-Headers/Methods/Origin response headers.
        options_config = {
            'http_method': 'OPTIONS',
            'integration': {
                'integration_type': 'MOCK',
                'integration_http_method': 'OPTIONS',
                'pass_through_behavior': 'WHEN_NO_MATCH',
                'request_templates': {'application/json': '{"statusCode": 200}'},
                'integration_responses': [{
                    'status_code': '200',
                    'response_parameters': {
                        'method.response.header.Access-Control-Allow-Headers': "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
                        'method.response.header.Access-Control-Allow-Methods': "'POST,OPTIONS'",
                        'method.response.header.Access-Control-Allow-Origin': "'*'",
                    },
                    'response_templates': {'application/json': ''},
                },],
            },
            'method_responses': [{
                'status_code': '200',
                'response_models': [{
                    'content_type': 'application/json',
                    'model_name': 'emptyjson',
                }],
                'response_parameters': {
                    'method.response.header.Access-Control-Allow-Headers': False,
                    'method.response.header.Access-Control-Allow-Methods': False,
                    'method.response.header.Access-Control-Allow-Origin': False,
                },
            }],
        }
        options_config['resource_name'] = resource.nested_name
        method_name = f'{resource.nested_name}PacoCORS'
        options_method = ApiGatewayMethod(method_name, self.apigatewayrestapi.methods)
        apply_attributes_from_config(options_method, options_config)
        self.apigatewayrestapi.methods[method_name] = options_method
    return resource_resource
def __init__(self, paco_ctx, account_ctx, aws_region, stack_group, stack_tags, zone_config, config_ref):
    """Route53 Hosted Zone template.

    Either references an externally-managed hosted zone (id/nameservers
    taken from config) or creates a HostedZone resource, then emits
    id/nameserver outputs and any configured record sets.
    """
    super().__init__(
        paco_ctx,
        account_ctx,
        aws_region,
        enabled=zone_config.is_enabled(),
        config_ref=config_ref,
        iam_capabilities=["CAPABILITY_NAMED_IAM"],
        stack_group=stack_group,
        stack_tags=stack_tags
    )
    self.set_aws_name('HostedZone', zone_config.name)
    self.init_template('Route53 Hosted Zone: ' + zone_config.domain_name)
    self.paco_ctx.log_action_col("Init", "Route53", "Hosted Zone", "{}".format(zone_config.domain_name))
    # External zone: outputs come straight from config, no resource created.
    if zone_config.external_resource != None and zone_config.external_resource.is_enabled():
        hosted_zone_id_output_value = zone_config.external_resource.hosted_zone_id
        nameservers_output_value = ','.join(zone_config.external_resource.nameservers)
    else:
        hosted_zone_res = troposphere.route53.HostedZone(
            title='HostedZone',
            template=self.template,
            Name=zone_config.domain_name
        )
        hosted_zone_id_output_value = troposphere.Ref(hosted_zone_res)
        nameservers_output_value = troposphere.Join(',', troposphere.GetAtt(hosted_zone_res, 'NameServers'))
    self.create_output(
        title='HostedZoneId',
        value=hosted_zone_id_output_value,
        ref=config_ref + '.id'
    )
    self.create_output(
        title='HostedZoneNameServers',
        value=nameservers_output_value,
        ref=config_ref + '.name_servers'
    )
    if len(zone_config.record_sets) > 0:
        # NOTE(review): this branch references hosted_zone_res, which is
        # only bound in the non-external branch above — record_sets
        # combined with an external_resource would raise NameError here.
        # Confirm intended behavior before relying on that combination.
        record_set_list = []
        for record_set_config in zone_config.record_sets:
            record_set_res = troposphere.route53.RecordSet(
                Name=record_set_config.record_name,
                Type=record_set_config.type,
                TTL=record_set_config.ttl,
                ResourceRecords=record_set_config.resource_records
            )
            record_set_list.append(record_set_res)
        group_res = troposphere.route53.RecordSetGroup(
            title='RecordSetGroup',
            template=self.template,
            HostedZoneId=troposphere.Ref(hosted_zone_res),
            RecordSets=record_set_list
        )
        # Ensure the zone exists before its record sets.
        group_res.DependsOn = hosted_zone_res
    self.set_template()
def queue(self):
    """SQS work queue with a 5-minute visibility timeout; messages that
    fail three receives are redriven to the dead-letter queue."""
    redrive = ts.sqs.RedrivePolicy(
        deadLetterTargetArn=ts.GetAtt(self.dlq, 'Arn'),
        maxReceiveCount=3,
    )
    return ts.sqs.Queue(
        self._get_logical_id('Queue'),
        QueueName=self.config['QueueName'],
        VisibilityTimeout=60 * 5,  # 5 minutes
        RedrivePolicy=redrive,
    )
def create_project(self, chain_context, codebuild_role, codebuild_environment, name):
    """Build a CodeBuild Project wired to CodePipeline artifacts.

    When a VPC config is present, an all-egress security group is added
    to the template and attached to the project.
    """
    vpc_kwargs = {}
    # Configure vpc if available
    if self.vpc_config:
        sg = ec2.SecurityGroup(
            "CodebBuild%s%sSG" % (self.stage_name_to_add, self.action_name),
            GroupDescription="Gives codebuild access to VPC",
            VpcId=self.vpc_config.vpc_id,
            SecurityGroupEgress=[{
                "IpProtocol": "-1",
                "CidrIp": "0.0.0.0/0",
                "FromPort": "0",
                "ToPort": "65535",
            }],
        )
        chain_context.template.add_resource(sg)
        vpc_kwargs['VpcConfig'] = codebuild.VpcConfig(
            VpcId=self.vpc_config.vpc_id,
            Subnets=self.vpc_config.subnets,
            SecurityGroupIds=[Ref(sg)],
        )

    project_name = "Project%s" % name
    print("Action %s is using buildspec: " % self.action_name)
    print(self.buildspec)

    return codebuild.Project(
        project_name,
        DependsOn=codebuild_role,
        Artifacts=codebuild.Artifacts(Type='CODEPIPELINE'),
        Environment=codebuild_environment,
        Name="%s-%s" % (chain_context.instance_name, project_name),
        ServiceRole=troposphere.GetAtt(codebuild_role, 'Arn'),
        Source=codebuild.Source(
            "Deploy",
            Type='CODEPIPELINE',
            BuildSpec=self.buildspec,
        ),
        **vpc_kwargs)
def generate_role_template(
        command: str,
        actions: list,
        role_name: str,
        path: str,
        assuming_account_id: str,
        assuming_resource: str,
        additional_statements: "list | None" = None,
) -> troposphere.Template:
    """Generate a template containing one IAM role for running ``command``.

    :param command: command the role is generated for (used in names/ids)
    :param actions: awacs actions the role's policy allows
    :param role_name: name of the IAM role
    :param path: IAM path for the role
    :param assuming_account_id: account allowed to assume the role
    :param assuming_resource: IAM resource in that account allowed to assume
    :param additional_statements: optional extra policy statements
    :return: troposphere Template with the role plus RoleName/RoleArn outputs
    """
    # None sentinel instead of a mutable default: a shared [] default would
    # leak statements between calls.
    if additional_statements is None:
        additional_statements = []
    t = troposphere.Template()
    t.description = f"Role used to run the {command} command"
    role = iam.Role(
        title="role",
        RoleName=role_name,
        Path=path,
        Policies=[
            iam.Policy(
                PolicyName=f"{command}-permissions",
                PolicyDocument=aws.PolicyDocument(
                    Version="2012-10-17",
                    Id=f"{command}-permissions",
                    Statement=[
                        aws.Statement(
                            Sid="1",
                            Effect=aws.Allow,
                            Action=actions,
                            Resource=["*"])
                    ] + additional_statements,
                ),
            )
        ],
        # Only the given account/resource may assume this role.
        AssumeRolePolicyDocument=aws.Policy(
            Version="2012-10-17",
            Id="AllowAssume",
            Statement=[
                aws.Statement(
                    Sid="1",
                    Effect=aws.Allow,
                    Principal=aws.Principal(
                        "AWS",
                        [IAM_ARN(assuming_resource, "", assuming_account_id)]),
                    Action=[awacs_sts.AssumeRole],
                )
            ],
        ),
    )
    t.add_resource(role)
    t.add_output(troposphere.Output("RoleName", Value=troposphere.Ref(role)))
    t.add_output(
        troposphere.Output("RoleArn", Value=troposphere.GetAtt(role, "Arn")))
    return t
def __init__(self, stack, paco_ctx):
    """Pinpoint Application template with optional SMS/Email channels and
    Id/Arn outputs."""
    pinpoint_app = stack.resource
    super().__init__(stack, paco_ctx)
    self.set_aws_name('PinpointApp', self.resource_group_name, self.resource.name)
    self.init_template('Pinpoint Application')
    if not pinpoint_app.is_enabled():
        return

    # Pinpoint Application
    app_logical_id = 'PinpointApplication'
    app_res = troposphere.pinpoint.App(
        app_logical_id,
        Name=pinpoint_app.title,
    )
    self.template.add_resource(app_res)

    # Optional SMS channel
    if pinpoint_app.sms_channel:
        sms_dict = pinpoint_app.sms_channel.cfn_export_dict
        sms_dict['ApplicationId'] = troposphere.Ref(app_logical_id)
        self.template.add_resource(
            troposphere.pinpoint.SMSChannel.from_dict('SMSChannel', sms_dict)
        )

    # Optional Email channel: sender identity is the SES identity ARN for
    # the configured from_address.
    if pinpoint_app.email_channel:
        email_dict = pinpoint_app.email_channel.cfn_export_dict
        email_dict['ApplicationId'] = troposphere.Ref(app_logical_id)
        email_dict['Identity'] = f'arn:aws:ses:{self.aws_region}:{self.account_ctx.id}:identity/{pinpoint_app.email_channel.from_address}'
        self.template.add_resource(
            troposphere.pinpoint.EmailChannel.from_dict('EmailChannel', email_dict)
        )

    # Output
    self.create_output(
        title=app_res.title + 'Id',
        description="Pinpoint Application Id",
        value=troposphere.Ref(app_res),
        ref=pinpoint_app.paco_ref_parts + ".id",
    )
    self.create_output(
        title=app_res.title + 'Arn',
        description="Pinpoint Application Arn",
        value=troposphere.GetAtt(app_res, "Arn"),
        ref=pinpoint_app.paco_ref_parts + ".arn",
    )
def create_record(awsclient, name_prefix, instance_reference, type="A", host_zone_name=None):
    """Build a route53 record entry enabling DNS names for services.

    Note: gcdt.route53 create_record(awsclient, ...) is used in dataplatform
    cloudformation.py templates!

    :param name_prefix: The sub domain prefix to use
    :param instance_reference: The EC2 troposphere reference which's private
        IP should be linked to, or a literal record value
    :param type: The type of the record A or CNAME (default: A)
    :param host_zone_name: The host zone name to use
        (like preprod.ds.glomex.cloud. - DO NOT FORGET THE DOT!)
    :return: RecordSetType
    """
    # Only fetch the host zone from the COPS stack if necessary.
    if host_zone_name is None:
        host_zone_name = _retrieve_stack_host_zone_name(awsclient)

    if type not in ("A", "CNAME"):
        raise Exception("Record set type is not supported!")

    record_logical_name = name_prefix \
        .replace('.', '') \
        .replace('-', '') \
        .title() + "HostRecord"

    # EC2 instances are referenced automatically by their private IP.
    if isinstance(instance_reference, Instance):
        record_value = troposphere.GetAtt(instance_reference, "PrivateIp")
    else:
        record_value = instance_reference

    return RecordSetType(
        record_logical_name,
        HostedZoneName=host_zone_name,
        Name=troposphere.Join("", [
            name_prefix + ".",
            host_zone_name,
        ]),
        Type=type,
        TTL=TTL_DEFAULT,
        ResourceRecords=[record_value],
    )
def alarm(self):
    """CloudWatch alarm that fires when any message lands on the S3
    events dead-letter queue."""
    env_name = self.env.config['Tags']['Environment'].capitalize()
    dlq_dimension = ts.cloudwatch.MetricDimension(
        Name='QueueName',
        Value=ts.GetAtt(self.dlq, 'QueueName'),
    )
    return ts.cloudwatch.Alarm(
        self._get_logical_id('Alarm'),
        AlarmDescription=f'Nudge {env_name} S3 Events DLQ alarm',
        Namespace='AWS/SQS',
        MetricName='NumberOfMessagesSent',
        Statistic='Sum',
        # https://stackoverflow.com/a/42635872
        # period needs to be 15 minutes for sqs
        Period=900,
        EvaluationPeriods=1,
        ComparisonOperator='GreaterThanThreshold',
        Threshold='0',
        AlarmActions=[ts.Ref(self.env.alerts_topic)],
        Dimensions=[dlq_dimension],
    )
def create_codepipeline_cfn(
    self,
    template,
    res_config,
):
    """Build a CodePipeline with Source, Build and Deploy stages.

    Adds CFN parameters for the CodeCommit/CodeBuild inputs, the pipeline
    service role and its policy, and the Pipeline resource itself.

    NOTE(review): codecommit_repo_arn_param / codecommit_role_arn_param /
    codebuild_project_arn_param are only bound when the corresponding
    action types are present in the config, yet the policy statements
    below reference them unconditionally — confirm configs always include
    a CodeCommit.Source and a CodeBuild.Build action.

    :param template: troposphere Template to add resources to
    :param res_config: pipeline resource configuration
    :return: the troposphere Pipeline resource
    """
    # CodePipeline
    # Source Actions
    source_stage_actions = []
    # Source Actions
    for action_name in res_config.source.keys():
        action_config = res_config.source[action_name]
        # Manual Approval Action
        if action_config.type == 'ManualApproval':
            manual_approval_action = self.init_manual_approval_action(template, action_config)
            source_stage_actions.append(manual_approval_action)
        # CodeCommit Action
        if action_config.type == 'CodeCommit.Source':
            codecommit_repo_arn_param = self.create_cfn_parameter(
                param_type='String',
                name='CodeCommitRepositoryArn',
                description='The Arn of the CodeCommit repository',
                value='{}.codecommit.arn'.format(action_config.paco_ref),
            )
            codecommit_role_arn_param = self.create_cfn_parameter(
                param_type='String',
                name='CodeCommitRoleArn',
                description='The Arn of the CodeCommit Role',
                value='{}.codecommit_role.arn'.format(action_config.paco_ref),
            )
            codecommit_repo_name_param = self.create_cfn_parameter(
                param_type='String',
                name='CodeCommitRepositoryName',
                description='The name of the CodeCommit repository',
                value=action_config.codecommit_repository+'.name',
            )
            deploy_branch_name_param = self.create_cfn_parameter(
                param_type='String',
                name='CodeCommitDeploymentBranchName',
                description='The name of the branch where commits will trigger a build.',
                value=action_config.deployment_branch_name,
            )
            codecommit_source_action = troposphere.codepipeline.Actions(
                Name='CodeCommit',
                ActionTypeId = troposphere.codepipeline.ActionTypeId(
                    Category = 'Source',
                    Owner = 'AWS',
                    Version = '1',
                    Provider = 'CodeCommit'
                ),
                Configuration = {
                    'RepositoryName': troposphere.Ref(codecommit_repo_name_param),
                    'BranchName': troposphere.Ref(deploy_branch_name_param)
                },
                OutputArtifacts = [
                    troposphere.codepipeline.OutputArtifacts(
                        Name = 'CodeCommitArtifact'
                    )
                ],
                RunOrder = action_config.run_order,
                RoleArn = troposphere.Ref(codecommit_role_arn_param)
            )
            source_stage_actions.append(codecommit_source_action)
    source_stage = troposphere.codepipeline.Stages(
        Name="Source",
        Actions = source_stage_actions
    )
    # Build Actions
    build_stage_actions = []
    for action_name in res_config.build.keys():
        action_config = res_config.build[action_name]
        # Manual Approval Action
        if action_config.type == 'ManualApproval':
            manual_approval_action = self.init_manual_approval_action(template, action_config)
            build_stage_actions.append(manual_approval_action)
        # CodeBuild Build Action
        elif action_config.type == 'CodeBuild.Build':
            codebuild_project_arn_param = self.create_cfn_parameter(
                param_type='String',
                name='CodeBuildProjectArn',
                description='The arn of the CodeBuild project',
                value='{}.project.arn'.format(action_config.paco_ref),
            )
            codebuild_build_action = troposphere.codepipeline.Actions(
                Name='CodeBuild',
                ActionTypeId = troposphere.codepipeline.ActionTypeId(
                    Category = 'Build',
                    Owner = 'AWS',
                    Version = '1',
                    Provider = 'CodeBuild'
                ),
                Configuration = {
                    'ProjectName': troposphere.Ref(self.resource_name_prefix_param),
                },
                InputArtifacts = [
                    troposphere.codepipeline.InputArtifacts(
                        Name = 'CodeCommitArtifact'
                    )
                ],
                OutputArtifacts = [
                    troposphere.codepipeline.OutputArtifacts(
                        Name = 'CodeBuildArtifact'
                    )
                ],
                RunOrder = action_config.run_order
            )
            build_stage_actions.append(codebuild_build_action)
    build_stage = troposphere.codepipeline.Stages(
        Name="Build",
        Actions = build_stage_actions
    )
    # Deploy Action
    [ deploy_stage, s3_deploy_assume_role_statement, codedeploy_deploy_assume_role_statement ] = self.init_deploy_stage(res_config, template)
    # Manual Deploy Enabled/Disable
    manual_approval_enabled_param = self.create_cfn_parameter(
        param_type='String',
        name='ManualApprovalEnabled',
        description='Boolean indicating whether a manual approval is enabled or not.',
        value=self.manual_approval_is_enabled,
    )
    template.add_condition(
        'ManualApprovalIsEnabled',
        troposphere.Equals(troposphere.Ref(manual_approval_enabled_param), 'true')
    )
    # CodePipeline Role and Policy
    self.pipeline_service_role_name = self.create_iam_resource_name(
        name_list=[self.res_name_prefix, 'CodePipeline-Service'],
        filter_id='IAM.Role.RoleName'
    )
    pipeline_service_role_res = troposphere.iam.Role(
        title='CodePipelineServiceRole',
        template = template,
        RoleName=self.pipeline_service_role_name,
        AssumeRolePolicyDocument=PolicyDocument(
            Version="2012-10-17",
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[ AssumeRole ],
                    Principal=Principal("Service", ['codepipeline.amazonaws.com']),
                )
            ]
        )
    )
    # Policy statements granting the pipeline access to CodeCommit,
    # CodePipeline/SNS/IAM, CodeBuild, the artifacts bucket, the CMK and
    # the CodeCommit role.
    pipeline_policy_statement_list = [
        Statement(
            Sid='CodeCommitAccess',
            Effect=Allow,
            Action=[
                Action('codecommit', 'List*'),
                Action('codecommit', 'Get*'),
                Action('codecommit', 'GitPull'),
                Action('codecommit', 'UploadArchive'),
                Action('codecommit', 'CancelUploadArchive'),
            ],
            Resource=[
                troposphere.Ref(codecommit_repo_arn_param),
            ]
        ),
        Statement(
            Sid='CodePipelineAccess',
            Effect=Allow,
            Action=[
                Action('codepipeline', '*'),
                Action('sns', 'Publish'),
                Action('s3', 'ListAllMyBuckets'),
                Action('s3', 'GetBucketLocation'),
                Action('iam', 'ListRoles'),
                Action('iam', 'PassRole'),
            ],
            Resource=[ '*' ]
        ),
        Statement(
            Sid='CodeBuildAccess',
            Effect=Allow,
            Action=[
                Action('codebuild', 'BatchGetBuilds'),
                Action('codebuild', 'StartBuild')
            ],
            Resource=[ troposphere.Ref(codebuild_project_arn_param) ]
        ),
        Statement(
            Sid='S3Access',
            Effect=Allow,
            Action=[
                Action('s3', 'PutObject'),
                Action('s3', 'GetBucketPolicy'),
                Action('s3', 'GetObject'),
                Action('s3', 'ListBucket'),
            ],
            Resource=[
                troposphere.Sub('arn:aws:s3:::${ArtifactsBucketName}/*'),
                troposphere.Sub('arn:aws:s3:::${ArtifactsBucketName}')
            ]
        ),
        Statement(
            Sid='KMSCMK',
            Effect=Allow,
            Action=[
                Action('kms', 'Decrypt'),
            ],
            Resource=[ troposphere.Ref(self.cmk_arn_param) ]
        ),
        Statement(
            Sid='CodeCommitAssumeRole',
            Effect=Allow,
            Action=[
                Action('sts', 'AssumeRole'),
            ],
            Resource=[ troposphere.Ref(codecommit_role_arn_param) ]
        ),
    ]
    # Deploy-stage assume-role statements are optional.
    if codedeploy_deploy_assume_role_statement != None:
        pipeline_policy_statement_list.append(codedeploy_deploy_assume_role_statement)
    if s3_deploy_assume_role_statement != None:
        pipeline_policy_statement_list.append(s3_deploy_assume_role_statement)
    troposphere.iam.PolicyType(
        title='CodePipelinePolicy',
        template = template,
        DependsOn = 'CodePipelineServiceRole',
        PolicyName=troposphere.Sub('${ResourceNamePrefix}-CodePipeline-Policy'),
        PolicyDocument=PolicyDocument(
            Statement=pipeline_policy_statement_list,
        ),
        Roles=[troposphere.Ref(pipeline_service_role_res)]
    )
    # Assemble the pipeline from whichever stages were produced.
    pipeline_stages = []
    if source_stage != None:
        pipeline_stages.append(source_stage)
    if build_stage != None:
        pipeline_stages.append(build_stage)
    if deploy_stage != None:
        pipeline_stages.append(deploy_stage)
    pipeline_res = troposphere.codepipeline.Pipeline(
        title = 'BuildCodePipeline',
        template = template,
        DependsOn='CodePipelinePolicy',
        RoleArn = troposphere.GetAtt(pipeline_service_role_res, 'Arn'),
        Name = troposphere.Ref(self.resource_name_prefix_param),
        Stages = pipeline_stages,
        ArtifactStore = troposphere.codepipeline.ArtifactStore(
            Type = 'S3',
            Location = troposphere.Ref(self.artifacts_bucket_name_param),
            EncryptionKey = troposphere.codepipeline.EncryptionKey(
                Type = 'KMS',
                Id = troposphere.Ref(self.cmk_arn_param),
            )
        )
    )
    return pipeline_res
def __init__(
    self,
    stack,
    paco_ctx,
    grp_id=None,
    topics=None,
):
    """SNS Topic(s) template.

    Serves two call patterns: a group of SNS Topics for global resources
    (``topics``/``grp_id`` supplied) or a single SNSTopic for an
    application resource (both None — the stack's own resource is used).
    Each enabled topic gets a Topic resource, an optional TopicPolicy,
    and arn/name outputs.
    """
    enabled_topics = False
    config = stack.resource
    # this template is used as both SNSTopics by global resources and a
    # single SNSTopic for an application resource.
    if topics == None:
        if grp_id == None:
            topics = [stack.resource]
            enabled_topics = stack.resource.is_enabled()
        else:
            topics = config.values()
            for topic in topics:
                if topic.is_enabled():
                    enabled_topics = True
    else:
        if len(topics) > 0:
            enabled_topics = True
    super().__init__(
        stack,
        paco_ctx,
        enabled=enabled_topics,
    )
    if grp_id == None:
        self.set_aws_name('SNSTopics', self.resource_group_name, self.resource_name)
    else:
        self.set_aws_name('SNSTopics', grp_id)

    # Troposphere Template Initialization
    self.init_template('SNS Topics')
    template = self.template

    # Topic Resources and Outputs
    topics_ref_cross_list = []  # NOTE(review): appears unused in this method
    topic_policy_cache = []
    for topic in topics:
        if not topic.is_enabled():
            continue
        statement_list = []
        topic_logical_id = self.create_cfn_logical_id(topic.name)

        # Do not specify a TopicName, as then updates cannot be performed that require
        # replacement of this resource.
        cfn_export_dict = {}
        if topic.display_name:
            cfn_export_dict['DisplayName'] = topic.display_name

        # Subscriptions
        if len(topic.subscriptions) > 0:
            cfn_export_dict['Subscription'] = []
            for subscription in topic.subscriptions:
                sub_dict = {}
                # Paco-ref endpoints are resolved through a CFN Parameter.
                if references.is_ref(subscription.endpoint):
                    param_name = 'Endpoint{}'.format(topic_logical_id)
                    parameter = self.create_cfn_parameter(
                        param_type='String',
                        name=param_name,
                        description='SNSTopic Endpoint value',
                        value=subscription.endpoint,
                    )
                    endpoint = parameter
                else:
                    endpoint = subscription.endpoint
                sub_dict['Endpoint'] = endpoint
                sub_dict['Protocol'] = subscription.protocol
                cfn_export_dict['Subscription'].append(sub_dict)

        topic_resource = troposphere.sns.Topic.from_dict(
            'Topic' + topic_logical_id, cfn_export_dict)
        # Stash the troposphere resource on the model for later reference.
        topic.topic_resource = topic_resource
        template.add_resource(topic_resource)

        # Optional policy statement allowing CodeStar Notifications to publish.
        if topic.codestar_notification_access:
            statement = Statement(
                Effect=Allow,
                Sid='CodeStarNotificationAccess',
                Principal=Principal(
                    "Service", 'codestar-notifications.amazonaws.com'),
                Action=[awacs.sns.Publish],
                Resource=[troposphere.Ref(topic_resource)],
            )
            statement_list.append(statement)

        # Add CloudWatch service
        statement = Statement(
            Effect=Allow,
            Sid='CloudWatchService',
            Principal=Principal("Service", 'cloudwatch.amazonaws.com'),
            Action=[awacs.sns.Publish],
            Resource=[troposphere.Ref(topic_resource)],
        )
        statement_list.append(statement)

        if topic.cross_account_access:
            account_id_list = [
                account.account_id
                for account in self.paco_ctx.project.accounts.values()
            ]
            for account_id in account_id_list:
                # NOTE(review): the cache is shared across ALL topics, so
                # cross-account statements are only attached to the first
                # topic processed — confirm this is intentional.
                if account_id in topic_policy_cache:
                    continue
                topic_policy_cache.append(account_id)
                statement = Statement(
                    Effect=Allow,
                    Sid=self.create_cfn_logical_id_join(account_id),
                    Principal=Principal("AWS", f'arn:aws:iam::{account_id}:root'),
                    Action=[awacs.sns.Publish, awacs.sns.Subscribe],
                    Resource=[troposphere.Ref(topic_resource)],
                )
                statement_list.append(statement)

        if len(statement_list) > 0:
            topic_policy_resource = troposphere.sns.TopicPolicy(
                f'Paco{topic_logical_id}TopicPolicy',
                Topics=[troposphere.Ref(topic_resource)],
                PolicyDocument=Policy(
                    Version='2012-10-17',
                    Id="PacoSNSTopicPolicy",
                    Statement=statement_list))
            template.add_resource(topic_policy_resource)

        # Topic Outputs
        if grp_id == None:
            output_ref = stack.resource.paco_ref_parts
        else:
            output_ref = '.'.join(
                [stack.resource.paco_ref_parts, topic.name])
        self.create_output(
            title='SNSTopicArn' + topic_logical_id,
            value=troposphere.Ref(topic_resource),
            ref=output_ref + '.arn')
        self.create_output(
            title='SNSTopicName' + topic_logical_id,
            value=troposphere.GetAtt(topic_resource, "TopicName"),
            ref=output_ref + '.name',
        )
def register_resources_template(self, template):
    """Register the lambda Function into the troposphere template.

    If this function requires a custom Role, register it too. Also
    registers an s3version Parameter, an optional VpcConfig, a published
    Version, a ``current`` Alias, and (optionally) a CLI output.
    """
    role = self.get_role()
    depends_on = []
    # A custom Role object is added to the template and replaced by its
    # ARN for the Function's Role property.
    if isinstance(role, iam.Role):
        template.add_resource(role)
        depends_on.append(role.name)
        role = troposphere.GetAtt(role, 'Arn')

    # Parameter holding the S3 object version of the uploaded code bundle.
    template.add_parameter(
        troposphere.Parameter(
            utils.valid_cloudformation_name(self.name, "s3version"),
            Type="String",
        ))

    extra = {}
    if self.settings.get('vpc'):
        vpc = self.project.get_resource('vpc::{}'.format(
            self.settings.get('vpc')))
        # When the vpc settings are Refs, coerce their parameter types to
        # the list forms VPCConfig expects.
        if isinstance(vpc.settings['security-groups'], troposphere.Ref):
            vpc.settings[
                'security-groups']._type = 'List<AWS::EC2::SecurityGroup::Id>'
        if isinstance(vpc.settings['subnet-ids'], troposphere.Ref):
            vpc.settings['subnet-ids']._type = 'List<AWS::EC2::Subnet::Id>'
        extra['VpcConfig'] = awslambda.VPCConfig(
            SecurityGroupIds=vpc.settings['security-groups'],
            SubnetIds=vpc.settings['subnet-ids'])

    function = template.add_resource(
        awslambda.Function(self.in_project_cf_name,
                           DependsOn=depends_on,
                           Code=awslambda.Code(
                               S3Bucket=troposphere.Ref("CodeBucket"),
                               S3Key=self.get_bucket_key(),
                               S3ObjectVersion=troposphere.Ref(
                                   utils.valid_cloudformation_name(
                                       self.name, "s3version")),
                           ),
                           Description=self.settings.get(
                               'description', ''),
                           Handler=self.get_handler(),
                           MemorySize=self.get_memory(),
                           Role=role,
                           Runtime=self.get_runtime(),
                           Timeout=self.get_timeout(),
                           **extra))

    # Versioning is performed by a helper lambda; contrib lambdas use its
    # ARN directly, other lambdas reference the ':current' alias.
    lambda_version = 'lambda:contrib_lambdas:version'
    lambda_ref = troposphere.GetAtt(self.project.reference(lambda_version),
                                    'Arn')
    if not self.in_project_name.startswith('lambda:contrib_lambdas:'):
        lambda_version = '{}:current'.format(lambda_version)
        lambda_ref = troposphere.Ref(
            self.project.reference(lambda_version))

    version = template.add_resource(
        LambdaVersion.create_with(
            utils.valid_cloudformation_name(self.name, "Version"),
            DependsOn=[
                self.project.reference(lambda_version), function.name
            ],
            lambda_arn=lambda_ref,
            FunctionName=troposphere.Ref(function),
            S3ObjectVersion=troposphere.Ref(
                utils.valid_cloudformation_name(self.name, "s3version")),
        ))

    # 'current' alias always tracks the most recently published version.
    alias = template.add_resource(
        awslambda.Alias(
            self.current_alias_cf_name,
            DependsOn=[version.name],
            FunctionName=troposphere.Ref(function),
            FunctionVersion=troposphere.GetAtt(version, "Version"),
            Name="current",
        ))

    if self._get_true_false('cli-output', 't'):
        template.add_output([
            troposphere.Output(
                utils.valid_cloudformation_name("Clioutput",
                                                self.in_project_name),
                Value=troposphere.Ref(alias),
            )
        ])
def __init__( self, stack, paco_ctx, ): super().__init__( stack, paco_ctx, iam_capabilities=["CAPABILITY_NAMED_IAM"], ) account_ctx = stack.account_ctx aws_region = stack.aws_region self.set_aws_name('Lambda', self.resource_group_name, self.resource_name) awslambda = self.awslambda = self.stack.resource self.init_template('Lambda Function') # if not enabled finish with only empty placeholder if not awslambda.is_enabled(): return # Parameters sdb_cache_param = self.create_cfn_parameter( name='EnableSDBCache', param_type='String', description='Boolean indicating whether an SDB Domain will be created to be used as a cache.', value=awslambda.sdb_cache ) function_description_param = self.create_cfn_parameter( name='FunctionDescription', param_type='String', description='A description of the Lamdba Function.', value=awslambda.description ) handler_param = self.create_cfn_parameter( name='Handler', param_type='String', description='The name of the function to call upon execution.', value=awslambda.handler ) runtime_param = self.create_cfn_parameter( name='Runtime', param_type='String', description='The name of the runtime language.', value=awslambda.runtime ) role_arn_param = self.create_cfn_parameter( name='RoleArn', param_type='String', description='The execution role for the Lambda Function.', value=awslambda.iam_role.get_arn() ) role_name_param = self.create_cfn_parameter( name='RoleName', param_type='String', description='The execution role name for the Lambda Function.', value=awslambda.iam_role.resolve_ref_obj.role_name ) memory_size_param = self.create_cfn_parameter( name='MemorySize', param_type='Number', description="The amount of memory that your function has access to. Increasing the function's" + \ " memory also increases its CPU allocation. The default value is 128 MB. 
The value must be a multiple of 64 MB.", value=awslambda.memory_size ) reserved_conc_exec_param = self.create_cfn_parameter( name='ReservedConcurrentExecutions', param_type='Number', description='The number of simultaneous executions to reserve for the function.', value=awslambda.reserved_concurrent_executions ) timeout_param = self.create_cfn_parameter( name='Timeout', param_type='Number', description='The amount of time that Lambda allows a function to run before stopping it. ', value=awslambda.timeout ) layers_param = self.create_cfn_parameter( name='Layers', param_type='CommaDelimitedList', description='List of up to 5 Lambda Layer ARNs.', value=','.join(awslambda.layers) ) # create the Lambda resource cfn_export_dict = { 'Description': troposphere.Ref(function_description_param), 'Handler': troposphere.Ref(handler_param), 'MemorySize': troposphere.Ref(memory_size_param), 'Runtime': troposphere.Ref(runtime_param), 'Role': troposphere.Ref(role_arn_param), 'Timeout': troposphere.Ref(timeout_param), } if awslambda.reserved_concurrent_executions: cfn_export_dict['ReservedConcurrentExecutions'] = troposphere.Ref(reserved_conc_exec_param), if len(awslambda.layers) > 0: cfn_export_dict['Layers'] = troposphere.Ref(layers_param), # Lambda VPC if awslambda.vpc_config != None: vpc_security_group = self.create_cfn_ref_list_param( name='VpcSecurityGroupIdList', param_type='List<AWS::EC2::SecurityGroup::Id>', description='VPC Security Group Id List', value=awslambda.vpc_config.security_groups, ref_attribute='id', ) # Segment SubnetList is a Segment stack Output based on availability zones segment_ref = awslambda.vpc_config.segments[0] + '.subnet_id_list' subnet_list_param = self.create_cfn_parameter( name='VpcSubnetIdList', param_type='List<AWS::EC2::Subnet::Id>', description='VPC Subnet Id List', value=segment_ref ) cfn_export_dict['VpcConfig'] = { 'SecurityGroupIds': troposphere.Ref(vpc_security_group), 'SubnetIds': troposphere.Ref(subnet_list_param), } # Code object: S3 
Bucket, inline ZipFile or deploy artifact? if awslambda.code.s3_bucket: if awslambda.code.s3_bucket.startswith('paco.ref '): value = awslambda.code.s3_bucket + ".name" else: value = awslambda.code.s3_bucket s3bucket_param = self.create_cfn_parameter( name='CodeS3Bucket', description="An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.", param_type='String', value=value ) s3key_param = self.create_cfn_parameter( name='CodeS3Key', description="The Amazon S3 key of the deployment package.", param_type='String', value=awslambda.code.s3_key ) cfn_export_dict['Code'] = { 'S3Bucket': troposphere.Ref(s3bucket_param), 'S3Key': troposphere.Ref(s3key_param), } else: zip_path = Path(awslambda.code.zipfile) if zip_path.is_file(): cfn_export_dict['Code'] = { 'ZipFile': zip_path.read_text() } elif zip_path.is_dir(): # get S3Bucket/S3Key or if it does not exist, it will create the bucket and artifact # and then upload the artifact bucket_name, artifact_name = init_lambda_code( self.paco_ctx.paco_buckets, self.stack.resource, awslambda.code.zipfile, self.stack.account_ctx, self.stack.aws_region, ) s3bucket_param = self.create_cfn_parameter( name='CodeS3Bucket', description="The Paco S3 Bucket for configuration", param_type='String', value=bucket_name ) s3key_param = self.create_cfn_parameter( name='CodeS3Key', description="The Lambda code artifact S3 Key.", param_type='String', value=artifact_name ) cfn_export_dict['Code'] = { 'S3Bucket': troposphere.Ref(s3bucket_param), 'S3Key': troposphere.Ref(s3key_param), } # Environment variables var_export = {} if awslambda.environment != None and awslambda.environment.variables != None: for var in awslambda.environment.variables: name = var.key.replace('_','') env_param = self.create_cfn_parameter( name='EnvVar{}'.format(name), param_type='String', description='Env var for {}'.format(name), value=var.value, ) var_export[var.key] = troposphere.Ref(env_param) if awslambda.sdb_cache == 
True: var_export['SDB_CACHE_DOMAIN'] = troposphere.Ref('LambdaSDBCacheDomain') if len(awslambda.log_group_names) > 0: # Add PACO_LOG_GROUPS Environment Variable paco_log_groups = [ prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag) for loggroup_name in awslambda.log_group_names ] paco_log_groups_param = self.create_cfn_parameter( name='EnvVariablePacoLogGroups', param_type='String', description='Env var for Paco Log Groups', value=','.join(paco_log_groups), ) var_export['PACO_LOG_GROUPS'] = troposphere.Ref(paco_log_groups_param) cfn_export_dict['Environment'] = { 'Variables': var_export } # Lambda resource self.awslambda_resource = troposphere.awslambda.Function.from_dict( 'Function', cfn_export_dict ) self.template.add_resource(self.awslambda_resource) # SDB Cache with SDB Domain and SDB Domain Policy resources if awslambda.sdb_cache == True: sdb_domain_resource = troposphere.sdb.Domain( title='LambdaSDBCacheDomain', template=self.template, Description="Lambda Function Domain" ) sdb_policy = troposphere.iam.Policy( title='LambdaSDBCacheDomainPolicy', template=self.template, PolicyName='SDBDomain', PolicyDocument=Policy( Version='2012-10-17', Statement=[ Statement( Effect=Allow, Action=[Action("sdb","*")], Resource=[ troposphere.Sub( 'arn:aws:sdb:${AWS::Region}:${AWS::AccountId}:domain/${DomainName}', DomainName=troposphere.Ref('LambdaSDBCacheDomain') ) ], ) ], Roles=troposphere.Ref(role_arn_param) ) ) sdb_policy.DependsOn = sdb_domain_resource self.awslambda_resource.DependsOn = sdb_domain_resource # Permissions # SNS Topic Lambda permissions and subscription idx = 1 for sns_topic_ref in awslambda.sns_topics: # SNS Topic Arn parameters param_name = 'SNSTopicArn%d' % idx self.create_cfn_parameter( name=param_name, param_type='String', description='An SNS Topic ARN to grant permission to.', value=sns_topic_ref + '.arn' ) # Lambda permission troposphere.awslambda.Permission( title=param_name + 'Permission', template=self.template, 
Action="lambda:InvokeFunction", FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'), Principal='sns.amazonaws.com', SourceArn=troposphere.Ref(param_name), ) # SNS Topic subscription sns_topic = get_model_obj_from_ref(sns_topic_ref, self.paco_ctx.project) troposphere.sns.SubscriptionResource( title=param_name + 'Subscription', template=self.template, Endpoint=troposphere.GetAtt(self.awslambda_resource, 'Arn'), Protocol='lambda', TopicArn=troposphere.Ref(param_name), Region=sns_topic.region_name ) idx += 1 # Lambda permissions for connected Paco resources app = get_parent_by_interface(awslambda, schemas.IApplication) for obj in get_all_nodes(app): # S3 Bucket notification permission(s) if schemas.IS3Bucket.providedBy(obj): seen = {} if hasattr(obj, 'notifications'): if hasattr(obj.notifications, 'lambdas'): for lambda_notif in obj.notifications.lambdas: if lambda_notif.function == awslambda.paco_ref: # yes, this Lambda gets notification from this S3Bucket group = get_parent_by_interface(obj, schemas.IResourceGroup) s3_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_') if s3_logical_name not in seen: troposphere.awslambda.Permission( title='S3Bucket' + s3_logical_name, template=self.template, Action="lambda:InvokeFunction", FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'), Principal='s3.amazonaws.com', SourceArn='arn:aws:s3:::' + obj.get_bucket_name(), ) seen[s3_logical_name] = True # Events Rule permission(s) if schemas.IEventsRule.providedBy(obj): seen = {} for target in obj.targets: target_ref = Reference(target.target) target_ref.set_account_name(account_ctx.get_name()) target_ref.set_region(aws_region) lambda_ref = Reference(awslambda.paco_ref) if target_ref.raw == lambda_ref.raw: # yes, the Events Rule has a Target that is this Lambda group = get_parent_by_interface(obj, schemas.IResourceGroup) eventsrule_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_') if eventsrule_logical_name not in seen: 
rule_name = create_event_rule_name(obj) # rule_name = self.create_cfn_logical_id("EventsRule" + obj.paco_ref) # rule_name = hash_smaller(rule_name, 64) source_arn = 'arn:aws:events:{}:{}:rule/{}'.format( aws_region, account_ctx.id, rule_name ) troposphere.awslambda.Permission( title='EventsRule' + eventsrule_logical_name, template=self.template, Action="lambda:InvokeFunction", FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'), Principal='events.amazonaws.com', SourceArn=source_arn, ) seen[eventsrule_logical_name] = True # IoT Analytics permission(s) if schemas.IIoTAnalyticsPipeline.providedBy(obj): seen = {} for activity in obj.pipeline_activities.values(): if activity.activity_type == 'lambda': target_ref = Reference(activity.function) target_ref.set_account_name(account_ctx.get_name()) target_ref.set_region(aws_region) lambda_ref = Reference(awslambda.paco_ref) if target_ref.raw == lambda_ref.raw: # yes, the IoT Analytics Lambda Activity has a ref to this Lambda group = get_parent_by_interface(obj, schemas.IResourceGroup) iotap_logical_name = self.gen_cf_logical_name(group.name + obj.name, '_') if iotap_logical_name not in seen: rule_name = create_event_rule_name(obj) troposphere.awslambda.Permission( title='IoTAnalyticsPipeline' + iotap_logical_name, template=self.template, Action="lambda:InvokeFunction", FunctionName=troposphere.GetAtt(self.awslambda_resource, 'Arn'), Principal='iotanalytics.amazonaws.com', ) seen[iotap_logical_name] = True # Log group(s) loggroup_function_name = troposphere.Join( '', [ '/aws/lambda/', troposphere.Select( 6, troposphere.Split(':', troposphere.GetAtt(self.awslambda_resource, 'Arn')) ) ] ) loggroup_resources = [] loggroup_resources.append( self.add_log_group(loggroup_function_name, 'lambda') ) if len(awslambda.log_group_names) > 0: # Additional App-specific LogGroups for loggroup_name in awslambda.log_group_names: # Add LogGroup to the template prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, 
self.paco_ctx.legacy_flag) loggroup_resources.append( self.add_log_group(prefixed_loggroup_name) ) # LogGroup permissions log_group_arns = [ troposphere.Join(':', [ f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group', loggroup_function_name, '*' ]) ] log_stream_arns = [ troposphere.Join(':', [ f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group', loggroup_function_name, 'log-stream', '*' ]) ] for loggroup_name in awslambda.log_group_names: prefixed_loggroup_name = prefixed_name(awslambda, loggroup_name, self.paco_ctx.legacy_flag) log_group_arns.append( f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:*' ) log_stream_arns.append( f'arn:aws:logs:{self.aws_region}:{account_ctx.id}:log-group:{prefixed_loggroup_name}:log-stream:*' ) loggroup_policy_resource = troposphere.iam.ManagedPolicy( title='LogGroupManagedPolicy', PolicyDocument=Policy( Version='2012-10-17', Statement=[ Statement( Sid='AllowLambdaModifyLogStreams', Effect=Allow, Action=[ Action("logs","CreateLogStream"), Action("logs","DescribeLogStreams"), ], Resource=log_group_arns, ), Statement( Sid='AllowLambdaPutLogEvents', Effect=Allow, Action=[ Action("logs","PutLogEvents"), ], Resource=log_stream_arns, ), ], ), Roles=[troposphere.Ref(role_name_param)], ) loggroup_policy_resource.DependsOn = loggroup_resources self.template.add_resource(loggroup_policy_resource) # Outputs self.create_output( title='FunctionName', value=troposphere.Ref(self.awslambda_resource), ref=awslambda.paco_ref_parts + '.name', ) self.create_output( title='FunctionArn', value=troposphere.GetAtt(self.awslambda_resource, 'Arn'), ref=awslambda.paco_ref_parts + '.arn', )
def __init__(
    self,
    paco_ctx,
    account_ctx,
    aws_region,
    stack_group,
    stack_tags,
    trail,
    s3_bucket_name
):
    """Build the CloudFormation template for a CloudTrail Trail.

    Optionally creates a CloudWatch LogGroup plus a log-delivery IAM Role
    when the trail is configured with a cloudwatchlogs_log_group, then
    creates the Trail resource and Arn outputs.

    :param trail: the CloudTrail model object.
    :param s3_bucket_name: name of the S3 bucket receiving trail logs.
    """
    super().__init__(
        paco_ctx,
        account_ctx,
        aws_region,
        config_ref=None,
        iam_capabilities=["CAPABILITY_IAM"],
        stack_group=stack_group,
        stack_tags=stack_tags
    )
    self.set_aws_name('CloudTrail')
    self.init_template('CloudTrail')
    template = self.template

    # create Trail resource
    trail_dict = {
        'TrailName': trail.name,
        'EnableLogFileValidation': trail.enable_log_file_validation,
        'IncludeGlobalServiceEvents': trail.include_global_service_events,
        'IsLogging': trail.is_enabled(),
        'IsMultiRegionTrail': trail.is_multi_region_trail,
        'S3BucketName': s3_bucket_name,
        'S3KeyPrefix': trail.s3_key_prefix,
    }
    if trail.cloudwatchlogs_log_group:
        # Trail also delivers to a CloudWatch LogGroup, which needs a
        # dedicated delivery Role that cloudtrail.amazonaws.com can assume.
        log_group = trail.cloudwatchlogs_log_group
        cfn_export_dict = {
            'LogGroupName': log_group.log_group_name,
        }
        if log_group.expire_events_after_days != 'Never' and log_group.expire_events_after_days != '':
            cfn_export_dict['RetentionInDays'] = int(log_group.expire_events_after_days)
        log_group.logical_id = 'CloudTrailLogGroup'
        log_group_resource = troposphere.logs.LogGroup.from_dict(
            log_group.logical_id,
            cfn_export_dict
        )
        template.add_resource(log_group_resource)
        trail_dict['CloudWatchLogsLogGroupArn'] = troposphere.GetAtt(log_group_resource, "Arn")

        # Create a Role
        trail_role_resource = troposphere.iam.Role(
            "CloudTrailLogDeliveryRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[
                            Action("sts", "AssumeRole")
                        ],
                        Principal=Principal("Service", "cloudtrail.amazonaws.com")
                    )
                ]
            ),
            Policies=[
                troposphere.iam.Policy(
                    PolicyName="CloudTrailLogGroupDelivery",
                    PolicyDocument=PolicyDocument(
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[awacs.logs.CreateLogStream],
                                Resource=[
                                    trail_dict['CloudWatchLogsLogGroupArn']
                                ]
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[awacs.logs.PutLogEvents],
                                Resource=[
                                    trail_dict['CloudWatchLogsLogGroupArn']
                                ]
                            )
                        ]
                    )
                )
            ]
        )
        template.add_resource(trail_role_resource)
        trail_dict['CloudWatchLogsRoleArn'] = troposphere.GetAtt(trail_role_resource, "Arn")

        # LogGroup Output
        self.create_output(
            title=log_group.logical_id + 'Arn',
            value=troposphere.GetAtt(log_group_resource, "Arn"),
            ref=log_group.paco_ref_parts + '.arn'
        )

    # CloudTrail resource
    trail.logical_id = 'CloudTrail' + self.create_cfn_logical_id(trail.name)
    trail_resource = troposphere.cloudtrail.Trail.from_dict(
        trail.logical_id,
        trail_dict
    )
    # NOTE(review): this DependsOn is set unconditionally, but the
    # CloudTrailLogDeliveryRole resource only exists when
    # cloudwatchlogs_log_group is configured — confirm CFN accepts this
    # for trails without a log group.
    trail_resource.DependsOn = 'CloudTrailLogDeliveryRole'
    template.add_resource(trail_resource)

    # CloudTrail output
    self.create_output(
        title=trail.logical_id + 'Arn',
        value=troposphere.GetAtt(trail_resource, "Arn"),
        ref=trail.paco_ref_parts + '.arn',
    )
    self.set_template()
def generate_codepipeline_template(
    codepipeline_role_name: str,
    codepipeline_role_path: str,
    codebuild_role_name: str,
    codebuild_role_path: str,
    ssm_parameter_prefix: str,
    scm_provider: str,
    scm_connection_arn: str,
    scm_full_repository_id: str,
    scm_branch_name: str,
    scm_bucket_name: str,
    scm_object_key: str,
    # NOTE(review): compared with `is False` below, so callers must pass a
    # real bool — annotation corrected from str; confirm against callers.
    scm_skip_creation_of_repo: bool,
    migrate_role_arn: str,
) -> troposphere.Template:
    """Build the CICD CloudFormation template for aws-organized migrate.

    Creates (depending on scm_provider: codecommit, codestarsourceconnection
    or s3) the source repository/bucket, an artifact-store bucket, admin
    roles for CodePipeline and CodeBuild, an SSM parameter pinning the
    aws-organized version, a CodeBuild project that runs
    `aws-organized migrate`, and the two-stage pipeline wiring it together.

    :return: the assembled troposphere Template.
    """
    version = pkg_resources.get_distribution("aws-organized").version
    t = troposphere.Template()
    t.set_description(
        "CICD template that runs aws organized migrate for the given branch of the given repo"
    )
    project_name = "AWSOrganized-Migrate"
    bucket_name = scm_bucket_name

    # Optionally create the source repo / source bucket.
    if scm_provider.lower(
    ) == "codecommit" and scm_skip_creation_of_repo is False:
        t.add_resource(
            codecommit.Repository("Repository",
                                  RepositoryName=scm_full_repository_id))

    if scm_provider.lower() == "s3" and scm_skip_creation_of_repo is False:
        # Default bucket name is account-scoped when none is given.
        bucket_name = (
            scm_bucket_name if scm_bucket_name else
            troposphere.Sub("aws-organized-pipeline-source-${AWS::AccountId}"))
        t.add_resource(
            s3.Bucket(
                "Source",
                BucketName=bucket_name,
                VersioningConfiguration=s3.VersioningConfiguration(
                    Status="Enabled"),
                BucketEncryption=s3.BucketEncryption(
                    ServerSideEncryptionConfiguration=[
                        s3.ServerSideEncryptionRule(
                            ServerSideEncryptionByDefault=s3.
                            ServerSideEncryptionByDefault(
                                SSEAlgorithm="AES256"))
                    ]),
            ))

    # Versioned, encrypted bucket for pipeline artifacts.
    artifact_store = t.add_resource(
        s3.Bucket(
            "ArtifactStore",
            VersioningConfiguration=s3.VersioningConfiguration(
                Status="Enabled"),
            BucketEncryption=s3.BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    s3.ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=s3.
                        ServerSideEncryptionByDefault(SSEAlgorithm="AES256"))
                ]),
        ))

    codepipeline_role = t.add_resource(
        iam.Role(
            "CodePipelineRole",
            RoleName=codepipeline_role_name,
            Path=codepipeline_role_path,
            ManagedPolicyArns=["arn:aws:iam::aws:policy/AdministratorAccess"],
            AssumeRolePolicyDocument=aws.PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    aws.Statement(
                        Effect=aws.Allow,
                        Action=[awacs_sts.AssumeRole],
                        Principal=aws.Principal(
                            "Service", ["codepipeline.amazonaws.com"]),
                    )
                ],
            ),
        ))

    codebuild_role = t.add_resource(
        iam.Role(
            "CodeBuildRole",
            RoleName=codebuild_role_name,
            Path=codebuild_role_path,
            ManagedPolicyArns=["arn:aws:iam::aws:policy/AdministratorAccess"],
            AssumeRolePolicyDocument=aws.PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    aws.Statement(
                        Effect=aws.Allow,
                        Action=[awacs_sts.AssumeRole],
                        Principal=aws.Principal("Service",
                                                ["codebuild.amazonaws.com"]),
                    )
                ],
            ),
        ))

    # SSM parameter pins the aws-organized version the build installs.
    version_parameter = ssm.Parameter(
        "versionparameter",
        Name=f"{ssm_parameter_prefix}/version",
        Type="String",
        Value=version,
    )
    t.add_resource(version_parameter)

    project = t.add_resource(
        codebuild.Project(
            "AWSOrganizedMigrate",
            Artifacts=codebuild.Artifacts(Type="CODEPIPELINE"),
            Environment=codebuild.Environment(
                ComputeType="BUILD_GENERAL1_SMALL",
                Image="aws/codebuild/standard:4.0",
                Type="LINUX_CONTAINER",
                EnvironmentVariables=[
                    {
                        "Name": "MIGRATE_ROLE_ARN",
                        "Type": "PLAINTEXT",
                        "Value": migrate_role_arn,
                    },
                    {
                        "Name": "Version",
                        "Type": "PARAMETER_STORE",
                        "Value": troposphere.Ref(version_parameter),
                    },
                    {
                        "Name": "SSM_PARAMETER_PREFIX",
                        "Type": "PLAINTEXT",
                        "Value": ssm_parameter_prefix,
                    },
                ],
            ),
            Name=project_name,
            ServiceRole=troposphere.GetAtt(codebuild_role, "Arn"),
            Source=codebuild.Source(
                Type="CODEPIPELINE",
                BuildSpec=yaml.safe_dump(
                    dict(
                        version="0.2",
                        phases=dict(
                            install={
                                "runtime-versions": dict(python="3.8"),
                                "commands":
                                ["pip install aws-organized==${Version}"],
                            },
                            build={
                                "commands": [
                                    "aws-organized migrate --ssm-parameter-prefix $SSM_PARAMETER_PREFIX $MIGRATE_ROLE_ARN"
                                ]
                            },
                        ),
                        artifacts=dict(files=["environment"]),
                    )),
            ),
        ))

    # Pick the source action matching the provider (None if unrecognized).
    source_actions = dict(
        codecommit=codepipeline.Actions(
            Name="SourceAction",
            ActionTypeId=codepipeline.ActionTypeId(Category="Source",
                                                   Owner="AWS",
                                                   Version="1",
                                                   Provider="CodeCommit"),
            OutputArtifacts=[
                codepipeline.OutputArtifacts(Name="SourceOutput")
            ],
            Configuration={
                "RepositoryName": scm_full_repository_id,
                "BranchName": scm_branch_name,
                "PollForSourceChanges": "true",
            },
            RunOrder="1",
        ),
        codestarsourceconnection=codepipeline.Actions(
            Name="SourceAction",
            ActionTypeId=codepipeline.ActionTypeId(
                Category="Source",
                Owner="AWS",
                Version="1",
                Provider="CodeStarSourceConnection",
            ),
            OutputArtifacts=[
                codepipeline.OutputArtifacts(Name="SourceOutput")
            ],
            Configuration={
                "ConnectionArn": scm_connection_arn,
                "FullRepositoryId": scm_full_repository_id,
                "BranchName": scm_branch_name,
                "OutputArtifactFormat": "CODE_ZIP",
            },
            RunOrder="1",
        ),
        s3=codepipeline.Actions(
            Name="SourceAction",
            ActionTypeId=codepipeline.ActionTypeId(Category="Source",
                                                   Owner="AWS",
                                                   Version="1",
                                                   Provider="S3"),
            OutputArtifacts=[
                codepipeline.OutputArtifacts(Name="SourceOutput")
            ],
            Configuration={
                "S3Bucket": bucket_name,
                "S3ObjectKey": scm_object_key,
                "PollForSourceChanges": True,
            },
            RunOrder="1",
        ),
    ).get(scm_provider.lower())

    t.add_resource(
        codepipeline.Pipeline(
            "Pipeline",
            RoleArn=troposphere.GetAtt(codepipeline_role, "Arn"),
            Stages=[
                codepipeline.Stages(Name="Source", Actions=[source_actions]),
                codepipeline.Stages(
                    Name="Migrate",
                    Actions=[
                        codepipeline.Actions(
                            Name="Migrate",
                            InputArtifacts=[
                                codepipeline.InputArtifacts(
                                    Name="SourceOutput")
                            ],
                            ActionTypeId=codepipeline.ActionTypeId(
                                Category="Build",
                                Owner="AWS",
                                Version="1",
                                Provider="CodeBuild",
                            ),
                            Configuration={
                                "ProjectName": troposphere.Ref(project),
                                "PrimarySource": "SourceAction",
                            },
                            RunOrder="1",
                        )
                    ],
                ),
            ],
            ArtifactStore=codepipeline.ArtifactStore(
                Type="S3", Location=troposphere.Ref(artifact_store)),
        ))
    return t
def __init__(self, paco_ctx, account_ctx, aws_region, stack_group,
             stack_tags, grp_id, res_id, config, res_config_ref):
    """Build the CloudFormation template for a group of SNS Topics.

    Each enabled topic gets a Topic resource (no TopicName, so updates
    requiring replacement remain possible), optional Subscriptions, and
    Arn/Name outputs. Topics marked cross_account_access share a single
    TopicPolicy allowing sns:Publish from every account in the project.

    :param config: iterable of topic model objects.
    :param res_config_ref: base ref string for outputs.
    """
    # The whole stack is disabled unless at least one topic is enabled.
    enabled_topics = False
    for topic in config:
        if topic.is_enabled():
            enabled_topics = True

    super().__init__(
        paco_ctx,
        account_ctx,
        aws_region,
        config_ref=res_config_ref,
        stack_group=stack_group,
        stack_tags=stack_tags,
        enabled=enabled_topics,
    )
    self.set_aws_name('SNSTopics', grp_id, res_id)
    self.config = config

    # Troposphere Template Initialization
    self.init_template('SNS Topics')
    template = self.template

    # Topic Resources and Outputs
    any_topic_enabled = False
    topics_ref_cross_list = []
    for topic in self.config:
        if not topic.is_enabled():
            continue
        any_topic_enabled = True
        topic_logical_id = self.create_cfn_logical_id(topic.name)

        # Do not specify a TopicName, as then updates cannot be performed that require
        # replacement of this resource.
        cfn_export_dict = {}
        if topic.display_name:
            cfn_export_dict['DisplayName'] = topic.display_name

        # Subscriptions
        if len(topic.subscriptions) > 0:
            cfn_export_dict['Subscription'] = []
            for subscription in topic.subscriptions:
                sub_dict = {}
                if references.is_ref(subscription.endpoint):
                    # NOTE(review): the Parameter name only varies by topic,
                    # so two ref-based subscriptions on one topic would reuse
                    # the same 'Endpoint<topic>' name — confirm
                    # create_cfn_parameter tolerates/dedupes this.
                    param_name = 'Endpoint{}'.format(topic_logical_id)
                    parameter = self.create_cfn_parameter(
                        param_type='String',
                        name=param_name,
                        description='SNSTopic Endpoint value',
                        value=subscription.endpoint,
                    )
                    endpoint = parameter
                else:
                    endpoint = subscription.endpoint
                sub_dict['Endpoint'] = endpoint
                sub_dict['Protocol'] = subscription.protocol
                cfn_export_dict['Subscription'].append(sub_dict)

        topic_resource = troposphere.sns.Topic.from_dict(
            'Topic' + topic_logical_id, cfn_export_dict)
        if topic.cross_account_access:
            topics_ref_cross_list.append(troposphere.Ref(topic_resource))
        topic.topic_resource = topic_resource
        template.add_resource(topic_resource)

        # Topic Outputs
        output_ref = '.'.join([res_config_ref, topic.name])
        self.create_output(title='SNSTopicArn' + topic_logical_id,
                           value=troposphere.Ref(topic_resource),
                           ref=output_ref + '.arn')
        self.create_output(
            title='SNSTopicName' + topic_logical_id,
            value=troposphere.GetAtt(topic_resource, "TopicName"),
            ref=output_ref + '.name',
        )

    # Cross-account access policy
    if len(topics_ref_cross_list) > 0:
        account_id_list = [
            account.account_id
            for account in self.paco_ctx.project.accounts.values()
        ]
        topic_policy_resource = troposphere.sns.TopicPolicy(
            'TopicPolicyCrossAccountPacoProject',
            Topics=topics_ref_cross_list,
            PolicyDocument=Policy(Version='2012-10-17',
                                  Id="CrossAccountPublish",
                                  Statement=[
                                      Statement(
                                          Effect=Allow,
                                          Principal=Principal("AWS", "*"),
                                          Action=[awacs.sns.Publish],
                                          Resource=topics_ref_cross_list,
                                          Condition=Condition(
                                              StringEquals({
                                                  'AWS:SourceOwner': account_id_list,
                                              })))
                                  ]))
        template.add_resource(topic_policy_resource)

    self.enabled = any_topic_enabled

    # Generate the Template
    self.set_template()
def handle(self, chain_context):
    """
    This step adds in the shell of a pipeline.
     * s3 bucket
     * policies for the bucket and pipeline
     * your next step in the chain MUST be a source stage
    :param chain_context: shared chain state; this step mutates its
        template and metadata (bucket name and bucket-policy ref).
    :return:
    """
    if self.create_bucket:
        # Versioned artifact bucket for the pipeline.
        pipeline_bucket = Bucket(
            "PipelineBucket%s" % chain_context.instance_name,
            BucketName=self.bucket_name,
            VersioningConfiguration=VersioningConfiguration(
                Status="Enabled"))
        chain_context.template.add_resource(pipeline_bucket)

    default_bucket_policies = self.get_default_bucket_policy_statements(
        self.bucket_name)

    if self.bucket_policy_statements:
        # Caller-supplied bucket policy gets its own resource.
        bucket_access_policy = self.get_bucket_policy(
            pipeline_bucket=self.bucket_name,
            bucket_policy_statements=self.bucket_policy_statements,
        )
        chain_context.template.add_resource(bucket_access_policy)

    # Managed policy granting access to the artifact bucket; added to the
    # template further below, and exposed to later steps via metadata.
    pipeline_bucket_access_policy = iam.ManagedPolicy(
        "PipelineBucketAccessPolicy",
        Path='/managed/',
        PolicyDocument=awacs.aws.PolicyDocument(
            Version="2012-10-17",
            Id="bucket-access-policy%s" % chain_context.instance_name,
            Statement=default_bucket_policies))

    chain_context.metadata[cumulus.steps.dev_tools.
                           META_PIPELINE_BUCKET_NAME] = self.bucket_name
    chain_context.metadata[
        cumulus.steps.dev_tools.META_PIPELINE_BUCKET_POLICY_REF] = Ref(
            pipeline_bucket_access_policy)

    # TODO: this can be cleaned up by using a policytype and passing in the pipeline role it should add itself to.
    pipeline_policy = iam.Policy(
        PolicyName="%sPolicy" % self.name,
        PolicyDocument=awacs.aws.PolicyDocument(
            Version="2012-10-17",
            Id="PipelinePolicy",
            Statement=[
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    # TODO: actions here could be limited more
                    Action=[awacs.aws.Action("s3", "*")],
                    Resource=[
                        troposphere.Join(
                            '',
                            [awacs.s3.ARN(), self.bucket_name, "/*"]),
                        troposphere.Join('', [
                            awacs.s3.ARN(),
                            self.bucket_name,
                        ]),
                    ],
                ),
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[awacs.aws.Action("kms", "*")],
                    Resource=['*'],
                ),
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[
                        awacs.aws.Action("cloudformation", "*"),
                        awacs.aws.Action("codebuild", "*"),
                    ],
                    # TODO: restrict more accurately
                    Resource=["*"]),
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[
                        awacs.codecommit.GetBranch,
                        awacs.codecommit.GetCommit,
                        awacs.codecommit.UploadArchive,
                        awacs.codecommit.GetUploadArchiveStatus,
                        awacs.codecommit.CancelUploadArchive
                    ],
                    Resource=["*"]),
                awacs.aws.Statement(Effect=awacs.aws.Allow,
                                    Action=[awacs.iam.PassRole],
                                    Resource=["*"]),
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[awacs.aws.Action("lambda", "*")],
                    Resource=["*"])
            ],
        ))

    # Service role CodePipeline assumes; carries the policy above plus any
    # extra policies supplied by the step's configuration.
    pipeline_service_role = iam.Role(
        "PipelineServiceRole",
        Path="/",
        AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[
            awacs.aws.Statement(Effect=awacs.aws.Allow,
                                Action=[awacs.sts.AssumeRole],
                                Principal=awacs.aws.Principal(
                                    'Service',
                                    "codepipeline.amazonaws.com"))
        ]),
        Policies=[pipeline_policy] + self.pipeline_policies)

    # Empty pipeline shell — a source stage MUST be added by the next step.
    generic_pipeline = codepipeline.Pipeline(
        "Pipeline",
        RoleArn=troposphere.GetAtt(pipeline_service_role, "Arn"),
        Stages=[],
        ArtifactStore=codepipeline.ArtifactStore(
            Type="S3",
            Location=self.bucket_name,
        )
        # TODO: optionally add kms key here
    )

    if self.bucket_kms_key_arn:
        encryption_config = codepipeline.EncryptionKey(
            "ArtifactBucketKmsKey",
            Id=self.bucket_kms_key_arn,
            Type='KMS',
        )
        generic_pipeline.ArtifactStore.EncryptionKey = encryption_config

    pipeline_output = troposphere.Output(
        "PipelineName",
        Description="Code Pipeline",
        Value=Ref(generic_pipeline),
    )
    pipeline_bucket_output = troposphere.Output(
        "PipelineBucket",
        Description="Name of the input artifact bucket for the pipeline",
        Value=self.bucket_name,
    )

    chain_context.template.add_resource(pipeline_bucket_access_policy)
    chain_context.template.add_resource(pipeline_service_role)
    chain_context.template.add_resource(generic_pipeline)
    chain_context.template.add_output(pipeline_output)
    chain_context.template.add_output(pipeline_bucket_output)
troposphere.Parameter("Compress", Type="String", Description="Compress archives with gzip", Default="true", AllowedValues=["true", "false"])) CompressTrue = t.add_condition( "CompressTrue", troposphere.Equals("true", troposphere.Ref(Compress))) sqsdlq = t.add_resource( troposphere.sqs.Queue("sqsdlq", MessageRetentionPeriod=1209600, DeletionPolicy=troposphere.Retain, Condition="NeedsDLQ")) DLQChoice = troposphere.If("NeedsDLQ", troposphere.GetAtt(sqsdlq, "Arn"), troposphere.Ref(S3BundlerDLQARN)) ManifestQueue = t.add_resource( troposphere.sqs.Queue("ManifestQueue", MessageRetentionPeriod=1209600, VisibilityTimeout=1800, RedrivePolicy=troposphere.sqs.RedrivePolicy( deadLetterTargetArn=DLQChoice, maxReceiveCount=10), DeletionPolicy=troposphere.Retain, Condition="NeedsQueue")) QueueChoice = troposphere.If("NeedsQueue", troposphere.GetAtt(ManifestQueue, "Arn"), troposphere.Ref(S3BundlerQueueARN))
def __init__(self, paco_ctx, account_ctx, aws_region, stack_group, stack_tags, app_id, grp_id, res_id, factory_name, cloudfront_config, config_ref):
    """Build a CloudFormation template for a CloudFront Distribution.

    Assembles a ``DistributionConfig`` dict from the paco ``cloudfront_config``
    model (default cache behavior, aliases, extra cache behaviors, origins,
    custom error responses, WAF WebACL), creates CFN Parameters for values that
    are resolved at provision time, and registers stack outputs. Depending on
    the ``route53_record_set_2019_10_16`` legacy flag, Route53 alias records are
    either emitted into this template or delegated to the route53 controller.

    :param paco_ctx: paco context (used for ref resolution and controllers)
    :param account_ctx: AWS account context passed to the base template
    :param aws_region: region the stack is provisioned in
    :param stack_group: stack group this template belongs to
    :param stack_tags: tags applied to the stack
    :param app_id: application id (NOTE(review): unused in this body)
    :param grp_id: resource group id, used in the AWS name
    :param res_id: resource id, used in the AWS name
    :param factory_name: factory name, used in the AWS name
    :param cloudfront_config: paco CloudFront resource model
    :param config_ref: paco ref path for this resource's outputs
    """
    super().__init__(paco_ctx, account_ctx, aws_region, enabled=cloudfront_config.is_enabled(), config_ref=config_ref, stack_group=stack_group, stack_tags=stack_tags, change_protected=cloudfront_config.change_protected)
    self.set_aws_name('CloudFront', grp_id, res_id, factory_name)
    # Flipped to True when any S3 origin uses an Origin Access Identity;
    # checked at the end to set wait_for_delete on the stack.
    origin_access_id_enabled = False
    self.init_template('CloudFront Distribution')
    template = self.template
    target_origin_param = self.create_cfn_parameter(
        param_type='String',
        name='TargetOrigin',
        description='Target Origin',
        value=cloudfront_config.default_cache_behavior.target_origin,
    )
    # Base DistributionConfig; optional sections are added below.
    distribution_config_dict = {
        'Enabled': cloudfront_config.is_enabled(),
        'DefaultRootObject': cloudfront_config.default_root_object,
        'HttpVersion': 'http1.1',
        'DefaultCacheBehavior': {
            'AllowedMethods': cloudfront_config.default_cache_behavior.allowed_methods,
            'DefaultTTL': cloudfront_config.default_cache_behavior.default_ttl,
            'TargetOriginId': troposphere.Ref(target_origin_param),
            'ViewerProtocolPolicy': cloudfront_config.default_cache_behavior.viewer_protocol_policy
        },
        'PriceClass': 'PriceClass_' + cloudfront_config.price_class,
        'ViewerCertificate': {
            'AcmCertificateArn': self.paco_ctx.get_ref('paco.ref ' + self.config_ref + '.viewer_certificate.arn'),
            'SslSupportMethod': cloudfront_config.viewer_certificate.ssl_supported_method,
            'MinimumProtocolVersion': cloudfront_config.viewer_certificate.minimum_protocol_version
        }
    }
    # -1 acts as the "not set" sentinel for the TTL bounds.
    if cloudfront_config.default_cache_behavior.min_ttl != -1:
        distribution_config_dict['DefaultCacheBehavior']['MinTTL'] = cloudfront_config.default_cache_behavior.min_ttl
    if cloudfront_config.default_cache_behavior.max_ttl != -1:
        distribution_config_dict['DefaultCacheBehavior']['MaxTTL'] = cloudfront_config.default_cache_behavior.max_ttl

    # Domain Aliases and Record Sets
    # One CFN Parameter per alias; the md5 of the domain keeps logical ids
    # unique and stable. aliases_param_map is reused for Route53 records below.
    aliases_list = []
    aliases_param_map = {}
    for alias in cloudfront_config.domain_aliases:
        alias_hash = utils.md5sum(str_data=alias.domain_name)
        domain_name_param = 'DomainAlias' + alias_hash
        alias_param = self.create_cfn_parameter(
            param_type='String',
            name=domain_name_param,
            description='Domain Alias CNAME',
            value=alias.domain_name)
        aliases_list.append(troposphere.Ref(alias_param))
        aliases_param_map[alias.domain_name] = alias_param
    distribution_config_dict['Aliases'] = aliases_list

    # DefaultCacheBehavior
    # Forward Values
    forwarded_values_config = cloudfront_config.default_cache_behavior.forwarded_values
    forwarded_values_dict = {
        'Cookies': {
            'Forward': 'none',
        },
        'QueryString': str(forwarded_values_config.query_string)
    }
    # Cookies: only forwarded for non-S3 origins; S3 origins keep the
    # default 'none'.
    if cloudfront_config.s3_origin_exists() == False:
        forwarded_values_dict['Cookies']['Forward'] = forwarded_values_config.cookies.forward
        if len(forwarded_values_config.cookies.whitelisted_names) > 0:
            forwarded_values_dict['Cookies']['WhitelistedNames'] = forwarded_values_config.cookies.whitelisted_names
    # Headers: likewise only forwarded for non-S3 origins.
    if cloudfront_config.s3_origin_exists() == False:
        forwarded_values_dict['Headers'] = cloudfront_config.default_cache_behavior.forwarded_values.headers
    distribution_config_dict['DefaultCacheBehavior']['ForwardedValues'] = forwarded_values_dict

    # Cache Behaviors
    if len(cloudfront_config.cache_behaviors) > 0:
        cache_behaviors_list = []
        # Behaviors may share a target origin; reuse one CFN Parameter per
        # distinct target instead of creating duplicates.
        target_origin_param_map = {}
        for cache_behavior in cloudfront_config.cache_behaviors:
            target_origin_hash = utils.md5sum(str_data=cache_behavior.target_origin)
            if target_origin_hash not in target_origin_param_map.keys():
                cb_target_origin_param = self.create_cfn_parameter(
                    param_type='String',
                    name=self.create_cfn_logical_id('TargetOriginCacheBehavior' + target_origin_hash),
                    description='Target Origin',
                    value=cache_behavior.target_origin,
                )
                target_origin_param_map[target_origin_hash] = cb_target_origin_param
            else:
                cb_target_origin_param = target_origin_param_map[target_origin_hash]
            cache_behavior_dict = {
                'PathPattern': cache_behavior.path_pattern,
                'AllowedMethods': cache_behavior.allowed_methods,
                'DefaultTTL': cache_behavior.default_ttl,
                'TargetOriginId': troposphere.Ref(cb_target_origin_param),
                'ViewerProtocolPolicy': cache_behavior.viewer_protocol_policy
            }
            cb_forwarded_values_config = cache_behavior.forwarded_values
            cb_forwarded_values_dict = {
                'Cookies': {
                    'Forward': 'none',
                },
                'QueryString': str(cb_forwarded_values_config.query_string)
            }
            # Cookies
            # NOTE(review): unlike the default behavior above, cookie forwarding
            # here is NOT gated on s3_origin_exists() — confirm this asymmetry
            # is intentional.
            cb_forwarded_values_dict['Cookies']['Forward'] = cb_forwarded_values_config.cookies.forward
            if len(cb_forwarded_values_config.cookies.whitelisted_names) > 0:
                cb_forwarded_values_dict['Cookies']['WhitelistedNames'] = cb_forwarded_values_config.cookies.whitelisted_names
            # Headers
            if cloudfront_config.s3_origin_exists() == False:
                cb_forwarded_values_dict['Headers'] = cache_behavior.forwarded_values.headers
            cache_behavior_dict['ForwardedValues'] = cb_forwarded_values_dict
            cache_behaviors_list.append(cache_behavior_dict)
        distribution_config_dict['CacheBehaviors'] = cache_behaviors_list

    # Origin Access Identity: one OAI per distribution when any S3 origin
    # exists; its id is exported as a stack output.
    if cloudfront_config.s3_origin_exists() == True:
        origin_id_res = troposphere.cloudfront.CloudFrontOriginAccessIdentity(
            title='CloudFrontOriginAccessIdentity',
            template=template,
            CloudFrontOriginAccessIdentityConfig=troposphere.cloudfront.CloudFrontOriginAccessIdentityConfig(
                Comment=troposphere.Ref('AWS::StackName')))
        troposphere.Output(title='CloudFrontOriginAccessIdentity',
                           template=template,
                           Value=troposphere.Ref(origin_id_res))

    # Origins: each origin is either an S3 bucket (paco ref resolved to its
    # URL) or a custom origin with an explicit domain name.
    origins_list = []
    for origin_name, origin in cloudfront_config.origins.items():
        if origin.s3_bucket != None:
            domain_hash = utils.md5sum(str_data=origin.s3_bucket)
            origin_domain_name = self.paco_ctx.get_ref(origin.s3_bucket + '.url')
        else:
            domain_hash = utils.md5sum(str_data=origin.domain_name)
            origin_domain_name = origin.domain_name
        origin_dict = {'Id': origin_name, 'DomainName': origin_domain_name}
        if origin.s3_bucket == None:
            origin_dict['CustomOriginConfig'] = {
                'HTTPSPort': origin.custom_origin_config.https_port,
                'OriginKeepaliveTimeout': origin.custom_origin_config.keepalive_timeout,
                'OriginProtocolPolicy': origin.custom_origin_config.protocol_policy,
                'OriginReadTimeout': origin.custom_origin_config.read_timeout,
                'OriginSSLProtocols': origin.custom_origin_config.ssl_protocols
            }
            if origin.custom_origin_config.http_port:
                origin_dict['CustomOriginConfig']['HTTPPort'] = str(
                    origin.custom_origin_config.http_port)
        else:
            s3_config = self.paco_ctx.get_ref(origin.s3_bucket)
            origin_dict['S3OriginConfig'] = {}
            if s3_config.cloudfront_origin == False:
                # Public bucket: empty OriginAccessIdentity means no OAI.
                origin_dict['S3OriginConfig']['OriginAccessIdentity'] = ''
            else:
                origin_access_id_enabled = True
                # NOTE(review): "Identiy" is a typo but is part of the CFN
                # Parameter logical name — renaming would change the generated
                # template, so it is left as-is.
                param_name = "OriginAccessIdentiy" + domain_hash
                access_id_ref = origin.s3_bucket + '.origin_id'
                s3_cf_origin_id_param = self.create_cfn_parameter(
                    param_type='String',
                    name=param_name,
                    description='Origin Access Identity',
                    value=access_id_ref,
                )
                origin_dict['S3OriginConfig']['OriginAccessIdentity'] = troposphere.Sub(
                    'origin-access-identity/cloudfront/${OriginAccessId}', {
                        'OriginAccessId': troposphere.Ref(s3_cf_origin_id_param)
                    })
        origins_list.append(origin_dict)
    distribution_config_dict['Origins'] = origins_list

    # Custom Error responses (only added when at least one is configured)
    error_resp_list = []
    for error_resp in cloudfront_config.custom_error_responses:
        error_resp_dict = {
            'ErrorCachingMinTTL': error_resp.error_caching_min_ttl,
            'ErrorCode': error_resp.error_code,
            'ResponseCode': error_resp.response_code,
            'ResponsePagePath': error_resp.response_page_path
        }
        error_resp_list.append(error_resp_dict)
    if len(error_resp_list) > 0:
        distribution_config_dict['CustomErrorResponses'] = error_resp_list

    # Web ACL (WAF) association, optional
    if cloudfront_config.webacl_id != None:
        webacl_id_param = self.create_cfn_parameter(
            param_type='String',
            name='WebAclId',
            description='WAF Web Acl ID',
            value=cloudfront_config.webacl_id)
        distribution_config_dict['WebACLId'] = troposphere.Ref(
            webacl_id_param)

    distribution_dict = {'DistributionConfig': distribution_config_dict}
    distribution_res = troposphere.cloudfront.Distribution.from_dict(
        'Distribution', distribution_dict)

    # Legacy path: emit Route53 alias RecordSets directly in this template.
    if self.paco_ctx.legacy_flag('route53_record_set_2019_10_16') == True:
        if cloudfront_config.is_dns_enabled() == True:
            for alias in cloudfront_config.domain_aliases:
                alias_hash = utils.md5sum(str_data=alias.domain_name)
                zone_param_name = 'AliasHostedZoneId' + alias_hash
                alias_zone_id_param = self.create_cfn_parameter(
                    param_type='String',
                    name=zone_param_name,
                    description='Domain Alias Hosted Zone Id',
                    value=alias.hosted_zone + '.id',
                )
                record_set_res = troposphere.route53.RecordSetType(
                    title=self.create_cfn_logical_id_join(
                        ['RecordSet', alias_hash]),
                    template=template,
                    HostedZoneId=troposphere.Ref(alias_zone_id_param),
                    Name=troposphere.Ref(
                        aliases_param_map[alias.domain_name]),
                    Type='A',
                    AliasTarget=troposphere.route53.AliasTarget(
                        DNSName=troposphere.GetAtt(distribution_res,
                                                   'DomainName'),
                        # Fixed hosted zone id used for CloudFront alias
                        # targets (same constant as the non-legacy path below).
                        HostedZoneId='Z2FDTNDATAQYW2'))
                record_set_res.DependsOn = distribution_res

    self.create_output(title='CloudFrontURL',
                       value=troposphere.GetAtt('Distribution', 'DomainName'),
                       ref=self.config_ref + '.domain_name')
    self.create_output(title='CloudFrontId',
                       value=troposphere.Ref(distribution_res),
                       ref=self.config_ref + '.id')

    template.add_resource(distribution_res)
    self.set_template()
    # When an OAI is in use the stack must finish deleting before
    # dependent resources can be cleaned up.
    if origin_access_id_enabled:
        self.stack.wait_for_delete = True

    # Non-legacy path: delegate alias records to the route53 controller.
    if self.paco_ctx.legacy_flag('route53_record_set_2019_10_16') == False:
        route53_ctl = self.paco_ctx.get_controller('route53')
        if cloudfront_config.is_dns_enabled() == True:
            for alias in cloudfront_config.domain_aliases:
                route53_ctl.add_record_set(
                    self.account_ctx,
                    self.aws_region,
                    enabled=cloudfront_config.is_enabled(),
                    dns=alias,
                    record_set_type='Alias',
                    alias_dns_name='paco.ref ' + self.config_ref + '.domain_name',
                    alias_hosted_zone_id='Z2FDTNDATAQYW2',
                    stack_group=self.stack_group,
                    config_ref=self.config_ref + '.record_set')
def __init__(self, stack, paco_ctx):
    """Build a CloudFormation template for an ApiGateway REST API.

    Creates the RestApi resource plus its Models, Resources, Methods (with
    Lambda integration via AWS_PROXY or AWS custom integration), a Deployment
    that depends on every Method, and the configured Stages.

    :param stack: paco stack whose ``resource`` is the ApiGatewayRestApi model
    :param paco_ctx: paco context

    Fixes over the previous revision:
    - ``raise NotImplemented(...)`` raised a ``TypeError`` (``NotImplemented``
      is a sentinel, not an exception class); now raises ``NotImplementedError``.
    - The Deployment's ``DependsOn`` was overwritten inside a loop so only the
      last Method was depended on; it is now the full list of Method ids.
    """
    super().__init__(stack, paco_ctx, iam_capabilities=["CAPABILITY_IAM"])
    self.apigatewayrestapi = apigatewayrestapi = stack.resource
    self.set_aws_name('ApiGatewayRestApi', self.resource_group_name, self.resource.name)
    self.init_template('ApiGateway: {}'.format(apigatewayrestapi.title))
    template = self.template
    if not self.apigatewayrestapi.is_enabled():
        return

    # Parameters: one Lambda ARN Parameter per Method; the Ref is stashed on
    # the method model for use in the integration Uri below.
    for method in self.apigatewayrestapi.methods.values():
        param_name = 'MethodArn' + self.create_cfn_logical_id(method.name)
        self.create_cfn_parameter(
            name=param_name,
            param_type='String',
            description='Lambda ARN parameter.',
            value=method.integration.integration_lambda + '.arn',
        )
        method.parameter_arn_ref = troposphere.Ref(param_name)

    # RestApi resource
    restapi_logical_id = 'ApiGatewayRestApi'
    restapi_resource = troposphere.apigateway.RestApi.from_dict(
        restapi_logical_id,
        self.apigatewayrestapi.cfn_export_dict
    )
    template.add_resource(restapi_resource)

    # Models
    for model in self.apigatewayrestapi.models.values():
        model.logical_id = self.create_cfn_logical_id('ApiGatewayModel' + model.name)
        cfn_export_dict = model.cfn_export_dict
        cfn_export_dict['RestApiId'] = troposphere.Ref(restapi_resource)
        if 'Schema' not in cfn_export_dict:
            # Schema is required by CloudFormation; default to an empty schema.
            cfn_export_dict['Schema'] = {}
        model_resource = troposphere.apigateway.Model.from_dict(model.logical_id, cfn_export_dict)
        model.resource = model_resource
        template.add_resource(model_resource)

    # Resources
    for resource in self.apigatewayrestapi.resources.values():
        resource_id = 'ApiGatewayResource' + self.create_cfn_logical_id(resource.name)
        cfn_export_dict = resource.cfn_export_dict
        if resource.parent_id == "RootResourceId":
            cfn_export_dict["ParentId"] = troposphere.GetAtt(restapi_resource, "RootResourceId")
            cfn_export_dict["RestApiId"] = troposphere.Ref(restapi_resource)
        else:
            # Fix: NotImplemented is not an exception class — raising it was a
            # TypeError. Use the proper exception type for the unimplemented path.
            raise NotImplementedError("ToDo: handle nested resources")
        resource_resource = troposphere.apigateway.Resource.from_dict(resource_id, cfn_export_dict)
        resource.resource = resource_resource
        resource_resource.DependsOn = restapi_logical_id
        template.add_resource(resource_resource)

    # Methods
    for method in self.apigatewayrestapi.methods.values():
        method_id = 'ApiGatewayMethod' + self.create_cfn_logical_id(method.name)
        method.logical_id = method_id
        cfn_export_dict = method.cfn_export_dict
        # Attach to the named Resource, or to the API root if none matches.
        for resource in self.apigatewayrestapi.resources.values():
            if resource.name == method.resource_id:
                cfn_export_dict["ResourceId"] = troposphere.Ref(resource.resource)
        if 'ResourceId' not in cfn_export_dict:
            cfn_export_dict["ResourceId"] = troposphere.GetAtt(restapi_resource, 'RootResourceId')
        cfn_export_dict["RestApiId"] = troposphere.Ref(restapi_resource)
        uri = troposphere.Join('', ["arn:aws:apigateway:", method.region_name, ":lambda:path/2015-03-31/functions/", method.parameter_arn_ref, "/invocations"])
        cfn_export_dict["Integration"]["Uri"] = uri

        if method.integration.integration_type == 'AWS_PROXY':
            # IAM Role - allows API Gateway to invoke Lambda
            # ToDo: enable Api Gateway to invoke things other than Lambda ...
            iam_role_resource = troposphere.iam.Role(
                self.create_cfn_logical_id('ApiGatewayIamRole' + self.apigatewayrestapi.name + method.name),
                Path='/',
                AssumeRolePolicyDocument=Policy(
                    Version='2012-10-17',
                    Statement=[
                        Statement(
                            Effect=Allow,
                            Action=[awacs.sts.AssumeRole],
                            Principal=Principal('Service', ['apigateway.amazonaws.com'])
                        )
                    ],
                ),
                Policies=[
                    troposphere.iam.Policy(
                        PolicyName=self.create_cfn_logical_id('LambdaAccessApiGateway' + self.apigatewayrestapi.name + method.name),
                        PolicyDocument=Policy(
                            Version='2012-10-17',
                            Statement=[
                                Statement(
                                    Effect=Allow,
                                    Action=[awacs.awslambda.InvokeFunction],
                                    Resource=[method.parameter_arn_ref],
                                )
                            ]
                        )
                    )
                ]
            )
            template.add_resource(iam_role_resource)
            cfn_export_dict["Integration"]["Credentials"] = troposphere.GetAtt(iam_role_resource, "Arn")
        elif method.integration.integration_type == 'AWS':
            # Enable Lambda (custom) integration
            # When send to a Lambda (Custom) the HTTP Method must always be POST regardless of
            # the HttpMethod
            cfn_export_dict["Integration"]["IntegrationHttpMethod"] = "POST"
            lambda_permission_resource = troposphere.awslambda.Permission(
                self.create_cfn_logical_id('LambdaPermissionApiGateway' + method.name),
                Action='lambda:InvokeFunction',
                FunctionName=method.parameter_arn_ref,
                Principal='apigateway.amazonaws.com',
                SourceArn=troposphere.Sub(
                    "arn:aws:execute-api:${AWS::Region}:${AWS::AccountId}:${%s}/*/%s/" % (
                        restapi_logical_id, method.http_method
                    )
                )
            )
            template.add_resource(lambda_permission_resource)

        # MethodResponses: map each response model name to a Ref of the
        # matching Model resource.
        # ToDo: validate model_names in the model
        responses = []
        for method_response in method.method_responses:
            response_dict = {"StatusCode": method_response.status_code}
            if method_response.response_models:
                response_dict["ResponseModels"] = {}
                for response_model in method_response.response_models:
                    for model in self.apigatewayrestapi.models.values():
                        if model.name == response_model.model_name:
                            response_dict["ResponseModels"][response_model.content_type] = troposphere.Ref(model.resource)
            responses.append(response_dict)
        cfn_export_dict["MethodResponses"] = responses

        method_resource = troposphere.apigateway.Method.from_dict(method_id, cfn_export_dict)
        method_resource.DependsOn = restapi_logical_id
        template.add_resource(method_resource)

    # Deployment
    deployment_resource = troposphere.apigateway.Deployment.from_dict(
        'ApiGatewayDeployment',
        {'Description': 'Deployment',
         'RestApiId': troposphere.Ref(restapi_resource)}
    )
    # Fix: the Deployment must depend on EVERY Method, otherwise CloudFormation
    # may create the Deployment before some Methods exist. The old loop
    # re-assigned DependsOn each iteration, keeping only the last Method.
    deployment_resource.DependsOn = [
        method.logical_id for method in self.apigatewayrestapi.methods.values()
    ]
    template.add_resource(deployment_resource)

    # Stages
    for stage in self.apigatewayrestapi.stages.values():
        stage_id = self.create_cfn_logical_id('ApiGatewayStage' + stage.name)
        cfn_export_dict = stage.cfn_export_dict
        cfn_export_dict["RestApiId"] = troposphere.Ref(restapi_resource)
        cfn_export_dict["DeploymentId"] = troposphere.Ref(deployment_resource)
        stage_resource = troposphere.apigateway.Stage.from_dict(stage_id, cfn_export_dict)
        template.add_resource(stage_resource)
def __init__(self, stack, paco_ctx,):
    """Build a CloudFormation template for an RDS Aurora cluster.

    Creates the DBSubnetGroup, cluster/instance parameter groups, optional
    enhanced-monitoring IAM Role, optional KMS CMK for storage encryption,
    master credentials (Secrets Manager or plain parameter), the DBCluster and
    its DBInstances, RDS event subscriptions, stack outputs, and Route53 CNAME
    records for the cluster endpoints.

    :param stack: paco stack whose ``resource`` is the RDSAurora model
    :param paco_ctx: paco context

    Fix over the previous revision: the default DB Parameter Group lookup used
    ``rds_config.engine`` / ``rds_config.engine_version`` — a name that does
    not exist in this method (it belongs to the non-Aurora RDS template) and
    raised ``NameError`` whenever a default parameter group was needed. It now
    correctly uses ``rds_aurora``.
    """
    rds_aurora = stack.resource
    super().__init__(stack, paco_ctx, iam_capabilities=["CAPABILITY_IAM"])
    self.set_aws_name('RDSAurora', self.resource_group_name, self.resource.name)
    self.init_template('RDSAurora')
    if not rds_aurora.is_enabled():
        return
    rds_cluster_logical_id = 'DBCluster'
    db_cluster_dict = rds_aurora.cfn_export_dict
    self.notification_groups = {}

    # DB Subnet Group
    db_subnet_id_list_param = self.create_cfn_parameter(
        param_type='List<AWS::EC2::Subnet::Id>',
        name='DBSubnetIdList',
        description='The list of subnet IDs where this database will be provisioned.',
        value=rds_aurora.segment + '.subnet_id_list',
    )
    db_subnet_group_resource = troposphere.rds.DBSubnetGroup(
        title='DBSubnetGroup',
        template=self.template,
        DBSubnetGroupDescription=troposphere.Ref('AWS::StackName'),
        SubnetIds=troposphere.Ref(db_subnet_id_list_param),
    )
    db_cluster_dict['DBSubnetGroupName'] = troposphere.Ref(db_subnet_group_resource)

    # DB Cluster Parameter Group
    if rds_aurora.cluster_parameter_group == None:
        # If no Cluster Parameter Group supplied then create one
        param_group_family = gen_vocabulary.rds_engine_versions[rds_aurora.engine][rds_aurora.engine_version]['param_group_family']
        cluster_parameter_group_ref = troposphere.rds.DBClusterParameterGroup(
            "DBClusterParameterGroup",
            template=self.template,
            Family=param_group_family,
            Description=troposphere.Ref('AWS::StackName')
        )
    else:
        # Use existing Parameter Group
        cluster_parameter_group_ref = self.create_cfn_parameter(
            name='DBClusterParameterGroupName',
            param_type='String',
            description='DB Cluster Parameter Group Name',
            value=rds_aurora.cluster_parameter_group + '.name',
        )
    db_cluster_dict['DBClusterParameterGroupName'] = troposphere.Ref(cluster_parameter_group_ref)

    # Default DB Parameter Group: needed when at least one instance has no
    # parameter group of its own and no default-instance override exists.
    need_db_pg = False
    default_instance = rds_aurora.default_instance
    for db_instance in rds_aurora.db_instances.values():
        if default_instance.parameter_group == None and db_instance.parameter_group == None:
            need_db_pg = True
    if need_db_pg:
        # create default DB Parameter Group
        # Fix: was rds_config.engine / rds_config.engine_version (NameError —
        # rds_config is not defined in this method).
        param_group_family = gen_vocabulary.rds_engine_versions[rds_aurora.engine][rds_aurora.engine_version]['param_group_family']
        default_dbparametergroup_resource = troposphere.rds.DBParameterGroup(
            "DBParameterGroup",
            template=self.template,
            Family=param_group_family,
            Description=troposphere.Ref('AWS::StackName')
        )

    # Enhanced Monitoring Role: required if any instance enables
    # enhanced monitoring (interval != 0).
    need_monitoring_role = False
    for db_instance in rds_aurora.db_instances.values():
        enhanced_monitoring_interval = db_instance.get_value_or_default('enhanced_monitoring_interval_in_seconds')
        if enhanced_monitoring_interval != 0:
            need_monitoring_role = True
    if need_monitoring_role:
        enhanced_monitoring_role_resource = troposphere.iam.Role(
            title='MonitoringIAMRole',
            template=self.template,
            AssumeRolePolicyDocument=PolicyDocument(
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[Action("sts", "AssumeRole")],
                        Principal=Principal("Service", "monitoring.rds.amazonaws.com")
                    )
                ]
            ),
            ManagedPolicyArns=["arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole"],
            Path="/",
        )

    # DB Snapshot Identifier: when restoring from a snapshot, credentials and
    # encryption settings come from the snapshot and must not be set here.
    if rds_aurora.db_snapshot_identifier == '' or rds_aurora.db_snapshot_identifier == None:
        db_snapshot_id_enabled = False
    else:
        db_snapshot_id_enabled = True
    if db_snapshot_id_enabled == True:
        db_cluster_dict['SnapshotIdentifier'] = rds_aurora.db_snapshot_identifier

    # KMS-CMK key encryption (not applicable when restoring from a snapshot)
    if rds_aurora.enable_kms_encryption == True and db_snapshot_id_enabled == False:
        key_policy = Policy(
            Version='2012-10-17',
            Statement=[
                # Account root retains full control of the key.
                Statement(
                    Effect=Allow,
                    Action=[Action('kms', '*'),],
                    Principal=Principal("AWS", [f'arn:aws:iam::{self.stack.account_ctx.id}:root']),
                    Resource=['*'],
                ),
                # Allow use of the key only via RDS in this account/region.
                Statement(
                    Effect=Allow,
                    Action=[
                        awacs.kms.Encrypt,
                        awacs.kms.Decrypt,
                        Action('kms', 'ReEncrypt*'),
                        Action('kms', 'GenerateDataKey*'),
                        awacs.kms.CreateGrant,
                        awacs.kms.ListGrants,
                        awacs.kms.DescribeKey,
                    ],
                    Principal=Principal('AWS', ['*']),
                    Resource=['*'],
                    Condition=Condition([
                        StringEquals({
                            'kms:CallerAccount': f'{self.stack.account_ctx.id}',
                            'kms:ViaService': f'rds.{self.stack.aws_region}.amazonaws.com'
                        })
                    ]),
                ),
            ],
        )
        kms_key_resource = troposphere.kms.Key(
            title='AuroraKMSCMK',
            template=self.template,
            KeyPolicy=key_policy,
        )
        db_cluster_dict['StorageEncrypted'] = True
        db_cluster_dict['KmsKeyId'] = troposphere.Ref(kms_key_resource)

        kms_key_alias_resource = troposphere.kms.Alias(
            title="AuroraKMSCMKAlias",
            template=self.template,
            AliasName=troposphere.Sub('alias/${' + rds_cluster_logical_id + '}'),
            TargetKeyId=troposphere.Ref(kms_key_resource),
        )
        kms_key_alias_resource.DependsOn = rds_cluster_logical_id

    # Username and Passsword - only if there is no DB Snapshot Identifier
    if db_snapshot_id_enabled == False:
        db_cluster_dict['MasterUsername'] = rds_aurora.master_username
        if rds_aurora.secrets_password:
            # Password from Secrets Manager
            sta_logical_id = 'SecretTargetAttachmentRDS'
            secret_arn_param = self.create_cfn_parameter(
                param_type='String',
                name='RDSSecretARN',
                description='The ARN for the secret for the RDS master password.',
                value=rds_aurora.secrets_password + '.arn',
            )
            secret_target_attachment_resource = troposphere.secretsmanager.SecretTargetAttachment(
                title=sta_logical_id,
                template=self.template,
                SecretId=troposphere.Ref(secret_arn_param),
                TargetId=troposphere.Ref(rds_cluster_logical_id),
                TargetType='AWS::RDS::DBCluster'
            )
            secret_target_attachment_resource.DependsOn = rds_cluster_logical_id
            # Dynamic reference resolved by CloudFormation at deploy time.
            db_cluster_dict['MasterUserPassword'] = troposphere.Join(
                '',
                ['{{resolve:secretsmanager:',
                 troposphere.Ref(secret_arn_param),
                 ':SecretString:password}}'
                 ]
            )
        else:
            master_password_param = self.create_cfn_parameter(
                param_type='String',
                name='MasterUserPassword',
                description='The master user password.',
                value=rds_aurora.master_user_password,
                noecho=True,
            )
            db_cluster_dict['MasterUserPassword'] = troposphere.Ref(master_password_param)

    db_cluster_res = troposphere.rds.DBCluster.from_dict(
        rds_cluster_logical_id,
        db_cluster_dict
    )
    self.template.add_resource(db_cluster_res)

    # Cluster Event Notifications
    if hasattr(rds_aurora, 'cluster_event_notifications'):
        for group in rds_aurora.cluster_event_notifications.groups:
            notif_param = self.create_notification_param(group)
            event_subscription_resource = troposphere.rds.EventSubscription(
                title=self.create_cfn_logical_id(f"ClusterEventSubscription{group}"),
                template=self.template,
                EventCategories=rds_aurora.cluster_event_notifications.event_categories,
                SourceIds=[troposphere.Ref(db_cluster_res)],
                SnsTopicArn=troposphere.Ref(notif_param),
                SourceType='db-cluster',
            )

    # DB Instance(s)
    for db_instance in rds_aurora.db_instances.values():
        logical_name = self.create_cfn_logical_id(db_instance.name)
        db_instance_dict = {
            'DBClusterIdentifier': troposphere.Ref(db_cluster_res),
            'DBInstanceClass': db_instance.get_value_or_default('db_instance_type'),
            'DBSubnetGroupName': troposphere.Ref(db_subnet_group_resource),
            'EnablePerformanceInsights': db_instance.get_value_or_default('enable_performance_insights'),
            'Engine': rds_aurora.engine,
            'PubliclyAccessible': db_instance.get_value_or_default('publicly_accessible'),
            'AllowMajorVersionUpgrade': db_instance.get_value_or_default('allow_major_version_upgrade'),
            'AutoMinorVersionUpgrade': db_instance.get_value_or_default('auto_minor_version_upgrade'),
        }
        enhanced_monitoring_interval = db_instance.get_value_or_default('enhanced_monitoring_interval_in_seconds')
        if enhanced_monitoring_interval != 0:
            # Role created above; need_monitoring_role guarantees it exists
            # whenever any instance reaches this branch.
            db_instance_dict['MonitoringInterval'] = enhanced_monitoring_interval
            db_instance_dict['MonitoringRoleArn'] = troposphere.GetAtt(enhanced_monitoring_role_resource, "Arn")
        if db_instance.availability_zone != None:
            subnet_id_ref = f'{rds_aurora.segment}.az{db_instance.availability_zone}.availability_zone'
            db_instance_subnet_param = self.create_cfn_parameter(
                param_type='String',
                name=f'DBInstanceAZ{logical_name}',
                description=f'Subnet where DB Instance {logical_name} is provisioned',
                value=subnet_id_ref,
            )
            db_instance_dict['AvailabilityZone'] = troposphere.Ref(db_instance_subnet_param)

        # DB Parameter Group: default group > instance-specific > default-instance
        if default_instance.parameter_group == None and db_instance.parameter_group == None:
            dbparametergroup_resource = default_dbparametergroup_resource
        elif db_instance.parameter_group != None:
            # Use instance-specific DB Parameter Group
            dbparametergroup_resource = self.create_cfn_parameter(
                name=f'DBParameterGroupName{logical_name}',
                param_type='String',
                description='DB Parameter Group Name',
                value=db_instance.parameter_group + '.name',
            )
        else:
            # Use default DB Parameter Group
            dbparametergroup_resource = self.create_cfn_parameter(
                name=f'DBParameterGroupName{logical_name}',
                param_type='String',
                description='DB Parameter Group Name',
                value=default_instance.parameter_group + '.name',
            )
        db_instance_dict['DBParameterGroupName'] = troposphere.Ref(dbparametergroup_resource)

        db_instance_resource = troposphere.rds.DBInstance.from_dict(
            f'DBInstance{logical_name}',
            db_instance_dict
        )
        self.template.add_resource(db_instance_resource)

        # DB Event Notifications
        event_notifications = db_instance.get_value_or_default('event_notifications')
        if event_notifications != None:
            for group in event_notifications.groups:
                notif_param = self.create_notification_param(group)
                event_subscription_resource = troposphere.rds.EventSubscription(
                    title=self.create_cfn_logical_id(f"DBEventSubscription{logical_name}{group}"),
                    template=self.template,
                    EventCategories=event_notifications.event_categories,
                    SourceIds=[troposphere.Ref(db_instance_resource)],
                    SnsTopicArn=troposphere.Ref(notif_param),
                    SourceType='db-instance',
                )

        # DB Instance Outputs
        self.create_output(
            title=f'DBInstanceName{logical_name}',
            description=f'DB Instance Name for {logical_name}',
            value=troposphere.Ref(db_instance_resource),
            ref=db_instance.paco_ref_parts + ".name",
        )

    # DB Cluster Outputs
    self.create_output(
        title='DBClusterName',
        description='DB Cluster Name',
        value=troposphere.Ref(db_cluster_res),
        ref=self.resource.paco_ref_parts + ".name",
    )
    self.create_output(
        title='ClusterEndpointAddress',
        description='Cluster Endpoint Address',
        value=troposphere.GetAtt(db_cluster_res, 'Endpoint.Address'),
        ref=self.resource.paco_ref_parts + ".endpoint.address",
    )
    self.create_output(
        title='ClusterEndpointPort',
        description='Cluster Endpoint Port',
        value=troposphere.GetAtt(db_cluster_res, 'Endpoint.Port'),
        ref=self.resource.paco_ref_parts + ".endpoint.port",
    )
    self.create_output(
        title='ClusterReadEndpointAddress',
        description='Cluster ReadEndpoint Address',
        value=troposphere.GetAtt(db_cluster_res, 'ReadEndpoint.Address'),
        ref=self.resource.paco_ref_parts + ".readendpoint.address",
    )

    # DNS - Route53 Record Set: CNAMEs for the write and read endpoints.
    if rds_aurora.is_dns_enabled() == True:
        route53_ctl = self.paco_ctx.get_controller('route53')
        for dns in rds_aurora.dns:
            route53_ctl.add_record_set(
                self.account_ctx,
                self.aws_region,
                rds_aurora,
                enabled=rds_aurora.is_enabled(),
                dns=dns,
                record_set_type='CNAME',
                resource_records=[rds_aurora.paco_ref + '.endpoint.address'],
                stack_group=self.stack.stack_group,
            )
        for read_dns in rds_aurora.read_dns:
            route53_ctl.add_record_set(
                self.account_ctx,
                self.aws_region,
                rds_aurora,
                enabled=rds_aurora.is_enabled(),
                dns=read_dns,
                record_set_type='CNAME',
                resource_records=[rds_aurora.paco_ref + '.readendpoint.address'],
                stack_group=self.stack.stack_group,
            )
def __init__(self, stack, paco_ctx,):
    """Build the CloudFormation template for a standalone RDS database.

    Reads the RDS resource configuration from ``stack.resource`` and adds to
    ``self.template``: a DB Subnet Group, a DB Parameter Group (created or
    referenced), an optional Option Group, the DB Instance itself, stack
    outputs, and Route53 record sets (legacy or controller-based path).

    :param stack: Paco stack whose ``resource`` holds the RDS configuration.
    :param paco_ctx: Paco context (controllers, legacy feature flags).
    """
    rds_config = stack.resource
    config_ref = rds_config.paco_ref_parts
    super().__init__(stack, paco_ctx)
    self.set_aws_name('RDS', self.resource_group_name, self.resource.name)
    self.init_template('RDS')
    template = self.template
    if not rds_config.is_enabled():
        return

    rds_logical_id = 'PrimaryDBInstance'

    # Major engine version, e.g. '5.7' from '5.7.26'. Computed here,
    # unconditionally, because the Option Group below needs it even when an
    # existing Parameter Group is supplied. (Previously it was assigned only
    # inside the "create a Parameter Group" branch, so a config with a
    # parameter_group AND option_configurations raised a NameError.)
    engine_major_version = '.'.join(rds_config.engine_version.split('.')[0:2])

    # DB Subnet Group
    db_subnet_id_list_param = self.create_cfn_parameter(
        param_type='List<AWS::EC2::Subnet::Id>',
        name='DBSubnetIdList',
        description='The list of subnet IDs where this database will be provisioned.',
        value=rds_config.segment + '.subnet_id_list',
    )
    db_subnet_group_res = troposphere.rds.DBSubnetGroup(
        title='DBSubnetGroup',
        template=template,
        DBSubnetGroupDescription=troposphere.Ref('AWS::StackName'),
        SubnetIds=troposphere.Ref(db_subnet_id_list_param),
    )

    # DB Parameter Group: create one when none is configured, otherwise
    # reference the existing group's name through a template parameter.
    if rds_config.parameter_group is None:
        param_group_family = gen_vocabulary.rds_engine_versions[rds_config.engine][rds_config.engine_version]['param_group_family']
        dbparametergroup_ref = troposphere.rds.DBParameterGroup(
            "DBParameterGroup",
            template=template,
            Family=param_group_family,
            Description=troposphere.Ref('AWS::StackName')
        )
    else:
        dbparametergroup_ref = self.create_cfn_parameter(
            name='DBParameterGroupName',
            param_type='String',
            description='DB Parameter Group Name',
            value=rds_config.parameter_group + '.name',
        )

    # Option Group: only created when option_configurations are declared.
    # (The original duplicated the len() check in a nested if; one check
    # is sufficient.)
    option_group_res = None
    if len(rds_config.option_configurations) > 0:
        option_config_list = []
        for option_config in rds_config.option_configurations:
            option_config_dict = {
                'OptionName': option_config.option_name,
            }
            if len(option_config.option_settings) > 0:
                option_config_dict['OptionSettings'] = [
                    {'Name': option_setting.name, 'Value': option_setting.value}
                    for option_setting in option_config.option_settings
                ]
            option_config_list.append(option_config_dict)
        option_group_dict = {
            'EngineName': rds_config.engine,
            'MajorEngineVersion': engine_major_version,
            'OptionGroupDescription': troposphere.Ref('AWS::StackName'),
            'OptionConfigurations': option_config_list,
        }
        option_group_res = troposphere.rds.OptionGroup.from_dict(
            'OptionGroup',
            option_group_dict
        )
        template.add_resource(option_group_res)

    # RDS MultiAZ (MySQL, PostgreSQL)
    if schemas.IRDSMultiAZ.providedBy(rds_config):
        sg_param_ref_list = []
        for sg_ref in rds_config.security_groups:
            sg_hash = utils.md5sum(str_data=sg_ref)
            sg_param = self.create_cfn_parameter(
                param_type='AWS::EC2::SecurityGroup::Id',
                name=self.create_cfn_logical_id('SecurityGroup' + sg_hash),
                description='VPC Security Group to attach to the RDS.',
                value=sg_ref + '.id',
            )
            sg_param_ref_list.append(troposphere.Ref(sg_param))

        db_instance_dict = {
            'Engine': rds_config.engine,
            'EngineVersion': rds_config.engine_version,
            'DBInstanceIdentifier': troposphere.Ref('AWS::StackName'),
            'DBInstanceClass': rds_config.db_instance_type,
            'DBSubnetGroupName': troposphere.Ref(db_subnet_group_res),
            'DBParameterGroupName': troposphere.Ref(dbparametergroup_ref),
            'CopyTagsToSnapshot': True,
            'AllowMajorVersionUpgrade': rds_config.allow_major_version_upgrade,
            'AutoMinorVersionUpgrade': rds_config.auto_minor_version_upgrade,
            'MultiAZ': rds_config.multi_az,
            'AllocatedStorage': rds_config.storage_size_gb,
            'StorageType': rds_config.storage_type,
            'BackupRetentionPeriod': rds_config.backup_retention_period,
            'Port': rds_config.port,
            'PreferredBackupWindow': rds_config.backup_preferred_window,
            'PreferredMaintenanceWindow': rds_config.maintenance_preferred_window,
            'VPCSecurityGroups': sg_param_ref_list,
        }
        # License Model
        if rds_config.license_model:
            db_instance_dict['LicenseModel'] = rds_config.license_model
        # Deletion Protection
        if rds_config.deletion_protection:
            db_instance_dict['DeletionProtection'] = rds_config.deletion_protection
        # CloudWatch Logs Exports
        if len(rds_config.cloudwatch_logs_exports) > 0:
            db_instance_dict['EnableCloudwatchLogsExports'] = rds_config.cloudwatch_logs_exports
        # Option Group
        if option_group_res is not None:
            db_instance_dict['OptionGroupName'] = troposphere.Ref(option_group_res)

        # DB Snapshot Identifier: when restoring from a snapshot, the
        # snapshot already fixes encryption and master credentials, so
        # those properties are only set when no snapshot is configured.
        db_snapshot_id_enabled = rds_config.db_snapshot_identifier not in ('', None)
        if db_snapshot_id_enabled:
            db_instance_dict['DBSnapshotIdentifier'] = rds_config.db_snapshot_identifier

        # Encryption
        encryption_enabled = rds_config.kms_key_id not in ('', None)
        if not db_snapshot_id_enabled:
            db_instance_dict['StorageEncrypted'] = encryption_enabled
            if encryption_enabled:
                db_instance_dict['KmsKeyId'] = rds_config.kms_key_id

        # Username and Password
        if not db_snapshot_id_enabled:
            db_instance_dict['MasterUsername'] = rds_config.master_username
        if rds_config.secrets_password:
            # Password resolved from Secrets Manager at deploy time via a
            # CloudFormation dynamic reference.
            sta_logical_id = 'SecretTargetAttachmentRDS'
            secret_arn_param = self.create_cfn_parameter(
                param_type='String',
                name='RDSSecretARN',
                description='The ARN for the secret for the RDS master password.',
                value=rds_config.secrets_password + '.arn',
            )
            secret_target_attachment_resource = troposphere.secretsmanager.SecretTargetAttachment(
                title=sta_logical_id,
                SecretId=troposphere.Ref(secret_arn_param),
                TargetId=troposphere.Ref(rds_logical_id),
                TargetType='AWS::RDS::DBInstance'
            )
            template.add_resource(secret_target_attachment_resource)
            db_instance_dict['MasterUserPassword'] = troposphere.Join(
                '',
                ['{{resolve:secretsmanager:', troposphere.Ref(secret_arn_param), ':SecretString:password}}']
            )
        else:
            master_password_param = self.create_cfn_parameter(
                param_type='String',
                name='MasterUserPassword',
                description='The master user password.',
                value=rds_config.master_user_password,
                noecho=True,
            )
            db_instance_dict['MasterUserPassword'] = troposphere.Ref(master_password_param)

        db_instance_res = troposphere.rds.DBInstance.from_dict(
            rds_logical_id,
            db_instance_dict
        )
        template.add_resource(db_instance_res)

        # Outputs
        self.create_output(
            title='DBInstanceName',
            description='DB Instance Name',
            value=troposphere.Ref(db_instance_res),
            ref=config_ref + ".name",
        )
        self.create_output(
            title='RDSEndpointAddress',
            description='RDS Endpoint URL',
            value=troposphere.GetAtt(db_instance_res, 'Endpoint.Address'),
            ref=config_ref + ".endpoint.address",
        )

        # DNS: legacy in-template Route53 record vs. the route53 controller.
        if self.paco_ctx.legacy_flag('route53_record_set_2019_10_16'):
            # Legacy Route53 Record Set
            if rds_config.is_dns_enabled():
                for dns_config in rds_config.dns_config:
                    # NOTE(review): dns_hash is built from hosted_zone/domain_name
                    # (not primary_*) and does not vary per dns_config, so more than
                    # one entry would yield duplicate logical ids — confirm this
                    # legacy path only ever has a single DNS entry.
                    dns_hash = utils.md5sum(str_data=(rds_config.hosted_zone + rds_config.domain_name))
                    primary_hosted_zone_id_param = self.create_cfn_parameter(
                        param_type='String',
                        name='DNSHostedZoneId' + dns_hash,
                        description='The hosted zone id to create the Route53 record set.',
                        value=rds_config.primary_hosted_zone + '.id',
                    )
                    record_set_res = troposphere.route53.RecordSetType(
                        title='RecordSet' + dns_hash,
                        template=template,
                        Comment='RDS Primary DNS',
                        HostedZoneId=troposphere.Ref(primary_hosted_zone_id_param),
                        Name=rds_config.primary_domain_name,
                        Type='CNAME',
                        TTL=dns_config.ttl,
                        ResourceRecords=[troposphere.GetAtt(db_instance_res, 'Endpoint.Address')]
                    )
                    record_set_res.DependsOn = db_instance_res
        else:
            # DNS - Route53 Record Set via the route53 controller
            if rds_config.is_dns_enabled():
                route53_ctl = self.paco_ctx.get_controller('route53')
                for dns_config in rds_config.dns:
                    route53_ctl.add_record_set(
                        self.account_ctx,
                        self.aws_region,
                        rds_config,
                        enabled=rds_config.is_enabled(),
                        dns=dns_config,
                        record_set_type='CNAME',
                        resource_records=['paco.ref ' + config_ref + '.endpoint.address'],
                        stack_group=self.stack.stack_group,
                        config_ref=rds_config.paco_ref_parts + '.dns',
                    )