Code Example #1
    def create_output(
        self,
        title=None,
        description=None,
        value=None,
        ref=None,
    ):
        "Create a Troposphere output, add it to the template and register the Stack Output(s)"
        if description is not None:
            troposphere.Output(
                title=title,
                template=self.template,
                Value=value,
                Description=description,
            )
        else:
            troposphere.Output(
                title=title,
                template=self.template,
                Value=value,
            )
        if isinstance(ref, list):
            for ref_item in ref:
                self.register_stack_output_config(ref_item, title)
        elif isinstance(ref, str):
            self.register_stack_output_config(ref, title)
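
A minimal usage sketch (not from the original project): the stack instance, the bucket resource, and the ref string are assumptions, shown only to illustrate how a method like the one above might be called.

import troposphere
import troposphere.s3

# 'stack' is assumed to be an instance of a class that owns self.template and
# implements create_output() / register_stack_output_config() as shown above.
bucket = stack.template.add_resource(troposphere.s3.Bucket('ArtifactsBucket'))
stack.create_output(
    title='ArtifactsBucketName',
    description='Name of the artifacts bucket',
    value=troposphere.Ref(bucket),
    ref='mynet.applications.app.artifacts_bucket.name',  # illustrative ref string only
)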
Code Example #2
    def handle(self, chain_context):
        print("Adding source action %s." % self.action_name)

        template = chain_context.template
        policy_name = "CodeBuildPolicy%s" % chain_context.instance_name
        codebuild_policy = cumulus.policies.codebuild.get_policy_code_build_general_access(
            policy_name)

        role_name = "PipelineSourceRole%s" % self.action_name
        codebuild_role = iam.Role(
            role_name,
            Path="/",
            AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[
                awacs.aws.Statement(Effect=awacs.aws.Allow,
                                    Action=[awacs.sts.AssumeRole],
                                    Principal=awacs.aws.Principal(
                                        'Service', "codebuild.amazonaws.com"))
            ]),
            Policies=[codebuild_policy],
            ManagedPolicyArns=[
                chain_context.metadata[META_PIPELINE_BUCKET_POLICY_REF]
            ])

        source_action = SourceCodeCommitAction(
            Name=self.action_name,
            OutputArtifacts=[
                codepipeline.OutputArtifacts(Name=self.output_artifact_name)
            ],
            # TODO: when parameters are figured out, inject them into the template here.
            Configuration={
                "RepositoryName": Ref("RepositoryName"),
                "BranchName": Ref("RepositoryBranch"),
            },
        )

        template.add_resource(codebuild_role)

        found_pipelines = TemplateQuery.get_resource_by_type(
            template=chain_context.template,
            type_to_find=codepipeline.Pipeline)
        pipeline = found_pipelines[0]

        # Alternate way to get this
        # dummy = TemplateQuery.get_resource_by_title(chain_context.template, 'AppPipeline')

        stages = pipeline.Stages  # type: list

        # TODO: find stage by name
        first_stage = stages[0]

        # TODO accept a parallel action to the previous action, and don't +1 here.
        first_stage.Actions.append(source_action)

        template.add_output(
            troposphere.Output("RepoName%s" % self.action_name,
                               Value=Ref("RepositoryName")))

        template.add_output(
            troposphere.Output("RepoBranch%s" % self.action_name,
                               Value=Ref("RepositoryBranch")))
Code Example #3
File: helpers.py, Project: aws-samples/aws-organized
def generate_role_template(
    command: str,
    actions: list,
    role_name: str,
    path: str,
    assuming_account_id: str,
    assuming_resource: str,
    additional_statements: list = [],
) -> troposphere.Template:
    t = troposphere.Template()
    t.description = f"Role used to run the {command} command"
    role = iam.Role(
        title="role",
        RoleName=role_name,
        Path=path,
        Policies=[
            iam.Policy(
                PolicyName=f"{command}-permissions",
                PolicyDocument=aws.PolicyDocument(
                    Version="2012-10-17",
                    Id=f"{command}-permissions",
                    Statement=[
                        aws.Statement(Sid="1",
                                      Effect=aws.Allow,
                                      Action=actions,
                                      Resource=["*"])
                    ] + additional_statements,
                ),
            )
        ],
        AssumeRolePolicyDocument=aws.Policy(
            Version="2012-10-17",
            Id="AllowAssume",
            Statement=[
                aws.Statement(
                    Sid="1",
                    Effect=aws.Allow,
                    Principal=aws.Principal(
                        "AWS",
                        [IAM_ARN(assuming_resource, "", assuming_account_id)]),
                    Action=[awacs_sts.AssumeRole],
                )
            ],
        ),
    )
    t.add_resource(role)
    t.add_output(troposphere.Output("RoleName", Value=troposphere.Ref(role)))
    t.add_output(
        troposphere.Output("RoleArn", Value=troposphere.GetAtt(role, "Arn")))
    return t
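
A usage sketch, assumed rather than taken from the aws-organized sources: build the role template for a hypothetical 'migrate' command and print it as YAML. The action, role name, account id, and assuming resource are placeholders.

import awacs.aws

template = generate_role_template(
    command="migrate",
    actions=[awacs.aws.Action("organizations", "Describe*")],  # placeholder action
    role_name="AWSOrganizedMigrate",                           # placeholder role name
    path="/",
    assuming_account_id="123456789012",                        # placeholder account id
    assuming_resource="root",
)
print(template.to_yaml())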
Code Example #4
    def user_delegate_role_and_policies(self, user, permissions_list):
        "Create and add an account delegate Role to template"
        user_arn = 'arn:aws:iam::{}:user/{}'.format(self.master_account_id,
                                                    user.username)
        assume_role_res = troposphere.iam.Role(
            "UserAccountDelegateRole",
            RoleName="IAM-User-Account-Delegate-Role-{}".format(
                self.create_resource_name(user.name,
                                          filter_id='IAM.Role.RoleName')),
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(Effect=Allow,
                              Action=[AssumeRole],
                              Principal=Principal("AWS", [user_arn]),
                              Condition=Condition(
                                  [AWACSBool({MultiFactorAuthPresent: True})]))
                ]))
        # Iterate over permissions and create a delegate role and policies
        for permission_config in permissions_list:
            init_method = getattr(
                self,
                "init_{}_permission".format(permission_config.type.lower()))
            init_method(permission_config, assume_role_res)

        self.template.add_resource(assume_role_res)
        self.template.add_output(
            troposphere.Output(
                title='SigninUrl',
                Value=troposphere.Sub(
                    'https://signin.aws.amazon.com/switchrole?account=${AWS::AccountId}&roleName=${UserAccountDelegateRole}'
                )))
Code Example #5
    def register_type_project_template(cls, project, template):
        """Registers into the project stack a S3 bucket where all lambdas
        code will be stored, as well as an output so any subsequent template
        can have a reference to this resource."""

        bucket_name = troposphere.Join(
            "-",
            [
                utils.validate_code_bucket(project.settings['code-bucket']),
                troposphere.Ref(troposphere.AWS_REGION),
                troposphere.Ref('Stage')
            ]
        )
        code_bucket = s3.Bucket(
            "CodeBucket",
            BucketName=bucket_name,
            AccessControl=s3.Private,
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled'
            )
        )
        template.add_resource(code_bucket)
        template.add_output([
            troposphere.Output(
                "CodeBucket",
                Description="CodeBucket name",
                Value=bucket_name,
            )
        ])
Code Example #6
    def add_log_group(self, loggroup_name, logical_name=None):
        "Add a LogGroup resource to the template"
        if not logical_name:
            logical_name = loggroup_name
        cfn_export_dict = {
            'LogGroupName': loggroup_name,
        }
        if not hasattr(self.awslambda, 'expire_events_after_days'):
            self.awslambda.expire_events_after_days = 'Never'

        if self.awslambda.expire_events_after_days != 'Never' and self.awslambda.expire_events_after_days != '':
            cfn_export_dict['RetentionInDays'] = int(self.awslambda.expire_events_after_days)
        loggroup_logical_id = self.create_cfn_logical_id('LogGroup' + logical_name)
        loggroup_resource = troposphere.logs.LogGroup.from_dict(
            loggroup_logical_id,
            cfn_export_dict
        )
        loggroup_resource.DependsOn = self.awslambda_resource
        self.template.add_resource(loggroup_resource)

        # LogGroup Output
        self.register_stack_output_config(
            '{}.log_groups.{}.arn'.format(self.awslambda.paco_ref_parts, logical_name), loggroup_logical_id + 'Arn'
        )
        loggroup_output = troposphere.Output(
            loggroup_logical_id + 'Arn',
            Value=troposphere.GetAtt(loggroup_resource, "Arn")
        )
        self.template.add_output(loggroup_output)
        return loggroup_resource
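
A hypothetical call (names assumed): 'cfn' stands in for an instance of the template class above, which already holds self.awslambda, self.awslambda_resource, and self.template.

# Adds a LogGroup resource (logical id built from 'LogGroup' + logical_name)
# and its Arn output to cfn.template.
loggroup_resource = cfn.add_log_group(
    loggroup_name='/aws/lambda/my-function',  # made-up log group name
    logical_name='MyFunction',
)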
Code Example #7
def add_output_ref(template, description, value):
    title = description.replace(' ', '').replace('-', '').replace('_', '')
    template.add_output([
        troposphere.Output(title,
                           Description=description,
                           Value=troposphere.Ref(value),
                           Export=troposphere.Export(title))
    ])
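
A short usage sketch (assumed): exporting a VPC through this helper. The output title becomes 'MainVPC' because spaces, dashes, and underscores are stripped from the description.

import troposphere
import troposphere.ec2

template = troposphere.Template()
vpc = template.add_resource(
    troposphere.ec2.VPC('Vpc', CidrBlock='10.0.0.0/16'))
# Adds an exported output titled 'MainVPC' whose value is Ref(vpc).
add_output_ref(template, 'Main VPC', vpc)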
Code Example #8
def create_template_yaml(zone_name, list_of_resources):
    template = troposphere.Template()
    template.add_parameter(zone_name)
    template.add_resource(list_of_resources)
    output = troposphere.Output(
        'Endpoint',
        Description='dummy endpoint required by aodnstack playbook',
        Value='NO_ENDPOINT')
    template.add_output(output)
    return template.to_yaml(long_form=True)
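
A usage sketch (assumed), passing a zone-name Parameter and a single record-set resource; note that troposphere's add_resource() accepts a list, which is what the function relies on.

import troposphere
import troposphere.route53

zone_name = troposphere.Parameter('ZoneName', Type='String')  # hypothetical parameter
records = [
    troposphere.route53.RecordSetType(
        'DummyRecord',
        HostedZoneName=troposphere.Join('', [troposphere.Ref(zone_name), '.']),
        Name=troposphere.Join('', ['dummy.', troposphere.Ref(zone_name), '.']),
        Type='CNAME',
        TTL='300',
        ResourceRecords=['example.com.'],
    )
]
print(create_template_yaml(zone_name, records))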
Code Example #9
    def test_tropo_to_string(self):
        utility.tropo_to_string(tropo.Template())
        utility.tropo_to_string(tropo.Base64('efsdfsdf'))
        utility.tropo_to_string(tropo.Output('efsdfsdf', Value='dsfsdfs'))
        utility.tropo_to_string(tropo.Parameter('efsdfsdf', Type='dsfsdfs'))

        # These constructors recursively call themselves for some reason
        # Don't instantiate directly
        # utility.tropo_to_string(tropo.AWSProperty())
        # utility.tropo_to_string(tropo.AWSAttribute())

        utility.tropo_to_string(
            ec2.Instance("ec2instance",
                         InstanceType="m3.medium",
                         ImageId="ami-951945d0"))
Code Example #10
File: rds.py, Project: knowledgewarrior/paco
    def __init__(self,
                 paco_ctx,
                 account_ctx,
                 aws_region,
                 stack_group,
                 stack_tags,
                 grp_id,
                 resource,
                 config_ref=None):
        super().__init__(paco_ctx,
                         account_ctx,
                         aws_region,
                         enabled=resource.is_enabled(),
                         config_ref=config_ref,
                         stack_group=stack_group,
                         stack_tags=stack_tags)
        self.set_aws_name('DBParameterGroup', grp_id, resource.name)
        self.init_template('DB Parameter Group')

        # Resources
        cfn_export_dict = {'Family': resource.family, 'Parameters': {}}
        if resource.description != None:
            cfn_export_dict['Description'] = resource.description
        else:
            cfn_export_dict['Description'] = troposphere.Ref('AWS::StackName')

        for key, value in resource.parameters.items():
            cfn_export_dict['Parameters'][key] = value

        dbparametergroup_resource = troposphere.rds.DBParameterGroup.from_dict(
            'DBParameterGroup', cfn_export_dict)
        self.template.add_resource(dbparametergroup_resource)

        # Outputs
        dbparametergroup_name_output = troposphere.Output(
            title='DBParameterGroupName',
            Description='DB Parameter Group Name',
            Value=troposphere.Ref(dbparametergroup_resource))
        self.template.add_output(dbparametergroup_name_output)
        self.register_stack_output_config(config_ref, 'DBParameterGroupName')
        self.register_stack_output_config(config_ref + '.name',
                                          'DBParameterGroupName')

        # All done, let's go home!
        self.set_template(self.template.to_yaml())
Code Example #11
    def __init__(self, stack, paco_ctx, factory_name):
        cloudfront_config = stack.resource
        config_ref = stack.stack_ref
        super().__init__(stack, paco_ctx)
        self.set_aws_name('CloudFront', self.resource_group_name,
                          self.resource_name, factory_name)
        origin_access_id_enabled = False

        self.init_template('CloudFront Distribution')
        template = self.template

        target_origin_param = self.create_cfn_parameter(
            param_type='String',
            name='TargetOrigin',
            description='Target Origin',
            value=cloudfront_config.default_cache_behavior.target_origin,
        )

        distribution_config_dict = {
            'Enabled': cloudfront_config.is_enabled(),
            'DefaultRootObject': cloudfront_config.default_root_object,
            'HttpVersion': 'http2',
            'DefaultCacheBehavior': {
                'AllowedMethods':
                cloudfront_config.default_cache_behavior.allowed_methods,
                'DefaultTTL':
                cloudfront_config.default_cache_behavior.default_ttl,
                'TargetOriginId':
                troposphere.Ref(target_origin_param),
                'ViewerProtocolPolicy':
                cloudfront_config.default_cache_behavior.viewer_protocol_policy
            },
            'PriceClass': 'PriceClass_' + cloudfront_config.price_class
        }
        if cloudfront_config.is_enabled() == True:
            # force the certificate to be in us-east-1, as that's the only CloudFront region
            if cloudfront_config.viewer_certificate.certificate != None:
                certificate = get_model_obj_from_ref(
                    cloudfront_config.viewer_certificate.certificate,
                    self.paco_ctx.project)
                if certificate.region != 'us-east-1':
                    raise InvalidCloudFrontCertificateRegion(
                        f'Certificate region is: {certificate.region}: {certificate.paco_ref}'
                    )
                viewer_certificate_param = self.create_cfn_parameter(
                    name='ViewerCertificateArn',
                    description="ACM Viewer Certificate ARN",
                    param_type='String',
                    value=cloudfront_config.viewer_certificate.certificate +
                    '.arn',
                )
                distribution_config_dict['ViewerCertificate'] = {
                    'AcmCertificateArn':
                    troposphere.Ref(viewer_certificate_param),
                    'SslSupportMethod':
                    cloudfront_config.viewer_certificate.ssl_supported_method,
                    'MinimumProtocolVersion':
                    cloudfront_config.viewer_certificate.
                    minimum_protocol_version
                }
        if cloudfront_config.default_cache_behavior.min_ttl != -1:
            distribution_config_dict['DefaultCacheBehavior'][
                'MinTTL'] = cloudfront_config.default_cache_behavior.min_ttl
        if cloudfront_config.default_cache_behavior.max_ttl != -1:
            distribution_config_dict['DefaultCacheBehavior'][
                'MaxTTL'] = cloudfront_config.default_cache_behavior.max_ttl

        # Lambda Function Association Parameters - for both DefaultCacheBehaviour and CacheBehaviours
        lambda_associations = []
        lambda_params = {}
        associations = cloudfront_config.default_cache_behavior.lambda_function_associations[:]
        for cache_behaviour in cloudfront_config.cache_behaviors:
            for lambda_association in cache_behaviour.lambda_function_associations:
                associations.append(lambda_association)
        for lambda_association in associations:
            lambda_ref = lambda_association.lambda_function
            if lambda_ref not in lambda_params:
                if lambda_ref.endswith('.autoversion.arn'):
                    lambda_name = self.create_cfn_logical_id(
                        'Lambda' + utils.md5sum(str_data=lambda_ref))
                    lambda_params[lambda_ref] = self.create_cfn_parameter(
                        param_type='String',
                        name=lambda_name,
                        description=
                        f'Lambda Function Associated for {lambda_ref}',
                        value=lambda_ref,
                    )
        # Lambda Function Association for DefaultCacheBehavior
        for lambda_association in cloudfront_config.default_cache_behavior.lambda_function_associations:
            lambda_associations.append({
                'EventType':
                lambda_association.event_type,
                'IncludeBody':
                lambda_association.include_body,
                'LambdaFunctionARN':
                troposphere.Ref(
                    lambda_params[lambda_association.lambda_function]),
            })
        if len(lambda_associations) > 0:
            # ToDo: PR this monkey-patch into Troposphere
            from troposphere.validators import boolean
            troposphere.cloudfront.LambdaFunctionAssociation.props[
                'IncludeBody'] = (boolean, False)
            distribution_config_dict['DefaultCacheBehavior'][
                'LambdaFunctionAssociations'] = lambda_associations

        # Domain Aliases and Record Sets
        aliases_list = []
        aliases_param_map = {}
        for alias in cloudfront_config.domain_aliases:
            alias_hash = utils.md5sum(str_data=alias.domain_name)
            domain_name_param = 'DomainAlias' + alias_hash
            alias_param = self.create_cfn_parameter(
                param_type='String',
                name=domain_name_param,
                description='Domain Alias CNAME',
                value=alias.domain_name)
            aliases_list.append(troposphere.Ref(alias_param))
            aliases_param_map[alias.domain_name] = alias_param

        distribution_config_dict['Aliases'] = aliases_list

        # DefaultCacheBehavior
        # Forward Values
        if cloudfront_config.default_cache_behavior.origin_request_policy_id != None:
            distribution_config_dict['DefaultCacheBehavior'][
                'OriginRequestPolicyId'] = cloudfront_config.default_cache_behavior.origin_request_policy_id
        if cloudfront_config.default_cache_behavior.cache_policy_id != None:
            distribution_config_dict['DefaultCacheBehavior'][
                'CachePolicyId'] = cloudfront_config.default_cache_behavior.cache_policy_id
        else:
            forwarded_values_config = cloudfront_config.default_cache_behavior.forwarded_values
            forwarded_values_dict = {
                'Cookies': {
                    'Forward': 'none',
                },
                'QueryString': str(forwarded_values_config.query_string)
            }
            # Cookies
            if cloudfront_config.s3_origin_exists() == False:
                forwarded_values_dict['Cookies'][
                    'Forward'] = forwarded_values_config.cookies.forward
            if len(forwarded_values_config.cookies.whitelisted_names) > 0:
                forwarded_values_dict['Cookies'][
                    'WhitelistedNames'] = forwarded_values_config.cookies.whitelisted_names
            # Headers
            if cloudfront_config.s3_origin_exists() == False:
                forwarded_values_dict[
                    'Headers'] = cloudfront_config.default_cache_behavior.forwarded_values.headers
            distribution_config_dict['DefaultCacheBehavior'][
                'ForwardedValues'] = forwarded_values_dict

        # Cache Behaviors
        if len(cloudfront_config.cache_behaviors) > 0:
            cache_behaviors_list = []
            target_origin_param_map = {}
            for cache_behavior in cloudfront_config.cache_behaviors:
                target_origin_hash = utils.md5sum(
                    str_data=cache_behavior.target_origin)
                if target_origin_hash not in target_origin_param_map.keys():
                    cb_target_origin_param = self.create_cfn_parameter(
                        param_type='String',
                        name=self.create_cfn_logical_id(
                            'TargetOriginCacheBehavior' + target_origin_hash),
                        description='Target Origin',
                        value=cache_behavior.target_origin,
                    )
                    target_origin_param_map[
                        target_origin_hash] = cb_target_origin_param
                else:
                    cb_target_origin_param = target_origin_param_map[
                        target_origin_hash]

                cache_behavior_dict = {
                    'PathPattern': cache_behavior.path_pattern,
                    'AllowedMethods': cache_behavior.allowed_methods,
                    'DefaultTTL': cache_behavior.default_ttl,
                    'TargetOriginId': troposphere.Ref(cb_target_origin_param),
                    'ViewerProtocolPolicy':
                    cache_behavior.viewer_protocol_policy
                }
                # CacheBehavior Lambda Function Associations
                if len(cache_behavior.lambda_function_associations) > 0:
                    lambda_associations = []
                    for lambda_association in cache_behavior.lambda_function_associations:
                        lambda_associations.append({
                            'EventType':
                            lambda_association.event_type,
                            'IncludeBody':
                            lambda_association.include_body,
                            'LambdaFunctionARN':
                            troposphere.Ref(lambda_params[
                                lambda_association.lambda_function]),
                        })
                    cache_behavior_dict[
                        'LambdaFunctionAssociations'] = lambda_associations

                # CachePolicyId or ForwardedValues, not both
                if cache_behavior.origin_request_policy_id != None:
                    cache_behavior_dict[
                        'OriginRequestPolicyId'] = cache_behavior.origin_request_policy_id
                if cache_behavior.cache_policy_id != None:
                    cache_behavior_dict[
                        'CachePolicyId'] = cache_behavior.cache_policy_id
                else:
                    cb_forwarded_values_config = cache_behavior.forwarded_values
                    cb_forwarded_values_dict = {
                        'QueryString':
                        str(cb_forwarded_values_config.query_string)
                    }

                    # Cookies
                    if cb_forwarded_values_config.cookies != None:
                        cb_forwarded_values_dict['Cookies'] = {
                            'Forward': 'none'
                        }
                        cb_forwarded_values_dict['Cookies'][
                            'Forward'] = cb_forwarded_values_config.cookies.forward
                        if len(cb_forwarded_values_config.cookies.
                               whitelisted_names) > 0:
                            cb_forwarded_values_dict['Cookies'][
                                'WhitelistedNames'] = cb_forwarded_values_config.cookies.whitelisted_names

                    # Headers
                    if cloudfront_config.s3_origin_exists() == False:
                        cb_forwarded_values_dict[
                            'Headers'] = cache_behavior.forwarded_values.headers
                    cache_behavior_dict[
                        'ForwardedValues'] = cb_forwarded_values_dict
                cache_behaviors_list.append(cache_behavior_dict)

            distribution_config_dict['CacheBehaviors'] = cache_behaviors_list

        # Origin Access Identity
        if cloudfront_config.s3_origin_exists() == True:
            origin_id_res = troposphere.cloudfront.CloudFrontOriginAccessIdentity(
                title='CloudFrontOriginAccessIdentity',
                template=template,
                CloudFrontOriginAccessIdentityConfig=troposphere.cloudfront.
                CloudFrontOriginAccessIdentityConfig(
                    Comment=troposphere.Ref('AWS::StackName')))
            troposphere.Output(title='CloudFrontOriginAccessIdentity',
                               template=template,
                               Value=troposphere.Ref(origin_id_res))

        # Origins
        origins_list = []
        for origin_name, origin in cloudfront_config.origins.items():
            if origin.s3_bucket != None:
                domain_hash = utils.md5sum(str_data=origin.s3_bucket)
                origin_domain_name = self.paco_ctx.get_ref(origin.s3_bucket +
                                                           '.url')
            else:
                domain_hash = utils.md5sum(str_data=origin.domain_name)
                origin_domain_name = origin.domain_name
            origin_dict = {'Id': origin_name, 'DomainName': origin_domain_name}
            if origin.s3_bucket == None:
                origin_dict['CustomOriginConfig'] = {
                    'OriginKeepaliveTimeout':
                    origin.custom_origin_config.keepalive_timeout,
                    'OriginProtocolPolicy':
                    origin.custom_origin_config.protocol_policy,
                    'OriginReadTimeout':
                    origin.custom_origin_config.read_timeout,
                }
                if len(origin.custom_origin_config.ssl_protocols) > 0:
                    origin_dict['CustomOriginConfig'][
                        'OriginSSLProtocols'] = origin.custom_origin_config.ssl_protocols
                if origin.custom_origin_config.https_port != None:
                    origin_dict['CustomOriginConfig'][
                        'HTTPSPort'] = origin.custom_origin_config.https_port
                if origin.custom_origin_config.http_port:
                    origin_dict['CustomOriginConfig']['HTTPPort'] = str(
                        origin.custom_origin_config.http_port)
            else:
                s3_config = self.paco_ctx.get_ref(origin.s3_bucket)
                origin_dict['S3OriginConfig'] = {}
                if s3_config.cloudfront_origin == False:
                    origin_dict['S3OriginConfig']['OriginAccessIdentity'] = ''
                else:
                    origin_access_id_enabled = True
                    param_name = "OriginAccessIdentiy" + domain_hash
                    access_id_ref = origin.s3_bucket + '.origin_id'
                    s3_cf_origin_id_param = self.create_cfn_parameter(
                        param_type='String',
                        name=param_name,
                        description='Origin Access Identity',
                        value=access_id_ref,
                    )
                    origin_dict['S3OriginConfig'][
                        'OriginAccessIdentity'] = troposphere.Sub(
                            'origin-access-identity/cloudfront/${OriginAccessId}',
                            {
                                'OriginAccessId':
                                troposphere.Ref(s3_cf_origin_id_param)
                            })
            origins_list.append(origin_dict)
        distribution_config_dict['Origins'] = origins_list

        # Custom Error
        error_resp_list = []
        for error_resp in cloudfront_config.custom_error_responses:
            error_resp_dict = {
                'ErrorCachingMinTTL': error_resp.error_caching_min_ttl,
                'ErrorCode': error_resp.error_code,
                'ResponseCode': error_resp.response_code,
                'ResponsePagePath': error_resp.response_page_path
            }
            error_resp_list.append(error_resp_dict)
        if len(error_resp_list) > 0:
            distribution_config_dict['CustomErrorResponses'] = error_resp_list

        # Web ACL
        if cloudfront_config.webacl_id != None:
            webacl_id_value = cloudfront_config.webacl_id
            if is_ref(webacl_id_value):
                webacl_id_value = cloudfront_config.webacl_id + '.arn'
            webacl_id_param = self.create_cfn_parameter(
                param_type='String',
                name='WebAclId',
                description='WAF Web Acl Arn',
                value=webacl_id_value)
            distribution_config_dict['WebACLId'] = troposphere.Ref(
                webacl_id_param)

        distribution_dict = {'DistributionConfig': distribution_config_dict}
        distribution_res = troposphere.cloudfront.Distribution.from_dict(
            'Distribution', distribution_dict)
        template.add_resource(distribution_res)

        self.create_output(title='CloudFrontURL',
                           value=troposphere.GetAtt('Distribution',
                                                    'DomainName'),
                           ref=self.config_ref + '.domain_name')
        self.create_output(title='CloudFrontId',
                           value=troposphere.Ref(distribution_res),
                           ref=self.config_ref + '.id')

        if self.paco_ctx.legacy_flag('route53_record_set_2019_10_16') == True:
            if cloudfront_config.is_dns_enabled() == True:
                for alias in cloudfront_config.domain_aliases:
                    alias_hash = utils.md5sum(str_data=alias.domain_name)
                    zone_param_name = 'AliasHostedZoneId' + alias_hash
                    alias_zone_id_param = self.create_cfn_parameter(
                        param_type='String',
                        name=zone_param_name,
                        description='Domain Alias Hosted Zone Id',
                        value=alias.hosted_zone + '.id',
                    )
                    record_set_res = troposphere.route53.RecordSetType(
                        title=self.create_cfn_logical_id_join(
                            ['RecordSet', alias_hash]),
                        template=template,
                        HostedZoneId=troposphere.Ref(alias_zone_id_param),
                        Name=troposphere.Ref(
                            aliases_param_map[alias.domain_name]),
                        Type='A',
                        AliasTarget=troposphere.route53.AliasTarget(
                            DNSName=troposphere.GetAtt(distribution_res,
                                                       'DomainName'),
                            HostedZoneId='Z2FDTNDATAQYW2'))
                    record_set_res.DependsOn = distribution_res

        if origin_access_id_enabled:
            self.stack.wait_for_delete = True

        if self.paco_ctx.legacy_flag('route53_record_set_2019_10_16') == False:
            route53_ctl = self.paco_ctx.get_controller('route53')
            if cloudfront_config.is_dns_enabled() == True:
                for alias in cloudfront_config.domain_aliases:
                    account_ctx = self.account_ctx
                    if alias.hosted_zone:
                        if is_ref(alias.hosted_zone):
                            hosted_zone = get_model_obj_from_ref(
                                alias.hosted_zone, self.paco_ctx.project)
                            account_ctx = self.paco_ctx.get_account_context(
                                account_ref=hosted_zone.account)
                        route53_ctl.add_record_set(
                            account_ctx,
                            self.aws_region,
                            cloudfront_config,
                            enabled=cloudfront_config.is_enabled(),
                            dns=alias,
                            record_set_type='Alias',
                            alias_dns_name='paco.ref ' + self.stack.stack_ref +
                            '.domain_name',
                            alias_hosted_zone_id=
                            'Z2FDTNDATAQYW2',  # This is always the hosted zone ID when you create an alias record that routes traffic to a CloudFront distribution
                            stack_group=self.stack.stack_group,
                            async_stack_provision=True,
                            config_ref=config_ref + '.record_set')
Code Example #12
    def register_resources_template(self, template):
        """Register the lambda Function into the troposphere template. If
        this function requires a custom Role, register it too."""

        role = self.get_role()
        depends_on = []
        if isinstance(role, iam.Role):
            template.add_resource(role)
            depends_on.append(role.name)
            role = troposphere.GetAtt(role, 'Arn')

        template.add_parameter(
            troposphere.Parameter(
                utils.valid_cloudformation_name(self.name, "s3version"),
                Type="String",
            ))

        extra = {}
        if self.settings.get('vpc'):
            vpc = self.project.get_resource('vpc::{}'.format(
                self.settings.get('vpc')))

            if isinstance(vpc.settings['security-groups'], troposphere.Ref):
                vpc.settings[
                    'security-groups']._type = 'List<AWS::EC2::SecurityGroup::Id>'

            if isinstance(vpc.settings['subnet-ids'], troposphere.Ref):
                vpc.settings['subnet-ids']._type = 'List<AWS::EC2::Subnet::Id>'

            extra['VpcConfig'] = awslambda.VPCConfig(
                SecurityGroupIds=vpc.settings['security-groups'],
                SubnetIds=vpc.settings['subnet-ids'])

        function = template.add_resource(
            awslambda.Function(self.in_project_cf_name,
                               DependsOn=depends_on,
                               Code=awslambda.Code(
                                   S3Bucket=troposphere.Ref("CodeBucket"),
                                   S3Key=self.get_bucket_key(),
                                   S3ObjectVersion=troposphere.Ref(
                                       utils.valid_cloudformation_name(
                                           self.name, "s3version")),
                               ),
                               Description=self.settings.get(
                                   'description', ''),
                               Handler=self.get_handler(),
                               MemorySize=self.get_memory(),
                               Role=role,
                               Runtime=self.get_runtime(),
                               Timeout=self.get_timeout(),
                               **extra))

        lambda_version = 'lambda:contrib_lambdas:version'
        lambda_ref = troposphere.GetAtt(self.project.reference(lambda_version),
                                        'Arn')
        if not self.in_project_name.startswith('lambda:contrib_lambdas:'):
            lambda_version = '{}:current'.format(lambda_version)
            lambda_ref = troposphere.Ref(
                self.project.reference(lambda_version))

        version = template.add_resource(
            LambdaVersion.create_with(
                utils.valid_cloudformation_name(self.name, "Version"),
                DependsOn=[
                    self.project.reference(lambda_version), function.name
                ],
                lambda_arn=lambda_ref,
                FunctionName=troposphere.Ref(function),
                S3ObjectVersion=troposphere.Ref(
                    utils.valid_cloudformation_name(self.name, "s3version")),
            ))

        alias = template.add_resource(
            awslambda.Alias(
                self.current_alias_cf_name,
                DependsOn=[version.name],
                FunctionName=troposphere.Ref(function),
                FunctionVersion=troposphere.GetAtt(version, "Version"),
                Name="current",
            ))
        if self._get_true_false('cli-output', 't'):
            template.add_output([
                troposphere.Output(
                    utils.valid_cloudformation_name("Clioutput",
                                                    self.in_project_name),
                    Value=troposphere.Ref(alias),
                )
            ])
Code Example #13
cat << 'EOF' >> /etc/ecs/ecs.config
ECS_CLUSTER=''',
            troposphere.Ref(ECSCluster), "\n",
            '''ECS_DOCKER_GRAPH_PATH=/mnt/docker
EOF
'''
        ]))

    return troposphere.ec2.LaunchSpecifications(**spec)


SpotFleet = t.add_resource(
    troposphere.ec2.SpotFleet(
        "SpotFleet",
        SpotFleetRequestConfigData=troposphere.ec2.SpotFleetRequestConfigData(
            IamFleetRole=troposphere.GetAtt(spotfleetrole, "Arn"),
            SpotPrice=troposphere.Ref(VCPUSpotBid),
            TargetCapacity=troposphere.Ref(VCPUTarget),
            AllocationStrategy="diversified",
            LaunchSpecifications=[
                mklaunchspecification(
                    i, instancespecs[i]['vCPU'],
                    instancespecs[i].get('storage', {}).get('devices', 0))
                for i in instancetypes
            ])))

t.add_output(troposphere.Output("SpotFleet", Value=troposphere.Ref(SpotFleet)))

print(t.to_json())
Code Example #14
    def handle(self, chain_context):
        """
        This step adds in the shell of a pipeline.
         * s3 bucket
         * policies for the bucket and pipeline
         * your next step in the chain MUST be a source stage
        :param chain_context:
        :return:
        """

        if self.create_bucket:
            pipeline_bucket = Bucket(
                "PipelineBucket%s" % chain_context.instance_name,
                BucketName=self.bucket_name,
                VersioningConfiguration=VersioningConfiguration(
                    Status="Enabled"))
            chain_context.template.add_resource(pipeline_bucket)

        default_bucket_policies = self.get_default_bucket_policy_statements(
            self.bucket_name)

        if self.bucket_policy_statements:
            bucket_access_policy = self.get_bucket_policy(
                pipeline_bucket=self.bucket_name,
                bucket_policy_statements=self.bucket_policy_statements,
            )
            chain_context.template.add_resource(bucket_access_policy)

        pipeline_bucket_access_policy = iam.ManagedPolicy(
            "PipelineBucketAccessPolicy",
            Path='/managed/',
            PolicyDocument=awacs.aws.PolicyDocument(
                Version="2012-10-17",
                Id="bucket-access-policy%s" % chain_context.instance_name,
                Statement=default_bucket_policies))

        chain_context.metadata[cumulus.steps.dev_tools.
                               META_PIPELINE_BUCKET_NAME] = self.bucket_name
        chain_context.metadata[
            cumulus.steps.dev_tools.META_PIPELINE_BUCKET_POLICY_REF] = Ref(
                pipeline_bucket_access_policy)

        # TODO: this can be cleaned up by using a policytype and passing in the pipeline role it should add itself to.
        pipeline_policy = iam.Policy(
            PolicyName="%sPolicy" % self.name,
            PolicyDocument=awacs.aws.PolicyDocument(
                Version="2012-10-17",
                Id="PipelinePolicy",
                Statement=[
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        # TODO: actions here could be limited more
                        Action=[awacs.aws.Action("s3", "*")],
                        Resource=[
                            troposphere.Join(
                                '', [awacs.s3.ARN(), self.bucket_name, "/*"]),
                            troposphere.Join('', [
                                awacs.s3.ARN(),
                                self.bucket_name,
                            ]),
                        ],
                    ),
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        Action=[awacs.aws.Action("kms", "*")],
                        Resource=['*'],
                    ),
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        Action=[
                            awacs.aws.Action("cloudformation", "*"),
                            awacs.aws.Action("codebuild", "*"),
                        ],
                        # TODO: restrict more accurately
                        Resource=["*"]),
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        Action=[
                            awacs.codecommit.GetBranch,
                            awacs.codecommit.GetCommit,
                            awacs.codecommit.UploadArchive,
                            awacs.codecommit.GetUploadArchiveStatus,
                            awacs.codecommit.CancelUploadArchive
                        ],
                        Resource=["*"]),
                    awacs.aws.Statement(Effect=awacs.aws.Allow,
                                        Action=[awacs.iam.PassRole],
                                        Resource=["*"]),
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        Action=[awacs.aws.Action("lambda", "*")],
                        Resource=["*"])
                ],
            ))

        pipeline_service_role = iam.Role(
            "PipelineServiceRole",
            Path="/",
            AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[
                awacs.aws.Statement(Effect=awacs.aws.Allow,
                                    Action=[awacs.sts.AssumeRole],
                                    Principal=awacs.aws.Principal(
                                        'Service',
                                        "codepipeline.amazonaws.com"))
            ]),
            Policies=[pipeline_policy] + self.pipeline_policies)

        generic_pipeline = codepipeline.Pipeline(
            "Pipeline",
            RoleArn=troposphere.GetAtt(pipeline_service_role, "Arn"),
            Stages=[],
            ArtifactStore=codepipeline.ArtifactStore(
                Type="S3",
                Location=self.bucket_name,
            )
            # TODO: optionally add kms key here
        )

        if self.bucket_kms_key_arn:
            encryption_config = codepipeline.EncryptionKey(
                "ArtifactBucketKmsKey",
                Id=self.bucket_kms_key_arn,
                Type='KMS',
            )
            generic_pipeline.ArtifactStore.EncryptionKey = encryption_config

        pipeline_output = troposphere.Output(
            "PipelineName",
            Description="Code Pipeline",
            Value=Ref(generic_pipeline),
        )
        pipeline_bucket_output = troposphere.Output(
            "PipelineBucket",
            Description="Name of the input artifact bucket for the pipeline",
            Value=self.bucket_name,
        )

        chain_context.template.add_resource(pipeline_bucket_access_policy)
        chain_context.template.add_resource(pipeline_service_role)
        chain_context.template.add_resource(generic_pipeline)
        chain_context.template.add_output(pipeline_output)
        chain_context.template.add_output(pipeline_bucket_output)
Code Example #15
    def handle(self, chain_context):
        print("Adding source action %s." % self.action_name)

        template = chain_context.template

        policy_name = "CodeBuildPolicy%s" % chain_context.instance_name
        codebuild_policy = cumulus.policies.codebuild.get_policy_code_build_general_access(
            policy_name)

        role_name = "PipelineSourceRole%s" % self.action_name
        codebuild_role = iam.Role(
            role_name,
            Path="/",
            AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[
                awacs.aws.Statement(Effect=awacs.aws.Allow,
                                    Action=[awacs.sts.AssumeRole],
                                    Principal=awacs.aws.Principal(
                                        'Service', "codebuild.amazonaws.com"))
            ]),
            Policies=[codebuild_policy],
            ManagedPolicyArns=[
                chain_context.metadata[META_PIPELINE_BUCKET_POLICY_REF]
            ])

        source_action = SourceS3Action(
            Name=self.action_name,
            OutputArtifacts=[
                codepipeline.OutputArtifacts(Name=self.output_artifact_name)
            ],
            Configuration={
                "S3Bucket": self.s3_bucket_name,
                "S3ObjectKey": self.s3_object_key,
            },
        )

        # TODO: support CFN params here. Use conditionals. Set to NoValue instead of None, using the same logic as below
        if self.poll_for_source_changes is not None:
            # if it's none - we shouldn't touch this.
            source_action.Configuration[
                'PollForSourceChanges'] = self.poll_for_source_changes

        template.add_resource(codebuild_role)

        found_pipelines = TemplateQuery.get_resource_by_type(
            template=chain_context.template,
            type_to_find=codepipeline.Pipeline)
        pipeline = found_pipelines[0]

        # Alternate way to get this
        # dummy = TemplateQuery.get_resource_by_title(chain_context.template, 'AppPipeline')

        stages = pipeline.Stages  # type: list

        # TODO: find stage by name
        first_stage = stages[0]

        # TODO accept a parallel action to the previous action, and don't +1 here.
        first_stage.Actions.append(source_action)

        template.add_output(
            troposphere.Output(
                "PipelineBucket%s" % self.action_name,
                Value=self.s3_bucket_name,
                Description="A pipeline source bucket",
            ))
        template.add_output(
            troposphere.Output(
                "PipelineTriggerObject%s" % self.action_name,
                Value=self.s3_object_key,
                Description="An s3 object key in the pipeline bucket "
                "that will trigger the pipeline",
            ))
Code Example #16
                        "awslogs-region":
                        troposphere.Ref(troposphere.AWS_REGION)
                    }),
                Essential=True,
                DisableNetworking=False,
                ReadonlyRootFilesystem=False,
            )
        ]))

s3bundlerservice = []
for n in range(0, 50):
    s3bundlerservice.append(
        t.add_resource(
            troposphere.ecs.Service(
                "s3bundlerservice{0}".format(str(n)),
                Cluster=troposphere.Ref(ECSCluster),
                TaskDefinition=troposphere.Ref(s3bundlertask),
                DesiredCount=0)))

t.add_output(
    troposphere.Output("ECSCluster", Value=troposphere.Ref(ECSCluster)))

t.add_output(troposphere.Output("ArchiveBucket", Value=ArchiveS3Choice))

t.add_output(troposphere.Output("ManifestQueue", Value=QueueChoice))

t.add_output(
    troposphere.Output("S3GrouperTask", Value=troposphere.Ref(s3groupertask)))

print(t.to_json())
Code Example #17
    def __init__(self, paco_ctx, account_ctx, aws_region, stack_group,
                 stack_tags, app_id, grp_id, res_id, factory_name,
                 cloudfront_config, config_ref):
        super().__init__(paco_ctx,
                         account_ctx,
                         aws_region,
                         enabled=cloudfront_config.is_enabled(),
                         config_ref=config_ref,
                         stack_group=stack_group,
                         stack_tags=stack_tags,
                         change_protected=cloudfront_config.change_protected)
        self.set_aws_name('CloudFront', grp_id, res_id, factory_name)
        origin_access_id_enabled = False

        self.init_template('CloudFront Distribution')
        template = self.template

        target_origin_param = self.create_cfn_parameter(
            param_type='String',
            name='TargetOrigin',
            description='Target Origin',
            value=cloudfront_config.default_cache_behavior.target_origin,
        )

        distribution_config_dict = {
            'Enabled': cloudfront_config.is_enabled(),
            'DefaultRootObject': cloudfront_config.default_root_object,
            'HttpVersion': 'http1.1',
            'DefaultCacheBehavior': {
                'AllowedMethods':
                cloudfront_config.default_cache_behavior.allowed_methods,
                'DefaultTTL':
                cloudfront_config.default_cache_behavior.default_ttl,
                'TargetOriginId':
                troposphere.Ref(target_origin_param),
                'ViewerProtocolPolicy':
                cloudfront_config.default_cache_behavior.viewer_protocol_policy
            },
            'PriceClass': 'PriceClass_' + cloudfront_config.price_class,
            'ViewerCertificate': {
                'AcmCertificateArn':
                self.paco_ctx.get_ref('paco.ref ' + self.config_ref +
                                      '.viewer_certificate.arn'),
                'SslSupportMethod':
                cloudfront_config.viewer_certificate.ssl_supported_method,
                'MinimumProtocolVersion':
                cloudfront_config.viewer_certificate.minimum_protocol_version
            }
        }
        if cloudfront_config.default_cache_behavior.min_ttl != -1:
            distribution_config_dict['DefaultCacheBehavior'][
                'MinTTL'] = cloudfront_config.default_cache_behavior.min_ttl
        if cloudfront_config.default_cache_behavior.max_ttl != -1:
            distribution_config_dict['DefaultCacheBehavior'][
                'MaxTTL'] = cloudfront_config.default_cache_behavior.max_ttl

        # Domain Aliases and Record Sets
        aliases_list = []
        aliases_param_map = {}
        for alias in cloudfront_config.domain_aliases:
            alias_hash = utils.md5sum(str_data=alias.domain_name)
            domain_name_param = 'DomainAlias' + alias_hash
            alias_param = self.create_cfn_parameter(
                param_type='String',
                name=domain_name_param,
                description='Domain Alias CNAME',
                value=alias.domain_name)
            aliases_list.append(troposphere.Ref(alias_param))
            aliases_param_map[alias.domain_name] = alias_param

        distribution_config_dict['Aliases'] = aliases_list

        # DefaultCacheBehavior
        # Forward Values
        forwarded_values_config = cloudfront_config.default_cache_behavior.forwarded_values
        forwarded_values_dict = {
            'Cookies': {
                'Forward': 'none',
            },
            'QueryString': str(forwarded_values_config.query_string)
        }
        # Cookies
        if cloudfront_config.s3_origin_exists() == False:
            forwarded_values_dict['Cookies'][
                'Forward'] = forwarded_values_config.cookies.forward
        if len(forwarded_values_config.cookies.whitelisted_names) > 0:
            forwarded_values_dict['Cookies'][
                'WhitelistedNames'] = forwarded_values_config.cookies.whitelisted_names
        # Headers
        if cloudfront_config.s3_origin_exists() == False:
            forwarded_values_dict[
                'Headers'] = cloudfront_config.default_cache_behavior.forwarded_values.headers
        distribution_config_dict['DefaultCacheBehavior'][
            'ForwardedValues'] = forwarded_values_dict

        # Cache Behaviors
        if len(cloudfront_config.cache_behaviors) > 0:
            cache_behaviors_list = []
            target_origin_param_map = {}
            for cache_behavior in cloudfront_config.cache_behaviors:
                target_origin_hash = utils.md5sum(
                    str_data=cache_behavior.target_origin)
                if target_origin_hash not in target_origin_param_map.keys():
                    cb_target_origin_param = self.create_cfn_parameter(
                        param_type='String',
                        name=self.create_cfn_logical_id(
                            'TargetOriginCacheBehavior' + target_origin_hash),
                        description='Target Origin',
                        value=cache_behavior.target_origin,
                    )
                    target_origin_param_map[
                        target_origin_hash] = cb_target_origin_param
                else:
                    cb_target_origin_param = target_origin_param_map[
                        target_origin_hash]

                cache_behavior_dict = {
                    'PathPattern': cache_behavior.path_pattern,
                    'AllowedMethods': cache_behavior.allowed_methods,
                    'DefaultTTL': cache_behavior.default_ttl,
                    'TargetOriginId': troposphere.Ref(cb_target_origin_param),
                    'ViewerProtocolPolicy':
                    cache_behavior.viewer_protocol_policy
                }
                cb_forwarded_values_config = cache_behavior.forwarded_values
                cb_forwarded_values_dict = {
                    'Cookies': {
                        'Forward': 'none',
                    },
                    'QueryString': str(cb_forwarded_values_config.query_string)
                }
                # Cookies
                cb_forwarded_values_dict['Cookies'][
                    'Forward'] = cb_forwarded_values_config.cookies.forward
                if len(cb_forwarded_values_config.cookies.whitelisted_names
                       ) > 0:
                    cb_forwarded_values_dict['Cookies'][
                        'WhitelistedNames'] = cb_forwarded_values_config.cookies.whitelisted_names
                # Headers
                if cloudfront_config.s3_origin_exists() == False:
                    cb_forwarded_values_dict[
                        'Headers'] = cache_behavior.forwarded_values.headers
                cache_behavior_dict[
                    'ForwardedValues'] = cb_forwarded_values_dict
                cache_behaviors_list.append(cache_behavior_dict)

            distribution_config_dict['CacheBehaviors'] = cache_behaviors_list

        # Origin Access Identity
        if cloudfront_config.s3_origin_exists() == True:
            origin_id_res = troposphere.cloudfront.CloudFrontOriginAccessIdentity(
                title='CloudFrontOriginAccessIdentity',
                template=template,
                CloudFrontOriginAccessIdentityConfig=troposphere.cloudfront.
                CloudFrontOriginAccessIdentityConfig(
                    Comment=troposphere.Ref('AWS::StackName')))
            troposphere.Output(title='CloudFrontOriginAccessIdentity',
                               template=template,
                               Value=troposphere.Ref(origin_id_res))

        # Origins
        origins_list = []
        for origin_name, origin in cloudfront_config.origins.items():
            if origin.s3_bucket != None:
                domain_hash = utils.md5sum(str_data=origin.s3_bucket)
                origin_domain_name = self.paco_ctx.get_ref(origin.s3_bucket +
                                                           '.url')
            else:
                domain_hash = utils.md5sum(str_data=origin.domain_name)
                origin_domain_name = origin.domain_name
            origin_dict = {'Id': origin_name, 'DomainName': origin_domain_name}
            if origin.s3_bucket == None:
                origin_dict['CustomOriginConfig'] = {
                    'HTTPSPort': origin.custom_origin_config.https_port,
                    'OriginKeepaliveTimeout':
                    origin.custom_origin_config.keepalive_timeout,
                    'OriginProtocolPolicy':
                    origin.custom_origin_config.protocol_policy,
                    'OriginReadTimeout':
                    origin.custom_origin_config.read_timeout,
                    'OriginSSLProtocols':
                    origin.custom_origin_config.ssl_protocols
                }
                if origin.custom_origin_config.http_port:
                    origin_dict['CustomOriginConfig']['HTTPPort'] = str(
                        origin.custom_origin_config.http_port)
            else:
                s3_config = self.paco_ctx.get_ref(origin.s3_bucket)
                origin_dict['S3OriginConfig'] = {}
                if s3_config.cloudfront_origin == False:
                    origin_dict['S3OriginConfig']['OriginAccessIdentity'] = ''
                else:
                    origin_access_id_enabled = True
                    param_name = "OriginAccessIdentiy" + domain_hash
                    access_id_ref = origin.s3_bucket + '.origin_id'
                    s3_cf_origin_id_param = self.create_cfn_parameter(
                        param_type='String',
                        name=param_name,
                        description='Origin Access Identity',
                        value=access_id_ref,
                    )
                    origin_dict['S3OriginConfig'][
                        'OriginAccessIdentity'] = troposphere.Sub(
                            'origin-access-identity/cloudfront/${OriginAccessId}',
                            {
                                'OriginAccessId':
                                troposphere.Ref(s3_cf_origin_id_param)
                            })
            origins_list.append(origin_dict)
        distribution_config_dict['Origins'] = origins_list

        # Custom Error
        error_resp_list = []
        for error_resp in cloudfront_config.custom_error_responses:
            error_resp_dict = {
                'ErrorCachingMinTTL': error_resp.error_caching_min_ttl,
                'ErrorCode': error_resp.error_code,
                'ResponseCode': error_resp.response_code,
                'ResponsePagePath': error_resp.response_page_path
            }
            error_resp_list.append(error_resp_dict)
        if len(error_resp_list) > 0:
            distribution_config_dict['CustomErrorResponses'] = error_resp_list

        # Web ACL
        if cloudfront_config.webacl_id is not None:
            webacl_id_param = self.create_cfn_parameter(
                param_type='String',
                name='WebAclId',
                description='WAF Web Acl ID',
                value=cloudfront_config.webacl_id)
            distribution_config_dict['WebACLId'] = troposphere.Ref(
                webacl_id_param)

        distribution_dict = {'DistributionConfig': distribution_config_dict}
        distribution_res = troposphere.cloudfront.Distribution.from_dict(
            'Distribution', distribution_dict)

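        # Legacy flag: create Route 53 alias records for the domain aliases directly in this template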
        if self.paco_ctx.legacy_flag('route53_record_set_2019_10_16'):
            if cloudfront_config.is_dns_enabled():
                for alias in cloudfront_config.domain_aliases:
                    alias_hash = utils.md5sum(str_data=alias.domain_name)
                    zone_param_name = 'AliasHostedZoneId' + alias_hash
                    alias_zone_id_param = self.create_cfn_parameter(
                        param_type='String',
                        name=zone_param_name,
                        description='Domain Alias Hosted Zone Id',
                        value=alias.hosted_zone + '.id',
                    )
                    record_set_res = troposphere.route53.RecordSetType(
                        title=self.create_cfn_logical_id_join(
                            ['RecordSet', alias_hash]),
                        template=template,
                        HostedZoneId=troposphere.Ref(alias_zone_id_param),
                        Name=troposphere.Ref(
                            aliases_param_map[alias.domain_name]),
                        Type='A',
                        AliasTarget=troposphere.route53.AliasTarget(
                            DNSName=troposphere.GetAtt(distribution_res,
                                                       'DomainName'),
                            HostedZoneId='Z2FDTNDATAQYW2'))
                    record_set_res.DependsOn = distribution_res

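        # Stack outputs: the distribution's domain name and its id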
        self.create_output(title='CloudFrontURL',
                           value=troposphere.GetAtt('Distribution',
                                                    'DomainName'),
                           ref=self.config_ref + '.domain_name')
        self.create_output(title='CloudFrontId',
                           value=troposphere.Ref(distribution_res),
                           ref=self.config_ref + '.id')

        template.add_resource(distribution_res)

        self.set_template()
        if origin_access_id_enabled:
            self.stack.wait_for_delete = True

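        # Otherwise, delegate alias record creation to the route53 controller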
        if not self.paco_ctx.legacy_flag('route53_record_set_2019_10_16'):
            route53_ctl = self.paco_ctx.get_controller('route53')
            if cloudfront_config.is_dns_enabled():
                for alias in cloudfront_config.domain_aliases:
                    route53_ctl.add_record_set(
                        self.account_ctx,
                        self.aws_region,
                        enabled=cloudfront_config.is_enabled(),
                        dns=alias,
                        record_set_type='Alias',
                        alias_dns_name='paco.ref ' + self.config_ref +
                        '.domain_name',
                        alias_hosted_zone_id='Z2FDTNDATAQYW2',
                        stack_group=self.stack_group,
                        config_ref=self.config_ref + '.record_set')
Code example #18
    def register_resources_template(self, template):

        deployment_resources = []
        api = RestApi(
            self.in_project_cf_name,
            Name=troposphere.Join("-", [self.name, troposphere.Ref('Stage')]),
            Description=self.settings.get('description', '')
        )
        template.add_resource(api)
        deployment_resources.append(api)

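        # IAM role that API Gateway assumes so method integrations can invoke Lambda functions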
        invoke_lambda_role = troposphere.iam.Role(
            utils.valid_cloudformation_name(self.name, 'Role'),
            AssumeRolePolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["apigateway.amazonaws.com"]
                    },
                    "Action": ["sts:AssumeRole"]
                }]
            },
            Policies=[
                troposphere.iam.Policy(
                    PolicyName=utils.valid_cloudformation_name(self.name, 'Role', 'Policy'),
                    PolicyDocument={
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": [
                                    "lambda:InvokeFunction"
                                ],
                                "Resource": [
                                    "*"
                                ]
                            }
                        ]
                    }
                )
            ]
        )

        template.add_resource(invoke_lambda_role)
        deployment_resources.append(invoke_lambda_role)

        deployment_dependencies = []
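        # Create one API Gateway resource per configured path and one Method per HTTP verb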
        for path, resource in six.iteritems(self.settings.get('resources', {})):
            resource_reference = self.get_or_create_resource(path, api, template)
            methods = resource['methods']

            if isinstance(methods, six.string_types):
                methods = [methods]

            if not isinstance(methods, dict):
                method_properties = copy.deepcopy(resource)
                method_properties.pop('methods', None)
                methods = dict([[method, method_properties] for method in methods])

            for method, configuration in six.iteritems(methods):
                method_name = [self.name]
                method_name.extend(path.split('/'))
                method_name.append(method)

                extra = {}
                if 'parameters' in configuration:
                    extra['RequestParameters'] = configuration['parameters']
                m = Method(
                    utils.valid_cloudformation_name(*method_name),
                    HttpMethod=method,
                    AuthorizationType=self.get_authorization_type(configuration),
                    ApiKeyRequired=self.get_api_key_required(configuration),
                    Integration=self.get_integration(configuration, invoke_lambda_role),
                    MethodResponses=self.get_method_responses(configuration),
                    ResourceId=resource_reference,
                    RestApiId=troposphere.Ref(api),
                    **extra
                )
                template.add_resource(m)
                deployment_dependencies.append(m.name)
                deployment_resources.append(m)

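        # A random suffix gives the Deployment a fresh logical name each time the template is generated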
        deploy_hash = hashlib.sha1(six.text_type(uuid.uuid4()).encode('utf-8')).hexdigest()
        deploy = Deployment(
            utils.valid_cloudformation_name(self.name, "Deployment", deploy_hash[:8]),
            DependsOn=sorted(deployment_dependencies),
            StageName=troposphere.Ref('Stage'),
            RestApiId=troposphere.Ref(api)
        )

        template.add_resource(deploy)

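        # Optionally expose the stage's invoke URL as a stack output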
        if self._get_true_false('cli-output', 't'):
            template.add_output([
                troposphere.Output(
                    utils.valid_cloudformation_name("Clioutput", self.in_project_name),
                    Value=troposphere.Join(
                        "",
                        [
                            "https://",
                            troposphere.Ref(api),
                            ".execute-api.",
                            troposphere.Ref(troposphere.AWS_REGION),
                            ".amazonaws.com/",
                            troposphere.Ref('Stage')
                        ]
                    ),
                )
            ])
Code example #19
def get_template(
    puppet_version,
    all_regions,
    source,
    is_caching_enabled,
    is_manual_approvals: bool,
    scm_skip_creation_of_repo: bool,
    should_validate: bool,
) -> t.Template:
    is_codecommit = source.get("Provider", "").lower() == "codecommit"
    is_github = source.get("Provider", "").lower() == "github"
    is_codestarsourceconnection = (source.get(
        "Provider", "").lower() == "codestarsourceconnection")
    is_custom = (source.get("Provider", "").lower() == "custom")
    is_s3 = source.get("Provider", "").lower() == "s3"
    description = f"""Bootstrap template used to bring up the main ServiceCatalog-Puppet AWS CodePipeline with dependencies
{{"version": "{puppet_version}", "framework": "servicecatalog-puppet", "role": "bootstrap-master"}}"""

    template = t.Template(Description=description)

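    # Template parameters: puppet version, org IAM role, manual approvals, permission boundaries,
    # CodeBuild compute types, worker count and the puppet role name/path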
    version_parameter = template.add_parameter(
        t.Parameter("Version", Default=puppet_version, Type="String"))
    org_iam_role_arn_parameter = template.add_parameter(
        t.Parameter("OrgIamRoleArn", Default="None", Type="String"))
    with_manual_approvals_parameter = template.add_parameter(
        t.Parameter(
            "WithManualApprovals",
            Type="String",
            AllowedValues=["Yes", "No"],
            Default="No",
        ))
    puppet_code_pipeline_role_permission_boundary_parameter = template.add_parameter(
        t.Parameter(
            "PuppetCodePipelineRolePermissionBoundary",
            Type="String",
            Description=
            "IAM Permission Boundary to apply to the PuppetCodePipelineRole",
            Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
        ))
    source_role_permissions_boundary_parameter = template.add_parameter(
        t.Parameter(
            "SourceRolePermissionsBoundary",
            Type="String",
            Description="IAM Permission Boundary to apply to the SourceRole",
            Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
        ))
    puppet_generate_role_permission_boundary_parameter = template.add_parameter(
        t.Parameter(
            "PuppetGenerateRolePermissionBoundary",
            Type="String",
            Description=
            "IAM Permission Boundary to apply to the PuppetGenerateRole",
            Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
        ))
    puppet_deploy_role_permission_boundary_parameter = template.add_parameter(
        t.Parameter(
            "PuppetDeployRolePermissionBoundary",
            Type="String",
            Description=
            "IAM Permission Boundary to apply to the PuppetDeployRole",
            Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
        ))
    puppet_provisioning_role_permissions_boundary_parameter = template.add_parameter(
        t.Parameter(
            "PuppetProvisioningRolePermissionsBoundary",
            Type="String",
            Description=
            "IAM Permission Boundary to apply to the PuppetProvisioningRole",
            Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
        ))
    cloud_formation_deploy_role_permissions_boundary_parameter = template.add_parameter(
        t.Parameter(
            "CloudFormationDeployRolePermissionsBoundary",
            Type="String",
            Description=
            "IAM Permission Boundary to apply to the CloudFormationDeployRole",
            Default=awscs_iam.ARN(resource="policy/AdministratorAccess").data,
        ))
    deploy_environment_compute_type_parameter = template.add_parameter(
        t.Parameter(
            "DeployEnvironmentComputeType",
            Type="String",
            Description="The AWS CodeBuild Environment Compute Type",
            Default="BUILD_GENERAL1_SMALL",
        ))
    spoke_deploy_environment_compute_type_parameter = template.add_parameter(
        t.Parameter(
            "SpokeDeployEnvironmentComputeType",
            Type="String",
            Description=
            "The AWS CodeBuild Environment Compute Type for spoke execution mode",
            Default="BUILD_GENERAL1_SMALL",
        ))
    deploy_num_workers_parameter = template.add_parameter(
        t.Parameter(
            "DeployNumWorkers",
            Type="Number",
            Description=
            "Number of workers that should be used when running a deploy",
            Default=10,
        ))
    puppet_role_name_parameter = template.add_parameter(
        t.Parameter("PuppetRoleName", Type="String", Default="PuppetRole"))
    puppet_role_path_template_parameter = template.add_parameter(
        t.Parameter("PuppetRolePath",
                    Type="String",
                    Default="/servicecatalog-puppet/"))

    template.add_condition(
        "ShouldUseOrgs",
        t.Not(t.Equals(t.Ref(org_iam_role_arn_parameter), "None")))
    template.add_condition(
        "HasManualApprovals",
        t.Equals(t.Ref(with_manual_approvals_parameter), "Yes"))

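    # Versioned, encrypted S3 bucket backing the stacks repository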
    template.add_resource(
        s3.Bucket(
            "StacksRepository",
            BucketName=t.Sub("sc-puppet-stacks-repository-${AWS::AccountId}"),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status="Enabled"),
            BucketEncryption=s3.BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    s3.ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=s3.
                        ServerSideEncryptionByDefault(SSEAlgorithm="AES256"))
                ]),
            PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            Tags=t.Tags({"ServiceCatalogPuppet:Actor": "Framework"}),
        ))

    manual_approvals_param = template.add_resource(
        ssm.Parameter(
            "ManualApprovalsParam",
            Type="String",
            Name="/servicecatalog-puppet/manual-approvals",
            Value=t.Ref(with_manual_approvals_parameter),
        ))
    template.add_resource(
        ssm.Parameter(
            "SpokeDeployEnvParameter",
            Type="String",
            Name=constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME,
            Value=t.Ref(spoke_deploy_environment_compute_type_parameter),
        ))
    param = template.add_resource(
        ssm.Parameter(
            "Param",
            Type="String",
            Name="service-catalog-puppet-version",
            Value=t.Ref(version_parameter),
        ))
    partition_parameter = template.add_resource(
        ssm.Parameter(
            "PartitionParameter",
            Type="String",
            Name="/servicecatalog-puppet/partition",
            Value=t.Ref("AWS::Partition"),
        ))
    puppet_role_name_parameter = template.add_resource(
        ssm.Parameter(
            "PuppetRoleNameParameter",
            Type="String",
            Name="/servicecatalog-puppet/puppet-role/name",
            Value=t.Ref(puppet_role_name_parameter),
        ))
    puppet_role_path_parameter = template.add_resource(
        ssm.Parameter(
            "PuppetRolePathParameter",
            Type="String",
            Name="/servicecatalog-puppet/puppet-role/path",
            Value=t.Ref(puppet_role_path_template_parameter),
        ))
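    # IAM roles used by the framework: Lambda share-accept, provisioning, CloudFormation deploy,
    # CodePipeline, source and CodeBuild deploy roles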
    share_accept_function_role = template.add_resource(
        iam.Role(
            "ShareAcceptFunctionRole",
            RoleName="ShareAcceptFunctionRole",
            ManagedPolicyArns=[
                t.Sub(
                    "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
                )
            ],
            Path=t.Ref(puppet_role_path_template_parameter),
            Policies=[
                iam.Policy(
                    PolicyName="ServiceCatalogActions",
                    PolicyDocument={
                        "Version":
                        "2012-10-17",
                        "Statement": [{
                            "Action": ["sts:AssumeRole"],
                            "Resource": {
                                "Fn::Sub":
                                "arn:${AWS::Partition}:iam::*:role${PuppetRolePath}${PuppetRoleName}"
                            },
                            "Effect": "Allow",
                        }],
                    },
                )
            ],
            AssumeRolePolicyDocument={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Action": ["sts:AssumeRole"],
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["lambda.amazonaws.com"]
                    },
                }],
            },
        ))

    provisioning_role = template.add_resource(
        iam.Role(
            "ProvisioningRole",
            RoleName="PuppetProvisioningRole",
            AssumeRolePolicyDocument={
                "Version":
                "2012-10-17",
                "Statement": [
                    {
                        "Action": ["sts:AssumeRole"],
                        "Effect": "Allow",
                        "Principal": {
                            "Service": ["codebuild.amazonaws.com"]
                        },
                    },
                    {
                        "Action": ["sts:AssumeRole"],
                        "Effect": "Allow",
                        "Principal": {
                            "AWS": {
                                "Fn::Sub": "${AWS::AccountId}"
                            }
                        },
                    },
                ],
            },
            ManagedPolicyArns=[
                t.Sub(
                    "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess"
                )
            ],
            PermissionsBoundary=t.Ref(
                puppet_provisioning_role_permissions_boundary_parameter),
            Path=t.Ref(puppet_role_path_template_parameter),
        ))

    cloud_formation_deploy_role = template.add_resource(
        iam.Role(
            "CloudFormationDeployRole",
            RoleName="CloudFormationDeployRole",
            AssumeRolePolicyDocument={
                "Version":
                "2012-10-17",
                "Statement": [
                    {
                        "Action": ["sts:AssumeRole"],
                        "Effect": "Allow",
                        "Principal": {
                            "Service": ["cloudformation.amazonaws.com"]
                        },
                    },
                    {
                        "Action": ["sts:AssumeRole"],
                        "Effect": "Allow",
                        "Principal": {
                            "AWS": {
                                "Fn::Sub": "${AWS::AccountId}"
                            }
                        },
                    },
                ],
            },
            ManagedPolicyArns=[
                t.Sub(
                    "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess"
                )
            ],
            PermissionsBoundary=t.Ref(
                cloud_formation_deploy_role_permissions_boundary_parameter),
            Path=t.Ref(puppet_role_path_template_parameter),
        ))

    pipeline_role = template.add_resource(
        iam.Role(
            "PipelineRole",
            RoleName="PuppetCodePipelineRole",
            AssumeRolePolicyDocument={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Action": ["sts:AssumeRole"],
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["codepipeline.amazonaws.com"]
                    },
                }],
            },
            ManagedPolicyArns=[
                t.Sub(
                    "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess"
                )
            ],
            PermissionsBoundary=t.Ref(
                puppet_code_pipeline_role_permission_boundary_parameter),
            Path=t.Ref(puppet_role_path_template_parameter),
        ))

    source_role = template.add_resource(
        iam.Role(
            "SourceRole",
            RoleName="PuppetSourceRole",
            AssumeRolePolicyDocument={
                "Version":
                "2012-10-17",
                "Statement": [
                    {
                        "Action": ["sts:AssumeRole"],
                        "Effect": "Allow",
                        "Principal": {
                            "Service": ["codepipeline.amazonaws.com"]
                        },
                    },
                    {
                        "Action": ["sts:AssumeRole"],
                        "Effect": "Allow",
                        "Principal": {
                            "AWS": {
                                "Fn::Sub":
                                "arn:${AWS::Partition}:iam::${AWS::AccountId}:root"
                            }
                        },
                    },
                ],
            },
            ManagedPolicyArns=[
                t.Sub(
                    "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess"
                )
            ],
            PermissionsBoundary=t.Ref(
                source_role_permissions_boundary_parameter),
            Path=t.Ref(puppet_role_path_template_parameter),
        ))

    dry_run_notification_topic = template.add_resource(
        sns.Topic(
            "DryRunNotificationTopic",
            DisplayName="service-catalog-puppet-dry-run-approvals",
            TopicName="service-catalog-puppet-dry-run-approvals",
            Condition="HasManualApprovals",
        ))

    deploy_role = template.add_resource(
        iam.Role(
            "DeployRole",
            RoleName="PuppetDeployRole",
            AssumeRolePolicyDocument={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Action": ["sts:AssumeRole"],
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["codebuild.amazonaws.com"]
                    },
                }],
            },
            ManagedPolicyArns=[
                t.Sub(
                    "arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess"
                )
            ],
            PermissionsBoundary=t.Ref(
                puppet_deploy_role_permission_boundary_parameter),
            Path=t.Ref(puppet_role_path_template_parameter),
        ))

    num_workers_ssm_parameter = template.add_resource(
        ssm.Parameter(
            "NumWorkersSSMParameter",
            Type="String",
            Name="/servicecatalog-puppet/deploy/num-workers",
            Value=t.Sub("${DeployNumWorkers}"),
        ))

    parameterised_source_bucket = template.add_resource(
        s3.Bucket(
            "ParameterisedSourceBucket",
            PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
                IgnorePublicAcls=True,
                BlockPublicPolicy=True,
                BlockPublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            BucketEncryption=s3.BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    s3.ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=s3.
                        ServerSideEncryptionByDefault(SSEAlgorithm="AES256"))
                ]),
            Tags=t.Tags.from_dict(
                **{"ServiceCatalogPuppet:Actor": "Framework"}),
            BucketName=t.Sub("sc-puppet-parameterised-runs-${AWS::AccountId}"),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status="Enabled"),
        ))

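    # The Source stage always contains the parameterised-run S3 action; the configured
    # provider's own source action is appended further below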
    source_stage = codepipeline.Stages(
        Name="Source",
        Actions=[
            codepipeline.Actions(
                RunOrder=1,
                RoleArn=t.GetAtt("SourceRole", "Arn"),
                ActionTypeId=codepipeline.ActionTypeId(
                    Category="Source",
                    Owner="AWS",
                    Version="1",
                    Provider="S3",
                ),
                OutputArtifacts=[
                    codepipeline.OutputArtifacts(Name="ParameterisedSource")
                ],
                Configuration={
                    "S3Bucket": t.Ref(parameterised_source_bucket),
                    "S3ObjectKey": "parameters.zip",
                    "PollForSourceChanges": True,
                },
                Name="ParameterisedSource",
            )
        ],
    )

    install_spec = {
        "runtime-versions":
        dict(python="3.7"),
        "commands": [
            f"pip install {puppet_version}" if "http" in puppet_version else
            f"pip install aws-service-catalog-puppet=={puppet_version}",
        ],
    }

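    # Environment variables shared by the CodeBuild projects defined later in this template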
    deploy_env_vars = [
        {
            "Type": "PLAINTEXT",
            "Name": "PUPPET_ACCOUNT_ID",
            "Value": t.Ref("AWS::AccountId"),
        },
        {
            "Type": "PLAINTEXT",
            "Name": "PUPPET_REGION",
            "Value": t.Ref("AWS::Region"),
        },
        {
            "Type": "PARAMETER_STORE",
            "Name": "PARTITION",
            "Value": t.Ref(partition_parameter),
        },
        {
            "Type": "PARAMETER_STORE",
            "Name": "PUPPET_ROLE_NAME",
            "Value": t.Ref(puppet_role_name_parameter),
        },
        {
            "Type": "PARAMETER_STORE",
            "Name": "PUPPET_ROLE_PATH",
            "Value": t.Ref(puppet_role_path_parameter),
        },
    ]

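    # Append the source action for the configured provider
    # (CodeCommit, GitHub, custom, CodeStarSourceConnection or S3)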
    if is_codecommit:
        template.add_resource(
            codecommit.Repository(
                "CodeRepo",
                RepositoryName=source.get("Configuration").get(
                    "RepositoryName"),
                RepositoryDescription=
                "Repo to store the servicecatalog puppet solution",
                DeletionPolicy="Retain",
            ))

        source_stage.Actions.append(
            codepipeline.Actions(
                RunOrder=1,
                RoleArn=t.GetAtt("SourceRole", "Arn"),
                ActionTypeId=codepipeline.ActionTypeId(
                    Category="Source",
                    Owner="AWS",
                    Version="1",
                    Provider="CodeCommit",
                ),
                OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
                Configuration={
                    "RepositoryName":
                    source.get("Configuration").get("RepositoryName"),
                    "BranchName":
                    source.get("Configuration").get("BranchName"),
                    "PollForSourceChanges":
                    source.get("Configuration").get("PollForSourceChanges",
                                                    True),
                },
                Name="Source",
            ))

    if is_github:
        source_stage.Actions.append(
            codepipeline.Actions(
                RunOrder=1,
                ActionTypeId=codepipeline.ActionTypeId(
                    Category="Source",
                    Owner="ThirdParty",
                    Version="1",
                    Provider="GitHub",
                ),
                OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
                Configuration={
                    "Owner":
                    source.get("Configuration").get("Owner"),
                    "Repo":
                    source.get("Configuration").get("Repo"),
                    "Branch":
                    source.get("Configuration").get("Branch"),
                    "OAuthToken":
                    t.Join(
                        "",
                        [
                            "{{resolve:secretsmanager:",
                            source.get("Configuration").get(
                                "SecretsManagerSecret"),
                            ":SecretString:OAuthToken}}",
                        ],
                    ),
                    "PollForSourceChanges":
                    source.get("Configuration").get("PollForSourceChanges"),
                },
                Name="Source",
            ))

    if is_custom:
        source_stage.Actions.append(
            codepipeline.Actions(
                RunOrder=1,
                ActionTypeId=codepipeline.ActionTypeId(
                    Category="Source",
                    Owner="Custom",
                    Version=source.get("Configuration").get(
                        "CustomActionTypeVersion"),
                    Provider=source.get("Configuration").get(
                        "CustomActionTypeProvider"),
                ),
                OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
                Configuration={
                    "GitUrl": source.get("Configuration").get("GitUrl"),
                    "Branch": source.get("Configuration").get("Branch"),
                    "PipelineName": t.Sub("${AWS::StackName}-pipeline"),
                },
                Name="Source",
            ))
        webhook = codepipeline.Webhook(
            "Webhook",
            Authentication="IP",
            TargetAction="Source",
            AuthenticationConfiguration=codepipeline.WebhookAuthConfiguration(
                AllowedIPRange=source.get("Configuration").get(
                    "GitWebHookIpAddress")),
            Filters=[
                codepipeline.WebhookFilterRule(
                    JsonPath="$.changes[0].ref.id",
                    MatchEquals="refs/heads/{Branch}")
            ],
            TargetPipelineVersion=1,
            TargetPipeline=t.Sub("${AWS::StackName}-pipeline"),
        )
        template.add_resource(webhook)
        values_for_sub = {
            "GitUrl": source.get("Configuration").get("GitUrl"),
            "WebhookUrl": t.GetAtt(webhook, "Url"),
        }
        output_to_add = t.Output("WebhookUrl")
        output_to_add.Value = t.Sub("${GitUrl}||${WebhookUrl}",
                                    **values_for_sub)
        output_to_add.Export = t.Export(t.Sub("${AWS::StackName}-pipeline"))
        template.add_output(output_to_add)

    if is_codestarsourceconnection:
        source_stage.Actions.append(
            codepipeline.Actions(
                RunOrder=1,
                RoleArn=t.GetAtt("SourceRole", "Arn"),
                ActionTypeId=codepipeline.ActionTypeId(
                    Category="Source",
                    Owner="AWS",
                    Version="1",
                    Provider="CodeStarSourceConnection",
                ),
                OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
                Configuration={
                    "ConnectionArn":
                    source.get("Configuration").get("ConnectionArn"),
                    "FullRepositoryId":
                    source.get("Configuration").get("FullRepositoryId"),
                    "BranchName":
                    source.get("Configuration").get("BranchName"),
                    "OutputArtifactFormat":
                    source.get("Configuration").get("OutputArtifactFormat"),
                },
                Name="Source",
            ))

    if is_s3:
        bucket_name = source.get("Configuration").get("S3Bucket")
        if not scm_skip_creation_of_repo:
            template.add_resource(
                s3.Bucket(
                    bucket_name,
                    PublicAccessBlockConfiguration=s3.
                    PublicAccessBlockConfiguration(
                        IgnorePublicAcls=True,
                        BlockPublicPolicy=True,
                        BlockPublicAcls=True,
                        RestrictPublicBuckets=True,
                    ),
                    BucketEncryption=s3.BucketEncryption(
                        ServerSideEncryptionConfiguration=[
                            s3.ServerSideEncryptionRule(
                                ServerSideEncryptionByDefault=s3.
                                ServerSideEncryptionByDefault(
                                    SSEAlgorithm="AES256"))
                        ]),
                    Tags=t.Tags.from_dict(
                        **{"ServiceCatalogPuppet:Actor": "Framework"}),
                    BucketName=bucket_name,
                    VersioningConfiguration=s3.VersioningConfiguration(
                        Status="Enabled"),
                ))

        source_stage.Actions.append(
            codepipeline.Actions(
                RunOrder=1,
                ActionTypeId=codepipeline.ActionTypeId(
                    Category="Source",
                    Owner="AWS",
                    Version="1",
                    Provider="S3",
                ),
                OutputArtifacts=[codepipeline.OutputArtifacts(Name="Source")],
                Configuration={
                    "S3Bucket":
                    bucket_name,
                    "S3ObjectKey":
                    source.get("Configuration").get("S3ObjectKey"),
                    "PollForSourceChanges":
                    source.get("Configuration").get("PollForSourceChanges"),
                },
                Name="Source",
            ))

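    # CodeBuild project that runs puppet for a single account by uploading a parameterised run
    # and waiting for it to complete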
    single_account_run_project_build_spec = dict(
        version=0.2,
        phases=dict(
            install=install_spec,
            build={
                "commands": [
                    'echo "single_account: \\"${SINGLE_ACCOUNT_ID}\\"" > parameters.yaml',
                    "cat parameters.yaml",
                    "zip parameters.zip parameters.yaml",
                    "aws s3 cp parameters.zip s3://sc-puppet-parameterised-runs-${PUPPET_ACCOUNT_ID}/parameters.zip",
                ]
            },
            post_build={
                "commands": [
                    "servicecatalog-puppet wait-for-parameterised-run-to-complete",
                ]
            },
        ),
        artifacts=dict(
            name="DeployProject",
            files=[
                "ServiceCatalogPuppet/manifest.yaml",
                "ServiceCatalogPuppet/manifest-expanded.yaml",
                "results/*/*",
                "output/*/*",
                "exploded_results/*/*",
                "tasks.log",
            ],
        ),
    )

    single_account_run_project_args = dict(
        Name="servicecatalog-puppet-single-account-run",
        Description="Runs puppet for a single account - SINGLE_ACCOUNT_ID",
        ServiceRole=t.GetAtt(deploy_role, "Arn"),
        Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
        Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS", ),
        TimeoutInMinutes=480,
        Environment=codebuild.Environment(
            ComputeType=t.Ref(deploy_environment_compute_type_parameter),
            Image="aws/codebuild/standard:4.0",
            Type="LINUX_CONTAINER",
            EnvironmentVariables=[
                {
                    "Type": "PLAINTEXT",
                    "Name": "SINGLE_ACCOUNT_ID",
                    "Value": "CHANGE_ME",
                },
            ] + deploy_env_vars,
        ),
        Source=codebuild.Source(
            Type="NO_SOURCE",
            BuildSpec=yaml.safe_dump(single_account_run_project_build_spec),
        ),
    )

    single_account_run_project = template.add_resource(
        codebuild.Project("SingleAccountRunProject",
                          **single_account_run_project_args))

    single_account_run_project_build_spec["phases"]["post_build"]["commands"] = [
        "servicecatalog-puppet wait-for-parameterised-run-to-complete --on-complete-url $CALLBACK_URL"
    ]
    single_account_run_project_args[
        "Name"] = "servicecatalog-puppet-single-account-run-with-callback"
    single_account_run_project_args[
        "Description"] = "Runs puppet for a single account - SINGLE_ACCOUNT_ID and then does a http put"
    single_account_run_project_args.get(
        "Environment").EnvironmentVariables.append({
            "Type": "PLAINTEXT",
            "Name": "CALLBACK_URL",
            "Value": "CHANGE_ME",
        })
    single_account_run_project_args["Source"] = codebuild.Source(
        Type="NO_SOURCE",
        BuildSpec=yaml.safe_dump(single_account_run_project_build_spec),
    )
    single_account_run_project_with_callback = template.add_resource(
        codebuild.Project("SingleAccountRunWithCallbackProject",
                          **single_account_run_project_args))

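    # Assemble the pipeline stages: Source, optional Validate, then Deploy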
    stages = [source_stage]

    if should_validate:
        template.add_resource(
            codebuild.Project(
                "ValidateProject",
                Name="servicecatalog-puppet-validate",
                ServiceRole=t.GetAtt("DeployRole", "Arn"),
                Tags=t.Tags.from_dict(
                    **{"ServiceCatalogPuppet:Actor": "Framework"}),
                Artifacts=codebuild.Artifacts(Type="CODEPIPELINE"),
                TimeoutInMinutes=60,
                Environment=codebuild.Environment(
                    ComputeType="BUILD_GENERAL1_SMALL",
                    Image="aws/codebuild/standard:4.0",
                    Type="LINUX_CONTAINER",
                ),
                Source=codebuild.Source(
                    BuildSpec=yaml.safe_dump(
                        dict(
                            version="0.2",
                            phases={
                                "install": {
                                    "runtime-versions": {
                                        "python": "3.7",
                                    },
                                    "commands": [
                                        f"pip install {puppet_version}"
                                        if "http" in puppet_version else
                                        f"pip install aws-service-catalog-puppet=={puppet_version}",
                                    ],
                                },
                                "build": {
                                    "commands": [
                                        "servicecatalog-puppet validate manifest.yaml"
                                    ]
                                },
                            },
                        )),
                    Type="CODEPIPELINE",
                ),
                Description="Validate the manifest.yaml file",
            ))
        stages.append(
            codepipeline.Stages(
                Name="Validate",
                Actions=[
                    codepipeline.Actions(
                        InputArtifacts=[
                            codepipeline.InputArtifacts(Name="Source"),
                        ],
                        Name="Validate",
                        ActionTypeId=codepipeline.ActionTypeId(
                            Category="Build",
                            Owner="AWS",
                            Version="1",
                            Provider="CodeBuild",
                        ),
                        OutputArtifacts=[
                            codepipeline.OutputArtifacts(
                                Name="ValidateProject")
                        ],
                        Configuration={
                            "ProjectName": t.Ref("ValidateProject"),
                            "PrimarySource": "Source",
                        },
                        RunOrder=1,
                    ),
                ],
            ))

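    # Deploy stage: with manual approvals it runs DryRun, a manual approval, then Deploy;
    # otherwise a single Deploy action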
    if is_manual_approvals:
        deploy_stage = codepipeline.Stages(
            Name="Deploy",
            Actions=[
                codepipeline.Actions(
                    InputArtifacts=[
                        codepipeline.InputArtifacts(Name="Source"),
                        codepipeline.InputArtifacts(
                            Name="ParameterisedSource"),
                    ],
                    Name="DryRun",
                    ActionTypeId=codepipeline.ActionTypeId(
                        Category="Build",
                        Owner="AWS",
                        Version="1",
                        Provider="CodeBuild",
                    ),
                    OutputArtifacts=[
                        codepipeline.OutputArtifacts(Name="DryRunProject")
                    ],
                    Configuration={
                        "ProjectName": t.Ref("DryRunProject"),
                        "PrimarySource": "Source",
                    },
                    RunOrder=1,
                ),
                codepipeline.Actions(
                    ActionTypeId=codepipeline.ActionTypeId(
                        Category="Approval",
                        Owner="AWS",
                        Version="1",
                        Provider="Manual",
                    ),
                    Configuration={
                        "NotificationArn":
                        t.Ref("DryRunNotificationTopic"),
                        "CustomData":
                        "Approve when you are happy with the dry run.",
                    },
                    Name="DryRunApproval",
                    RunOrder=2,
                ),
                codepipeline.Actions(
                    InputArtifacts=[
                        codepipeline.InputArtifacts(Name="Source"),
                        codepipeline.InputArtifacts(
                            Name="ParameterisedSource"),
                    ],
                    Name="Deploy",
                    ActionTypeId=codepipeline.ActionTypeId(
                        Category="Build",
                        Owner="AWS",
                        Version="1",
                        Provider="CodeBuild",
                    ),
                    OutputArtifacts=[
                        codepipeline.OutputArtifacts(Name="DeployProject")
                    ],
                    Configuration={
                        "ProjectName": t.Ref("DeployProject"),
                        "PrimarySource": "Source",
                    },
                    RunOrder=3,
                ),
            ],
        )
    else:
        deploy_stage = codepipeline.Stages(
            Name="Deploy",
            Actions=[
                codepipeline.Actions(
                    InputArtifacts=[
                        codepipeline.InputArtifacts(Name="Source"),
                        codepipeline.InputArtifacts(
                            Name="ParameterisedSource"),
                    ],
                    Name="Deploy",
                    ActionTypeId=codepipeline.ActionTypeId(
                        Category="Build",
                        Owner="AWS",
                        Version="1",
                        Provider="CodeBuild",
                    ),
                    OutputArtifacts=[
                        codepipeline.OutputArtifacts(Name="DeployProject")
                    ],
                    Configuration={
                        "ProjectName":
                        t.Ref("DeployProject"),
                        "PrimarySource":
                        "Source",
                        "EnvironmentVariables":
                        '[{"name":"EXECUTION_ID","value":"#{codepipeline.PipelineExecutionId}","type":"PLAINTEXT"}]',
                    },
                    RunOrder=1,
                ),
            ],
        )

    stages.append(deploy_stage)

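    # The main AWS CodePipeline, built from the stages assembled above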
    pipeline = template.add_resource(
        codepipeline.Pipeline(
            "Pipeline",
            RoleArn=t.GetAtt("PipelineRole", "Arn"),
            Stages=stages,
            Name=t.Sub("${AWS::StackName}-pipeline"),
            ArtifactStore=codepipeline.ArtifactStore(
                Type="S3",
                Location=t.Sub(
                    "sc-puppet-pipeline-artifacts-${AWS::AccountId}-${AWS::Region}"
                ),
            ),
            RestartExecutionOnUpdate=True,
        ))

    if is_github:
        template.add_resource(
            codepipeline.Webhook(
                "Webhook",
                AuthenticationConfiguration=codepipeline.
                WebhookAuthConfiguration(SecretToken=t.Join(
                    "",
                    [
                        "{{resolve:secretsmanager:",
                        source.get("Configuration").get(
                            "SecretsManagerSecret"),
                        ":SecretString:SecretToken}}",
                    ],
                )),
                Filters=[
                    codepipeline.WebhookFilterRule(
                        JsonPath="$.ref",
                        MatchEquals="refs/heads/" +
                        source.get("Configuration").get("Branch"),
                    )
                ],
                Authentication="GITHUB_HMAC",
                TargetPipeline=t.Ref(pipeline),
                TargetAction="Source",
                Name=t.Sub("${AWS::StackName}-webhook"),
                TargetPipelineVersion=t.GetAtt(pipeline, "Version"),
                RegisterWithThirdParty="true",
            ))

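    # Build spec and CodeBuild project for the Deploy action; the same arguments are then
    # tweaked below to create the DryRun project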
    deploy_project_build_spec = dict(
        version=0.2,
        phases=dict(
            install={
                "runtime-versions":
                dict(python="3.7"),
                "commands": [
                    f"pip install {puppet_version}"
                    if "http" in puppet_version else
                    f"pip install aws-service-catalog-puppet=={puppet_version}",
                ],
            },
            pre_build={
                "commands": [
                    "servicecatalog-puppet --info expand --parameter-override-file $CODEBUILD_SRC_DIR_ParameterisedSource/parameters.yaml manifest.yaml",
                ]
            },
            build={
                "commands": [
                    "servicecatalog-puppet --info deploy --num-workers ${NUM_WORKERS} manifest-expanded.yaml",
                ]
            },
        ),
        artifacts=dict(
            name="DeployProject",
            files=[
                "manifest-expanded.yaml",
                "results/*/*",
                "output/*/*",
                "exploded_results/*/*",
                "tasks.log",
            ],
        ),
    )

    deploy_project_args = dict(
        Name="servicecatalog-puppet-deploy",
        ServiceRole=t.GetAtt(deploy_role, "Arn"),
        Tags=t.Tags.from_dict(**{"ServiceCatalogPuppet:Actor": "Framework"}),
        Artifacts=codebuild.Artifacts(Type="CODEPIPELINE", ),
        TimeoutInMinutes=480,
        Environment=codebuild.Environment(
            ComputeType=t.Ref(deploy_environment_compute_type_parameter),
            Image="aws/codebuild/standard:4.0",
            Type="LINUX_CONTAINER",
            EnvironmentVariables=[
                {
                    "Type": "PARAMETER_STORE",
                    "Name": "NUM_WORKERS",
                    "Value": t.Ref(num_workers_ssm_parameter),
                },
                {
                    "Type":
                    "PARAMETER_STORE",
                    "Name":
                    "SPOKE_EXECUTION_MODE_DEPLOY_ENV",
                    "Value":
                    constants.SPOKE_EXECUTION_MODE_DEPLOY_ENV_PARAMETER_NAME,
                },
            ] + deploy_env_vars,
        ),
        Source=codebuild.Source(
            Type="CODEPIPELINE",
            BuildSpec=yaml.safe_dump(deploy_project_build_spec),
        ),
        Description="deploys out the products to be deployed",
    )

    deploy_project = template.add_resource(
        codebuild.Project("DeployProject", **deploy_project_args))

    deploy_project_build_spec["phases"]["build"]["commands"] = [
        "servicecatalog-puppet --info dry-run manifest-expanded.yaml"
    ]
    deploy_project_build_spec["artifacts"]["name"] = "DryRunProject"
    deploy_project_args["Name"] = "servicecatalog-puppet-dryrun"
    deploy_project_args[
        "Description"] = "dry run of servicecatalog-puppet-dryrun"
    deploy_project_args["Source"] = codebuild.Source(
        Type="CODEPIPELINE",
        BuildSpec=yaml.safe_dump(deploy_project_build_spec),
    )

    dry_run_project = template.add_resource(
        codebuild.Project("DryRunProject", **deploy_project_args))

    bootstrap_project = template.add_resource(
        codebuild.Project(
            "BootstrapProject",
            Name="servicecatalog-puppet-bootstrap-spokes-in-ou",
            ServiceRole=t.GetAtt("DeployRole", "Arn"),
            Tags=t.Tags.from_dict(
                **{"ServiceCatalogPuppet:Actor": "Framework"}),
            Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS"),
            TimeoutInMinutes=60,
            Environment=codebuild.Environment(
                ComputeType="BUILD_GENERAL1_SMALL",
                Image="aws/codebuild/standard:4.0",
                Type="LINUX_CONTAINER",
                EnvironmentVariables=[
                    {
                        "Type": "PLAINTEXT",
                        "Name": "OU_OR_PATH",
                        "Value": "CHANGE_ME"
                    },
                    {
                        "Type": "PLAINTEXT",
                        "Name": "IAM_ROLE_NAME",
                        "Value": "OrganizationAccountAccessRole",
                    },
                    {
                        "Type": "PLAINTEXT",
                        "Name": "IAM_ROLE_ARNS",
                        "Value": ""
                    },
                ],
            ),
            Source=codebuild.Source(
                BuildSpec=
                "version: 0.2\nphases:\n  install:\n    runtime-versions:\n      python: 3.7\n    commands:\n      - pip install aws-service-catalog-puppet\n  build:\n    commands:\n      - servicecatalog-puppet bootstrap-spokes-in-ou $OU_OR_PATH $IAM_ROLE_NAME $IAM_ROLE_ARNS\nartifacts:\n  files:\n    - results/*/*\n    - output/*/*\n  name: BootstrapProject\n",
                Type="NO_SOURCE",
            ),
            Description="Bootstrap all the accounts in an OU",
        ))

    template.add_resource(
        codebuild.Project(
            "BootstrapASpokeProject",
            Name="servicecatalog-puppet-bootstrap-spoke",
            ServiceRole=t.GetAtt("DeployRole", "Arn"),
            Tags=t.Tags.from_dict(
                **{"ServiceCatalogPuppet:Actor": "Framework"}),
            Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS"),
            TimeoutInMinutes=60,
            Environment=codebuild.Environment(
                ComputeType="BUILD_GENERAL1_SMALL",
                Image="aws/codebuild/standard:4.0",
                Type="LINUX_CONTAINER",
                EnvironmentVariables=[
                    {
                        "Type": "PLAINTEXT",
                        "Name": "PUPPET_ACCOUNT_ID",
                        "Value": t.Sub("${AWS::AccountId}"),
                    },
                    {
                        "Type": "PLAINTEXT",
                        "Name": "ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN",
                        "Value": "CHANGE_ME",
                    },
                    {
                        "Type": "PLAINTEXT",
                        "Name": "ASSUMABLE_ROLE_IN_ROOT_ACCOUNT",
                        "Value": "CHANGE_ME",
                    },
                ],
            ),
            Source=codebuild.Source(
                BuildSpec=yaml.safe_dump(
                    dict(
                        version=0.2,
                        phases=dict(
                            install=install_spec,
                            build={
                                "commands": [
                                    "servicecatalog-puppet bootstrap-spoke-as ${PUPPET_ACCOUNT_ID} ${ASSUMABLE_ROLE_IN_ROOT_ACCOUNT} ${ORGANIZATION_ACCOUNT_ACCESS_ROLE_ARN}"
                                ]
                            },
                        ),
                    )),
                Type="NO_SOURCE",
            ),
            Description="Bootstrap given account as a spoke",
        ))

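    # SQS queue (and policy) that receives CloudFormation events forwarded from the
    # regional servicecatalog-puppet SNS topics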
    cloud_formation_events_queue = template.add_resource(
        sqs.Queue(
            "CloudFormationEventsQueue",
            QueueName="servicecatalog-puppet-cloudformation-events",
            Tags=t.Tags.from_dict(
                **{"ServiceCatalogPuppet:Actor": "Framework"}),
        ))

    cloud_formation_events_queue_policy = template.add_resource(
        sqs.QueuePolicy(
            "CloudFormationEventsQueuePolicy",
            Queues=[t.Ref(cloud_formation_events_queue)],
            PolicyDocument={
                "Id":
                "AllowSNS",
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Sid": "allow-send-message",
                    "Effect": "Allow",
                    "Principal": {
                        "AWS": "*"
                    },
                    "Action": ["sqs:SendMessage"],
                    "Resource": "*",
                    "Condition": {
                        "ArnEquals": {
                            "aws:SourceArn":
                            t.Sub(
                                "arn:${AWS::Partition}:sns:*:${AWS::AccountId}:servicecatalog-puppet-cloudformation-regional-events"
                            )
                        }
                    },
                }],
            },
        ))

    spoke_deploy_bucket = template.add_resource(
        s3.Bucket(
            "SpokeDeployBucket",
            PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
                IgnorePublicAcls=True,
                BlockPublicPolicy=True,
                BlockPublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            BucketEncryption=s3.BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    s3.ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=s3.
                        ServerSideEncryptionByDefault(SSEAlgorithm="AES256"))
                ]),
            Tags=t.Tags.from_dict(
                **{"ServiceCatalogPuppet:Actor": "Framework"}),
            BucketName=t.Sub("sc-puppet-spoke-deploy-${AWS::AccountId}"),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status="Enabled"),
        ))

    caching_bucket = template.add_resource(
        s3.Bucket(
            "CachingBucket",
            PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            BucketEncryption=s3.BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    s3.ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=s3.
                        ServerSideEncryptionByDefault(SSEAlgorithm="AES256"))
                ]),
            Tags=t.Tags.from_dict(
                **{"ServiceCatalogPuppet:Actor": "Framework"}),
            BucketName=t.Sub(
                "sc-puppet-caching-bucket-${AWS::AccountId}-${AWS::Region}"),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status="Enabled"),
        ))

    template.add_output(
        t.Output(
            "CloudFormationEventsQueueArn",
            Value=t.GetAtt(cloud_formation_events_queue, "Arn"),
        ))
    template.add_output(t.Output("Version", Value=t.GetAtt(param, "Value")))
    template.add_output(
        t.Output("ManualApprovalsParam",
                 Value=t.GetAtt(manual_approvals_param, "Value")))

    template.add_resource(
        ssm.Parameter(
            "DefaultTerraformVersion",
            Type="String",
            Name=constants.DEFAULT_TERRAFORM_VERSION_PARAMETER_NAME,
            Value=constants.DEFAULT_TERRAFORM_VERSION_VALUE,
        ))

    return template
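
A small, hedged sketch of reading the outputs declared above back after the stack has been created; the stack name is an assumption, not taken from the snippet:

import boto3

stacks = boto3.client("cloudformation").describe_stacks(
    StackName="servicecatalog-puppet")  # hypothetical stack name
outputs = {o["OutputKey"]: o["OutputValue"]
           for o in stacks["Stacks"][0]["Outputs"]}
print(outputs["CloudFormationEventsQueueArn"])  # queue ARN the regional SNS topics deliver to
print(outputs["Version"])                       # value of the version SSM parameter
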
Code example #20
def create_cdk_pipeline(name, version, product_name, product_version,
                        template_config, p) -> t.Template:
    description = f"""Builds a cdk pipeline
{{"version": "{constants.VERSION}", "framework": "servicecatalog-factory", "role": "product-pipeline", "type": "{name}", "version": "{version}"}}"""
    configuration = template_config.get("Configuration")
    template = t.Template(Description=description)

    template.add_parameter(t.Parameter("PuppetAccountId", Type="String"))
    template.add_parameter(
        t.Parameter("CDKSupportCDKDeployRequireApproval",
                    Type="String",
                    Default="never"))
    template.add_parameter(
        t.Parameter("CDKSupportCDKComputeType",
                    Type="String",
                    Default="BUILD_GENERAL1_SMALL"))
    template.add_parameter(
        t.Parameter("CDKSupportCDKDeployImage",
                    Type="String",
                    Default="aws/codebuild/standard:4.0"))
    template.add_parameter(
        t.Parameter("CDKSupportCDKToolkitStackName",
                    Type="String",
                    Default="CDKToolKit"))
    template.add_parameter(
        t.Parameter(
            "CDKSupportCDKDeployExtraArgs",
            Type="String",
            Default="",
            Description="Extra args to pass to CDK deploy",
        ))
    template.add_parameter(
        t.Parameter(
            "CDKSupportStartCDKDeployFunctionArn",
            Type="String",
        ))
    template.add_parameter(
        t.Parameter(
            "CDKSupportGetOutputsForGivenCodebuildIdFunctionArn",
            Type="String",
        ))
    template.add_parameter(
        t.Parameter("CDKSupportIAMRolePaths",
                    Type="String",
                    Default="/servicecatalog-factory-cdk-support/"))
    template.add_parameter(
        t.Parameter("CDKSupportCDKDeployRoleName",
                    Type="String",
                    Default="CDKDeployRoleName"))

    manifest = json.loads(open(f"{p}/{PREFIX}/manifest.json", "r").read())

    cdk_deploy_parameter_args = list()

    for artifact_name, artifact in manifest.get("artifacts", {}).items():
        if artifact.get("type") == "aws:cloudformation:stack":
            artifact_template_file_path = artifact.get("properties",
                                                       {}).get("templateFile")
            assert (
                artifact_template_file_path
            ), f"Could not find template file in manifest.json for {artifact_name}"
            artifact_template = json.loads(
                open(f"{p}/{PREFIX}/{artifact_template_file_path}",
                     "r").read())
            for parameter_name, parameter_details in artifact_template.get(
                    "Parameters", {}).items():
                if template.parameters.get(parameter_name) is None:
                    template.add_parameter(
                        t.Parameter(parameter_name, **parameter_details))
                cdk_deploy_parameter_args.append(
                    f"--parameters {artifact_name}:{parameter_name}=${{{parameter_name}}}"
                )

            for output_name, output_details in artifact_template.get(
                    "Outputs", {}).items():
                if template.outputs.get(output_name) is None:
                    new_output = dict(**output_details)
                    new_output["Value"] = t.GetAtt("GetOutputsCode",
                                                   output_name)
                    template.add_output(t.Output(output_name, **new_output))
    cdk_deploy_parameter_args = " ".join(cdk_deploy_parameter_args)

    class DeployDetailsCustomResource(cloudformation.AWSCustomObject):
        resource_type = "Custom::DeployDetails"
        props = dict()

    runtime_versions = dict(
        nodejs=constants.BUILDSPEC_RUNTIME_VERSIONS_NODEJS_DEFAULT, )
    if configuration.get("runtime-versions"):
        runtime_versions.update(configuration.get("runtime-versions"))

    extra_commands = list(configuration.get("install", {}).get("commands", []))

    template.add_resource(
        codebuild.Project(
            "CDKDeploy",
            Name=t.Sub("${AWS::StackName}-deploy"),
            Description='Run CDK deploy for given source code',
            ServiceRole=t.Sub(
                "arn:aws:iam::${AWS::AccountId}:role${CDKSupportIAMRolePaths}${CDKSupportCDKDeployRoleName}"
            ),
            Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS", ),
            Environment=codebuild.Environment(
                ComputeType=t.Ref('CDKSupportCDKComputeType'),
                EnvironmentVariables=[
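                    # The "CHANGE_ME" values below are placeholders; the real values are
                    # expected to be supplied at build time (e.g. by the StartCDKDeploy
                    # custom resource defined further down).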
                    codebuild.EnvironmentVariable(
                        Name="CDK_DEPLOY_REQUIRE_APPROVAL",
                        Type="PLAINTEXT",
                        Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="CDK_DEPLOY_EXTRA_ARGS",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(
                        Name="CDK_TOOLKIT_STACK_NAME",
                        Type="PLAINTEXT",
                        Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="UId",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="PUPPET_ACCOUNT_ID",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="REGION",
                                                  Type="PLAINTEXT",
                                                  Value=t.Ref("AWS::Region")),
                    codebuild.EnvironmentVariable(
                        Name="CDK_DEPLOY_PARAMETER_ARGS",
                        Type="PLAINTEXT",
                        Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="ON_COMPLETE_URL",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="NAME",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="VERSION",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                ],
                Image=t.Ref('CDKSupportCDKDeployImage'),
                Type="LINUX_CONTAINER",
            ),
            Source=codebuild.Source(
                Type="NO_SOURCE",
                BuildSpec=t.Sub(
                    yaml.safe_dump(
                        dict(
                            version=0.2,
                            phases=dict(
                                install={
                                    "runtime-versions":
                                    runtime_versions,
                                    "commands": [
                                        "aws s3 cp s3://sc-factory-artifacts-$PUPPET_ACCOUNT_ID-$REGION/CDK/1.0.0/$NAME/$VERSION/$NAME-$VERSION.zip $NAME-$VERSION.zip",
                                        "unzip $NAME-$VERSION.zip",
                                        "npm install",
                                    ] + extra_commands
                                },
                                build={
                                    "commands": [
                                        "npm run cdk deploy -- --toolkit-stack-name $CDK_TOOLKIT_STACK_NAME --require-approval $CDK_DEPLOY_REQUIRE_APPROVAL --outputs-file scf_outputs.json $CDK_DEPLOY_EXTRA_ARGS $CDK_DEPLOY_PARAMETER_ARGS '*'",
                                        "aws s3 cp scf_outputs.json s3://sc-cdk-artifacts-${AWS::AccountId}/CDK/1.0.0/$NAME/$VERSION/scf_outputs-$CODEBUILD_BUILD_ID.json",
                                    ]
                                },
                            ),
                            artifacts={
                                "name": "CDKDeploy",
                                "files": ["*", "**/*"],
                            },
                        ))),
            ),
            TimeoutInMinutes=480,
        ))

    template.add_resource(
        codebuild.Project(
            "CDKDestroy",
            Name=t.Sub("${AWS::StackName}-destroy"),
            Description='Run CDK destroy for given source code',
            ServiceRole=t.Sub(
                "arn:aws:iam::${AWS::AccountId}:role${CDKSupportIAMRolePaths}${CDKSupportCDKDeployRoleName}"
            ),
            Artifacts=codebuild.Artifacts(Type="NO_ARTIFACTS", ),
            Environment=codebuild.Environment(
                ComputeType=t.Ref('CDKSupportCDKComputeType'),
                EnvironmentVariables=[
                    codebuild.EnvironmentVariable(
                        Name="CDK_DEPLOY_REQUIRE_APPROVAL",
                        Type="PLAINTEXT",
                        Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="CDK_DEPLOY_EXTRA_ARGS",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(
                        Name="CDK_TOOLKIT_STACK_NAME",
                        Type="PLAINTEXT",
                        Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="UId",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="PUPPET_ACCOUNT_ID",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="REGION",
                                                  Type="PLAINTEXT",
                                                  Value=t.Ref("AWS::Region")),
                    codebuild.EnvironmentVariable(
                        Name="CDK_DEPLOY_PARAMETER_ARGS",
                        Type="PLAINTEXT",
                        Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="ON_COMPLETE_URL",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="NAME",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                    codebuild.EnvironmentVariable(Name="VERSION",
                                                  Type="PLAINTEXT",
                                                  Value="CHANGE_ME"),
                ],
                Image=t.Ref('CDKSupportCDKDeployImage'),
                Type="LINUX_CONTAINER",
            ),
            Source=codebuild.Source(
                Type="NO_SOURCE",
                BuildSpec=t.Sub(
                    yaml.safe_dump(
                        dict(
                            version=0.2,
                            phases=dict(
                                install={
                                    "runtime-versions":
                                    runtime_versions,
                                    "commands": [
                                        "aws s3 cp s3://sc-factory-artifacts-$PUPPET_ACCOUNT_ID-$REGION/CDK/1.0.0/$NAME/$VERSION/$NAME-$VERSION.zip $NAME-$VERSION.zip",
                                        "unzip $NAME-$VERSION.zip",
                                        "npm install",
                                    ] + extra_commands
                                },
                                build={
                                    "commands": [
                                        "npm run cdk destroy -- --toolkit-stack-name $CDK_TOOLKIT_STACK_NAME --force --ignore-errors '*'"
                                    ]
                                },
                            ),
                            artifacts={
                                "name": "CDKDeploy",
                                "files": ["*", "**/*"],
                            },
                        ))),
            ),
            TimeoutInMinutes=480,
        ))

    template.add_resource(
        DeployDetailsCustomResource(
            "StartCDKDeploy",
            DependsOn=["CDKDeploy", "CDKDestroy"],
            ServiceToken=t.Ref("CDKSupportStartCDKDeployFunctionArn"),
            CreateUpdateProject=t.Ref("CDKDeploy"),
            DeleteProject=t.Ref("CDKDestroy"),
            CDK_DEPLOY_EXTRA_ARGS=t.Ref("CDKSupportCDKDeployExtraArgs"),
            CDK_TOOLKIT_STACK_NAME=t.Ref("CDKSupportCDKToolkitStackName"),
            PUPPET_ACCOUNT_ID=t.Ref("PuppetAccountId"),
            CDK_DEPLOY_PARAMETER_ARGS=t.Sub(cdk_deploy_parameter_args),
            CDK_DEPLOY_REQUIRE_APPROVAL=t.Ref(
                "CDKSupportCDKDeployRequireApproval"),
            NAME=product_name,
            VERSION=product_version,
        ))

    template.add_resource(
        DeployDetailsCustomResource(
            "GetOutputsCode",
            DependsOn=[
                "StartCDKDeploy",
            ],
            ServiceToken=t.Ref(
                "CDKSupportGetOutputsForGivenCodebuildIdFunctionArn"),
            CodeBuildBuildId=t.GetAtt("StartCDKDeploy", "BuildId"),
            BucketName=t.Sub("sc-cdk-artifacts-${AWS::AccountId}"),
            ObjectKeyPrefix=t.Sub(
                f"CDK/1.0.0/{product_name}/{product_version}"),
        ))

    return template
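
A hedged usage sketch: render the generated product template to a file. The argument values, and the expectation that p points at a synthesized CDK output directory containing manifest.json, are assumptions for illustration only:

product_template = create_cdk_pipeline(
    name="cdk-product",                    # hypothetical values
    version="v1",
    product_name="my-product",
    product_version="v1",
    template_config={"Configuration": {}},
    p="/tmp/cdk-out",                      # must contain <PREFIX>/manifest.json
)
with open("product.template.yaml", "w") as f:
    f.write(product_template.to_yaml())
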
Code example #21
File: pipeline.py Project: john-shaskin/cumulus
    def handle(self, chain_context):
        """
        This step adds in the shell of a pipeline.
         * s3 bucket
         * policies for the bucket and pipeline
         * your next step in the chain MUST be a source stage
        :param chain_context:
        :return:
        """
        if self.create_bucket:
            pipeline_bucket = Bucket(
                "PipelineBucket%s" % self.name,
                BucketName=self.bucket_name,
                VersioningConfiguration=VersioningConfiguration(
                    Status="Enabled"
                )
            )
            chain_context.template.add_resource(pipeline_bucket)

        default_bucket_policies = self.get_default_bucket_policy_statements(self.bucket_name)

        if self.bucket_policy_statements:
            bucket_access_policy = self.get_bucket_policy(
                pipeline_bucket=self.bucket_name,
                bucket_policy_statements=self.bucket_policy_statements,
            )
            chain_context.template.add_resource(bucket_access_policy)

        pipeline_bucket_access_policy = iam.ManagedPolicy(
            "PipelineBucketAccessPolicy",
            Path='/managed/',
            PolicyDocument=awacs.aws.PolicyDocument(
                Version="2012-10-17",
                Id="bucket-access-policy%s" % chain_context.instance_name,
                Statement=default_bucket_policies
            )
        )

        chain_context.metadata[cumulus.steps.dev_tools.META_PIPELINE_BUCKET_NAME] = self.bucket_name
        chain_context.metadata[cumulus.steps.dev_tools.META_PIPELINE_BUCKET_POLICY_REF] = Ref(
            pipeline_bucket_access_policy)

        default_pipeline_role = self.get_default_pipeline_role()
        pipeline_service_role_arn = self.pipeline_service_role_arn or troposphere.GetAtt(default_pipeline_role, "Arn")

        generic_pipeline = codepipeline.Pipeline(
            "Pipeline",
            RoleArn=pipeline_service_role_arn,
            Stages=[],
            ArtifactStore=codepipeline.ArtifactStore(
                Type="S3",
                Location=self.bucket_name,
            )
        )

        if self.bucket_kms_key_arn:
            encryption_config = codepipeline.EncryptionKey(
                "ArtifactBucketKmsKey",
                Id=self.bucket_kms_key_arn,
                Type='KMS',
            )
            generic_pipeline.ArtifactStore.EncryptionKey = encryption_config

        pipeline_output = troposphere.Output(
            "PipelineName",
            Description="Code Pipeline",
            Value=Ref(generic_pipeline),
        )
        pipeline_bucket_output = troposphere.Output(
            "PipelineBucket",
            Description="Name of the input artifact bucket for the pipeline",
            Value=self.bucket_name,
        )

        if not self.pipeline_service_role_arn:
            chain_context.template.add_resource(default_pipeline_role)

        chain_context.template.add_resource(pipeline_bucket_access_policy)
        chain_context.template.add_resource(generic_pipeline)
        chain_context.template.add_output(pipeline_output)
        chain_context.template.add_output(pipeline_bucket_output)
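
A rough sketch of exercising this step outside a full chain, using a stand-in context object; the real cumulus chain context type and the step's constructor arguments are assumptions inferred from the attributes the method reads:

import troposphere

class StubChainContext:
    # minimal stand-in exposing only the attributes handle() touches
    def __init__(self):
        self.template = troposphere.Template()
        self.metadata = {}
        self.instance_name = "demo"

ctx = StubChainContext()
step = Pipeline(name="Demo", bucket_name="demo-pipeline-artifacts")  # assumed signature
step.handle(ctx)
print(ctx.template.to_yaml())  # pipeline shell, artifact bucket, and managed policy
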
Code example #22
    def ec2_nat_gateway(self, network_config, nat_sg_config, nat_sg_config_ref,
                        nat_config):

        nat_az = nat_config.availability_zone
        nat_segment = nat_config.segment.split('.')[-1]
        ec2_resource = {}
        for az_idx in range(1, network_config.availability_zones + 1):
            # Add security groups created for NAT Bastions
            nat_security_groups = []
            nat_security_groups.extend(nat_config.security_groups)
            if nat_az == 'all':
                nat_sg_id = nat_config.name + "_az" + str(az_idx)
                nat_security_groups.append('paco.ref ' + nat_sg_config_ref +
                                           '.' + nat_sg_id)
            elif az_idx == int(nat_config.availability_zone):
                for nat_sg_id in nat_sg_config.keys():
                    nat_security_groups.append('paco.ref ' +
                                               nat_sg_config_ref + '.' +
                                               nat_sg_id)

            if nat_az == 'all' or nat_az == str(az_idx):
                security_group_list_param = self.create_cfn_ref_list_param(
                    param_type='List<AWS::EC2::SecurityGroup::Id>',
                    name='NATSecurityGroupListAZ' + str(az_idx),
                    description=
                    'List of security group ids to attach to the instances.',
                    value=nat_security_groups,
                    ref_attribute='id',
                )

                subnet_id_param = self.create_cfn_parameter(
                    name=self.create_cfn_logical_id_join(
                        str_list=['SubnetIdAZ',
                                  str(az_idx), nat_segment],
                        camel_case=True),
                    param_type='String',
                    description='SubnetId to launch an EC2 NAT instance',
                    value=nat_config.segment + '.az' + str(az_idx) +
                    '.subnet_id',
                )
                ref_parts = nat_config.paco_ref_parts.split('.')
                instance_name = utils.big_join(str_list=[
                    ref_parts[1], ref_parts[2], 'NGW', nat_config.name,
                    'AZ' + str(az_idx)
                ],
                                               separator_ch='-',
                                               camel_case=True)
                # ToDo: expose latest ami id as an API and call it directly
                # SLOW: takes a couple seconds to resolve this every Paco run
                latest_image_ref = Reference(
                    'paco.ref function.aws.ec2.ami.latest.amazon-linux-nat')
                latest_image_ref.set_region(self.aws_region)
                nat_ami_id = latest_image_ref.resolve(self.paco_ctx.project,
                                                      self.account_ctx)
                ec2_resource[az_idx] = troposphere.ec2.Instance(
                    title=self.create_cfn_logical_id_join(
                        str_list=['EC2NATInstance',
                                  str(az_idx)],
                        camel_case=True),
                    template=self.template,
                    SubnetId=troposphere.Ref(subnet_id_param),
                    ImageId=nat_ami_id,
                    InstanceType=nat_config.ec2_instance_type,
                    KeyName=self.paco_ctx.get_ref(nat_config.ec2_key_pair +
                                                  '.keypair_name'),
                    SecurityGroupIds=troposphere.Ref(
                        security_group_list_param),
                    SourceDestCheck=False,
                    Tags=troposphere.ec2.Tags(Name=instance_name))

                ec2_instance_id_output = troposphere.Output(
                    title=ec2_resource[az_idx].title + 'Id',
                    Description="EC2 NAT Instance Id",
                    Value=troposphere.Ref(ec2_resource[az_idx]))
                self.template.add_output(ec2_instance_id_output)

                troposphere.ec2.EIP(title=self.create_cfn_logical_id_join(
                    str_list=['ElasticIP', str(az_idx)], camel_case=True),
                                    template=self.template,
                                    Domain='vpc',
                                    InstanceId=troposphere.Ref(
                                        ec2_resource[az_idx]))

                self.register_stack_output_config(
                    nat_config.paco_ref_parts + ".ec2.az" + str(az_idx),
                    ec2_instance_id_output.title)

        # Add DefaultRoute to the route tables in each AZ
        for segment_ref in nat_config.default_route_segments:
            segment_id = segment_ref.split('.')[-1]
            # Routes
            for az_idx in range(1, network_config.availability_zones + 1):
                if nat_config.availability_zone == 'all':
                    instance_id_ref = troposphere.Ref(ec2_resource[az_idx])
                else:
                    instance_id_ref = troposphere.Ref(
                        ec2_resource[int(nat_az)])

                route_table_id_param = self.create_cfn_parameter(
                    name=self.create_cfn_logical_id_join(
                        str_list=['RouteTable', segment_id, 'AZ',
                                  str(az_idx)],
                        camel_case=True),
                    param_type='String',
                    description='RouteTable ID for ' + segment_id + ' AZ' +
                    str(az_idx),
                    value=segment_ref + ".az{}.route_table.id".format(az_idx),
                )

                troposphere.ec2.Route(
                    title="EC2NATRouteAZ" + str(az_idx),
                    template=self.template,
                    DestinationCidrBlock="0.0.0.0/0",
                    InstanceId=instance_id_ref,
                    RouteTableId=troposphere.Ref(route_table_id_param))
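
Stripped of the Paco parameter plumbing, the per-AZ pattern above boils down to an EC2 instance with source/destination checking disabled, an Elastic IP, and a 0.0.0.0/0 route pointing at the instance. A standalone sketch with placeholder IDs (not taken from the snippet):

import troposphere
import troposphere.ec2

t = troposphere.Template()
nat = troposphere.ec2.Instance(
    "EC2NATInstance1",
    template=t,
    ImageId="ami-0123456789abcdef0",       # placeholder NAT AMI
    InstanceType="t3.micro",
    SubnetId="subnet-0123456789abcdef0",   # placeholder public subnet
    SourceDestCheck=False,                 # required so the instance can forward traffic
)
troposphere.ec2.EIP(
    "ElasticIP1", template=t, Domain="vpc", InstanceId=troposphere.Ref(nat))
troposphere.ec2.Route(
    "EC2NATRouteAZ1",
    template=t,
    RouteTableId="rtb-0123456789abcdef0",  # placeholder private route table
    DestinationCidrBlock="0.0.0.0/0",
    InstanceId=troposphere.Ref(nat),
)
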
Code example #23
def get_template(version: str, default_region_value) -> t.Template:
    description = f"""Bootstrap template used to bootstrap a region of ServiceCatalog-Puppet master
{{"version": "{version}", "framework": "servicecatalog-puppet", "role": "bootstrap-master-region"}}"""

    template = t.Template(Description=description)

    version_parameter = template.add_parameter(
        t.Parameter("Version", Default=version, Type="String")
    )
    default_region_value_parameter = template.add_parameter(
        t.Parameter("DefaultRegionValue", Default=default_region_value, Type="String")
    )

    template.add_resource(
        ssm.Parameter(
            "DefaultRegionParam",
            Name="/servicecatalog-puppet/home-region",
            Type="String",
            Value=t.Ref(default_region_value_parameter),
            Tags={"ServiceCatalogPuppet:Actor": "Framework"},
        )
    )
    version_ssm_parameter = template.add_resource(
        ssm.Parameter(
            "Param",
            Name="service-catalog-puppet-regional-version",
            Type="String",
            Value=t.Ref(version_parameter),
            Tags={"ServiceCatalogPuppet:Actor": "Framework"},
        )
    )

    template.add_resource(
        s3.Bucket(
            "PipelineArtifactBucket",
            BucketName=t.Sub(
                "sc-puppet-pipeline-artifacts-${AWS::AccountId}-${AWS::Region}"
            ),
            VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
            BucketEncryption=s3.BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    s3.ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
                            SSEAlgorithm="AES256"
                        )
                    )
                ]
            ),
            PublicAccessBlockConfiguration=s3.PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            Tags=t.Tags({"ServiceCatalogPuppet:Actor": "Framework"}),
        )
    )

    regional_product_topic = template.add_resource(
        sns.Topic(
            "RegionalProductTopic",
            DisplayName="servicecatalog-puppet-cloudformation-regional-events",
            TopicName="servicecatalog-puppet-cloudformation-regional-events",
            Subscription=[
                sns.Subscription(
                    Endpoint=t.Sub(
                        "arn:${AWS::Partition}:sqs:${DefaultRegionValue}:${AWS::AccountId}:servicecatalog-puppet-cloudformation-events"
                    ),
                    Protocol="sqs",
                )
            ],
        ),
    )

    template.add_output(
        t.Output("Version", Value=t.GetAtt(version_ssm_parameter, "Value"))
    )
    template.add_output(
        t.Output("RegionalProductTopic", Value=t.Ref(regional_product_topic))
    )

    return template
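
The regional SNS topic created here delivers into the home-region SQS queue from the earlier bootstrap template, so this template is typically created once per enabled region. A hedged deployment sketch; the region list, stack name, and use of boto3 are assumptions, not the project's actual mechanism:

import boto3

body = get_template("0.100.0", default_region_value="eu-west-1").to_yaml()
for region in ["eu-west-1", "us-east-1"]:            # assumed list of enabled regions
    boto3.client("cloudformation", region_name=region).create_stack(
        StackName="servicecatalog-puppet-regional",  # hypothetical stack name
        TemplateBody=body,
    )
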
Code example #24
def add_output(template, description, value):
    title = description.replace(' ', '').replace('-', '').replace('_', '')
    template.add_output(
        [troposphere.Output(title, Description=description, Value=value)])
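
The helper derives the output's logical id by stripping spaces, hyphens, and underscores from the description, so "Pipeline bucket-name" becomes "Pipelinebucketname". A quick usage sketch with made-up values:

import troposphere

t = troposphere.Template()
add_output(t, "Pipeline bucket-name", "my-artifact-bucket")
print(list(t.to_dict()["Outputs"]))  # ['Pipelinebucketname']
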
Code example #25
def create_cloudformation(
    key_name,
    ami_id,
    instance_type,
    security_group=None,
    subnet_id=None,
    price=None,
    size=100,
    user_script=USER_SCRIPT_DEFAULT,
    extra_user_data="",
):
    # splice any extra user data into the (possibly caller-supplied) startup script
    user_script = user_script.replace("<<extra_user_data>>", extra_user_data)
    # XXX set this to get real bool values
    os.environ["TROPO_REAL_BOOL"] = "true"

    t = troposphere.Template(Description="TchoTcho EC2 train")

    instance_security_group = t.add_resource(
        troposphere.ec2.SecurityGroup(
            "InstanceSecurityGroup",
            VpcId=get_default_vpc_id(),
            GroupDescription=
            "Enable only SSH ingoing via port 22 and all outgoing",
            SecurityGroupIngress=[
                troposphere.ec2.SecurityGroupRule(IpProtocol="tcp",
                                                  FromPort=22,
                                                  ToPort=22,
                                                  CidrIp="0.0.0.0/0"),
                troposphere.ec2.SecurityGroupRule(IpProtocol="tcp",
                                                  FromPort=22,
                                                  ToPort=22,
                                                  CidrIpv6="::/0"),
            ],
            SecurityGroupEgress=[
                troposphere.ec2.SecurityGroupRule(IpProtocol="-1",
                                                  CidrIp="0.0.0.0/0"),
                troposphere.ec2.SecurityGroupRule(IpProtocol="-1",
                                                  CidrIpv6="::/0"),
            ],
        ))

    instance_role = t.add_resource(
        troposphere.iam.Role(
            "InstanceRole",
            AssumeRolePolicyDocument=awacs.aws.Policy(
                Version="2012-10-17",
                Statement=[
                    awacs.aws.Statement(
                        Effect=awacs.aws.Allow,
                        Principal=awacs.aws.Principal("Service",
                                                      "ec2.amazonaws.com"),
                        Action=[awacs.sts.AssumeRole],
                    ),
                ],
            ),
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role",
            ],
            Policies=[
                troposphere.iam.Policy(
                    PolicyName="S3FullAccess",
                    PolicyDocument={
                        "Statement": [{
                            "Effect": "Allow",
                            "Action": "s3:*",
                            "Resource": "*"
                        }],
                    },
                )
            ],
        ))

    instance_profile = t.add_resource(
        troposphere.iam.InstanceProfile(
            "InstanceProfile",
            Roles=[troposphere.Ref(instance_role)],
        ))

    launch_template = t.add_resource(
        troposphere.ec2.LaunchTemplate(
            "InstanceLaunchTemplate",
            # https://github.com/cloudtools/troposphere/blob/07dde6b66fca28dd401903027d8ac13bc107e0b6/examples/CloudFormation_Init_ConfigSet.py#L45
            # https://stackoverflow.com/questions/35095950/what-are-the-benefits-of-cfn-init-over-userdata
            # XXX for now we are not using this we always just delete the stack
            # Metadata=troposphere.cloudformation.Metadata(
            #     troposphere.cloudformation.Init(
            #         troposphere.cloudformation.InitConfigSets(default=["bootstrap"]),
            #         awspackages=troposphere.cloudformation.InitConfig(
            #             commands={
            #                 "001-bootstrap": {"command": "touch ~/bootstra.txt"},
            #             },
            #         ),
            #     ),
            # ),
            LaunchTemplateData=troposphere.ec2.LaunchTemplateData(
                KeyName=key_name,
                ImageId=ami_id,
                InstanceType=instance_type,
                UserData=troposphere.Base64(
                    # Sub is needed if we have variables
                    troposphere.Sub(textwrap.dedent(user_script.strip()), ), ),
                IamInstanceProfile=troposphere.ec2.IamInstanceProfile(
                    Arn=troposphere.GetAtt(instance_profile, "Arn"), ),
                BlockDeviceMappings=[
                    troposphere.ec2.LaunchTemplateBlockDeviceMapping(
                        DeviceName="/dev/sda1",
                        Ebs=troposphere.ec2.EBSBlockDevice(
                            DeleteOnTermination=True,
                            VolumeSize=size,
                            Encrypted=True),
                    )
                ],
            ),
        ))

    if price:
        instance_market_options = troposphere.ec2.InstanceMarketOptions(
            MarketType="spot",
            SpotOptions=troposphere.ec2.SpotOptions(
                SpotInstanceType="one-time",
                MaxPrice=str(price),
                InstanceInterruptionBehavior="terminate",
            ),
        )

        launch_template.properties["LaunchTemplateData"].properties[
            "InstanceMarketOptions"] = instance_market_options

    if not security_group:
        # fall back to the security group created above; it is wrapped in a
        # single-element list at each point of use below
        security_group = troposphere.Ref(instance_security_group)

    if subnet_id:
        network_interfaces = [
            troposphere.ec2.NetworkInterfaces(
                SubnetId=subnet_id,
                DeviceIndex=0,
                Groups=[security_group],
            )
        ]

        launch_template.properties["LaunchTemplateData"].properties[
            "NetworkInterfaces"] = network_interfaces
    else:
        # attach the caller-supplied group (or the default created above) directly
        launch_template.properties["LaunchTemplateData"].properties[
            "SecurityGroupIds"] = [security_group]

    ec2_instance = t.add_resource(
        troposphere.ec2.Instance(
            "TchoTchoInstance",
            LaunchTemplate=troposphere.ec2.LaunchTemplateSpecification(
                LaunchTemplateId=troposphere.Ref(launch_template),
                Version=troposphere.GetAtt(launch_template,
                                           "LatestVersionNumber"),
            ),
            CreationPolicy=troposphere.policies.CreationPolicy(
                ResourceSignal=troposphere.policies.ResourceSignal(
                    Timeout='PT15M')),
        ))

    t.add_output([
        troposphere.Output(
            "InstanceId",
            Description="InstanceId of the EC2 instance",
            Value=troposphere.Ref(ec2_instance),
        ),
        troposphere.Output(
            "AZ",
            Description="Availability Zone of the EC2 instance",
            Value=troposphere.GetAtt(ec2_instance, "AvailabilityZone"),
        ),
        troposphere.Output(
            "PublicIP",
            Description="Public IP address of the EC2 instance",
            Value=troposphere.GetAtt(ec2_instance, "PublicIp"),
        ),
        troposphere.Output(
            "PrivateIP",
            Description="Private IP address of the EC2 instance",
            Value=troposphere.GetAtt(ec2_instance, "PrivateIp"),
        ),
        troposphere.Output(
            "PublicDNS",
            Description="Public DNSName of the EC2 instance",
            Value=troposphere.GetAtt(ec2_instance, "PublicDnsName"),
        ),
        troposphere.Output(
            "PrivateDNS",
            Description="Private DNSName of the EC2 instance",
            Value=troposphere.GetAtt(ec2_instance, "PrivateDnsName"),
        ),
    ])
    # XXX moto has some problems with yaml; validate, LaunchTemplateData is
    # not parsed so the ec2instance ImageId other keys are not found
    # return t.to_yaml()
    return t.to_json()
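
A hedged usage sketch; the AMI, key pair, instance type, and spot price are placeholders, and deploying the returned JSON through boto3 is an assumption rather than part of the snippet:

import boto3

stack_body = create_cloudformation(
    key_name="my-keypair",
    ami_id="ami-0123456789abcdef0",
    instance_type="g4dn.xlarge",
    price=0.50,                      # request a one-time spot instance at this max price
    size=200,                        # 200 GB encrypted root volume
)
boto3.client("cloudformation").create_stack(
    StackName="tchotcho-train",
    TemplateBody=stack_body,
    Capabilities=["CAPABILITY_IAM"],  # the template creates an IAM role and instance profile
)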