Example #1
 def set_bucket_lifecycle(self):
     """
     Attach a LifecycleConfiguration to this S3 bucket that aborts
     incomplete multipart uploads after three days.
     """
     config = LifecycleConfiguration(Rules=[
         LifecycleRule(
             Status='Enabled',
             AbortIncompleteMultipartUpload=AbortIncompleteMultipartUpload(
                 DaysAfterInitiation=3))
     ])
     self.LifecycleConfiguration = config
 def add_lifecycle_config(self, configuration):
     """
     Build a LifecycleConfiguration from a list of rule dicts and return it
     as keyword arguments ready to pass to a Bucket resource.
     """
     lifecycle_rules = []
     for rule_kwargs in configuration:
         # Parse transitions if present in the rule definition
         if "Transitions" in rule_kwargs:
             transitions = []
             for transition in rule_kwargs.pop("Transitions"):
                 rule_transition = LifecycleRuleTransition(**transition)
                 transitions.append(rule_transition)
             rule_kwargs.update({"Transitions": transitions})
         lifecycle_rule = LifecycleRule(**rule_kwargs)
         lifecycle_rules.append(lifecycle_rule)
     lifecycle_config_kwargs = {"Rules": lifecycle_rules}
     lifecycle_config = LifecycleConfiguration(**lifecycle_config_kwargs)
     return {"LifecycleConfiguration": lifecycle_config}
upload_bucket = template.add_resource(
    Bucket(
        'UploadBucket',
        BucketName=_upload_bucket_name,  # Hard-coding the bucket name is not ideal, but it resolves a circular dependency.
        CorsConfiguration=CorsConfiguration(CorsRules=[
            CorsRules(
                AllowedOrigins=['*'],
                AllowedMethods=['GET', 'HEAD', 'PUT', 'POST'],
                AllowedHeaders=['*'],
            )
        ]),
        LifecycleConfiguration=LifecycleConfiguration(Rules=[
            LifecycleRule(
                Id='DeleteUploadsAfterOneDay',
                Status='Enabled',
                ExpirationInDays=1,
                Prefix='upload/',
            )
        ], ),
        NotificationConfiguration=NotificationConfiguration(
            TopicConfigurations=[
                TopicConfigurations(Event='s3:ObjectCreated:*',
                                    Filter=Filter(S3Key=S3Key(Rules=[
                                        Rules(
                                            Name='prefix',
                                            Value='upload/',
                                        )
                                    ], ), ),
                                    Topic=Ref(upload_topic))
            ], ),
    ))
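The resource above assumes roughly these imports (a sketch; class locations can shift between troposphere releases, and `_upload_bucket_name` and `upload_topic` are defined elsewhere in the original module):

from troposphere import Ref, Template
from troposphere.s3 import (Bucket, CorsConfiguration, CorsRules, Filter,
                            LifecycleConfiguration, LifecycleRule,
                            NotificationConfiguration, Rules, S3Key,
                            TopicConfigurations)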
Example #4
     # Attach a Lifecycle Configuration
     LifecycleConfiguration=LifecycleConfiguration(Rules=[
         # Add a lifecycle rule to the configuration
         LifecycleRule(
             # Rule attributes
             Id="S3BucketRule001",
             Prefix="/only-this-sub-dir",
             Status="Enabled",
             # Applies to current objects
             ExpirationInDays=3650,
             Transitions=[
                 LifecycleRuleTransition(
                     StorageClass="STANDARD_IA",
                     TransitionInDays=60,
                 ),
             ],
             # Applies to noncurrent object versions
             NoncurrentVersionExpirationInDays=365,
             NoncurrentVersionTransitions=[
                 NoncurrentVersionTransition(
                     StorageClass="STANDARD_IA",
                     TransitionInDays=30,
                 ),
                 NoncurrentVersionTransition(
                     StorageClass="GLACIER",
                     TransitionInDays=120,
                 ),
             ],
         ),
     ]),
 ))
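Example #4 is only a fragment (the enclosing Bucket call is cut off). A minimal self-contained version might look like the following; the template wiring and the logical ID are assumptions, and the rule values mirror the fragment.

from troposphere import Template
from troposphere.s3 import (Bucket, LifecycleConfiguration, LifecycleRule,
                            LifecycleRuleTransition,
                            NoncurrentVersionTransition)

template = Template()
template.add_resource(
    Bucket(
        "S3Bucket",  # hypothetical logical ID
        LifecycleConfiguration=LifecycleConfiguration(Rules=[
            LifecycleRule(
                Id="S3BucketRule001",
                Prefix="only-this-sub-dir/",  # S3 key prefixes are usually written without a leading slash
                Status="Enabled",
                # Current object versions expire after ten years
                ExpirationInDays=3650,
                Transitions=[
                    LifecycleRuleTransition(StorageClass="STANDARD_IA",
                                            TransitionInDays=60),
                ],
                # Noncurrent versions tier down and eventually expire
                NoncurrentVersionExpirationInDays=365,
                NoncurrentVersionTransitions=[
                    NoncurrentVersionTransition(StorageClass="STANDARD_IA",
                                                TransitionInDays=30),
                    NoncurrentVersionTransition(StorageClass="GLACIER",
                                                TransitionInDays=120),
                ],
            ),
        ]),
    ))
print(template.to_json())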
Example #5
region = "eu-west-1"
template = Template(region + " s3")

# TODO: separate out management and log buckets
management_bucket = Bucket(
    region.replace("-", "") + "managementbucket",
    BucketName="mgmt.eu-west-1.weblox.io",
    BucketEncryption=BucketEncryption(ServerSideEncryptionConfiguration=[
        ServerSideEncryptionRule(
            ServerSideEncryptionByDefault=ServerSideEncryptionByDefault(
                SSEAlgorithm="AES256")),
    ]),
    LifecycleConfiguration=LifecycleConfiguration(Rules=[
        LifecycleRule(
            Id="ExpireLogs",
            Prefix="logs/",
            Status="Enabled",
            ExpirationInDays=30,
        ),
    ]),
    PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
        BlockPublicAcls=True,
        BlockPublicPolicy=True,
        IgnorePublicAcls=True,
        RestrictPublicBuckets=True))

template.add_resource(management_bucket)

# TODO: the account IDs are hardcoded per region; see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html
management_bucket_policy = BucketPolicy(
    region.replace("-", "") + "managementbucketpolicy",
    Bucket=Ref(management_bucket),
    # (PolicyDocument truncated in the original snippet)
)

BUCKET = Bucket(
    'ReplicatedBucket',
    DependsOn=[KMS_KEY, KMS_ALIAS],
    BucketName=Sub('${BucketName}-${AWS::Region}'),
    VersioningConfiguration=VersioningConfiguration(Status='Enabled'),
    LifecycleConfiguration=LifecycleConfiguration(Rules=[
        LifecycleRule(
            Status='Enabled',
            AbortIncompleteMultipartUpload=AbortIncompleteMultipartUpload(
                DaysAfterInitiation=3),
            NoncurrentVersionExpirationInDays=1,
            Transition=LifecycleRuleTransition(StorageClass='GLACIER',
                                               TransitionInDays=If(
                                                   SOURCE_REGION_CON, 31, 14)))
    ]),
    BucketEncryption=BucketEncryption(ServerSideEncryptionConfiguration=[
        ServerSideEncryptionRule(
            ServerSideEncryptionByDefault=ServerSideEncryptionByDefault(
                SSEAlgorithm='aws:kms', KMSMasterKeyID=Ref(KMS_KEY)))
    ]),
    ReplicationConfiguration=If(
        SOURCE_REGION_CON,
        ReplicationConfiguration(
            Role=GetAtt(IAM_ROLE, 'Arn'),
            Rules=[
                ReplicationConfigurationRules(
Example #7
    def _create_project_stack(self):
        update = True
        try:
            self._cloudformation_client.describe_stacks(
                StackName=self._stack_name())
        except ClientError as e:
            if 'does not exist' not in str(e):
                raise e
            update = False

        self.info('Creating project stack')
        template = Template()
        template.set_version('2010-09-09')

        memory_size = template.add_parameter(
            Parameter(f'{self._stack_name()}MemorySize',
                      Type=NUMBER,
                      Default=self._aws_config.get('memory_sync', '3008')))

        timeout_gateway = template.add_parameter(
            Parameter(f'{self._stack_name()}GatewayTimeout',
                      Type=NUMBER,
                      Default='30'))

        template.add_resource(
            Bucket(inflection.camelize(inflection.underscore(self._bucket)),
                   BucketName=self._bucket,
                   AccessControl='Private',
                   LifecycleConfiguration=LifecycleConfiguration(Rules=[
                       LifecycleRule(
                           Prefix='tmp', Status='Enabled', ExpirationInDays=1)
                   ])))

        api = template.add_resource(
            Api(self._rest_api_name(),
                Name=
                f'{inflection.humanize(self._project)} {inflection.humanize(self._env)} API',
                ProtocolType='HTTP'))

        role_title = f'{self._rest_api_name()}Role'
        self._add_role(role_title, template)

        default_lambda = template.add_resource(
            Function(
                f'{self._rest_api_name()}Function',
                FunctionName=self._rest_api_name(),
                Code=Code(ZipFile='\n'.join(
                    ['def handler(event, context):', '    return event'])),
                Handler='index.handler',
                Role=GetAtt(role_title, 'Arn'),
                Runtime='python3.7',
                MemorySize=Ref(memory_size),
                Timeout=Ref(timeout_gateway)))

        integration = template.add_resource(
            Integration(self._integration_name(),
                        ApiId=Ref(api),
                        IntegrationType='AWS_PROXY',
                        PayloadFormatVersion='2.0',
                        IntegrationUri=Join('', [
                            'arn:aws:lambda:',
                            self._region,
                            ':',
                            self._account_id,
                            ':function:',
                            Ref(default_lambda),
                        ]),
                        DependsOn=f'{self._rest_api_name()}Function'))

        template.add_resource(
            Route(self._route_name(),
                  ApiId=Ref(api),
                  RouteKey='$default',
                  AuthorizationType='NONE',
                  Target=Join(
                      '/', ['integrations', Ref(integration)]),
                  DependsOn=[integration]))

        # Deprecated
        template.add_resource(
            Stage(f'{self._rest_api_name()}Stage',
                  StageName='v2',
                  ApiId=Ref(api),
                  AutoDeploy=True))

        # Deprecated
        template.add_resource(
            Deployment(f'{self._rest_api_name()}Deployment',
                       ApiId=Ref(api),
                       StageName='v2',
                       DependsOn=[
                           f'{self._rest_api_name()}Stage',
                           self._route_name(),
                           self._integration_name(),
                           self._rest_api_name(),
                       ]))

        template.add_resource(
            Stage(f'{self._rest_api_name()}Stage1',
                  StageName='api',
                  ApiId=Ref(api),
                  AutoDeploy=True))

        template.add_resource(
            Deployment(f'{self._rest_api_name()}Deployment1',
                       ApiId=Ref(api),
                       StageName='api',
                       DependsOn=[
                           f'{self._rest_api_name()}Stage1',
                           self._route_name(),
                           self._integration_name(),
                           self._rest_api_name(),
                       ]))

        template.add_output([
            Output(self._rest_api_reference(),
                   Export=Export(self._rest_api_reference()),
                   Value=Ref(api)),
        ])

        self._s3_client.put_object(Body=template.to_json(),
                                   Bucket=self._bucket,
                                   Key=self._template_key)
        url = self._s3_client.generate_presigned_url(ClientMethod='get_object',
                                                     Params={
                                                         'Bucket':
                                                         self._bucket,
                                                         'Key':
                                                         self._template_key
                                                     })

        if update:
            self._update_stack(self._stack_name(), url)
        else:
            self._create_stack(self._stack_name(), url)
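A sketch of the imports this method appears to rely on; module paths are a best guess and may vary with troposphere versions, while `inflection` and `ClientError` come from their own packages:

import inflection
from botocore.exceptions import ClientError

from troposphere import Export, GetAtt, Join, Output, Parameter, Ref, Template
from troposphere.apigatewayv2 import Api, Deployment, Integration, Route, Stage
from troposphere.awslambda import Code, Function
from troposphere.constants import NUMBER
from troposphere.s3 import Bucket, LifecycleConfiguration, LifecycleRule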
Example #8
def create_primary_template():
    template = Template(
        Description="Root stack for VERY STRONG Lambda function")

    image_digest = template.add_parameter(
        Parameter("ImageDigest", Type="String", Default=""))

    is_image_digest_defined = "IsImageDigestDefined"
    template.add_condition(is_image_digest_defined,
                           Not(Equals(Ref(image_digest), "")))

    artifact_repository = template.add_resource(
        Repository(
            "ArtifactRepository",
            ImageTagMutability="MUTABLE",
            LifecyclePolicy=LifecyclePolicy(LifecyclePolicyText=json.dumps(
                {
                    "rules": [{
                        "rulePriority": 1,
                        "selection": {
                            "tagStatus": "untagged",
                            "countType": "imageCountMoreThan",
                            "countNumber": 3,
                        },
                        "action": {
                            "type": "expire",
                        },
                    }]
                },
                indent=None,
                sort_keys=True,
                separators=(",", ":"),
            )),
        ))

    artifact_repository_url = Join(
        "/",
        [
            Join(
                ".",
                [
                    AccountId,
                    "dkr",
                    "ecr",
                    Region,
                    URLSuffix,
                ],
            ),
            Ref(artifact_repository),
        ],
    )
    image_uri = Join("@", [artifact_repository_url, Ref(image_digest)])

    artifact_bucket = template.add_resource(
        Bucket(
            "ArtifactBucket",
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        BucketKeyEnabled=True,
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            SSEAlgorithm="aws:kms",
                            KMSMasterKeyID=Join(":", [
                                "arn", Partition, "kms", Region, AccountId,
                                "alias/aws/s3"
                            ]),
                        ),
                    )
                ], ),
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=3, ),
                    Status="Enabled",
                ),
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
        ))

    deployment_id_stack = template.add_resource(
        Stack(
            "DeploymentId",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), deployment_id.create_template()),
            Parameters={
                "ArtifactBucket": Ref(artifact_bucket),
            },
            Condition=is_image_digest_defined,
        ))

    availability_zones_stack = template.add_resource(
        Stack(
            "AvailabilityZones",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), availability_zones.create_template()),
            Parameters={
                "DeploymentId": GetAtt(deployment_id_stack, "Outputs.Value"),
                "ImageUri": image_uri,
            },
            Condition=is_image_digest_defined,
        ))

    vpc_stack = template.add_resource(
        Stack(
            "Vpc",
            TemplateURL=common.get_template_s3_url(Ref(artifact_bucket),
                                                   vpc.create_template()),
            Parameters={
                "AvailabilityZones":
                GetAtt(availability_zones_stack, "Outputs.AvailabilityZones"),
            },
            Condition=is_image_digest_defined,
        ))

    lambda_eip_allocator_stack = template.add_resource(
        Stack(
            "LambdaEipAllocator",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), lambda_eip_allocator.create_template()),
            Parameters={
                "DeploymentId": GetAtt(deployment_id_stack, "Outputs.Value"),
                "VpcId": GetAtt(vpc_stack, "Outputs.VpcId"),
                "ImageUri": image_uri,
            },
            Condition=is_image_digest_defined,
        ))

    elastic_file_system_stack = template.add_resource(
        Stack(
            "ElasticFileSystem",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), elastic_file_system.create_template()),
            Parameters={
                "VpcId":
                GetAtt(vpc_stack, "Outputs.VpcId"),
                "SubnetIds":
                GetAtt(vpc_stack, "Outputs.SubnetIds"),
                "AvailabilityZones":
                GetAtt(availability_zones_stack, "Outputs.AvailabilityZones"),
            },
            Condition=is_image_digest_defined,
        ))

    lambda_function_stack = template.add_resource(
        Stack(
            "LambdaFunction",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), lambda_function.create_template()),
            Parameters={
                "DeploymentId":
                GetAtt(deployment_id_stack, "Outputs.Value"),
                "VpcId":
                GetAtt(vpc_stack, "Outputs.VpcId"),
                "SubnetIds":
                GetAtt(vpc_stack, "Outputs.SubnetIds"),
                "FileSystemAccessPointArn":
                GetAtt(elastic_file_system_stack, "Outputs.AccessPointArn"),
                "ImageUri":
                image_uri,
            },
            DependsOn=[lambda_eip_allocator_stack],
            Condition=is_image_digest_defined,
        ))

    image_tagger_stack = template.add_resource(
        Stack(
            "ImageTagger",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), image_tagger.create_template()),
            Parameters={
                "DeploymentId": GetAtt(deployment_id_stack, "Outputs.Value"),
                "ArtifactRepository": Ref(artifact_repository),
                "DesiredImageTag": "current-cloudformation",
                "ImageDigest": Ref(image_digest),
                "ImageUri": image_uri,
            },
            DependsOn=list(template.resources),
            Condition=is_image_digest_defined,
        ))

    template.add_output(Output(
        "ArtifactBucket",
        Value=Ref(artifact_bucket),
    ))

    template.add_output(
        Output(
            "ArtifactRepositoryUrl",
            Value=artifact_repository_url,
        ))

    return template
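The ECR lifecycle policy above is the most reusable part of this example. A minimal standalone sketch, assuming a troposphere release that exposes ImageTagMutability on Repository (values mirror the example, everything else is trimmed):

import json
from troposphere import Template
from troposphere.ecr import LifecyclePolicy, Repository

template = Template()
template.add_resource(
    Repository(
        "ArtifactRepository",
        ImageTagMutability="MUTABLE",
        # Expire all but the three most recent untagged images
        LifecyclePolicy=LifecyclePolicy(LifecyclePolicyText=json.dumps({
            "rules": [{
                "rulePriority": 1,
                "selection": {
                    "tagStatus": "untagged",
                    "countType": "imageCountMoreThan",
                    "countNumber": 3,
                },
                "action": {"type": "expire"},
            }]
        }, separators=(",", ":"))),
    ))
print(template.to_json())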
Example #9
def create_template():
    template = Template(Description=(
        "Static website hosted with S3 and CloudFront. "
        "https://github.com/schlarpc/overengineered-cloudfront-s3-static-website"
    ))

    partition_config = add_mapping(
        template,
        "PartitionConfig",
        {
            "aws": {
                # the region with the control plane for CloudFront, IAM, Route 53, etc
                "PrimaryRegion":
                "us-east-1",
                # assume that Lambda@Edge replicates to all default enabled regions, and that
                # future regions will be opt-in. generated with AWS CLI:
                # aws ec2 describe-regions --all-regions --query "Regions[?OptInStatus=='opt-in-not-required'].RegionName|sort(@)"
                "DefaultRegions": [
                    "ap-northeast-1",
                    "ap-northeast-2",
                    "ap-northeast-3",
                    "ap-south-1",
                    "ap-southeast-1",
                    "ap-southeast-2",
                    "ca-central-1",
                    "eu-central-1",
                    "eu-north-1",
                    "eu-west-1",
                    "eu-west-2",
                    "eu-west-3",
                    "sa-east-1",
                    "us-east-1",
                    "us-east-2",
                    "us-west-1",
                    "us-west-2",
                ],
            },
            # this doesn't actually work, because Lambda@Edge isn't supported in aws-cn
            "aws-cn": {
                "PrimaryRegion": "cn-north-1",
                "DefaultRegions": ["cn-north-1", "cn-northwest-1"],
            },
        },
    )

    acm_certificate_arn = template.add_parameter(
        Parameter(
            "AcmCertificateArn",
            Description=
            "Existing ACM certificate to use for serving TLS. Overrides HostedZoneId.",
            Type="String",
            AllowedPattern="(arn:[^:]+:acm:[^:]+:[^:]+:certificate/.+|)",
            Default="",
        ))

    hosted_zone_id = template.add_parameter(
        Parameter(
            "HostedZoneId",
            Description=
            "Existing Route 53 zone to use for validating a new TLS certificate.",
            Type="String",
            AllowedPattern="(Z[A-Z0-9]+|)",
            Default="",
        ))

    dns_names = template.add_parameter(
        Parameter(
            "DomainNames",
            Description=
            "Comma-separated list of additional domain names to serve.",
            Type="CommaDelimitedList",
            Default="",
        ))

    tls_protocol_version = template.add_parameter(
        Parameter(
            "TlsProtocolVersion",
            Description=
            "CloudFront TLS security policy; see https://amzn.to/2DR91Xq for details.",
            Type="String",
            Default="TLSv1.2_2019",
        ))

    log_retention_days = template.add_parameter(
        Parameter(
            "LogRetentionDays",
            Description=
            "Days to keep CloudFront, S3, and Lambda logs. 0 means indefinite retention.",
            Type="Number",
            AllowedValues=[0] + CLOUDWATCH_LOGS_RETENTION_OPTIONS,
            Default=365,
        ))

    default_ttl_seconds = template.add_parameter(
        Parameter(
            "DefaultTtlSeconds",
            Description="Cache time-to-live when not set by S3 object headers.",
            Type="Number",
            Default=int(datetime.timedelta(minutes=5).total_seconds()),
        ))

    enable_price_class_hack = template.add_parameter(
        Parameter(
            "EnablePriceClassHack",
            Description="Cut your bill in half with this one weird trick.",
            Type="String",
            Default="false",
            AllowedValues=["true", "false"],
        ))

    retention_defined = add_condition(template, "RetentionDefined",
                                      Not(Equals(Ref(log_retention_days), 0)))

    using_price_class_hack = add_condition(
        template, "UsingPriceClassHack",
        Equals(Ref(enable_price_class_hack), "true"))

    using_acm_certificate = add_condition(
        template, "UsingAcmCertificate",
        Not(Equals(Ref(acm_certificate_arn), "")))

    using_hosted_zone = add_condition(template, "UsingHostedZone",
                                      Not(Equals(Ref(hosted_zone_id), "")))

    using_certificate = add_condition(
        template,
        "UsingCertificate",
        Or(Condition(using_acm_certificate), Condition(using_hosted_zone)),
    )

    should_create_certificate = add_condition(
        template,
        "ShouldCreateCertificate",
        And(Condition(using_hosted_zone),
            Not(Condition(using_acm_certificate))),
    )

    using_dns_names = add_condition(template, "UsingDnsNames",
                                    Not(Equals(Select(0, Ref(dns_names)), "")))

    is_primary_region = "IsPrimaryRegion"
    template.add_condition(
        is_primary_region,
        Equals(Region, FindInMap(partition_config, Partition,
                                 "PrimaryRegion")),
    )

    precondition_region_is_primary = template.add_resource(
        WaitConditionHandle(
            "PreconditionIsPrimaryRegionForPartition",
            Condition=is_primary_region,
        ))

    log_ingester_dlq = template.add_resource(
        Queue(
            "LogIngesterDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
        ))

    log_ingester_role = template.add_resource(
        Role(
            "LogIngesterRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[GetAtt(log_ingester_dlq, "Arn")],
                            )
                        ],
                    ),
                )
            ],
        ))

    log_ingester = template.add_resource(
        Function(
            "LogIngester",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(log_ingest.handler.__name__),
            Code=Code(ZipFile=inspect.getsource(log_ingest)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(log_ingester_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(log_ingester_dlq, "Arn")),
        ))

    log_ingester_permission = template.add_resource(
        Permission(
            "LogIngesterPermission",
            FunctionName=GetAtt(log_ingester, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="s3.amazonaws.com",
            SourceAccount=AccountId,
        ))

    log_bucket = template.add_resource(
        Bucket(
            "LogBucket",
            # S3 requires this ACL (regardless of bucket policy) or s3:PutBucketLogging fails.
            # When the CloudFront distribution is created, it adds an additional bucket ACL.
            # That ACL is not possible to model in CloudFormation.
            AccessControl="LogDeliveryWrite",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                LifecycleRule(ExpirationInDays=1, Status="Enabled"),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=1),
                    Status="Enabled",
                ),
            ]),
            NotificationConfiguration=NotificationConfiguration(
                LambdaConfigurations=[
                    LambdaConfigurations(Event="s3:ObjectCreated:*",
                                         Function=GetAtt(log_ingester, "Arn"))
                ]),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # if we use KMS, we can't read the logs
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            DependsOn=[log_ingester_permission],
        ))

    log_ingester_log_group = template.add_resource(
        LogGroup(
            "LogIngesterLogGroup",
            LogGroupName=Join(
                "", ["/aws/lambda/", Ref(log_ingester)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    log_ingester_policy = template.add_resource(
        PolicyType(
            "LogIngesterPolicy",
            Roles=[Ref(log_ingester_role)],
            PolicyName="IngestLogPolicy",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    Region,
                                    AccountId,
                                    "log-group",
                                    "/aws/cloudfront/*",
                                ],
                            ),
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    Region,
                                    AccountId,
                                    "log-group",
                                    "/aws/s3/*",
                                ],
                            ),
                            GetAtt(log_ingester_log_group, "Arn"),
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(log_bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))

    bucket = template.add_resource(
        Bucket(
            "ContentBucket",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                # not supported by CFN yet:
                # LifecycleRule(
                # Transitions=[
                # LifecycleRuleTransition(
                # StorageClass='INTELLIGENT_TIERING',
                # TransitionInDays=1,
                # ),
                # ],
                # Status="Enabled",
                # ),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=7),
                    Status="Enabled",
                )
            ]),
            LoggingConfiguration=LoggingConfiguration(
                DestinationBucketName=Ref(log_bucket), LogFilePrefix="s3/"),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # Origin Access Identities can't use KMS
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
        ))

    origin_access_identity = template.add_resource(
        CloudFrontOriginAccessIdentity(
            "CloudFrontIdentity",
            CloudFrontOriginAccessIdentityConfig=
            CloudFrontOriginAccessIdentityConfig(
                Comment=GetAtt(bucket, "Arn")),
        ))

    bucket_policy = template.add_resource(
        BucketPolicy(
            "ContentBucketPolicy",
            Bucket=Ref(bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "CanonicalUser",
                            GetAtt(origin_access_identity,
                                   "S3CanonicalUserId"),
                        ),
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))

    # Not strictly necessary, as ACLs should take care of this access. However, CloudFront docs
    # state "In some circumstances [...] S3 resets permissions on the bucket to the default value",
    # and this allows logging to work without any ACLs in place.
    log_bucket_policy = template.add_resource(
        BucketPolicy(
            "LogBucketPolicy",
            Bucket=Ref(log_bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join(
                                "/",
                                [GetAtt(log_bucket, "Arn"), "cloudfront", "*"])
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.ListBucket],
                        Resource=[Join("/", [GetAtt(log_bucket, "Arn")])],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service", "s3.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join("/", [GetAtt(log_bucket, "Arn"), "s3", "*"])
                        ],
                    ),
                ],
            ),
        ))

    certificate_validator_dlq = template.add_resource(
        Queue(
            "CertificateValidatorDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
            Condition=should_create_certificate,
        ))

    certificate_validator_role = template.add_resource(
        Role(
            "CertificateValidatorRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[
                                    GetAtt(certificate_validator_dlq, "Arn")
                                ],
                            )
                        ],
                    ),
                )
            ],
            # TODO scope down
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
                "arn:aws:iam::aws:policy/AmazonRoute53FullAccess",
                "arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly",
            ],
            Condition=should_create_certificate,
        ))

    certificate_validator_function = template.add_resource(
        Function(
            "CertificateValidatorFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(certificate_validator.handler.__name__),
            Code=Code(ZipFile=inspect.getsource(certificate_validator)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(certificate_validator_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(certificate_validator_dlq, "Arn")),
            Environment=Environment(
                Variables={
                    certificate_validator.EnvVars.HOSTED_ZONE_ID.name:
                    Ref(hosted_zone_id)
                }),
            Condition=should_create_certificate,
        ))

    certificate_validator_log_group = template.add_resource(
        LogGroup(
            "CertificateValidatorLogGroup",
            LogGroupName=Join(
                "", ["/aws/lambda/",
                     Ref(certificate_validator_function)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
            Condition=should_create_certificate,
        ))

    certificate_validator_rule = template.add_resource(
        Rule(
            "CertificateValidatorRule",
            EventPattern={
                "detail-type": ["AWS API Call via CloudTrail"],
                "detail": {
                    "eventSource": ["acm.amazonaws.com"],
                    "eventName": ["AddTagsToCertificate"],
                    "requestParameters": {
                        "tags": {
                            "key": [certificate_validator_function.title],
                            "value":
                            [GetAtt(certificate_validator_function, "Arn")],
                        }
                    },
                },
            },
            Targets=[
                Target(
                    Id="certificate-validator-lambda",
                    Arn=GetAtt(certificate_validator_function, "Arn"),
                )
            ],
            DependsOn=[certificate_validator_log_group],
            Condition=should_create_certificate,
        ))

    certificate_validator_permission = template.add_resource(
        Permission(
            "CertificateValidatorPermission",
            FunctionName=GetAtt(certificate_validator_function, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="events.amazonaws.com",
            SourceArn=GetAtt(certificate_validator_rule, "Arn"),
            Condition=should_create_certificate,
        ))

    certificate = template.add_resource(
        Certificate(
            "Certificate",
            DomainName=Select(0, Ref(dns_names)),
            SubjectAlternativeNames=Ref(
                dns_names),  # duplicate first name works fine
            ValidationMethod="DNS",
            Tags=Tags(
                **{
                    certificate_validator_function.title:
                    GetAtt(certificate_validator_function, "Arn")
                }),
            DependsOn=[certificate_validator_permission],
            Condition=should_create_certificate,
        ))

    edge_hook_role = template.add_resource(
        Role(
            "EdgeHookRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal(
                            "Service",
                            [
                                "lambda.amazonaws.com",
                                "edgelambda.amazonaws.com"
                            ],
                        ),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
        ))

    edge_hook_function = template.add_resource(
        Function(
            "EdgeHookFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.handler",
            Code=Code(ZipFile=inspect.getsource(edge_hook)),
            MemorySize=128,
            Timeout=3,
            Role=GetAtt(edge_hook_role, "Arn"),
        ))
    edge_hook_function_hash = (hashlib.sha256(
        json.dumps(edge_hook_function.to_dict(),
                   sort_keys=True).encode("utf-8")).hexdigest()[:10].upper())

    edge_hook_version = template.add_resource(
        Version(
            "EdgeHookVersion" + edge_hook_function_hash,
            FunctionName=GetAtt(edge_hook_function, "Arn"),
        ))

    replica_log_group_name = Join(
        "/",
        [
            "/aws/lambda",
            Join(
                ".",
                [
                    FindInMap(partition_config, Partition, "PrimaryRegion"),
                    Ref(edge_hook_function),
                ],
            ),
        ],
    )

    edge_hook_role_policy = template.add_resource(
        PolicyType(
            "EdgeHookRolePolicy",
            PolicyName="write-logs",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    "*",
                                    AccountId,
                                    "log-group",
                                    replica_log_group_name,
                                    "log-stream",
                                    "*",
                                ],
                            ),
                        ],
                    ),
                ],
            ),
            Roles=[Ref(edge_hook_role)],
        ))

    stack_set_administration_role = template.add_resource(
        Role(
            "StackSetAdministrationRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "cloudformation.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
        ))

    stack_set_execution_role = template.add_resource(
        Role(
            "StackSetExecutionRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "AWS", GetAtt(stack_set_administration_role,
                                          "Arn")),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="create-stackset-instances",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.DescribeStacks,
                                    logs.DescribeLogGroups,
                                ],
                                Resource=["*"],
                            ),
                            # stack instances communicate with the CFN service via SNS
                            Statement(
                                Effect=Allow,
                                Action=[sns.Publish],
                                NotResource=[
                                    Join(
                                        ":",
                                        [
                                            "arn", Partition, "sns", "*",
                                            AccountId, "*"
                                        ],
                                    )
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    logs.CreateLogGroup,
                                    logs.DeleteLogGroup,
                                    logs.PutRetentionPolicy,
                                    logs.DeleteRetentionPolicy,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn",
                                            Partition,
                                            "logs",
                                            "*",
                                            AccountId,
                                            "log-group",
                                            replica_log_group_name,
                                            "log-stream",
                                            "",
                                        ],
                                    ),
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.CreateStack,
                                    cloudformation.DeleteStack,
                                    cloudformation.UpdateStack,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn",
                                            Partition,
                                            "cloudformation",
                                            "*",
                                            AccountId,
                                            Join(
                                                "/",
                                                [
                                                    "stack",
                                                    Join(
                                                        "-",
                                                        [
                                                            "StackSet",
                                                            StackName, "*"
                                                        ],
                                                    ),
                                                ],
                                            ),
                                        ],
                                    )
                                ],
                            ),
                        ],
                    ),
                ),
            ],
        ))

    stack_set_administration_role_policy = template.add_resource(
        PolicyType(
            "StackSetAdministrationRolePolicy",
            PolicyName="assume-execution-role",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[sts.AssumeRole],
                        Resource=[GetAtt(stack_set_execution_role, "Arn")],
                    ),
                ],
            ),
            Roles=[Ref(stack_set_administration_role)],
        ))

    edge_log_groups = template.add_resource(
        StackSet(
            "EdgeLambdaLogGroupStackSet",
            AdministrationRoleARN=GetAtt(stack_set_administration_role, "Arn"),
            ExecutionRoleName=Ref(stack_set_execution_role),
            StackSetName=Join("-", [StackName, "EdgeLambdaLogGroup"]),
            PermissionModel="SELF_MANAGED",
            Description="Multi-region log groups for Lambda@Edge replicas",
            Parameters=[
                StackSetParameter(
                    ParameterKey="LogGroupName",
                    ParameterValue=replica_log_group_name,
                ),
                StackSetParameter(
                    ParameterKey="LogRetentionDays",
                    ParameterValue=Ref(log_retention_days),
                ),
            ],
            OperationPreferences=OperationPreferences(
                FailureToleranceCount=0,
                MaxConcurrentPercentage=100,
            ),
            StackInstancesGroup=[
                StackInstances(
                    DeploymentTargets=DeploymentTargets(Accounts=[AccountId]),
                    Regions=FindInMap(partition_config, Partition,
                                      "DefaultRegions"),
                )
            ],
            TemplateBody=create_log_group_template().to_json(indent=None),
            DependsOn=[stack_set_administration_role_policy],
        ))

    price_class_distribution = template.add_resource(
        Distribution(
            "PriceClassDistribution",
            DistributionConfig=DistributionConfig(
                Comment="Dummy distribution used for price class hack",
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    ViewerProtocolPolicy="allow-all",
                    ForwardedValues=ForwardedValues(QueryString=False),
                ),
                Enabled=True,
                Origins=[
                    Origin(Id="default",
                           DomainName=GetAtt(bucket, "DomainName"))
                ],
                IPV6Enabled=True,
                ViewerCertificate=ViewerCertificate(
                    CloudFrontDefaultCertificate=True),
                PriceClass="PriceClass_All",
            ),
            Condition=using_price_class_hack,
        ))

    distribution = template.add_resource(
        Distribution(
            "ContentDistribution",
            DistributionConfig=DistributionConfig(
                Enabled=True,
                Aliases=If(using_dns_names, Ref(dns_names), NoValue),
                Logging=Logging(Bucket=GetAtt(log_bucket, "DomainName"),
                                Prefix="cloudfront/"),
                DefaultRootObject="index.html",
                Origins=[
                    Origin(
                        Id="default",
                        DomainName=GetAtt(bucket, "DomainName"),
                        S3OriginConfig=S3OriginConfig(
                            OriginAccessIdentity=Join(
                                "",
                                [
                                    "origin-access-identity/cloudfront/",
                                    Ref(origin_access_identity),
                                ],
                            )),
                    )
                ],
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    Compress=True,
                    ForwardedValues=ForwardedValues(QueryString=False),
                    ViewerProtocolPolicy="redirect-to-https",
                    DefaultTTL=Ref(default_ttl_seconds),
                    LambdaFunctionAssociations=[
                        LambdaFunctionAssociation(
                            EventType="origin-request",
                            LambdaFunctionARN=Ref(edge_hook_version),
                        )
                    ],
                ),
                HttpVersion="http2",
                IPV6Enabled=True,
                ViewerCertificate=ViewerCertificate(
                    AcmCertificateArn=If(
                        using_acm_certificate,
                        Ref(acm_certificate_arn),
                        If(using_hosted_zone, Ref(certificate), NoValue),
                    ),
                    SslSupportMethod=If(using_certificate, "sni-only",
                                        NoValue),
                    CloudFrontDefaultCertificate=If(using_certificate, NoValue,
                                                    True),
                    MinimumProtocolVersion=Ref(tls_protocol_version),
                ),
                PriceClass=If(using_price_class_hack, "PriceClass_100",
                              "PriceClass_All"),
            ),
            DependsOn=[
                bucket_policy,
                log_ingester_policy,
                edge_log_groups,
                precondition_region_is_primary,
            ],
        ))

    distribution_log_group = template.add_resource(
        LogGroup(
            "DistributionLogGroup",
            LogGroupName=Join(
                "", ["/aws/cloudfront/", Ref(distribution)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    bucket_log_group = template.add_resource(
        LogGroup(
            "BucketLogGroup",
            LogGroupName=Join("", ["/aws/s3/", Ref(bucket)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    template.add_output(Output("DistributionId", Value=Ref(distribution)))

    template.add_output(
        Output("DistributionDomain", Value=GetAtt(distribution, "DomainName")))

    template.add_output(
        Output(
            "DistributionDnsTarget",
            Value=If(
                using_price_class_hack,
                GetAtt(price_class_distribution, "DomainName"),
                GetAtt(distribution, "DomainName"),
            ),
        ))

    template.add_output(
        Output(
            "DistributionUrl",
            Value=Join("",
                       ["https://",
                        GetAtt(distribution, "DomainName"), "/"]),
        ))

    template.add_output(Output("ContentBucketArn", Value=GetAtt(bucket,
                                                                "Arn")))

    return template
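For completeness, the template built by create_template() can be rendered the same way the other examples render theirs; a minimal sketch, with the module entry point being an assumption:

if __name__ == "__main__":
    print(create_template().to_json())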