Example #1
def CreateLifeCycleRules(id,
                         prefix,
                         expirationInDay,
                         status,
                         transitions=None):
    """Return an s3.LifecycleRule, including Transitions only when provided."""
    if transitions is not None:
        myRule = s3.LifecycleRule(Id=id,
                                  Prefix=prefix,
                                  ExpirationInDays=expirationInDay,
                                  Status=status,
                                  Transitions=transitions)
    else:
        myRule = s3.LifecycleRule(Id=id,
                                  Prefix=prefix,
                                  ExpirationInDays=expirationInDay,
                                  Status=status)
    return myRule
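
A minimal usage sketch for the helper above (the bucket title, rule id, and prefix are placeholders; assumes troposphere is installed and the helper is in scope):

from troposphere import Template, s3

t = Template()
rule = CreateLifeCycleRules(id='ExpireTmpObjects',
                            prefix='tmp/',
                            expirationInDay=30,
                            status='Enabled')
t.add_resource(s3.Bucket('ExampleBucket',
                         LifecycleConfiguration=s3.LifecycleConfiguration(
                             Rules=[rule])))
print(t.to_json())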
Example #2
    def create_template(self):
        """Create template (main function called by Stacker)."""
        template = self.template
        # variables = self.get_variables()
        template.add_version('2010-09-09')
        template.add_description('Static Website - Dependencies')

        # Resources
        awslogbucket = template.add_resource(
            s3.Bucket('AWSLogBucket',
                      AccessControl=s3.Private,
                      VersioningConfiguration=s3.VersioningConfiguration(
                          Status='Enabled')))
        template.add_output(
            Output('AWSLogBucketName',
                   Description='Name of bucket storing AWS logs',
                   Value=awslogbucket.ref()))

        template.add_resource(
            s3.BucketPolicy(
                'AllowAWSLogWriting',
                Bucket=awslogbucket.ref(),
                PolicyDocument=Policy(
                    Version='2012-10-17',
                    Statement=[
                        Statement(
                            Action=[awacs.s3.PutObject],
                            Effect=Allow,
                            Principal=AWSPrincipal(
                                Join(':',
                                     ['arn:aws:iam:', AccountId, 'root'])),
                            Resource=[
                                Join('', [
                                    'arn:aws:s3:::',
                                    awslogbucket.ref(), '/*'
                                ])
                            ])
                    ])))
        artifacts = template.add_resource(
            s3.Bucket(
                'Artifacts',
                AccessControl=s3.Private,
                LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                    s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                     Status='Enabled')
                ]),
                VersioningConfiguration=s3.VersioningConfiguration(
                    Status='Enabled')))
        template.add_output(
            Output('ArtifactsBucketName',
                   Description='Name of bucket storing artifacts',
                   Value=artifacts.ref()))
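
For reference, the AWSPrincipal Join in the bucket policy above renders to the account-root principal once CloudFormation substitutes AWS::AccountId; a quick sketch of the equivalent string operation (the account id is a placeholder):

parts = ['arn:aws:iam:', '123456789012', 'root']
print(':'.join(parts))  # -> arn:aws:iam::123456789012:root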
Example #3
    def create_template(self) -> None:
        """Create a template from the Blueprint."""
        self.template.set_description("CFNgin Bucket")
        self.template.set_version("2010-09-09")

        bucket = s3.Bucket(
            "Bucket",
            template=self.template,
            AccessControl=s3.Private,
            BucketName=self.bucket_name,
            DeletionPolicy=self.variables["DeletionPolicy"],
            LifecycleConfiguration=s3.LifecycleConfiguration(
                Rules=[
                    s3.LifecycleRule(
                        NoncurrentVersionExpirationInDays=30, Status="Enabled"
                    )
                ]
            ),
            VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
        )
        self.add_output(f"{bucket.title}Name", bucket.ref())
        self.add_output(f"{bucket.title}Arn", bucket.get_att("Arn"))
Example #4
bucket = t.add_resource(
    s3.Bucket(
        'Bucket',
        BucketName=If('HasBucketName', Ref(param_bucket_name),
                      Ref(AWS_NO_VALUE)),
        LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
            # Add a lifecycle rule to the bucket
            s3.LifecycleRule(
                # Rule attributes
                Id='S3BucketRule1',
                Prefix='',
                Status='Enabled',
                # Applies to current objects
                # ExpirationInDays=3650,
                Transitions=[
                    s3.LifecycleRuleTransition(
                        StorageClass='STANDARD_IA',
                        TransitionInDays=365,
                    ),
                ],
                # Applies to Non Current objects
                NoncurrentVersionExpirationInDays=365,
                NoncurrentVersionTransitions=[
                    s3.NoncurrentVersionTransition(
                        StorageClass='STANDARD_IA',
                        TransitionInDays=30,
                    ),
                ],
            ),
        ]),
    ))

bucket_policy = t.add_resource(
    s3.BucketPolicy(
Example #5
def generate(account_list=None, region_list=None, file_location=None, output_keys=False, dry_run=False):
    """CloudFormation template generator for use in creating the resources required to capture logs in a centrally managed account per UCSD standards."""
    if isinstance(account_list, tuple):
        account_list = list(account_list)

    parameter_groups = []

    region_list = region_list if region_list else ['us-west-1', 'us-west-2', 'us-east-1', 'us-east-2']
    t = Template()
    t.add_version("2010-09-09")
    t.add_description("UCSD Log Target AWS CloudFormation Template - this CFn template configures a given account to receive logs from other accounts so as to aggregate and then optionally forward those logs on to the UCSD Splunk installation.")

    # Create Kinesis and IAM Roles
    log_stream_shard_count = t.add_parameter(Parameter("LogStreamShardCount",
                                             Description="Number of shards to create within the AWS Kinesis stream created to handle CloudWatch Logs.",
                                             Type="Number",
                                             MinValue=1,
                                             MaxValue=64,
                                             Default=1))

    log_stream_retention_period = t.add_parameter(Parameter("LogStreamRetentionPeriod",
                                                  Description="Number of hours to retain logs in the Kinesis stream.",
                                                  Type="Number",
                                                  MinValue=24,
                                                  MaxValue=120,
                                                  Default=24))

    parameter_groups.append({'Label': {'default': 'Log Stream Inputs'},
                             'Parameters': [log_stream_shard_count.name, log_stream_retention_period.name]})


    log_stream = t.add_resource(k.Stream("LogStream",
                                RetentionPeriodHours=Ref(log_stream_retention_period),
                                ShardCount=Ref(log_stream_shard_count)))

    firehose_bucket = t.add_resource(s3.Bucket('LogS3DeliveryBucket'))

    firehose_delivery_role = t.add_resource(iam.Role('LogS3DeliveryRole',
                                            AssumeRolePolicyDocument=Policy(
                                                Statement=[Statement(
                                                Effect=Allow,
                                                Action=[AssumeRole],
                                                Principal=Principal('Service', 'firehose.amazonaws.com'),
                                                Condition=Condition(StringEquals('sts:ExternalId', AccountId)))])))

    log_s3_delivery_policy = t.add_resource(iam.PolicyType('LogS3DeliveryPolicy',
                                           Roles=[Ref(firehose_delivery_role)],
                                           PolicyName='LogS3DeliveryPolicy',
                                           PolicyDocument=Policy(
                                               Statement=[Statement(
                                                   Effect=Allow,
                                                   Action=[as3.AbortMultipartUpload,
                                                           as3.GetBucketLocation,
                                                           as3.GetObject,
                                                           as3.ListBucket,
                                                           as3.ListBucketMultipartUploads,
                                                           as3.PutObject],
                                                   Resource=[
                                                        Join('', ['arn:aws:s3:::', Ref(firehose_bucket)]),
                                                        Join('', ['arn:aws:s3:::', Ref(firehose_bucket), '/*'])]),
                                                Statement(
                                                    Effect=Allow,
                                                    Action=[akinesis.Action('Get*'), akinesis.DescribeStream, akinesis.ListStreams],
                                                    Resource=[
                                                        GetAtt(log_stream, 'Arn')
                                                    ])])))

    s3_firehose = t.add_resource(fh.DeliveryStream('LogToS3DeliveryStream',
                                 DependsOn=[log_s3_delivery_policy.name],
                                 DeliveryStreamName='LogToS3DeliveryStream',
                                 DeliveryStreamType='KinesisStreamAsSource',
                                 KinesisStreamSourceConfiguration=fh.KinesisStreamSourceConfiguration(
                                    KinesisStreamARN=GetAtt(log_stream, 'Arn'),
                                    RoleARN=GetAtt(firehose_delivery_role, 'Arn')
                                 ),
                                 S3DestinationConfiguration=fh.S3DestinationConfiguration(
                                    BucketARN=GetAtt(firehose_bucket, 'Arn'),
                                    BufferingHints=fh.BufferingHints(
                                        IntervalInSeconds=300,
                                        SizeInMBs=50
                                    ),
                                    CompressionFormat='UNCOMPRESSED',
                                    Prefix='firehose/',
                                    RoleARN=GetAtt(firehose_delivery_role, 'Arn'),
                                 )))

    t.add_output(Output('SplunkKinesisLogStream',
                 Value=GetAtt(log_stream, 'Arn'),
                 Description='ARN of the kinesis stream for log aggregation.'))


    # Generate Bucket with Lifecycle Policies

    ct_s3_key_prefix = t.add_parameter(Parameter('CloudTrailKeyPrefix',
                                       Type='String',
                                       Default='',
                                       Description='Key name prefix for logs being sent to S3'))

    bucket_name = t.add_parameter(Parameter("BucketName",
                                  Description="Name to assign to the central logging retention bucket",
                                  Type="String",
                                  AllowedPattern="([a-z]|[0-9])+",
                                  MinLength=2,
                                  MaxLength=64))

    glacier_migration_days = t.add_parameter(Parameter("LogMoveToGlacierInDays",
                                             Description="Number of days until logs are expired from S3 and transitioned to Glacier",
                                             Type="Number",
                                             Default=365))

    glacier_deletion_days = t.add_parameter(Parameter("LogDeleteFromGlacierInDays",
                                            Description="Number of days until logs are expired from Glacier and deleted",
                                            Type="Number",
                                            Default=365*7))

    parameter_groups.append({'Label': {'default': 'S3 Log Destination Parameters'},
                             'Parameters': [bucket_name.name, ct_s3_key_prefix.name, glacier_migration_days.name, glacier_deletion_days.name]})

    dead_letter_queue = t.add_resource(sqs.Queue('deadLetterQueue'))

    queue = t.add_resource(sqs.Queue('s3DeliveryQueue',
                           MessageRetentionPeriod=14*24*60*60,  # 14 d * 24 h * 60 m * 60 s
                           # 5 m * 60 s per Splunk docs: http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWS#Configure_SQS
                           VisibilityTimeout=5*60,
                           RedrivePolicy=sqs.RedrivePolicy(
                               deadLetterTargetArn=GetAtt(dead_letter_queue, 'Arn'),
                               maxReceiveCount=10
                           )))

    t.add_output(Output('SplunkS3Queue',
                 Value=GetAtt(queue, 'Arn'),
                 Description='Queue for Splunk SQS S3 ingest'))

    t.add_output(Output('SplunkS3DeadLetterQueue',
                Value=GetAtt(dead_letter_queue, 'Arn'),
                Description="Dead letter queue for Splunk SQS S3 ingest"))


    t.add_resource(sqs.QueuePolicy('s3DeliveryQueuePolicy',
                   PolicyDocument=Policy(
                   Statement=[Statement(
                       Effect=Allow,
                       Principal=Principal("AWS", "*"),
                       Action=[asqs.SendMessage],
                       Resource=[GetAtt(queue, 'Arn')],
                       Condition=Condition(ArnLike("aws:SourceArn", Join('', ["arn:aws:s3:*:*:", Ref(bucket_name)]))))]),
                   Queues=[Ref(queue)]))

    bucket = t.add_resource(s3.Bucket("LogDeliveryBucket",
                            DependsOn=[log_stream.name, queue.name],
                            BucketName=Ref(bucket_name),
                            AccessControl="LogDeliveryWrite",
                            NotificationConfiguration=s3.NotificationConfiguration(
                                QueueConfigurations=[s3.QueueConfigurations(
                                    Event="s3:ObjectCreated:*",
                                    Queue=GetAtt(queue, 'Arn'))]),
                            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                                s3.LifecycleRule(
                                    Id="S3ToGlacierTransition",
                                    Status="Enabled",
                                    ExpirationInDays=Ref(glacier_deletion_days),
                                    Transition=s3.LifecycleRuleTransition(
                                        StorageClass="Glacier",
                                        TransitionInDays=Ref(glacier_migration_days)))])))

    bucket_policy = t.add_resource(s3.BucketPolicy("LogDeliveryBucketPolicy",
                                    Bucket=Ref(bucket),
                                    PolicyDocument=Policy(
                                        Statement=[
                                            Statement(
                                                Effect="Allow",
                                                Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                                                Action=[GetBucketAcl],
                                                Resource=[GetAtt(bucket, 'Arn')]),
                                            Statement(
                                                Effect="Allow",
                                                Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                                                Action=[PutObject],
                                                Condition=Condition(StringEquals({"s3:x-amz-acl": "bucket-owner-full-control"})),
                                                Resource=[Join('', [GetAtt(bucket, "Arn"), Ref(ct_s3_key_prefix), "/AWSLogs/", acct_id, "/*"]) for acct_id in account_list])])))

    splunk_sqs_s3_user = t.add_resource(iam.User('splunkS3SQSUser',
                                        Path='/',
                                        UserName='******'))

    splunk_user_policy = t.add_resource(_generate_splunk_policy(users=[Ref(splunk_sqs_s3_user)]))

    t.add_output(Output('BucketName',
                 Description="Name of the bucket for CloudTrail log delivery",
                 Value=Ref(bucket)))

    # Log destination setup

    cwl_to_kinesis_role = t.add_resource(iam.Role('CWLtoKinesisRole',
                                         AssumeRolePolicyDocument=Policy(
                                            Statement=[Statement(
                                                Effect=Allow,
                                                Action=[AssumeRole],
                                                Principal=Principal("Service", Join('', ["logs.", Region, ".amazonaws.com"])))])))

    cwl_to_kinesis_policy_link = t.add_resource(iam.PolicyType('CWLtoKinesisPolicy',
                                               PolicyName='CWLtoKinesisPolicy',
                                               Roles=[Ref(cwl_to_kinesis_role)],
                                               PolicyDocument=Policy(
                                                 Statement=[
                                                     Statement(
                                                         Effect=Allow,
                                                         Resource=[GetAtt(log_stream, 'Arn')],
                                                         Action=[akinesis.PutRecord]),
                                                     Statement(
                                                         Effect=Allow,
                                                         Resource=[GetAtt(cwl_to_kinesis_role, 'Arn')],
                                                         Action=[IAMPassRole])])))

    log_destination = t.add_resource(cwl.Destination('CWLtoKinesisDestination',
                                     DependsOn=[cwl_to_kinesis_policy_link.name],
                                     DestinationName='CWLtoKinesisDestination',
                                     DestinationPolicy=_generate_log_destination_policy_test('CWLtoKinesisDestination', account_list),
                                     RoleArn=GetAtt(cwl_to_kinesis_role, 'Arn'),
                                     TargetArn=GetAtt(log_stream, 'Arn')))

    t.add_output(Output('childAccountLogDeliveryDestinationArn',
                 Value=GetAtt(log_destination, 'Arn'),
                 Description='Log Destination to specify when deploying the source cloudformation template in other accounts.'))

    if output_keys:
        splunk_user_creds = t.add_resource(iam.AccessKey('splunkAccountUserCreds',
                                           UserName=Ref(splunk_sqs_s3_user)))

        t.add_output(Output('splunkUserAccessKey',
                     Description='AWS Access Key for the user created for splunk to use when accessing logs',
                     Value=Ref(splunk_user_creds)))

        t.add_output(Output('splunkUserSecretKey',
                     Description='AWS Secret Access Key ID for the user created for splunk to use when accessing logs',
                     Value=GetAtt(splunk_user_creds, 'SecretAccessKey')))


    t.add_output(Output('splunkCWLRegion',
                 Description="The AWS region that contains the data. In aws_cloudwatch_logs_tasks.conf, enter the region ID.",
                 Value=Region))

    t.add_output(Output("DeploymentAccount",
                 Value=AccountId,
                 Description="Convenience Output for referencing AccountID of the log aggregation account"))

    t.add_metadata({"AWS::CloudFormation::Interface": {"ParameterGroups": parameter_groups}})

    if dry_run:
        print(t.to_json())
    else:
        save_path = file_location if file_location else os.path.join(log_aggregation_cf, 'log_targets.json')
        with open(save_path, 'w') as f:
            f.write(t.to_json())
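
A sketch of how the generator above might be invoked (the account ids and region are placeholders):

# dry_run=True prints the rendered template JSON instead of writing it to disk.
generate(account_list=['111111111111', '222222222222'],
         region_list=['us-west-2'],
         dry_run=True)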
Example #6
bucket = t.add_resource(
    s3.Bucket(
        'Bucket',
        BucketName=If('HasBucketName', Ref(param_bucket_name),
                      Ref(AWS_NO_VALUE)),
        LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
            # Add a lifecycle rule to the bucket
            s3.LifecycleRule(
                # Rule attributes
                Id='S3BucketRule1',
                Prefix='',
                Status='Enabled',
                # Applies to current objects
                ExpirationInDays=Ref(param_retire_days),
                Transitions=[
                    s3.LifecycleRuleTransition(
                        StorageClass='STANDARD_IA',
                        TransitionInDays=Ref(param_ia_days),
                    ),
                ],
                # Applies to Non Current objects
                # NoncurrentVersionExpirationInDays=90,
                # NoncurrentVersionTransitions=[
                #     s3.NoncurrentVersionTransition(
                #         StorageClass='STANDARD_IA',
                #         TransitionInDays=30,
                #     ),
                # ],
            ),
        ]),
    ))

bucket_policy = t.add_resource(
    s3.BucketPolicy(
Example #7
    def create_template(self):
        """Create template (main function called by Stacker)."""
        template = self.template
        variables = self.get_variables()
        template.add_version('2010-09-09')
        template.add_description('Terraform State Resources')

        # Conditions
        for i in ['BucketName', 'TableName']:
            template.add_condition(
                "%sOmitted" % i,
                Or(Equals(variables[i].ref, ''),
                   Equals(variables[i].ref, 'undefined')))

        # Resources
        terraformlocktable = template.add_resource(
            dynamodb.Table(
                'TerraformStateTable',
                AttributeDefinitions=[
                    dynamodb.AttributeDefinition(AttributeName='LockID',
                                                 AttributeType='S')
                ],
                KeySchema=[
                    dynamodb.KeySchema(AttributeName='LockID', KeyType='HASH')
                ],
                ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                    ReadCapacityUnits=2, WriteCapacityUnits=2),
                TableName=If('TableNameOmitted', NoValue,
                             variables['TableName'].ref)))
        template.add_output(
            Output('%sName' % terraformlocktable.title,
                   Description='Name of DynamoDB table for Terraform state',
                   Value=terraformlocktable.ref()))

        terraformstatebucket = template.add_resource(
            s3.Bucket(
                'TerraformStateBucket',
                AccessControl=s3.Private,
                BucketName=If('BucketNameOmitted', NoValue,
                              variables['BucketName'].ref),
                LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                    s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                     Status='Enabled')
                ]),
                VersioningConfiguration=s3.VersioningConfiguration(
                    Status='Enabled')))
        template.add_output(
            Output('%sName' % terraformstatebucket.title,
                   Description='Name of bucket storing Terraform state',
                   Value=terraformstatebucket.ref()))
        template.add_output(
            Output('%sArn' % terraformstatebucket.title,
                   Description='Arn of bucket storing Terraform state',
                   Value=terraformstatebucket.get_att('Arn')))

        managementpolicy = template.add_resource(
            iam.ManagedPolicy(
                'ManagementPolicy',
                Description='Managed policy for Terraform state management.',
                Path='/',
                PolicyDocument=PolicyDocument(
                    Version='2012-10-17',
                    Statement=[
                        # https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions
                        Statement(
                            Action=[awacs.s3.ListBucket],
                            Effect=Allow,
                            Resource=[terraformstatebucket.get_att('Arn')]),
                        Statement(
                            Action=[awacs.s3.GetObject, awacs.s3.PutObject],
                            Effect=Allow,
                            Resource=[
                                Join('', [
                                    terraformstatebucket.get_att('Arn'), '/*'
                                ])
                            ]),
                        Statement(Action=[
                            awacs.dynamodb.GetItem, awacs.dynamodb.PutItem,
                            awacs.dynamodb.DeleteItem
                        ],
                                  Effect=Allow,
                                  Resource=[terraformlocktable.get_att('Arn')])
                    ])))
        template.add_output(
            Output('PolicyArn',
                   Description='Managed policy Arn',
                   Value=managementpolicy.ref()))
Example #8
bucket = t.add_resource(
    s3.Bucket('Bucket',
              BucketName=If('HasBucketName', Ref(param_bucket_name),
                            Ref(AWS_NO_VALUE)),
              VersioningConfiguration=s3.VersioningConfiguration(
                  Status=Ref(param_versioning)),
              LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                  s3.LifecycleRule(
                      Id='S3BucketRule1',
                      Prefix='',
                      Status='Enabled',
                      Transitions=[
                          s3.LifecycleRuleTransition(
                              StorageClass='STANDARD_IA',
                              TransitionInDays=365,
                          ),
                      ],
                      NoncurrentVersionExpirationInDays=90,
                      NoncurrentVersionTransitions=[
                          s3.NoncurrentVersionTransition(
                              StorageClass='STANDARD_IA',
                              TransitionInDays=30,
                          ),
                      ],
                  ),
              ])))

user = t.add_resource(
    iam.User(
        'BackupUser',
        Policies=[
            iam.Policy(
Example #9
    def create_template(self):
        """Create template (main function called by Stacker)."""
        template = self.template
        variables = self.get_variables()
        template.add_version('2010-09-09')
        template.add_description('Static Website - Bucket and Distribution')

        # Conditions
        template.add_condition(
            'AcmCertSpecified',
            And(Not(Equals(variables['AcmCertificateArn'].ref, '')),
                Not(Equals(variables['AcmCertificateArn'].ref, 'undefined'))))
        template.add_condition(
            'AliasesSpecified',
            And(Not(Equals(Select(0, variables['Aliases'].ref), '')),
                Not(Equals(Select(0, variables['Aliases'].ref), 'undefined'))))
        template.add_condition(
            'CFLoggingEnabled',
            And(Not(Equals(variables['LogBucketName'].ref, '')),
                Not(Equals(variables['LogBucketName'].ref, 'undefined'))))
        template.add_condition(
            'DirectoryIndexSpecified',
            And(Not(Equals(variables['RewriteDirectoryIndex'].ref, '')),
                Not(Equals(variables['RewriteDirectoryIndex'].ref,
                           'undefined')))  # noqa
        )
        template.add_condition(
            'WAFNameSpecified',
            And(Not(Equals(variables['WAFWebACL'].ref, '')),
                Not(Equals(variables['WAFWebACL'].ref, 'undefined'))))

        # Resources
        oai = template.add_resource(
            cloudfront.CloudFrontOriginAccessIdentity(
                'OAI',
                CloudFrontOriginAccessIdentityConfig=cloudfront.CloudFrontOriginAccessIdentityConfig(  # noqa pylint: disable=line-too-long
                    Comment='CF access to website')))

        bucket = template.add_resource(
            s3.Bucket(
                'Bucket',
                AccessControl=s3.Private,
                LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                    s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                     Status='Enabled')
                ]),
                VersioningConfiguration=s3.VersioningConfiguration(
                    Status='Enabled'),
                WebsiteConfiguration=s3.WebsiteConfiguration(
                    IndexDocument='index.html', ErrorDocument='error.html')))
        template.add_output(
            Output('BucketName',
                   Description='Name of website bucket',
                   Value=bucket.ref()))

        allowcfaccess = template.add_resource(
            s3.BucketPolicy(
                'AllowCFAccess',
                Bucket=bucket.ref(),
                PolicyDocument=PolicyDocument(
                    Version='2012-10-17',
                    Statement=[
                        Statement(
                            Action=[awacs.s3.GetObject],
                            Effect=Allow,
                            Principal=Principal(
                                'CanonicalUser',
                                oai.get_att('S3CanonicalUserId')),
                            Resource=[Join('', [bucket.get_att('Arn'), '/*'])])
                    ])))

        cfdirectoryindexrewriterole = template.add_resource(
            iam.Role('CFDirectoryIndexRewriteRole',
                     Condition='DirectoryIndexSpecified',
                     AssumeRolePolicyDocument=PolicyDocument(
                         Version='2012-10-17',
                         Statement=[
                             Statement(Effect=Allow,
                                       Action=[awacs.sts.AssumeRole],
                                       Principal=Principal(
                                           'Service', [
                                               'lambda.amazonaws.com',
                                               'edgelambda.amazonaws.com'
                                           ]))
                         ]),
                     ManagedPolicyArns=[
                         IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole'
                     ]))

        cfdirectoryindexrewrite = template.add_resource(
            awslambda.Function(
                'CFDirectoryIndexRewrite',
                Condition='DirectoryIndexSpecified',
                Code=awslambda.Code(ZipFile=Join(
                    '',
                    [
                        "'use strict';\n",
                        "exports.handler = (event, context, callback) => {\n",
                        "\n",
                        "    // Extract the request from the CloudFront event that is sent to Lambda@Edge\n",  # noqa pylint: disable=line-too-long
                        "    var request = event.Records[0].cf.request;\n",
                        "    // Extract the URI from the request\n",
                        "    var olduri = request.uri;\n",
                        "    // Match any '/' that occurs at the end of a URI. Replace it with a default index\n",  # noqa pylint: disable=line-too-long
                        "    var newuri = olduri.replace(/\\/$/, '\\/",
                        variables['RewriteDirectoryIndex'].ref,
                        "');\n",  # noqa
                        "    // Log the URI as received by CloudFront and the new URI to be used to fetch from origin\n",  # noqa pylint: disable=line-too-long
                        "    console.log(\"Old URI: \" + olduri);\n",
                        "    console.log(\"New URI: \" + newuri);\n",
                        "    // Replace the received URI with the URI that includes the index page\n",  # noqa pylint: disable=line-too-long
                        "    request.uri = newuri;\n",
                        "    // Return to CloudFront\n",
                        "    return callback(null, request);\n",
                        "\n",
                        "};\n"
                    ])),
                Description='Rewrites CF directory HTTP requests to default page',  # noqa
                Handler='index.handler',
                Role=cfdirectoryindexrewriterole.get_att('Arn'),
                Runtime='nodejs8.10'))

        # Generating a unique resource name here for the Lambda version, so it
        # updates automatically if the lambda code changes
        code_hash = hashlib.md5(
            str(cfdirectoryindexrewrite.properties['Code'].
                properties['ZipFile'].to_dict()).encode()  # noqa pylint: disable=line-too-long
        ).hexdigest()

        cfdirectoryindexrewritever = template.add_resource(
            awslambda.Version('CFDirectoryIndexRewriteVer' + code_hash,
                              Condition='DirectoryIndexSpecified',
                              FunctionName=cfdirectoryindexrewrite.ref()))

        cfdistribution = template.add_resource(
            cloudfront.Distribution(
                'CFDistribution',
                DependsOn=allowcfaccess.title,
                DistributionConfig=cloudfront.DistributionConfig(
                    Aliases=If('AliasesSpecified', variables['Aliases'].ref,
                               NoValue),
                    Origins=[
                        cloudfront.Origin(
                            DomainName=Join(
                                '.', [bucket.ref(), 's3.amazonaws.com']),
                            S3OriginConfig=cloudfront.S3Origin(
                                OriginAccessIdentity=Join(
                                    '', [
                                        'origin-access-identity/cloudfront/',
                                        oai.ref()
                                    ])),
                            Id='S3Origin')
                    ],
                    DefaultCacheBehavior=cloudfront.DefaultCacheBehavior(
                        AllowedMethods=['GET', 'HEAD'],
                        Compress=False,
                        DefaultTTL='86400',
                        ForwardedValues=cloudfront.ForwardedValues(
                            Cookies=cloudfront.Cookies(Forward='none'),
                            QueryString=False,
                        ),
                        LambdaFunctionAssociations=If(
                            'DirectoryIndexSpecified',
                            [
                                cloudfront.LambdaFunctionAssociation(
                                    EventType='origin-request',
                                    LambdaFunctionARN=cfdirectoryindexrewritever.ref()  # noqa
                                )
                            ],
                            NoValue),
                        TargetOriginId='S3Origin',
                        ViewerProtocolPolicy='redirect-to-https'),
                    DefaultRootObject='index.html',
                    Logging=If(
                        'CFLoggingEnabled',
                        cloudfront.Logging(Bucket=Join('.', [
                            variables['LogBucketName'].ref, 's3.amazonaws.com'
                        ])), NoValue),
                    PriceClass=variables['PriceClass'].ref,
                    Enabled=True,
                    WebACLId=If('WAFNameSpecified', variables['WAFWebACL'].ref,
                                NoValue),
                    ViewerCertificate=If(
                        'AcmCertSpecified',
                        cloudfront.ViewerCertificate(
                            AcmCertificateArn=variables['AcmCertificateArn'].ref,  # noqa
                            SslSupportMethod='sni-only'),
                        NoValue))))
        template.add_output(
            Output('CFDistributionId',
                   Description='CloudFront distribution ID',
                   Value=cfdistribution.ref()))
        template.add_output(
            Output('CFDistributionDomainName',
                   Description='CloudFront distribution domain name',
                   Value=cfdistribution.get_att('DomainName')))
Example #10
    def add_expiration_rule(self, path, days=30, enabled=True):
        """Append an expiration lifecycle rule for objects under a prefix."""
        lcr = s3.LifecycleRule(ExpirationInDays=days,
                               Prefix=path,
                               Status=('Enabled' if enabled else 'Disabled'))
        self.lifecycle_rules.append(lcr)
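
A minimal sketch of the kind of builder class the method above could belong to (the class name and build method are assumptions, not from the source):

from troposphere import s3

class LifecycleBucketBuilder:
    """Collect lifecycle rules, then emit a troposphere Bucket."""

    def __init__(self):
        self.lifecycle_rules = []

    def add_expiration_rule(self, path, days=30, enabled=True):
        lcr = s3.LifecycleRule(ExpirationInDays=days,
                               Prefix=path,
                               Status=('Enabled' if enabled else 'Disabled'))
        self.lifecycle_rules.append(lcr)

    def build(self, title):
        return s3.Bucket(title,
                         LifecycleConfiguration=s3.LifecycleConfiguration(
                             Rules=self.lifecycle_rules))

builder = LifecycleBucketBuilder()
builder.add_expiration_rule('logs/', days=90)
bucket = builder.build('LogBucket')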
Example #11
    def create_template(self):
        """Create template (main function called by Stacker)."""
        template = self.template
        variables = self.get_variables()
        template.set_version('2010-09-09')
        template.set_description('App - Build Pipeline')

        # Resources
        boundary_arn = Join('', [
            'arn:', Partition, ':iam::', AccountId, ':policy/',
            variables['RolePermissionsBoundaryName'].ref
        ])

        # Repo image limit is 1000 by default; this lambda function will prune
        # old images
        image_param_path = Join(
            '', ['/', variables['AppPrefix'].ref, '/current-hash'])
        image_param_arn = Join('', [
            'arn:', Partition, ':ssm:', Region, ':', AccountId, ':parameter',
            image_param_path
        ])
        ecr_repo_arn = Join('', [
            'arn:', Partition, ':ecr:', Region, ':', AccountId, ':repository/',
            variables['EcrRepoName'].ref
        ])
        cleanuplambdarole = template.add_resource(
            iam.Role('CleanupLambdaRole',
                     AssumeRolePolicyDocument=make_simple_assume_policy(
                         'lambda.amazonaws.com'),
                     ManagedPolicyArns=[
                         IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole'
                     ],
                     PermissionsBoundary=boundary_arn,
                     Policies=[
                         iam.Policy(
                             PolicyName=Join(
                                 '',
                                 [variables['AppPrefix'].ref, '-ecrcleanup']),
                             PolicyDocument=PolicyDocument(
                                 Version='2012-10-17',
                                 Statement=[
                                     Statement(Action=[awacs.ssm.GetParameter],
                                               Effect=Allow,
                                               Resource=[image_param_arn]),
                                     Statement(Action=[
                                         awacs.ecr.DescribeImages,
                                         awacs.ecr.BatchDeleteImage
                                     ],
                                               Effect=Allow,
                                               Resource=[ecr_repo_arn])
                                 ]))
                     ]))
        cleanupfunction = template.add_resource(
            awslambda.Function(
                'CleanupFunction',
                Description='Cleanup stale ECR images',
                Code=awslambda.Code(
                    ZipFile=variables['ECRCleanupLambdaFunction']),
                Environment=awslambda.Environment(
                    Variables={
                        'ECR_REPO_NAME': variables['EcrRepoName'].ref,
                        'SSM_PARAM': image_param_path
                    }),
                Handler='index.handler',
                Role=cleanuplambdarole.get_att('Arn'),
                Runtime='python3.6',
                Timeout=120))
        cleanuprule = template.add_resource(
            events.Rule('CleanupRule',
                        Description='Regularly invoke CleanupFunction',
                        ScheduleExpression='rate(7 days)',
                        State='ENABLED',
                        Targets=[
                            events.Target(Arn=cleanupfunction.get_att('Arn'),
                                          Id='CleanupFunction')
                        ]))
        template.add_resource(
            awslambda.Permission(
                'AllowCWLambdaInvocation',
                FunctionName=cleanupfunction.ref(),
                Action=awacs.awslambda.InvokeFunction.JSONrepr(),
                Principal='events.amazonaws.com',
                SourceArn=cleanuprule.get_att('Arn')))

        appsource = template.add_resource(
            codecommit.Repository(
                'AppSource',
                RepositoryName=Join('-',
                                    [variables['AppPrefix'].ref, 'source'])))
        for i in ['Name', 'Arn']:
            template.add_output(
                Output("AppRepo%s" % i,
                       Description="%s of app source repo" % i,
                       Value=appsource.get_att(i)))

        bucket = template.add_resource(
            s3.Bucket(
                'Bucket',
                AccessControl=s3.Private,
                LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                    s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                     Status='Enabled')
                ]),
                VersioningConfiguration=s3.VersioningConfiguration(
                    Status='Enabled')))
        template.add_output(
            Output('PipelineBucketName',
                   Description='Name of pipeline bucket',
                   Value=bucket.ref()))

        # This list must be kept in sync between the CodeBuild project and its
        # role
        build_name = Join('', [variables['AppPrefix'].ref, '-build'])

        build_role = template.add_resource(
            iam.Role(
                'BuildRole',
                AssumeRolePolicyDocument=make_simple_assume_policy(
                    'codebuild.amazonaws.com'),
                PermissionsBoundary=boundary_arn,
                Policies=[
                    iam.Policy(
                        PolicyName=Join('', [build_name, '-policy']),
                        PolicyDocument=PolicyDocument(
                            Version='2012-10-17',
                            Statement=[
                                Statement(
                                    Action=[awacs.s3.GetObject],
                                    Effect=Allow,
                                    Resource=[
                                        Join('', [bucket.get_att('Arn'), '/*'])
                                    ]),
                                Statement(
                                    Action=[awacs.ecr.GetAuthorizationToken],
                                    Effect=Allow,
                                    Resource=['*']),
                                Statement(Action=[
                                    awacs.ecr.BatchCheckLayerAvailability,
                                    awacs.ecr.BatchGetImage,
                                    awacs.ecr.CompleteLayerUpload,
                                    awacs.ecr.DescribeImages,
                                    awacs.ecr.GetDownloadUrlForLayer,
                                    awacs.ecr.InitiateLayerUpload,
                                    awacs.ecr.PutImage,
                                    awacs.ecr.UploadLayerPart
                                ],
                                          Effect=Allow,
                                          Resource=[ecr_repo_arn]),
                                Statement(Action=[
                                    awacs.ssm.GetParameter,
                                    awacs.ssm.PutParameter
                                ],
                                          Effect=Allow,
                                          Resource=[image_param_arn]),
                                Statement(Action=[
                                    awacs.logs.CreateLogGroup,
                                    awacs.logs.CreateLogStream,
                                    awacs.logs.PutLogEvents
                                ],
                                          Effect=Allow,
                                          Resource=[
                                              Join('', [
                                                  'arn:', Partition, ':logs:',
                                                  Region, ':', AccountId,
                                                  ':log-group:/aws/codebuild/',
                                                  build_name
                                              ] + x)
                                              for x in [[':*'], [':*/*']]
                                          ])
                            ]))
                ]))

        buildproject = template.add_resource(
            codebuild.Project(
                'BuildProject',
                Artifacts=codebuild.Artifacts(Type='CODEPIPELINE'),
                Environment=codebuild.Environment(
                    ComputeType='BUILD_GENERAL1_SMALL',
                    EnvironmentVariables=[
                        codebuild.EnvironmentVariable(
                            Name='AWS_DEFAULT_REGION',
                            Type='PLAINTEXT',
                            Value=Region),
                        codebuild.EnvironmentVariable(Name='AWS_ACCOUNT_ID',
                                                      Type='PLAINTEXT',
                                                      Value=AccountId),
                        codebuild.EnvironmentVariable(
                            Name='IMAGE_REPO_NAME',
                            Type='PLAINTEXT',
                            Value=variables['EcrRepoName'].ref),
                    ],
                    Image='aws/codebuild/docker:18.09.0',
                    Type='LINUX_CONTAINER'),
                Name=build_name,
                ServiceRole=build_role.get_att('Arn'),
                Source=codebuild.Source(
                    Type='CODEPIPELINE',
                    BuildSpec=variables['BuildProjectBuildSpec'])))

        pipelinerole = template.add_resource(
            iam.Role(
                'PipelineRole',
                AssumeRolePolicyDocument=make_simple_assume_policy(
                    'codepipeline.amazonaws.com'),
                PermissionsBoundary=boundary_arn,
                Policies=[
                    iam.Policy(
                        PolicyName=Join('', [build_name, '-pipeline-policy']),
                        PolicyDocument=PolicyDocument(
                            Version='2012-10-17',
                            Statement=[
                                Statement(
                                    Action=[
                                        awacs.codecommit.GetBranch,
                                        awacs.codecommit.GetCommit,
                                        awacs.codecommit.UploadArchive,
                                        awacs.codecommit.GetUploadArchiveStatus,  # noqa
                                        awacs.codecommit.CancelUploadArchive
                                    ],  # noqa
                                    Effect=Allow,
                                    Resource=[appsource.get_att('Arn')]),
                                Statement(
                                    Action=[awacs.s3.GetBucketVersioning],
                                    Effect=Allow,
                                    Resource=[bucket.get_att('Arn')]),
                                Statement(
                                    Action=[
                                        awacs.s3.GetObject, awacs.s3.PutObject
                                    ],
                                    Effect=Allow,
                                    Resource=[
                                        Join('', [bucket.get_att('Arn'), '/*'])
                                    ]),
                                Statement(
                                    Action=[
                                        awacs.codebuild.BatchGetBuilds,
                                        awacs.codebuild.StartBuild
                                    ],
                                    Effect=Allow,
                                    Resource=[buildproject.get_att('Arn')])
                            ]))
                ]))

        template.add_resource(
            codepipeline.Pipeline(
                'Pipeline',
                ArtifactStore=codepipeline.ArtifactStore(Location=bucket.ref(),
                                                         Type='S3'),
                Name=build_name,
                RoleArn=pipelinerole.get_att('Arn'),
                Stages=[
                    codepipeline.Stages(
                        Name='Source',
                        Actions=[
                            codepipeline.Actions(
                                Name='CodeCommit',
                                ActionTypeId=codepipeline.ActionTypeId(
                                    Category='Source',
                                    Owner='AWS',
                                    Provider='CodeCommit',
                                    Version='1'),
                                Configuration={
                                    'RepositoryName':
                                    appsource.get_att('Name'),  # noqa
                                    'BranchName': 'master'
                                },
                                OutputArtifacts=[
                                    codepipeline.OutputArtifacts(
                                        Name='CodeCommitRepo')
                                ]),
                        ]),
                    codepipeline.Stages(
                        Name='Build',
                        Actions=[
                            codepipeline.Actions(
                                Name='Build',
                                ActionTypeId=codepipeline.ActionTypeId(
                                    Category='Build',
                                    Owner='AWS',
                                    Provider='CodeBuild',
                                    Version='1'),
                                Configuration={
                                    'ProjectName': buildproject.ref()
                                },
                                InputArtifacts=[
                                    codepipeline.InputArtifacts(
                                        Name='CodeCommitRepo')
                                ])
                        ])
                ]))
Example #12
    def create_template(self):
        """Create template (main function called by Stacker)."""
        template = self.template
        variables = self.get_variables()
        template.add_version('2010-09-09')
        template.add_description('Static Website - Bucket and Distribution')

        # Conditions
        template.add_condition(
            'AcmCertSpecified',
            And(Not(Equals(variables['AcmCertificateArn'].ref, '')),
                Not(Equals(variables['AcmCertificateArn'].ref, 'undefined'))))
        template.add_condition(
            'AliasesSpecified',
            And(Not(Equals(Select(0, variables['Aliases'].ref), '')),
                Not(Equals(Select(0, variables['Aliases'].ref), 'undefined'))))
        template.add_condition(
            'CFLoggingEnabled',
            And(Not(Equals(variables['LogBucketName'].ref, '')),
                Not(Equals(variables['LogBucketName'].ref, 'undefined'))))
        template.add_condition(
            'WAFNameSpecified',
            And(Not(Equals(variables['WAFWebACL'].ref, '')),
                Not(Equals(variables['WAFWebACL'].ref, 'undefined'))))

        # Resources
        oai = template.add_resource(
            cloudfront.CloudFrontOriginAccessIdentity(
                'OAI',
                CloudFrontOriginAccessIdentityConfig=cloudfront.CloudFrontOriginAccessIdentityConfig(  # noqa pylint: disable=line-too-long
                    Comment='CF access to website')))

        bucket = template.add_resource(
            s3.Bucket(
                'Bucket',
                AccessControl=s3.Private,
                LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                    s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                     Status='Enabled')
                ]),
                VersioningConfiguration=s3.VersioningConfiguration(
                    Status='Enabled'),
                WebsiteConfiguration=s3.WebsiteConfiguration(
                    IndexDocument='index.html', ErrorDocument='error.html')))
        template.add_output(
            Output('BucketName',
                   Description='Name of website bucket',
                   Value=bucket.ref()))

        allowcfaccess = template.add_resource(
            s3.BucketPolicy(
                'AllowCFAccess',
                Bucket=bucket.ref(),
                PolicyDocument=Policy(
                    Version='2012-10-17',
                    Statement=[
                        Statement(
                            Action=[awacs.s3.GetObject],
                            Effect=Allow,
                            Principal=Principal(
                                'CanonicalUser',
                                oai.get_att('S3CanonicalUserId')),
                            Resource=[Join('', [bucket.get_att('Arn'), '/*'])])
                    ])))

        cfdistribution = template.add_resource(
            cloudfront.Distribution(
                'CFDistribution',
                DependsOn=allowcfaccess.title,
                DistributionConfig=cloudfront.DistributionConfig(
                    Aliases=If('AliasesSpecified', variables['Aliases'].ref,
                               NoValue),
                    Origins=[
                        cloudfront.Origin(
                            DomainName=Join(
                                '.', [bucket.ref(), 's3.amazonaws.com']),
                            S3OriginConfig=cloudfront.S3Origin(
                                OriginAccessIdentity=Join(
                                    '', [
                                        'origin-access-identity/cloudfront/',
                                        oai.ref()
                                    ])),
                            Id='S3Origin')
                    ],
                    DefaultCacheBehavior=cloudfront.DefaultCacheBehavior(
                        AllowedMethods=['GET', 'HEAD'],
                        Compress=False,
                        DefaultTTL='86400',
                        ForwardedValues=cloudfront.ForwardedValues(
                            Cookies=cloudfront.Cookies(Forward='none'),
                            QueryString=False,
                        ),
                        TargetOriginId='S3Origin',
                        ViewerProtocolPolicy='redirect-to-https'),
                    DefaultRootObject='index.html',
                    Logging=If(
                        'CFLoggingEnabled',
                        cloudfront.Logging(Bucket=Join('.', [
                            variables['LogBucketName'].ref, 's3.amazonaws.com'
                        ])), NoValue),
                    PriceClass=variables['PriceClass'].ref,
                    Enabled=True,
                    WebACLId=If('WAFNameSpecified', variables['WAFWebACL'].ref,
                                NoValue),
                    ViewerCertificate=If(
                        'AcmCertSpecified',
                        cloudfront.ViewerCertificate(
                            AcmCertificateArn=variables['AcmCertificateArn'].ref,  # noqa
                            SslSupportMethod='sni-only'),
                        NoValue))))
        template.add_output(
            Output('CFDistributionId',
                   Description='CloudFront distribution ID',
                   Value=cfdistribution.ref()))
        template.add_output(
            Output('CFDistributionDomainName',
                   Description='CloudFront distribution domain name',
                   Value=cfdistribution.get_att('DomainName')))
Example No. 13
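    # Assumes the usual module-level imports: troposphere (If, Or, Equals,
    # Ref, GetAtt, Join, Output, Tags, s3) and awacs.aws (Policy, Statement,
    # Condition, Deny, Null, Principal, StringNotEquals) plus awacs.s3.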
    def create_template(self):
        """Create template (main function called by Stacker)."""
        template = self.template
        variables = self.get_variables()
        template.add_version('2010-09-09')
        template.add_description('Sample app')

        # Conditions
        template.add_condition(
            'BucketNameOmitted',
            Or(Equals(variables['BucketName'].ref, ''),
               Equals(variables['BucketName'].ref, 'undefined')))

        # Resources
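        # When BucketName resolves to ''/'undefined', pass AWS::NoValue so
        # CloudFormation generates a unique bucket name instead.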
        bucket = template.add_resource(
            s3.Bucket(
                'Bucket',
                AccessControl=s3.Private,
                BucketName=If('BucketNameOmitted', Ref('AWS::NoValue'),
                              variables['BucketName'].ref),
                LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                    s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                     Status='Enabled')
                ]),
                Tags=Tags(application=variables['ApplicationName'].ref,
                          customer=variables['CustomerName'].ref,
                          environment=variables['EnvironmentName'].ref),
                VersioningConfiguration=s3.VersioningConfiguration(
                    Status='Enabled')))
        template.add_output(
            Output('BucketName',
                   Description='Name of bucket',
                   Value=Ref(bucket)))
        template.add_output(
            Output('BucketArn',
                   Description='Arn of bucket',
                   Value=GetAtt(bucket, 'Arn')))

        # https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html
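        # Deny any PutObject that omits server-side encryption or requests
        # an algorithm other than AES256.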
        template.add_resource(
            s3.BucketPolicy(
                'RequireBucketEncryption',
                Bucket=Ref(bucket),
                PolicyDocument=Policy(
                    Version='2012-10-17',
                    Statement=[
                        Statement(
                            Sid='DenyIncorrectEncryptionHeader',
                            Action=[awacs.s3.PutObject],
                            Condition=Condition(
                                StringNotEquals(
                                    's3:x-amz-server-side-encryption',
                                    'AES256')),
                            Effect=Deny,
                            Principal=Principal('*'),
                            Resource=[Join('',
                                           [GetAtt(bucket, 'Arn'), '/*'])]),
                        Statement(
                            Sid='DenyUnEncryptedObjectUploads',
                            Action=[awacs.s3.PutObject],
                            Condition=Condition(
                                Null('s3:x-amz-server-side-encryption',
                                     'true')),
                            Effect=Deny,
                            Principal=Principal('*'),
                            Resource=[Join('', [GetAtt(bucket, 'Arn'), '/*'])])
                    ])))
Example No. 14
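    # Assumes the same troposphere/awacs imports as above, plus
    # troposphere's Export and Sub for the cross-stack output exports.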
    def add_resources(self):
        """Add resources to template."""
        template = self.template
        variables = self.get_variables()

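        # Versioned bucket for core Chef configuration; noncurrent object
        # versions expire after 90 days.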
        chefbucket = template.add_resource(
            s3.Bucket(
                'ChefBucket',
                AccessControl=s3.Private,
                BucketName=If('ChefBucketNameOmitted', Ref('AWS::NoValue'),
                              variables['ChefBucketName'].ref),
                LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                    s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                     Status='Enabled')
                ]),
                VersioningConfiguration=s3.VersioningConfiguration(
                    Status='Enabled')))
        template.add_output(
            Output(
                '%sName' % chefbucket.title,
                Description='Name of bucket storing core Chef configuration',
                Export=Export(
                    Sub('${AWS::StackName}-%sName' % chefbucket.title)),
                Value=Ref(chefbucket)))
        template.add_output(
            Output('%sArn' % chefbucket.title,
                   Description='Arn of bucket storing core Chef configuration',
                   Export=Export(
                       Sub('${AWS::StackName}-%sArn' % chefbucket.title)),
                   Value=GetAtt(chefbucket, 'Arn')))

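        # Second versioned bucket for restricted Chef data; unlike the
        # config bucket, it also gets the encryption-enforcing policy below.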
        chefdatabucket = template.add_resource(
            s3.Bucket('ChefDataBucket',
                      AccessControl=s3.Private,
                      BucketName=If('ChefDataBucketNameOmitted',
                                    Ref('AWS::NoValue'),
                                    variables['ChefDataBucketName'].ref),
                      VersioningConfiguration=s3.VersioningConfiguration(
                          Status='Enabled')))
        template.add_output(
            Output(
                '%sName' % chefdatabucket.title,
                Description='Name of bucket storing extra/restricted Chef data',
                Export=Export(
                    Sub('${AWS::StackName}-'
                        '%sName' % chefdatabucket.title)),
                Value=Ref(chefdatabucket)))
        template.add_output(
            Output(
                '%sArn' % chefdatabucket.title,
                Description='Arn of bucket storing extra/restricted Chef data',
                Export=Export(
                    Sub('${AWS::StackName}-'
                        '%sArn' % chefdatabucket.title)),
                Value=GetAtt(chefdatabucket, 'Arn')))

        # https://docs.aws.amazon.com/AmazonS3/latest/dev/
        #         UsingServerSideEncryption.html
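        # Same deny-unencrypted-uploads policy as the sample app above,
        # applied to the Chef data bucket.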
        template.add_resource(
            s3.BucketPolicy(
                'RequireChefDataBucketEncryption',
                Bucket=Ref(chefdatabucket),
                PolicyDocument=Policy(
                    Version='2012-10-17',
                    Statement=[
                        Statement(
                            Sid='DenyIncorrectEncryptionHeader',
                            Action=[awacs.s3.PutObject],
                            Condition=Condition(
                                StringNotEquals(
                                    's3:x-amz-server-side-encryption',
                                    'AES256')),
                            Effect=Deny,
                            Principal=Principal('*'),
                            Resource=[
                                Join('', [GetAtt(chefdatabucket, 'Arn'), '/*'])
                            ]),
                        Statement(
                            Sid='DenyUnEncryptedObjectUploads',
                            Action=[awacs.s3.PutObject],
                            Condition=Condition(
                                Null('s3:x-amz-server-side-encryption',
                                     'true')),
                            Effect=Deny,
                            Principal=Principal('*'),
                            Resource=[
                                Join('', [GetAtt(chefdatabucket, 'Arn'), '/*'])
                            ])
                    ])))