def create_s3_resources(self): s3_bucket = self.add_resource( s3.Bucket('s3TileCacheBucket', BucketName=Join( '.', ['tile-cache', Ref(self.public_hosted_zone_name)]), AccessControl=s3.PublicRead, CorsConfiguration=s3.CorsConfiguration(CorsRules=[ s3.CorsRules( AllowedOrigins=['*'], AllowedMethods=['GET'], MaxAge=3000, AllowedHeaders=['*'], ) ]))) self.add_resource( s3.BucketPolicy( 's3TileCacheBucketPolicy', Bucket=Ref(s3_bucket), PolicyDocument={ 'Statement': [{ 'Action': ['s3:GetObject'], 'Effect': 'Allow', 'Resource': { 'Fn::Join': ['', ['arn:aws:s3:::', Ref(s3_bucket), '/*']] }, 'Principal': '*' }] })) self.add_resource( r53.RecordSetGroup( 'dnsPublicRecordsCache', HostedZoneName=Join('', [Ref(self.public_hosted_zone_name), '.']), RecordSets=[ r53.RecordSet('dnsTileServersCache', AliasTarget=r53.AliasTarget( AMAZON_S3_HOSTED_ZONE_ID, AMAZON_S3_WEBSITE_DOMAIN, True, ), Name=Join('', [ 'tile-cache.', Ref(self.public_hosted_zone_name), '.' ]), Type='A') ]))
def create_template(self): """Create template (main function called by Stacker).""" template = self.template # variables = self.get_variables() template.add_version('2010-09-09') template.add_description('Static Website - Dependencies') # Resources awslogbucket = template.add_resource( s3.Bucket('AWSLogBucket', AccessControl=s3.Private, VersioningConfiguration=s3.VersioningConfiguration( Status='Enabled'))) template.add_output( Output('AWSLogBucketName', Description='Name of bucket storing AWS logs', Value=awslogbucket.ref())) template.add_resource( s3.BucketPolicy( 'AllowAWSLogWriting', Bucket=awslogbucket.ref(), PolicyDocument=Policy( Version='2012-10-17', Statement=[ Statement( Action=[awacs.s3.PutObject], Effect=Allow, Principal=AWSPrincipal( Join(':', ['arn:aws:iam:', AccountId, 'root'])), Resource=[ Join('', [ 'arn:aws:s3:::', awslogbucket.ref(), '/*' ]) ]) ]))) artifacts = template.add_resource( s3.Bucket( 'Artifacts', AccessControl=s3.Private, LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[ s3.LifecycleRule(NoncurrentVersionExpirationInDays=90, Status='Enabled') ]), VersioningConfiguration=s3.VersioningConfiguration( Status='Enabled'))) template.add_output( Output('ArtifactsBucketName', Description='Name of bucket storing artifacts', Value=artifacts.ref()))
def create_template(self): t = self.template variables = self.get_variables() bucket_ids = [] for title, attrs in variables["Buckets"].items(): bucket_id = Ref(title) t.add_resource(s3.Bucket.from_dict(title, attrs)) t.add_output(Output(title + "BucketId", Value=bucket_id)) t.add_output(Output(title + "BucketArn", Value=s3_arn(bucket_id))) t.add_output( Output(title + "BucketDomainName", Value=GetAtt(title, "DomainName"))) if "WebsiteConfiguration" in attrs: t.add_mapping("WebsiteEndpoints", S3_WEBSITE_ENDPOINTS) t.add_resource( s3.BucketPolicy( title + "BucketPolicy", Bucket=bucket_id, PolicyDocument=static_website_bucket_policy(bucket_id), )) t.add_output( Output(title + "WebsiteUrl", Value=GetAtt(title, "WebsiteURL"))) t.add_output( Output(title + "WebsiteEndpoint", Value=FindInMap("WebsiteEndpoints", Region, "endpoint"))) bucket_ids.append(bucket_id) read_write_roles = variables["ReadWriteRoles"] if read_write_roles: t.add_resource( iam.PolicyType( "ReadWritePolicy", PolicyName=Sub("${AWS::StackName}ReadWritePolicy"), PolicyDocument=read_write_s3_bucket_policy(bucket_ids), Roles=read_write_roles, )) read_only_roles = variables["ReadRoles"] if read_only_roles: t.add_resource( iam.PolicyType( "ReadPolicy", PolicyName=Sub("${AWS::StackName}ReadPolicy"), PolicyDocument=read_only_s3_bucket_policy(bucket_ids), Roles=read_only_roles, ))
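# Hypothetical Stacker variables for the blueprint above; the key names come
# from the code ("Buckets", "ReadWriteRoles", "ReadRoles"), but the bucket
# title, its attributes and the role names here are illustrative only:
# {
#     "Buckets": {
#         "StaticSite": {
#             "WebsiteConfiguration": {"IndexDocument": "index.html"},
#         },
#     },
#     "ReadWriteRoles": ["example-writer-role"],
#     "ReadRoles": ["example-reader-role"],
# }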
def add_module_bucket(self: Template): self._bucket = self.add_resource( s3.Bucket('TerraformModules', AccessControl='Private', BucketEncryption=s3.BucketEncryption( ServerSideEncryptionConfiguration=[ s3.ServerSideEncryptionRule( ServerSideEncryptionByDefault=s3. ServerSideEncryptionByDefault( SSEAlgorithm='AES256')) ]), PublicAccessBlockConfiguration=s3. PublicAccessBlockConfiguration( BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True))) self.add_resource( s3.BucketPolicy( 'TerraformModulesBucketPolicy', Bucket=Ref(self._bucket), PolicyDocument=PolicyDocument( Version='2012-10-17', Statement=[ Statement( Effect=Deny, Action=[Action('s3', 'GetObject')], Principal=Principal('*'), Resource=[ Join( '', ['arn:aws:s3:::', Ref(self._bucket), '/*']) ], Condition=Condition( Bool({'aws:SecureTransport': False}))), Statement( Effect=Deny, Action=[Action('s3', 'GetObject')], Principal=Principal('*'), Resource=[ Join( '', ['arn:aws:s3:::', Ref(self._bucket), '/*']) ], Condition=Condition( Bool({'aws:SecureTransport': False}))) ]), ))
def add_bucket(self, name, access_control, static_site, route53,
               public_hosted_zone):
    """
    Helper method creates an S3 bucket, optionally with a public-read
    bucket policy, static website hosting, and a Route53 alias record
    @param name [string] Fully qualified name for the bucket (corp.example.com)
    @param access_control [string] type of access control for the bucket
    @param static_site [boolean] should the bucket host a static site
    @param route53 [boolean] create a route53 entry?
    @param public_hosted_zone [string] hosted zone in which to create the alias
    """
    if route53:
        self.add_dns_alias(name, "s3-website-us-east-1.amazonaws.com",
                           "Z3AQBSTGFYJSTF", public_hosted_zone)

    if access_control == "PublicRead":
        policy = s3.BucketPolicy(name.replace('.', '') + "BucketPolicy",
                                 Bucket=name,
                                 PolicyDocument={
                                     "Statement": [{
                                         "Sid": "PublicReadForGetBucketObjects",
                                         "Effect": "Allow",
                                         "Principal": "*",
                                         "Action": "s3:GetObject",
                                         "Resource": "arn:aws:s3:::%s/*" % name
                                     }]
                                 })
        self.add_resource(policy)

    bucket = s3.Bucket(
        name.replace('.', '') + "Bucket",
        BucketName=name,
        AccessControl=access_control,
    )

    if static_site:
        web_config = s3.WebsiteConfiguration(IndexDocument='index.html')
        bucket.properties['WebsiteConfiguration'] = web_config

    return self.add_resource(bucket)
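# Hypothetical call to the add_bucket helper above (the bucket name and hosted
# zone are illustrative only; self is whatever template class defines it):
# self.add_bucket(name='corp.example.com',
#                 access_control='PublicRead',
#                 static_site=True,
#                 route53=True,
#                 public_hosted_zone='example.com.')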
def create_log_bucket(self): """Create S3 bucket with bucket policy for ALB logs""" t = self.template t.add_mapping("RegionalELBAccountIds", self.ELB_ACCOUNT_ID) log_bucket = t.add_resource( s3.Bucket( "LogBucket", DeletionPolicy="Retain", BucketName=self.vars["LogBucket"], )) t.add_resource( s3.BucketPolicy( "LogBucketPolicy", Bucket=Ref(log_bucket), PolicyDocument={ "Statement": [{ "Action": ["s3:PutObject"], "Principal": { "AWS": [ FindInMap( "RegionalELBAccountIds", Region, "ELBAccountId", ) ] }, "Resource": Join("/", [ Join("", ["arn:aws:s3:::", Ref(log_bucket)]), self.vars["LogPrefix"], "AWSLogs", AccountId, "*" ]), "Effect": "Allow", }] })) t.add_output( Output( "LogBucket", Description="Cloudfront S3 website log bucket", Value=Ref(log_bucket), )) return log_bucket
def _add_bucket_policy(template, bucket_title, bucket_name):
    template.add_resource(
        s3.BucketPolicy("%sPolicy" % bucket_title,
                        Bucket=bucket_name,
                        PolicyDocument={
                            "Version": "2012-10-17",
                            "Statement": [{
                                "Sid": "AddPerm",
                                "Effect": "Allow",
                                "Principal": "*",
                                "Action": ["s3:GetObject"],
                                "Resource":
                                    ["arn:aws:s3:::%s/*" % bucket_name]
                            }]
                        }))
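# A minimal, self-contained sketch of how the helper above might be called;
# it assumes the snippet above (with its troposphere.s3 import) is in scope,
# and the bucket title and name here are hypothetical:
from troposphere import Template

t = Template()
_add_bucket_policy(t, "Assets", "example-assets-bucket")
print(t.to_json())  # emits the template containing the AssetsPolicy resource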
def resources(self, stack: Stack) -> list[AWSObject]:
    """Construct and return a s3.Bucket and its associated s3.BucketPolicy."""
    # Handle versioning configuration
    versioning_status = "Suspended"
    if self.enable_versioning:
        versioning_status = "Enabled"

    # Block all public accesses
    public_access_block_config = s3.PublicAccessBlockConfiguration(
        BlockPublicAcls=True,
        BlockPublicPolicy=True,
        IgnorePublicAcls=True,
        RestrictPublicBuckets=True,
    )

    # Set default bucket encryption to AES256
    bucket_encryption = s3.BucketEncryption(
        ServerSideEncryptionConfiguration=[
            s3.ServerSideEncryptionRule(
                ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
                    SSEAlgorithm="AES256"
                )
            )
        ]
    )

    return [
        s3.Bucket(
            name_to_id(self.name),
            BucketName=self.name,
            BucketEncryption=bucket_encryption,
            PublicAccessBlockConfiguration=public_access_block_config,
            VersioningConfiguration=s3.VersioningConfiguration(
                Status=versioning_status
            ),
        ),
        s3.BucketPolicy(
            name_to_id(self.name) + "Policy",
            Bucket=self.ref,
            PolicyDocument=self.policy_document.as_dict,
        ),
    ]
def create_template(self):
    self.vars = self.validate_user_data()
    t = self.template
    t.add_description(self.vars["BucketName"] + " log bucket")
    t.add_mapping("RegionalELBAccountIds", self.ELB_ACCOUNT_ID)

    self.log_bucket = t.add_resource(
        s3.Bucket(
            "LogBucket",
            DeletionPolicy="Retain",
            BucketName=self.vars["BucketName"],
        ))

    t.add_resource(
        s3.BucketPolicy(
            "LogBucketPolicy",
            Bucket=Ref(self.log_bucket),
            PolicyDocument={
                "Statement": [{
                    "Action": ["s3:PutObject"],
                    "Principal": {
                        "AWS": [
                            FindInMap(
                                "RegionalELBAccountIds",
                                Region,
                                "ELBAccountId",
                            )
                        ]
                    },
                    "Resource": self.munge_policy_resourse(),
                    "Effect": "Allow",
                }]
            },
        ))

    t.add_output(
        Output(
            "LogBucket",
            Description="Cloudfront S3 website log bucket",
            Value=Ref(self.log_bucket),
        ))
def allow_access_bucket(bucket_name):
    # UNUSED
    return s3.BucketPolicy(
        'S3BucketPolicy',
        Bucket=Ref(bucket_name),
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Action": ["s3:GetBucketLocation", "s3:ListAllMyBuckets"],
                "Resource": {
                    "Fn::Join": ["", ["arn:aws:s3:::", "tesis.project.files"]]
                }
            }, {
                "Effect": "Allow",
                "Action": ["s3:ListBucket"],
                "Resource": {
                    "Fn::Join": ["", ["arn:aws:s3:::", {
                        "Ref": "tesis.project.files"
                    }]]
                }
            }, {
                "Effect": "Allow",
                "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject"],
                "Resource": {
                    "Fn::Join": [
                        "",
                        [
                            "arn:aws:s3:::", {
                                "Ref": "tesis.project.files"
                            }, "/zookeeper.sh"
                        ]
                    ]
                }
            }]
        })
def origin_bucket_policy(self): t = self.template t.add_resource( s3.BucketPolicy( "SiteBucketPolicy", Bucket=Ref(self.SiteBucket), PolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Action": ["s3:GetObject"], "Resource": Join("", ["arn:aws:s3:::", Ref(self.SiteBucket), "/*"]), "Effect": "Allow", "Principal": { "CanonicalUser": self.vars["S3CanonicalUserId"] }, }], }, ))
def render_s3(context, template):
    for bucket_name in context['s3']:
        props = {
            'DeletionPolicy':
                context['s3'][bucket_name]['deletion-policy'].capitalize()
        }
        bucket_title = _sanitize_title(bucket_name) + "Bucket"

        if context['s3'][bucket_name]['cors']:
            # generic configuration for allowing read-only access
            props['CorsConfiguration'] = s3.CorsConfiguration(CorsRules=[
                s3.CorsRules(AllowedHeaders=['*'],
                             AllowedMethods=['GET', 'HEAD'],
                             AllowedOrigins=['*'])
            ])

        if context['s3'][bucket_name]['website-configuration']:
            index_document = context['s3'][bucket_name][
                'website-configuration'].get('index-document', 'index.html')
            props['WebsiteConfiguration'] = s3.WebsiteConfiguration(
                IndexDocument=index_document)

            template.add_resource(
                s3.BucketPolicy("%sPolicy" % bucket_title,
                                Bucket=bucket_name,
                                PolicyDocument={
                                    "Version": "2012-10-17",
                                    "Statement": [{
                                        "Sid": "AddPerm",
                                        "Effect": "Allow",
                                        "Principal": "*",
                                        "Action": ["s3:GetObject"],
                                        "Resource":
                                            ["arn:aws:s3:::%s/*" % bucket_name]
                                    }]
                                }))

        template.add_resource(
            s3.Bucket(bucket_title, BucketName=bucket_name, **props))
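# A runnable sketch of the context shape render_s3 expects, inferred from the
# dictionary lookups above; the bucket name is illustrative and _sanitize_title
# is assumed to be defined alongside render_s3:
from troposphere import Template

context = {
    's3': {
        'example-bucket': {
            'deletion-policy': 'delete',
            'cors': True,
            'website-configuration': {'index-document': 'index.html'},
        }
    }
}
template = Template()
render_s3(context, template)  # adds the bucket, CORS rules and public-read policy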
def add_utility_bucket(self, name='demo'): ''' Method adds a bucket to be used for infrastructure utility purposes such as backups @param name [str] friendly name to prepend to the CloudFormation asset name ''' self.utility_bucket = self.template.add_resource( s3.Bucket(name.lower() + 'UtilityBucket', AccessControl=s3.BucketOwnerFullControl, DeletionPolicy=Retain)) bucket_policy_statements = self.get_logging_bucket_policy_document( self.utility_bucket, elb_log_prefix=self.strings.get('elb_log_prefix', ''), cloudtrail_log_prefix=self.strings.get('cloudtrail_log_prefix', '')) self.template.add_resource( s3.BucketPolicy(name.lower() + 'UtilityBucketLoggingPolicy', Bucket=Ref(self.utility_bucket), PolicyDocument=bucket_policy_statements)) self.manual_parameter_bindings['utilityBucket'] = Ref( self.utility_bucket)
def _add_regional_bucket(builder: Template) -> (s3.Bucket, s3.BucketPolicy): name = "RegionalBucket" bucket = builder.add_resource(s3.Bucket(name, Tags=DEFAULT_TAGS)) policy = s3.BucketPolicy( f"{name}Policy", Bucket=bucket.ref(), PolicyDocument=dict( Version="2012-10-17", Statement=[ dict( Sid="CloudTrailAclCheck", Effect="Allow", Principal=dict(Service="cloudtrail.amazonaws.com"), Action=S3.GetBucketAcl, Resource=Sub( f"arn:${{{AWS_PARTITION}}}:s3:::${{{bucket.title}}}"), ), dict( Sid="CloudTrailWrite", Effect="Allow", Principal=dict(Service="cloudtrail.amazonaws.com"), Action=S3.PutObject, Resource=Sub( f"arn:${{{AWS_PARTITION}}}:s3:::${{{bucket.title}}}/accretion/cloudtrail/*" ), Condition=dict(StringEquals={ "s3:x-amz-acl": "bucket-owner-full-control" }), ), ], ), ) builder.add_resource(policy) return bucket, policy
def _build_template(self, template): t = template s3b = t.add_resource(s3.Bucket(self.name)) if self.public_read: s3b.AccessControl = s3.PublicRead t.add_resource( s3.BucketPolicy('{}BucketPolicy'.format(self.name), Bucket=Ref(s3b), PolicyDocument={ "Statement": [{ "Action": ["s3:GetObject"], "Effect": "Allow", "Resource": Join('', ["arn:aws:s3:::", Ref(s3b), "/*"]), "Principal": "*" }] })) versioning = "Suspended" if self.versioning: versioning = "Enabled" s3b.VersioningConfiguration = s3.VersioningConfiguration( Status=versioning) if self.website_mode: s3b.WebsiteConfiguration = s3.WebsiteConfiguration( **self.website_config) if self.cors_enabled is True \ and len(self.cors_rules) <= 0: self.add_cors_rule("CorsAll", ['*'], ['GET', 'POST', 'PUT'], ['*'], 3000) if len(self.cors_rules) > 0: cors = s3.CorsConfiguration(CorsRules=self.cors_rules) s3b.CorsConfiguration = cors if len(self.lifecycle_rules) > 0: s3b.LifecycleConfiguration = s3.LifecycleConfiguration(Rules=[]) for lcr in self.lifecycle_rules: s3b.LifecycleConfiguration.Rules.append(lcr) t.add_output([ Output("{}BucketName".format(self.name), Value=Ref(s3b), Description="{} Bucket Name".format(self.name)), Output("{}BucketUrl".format(self.name), Value=GetAtt(s3b, "DomainName"), Description="{} Bucket Name".format(self.name)), Output('{}WebsiteUrl'.format(self.name), Value=GetAtt(s3b, 'WebsiteURL')) ]) return s3b
def create_template(self): """Create template (main function called by Stacker).""" template = self.template variables = self.get_variables() template.add_version('2010-09-09') template.add_description('Static Website - Bucket and Distribution') # Conditions template.add_condition( 'AcmCertSpecified', And(Not(Equals(variables['AcmCertificateArn'].ref, '')), Not(Equals(variables['AcmCertificateArn'].ref, 'undefined')))) template.add_condition( 'AliasesSpecified', And(Not(Equals(Select(0, variables['Aliases'].ref), '')), Not(Equals(Select(0, variables['Aliases'].ref), 'undefined')))) template.add_condition( 'CFLoggingEnabled', And(Not(Equals(variables['LogBucketName'].ref, '')), Not(Equals(variables['LogBucketName'].ref, 'undefined')))) template.add_condition( 'DirectoryIndexSpecified', And(Not(Equals(variables['RewriteDirectoryIndex'].ref, '')), Not(Equals(variables['RewriteDirectoryIndex'].ref, 'undefined'))) # noqa ) template.add_condition( 'WAFNameSpecified', And(Not(Equals(variables['WAFWebACL'].ref, '')), Not(Equals(variables['WAFWebACL'].ref, 'undefined')))) # Resources oai = template.add_resource( cloudfront.CloudFrontOriginAccessIdentity( 'OAI', CloudFrontOriginAccessIdentityConfig=cloudfront. CloudFrontOriginAccessIdentityConfig( # noqa pylint: disable=line-too-long Comment='CF access to website'))) bucket = template.add_resource( s3.Bucket( 'Bucket', AccessControl=s3.Private, LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[ s3.LifecycleRule(NoncurrentVersionExpirationInDays=90, Status='Enabled') ]), VersioningConfiguration=s3.VersioningConfiguration( Status='Enabled'), WebsiteConfiguration=s3.WebsiteConfiguration( IndexDocument='index.html', ErrorDocument='error.html'))) template.add_output( Output('BucketName', Description='Name of website bucket', Value=bucket.ref())) allowcfaccess = template.add_resource( s3.BucketPolicy( 'AllowCFAccess', Bucket=bucket.ref(), PolicyDocument=PolicyDocument( Version='2012-10-17', Statement=[ Statement( Action=[awacs.s3.GetObject], Effect=Allow, Principal=Principal( 'CanonicalUser', oai.get_att('S3CanonicalUserId')), Resource=[Join('', [bucket.get_att('Arn'), '/*'])]) ]))) cfdirectoryindexrewriterole = template.add_resource( iam.Role('CFDirectoryIndexRewriteRole', Condition='DirectoryIndexSpecified', AssumeRolePolicyDocument=PolicyDocument( Version='2012-10-17', Statement=[ Statement(Effect=Allow, Action=[awacs.sts.AssumeRole], Principal=Principal( 'Service', [ 'lambda.amazonaws.com', 'edgelambda.amazonaws.com' ])) ]), ManagedPolicyArns=[ IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole' ])) cfdirectoryindexrewrite = template.add_resource( awslambda.Function( 'CFDirectoryIndexRewrite', Condition='DirectoryIndexSpecified', Code=awslambda.Code(ZipFile=Join( '', [ "'use strict';\n", "exports.handler = (event, context, callback) => {\n", "\n", " // Extract the request from the CloudFront event that is sent to Lambda@Edge\n", # noqa pylint: disable=line-too-long " var request = event.Records[0].cf.request;\n", " // Extract the URI from the request\n", " var olduri = request.uri;\n", " // Match any '/' that occurs at the end of a URI. 
Replace it with a default index\n", # noqa pylint: disable=line-too-long " var newuri = olduri.replace(/\\/$/, '\\/", variables['RewriteDirectoryIndex'].ref, "');\n", # noqa " // Log the URI as received by CloudFront and the new URI to be used to fetch from origin\n", # noqa pylint: disable=line-too-long " console.log(\"Old URI: \" + olduri);\n", " console.log(\"New URI: \" + newuri);\n", " // Replace the received URI with the URI that includes the index page\n", # noqa pylint: disable=line-too-long " request.uri = newuri;\n", " // Return to CloudFront\n", " return callback(null, request);\n", "\n", "};\n" ])), Description= 'Rewrites CF directory HTTP requests to default page', # noqa Handler='index.handler', Role=cfdirectoryindexrewriterole.get_att('Arn'), Runtime='nodejs8.10')) # Generating a unique resource name here for the Lambda version, so it # updates automatically if the lambda code changes code_hash = hashlib.md5( str(cfdirectoryindexrewrite.properties['Code']. properties['ZipFile'].to_dict()).encode() # noqa pylint: disable=line-too-long ).hexdigest() cfdirectoryindexrewritever = template.add_resource( awslambda.Version('CFDirectoryIndexRewriteVer' + code_hash, Condition='DirectoryIndexSpecified', FunctionName=cfdirectoryindexrewrite.ref())) cfdistribution = template.add_resource( cloudfront.Distribution( 'CFDistribution', DependsOn=allowcfaccess.title, DistributionConfig=cloudfront.DistributionConfig( Aliases=If('AliasesSpecified', variables['Aliases'].ref, NoValue), Origins=[ cloudfront.Origin( DomainName=Join( '.', [bucket.ref(), 's3.amazonaws.com']), S3OriginConfig=cloudfront.S3Origin( OriginAccessIdentity=Join( '', [ 'origin-access-identity/cloudfront/', oai.ref() ])), Id='S3Origin') ], DefaultCacheBehavior=cloudfront.DefaultCacheBehavior( AllowedMethods=['GET', 'HEAD'], Compress=False, DefaultTTL='86400', ForwardedValues=cloudfront.ForwardedValues( Cookies=cloudfront.Cookies(Forward='none'), QueryString=False, ), LambdaFunctionAssociations=If( 'DirectoryIndexSpecified', [ cloudfront.LambdaFunctionAssociation( EventType='origin-request', LambdaFunctionARN=cfdirectoryindexrewritever .ref() # noqa ) ], NoValue), TargetOriginId='S3Origin', ViewerProtocolPolicy='redirect-to-https'), DefaultRootObject='index.html', Logging=If( 'CFLoggingEnabled', cloudfront.Logging(Bucket=Join('.', [ variables['LogBucketName'].ref, 's3.amazonaws.com' ])), NoValue), PriceClass=variables['PriceClass'].ref, Enabled=True, WebACLId=If('WAFNameSpecified', variables['WAFWebACL'].ref, NoValue), ViewerCertificate=If( 'AcmCertSpecified', cloudfront.ViewerCertificate( AcmCertificateArn=variables['AcmCertificateArn']. ref, # noqa SslSupportMethod='sni-only'), NoValue)))) template.add_output( Output('CFDistributionId', Description='CloudFront distribution ID', Value=cfdistribution.ref())) template.add_output( Output('CFDistributionDomainName', Description='CloudFront distribution domain name', Value=cfdistribution.get_att('DomainName')))
s3.BucketPolicy(
    'BucketPolicy',
    Bucket=Ref(bucket),
    PolicyDocument={
        'Version': '2012-10-17',
        'Statement': [
            # {
            #     'Sid': 'DenyNotBucketOwnerControl',
            #     'Effect': 'Deny',
            #     'Principal': '*',
            #     'Action': [
            #         's3:PutObject',
            #     ],
            #     'Resource': [
            #         Sub('arn:${PARTITION}:s3:::${BUCKET_NAME}/*',
            #             PARTITION=If('IsChinaRegion', 'aws-cn', 'aws'),
            #             BUCKET_NAME=Ref(bucket),
            #             )
            #     ],
            #     'Condition': {
            #         'StringNotEquals': {
            #             's3:x-amz-acl': 'bucket-owner-full-control'
            #         },
            #     }
            # },
            {
                'Sid': 'DenyIncorrectEncryptionHeader',
                'Effect': 'Deny',
                'Principal': '*',
                'Action': [
                    's3:PutObject',
                ],
                'Resource': [
                    Sub(
                        'arn:${PARTITION}:s3:::${BUCKET_NAME}/*',
                        PARTITION=If('IsChinaRegion', 'aws-cn', 'aws'),
                        BUCKET_NAME=Ref(bucket),
                    )
                ],
                'Condition': {
                    'StringNotEquals': {
                        's3:x-amz-server-side-encryption': 'AES256'
                    },
                }
            },
            {
                'Sid': 'DenyUnEncryptedObjectUploads',
                'Effect': 'Deny',
                'Principal': '*',
                'Action': [
                    's3:PutObject',
                ],
                'Resource': [
                    Sub(
                        'arn:${PARTITION}:s3:::${BUCKET_NAME}/*',
                        PARTITION=If('IsChinaRegion', 'aws-cn', 'aws'),
                        BUCKET_NAME=Ref(bucket),
                    )
                ],
                'Condition': {
                    'Null': {
                        's3:x-amz-server-side-encryption': 'true'
                    },
                }
            }
        ]
    }))
)) example_bucket_policy = template.add_resource( s3.BucketPolicy( "DefaultOriginBucketPolicy", Bucket=Ref(example_bucket), PolicyDocument={ "Version": "2012-10-17", "Id": "PolicyForCloudFrontPrivateContent", "Statement": [ { "Sid": " Grant a CloudFront Origin Identity access to support private content", "Effect": "Allow", "Principal": { "CanonicalUser": GetAtt(example_bucket_oai, 'S3CanonicalUserId') }, "Action": "s3:GetObject", "Resource": Join('', ["arn:aws:s3:::", Ref(example_bucket), "/*"]), }, ], }, )) example_distribution = template.add_resource(
def resources(self, stack: Stack) -> list[AWSObject]:
    """Construct and return a s3.Bucket and its associated s3.BucketPolicy."""
    # Handle versioning configuration
    optional_resources = []
    versioning_status = "Suspended"
    if self.enable_versioning:
        versioning_status = "Enabled"

    # Block all public accesses
    public_access_block_config = s3.PublicAccessBlockConfiguration(
        BlockPublicAcls=True,
        BlockPublicPolicy=True,
        IgnorePublicAcls=True,
        RestrictPublicBuckets=True,
    )

    # Set default bucket encryption to AES256
    bucket_encryption = None
    if self.default_bucket_encryption:
        bucket_encryption = s3.BucketEncryption(
            ServerSideEncryptionConfiguration=[
                s3.ServerSideEncryptionRule(
                    ServerSideEncryptionByDefault=s3.ServerSideEncryptionByDefault(
                        SSEAlgorithm=self.default_bucket_encryption.value
                    )
                )
            ]
        )

    lifecycle_config = None
    if self.lifecycle_rules:
        lifecycle_config = s3.LifecycleConfiguration(
            name_to_id(self.name) + "LifeCycleConfig",
            Rules=self.lifecycle_rules
        )

    notification_config, notification_resources = self.notification_setup
    optional_resources.extend(notification_resources)

    attr = {}
    for key, val in {
        "BucketName": self.name,
        "BucketEncryption": bucket_encryption,
        "PublicAccessBlockConfiguration": public_access_block_config,
        "VersioningConfiguration": s3.VersioningConfiguration(
            Status=versioning_status
        ),
        "LifecycleConfiguration": lifecycle_config,
        "NotificationConfiguration": notification_config,
        "DependsOn": self.depends_on,
    }.items():
        if val:
            attr[key] = val

    return [
        s3.Bucket(name_to_id(self.name), **attr),
        s3.BucketPolicy(
            name_to_id(self.name) + "Policy",
            Bucket=self.ref,
            PolicyDocument=self.policy_document.as_dict,
        ),
        *optional_resources,
    ]
def generate(account_list=None, region_list=None, file_location=None,
             output_keys=False, dry_run=False):
    """CloudFormation template generator for use in creating the resources
    required to capture logs in a centrally managed account per UCSD
    standards."""
    if type(account_list) == tuple:
        account_list = list(account_list)
    parameter_groups = []
    region_list = region_list if region_list else ['us-west-1', 'us-west-2',
                                                   'us-east-1', 'us-east-2']

    t = Template()
    t.add_version("2010-09-09")
    t.add_description(
        "UCSD Log Target AWS CloudFormation Template - this CFn template "
        "configures a given account to receive logs from other accounts so as "
        "to aggregate and then optionally forward those logs on to the UCSD "
        "Splunk installation.")

    # Create Kinesis and IAM Roles
    log_stream_shard_count = t.add_parameter(Parameter(
        "LogStreamShardCount",
        Description="Number of shards to create within the AWS Kinesis stream created to handle CloudWatch Logs.",
        Type="Number",
        MinValue=1,
        MaxValue=64,
        Default=1))
    log_stream_retention_period = t.add_parameter(Parameter(
        "LogStreamRetentionPeriod",
        Description="Number of hours to retain logs in the Kinesis stream.",
        Type="Number",
        MinValue=24,
        MaxValue=120,
        Default=24))
    parameter_groups.append({'Label': {'default': 'Log Stream Inputs'},
                             'Parameters': [log_stream_shard_count.name,
                                            log_stream_retention_period.name]})

    log_stream = t.add_resource(k.Stream(
        "LogStream",
        RetentionPeriodHours=Ref(log_stream_retention_period),
        ShardCount=Ref(log_stream_shard_count)))

    firehose_bucket = t.add_resource(s3.Bucket('LogS3DeliveryBucket'))

    firehose_delivery_role = t.add_resource(iam.Role(
        'LogS3DeliveryRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[AssumeRole],
                Principal=Principal('Service', 'firehose.amazonaws.com'),
                Condition=Condition(StringEquals('sts:ExternalId', AccountId)))])))

    log_s3_delivery_policy = t.add_resource(iam.PolicyType(
        'LogS3DeliveryPolicy',
        Roles=[Ref(firehose_delivery_role)],
        PolicyName='LogS3DeliveryPolicy',
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[as3.AbortMultipartUpload,
                            as3.GetBucketLocation,
                            as3.GetObject,
                            as3.ListBucket,
                            as3.ListBucketMultipartUploads,
                            as3.PutObject],
                    Resource=[
                        Join('', ['arn:aws:s3:::', Ref(firehose_bucket)]),
                        Join('', ['arn:aws:s3:::', Ref(firehose_bucket), '*'])]),
                Statement(
                    Effect=Allow,
                    Action=[akinesis.Action('Get*'),
                            akinesis.DescribeStream,
                            akinesis.ListStreams],
                    Resource=[GetAtt(log_stream, 'Arn')])])))

    s3_firehose = t.add_resource(fh.DeliveryStream(
        'LogToS3DeliveryStream',
        DependsOn=[log_s3_delivery_policy.name],
        DeliveryStreamName='LogToS3DeliveryStream',
        DeliveryStreamType='KinesisStreamAsSource',
        KinesisStreamSourceConfiguration=fh.KinesisStreamSourceConfiguration(
            KinesisStreamARN=GetAtt(log_stream, 'Arn'),
            RoleARN=GetAtt(firehose_delivery_role, 'Arn')),
        S3DestinationConfiguration=fh.S3DestinationConfiguration(
            BucketARN=GetAtt(firehose_bucket, 'Arn'),
            BufferingHints=fh.BufferingHints(
                IntervalInSeconds=300,
                SizeInMBs=50),
            CompressionFormat='UNCOMPRESSED',
            Prefix='firehose/',
            RoleARN=GetAtt(firehose_delivery_role, 'Arn'),
        )))

    t.add_output(Output('SplunkKinesisLogStream',
                        Value=GetAtt(log_stream, 'Arn'),
                        Description='ARN of the kinesis stream for log aggregation.'))

    # Generate Bucket with Lifecycle Policies
    ct_s3_key_prefix = t.add_parameter(Parameter(
        'CloudTrailKeyPrefix',
        Type='String',
        Default='',
        Description='Key name prefix for logs being sent to S3'))
    bucket_name = t.add_parameter(Parameter(
        "BucketName",
        Description="Name to assign to the central logging retention bucket",
        Type="String",
        AllowedPattern="([a-z]|[0-9])+",
        MinLength=2,
        MaxLength=64))
    glacier_migration_days = t.add_parameter(Parameter(
        "LogMoveToGlacierInDays",
        Description="Number of days until logs are expired from S3 and transitioned to Glacier",
        Type="Number",
        Default=365))
    glacier_deletion_days = t.add_parameter(Parameter(
        "LogDeleteFromGlacierInDays",
        Description="Number of days until logs are expired from Glacier and deleted",
        Type="Number",
        Default=365*7))
    parameter_groups.append({'Label': {'default': 'S3 Log Destination Parameters'},
                             'Parameters': [bucket_name.name,
                                            ct_s3_key_prefix.name,
                                            glacier_migration_days.name,
                                            glacier_deletion_days.name]})

    dead_letter_queue = t.add_resource(sqs.Queue('deadLetterQueue'))
    queue = t.add_resource(sqs.Queue(
        's3DeliveryQueue',
        MessageRetentionPeriod=14*24*60*60,  # 14 d * 24 h * 60 m * 60 s
        VisibilityTimeout=5*60,  # 5 m * 60 s per Splunk docs here: http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWS#Configure_SQS
        RedrivePolicy=sqs.RedrivePolicy(
            deadLetterTargetArn=GetAtt(dead_letter_queue, 'Arn'),
            maxReceiveCount=10)))
    t.add_output(Output('SplunkS3Queue',
                        Value=GetAtt(queue, 'Arn'),
                        Description='Queue for Splunk SQS S3 ingest'))
    t.add_output(Output('SplunkS3DeadLetterQueue',
                        Value=GetAtt(dead_letter_queue, 'Arn'),
                        Description="Dead letter queue for Splunk SQS S3 ingest"))
    t.add_resource(sqs.QueuePolicy(
        's3DeliveryQueuePolicy',
        PolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal("AWS", "*"),
                Action=[asqs.SendMessage],
                Resource=[GetAtt(queue, 'Arn')],
                Condition=Condition(ArnLike("aws:SourceArn",
                                            Join('', ["arn:aws:s3:*:*:", Ref(bucket_name)]))))]),
        Queues=[Ref(queue)]))

    bucket = t.add_resource(s3.Bucket(
        "LogDeliveryBucket",
        DependsOn=[log_stream.name, queue.name],
        BucketName=Ref(bucket_name),
        AccessControl="LogDeliveryWrite",
        NotificationConfiguration=s3.NotificationConfiguration(
            QueueConfigurations=[s3.QueueConfigurations(
                Event="s3:ObjectCreated:*",
                Queue=GetAtt(queue, 'Arn'))]),
        LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
            s3.LifecycleRule(
                Id="S3ToGlacierTransition",
                Status="Enabled",
                ExpirationInDays=Ref(glacier_deletion_days),
                Transition=s3.LifecycleRuleTransition(
                    StorageClass="Glacier",
                    TransitionInDays=Ref(glacier_migration_days)))])))

    bucket_policy = t.add_resource(s3.BucketPolicy(
        "LogDeliveryBucketPolicy",
        Bucket=Ref(bucket),
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect="Allow",
                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                    Action=[GetBucketAcl],
                    Resource=[GetAtt(bucket, 'Arn')]),
                Statement(
                    Effect="Allow",
                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                    Action=[PutObject],
                    Condition=Condition(StringEquals({"s3:x-amz-acl": "bucket-owner-full-control"})),
                    Resource=[Join('', [GetAtt(bucket, "Arn"),
                                        Ref(ct_s3_key_prefix),
                                        "/AWSLogs/", acct_id, "/*"])
                              for acct_id in account_list])])))

    splunk_sqs_s3_user = t.add_resource(iam.User('splunkS3SQSUser',
                                                 Path='/',
                                                 UserName='******'))
    splunk_user_policy = t.add_resource(_generate_splunk_policy(users=[Ref(splunk_sqs_s3_user)]))
    t.add_output(Output('BucketName',
                        Description="Name of the bucket for CloudTrail log delivery",
                        Value=Ref(bucket)))

    # Log destination setup
    cwl_to_kinesis_role = t.add_resource(iam.Role(
        'CWLtoKinesisRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[AssumeRole],
                Principal=Principal("Service",
                                    Join('', ["logs.", Region, ".amazonaws.com"])))])))

    cwl_to_kinesis_policy_link = t.add_resource(iam.PolicyType(
        'CWLtoKinesisPolicy',
        PolicyName='CWLtoKinesisPolicy',
        Roles=[Ref(cwl_to_kinesis_role)],
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Resource=[GetAtt(log_stream, 'Arn')],
                    Action=[akinesis.PutRecord]),
                Statement(
                    Effect=Allow,
                    Resource=[GetAtt(cwl_to_kinesis_role, 'Arn')],
                    Action=[IAMPassRole])])))

    log_destination = t.add_resource(cwl.Destination(
        'CWLtoKinesisDestination',
        DependsOn=[cwl_to_kinesis_policy_link.name],
        DestinationName='CWLtoKinesisDestination',
        DestinationPolicy=_generate_log_destination_policy_test(
            'CWLtoKinesisDestination', account_list),
        RoleArn=GetAtt(cwl_to_kinesis_role, 'Arn'),
        TargetArn=GetAtt(log_stream, 'Arn')))
    t.add_output(Output('childAccountLogDeliveryDestinationArn',
                        Value=GetAtt(log_destination, 'Arn'),
                        Description='Log Destination to specify when deploying the source cloudformation template in other accounts.'))

    if output_keys:
        splunk_user_creds = t.add_resource(iam.AccessKey(
            'splunkAccountUserCreds',
            UserName=Ref(splunk_sqs_s3_user)))
        t.add_output(Output('splunkUserAccessKey',
                            Description='AWS Access Key for the user created for splunk to use when accessing logs',
                            Value=Ref(splunk_user_creds)))
        t.add_output(Output('splunkUserSecretKey',
                            Description='AWS Secret Access Key ID for the user created for splunk to use when accessing logs',
                            Value=GetAtt(splunk_user_creds, 'SecretAccessKey')))

    t.add_output(Output('splunkCWLRegion',
                        Description="The AWS region that contains the data. In aws_cloudwatch_logs_tasks.conf, enter the region ID.",
                        Value=Region))
    t.add_output(Output("DeploymentAccount",
                        Value=AccountId,
                        Description="Convenience Output for referencing AccountID of the log aggregation account"))
    t.add_metadata({"AWS::CloudFormation::Interface": {"ParameterGroups": parameter_groups}})

    if dry_run:
        print(t.to_json())
    else:
        save_path = file_location if file_location else os.path.join(log_aggregation_cf, 'log_targets.json')
        with open(save_path, 'w') as f:
            f.write(t.to_json())
def buildInfrastructure(t, args): if (not args.recovery): t.add_resource( kms.Key( 'OpenEMRKey', DeletionPolicy='Retain' if args.recovery else 'Delete' if args.dev else 'Retain', KeyPolicy={ "Version": "2012-10-17", "Id": "key-default-1", "Statement": [{ "Sid": "1", "Effect": "Allow", "Principal": { "AWS": [Join(':', ['arn:aws:iam:', ref_account, 'root'])] }, "Action": "kms:*", "Resource": "*" }] })) t.add_resource( s3.Bucket( 'S3Bucket', DeletionPolicy='Retain', BucketName=Join( '-', ['openemr', Select('2', Split('/', ref_stack_id))]))) t.add_resource( s3.BucketPolicy( 'BucketPolicy', Bucket=Ref('S3Bucket'), PolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Sid": "AWSCloudTrailAclCheck", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Action": "s3:GetBucketAcl", "Resource": { "Fn::Join": ["", ["arn:aws:s3:::", { "Ref": "S3Bucket" }]] } }, { "Sid": "AWSCloudTrailWrite", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Action": "s3:PutObject", "Resource": { "Fn::Join": [ "", [ "arn:aws:s3:::", { "Ref": "S3Bucket" }, "/AWSLogs/", { "Ref": "AWS::AccountId" }, "/*" ] ] }, "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } } }] })) t.add_resource( cloudtrail.Trail('CloudTrail', DependsOn='BucketPolicy', IsLogging=True, IncludeGlobalServiceEvents=True, IsMultiRegionTrail=True, S3BucketName=Ref('S3Bucket'))) t.add_resource( ec2.SecurityGroup('ApplicationSecurityGroup', GroupDescription='Application Security Group', VpcId=Ref('VPC'), Tags=Tags(Name='Application'))) return t
"CFS3SiteLogBucket", AccessControl="LogDeliveryWrite", BucketName=Join(".", ["cfs3site-log-bucket", AccountId]), )) t.add_resource( s3.BucketPolicy( "CFS3SiteLogBucketPolicy", Bucket=Ref("CFS3SiteLogBucket"), PolicyDocument={ "Statement": [{ "Action": ["s3:PutObject"], "Principal": { "AWS": [AccountId] }, "Resource": Join("", ["arn:aws:s3:::", Ref("CFS3SiteLogBucket"), "/*"]), "Effect": "Allow", "Sid": "CFS3SiteLogBucketPolicy", }] }, )) t.add_output( Output( "CFS3SiteLogBucket", Description="Cloudfront S3 website log bucket", Value=Ref(CFS3SiteLogBucket),
def s3_policy_adder(self, name, bucket, policy): self.template.add_resource( s3.BucketPolicy(name, Bucket=bucket, PolicyDocument=policy))
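# Hypothetical call to the s3_policy_adder helper above, from within the same
# template class; the name, bucket reference, and policy document are
# illustrative only:
# self.s3_policy_adder(
#     'LogBucketPolicy',
#     bucket=Ref('LogBucket'),
#     policy={'Statement': [{'Effect': 'Allow',
#                            'Principal': '*',
#                            'Action': ['s3:GetObject'],
#                            'Resource': 'arn:aws:s3:::example-bucket/*'}]})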
def buildInfrastructure(t, args): t.add_resource( ec2.VPC('VPC', CidrBlock='10.0.0.0/16', EnableDnsSupport='true', EnableDnsHostnames='true')) t.add_resource( ec2.Subnet('PublicSubnet1', VpcId=Ref('VPC'), CidrBlock='10.0.1.0/24', AvailabilityZone=Select("0", GetAZs("")))) t.add_resource(ec2.InternetGateway('ig')) t.add_resource( ec2.VPCGatewayAttachment('igAttach', VpcId=Ref('VPC'), InternetGatewayId=Ref('ig'))) t.add_resource(ec2.RouteTable('rtTablePublic', VpcId=Ref('VPC'))) t.add_resource( ec2.Route('rtPublic', RouteTableId=Ref('rtTablePublic'), DestinationCidrBlock='0.0.0.0/0', GatewayId=Ref('ig'), DependsOn='igAttach')) t.add_resource( ec2.SubnetRouteTableAssociation('rtPublic1Attach', SubnetId=Ref('PublicSubnet1'), RouteTableId=Ref('rtTablePublic'))) t.add_resource( kms.Key('OpenEMRKey', DeletionPolicy='Delete', KeyPolicy={ "Version": "2012-10-17", "Id": "key-default-1", "Statement": [{ "Sid": "1", "Effect": "Allow", "Principal": { "AWS": [Join(':', ['arn:aws:iam:', ref_account, 'root'])] }, "Action": "kms:*", "Resource": "*" }] })) t.add_resource( s3.Bucket( 'S3Bucket', DeletionPolicy='Retain', BucketName=Join( '-', ['openemr', Select('2', Split('/', ref_stack_id))]))) t.add_resource( s3.BucketPolicy( 'BucketPolicy', Bucket=Ref('S3Bucket'), PolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Sid": "AWSCloudTrailAclCheck", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Action": "s3:GetBucketAcl", "Resource": { "Fn::Join": ["", ["arn:aws:s3:::", { "Ref": "S3Bucket" }]] } }, { "Sid": "AWSCloudTrailWrite", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Action": "s3:PutObject", "Resource": { "Fn::Join": [ "", [ "arn:aws:s3:::", { "Ref": "S3Bucket" }, "/AWSLogs/", { "Ref": "AWS::AccountId" }, "/*" ] ] }, "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } } }] })) t.add_resource( cloudtrail.Trail('CloudTrail', DependsOn='BucketPolicy', IsLogging=True, IncludeGlobalServiceEvents=True, IsMultiRegionTrail=True, S3BucketName=Ref('S3Bucket'))) return t
root_bucket = template.add_resource( s3.Bucket('RootBucket', AccessControl=s3.PublicRead, BucketName=Ref(domain), WebsiteConfiguration=s3.WebsiteConfiguration( IndexDocument=Ref(index_page), ErrorDocument=Ref(error_page), ))) root_bucket_arn = Join('', ['arn:aws:s3:::', Ref(root_bucket), '/*']) template.add_resource( s3.BucketPolicy('RootBucketPolicy', Bucket=Ref(root_bucket), PolicyDocument={ 'Statement': [{ 'Action': ['s3:GetObject'], 'Effect': 'Allow', 'Resource': root_bucket_arn, 'Principal': '*', }] })) cdn = template.add_resource( cloudfront.Distribution( 'WebsiteDistribution', DistributionConfig=cloudfront.DistributionConfig( Aliases=[Ref(domain)], Origins=[ cloudfront.Origin(Id=Ref(root_bucket), DomainName=GetAtt(root_bucket, 'DomainName'), S3OriginConfig=cloudfront.S3Origin()) ],
def generate_cf_template():
    """
    Returns an entire CloudFormation stack by using troposphere to construct
    each piece
    """
    # Header of CloudFormation template
    t = Template()
    t.add_version("2010-09-09")
    t.add_description("Lambda Chat AWS Resources")

    # Parameters
    description = "should match [0-9]+-[a-z0-9]+.apps.googleusercontent.com"
    google_oauth_client_id = t.add_parameter(Parameter(
        "GoogleOAuthClientID",
        AllowedPattern="[0-9]+-[a-z0-9]+.apps.googleusercontent.com",
        Type="String",
        Description="The Client ID of your Google project",
        ConstraintDescription=description
    ))

    website_s3_bucket_name = t.add_parameter(Parameter(
        "WebsiteS3BucketName",
        AllowedPattern=r"[a-zA-Z0-9\-]*",
        Type="String",
        Description="Name of S3 bucket to store the website in",
        ConstraintDescription="can contain only alphanumeric characters and dashes.",
    ))

    # The SNS topic the website will publish chat messages to
    website_sns_topic = t.add_resource(sns.Topic(
        'WebsiteSnsTopic',
        TopicName='lambda-chat',
        DisplayName='Lambda Chat'
    ))
    t.add_output(Output(
        "WebsiteSnsTopic",
        Description="sns_topic_arn",
        Value=Ref(website_sns_topic),
    ))

    # The IAM Role and Policy the website will assume to publish to SNS
    website_role = t.add_resource(iam.Role(
        "WebsiteRole",
        Path="/",
        AssumeRolePolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[Action("sts", "AssumeRoleWithWebIdentity")],
                    Principal=Principal("Federated", "accounts.google.com"),
                    Condition=Condition(
                        StringEquals(
                            "accounts.google.com:aud",
                            Ref(google_oauth_client_id)
                        )
                    ),
                ),
            ],
        ),
    ))
    t.add_resource(iam.PolicyType(
        "WebsitePolicy",
        PolicyName="lambda-chat-website-policy",
        Roles=[Ref(website_role)],
        PolicyDocument=Policy(
            Version="2012-10-17",
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[Action("sns", "Publish")],
                    Resource=[
                        Ref(website_sns_topic)
                    ],
                ),
            ],
        )
    ))
    t.add_output(Output(
        "WebsiteRole",
        Description="website_iam_role_arn",
        Value=GetAtt(website_role, "Arn"),
    ))

    website_bucket = t.add_resource(s3.Bucket(
        'WebsiteS3Bucket',
        BucketName=Ref(website_s3_bucket_name),
        WebsiteConfiguration=s3.WebsiteConfiguration(
            ErrorDocument="error.html",
            IndexDocument="index.html"
        )
    ))
    t.add_output(Output(
        "S3Bucket",
        Description="s3_bucket",
        Value=Ref(website_bucket),
    ))
    t.add_resource(s3.BucketPolicy(
        'WebsiteS3BucketPolicy',
        Bucket=Ref(website_bucket),
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Sid": "PublicAccess",
                    "Effect": "Allow",
                    "Principal": "*",
                    "Action": ["s3:GetObject"],
                    "Resource": [{
                        "Fn::Join": [
                            "",
                            [
                                "arn:aws:s3:::",
                                {
                                    "Ref": "WebsiteS3Bucket",
                                },
                                "/*"
                            ]
                        ]
                    }]
                }
            ]
        }
    ))

    return t
s3.BucketPolicy( 'cloudTrailBucketPolicy', Bucket=logging_bucket, PolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Sid": "AWSCloudTrailAclCheck20131101", "Effect": "Allow", "Principal": { "AWS": [ "arn:aws:iam::903692715234:root", "arn:aws:iam::859597730677:root", "arn:aws:iam::814480443879:root", "arn:aws:iam::216624486486:root", "arn:aws:iam::086441151436:root", "arn:aws:iam::388731089494:root", "arn:aws:iam::284668455005:root", "arn:aws:iam::113285607260:root", "arn:aws:iam::035351147821:root" ] }, "Action": "s3:GetBucketAcl", "Resource": Join('', ["arn:aws:s3:::", logging_bucket]) }, { "Sid": "AWSCloudTrailWrite20131101", "Effect": "Allow", "Principal": { "AWS": [ "arn:aws:iam::903692715234:root", "arn:aws:iam::859597730677:root", "arn:aws:iam::814480443879:root", "arn:aws:iam::216624486486:root", "arn:aws:iam::086441151436:root", "arn:aws:iam::388731089494:root", "arn:aws:iam::284668455005:root", "arn:aws:iam::113285607260:root", "arn:aws:iam::035351147821:root" ] }, "Action": "s3:PutObject", "Resource": Join('', [ "arn:aws:s3:::", logging_bucket, "/AWSLogs/", Ref('AWS::AccountId'), "/*" ]), "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } } }] }))
s3.BucketPolicy( 'BucketPolicy', Bucket=Ref(bucket), PolicyDocument={ 'Version': '2012-10-17', 'Statement': [ { 'Action': 's3:GetBucketAcl', 'Effect': 'Allow', 'Resource': GetAtt(bucket, 'Arn'), 'Principal': { 'Service': If('ChinaRegionCondition', Sub('logs.${AWS::Region}.amazonaws.com.cn'), Sub('logs.${AWS::Region}.amazonaws.com')) } }, { 'Action': 's3:PutObject', 'Effect': 'Allow', 'Resource': Join('/', [GetAtt(bucket, 'Arn'), '*']), 'Condition': { 'StringEquals': { 's3:x-amz-acl': 'bucket-owner-full-control' } }, 'Principal': { 'Service': If('ChinaRegionCondition', Sub('logs.${AWS::Region}.amazonaws.com.cn'), Sub('logs.${AWS::Region}.amazonaws.com')) } }, ] }))
s3.BucketPolicy( 'BucketPolicy', Bucket=Ref(bucket), PolicyDocument={ 'Version': '2012-10-17', 'Statement': [ { 'Action': 's3:GetBucketAcl', 'Effect': 'Allow', 'Resource': Join('', ['arn:aws:s3:::', Ref(bucket)]), 'Principal': { 'Service': Sub('logs.${AWS::Region}.amazonaws.com') } }, { 'Action': 's3:PutObject', 'Effect': 'Allow', 'Resource': Join('', ['arn:aws:s3:::', Ref(bucket), '/*']), 'Condition': { 'StringEquals': { 's3:x-amz-acl': 'bucket-owner-full-control' } }, 'Principal': { 'Service': Sub('logs.${AWS::Region}.amazonaws.com') } }, ] }))
            Ref(param_hosted_domain)]),
        WebsiteConfiguration=s3.WebsiteConfiguration(
            IndexDocument=Ref(param_index_doc),
            ErrorDocument=Ref(param_error_doc),
        ),
    ))

bucket_policy = t.add_resource(
    s3.BucketPolicy(
        'BucketPolicy',
        Bucket=Ref(bucket),
        PolicyDocument={
            'Version': '2012-10-17',
            'Statement': [{
                'Sid': 'PublicReadGetObject',
                'Effect': 'Allow',
                'Principal': '*',
                'Action': ['s3:GetObject'],
                'Resource': [Join('/', [GetAtt(bucket, 'Arn'), '*'])]
            }]
        },
    ))

#
# Output
#
t.add_output([
    Output(
        'BucketName',
        Description='Bucket name',