def create_cloudfront_distr(self):
    """Attach a CloudFront distribution fronting ``self.s3Bucket`` to the template.

    The resource is stored on ``self.cloudfrontDistr`` and two outputs are
    emitted: the distribution id and its ``http://`` domain name.
    """
    s3_origin = Origin(
        Id="Origin 1",
        DomainName=GetAtt(self.s3Bucket, "DomainName"),
        S3OriginConfig=S3Origin(),
    )
    cache_behavior = DefaultCacheBehavior(
        TargetOriginId="Origin 1",
        ForwardedValues=ForwardedValues(QueryString=False),
        ViewerProtocolPolicy="allow-all",
    )
    config = DistributionConfig(
        Origins=[s3_origin],
        DefaultCacheBehavior=cache_behavior,
        Enabled=True,
        HttpVersion="http2",
    )
    self.cloudfrontDistr = self.template.add_resource(
        Distribution("jtdistr", DistributionConfig=config))
    self.template.add_output([
        Output("DistributionId", Value=Ref(self.cloudfrontDistr)),
        Output(
            "DistributionName",
            Value=Join("", ["http://", GetAtt(self.cloudfrontDistr, "DomainName")])),
    ])
def cloudfront_adder(self, static_site=True):
    """Add a CloudFront distribution in front of the S3 bucket named by the
    ``S3Name`` template parameter, served under the ``Path`` origin path.

    :param static_site: when truthy, point at the bucket's us-east-1
        static-website endpoint (a custom origin, HTTP only); otherwise use
        the bucket's REST endpoint as a regular S3 origin.

    Outputs the distribution id and its ``http://`` domain name.
    """
    origin_id = Join("", ["S3-", Ref("S3Name"), Ref("Path")])
    # Idiomatic truthiness test (was `static_site is True`, which silently
    # fell through to the non-static branch for truthy non-bool values).
    if static_site:
        # Website endpoints only speak plain HTTP, so CloudFront must reach
        # them through a custom origin with an http-only policy.
        origin = Origin(
            Id=origin_id,
            DomainName=Join(
                "", [Ref("S3Name"), ".s3-website-us-east-1.amazonaws.com"]),
            OriginPath=Ref("Path"),
            CustomOriginConfig=CustomOriginConfig(
                OriginProtocolPolicy="http-only"))
    else:
        origin = Origin(
            Id=origin_id,
            DomainName=Join("", [Ref("S3Name"), ".s3.amazonaws.com"]),
            OriginPath=Ref("Path"),
            S3OriginConfig=S3Origin())
    myDistribution = self.template.add_resource(
        Distribution(
            "myDistribution",
            DistributionConfig=DistributionConfig(
                Origins=[origin],
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId=origin_id,
                    ForwardedValues=ForwardedValues(QueryString=False),
                    ViewerProtocolPolicy="redirect-to-https",
                    # Cache between 1 hour and 1 year, 1 day by default.
                    MinTTL=3600,
                    DefaultTTL=86400,
                    MaxTTL=31536000),
                ViewerCertificate=ViewerCertificate(
                    AcmCertificateArn=Ref("ACMarn"),
                    SslSupportMethod='sni-only'),
                Aliases=Ref("URLs"),
                DefaultRootObject=Ref("rootObject"),
                Enabled=True,
                HttpVersion='http2')))
    self.template.add_output([
        Output("DistributionId", Value=Ref(myDistribution)),
        Output("DistributionName", Value=Join(
            "", ["http://", GetAtt(myDistribution, "DomainName")])),
    ])
def _construct_origins(self):
    """Build troposphere ``Origin`` and ``CacheBehavior`` objects for every
    origin configured under this distribution in ``self.cf_config``.

    :returns: tuple ``(origins, behaviors)`` — parallel lists sharing the
        same origin ids.
    """
    import copy  # function-scope import keeps this fix self-contained

    origins = []
    behaviors = []
    for origin_id, origin_cfg in self.cf_config[self.distribution_name].items():
        # Deep copy instead of the former shallow .copy(): the helper calls
        # and the Id/TargetOriginId assignments below mutate nested dicts,
        # which with a shallow copy could write back into the shared
        # cf_config structure.
        kwargs = copy.deepcopy(origin_cfg)
        CloudFront.capitalize_keys(kwargs)
        CloudFront._prepare_origins(kwargs)
        CloudFront._prepare_behaviors(kwargs)
        kwargs['Origin']['Id'] = origin_id
        origins.append(Origin(**kwargs['Origin']))
        kwargs['Behaviors']['TargetOriginId'] = origin_id
        behaviors.append(CacheBehavior(**kwargs['Behaviors']))
    return origins, behaviors
def cloudfront_custom(template, name, elbname, originname, enabled=True):
    """Create a cloudfront distribution of the custom origin type.

    The single origin points at the DNS name of the ELB resource *elbname*
    and CloudFront matches the viewer's protocol when contacting it.
    """
    elb_origin = Origin(
        Id=originname,
        DomainName=GetAtt(elbname, 'DNSName'),
        CustomOriginConfig=CustomOrigin(OriginProtocolPolicy="match-viewer"),
    )
    behavior = DefaultCacheBehavior(
        TargetOriginId=originname,
        ForwardedValues=ForwardedValues(QueryString=False),
        ViewerProtocolPolicy="allow-all",
    )
    distribution_config = DistributionConfig(
        Origins=[elb_origin],
        DefaultCacheBehavior=behavior,
        Enabled=enabled,
    )
    template.add_resource(
        Distribution(name, DistributionConfig=distribution_config))
def build_hook(self):
    """Hook to add tier-specific assets within the build stage of
    initializing this class.
    """
    if not self.dist_config:
        # No distribution config supplied: fall back to a single S3 origin
        # built from this instance's domain name and origin path.
        s3_origin = Origin(
            Id="Origin",
            DomainName=self.domain_name,
            OriginPath=self.origin_path,
            S3OriginConfig=S3Origin(),
        )
        default_behavior = DefaultCacheBehavior(
            TargetOriginId="Origin",
            ForwardedValues=ForwardedValues(QueryString=False),
            ViewerProtocolPolicy="allow-all",
        )
        self.dist_config = DistributionConfig(
            Origins=[s3_origin],
            DefaultCacheBehavior=default_behavior,
            Enabled=True,
        )
    if self.utility_bucket:
        # Ship access logs (cookies included) to the shared utility bucket
        # under AWSLogs/<account-id>/CloudFront/.
        self.dist_config.Logging = Logging(
            Bucket=Join('.', [Ref(self.utility_bucket), 's3.amazonaws.com']),
            IncludeCookies=True,
            Prefix=Join('/', ['AWSLogs', Ref(AWS_ACCOUNT_ID), 'CloudFront']),
        )
    cf_distribution = self.add_resource(
        Distribution(self.resource_name, DistributionConfig=self.dist_config))
    self.add_output([
        Output("DistributionId", Value=Ref(cf_distribution)),
        Output(
            "DistributionName",
            Value=Join("", ["http://", GetAtt(cf_distribution, "DomainName")])),
    ])
def build_hook(self):
    """Create every bucket listed under the ``buckets`` config key and, for
    buckets flagged ``cloudfront``, an HTTPS-only CloudFront distribution
    aliased to the bucket name.
    """
    # print() call form: the Python 2 `print` statement here broke under
    # Python 3, which the rest of this codebase already targets.
    print("Building Template for AWS Frederick Bucket")
    public_hosted_zone_name = self.config.get('public_hosted_zone')
    hosted_zone_name = self.config.get('hosted_zone')
    buckets = self.config.get('buckets')
    if buckets is None:
        return
    for bucket in buckets:
        bucket_name = bucket.get('name')
        self.add_bucket(
            bucket_name,
            bucket.get('access_control'),
            bucket.get('static_site'),
            bucket.get('route53'),
            public_hosted_zone_name,
        )
        if not bucket.get('cloudfront'):
            continue
        self.add_resource(
            Distribution(
                # CloudFormation logical ids may not contain dots.
                bucket_name.replace('.', ''),
                DistributionConfig=DistributionConfig(
                    Aliases=[bucket_name],
                    DefaultRootObject='index.html',
                    Origins=[
                        Origin(Id="Origin 1",
                               DomainName=bucket_name + '.s3.amazonaws.com',
                               S3OriginConfig=S3Origin())
                    ],
                    DefaultCacheBehavior=DefaultCacheBehavior(
                        TargetOriginId="Origin 1",
                        ForwardedValues=ForwardedValues(QueryString=False),
                        ViewerProtocolPolicy="redirect-to-https"),
                    Enabled=True,
                    HttpVersion='http2',
                    ViewerCertificate=ViewerCertificate(
                        # NOTE(review): account-specific certificate ARN is
                        # hard-coded; consider lifting it into config.
                        AcmCertificateArn=(
                            'arn:aws:acm:us-east-1:422548007577:certificate/'
                            '4d2f2450-7616-4daa-b7ed-c1fd2d53df90'),
                        SslSupportMethod='sni-only'))))
DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId='default', ViewerProtocolPolicy='redirect-to-https', ForwardedValues=ForwardedValues(QueryString=False, ), ), DefaultRootObject='index.html', Enabled=True, HttpVersion='http2', IPV6Enabled=True, Origins=[ Origin( Id='default', DomainName=Join('', [ Ref(s3_website_origin), '.s3.', Ref(AWS_REGION), '.amazonaws.com' ]), S3OriginConfig=S3OriginConfig(OriginAccessIdentity=Join( '', [ 'origin-access-identity/cloudfront/', Ref(origin_access_identity), ]), ), ) ], PriceClass='PriceClass_100', ViewerCertificate=ViewerCertificate( AcmCertificateArn=Ref(cloudfront_certificate), SslSupportMethod='sni-only', ), CustomErrorResponses=[ CustomErrorResponse(ErrorCode=403, ResponseCode=200, ResponsePagePath='/404.html'),
def create_cloud_front_template():
    """Write ``./cloudfront.yml``: a SAM-transformed template containing an
    origin bucket, an origin access identity (OAI), a bucket policy granting
    that OAI read access, and a CloudFront distribution that runs an
    imported Lambda@Edge function on viewer requests.
    """
    template = Template()
    template.set_transform('AWS::Serverless-2016-10-31')

    bucket = template.add_resource(Bucket(
        'SampleOriginBucket',
        BucketName=Sub('sample-origin-bucket-${AWS::AccountId}'),
    ))

    identity = template.add_resource(CloudFrontOriginAccessIdentity(
        'SampleOriginAccessIdentity',
        CloudFrontOriginAccessIdentityConfig=CloudFrontOriginAccessIdentityConfig(
            Comment='sample-lambda-edge'),
    ))

    # Only the OAI's canonical user may fetch objects from the bucket.
    template.add_resource(BucketPolicy(
        'SampleBucketPolicy',
        Bucket=Ref(bucket),
        PolicyDocument={
            'Statement': [{
                'Action': 's3:GetObject',
                'Effect': 'Allow',
                'Resource': Join('/', [GetAtt(bucket, 'Arn'), '*']),
                'Principal': {
                    'CanonicalUser': GetAtt(identity, 'S3CanonicalUserId'),
                },
            }],
        },
    ))

    template.add_resource(Distribution(
        'SampleDistribution',
        DistributionConfig=DistributionConfig(
            DefaultCacheBehavior=DefaultCacheBehavior(
                ForwardedValues=ForwardedValues(QueryString=True),
                LambdaFunctionAssociations=[
                    LambdaFunctionAssociation(
                        EventType='viewer-request',
                        # ':8' pins the association to function version 8 of
                        # the ARN exported by the function stack.
                        LambdaFunctionARN=Sub([
                            '${FUNCTION_ARN}:8',
                            {'FUNCTION_ARN': ImportValue(get_export_name())},
                        ]),
                    ),
                ],
                TargetOriginId=Sub('S3-${' + bucket.title + '}'),
                ViewerProtocolPolicy='redirect-to-https',
            ),
            Enabled=True,
            Origins=[
                Origin(
                    Id=Sub('S3-${' + bucket.title + '}'),
                    DomainName=Sub('${' + bucket.title + '}.s3.amazonaws.com'),
                    S3OriginConfig=S3OriginConfig(
                        OriginAccessIdentity=Sub(
                            'origin-access-identity/cloudfront/${'
                            + identity.title + '}'),
                    ),
                ),
            ],
        ),
    ))

    with open('./cloudfront.yml', mode='w') as file:
        file.write(template.to_yaml())
def generate_template(d):
    """Build a CloudFormation template for a fully private S3 bucket served
    through CloudFront via an Origin Access Identity.

    :param d: settings dict with keys ``cf_template_description``,
        ``project_name``, ``env``, ``tags`` and ``default_root_object``.
    :returns: the populated troposphere ``Template``.
    """
    t = Template()
    t.set_description(d["cf_template_description"])

    # "<project>-<env>" doubles as the bucket name and the origin id.
    project_env = Join("-", [d["project_name"], d["env"]])

    # Content bucket blocks all public access; CloudFront reads via the OAI.
    site_bucket = t.add_resource(
        Bucket(
            "S3Bucket",
            BucketName=project_env,
            AccessControl=Private,
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            Tags=Tags(d["tags"], {"Name": d["project_name"]}),
        ))

    oai = t.add_resource(
        CloudFrontOriginAccessIdentity(
            "CFOriginAccessIdentity",
            CloudFrontOriginAccessIdentityConfig=CloudFrontOriginAccessIdentityConfig(
                Comment=Join(" ", [
                    "Cloudfront Origin Access Identity",
                    d["project_name"],
                    d["env"],
                ])),
        ))

    # Grant the OAI's canonical user object reads and bucket listing.
    t.add_resource(
        BucketPolicy(
            "BucketPolicy",
            Bucket=Ref("S3Bucket"),
            PolicyDocument=dict(
                Statement=[
                    dict(
                        Sid="Allow-cf",
                        Effect="Allow",
                        Action=["s3:GetObject", "s3:ListBucket"],
                        Principal=Principal(
                            "CanonicalUser",
                            GetAtt(oai, "S3CanonicalUserId")),
                        Resource=[
                            Join("", ["arn:aws:s3:::", Ref("S3Bucket"), "/*"]),
                            Join("", ["arn:aws:s3:::", Ref("S3Bucket")]),
                        ],
                    )
                ]),
        ))

    distribution = t.add_resource(
        Distribution(
            "myDistribution",
            DistributionConfig=DistributionConfig(
                Enabled=True,
                HttpVersion='http2',
                DefaultRootObject=d['default_root_object'],
                Origins=[
                    Origin(
                        Id=project_env,
                        DomainName=GetAtt(site_bucket, "DomainName"),
                        S3OriginConfig=S3OriginConfig(
                            OriginAccessIdentity=Join("/", [
                                "origin-access-identity",
                                "cloudfront",
                                Ref(oai),
                            ])),
                    )
                ],
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId=project_env,
                    ForwardedValues=ForwardedValues(QueryString=False),
                    ViewerProtocolPolicy="allow-all",
                    # All TTLs zero: CloudFront never caches responses.
                    MaxTTL=0,
                    MinTTL=0,
                    DefaultTTL=0,
                ),
                # SPA-style fallback: serve the root object on 404s.
                CustomErrorResponses=[
                    CustomErrorResponse(
                        ErrorCachingMinTTL=0,
                        ErrorCode=404,
                        ResponsePagePath=Join("", ["/", d['default_root_object']]),
                        ResponseCode=200,
                    )
                ],
                ViewerCertificate=ViewerCertificate(
                    CloudFrontDefaultCertificate=True),
            ),
            Tags=Tags(d["tags"], {"Name": d["project_name"]}),
        ))

    t.add_output(Output(
        "BucketName",
        Value=Ref(site_bucket),
        Description="Name of S3 bucket to hold website content",
    ))
    t.add_output([
        Output("DistributionId", Value=Ref(distribution)),
        Output(
            "DistributionName",
            Value=Join("", ["http://", GetAtt(distribution, "DomainName")])),
    ])
    return t
AcmCertificateArn=Ref(assets_certificate), SslSupportMethod='sni-only', ), If( assets_certificate_arn_condition, ViewerCertificate( AcmCertificateArn=Ref(assets_certificate_arn), SslSupportMethod='sni-only', ), Ref("AWS::NoValue"), ), ), Origins=[ Origin( Id="Assets", DomainName=GetAtt(assets_bucket, "DomainName"), S3OriginConfig=S3OriginConfig( OriginAccessIdentity="", ), ) ], DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="Assets", ForwardedValues=ForwardedValues( # Cache results *should* vary based on querystring (e.g., 'style.css?v=3') QueryString=True, # make sure headers needed by CORS policy above get through to S3 # http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/header-caching.html#header-caching-web-cors Headers=[ 'Origin', 'Access-Control-Request-Headers', 'Access-Control-Request-Method', ],
def create_wordpress_environment(self):
    """Write modules/template_wordpress.yaml: a full WordPress environment.

    Builds, in order: web/RDS security groups, two private S3 buckets
    (code + media assets), an RDS instance, a CloudFront distribution (via
    an Origin Access Identity) for the media bucket, IAM roles, a spot-fleet
    "write" node, and an ALB-fronted auto-scaling group of "read" nodes.
    Subnet ids and the VPC id are pulled from cross-stack ImportValues named
    after ``self.stage`` and the formatted VPC/subnet names.
    """
    template = Template()
    template.add_version('2010-09-09')
    # Wordpress preparation: format vpc name and split private and public subnets in two lists
    vpc_name_formatted = ''.join(
        e for e in self.private_vpc_name if e.isalnum()).capitalize()
    filter_private_subnets = filter(lambda x: x["type"] == "private", self.private_vpc_subnets)
    filter_public_subnets = filter(lambda x: x["type"] == "public", self.private_vpc_subnets)
    private_subnets = []
    for subnet in filter_private_subnets:
        # Strip non-alphanumerics so the name matches the exporting stack's key.
        subnet_name_formatted = ''.join(e for e in subnet["name"] if e.isalnum()).capitalize()
        private_subnets.append(ImportValue("{}{}{}SubnetId".format(self.stage, vpc_name_formatted, subnet_name_formatted)))
    public_subnets = []
    for subnet in filter_public_subnets:
        subnet_name_formatted = ''.join(e for e in subnet["name"] if e.isalnum()).capitalize()
        public_subnets.append(ImportValue("{}{}{}SubnetId".format(self.stage, vpc_name_formatted, subnet_name_formatted)))
    # Instances Security Groups
    # HTTP open to the world; SSH only from the bastion host's SG.
    web_dmz_security_group = template.add_resource(
        SecurityGroup(
            "{}WebDMZSecurityGroup".format(self.stage),
            GroupName="{}webdmz-sg".format(self.stage),
            VpcId=ImportValue("{}{}VpcId".format(self.stage, vpc_name_formatted)),
            GroupDescription="Enables external http access to EC2 instance(s) that host the webpages",
            SecurityGroupIngress=[
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                ),
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    SourceSecurityGroupId=ImportValue("{}BastionHostSecurityGroupID".format(self.stage))
                )
            ]
        )
    )
    # Database port reachable only from the web tier's security group.
    rds_private_security_group = template.add_resource(
        SecurityGroup(
            "{}RdsPrivateSecurityGroup".format(self.stage),
            GroupName="{}rds-private-sg".format(self.stage),
            VpcId=ImportValue("{}{}VpcId".format(self.stage, vpc_name_formatted)),
            GroupDescription="Allow access to the mysql port from the webservers",
            SecurityGroupIngress=[
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=self.database_port,
                    ToPort=self.database_port,
                    SourceSecurityGroupId=Ref(web_dmz_security_group)
                )
            ]
        )
    )
    # S3 Buckets for wordpress content
    bucket_wordpress_code = template.add_resource(
        Bucket(
            "{}BucketWordpressCode".format(self.stage),
            BucketName="{}-wordpress-code".format(self.stage),
            AccessControl=Private
        )
    )
    bucket_wordpress_media_assets = template.add_resource(
        Bucket(
            "{}BucketWordpressMediaAssets".format(self.stage),
            BucketName="{}-wordpress-media-assets".format(self.stage),
            AccessControl=Private
        )
    )
    # Database Instance to store wordpress data
    rds_subnet_group = template.add_resource(
        DBSubnetGroup(
            "{}PrivateRDSSubnetGroup".format(self.stage),
            DBSubnetGroupName="{}private-rds-subnet-group".format(self.stage),
            DBSubnetGroupDescription="Subnets available for the RDS DB Instance",
            SubnetIds=private_subnets
        )
    )
    # BackupRetentionPeriod=0 disables automated backups.
    template.add_resource(
        DBInstance(
            "{}RdsInstance".format(self.stage),
            DBInstanceIdentifier="{}RdsInstance".format(self.stage),
            DBName=self.database_name,
            AllocatedStorage="20",
            DBInstanceClass=self.database_instance_class,
            Engine=self.database_engine,
            EngineVersion=self.database_engine_version,
            MasterUsername=self.database_username,
            MasterUserPassword=self.database_password,
            Port=self.database_port,
            BackupRetentionPeriod=0,
            MultiAZ=self.database_multiaz,
            DBSubnetGroupName=Ref(rds_subnet_group),
            VPCSecurityGroups=[Ref(rds_private_security_group)],
            Tags=Tags(
                Name=self.database_name_tag
            )
        )
    )
    # Cloudfront Distribution to load images
    cloudfront_origin_access_identity = template.add_resource(
        CloudFrontOriginAccessIdentity(
            "{}CloudfrontOriginAccessIdentity".format(self.stage),
            CloudFrontOriginAccessIdentityConfig=CloudFrontOriginAccessIdentityConfig(
                "{}CloudFrontOriginAccessIdentityConfig".format(self.stage),
                Comment="WordPress Origin Access Identity"
            )
        )
    )
    # Media bucket readable only by the OAI's canonical user.
    template.add_resource(BucketPolicy(
        "{}BucketWordpressMediaAssetsPolicy".format(self.stage),
        Bucket=Ref(bucket_wordpress_media_assets),
        PolicyDocument={
            "Version": "2008-10-17",
            "Id": "PolicyForCloudFrontPrivateContent",
            "Statement": [
                {
                    "Sid": "1",
                    "Effect": "Allow",
                    "Principal": {
                        "CanonicalUser": GetAtt(cloudfront_origin_access_identity, 'S3CanonicalUserId')
                    },
                    "Action": "s3:GetObject",
                    "Resource": "arn:aws:s3:::{}-wordpress-media-assets/*".format(self.stage)
                }
            ]
        }
    ))
    cloudfront_distribution = template.add_resource(
        Distribution(
            "{}CloudfrontDistribution".format(self.stage),
            DistributionConfig=DistributionConfig(
                Origins=[
                    Origin(
                        Id="MediaAssetsOrigin",
                        DomainName=GetAtt(bucket_wordpress_media_assets, 'DomainName'),
                        S3OriginConfig=S3Origin(
                            OriginAccessIdentity=Join("", [
                                "origin-access-identity/cloudfront/",
                                Ref(cloudfront_origin_access_identity)
                            ])
                        )
                    )
                ],
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="MediaAssetsOrigin",
                    ForwardedValues=ForwardedValues(
                        QueryString=False
                    ),
                    ViewerProtocolPolicy="allow-all"
                ),
                Enabled=True,
                HttpVersion='http2'
            )
        )
    )
    # Wordpress EC2 Instances
    '''
    EC2 Instances types:
    Write node = To make changes to your blog. E.g: add new posts
    Read Nodes = Instances open to the internet for blog reading
    '''
    # NOTE(review): "s3:*" on "*" is far broader than the two buckets this
    # role actually needs — consider scoping down.
    wordpress_ec2_role = template.add_resource(
        Role(
            "{}WordPressEC2InstanceRole".format(self.stage),
            RoleName="{}WordPressEC2InstanceRole".format(self.stage),
            Path="/",
            AssumeRolePolicyDocument={"Statement": [{
                "Effect": "Allow",
                "Principal": {
                    "Service": ["ec2.amazonaws.com"]
                },
                "Action": ["sts:AssumeRole"]
            }]},
            Policies=[
                Policy(
                    PolicyName="S3FullAccess",
                    PolicyDocument={
                        "Statement": [{
                            "Effect": "Allow",
                            "Action": "s3:*",
                            "Resource": "*"
                        }],
                    }
                )
            ]
        )
    )
    # Service role letting the spot-fleet service bid/launch on our behalf.
    spotfleetrole = template.add_resource(
        Role(
            "{}spotfleetrole".format(self.stage),
            AssumeRolePolicyDocument={
                "Statement": [
                    {
                        "Action": "sts:AssumeRole",
                        "Principal": {
                            "Service": "spotfleet.amazonaws.com"
                        },
                        "Effect": "Allow",
                        "Sid": ""
                    }
                ],
                "Version": "2012-10-17"
            },
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetRole"
            ]
        )
    )
    ec2_instance_profile = template.add_resource(
        InstanceProfile(
            "{}WriteWordpressEc2InstanceProfile".format(self.stage),
            Roles=[Ref(wordpress_ec2_role)]
        )
    )
    # Single spot "write" node in the first public subnet. Its user data
    # installs Apache/PHP/WordPress, rewrites uploads to CloudFront, and
    # cron-syncs code and uploads *to* the S3 buckets every minute.
    # NOTE(review): the embedded shell scripts below appear whitespace-
    # mangled (command separators collapsed to spaces) — restore the
    # original newlines from version control before shipping.
    template.add_resource(
        SpotFleet(
            "{}WriteWordpressEc2Instance".format(self.stage),
            SpotFleetRequestConfigData=SpotFleetRequestConfigData(
                AllocationStrategy="lowestPrice",
                IamFleetRole=GetAtt(spotfleetrole, "Arn"),
                LaunchSpecifications=[LaunchSpecifications(
                    IamInstanceProfile=IamInstanceProfile(
                        Arn=GetAtt(ec2_instance_profile, "Arn")
                    ),
                    ImageId=self.write_instance_image_id,
                    InstanceType=self.write_instance_type,
                    KeyName=self.write_instance_key_name,
                    SecurityGroups=[SecurityGroups(GroupId=Ref(web_dmz_security_group))],
                    SubnetId=next(iter(public_subnets)),
                    UserData=Base64(
                        Join("", [
                            """ #!/bin/bash yum install httpd php php-mysql -y cd /var/www/html echo \"healthy\" > healthy.html wget https://wordpress.org/latest.tar.gz tar -xzf latest.tar.gz cp -r wordpress/* /var/www/html/ rm -rf wordpress rm -rf latest.tar.gz chmod -R 755 wp-content chown -R apache:apache wp-content echo -e 'Options +FollowSymlinks \nRewriteEngine on \nrewriterule ^wp-content/uploads/(.*)$ http://""",
                            GetAtt(cloudfront_distribution, 'DomainName'),
                            """/$1 [r=301,nc]' > .htaccess chkconfig httpd on cd /var/www sudo chown -R apache /var/www/html cd html/ sudo find . -type d -exec chmod 0755 {} \; sudo find . 
-type f -exec chmod 0644 {} \; sed -i 's/AllowOverride None/AllowOverride All/g' /etc/httpd/conf/httpd.conf sed -i 's/AllowOverride none/AllowOverride All/g' /etc/httpd/conf/httpd.conf echo -e "*/1 * * * * root aws s3 sync --delete /var/www/html s3://""",
                            Ref(bucket_wordpress_code),
                            """">> /etc/crontab echo -e "*/1 * * * * root aws s3 sync --delete /var/www/html/wp-content/uploads s3://""",
                            Ref(bucket_wordpress_media_assets),
                            """">> /etc/crontab service httpd start """
                        ])
                    )
                )],
                TargetCapacity=1,
                Type="request"
            )
        )
    )
    # "Read" node launch config: same bootstrap, but cron-syncs *from* the
    # buckets to the instance instead of pushing to them.
    template.add_resource(
        LaunchConfiguration(
            "{}WordPressReadLaunchConfiguration".format(self.stage),
            InstanceType=self.read_instance_type,
            ImageId=self.read_instance_image_id,
            KeyName=self.read_instance_key_name,
            LaunchConfigurationName="{}-wordpress-launch-config".format(self.stage),
            SecurityGroups=[Ref(web_dmz_security_group)],
            IamInstanceProfile=Ref(ec2_instance_profile),
            SpotPrice="0.5",
            UserData=Base64(
                Join("", [
                    """ #!/bin/bash yum install httpd php php-mysql -y cd /var/www/html echo \"healthy\" > healthy.html wget https://wordpress.org/latest.tar.gz tar -xzf latest.tar.gz cp -r wordpress/* /var/www/html/ rm -rf wordpress rm -rf latest.tar.gz chmod -R 755 wp-content chown -R apache:apache wp-content echo -e 'Options +FollowSymlinks \nRewriteEngine on \nrewriterule ^wp-content/uploads/(.*)$ http://""",
                    GetAtt(cloudfront_distribution, 'DomainName'),
                    """/$1 [r=301,nc]' > .htaccess chkconfig httpd on cd /var/www sudo chown -R apache /var/www/html cd html/ sudo find . -type d -exec chmod 0755 {} \; sudo find . 
-type f -exec chmod 0644 {} \; sed -i 's/AllowOverride None/AllowOverride All/g' /etc/httpd/conf/httpd.conf sed -i 's/AllowOverride none/AllowOverride All/g' /etc/httpd/conf/httpd.conf echo -e "*/1 * * * * root aws s3 sync --delete s3://""",
                    Ref(bucket_wordpress_code),
                    """ /var/www/html">> /etc/crontab echo -e "*/1 * * * * root aws s3 sync --delete s3://""",
                    Ref(bucket_wordpress_media_assets),
                    """/var/www/html/wp-content/uploads">> /etc/crontab service httpd start """
                ])
            )
        )
    )
    alb = template.add_resource(
        LoadBalancer(
            "{}ApplicationLoadBalancer".format(self.stage),
            Name="{}-wordpress-alb".format(self.stage),
            SecurityGroups=[Ref(web_dmz_security_group)],
            Subnets=public_subnets,
            Type="application"
        )
    )
    # NOTE(review): health checks hit port 8080 while traffic flows on 80 —
    # confirm the instances actually listen on 8080.
    target_group = template.add_resource(
        TargetGroup(
            "{}TargetGroup".format(self.stage),
            Name="{}-wordpress-target-group".format(self.stage),
            Port=80,
            Protocol="HTTP",
            VpcId=ImportValue("{}{}VpcId".format(self.stage, vpc_name_formatted)),
            HealthCheckPort=8080
        )
    )
    # 1-3 read nodes spread across the public subnets, registered with the ALB.
    template.add_resource(
        AutoScalingGroup(
            "{}AutoScalingGroup".format(self.stage),
            DependsOn="{}WordPressReadLaunchConfiguration".format(self.stage),
            AutoScalingGroupName="{}-wordpress-auto-scaling".format(self.stage),
            LaunchConfigurationName="{}-wordpress-launch-config".format(self.stage),
            TargetGroupARNs=[Ref(target_group)],
            MaxSize="3",
            MinSize="1",
            VPCZoneIdentifier=public_subnets,
            Tags=[
                Tag("Name", "{}-wordpress-read-node".format(self.stage), True)
            ]
        )
    )
    template.add_resource(
        Listener(
            "ALBListener",
            DefaultActions=[
                Action(
                    TargetGroupArn=Ref(target_group),
                    Type="forward"
                )
            ],
            LoadBalancerArn=Ref(alb),
            Port=80,
            Protocol="HTTP"
        )
    )
    f = open("modules/template_wordpress.yaml", 'w')
    print(template.to_yaml(), file=f)
)) # CloudFront distribution my_distribution = t.add_resource(Distribution( 'myDistribution', DependsOn = 'myCert', # config object here DistributionConfig = DistributionConfig( Aliases = [Ref(domain_name)], # list of origins Origins = [ Origin( Id = Ref('AWS::StackName'), DomainName = GetAtt('myBucket', 'DomainName'), S3OriginConfig = S3Origin(OriginAccessIdentity=Join('', [ 'origin-access-identity/cloudfront/', Ref(origin_access_id), ]) ) ) ], # default cache DefaultCacheBehavior = DefaultCacheBehavior( TargetOriginId = Ref('AWS::StackName'), ForwardedValues = ForwardedValues(QueryString=False), ViewerProtocolPolicy = 'redirect-to-https' ), # enable it Enabled = True, # we want http2 in 2017 HttpVersion = 'http2',
s3bucket = t.add_resource( Bucket( "S3Bucket", AccessControl=PublicRead, WebsiteConfiguration=WebsiteConfiguration(IndexDocument="index.html", ErrorDocument="error.html"), )) myDistribution = t.add_resource( Distribution( "myDistribution", DistributionConfig=DistributionConfig( Origins=[ Origin( Id="Origin 1", DomainName=GetAtt(s3bucket, "DomainName"), S3OriginConfig=S3OriginConfig(), ) ], DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="Origin 1", ForwardedValues=ForwardedValues(QueryString=False), ViewerProtocolPolicy="allow-all", ), Enabled=True, HttpVersion="http2", ), )) t.add_output([ Output(
"You will be billed for the AWS resources used if you create " "a stack from this template.") s3dnsname = t.add_parameter( Parameter( "S3DNSName", Description="The DNS name of an existing S3 bucket to use as the " "Cloudfront distribution origin", Type="String", )) myDistribution = t.add_resource( Distribution( "myDistribution", DistributionConfig=DistributionConfig( Origins=[Origin(Id="Origin 1", DomainName=Ref(s3dnsname))], DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="Origin 1", ForwardedValues=ForwardedValues(QueryString=False), ViewerProtocolPolicy="allow-all"), Enabled=True))) t.add_output([ Output("DistributionId", Value=Ref(myDistribution)), Output("DistributionName", Value=Join( "", ["http://", GetAtt(myDistribution, "DomainName")])), ]) print(t.to_json())
def create_template(): template = Template(Description=( "Static website hosted with S3 and CloudFront. " "https://github.com/schlarpc/overengineered-cloudfront-s3-static-website" )) partition_config = add_mapping( template, "PartitionConfig", { "aws": { # the region with the control plane for CloudFront, IAM, Route 53, etc "PrimaryRegion": "us-east-1", # assume that Lambda@Edge replicates to all default enabled regions, and that # future regions will be opt-in. generated with AWS CLI: # aws ec2 describe-regions --all-regions --query "Regions[?OptInStatus=='opt-in-not-required'].RegionName|sort(@)" "DefaultRegions": [ "ap-northeast-1", "ap-northeast-2", "ap-northeast-3", "ap-south-1", "ap-southeast-1", "ap-southeast-2", "ca-central-1", "eu-central-1", "eu-north-1", "eu-west-1", "eu-west-2", "eu-west-3", "sa-east-1", "us-east-1", "us-east-2", "us-west-1", "us-west-2", ], }, # this doesn't actually work, because Lambda@Edge isn't supported in aws-cn "aws-cn": { "PrimaryRegion": "cn-north-1", "DefaultRegions": ["cn-north-1", "cn-northwest-1"], }, }, ) acm_certificate_arn = template.add_parameter( Parameter( "AcmCertificateArn", Description= "Existing ACM certificate to use for serving TLS. 
Overrides HostedZoneId.", Type="String", AllowedPattern="(arn:[^:]+:acm:[^:]+:[^:]+:certificate/.+|)", Default="", )) hosted_zone_id = template.add_parameter( Parameter( "HostedZoneId", Description= "Existing Route 53 zone to use for validating a new TLS certificate.", Type="String", AllowedPattern="(Z[A-Z0-9]+|)", Default="", )) dns_names = template.add_parameter( Parameter( "DomainNames", Description= "Comma-separated list of additional domain names to serve.", Type="CommaDelimitedList", Default="", )) tls_protocol_version = template.add_parameter( Parameter( "TlsProtocolVersion", Description= "CloudFront TLS security policy; see https://amzn.to/2DR91Xq for details.", Type="String", Default="TLSv1.2_2019", )) log_retention_days = template.add_parameter( Parameter( "LogRetentionDays", Description= "Days to keep CloudFront, S3, and Lambda logs. 0 means indefinite retention.", Type="Number", AllowedValues=[0] + CLOUDWATCH_LOGS_RETENTION_OPTIONS, Default=365, )) default_ttl_seconds = template.add_parameter( Parameter( "DefaultTtlSeconds", Description="Cache time-to-live when not set by S3 object headers.", Type="Number", Default=int(datetime.timedelta(minutes=5).total_seconds()), )) enable_price_class_hack = template.add_parameter( Parameter( "EnablePriceClassHack", Description="Cut your bill in half with this one weird trick.", Type="String", Default="false", AllowedValues=["true", "false"], )) retention_defined = add_condition(template, "RetentionDefined", Not(Equals(Ref(log_retention_days), 0))) using_price_class_hack = add_condition( template, "UsingPriceClassHack", Equals(Ref(enable_price_class_hack), "true")) using_acm_certificate = add_condition( template, "UsingAcmCertificate", Not(Equals(Ref(acm_certificate_arn), ""))) using_hosted_zone = add_condition(template, "UsingHostedZone", Not(Equals(Ref(hosted_zone_id), ""))) using_certificate = add_condition( template, "UsingCertificate", Or(Condition(using_acm_certificate), Condition(using_hosted_zone)), ) 
should_create_certificate = add_condition( template, "ShouldCreateCertificate", And(Condition(using_hosted_zone), Not(Condition(using_acm_certificate))), ) using_dns_names = add_condition(template, "UsingDnsNames", Not(Equals(Select(0, Ref(dns_names)), ""))) is_primary_region = "IsPrimaryRegion" template.add_condition( is_primary_region, Equals(Region, FindInMap(partition_config, Partition, "PrimaryRegion")), ) precondition_region_is_primary = template.add_resource( WaitConditionHandle( "PreconditionIsPrimaryRegionForPartition", Condition=is_primary_region, )) log_ingester_dlq = template.add_resource( Queue( "LogIngesterDLQ", MessageRetentionPeriod=int( datetime.timedelta(days=14).total_seconds()), KmsMasterKeyId="alias/aws/sqs", )) log_ingester_role = template.add_resource( Role( "LogIngesterRole", AssumeRolePolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect="Allow", Principal=Principal("Service", "lambda.amazonaws.com"), Action=[sts.AssumeRole], ) ], ), Policies=[ PolicyProperty( PolicyName="DLQPolicy", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[sqs.SendMessage], Resource=[GetAtt(log_ingester_dlq, "Arn")], ) ], ), ) ], )) log_ingester = template.add_resource( Function( "LogIngester", Runtime=PYTHON_RUNTIME, Handler="index.{}".format(log_ingest.handler.__name__), Code=Code(ZipFile=inspect.getsource(log_ingest)), MemorySize=256, Timeout=300, Role=GetAtt(log_ingester_role, "Arn"), DeadLetterConfig=DeadLetterConfig( TargetArn=GetAtt(log_ingester_dlq, "Arn")), )) log_ingester_permission = template.add_resource( Permission( "LogIngesterPermission", FunctionName=GetAtt(log_ingester, "Arn"), Action="lambda:InvokeFunction", Principal="s3.amazonaws.com", SourceAccount=AccountId, )) log_bucket = template.add_resource( Bucket( "LogBucket", # S3 requires this ACL (regardless of bucket policy) or s3:PutBucketLogging fails. 
# When the CloudFront distribution is created, it adds an additional bucket ACL. # That ACL is not possible to model in CloudFormation. AccessControl="LogDeliveryWrite", LifecycleConfiguration=LifecycleConfiguration(Rules=[ LifecycleRule(ExpirationInDays=1, Status="Enabled"), LifecycleRule( AbortIncompleteMultipartUpload= AbortIncompleteMultipartUpload(DaysAfterInitiation=1), Status="Enabled", ), ]), NotificationConfiguration=NotificationConfiguration( LambdaConfigurations=[ LambdaConfigurations(Event="s3:ObjectCreated:*", Function=GetAtt(log_ingester, "Arn")) ]), BucketEncryption=BucketEncryption( ServerSideEncryptionConfiguration=[ ServerSideEncryptionRule( ServerSideEncryptionByDefault= ServerSideEncryptionByDefault( # if we use KMS, we can't read the logs SSEAlgorithm="AES256")) ]), OwnershipControls=OwnershipControls(Rules=[ OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred") ], ), PublicAccessBlockConfiguration=PublicAccessBlockConfiguration( BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True, ), DependsOn=[log_ingester_permission], )) log_ingester_log_group = template.add_resource( LogGroup( "LogIngesterLogGroup", LogGroupName=Join( "", ["/aws/lambda/", Ref(log_ingester)]), RetentionInDays=If(retention_defined, Ref(log_retention_days), NoValue), )) log_ingester_policy = template.add_resource( PolicyType( "LogIngesterPolicy", Roles=[Ref(log_ingester_role)], PolicyName="IngestLogPolicy", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[logs.CreateLogStream, logs.PutLogEvents], Resource=[ Join( ":", [ "arn", Partition, "logs", Region, AccountId, "log-group", "/aws/cloudfront/*", ], ), Join( ":", [ "arn", Partition, "logs", Region, AccountId, "log-group", "/aws/s3/*", ], ), GetAtt(log_ingester_log_group, "Arn"), ], ), Statement( Effect=Allow, Action=[s3.GetObject], Resource=[Join("", [GetAtt(log_bucket, "Arn"), "/*"])], ), ], ), )) bucket = 
template.add_resource( Bucket( "ContentBucket", LifecycleConfiguration=LifecycleConfiguration(Rules=[ # not supported by CFN yet: # LifecycleRule( # Transitions=[ # LifecycleRuleTransition( # StorageClass='INTELLIGENT_TIERING', # TransitionInDays=1, # ), # ], # Status="Enabled", # ), LifecycleRule( AbortIncompleteMultipartUpload= AbortIncompleteMultipartUpload(DaysAfterInitiation=7), Status="Enabled", ) ]), LoggingConfiguration=LoggingConfiguration( DestinationBucketName=Ref(log_bucket), LogFilePrefix="s3/"), BucketEncryption=BucketEncryption( ServerSideEncryptionConfiguration=[ ServerSideEncryptionRule( ServerSideEncryptionByDefault= ServerSideEncryptionByDefault( # Origin Access Identities can't use KMS SSEAlgorithm="AES256")) ]), OwnershipControls=OwnershipControls(Rules=[ OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred") ], ), PublicAccessBlockConfiguration=PublicAccessBlockConfiguration( BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True, ), )) origin_access_identity = template.add_resource( CloudFrontOriginAccessIdentity( "CloudFrontIdentity", CloudFrontOriginAccessIdentityConfig= CloudFrontOriginAccessIdentityConfig( Comment=GetAtt(bucket, "Arn")), )) bucket_policy = template.add_resource( BucketPolicy( "ContentBucketPolicy", Bucket=Ref(bucket), PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Principal=Principal( "CanonicalUser", GetAtt(origin_access_identity, "S3CanonicalUserId"), ), Action=[s3.GetObject], Resource=[Join("", [GetAtt(bucket, "Arn"), "/*"])], ), ], ), )) # Not strictly necessary, as ACLs should take care of this access. However, CloudFront docs # state "In some circumstances [...] S3 resets permissions on the bucket to the default value", # and this allows logging to work without any ACLs in place. 
log_bucket_policy = template.add_resource( BucketPolicy( "LogBucketPolicy", Bucket=Ref(log_bucket), PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Principal=Principal("Service", "delivery.logs.amazonaws.com"), Action=[s3.PutObject], Resource=[ Join( "/", [GetAtt(log_bucket, "Arn"), "cloudfront", "*"]) ], ), Statement( Effect=Allow, Principal=Principal("Service", "delivery.logs.amazonaws.com"), Action=[s3.ListBucket], Resource=[Join("/", [GetAtt(log_bucket, "Arn")])], ), Statement( Effect=Allow, Principal=Principal("Service", "s3.amazonaws.com"), Action=[s3.PutObject], Resource=[ Join("/", [GetAtt(log_bucket, "Arn"), "s3", "*"]) ], ), ], ), )) certificate_validator_dlq = template.add_resource( Queue( "CertificateValidatorDLQ", MessageRetentionPeriod=int( datetime.timedelta(days=14).total_seconds()), KmsMasterKeyId="alias/aws/sqs", Condition=should_create_certificate, )) certificate_validator_role = template.add_resource( Role( "CertificateValidatorRole", AssumeRolePolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect="Allow", Principal=Principal("Service", "lambda.amazonaws.com"), Action=[sts.AssumeRole], ) ], ), Policies=[ PolicyProperty( PolicyName="DLQPolicy", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[sqs.SendMessage], Resource=[ GetAtt(certificate_validator_dlq, "Arn") ], ) ], ), ) ], # TODO scope down ManagedPolicyArns=[ "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole", "arn:aws:iam::aws:policy/AmazonRoute53FullAccess", "arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly", ], Condition=should_create_certificate, )) certificate_validator_function = template.add_resource( Function( "CertificateValidatorFunction", Runtime=PYTHON_RUNTIME, Handler="index.{}".format(certificate_validator.handler.__name__), Code=Code(ZipFile=inspect.getsource(certificate_validator)), MemorySize=256, Timeout=300, 
Role=GetAtt(certificate_validator_role, "Arn"), DeadLetterConfig=DeadLetterConfig( TargetArn=GetAtt(certificate_validator_dlq, "Arn")), Environment=Environment( Variables={ certificate_validator.EnvVars.HOSTED_ZONE_ID.name: Ref(hosted_zone_id) }), Condition=should_create_certificate, )) certificate_validator_log_group = template.add_resource( LogGroup( "CertificateValidatorLogGroup", LogGroupName=Join( "", ["/aws/lambda/", Ref(certificate_validator_function)]), RetentionInDays=If(retention_defined, Ref(log_retention_days), NoValue), Condition=should_create_certificate, )) certificate_validator_rule = template.add_resource( Rule( "CertificateValidatorRule", EventPattern={ "detail-type": ["AWS API Call via CloudTrail"], "detail": { "eventSource": ["acm.amazonaws.com"], "eventName": ["AddTagsToCertificate"], "requestParameters": { "tags": { "key": [certificate_validator_function.title], "value": [GetAtt(certificate_validator_function, "Arn")], } }, }, }, Targets=[ Target( Id="certificate-validator-lambda", Arn=GetAtt(certificate_validator_function, "Arn"), ) ], DependsOn=[certificate_validator_log_group], Condition=should_create_certificate, )) certificate_validator_permission = template.add_resource( Permission( "CertificateValidatorPermission", FunctionName=GetAtt(certificate_validator_function, "Arn"), Action="lambda:InvokeFunction", Principal="events.amazonaws.com", SourceArn=GetAtt(certificate_validator_rule, "Arn"), Condition=should_create_certificate, )) certificate = template.add_resource( Certificate( "Certificate", DomainName=Select(0, Ref(dns_names)), SubjectAlternativeNames=Ref( dns_names), # duplicate first name works fine ValidationMethod="DNS", Tags=Tags( **{ certificate_validator_function.title: GetAtt(certificate_validator_function, "Arn") }), DependsOn=[certificate_validator_permission], Condition=should_create_certificate, )) edge_hook_role = template.add_resource( Role( "EdgeHookRole", AssumeRolePolicyDocument=PolicyDocument( Version="2012-10-17", 
Statement=[ Statement( Effect="Allow", Principal=Principal( "Service", [ "lambda.amazonaws.com", "edgelambda.amazonaws.com" ], ), Action=[sts.AssumeRole], ) ], ), )) edge_hook_function = template.add_resource( Function( "EdgeHookFunction", Runtime=PYTHON_RUNTIME, Handler="index.handler", Code=Code(ZipFile=inspect.getsource(edge_hook)), MemorySize=128, Timeout=3, Role=GetAtt(edge_hook_role, "Arn"), )) edge_hook_function_hash = (hashlib.sha256( json.dumps(edge_hook_function.to_dict(), sort_keys=True).encode("utf-8")).hexdigest()[:10].upper()) edge_hook_version = template.add_resource( Version( "EdgeHookVersion" + edge_hook_function_hash, FunctionName=GetAtt(edge_hook_function, "Arn"), )) replica_log_group_name = Join( "/", [ "/aws/lambda", Join( ".", [ FindInMap(partition_config, Partition, "PrimaryRegion"), Ref(edge_hook_function), ], ), ], ) edge_hook_role_policy = template.add_resource( PolicyType( "EdgeHookRolePolicy", PolicyName="write-logs", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[logs.CreateLogStream, logs.PutLogEvents], Resource=[ Join( ":", [ "arn", Partition, "logs", "*", AccountId, "log-group", replica_log_group_name, "log-stream", "*", ], ), ], ), ], ), Roles=[Ref(edge_hook_role)], )) stack_set_administration_role = template.add_resource( Role( "StackSetAdministrationRole", AssumeRolePolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Principal=Principal("Service", "cloudformation.amazonaws.com"), Action=[sts.AssumeRole], ), ], ), )) stack_set_execution_role = template.add_resource( Role( "StackSetExecutionRole", AssumeRolePolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Principal=Principal( "AWS", GetAtt(stack_set_administration_role, "Arn")), Action=[sts.AssumeRole], ), ], ), Policies=[ PolicyProperty( PolicyName="create-stackset-instances", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( 
Effect=Allow, Action=[ cloudformation.DescribeStacks, logs.DescribeLogGroups, ], Resource=["*"], ), # stack instances communicate with the CFN service via SNS Statement( Effect=Allow, Action=[sns.Publish], NotResource=[ Join( ":", [ "arn", Partition, "sns", "*", AccountId, "*" ], ) ], ), Statement( Effect=Allow, Action=[ logs.CreateLogGroup, logs.DeleteLogGroup, logs.PutRetentionPolicy, logs.DeleteRetentionPolicy, ], Resource=[ Join( ":", [ "arn", Partition, "logs", "*", AccountId, "log-group", replica_log_group_name, "log-stream", "", ], ), ], ), Statement( Effect=Allow, Action=[ cloudformation.CreateStack, cloudformation.DeleteStack, cloudformation.UpdateStack, ], Resource=[ Join( ":", [ "arn", Partition, "cloudformation", "*", AccountId, Join( "/", [ "stack", Join( "-", [ "StackSet", StackName, "*" ], ), ], ), ], ) ], ), ], ), ), ], )) stack_set_administration_role_policy = template.add_resource( PolicyType( "StackSetAdministrationRolePolicy", PolicyName="assume-execution-role", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[sts.AssumeRole], Resource=[GetAtt(stack_set_execution_role, "Arn")], ), ], ), Roles=[Ref(stack_set_administration_role)], )) edge_log_groups = template.add_resource( StackSet( "EdgeLambdaLogGroupStackSet", AdministrationRoleARN=GetAtt(stack_set_administration_role, "Arn"), ExecutionRoleName=Ref(stack_set_execution_role), StackSetName=Join("-", [StackName, "EdgeLambdaLogGroup"]), PermissionModel="SELF_MANAGED", Description="Multi-region log groups for Lambda@Edge replicas", Parameters=[ StackSetParameter( ParameterKey="LogGroupName", ParameterValue=replica_log_group_name, ), StackSetParameter( ParameterKey="LogRetentionDays", ParameterValue=Ref(log_retention_days), ), ], OperationPreferences=OperationPreferences( FailureToleranceCount=0, MaxConcurrentPercentage=100, ), StackInstancesGroup=[ StackInstances( DeploymentTargets=DeploymentTargets(Accounts=[AccountId]), 
Regions=FindInMap(partition_config, Partition, "DefaultRegions"), ) ], TemplateBody=create_log_group_template().to_json(indent=None), DependsOn=[stack_set_administration_role_policy], )) price_class_distribution = template.add_resource( Distribution( "PriceClassDistribution", DistributionConfig=DistributionConfig( Comment="Dummy distribution used for price class hack", DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="default", ViewerProtocolPolicy="allow-all", ForwardedValues=ForwardedValues(QueryString=False), ), Enabled=True, Origins=[ Origin(Id="default", DomainName=GetAtt(bucket, "DomainName")) ], IPV6Enabled=True, ViewerCertificate=ViewerCertificate( CloudFrontDefaultCertificate=True), PriceClass="PriceClass_All", ), Condition=using_price_class_hack, )) distribution = template.add_resource( Distribution( "ContentDistribution", DistributionConfig=DistributionConfig( Enabled=True, Aliases=If(using_dns_names, Ref(dns_names), NoValue), Logging=Logging(Bucket=GetAtt(log_bucket, "DomainName"), Prefix="cloudfront/"), DefaultRootObject="index.html", Origins=[ Origin( Id="default", DomainName=GetAtt(bucket, "DomainName"), S3OriginConfig=S3OriginConfig( OriginAccessIdentity=Join( "", [ "origin-access-identity/cloudfront/", Ref(origin_access_identity), ], )), ) ], DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="default", Compress=True, ForwardedValues=ForwardedValues(QueryString=False), ViewerProtocolPolicy="redirect-to-https", DefaultTTL=Ref(default_ttl_seconds), LambdaFunctionAssociations=[ LambdaFunctionAssociation( EventType="origin-request", LambdaFunctionARN=Ref(edge_hook_version), ) ], ), HttpVersion="http2", IPV6Enabled=True, ViewerCertificate=ViewerCertificate( AcmCertificateArn=If( using_acm_certificate, Ref(acm_certificate_arn), If(using_hosted_zone, Ref(certificate), NoValue), ), SslSupportMethod=If(using_certificate, "sni-only", NoValue), CloudFrontDefaultCertificate=If(using_certificate, NoValue, True), 
MinimumProtocolVersion=Ref(tls_protocol_version), ), PriceClass=If(using_price_class_hack, "PriceClass_100", "PriceClass_All"), ), DependsOn=[ bucket_policy, log_ingester_policy, edge_log_groups, precondition_region_is_primary, ], )) distribution_log_group = template.add_resource( LogGroup( "DistributionLogGroup", LogGroupName=Join( "", ["/aws/cloudfront/", Ref(distribution)]), RetentionInDays=If(retention_defined, Ref(log_retention_days), NoValue), )) bucket_log_group = template.add_resource( LogGroup( "BucketLogGroup", LogGroupName=Join("", ["/aws/s3/", Ref(bucket)]), RetentionInDays=If(retention_defined, Ref(log_retention_days), NoValue), )) template.add_output(Output("DistributionId", Value=Ref(distribution))) template.add_output( Output("DistributionDomain", Value=GetAtt(distribution, "DomainName"))) template.add_output( Output( "DistributionDnsTarget", Value=If( using_price_class_hack, GetAtt(price_class_distribution, "DomainName"), GetAtt(distribution, "DomainName"), ), )) template.add_output( Output( "DistributionUrl", Value=Join("", ["https://", GetAtt(distribution, "DomainName"), "/"]), )) template.add_output(Output("ContentBucketArn", Value=GetAtt(bucket, "Arn"))) return template
api_domain_name = "api." + root_domain_name aliases = [api_domain_name] template = Template() distribution = template.add_resource( Distribution( "ApiDistribution", DistributionConfig=DistributionConfig( Aliases=aliases, Origins=[ Origin( Id="ApiGatewayOrigin", DomainName=api_url, CustomOriginConfig=CustomOriginConfig( HTTPPort=80, HTTPSPort=443, # ApiGateway 는 https 만 허용한다. OriginProtocolPolicy="https-only"), ) ], ViewerCertificate=ViewerCertificate( # 인증키는 미국동부(버지니아 북부) 리전에서 생성한 것만 사용가능하다. AcmCertificateArn=acm_certificate_arn, SslSupportMethod='sni-only'), DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="ApiGatewayOrigin", # ApiGateway 는 https 만 허용한다. ViewerProtocolPolicy="https-only", ForwardedValues=ForwardedValues(QueryString=True), # ApiGateway 의 TTL 을 최대 10분으로 설정한다.
MinTTL=120, # 2 minutes DefaultTTL=300, # 5 minutes MaxTTL=300, # 5 minutes Compress=True, LambdaFunctionAssociations=[LambdaFunctionAssociation( EventType='origin-request', LambdaFunctionARN=VersionRef(rewrite_assets_function), )], )], Enabled=True, HttpVersion='http2', IPV6Enabled=True, Origins=[Origin( DomainName=Join('', [Ref(frontend_bucket), '.s3-website-', Ref(AWS_REGION), '.amazonaws.com']), Id='S3', CustomOriginConfig=CustomOriginConfig( OriginProtocolPolicy='http-only', ), )], PriceClass='PriceClass_100', ViewerCertificate=ViewerCertificate( AcmCertificateArn=Ref(cloudfront_certificate), SslSupportMethod='sni-only', MinimumProtocolVersion='TLSv1.1_2016', # We might need to raise this ), ), )) template.add_resource(RecordSetGroup( "DnsRecords", HostedZoneId=ImportValue(Join('-', [Ref(dns_stack), 'HostedZoneId'])),
ForwardedValues=ForwardedValues( QueryString=False, ), Compress=True, )], Enabled=True, HttpVersion='http2', IPV6Enabled=True, Origins=[Origin( Id='apigateway', DomainName=Join("", [Ref(api_gateway), ".execute-api.", Ref(AWS_REGION), ".amazonaws.com"]), CustomOriginConfig=CustomOriginConfig( HTTPPort=80, HTTPSPort=443, OriginProtocolPolicy='https-only', ), OriginCustomHeaders=[OriginCustomHeader( HeaderName='x-api-key', HeaderValue=api_key_secret, ), OriginCustomHeader( HeaderName='x-video-table', HeaderValue=Ref(video_table), )], )], PriceClass='PriceClass_100', ViewerCertificate=ViewerCertificate( AcmCertificateArn=Ref(cloudfront_certificate), SslSupportMethod='sni-only', MinimumProtocolVersion='TLSv1.1_2016', # We might need to raise this ), ), ))
def create_cloudfront_template():
    """Build a CloudFormation template for a CloudFront distribution backed
    by a private S3 bucket (accessed through an Origin Access Identity),
    plus a Route53 alias record, and write it to ./cloudfront.yml.

    Template parameters:
      - Cname: alternate domain name (alias) for the distribution.
      - AcmCertificateArn: ACM certificate ARN used for the alias.
      - HostZoneId: Route53 hosted zone that receives the alias A record.
    """
    template = Template()

    cname = template.add_parameter(
        parameter=Parameter(title='Cname', Type='String'))
    acm_certificate_arn = template.add_parameter(
        parameter=Parameter(title='AcmCertificateArn', Type='String'))
    host_zone_id = template.add_parameter(
        parameter=Parameter(title='HostZoneId', Type='String'))

    bucket = template.add_resource(
        resource=Bucket(title='SampleBucket',
                        BucketName=Sub('sample-bucket-${AWS::AccountId}')))

    identity = template.add_resource(resource=CloudFrontOriginAccessIdentity(
        title='SampleOriginAccessIdentity',
        CloudFrontOriginAccessIdentityConfig=
        CloudFrontOriginAccessIdentityConfig(Comment='sample')))

    # Allow CloudFront (via the OAI's canonical user) to read bucket objects.
    template.add_resource(resource=BucketPolicy(
        title='SampleBucketPolicy',
        Bucket=Ref(bucket),
        PolicyDocument={
            'Statement': [{
                'Action': 's3:GetObject',
                'Effect': 'Allow',
                'Resource': Join(delimiter='/',
                                 values=[GetAtt(bucket, 'Arn'), '*']),
                'Principal': {
                    'CanonicalUser': GetAtt(logicalName=identity,
                                            attrName='S3CanonicalUserId')
                }
            }]
        }))

    # The origin's Id and the cache behavior's TargetOriginId must match
    # exactly; build the expression once so the two cannot drift apart.
    origin_id = Sub('S3-${' + bucket.title + '}')

    distribution = template.add_resource(resource=Distribution(
        title='SampleDistribution',
        DistributionConfig=DistributionConfig(
            Aliases=[Ref(cname)],
            DefaultCacheBehavior=DefaultCacheBehavior(
                ForwardedValues=ForwardedValues(QueryString=True),
                TargetOriginId=origin_id,
                ViewerProtocolPolicy='redirect-to-https',
            ),
            Enabled=True,
            Origins=[
                Origin(Id=origin_id,
                       DomainName=Sub('${' + bucket.title +
                                      '}.s3.amazonaws.com'),
                       S3OriginConfig=S3OriginConfig(OriginAccessIdentity=Sub(
                           'origin-access-identity/cloudfront/${' +
                           identity.title + '}')))
            ],
            ViewerCertificate=ViewerCertificate(
                AcmCertificateArn=Ref(acm_certificate_arn),
                SslSupportMethod='sni-only'))))

    # 'Z2FDTNDATAQYW2' is the fixed, global hosted-zone id AWS documents for
    # alias records that target any CloudFront distribution.
    template.add_resource(resource=RecordSetType(
        title='SampleRecordSet',
        AliasTarget=AliasTarget(HostedZoneId='Z2FDTNDATAQYW2',
                                DNSName=GetAtt(logicalName=distribution,
                                               attrName='DomainName')),
        HostedZoneId=Ref(host_zone_id),
        Name=Ref(cname),
        Type='A'))

    # Pin the encoding so the emitted YAML is identical across platforms.
    with open('./cloudfront.yml', mode='w', encoding='utf-8') as file:
        file.write(template.to_yaml())
MaxTTL=300 # 5 minutes ), Enabled=True, HttpVersion='http2', IPV6Enabled=True, Origins=[ Origin( Id='S3', DomainName=Join('', [ ImportValue( Join('-', [ Ref(video_engine_stack), 'VideoBucket', 'Ref' ])), '.s3.', Ref(AWS_REGION), '.amazonaws.com' ]), S3OriginConfig=S3OriginConfig(OriginAccessIdentity=Join( '', [ 'origin-access-identity/cloudfront/', ImportValue( Join('-', [ Ref(video_engine_stack), 'VideoOriginAccessIdentity', 'Ref' ])), ]), ), ) ], PriceClass='PriceClass_100', ViewerCertificate=ViewerCertificate( AcmCertificateArn=Ref(cloudfront_certificate), SslSupportMethod='sni-only', MinimumProtocolVersion=
"S3 origin. " "**WARNING** This template creates a CloudFront distribution. " "You will be billed for the AWS resources used if you create " "a stack from this template.") s3dnsname = t.add_parameter(Parameter( "S3DNSName", Description="The DNS name of an existing S3 bucket to use as the " "Cloudfront distribution origin", Type="String", )) myDistribution = t.add_resource(Distribution( "myDistribution", DistributionConfig=DistributionConfig( Origins=[Origin(Id="Origin 1", DomainName=Ref(s3dnsname), S3OriginConfig=S3Origin())], DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="Origin 1", ForwardedValues=ForwardedValues( QueryString=False ), ViewerProtocolPolicy="allow-all"), Enabled=True, HttpVersion='http2' ) )) t.add_output([ Output("DistributionId", Value=Ref(myDistribution)), Output( "DistributionName",
def create_cloud_front_template():
    """Assemble a CloudFormation template that serves a private S3 bucket
    through CloudFront (using an Origin Access Identity) and dump it as
    YAML to ./cloudfront.yml.
    """
    tpl = Template()

    content_bucket = tpl.add_resource(resource=Bucket(
        title='SampleBucket',
        BucketName=Sub('sample-bucket-${AWS::AccountId}')))

    access_identity = tpl.add_resource(resource=CloudFrontOriginAccessIdentity(
        title='SampleOriginAccessIdentity',
        CloudFrontOriginAccessIdentityConfig=
        CloudFrontOriginAccessIdentityConfig(Comment='sample')))

    # Grant the OAI's canonical user read access to every object in the
    # bucket so CloudFront can fetch content on behalf of viewers.
    tpl.add_resource(resource=BucketPolicy(
        title='SampleBucketPolicy',
        Bucket=Ref(content_bucket),
        PolicyDocument={
            'Statement': [{
                'Action': 's3:GetObject',
                'Effect': 'Allow',
                'Resource': Join(
                    delimiter='/',
                    values=[GetAtt(content_bucket, 'Arn'), '*']),
                'Principal': {
                    'CanonicalUser': GetAtt(
                        logicalName=access_identity,
                        attrName='S3CanonicalUserId')
                }
            }]
        }))

    tpl.add_resource(resource=Distribution(
        title='SampleDistribution',
        DistributionConfig=DistributionConfig(
            Enabled=True,
            Origins=[
                Origin(
                    Id=Sub('S3-${' + content_bucket.title + '}'),
                    DomainName=Sub(
                        '${' + content_bucket.title + '}.s3.amazonaws.com'),
                    S3OriginConfig=S3OriginConfig(
                        OriginAccessIdentity=Sub(
                            'origin-access-identity/cloudfront/${'
                            + access_identity.title + '}')))
            ],
            DefaultCacheBehavior=DefaultCacheBehavior(
                ForwardedValues=ForwardedValues(
                    QueryString=True,
                ),
                TargetOriginId=Sub('S3-${' + content_bucket.title + '}'),
                ViewerProtocolPolicy='redirect-to-https',
            ),
            DefaultRootObject='index.html',
            # When S3 denies access it returns 403; present the custom
            # 404 page (with a 200 status) instead, cached for 30s.
            CustomErrorResponses=[
                CustomErrorResponse(
                    ErrorCode=403,
                    ResponseCode=200,
                    ResponsePagePath='/404.html',
                    ErrorCachingMinTTL=30)
            ])))

    with open('./cloudfront.yml', mode='w') as file:
        file.write(tpl.to_yaml())
if not www_to_root: origin_static_url = "www.{0}".format(origin_static_url) StaticSiteBucketDistribution = t.add_resource( Distribution("StaticSiteBucketDistribution", DistributionConfig=DistributionConfig( Aliases=aliases, DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="staticSiteBucketOrigin", ViewerProtocolPolicy="allow-all", ForwardedValues=ForwardedValues(QueryString=False)), DefaultRootObject=config["index_document"], Origins=[ Origin( Id="staticSiteBucketOrigin", DomainName=origin_static_url, CustomOriginConfig=CustomOriginConfig( OriginProtocolPolicy="http-only"), ) ], Enabled=True, PriceClass="PriceClass_100"), DependsOn=["StaticSiteBucket", "wwwStaticSiteBucket"])) StaticSiteBucketPolicy = t.add_resource( BucketPolicy( "StaticSiteBucketPolicy", Bucket=Ref(StaticSiteBucket), PolicyDocument={ "Statement": [{ "Action": ["s3:GetObject"], "Effect": "Allow",
}, "/*"]] }, "Principal": "*" }] })) distribution = template.add_resource( Distribution( "AssetsDistribution", DistributionConfig=DistributionConfig( Aliases=aliases, Origins=[ Origin( Id="AssetsBucketOrigin", DomainName=GetAtt(assets_bucket, "DomainName"), CustomOriginConfig=CustomOriginConfig( HTTPPort=80, HTTPSPort=443, OriginProtocolPolicy="http-only"), ) ], ViewerCertificate=ViewerCertificate( # 인증키는 미국동부(버지니아 북부) 리전에서 생성한 것만 사용가능하다. AcmCertificateArn=acm_certificate_arn, SslSupportMethod='sni-only'), DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="AssetsBucketOrigin", ViewerProtocolPolicy="allow-all", ForwardedValues=ForwardedValues(QueryString=True)), DefaultRootObject="index.html", Enabled=True, PriceClass="PriceClass_All",