def add_conditions(self):
    """Set up template conditions."""
    template = self.template
    variables = self.get_variables()
    template.add_condition(
        'SSHKeySpecified',
        And(Not(Equals(variables['KeyName'].ref, '')),
            Not(Equals(variables['KeyName'].ref, 'undefined'))))
    template.add_condition(
        'MissingVPNAMI',
        Or(Equals(variables['VPNAMI'].ref, ''),
           Equals(variables['VPNAMI'].ref, 'undefined')))
    template.add_condition(
        'RHELUserData',
        Not(Equals(variables['VPNOS'].ref, 'ubuntu-16.04')))
    template.add_condition(
        'ChefRunListSpecified',
        And(Not(Equals(variables['ChefRunList'].ref, '')),
            Not(Equals(variables['ChefRunList'].ref, 'undefined'))))
    template.add_condition(
        'PublicRouteTableSpecified',
        And(Not(Equals(variables['PublicRouteTable'].ref, '')),
            Not(Equals(variables['PublicRouteTable'].ref, 'undefined'))))
    template.add_condition(
        'PublicSubnetsOmitted',
        Equals(Join('', variables['PublicSubnets'].ref), ''))
    for i in range(AZS):
        template.add_condition(
            '%iPrivateSubnetsCreated' % (i + 1),
            Equals(variables['PrivateSubnetCount'].ref, str(i + 1)))
    template.add_condition(
        'PrivateSubnetCountOmitted',
        Equals(variables['PrivateSubnetCount'].ref, '0'))
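# A hedged sketch (not part of the blueprint above): conditions like
# 'SSHKeySpecified' are typically consumed with Fn::If, where AWS::NoValue
# drops the property entirely when the condition is false. The instance
# resource and its properties here are hypothetical.
from troposphere import If, NoValue, ec2

vpn_server = template.add_resource(
    ec2.Instance(
        'VpnServer',
        ImageId=variables['VPNAMI'].ref,
        KeyName=If('SSHKeySpecified', variables['KeyName'].ref, NoValue),
        InstanceType='t3.micro',
    ))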
def create_conditions(self):
    t = self.template
    ssl_condition = Not(Equals(Ref("ELBCertName"), ""))
    t.add_condition("UseHTTPS", ssl_condition)
    t.add_condition("UseHTTP", Not(ssl_condition))
    t.add_condition(
        "UseIAMCert",
        Not(Equals(Ref("ELBCertType"), "acm")))
    t.add_condition(
        "EnableSNSEvents",
        Equals(Ref("EventsBackend"), "sns"))
    t.add_condition(
        "CreateSNSTopic",
        And(Equals(Ref("EventsSNSTopicName"), ""),
            Condition("EnableSNSEvents")))
    t.add_condition(
        "EnableCloudwatchLogs",
        Equals(Ref("RunLogsBackend"), "cloudwatch"))
    t.add_condition(
        "CreateRunLogsGroup",
        And(Equals(Ref("RunLogsCloudwatchGroup"), ""),
            Condition("EnableCloudwatchLogs")))
    t.add_condition(
        "EnableAppEventStream",
        Equals(Ref("LogsStreamer"), "kinesis"))
    t.add_condition(
        "RequireCommitMessages",
        Equals(Ref("RequireCommitMessages"), "true"))
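# Sketch of a likely consumer (assumed, not shown in the original): the
# "CreateSNSTopic" condition gates an SNS topic that only exists when no
# pre-existing topic name was supplied.
from troposphere import Ref, sns

t.add_resource(
    sns.Topic(
        "EventsSNSTopic",
        Condition="CreateSNSTopic",
        DisplayName=Ref("AWS::StackName"),
    ))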
def create_conditions(self): self.template.add_condition("CreateELB", Not(Equals(Ref("ELBHostName"), ""))) self.template.add_condition("SetupDNS", Not(Equals(Ref("BaseDomain"), ""))) self.template.add_condition("UseSSL", Not(Equals(Ref("ELBCertName"), ""))) self.template.add_condition( "CreateSSLELB", And(Condition("CreateELB"), Condition("UseSSL"))) self.template.add_condition( "SetupELBDNS", And(Condition("CreateELB"), Condition("SetupDNS")))
def create_conditions(self): t = self.template t.add_condition("DefinedNotificationArn", Not(Equals(Ref("NotificationTopicArn"), ""))) t.add_condition("DefinedPort", Not(Equals(Ref("Port"), "0"))) t.add_condition( "DefinedAvailabilityZones", Not(Equals(Join(",", Ref("PreferredCacheClusterAZs")), ""))) t.add_condition("DefinedSnapshotArns", Not(Equals(Join(",", Ref("SnapshotArns")), ""))) t.add_condition("DefinedSnapshotWindow", Not(Equals(Ref("SnapshotWindow"), ""))) # DNS Conditions t.add_condition("HasInternalZone", Not(Equals(Ref("InternalZoneId"), ""))) t.add_condition("HasInternalZoneName", Not(Equals(Ref("InternalZoneName"), ""))) t.add_condition("HasInternalHostname", Not(Equals(Ref("InternalHostname"), ""))) t.add_condition( "CreateInternalHostname", And(Condition("HasInternalZone"), Condition("HasInternalZoneName"), Condition("HasInternalHostname")))
def create_conditions(self): t = self.template t.add_condition( "HasInternalZone", Not(Equals(Ref("InternalZoneId"), ""))) t.add_condition( "HasInternalZoneName", Not(Equals(Ref("InternalZoneName"), ""))) t.add_condition( "HasInternalHostname", Not(Equals(Ref("InternalHostname"), ""))) t.add_condition( "CreateInternalHostname", And(Condition("HasInternalZone"), Condition("HasInternalZoneName"), Condition("HasInternalHostname"))) t.add_condition( "HasProvisionedIOPS", Not(Equals(Ref("IOPS"), "0"))) t.add_condition( "HasStorageType", Not(Equals(Ref("StorageType"), "default"))) t.add_condition( "HasDBSnapshotIdentifier", Not(Equals(Ref("DBSnapshotIdentifier"), "")))
def condition_bucket_name_provided(self) -> str:
    """Condition BucketNameProvided."""
    return self.template.add_condition(
        "BucketNameProvided",
        And(
            Not(Equals(self.variables["BucketName"].ref, "undefined")),
            Not(Equals(self.variables["BucketName"].ref, "")),
        ),
    )
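# Because troposphere's Template.add_condition returns the condition name
# (hence the "-> str" annotation), the method above can feed Fn::If directly;
# a minimal sketch with an assumed bucket resource:
from troposphere import If, NoValue, s3

bucket = self.template.add_resource(
    s3.Bucket(
        "Bucket",
        BucketName=If(
            self.condition_bucket_name_provided(),
            self.variables["BucketName"].ref,
            NoValue,  # let CloudFormation generate a bucket name
        ),
    ))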
def create_conditions(self): self.template.add_condition("HasInternalZone", Not(Equals(Ref("InternalZoneId"), ""))) self.template.add_condition("HasInternalZoneName", Not(Equals(Ref("InternalZoneName"), ""))) self.template.add_condition("HasInternalHostname", Not(Equals(Ref("InternalHostname"), ""))) self.template.add_condition( "CreateInternalHostname", And(Condition("HasInternalZone"), Condition("HasInternalZoneName"), Condition("HasInternalHostname")))
def add_conditions(self): """Set up AZ conditions.""" template = self.template for i in range(AZS): template.add_condition( 'PublicAZ%i' % (i + 1), Not(Equals(Ref('PublicSubnet%i' % (i + 1)), ''))) template.add_condition( 'PrivateAZ%i' % (i + 1), Not(Equals(Ref('PrivateSubnet%i' % (i + 1)), ''))) template.add_condition( 'CreateNATGateway%i' % (i + 1), And(Condition('PublicAZ%i' % (i + 1)), Condition('PrivateAZ%i' % (i + 1))))
def create_conditions(self):
    t = self.template
    t.add_condition(
        'HasInternalZone',
        Not(Equals(Ref('InternalZoneId'), '')))
    t.add_condition(
        'HasInternalZoneName',
        Not(Equals(Ref('InternalZoneName'), '')))
    t.add_condition(
        'HasInternalHostname',
        Not(Equals(Ref('InternalHostname'), '')))
    t.add_condition(
        'CreateInternalHostname',
        And(Condition('HasInternalZone'),
            Condition('HasInternalZoneName'),
            Condition('HasInternalHostname')))
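# Hypothetical consumer of the combined condition: an internal Route 53
# record that only materializes when zone id, zone name and hostname are all
# non-empty. The record target ('EndpointAddress') is an assumption.
from troposphere import Join, Ref, route53

t.add_resource(
    route53.RecordSetType(
        'InternalHostnameRecord',
        Condition='CreateInternalHostname',
        HostedZoneId=Ref('InternalZoneId'),
        Name=Join('.', [Ref('InternalHostname'), Ref('InternalZoneName')]),
        Type='CNAME',
        TTL='120',
        ResourceRecords=[Ref('EndpointAddress')],
    ))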
label="Enable SFTP Server", ) use_sftp_condition = "UseSFTPServerCondition" use_sftp_with_kms_condition = "UseSFTPWithKMSCondition" use_sftp_without_kms_condition = "UseSFTPWithoutKMSCondition" template.add_condition(use_sftp_condition, Equals(Ref(use_sftp_server), "true")) template.add_condition( # If this condition is true, we need to create policies and roles that give # access to the customer KMS. use_sftp_with_kms_condition, And( Equals(Ref(use_sftp_server), "true"), Condition(use_aes256_encryption_cond), Condition(use_cmk_arn), ), ) template.add_condition( # If this condition is true, we need to create policies and roles, # but they should not give access to customer KMS. use_sftp_without_kms_condition, And(Equals(Ref(use_sftp_server), "true"), Not(Condition(use_cmk_arn))), ) transfer_server = transfer.Server( "TransferServer", template=template, Condition=use_sftp_condition, IdentityProviderType="SERVICE_MANAGED",
def main(args):
    number_of_vol = 5

    t = Template()
    availability_zone = t.add_parameter(
        Parameter(
            "AvailabilityZone",
            Type="String",
            Description="Availability Zone the cluster will launch into. "
            "THIS IS REQUIRED",
        ))
    volume_size = t.add_parameter(
        Parameter(
            "VolumeSize",
            Type="CommaDelimitedList",
            Description="Size of EBS volume in GB, if creating a new one"))
    volume_type = t.add_parameter(
        Parameter(
            "VolumeType",
            Type="CommaDelimitedList",
            Description="Type of volume to create either new or from snapshot"))
    volume_iops = t.add_parameter(
        Parameter(
            "VolumeIOPS",
            Type="CommaDelimitedList",
            Description="Number of IOPS for volume type io1. "
            "Not used for other volume types.",
        ))
    ebs_encryption = t.add_parameter(
        Parameter(
            "EBSEncryption",
            Type="CommaDelimitedList",
            Description="Boolean flag to use EBS encryption for /shared volume. "
            "(Not to be used for snapshots)",
        ))
    ebs_kms_id = t.add_parameter(
        Parameter(
            "EBSKMSKeyId",
            Type="CommaDelimitedList",
            Description="KMS ARN for customer created master key, "
            "will be used for EBS encryption",
        ))
    ebs_volume_id = t.add_parameter(
        Parameter("EBSVolumeId",
                  Type="CommaDelimitedList",
                  Description="Existing EBS volume Id"))
    ebs_snapshot_id = t.add_parameter(
        Parameter(
            "EBSSnapshotId",
            Type="CommaDelimitedList",
            Description="Id of EBS snapshot if using snapshot as source for volume",
        ))
    ebs_vol_num = t.add_parameter(
        Parameter(
            "NumberOfEBSVol",
            Type="Number",
            Description="Number of EBS Volumes the user requested, up to %s"
            % number_of_vol,
        ))

    use_vol = [None] * number_of_vol
    use_existing_ebs_volume = [None] * number_of_vol
    v = [None] * number_of_vol
    for i in range(number_of_vol):
        if i == 0:
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                Equals(Select(str(i), Ref(ebs_volume_id)), "NONE"))
        elif i == 1:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                Not(Equals(Ref(ebs_vol_num), str(i))))
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                And(Condition(use_vol[i]),
                    Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")),
            )
        else:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                And(Not(Equals(Ref(ebs_vol_num), str(i))),
                    Condition(use_vol[i - 1])))
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                And(Condition(use_vol[i]),
                    Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")),
            )
        use_ebs_iops = t.add_condition(
            "Vol%s_UseEBSPIOPS" % (i + 1),
            Equals(Select(str(i), Ref(volume_type)), "io1"))
        use_vol_size = t.add_condition(
            "Vol%s_UseVolumeSize" % (i + 1),
            Not(Equals(Select(str(i), Ref(volume_size)), "NONE")))
        use_vol_type = t.add_condition(
            "Vol%s_UseVolumeType" % (i + 1),
            Not(Equals(Select(str(i), Ref(volume_type)), "NONE")))
        use_ebs_encryption = t.add_condition(
            "Vol%s_UseEBSEncryption" % (i + 1),
            Equals(Select(str(i), Ref(ebs_encryption)), "true"))
        use_ebs_kms_key = t.add_condition(
            "Vol%s_UseEBSKMSKey" % (i + 1),
            And(Condition(use_ebs_encryption),
                Not(Equals(Select(str(i), Ref(ebs_kms_id)), "NONE"))),
        )
        use_ebs_snapshot = t.add_condition(
            "Vol%s_UseEBSSnapshot" % (i + 1),
            Not(Equals(Select(str(i), Ref(ebs_snapshot_id)), "NONE")))
        use_existing_ebs_volume[i] = t.add_condition(
            "Vol%s_UseExistingEBSVolume" % (i + 1),
            Not(Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")))
        v[i] = t.add_resource(
            ec2.Volume(
                "Volume%s" % (i + 1),
                AvailabilityZone=Ref(availability_zone),
                VolumeType=If(use_vol_type, Select(str(i), Ref(volume_type)),
                              "gp2"),
                Size=If(
                    use_ebs_snapshot, NoValue,
                    If(use_vol_size, Select(str(i), Ref(volume_size)), "20")),
                SnapshotId=If(use_ebs_snapshot,
                              Select(str(i), Ref(ebs_snapshot_id)), NoValue),
                Iops=If(use_ebs_iops, Select(str(i), Ref(volume_iops)),
                        NoValue),
                Encrypted=If(use_ebs_encryption,
                             Select(str(i), Ref(ebs_encryption)), NoValue),
                KmsKeyId=If(use_ebs_kms_key, Select(str(i), Ref(ebs_kms_id)),
                            NoValue),
                Condition=create_vol,
            ))

    outputs = [None] * number_of_vol
    vol_to_return = [None] * number_of_vol
    for i in range(number_of_vol):
        vol_to_return[i] = If(use_existing_ebs_volume[i],
                              Select(str(i), Ref(ebs_volume_id)), Ref(v[i]))
        if i == 0:
            outputs[i] = vol_to_return[i]
        else:
            outputs[i] = If(use_vol[i], Join(",", vol_to_return[:(i + 1)]),
                            outputs[i - 1])

    t.add_output(
        Output("Volumeids",
               Description="Volume IDs of the resulting EBS volumes",
               Value=outputs[number_of_vol - 1]))

    json_file_path = args.target_path
    output_file = open(json_file_path, "w")
    output_file.write(t.to_json())
    output_file.close()
"the existing primary fails.", Type="String", AllowedValues=["true", "false"], Default="false", ), group="Redis", label="Enable automatic failover", ) redis_uses_automatic_failover = "RedisAutomaticFailoverCondition" template.add_condition(redis_uses_automatic_failover, Equals(Ref(redis_automatic_failover), "true")) secure_redis_condition = "SecureRedisCondition" template.add_condition( secure_redis_condition, And(Condition(using_redis_condition), Condition(use_aes256_encryption_cond))) using_either_cache_condition = "EitherCacheCondition" template.add_condition( using_either_cache_condition, Or(Condition(using_memcached_condition), Condition(using_redis_condition))) # Subnet and security group shared by both clusters cache_subnet_group = elasticache.SubnetGroup( "CacheSubnetGroup", template=template, Description="Subnets available for the cache instance", Condition=using_either_cache_condition, SubnetIds=[Ref(private_subnet_a), Ref(private_subnet_b)],
GENERATED_CLUSTER_NAME_CON = Equals(Ref(ecs_params.CLUSTER_NAME),
                                    ecs_params.CLUSTER_NAME.Default)

NOT_USE_CLUSTER_SG_CON_T = "NotUseClusterSecurityGroupCondition"
NOT_USE_CLUSTER_SG_CON = Equals(Ref(ecs_params.CLUSTER_SG_ID),
                                ecs_params.CLUSTER_SG_ID.Default)

USE_CLUSTER_SG_CON_T = "UseClusterSecurityGroupCondition"
USE_CLUSTER_SG_CON = Not(Condition(NOT_USE_CLUSTER_SG_CON_T))

SERVICE_COUNT_ZERO_CON_T = "ServiceCountIsZeroCondition"
SERVICE_COUNT_ZERO_CON = Equals(Ref(ecs_params.SERVICE_COUNT), "0")

USE_FARGATE_CON_T = "UseFargateCondition"
USE_FARGATE_CON = Equals(Ref(ecs_params.LAUNCH_TYPE), "FARGATE")

SERVICE_COUNT_ZERO_AND_FARGATE_CON_T = "ServiceCountZeroAndFargate"
SERVICE_COUNT_ZERO_AND_FARGATE_CON = And(Condition(USE_FARGATE_CON_T),
                                         Condition(SERVICE_COUNT_ZERO_CON_T))

NOT_USE_HOSTNAME_CON_T = "NotUseMicroserviceHostnameCondition"
NOT_USE_HOSTNAME_CON = Equals(Ref(ecs_params.SERVICE_HOSTNAME),
                              ecs_params.SERVICE_HOSTNAME.Default)

USE_HOSTNAME_CON_T = "UseMicroserviceHostnameCondition"
USE_HOSTNAME_CON = Not(Condition(NOT_USE_HOSTNAME_CON_T))

USE_CLUSTER_CAPACITY_PROVIDERS_CON_T = "UseClusterDefaultCapacityProviders"
USE_CLUSTER_CAPACITY_PROVIDERS_CON = Equals(Ref(ecs_params.LAUNCH_TYPE),
                                            ecs_params.LAUNCH_TYPE.Default)
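# These module-level pairs (name constant plus condition object) still have to
# be registered on a template before anything can reference them; a minimal
# sketch of that step, with the template object assumed:
template.add_condition(NOT_USE_CLUSTER_SG_CON_T, NOT_USE_CLUSTER_SG_CON)
template.add_condition(USE_CLUSTER_SG_CON_T, USE_CLUSTER_SG_CON)
template.add_condition(USE_FARGATE_CON_T, USE_FARGATE_CON)
template.add_condition(SERVICE_COUNT_ZERO_CON_T, SERVICE_COUNT_ZERO_CON)
template.add_condition(SERVICE_COUNT_ZERO_AND_FARGATE_CON_T,
                       SERVICE_COUNT_ZERO_AND_FARGATE_CON)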
DBS_SUBNET_GROUP_CON_T = "CreateSubnetGroupCondition" DBS_SUBNET_GROUP_CON = Equals(Ref(DBS_SUBNET_GROUP), DBS_SUBNET_GROUP.Default) NOT_USE_DB_SNAPSHOT_CON_T = "NotUseSnapshotToCreateDbCondition" NOT_USE_DB_SNAPSHOT_CON = Equals(Ref(DB_SNAPSHOT_ID), DB_SNAPSHOT_ID.Default) USE_DB_SNAPSHOT_CON_T = "UseSnapshotToCreateDbCondition" USE_DB_SNAPSHOT_CON = Not(Condition(NOT_USE_DB_SNAPSHOT_CON_T)) USE_CLUSTER_CON_T = "UseAuroraClusterCondition" USE_CLUSTER_CON = Equals("aurora", Select(0, Split("-", Ref(DB_ENGINE_NAME)))) NOT_USE_CLUSTER_CON_T = "NotUseClusterCondition" NOT_USE_CLUSTER_CON = Not(Condition(USE_CLUSTER_CON_T)) USE_CLUSTER_AND_SNAPSHOT_CON_T = "UseClusterAndSnapshotCondition" USE_CLUSTER_AND_SNAPSHOT_CON = And(Condition(USE_CLUSTER_CON_T), Condition(USE_DB_SNAPSHOT_CON_T)) USE_CLUSTER_NOT_SNAPSHOT_CON_T = "UseClusterAndNotSnapshotCondition" USE_CLUSTER_NOT_SNAPSHOT_CON = And(Condition(USE_CLUSTER_CON_T), Condition(NOT_USE_DB_SNAPSHOT_CON_T)) NOT_USE_CLUSTER_USE_SNAPSHOT_CON_T = "NotUseClusterButUseSnapshotCondition" NOT_USE_CLUSTER_USE_SNAPSHOT_CON = And(Condition(NOT_USE_CLUSTER_CON_T), Condition(USE_DB_SNAPSHOT_CON_T)) USE_CLUSTER_OR_SNAPSHOT_CON_T = "UseSnapshotOrClusterCondition" USE_CLUSTER_OR_SNAPSHOT_CON = Or(Condition(USE_CLUSTER_CON_T), Condition(USE_DB_SNAPSHOT_CON_T))
def main(args):
    number_of_vol = 5

    t = Template()
    availability_zone = t.add_parameter(
        Parameter(
            "AvailabilityZone",
            Type="String",
            Description="Availability Zone the cluster will launch into. "
            "THIS IS REQUIRED",
        ))
    raid_options = t.add_parameter(
        Parameter(
            "RAIDOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of RAID related options, "
            "8 parameters in total, "
            "["
            "0 shared_dir,"
            "1 raid_type,"
            "2 num_of_vols,"
            "3 vol_type,"
            "4 vol_size,"
            "5 vol_IOPS,"
            "6 encrypted, "
            "7 ebs_kms_key]",
        ))

    use_vol = [None] * number_of_vol
    v = [None] * number_of_vol
    for i in range(number_of_vol):
        if i == 0:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                Not(Equals(Select("0", Ref(raid_options)), "NONE")))
        else:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                And(Not(Equals(Select("2", Ref(raid_options)), str(i))),
                    Condition(use_vol[i - 1])),
            )
        use_ebs_iops = t.add_condition(
            "Vol%s_UseEBSPIOPS" % (i + 1),
            Equals(Select("3", Ref(raid_options)), "io1"))
        use_volume_size = t.add_condition(
            "Vol%s_UseVolumeSize" % (i + 1),
            Not(Equals(Select("4", Ref(raid_options)), "NONE")))
        use_volume_type = t.add_condition(
            "Vol%s_UseVolumeType" % (i + 1),
            Not(Equals(Select("3", Ref(raid_options)), "NONE")))
        use_ebs_encryption = t.add_condition(
            "Vol%s_UseEBSEncryption" % (i + 1),
            Equals(Select("6", Ref(raid_options)), "true"))
        use_ebs_kms_key = t.add_condition(
            "Vol%s_UseEBSKMSKey" % (i + 1),
            And(Condition(use_ebs_encryption),
                Not(Equals(Select("7", Ref(raid_options)), "NONE"))),
        )
        v[i] = t.add_resource(
            ec2.Volume(
                "Volume%s" % (i + 1),
                AvailabilityZone=Ref(availability_zone),
                VolumeType=If(use_volume_type, Select("3", Ref(raid_options)),
                              "gp2"),
                Size=If(use_volume_size, Select("4", Ref(raid_options)), 20),
                Iops=If(use_ebs_iops, Select("5", Ref(raid_options)), NoValue),
                Encrypted=If(use_ebs_encryption,
                             Select("6", Ref(raid_options)), NoValue),
                KmsKeyId=If(use_ebs_kms_key, Select("7", Ref(raid_options)),
                            NoValue),
                Condition=use_vol[i],
            ))

    outputs = [None] * number_of_vol
    vol_to_return = [None] * number_of_vol
    for i in range(number_of_vol):
        vol_to_return[i] = Ref(v[i])
        if i == 0:
            outputs[i] = If(use_vol[i], vol_to_return[i], "NONE")
        else:
            outputs[i] = If(use_vol[i], Join(",", vol_to_return[:(i + 1)]),
                            outputs[i - 1])

    t.add_output(
        Output("Volumeids",
               Description="Volume IDs of the resulting RAID EBS volumes",
               Value=outputs[number_of_vol - 1]))

    json_file_path = args.target_path
    output_file = open(json_file_path, "w")
    output_file.write(t.to_json())
    output_file.close()
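# These generator scripts all end by writing t.to_json() to args.target_path,
# which suggests an entry point along these lines (argument name assumed from
# the code above):
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Generate the RAID EBS volume substack template")
    parser.add_argument("--target-path", dest="target_path", required=True,
                        help="Path for the generated JSON template")
    main(parser.parse_args())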
def create_template(self):
    """Create template (main function called by Stacker)."""
    template = self.template
    variables = self.get_variables()
    template.set_version('2010-09-09')
    template.set_description('Static Website - Bucket and Distribution')

    # Conditions
    template.add_condition(
        'AcmCertSpecified',
        And(Not(Equals(variables['AcmCertificateArn'].ref, '')),
            Not(Equals(variables['AcmCertificateArn'].ref, 'undefined'))))
    template.add_condition(
        'AliasesSpecified',
        And(Not(Equals(Select(0, variables['Aliases'].ref), '')),
            Not(Equals(Select(0, variables['Aliases'].ref), 'undefined'))))
    template.add_condition(
        'CFLoggingEnabled',
        And(Not(Equals(variables['LogBucketName'].ref, '')),
            Not(Equals(variables['LogBucketName'].ref, 'undefined'))))
    template.add_condition(
        'DirectoryIndexSpecified',
        And(Not(Equals(variables['RewriteDirectoryIndex'].ref, '')),
            Not(Equals(variables['RewriteDirectoryIndex'].ref, 'undefined'))))  # noqa
    template.add_condition(
        'WAFNameSpecified',
        And(Not(Equals(variables['WAFWebACL'].ref, '')),
            Not(Equals(variables['WAFWebACL'].ref, 'undefined'))))

    # Resources
    oai = template.add_resource(
        cloudfront.CloudFrontOriginAccessIdentity(
            'OAI',
            CloudFrontOriginAccessIdentityConfig=cloudfront.CloudFrontOriginAccessIdentityConfig(  # noqa pylint: disable=line-too-long
                Comment='CF access to website'
            )
        )
    )

    bucket = template.add_resource(
        s3.Bucket(
            'Bucket',
            AccessControl=s3.Private,
            LifecycleConfiguration=s3.LifecycleConfiguration(
                Rules=[
                    s3.LifecycleRule(
                        NoncurrentVersionExpirationInDays=90,
                        Status='Enabled'
                    )
                ]
            ),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled'
            ),
            WebsiteConfiguration=s3.WebsiteConfiguration(
                IndexDocument='index.html',
                ErrorDocument='error.html'
            )
        )
    )
    template.add_output(Output(
        'BucketName',
        Description='Name of website bucket',
        Value=bucket.ref()
    ))

    allowcfaccess = template.add_resource(
        s3.BucketPolicy(
            'AllowCFAccess',
            Bucket=bucket.ref(),
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Action=[awacs.s3.GetObject],
                        Effect=Allow,
                        Principal=Principal(
                            'CanonicalUser',
                            oai.get_att('S3CanonicalUserId')
                        ),
                        Resource=[
                            Join('', [bucket.get_att('Arn'), '/*'])
                        ]
                    )
                ]
            )
        )
    )

    cfdirectoryindexrewriterole = template.add_resource(
        iam.Role(
            'CFDirectoryIndexRewriteRole',
            Condition='DirectoryIndexSpecified',
            AssumeRolePolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[awacs.sts.AssumeRole],
                        Principal=Principal('Service',
                                            ['lambda.amazonaws.com',
                                             'edgelambda.amazonaws.com'])
                    )
                ]
            ),
            ManagedPolicyArns=[
                IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole'
            ]
        )
    )

    cfdirectoryindexrewrite = template.add_resource(
        awslambda.Function(
            'CFDirectoryIndexRewrite',
            Condition='DirectoryIndexSpecified',
            Code=awslambda.Code(
                ZipFile=Join(
                    '',
                    ["'use strict';\n",
                     "exports.handler = (event, context, callback) => {\n",
                     "\n",
                     " // Extract the request from the CloudFront event that is sent to Lambda@Edge\n",  # noqa pylint: disable=line-too-long
                     " var request = event.Records[0].cf.request;\n",
                     " // Extract the URI from the request\n",
                     " var olduri = request.uri;\n",
                     " // Match any '/' that occurs at the end of a URI. Replace it with a default index\n",  # noqa pylint: disable=line-too-long
                     " var newuri = olduri.replace(/\\/$/, '\\/",
                     variables['RewriteDirectoryIndex'].ref,
                     "');\n",  # noqa
                     " // Log the URI as received by CloudFront and the new URI to be used to fetch from origin\n",  # noqa pylint: disable=line-too-long
                     " console.log(\"Old URI: \" + olduri);\n",
                     " console.log(\"New URI: \" + newuri);\n",
                     " // Replace the received URI with the URI that includes the index page\n",  # noqa pylint: disable=line-too-long
                     " request.uri = newuri;\n",
                     " // Return to CloudFront\n",
                     " return callback(null, request);\n",
                     "\n",
                     "};\n"]
                )
            ),
            Description='Rewrites CF directory HTTP requests to default page',  # noqa
            Handler='index.handler',
            Role=cfdirectoryindexrewriterole.get_att('Arn'),
            Runtime='nodejs8.10'
        )
    )

    # Generating a unique resource name here for the Lambda version, so it
    # updates automatically if the lambda code changes
    code_hash = hashlib.md5(
        str(cfdirectoryindexrewrite.properties['Code'].properties['ZipFile'].to_dict()).encode()  # noqa pylint: disable=line-too-long
    ).hexdigest()

    cfdirectoryindexrewritever = template.add_resource(
        awslambda.Version(
            'CFDirectoryIndexRewriteVer' + code_hash,
            Condition='DirectoryIndexSpecified',
            FunctionName=cfdirectoryindexrewrite.ref()
        )
    )

    # If custom associations defined, use them
    if variables['lambda_function_associations']:
        lambda_function_associations = [
            cloudfront.LambdaFunctionAssociation(
                EventType=x['type'],
                LambdaFunctionARN=x['arn']
            )
            for x in variables['lambda_function_associations']
        ]
    else:  # otherwise fallback to pure CFN condition
        lambda_function_associations = If(
            'DirectoryIndexSpecified',
            [cloudfront.LambdaFunctionAssociation(
                EventType='origin-request',
                LambdaFunctionARN=cfdirectoryindexrewritever.ref()
            )],
            NoValue
        )

    cfdistribution = template.add_resource(
        get_cf_distribution_class()(
            'CFDistribution',
            DependsOn=allowcfaccess.title,
            DistributionConfig=get_cf_distro_conf_class()(
                Aliases=If(
                    'AliasesSpecified',
                    variables['Aliases'].ref,
                    NoValue
                ),
                Origins=[
                    get_cf_origin_class()(
                        DomainName=Join(
                            '.',
                            [bucket.ref(), 's3.amazonaws.com']),
                        S3OriginConfig=get_s3_origin_conf_class()(
                            OriginAccessIdentity=Join(
                                '',
                                ['origin-access-identity/cloudfront/',
                                 oai.ref()])
                        ),
                        Id='S3Origin'
                    )
                ],
                DefaultCacheBehavior=cloudfront.DefaultCacheBehavior(
                    AllowedMethods=['GET', 'HEAD'],
                    Compress=False,
                    DefaultTTL='86400',
                    ForwardedValues=cloudfront.ForwardedValues(
                        Cookies=cloudfront.Cookies(Forward='none'),
                        QueryString=False,
                    ),
                    LambdaFunctionAssociations=lambda_function_associations,  # noqa
                    TargetOriginId='S3Origin',
                    ViewerProtocolPolicy='redirect-to-https'
                ),
                DefaultRootObject='index.html',
                Logging=If(
                    'CFLoggingEnabled',
                    cloudfront.Logging(
                        Bucket=Join('.',
                                    [variables['LogBucketName'].ref,
                                     's3.amazonaws.com'])
                    ),
                    NoValue
                ),
                PriceClass=variables['PriceClass'].ref,
                Enabled=True,
                WebACLId=If(
                    'WAFNameSpecified',
                    variables['WAFWebACL'].ref,
                    NoValue
                ),
                ViewerCertificate=If(
                    'AcmCertSpecified',
                    cloudfront.ViewerCertificate(
                        AcmCertificateArn=variables['AcmCertificateArn'].ref,  # noqa
                        SslSupportMethod='sni-only'
                    ),
                    NoValue
                )
            )
        )
    )
    template.add_output(Output(
        'CFDistributionId',
        Description='CloudFront distribution ID',
        Value=cfdistribution.ref()
    ))
    template.add_output(Output(
        'CFDistributionDomainName',
        Description='CloudFront distribution domain name',
        Value=cfdistribution.get_att('DomainName')
    ))
bastion_type_is_openvpn_set = "BastionTypeIsOpenVPNSet" template.add_condition(bastion_type_is_openvpn_set, Equals("OpenVPN", Ref(bastion_type))) bastion_type_is_ssh_set = "BastionTypeIsSSHSet" template.add_condition(bastion_type_is_ssh_set, Equals("SSH", Ref(bastion_type))) bastion_ami_set = "BastionAMISet" template.add_condition(bastion_ami_set, Not(Equals("", Ref(bastion_ami)))) bastion_type_and_ami_set = "BastionTypeAndAMISet" template.add_condition( bastion_type_and_ami_set, And(Condition(bastion_type_set), Condition(bastion_ami_set))) bastion_security_group = ec2.SecurityGroup( 'BastionSecurityGroup', template=template, GroupDescription="Bastion security group.", VpcId=Ref(vpc), Condition=bastion_type_set, Tags=Tags(Name=Join("-", [Ref("AWS::StackName"), "bastion"]), ), ) bastion_security_group_ingress_ssh = ec2.SecurityGroupIngress( 'BastionSecurityGroupIngressSSH', template=template, GroupId=Ref(bastion_security_group), IpProtocol="tcp",
conditions = {
    "CreatePrivateSubnet1ACondition":
        Equals(Ref("CreatePrivateSubnet1A"), "True"),
    "CreatePrivateSubnet1BCondition":
        Equals(Ref("CreatePrivateSubnet1B"), "True"),
    "NAT1EIPCondition":
        Or(Condition("CreatePrivateSubnet1ACondition"),
           Condition("CreatePrivateSubnet1BCondition")),
    "CreatePublicSubnet2Condition":
        Equals(Ref("CreatePublicSubnet2"), "True"),
    "CreatePrivateSubnet2ACondition":
        Equals(Ref("CreatePrivateSubnet2A"), "True"),
    "CreatePrivateSubnet2BCondition":
        Equals(Ref("CreatePrivateSubnet2B"), "True"),
    "AttachNAT2ACondition":
        And(Condition("CreatePublicSubnet2Condition"),
            Condition("CreatePrivateSubnet2ACondition")),
    "AttachNAT2BCondition":
        And(Condition("CreatePublicSubnet2Condition"),
            Condition("CreatePrivateSubnet2BCondition")),
    "NAT2EIPCondition":
        Or(Condition("AttachNAT2ACondition"),
           Condition("AttachNAT2BCondition")),
}

resources = {
    # Add a VPC with user input CIDR block
    "VPC": ec2.VPC(
        "VPC",
        CidrBlock=Ref("CIDRVPC"),
        EnableDnsSupport="True",
        EnableDnsHostnames="True",
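# The conditions (and resources) dicts presumably get attached to a Template
# in a loop elsewhere in this module; a sketch under that assumption:
from troposphere import Template

t = Template()
for name, condition in conditions.items():
    t.add_condition(name, condition)
for resource in resources.values():
    t.add_resource(resource)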
USE_CFN_PARAMS_CON = Equals(Ref(cfn_params.USE_CFN_PARAMS),
                            cfn_params.USE_CFN_PARAMS.Default)

NOT_USE_CFN_PARAMS_CON_T = f"Not{USE_CFN_PARAMS_CON_T}"
NOT_USE_CFN_PARAMS_CON = Not(Condition(USE_CFN_PARAMS_CON_T))

USE_CFN_EXPORTS_T = "UseExportsCondition"
USE_CFN_EXPORTS = Equals(Ref(cfn_params.USE_CFN_EXPORTS), "True")

NOT_USE_CFN_EXPORTS_T = "NotUseCfnExportsCondition"
NOT_USE_CFN_EXPORTS = Not(Condition(USE_CFN_EXPORTS_T))

USE_SSM_EXPORTS_T = "UseSsmExportsCondition"
USE_SSM_EXPORTS = Equals(Ref(cfn_params.USE_SSM_EXPORTS), "True")

USE_CFN_AND_SSM_EXPORTS_T = "UseCfnAndSsmCondition"
USE_CFN_AND_SSM_EXPORTS = And(Condition(USE_CFN_EXPORTS_T),
                              Condition(USE_SSM_EXPORTS_T))

USE_SSM_ONLY_T = "UseSsmOnlyCondition"
USE_SSM_ONLY = And(Condition(USE_SSM_EXPORTS_T),
                   Condition(NOT_USE_CFN_EXPORTS_T))

USE_SPOT_CON_T = "UseSpotFleetHostsCondition"
USE_SPOT_CON = Equals(Ref(cfn_params.USE_FLEET), "True")

NOT_USE_SPOT_CON_T = "NotUseSpotFleetHostsCondition"
NOT_USE_SPOT_CON = Not(Condition(USE_SPOT_CON_T))


def pass_root_stack_name():
    """
    Function to add root_stack to a stack parameters
    Not(Equals(Ref(db_class), dont_create_value)))

db_replication = template.add_parameter(
    Parameter(
        "DatabaseReplication",
        Type="String",
        AllowedValues=["true", "false"],
        Default="false",
        Description="Whether to create a database server replica - "
        "WARNING this will fail if DatabaseBackupRetentionDays is 0.",
    ),
    group="Database",
    label="Database replication")

db_replication_condition = "DatabaseReplicationCondition"
template.add_condition(
    db_replication_condition,
    And(Condition(db_condition), Equals(Ref(db_replication), "true")))

db_engine = template.add_parameter(
    Parameter(
        "DatabaseEngine",
        Default="postgres",
        Description="Database engine to use",
        Type="String",
        AllowedValues=list(rds_engine_map.keys()),
        ConstraintDescription="must select a valid database engine.",
    ),
    group="Database",
    label="Engine",
)

db_engine_version = template.add_parameter(
def main(args):
    t = Template()

    # [0 shared_dir, 1 efs_fs_id, 2 performance_mode, 3 efs_kms_key_id,
    #  4 provisioned_throughput, 5 encrypted, 6 throughput_mode,
    #  7 exists_valid_head_node_mt, 8 exists_valid_compute_mt]
    efs_options = t.add_parameter(
        Parameter(
            "EFSOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of efs related options, "
            "9 parameters in total",
        ))
    compute_security_group = t.add_parameter(
        Parameter("ComputeSecurityGroup",
                  Type="String",
                  Description="Security Group for Mount Target"))
    head_node_subnet_id = t.add_parameter(
        Parameter("MasterSubnetId",
                  Type="String",
                  Description="Head node subnet id for head node mount target"))
    compute_subnet_id = t.add_parameter(
        Parameter(
            "ComputeSubnetId",
            Type="String",
            Description="User provided compute subnet id. "
            "Will be used to create compute mount target if needed.",
        ))

    create_efs = t.add_condition(
        "CreateEFS",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")),
            Equals(Select(str(1), Ref(efs_options)), "NONE")),
    )
    create_head_node_mt = t.add_condition(
        "CreateMasterMT",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")),
            Equals(Select(str(7), Ref(efs_options)), "NONE")),
    )
    no_mt_in_compute_az = t.add_condition(
        "NoMTInComputeAZ",
        Equals(Select(str(8), Ref(efs_options)), "NONE"))
    use_user_provided_compute_subnet = t.add_condition(
        "UseUserProvidedComputeSubnet",
        Not(Equals(Ref(compute_subnet_id), "NONE")))
    # Need to create compute mount target if:
    # user is providing a compute subnet and
    # there is no existing MT in compute subnet's AZ (includes case where
    # head node AZ == compute AZ).
    #
    # If user is not providing a compute subnet, either we are using the head
    # node subnet as compute subnet, or we will be creating a compute subnet
    # that is in the same AZ as head node subnet, see ComputeSubnet resource
    # in the main stack. In both cases no compute MT is needed.
    create_compute_mt = t.add_condition(
        "CreateComputeMT",
        And(Condition(use_user_provided_compute_subnet),
            Condition(no_mt_in_compute_az)))

    use_performance_mode = t.add_condition(
        "UsePerformanceMode",
        Not(Equals(Select(str(2), Ref(efs_options)), "NONE")))
    use_efs_encryption = t.add_condition(
        "UseEFSEncryption",
        Equals(Select(str(5), Ref(efs_options)), "true"))
    use_efs_kms_key = t.add_condition(
        "UseEFSKMSKey",
        And(Condition(use_efs_encryption),
            Not(Equals(Select(str(3), Ref(efs_options)), "NONE"))))
    use_throughput_mode = t.add_condition(
        "UseThroughputMode",
        Not(Equals(Select(str(6), Ref(efs_options)), "NONE")))
    use_provisioned = t.add_condition(
        "UseProvisioned",
        Equals(Select(str(6), Ref(efs_options)), "provisioned"))
    use_provisioned_throughput = t.add_condition(
        "UseProvisionedThroughput",
        And(Condition(use_provisioned),
            Not(Equals(Select(str(4), Ref(efs_options)), "NONE"))),
    )

    fs = t.add_resource(
        FileSystem(
            "EFSFS",
            PerformanceMode=If(use_performance_mode,
                               Select(str(2), Ref(efs_options)), NoValue),
            ProvisionedThroughputInMibps=If(use_provisioned_throughput,
                                            Select(str(4), Ref(efs_options)),
                                            NoValue),
            ThroughputMode=If(use_throughput_mode,
                              Select(str(6), Ref(efs_options)), NoValue),
            Encrypted=If(use_efs_encryption, Select(str(5), Ref(efs_options)),
                         NoValue),
            KmsKeyId=If(use_efs_kms_key, Select(str(3), Ref(efs_options)),
                        NoValue),
            Condition=create_efs,
        ))
    t.add_resource(
        MountTarget(
            "MasterSubnetEFSMT",
            FileSystemId=If(create_efs, Ref(fs),
                            Select(str(1), Ref(efs_options))),
            SecurityGroups=[Ref(compute_security_group)],
            SubnetId=Ref(head_node_subnet_id),
            Condition=create_head_node_mt,
        ))
    t.add_resource(
        MountTarget(
            "ComputeSubnetEFSMT",
            FileSystemId=If(create_efs, Ref(fs),
                            Select(str(1), Ref(efs_options))),
            SecurityGroups=[Ref(compute_security_group)],
            SubnetId=Ref(compute_subnet_id),
            Condition=create_compute_mt,
        ))

    t.add_output(
        Output(
            "FileSystemId",
            Description="ID of the FileSystem",
            Value=If(create_efs, Ref(fs), Select("1", Ref(efs_options))),
        ))

    # Specify output file path
    json_file_path = args.target_path
    output_file = open(json_file_path, "w")
    output_file.write(t.to_json())
    output_file.close()
from ecs_composex.acm.acm_params import (
    VALIDATION_DOMAIN_NAME,
    VALIDATION_DOMAIN_ZONE_ID,
    CERT_ALT_NAMES,
)

ACM_ZONE_ID_IS_NONE_T = "AcmZoneIsNoneCondition"
ACM_ZONE_ID_IS_NONE = Equals(Ref(VALIDATION_DOMAIN_ZONE_ID),
                             VALIDATION_DOMAIN_ZONE_ID.Default)

ACM_ZONE_NAME_IS_NONE_T = "AcmZoneNameIsNoneCondition"
ACM_ZONE_NAME_IS_NONE = Equals(Ref(VALIDATION_DOMAIN_NAME),
                               VALIDATION_DOMAIN_NAME.Default)

USE_ZONE_ID_T = "UseZoneIdOverZoneNameForValidation"
USE_ZONE_ID = And(Not(Condition(ACM_ZONE_ID_IS_NONE_T)),
                  Not(Condition(ACM_ZONE_NAME_IS_NONE_T)))

NO_VALIDATION_CONDITION_T = "NoValidationConfiguredCondition"
NO_VALIDATION_CONDITION = And(Condition(ACM_ZONE_ID_IS_NONE_T),
                              Condition(ACM_ZONE_NAME_IS_NONE_T))

NO_ALT_NAMES_T = "NoAlternativeSubNamesCondition"
NO_ALT_NAMES = Equals(Select(0, Ref(CERT_ALT_NAMES)), CERT_ALT_NAMES.Default)


def add_all_conditions(template):
    """
    Function to add all conditions to the template

    :param template:
    :return:
    """
def create_template(self):
    """Create template (main function called by Stacker)."""
    template = self.template
    variables = self.get_variables()
    template.add_version('2010-09-09')
    template.add_description('Static Website - Bucket and Distribution')

    # Conditions
    template.add_condition(
        'AcmCertSpecified',
        And(Not(Equals(variables['AcmCertificateArn'].ref, '')),
            Not(Equals(variables['AcmCertificateArn'].ref, 'undefined'))))
    template.add_condition(
        'AliasesSpecified',
        And(Not(Equals(Select(0, variables['Aliases'].ref), '')),
            Not(Equals(Select(0, variables['Aliases'].ref), 'undefined'))))
    template.add_condition(
        'CFLoggingEnabled',
        And(Not(Equals(variables['LogBucketName'].ref, '')),
            Not(Equals(variables['LogBucketName'].ref, 'undefined'))))
    template.add_condition(
        'WAFNameSpecified',
        And(Not(Equals(variables['WAFWebACL'].ref, '')),
            Not(Equals(variables['WAFWebACL'].ref, 'undefined'))))

    # Resources
    oai = template.add_resource(
        cloudfront.CloudFrontOriginAccessIdentity(
            'OAI',
            CloudFrontOriginAccessIdentityConfig=cloudfront.
            CloudFrontOriginAccessIdentityConfig(  # noqa pylint: disable=line-too-long
                Comment='CF access to website')))

    bucket = template.add_resource(
        s3.Bucket(
            'Bucket',
            AccessControl=s3.Private,
            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                 Status='Enabled')
            ]),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled'),
            WebsiteConfiguration=s3.WebsiteConfiguration(
                IndexDocument='index.html', ErrorDocument='error.html')))
    template.add_output(
        Output('BucketName',
               Description='Name of website bucket',
               Value=bucket.ref()))

    allowcfaccess = template.add_resource(
        s3.BucketPolicy(
            'AllowCFAccess',
            Bucket=bucket.ref(),
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Action=[awacs.s3.GetObject],
                        Effect=Allow,
                        Principal=Principal(
                            'CanonicalUser',
                            oai.get_att('S3CanonicalUserId')),
                        Resource=[Join('', [bucket.get_att('Arn'), '/*'])])
                ])))

    cfdistribution = template.add_resource(
        cloudfront.Distribution(
            'CFDistribution',
            DependsOn=allowcfaccess.title,
            DistributionConfig=cloudfront.DistributionConfig(
                Aliases=If('AliasesSpecified', variables['Aliases'].ref,
                           NoValue),
                Origins=[
                    cloudfront.Origin(
                        DomainName=Join(
                            '.', [bucket.ref(), 's3.amazonaws.com']),
                        S3OriginConfig=cloudfront.S3Origin(
                            OriginAccessIdentity=Join(
                                '', [
                                    'origin-access-identity/cloudfront/',
                                    oai.ref()
                                ])),
                        Id='S3Origin')
                ],
                DefaultCacheBehavior=cloudfront.DefaultCacheBehavior(
                    AllowedMethods=['GET', 'HEAD'],
                    Compress=False,
                    DefaultTTL='86400',
                    ForwardedValues=cloudfront.ForwardedValues(
                        Cookies=cloudfront.Cookies(Forward='none'),
                        QueryString=False,
                    ),
                    TargetOriginId='S3Origin',
                    ViewerProtocolPolicy='redirect-to-https'),
                DefaultRootObject='index.html',
                Logging=If(
                    'CFLoggingEnabled',
                    cloudfront.Logging(Bucket=Join('.', [
                        variables['LogBucketName'].ref, 's3.amazonaws.com'
                    ])), NoValue),
                PriceClass=variables['PriceClass'].ref,
                Enabled=True,
                WebACLId=If('WAFNameSpecified', variables['WAFWebACL'].ref,
                            NoValue),
                ViewerCertificate=If(
                    'AcmCertSpecified',
                    cloudfront.ViewerCertificate(
                        AcmCertificateArn=variables['AcmCertificateArn'].ref,  # noqa
                        SslSupportMethod='sni-only'), NoValue))))
    template.add_output(
        Output('CFDistributionId',
               Description='CloudFront distribution ID',
               Value=cfdistribution.ref()))
    template.add_output(
        Output('CFDistributionDomainName',
               Description='CloudFront distribution domain name',
               Value=cfdistribution.get_att('DomainName')))
UseVol = [None] * numberOfVol
UseExistingEBSVolume = [None] * numberOfVol
v = [None] * numberOfVol
for i in range(numberOfVol):
    if i == 0:
        CreateVol = t.add_condition(
            "Vol%s_CreateEBSVolume" % (i + 1),
            Equals(Select(str(i), Ref(EBSVolumeId)), "NONE"))
    elif i == 1:
        UseVol[i] = t.add_condition("UseVol%s" % (i + 1),
                                    Not(Equals(Ref(EBSVolumeNum), str(i))))
        CreateVol = t.add_condition(
            "Vol%s_CreateEBSVolume" % (i + 1),
            And(Condition(UseVol[i]),
                Equals(Select(str(i), Ref(EBSVolumeId)), "NONE")))
    else:
        UseVol[i] = t.add_condition(
            "UseVol%s" % (i + 1),
            And(Not(Equals(Ref(EBSVolumeNum), str(i))),
                Condition(UseVol[i - 1])))
        CreateVol = t.add_condition(
            "Vol%s_CreateEBSVolume" % (i + 1),
            And(Condition(UseVol[i]),
                Equals(Select(str(i), Ref(EBSVolumeId)), "NONE")))
    UseEBSPIOPS = t.add_condition(
        "Vol%s_UseEBSPIOPS" % (i + 1),
        Equals(Select(str(i), Ref(VolumeType)), "io1"))
    UseVolumeSize = t.add_condition(
        "Vol%s_UseVolumeSize" % (i + 1),
Condition("OneEqualsFoo") ), "BarEqualsTwo": Equals( "Bar", Ref("Two") ), "ThreeEqualsFour": Equals( Ref("Three"), Ref("Four") ), "OneEqualsFooOrBarEqualsTwo": Or( Condition("OneEqualsFoo"), Condition("BarEqualsTwo") ), "OneEqualsFooAndNotBarEqualsTwo": And( Condition("OneEqualsFoo"), Not(Condition("BarEqualsTwo")) ), "OneEqualsFooAndBarEqualsTwoAndThreeEqualsPft": And( Condition("OneEqualsFoo"), Condition("BarEqualsTwo"), Equals(Ref("Three"), "Pft") ), "OneIsQuzAndThreeEqualsFour": And( Equals(Ref("One"), "Quz"), Condition("ThreeEqualsFour") ), "LaunchInstance": And( Condition("OneEqualsFoo"), Condition("NotOneEqualsFoo"), Condition("BarEqualsTwo"), Condition("OneEqualsFooAndNotBarEqualsTwo"),
Type="String", Default="", ), group="Static Media", label="CloudFront Custom Domain", ) assets_custom_domain_condition = "AssetsCloudFrontDomainCondition" template.add_condition(assets_custom_domain_condition, Not(Equals(Ref(assets_cloudfront_domain), ""))) # Currently, you can specify only certificates that are in the US East (N. Virginia) region. # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distributionconfig-viewercertificate.html assets_custom_domain_and_us_east_1_condition = "AssetsCloudFrontDomainAndUsEast1Condition" template.add_condition( assets_custom_domain_and_us_east_1_condition, And(Not(Equals(Ref(assets_cloudfront_domain), "")), Equals(Ref(AWS_REGION), "us-east-1"))) assets_certificate = template.add_resource( Certificate( 'AssetsCertificate', Condition=assets_custom_domain_and_us_east_1_condition, DomainName=Ref(assets_cloudfront_domain), DomainValidationOptions=[ DomainValidationOption( DomainName=Ref(assets_cloudfront_domain), ValidationDomain=Ref(assets_cloudfront_domain), ), ], )) assets_certificate_arn = template.add_parameter(
def main(args):
    t = Template()

    # ================= Parameters =================
    # [0 shared_dir, 1 fsx_fs_id, 2 storage_capacity, 3 fsx_kms_key_id,
    #  4 imported_file_chunk_size, 5 export_path, 6 import_path,
    #  7 weekly_maintenance_start_time]
    fsx_options = t.add_parameter(
        Parameter(
            "FSXOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of fsx related options, "
            "8 parameters in total, [shared_dir,fsx_fs_id,"
            "storage_capacity,fsx_kms_key_id,imported_file_chunk_size,"
            "export_path,import_path,weekly_maintenance_start_time]",
        ))
    compute_security_group = t.add_parameter(
        Parameter("ComputeSecurityGroup",
                  Type="String",
                  Description="SecurityGroup for FSx filesystem"))
    subnet_id = t.add_parameter(
        Parameter("SubnetId",
                  Type="String",
                  Description="SubnetId for FSx filesystem"))

    # ================= Conditions =================
    create_fsx = t.add_condition(
        "CreateFSX",
        And(Not(Equals(Select(str(0), Ref(fsx_options)), "NONE")),
            Equals(Select(str(1), Ref(fsx_options)), "NONE")),
    )
    use_storage_capacity = t.add_condition(
        "UseStorageCap",
        Not(Equals(Select(str(2), Ref(fsx_options)), "NONE")))
    use_fsx_kms_key = t.add_condition(
        "UseFSXKMSKey",
        Not(Equals(Select(str(3), Ref(fsx_options)), "NONE")))
    use_imported_file_chunk_size = t.add_condition(
        "UseImportedFileChunkSize",
        Not(Equals(Select(str(4), Ref(fsx_options)), "NONE")))
    use_export_path = t.add_condition(
        "UseExportPath",
        Not(Equals(Select(str(5), Ref(fsx_options)), "NONE")))
    use_import_path = t.add_condition(
        "UseImportPath",
        Not(Equals(Select(str(6), Ref(fsx_options)), "NONE")))
    use_weekly_maintenance_start_time = t.add_condition(
        "UseWeeklyMaintenanceStartTime",
        Not(Equals(Select(str(7), Ref(fsx_options)), "NONE")))

    # ================= Resources =================
    fs = t.add_resource(
        FileSystem(
            "FileSystem",
            FileSystemType="LUSTRE",
            SubnetIds=[Ref(subnet_id)],
            SecurityGroupIds=[Ref(compute_security_group)],
            KmsKeyId=If(use_fsx_kms_key, Select(str(3), Ref(fsx_options)),
                        NoValue),
            StorageCapacity=If(use_storage_capacity,
                               Select(str(2), Ref(fsx_options)), NoValue),
            LustreConfiguration=LustreConfiguration(
                ImportedFileChunkSize=If(use_imported_file_chunk_size,
                                         Select(str(4), Ref(fsx_options)),
                                         NoValue),
                ExportPath=If(use_export_path,
                              Select(str(5), Ref(fsx_options)), NoValue),
                ImportPath=If(use_import_path,
                              Select(str(6), Ref(fsx_options)), NoValue),
                WeeklyMaintenanceStartTime=If(
                    use_weekly_maintenance_start_time,
                    Select(str(7), Ref(fsx_options)), NoValue),
            ),
            Condition=create_fsx,
        ))

    # ================= Outputs =================
    t.add_output(
        Output(
            "FileSystemId",
            Description="ID of the FileSystem",
            Value=If(create_fsx, Ref(fs), Select("1", Ref(fsx_options))),
        ))

    # Specify output file path
    json_file_path = args.target_path
    output_file = open(json_file_path, "w")
    output_file.write(t.to_json())
    output_file.close()
def main(args):
    t = Template()

    # [0 shared_dir, 1 efs_fs_id, 2 performance_mode, 3 efs_kms_key_id,
    #  4 provisioned_throughput, 5 encrypted, 6 throughput_mode,
    #  7 exists_valid_mt]
    efs_options = t.add_parameter(
        Parameter(
            "EFSOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of efs related options, "
            "8 parameters in total",
        ))
    compute_security_group = t.add_parameter(
        Parameter("ComputeSecurityGroup",
                  Type="String",
                  Description="SecurityGroup for Mount Target"))
    subnet_id = t.add_parameter(
        Parameter("SubnetId",
                  Type="String",
                  Description="SubnetId for Mount Target"))

    create_efs = t.add_condition(
        "CreateEFS",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")),
            Equals(Select(str(1), Ref(efs_options)), "NONE")),
    )
    create_mt = t.add_condition(
        "CreateMT",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")),
            Equals(Select(str(7), Ref(efs_options)), "NONE")),
    )
    use_performance_mode = t.add_condition(
        "UsePerformanceMode",
        Not(Equals(Select(str(2), Ref(efs_options)), "NONE")))
    use_efs_encryption = t.add_condition(
        "UseEFSEncryption",
        Equals(Select(str(5), Ref(efs_options)), "true"))
    use_efs_kms_key = t.add_condition(
        "UseEFSKMSKey",
        And(Condition(use_efs_encryption),
            Not(Equals(Select(str(3), Ref(efs_options)), "NONE"))))
    use_throughput_mode = t.add_condition(
        "UseThroughputMode",
        Not(Equals(Select(str(6), Ref(efs_options)), "NONE")))
    use_provisioned = t.add_condition(
        "UseProvisioned",
        Equals(Select(str(6), Ref(efs_options)), "provisioned"))
    use_provisioned_throughput = t.add_condition(
        "UseProvisionedThroughput",
        And(Condition(use_provisioned),
            Not(Equals(Select(str(4), Ref(efs_options)), "NONE"))),
    )

    fs = t.add_resource(
        FileSystem(
            "EFSFS",
            PerformanceMode=If(use_performance_mode,
                               Select(str(2), Ref(efs_options)), NoValue),
            ProvisionedThroughputInMibps=If(use_provisioned_throughput,
                                            Select(str(4), Ref(efs_options)),
                                            NoValue),
            ThroughputMode=If(use_throughput_mode,
                              Select(str(6), Ref(efs_options)), NoValue),
            Encrypted=If(use_efs_encryption, Select(str(5), Ref(efs_options)),
                         NoValue),
            KmsKeyId=If(use_efs_kms_key, Select(str(3), Ref(efs_options)),
                        NoValue),
            Condition=create_efs,
        ))

    mt = t.add_resource(
        MountTarget(
            "EFSMT",
            FileSystemId=If(create_efs, Ref(fs),
                            Select(str(1), Ref(efs_options))),
            SecurityGroups=[Ref(compute_security_group)],
            SubnetId=Ref(subnet_id),
            Condition=create_mt,
        ))

    t.add_output(
        Output(
            "FileSystemId",
            Description="ID of the FileSystem",
            Value=If(create_efs, Ref(fs), Select("1", Ref(efs_options))),
        ))

    # Specify output file path
    json_file_path = args.target_path
    output_file = open(json_file_path, "w")
    output_file.write(t.to_json())
    output_file.close()
    )
])

t.add_condition('OneEqualsFoo', Equals(Ref('One'), 'Foo'))
t.add_condition('NotOneEqualsFoo', Not(Condition('OneEqualsFoo')))
t.add_condition('BarEqualsTwo', Equals('Bar', Ref('Two')))
t.add_condition('ThreeEqualsFour', Equals(Ref('Three'), Ref('Four')))
t.add_condition('OneEqualsFooOrBarEqualsTwo',
                Or(Condition('OneEqualsFoo'), Condition('BarEqualsTwo')))
t.add_condition('OneEqualsFooAndNotBarEqualsTwo',
                And(Condition('OneEqualsFoo'),
                    Not(Condition('BarEqualsTwo'))))
t.add_condition(
    'OneEqualsFooAndBarEqualsTwoAndThreeEqualsPft',
    And(Condition('OneEqualsFoo'), Condition('BarEqualsTwo'),
        Equals(Ref('Three'), 'Pft')))
t.add_condition('OneIsQuzAndThreeEqualsFour',
                And(Equals(Ref('One'), 'Quz'),
                    Condition('ThreeEqualsFour')))
t.add_condition(
    'LaunchInstance',
    And(Condition('OneEqualsFoo'), Condition('NotOneEqualsFoo'),
        Condition('BarEqualsTwo'),
        Condition('OneEqualsFooAndNotBarEqualsTwo'),
        Condition('OneIsQuzAndThreeEqualsFour')))
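# Sketch of a consumer for the composite condition; the instance resource and
# its properties are placeholders, not part of the original example:
from troposphere import ec2

t.add_resource(
    ec2.Instance(
        'Ec2Instance',
        Condition='LaunchInstance',
        ImageId='ami-12345678',
        InstanceType='t3.micro',
    ))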
def create_template(): template = Template(Description=( "Static website hosted with S3 and CloudFront. " "https://github.com/schlarpc/overengineered-cloudfront-s3-static-website" )) partition_config = add_mapping( template, "PartitionConfig", { "aws": { # the region with the control plane for CloudFront, IAM, Route 53, etc "PrimaryRegion": "us-east-1", # assume that Lambda@Edge replicates to all default enabled regions, and that # future regions will be opt-in. generated with AWS CLI: # aws ec2 describe-regions --all-regions --query "Regions[?OptInStatus=='opt-in-not-required'].RegionName|sort(@)" "DefaultRegions": [ "ap-northeast-1", "ap-northeast-2", "ap-northeast-3", "ap-south-1", "ap-southeast-1", "ap-southeast-2", "ca-central-1", "eu-central-1", "eu-north-1", "eu-west-1", "eu-west-2", "eu-west-3", "sa-east-1", "us-east-1", "us-east-2", "us-west-1", "us-west-2", ], }, # this doesn't actually work, because Lambda@Edge isn't supported in aws-cn "aws-cn": { "PrimaryRegion": "cn-north-1", "DefaultRegions": ["cn-north-1", "cn-northwest-1"], }, }, ) acm_certificate_arn = template.add_parameter( Parameter( "AcmCertificateArn", Description= "Existing ACM certificate to use for serving TLS. Overrides HostedZoneId.", Type="String", AllowedPattern="(arn:[^:]+:acm:[^:]+:[^:]+:certificate/.+|)", Default="", )) hosted_zone_id = template.add_parameter( Parameter( "HostedZoneId", Description= "Existing Route 53 zone to use for validating a new TLS certificate.", Type="String", AllowedPattern="(Z[A-Z0-9]+|)", Default="", )) dns_names = template.add_parameter( Parameter( "DomainNames", Description= "Comma-separated list of additional domain names to serve.", Type="CommaDelimitedList", Default="", )) tls_protocol_version = template.add_parameter( Parameter( "TlsProtocolVersion", Description= "CloudFront TLS security policy; see https://amzn.to/2DR91Xq for details.", Type="String", Default="TLSv1.2_2019", )) log_retention_days = template.add_parameter( Parameter( "LogRetentionDays", Description= "Days to keep CloudFront, S3, and Lambda logs. 
0 means indefinite retention.", Type="Number", AllowedValues=[0] + CLOUDWATCH_LOGS_RETENTION_OPTIONS, Default=365, )) default_ttl_seconds = template.add_parameter( Parameter( "DefaultTtlSeconds", Description="Cache time-to-live when not set by S3 object headers.", Type="Number", Default=int(datetime.timedelta(minutes=5).total_seconds()), )) enable_price_class_hack = template.add_parameter( Parameter( "EnablePriceClassHack", Description="Cut your bill in half with this one weird trick.", Type="String", Default="false", AllowedValues=["true", "false"], )) retention_defined = add_condition(template, "RetentionDefined", Not(Equals(Ref(log_retention_days), 0))) using_price_class_hack = add_condition( template, "UsingPriceClassHack", Equals(Ref(enable_price_class_hack), "true")) using_acm_certificate = add_condition( template, "UsingAcmCertificate", Not(Equals(Ref(acm_certificate_arn), ""))) using_hosted_zone = add_condition(template, "UsingHostedZone", Not(Equals(Ref(hosted_zone_id), ""))) using_certificate = add_condition( template, "UsingCertificate", Or(Condition(using_acm_certificate), Condition(using_hosted_zone)), ) should_create_certificate = add_condition( template, "ShouldCreateCertificate", And(Condition(using_hosted_zone), Not(Condition(using_acm_certificate))), ) using_dns_names = add_condition(template, "UsingDnsNames", Not(Equals(Select(0, Ref(dns_names)), ""))) is_primary_region = "IsPrimaryRegion" template.add_condition( is_primary_region, Equals(Region, FindInMap(partition_config, Partition, "PrimaryRegion")), ) precondition_region_is_primary = template.add_resource( WaitConditionHandle( "PreconditionIsPrimaryRegionForPartition", Condition=is_primary_region, )) log_ingester_dlq = template.add_resource( Queue( "LogIngesterDLQ", MessageRetentionPeriod=int( datetime.timedelta(days=14).total_seconds()), KmsMasterKeyId="alias/aws/sqs", )) log_ingester_role = template.add_resource( Role( "LogIngesterRole", AssumeRolePolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect="Allow", Principal=Principal("Service", "lambda.amazonaws.com"), Action=[sts.AssumeRole], ) ], ), Policies=[ PolicyProperty( PolicyName="DLQPolicy", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[sqs.SendMessage], Resource=[GetAtt(log_ingester_dlq, "Arn")], ) ], ), ) ], )) log_ingester = template.add_resource( Function( "LogIngester", Runtime=PYTHON_RUNTIME, Handler="index.{}".format(log_ingest.handler.__name__), Code=Code(ZipFile=inspect.getsource(log_ingest)), MemorySize=256, Timeout=300, Role=GetAtt(log_ingester_role, "Arn"), DeadLetterConfig=DeadLetterConfig( TargetArn=GetAtt(log_ingester_dlq, "Arn")), )) log_ingester_permission = template.add_resource( Permission( "LogIngesterPermission", FunctionName=GetAtt(log_ingester, "Arn"), Action="lambda:InvokeFunction", Principal="s3.amazonaws.com", SourceAccount=AccountId, )) log_bucket = template.add_resource( Bucket( "LogBucket", # S3 requires this ACL (regardless of bucket policy) or s3:PutBucketLogging fails. # When the CloudFront distribution is created, it adds an additional bucket ACL. # That ACL is not possible to model in CloudFormation. 
AccessControl="LogDeliveryWrite", LifecycleConfiguration=LifecycleConfiguration(Rules=[ LifecycleRule(ExpirationInDays=1, Status="Enabled"), LifecycleRule( AbortIncompleteMultipartUpload= AbortIncompleteMultipartUpload(DaysAfterInitiation=1), Status="Enabled", ), ]), NotificationConfiguration=NotificationConfiguration( LambdaConfigurations=[ LambdaConfigurations(Event="s3:ObjectCreated:*", Function=GetAtt(log_ingester, "Arn")) ]), BucketEncryption=BucketEncryption( ServerSideEncryptionConfiguration=[ ServerSideEncryptionRule( ServerSideEncryptionByDefault= ServerSideEncryptionByDefault( # if we use KMS, we can't read the logs SSEAlgorithm="AES256")) ]), OwnershipControls=OwnershipControls(Rules=[ OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred") ], ), PublicAccessBlockConfiguration=PublicAccessBlockConfiguration( BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True, ), DependsOn=[log_ingester_permission], )) log_ingester_log_group = template.add_resource( LogGroup( "LogIngesterLogGroup", LogGroupName=Join( "", ["/aws/lambda/", Ref(log_ingester)]), RetentionInDays=If(retention_defined, Ref(log_retention_days), NoValue), )) log_ingester_policy = template.add_resource( PolicyType( "LogIngesterPolicy", Roles=[Ref(log_ingester_role)], PolicyName="IngestLogPolicy", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[logs.CreateLogStream, logs.PutLogEvents], Resource=[ Join( ":", [ "arn", Partition, "logs", Region, AccountId, "log-group", "/aws/cloudfront/*", ], ), Join( ":", [ "arn", Partition, "logs", Region, AccountId, "log-group", "/aws/s3/*", ], ), GetAtt(log_ingester_log_group, "Arn"), ], ), Statement( Effect=Allow, Action=[s3.GetObject], Resource=[Join("", [GetAtt(log_bucket, "Arn"), "/*"])], ), ], ), )) bucket = template.add_resource( Bucket( "ContentBucket", LifecycleConfiguration=LifecycleConfiguration(Rules=[ # not supported by CFN yet: # LifecycleRule( # Transitions=[ # LifecycleRuleTransition( # StorageClass='INTELLIGENT_TIERING', # TransitionInDays=1, # ), # ], # Status="Enabled", # ), LifecycleRule( AbortIncompleteMultipartUpload= AbortIncompleteMultipartUpload(DaysAfterInitiation=7), Status="Enabled", ) ]), LoggingConfiguration=LoggingConfiguration( DestinationBucketName=Ref(log_bucket), LogFilePrefix="s3/"), BucketEncryption=BucketEncryption( ServerSideEncryptionConfiguration=[ ServerSideEncryptionRule( ServerSideEncryptionByDefault= ServerSideEncryptionByDefault( # Origin Access Identities can't use KMS SSEAlgorithm="AES256")) ]), OwnershipControls=OwnershipControls(Rules=[ OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred") ], ), PublicAccessBlockConfiguration=PublicAccessBlockConfiguration( BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True, ), )) origin_access_identity = template.add_resource( CloudFrontOriginAccessIdentity( "CloudFrontIdentity", CloudFrontOriginAccessIdentityConfig= CloudFrontOriginAccessIdentityConfig( Comment=GetAtt(bucket, "Arn")), )) bucket_policy = template.add_resource( BucketPolicy( "ContentBucketPolicy", Bucket=Ref(bucket), PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Principal=Principal( "CanonicalUser", GetAtt(origin_access_identity, "S3CanonicalUserId"), ), Action=[s3.GetObject], Resource=[Join("", [GetAtt(bucket, "Arn"), "/*"])], ), ], ), )) # Not strictly necessary, as ACLs should take care of this access. 
However, CloudFront docs # state "In some circumstances [...] S3 resets permissions on the bucket to the default value", # and this allows logging to work without any ACLs in place. log_bucket_policy = template.add_resource( BucketPolicy( "LogBucketPolicy", Bucket=Ref(log_bucket), PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Principal=Principal("Service", "delivery.logs.amazonaws.com"), Action=[s3.PutObject], Resource=[ Join( "/", [GetAtt(log_bucket, "Arn"), "cloudfront", "*"]) ], ), Statement( Effect=Allow, Principal=Principal("Service", "delivery.logs.amazonaws.com"), Action=[s3.ListBucket], Resource=[Join("/", [GetAtt(log_bucket, "Arn")])], ), Statement( Effect=Allow, Principal=Principal("Service", "s3.amazonaws.com"), Action=[s3.PutObject], Resource=[ Join("/", [GetAtt(log_bucket, "Arn"), "s3", "*"]) ], ), ], ), )) certificate_validator_dlq = template.add_resource( Queue( "CertificateValidatorDLQ", MessageRetentionPeriod=int( datetime.timedelta(days=14).total_seconds()), KmsMasterKeyId="alias/aws/sqs", Condition=should_create_certificate, )) certificate_validator_role = template.add_resource( Role( "CertificateValidatorRole", AssumeRolePolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect="Allow", Principal=Principal("Service", "lambda.amazonaws.com"), Action=[sts.AssumeRole], ) ], ), Policies=[ PolicyProperty( PolicyName="DLQPolicy", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[sqs.SendMessage], Resource=[ GetAtt(certificate_validator_dlq, "Arn") ], ) ], ), ) ], # TODO scope down ManagedPolicyArns=[ "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole", "arn:aws:iam::aws:policy/AmazonRoute53FullAccess", "arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly", ], Condition=should_create_certificate, )) certificate_validator_function = template.add_resource( Function( "CertificateValidatorFunction", Runtime=PYTHON_RUNTIME, Handler="index.{}".format(certificate_validator.handler.__name__), Code=Code(ZipFile=inspect.getsource(certificate_validator)), MemorySize=256, Timeout=300, Role=GetAtt(certificate_validator_role, "Arn"), DeadLetterConfig=DeadLetterConfig( TargetArn=GetAtt(certificate_validator_dlq, "Arn")), Environment=Environment( Variables={ certificate_validator.EnvVars.HOSTED_ZONE_ID.name: Ref(hosted_zone_id) }), Condition=should_create_certificate, )) certificate_validator_log_group = template.add_resource( LogGroup( "CertificateValidatorLogGroup", LogGroupName=Join( "", ["/aws/lambda/", Ref(certificate_validator_function)]), RetentionInDays=If(retention_defined, Ref(log_retention_days), NoValue), Condition=should_create_certificate, )) certificate_validator_rule = template.add_resource( Rule( "CertificateValidatorRule", EventPattern={ "detail-type": ["AWS API Call via CloudTrail"], "detail": { "eventSource": ["acm.amazonaws.com"], "eventName": ["AddTagsToCertificate"], "requestParameters": { "tags": { "key": [certificate_validator_function.title], "value": [GetAtt(certificate_validator_function, "Arn")], } }, }, }, Targets=[ Target( Id="certificate-validator-lambda", Arn=GetAtt(certificate_validator_function, "Arn"), ) ], DependsOn=[certificate_validator_log_group], Condition=should_create_certificate, )) certificate_validator_permission = template.add_resource( Permission( "CertificateValidatorPermission", FunctionName=GetAtt(certificate_validator_function, "Arn"), Action="lambda:InvokeFunction", Principal="events.amazonaws.com", 
    certificate = template.add_resource(
        Certificate(
            "Certificate",
            DomainName=Select(0, Ref(dns_names)),
            # duplicate first name works fine
            SubjectAlternativeNames=Ref(dns_names),
            ValidationMethod="DNS",
            Tags=Tags(
                **{
                    certificate_validator_function.title:
                    GetAtt(certificate_validator_function, "Arn"),
                }),
            DependsOn=[certificate_validator_permission],
            Condition=should_create_certificate,
        ))

    edge_hook_role = template.add_resource(
        Role(
            "EdgeHookRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal(
                            "Service",
                            [
                                "lambda.amazonaws.com",
                                "edgelambda.amazonaws.com",
                            ],
                        ),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
        ))

    edge_hook_function = template.add_resource(
        Function(
            "EdgeHookFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.handler",
            Code=Code(ZipFile=inspect.getsource(edge_hook)),
            MemorySize=128,
            Timeout=3,
            Role=GetAtt(edge_hook_role, "Arn"),
        ))

    # CloudFormation never republishes an existing Version resource, so a
    # hash of the function's current definition is embedded in the logical
    # ID: any change to the function yields a fresh Version resource.
    edge_hook_function_hash = (hashlib.sha256(
        json.dumps(edge_hook_function.to_dict(),
                   sort_keys=True).encode("utf-8")).hexdigest()[:10].upper())

    edge_hook_version = template.add_resource(
        Version(
            "EdgeHookVersion" + edge_hook_function_hash,
            FunctionName=GetAtt(edge_hook_function, "Arn"),
        ))

    replica_log_group_name = Join(
        "/",
        [
            "/aws/lambda",
            Join(
                ".",
                [
                    FindInMap(partition_config, Partition, "PrimaryRegion"),
                    Ref(edge_hook_function),
                ],
            ),
        ],
    )

    edge_hook_role_policy = template.add_resource(
        PolicyType(
            "EdgeHookRolePolicy",
            PolicyName="write-logs",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(":", [
                                "arn",
                                Partition,
                                "logs",
                                "*",
                                AccountId,
                                "log-group",
                                replica_log_group_name,
                                "log-stream",
                                "*",
                            ]),
                        ],
                    ),
                ],
            ),
            Roles=[Ref(edge_hook_role)],
        ))

    stack_set_administration_role = template.add_resource(
        Role(
            "StackSetAdministrationRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "cloudformation.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
        ))

    stack_set_execution_role = template.add_resource(
        Role(
            "StackSetExecutionRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "AWS",
                            GetAtt(stack_set_administration_role, "Arn")),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="create-stackset-instances",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.DescribeStacks,
                                    logs.DescribeLogGroups,
                                ],
                                Resource=["*"],
                            ),
                            # stack instances communicate with the CFN
                            # service via SNS
                            Statement(
                                Effect=Allow,
                                Action=[sns.Publish],
                                NotResource=[
                                    Join(":", [
                                        "arn", Partition, "sns", "*",
                                        AccountId, "*"
                                    ])
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    logs.CreateLogGroup,
                                    logs.DeleteLogGroup,
                                    logs.PutRetentionPolicy,
                                    logs.DeleteRetentionPolicy,
                                ],
                                Resource=[
                                    Join(":", [
                                        "arn",
                                        Partition,
                                        "logs",
                                        "*",
                                        AccountId,
                                        "log-group",
                                        replica_log_group_name,
                                        "log-stream",
                                        "",
                                    ]),
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.CreateStack,
                                    cloudformation.DeleteStack,
                                    cloudformation.UpdateStack,
                                ],
                                Resource=[
                                    Join(":", [
                                        "arn",
                                        Partition,
                                        "cloudformation",
                                        "*",
                                        AccountId,
                                        Join("/", [
                                            "stack",
                                            Join("-", [
                                                "StackSet", StackName, "*"
                                            ]),
                                        ]),
                                    ])
                                ],
                            ),
                        ],
                    ),
                ),
            ],
        ))
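
    # Role pairing for the self-managed StackSet below: CloudFormation
    # assumes the administration role, which must in turn be able to assume
    # the execution role in every target account (here, only this one). The
    # policy below grants exactly that hop.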
"StackSetAdministrationRolePolicy", PolicyName="assume-execution-role", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[sts.AssumeRole], Resource=[GetAtt(stack_set_execution_role, "Arn")], ), ], ), Roles=[Ref(stack_set_administration_role)], )) edge_log_groups = template.add_resource( StackSet( "EdgeLambdaLogGroupStackSet", AdministrationRoleARN=GetAtt(stack_set_administration_role, "Arn"), ExecutionRoleName=Ref(stack_set_execution_role), StackSetName=Join("-", [StackName, "EdgeLambdaLogGroup"]), PermissionModel="SELF_MANAGED", Description="Multi-region log groups for Lambda@Edge replicas", Parameters=[ StackSetParameter( ParameterKey="LogGroupName", ParameterValue=replica_log_group_name, ), StackSetParameter( ParameterKey="LogRetentionDays", ParameterValue=Ref(log_retention_days), ), ], OperationPreferences=OperationPreferences( FailureToleranceCount=0, MaxConcurrentPercentage=100, ), StackInstancesGroup=[ StackInstances( DeploymentTargets=DeploymentTargets(Accounts=[AccountId]), Regions=FindInMap(partition_config, Partition, "DefaultRegions"), ) ], TemplateBody=create_log_group_template().to_json(indent=None), DependsOn=[stack_set_administration_role_policy], )) price_class_distribution = template.add_resource( Distribution( "PriceClassDistribution", DistributionConfig=DistributionConfig( Comment="Dummy distribution used for price class hack", DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="default", ViewerProtocolPolicy="allow-all", ForwardedValues=ForwardedValues(QueryString=False), ), Enabled=True, Origins=[ Origin(Id="default", DomainName=GetAtt(bucket, "DomainName")) ], IPV6Enabled=True, ViewerCertificate=ViewerCertificate( CloudFrontDefaultCertificate=True), PriceClass="PriceClass_All", ), Condition=using_price_class_hack, )) distribution = template.add_resource( Distribution( "ContentDistribution", DistributionConfig=DistributionConfig( Enabled=True, Aliases=If(using_dns_names, Ref(dns_names), NoValue), Logging=Logging(Bucket=GetAtt(log_bucket, "DomainName"), Prefix="cloudfront/"), DefaultRootObject="index.html", Origins=[ Origin( Id="default", DomainName=GetAtt(bucket, "DomainName"), S3OriginConfig=S3OriginConfig( OriginAccessIdentity=Join( "", [ "origin-access-identity/cloudfront/", Ref(origin_access_identity), ], )), ) ], DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="default", Compress=True, ForwardedValues=ForwardedValues(QueryString=False), ViewerProtocolPolicy="redirect-to-https", DefaultTTL=Ref(default_ttl_seconds), LambdaFunctionAssociations=[ LambdaFunctionAssociation( EventType="origin-request", LambdaFunctionARN=Ref(edge_hook_version), ) ], ), HttpVersion="http2", IPV6Enabled=True, ViewerCertificate=ViewerCertificate( AcmCertificateArn=If( using_acm_certificate, Ref(acm_certificate_arn), If(using_hosted_zone, Ref(certificate), NoValue), ), SslSupportMethod=If(using_certificate, "sni-only", NoValue), CloudFrontDefaultCertificate=If(using_certificate, NoValue, True), MinimumProtocolVersion=Ref(tls_protocol_version), ), PriceClass=If(using_price_class_hack, "PriceClass_100", "PriceClass_All"), ), DependsOn=[ bucket_policy, log_ingester_policy, edge_log_groups, precondition_region_is_primary, ], )) distribution_log_group = template.add_resource( LogGroup( "DistributionLogGroup", LogGroupName=Join( "", ["/aws/cloudfront/", Ref(distribution)]), RetentionInDays=If(retention_defined, Ref(log_retention_days), NoValue), )) bucket_log_group = template.add_resource( LogGroup( "BucketLogGroup", 
    template.add_output(Output("DistributionId", Value=Ref(distribution)))
    template.add_output(
        Output("DistributionDomain",
               Value=GetAtt(distribution, "DomainName")))
    template.add_output(
        Output(
            "DistributionDnsTarget",
            Value=If(
                using_price_class_hack,
                GetAtt(price_class_distribution, "DomainName"),
                GetAtt(distribution, "DomainName"),
            ),
        ))
    template.add_output(
        Output(
            "DistributionUrl",
            Value=Join(
                "", ["https://", GetAtt(distribution, "DomainName"), "/"]),
        ))
    template.add_output(
        Output("ContentBucketArn", Value=GetAtt(bucket, "Arn")))

    return template
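
# A minimal usage sketch, assuming this builder is exposed as a function
# named `create_template` (that name, and the output filename, are
# illustrative stand-ins, not part of this module):
#
#     template = create_template()
#     with open("cloudfront-site.template.json", "w") as f:
#         f.write(template.to_json())
#
# troposphere's Template.to_json() sorts keys by default, so the rendered
# file diffs cleanly between runs.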