def add_conditions(self):
    """Add conditions to template."""
    template = self.template
    variables = self.get_variables()

    # Plain "parameter is non-empty" switches.
    template.add_condition(
        'KmsKeyEnabled',
        Not(Equals(variables['KmsKey'].ref, '')))
    template.add_condition(
        'CustomParameterGroup',
        Not(Equals(variables['ParameterGroupName'].ref, '')))
    template.add_condition(
        'VpnAccessEnabled',
        Not(Equals(variables['VPNSecurityGroup'].ref, '')))
    # SNSTopic may be a list; Join collapses it to one string before comparing.
    template.add_condition(
        'SnsTopicSpecified',
        Not(Equals(Join('', variables['SNSTopic'].ref), '')))
    # Both '' and the literal marker 'undefined' mean "value not provided".
    for cond_name, var_name in (
            ('IdentifierSpecified', 'RdsInstanceIdentifier'),
            ('SnapshotSpecified', 'RdsSnapshotIdentifier')):
        value_ref = variables[var_name].ref
        template.add_condition(
            cond_name,
            Not(Or(Equals(value_ref, 'undefined'),
                   Equals(value_ref, ''))))
def build_tags_list(t):
    """Add ten optional Tag<N>Name/Tag<N>Value parameter pairs to *t*.

    Returns an If() that resolves to a TagsList of the populated pairs, or
    NoValue when every pair was left at its "-NONE-" placeholder.
    """
    placeholder = "-NONE-"
    condition_refs = []
    tag_entries = []
    for idx in range(1, 11):
        cond_name = "HasTag%s" % idx
        name_param = t.add_parameter(
            Parameter(
                "Tag%sName" % idx,
                Type="String",
                Default=placeholder,
            ))
        value_param = t.add_parameter(
            Parameter(
                "Tag%sValue" % idx,
                Type="String",
                Default=placeholder,
            ))
        # A pair counts only when both halves were overridden.
        t.add_condition(
            cond_name,
            Not(Or(Equals(Ref(name_param), placeholder),
                   Equals(Ref(value_param), placeholder))))
        condition_refs.append({"Fn::Condition": cond_name})
        tag_entries.append(
            If(cond_name,
               {"Key": Ref(name_param), "Value": Ref(value_param)},
               NoValue))
    # True when at least one pair was supplied.
    t.add_condition("HasTags", Or(*condition_refs))
    return If("HasTags", TagsList(*tag_entries), NoValue)
def add_conditions(self):
    """Set up template conditions."""
    template = self.template
    variables = self.get_variables()

    def specified(key):
        # True when the variable is neither blank nor the 'undefined' marker.
        value_ref = variables[key].ref
        return And(Not(Equals(value_ref, '')),
                   Not(Equals(value_ref, 'undefined')))

    template.add_condition('SSHKeySpecified', specified('KeyName'))
    template.add_condition(
        'MissingVPNAMI',
        Or(Equals(variables['VPNAMI'].ref, ''),
           Equals(variables['VPNAMI'].ref, 'undefined')))
    # Any OS other than Ubuntu 16.04 gets the RHEL-style user data.
    template.add_condition(
        'RHELUserData',
        Not(Equals(variables['VPNOS'].ref, 'ubuntu-16.04')))
    template.add_condition('ChefRunListSpecified', specified('ChefRunList'))
    template.add_condition('PublicRouteTableSpecified',
                           specified('PublicRouteTable'))
    template.add_condition(
        'PublicSubnetsOmitted',
        Equals(Join('', variables['PublicSubnets'].ref), ''))
    # One condition per possible private subnet count (1..AZS).
    for count in range(1, AZS + 1):
        template.add_condition(
            '%iPrivateSubnetsCreated' % count,
            Equals(variables['PrivateSubnetCount'].ref, str(count)))
    template.add_condition(
        'PrivateSubnetCountOmitted',
        Equals(variables['PrivateSubnetCount'].ref, '0'))
def add_conditions(self):
    """Set up template conditions."""
    template = self.template
    variables = self.get_variables()
    # Each bucket name counts as omitted when blank or literally 'undefined'.
    for var_name in ('ChefBucketName', 'ChefDataBucketName'):
        value_ref = variables[var_name].ref
        template.add_condition(
            "%sOmitted" % var_name,
            Or(Equals(value_ref, ''),
               Equals(value_ref, 'undefined')))
def create_conditions(self):
    """Register hosted-zone related template conditions."""
    add = self.template.add_condition
    # A zone is "present" when its domain parameter is non-empty.
    add("HasInternalDomain", Not(Equals(Ref("InternalDomain"), "")))
    add("HasExternalDomain", Not(Equals(Ref("BaseDomain"), "")))
    add("HasHostedZones",
        Or(Condition("HasInternalDomain"), Condition("HasExternalDomain")))
    add("NoHostedZones", Not(Condition("HasHostedZones")))
def create_policy(self):
    """Attach firehose/logs write policies to externally supplied IAM
    roles, groups, and users, creating them only when at least one
    principal list is non-empty.
    """
    ns = self.context.namespace
    name_prefix = "%s-%s" % (ns, self.name)
    t = self.template

    # One condition per principal list: true when the joined list is non-empty.
    for cond_name, param_name in (('ExternalRoles', 'RoleNames'),
                                  ('ExternalGroups', 'GroupNames'),
                                  ('ExternalUsers', 'UserNames')):
        t.add_condition(
            cond_name,
            Not(Equals(Join(",", Ref(param_name)), '')),
        )
    # Only create the policies when any principal list is populated.
    t.add_condition(
        'CreatePolicy',
        Or(
            TropoCondition("ExternalRoles"),
            TropoCondition("ExternalGroups"),
            TropoCondition("ExternalUsers"),
        ))

    def attach(cond_name, param_name):
        # Attach to the principal list only when it was supplied.
        return If(cond_name, Ref(param_name), Ref("AWS::NoValue"))

    # The two policies differ only in logical id, name suffix, and document.
    for logical_id, suffix, document in (
            (FIREHOSE_WRITE_POLICY, 'firehose', firehose_write_policy()),
            (LOGS_POLICY, 'logs', logs_policy())):
        t.add_resource(
            iam.PolicyType(
                logical_id,
                PolicyName='{}-{}'.format(name_prefix, suffix),
                PolicyDocument=document,
                Roles=attach("ExternalRoles", "RoleNames"),
                Groups=attach("ExternalGroups", "GroupNames"),
                Users=attach("ExternalUsers", "UserNames"),
                Condition='CreatePolicy',
            ),
        )
def create_conditions(self):
    """Register hosted-zone and NAT related template conditions."""
    add = self.template.add_condition
    # A zone is "present" when its domain parameter is non-empty.
    add("HasInternalDomain", Not(Equals(Ref("InternalDomain"), "")))
    add("HasExternalDomain", Not(Equals(Ref("BaseDomain"), "")))
    add("HasHostedZones",
        Or(Condition("HasInternalDomain"), Condition("HasExternalDomain")))
    add("NoHostedZones", Not(Condition("HasHostedZones")))
    # NAT gateway and NAT instances are mutually exclusive choices.
    add("UseNatGateway", Equals(Ref("UseNatGateway"), "true"))
    add("UseNatInstances", Not(Condition("UseNatGateway")))
def create_conditions():
    """Build cascading host-count conditions plus a GovCloud region check.

    Condition N is true when NumSRRHosts equals N or any lower-numbered
    condition (starting at 4) already matched.
    """
    first = 4
    prev_name = CONDITION_COUNTER_PREFIX + str(first)
    t.add_condition(prev_name, Equals(Ref(NumSRRHostsParam), first))
    for count in range(first + 1, MAX_INSTANCES + 1):
        cond_name = CONDITION_COUNTER_PREFIX + str(count)
        t.add_condition(
            cond_name,
            Or(Equals(Ref(NumSRRHostsParam), count), Condition(prev_name)))
        prev_name = cond_name
    # Separate switch for the us-gov-west-1 partition.
    t.add_condition("GovCloudCondition",
                    Equals(Ref("AWS::Region"), "us-gov-west-1"))
label="Enable automatic failover", ) redis_uses_automatic_failover = "RedisAutomaticFailoverCondition" template.add_condition(redis_uses_automatic_failover, Equals(Ref(redis_automatic_failover), "true")) secure_redis_condition = "SecureRedisCondition" template.add_condition( secure_redis_condition, And(Condition(using_redis_condition), Condition(use_aes256_encryption_cond))) using_either_cache_condition = "EitherCacheCondition" template.add_condition( using_either_cache_condition, Or(Condition(using_memcached_condition), Condition(using_redis_condition))) # Subnet and security group shared by both clusters cache_subnet_group = elasticache.SubnetGroup( "CacheSubnetGroup", template=template, Description="Subnets available for the cache instance", Condition=using_either_cache_condition, SubnetIds=[Ref(private_subnet_a), Ref(private_subnet_b)], ) cache_security_group = ec2.SecurityGroup( 'CacheSecurityGroup', template=template,
def create_template(self):
    """Create template (main function called by Stacker).

    Builds the resources for a Terraform S3 backend: a DynamoDB lock
    table, a versioned state bucket, and a managed IAM policy granting
    the minimum access Terraform needs to both.
    """
    template = self.template
    variables = self.get_variables()
    self.template.add_version('2010-09-09')
    self.template.add_description('Terraform State Resources')

    # Conditions
    # Both '' and the literal string 'undefined' mean "name not supplied";
    # in that case CloudFormation generates a name for the resource.
    for i in ['BucketName', 'TableName']:
        template.add_condition(
            "%sOmitted" % i,
            Or(Equals(variables[i].ref, ''),
               Equals(variables[i].ref, 'undefined')))

    # Resources
    # DynamoDB table used by Terraform for state locking; the key name
    # 'LockID' is fixed by Terraform's S3 backend contract.
    terraformlocktable = template.add_resource(
        dynamodb.Table(
            'TerraformStateTable',
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(AttributeName='LockID',
                                             AttributeType='S')
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName='LockID', KeyType='HASH')
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=2, WriteCapacityUnits=2),
            TableName=If('TableNameOmitted', NoValue,
                         variables['TableName'].ref)))
    template.add_output(
        Output('%sName' % terraformlocktable.title,
               Description='Name of DynamoDB table for Terraform state',
               Value=terraformlocktable.ref()))

    # Versioned bucket storing the Terraform state files; old state
    # versions are expired after 90 days.
    terraformstatebucket = template.add_resource(
        s3.Bucket(
            'TerraformStateBucket',
            AccessControl=s3.Private,
            BucketName=If('BucketNameOmitted', NoValue,
                          variables['BucketName'].ref),
            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                 Status='Enabled')
            ]),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled')))
    template.add_output(
        Output('%sName' % terraformstatebucket.title,
               Description='Name of bucket storing Terraform state',
               Value=terraformstatebucket.ref()))
    template.add_output(
        Output('%sArn' % terraformstatebucket.title,
               Description='Arn of bucket storing Terraform state',
               Value=terraformstatebucket.get_att('Arn')))

    # Managed policy granting the permissions Terraform's S3 backend
    # documents as required: list/read/write on the bucket and
    # get/put/delete on the lock table.
    managementpolicy = template.add_resource(
        iam.ManagedPolicy(
            'ManagementPolicy',
            Description='Managed policy for Terraform state management.',
            Path='/',
            PolicyDocument=PolicyDocument(
                Version='2012-10-17',
                Statement=[
                    # https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions
                    Statement(
                        Action=[awacs.s3.ListBucket],
                        Effect=Allow,
                        Resource=[terraformstatebucket.get_att('Arn')]),
                    Statement(
                        Action=[awacs.s3.GetObject, awacs.s3.PutObject],
                        Effect=Allow,
                        Resource=[
                            Join('', [
                                terraformstatebucket.get_att('Arn'),
                                '/*'
                            ])
                        ]),
                    Statement(Action=[
                        awacs.dynamodb.GetItem, awacs.dynamodb.PutItem,
                        awacs.dynamodb.DeleteItem
                    ],
                              Effect=Allow,
                              Resource=[terraformlocktable.get_att('Arn')])
                ])))
    template.add_output(
        Output('PolicyArn',
               Description='Managed policy Arn',
               Value=managementpolicy.ref()))
def create_template(self):
    """Create template (main function called by Stacker).

    Builds a private, versioned S3 bucket plus a bucket policy that
    rejects any object upload that is not AES256 server-side encrypted.
    """
    template = self.template
    variables = self.get_variables()
    template.add_version('2010-09-09')
    template.add_description('Sample app')

    # Conditions
    # '' or the literal 'undefined' means "let CloudFormation name the
    # bucket".
    template.add_condition(
        'BucketNameOmitted',
        Or(Equals(variables['BucketName'].ref, ''),
           Equals(variables['BucketName'].ref, 'undefined')))

    # Resources
    bucket = template.add_resource(
        s3.Bucket(
            'Bucket',
            AccessControl=s3.Private,
            BucketName=If('BucketNameOmitted', Ref('AWS::NoValue'),
                          variables['BucketName'].ref),
            # Expire superseded object versions after 90 days.
            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                 Status='Enabled')
            ]),
            Tags=Tags(application=variables['ApplicationName'].ref,
                      customer=variables['CustomerName'].ref,
                      environment=variables['EnvironmentName'].ref),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status='Enabled')))
    template.add_output(
        Output('BucketName',
               Description='Name of bucket',
               Value=Ref(bucket)))
    template.add_output(
        Output('BucketArn',
               Description='Arn of bucket',
               Value=GetAtt(bucket, 'Arn')))

    # Deny uploads that are unencrypted or use the wrong SSE algorithm.
    # https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html
    template.add_resource(
        s3.BucketPolicy(
            'RequireBucketEncryption',
            Bucket=Ref(bucket),
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=[
                    # Reject PutObject with an SSE header other than AES256.
                    Statement(
                        Sid='DenyIncorrectEncryptionHeader',
                        Action=[awacs.s3.PutObject],
                        Condition=Condition(
                            StringNotEquals(
                                's3:x-amz-server-side-encryption',
                                'AES256')),
                        Effect=Deny,
                        Principal=Principal('*'),
                        Resource=[Join('', [GetAtt(bucket, 'Arn'), '/*'])]),
                    # Reject PutObject with no SSE header at all.
                    Statement(
                        Sid='DenyUnEncryptedObjectUploads',
                        Action=[awacs.s3.PutObject],
                        Condition=Condition(
                            Null('s3:x-amz-server-side-encryption',
                                 'true')),
                        Effect=Deny,
                        Principal=Principal('*'),
                        Resource=[Join('', [GetAtt(bucket, 'Arn'), '/*'])])
                ])))
Description="" "An existing ACM certificate ARN to be used by the application ELB. " "DNS and Email validation will not work with this option.", ), group="Global", label="Custom App Certificate ARN", ) custom_app_certificate_arn_condition = "CustomAppCertArnCondition" template.add_condition(custom_app_certificate_arn_condition, Not(Equals(Ref(custom_app_certificate_arn), ""))) stack_cert_condition = "StackCertificateCondition" template.add_condition(stack_cert_condition, Not(Equals(Ref(certificate_validation_method), dont_create_value))) cert_condition = "CertificateCondition" template.add_condition(cert_condition, Or( Not(Equals(Ref(custom_app_certificate_arn), "")), Not(Equals(Ref(certificate_validation_method), dont_create_value)) )) application = If(custom_app_certificate_arn_condition, Ref(custom_app_certificate_arn), Ref(template.add_resource( Certificate( 'Certificate', Condition=stack_cert_condition, DomainName=domain_name, SubjectAlternativeNames=If(no_alt_domains, Ref("AWS::NoValue"), domain_name_alternates), DomainValidationOptions=[ DomainValidationOption( DomainName=domain_name, ValidationDomain=domain_name, ),
def ssm_network():
    """Build a VPC suitable for SSM-managed instances.

    Creates a public subnet (internet gateway route) and a private subnet,
    optionally a NAT gateway for the private subnet and optionally the
    S3/SSM interface endpoints needed for agent traffic to stay inside the
    VPC.  Writes the rendered template to ssm_network.yml next to this
    file and returns it as YAML.
    """
    template = Template()
    default_route = "0.0.0.0/0"
    vpc_cidr = "192.168.0.0/16"

    # --- Parameters ---------------------------------------------------
    template.add_parameter(Parameter(
        "VpcCidr",
        Type="String",
        Description="Cidr block for VPC",
        MinLength="9",
        MaxLength="18",
        Default=vpc_cidr,
        AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
        ConstraintDescription="Must match following pattern 'xxx.xxx.xxx.xxx/xx'"
    ))
    template.add_parameter(Parameter(
        "CreateEndpoints",
        Type="String",
        Description="Create VPC Endpoints",
        Default="No",
        AllowedValues=["Yes", "No"],
        ConstraintDescription="'Yes' or 'No' are only options"
    ))
    template.add_parameter(Parameter(
        "CreateNatGateway",
        Type="String",
        Description="Create NAT Gateway",
        Default="No",
        AllowedValues=["Yes", "No"],
        ConstraintDescription="'Yes' or 'No' are only options"
    ))

    # --- Conditions ---------------------------------------------------
    # Each toggle accepts "Yes" or "yes"; the combined condition ORs the
    # two spellings.  (Registered on the template near the end, after the
    # resources are added.)
    conditions = {
        "CreateVpcEndpointsUpperYes": Equals(
            Ref("CreateEndpoints"), "Yes"
        ),
        "CreateVpcEndpointsLowerYes": Equals(
            Ref("CreateEndpoints"), "yes"
        ),
        "CreateVpcEndpoints": Or(
            Condition("CreateVpcEndpointsUpperYes"),
            Condition("CreateVpcEndpointsLowerYes")
        ),
        "CreateNatGatewayUpperYes": Equals(
            Ref("CreateNatGateway"), "Yes"
        ),
        "CreateNatGatewayLowerYes": Equals(
            Ref("CreateNatGateway"), "yes"
        ),
        "CreateNatGateway": Or(
            Condition("CreateNatGatewayUpperYes"),
            Condition("CreateNatGatewayLowerYes")
        )
    }

    # --- VPC and internet gateway ------------------------------------
    ssm_vpc = ec2.VPC(
        'SsmVpc',
        CidrBlock=Ref("VpcCidr"),
        InstanceTenancy="default",
        EnableDnsHostnames=True,
        EnableDnsSupport=True,
        Tags=Tags(
            Name="SSM VPC"
        )
    )
    # Carve the VPC CIDR into 256 /24 blocks (8 spare bits per subnet).
    subnet_blocks = Cidr(GetAtt(ssm_vpc, "CidrBlock"), 256, 8)
    ssm_ig = ec2.InternetGateway(
        'SsmIG',
    )
    ssm_attach_gw = ec2.VPCGatewayAttachment(
        'SsmAttachGateway',
        InternetGatewayId=Ref(ssm_ig),
        VpcId=Ref(ssm_vpc)
    )

    # --- Public subnet and routing -----------------------------------
    # Both subnets land in the first AZ of the region.
    ssm_public_subnet = ec2.Subnet(
        'SsmPublicSubnet',
        DependsOn=ssm_attach_gw,
        AvailabilityZone=Select(0, GetAZs('')),
        CidrBlock=Select(0, subnet_blocks),
        VpcId=Ref(ssm_vpc),
        Tags=Tags(
            Name="Public Subnet"
        )
    )
    ssm_public_route_table = ec2.RouteTable(
        'SsmPublicRouteTable',
        VpcId=Ref(ssm_vpc),
    )
    # Default route for the public subnet goes via the internet gateway.
    ssm_public_route = ec2.Route(
        'SsmPublicRoute',
        DestinationCidrBlock=default_route,
        GatewayId=Ref(ssm_ig),
        RouteTableId=Ref(ssm_public_route_table)
    )
    ssm_public_subnet_route_table_association = ec2.SubnetRouteTableAssociation(
        'SsmPublicSubnetRouteTableAssociation',
        RouteTableId=Ref(ssm_public_route_table),
        SubnetId=Ref(ssm_public_subnet)
    )

    # --- Optional NAT gateway (CreateNatGateway condition) -----------
    ssm_eip_nat_gateway = ec2.EIP(
        'SsmEipNatGateway',
        Condition="CreateNatGateway"
    )
    ssm_nat_gateway = ec2.NatGateway(
        'SsmNatGateway',
        Condition="CreateNatGateway",
        DependsOn=ssm_eip_nat_gateway,
        SubnetId=Ref(ssm_public_subnet),
        AllocationId=GetAtt(ssm_eip_nat_gateway, "AllocationId"),
    )

    # --- Private subnet and routing ----------------------------------
    ssm_private_subnet = ec2.Subnet(
        'SsmPrivateSubnet',
        DependsOn=ssm_attach_gw,
        AvailabilityZone=Select(0, GetAZs('')),
        CidrBlock=Select(1, subnet_blocks),
        VpcId=Ref(ssm_vpc),
        Tags=Tags(
            Name="Private Subnet"
        )
    )
    ssm_private_route_table = ec2.RouteTable(
        'SsmPrivateRouteTable',
        VpcId=Ref(ssm_vpc),
    )
    # Private default route exists only when the NAT gateway does.
    ssm_private_route = ec2.Route(
        'SsmPrivateRoute',
        Condition="CreateNatGateway",
        DestinationCidrBlock=default_route,
        NatGatewayId=Ref(ssm_nat_gateway),
        RouteTableId=Ref(ssm_private_route_table)
    )
    ssm_private_subnet_route_table_association = ec2.SubnetRouteTableAssociation(
        'SsmPrivateSubnetRouteTableAssociation',
        RouteTableId=Ref(ssm_private_route_table),
        SubnetId=Ref(ssm_private_subnet)
    )

    # --- Security group for endpoint traffic -------------------------
    # Allow HTTPS from anywhere inside the VPC (endpoints use 443).
    ssm_sg_ingress_rules = [
        ec2.SecurityGroupRule(
            ToPort=443,
            FromPort=443,
            IpProtocol="tcp",
            CidrIp=GetAtt(ssm_vpc, "CidrBlock")
        )
    ]
    ssm_security_group = ec2.SecurityGroup(
        'SsmSecurityGroup',
        GroupName="SsmSG",
        GroupDescription="SG for SSM usage",
        VpcId=Ref(ssm_vpc),
        SecurityGroupIngress=ssm_sg_ingress_rules
    )

    # --- Optional VPC endpoints (CreateVpcEndpoints condition) -------
    # S3 is a gateway endpoint attached to the private route table; the
    # three SSM endpoints are interface endpoints in the private subnet.
    ssm_s3e_vpc_endpoint = ec2.VPCEndpoint(
        'SsmS3VpcEndpoint',
        Condition="CreateVpcEndpoints",
        RouteTableIds=[
            Ref(ssm_private_route_table)
        ],
        ServiceName=vpc_endpoint("s3"),
        VpcId=Ref(ssm_vpc),
        VpcEndpointType="Gateway"
    )
    ssm_ssm_vpc_endpoint = ec2.VPCEndpoint(
        'SsmSsmVpcEndpoint',
        Condition="CreateVpcEndpoints",
        SubnetIds=[Ref(ssm_private_subnet)],
        ServiceName=vpc_endpoint("ssm"),
        VpcId=Ref(ssm_vpc),
        VpcEndpointType="Interface",
        SecurityGroupIds=[
            Ref(ssm_security_group)
        ],
        PrivateDnsEnabled=True
    )
    ssm_ssmmessages_vpc_endpoint = ec2.VPCEndpoint(
        'SsmSsmMessagesVpcEndpoint',
        Condition="CreateVpcEndpoints",
        SubnetIds=[Ref(ssm_private_subnet)],
        ServiceName=vpc_endpoint("ssmmessages"),
        VpcId=Ref(ssm_vpc),
        VpcEndpointType="Interface",
        SecurityGroupIds=[
            Ref(ssm_security_group)
        ],
        PrivateDnsEnabled=True
    )
    ssm_ec2messages_vpc_endpoint = ec2.VPCEndpoint(
        'SsmEc2MessagesVpcEndpoint',
        Condition="CreateVpcEndpoints",
        SubnetIds=[Ref(ssm_private_subnet)],
        ServiceName=vpc_endpoint("ec2messages"),
        VpcId=Ref(ssm_vpc),
        VpcEndpointType="Interface",
        SecurityGroupIds=[
            Ref(ssm_security_group)
        ],
        PrivateDnsEnabled=True
    )

    # --- Assemble template -------------------------------------------
    template.add_resource(ssm_vpc)
    template.add_resource(ssm_ig)
    template.add_resource(ssm_attach_gw)
    template.add_resource(ssm_eip_nat_gateway)
    template.add_resource(ssm_public_subnet)
    template.add_resource(ssm_public_route_table)
    template.add_resource(ssm_nat_gateway)
    template.add_resource(ssm_public_route)
    template.add_resource(ssm_public_subnet_route_table_association)
    template.add_resource(ssm_private_subnet)
    template.add_resource(ssm_private_route_table)
    template.add_resource(ssm_private_route)
    template.add_resource(ssm_private_subnet_route_table_association)
    template.add_resource(ssm_security_group)
    template.add_resource(ssm_s3e_vpc_endpoint)
    template.add_resource(ssm_ec2messages_vpc_endpoint)
    template.add_resource(ssm_ssm_vpc_endpoint)
    template.add_resource(ssm_ssmmessages_vpc_endpoint)
    for k in conditions:
        template.add_condition(k, conditions[k])

    # --- Outputs (all exported with the stack name as prefix) --------
    template.add_output(Output(
        'SsmVpc',
        Description="VPC for SSM",
        Value=Ref(ssm_vpc),
        Export=Export(Join("", [Ref("AWS::StackName"), "-ssm-vpc"]))
    ))
    template.add_output(Output(
        'SsmSg',
        Description="Security Group for SSM",
        Value=Ref(ssm_security_group),
        Export=Export(Join("", [Ref("AWS::StackName"), "-ssm-sg"]))
    ))
    template.add_output(Output(
        'SsmPrivateSubnet',
        Description="Private Subnet for SSM",
        Value=Ref(ssm_private_subnet),
        Export=Export(Join("", [Ref("AWS::StackName"), "-ssm-private-subnet"]))
    ))
    template.add_output(Output(
        'SsmPrivateRouteTable',
        Description="Private RouteTable for SSM",
        Value=Ref(ssm_private_route_table),
        Export=Export(Join("", [Ref("AWS::StackName"), "-ssm-private-route-table"]))
    ))

    # Persist the rendered template alongside this module.
    with open(os.path.dirname(os.path.realpath(__file__)) + '/ssm_network.yml', 'w') as cf_file:
        cf_file.write(template.to_yaml())
    return template.to_yaml()
# Condition names (*_CON_T) and their expressions (*_CON) for the RDS
# template.  Each *_CON_T string is the CloudFormation logical id under
# which the matching *_CON expression is registered.

# Create a subnet group only when the parameter is left at its default.
DBS_SUBNET_GROUP_CON_T = "CreateSubnetGroupCondition"
DBS_SUBNET_GROUP_CON = Equals(Ref(DBS_SUBNET_GROUP), DBS_SUBNET_GROUP.Default)

# Snapshot parameter at its default means "create a fresh database".
NOT_USE_DB_SNAPSHOT_CON_T = "NotUseSnapshotToCreateDbCondition"
NOT_USE_DB_SNAPSHOT_CON = Equals(Ref(DB_SNAPSHOT_ID), DB_SNAPSHOT_ID.Default)

USE_DB_SNAPSHOT_CON_T = "UseSnapshotToCreateDbCondition"
USE_DB_SNAPSHOT_CON = Not(Condition(NOT_USE_DB_SNAPSHOT_CON_T))

# Aurora engines are named 'aurora', 'aurora-mysql', 'aurora-postgresql';
# splitting on '-' and checking the first token detects all of them.
USE_CLUSTER_CON_T = "UseAuroraClusterCondition"
USE_CLUSTER_CON = Equals("aurora", Select(0, Split("-", Ref(DB_ENGINE_NAME))))

NOT_USE_CLUSTER_CON_T = "NotUseClusterCondition"
NOT_USE_CLUSTER_CON = Not(Condition(USE_CLUSTER_CON_T))

# Combinations of the cluster / snapshot switches used when deciding
# which DBCluster / DBInstance variants to create.
USE_CLUSTER_AND_SNAPSHOT_CON_T = "UseClusterAndSnapshotCondition"
USE_CLUSTER_AND_SNAPSHOT_CON = And(Condition(USE_CLUSTER_CON_T),
                                   Condition(USE_DB_SNAPSHOT_CON_T))

USE_CLUSTER_NOT_SNAPSHOT_CON_T = "UseClusterAndNotSnapshotCondition"
USE_CLUSTER_NOT_SNAPSHOT_CON = And(Condition(USE_CLUSTER_CON_T),
                                   Condition(NOT_USE_DB_SNAPSHOT_CON_T))

NOT_USE_CLUSTER_USE_SNAPSHOT_CON_T = "NotUseClusterButUseSnapshotCondition"
NOT_USE_CLUSTER_USE_SNAPSHOT_CON = And(Condition(NOT_USE_CLUSTER_CON_T),
                                       Condition(USE_DB_SNAPSHOT_CON_T))

USE_CLUSTER_OR_SNAPSHOT_CON_T = "UseSnapshotOrClusterCondition"
USE_CLUSTER_OR_SNAPSHOT_CON = Or(Condition(USE_CLUSTER_CON_T),
                                 Condition(USE_DB_SNAPSHOT_CON_T))
Ref("AWS::Region"), "eu-west-1" ), "NotOneEqualsFoo": Not( Condition("OneEqualsFoo") ), "BarEqualsTwo": Equals( "Bar", Ref("Two") ), "ThreeEqualsFour": Equals( Ref("Three"), Ref("Four") ), "OneEqualsFooOrBarEqualsTwo": Or( Condition("OneEqualsFoo"), Condition("BarEqualsTwo") ), "OneEqualsFooAndNotBarEqualsTwo": And( Condition("OneEqualsFoo"), Not(Condition("BarEqualsTwo")) ), "OneEqualsFooAndBarEqualsTwoAndThreeEqualsPft": And( Condition("OneEqualsFoo"), Condition("BarEqualsTwo"), Equals(Ref("Three"), "Pft") ), "OneIsQuzAndThreeEqualsFour": And( Equals(Ref("One"), "Quz"), Condition("ThreeEqualsFour") ), "LaunchInstance": And(
"SSHLocationBastion": Parameter( "SSHLocationBastion", Type="String", AllowedPattern= "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\/([0-9]|[1-2][0-9]|3[0-2]))$", Default="0.0.0.0/0") } conditions = { "CreatePrivateSubnet1ACondition": Equals(Ref("CreatePrivateSubnet1A"), "True"), "CreatePrivateSubnet1BCondition": Equals(Ref("CreatePrivateSubnet1B"), "True"), "NAT1EIPCondition": Or(Condition("CreatePrivateSubnet1ACondition"), Condition("CreatePrivateSubnet1BCondition")), "CreatePublicSubnet2Condition": Equals(Ref("CreatePublicSubnet2"), "True"), "CreatePrivateSubnet2ACondition": Equals(Ref("CreatePrivateSubnet2A"), "True"), "CreatePrivateSubnet2BCondition": Equals(Ref("CreatePrivateSubnet2B"), "True"), "AttachNAT2ACondition": And(Condition("CreatePublicSubnet2Condition"), Condition("CreatePrivateSubnet2ACondition")), "AttachNAT2BCondition": And(Condition("CreatePublicSubnet2Condition"), Condition("CreatePrivateSubnet2BCondition")), "NAT2EIPCondition": Or(Condition("AttachNAT2ACondition"), Condition("AttachNAT2BCondition")) }
# t.add_condition( 'DefaultEbsOptimizationCondition', Equals(Ref(param_ebs_optimized), 'default'), ) t.add_condition( 'AllocateElasticIpCondition', Equals(Ref(param_allocate_elastic_ip), 'true'), ) t.add_condition( 'PublicIpCondition', Or( Equals(Ref(param_associate_public_ip), 'true'), Equals(Ref(param_allocate_elastic_ip), 'true'), )) t.add_condition('CreateInstanceProfileCondition', Equals(Ref(param_instance_profile), '')) t.add_condition('CreateSecurityGroupCondition', Equals(Ref(param_instance_sg), '')) t.add_condition( 'Volume1Condition', Not(Equals(Ref(param_volume1_size), '0')), ) t.add_condition( 'Volume1IopsOptimizedCondition',
param_assign_public_ip = t.add_parameter( Parameter('AutoAssignPublicIp', Description='Automatically assign instance with a Public IP.', AllowedValues=['true', 'false'], Default='true', Type='String')) # # Conditions # t.add_condition('4AZCondition', Equals(Ref(param_number_of_azs), '4')) t.add_condition( '3AZCondition', Or(Equals(Ref(param_number_of_azs), '3'), Condition('4AZCondition'))) # # Resources # dhcp_options = t.add_resource( ec2.DHCPOptions('DHCPOptions', DomainNameServers=['AmazonProvidedDNS'])) vpc = t.add_resource( ec2.VPC('VPC', CidrBlock=Ref(param_vpc_cidr), EnableDnsSupport='true', EnableDnsHostnames='true', Tags=Tags(Name=Ref(AWS_STACK_NAME))))
( 'StorageEncryptedConditon', Equals(Ref(param_db_storage_encrypted), 'true'), ), ( 'DefaultKmsCondition', Equals(Ref(param_db_kms_key), '') ), ( 'EnhancedMonitoringCondition', Not(Equals(Ref(param_db_monitoring_role), '')) ), ( 'PostgresCondition', Or( Equals(Ref(param_db_engine), 'aurora'), Equals(Ref(param_db_engine), 'aurora-mysql') ) ), ( 'MysqlCondition', Equals(Ref(param_db_engine), 'aurora-postgresql'), ), ] # # Resources # rds_sg = t.add_resource(ec2.SecurityGroup( 'RdsSecurityGroup', Condition='CreateSecurityGroupCondition',
'file system, If you enable the Encrypted property but ' 'don\'t specify this property, this template uses service ' 'default master key.', Default='', Type='String')) # # Conditions # t.add_condition( 'TwoSubnetsCondition', Or( Equals(Ref(param_num_of_subnets), '2'), Equals(Ref(param_num_of_subnets), '3'), Equals(Ref(param_num_of_subnets), '4'), Equals(Ref(param_num_of_subnets), '5'), Equals(Ref(param_num_of_subnets), '6'), )) t.add_condition( 'ThreeSubnetsCondition', Or( Equals(Ref(param_num_of_subnets), '3'), Equals(Ref(param_num_of_subnets), '4'), Equals(Ref(param_num_of_subnets), '5'), Equals(Ref(param_num_of_subnets), '6'), )) t.add_condition( 'FourSubnetsCondition',
def create_template(): template = Template(Description=( "Static website hosted with S3 and CloudFront. " "https://github.com/schlarpc/overengineered-cloudfront-s3-static-website" )) partition_config = add_mapping( template, "PartitionConfig", { "aws": { # the region with the control plane for CloudFront, IAM, Route 53, etc "PrimaryRegion": "us-east-1", # assume that Lambda@Edge replicates to all default enabled regions, and that # future regions will be opt-in. generated with AWS CLI: # aws ec2 describe-regions --all-regions --query "Regions[?OptInStatus=='opt-in-not-required'].RegionName|sort(@)" "DefaultRegions": [ "ap-northeast-1", "ap-northeast-2", "ap-northeast-3", "ap-south-1", "ap-southeast-1", "ap-southeast-2", "ca-central-1", "eu-central-1", "eu-north-1", "eu-west-1", "eu-west-2", "eu-west-3", "sa-east-1", "us-east-1", "us-east-2", "us-west-1", "us-west-2", ], }, # this doesn't actually work, because Lambda@Edge isn't supported in aws-cn "aws-cn": { "PrimaryRegion": "cn-north-1", "DefaultRegions": ["cn-north-1", "cn-northwest-1"], }, }, ) acm_certificate_arn = template.add_parameter( Parameter( "AcmCertificateArn", Description= "Existing ACM certificate to use for serving TLS. 
Overrides HostedZoneId.", Type="String", AllowedPattern="(arn:[^:]+:acm:[^:]+:[^:]+:certificate/.+|)", Default="", )) hosted_zone_id = template.add_parameter( Parameter( "HostedZoneId", Description= "Existing Route 53 zone to use for validating a new TLS certificate.", Type="String", AllowedPattern="(Z[A-Z0-9]+|)", Default="", )) dns_names = template.add_parameter( Parameter( "DomainNames", Description= "Comma-separated list of additional domain names to serve.", Type="CommaDelimitedList", Default="", )) tls_protocol_version = template.add_parameter( Parameter( "TlsProtocolVersion", Description= "CloudFront TLS security policy; see https://amzn.to/2DR91Xq for details.", Type="String", Default="TLSv1.2_2019", )) log_retention_days = template.add_parameter( Parameter( "LogRetentionDays", Description= "Days to keep CloudFront, S3, and Lambda logs. 0 means indefinite retention.", Type="Number", AllowedValues=[0] + CLOUDWATCH_LOGS_RETENTION_OPTIONS, Default=365, )) default_ttl_seconds = template.add_parameter( Parameter( "DefaultTtlSeconds", Description="Cache time-to-live when not set by S3 object headers.", Type="Number", Default=int(datetime.timedelta(minutes=5).total_seconds()), )) enable_price_class_hack = template.add_parameter( Parameter( "EnablePriceClassHack", Description="Cut your bill in half with this one weird trick.", Type="String", Default="false", AllowedValues=["true", "false"], )) retention_defined = add_condition(template, "RetentionDefined", Not(Equals(Ref(log_retention_days), 0))) using_price_class_hack = add_condition( template, "UsingPriceClassHack", Equals(Ref(enable_price_class_hack), "true")) using_acm_certificate = add_condition( template, "UsingAcmCertificate", Not(Equals(Ref(acm_certificate_arn), ""))) using_hosted_zone = add_condition(template, "UsingHostedZone", Not(Equals(Ref(hosted_zone_id), ""))) using_certificate = add_condition( template, "UsingCertificate", Or(Condition(using_acm_certificate), Condition(using_hosted_zone)), ) 
should_create_certificate = add_condition( template, "ShouldCreateCertificate", And(Condition(using_hosted_zone), Not(Condition(using_acm_certificate))), ) using_dns_names = add_condition(template, "UsingDnsNames", Not(Equals(Select(0, Ref(dns_names)), ""))) is_primary_region = "IsPrimaryRegion" template.add_condition( is_primary_region, Equals(Region, FindInMap(partition_config, Partition, "PrimaryRegion")), ) precondition_region_is_primary = template.add_resource( WaitConditionHandle( "PreconditionIsPrimaryRegionForPartition", Condition=is_primary_region, )) log_ingester_dlq = template.add_resource( Queue( "LogIngesterDLQ", MessageRetentionPeriod=int( datetime.timedelta(days=14).total_seconds()), KmsMasterKeyId="alias/aws/sqs", )) log_ingester_role = template.add_resource( Role( "LogIngesterRole", AssumeRolePolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect="Allow", Principal=Principal("Service", "lambda.amazonaws.com"), Action=[sts.AssumeRole], ) ], ), Policies=[ PolicyProperty( PolicyName="DLQPolicy", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[sqs.SendMessage], Resource=[GetAtt(log_ingester_dlq, "Arn")], ) ], ), ) ], )) log_ingester = template.add_resource( Function( "LogIngester", Runtime=PYTHON_RUNTIME, Handler="index.{}".format(log_ingest.handler.__name__), Code=Code(ZipFile=inspect.getsource(log_ingest)), MemorySize=256, Timeout=300, Role=GetAtt(log_ingester_role, "Arn"), DeadLetterConfig=DeadLetterConfig( TargetArn=GetAtt(log_ingester_dlq, "Arn")), )) log_ingester_permission = template.add_resource( Permission( "LogIngesterPermission", FunctionName=GetAtt(log_ingester, "Arn"), Action="lambda:InvokeFunction", Principal="s3.amazonaws.com", SourceAccount=AccountId, )) log_bucket = template.add_resource( Bucket( "LogBucket", # S3 requires this ACL (regardless of bucket policy) or s3:PutBucketLogging fails. 
# When the CloudFront distribution is created, it adds an additional bucket ACL. # That ACL is not possible to model in CloudFormation. AccessControl="LogDeliveryWrite", LifecycleConfiguration=LifecycleConfiguration(Rules=[ LifecycleRule(ExpirationInDays=1, Status="Enabled"), LifecycleRule( AbortIncompleteMultipartUpload= AbortIncompleteMultipartUpload(DaysAfterInitiation=1), Status="Enabled", ), ]), NotificationConfiguration=NotificationConfiguration( LambdaConfigurations=[ LambdaConfigurations(Event="s3:ObjectCreated:*", Function=GetAtt(log_ingester, "Arn")) ]), BucketEncryption=BucketEncryption( ServerSideEncryptionConfiguration=[ ServerSideEncryptionRule( ServerSideEncryptionByDefault= ServerSideEncryptionByDefault( # if we use KMS, we can't read the logs SSEAlgorithm="AES256")) ]), OwnershipControls=OwnershipControls(Rules=[ OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred") ], ), PublicAccessBlockConfiguration=PublicAccessBlockConfiguration( BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True, ), DependsOn=[log_ingester_permission], )) log_ingester_log_group = template.add_resource( LogGroup( "LogIngesterLogGroup", LogGroupName=Join( "", ["/aws/lambda/", Ref(log_ingester)]), RetentionInDays=If(retention_defined, Ref(log_retention_days), NoValue), )) log_ingester_policy = template.add_resource( PolicyType( "LogIngesterPolicy", Roles=[Ref(log_ingester_role)], PolicyName="IngestLogPolicy", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[logs.CreateLogStream, logs.PutLogEvents], Resource=[ Join( ":", [ "arn", Partition, "logs", Region, AccountId, "log-group", "/aws/cloudfront/*", ], ), Join( ":", [ "arn", Partition, "logs", Region, AccountId, "log-group", "/aws/s3/*", ], ), GetAtt(log_ingester_log_group, "Arn"), ], ), Statement( Effect=Allow, Action=[s3.GetObject], Resource=[Join("", [GetAtt(log_bucket, "Arn"), "/*"])], ), ], ), )) bucket = 
template.add_resource( Bucket( "ContentBucket", LifecycleConfiguration=LifecycleConfiguration(Rules=[ # not supported by CFN yet: # LifecycleRule( # Transitions=[ # LifecycleRuleTransition( # StorageClass='INTELLIGENT_TIERING', # TransitionInDays=1, # ), # ], # Status="Enabled", # ), LifecycleRule( AbortIncompleteMultipartUpload= AbortIncompleteMultipartUpload(DaysAfterInitiation=7), Status="Enabled", ) ]), LoggingConfiguration=LoggingConfiguration( DestinationBucketName=Ref(log_bucket), LogFilePrefix="s3/"), BucketEncryption=BucketEncryption( ServerSideEncryptionConfiguration=[ ServerSideEncryptionRule( ServerSideEncryptionByDefault= ServerSideEncryptionByDefault( # Origin Access Identities can't use KMS SSEAlgorithm="AES256")) ]), OwnershipControls=OwnershipControls(Rules=[ OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred") ], ), PublicAccessBlockConfiguration=PublicAccessBlockConfiguration( BlockPublicAcls=True, BlockPublicPolicy=True, IgnorePublicAcls=True, RestrictPublicBuckets=True, ), )) origin_access_identity = template.add_resource( CloudFrontOriginAccessIdentity( "CloudFrontIdentity", CloudFrontOriginAccessIdentityConfig= CloudFrontOriginAccessIdentityConfig( Comment=GetAtt(bucket, "Arn")), )) bucket_policy = template.add_resource( BucketPolicy( "ContentBucketPolicy", Bucket=Ref(bucket), PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Principal=Principal( "CanonicalUser", GetAtt(origin_access_identity, "S3CanonicalUserId"), ), Action=[s3.GetObject], Resource=[Join("", [GetAtt(bucket, "Arn"), "/*"])], ), ], ), )) # Not strictly necessary, as ACLs should take care of this access. However, CloudFront docs # state "In some circumstances [...] S3 resets permissions on the bucket to the default value", # and this allows logging to work without any ACLs in place. 
log_bucket_policy = template.add_resource( BucketPolicy( "LogBucketPolicy", Bucket=Ref(log_bucket), PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Principal=Principal("Service", "delivery.logs.amazonaws.com"), Action=[s3.PutObject], Resource=[ Join( "/", [GetAtt(log_bucket, "Arn"), "cloudfront", "*"]) ], ), Statement( Effect=Allow, Principal=Principal("Service", "delivery.logs.amazonaws.com"), Action=[s3.ListBucket], Resource=[Join("/", [GetAtt(log_bucket, "Arn")])], ), Statement( Effect=Allow, Principal=Principal("Service", "s3.amazonaws.com"), Action=[s3.PutObject], Resource=[ Join("/", [GetAtt(log_bucket, "Arn"), "s3", "*"]) ], ), ], ), )) certificate_validator_dlq = template.add_resource( Queue( "CertificateValidatorDLQ", MessageRetentionPeriod=int( datetime.timedelta(days=14).total_seconds()), KmsMasterKeyId="alias/aws/sqs", Condition=should_create_certificate, )) certificate_validator_role = template.add_resource( Role( "CertificateValidatorRole", AssumeRolePolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect="Allow", Principal=Principal("Service", "lambda.amazonaws.com"), Action=[sts.AssumeRole], ) ], ), Policies=[ PolicyProperty( PolicyName="DLQPolicy", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[sqs.SendMessage], Resource=[ GetAtt(certificate_validator_dlq, "Arn") ], ) ], ), ) ], # TODO scope down ManagedPolicyArns=[ "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole", "arn:aws:iam::aws:policy/AmazonRoute53FullAccess", "arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly", ], Condition=should_create_certificate, )) certificate_validator_function = template.add_resource( Function( "CertificateValidatorFunction", Runtime=PYTHON_RUNTIME, Handler="index.{}".format(certificate_validator.handler.__name__), Code=Code(ZipFile=inspect.getsource(certificate_validator)), MemorySize=256, Timeout=300, 
Role=GetAtt(certificate_validator_role, "Arn"), DeadLetterConfig=DeadLetterConfig( TargetArn=GetAtt(certificate_validator_dlq, "Arn")), Environment=Environment( Variables={ certificate_validator.EnvVars.HOSTED_ZONE_ID.name: Ref(hosted_zone_id) }), Condition=should_create_certificate, )) certificate_validator_log_group = template.add_resource( LogGroup( "CertificateValidatorLogGroup", LogGroupName=Join( "", ["/aws/lambda/", Ref(certificate_validator_function)]), RetentionInDays=If(retention_defined, Ref(log_retention_days), NoValue), Condition=should_create_certificate, )) certificate_validator_rule = template.add_resource( Rule( "CertificateValidatorRule", EventPattern={ "detail-type": ["AWS API Call via CloudTrail"], "detail": { "eventSource": ["acm.amazonaws.com"], "eventName": ["AddTagsToCertificate"], "requestParameters": { "tags": { "key": [certificate_validator_function.title], "value": [GetAtt(certificate_validator_function, "Arn")], } }, }, }, Targets=[ Target( Id="certificate-validator-lambda", Arn=GetAtt(certificate_validator_function, "Arn"), ) ], DependsOn=[certificate_validator_log_group], Condition=should_create_certificate, )) certificate_validator_permission = template.add_resource( Permission( "CertificateValidatorPermission", FunctionName=GetAtt(certificate_validator_function, "Arn"), Action="lambda:InvokeFunction", Principal="events.amazonaws.com", SourceArn=GetAtt(certificate_validator_rule, "Arn"), Condition=should_create_certificate, )) certificate = template.add_resource( Certificate( "Certificate", DomainName=Select(0, Ref(dns_names)), SubjectAlternativeNames=Ref( dns_names), # duplicate first name works fine ValidationMethod="DNS", Tags=Tags( **{ certificate_validator_function.title: GetAtt(certificate_validator_function, "Arn") }), DependsOn=[certificate_validator_permission], Condition=should_create_certificate, )) edge_hook_role = template.add_resource( Role( "EdgeHookRole", AssumeRolePolicyDocument=PolicyDocument( Version="2012-10-17", 
Statement=[ Statement( Effect="Allow", Principal=Principal( "Service", [ "lambda.amazonaws.com", "edgelambda.amazonaws.com" ], ), Action=[sts.AssumeRole], ) ], ), )) edge_hook_function = template.add_resource( Function( "EdgeHookFunction", Runtime=PYTHON_RUNTIME, Handler="index.handler", Code=Code(ZipFile=inspect.getsource(edge_hook)), MemorySize=128, Timeout=3, Role=GetAtt(edge_hook_role, "Arn"), )) edge_hook_function_hash = (hashlib.sha256( json.dumps(edge_hook_function.to_dict(), sort_keys=True).encode("utf-8")).hexdigest()[:10].upper()) edge_hook_version = template.add_resource( Version( "EdgeHookVersion" + edge_hook_function_hash, FunctionName=GetAtt(edge_hook_function, "Arn"), )) replica_log_group_name = Join( "/", [ "/aws/lambda", Join( ".", [ FindInMap(partition_config, Partition, "PrimaryRegion"), Ref(edge_hook_function), ], ), ], ) edge_hook_role_policy = template.add_resource( PolicyType( "EdgeHookRolePolicy", PolicyName="write-logs", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[logs.CreateLogStream, logs.PutLogEvents], Resource=[ Join( ":", [ "arn", Partition, "logs", "*", AccountId, "log-group", replica_log_group_name, "log-stream", "*", ], ), ], ), ], ), Roles=[Ref(edge_hook_role)], )) stack_set_administration_role = template.add_resource( Role( "StackSetAdministrationRole", AssumeRolePolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Principal=Principal("Service", "cloudformation.amazonaws.com"), Action=[sts.AssumeRole], ), ], ), )) stack_set_execution_role = template.add_resource( Role( "StackSetExecutionRole", AssumeRolePolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Principal=Principal( "AWS", GetAtt(stack_set_administration_role, "Arn")), Action=[sts.AssumeRole], ), ], ), Policies=[ PolicyProperty( PolicyName="create-stackset-instances", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( 
Effect=Allow, Action=[ cloudformation.DescribeStacks, logs.DescribeLogGroups, ], Resource=["*"], ), # stack instances communicate with the CFN service via SNS Statement( Effect=Allow, Action=[sns.Publish], NotResource=[ Join( ":", [ "arn", Partition, "sns", "*", AccountId, "*" ], ) ], ), Statement( Effect=Allow, Action=[ logs.CreateLogGroup, logs.DeleteLogGroup, logs.PutRetentionPolicy, logs.DeleteRetentionPolicy, ], Resource=[ Join( ":", [ "arn", Partition, "logs", "*", AccountId, "log-group", replica_log_group_name, "log-stream", "", ], ), ], ), Statement( Effect=Allow, Action=[ cloudformation.CreateStack, cloudformation.DeleteStack, cloudformation.UpdateStack, ], Resource=[ Join( ":", [ "arn", Partition, "cloudformation", "*", AccountId, Join( "/", [ "stack", Join( "-", [ "StackSet", StackName, "*" ], ), ], ), ], ) ], ), ], ), ), ], )) stack_set_administration_role_policy = template.add_resource( PolicyType( "StackSetAdministrationRolePolicy", PolicyName="assume-execution-role", PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[sts.AssumeRole], Resource=[GetAtt(stack_set_execution_role, "Arn")], ), ], ), Roles=[Ref(stack_set_administration_role)], )) edge_log_groups = template.add_resource( StackSet( "EdgeLambdaLogGroupStackSet", AdministrationRoleARN=GetAtt(stack_set_administration_role, "Arn"), ExecutionRoleName=Ref(stack_set_execution_role), StackSetName=Join("-", [StackName, "EdgeLambdaLogGroup"]), PermissionModel="SELF_MANAGED", Description="Multi-region log groups for Lambda@Edge replicas", Parameters=[ StackSetParameter( ParameterKey="LogGroupName", ParameterValue=replica_log_group_name, ), StackSetParameter( ParameterKey="LogRetentionDays", ParameterValue=Ref(log_retention_days), ), ], OperationPreferences=OperationPreferences( FailureToleranceCount=0, MaxConcurrentPercentage=100, ), StackInstancesGroup=[ StackInstances( DeploymentTargets=DeploymentTargets(Accounts=[AccountId]), 
Regions=FindInMap(partition_config, Partition, "DefaultRegions"), ) ], TemplateBody=create_log_group_template().to_json(indent=None), DependsOn=[stack_set_administration_role_policy], )) price_class_distribution = template.add_resource( Distribution( "PriceClassDistribution", DistributionConfig=DistributionConfig( Comment="Dummy distribution used for price class hack", DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="default", ViewerProtocolPolicy="allow-all", ForwardedValues=ForwardedValues(QueryString=False), ), Enabled=True, Origins=[ Origin(Id="default", DomainName=GetAtt(bucket, "DomainName")) ], IPV6Enabled=True, ViewerCertificate=ViewerCertificate( CloudFrontDefaultCertificate=True), PriceClass="PriceClass_All", ), Condition=using_price_class_hack, )) distribution = template.add_resource( Distribution( "ContentDistribution", DistributionConfig=DistributionConfig( Enabled=True, Aliases=If(using_dns_names, Ref(dns_names), NoValue), Logging=Logging(Bucket=GetAtt(log_bucket, "DomainName"), Prefix="cloudfront/"), DefaultRootObject="index.html", Origins=[ Origin( Id="default", DomainName=GetAtt(bucket, "DomainName"), S3OriginConfig=S3OriginConfig( OriginAccessIdentity=Join( "", [ "origin-access-identity/cloudfront/", Ref(origin_access_identity), ], )), ) ], DefaultCacheBehavior=DefaultCacheBehavior( TargetOriginId="default", Compress=True, ForwardedValues=ForwardedValues(QueryString=False), ViewerProtocolPolicy="redirect-to-https", DefaultTTL=Ref(default_ttl_seconds), LambdaFunctionAssociations=[ LambdaFunctionAssociation( EventType="origin-request", LambdaFunctionARN=Ref(edge_hook_version), ) ], ), HttpVersion="http2", IPV6Enabled=True, ViewerCertificate=ViewerCertificate( AcmCertificateArn=If( using_acm_certificate, Ref(acm_certificate_arn), If(using_hosted_zone, Ref(certificate), NoValue), ), SslSupportMethod=If(using_certificate, "sni-only", NoValue), CloudFrontDefaultCertificate=If(using_certificate, NoValue, True), 
MinimumProtocolVersion=Ref(tls_protocol_version), ), PriceClass=If(using_price_class_hack, "PriceClass_100", "PriceClass_All"), ), DependsOn=[ bucket_policy, log_ingester_policy, edge_log_groups, precondition_region_is_primary, ], )) distribution_log_group = template.add_resource( LogGroup( "DistributionLogGroup", LogGroupName=Join( "", ["/aws/cloudfront/", Ref(distribution)]), RetentionInDays=If(retention_defined, Ref(log_retention_days), NoValue), )) bucket_log_group = template.add_resource( LogGroup( "BucketLogGroup", LogGroupName=Join("", ["/aws/s3/", Ref(bucket)]), RetentionInDays=If(retention_defined, Ref(log_retention_days), NoValue), )) template.add_output(Output("DistributionId", Value=Ref(distribution))) template.add_output( Output("DistributionDomain", Value=GetAtt(distribution, "DomainName"))) template.add_output( Output( "DistributionDnsTarget", Value=If( using_price_class_hack, GetAtt(price_class_distribution, "DomainName"), GetAtt(distribution, "DomainName"), ), )) template.add_output( Output( "DistributionUrl", Value=Join("", ["https://", GetAtt(distribution, "DomainName"), "/"]), )) template.add_output(Output("ContentBucketArn", Value=GetAtt(bucket, "Arn"))) return template
Parameter( "WebServerCapacity", Description="The initial nuber of WebServer instances", Default="2", Type="Number", MaxValue="5", MinValue="1", ConstraintDescription="must be between 1 and 5 EC2 instances.", )) t.add_condition("Is-EC2-Classic", Not(Condition("Is-EC2-VPC"))) t.add_condition( "Is-EC2-VPC", Or(Equals(Ref("AWS::Region"), "eu-central-1"), Equals(Ref("AWS::Region"), "cn-north-1"), Equals(Ref("AWS::Region"), "ap-northeast-2"))) t.add_mapping( "AWSInstanceType2Arch", { u'c1.medium': { u'Arch': u'PV64' }, u'c1.xlarge': { u'Arch': u'PV64' }, u'c3.2xlarge': { u'Arch': u'HVM64' }, u'c3.4xlarge': { u'Arch': u'HVM64'
# Condition titles (the ``*_CON_T`` constants) paired with their troposphere
# condition expressions (the ``*_CON`` constants), all keyed off the stack's
# LAUNCH_TYPE parameter. Each expression is intended to be registered on a
# template under its matching title so that ``Condition(<title>)``
# (Fn::Condition) references below resolve correctly.

# LAUNCH_TYPE == "FARGATE_PROVIDERS": use Fargate capacity providers.
USE_FARGATE_PROVIDERS_CON_T = "UseFargateProvidersCondition"
USE_FARGATE_PROVIDERS_CON = Equals(Ref(ecs_params.LAUNCH_TYPE), "FARGATE_PROVIDERS")

# LAUNCH_TYPE == "FARGATE": plain Fargate launch type.
USE_FARGATE_LT_CON_T = "UseFargateLaunchType"
USE_FARGATE_LT_CON = Equals(Ref(ecs_params.LAUNCH_TYPE), "FARGATE")

# LAUNCH_TYPE == "CLUSTER_MODE": defer to the cluster's default providers.
USE_CLUSTER_MODE_CON_T = "UseClusterDefaultProviders"
USE_CLUSTER_MODE_CON = Equals(Ref(ecs_params.LAUNCH_TYPE), "CLUSTER_MODE")

# LAUNCH_TYPE == "SERVICE_MODE": providers are set per-service.
USE_SERVICE_MODE_CON_T = "UseServiceProviders"
USE_SERVICE_MODE_CON = Equals(Ref(ecs_params.LAUNCH_TYPE), "SERVICE_MODE")

# Composite: true for either Fargate flavour (providers or launch type).
USE_FARGATE_CON_T = "UseFargate"
USE_FARGATE_CON = Or(
    Condition(USE_FARGATE_PROVIDERS_CON_T), Condition(USE_FARGATE_LT_CON_T)
)

# Negation of the composite Fargate condition above.
NOT_FARGATE_CON_T = "NotUsingFargate"
NOT_FARGATE_CON = Not(Condition(USE_FARGATE_CON_T))

# LAUNCH_TYPE == "EXTERNAL": ECS Anywhere / external instances.
USE_EXTERNAL_LT_T = "UseExternalLaunchType"
USE_EXTERNAL_LT = Equals(Ref(ecs_params.LAUNCH_TYPE), "EXTERNAL")

# Composite: any explicit launch type (EC2, Fargate, or external).
# NOTE(review): USE_EC2_CON_T is defined elsewhere in this module — confirm
# it is registered on the template alongside these conditions.
USE_LAUNCH_TYPE_CON_T = "UseLaunchType"
USE_LAUNCH_TYPE_CON = Or(
    Condition(USE_EC2_CON_T),
    Condition(USE_FARGATE_LT_CON_T),
    Condition(USE_EXTERNAL_LT_T),
)
def create_template(self) -> None:
    """Create template (main function called by Stacker).

    Builds the resources backing a Terraform S3 remote-state backend:

    * a DynamoDB table used for state locking,
    * a versioned, private S3 bucket holding the state files, and
    * an IAM managed policy granting exactly the permissions Terraform's
      S3 backend documentation requires.

    Each resource is also surfaced through a stack output.
    """
    template = self.template
    variables = self.variables

    template.set_version("2010-09-09")
    template.set_description("Terraform State Resources")

    # Conditions: a name is treated as omitted when the corresponding
    # variable is blank or the literal string "undefined"; in that case
    # CloudFormation generates the physical resource name.
    for name in ("BucketName", "TableName"):
        template.add_condition(
            f"{name}Omitted",
            Or(
                Equals(variables[name].ref, ""),
                Equals(variables[name].ref, "undefined"),
            ),
        )

    # DynamoDB table the Terraform S3 backend uses for state locking.
    lock_table = template.add_resource(
        dynamodb.Table(
            "TerraformStateTable",
            AttributeDefinitions=[
                dynamodb.AttributeDefinition(
                    AttributeName="LockID", AttributeType="S"
                )
            ],
            KeySchema=[
                dynamodb.KeySchema(AttributeName="LockID", KeyType="HASH")
            ],
            ProvisionedThroughput=dynamodb.ProvisionedThroughput(
                ReadCapacityUnits=2, WriteCapacityUnits=2
            ),
            TableName=If(
                "TableNameOmitted", NoValue, variables["TableName"].ref
            ),
        )
    )
    template.add_output(
        Output(
            f"{lock_table.title}Name",
            Description="Name of DynamoDB table for Terraform state",
            Value=lock_table.ref(),
        )
    )

    # Versioned private bucket storing the Terraform state itself.
    state_bucket = template.add_resource(
        s3.Bucket(
            "TerraformStateBucket",
            DeletionPolicy=variables["BucketDeletionPolicy"],
            AccessControl=s3.Private,
            BucketName=If(
                "BucketNameOmitted", NoValue, variables["BucketName"].ref
            ),
            # Expire superseded state versions after 90 days.
            LifecycleConfiguration=s3.LifecycleConfiguration(
                Rules=[
                    s3.LifecycleRule(
                        NoncurrentVersionExpirationInDays=90,
                        Status="Enabled",
                    )
                ]
            ),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status="Enabled"
            ),
        )
    )
    template.add_output(
        Output(
            f"{state_bucket.title}Name",
            Description="Name of bucket storing Terraform state",
            Value=state_bucket.ref(),
        )
    )
    template.add_output(
        Output(
            f"{state_bucket.title}Arn",
            Description="Arn of bucket storing Terraform state",
            Value=state_bucket.get_att("Arn"),
        )
    )

    # Managed policy callers can attach to principals that run Terraform.
    management_policy = template.add_resource(
        iam.ManagedPolicy(
            "ManagementPolicy",
            Description="Managed policy for Terraform state management.",
            Path="/",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    # https://www.terraform.io/docs/backends/types/s3.html#s3-bucket-permissions
                    Statement(
                        Action=[awacs.s3.ListBucket],
                        Effect=Allow,
                        Resource=[state_bucket.get_att("Arn")],
                    ),
                    Statement(
                        Action=[awacs.s3.GetObject, awacs.s3.PutObject],
                        Effect=Allow,
                        Resource=[
                            Join("", [state_bucket.get_att("Arn"), "/*"])
                        ],
                    ),
                    Statement(
                        Action=[
                            awacs.dynamodb.GetItem,
                            awacs.dynamodb.PutItem,
                            awacs.dynamodb.DeleteItem,
                        ],
                        Effect=Allow,
                        Resource=[lock_table.get_att("Arn")],
                    ),
                ],
            ),
        )
    )
    template.add_output(
        Output(
            "PolicyArn",
            Description="Managed policy Arn",
            Value=management_policy.ref(),
        )
    )
# t.add_condition( 'HasDisplayNameCondition', Not(Equals(Ref(param_display_name), '')) ) t.add_condition( 'SixEmailsCondition', Equals(Ref(param_num_emails), '6') ) t.add_condition( 'FiveEmailsCondition', Or( Equals(Ref(param_num_emails), '6'), Equals(Ref(param_num_emails), '5'), ) ) t.add_condition( 'FourEmailsCondition', Or( Equals(Ref(param_num_emails), '6'), Equals(Ref(param_num_emails), '5'), Equals(Ref(param_num_emails), '4'), )) t.add_condition( 'ThreeEmailsCondition', Or( Equals(Ref(param_num_emails), '6'),
Parameter( 'SshKeyName', Type='String', ) ]) t.add_condition('OneEqualsFoo', Equals(Ref('One'), 'Foo')) t.add_condition('NotOneEqualsFoo', Not(Condition('OneEqualsFoo'))) t.add_condition('BarEqualsTwo', Equals('Bar', Ref('Two'))) t.add_condition('ThreeEqualsFour', Equals(Ref('Three'), Ref('Four'))) t.add_condition('OneEqualsFooOrBarEqualsTwo', Or(Condition('OneEqualsFoo'), Condition('BarEqualsTwo'))) t.add_condition('OneEqualsFooAndNotBarEqualsTwo', And(Condition('OneEqualsFoo'), Not(Condition('BarEqualsTwo')))) t.add_condition( 'OneEqualsFooAndBarEqualsTwoAndThreeEqualsPft', And(Condition('OneEqualsFoo'), Condition('BarEqualsTwo'), Equals(Ref('Three'), 'Pft'))) t.add_condition('OneIsQuzAndThreeEqualsFour', And(Equals(Ref('One'), 'Quz'), Condition('ThreeEqualsFour'))) t.add_condition( 'LaunchInstance', And(Condition('OneEqualsFoo'), Condition('NotOneEqualsFoo'),
('CreateSecurityGroupCondition', Equals(Ref(param_sg), '')), ( 'PostgresCondition', Equals(Ref(param_db_engine), 'postgres'), ), ( 'MysqlCondition', Equals(Ref(param_db_engine), 'mysql'), ), ( 'MariadbCondition', Equals(Ref(param_db_engine), 'mariadb'), ), ('OrcaleCondition', Or( Equals(Ref(param_db_engine), 'oracle-se1'), Equals(Ref(param_db_engine), 'oracle-se2'), )), ( 'NewDatabaseCondition', Equals(Ref(param_db_snapshot), ''), ), ('UseSnapshotCondition', Not(Equals(Ref(param_db_snapshot), ''))), ( 'IopsStorageCondition', Equals(Ref(param_db_stroage_type), 'io1'), ), ( 'StorageEncryptedConditon', Equals(Ref(param_db_storage_encrypted), 'true'), ), ('DefaultKmsCondition', Equals(Ref(param_db_kms_key), '')),