Esempio n. 1
0
 def create_conditions(self):
     """Add template conditions for HTTPS, certificate type, events and logs.

     Conditions created (all keyed off template parameters):
     - UseHTTPS / UseHTTP: an ELB certificate name was / was not supplied.
     - UseIAMCert: the certificate type is IAM rather than ACM.
     - EnableSNSEvents / CreateSNSTopic: SNS events backend selected, and
       whether a topic must be created (no existing topic name given).
     - EnableCloudwatchLogs / CreateRunLogsGroup: cloudwatch run-logs
       backend selected, and whether a log group must be created.
     - EnableAppEventStream: kinesis log streaming selected.
     - RequireCommitMessages: commit messages are required.
     """
     t = self.template
     ssl_condition = Not(Equals(Ref("ELBCertName"), ""))
     t.add_condition("UseHTTPS", ssl_condition)
     t.add_condition("UseHTTP", Not(ssl_condition))
     # Consistency fix: use the local alias `t` like every other call in
     # this method (the original mixed `t` and `self.template`).
     t.add_condition(
         "UseIAMCert",
         Not(Equals(Ref("ELBCertType"), "acm")))
     t.add_condition(
         "EnableSNSEvents",
         Equals(Ref("EventsBackend"), "sns"))
     # Only create a topic when SNS events are on AND no topic name given.
     t.add_condition(
         "CreateSNSTopic",
         And(Equals(Ref("EventsSNSTopicName"), ""),
             Condition("EnableSNSEvents")))
     t.add_condition(
         "EnableCloudwatchLogs",
         Equals(Ref("RunLogsBackend"), "cloudwatch"))
     # Only create a log group when cloudwatch logs are on AND no group given.
     t.add_condition(
         "CreateRunLogsGroup",
         And(Equals(Ref("RunLogsCloudwatchGroup"), ""),
             Condition("EnableCloudwatchLogs")))
     t.add_condition(
         "EnableAppEventStream",
         Equals(Ref("LogsStreamer"), "kinesis"))
     t.add_condition(
         "RequireCommitMessages",
         Equals(Ref("RequireCommitMessages"), "true"))
Esempio n. 2
0
    def create_conditions(self):
        """Register conditions for optional cache settings and internal DNS."""
        template = self.template

        # Each "Defined*" condition is true when its parameter was set to a
        # non-default value (list parameters are joined before comparison).
        template.add_condition(
            "DefinedNotificationArn",
            Not(Equals(Ref("NotificationTopicArn"), "")))
        template.add_condition(
            "DefinedPort",
            Not(Equals(Ref("Port"), "0")))
        template.add_condition(
            "DefinedAvailabilityZones",
            Not(Equals(Join(",", Ref("PreferredCacheClusterAZs")), "")))
        template.add_condition(
            "DefinedSnapshotArns",
            Not(Equals(Join(",", Ref("SnapshotArns")), "")))
        template.add_condition(
            "DefinedSnapshotWindow",
            Not(Equals(Ref("SnapshotWindow"), "")))

        # DNS: an internal record is only created when the zone id, zone name
        # and hostname are all provided.
        for cond_name, param_name in (
                ("HasInternalZone", "InternalZoneId"),
                ("HasInternalZoneName", "InternalZoneName"),
                ("HasInternalHostname", "InternalHostname")):
            template.add_condition(cond_name,
                                   Not(Equals(Ref(param_name), "")))
        template.add_condition(
            "CreateInternalHostname",
            And(Condition("HasInternalZone"),
                Condition("HasInternalZoneName"),
                Condition("HasInternalHostname")))
Esempio n. 3
0
 def create_conditions(self):
     """Add internal-DNS and storage-related conditions to the template."""
     template = self.template
     # All three DNS inputs must be non-empty for the record to be created.
     for cond_name, param_name in (
             ("HasInternalZone", "InternalZoneId"),
             ("HasInternalZoneName", "InternalZoneName"),
             ("HasInternalHostname", "InternalHostname")):
         template.add_condition(cond_name,
                                Not(Equals(Ref(param_name), "")))
     template.add_condition(
         "CreateInternalHostname",
         And(Condition("HasInternalZone"),
             Condition("HasInternalZoneName"),
             Condition("HasInternalHostname")))
     # Storage settings: only applied when a non-default value was given.
     template.add_condition("HasProvisionedIOPS",
                            Not(Equals(Ref("IOPS"), "0")))
     template.add_condition("HasStorageType",
                            Not(Equals(Ref("StorageType"), "default")))
     template.add_condition("HasDBSnapshotIdentifier",
                            Not(Equals(Ref("DBSnapshotIdentifier"), "")))
Esempio n. 4
0
 def create_conditions(self):
     """Describe which hosted zones (internal/external) are configured."""
     t = self.template
     t.add_condition("HasInternalDomain",
                     Not(Equals(Ref("InternalDomain"), "")))
     t.add_condition("HasExternalDomain",
                     Not(Equals(Ref("BaseDomain"), "")))
     # At least one zone configured, or none at all.
     t.add_condition("HasHostedZones",
                     Or(Condition("HasInternalDomain"),
                        Condition("HasExternalDomain")))
     t.add_condition("NoHostedZones", Not(Condition("HasHostedZones")))
Esempio n. 5
0
 def create_conditions(self):
     """Add the conditions gating creation of the internal DNS record."""
     template = self.template
     # Each input must be non-empty on its own.
     for cond_name, param_name in (
             ("HasInternalZone", "InternalZoneId"),
             ("HasInternalZoneName", "InternalZoneName"),
             ("HasInternalHostname", "InternalHostname")):
         template.add_condition(cond_name,
                                Not(Equals(Ref(param_name), "")))
     # The record is created only when all three are present.
     template.add_condition(
         "CreateInternalHostname",
         And(Condition("HasInternalZone"),
             Condition("HasInternalZoneName"),
             Condition("HasInternalHostname")))
Esempio n. 6
0
 def create_conditions(self):
     """Define ELB, DNS and SSL creation conditions."""
     t = self.template
     # Base conditions derived directly from parameters.
     create_elb = Not(Equals(Ref("ELBHostName"), ""))
     setup_dns = Not(Equals(Ref("BaseDomain"), ""))
     use_ssl = Not(Equals(Ref("ELBCertName"), ""))
     t.add_condition("CreateELB", create_elb)
     t.add_condition("SetupDNS", setup_dns)
     t.add_condition("UseSSL", use_ssl)
     # Composites: SSL listener and DNS records both require the ELB.
     t.add_condition("CreateSSLELB",
                     And(Condition("CreateELB"), Condition("UseSSL")))
     t.add_condition("SetupELBDNS",
                     And(Condition("CreateELB"), Condition("SetupDNS")))
Esempio n. 7
0
 def create_conditions(self):
     """Add hosted-zone and NAT-selection conditions to the template."""
     t = self.template
     t.add_condition("HasInternalDomain",
                     Not(Equals(Ref("InternalDomain"), "")))
     t.add_condition("HasExternalDomain",
                     Not(Equals(Ref("BaseDomain"), "")))
     # Any zone configured at all, and its negation.
     t.add_condition("HasHostedZones",
                     Or(Condition("HasInternalDomain"),
                        Condition("HasExternalDomain")))
     t.add_condition("NoHostedZones", Not(Condition("HasHostedZones")))
     # NAT gateway and NAT instances are mutually exclusive choices.
     t.add_condition("UseNatGateway",
                     Equals(Ref("UseNatGateway"), "true"))
     t.add_condition("UseNatInstances", Not(Condition("UseNatGateway")))
Esempio n. 8
0
 def add_conditions(self):
     """Set up AZ conditions."""
     template = self.template
     for index in range(AZS):
         az = index + 1
         # A zone is "active" on each side when its subnet CIDR is set.
         template.add_condition(
             'PublicAZ%i' % az,
             Not(Equals(Ref('PublicSubnet%i' % az), '')))
         template.add_condition(
             'PrivateAZ%i' % az,
             Not(Equals(Ref('PrivateSubnet%i' % az), '')))
         # A NAT gateway needs both a public and a private subnet in the AZ.
         template.add_condition(
             'CreateNATGateway%i' % az,
             And(Condition('PublicAZ%i' % az),
                 Condition('PrivateAZ%i' % az)))
Esempio n. 9
0
 def create_conditions(self):
     """Register the internal-DNS conditions on the template."""
     template = self.template
     # Zone id, zone name and hostname must each be non-empty.
     for cond_name, param_name in (
             ('HasInternalZone', 'InternalZoneId'),
             ('HasInternalZoneName', 'InternalZoneName'),
             ('HasInternalHostname', 'InternalHostname')):
         template.add_condition(cond_name,
                                Not(Equals(Ref(param_name), '')))
     # The hostname record requires all three inputs together.
     template.add_condition(
         'CreateInternalHostname',
         And(Condition('HasInternalZone'),
             Condition('HasInternalZoneName'),
             Condition('HasInternalHostname')))
def create_conditions():
    """Build chained host-count conditions plus the GovCloud region check.

    Condition N is true when NumSRRHostsParam >= N, built by OR-ing
    "count == N" with the previous condition in the chain.
    """
    first_count = 4
    previous = CONDITION_COUNTER_PREFIX + str(first_count)
    # Base of the chain: exactly `first_count` hosts requested.
    t.add_condition(previous, Equals(Ref(NumSRRHostsParam), first_count))

    for count in range(first_count + 1, MAX_INSTANCES + 1):
        current = CONDITION_COUNTER_PREFIX + str(count)
        t.add_condition(
            current,
            Or(Equals(Ref(NumSRRHostsParam), count), Condition(previous)))
        previous = current

    t.add_condition("GovCloudCondition",
                    Equals(Ref("AWS::Region"), "us-gov-west-1"))
Esempio n. 11
0
"""

from troposphere import Condition, Ref, Equals, And, Not

from ecs_composex.ecs import ecs_params

# Condition: the cluster name parameter was left at its default, so the
# cluster name will be generated by CloudFormation.
GENERATED_CLUSTER_NAME_CON_T = "UsCfnGeneratedClusterName"
GENERATED_CLUSTER_NAME_CON = Equals(Ref(ecs_params.CLUSTER_NAME),
                                    ecs_params.CLUSTER_NAME.Default)

# Condition: no cluster security group id was supplied (parameter still at
# its default value).
NOT_USE_CLUSTER_SG_CON_T = "NotUseClusterSecurityGroupCondition"
NOT_USE_CLUSTER_SG_CON = Equals(Ref(ecs_params.CLUSTER_SG_ID),
                                ecs_params.CLUSTER_SG_ID.Default)

# Negation of the above: an explicit cluster security group should be used.
USE_CLUSTER_SG_CON_T = "UseClusterSecurityGroupCondition"
USE_CLUSTER_SG_CON = Not(Condition(NOT_USE_CLUSTER_SG_CON_T))

# Condition: the desired service count is zero.
SERVICE_COUNT_ZERO_CON_T = "ServiceCountIsZeroCondition"
SERVICE_COUNT_ZERO_CON = Equals(Ref(ecs_params.SERVICE_COUNT), "0")

# Condition: the launch type parameter selects Fargate.
USE_FARGATE_CON_T = "UseFargateCondition"
USE_FARGATE_CON = Equals(Ref(ecs_params.LAUNCH_TYPE), "FARGATE")

# Composite: Fargate launch type AND a zero service count.
SERVICE_COUNT_ZERO_AND_FARGATE_CON_T = "ServiceCountZeroAndFargate"
SERVICE_COUNT_ZERO_AND_FARGATE_CON = And(Condition(USE_FARGATE_CON_T),
                                         Condition(SERVICE_COUNT_ZERO_CON_T))

# Condition: no explicit microservice hostname was provided (parameter at
# its default value).
NOT_USE_HOSTNAME_CON_T = "NotUseMicroserviceHostnameCondition"
NOT_USE_HOSTNAME_CON = Equals(Ref(ecs_params.SERVICE_HOSTNAME),
                              ecs_params.SERVICE_HOSTNAME.Default)
Esempio n. 12
0
        MaxLength="64",
        Type="String",
    ))

# Number of web server instances to launch initially (bounded 1-5).
WebServerCapacity = t.add_parameter(
    Parameter(
        "WebServerCapacity",
        # Typo fix: "nuber" -> "number".
        Description="The initial number of WebServer instances",
        Default="2",
        Type="Number",
        MaxValue="5",
        MinValue="1",
        ConstraintDescription="must be between 1 and 5 EC2 instances.",
    ))

# "Is-EC2-Classic" references "Is-EC2-VPC", which is added below; this is
# fine because CloudFormation conditions are emitted as an unordered map.
t.add_condition("Is-EC2-Classic", Not(Condition("Is-EC2-VPC")))

# Regions that are VPC-only (no EC2-Classic support).
t.add_condition(
    "Is-EC2-VPC",
    Or(Equals(Ref("AWS::Region"), "eu-central-1"),
       Equals(Ref("AWS::Region"), "cn-north-1"),
       Equals(Ref("AWS::Region"), "ap-northeast-2")))

t.add_mapping(
    "AWSInstanceType2Arch", {
        u'c1.medium': {
            u'Arch': u'PV64'
        },
        u'c1.xlarge': {
            u'Arch': u'PV64'
        },
Esempio n. 13
0
        "the existing primary fails.",
        Type="String",
        AllowedValues=["true", "false"],
        Default="false",
    ),
    group="Redis",
    label="Enable automatic failover",
)
# Condition: the operator enabled Redis automatic failover.
redis_uses_automatic_failover = "RedisAutomaticFailoverCondition"
template.add_condition(redis_uses_automatic_failover,
                       Equals(Ref(redis_automatic_failover), "true"))

# Condition: Redis is in use AND at-rest/in-transit encryption is enabled.
# NOTE(review): `using_redis_condition` and `use_aes256_encryption_cond`
# are defined elsewhere in this file — presumably earlier condition names.
secure_redis_condition = "SecureRedisCondition"
template.add_condition(
    secure_redis_condition,
    And(Condition(using_redis_condition),
        Condition(use_aes256_encryption_cond)))

# Condition: at least one cache engine (memcached or Redis) is selected;
# used to gate shared resources such as the cache subnet group below.
using_either_cache_condition = "EitherCacheCondition"
template.add_condition(
    using_either_cache_condition,
    Or(Condition(using_memcached_condition), Condition(using_redis_condition)))

# Subnet and security group shared by both clusters

cache_subnet_group = elasticache.SubnetGroup(
    "CacheSubnetGroup",
    template=template,
    Description="Subnets available for the cache instance",
    Condition=using_either_cache_condition,
    SubnetIds=[Ref(private_subnet_a),
Esempio n. 14
0
def main(args):
    """Generate a CloudFormation template creating up to five EBS volumes
    for a RAID array and write it as JSON to ``args.target_path``.

    Volume settings come from the comma-delimited ``RAIDOptions`` parameter
    (indices: 0 shared_dir, 1 raid_type, 2 num_of_vols, 3 vol_type,
    4 vol_size, 5 vol_IOPS, 6 encrypted, 7 ebs_kms_key).
    """
    number_of_vol = 5

    t = Template()
    availability_zone = t.add_parameter(
        Parameter(
            "AvailabilityZone",
            Type="String",
            Description="Availability Zone the cluster will launch into. "
            "THIS IS REQUIRED",
        ))
    raid_options = t.add_parameter(
        Parameter(
            "RAIDOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of RAID related options, "
            "8 parameters in total, "
            "["
            "0 shared_dir,"
            "1 raid_type,"
            "2 num_of_vols,"
            "3 vol_type,"
            "4 vol_size,"
            "5 vol_IOPS,"
            "6 encrypted, "
            "7 ebs_kms_key]",
        ))
    use_vol = [None] * number_of_vol
    v = [None] * number_of_vol

    for i in range(number_of_vol):
        # Chain of "UseVolN" conditions: volume 1 is used whenever a shared
        # dir is configured; volume N requires num_of_vols > N-1 AND the
        # previous volume being in use.
        if i == 0:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                Not(Equals(Select("0", Ref(raid_options)), "NONE")))
        else:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                And(Not(Equals(Select("2", Ref(raid_options)), str(i))),
                    Condition(use_vol[i - 1])),
            )

        # Per-volume option conditions, each driven by one RAIDOptions slot.
        # NOTE(review): these logical IDs contain "_" (e.g. "Vol1_UseEBSPIOPS");
        # CloudFormation logical IDs are documented as alphanumeric — verify
        # these names are accepted before relying on this template.
        use_ebs_iops = t.add_condition(
            "Vol%s_UseEBSPIOPS" % (i + 1),
            Equals(Select("3", Ref(raid_options)), "io1"))
        use_volume_size = t.add_condition(
            "Vol%s_UseVolumeSize" % (i + 1),
            Not(Equals(Select("4", Ref(raid_options)), "NONE")))
        use_volume_type = t.add_condition(
            "Vol%s_UseVolumeType" % (i + 1),
            Not(Equals(Select("3", Ref(raid_options)), "NONE")))
        use_ebs_encryption = t.add_condition(
            "Vol%s_UseEBSEncryption" % (i + 1),
            Equals(Select("6", Ref(raid_options)), "true"))
        use_ebs_kms_key = t.add_condition(
            "Vol%s_UseEBSKMSKey" % (i + 1),
            And(Condition(use_ebs_encryption),
                Not(Equals(Select("7", Ref(raid_options)), "NONE"))),
        )
        # The volume itself; every optional property falls back to a default
        # (gp2, 20 GiB, no IOPS/KMS) when its condition is false.
        v[i] = t.add_resource(
            ec2.Volume(
                "Volume%s" % (i + 1),
                AvailabilityZone=Ref(availability_zone),
                VolumeType=If(use_volume_type, Select("3", Ref(raid_options)),
                              "gp2"),
                Size=If(use_volume_size, Select("4", Ref(raid_options)), 20),
                Iops=If(use_ebs_iops, Select("5", Ref(raid_options)), NoValue),
                Encrypted=If(use_ebs_encryption,
                             Select("6", Ref(raid_options)), NoValue),
                KmsKeyId=If(use_ebs_kms_key, Select("7", Ref(raid_options)),
                            NoValue),
                Condition=use_vol[i],
            ))

    # Output the ids of the volumes actually created: each step either
    # extends the joined list (volume used) or falls back to the previous
    # step's value.
    outputs = [None] * number_of_vol
    vol_to_return = [None] * number_of_vol
    for i in range(number_of_vol):
        vol_to_return[i] = Ref(v[i])
        if i == 0:
            outputs[i] = If(use_vol[i], vol_to_return[i], "NONE")
        else:
            outputs[i] = If(use_vol[i], Join(",", vol_to_return[:(i + 1)]),
                            outputs[i - 1])

    t.add_output(
        Output("Volumeids",
               Description="Volume IDs of the resulted RAID EBS volumes",
               Value=outputs[number_of_vol - 1]))

    # Fix: use a context manager so the file is closed even if to_json()
    # or the write raises (the original used bare open()/close()).
    with open(args.target_path, "w") as output_file:
        output_file.write(t.to_json())
# Pre-sized slots, one per potential volume; filled in by the loop below.
UseVol = [None] * numberOfVol
UseExistingEBSVolume = [None] * numberOfVol
v = [None] * numberOfVol

for i in range(numberOfVol):
    if i == 0:
        CreateVol = t.add_condition(
            "Vol%s_CreateEBSVolume" % (i + 1),
            Equals(Select(str(i), Ref(EBSVolumeId)), "NONE"))
    elif i == 1:
        UseVol[i] = t.add_condition("UseVol%s" % (i + 1),
                                    Not(Equals(Ref(EBSVolumeNum), str(i))))
        CreateVol = t.add_condition(
            "Vol%s_CreateEBSVolume" % (i + 1),
            And(Condition(UseVol[i]),
                Equals(Select(str(i), Ref(EBSVolumeId)), "NONE")))
    else:
        UseVol[i] = t.add_condition(
            "UseVol%s" % (i + 1),
            And(Not(Equals(Ref(EBSVolumeNum), str(i))),
                Condition(UseVol[i - 1])))
        CreateVol = t.add_condition(
            "Vol%s_CreateEBSVolume" % (i + 1),
            And(Condition(UseVol[i]),
                Equals(Select(str(i), Ref(EBSVolumeId)), "NONE")))

    UseEBSPIOPS = t.add_condition(
        "Vol%s_UseEBSPIOPS" % (i + 1),
        Equals(Select(str(i), Ref(VolumeType)), "io1"))
    UseVolumeSize = t.add_condition(
Esempio n. 16
0
                       Not(Equals(Ref(db_class), dont_create_value)))

# Whether a read replica of the database server should be created.
db_replication = template.add_parameter(Parameter(
    "DatabaseReplication",
    Type="String",
    AllowedValues=["true", "false"],
    Default="false",
    Description="Whether to create a database server replica - "
    "WARNING this will fail if DatabaseBackupRetentionDays is 0.",
),
                                        group="Database",
                                        label="Database replication")
# Replica is only created when a database is created at all AND replication
# was requested. NOTE(review): `db_condition` is defined earlier in the file.
db_replication_condition = "DatabaseReplicationCondition"
template.add_condition(
    db_replication_condition,
    And(Condition(db_condition), Equals(Ref(db_replication), "true")))

# RDS engine selection, restricted to the engines known to rds_engine_map.
db_engine = template.add_parameter(
    Parameter(
        "DatabaseEngine",
        Default="postgres",
        Description="Database engine to use",
        Type="String",
        AllowedValues=list(rds_engine_map.keys()),
        ConstraintDescription="must select a valid database engine.",
    ),
    group="Database",
    label="Engine",
)

db_engine_version = template.add_parameter(
Esempio n. 17
0
"""Common Conditions across the templates"""

from troposphere import Condition, Not, Ref, Equals, And, If

from ecs_composex.common import cfn_params

# Condition: the root stack name parameter was left at its default, so the
# stack's own name should be used.
USE_STACK_NAME_CON_T = "UseStackName"
USE_STACK_NAME_CON = Equals(Ref(cfn_params.ROOT_STACK_NAME),
                            cfn_params.ROOT_STACK_NAME.Default)

# Condition: CFN parameter values should be used (parameter at default).
USE_CFN_PARAMS_CON_T = "UseCfnParametersValueCondition"
USE_CFN_PARAMS_CON = Equals(Ref(cfn_params.USE_CFN_PARAMS),
                            cfn_params.USE_CFN_PARAMS.Default)

# Negation of the above.
NOT_USE_CFN_PARAMS_CON_T = f"Not{USE_CFN_PARAMS_CON_T}"
NOT_USE_CFN_PARAMS_CON = Not(Condition(USE_CFN_PARAMS_CON_T))

# Condition: CloudFormation exports are enabled.
USE_CFN_EXPORTS_T = "UseExportsCondition"
USE_CFN_EXPORTS = Equals(Ref(cfn_params.USE_CFN_EXPORTS), "True")

# Negation: CFN exports disabled.
NOT_USE_CFN_EXPORTS_T = "NotUseCfnExportsCondition"
NOT_USE_CFN_EXPORTS = Not(Condition(USE_CFN_EXPORTS_T))

# Condition: SSM exports are enabled.
USE_SSM_EXPORTS_T = "UseSsmExportsCondition"
USE_SSM_EXPORTS = Equals(Ref(cfn_params.USE_SSM_EXPORTS), "True")

# Composite: both CFN and SSM exports enabled.
USE_CFN_AND_SSM_EXPORTS_T = "UseCfnAndSsmCondition"
USE_CFN_AND_SSM_EXPORTS = And(Condition(USE_CFN_EXPORTS_T),
                              Condition(USE_SSM_EXPORTS_T))

USE_SSM_ONLY_T = "UseSsmOnlyCondition"
Esempio n. 18
0
# Email of the member account (value redacted in this listing).
MEMBER_ACCOUNT_EMAIL = "*****@*****.**"

t = Template()

t.add_description(
    "GuardDuty example deployment for master and member accounts")

# Only needed when deploying into a member account; empty on the master.
member_invitation = t.add_parameter(
    Parameter(
        "MemberInvitation",
        Type="String",
        Description=
        "Invitation ID for member account, leave empty on master account"))

# The same template serves both roles: compare the current account id to
# the known master account id to decide which resources to create.
t.add_condition("IsMaster", Equals(Ref(AWS_ACCOUNT_ID), MASTER_ACCOUNT_ID))
t.add_condition("IsMember", Not(Condition("IsMaster")))

# A detector is created in every account, master or member.
detector = t.add_resource(guardduty.Detector("Detector", Enable=True))

# Member accounts accept the master's invitation via this resource.
master = t.add_resource(
    guardduty.Master(
        "Master",
        Condition="IsMember",
        DetectorId=Ref(detector),
        MasterId=MASTER_ACCOUNT_ID,
        InvitationId=Ref(member_invitation),
    ))

# You can create multiple members if you have multiple members accounts
member = t.add_resource(
    guardduty.Member("Member",
Esempio n. 19
0
# Condition: the bastion type parameter selects OpenVPN.
bastion_type_is_openvpn_set = "BastionTypeIsOpenVPNSet"
template.add_condition(bastion_type_is_openvpn_set,
                       Equals("OpenVPN", Ref(bastion_type)))

# Condition: the bastion type parameter selects plain SSH.
bastion_type_is_ssh_set = "BastionTypeIsSSHSet"
template.add_condition(bastion_type_is_ssh_set, Equals("SSH",
                                                       Ref(bastion_type)))

# Condition: a specific AMI id was provided for the bastion.
bastion_ami_set = "BastionAMISet"
template.add_condition(bastion_ami_set, Not(Equals("", Ref(bastion_ami))))

# Composite: both a bastion type and an AMI were provided.
# NOTE(review): `bastion_type_set` is not defined in this excerpt —
# presumably declared earlier in the file; verify.
bastion_type_and_ami_set = "BastionTypeAndAMISet"
template.add_condition(
    bastion_type_and_ami_set,
    And(Condition(bastion_type_set), Condition(bastion_ami_set)))

# Security group for the bastion host, created only when a bastion type
# was chosen.
bastion_security_group = ec2.SecurityGroup(
    'BastionSecurityGroup',
    template=template,
    GroupDescription="Bastion security group.",
    VpcId=Ref(vpc),
    Condition=bastion_type_set,
    Tags=Tags(Name=Join("-", [Ref("AWS::StackName"), "bastion"]), ),
)

bastion_security_group_ingress_ssh = ec2.SecurityGroupIngress(
    'BastionSecurityGroupIngressSSH',
    template=template,
    GroupId=Ref(bastion_security_group),
    IpProtocol="tcp",
Esempio n. 20
0
which it heavily relies onto.

You can change the names *values* so you like so long as you keep it Alphanumerical [a-zA-Z0-9]
"""

from troposphere import And, Condition, Equals, If, Not, Or, Ref

from ecs_composex.ecs import ecs_params

# Condition: no cluster security group id supplied (parameter at default).
NOT_USE_CLUSTER_SG_CON_T = "NotUseClusterSecurityGroupCondition"
NOT_USE_CLUSTER_SG_CON = Equals(
    Ref(ecs_params.CLUSTER_SG_ID), ecs_params.CLUSTER_SG_ID.Default
)

# Negation: an explicit cluster security group should be used.
USE_CLUSTER_SG_CON_T = "UseClusterSecurityGroupCondition"
USE_CLUSTER_SG_CON = Not(Condition(NOT_USE_CLUSTER_SG_CON_T))

# Condition: the desired service count is zero.
SERVICE_COUNT_ZERO_CON_T = "ServiceCountIsZeroCondition"
SERVICE_COUNT_ZERO_CON = Equals(Ref(ecs_params.SERVICE_COUNT), "0")

# Launch-type selection conditions, each matching one allowed value of the
# LAUNCH_TYPE parameter.
USE_EC2_CON_T = "UseEC2LaunchType"
USE_EC2_CON = Equals(Ref(ecs_params.LAUNCH_TYPE), "EC2")


USE_FARGATE_PROVIDERS_CON_T = "UseFargateProvidersCondition"
USE_FARGATE_PROVIDERS_CON = Equals(Ref(ecs_params.LAUNCH_TYPE), "FARGATE_PROVIDERS")

USE_FARGATE_LT_CON_T = "UseFargateLaunchType"
USE_FARGATE_LT_CON = Equals(Ref(ecs_params.LAUNCH_TYPE), "FARGATE")

USE_CLUSTER_MODE_CON_T = "UseClusterDefaultProviders"
Esempio n. 21
0
from ecs_composex.acm.acm_params import (
    VALIDATION_DOMAIN_NAME,
    VALIDATION_DOMAIN_ZONE_ID,
    CERT_ALT_NAMES,
)

# Condition: no validation zone id was supplied (parameter at default).
ACM_ZONE_ID_IS_NONE_T = "AcmZoneIsNoneCondition"
ACM_ZONE_ID_IS_NONE = Equals(Ref(VALIDATION_DOMAIN_ZONE_ID),
                             VALIDATION_DOMAIN_ZONE_ID.Default)

# Condition: no validation zone name was supplied (parameter at default).
ACM_ZONE_NAME_IS_NONE_T = "AcmZoneNameIsNoneCondition"
ACM_ZONE_NAME_IS_NONE = Equals(Ref(VALIDATION_DOMAIN_NAME),
                               VALIDATION_DOMAIN_NAME.Default)

# When both a zone id and a zone name are given, the zone id wins for
# certificate validation.
USE_ZONE_ID_T = "UseZoneIdOverZoneNameForValidation"
USE_ZONE_ID = And(Not(Condition(ACM_ZONE_ID_IS_NONE_T)),
                  Not(Condition(ACM_ZONE_NAME_IS_NONE_T)))

# Neither zone id nor zone name given: no validation is configured.
NO_VALIDATION_CONDITION_T = "NoValidationConfiguredCondition"
NO_VALIDATION_CONDITION = And(Condition(ACM_ZONE_ID_IS_NONE_T),
                              Condition(ACM_ZONE_NAME_IS_NONE_T))

# Condition: the alternative-names list is empty (first entry at default).
NO_ALT_NAMES_T = "NoAlternativeSubNamesCondition"
NO_ALT_NAMES = Equals(Select(0, Ref(CERT_ALT_NAMES)), CERT_ALT_NAMES.Default)


def add_all_conditions(template):
    """
    Function to add all conditions to the template
    :param template:
    :return:
Esempio n. 22
0
    "SSHLocationBastion":
    Parameter(
        "SSHLocationBastion",
        Type="String",
        AllowedPattern=
        "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\/([0-9]|[1-2][0-9]|3[0-2]))$",
        Default="0.0.0.0/0")
}

conditions = {
    "CreatePrivateSubnet1ACondition":
    Equals(Ref("CreatePrivateSubnet1A"), "True"),
    "CreatePrivateSubnet1BCondition":
    Equals(Ref("CreatePrivateSubnet1B"), "True"),
    "NAT1EIPCondition":
    Or(Condition("CreatePrivateSubnet1ACondition"),
       Condition("CreatePrivateSubnet1BCondition")),
    "CreatePublicSubnet2Condition":
    Equals(Ref("CreatePublicSubnet2"), "True"),
    "CreatePrivateSubnet2ACondition":
    Equals(Ref("CreatePrivateSubnet2A"), "True"),
    "CreatePrivateSubnet2BCondition":
    Equals(Ref("CreatePrivateSubnet2B"), "True"),
    "AttachNAT2ACondition":
    And(Condition("CreatePublicSubnet2Condition"),
        Condition("CreatePrivateSubnet2ACondition")),
    "AttachNAT2BCondition":
    And(Condition("CreatePublicSubnet2Condition"),
        Condition("CreatePrivateSubnet2BCondition")),
    "NAT2EIPCondition":
    Or(Condition("AttachNAT2ACondition"), Condition("AttachNAT2BCondition"))
Esempio n. 23
0
def create_template():
    template = Template(Description=(
        "Static website hosted with S3 and CloudFront. "
        "https://github.com/schlarpc/overengineered-cloudfront-s3-static-website"
    ))

    partition_config = add_mapping(
        template,
        "PartitionConfig",
        {
            "aws": {
                # the region with the control plane for CloudFront, IAM, Route 53, etc
                "PrimaryRegion":
                "us-east-1",
                # assume that Lambda@Edge replicates to all default enabled regions, and that
                # future regions will be opt-in. generated with AWS CLI:
                # aws ec2 describe-regions --all-regions --query "Regions[?OptInStatus=='opt-in-not-required'].RegionName|sort(@)"
                "DefaultRegions": [
                    "ap-northeast-1",
                    "ap-northeast-2",
                    "ap-northeast-3",
                    "ap-south-1",
                    "ap-southeast-1",
                    "ap-southeast-2",
                    "ca-central-1",
                    "eu-central-1",
                    "eu-north-1",
                    "eu-west-1",
                    "eu-west-2",
                    "eu-west-3",
                    "sa-east-1",
                    "us-east-1",
                    "us-east-2",
                    "us-west-1",
                    "us-west-2",
                ],
            },
            # this doesn't actually work, because Lambda@Edge isn't supported in aws-cn
            "aws-cn": {
                "PrimaryRegion": "cn-north-1",
                "DefaultRegions": ["cn-north-1", "cn-northwest-1"],
            },
        },
    )

    acm_certificate_arn = template.add_parameter(
        Parameter(
            "AcmCertificateArn",
            Description=
            "Existing ACM certificate to use for serving TLS. Overrides HostedZoneId.",
            Type="String",
            AllowedPattern="(arn:[^:]+:acm:[^:]+:[^:]+:certificate/.+|)",
            Default="",
        ))

    hosted_zone_id = template.add_parameter(
        Parameter(
            "HostedZoneId",
            Description=
            "Existing Route 53 zone to use for validating a new TLS certificate.",
            Type="String",
            AllowedPattern="(Z[A-Z0-9]+|)",
            Default="",
        ))

    dns_names = template.add_parameter(
        Parameter(
            "DomainNames",
            Description=
            "Comma-separated list of additional domain names to serve.",
            Type="CommaDelimitedList",
            Default="",
        ))

    tls_protocol_version = template.add_parameter(
        Parameter(
            "TlsProtocolVersion",
            Description=
            "CloudFront TLS security policy; see https://amzn.to/2DR91Xq for details.",
            Type="String",
            Default="TLSv1.2_2019",
        ))

    log_retention_days = template.add_parameter(
        Parameter(
            "LogRetentionDays",
            Description=
            "Days to keep CloudFront, S3, and Lambda logs. 0 means indefinite retention.",
            Type="Number",
            AllowedValues=[0] + CLOUDWATCH_LOGS_RETENTION_OPTIONS,
            Default=365,
        ))

    default_ttl_seconds = template.add_parameter(
        Parameter(
            "DefaultTtlSeconds",
            Description="Cache time-to-live when not set by S3 object headers.",
            Type="Number",
            Default=int(datetime.timedelta(minutes=5).total_seconds()),
        ))

    enable_price_class_hack = template.add_parameter(
        Parameter(
            "EnablePriceClassHack",
            Description="Cut your bill in half with this one weird trick.",
            Type="String",
            Default="false",
            AllowedValues=["true", "false"],
        ))

    retention_defined = add_condition(template, "RetentionDefined",
                                      Not(Equals(Ref(log_retention_days), 0)))

    using_price_class_hack = add_condition(
        template, "UsingPriceClassHack",
        Equals(Ref(enable_price_class_hack), "true"))

    using_acm_certificate = add_condition(
        template, "UsingAcmCertificate",
        Not(Equals(Ref(acm_certificate_arn), "")))

    using_hosted_zone = add_condition(template, "UsingHostedZone",
                                      Not(Equals(Ref(hosted_zone_id), "")))

    using_certificate = add_condition(
        template,
        "UsingCertificate",
        Or(Condition(using_acm_certificate), Condition(using_hosted_zone)),
    )

    should_create_certificate = add_condition(
        template,
        "ShouldCreateCertificate",
        And(Condition(using_hosted_zone),
            Not(Condition(using_acm_certificate))),
    )

    using_dns_names = add_condition(template, "UsingDnsNames",
                                    Not(Equals(Select(0, Ref(dns_names)), "")))

    is_primary_region = "IsPrimaryRegion"
    template.add_condition(
        is_primary_region,
        Equals(Region, FindInMap(partition_config, Partition,
                                 "PrimaryRegion")),
    )

    precondition_region_is_primary = template.add_resource(
        WaitConditionHandle(
            "PreconditionIsPrimaryRegionForPartition",
            Condition=is_primary_region,
        ))

    log_ingester_dlq = template.add_resource(
        Queue(
            "LogIngesterDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
        ))

    log_ingester_role = template.add_resource(
        Role(
            "LogIngesterRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[GetAtt(log_ingester_dlq, "Arn")],
                            )
                        ],
                    ),
                )
            ],
        ))

    log_ingester = template.add_resource(
        Function(
            "LogIngester",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(log_ingest.handler.__name__),
            Code=Code(ZipFile=inspect.getsource(log_ingest)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(log_ingester_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(log_ingester_dlq, "Arn")),
        ))

    log_ingester_permission = template.add_resource(
        Permission(
            "LogIngesterPermission",
            FunctionName=GetAtt(log_ingester, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="s3.amazonaws.com",
            SourceAccount=AccountId,
        ))

    log_bucket = template.add_resource(
        Bucket(
            "LogBucket",
            # S3 requires this ACL (regardless of bucket policy) or s3:PutBucketLogging fails.
            # When the CloudFront distribution is created, it adds an additional bucket ACL.
            # That ACL is not possible to model in CloudFormation.
            AccessControl="LogDeliveryWrite",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                LifecycleRule(ExpirationInDays=1, Status="Enabled"),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=1),
                    Status="Enabled",
                ),
            ]),
            NotificationConfiguration=NotificationConfiguration(
                LambdaConfigurations=[
                    LambdaConfigurations(Event="s3:ObjectCreated:*",
                                         Function=GetAtt(log_ingester, "Arn"))
                ]),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # if we use KMS, we can't read the logs
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            DependsOn=[log_ingester_permission],
        ))

    log_ingester_log_group = template.add_resource(
        LogGroup(
            "LogIngesterLogGroup",
            LogGroupName=Join(
                "", ["/aws/lambda/", Ref(log_ingester)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    log_ingester_policy = template.add_resource(
        PolicyType(
            "LogIngesterPolicy",
            Roles=[Ref(log_ingester_role)],
            PolicyName="IngestLogPolicy",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    Region,
                                    AccountId,
                                    "log-group",
                                    "/aws/cloudfront/*",
                                ],
                            ),
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    Region,
                                    AccountId,
                                    "log-group",
                                    "/aws/s3/*",
                                ],
                            ),
                            GetAtt(log_ingester_log_group, "Arn"),
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(log_bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))

    bucket = template.add_resource(
        Bucket(
            "ContentBucket",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                # not supported by CFN yet:
                # LifecycleRule(
                # Transitions=[
                # LifecycleRuleTransition(
                # StorageClass='INTELLIGENT_TIERING',
                # TransitionInDays=1,
                # ),
                # ],
                # Status="Enabled",
                # ),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=7),
                    Status="Enabled",
                )
            ]),
            LoggingConfiguration=LoggingConfiguration(
                DestinationBucketName=Ref(log_bucket), LogFilePrefix="s3/"),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # Origin Access Identities can't use KMS
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
        ))

    origin_access_identity = template.add_resource(
        CloudFrontOriginAccessIdentity(
            "CloudFrontIdentity",
            CloudFrontOriginAccessIdentityConfig=
            CloudFrontOriginAccessIdentityConfig(
                Comment=GetAtt(bucket, "Arn")),
        ))

    bucket_policy = template.add_resource(
        BucketPolicy(
            "ContentBucketPolicy",
            Bucket=Ref(bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "CanonicalUser",
                            GetAtt(origin_access_identity,
                                   "S3CanonicalUserId"),
                        ),
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))

    # Not strictly necessary, as ACLs should take care of this access. However, CloudFront docs
    # state "In some circumstances [...] S3 resets permissions on the bucket to the default value",
    # and this allows logging to work without any ACLs in place.
    log_bucket_policy = template.add_resource(
        BucketPolicy(
            "LogBucketPolicy",
            Bucket=Ref(log_bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join(
                                "/",
                                [GetAtt(log_bucket, "Arn"), "cloudfront", "*"])
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.ListBucket],
                        Resource=[Join("/", [GetAtt(log_bucket, "Arn")])],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service", "s3.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join("/", [GetAtt(log_bucket, "Arn"), "s3", "*"])
                        ],
                    ),
                ],
            ),
        ))

    certificate_validator_dlq = template.add_resource(
        Queue(
            "CertificateValidatorDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
            Condition=should_create_certificate,
        ))

    certificate_validator_role = template.add_resource(
        Role(
            "CertificateValidatorRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[
                                    GetAtt(certificate_validator_dlq, "Arn")
                                ],
                            )
                        ],
                    ),
                )
            ],
            # TODO scope down
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
                "arn:aws:iam::aws:policy/AmazonRoute53FullAccess",
                "arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly",
            ],
            Condition=should_create_certificate,
        ))

    certificate_validator_function = template.add_resource(
        Function(
            "CertificateValidatorFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(certificate_validator.handler.__name__),
            Code=Code(ZipFile=inspect.getsource(certificate_validator)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(certificate_validator_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(certificate_validator_dlq, "Arn")),
            Environment=Environment(
                Variables={
                    certificate_validator.EnvVars.HOSTED_ZONE_ID.name:
                    Ref(hosted_zone_id)
                }),
            Condition=should_create_certificate,
        ))

    certificate_validator_log_group = template.add_resource(
        LogGroup(
            "CertificateValidatorLogGroup",
            LogGroupName=Join(
                "", ["/aws/lambda/",
                     Ref(certificate_validator_function)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
            Condition=should_create_certificate,
        ))

    certificate_validator_rule = template.add_resource(
        Rule(
            "CertificateValidatorRule",
            EventPattern={
                "detail-type": ["AWS API Call via CloudTrail"],
                "detail": {
                    "eventSource": ["acm.amazonaws.com"],
                    "eventName": ["AddTagsToCertificate"],
                    "requestParameters": {
                        "tags": {
                            "key": [certificate_validator_function.title],
                            "value":
                            [GetAtt(certificate_validator_function, "Arn")],
                        }
                    },
                },
            },
            Targets=[
                Target(
                    Id="certificate-validator-lambda",
                    Arn=GetAtt(certificate_validator_function, "Arn"),
                )
            ],
            DependsOn=[certificate_validator_log_group],
            Condition=should_create_certificate,
        ))

    certificate_validator_permission = template.add_resource(
        Permission(
            "CertificateValidatorPermission",
            FunctionName=GetAtt(certificate_validator_function, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="events.amazonaws.com",
            SourceArn=GetAtt(certificate_validator_rule, "Arn"),
            Condition=should_create_certificate,
        ))

    certificate = template.add_resource(
        Certificate(
            "Certificate",
            DomainName=Select(0, Ref(dns_names)),
            SubjectAlternativeNames=Ref(
                dns_names),  # duplicate first name works fine
            ValidationMethod="DNS",
            Tags=Tags(
                **{
                    certificate_validator_function.title:
                    GetAtt(certificate_validator_function, "Arn")
                }),
            DependsOn=[certificate_validator_permission],
            Condition=should_create_certificate,
        ))

    edge_hook_role = template.add_resource(
        Role(
            "EdgeHookRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal(
                            "Service",
                            [
                                "lambda.amazonaws.com",
                                "edgelambda.amazonaws.com"
                            ],
                        ),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
        ))

    edge_hook_function = template.add_resource(
        Function(
            "EdgeHookFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.handler",
            Code=Code(ZipFile=inspect.getsource(edge_hook)),
            MemorySize=128,
            Timeout=3,
            Role=GetAtt(edge_hook_role, "Arn"),
        ))
    edge_hook_function_hash = (hashlib.sha256(
        json.dumps(edge_hook_function.to_dict(),
                   sort_keys=True).encode("utf-8")).hexdigest()[:10].upper())

    edge_hook_version = template.add_resource(
        Version(
            "EdgeHookVersion" + edge_hook_function_hash,
            FunctionName=GetAtt(edge_hook_function, "Arn"),
        ))

    replica_log_group_name = Join(
        "/",
        [
            "/aws/lambda",
            Join(
                ".",
                [
                    FindInMap(partition_config, Partition, "PrimaryRegion"),
                    Ref(edge_hook_function),
                ],
            ),
        ],
    )

    edge_hook_role_policy = template.add_resource(
        PolicyType(
            "EdgeHookRolePolicy",
            PolicyName="write-logs",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    "*",
                                    AccountId,
                                    "log-group",
                                    replica_log_group_name,
                                    "log-stream",
                                    "*",
                                ],
                            ),
                        ],
                    ),
                ],
            ),
            Roles=[Ref(edge_hook_role)],
        ))

    stack_set_administration_role = template.add_resource(
        Role(
            "StackSetAdministrationRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "cloudformation.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
        ))

    stack_set_execution_role = template.add_resource(
        Role(
            "StackSetExecutionRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "AWS", GetAtt(stack_set_administration_role,
                                          "Arn")),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="create-stackset-instances",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.DescribeStacks,
                                    logs.DescribeLogGroups,
                                ],
                                Resource=["*"],
                            ),
                            # stack instances communicate with the CFN service via SNS
                            Statement(
                                Effect=Allow,
                                Action=[sns.Publish],
                                NotResource=[
                                    Join(
                                        ":",
                                        [
                                            "arn", Partition, "sns", "*",
                                            AccountId, "*"
                                        ],
                                    )
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    logs.CreateLogGroup,
                                    logs.DeleteLogGroup,
                                    logs.PutRetentionPolicy,
                                    logs.DeleteRetentionPolicy,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn",
                                            Partition,
                                            "logs",
                                            "*",
                                            AccountId,
                                            "log-group",
                                            replica_log_group_name,
                                            "log-stream",
                                            "",
                                        ],
                                    ),
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.CreateStack,
                                    cloudformation.DeleteStack,
                                    cloudformation.UpdateStack,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn",
                                            Partition,
                                            "cloudformation",
                                            "*",
                                            AccountId,
                                            Join(
                                                "/",
                                                [
                                                    "stack",
                                                    Join(
                                                        "-",
                                                        [
                                                            "StackSet",
                                                            StackName, "*"
                                                        ],
                                                    ),
                                                ],
                                            ),
                                        ],
                                    )
                                ],
                            ),
                        ],
                    ),
                ),
            ],
        ))

    stack_set_administration_role_policy = template.add_resource(
        PolicyType(
            "StackSetAdministrationRolePolicy",
            PolicyName="assume-execution-role",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[sts.AssumeRole],
                        Resource=[GetAtt(stack_set_execution_role, "Arn")],
                    ),
                ],
            ),
            Roles=[Ref(stack_set_administration_role)],
        ))

    edge_log_groups = template.add_resource(
        StackSet(
            "EdgeLambdaLogGroupStackSet",
            AdministrationRoleARN=GetAtt(stack_set_administration_role, "Arn"),
            ExecutionRoleName=Ref(stack_set_execution_role),
            StackSetName=Join("-", [StackName, "EdgeLambdaLogGroup"]),
            PermissionModel="SELF_MANAGED",
            Description="Multi-region log groups for Lambda@Edge replicas",
            Parameters=[
                StackSetParameter(
                    ParameterKey="LogGroupName",
                    ParameterValue=replica_log_group_name,
                ),
                StackSetParameter(
                    ParameterKey="LogRetentionDays",
                    ParameterValue=Ref(log_retention_days),
                ),
            ],
            OperationPreferences=OperationPreferences(
                FailureToleranceCount=0,
                MaxConcurrentPercentage=100,
            ),
            StackInstancesGroup=[
                StackInstances(
                    DeploymentTargets=DeploymentTargets(Accounts=[AccountId]),
                    Regions=FindInMap(partition_config, Partition,
                                      "DefaultRegions"),
                )
            ],
            TemplateBody=create_log_group_template().to_json(indent=None),
            DependsOn=[stack_set_administration_role_policy],
        ))

    price_class_distribution = template.add_resource(
        Distribution(
            "PriceClassDistribution",
            DistributionConfig=DistributionConfig(
                Comment="Dummy distribution used for price class hack",
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    ViewerProtocolPolicy="allow-all",
                    ForwardedValues=ForwardedValues(QueryString=False),
                ),
                Enabled=True,
                Origins=[
                    Origin(Id="default",
                           DomainName=GetAtt(bucket, "DomainName"))
                ],
                IPV6Enabled=True,
                ViewerCertificate=ViewerCertificate(
                    CloudFrontDefaultCertificate=True),
                PriceClass="PriceClass_All",
            ),
            Condition=using_price_class_hack,
        ))

    distribution = template.add_resource(
        Distribution(
            "ContentDistribution",
            DistributionConfig=DistributionConfig(
                Enabled=True,
                Aliases=If(using_dns_names, Ref(dns_names), NoValue),
                Logging=Logging(Bucket=GetAtt(log_bucket, "DomainName"),
                                Prefix="cloudfront/"),
                DefaultRootObject="index.html",
                Origins=[
                    Origin(
                        Id="default",
                        DomainName=GetAtt(bucket, "DomainName"),
                        S3OriginConfig=S3OriginConfig(
                            OriginAccessIdentity=Join(
                                "",
                                [
                                    "origin-access-identity/cloudfront/",
                                    Ref(origin_access_identity),
                                ],
                            )),
                    )
                ],
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    Compress=True,
                    ForwardedValues=ForwardedValues(QueryString=False),
                    ViewerProtocolPolicy="redirect-to-https",
                    DefaultTTL=Ref(default_ttl_seconds),
                    LambdaFunctionAssociations=[
                        LambdaFunctionAssociation(
                            EventType="origin-request",
                            LambdaFunctionARN=Ref(edge_hook_version),
                        )
                    ],
                ),
                HttpVersion="http2",
                IPV6Enabled=True,
                ViewerCertificate=ViewerCertificate(
                    AcmCertificateArn=If(
                        using_acm_certificate,
                        Ref(acm_certificate_arn),
                        If(using_hosted_zone, Ref(certificate), NoValue),
                    ),
                    SslSupportMethod=If(using_certificate, "sni-only",
                                        NoValue),
                    CloudFrontDefaultCertificate=If(using_certificate, NoValue,
                                                    True),
                    MinimumProtocolVersion=Ref(tls_protocol_version),
                ),
                PriceClass=If(using_price_class_hack, "PriceClass_100",
                              "PriceClass_All"),
            ),
            DependsOn=[
                bucket_policy,
                log_ingester_policy,
                edge_log_groups,
                precondition_region_is_primary,
            ],
        ))

    distribution_log_group = template.add_resource(
        LogGroup(
            "DistributionLogGroup",
            LogGroupName=Join(
                "", ["/aws/cloudfront/", Ref(distribution)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    bucket_log_group = template.add_resource(
        LogGroup(
            "BucketLogGroup",
            LogGroupName=Join("", ["/aws/s3/", Ref(bucket)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    template.add_output(Output("DistributionId", Value=Ref(distribution)))

    template.add_output(
        Output("DistributionDomain", Value=GetAtt(distribution, "DomainName")))

    template.add_output(
        Output(
            "DistributionDnsTarget",
            Value=If(
                using_price_class_hack,
                GetAtt(price_class_distribution, "DomainName"),
                GetAtt(distribution, "DomainName"),
            ),
        ))

    template.add_output(
        Output(
            "DistributionUrl",
            Value=Join("",
                       ["https://",
                        GetAtt(distribution, "DomainName"), "/"]),
        ))

    template.add_output(Output("ContentBucketArn", Value=GetAtt(bucket,
                                                                "Arn")))

    return template
    MaxLength="64",
    Type="String",
))

# Initial size of the WebServer fleet; constrained to 1-5 instances so a
# stack operator cannot accidentally launch an oversized fleet.
WebServerCapacity = t.add_parameter(Parameter(
    "WebServerCapacity",
    # Fixed typo in the user-visible description: "nuber" -> "number".
    Description="The initial number of WebServer instances",
    Default="2",
    Type="Number",
    MaxValue="5",
    MinValue="1",
    ConstraintDescription="must be between 1 and 5 EC2 instances.",
))

# EC2-Classic is simply "not a VPC-only region". Referencing the
# "Is-EC2-VPC" condition before it is added is fine: troposphere stores
# conditions in a mapping that is only resolved at template render time.
t.add_condition(
    "Is-EC2-Classic",
    Not(Condition("Is-EC2-VPC")),
)

# Regions that only support VPC-based EC2 instances (no EC2-Classic).
t.add_condition(
    "Is-EC2-VPC",
    Or(
        Equals(Ref("AWS::Region"), "eu-central-1"),
        Equals(Ref("AWS::Region"), "cn-north-1"),
        Equals(Ref("AWS::Region"), "ap-northeast-2"),
    ),
)

t.add_mapping("AWSInstanceType2Arch",
{u'c1.medium': {u'Arch': u'PV64'},
 u'c1.xlarge': {u'Arch': u'PV64'},
 u'c3.2xlarge': {u'Arch': u'HVM64'},
 u'c3.4xlarge': {u'Arch': u'HVM64'},
 u'c3.8xlarge': {u'Arch': u'HVM64'},
 u'c3.large': {u'Arch': u'HVM64'},
 u'c3.xlarge': {u'Arch': u'HVM64'},
 u'c4.2xlarge': {u'Arch': u'HVM64'},
# --- Example 25 ---
"""
ecs_composex.rds conditions for CFN. Allows implementing conditional logic in native CloudFormation format.
"""

from troposphere import And, Condition, Equals, Not, Or, Ref, Select, Split

from ecs_composex.rds.rds_params import DB_ENGINE_NAME, DB_SNAPSHOT_ID, DBS_SUBNET_GROUP

# Naming convention: *_CON_T is the condition's title (the key used in the
# template's Conditions section and in Condition() references); the paired
# *_CON object is the troposphere condition body.

# True when the DB subnet group parameter is left at its default value,
# i.e. this stack must create a new subnet group itself.
DBS_SUBNET_GROUP_CON_T = "CreateSubnetGroupCondition"
DBS_SUBNET_GROUP_CON = Equals(Ref(DBS_SUBNET_GROUP), DBS_SUBNET_GROUP.Default)

# True when no snapshot ID was supplied (parameter equals its default),
# meaning the database is created from scratch rather than restored.
NOT_USE_DB_SNAPSHOT_CON_T = "NotUseSnapshotToCreateDbCondition"
NOT_USE_DB_SNAPSHOT_CON = Equals(Ref(DB_SNAPSHOT_ID), DB_SNAPSHOT_ID.Default)

# Negation of the above: a snapshot ID was supplied, restore from it.
USE_DB_SNAPSHOT_CON_T = "UseSnapshotToCreateDbCondition"
USE_DB_SNAPSHOT_CON = Not(Condition(NOT_USE_DB_SNAPSHOT_CON_T))

# True when the engine is an Aurora flavor: the engine name is split on "-"
# and the first token compared to "aurora" (matches "aurora",
# "aurora-mysql", "aurora-postgresql").
USE_CLUSTER_CON_T = "UseAuroraClusterCondition"
USE_CLUSTER_CON = Equals("aurora", Select(0, Split("-", Ref(DB_ENGINE_NAME))))

# Negation: a non-Aurora engine, so a plain DB instance is used.
NOT_USE_CLUSTER_CON_T = "NotUseClusterCondition"
NOT_USE_CLUSTER_CON = Not(Condition(USE_CLUSTER_CON_T))

# Aurora cluster restored from a snapshot.
USE_CLUSTER_AND_SNAPSHOT_CON_T = "UseClusterAndSnapshotCondition"
USE_CLUSTER_AND_SNAPSHOT_CON = And(Condition(USE_CLUSTER_CON_T),
                                   Condition(USE_DB_SNAPSHOT_CON_T))

# Aurora cluster created from scratch (no snapshot).
USE_CLUSTER_NOT_SNAPSHOT_CON_T = "UseClusterAndNotSnapshotCondition"
USE_CLUSTER_NOT_SNAPSHOT_CON = And(Condition(USE_CLUSTER_CON_T),
                                   Condition(NOT_USE_DB_SNAPSHOT_CON_T))
# --- Example 26 ---
    ),
    "Four": Parameter(
        "Four",
        Type="String",
    ),
    "SshKeyName": Parameter(
        "SshKeyName",
        Type="String",
    )
}

conditions = {
    "OneEqualsFoo":
    Equals(Ref("One"), "Foo"),
    "NotOneEqualsFoo":
    Not(Condition("OneEqualsFoo")),
    "BarEqualsTwo":
    Equals("Bar", Ref("Two")),
    "ThreeEqualsFour":
    Equals(Ref("Three"), Ref("Four")),
    "OneEqualsFooOrBarEqualsTwo":
    Or(Condition("OneEqualsFoo"), Condition("BarEqualsTwo")),
    "OneEqualsFooAndNotBarEqualsTwo":
    And(Condition("OneEqualsFoo"), Not(Condition("BarEqualsTwo"))),
    "OneEqualsFooAndBarEqualsTwoAndThreeEqualsPft":
    And(Condition("OneEqualsFoo"), Condition("BarEqualsTwo"),
        Equals(Ref("Three"), "Pft")),
    "OneIsQuzAndThreeEqualsFour":
    And(Equals(Ref("One"), "Quz"), Condition("ThreeEqualsFour")),
    "LaunchInstance":
    And(Condition("OneEqualsFoo"), Condition("NotOneEqualsFoo"),
# --- Example 27 ---
        Type="String",
    ),
    "SshKeyName": Parameter(
        "SshKeyName",
        Type="String",
    )
}

#Define conditions here.
conditions = {
    "ValidateRegion": Equals(
        Ref("AWS::Region"),
        "eu-west-1"
    ),
    "NotOneEqualsFoo": Not(
        Condition("OneEqualsFoo")
    ),
    "BarEqualsTwo": Equals(
        "Bar",
        Ref("Two")
    ),
    "ThreeEqualsFour": Equals(
        Ref("Three"),
        Ref("Four")
    ),
    "OneEqualsFooOrBarEqualsTwo": Or(
        Condition("OneEqualsFoo"),
        Condition("BarEqualsTwo")
    ),
    "OneEqualsFooAndNotBarEqualsTwo": And(
        Condition("OneEqualsFoo"),
# --- Example 28 ---
def main(args):
    """Build the EFS substack template and write it as JSON to ``args.target_path``.

    The template conditionally creates an EFS file system plus mount
    targets in the head node and compute subnets.  All EFS settings
    arrive packed into the comma-delimited ``EFSOptions`` parameter and
    are unpacked with ``Fn::Select``.

    :param args: parsed CLI arguments; only ``args.target_path`` (output
        file path for the rendered JSON template) is used here.
    """
    t = Template()

    # [0 shared_dir, 1 efs_fs_id, 2 performance_mode, 3 efs_kms_key_id,
    # 4 provisioned_throughput, 5 encrypted, 6 throughput_mode, 7 exists_valid_head_node_mt, 8 exists_valid_compute_mt]
    efs_options = t.add_parameter(
        Parameter(
            "EFSOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of efs related options, 9 parameters in total",
        )
    )
    compute_security_group = t.add_parameter(
        Parameter("ComputeSecurityGroup", Type="String", Description="Security Group for Mount Target")
    )
    head_node_subnet_id = t.add_parameter(
        Parameter("MasterSubnetId", Type="String", Description="Head node subnet id for head node mount target")
    )
    compute_subnet_id = t.add_parameter(
        Parameter(
            "ComputeSubnetId",
            Type="String",
            Description="User provided compute subnet id. Will be use to create compute mount target if needed.",
        )
    )

    # Create a new file system only when a shared dir is requested (slot 0)
    # and no existing file system id was supplied (slot 1).
    create_efs = t.add_condition(
        "CreateEFS",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")), Equals(Select(str(1), Ref(efs_options)), "NONE")),
    )
    # Head node mount target is needed when a shared dir is requested and
    # no valid mount target already exists in the head node AZ (slot 7).
    create_head_node_mt = t.add_condition(
        "CreateMasterMT",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")), Equals(Select(str(7), Ref(efs_options)), "NONE")),
    )
    no_mt_in_compute_az = t.add_condition("NoMTInComputeAZ", Equals(Select(str(8), Ref(efs_options)), "NONE"))
    use_user_provided_compute_subnet = t.add_condition(
        "UseUserProvidedComputeSubnet", Not(Equals(Ref(compute_subnet_id), "NONE"))
    )
    # Need to create compute mount target if:
    # user is providing a compute subnet and
    # there is no existing MT in compute subnet's AZ(includes case where head node AZ == compute AZ).
    #
    # If user is not providing a compute subnet, either we are using the head node subnet as compute subnet,
    # or we will be creating a compute subnet that is in the same AZ as head node subnet,
    # see ComputeSubnet resource in the main stack.
    # In both cases no compute MT is needed.
    create_compute_mt = t.add_condition(
        "CreateComputeMT", And(Condition(use_user_provided_compute_subnet), Condition(no_mt_in_compute_az))
    )

    # Optional EFS properties: each is emitted only when the corresponding
    # EFSOptions slot is not the "NONE" placeholder.
    use_performance_mode = t.add_condition("UsePerformanceMode", Not(Equals(Select(str(2), Ref(efs_options)), "NONE")))
    use_efs_encryption = t.add_condition("UseEFSEncryption", Equals(Select(str(5), Ref(efs_options)), "true"))
    use_efs_kms_key = t.add_condition(
        "UseEFSKMSKey", And(Condition(use_efs_encryption), Not(Equals(Select(str(3), Ref(efs_options)), "NONE")))
    )
    use_throughput_mode = t.add_condition("UseThroughputMode", Not(Equals(Select(str(6), Ref(efs_options)), "NONE")))
    use_provisioned = t.add_condition("UseProvisioned", Equals(Select(str(6), Ref(efs_options)), "provisioned"))
    use_provisioned_throughput = t.add_condition(
        "UseProvisionedThroughput",
        And(Condition(use_provisioned), Not(Equals(Select(str(4), Ref(efs_options)), "NONE"))),
    )

    fs = t.add_resource(
        FileSystem(
            "EFSFS",
            PerformanceMode=If(use_performance_mode, Select(str(2), Ref(efs_options)), NoValue),
            ProvisionedThroughputInMibps=If(use_provisioned_throughput, Select(str(4), Ref(efs_options)), NoValue),
            ThroughputMode=If(use_throughput_mode, Select(str(6), Ref(efs_options)), NoValue),
            Encrypted=If(use_efs_encryption, Select(str(5), Ref(efs_options)), NoValue),
            KmsKeyId=If(use_efs_kms_key, Select(str(3), Ref(efs_options)), NoValue),
            Condition=create_efs,
        )
    )

    # Mount targets attach either the newly created FS or the existing one
    # passed in EFSOptions slot 1.
    t.add_resource(
        MountTarget(
            "MasterSubnetEFSMT",
            FileSystemId=If(create_efs, Ref(fs), Select(str(1), Ref(efs_options))),
            SecurityGroups=[Ref(compute_security_group)],
            SubnetId=Ref(head_node_subnet_id),
            Condition=create_head_node_mt,
        )
    )

    t.add_resource(
        MountTarget(
            "ComputeSubnetEFSMT",
            FileSystemId=If(create_efs, Ref(fs), Select(str(1), Ref(efs_options))),
            SecurityGroups=[Ref(compute_security_group)],
            SubnetId=Ref(compute_subnet_id),
            Condition=create_compute_mt,
        )
    )

    t.add_output(
        Output(
            "FileSystemId",
            Description="ID of the FileSystem",
            # str(1) for consistency with every other Select() in this
            # function (the original used a bare "1" here only).
            Value=If(create_efs, Ref(fs), Select(str(1), Ref(efs_options))),
        )
    )

    # Write the rendered template.  The context manager guarantees the
    # handle is closed even if to_json() or the write raises (the
    # original open/write/close sequence leaked the handle on error).
    with open(args.target_path, "w") as output_file:
        output_file.write(t.to_json())
        'Three',
        Type='String',
    ),
    Parameter(
        'Four',
        Type='String',
    ),
    Parameter(
        'SshKeyName',
        Type='String',
    )
])

# Register the example conditions on the template: simple equality
# checks, a negation, and boolean combinations that reference the
# already-registered conditions by name.
t.add_condition(
    'OneEqualsFoo',
    Equals(Ref('One'), 'Foo'),
)
t.add_condition(
    'NotOneEqualsFoo',
    Not(Condition('OneEqualsFoo')),
)
t.add_condition(
    'BarEqualsTwo',
    Equals('Bar', Ref('Two')),
)
t.add_condition(
    'ThreeEqualsFour',
    Equals(Ref('Three'), Ref('Four')),
)
t.add_condition(
    'OneEqualsFooOrBarEqualsTwo',
    Or(Condition('OneEqualsFoo'),
       Condition('BarEqualsTwo')),
)
t.add_condition(
    'OneEqualsFooAndNotBarEqualsTwo',
    And(Condition('OneEqualsFoo'),
        Not(Condition('BarEqualsTwo'))),
)
t.add_condition(
    'OneEqualsFooAndBarEqualsTwoAndThreeEqualsPft',
    And(Condition('OneEqualsFoo'),
        Condition('BarEqualsTwo'),
        Equals(Ref('Three'), 'Pft')),
)
Esempio n. 30
0
def ssm_network():
    """Build a CloudFormation template for an SSM-ready network.

    Creates a VPC with one public and one private subnet, an optional
    NAT gateway, and optional interface/gateway VPC endpoints (S3, SSM,
    SSM Messages, EC2 Messages) so private instances can reach Systems
    Manager.  Writes the template as YAML to ``ssm_network.yml`` next to
    this file and returns the YAML string.
    """
    template = Template()

    default_route = "0.0.0.0/0"
    vpc_cidr = "192.168.0.0/16"

    template.add_parameter(Parameter(
        "VpcCidr",
        Type="String",
        Description="Cidr block for VPC",
        MinLength="9",
        MaxLength="18",
        Default=vpc_cidr,
        AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
        ConstraintDescription="Must match following pattern 'xxx.xxx.xxx.xxx/xx'"
    ))

    template.add_parameter(Parameter(
        "CreateEndpoints",
        Type="String",
        Description="Create VPC Endpoints",
        Default="No",
        AllowedValues=["Yes", "No"],
        ConstraintDescription="'Yes' or 'No' are only options"
    ))

    template.add_parameter(Parameter(
        "CreateNatGateway",
        Type="String",
        Description="Create NAT Gateway",
        Default="No",
        AllowedValues=["Yes", "No"],
        ConstraintDescription="'Yes' or 'No' are only options"
    ))

    # NOTE(review): AllowedValues above restricts both parameters to
    # "Yes"/"No", so the lowercase-"yes" branches below appear
    # unreachable through normal stack creation -- presumably defensive;
    # confirm before removing.
    conditions = {
        "CreateVpcEndpointsUpperYes": Equals(
            Ref("CreateEndpoints"), "Yes"
        ),
        "CreateVpcEndpointsLowerYes": Equals(
            Ref("CreateEndpoints"), "yes"
        ),
        "CreateVpcEndpoints": Or(
            Condition("CreateVpcEndpointsUpperYes"),
            Condition("CreateVpcEndpointsLowerYes")
        ),
        "CreateNatGatewayUpperYes": Equals(
            Ref("CreateNatGateway"), "Yes"
        ),
        "CreateNatGatewayLowerYes": Equals(
            Ref("CreateNatGateway"), "yes"
        ),
        "CreateNatGateway": Or(
            Condition("CreateNatGatewayUpperYes"),
            Condition("CreateNatGatewayLowerYes")
        )
    }

    ssm_vpc = ec2.VPC(
        'SsmVpc',
        CidrBlock=Ref("VpcCidr"),
        InstanceTenancy="default",
        EnableDnsHostnames=True,
        EnableDnsSupport=True,
        Tags=Tags(
            Name="SSM VPC"
        )
    )

    # Fn::Cidr: split the VPC block into 256 subnets of 8 extra mask
    # bits each (a /16 default yields /24 subnets).
    subnet_blocks = Cidr(GetAtt(ssm_vpc, "CidrBlock"), 256, 8)

    ssm_ig = ec2.InternetGateway(
        'SsmIG',
    )

    ssm_attach_gw = ec2.VPCGatewayAttachment(
        'SsmAttachGateway',
        InternetGatewayId=Ref(ssm_ig),
        VpcId=Ref(ssm_vpc)
    )

    # Public subnet: first /24 block, routed to the internet gateway.
    ssm_public_subnet = ec2.Subnet(
        'SsmPublicSubnet',
        DependsOn=ssm_attach_gw,
        AvailabilityZone=Select(0, GetAZs('')),
        CidrBlock=Select(0, subnet_blocks),
        VpcId=Ref(ssm_vpc),
        Tags=Tags(
            Name="Public Subnet"
        )
    )

    ssm_public_route_table = ec2.RouteTable(
        'SsmPublicRouteTable',
        VpcId=Ref(ssm_vpc),
    )

    ssm_public_route = ec2.Route(
        'SsmPublicRoute',
        DestinationCidrBlock=default_route,
        GatewayId=Ref(ssm_ig),
        RouteTableId=Ref(ssm_public_route_table)
    )

    ssm_public_subnet_route_table_association = ec2.SubnetRouteTableAssociation(
        'SsmPublicSubnetRouteTableAssociation',
        RouteTableId=Ref(ssm_public_route_table),
        SubnetId=Ref(ssm_public_subnet)
    )

    # NAT gateway (and its EIP) only exist when the CreateNatGateway
    # condition holds.
    ssm_eip_nat_gateway = ec2.EIP(
        'SsmEipNatGateway',
        Condition="CreateNatGateway"
    )

    ssm_nat_gateway = ec2.NatGateway(
        'SsmNatGateway',
        Condition="CreateNatGateway",
        DependsOn=ssm_eip_nat_gateway,
        SubnetId=Ref(ssm_public_subnet),
        AllocationId=GetAtt(ssm_eip_nat_gateway, "AllocationId"),
    )

    # Private subnet: second /24 block, same AZ as the public subnet.
    ssm_private_subnet = ec2.Subnet(
        'SsmPrivateSubnet',
        DependsOn=ssm_attach_gw,
        AvailabilityZone=Select(0, GetAZs('')),
        CidrBlock=Select(1, subnet_blocks),
        VpcId=Ref(ssm_vpc),
        Tags=Tags(
            Name="Private Subnet"
        )
    )

    ssm_private_route_table = ec2.RouteTable(
        'SsmPrivateRouteTable',
        VpcId=Ref(ssm_vpc),
    )

    # Private default route goes through the NAT gateway, so it is only
    # created together with it.
    ssm_private_route = ec2.Route(
        'SsmPrivateRoute',
        Condition="CreateNatGateway",
        DestinationCidrBlock=default_route,
        NatGatewayId=Ref(ssm_nat_gateway),
        RouteTableId=Ref(ssm_private_route_table)
    )

    ssm_private_subnet_route_table_association = ec2.SubnetRouteTableAssociation(
        'SsmPrivateSubnetRouteTableAssociation',
        RouteTableId=Ref(ssm_private_route_table),
        SubnetId=Ref(ssm_private_subnet)
    )

    # Allow HTTPS from inside the VPC only -- the interface endpoints
    # below are reached over 443.
    ssm_sg_ingress_rules = [
        ec2.SecurityGroupRule(
            ToPort=443,
            FromPort=443,
            IpProtocol="tcp",
            CidrIp=GetAtt(ssm_vpc, "CidrBlock")
        )
    ]

    ssm_security_group = ec2.SecurityGroup(
        'SsmSecurityGroup',
        GroupName="SsmSG",
        GroupDescription="SG for SSM usage",
        VpcId=Ref(ssm_vpc),
        SecurityGroupIngress=ssm_sg_ingress_rules
    )

    # Gateway endpoint for S3, attached to the private route table only.
    ssm_s3e_vpc_endpoint = ec2.VPCEndpoint(
        'SsmS3VpcEndpoint',
        Condition="CreateVpcEndpoints",
        RouteTableIds=[
            Ref(ssm_private_route_table)
        ],
        ServiceName=vpc_endpoint("s3"),
        VpcId=Ref(ssm_vpc),
        VpcEndpointType="Gateway"
    )

    # Interface endpoints for the three SSM-related services, all in the
    # private subnet with private DNS enabled.
    ssm_ssm_vpc_endpoint = ec2.VPCEndpoint(
        'SsmSsmVpcEndpoint',
        Condition="CreateVpcEndpoints",
        SubnetIds=[Ref(ssm_private_subnet)],
        ServiceName=vpc_endpoint("ssm"),
        VpcId=Ref(ssm_vpc),
        VpcEndpointType="Interface",
        SecurityGroupIds=[
            Ref(ssm_security_group)
        ],
        PrivateDnsEnabled=True
    )

    ssm_ssmmessages_vpc_endpoint = ec2.VPCEndpoint(
        'SsmSsmMessagesVpcEndpoint',
        Condition="CreateVpcEndpoints",
        SubnetIds=[Ref(ssm_private_subnet)],
        ServiceName=vpc_endpoint("ssmmessages"),
        VpcId=Ref(ssm_vpc),
        VpcEndpointType="Interface",
        SecurityGroupIds=[
            Ref(ssm_security_group)
        ],
        PrivateDnsEnabled=True
    )

    ssm_ec2messages_vpc_endpoint = ec2.VPCEndpoint(
        'SsmEc2MessagesVpcEndpoint',
        Condition="CreateVpcEndpoints",
        SubnetIds=[Ref(ssm_private_subnet)],
        ServiceName=vpc_endpoint("ec2messages"),
        VpcId=Ref(ssm_vpc),
        VpcEndpointType="Interface",
        SecurityGroupIds=[
            Ref(ssm_security_group)
        ],
        PrivateDnsEnabled=True
    )

    # Register every resource built above on the template.
    template.add_resource(ssm_vpc)
    template.add_resource(ssm_ig)
    template.add_resource(ssm_attach_gw)
    template.add_resource(ssm_eip_nat_gateway)
    template.add_resource(ssm_public_subnet)
    template.add_resource(ssm_public_route_table)
    template.add_resource(ssm_nat_gateway)
    template.add_resource(ssm_public_route)
    template.add_resource(ssm_public_subnet_route_table_association)
    template.add_resource(ssm_private_subnet)
    template.add_resource(ssm_private_route_table)
    template.add_resource(ssm_private_route)
    template.add_resource(ssm_private_subnet_route_table_association)
    template.add_resource(ssm_security_group)
    template.add_resource(ssm_s3e_vpc_endpoint)
    template.add_resource(ssm_ec2messages_vpc_endpoint)
    template.add_resource(ssm_ssm_vpc_endpoint)
    template.add_resource(ssm_ssmmessages_vpc_endpoint)

    # Conditions can be registered after the resources that reference
    # them; only the rendered template matters.
    for k in conditions:
        template.add_condition(k, conditions[k])

    template.add_output(Output(
        'SsmVpc',
        Description="VPC for SSM",
        Value=Ref(ssm_vpc),
        Export=Export(Join("", [Ref("AWS::StackName"), "-ssm-vpc"]))
    ))

    template.add_output(Output(
        'SsmSg',
        Description="Security Group for SSM",
        Value=Ref(ssm_security_group),
        Export=Export(Join("", [Ref("AWS::StackName"), "-ssm-sg"]))
    ))

    template.add_output(Output(
        'SsmPrivateSubnet',
        Description="Private Subnet for SSM",
        Value=Ref(ssm_private_subnet),
        Export=Export(Join("", [Ref("AWS::StackName"), "-ssm-private-subnet"]))
    ))

    template.add_output(Output(
        'SsmPrivateRouteTable',
        Description="Private RouteTable for SSM",
        Value=Ref(ssm_private_route_table),
        Export=Export(Join("", [Ref("AWS::StackName"), "-ssm-private-route-table"]))
    ))

    with open(os.path.dirname(os.path.realpath(__file__)) + '/ssm_network.yml', 'w') as cf_file:
        cf_file.write(template.to_yaml())

    return template.to_yaml()