Example #1
    def instance_tags(self, index):
        tags1 = {"testing": True, "cloudformation": True, "stack": "eddie", "Name": "myinstance1"}
        tags2 = {"testing": True, "cloudformation": True, "stack": "eddie", "Name": "myinstance2"}
        if index == 1:
            return [ec2.Tag(key, str(value)) for key, value in tags1.items()]
        else:
            return [ec2.Tag(key, str(value)) for key, value in tags2.items()]
Example #2
    def get_tags(self, service_override=None, role_override=None, typ=None):
        """
        Get the default tags for this environment
        :return:
        """
        return [
            ec2.Tag('{}:environment'.format(constants.TAG), self.env),
            ec2.Tag('{}:sysenv'.format(constants.TAG), self.sysenv),
            ec2.Tag(
                '{}:service'.format(constants.TAG),
                service_override if service_override else self.template_name),
            ec2.Tag('{}:role'.format(constants.TAG),
                    role_override if role_override else self.name),
            ec2.Tag('{}:team'.format(constants.TAG), self.TEAM['email']),
        ]
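As a point of reference, here is a minimal standalone sketch (not taken from any of the projects above, and assuming a troposphere version whose ec2 resources accept a plain list of ec2.Tag objects, which is the pattern used throughout these examples) showing how such a tag list ends up in a rendered template:

from troposphere import Template, ec2

template = Template()
template.add_resource(
    ec2.VPC('ExampleVpc',
            CidrBlock='10.0.0.0/16',
            Tags=[ec2.Tag('Name', 'example'), ec2.Tag('environment', 'dev')]))
# Each ec2.Tag serializes to a {"Key": ..., "Value": ...} entry under the resource's Tags property.
print(template.to_json())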
Example #3
def elb_tags(context):
    tags = _generic_tags(context)
    tags.update({
        'Name': '%s--elb' % context['stackname'],  # ll: journal--prod--elb
        'Cluster': context['stackname'],  # ll: journal--prod
    })
    return [ec2.Tag(key, value) for key, value in tags.items()]
Example #4
    def create_nat_instance(self, zone_id, subnet_name):
        t = self.template
        suffix = zone_id
        nat_instance = t.add_resource(
            ec2.Instance(NAT_INSTANCE_NAME % suffix,
                         Condition="UseNatInstances",
                         ImageId=FindInMap('AmiMap', Ref("AWS::Region"),
                                           Ref("ImageName")),
                         SecurityGroupIds=[Ref(DEFAULT_SG),
                                           Ref(NAT_SG)],
                         SubnetId=Ref(subnet_name),
                         InstanceType=Ref('InstanceType'),
                         SourceDestCheck=False,
                         KeyName=Ref('SshKeyName'),
                         Tags=[ec2.Tag('Name', 'nat-gw%s' % suffix)],
                         DependsOn=GW_ATTACH))

        eip = t.add_resource(
            ec2.EIP('NATExternalIp%s' % suffix,
                    Domain='vpc',
                    InstanceId=If("UseNatInstances", Ref(nat_instance),
                                  Ref("AWS::NoValue")),
                    DependsOn=GW_ATTACH))

        t.add_resource(
            ec2.NatGateway(
                NAT_GATEWAY_NAME % suffix,
                Condition="UseNatGateway",
                AllocationId=GetAtt(eip, 'AllocationId'),
                SubnetId=Ref(subnet_name),
            ))

        return nat_instance
Example #5
    def create_vpc(self):
        """Create the VPC resources."""
        template = self.template
        variables = self.get_variables()
        self.template.add_version(AWS_TEMPLATE_VERSION)
        self.template.add_description('Create a VPC')

        vpc = ec2.VPC(
            VPC_NAME,
            CidrBlock=variables['VpcCidr'],
            EnableDnsSupport=True,
            EnableDnsHostnames=True,
            Tags=[ec2.Tag('Name', variables['VpcName'] + '-LATESTREPO')]
        )

        template.add_resource(vpc)

        template.add_output(
            Output(
                OUTPUT_VPC_ID,
                Value=VPC_ID
            )
        )

        # create the internet gateway if needed
        if variables["UseInternetGW"]:
            template.add_resource(ec2.InternetGateway('InternetGatway'))
            template.add_resource(
                ec2.VPCGatewayAttachment(
                    'GatewayAttach',
                    VpcId=Ref(VPC_NAME),
                    InternetGatewayId=Ref('InternetGatway')
                )
            )
Example #6
    def __init__(self, sceptre_user_data):
        self.template = Template()
        self.template.add_description("VPC Stack")
        self.sceptreUserData = sceptre_user_data
        self.environment = self.sceptreUserData['environment']
        self.numAz = self.sceptreUserData['numAz']

        self.add_parameters()

        self.defaultTags = [ec2.Tag('Contact', Ref(self.ownerEmailParam))]
        self.namePrefix = Join(
            "",
            [Ref(self.ownerNameParam), self.sceptreUserData['environment']])

        self.subnets = self.sceptreUserData['subnets']
        self.routeTables = {}

        self.add_vpc()
        self.add_igw()
        self.add_subnets()
        self.add_natgw()
        self.add_route_tables()
        self.add_routes()
        self.associate_route_tables()

        self.add_outputs()
Example #7
    def build_subnet(self, t, subnetName, az, cidr):
        subnet = t.add_resource(
            ec2.Subnet(
                subnetName,
                VpcId=Ref(self.vpc),
                AvailabilityZone=az,
                CidrBlock=cidr,
                Tags=self.defaultTags +
                [ec2.Tag('Name', Join("", [self.namePrefix, subnetName]))]))
        return subnet
Example #8
    def add_vpc(self):
        t = self.template

        self.vpc = t.add_resource(
            ec2.VPC('Vpc',
                    CidrBlock=Ref(self.vpcCidrParam),
                    EnableDnsSupport='true',
                    EnableDnsHostnames='true',
                    Tags=self.defaultTags +
                    [ec2.Tag('Name', Join("", [self.namePrefix, 'Vpc']))]))
Example #9
def instance_tags(context, node=None):
    # NOTE: RDS and Elasticache instances also call this function
    tags = aws.generic_tags(context)
    if node:
        # this instance is part of a cluster
        tags.update({
            'Name':
            '%s--%d' % (context['stackname'], node),  # "journal--prod--1"
            'Node': node,  # "1"
        })
    return [ec2.Tag(key, str(value)) for key, value in tags.items()]
Example #10
    def add_route_tables(self):
        t = self.template

        for subnetDict in self.subnets:
            tableName = subnetDict['tier'] + 'RouteTable'
            routeTable = t.add_resource(
                ec2.RouteTable(
                    tableName,
                    VpcId=Ref(self.vpc),
                    Tags=self.defaultTags +
                    [ec2.Tag('Name', Join("", [self.namePrefix, tableName]))]))
            self.routeTables[tableName] = Ref(routeTable)
Example #11
def instance_tags(context, node=None):
    # NOTE: RDS instances also call this function
    tags = _generic_tags(context)
    if node:
        # this instance is part of a cluster
        tags.update({
            'Name':
            '%s--%d' % (context['stackname'], node),  # ll: journal--prod--1
            'Cluster': context['stackname'],  # ll: journal--prod
            'Node': node,  # ll: 1
        })
    return [ec2.Tag(key, value) for key, value in tags.items()]
Example #12
    def create_nat_instance(self, zone_id, subnet_name):
        t = self.template
        variables = self.get_variables()
        suffix = zone_id
        eip_name = "NATExternalIp%s" % suffix

        if variables["UseNatGateway"]:
            gateway_name = NAT_GATEWAY_NAME % suffix
            t.add_resource(
                ec2.NatGateway(
                    gateway_name,
                    AllocationId=GetAtt(eip_name, 'AllocationId'),
                    SubnetId=Ref(subnet_name),
                ))

            t.add_output(Output(gateway_name + "Id", Value=Ref(gateway_name)))

            # Using NAT Gateways, leave the EIP unattached - it gets allocated
            # to the NAT Gateway in that resource above
            eip_instance_id = Ref("AWS::NoValue")
        else:
            image_id = FindInMap('AmiMap', Ref("AWS::Region"),
                                 Ref("ImageName"))
            instance_name = NAT_INSTANCE_NAME % suffix
            t.add_resource(
                ec2.Instance(instance_name,
                             Condition="UseNatInstances",
                             ImageId=image_id,
                             SecurityGroupIds=[Ref(DEFAULT_SG),
                                               Ref(NAT_SG)],
                             SubnetId=Ref(subnet_name),
                             InstanceType=variables["InstanceType"],
                             SourceDestCheck=False,
                             KeyName=variables["SshKeyName"],
                             Tags=[ec2.Tag('Name', 'nat-gw%s' % suffix)],
                             DependsOn=GW_ATTACH))
            t.add_output(
                Output(instance_name + "PublicHostname",
                       Value=GetAtt(instance_name, "PublicDnsName")))
            t.add_output(
                Output(instance_name + "InstanceId", Value=Ref(instance_name)))

            # Since we're using NAT instances, go ahead and attach the EIP
            # to the NAT instance
            eip_instance_id = Ref(instance_name)

        t.add_resource(
            ec2.EIP(eip_name,
                    Domain='vpc',
                    InstanceId=eip_instance_id,
                    DependsOn=GW_ATTACH))
Example #13
    def add_igw(self):
        t = self.template

        self.igw = t.add_resource(
            ec2.InternetGateway(
                'InternetGateway',
                Tags=self.defaultTags + [
                    ec2.Tag('Name',
                            Join("", [self.namePrefix, 'InternetGateway']))
                ]))

        self.igwAttachment = t.add_resource(
            ec2.VPCGatewayAttachment('InternetGatewayAttachment',
                                     VpcId=Ref(self.vpc),
                                     InternetGatewayId=Ref(self.igw)))
Example #14
    def add_vpn_gateway(self, vpn_conf):
        if 'vpn_name' in vpn_conf:
            vpn_name = vpn_conf.get('vpn_name')
        else:
            vpn_name = self.__class__.__name__ + 'Gateway'

        gateway = self.template.add_resource(
            ec2.VPNGateway('vpnGateway',
                           Type=vpn_conf.get('vpn_type', 'ipsec.1'),
                           Tags=[ec2.Tag(key='Name', value=vpn_name)]))

        gateway_connection = self.template.add_resource(
            ec2.VPNGatewayAttachment('vpnGatewayAttachment',
                                     VpcId=Ref(self.vpc),
                                     InternetGatewayId=Ref(self.igw),
                                     VpnGatewayId=Ref(gateway)))
Example #15
    def add_vpn_gateway(self, vpn_conf):
        """
        Not surprisingly, adds a VPN gateway to the network created by this template.
        @param vpn_conf [dict] - collection of vpn-level configuration values.
        """
        if 'vpn_name' in vpn_conf:
            vpn_name = vpn_conf.get('vpn_name')
        else:
            vpn_name = self.__class__.__name__ + 'Gateway'

        gateway = self.add_resource(
            ec2.VPNGateway('vpnGateway',
                           Type=vpn_conf.get('vpn_type', 'ipsec.1'),
                           Tags=[ec2.Tag(key='Name', value=vpn_name)]))

        gateway_connection = self.add_resource(
            ec2.VPCGatewayAttachment('vpnGatewayAttachment',
                                     VpcId=self.vpc_id,
                                     InternetGatewayId=self.igw,
                                     VpnGatewayId=gateway))
Example #16
def configure_vpc(cfn_template, cluster_name):

    vpc = ec2.VPC("DustVPC")
    vpc.CidrBlock = "10.0.0.0/16"
    vpc.Tags = [ec2.Tag("Name", cluster_name)]
    cfn_template.add_resource(vpc)
    vpc_id = Ref(vpc)

    subnet = ec2.Subnet('dustSubnet')
    subnet.VpcId = vpc_id
    subnet.CidrBlock = "10.0.0.0/24"
    cfn_template.add_resource(subnet)
    vpc_subnet = Ref(subnet)

    net_gateway = ec2.InternetGateway('dustGateway')
    cfn_template.add_resource(net_gateway)

    attach_net_gateway = ec2.VPCGatewayAttachment('dustAttachGateway')
    attach_net_gateway.VpcId = vpc_id
    attach_net_gateway.InternetGatewayId = Ref(net_gateway)
    cfn_template.add_resource(attach_net_gateway)

    route_table = ec2.RouteTable('dustRoutetable')
    route_table.VpcId = vpc_id
    cfn_template.add_resource(route_table)

    route = ec2.Route('dustRoute')
    route.RouteTableId = Ref(route_table)
    route.DestinationCidrBlock = "0.0.0.0/0"
    route.GatewayId = Ref(net_gateway)
    route.DependsOn = "dustAttachGateway"
    cfn_template.add_resource(route)

    attach_route = ec2.SubnetRouteTableAssociation('dustAttachRouteTable')
    attach_route.SubnetId = vpc_subnet
    attach_route.RouteTableId = Ref(route_table)
    cfn_template.add_resource(attach_route)

    return vpc_id, vpc_subnet
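A possible way to call the helper above (the Template instance and cluster name here are illustrative, not part of the original snippet):

from troposphere import Template

template = Template()
vpc_id, vpc_subnet = configure_vpc(template, 'my-cluster')
# vpc_id and vpc_subnet are Ref objects that later resources in the same template can reuse.
print(template.to_json())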
Example #17
    def create_vpc(self):
        """Create the VPC resources."""
        template = self.template
        variables = self.get_variables()
        self.template.add_version(AWS_TEMPLATE_VERSION)
        self.template.add_description('Create a VPC')

        vpc = ec2.VPC(
            VPC_NAME,
            CidrBlock=variables['VpcCidr'],
            EnableDnsSupport=True,
            EnableDnsHostnames=True,
            Tags=[ec2.Tag('Name', variables['VpcName'])]
        )

        template.add_resource(vpc)

        template.add_output(
            Output(
                OUTPUT_VPC_ID,
                Value=VPC_ID
            )
        )
Example #18
    def create_network(self):
        t = self.template
        variables = self.get_variables()
        self.create_gateway()
        t.add_resource(ec2.NetworkAcl('DefaultACL', VpcId=VPC_ID))

        self.create_nat_security_groups()
        subnets = {'public': [], 'private': []}
        net_types = subnets.keys()
        zones = []
        for i in range(variables["AZCount"]):
            az = Select(i, GetAZs(""))
            zones.append(az)
            name_suffix = i
            for net_type in net_types:
                name_prefix = net_type.capitalize()
                subnet_name = "%sSubnet%s" % (name_prefix, name_suffix)
                subnets[net_type].append(subnet_name)
                t.add_resource(
                    ec2.Subnet(subnet_name,
                               AvailabilityZone=az,
                               VpcId=VPC_ID,
                               DependsOn=GW_ATTACH,
                               CidrBlock=variables.get("%sSubnets" %
                                                       name_prefix)[i],
                               Tags=Tags(type=net_type)))

                route_table_name = "%sRouteTable%s" % (name_prefix,
                                                       name_suffix)
                t.add_resource(
                    ec2.RouteTable(route_table_name,
                                   VpcId=VPC_ID,
                                   Tags=[ec2.Tag('type', net_type)]))
                t.add_resource(
                    ec2.SubnetRouteTableAssociation(
                        "%sRouteTableAssociation%s" %
                        (name_prefix, name_suffix),
                        SubnetId=Ref(subnet_name),
                        RouteTableId=Ref(route_table_name)))

                route_name = '%sRoute%s' % (name_prefix, name_suffix)
                if net_type == 'public':
                    # the public subnets are where the NAT instances live,
                    # so their default route needs to go to the AWS
                    # Internet Gateway
                    t.add_resource(
                        ec2.Route(route_name,
                                  RouteTableId=Ref(route_table_name),
                                  DestinationCidrBlock="0.0.0.0/0",
                                  GatewayId=Ref(GATEWAY)))
                    self.create_nat_instance(i, subnet_name)
                else:
                    # Private subnets are where actual instances will live
                    # so their gateway needs to be through the nat instances
                    route = ec2.Route(
                        route_name,
                        RouteTableId=Ref(route_table_name),
                        DestinationCidrBlock='0.0.0.0/0',
                    )
                    if variables["UseNatGateway"]:
                        route.NatGatewayId = Ref(NAT_GATEWAY_NAME %
                                                 name_suffix)
                    else:
                        route.InstanceId = Ref(NAT_INSTANCE_NAME % name_suffix)
                    t.add_resource(route)

        for net_type in net_types:
            t.add_output(
                Output("%sSubnets" % net_type.capitalize(),
                       Value=Join(",", [Ref(sn) for sn in subnets[net_type]])))

            for i, sn in enumerate(subnets[net_type]):
                t.add_output(
                    Output("%sSubnet%d" % (net_type.capitalize(), i),
                           Value=Ref(sn)))

        self.template.add_output(
            Output("AvailabilityZones", Value=Join(",", zones)))

        for i, az in enumerate(zones):
            t.add_output(Output("AvailabilityZone%d" % (i), Value=az))
Example #19
def elb_tags(context):
    tags = aws.generic_tags(context)
    tags.update({
        'Name': '%s--elb' % context['stackname'],  # "journal--prod--elb"
    })
    return [ec2.Tag(key, value) for key, value in tags.items()]
Example #20
registry = ec2.Instance(
    service_name + 'Instance',
    template,
    AvailabilityZone=Ref(az),
    IamInstanceProfile=Ref(registry_profile),
    InstanceType=Ref(registry_instance_type),
    ImageId=ami_id,
    KeyName=Ref(ssh_key),
    SecurityGroupIds=[Ref(ssh_sg), Ref(web_sg)],
    BlockDeviceMappings=[
        ec2.BlockDeviceMapping(DeviceName='/dev/xvda',
                               Ebs=ec2.EBSBlockDevice(
                                   VolumeSize=Ref(registry_block_device_size),
                                   VolumeType='gp2'))
    ],
    Tags=[ec2.Tag('Name', 'docker-registry')],
)

registry_domain = template.add_parameter(
    Parameter('DockerRegistryCertDomainName',
              Type=c.STRING,
              Description='Domain to issue certificate for'))
registry_domain_email = template.add_parameter(
    Parameter('DockerRegistryCertEmail',
              Type=c.STRING,
              Description='Email to use on certificate issue'))

registry_certs = '/opt/registry/security/'
registry_htpasswd = '/opt/registry/htpasswd'
registry_compose = Join('', [
    'version: "2"\n',
Example #21
    ec2.SecurityGroupRule(IpProtocol='tcp', FromPort=80, ToPort=80, CidrIp='0.0.0.0/0'),
    ec2.SecurityGroupRule(IpProtocol='tcp', FromPort=443, ToPort=443, CidrIp='0.0.0.0/0'),
]

instance = ec2.Instance("ec2instance", ImageId="ami-cd0f5cb6", InstanceType="t2.micro")
instance.SecurityGroups = [Ref(securityGroup)]
instance.KeyName = Ref(keyName)
instance.BlockDeviceMappings = [
    ec2.BlockDeviceMapping(
        DeviceName='/dev/sda1',
        Ebs=ec2.EBSBlockDevice(
            VolumeSize=30
        )
    )
]
instance.Tags = [ec2.Tag('Name', Ref(name))]

ipAddress = ec2.EIP('IPAddress')
ipAssociation = ec2.EIPAssociation(
    'EIPAssociation',
    InstanceId=Ref(instance),
    EIP=Ref(ipAddress)
)

# It would be nice to generate the route53 record here as well, but a
# different account has the Hosted Zone configured :(

t.add_resource(instance)
t.add_resource(securityGroup)
t.add_resource(ipAddress)
t.add_resource(ipAssociation)
Example #22
    def configure(self):
        self.vpc_metadata = constants.ENVIRONMENTS[self.env]['vpc']
        self.set_description('VPC, Routes, Base Security Groups, and NATs')

        common_vpc_tags = [ec2.Tag('Name', self.env)
                           ] + self.get_tags(service_override='VPC')

        _vpc = self.add_resource(
            ec2.VPC('VPC',
                    CidrBlock=self.vpc_metadata['cidrblock'],
                    EnableDnsSupport=True,
                    EnableDnsHostnames=True,
                    Tags=common_vpc_tags))

        _dhcp_options = self.add_resource(
            ec2.DHCPOptions('DHCPOptions',
                            DomainName="node.{}.{}".format(
                                self.env, constants.TAG),
                            DomainNameServers=['AmazonProvidedDNS'],
                            Tags=common_vpc_tags))

        self.add_resource(
            ec2.VPCDHCPOptionsAssociation('VPCDHCPOptionsAssociation',
                                          DhcpOptionsId=Ref(_dhcp_options),
                                          VpcId=Ref(_vpc)))

        _internet_gateway = self.add_resource(
            ec2.InternetGateway('InternetGateway',
                                Tags=self.get_tags(
                                    service_override='InternetGateway',
                                    role_override='InternetGateway')))
        self.add_resource(
            ec2.VPCGatewayAttachment('AttachInternetGateway',
                                     VpcId=Ref(_vpc),
                                     InternetGatewayId=Ref(_internet_gateway)))
        # route_tables stores all ec2.RouteTables generated and adds them to
        # a private vpc s3 endpoint
        route_tables = []
        _public_route_table = self.add_resource(
            ec2.RouteTable('PublicRouteTable',
                           VpcId=Ref(_vpc),
                           Tags=self.get_tags(
                               service_override='PublicRouteTable',
                               role_override='PublicRouteTable')))
        route_tables.append(_public_route_table)
        # Public Subnet Routes and ACLs
        self.add_resource(
            ec2.Route('PublicRoute',
                      RouteTableId=Ref(_public_route_table),
                      DestinationCidrBlock='0.0.0.0/0',
                      GatewayId=Ref(_internet_gateway)))
        _public_network_acl = self.add_resource(
            ec2.NetworkAcl('PublicNetworkAcl',
                           VpcId=Ref(_vpc),
                           Tags=self.get_tags(
                               service_override='PublicNetworkAcl',
                               role_override='PublicNetworkAcl')))
        self.add_resource(
            ec2.NetworkAclEntry('IngressPublicNetworkAclEntry',
                                NetworkAclId=Ref(_public_network_acl),
                                RuleNumber=100,
                                Protocol='-1',
                                RuleAction='allow',
                                Egress=False,
                                CidrBlock='0.0.0.0/0',
                                PortRange=ec2.PortRange(From=1, To=65535)))
        self.add_resource(
            ec2.NetworkAclEntry('EgressPublicNetworkAclEntry',
                                NetworkAclId=Ref(_public_network_acl),
                                RuleNumber=101,
                                Protocol='-1',
                                RuleAction='allow',
                                Egress=True,
                                CidrBlock='0.0.0.0/0',
                                PortRange=ec2.PortRange(From=1, To=65535)))
        # Private Network ACLs
        _private_network_acl = self.add_resource(
            ec2.NetworkAcl('PrivateNetworkAcl',
                           VpcId=Ref(_vpc),
                           Tags=self.get_tags(
                               service_override='PrivateNetworkAcl',
                               role_override='PrivateNetworkAcl')))
        self.add_resource(
            ec2.NetworkAclEntry('IngressPrivateNetworkAclEntry',
                                NetworkAclId=Ref(_private_network_acl),
                                RuleNumber=100,
                                Protocol='-1',
                                RuleAction='allow',
                                Egress=False,
                                CidrBlock='0.0.0.0/0',
                                PortRange=ec2.PortRange(From=1, To=65535)))
        self.add_resource(
            ec2.NetworkAclEntry('EgressPrivateNetworkAclEntry',
                                NetworkAclId=Ref(_private_network_acl),
                                RuleNumber=101,
                                Protocol='-1',
                                RuleAction='allow',
                                Egress=True,
                                CidrBlock='0.0.0.0/0',
                                PortRange=ec2.PortRange(From=1, To=65535)))

        # Default security groups - referenced by name by constants/default-security-groups
        # _nat_security_group = self.add_resource(
        #     ec2.SecurityGroup(
        #         'NATSecurityGroup',
        #         VpcId=Ref(_vpc),
        #         GroupDescription='Security Group for NAT Instances',
        #         SecurityGroupIngress=[
        #             {'IpProtocol': '-1', 'FromPort': 1, 'ToPort': 65535, 'CidrIp': self.vpc_metadata['cidrblock']},
        #             {'IpProtocol': '-1', 'FromPort': 1, 'ToPort': 65535, 'CidrIp': '10.0.0.0/8'}
        #         ],
        #         Tags=self.get_tags(service_override='NAT', role_override='NAT-SecurityGroup')
        #     )
        # )
        # _consul_security_group = self.add_resource(
        #     ec2.SecurityGroup(
        #         'ConsulSecurityGroup',
        #         VpcId=Ref(_vpc),
        #         GroupDescription='Security Group for Consul access',
        #         SecurityGroupIngress=[
        #             {'IpProtocol': 'tcp', 'FromPort': 8300, 'ToPort': 8302, 'CidrIp': '10.0.0.0/8'},  # consul server rpc/serf
        #             {'IpProtocol': 'udp', 'FromPort': 8300, 'ToPort': 8302, 'CidrIp': '10.0.0.0/8'},  # consul server rpc/serf
        #             {'IpProtocol': 'tcp', 'FromPort': 8400, 'ToPort': 8400, 'CidrIp': '10.0.0.0/8'},  # consul client rpc
        #             {'IpProtocol': 'tcp', 'FromPort': 8500, 'ToPort': 8500, 'CidrIp': '10.0.0.0/8'},  # consul http
        #             {'IpProtocol': 'tcp', 'FromPort': 8600, 'ToPort': 8600, 'CidrIp': '10.0.0.0/8'},  # consul dns
        #             {'IpProtocol': 'udp', 'FromPort': 8600, 'ToPort': 8600, 'CidrIp': '10.0.0.0/8'}   # consul dns
        #         ],
        #         Tags=[
        #             ec2.Tag('ivy:team', self.TEAM['email']),
        #             ec2.Tag('ivy:environment', self.env),
        #             ec2.Tag('ivy:service', 'Consul'),
        #             ec2.Tag('ivy:role', 'Consul-SecurityGroup')
        #         ]
        #     )
        # )
        # _ssh_security_group = self.add_resource(
        #     ec2.SecurityGroup(
        #         'InternalSecurityGroup',
        #         VpcId=Ref(_vpc),
        #         GroupDescription='Internal Rules',
        #         SecurityGroupIngress=[
        #             {'IpProtocol': 'icmp', 'FromPort': -1, 'ToPort': -1, 'CidrIp': '10.0.0.0/8'},
        #             {'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22, 'CidrIp': '10.0.0.0/8'}
        #         ],
        #         SecurityGroupEgress=[
        #             {'IpProtocol': '-1', 'FromPort': 0, 'ToPort': 65535, 'CidrIp': '0.0.0.0/0'}
        #         ],
        #         Tags=[
        #             ec2.Tag('ivy:team', self.TEAM['email']),
        #             ec2.Tag('ivy:environment', self.env),
        #             ec2.Tag('ivy:service', 'infrastructure'),
        #             ec2.Tag('ivy:role', 'internal')
        #         ]
        #     )
        # )
        #
        # self.add_security_group(Ref(_nat_security_group), Ref(_consul_security_group), Ref(_ssh_security_group))

        ## This sets up all private and public AZs
        for index, zone in enumerate(self.vpc_metadata['zones'], 1):
            _public_subnet = self.add_resource(
                ec2.Subnet(
                    'PublicSubnet{}'.format(index),
                    VpcId=Ref(_vpc),
                    CidrBlock=zone['public-cidrblock'],
                    AvailabilityZone=zone['availability-zone'],
                    MapPublicIpOnLaunch=True,
                    Tags=self.get_tags(
                        service_override='PublicSubnet',
                        role_override='PublicSubnet{}'.format(index)) +
                    [
                        ec2.Tag('Name', '{}-PublicSubnet{}'.format(
                            self.env, index))
                    ]))
            self.add_resource(
                ec2.SubnetRouteTableAssociation(
                    'PublicSubnetRouteTableAssociation{}'.format(index),
                    SubnetId=Ref(_public_subnet),
                    RouteTableId=Ref(_public_route_table)))
            self.add_resource(
                ec2.SubnetNetworkAclAssociation(
                    'PublicSubnetNetworkAclAssociation{}'.format(index),
                    SubnetId=Ref(_public_subnet),
                    NetworkAclId=Ref(_public_network_acl)))

            # Allow VPCs with no private subnets (save money on NAT instances for VPCs with only public instances)
            if zone.get('private-cidrblock'):
                _private_subnet = self.add_resource(
                    ec2.Subnet(
                        'PrivateSubnet{}'.format(index),
                        VpcId=Ref(_vpc),
                        CidrBlock=zone['private-cidrblock'],
                        AvailabilityZone=zone['availability-zone'],
                        Tags=self.get_tags(
                            service_override='PrivateSubnet',
                            role_override='PrivateSubnet{}'.format(index)) + [
                                ec2.Tag(
                                    'Name', '{}-PrivateSubnet{}'.format(
                                        self.env, index))
                            ]))
                # Private subnets get their own route table for AZ-specific NATs
                _private_route_table = self.add_resource(
                    ec2.RouteTable(
                        'PrivateRouteTable{}'.format(index),
                        VpcId=Ref(_vpc),
                        Tags=self.get_tags(
                            service_override='PrivateRouteTable',
                            role_override='PrivateRouteTable{}'.format(
                                index))))
                route_tables.append(_private_route_table)

                # Create an EIP to be used with the NAT instance or gateway
                _nat_eip = self.add_resource(
                    ec2.EIP('NATInstanceEIP{}'.format(index), Domain='vpc'))

                # Use VPC NAT Gateway
                _nat_gw = self.add_resource(
                    ec2.NatGateway('NATGateway{}'.format(index),
                                   AllocationId=GetAtt(_nat_eip,
                                                       "AllocationId"),
                                   SubnetId=Ref(_public_subnet)))
                # Create a route via the NAT GW for the private route table
                self.add_resource(
                    ec2.Route('PrivateRoute{}'.format(index),
                              RouteTableId=Ref(_private_route_table),
                              DestinationCidrBlock='0.0.0.0/0',
                              NatGatewayId=Ref(_nat_gw)))

                self.add_resource(
                    ec2.SubnetRouteTableAssociation(
                        'PrivateSubnetRouteTableAssociation{}'.format(index),
                        SubnetId=Ref(_private_subnet),
                        RouteTableId=Ref(_private_route_table)))
                self.add_resource(
                    ec2.SubnetNetworkAclAssociation(
                        'PrivateSubnetNetworkAclAssociation{}'.format(index),
                        SubnetId=Ref(_private_subnet),
                        NetworkAclId=Ref(_private_network_acl)))

        # use route_table to create a VPC S3 endpoint
        self.add_resource(
            ec2.VPCEndpoint('S3VPCEndpoint',
                            RouteTableIds=[Ref(rt) for rt in route_tables],
                            ServiceName='com.amazonaws.{}.s3'.format(
                                self.region),
                            VpcId=Ref(_vpc)))
Example #23
    def create_network_components(self, network_config=None):
        """
        Method creates a network with the specified number of public and private subnets within the VPC cidr specified by the networkAddresses CloudFormation mapping
        @param network_config [dict] collection of network parameters for creating the VPC network
        """
        if 'network_name' in network_config:
            network_name = network_config.get('network_name')
        else:
            network_name = self.__class__.__name__

        self.template.vpc_id = self.template.add_resource(
            ec2.VPC('vpc',
                    CidrBlock=FindInMap('networkAddresses', 'vpcBase', 'cidr'),
                    EnableDnsSupport=True,
                    EnableDnsHostnames=True,
                    Tags=[ec2.Tag(key='Name', value=network_name)]))

        self.template.vpc_cidr = FindInMap('networkAddresses', 'vpcBase',
                                           'cidr')

        self.template.igw = self.template.add_resource(
            ec2.InternetGateway('vpcIgw'))

        igw_title = 'igwVpcAttachment'
        self.template.igw_attachment = self.template.add_resource(
            ec2.VPCGatewayAttachment(igw_title,
                                     InternetGatewayId=Ref(self.template.igw),
                                     VpcId=Ref(self.template.vpc_id)))

        self.gateway_hook()

        # Iterate through each subnet type for each AZ and add subnets, routing tables, routes, and NATs as necessary
        for index in range(0, int(network_config.get('az_count', 2))):
            for subnet_type in network_config.get('subnet_types',
                                                  ['public', 'private']):

                if subnet_type not in self.template.subnets:
                    self.template.subnets[subnet_type] = []
                if subnet_type not in self.template.mappings[
                        'networkAddresses']['subnet' + str(index)]:
                    continue

                # Create the subnet
                self.template.subnets[subnet_type].append(
                    self.template.add_resource(
                        ec2.Subnet(
                            subnet_type + 'Subnet' + str(index),
                            AvailabilityZone=FindInMap(
                                'RegionMap', Ref('AWS::Region'),
                                'az' + str(index) + 'Name'),
                            VpcId=Ref(self.template.vpc_id),
                            CidrBlock=FindInMap('networkAddresses',
                                                'subnet' + str(index),
                                                subnet_type),
                            Tags=[ec2.Tag(key='network', value=subnet_type)])))

                # Create the routing table
                route_table = self.template.add_resource(
                    ec2.RouteTable(subnet_type + 'Subnet' + str(index) +
                                   'RouteTable',
                                   VpcId=Ref(self.template.vpc_id)))

                # Create the NATs and egress rules
                self.create_subnet_egress(index, route_table, igw_title,
                                          subnet_type)

                # Associate the routing table with the subnet
                self.template.add_resource(
                    ec2.SubnetRouteTableAssociation(
                        subnet_type + 'Subnet' + str(index) +
                        'EgressRouteTableAssociation',
                        RouteTableId=Ref(route_table),
                        SubnetId=Ref(
                            self.template.subnets[subnet_type][index])))

        self.manual_parameter_bindings['vpcId'] = Ref(self.template.vpc_id)
        self.manual_parameter_bindings['vpcCidr'] = self.template.vpc_cidr
        self.manual_parameter_bindings['internetGateway'] = Ref(
            self.template.igw)
Example #24
    def configure(self):
        """
        Returns a Pritunl template
        """
        self.defaults = {'instance_type': 't3.large'}

        self.service = 'pritunl'
        self.set_description('Sets up Pritunl servers')
        self.get_default_security_groups()
        self.get_standard_parameters()
        self.get_standard_policies()

        _vpn_config = constants.ENVIRONMENTS[self.env]['pritunl']
        _global_config = constants.ENVIRONMENTS[self.env]
        _bootstrap_mode = _vpn_config.get('bootstrap_mode', False)

        _bootstrap_ami = get_latest_ami_id(
            self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2', 'amazon')
        _ivy_ami = get_latest_ami_id(self.region, 'ivy-base',
                                     _global_config.get('ami_owner', 'self'))

        self.ami = self.add_parameter(
            Parameter('AMI',
                      Type='String',
                      Description='AMI ID for instances',
                      Default=_bootstrap_ami if _bootstrap_mode else _ivy_ami))

        _public_dns = _vpn_config['public_dns']

        _vpn_name = '{}Pritunl'.format(self.env)

        # We want the preferred subnet only.
        _vpn_subnet = self.get_subnets('public', _preferred_only=True)[0]

        # Add our security group
        _vpn_security_group = self.add_resource(
            ec2.SecurityGroup(
                '{}SecurityGroup'.format(_vpn_name),
                VpcId=self.vpc_id,
                GroupDescription='Security Group for Pritunl {}'.format(
                    _vpn_name),
                SecurityGroupIngress=[
                    {
                        "IpProtocol": "icmp",
                        "FromPort": "-1",
                        "ToPort": "-1",
                        "CidrIp": "0.0.0.0/0"
                    },  # Ping
                    {
                        "IpProtocol": "tcp",
                        "FromPort": "80",
                        "ToPort": "80",
                        "CidrIp": "0.0.0.0/0"
                    },  # HTTP
                    {
                        "IpProtocol": "tcp",
                        "FromPort": "443",
                        "ToPort": "443",
                        "CidrIp": "0.0.0.0/0"
                    },  # HTTPS
                    {
                        "IpProtocol": "tcp",
                        "FromPort": "22",
                        "ToPort": "22",
                        "CidrIp": "0.0.0.0/0"
                    },  # SSH
                    {
                        "IpProtocol": "udp",
                        "FromPort": "10000",
                        "ToPort": "20000",
                        "CidrIp": "0.0.0.0/0"
                    },  # HTTPS/OVPN
                    {
                        "IpProtocol": "tcp",
                        "FromPort": "27017",
                        "ToPort": "27017",
                        "CidrIp": constants.SUPERNET
                    },  # mongodb master
                    {
                        "IpProtocol": "-1",
                        "FromPort": "-1",
                        "ToPort": "-1",
                        "CidrIp": constants.SUPERNET
                    }  # Replies from local VPC
                ],
                SecurityGroupEgress=[{
                    "IpProtocol": "-1",
                    "FromPort": "-1",
                    "ToPort": "-1",
                    "CidrIp": "0.0.0.0/0"
                }]))

        # Add EBS volume if local mongo used
        _data_volume = None
        if _vpn_config.get('local_mongo', False):
            self.add_iam_policy(
                iam.Policy(
                    PolicyName='AttachVolume',
                    PolicyDocument={
                        'Statement': [{
                            'Effect':
                            'Allow',
                            'Resource':
                            '*',
                            'Action': [
                                'ec2:AttachVolume', 'ec2:DeleteSnapshot',
                                'ec2:DescribeTags',
                                'ec2:DescribeVolumeAttribute',
                                'ec2:DescribeVolumeStatus',
                                'ec2:DescribeVolumes', 'ec2:DetachVolume'
                            ]
                        }]
                    }))
            _data_volume = ec2.Volume(
                '{}DataVolume'.format(_vpn_name),
                Size=_vpn_config.get('data_volume_size', 20),
                VolumeType='gp2',
                AvailabilityZone=_vpn_subnet['AvailabilityZone'],
                DeletionPolicy='Retain',
                Tags=self.get_tags(service_override=self.service,
                                   role_override=_vpn_name) +
                [ec2.Tag('Name', _vpn_name + "-datavol")])
            self.add_resource(_data_volume)

        # Add the elastic IP and the ENI for it, then attach it.
        _vpn_eip = self.add_resource(
            ec2.EIP('{}InstanceEIP'.format(_vpn_name), Domain='vpc'))
        _vpn_eni = self.add_resource(
            ec2.NetworkInterface(
                '{}InstanceENI'.format(_vpn_name),
                SubnetId=_vpn_subnet['SubnetId'],
                Description='ENI for {}'.format(_vpn_name),
                GroupSet=[Ref(_vpn_security_group)] + self.security_groups,
                SourceDestCheck=False,
                Tags=self.get_tags(service_override=self.service,
                                   role_override=_vpn_name)))
        self.get_eni_policies()

        self.add_resource(
            ec2.EIPAssociation('{}AssociateVPNInstanceENI'.format(_vpn_name),
                               AllocationId=GetAtt(_vpn_eip, "AllocationId"),
                               NetworkInterfaceId=Ref(_vpn_eni)))

        # Add a route53 DNS name
        if self.get_partition() != 'aws-us-gov':
            self.add_resource(
                route53.RecordSetGroup('{}Route53'.format(_vpn_name),
                                       HostedZoneName=constants.ENVIRONMENTS[
                                           self.env]['route53_zone'],
                                       RecordSets=[
                                           route53.RecordSet(
                                               Name=_public_dns,
                                               ResourceRecords=[Ref(_vpn_eip)],
                                               Type='A',
                                               TTL=600)
                                       ]))

        # Get all route tables in the VPC
        _vpc_route_tables = self.ec2_conn.describe_route_tables(
            Filters=[{
                'Name': 'vpc-id',
                'Values': [self.vpc_id]
            }])['RouteTables']

        # Set up the routing table for the VPC
        # Allow for changing client subnets in constants.py
        for client_subnet in _vpn_config['client_subnets']:
            for route_table in _vpc_route_tables:
                self.add_resource(
                    ec2.Route('{}Route{}{}'.format(
                        _vpn_name,
                        client_subnet.translate({
                            ord("."): "",
                            ord("/"): ""
                        }), route_table['RouteTableId'].replace('-', '')),
                              RouteTableId=route_table['RouteTableId'],
                              DestinationCidrBlock=client_subnet,
                              NetworkInterfaceId=Ref(_vpn_eni)))

        _mongodb = _vpn_config.get('mongodb')
        _server_id = _vpn_config['server_id']

        _userdata_template = self.get_cloudinit_template(
            _tpl_name="pritunl_bootstrap" if _bootstrap_mode else None,
            replacements=(('__PROMPT_COLOR__', self.prompt_color()),
                          ('__SERVER_ID__', _server_id), ('__SERVICE__',
                                                          self.service),
                          ('__MONGODB__', _mongodb if _mongodb else '')))

        _userdata = Sub(
            _userdata_template.replace(
                '${', '${!')  # Replace bash brackets with CFN escaped style
            .replace(
                '{#', '${'
            ),  # Replace rain-style CFN escapes with proper CFN brackets
            {
                'CFN_ENI_ID': Ref(_vpn_eni),
                'CFN_EBS_ID': Ref(_data_volume) if _data_volume else ''
            })

        _vpn_launch_configuration = self.add_resource(
            autoscaling.LaunchConfiguration(
                '{}LaunchConfiguration'.format(_vpn_name),
                AssociatePublicIpAddress=True,
                KeyName=Ref(self.keypair_name),
                ImageId=Ref(self.ami),
                InstanceType=Ref(self.instance_type),
                InstanceMonitoring=False,
                IamInstanceProfile=Ref(self.instance_profile),
                UserData=Base64(_userdata)))
        self.add_resource(
            autoscaling.AutoScalingGroup(
                '{}ASGroup'.format(_vpn_name),
                AvailabilityZones=[_vpn_subnet['AvailabilityZone']],
                HealthCheckType='EC2',
                LaunchConfigurationName=Ref(_vpn_launch_configuration),
                MinSize=0,
                MaxSize=1,
                VPCZoneIdentifier=[_vpn_subnet['SubnetId']],
                Tags=self.get_autoscaling_tags(service_override=self.service,
                                               role_override=_vpn_name) +
                [autoscaling.Tag('Name', _vpn_name, True)]))
Example #25
        "ap-southeast-1": {
            "AMI": "ami-74dda626"
        },
        "ap-northeast-1": {
            "AMI": "ami-dcfa4edd"
        }
    })

ec2_instance = template.add_resource(
    ec2.Instance(
        "Ec2Instance",
        ImageId='ami-25681456',
        InstanceType='t2.micro',
        KeyName='dev-ec2',
        SecurityGroupIds=["sg-8eec36e8"],  # hard coded to glomex default sg
        SubnetId='subnet-b6eaa5d2',  # hard coded to glomex subnet eu-west-1a
        UserData=Base64("80"),
        Tags=[ec2.Tag('Name', 'gcdt-test-ec2-ebs-tagging')]))

template.add_output([
    Output(
        "InstanceId",
        Description="InstanceId of the newly created EC2 instance",
        Value=Ref(ec2_instance),
    ),
])


def generate_template():
    return template.to_json()
Example #26
    def configure(self):
        """
        Returns a Nexus template
        """
        self.defaults = {'instance_type': 't3.xlarge'}

        self.service = 'nexus'
        self.set_description('Sets up Nexus repository manager servers')
        self.get_default_security_groups()
        self.get_standard_parameters()
        self.get_standard_policies()
        self.ami = self.add_parameter(
            Parameter('AMI',
                      Type='String',
                      Description='AMI ID for instances',
                      Default=get_latest_ami_id(
                          self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2',
                          'amazon')))

        config = constants.ENVIRONMENTS[self.env][self.service]

        # We want the preferred subnet only.
        subnet = self.get_subnets('private', _preferred_only=True)[0]

        # Add our security group
        security_group = self.add_resource(
            ec2.SecurityGroup(
                '{}SecurityGroup'.format(self.name),
                VpcId=self.vpc_id,
                GroupDescription='Security Group for {}'.format(self.name),
                SecurityGroupIngress=[
                    {
                        "IpProtocol": "tcp",
                        "FromPort": "80",
                        "ToPort": "80",
                        "CidrIp": constants.SUPERNET
                    },  # HTTP
                    {
                        "IpProtocol": "tcp",
                        "FromPort": "443",
                        "ToPort": "443",
                        "CidrIp": constants.SUPERNET
                    },  # HTTPS
                    # {"IpProtocol": "tcp", "FromPort": "8081", "ToPort": "8081", "CidrIp": constants.SUPERNET},  # NexusRM Direct (disabled!)
                ],
                SecurityGroupEgress=[{
                    "IpProtocol": "-1",
                    "FromPort": "-1",
                    "ToPort": "-1",
                    "CidrIp": "0.0.0.0/0"
                }]))

        # Add our EBS data volume
        data_volume = ec2.Volume(
            '{}DataVolume'.format(self.name),
            Size=config.get('data_volume_size', 20),
            VolumeType='gp2',
            AvailabilityZone=subnet['AvailabilityZone'],
            DeletionPolicy='Retain',
            Tags=self.get_tags(service_override=self.service,
                               role_override=self.name) +
            [ec2.Tag('Name', self.name + "-datavol")])
        self.add_resource(data_volume)
        self.add_iam_policy(
            iam.Policy(PolicyName='AttachVolume',
                       PolicyDocument={
                           'Statement': [{
                               'Effect':
                               'Allow',
                               'Resource':
                               '*',
                               'Action': [
                                   'ec2:AttachVolume', 'ec2:DeleteSnapshot',
                                   'ec2:DescribeTags',
                                   'ec2:DescribeVolumeAttribute',
                                   'ec2:DescribeVolumeStatus',
                                   'ec2:DescribeVolumes', 'ec2:DetachVolume'
                               ]
                           }]
                       }))

        # Add a ENI for static IP address
        eni = self.add_resource(
            ec2.NetworkInterface(
                '{}InstanceENI'.format(self.name),
                SubnetId=subnet['SubnetId'],
                Description='ENI for {}'.format(self.name),
                GroupSet=[Ref(security_group)] + self.security_groups,
                SourceDestCheck=True,
                Tags=self.get_tags(service_override=self.service,
                                   role_override=self.name)))
        self.get_eni_policies()

        # Add a route53 A record for the main Nexus host
        route53_zone = constants.ENVIRONMENTS[self.env]['route53_zone']
        private_dns = config.get('private_dns',
                                 'nexus.{}'.format(route53_zone))
        self.add_resource(
            route53.RecordSetGroup(
                '{}Route53'.format(self.name),
                HostedZoneName=route53_zone,
                RecordSets=[
                    route53.RecordSet(Name=private_dns,
                                      ResourceRecords=[
                                          GetAtt(eni,
                                                 'PrimaryPrivateIpAddress')
                                      ],
                                      Type='A',
                                      TTL=600)
                ]))
        # Add CNAME records for each repository, pointing to the main
        for repository in config['repositories']:
            self.add_resource(
                route53.RecordSetGroup(
                    '{}{}Route53'.format(self.name, self.cfn_name(repository)),
                    HostedZoneName=route53_zone,
                    RecordSets=[
                        route53.RecordSet(Name='{}.{}'.format(
                            repository, route53_zone),
                                          ResourceRecords=[private_dns],
                                          Type='CNAME',
                                          TTL=600)
                    ]))

        # Add S3 IAM role for nexus blobstore access
        self.add_iam_policy(
            iam.Policy(
                PolicyName='S3Access',
                PolicyDocument={
                    'Statement': [{
                        "Effect":
                        "Allow",
                        "Action": [
                            "s3:ListBucket", "s3:GetBucketLocation",
                            "s3:ListBucketMultipartUploads",
                            "s3:ListBucketVersions", "s3:GetBucketAcl",
                            "s3:GetLifecycleConfiguration",
                            "s3:PutLifecycleConfiguration"
                        ],
                        "Resource": [
                            'arn:{}:s3:::{}'.format(self.get_partition(),
                                                    config['s3_bucket'])
                        ]
                    }, {
                        "Effect":
                        "Allow",
                        "Action": [
                            "s3:GetObject", "s3:PutObject", "s3:DeleteObject",
                            "s3:AbortMultipartUpload",
                            "s3:ListMultipartUploadParts",
                            "s3:GetObjectTagging", "s3:PutObjectTagging",
                            "s3:GetObjectTagging", "s3:DeleteObjectTagging"
                        ],
                        "Resource": [
                            'arn:{}:s3:::{}/*'.format(self.get_partition(),
                                                      config['s3_bucket'])
                        ]
                    }]
                }))

        # Substitute the userdata template and feed it to CFN
        userdata_template = self.get_cloudinit_template(replacements=(
            ('__PROMPT_COLOR__', self.prompt_color()),
            ('__SERVICE__', self.service),
            ('__DEFAULT_DOMAIN__',
             route53_zone[:-1]),  # route53_zone has a trailing '.', strip it
            ('__TOP_DOMAIN__', constants.ROOT_ROUTE53_ZONE),
            # ('__REPOSITORIES__', " ".join(['"{}"'.format(x) for x in config['repositories']]))  # '"abc" "def" "ghi"'
        ))
        userdata = Sub(
            userdata_template.replace(
                '${', '${!')  # Replace bash brackets with CFN escaped style
            .replace(
                '{#', '${'
            ),  # Replace rain-style CFN escapes with proper CFN brackets
            {
                'CFN_ENI_ID': Ref(eni),
                'CFN_EBS_ID': Ref(data_volume)
            })

        launch_configuration = self.add_resource(
            autoscaling.LaunchConfiguration(
                '{}LaunchConfiguration'.format(self.name),
                AssociatePublicIpAddress=False,
                KeyName=Ref(self.keypair_name),
                ImageId=Ref(self.ami),
                InstanceType=Ref(self.instance_type),
                InstanceMonitoring=False,
                IamInstanceProfile=Ref(self.instance_profile),
                UserData=Base64(userdata)))
        self.add_resource(
            autoscaling.AutoScalingGroup(
                '{}ASGroup'.format(self.name),
                AvailabilityZones=[subnet['AvailabilityZone']],
                HealthCheckType='EC2',
                LaunchConfigurationName=Ref(launch_configuration),
                MinSize=0,
                MaxSize=1,
                DesiredCapacity=0,
                VPCZoneIdentifier=[subnet['SubnetId']],
                Tags=self.get_autoscaling_tags(service_override=self.service,
                                               role_override=self.name) +
                [autoscaling.Tag('Name', self.name, True)]))
Example #27
    def create_network_components(self, network_config, nat_config):
        """
        Method creates a network with the specified number of public and private subnets within the
        VPC cidr specified by the networkAddresses CloudFormation mapping.
        @param network_config [dict] collection of network parameters for creating the VPC network
        """

        ## make VPC
        if 'network_name' in network_config:
            network_name = network_config.get('network_name')
        else:
            network_name = self.__class__.__name__

        self._vpc_cidr = FindInMap('networkAddresses', 'vpcBase', 'cidr')
        self.add_output(
            Output('networkAddresses',
                   Value=str(self.mappings['networkAddresses'])))
        self.add_output(Output('vpcCidr', Value=self.vpc_cidr))

        self._vpc_id = self.add_resource(
            ec2.VPC('vpc',
                    CidrBlock=self._vpc_cidr,
                    EnableDnsSupport=True,
                    EnableDnsHostnames=True,
                    Tags=[ec2.Tag(key='Name', value=network_name)]))

        self.add_output(Output('vpcId', Value=self.vpc_id))

        self._igw = self.add_resource(ec2.InternetGateway('vpcIgw'))
        self.add_output(Output('internetGateway', Value=self.igw))

        # Attach the internet gateway to the VPC
        igw_title = 'igwVpcAttachment'
        self._vpc_gateway_attachment = self.add_resource(
            ec2.VPCGatewayAttachment(igw_title,
                                     InternetGatewayId=self.igw,
                                     VpcId=self.vpc_id))

        self.add_output(
            Output('igwVpcAttachment', Value=self.vpc_gateway_attachment))

        self.gateway_hook()

        # Create the subnets
        for index, subnet_config in enumerate(self._subnet_configs):
            subnet_type = subnet_config.get('type', 'private')
            subnet_size = subnet_config.get('size', '22')
            subnet_layer = subnet_config.get('name', 'subnet')
            subnet_az = subnet_config.get('AZ', '-1')
            subnet_cidr = subnet_config.get('cidr', 'ERROR')
            az_key = 'AZ{}'.format(subnet_az)

            # Create the subnet
            subnet_name = subnet_layer + 'AZ' + str(subnet_az)
            subnet = self.add_resource(
                ec2.Subnet(subnet_name,
                           AvailabilityZone=Select(subnet_az, GetAZs()),
                           VpcId=self.vpc_id,
                           CidrBlock=subnet_cidr,
                           Tags=[
                               ec2.Tag(key='network', value=subnet_type),
                               ec2.Tag(key='Name', value=subnet_name)
                           ]))

            self.add_output(Output(subnet_name, Value=self._ref_maybe(subnet)))

            # Create the routing table
            route_table = self.add_resource(
                ec2.RouteTable(subnet_name + 'RouteTable', VpcId=self.vpc_id))

            # Create the NATs and egress rules
            self.create_subnet_egress(subnet_az, route_table, igw_title,
                                      subnet_type, subnet_layer, nat_config)

            # Associate the routing table with the subnet
            self.add_resource(
                ec2.SubnetRouteTableAssociation(subnet_name +
                                                'EgressRouteTableAssociation',
                                                RouteTableId=Ref(route_table),
                                                SubnetId=Ref(subnet)))
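
    # A hedged sketch of the input this method consumes: each entry in
    # self._subnet_configs is read with .get() for 'type', 'size', 'name', 'AZ' and
    # 'cidr', and network_config may carry an optional 'network_name'. All values
    # below are invented for illustration only.
    EXAMPLE_NETWORK_CONFIG = {'network_name': 'exampleVpc'}
    EXAMPLE_SUBNET_CONFIGS = [
        {'name': 'public', 'type': 'public', 'AZ': 0, 'size': '24', 'cidr': '10.0.0.0/24'},
        {'name': 'public', 'type': 'public', 'AZ': 1, 'size': '24', 'cidr': '10.0.1.0/24'},
        {'name': 'private', 'type': 'private', 'AZ': 0, 'size': '22', 'cidr': '10.0.4.0/22'},
        {'name': 'private', 'type': 'private', 'AZ': 1, 'size': '22', 'cidr': '10.0.8.0/22'},
    ]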
Example #28
0
    def generate_load_balancer(self, lb_name, typ, port, cert_arn, log_bucket):

        lb_name = self.cfn_name(lb_name)

        if typ not in ['internal', 'internet-facing']:
            raise ValueError("Load balancer type must be 'internal' or 'internet-facing'")

        # Use the system security groups (automatic) if internal, else use the limited external security group
        sg = self.security_groups if typ == 'internal' else [Ref(self.elb_external_security_group)]

        return elasticloadbalancing.LoadBalancer(
            lb_name,
            AccessLoggingPolicy=elasticloadbalancing.AccessLoggingPolicy(
                EmitInterval=60,
                Enabled=True,
                S3BucketName=log_bucket,
                S3BucketPrefix="ELB/{}/{}".format(self.env, lb_name)
            ),
            ConnectionDrainingPolicy=elasticloadbalancing.ConnectionDrainingPolicy(
                Enabled=True,
                Timeout=60
            ),
            ConnectionSettings=elasticloadbalancing.ConnectionSettings(
                IdleTimeout=3600
            ),
            CrossZone=False,
            HealthCheck=elasticloadbalancing.HealthCheck(
                HealthyThreshold=5,
                Interval=30,
                Target='HTTP:{}/ping'.format(port),
                Timeout=5,
                UnhealthyThreshold=2
            ),
            LoadBalancerName=lb_name,
            Listeners=[
                elasticloadbalancing.Listener(
                    InstancePort=port,
                    InstanceProtocol='HTTP',
                    LoadBalancerPort=80,
                    Protocol='HTTP'
                ),
                elasticloadbalancing.Listener(
                    InstancePort=port,
                    InstanceProtocol='HTTP',
                    LoadBalancerPort=443,
                    Protocol='HTTPS',
                    SSLCertificateId=cert_arn
                ),
                elasticloadbalancing.Listener(
                    InstancePort=port,
                    InstanceProtocol='TCP',
                    LoadBalancerPort=8443,
                    Protocol='SSL',
                    SSLCertificateId=cert_arn
                )
            ],
            Policies=[
                elasticloadbalancing.Policy(
                    PolicyName='ELBSecurityPolicyNoTLS10',
                    PolicyType='SSLNegotiationPolicyType',
                    Attributes=[{
                        'Name': 'Reference-Security-Policy',
                        # Disable TLS 1.0 and migrate to TLS 1.2 default for external ELB
                        'Value': 'ELBSecurityPolicy-TLS-1-2-2017-01'
                    }]
                )
            ],
            Scheme=typ,
            SecurityGroups=sg,
            Subnets=[s['SubnetId'] for s in self.get_subnets('private' if typ == 'internal' else 'public')],
            Tags=self.get_tags(
                service_override="InternalELB" if typ == 'internal' else "ExternalELB",
                role_override=lb_name
            ) + [ec2.Tag('Name', lb_name)]
        )
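
    # A hedged call-site sketch: generate_load_balancer() only builds and returns the
    # resource, so the caller still has to add it to the template. The method name,
    # port, certificate ARN and log bucket below are invented for illustration.
    def wire_internal_elb_example(self):
        elb = self.generate_load_balancer(
            'exampleInternalElb',
            'internal',
            8080,
            cert_arn='arn:aws:acm:us-east-1:123456789012:certificate/example',
            log_bucket='example-elb-access-logs')
        return self.add_resource(elb)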
Example #29
0
    def generate_app_load_balancer(self, lb_name, typ, port, cert_arn, log_bucket):

        lb_name = self.cfn_name(lb_name)

        if len(lb_name) >= 32:
            alb_name = lb_name[0:31]
        else:
            alb_name = lb_name

        if len(lb_name + 'TG') >= 32:
            tg_name = '{}TG'.format(lb_name[0:29])
        else:
            tg_name = '{}TG'.format(lb_name)

        if typ not in ['internal', 'internet-facing']:
            raise ValueError("Load balancer type must be 'internal' or 'internet-facing'")

        # Use the system security groups (automatic) if internal, else use the limited external security group
        sg = self.security_groups if typ == 'internal' else [Ref(self.elb_external_security_group)]

        _alb = elasticloadbalancingv2.LoadBalancer(
            alb_name,
            Name=alb_name,
            IpAddressType='ipv4',
            LoadBalancerAttributes=[
                elasticloadbalancingv2.LoadBalancerAttributes(
                    Key='access_logs.s3.enabled',
                    Value='true'
                ),
                elasticloadbalancingv2.LoadBalancerAttributes(
                    Key='access_logs.s3.bucket',
                    Value=log_bucket
                ),
                elasticloadbalancingv2.LoadBalancerAttributes(
                    Key='access_logs.s3.prefix',
                    Value="ELB/{}/{}".format(self.env, lb_name)
                ),
                elasticloadbalancingv2.LoadBalancerAttributes(
                    Key='deletion_protection.enabled',
                    Value='false'
                ),
                elasticloadbalancingv2.LoadBalancerAttributes(
                    Key='idle_timeout.timeout_seconds',
                    Value='60'
                ),
                elasticloadbalancingv2.LoadBalancerAttributes(
                    Key='routing.http.drop_invalid_header_fields.enabled',
                    Value='false'
                ),
                elasticloadbalancingv2.LoadBalancerAttributes(
                    Key='routing.http2.enabled',
                    Value='true'
                )
            ],
            Scheme=typ,
            SecurityGroups=sg,
            Subnets=[s['SubnetId'] for s in self.get_subnets('private' if typ == 'internal' else 'public')],
            Type='application',
            Tags=self.get_tags(
                service_override="InternalALB" if typ == 'internal' else "ExternalALB",
                role_override=lb_name
            ) + [ec2.Tag('Name', lb_name)]
        )

        _target_group = elasticloadbalancingv2.TargetGroup(
            tg_name,
            Name=tg_name,
            HealthCheckIntervalSeconds=30,
            HealthCheckPath='/ping',
            HealthCheckPort=port,
            HealthCheckProtocol='HTTP',
            HealthCheckTimeoutSeconds=5,
            HealthyThresholdCount=5,
            UnhealthyThresholdCount=2,
            Matcher=elasticloadbalancingv2.Matcher(
                HttpCode='200'
            ),
            Port=port,
            Protocol='HTTP',
            TargetGroupAttributes=[
                elasticloadbalancingv2.TargetGroupAttribute(
                    Key='deregistration_delay.timeout_seconds',
                    Value='300'
                ),
                elasticloadbalancingv2.TargetGroupAttribute(
                    Key='stickiness.enabled',
                    Value='false'
                ),
                elasticloadbalancingv2.TargetGroupAttribute(
                    Key='stickiness.type',
                    Value='lb_cookie'
                ),
                elasticloadbalancingv2.TargetGroupAttribute(
                    Key='load_balancing.algorithm.type',
                    Value='least_outstanding_requests'
                )
            ],
            TargetType='instance',
            VpcId=self.vpc_id,
            Tags=self.get_tags(
                service_override="InternalALB" if typ == 'internal' else "ExternalALB",
                role_override=lb_name
            ) + [ec2.Tag('Name', '{}TG'.format(lb_name))]
        )

        _listener_80 = self.add_resource(elasticloadbalancingv2.Listener(
            '{}80Listener'.format(lb_name),
            Port='80',
            Protocol='HTTP',
            LoadBalancerArn=Ref(_alb),
            DefaultActions=[
                elasticloadbalancingv2.Action(
                    Type='redirect',
                    RedirectConfig=elasticloadbalancingv2.RedirectConfig(
                        Host='#{host}',
                        Path='/#{path}',
                        Port='443',
                        Protocol='HTTPS',
                        Query='#{query}',
                        StatusCode='HTTP_301'
                    )
                )
            ],
        ))
        _listener_443 = self.add_resource(elasticloadbalancingv2.Listener(
            '{}443Listener'.format(lb_name),
            Port='443',
            Protocol='HTTPS',
            LoadBalancerArn=Ref(_alb),
            SslPolicy='ELBSecurityPolicy-2016-08',
            Certificates=[
                elasticloadbalancingv2.Certificate(
                    CertificateArn=cert_arn
                )
            ],
            DefaultActions=[
                elasticloadbalancingv2.Action(
                    Type='forward',
                    TargetGroupArn=Ref(_target_group)
                )
            ],
        ))
        return _alb, _target_group
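
    # A hedged call-site sketch: the HTTP/HTTPS listeners are added inside the method,
    # but the ALB and target group are returned and still need to be registered with
    # the template. All names and the certificate ARN below are invented.
    def wire_internal_alb_example(self):
        alb, target_group = self.generate_app_load_balancer(
            'exampleInternalAlb',
            'internal',
            8080,
            cert_arn='arn:aws:acm:us-east-1:123456789012:certificate/example',
            log_bucket='example-alb-access-logs')
        self.add_resource(alb)
        self.add_resource(target_group)
        return alb, target_group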
Example #30
0
    def create_nat_instance(self,
                            nat_subnet_number,
                            nat_instance_type=None,
                            nat_subnet_type='public'):
        '''
        Create a NAT instance in the specified subnet; the subnet of the opposite type
        with the same number egresses its traffic through this instance.
        @param nat_subnet_number [int] index of the subnet/AZ the NAT instance will be deployed into
        @param nat_instance_type [string | troposphere.Parameter] instance type to launch the NAT instance with (defaults to m1.small)
        @param nat_subnet_type [string] type of subnet (public/private) the NAT instance is deployed into; the opposite-type subnet routes its egress traffic through it
        '''
        if nat_subnet_type == 'public':
            source_name = 'private'
        else:
            source_name = 'public'

        if nat_instance_type is None:
            nat_instance_type = 'm1.small'
        elif isinstance(nat_instance_type, Parameter):
            nat_instance_type = Ref(nat_instance_type)

        nat_sg = self.template.add_resource(
            ec2.SecurityGroup(
                nat_subnet_type + 'Subnet' + str(nat_subnet_number) +
                'SecurityGroup',
                VpcId=Ref(self.vpc),
                GroupDescription='Security Group for the ' + nat_subnet_type +
                ' subnet for az ' + str(nat_subnet_number),
                SecurityGroupIngress=[
                    ec2.SecurityGroupRule(
                        IpProtocol='-1',
                        FromPort='-1',
                        ToPort='-1',
                        CidrIp=FindInMap('networkAddresses',
                                         'subnet' + str(nat_subnet_number),
                                         source_name))
                ],
                SecurityGroupEgress=[
                    ec2.SecurityGroupRule(IpProtocol='-1',
                                          FromPort='-1',
                                          ToPort='-1',
                                          CidrIp='0.0.0.0/0')
                ]))

        return self.template.add_resource(
            ec2.Instance(
                nat_subnet_type + str(nat_subnet_number) + 'NATInstance',
                AvailabilityZone=FindInMap(
                    'RegionMap', Ref('AWS::Region'),
                    'az' + str(nat_subnet_number) + 'Name'),
                ImageId=FindInMap('RegionMap', Ref('AWS::Region'), 'natAmiId'),
                KeyName=Ref(self.template.parameters['ec2Key']),
                InstanceType=nat_instance_type,
                Tags=[ec2.Tag('Name', 'NAT')],
                NetworkInterfaces=[
                    ec2.NetworkInterfaceProperty(
                        AssociatePublicIpAddress=True,
                        DeleteOnTermination=True,
                        DeviceIndex='0',
                        GroupSet=[Ref(nat_sg)],
                        SubnetId=Ref(self.local_subnets[nat_subnet_type][str(
                            nat_subnet_number)]))
                ],
                SourceDestCheck=False))
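
    # A hedged usage sketch: one NAT instance per public subnet/AZ, sized by an assumed
    # 'natInstanceType' template Parameter (the method falls back to m1.small when the
    # parameter is absent). The method name and parameter name are invented.
    def wire_nat_instances_example(self):
        for az_index in sorted(self.local_subnets['public']):
            self.create_nat_instance(
                nat_subnet_number=az_index,
                nat_instance_type=self.template.parameters.get('natInstanceType'),
                nat_subnet_type='public')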