Example #1
    def configure(self):
        """
        Returns a Pritunl template
        """
        self.defaults = {'instance_type': 't3.large'}

        self.service = 'pritunl'
        self.set_description('Sets up Pritunl servers')
        self.get_default_security_groups()
        self.get_standard_parameters()
        self.get_standard_policies()

        _vpn_config = constants.ENVIRONMENTS[self.env]['pritunl']
        _global_config = constants.ENVIRONMENTS[self.env]
        _bootstrap_mode = _vpn_config.get('bootstrap_mode', False)

        _bootstrap_ami = get_latest_ami_id(
            self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2', 'amazon')
        _ivy_ami = get_latest_ami_id(self.region, 'ivy-base',
                                     _global_config.get('ami_owner', 'self'))

        self.ami = self.add_parameter(
            Parameter('AMI',
                      Type='String',
                      Description='AMI ID for instances',
                      Default=_bootstrap_ami if _bootstrap_mode else _ivy_ami))

        _public_dns = _vpn_config['public_dns']

        _vpn_name = '{}Pritunl'.format(self.env)

        # We want the preferred subnet only.
        _vpn_subnet = self.get_subnets('public', _preferred_only=True)[0]

        # Add our security group
        _vpn_security_group = self.add_resource(
            ec2.SecurityGroup(
                '{}SecurityGroup'.format(_vpn_name),
                VpcId=self.vpc_id,
                GroupDescription='Security Group for Pritunl {}'.format(
                    _vpn_name),
                SecurityGroupIngress=[
                    {
                        "IpProtocol": "icmp",
                        "FromPort": "-1",
                        "ToPort": "-1",
                        "CidrIp": "0.0.0.0/0"
                    },  # Ping
                    {
                        "IpProtocol": "tcp",
                        "FromPort": "80",
                        "ToPort": "80",
                        "CidrIp": "0.0.0.0/0"
                    },  # HTTP
                    {
                        "IpProtocol": "tcp",
                        "FromPort": "443",
                        "ToPort": "443",
                        "CidrIp": "0.0.0.0/0"
                    },  # HTTPS
                    {
                        "IpProtocol": "tcp",
                        "FromPort": "22",
                        "ToPort": "22",
                        "CidrIp": "0.0.0.0/0"
                    },  # SSH
                    {
                        "IpProtocol": "udp",
                        "FromPort": "10000",
                        "ToPort": "20000",
                        "CidrIp": "0.0.0.0/0"
                    },  # HTTPS/OVPN
                    {
                        "IpProtocol": "tcp",
                        "FromPort": "27017",
                        "ToPort": "27017",
                        "CidrIp": constants.SUPERNET
                    },  # mongodb master
                    {
                        "IpProtocol": "-1",
                        "FromPort": "-1",
                        "ToPort": "-1",
                        "CidrIp": constants.SUPERNET
                    }  # Replies from local VPC
                ],
                SecurityGroupEgress=[{
                    "IpProtocol": "-1",
                    "FromPort": "-1",
                    "ToPort": "-1",
                    "CidrIp": "0.0.0.0/0"
                }]))

        # Add EBS volume if local mongo used
        _data_volume = None
        if _vpn_config.get('local_mongo', False):
            self.add_iam_policy(
                iam.Policy(
                    PolicyName='AttachVolume',
                    PolicyDocument={
                        'Statement': [{
                            'Effect':
                            'Allow',
                            'Resource':
                            '*',
                            'Action': [
                                'ec2:AttachVolume', 'ec2:DeleteSnapshot',
                                'ec2:DescribeTags',
                                'ec2:DescribeVolumeAttribute',
                                'ec2:DescribeVolumeStatus',
                                'ec2:DescribeVolumes', 'ec2:DetachVolume'
                            ]
                        }]
                    }))
            _data_volume = ec2.Volume(
                '{}DataVolume'.format(_vpn_name),
                Size=_vpn_config.get('data_volume_size', 20),
                VolumeType='gp2',
                AvailabilityZone=_vpn_subnet['AvailabilityZone'],
                DeletionPolicy='Retain',
                Tags=self.get_tags(service_override=self.service,
                                   role_override=_vpn_name) +
                [ec2.Tag('Name', _vpn_name + "-datavol")])
            self.add_resource(_data_volume)

        # Add the elastic IP and the ENI for it, then attach it.
        _vpn_eip = self.add_resource(
            ec2.EIP('{}InstanceEIP'.format(_vpn_name), Domain='vpc'))
        _vpn_eni = self.add_resource(
            ec2.NetworkInterface(
                '{}InstanceENI'.format(_vpn_name),
                SubnetId=_vpn_subnet['SubnetId'],
                Description='ENI for {}'.format(_vpn_name),
                GroupSet=[Ref(_vpn_security_group)] + self.security_groups,
                SourceDestCheck=False,
                Tags=self.get_tags(service_override=self.service,
                                   role_override=_vpn_name)))
        self.get_eni_policies()

        self.add_resource(
            ec2.EIPAssociation('{}AssociateVPNInstanceENI'.format(_vpn_name),
                               AllocationId=GetAtt(_vpn_eip, "AllocationId"),
                               NetworkInterfaceId=Ref(_vpn_eni)))

        # Add a route53 DNS name
        if self.get_partition() != 'aws-us-gov':
            self.add_resource(
                route53.RecordSetGroup('{}Route53'.format(_vpn_name),
                                       HostedZoneName=constants.ENVIRONMENTS[
                                           self.env]['route53_zone'],
                                       RecordSets=[
                                           route53.RecordSet(
                                               Name=_public_dns,
                                               ResourceRecords=[Ref(_vpn_eip)],
                                               Type='A',
                                               TTL=600)
                                       ]))

        # Get all route tables in the VPC
        _vpc_route_tables = self.ec2_conn.describe_route_tables(
            Filters=[{
                'Name': 'vpc-id',
                'Values': [self.vpc_id]
            }])['RouteTables']

        # Set up the routing table for the VPC
        # Allow for changing client subnets in constants.py
        for client_subnet in _vpn_config['client_subnets']:
            for route_table in _vpc_route_tables:
                self.add_resource(
                    ec2.Route('{}Route{}{}'.format(
                        _vpn_name,
                        client_subnet.translate({
                            ord("."): "",
                            ord("/"): ""
                        }), route_table['RouteTableId'].replace('-', '')),
                              RouteTableId=route_table['RouteTableId'],
                              DestinationCidrBlock=client_subnet,
                              NetworkInterfaceId=Ref(_vpn_eni)))

        _mongodb = _vpn_config.get('mongodb')
        _server_id = _vpn_config['server_id']

        _userdata_template = self.get_cloudinit_template(
            _tpl_name="pritunl_bootstrap" if _bootstrap_mode else None,
            replacements=(('__PROMPT_COLOR__', self.prompt_color()),
                          ('__SERVER_ID__', _server_id), ('__SERVICE__',
                                                          self.service),
                          ('__MONGODB__', _mongodb if _mongodb else '')))

        _userdata = Sub(
            _userdata_template.replace(
                '${', '${!')  # Replace bash brackets with CFN escaped style
            .replace(
                '{#', '${'
            ),  # Replace rain-style CFN escapes with proper CFN brackets
            {
                'CFN_ENI_ID': Ref(_vpn_eni),
                'CFN_EBS_ID': Ref(_data_volume) if _data_volume else ''
            })

        _vpn_launch_configuration = self.add_resource(
            autoscaling.LaunchConfiguration(
                '{}LaunchConfiguration'.format(_vpn_name),
                AssociatePublicIpAddress=True,
                KeyName=Ref(self.keypair_name),
                ImageId=Ref(self.ami),
                InstanceType=Ref(self.instance_type),
                InstanceMonitoring=False,
                IamInstanceProfile=Ref(self.instance_profile),
                UserData=Base64(_userdata)))
        self.add_resource(
            autoscaling.AutoScalingGroup(
                '{}ASGroup'.format(_vpn_name),
                AvailabilityZones=[_vpn_subnet['AvailabilityZone']],
                HealthCheckType='EC2',
                LaunchConfigurationName=Ref(_vpn_launch_configuration),
                MinSize=0,
                MaxSize=1,
                VPCZoneIdentifier=[_vpn_subnet['SubnetId']],
                Tags=self.get_autoscaling_tags(service_override=self.service,
                                               role_override=_vpn_name) +
                [autoscaling.Tag('Name', _vpn_name, True)]))
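
A note on the user-data handling above: the two chained replace() calls are easy to misread. The sketch below is a minimal, standalone illustration of that escaping step, assuming a rain-style template in which {#NAME} marks values to be filled in by Fn::Sub and ${NAME} is ordinary shell syntax that must pass through untouched; the template string itself is hypothetical.

# Hypothetical rain-style user-data fragment; no AWS calls are made here.
raw = 'mount_volume "${DEVICE}" "{#CFN_EBS_ID}"'

# Same rewrite as in the template code above.
escaped = raw.replace('${', '${!').replace('{#', '${')
print(escaped)  # mount_volume "${!DEVICE}" "${CFN_EBS_ID}"

# Fn::Sub later renders ${!DEVICE} back to the literal ${DEVICE} and replaces
# ${CFN_EBS_ID} with the value supplied in the substitution map.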
Example #2
    def configure(self):
        """
        Returns a vpn template
        """
        self.defaults = {'instance_type': 't2.small'}

        self.service = 'vpn'
        self.add_description('Sets up VPNs')
        self.get_eni_policies()
        self.get_default_security_groups()
        self.get_standard_parameters()
        self.get_standard_policies()
        self.ami = self.add_parameter(
            Parameter('AMI',
                      Type='String',
                      Description='AMI ID for instances',
                      Default=get_latest_ami_id(
                          self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2',
                          'amazon')))

        # Custom config per VPN
        for vpn in constants.ENVIRONMENTS[self.env]['vpn']:
            if not vpn['active']:
                continue
            _vpn_name = vpn['name']
            _vpn_subnet = self.get_subnets('public', _preferred_only=True)[0]
            _role = 'vpn-{}'.format(_vpn_name)

            _vpn_security_group = self.add_resource(
                ec2.SecurityGroup(
                    self.cfn_name('VPNSecurityGroup', _vpn_name),
                    VpcId=self.vpc_id,
                    GroupDescription='Security Group for VPN {}'.format(
                        _vpn_name),
                    SecurityGroupIngress=[{
                        "IpProtocol": "50",  # IPsec ESP
                        "FromPort": "-1",
                        "ToPort": "-1",
                        "CidrIp": vpn['remote_ip'] + '/32'
                    }, {
                        "IpProtocol": "51",  # IPsec AH
                        "FromPort": "-1",
                        "ToPort": "-1",
                        "CidrIp": vpn['remote_ip'] + '/32'
                    }, {
                        "IpProtocol": "udp",  # IKE
                        "FromPort": "500",
                        "ToPort": "500",
                        "CidrIp": vpn['remote_ip'] + '/32'
                    }, {
                        "IpProtocol": "udp",  # IPsec NAT traversal
                        "FromPort": "4500",
                        "ToPort": "4500",
                        "CidrIp": vpn['remote_ip'] + '/32'
                    }, {
                        "IpProtocol": "icmp",  # Ping
                        "FromPort": "-1",
                        "ToPort": "-1",
                        "CidrIp": "0.0.0.0/0"
                    }, {
                        "IpProtocol": "-1",  # All traffic from the internal supernet
                        "FromPort": "-1",
                        "ToPort": "-1",
                        "CidrIp": constants.SUPERNET
                    }],
                    SecurityGroupEgress=[{
                        "IpProtocol": "50",
                        "FromPort": "-1",
                        "ToPort": "-1",
                        "CidrIp": vpn['remote_ip'] + '/32'
                    }, {
                        "IpProtocol": "51",
                        "FromPort": "-1",
                        "ToPort": "-1",
                        "CidrIp": vpn['remote_ip'] + '/32'
                    }, {
                        "IpProtocol": "udp",
                        "FromPort": "500",
                        "ToPort": "500",
                        "CidrIp": vpn['remote_ip'] + '/32'
                    }, {
                        "IpProtocol": "udp",
                        "FromPort": "4500",
                        "ToPort": "4500",
                        "CidrIp": vpn['remote_ip'] + '/32'
                    }, {
                        "IpProtocol": "tcp",
                        "FromPort": "80",
                        "ToPort": "80",
                        "CidrIp": "0.0.0.0/0"
                    }, {
                        "IpProtocol": "tcp",
                        "FromPort": "443",
                        "ToPort": "443",
                        "CidrIp": "0.0.0.0/0"
                    }, {
                        "IpProtocol": "udp",
                        "FromPort": "123",
                        "ToPort": "123",
                        "CidrIp": "0.0.0.0/0"
                    }, {
                        "IpProtocol": "icmp",
                        "FromPort": "-1",
                        "ToPort": "-1",
                        "CidrIp": "0.0.0.0/0"
                    }, {
                        "IpProtocol": "-1",
                        "FromPort": "-1",
                        "ToPort": "-1",
                        "CidrIp": constants.SUPERNET
                    }]))
            _vpn_eip = self.add_resource(
                ec2.EIP(self.cfn_name('VPNInstanceEIP', _vpn_name),
                        Domain='vpc'))
            _vpn_eni = self.add_resource(
                ec2.NetworkInterface(
                    self.cfn_name('VPNInstanceENI', _vpn_name),
                    SubnetId=_vpn_subnet['SubnetId'],
                    Description='ENI for VPN - {}'.format(_vpn_name),
                    GroupSet=[Ref(_vpn_security_group)] + self.security_groups,
                    SourceDestCheck=False,
                    Tags=self.get_tags(role_override=_role)))
            self.add_resource(
                ec2.EIPAssociation(self.cfn_name('AssociateVPNInstanceENI',
                                                 _vpn_name),
                                   AllocationId=GetAtt(_vpn_eip,
                                                       "AllocationId"),
                                   NetworkInterfaceId=Ref(_vpn_eni)))
            # Set up Routes from all VPC subnets to the ENI
            _vpc_route_tables = self.ec2_conn.describe_route_tables(
                Filters=[{
                    'Name': 'vpc-id',
                    'Values': [self.vpc_id]
                }])['RouteTables']

            _local_subnets = iter(
                map(
                    lambda x: constants.ENVIRONMENTS[x]['vpc']['cidrblock'],
                    filter(lambda z: z in vpn.get('local_envs', []),
                           constants.ENVIRONMENTS.keys())))
            _local_subnets = list(
                itertools.chain(_local_subnets, [
                    self.vpc_metadata['cidrblock'],
                ]))

            # append remote vpc subnets
            _remote_subnets = iter(
                map(
                    lambda x: constants.ENVIRONMENTS[x]['vpc']['cidrblock'],
                    filter(lambda z: z in vpn.get('remote_envs', []),
                           constants.ENVIRONMENTS.keys())))
            _remote_subnets = list(
                itertools.chain(_remote_subnets, vpn.get('remote_subnets',
                                                         [])))

            for remote_subnet in _remote_subnets:
                for route_table in _vpc_route_tables:
                    self.add_resource(
                        ec2.Route(self.cfn_name(_vpn_name, "VPNRoute",
                                                remote_subnet,
                                                route_table['RouteTableId']),
                                  RouteTableId=route_table['RouteTableId'],
                                  DestinationCidrBlock=remote_subnet,
                                  NetworkInterfaceId=Ref(_vpn_eni)))

            _user_data_template = self.get_cloudinit_template(replacements=(
                ('__PROMPT_COLOR__',
                 self.prompt_color()), ('__LOCAL_SUBNETS__',
                                        ','.join(sorted(_local_subnets))),
                ('__REMOTE_IP__',
                 vpn['remote_ip']), ('__REMOTE_SUBNETS__',
                                     ','.join(sorted(_remote_subnets))),
                ('__SECRET__',
                 vpn['secret']), ('__IKE__',
                                  vpn.get('ike', 'aes256-sha1-modp1536')),
                ('__IKE_LIFETIME__', vpn.get('ikelifetime', '28800s')),
                ('__ESP__',
                 vpn.get('esp', 'aes256-sha1')), ('__KEYLIFE__',
                                                  vpn.get('keylife', '1800s')),
                ('__IPTABLES_RULES__',
                 '\n'.join(vpn.get('iptables_rules', ''))),
                ('__SERVICE__', self.service), ('__VPN_NAME__', _vpn_name),
                ('__TAG__', _vpn_name.lower()), ('__VPC_ID__', self.vpc_id)))
            _user_data = Sub(
                _user_data_template.replace(
                    '${',
                    '${!')  # Replace bash brackets with CFN escaped style
                .replace(
                    '{#', '${'
                ),  # Replace rain-style CFN escapes with proper CFN brackets,
                {
                    'CFN_EIP_ADDR': Ref(_vpn_eip),
                    'CFN_ENI_ID': Ref(_vpn_eni),
                })

            _vpn_launch_configuration = self.add_resource(
                autoscaling.LaunchConfiguration(
                    self.cfn_name('VPNLaunchConfiguration', _vpn_name),
                    AssociatePublicIpAddress=True,
                    KeyName=Ref(self.keypair_name),
                    ImageId=Ref(self.ami),
                    InstanceType=Ref(self.instance_type),
                    InstanceMonitoring=False,
                    IamInstanceProfile=Ref(self.instance_profile),
                    UserData=Base64(_user_data)))
            self.add_resource(
                autoscaling.AutoScalingGroup(
                    self.cfn_name('VPNASGroup', _vpn_name),
                    AvailabilityZones=[_vpn_subnet['AvailabilityZone']],
                    HealthCheckType='EC2',
                    LaunchConfigurationName=Ref(_vpn_launch_configuration),
                    MinSize=1,
                    MaxSize=1,
                    DesiredCapacity=1,
                    VPCZoneIdentifier=[_vpn_subnet['SubnetId']],
                    Tags=self.get_autoscaling_tags(role_override=_role) +
                    [autoscaling.Tag('Name', _role, True)]))
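
The iter/map/filter/itertools.chain pipelines above work, but they are hard to read at a glance. Below is an equivalent list-comprehension form of the _remote_subnets construction, using hypothetical stand-ins for constants.ENVIRONMENTS and the vpn entry; the real values come from constants.py.

# Hypothetical stand-ins for constants.ENVIRONMENTS and one vpn config entry.
ENVIRONMENTS = {
    'prod': {'vpc': {'cidrblock': '10.10.0.0/16'}},
    'stage': {'vpc': {'cidrblock': '10.20.0.0/16'}},
}
vpn = {'remote_envs': ['prod'], 'remote_subnets': ['192.168.0.0/24']}

# Take the VPC CIDR of every environment listed in remote_envs,
# then append any literal remote_subnets.
remote_subnets = [
    ENVIRONMENTS[env]['vpc']['cidrblock']
    for env in ENVIRONMENTS
    if env in vpn.get('remote_envs', [])
] + vpn.get('remote_subnets', [])

print(remote_subnets)  # ['10.10.0.0/16', '192.168.0.0/24']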
Example #3
def create():
    mydb = mysql.connector.connect(host="localhost",
                                   user="******",
                                   passwd="AmazingTheory62",
                                   database="cloud_formation")

    mycursor = mydb.cursor()
    mycursor.execute("SELECT * FROM ec2_table")
    myresult = mycursor.fetchone()
    sname = myresult[0]
    name = myresult[1]
    region = myresult[2]
    itype = myresult[3]
    vpc1 = myresult[4]
    subnet1 = myresult[5]

    #print(type(vpc1))

    template = Template()

    keyname_param = template.add_parameter(
        Parameter("KeyName",
                  Description="Name of an existing EC2 KeyPair to enable SSH "
                  "access to the instance",
                  Type="String",
                  Default="jayaincentiuskey"))

    vpcid_param = template.add_parameter(
        Parameter(
            "VpcId",
            Description="VpcId of your existing Virtual Private Cloud (VPC)",
            Type="String",
            Default=vpc1))

    subnetid_param = template.add_parameter(
        Parameter(
            "SubnetId",
            Description=
            "SubnetId of an existing subnet (for the primary network) in "
            "your Virtual Private Cloud (VPC)"
            "access to the instance",
            Type="String",
            Default=subnet1))

    secondary_ip_param = template.add_parameter(
        Parameter(
            "SecondaryIPAddressCount",
            Description=
            "Number of secondary IP addresses to assign to the network "
            "interface (1-5)",
            ConstraintDescription="must be a number from 1 to 5.",
            Type="Number",
            Default="1",
            MinValue="1",
            MaxValue="5",
        ))

    sshlocation_param = template.add_parameter(
        Parameter(
            "SSHLocation",
            Description="The IP address range that can be used to SSH to the "
            "EC2 instances",
            Type="String",
            MinLength="9",
            MaxLength="18",
            Default="0.0.0.0/0",
            AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})"
            "/(\\d{1,2})",
            ConstraintDescription="must be a valid IP CIDR range of the "
            "form x.x.x.x/x."))

    # The value read into 'region' above is used here as the AMI ID for us-west-2.
    template.add_mapping('RegionMap', {"us-west-2": {"AMI": region}})

    eip1 = template.add_resource(ec2.EIP(
        "EIP1",
        Domain="vpc",
    ))

    ssh_sg = template.add_resource(
        ec2.SecurityGroup(
            "SSHSecurityGroup",
            VpcId=Ref(vpcid_param),
            GroupDescription="Enable SSH access via port 22",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp=Ref(sshlocation_param),
                ),
            ],
        ))

    eth0 = template.add_resource(
        ec2.NetworkInterface(
            "Eth0",
            Description="eth0",
            GroupSet=[
                Ref(ssh_sg),
            ],
            SourceDestCheck=True,
            SubnetId=Ref(subnetid_param),
            Tags=Tags(
                Name="Interface 0",
                Interface="eth0",
            ),
            SecondaryPrivateIpAddressCount=Ref(secondary_ip_param),
        ))

    # eipassoc1 = template.add_resource(ec2.EIPAssociation(
    #     "EIPAssoc1",
    #     NetworkInterfaceId=Ref(eth0),
    #     AllocationId=GetAtt("EIP1", "AllocationId"),
    #     PrivateIpAddress=GetAtt("Eth0", "PrimaryPrivateIpAddress"),
    # ))

    ec2_instance = template.add_resource(
        ec2.Instance("EC2Instance",
                     ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
                     InstanceType=itype,
                     KeyName=Ref(keyname_param),
                     NetworkInterfaces=[
                         ec2.NetworkInterfaceProperty(
                             NetworkInterfaceId=Ref(eth0),
                             DeviceIndex="0",
                         ),
                     ],
                     Tags=Tags(Name=name, )))

    template.add_output([
        Output(
            "InstanceId",
            Description="InstanceId of the newly created EC2 instance",
            Value=Ref(ec2_instance),
        ),
        Output(
            "EIP1",
            Description="Primary public IP address for Eth0",
            Value=Join(
                " ",
                ["IP address",
                 Ref(eip1), "on subnet",
                 Ref(subnetid_param)]),
        ),
        Output(
            "PrimaryPrivateIPAddress",
            Description="Primary private IP address of Eth0",
            Value=Join(" ", [
                "IP address",
                GetAtt("Eth0", "PrimaryPrivateIpAddress"), "on subnet",
                Ref(subnetid_param)
            ]),
        ),
        Output(
            "FirstSecondaryPrivateIPAddress",
            Description="First secondary private IP address of Eth0",
            Value=Join(" ", [
                "IP address",
                Select("0", GetAtt("Eth0", "SecondaryPrivateIpAddresses")),
                "on subnet",
                Ref(subnetid_param)
            ]),
        ),
    ])

    print(template.to_json())
    with open('ec2json.json', 'w') as template_file:
        template_file.write(template.to_json())
    os.system('aws cloudformation create-stack --stack-name ' + sname +
              ' --template-body file://ec2json.json')
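
The final step shells out to the AWS CLI with a concatenated command string. As a sketch only (assuming boto3 is installed and credentials are configured; this is not part of the original script), the same launch could go through the CloudFormation API directly:

import boto3

def launch_stack(stack_name, template_body):
    # Create the stack via the CloudFormation API instead of shelling out
    # to `aws cloudformation create-stack`.
    cfn = boto3.client('cloudformation')
    response = cfn.create_stack(StackName=stack_name, TemplateBody=template_body)
    return response['StackId']

# e.g. launch_stack(sname, template.to_json())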
Example #4
    def add_resources_and_outputs(self):
        """Add resources to template."""
        template = self.template
        variables = self.get_variables()

        ec2networkinterface = template.add_resource(
            ec2.NetworkInterface(
                'Ec2NetworkInterface',
                Description=variables['Description'].ref,
                GroupSet=variables['SecurityGroupIds'].ref,
                SecondaryPrivateIpAddressCount=variables[
                    'SecondaryAddressCount'],
                SourceDestCheck=variables['SourceDestCheck'].ref,
                SubnetId=variables['SubnetId'].ref,
                Tags=Tags(variables['Tags']),
            )
        )

        template.add_output(
            Output(
                '{}Id'.format(ec2networkinterface.title),
                Description='ID of the EC2 Network Interface created',
                Export=Export(
                    Sub('${AWS::StackName}-%sId' % ec2networkinterface.title)
                ),
                Value=Ref(ec2networkinterface)
            )
        )

        template.add_output(
            Output(
                '{}PrimaryPrivateIp'.format(ec2networkinterface.title),
                Description='Primary Private IP of the EC2 Network Interface',
                Export=Export(
                    Sub('${AWS::StackName}-%sPrimaryPrivateIp'
                        % ec2networkinterface.title)
                ),
                Value=GetAtt(
                    ec2networkinterface, 'PrimaryPrivateIpAddress')
            )
        )

        for i in range(variables['SecondaryAddressCount']):
            template.add_output(
                Output(
                    '{}SecondaryPrivateIp{}'.format(
                        ec2networkinterface.title, i+1),
                    Description='Secondary Private IP {} of'
                                ' the EC2 Network Interface'.format(i+1),
                    Export=Export(
                        Sub('${AWS::StackName}-%sSecondaryPrivateIp%i'
                            % (ec2networkinterface.title, i+1))
                    ),
                    Value=Select(i, GetAtt(
                        ec2networkinterface, 'SecondaryPrivateIpAddresses')
                    )
                )
            )

        if variables['AttachEip']:
            # allocate and output an EIP for the primary private IP
            primaryeip = template.add_resource(
                ec2.EIP(
                    'Ec2EipPrimary',
                    Domain='vpc',
                )
            )
            template.add_output(
                Output(
                    '{}PrimaryPublicIp'.format(ec2networkinterface.title),
                    Description='Primary Public IP of'
                                ' the EC2 Network Interface',
                    Export=Export(
                        Sub('${AWS::StackName}-%sPrimaryPublicIp'
                            % ec2networkinterface.title)
                    ),
                    Value=Ref(primaryeip)
                )
            )
            # associate it to the primary private IP
            template.add_resource(
                ec2.EIPAssociation(
                    'Ec2EipPrimaryAssociation',
                    AllocationId=GetAtt(primaryeip, 'AllocationId'),
                    NetworkInterfaceId=Ref(ec2networkinterface),
                    PrivateIpAddress=GetAtt(
                        ec2networkinterface, 'PrimaryPrivateIpAddress'),
                )
            )

            # allocate and output EIP(s) for any secondary private IPs
            for i in range(variables['SecondaryAddressCount']):
                # allocate and output an EIP for a secondary private IP
                secondaryeip = template.add_resource(
                    ec2.EIP(
                        'Ec2EipSecondary{}'.format(i+1),
                        Domain='vpc',
                    )
                )
                template.add_output(
                    Output(
                        '{}SecondaryPublicIp{}'.format(
                            ec2networkinterface.title, i+1),
                        Description='Secondary Public IP {} of'
                                    ' the EC2 Network Interface'.format(i+1),
                        Export=Export(
                            Sub('${AWS::StackName}-%sSecondaryPublicIp%i'
                                % (ec2networkinterface.title, i+1))
                        ),
                        Value=Ref(secondaryeip)
                    )
                )
                # associate it to a secondary private IP
                template.add_resource(
                    ec2.EIPAssociation(
                        'Ec2EipSecondaryAssociation{}'.format(i+1),
                        AllocationId=GetAtt(secondaryeip, 'AllocationId'),
                        NetworkInterfaceId=Ref(ec2networkinterface),
                        PrivateIpAddress=Select(i, GetAtt(
                            ec2networkinterface, 'SecondaryPrivateIpAddresses')
                        )
                    )
                )
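
A quick sketch of the export-name pattern used in the outputs above, with a hypothetical stack name, showing what the Sub template resolves to:

# The '%s' interpolation happens in Python when the template is built; the
# '${AWS::StackName}' part is resolved by CloudFormation at deploy time.
title = 'Ec2NetworkInterface'
export_template = '${AWS::StackName}-%sId' % title
print(export_template)  # ${AWS::StackName}-Ec2NetworkInterfaceId

# Deployed in a stack named, say, 'edge-network', the export becomes
# 'edge-network-Ec2NetworkInterfaceId' and can be consumed with Fn::ImportValue.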
Example #5
    def configure(self):
        """
        This template creates a mesos-master per subnet in the VPC
        """
        config = constants.ENVIRONMENTS[self.env]['mesos']['master']
        self.defaults = {
            'instance_type': config.get('instance_type', 't3.large')
        }

        self.add_description('Sets up Mesos Masters in all Zones')
        self.get_eni_policies()
        self.get_default_security_groups()
        self.get_standard_parameters()
        self.get_standard_policies()

        _global_config = constants.ENVIRONMENTS[self.env]

        self.ami = self.add_parameter(
            Parameter('AMI',
                      Type='String',
                      Description='AMI ID for instances',
                      Default=get_latest_ami_id(
                          self.region, 'ivy-mesos',
                          _global_config.get('ami_owner', 'self'))))
        _mesos_master_security_group = self.add_resource(
            ec2.SecurityGroup(
                'MesosMasterSecurityGroup',
                VpcId=self.vpc_id,
                GroupDescription='Security Group for MesosMaster Instances',
                SecurityGroupIngress=[
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 2181,
                        'ToPort': 2181,
                        'CidrIp': self.vpc_cidr
                    },  # zk
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 4400,
                        'ToPort': 4400,
                        'CidrIp': self.vpc_cidr
                    },  # chronos
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 5050,
                        'ToPort': 5051,
                        'CidrIp': self.vpc_cidr
                    },  # mesos
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 8080,
                        'ToPort': 8080,
                        'CidrIp': self.vpc_cidr
                    },  # marathon
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 8500,
                        'ToPort': 8500,
                        'CidrIp': self.vpc_cidr
                    },  # consul ui
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 8300,
                        'ToPort': 8301,
                        'CidrIp': self.vpc_cidr
                    },  # consul rpc/lan serf
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 8302,
                        'ToPort': 8302,
                        'CidrIp': constants.SUPERNET
                    },  # consul wan serf
                    {
                        'IpProtocol': 'udp',
                        'FromPort': 8300,
                        'ToPort': 8301,
                        'CidrIp': self.vpc_cidr
                    },  # consul rpc/lan serf (udp)
                    {
                        'IpProtocol': 'udp',
                        'FromPort': 8302,
                        'ToPort': 8302,
                        'CidrIp': constants.SUPERNET
                    },  # consul wan serf (udp)
                ],
                SecurityGroupEgress=[{
                    'IpProtocol': '-1',
                    'FromPort': 0,
                    'ToPort': 65535,
                    'CidrIp': '0.0.0.0/0'
                }]))
        self.add_resource(
            ec2.SecurityGroupIngress(
                'MesosMasterIngressSecurityGroup',
                GroupId=Ref(_mesos_master_security_group),
                IpProtocol='-1',
                FromPort=-1,
                ToPort=-1,
                SourceSecurityGroupId=Ref(_mesos_master_security_group)
                # this allows members all traffic (for replication)
            ))
        self.add_security_group(Ref(_mesos_master_security_group))

        masters = [(index, ip)
                   for index, ip in enumerate(config['masters'], 1)]
        subnets = self.get_subnets('private')
        for master in masters:
            zone_index, master_ip = master
            subnet = [
                s for s in subnets if netaddr.IPAddress(master_ip) in
                netaddr.IPNetwork(s['CidrBlock'])
            ][0]

            _mesos_master_eni = ec2.NetworkInterface(
                'MesosMasterInstanceENI{}'.format(
                    subnet['AvailabilityZone'][-1]),
                Description='ENI for Mesos Master ENV: {0}  PrivateSubnet {1}'.
                format(self.env, subnet['SubnetId']),
                GroupSet=self.security_groups,
                PrivateIpAddress=master_ip,
                SourceDestCheck=True,
                SubnetId=subnet['SubnetId'],
                Tags=self.get_tags(service_override="Mesos",
                                   role_override='MesosMaster-{}'.format(
                                       subnet['AvailabilityZone'])))
            self.add_resource(_mesos_master_eni)

            _user_data_template = self.get_cloudinit_template(replacements=(
                ('__PROMPT_COLOR__', self.prompt_color()),
                ('__ENI_IP__', master_ip), ('__ZK_SERVER_ID__', zone_index),
                ('__HOSTS_ENTRIES__', '\n'.join([
                    '{0} mesos-master-{1}.node.{2}.{3} mesos-master-{1}'.
                    format(ip, index, self.env, constants.TAG)
                    for index, ip in masters
                ])), ('__ZK_CONNECT__',
                      ','.join(['{}:2181'.format(z[1]) for z in masters])),
                ('__ZK_PEERS__', '\n'.join([
                    'server.{0}={1}:2888:3888'.format(index, ip)
                    for index, ip in masters
                ]))))

            _user_data = Sub(
                _user_data_template.replace(
                    '${',
                    '${!')  # Replace bash brackets with CFN escaped style
                .replace(
                    '{#', '${'
                ),  # Replace rain-style CFN escapes with proper CFN brackets
                {
                    'CFN_ENI_ID': Ref(_mesos_master_eni),
                })

            _mesos_master_launch_configuration = self.add_resource(
                autoscaling.LaunchConfiguration(
                    'MesosMasterLaunchConfiguration{}'.format(
                        subnet['AvailabilityZone'][-1]),
                    AssociatePublicIpAddress=False,
                    BlockDeviceMappings=get_block_device_mapping(
                        self.parameters['InstanceType'].resource['Default']),
                    SecurityGroups=self.security_groups,
                    KeyName=Ref(self.keypair_name),
                    ImageId=Ref(self.ami),
                    InstanceType=Ref(self.instance_type),
                    InstanceMonitoring=False,
                    IamInstanceProfile=Ref(self.instance_profile),
                    UserData=Base64(_user_data)))
            self.add_resource(
                autoscaling.AutoScalingGroup(
                    'MesosMasterASGroup{}'.format(
                        subnet['AvailabilityZone'][-1]),
                    AvailabilityZones=[subnet['AvailabilityZone']],
                    HealthCheckType='EC2',
                    LaunchConfigurationName=Ref(
                        _mesos_master_launch_configuration),
                    MinSize=0,
                    MaxSize=1,
                    # DesiredCapacity=1,
                    VPCZoneIdentifier=[subnet['SubnetId']],
                    Tags=self.get_autoscaling_tags(
                        service_override="MesosMaster",
                        role_override='MesosMaster-{}'.format(
                            subnet['AvailabilityZone'])) +
                    [
                        autoscaling.Tag(
                            'Name', '{}Mesos-Master-{}'.format(
                                self.env, subnet['AvailabilityZone']), True),
                        # tag to allow consul to discover the hosts
                        # autoscaling.Tag('{}:consul_master'.format(constants.TAG), self.env, True)
                    ]))
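
The ZooKeeper-related replacements above are easier to follow with concrete values. The sketch below uses a hypothetical three-master layout; the real addresses come from constants.ENVIRONMENTS.

masters = list(enumerate(['10.0.1.10', '10.0.2.10', '10.0.3.10'], 1))

# __ZK_CONNECT__: client connection string, one host:port per master.
zk_connect = ','.join('{}:2181'.format(ip) for _, ip in masters)
print(zk_connect)  # 10.0.1.10:2181,10.0.2.10:2181,10.0.3.10:2181

# __ZK_PEERS__: zoo.cfg server entries with the quorum and election ports.
zk_peers = '\n'.join('server.{0}={1}:2888:3888'.format(i, ip) for i, ip in masters)
print(zk_peers)
# server.1=10.0.1.10:2888:3888
# server.2=10.0.2.10:2888:3888
# server.3=10.0.3.10:2888:3888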
Example #6
    def add_resources(self):

        self.CassandraPublicLBSG = self.template.add_resource(
            ec2.SecurityGroup(
                "CassandraPublicLBSG",
                GroupDescription=
                "Loadbalancer Security Group For Cassandra Public LB",
                VpcId=Ref(self.VpcId),
                SecurityGroupIngress=[
                    ec2.SecurityGroupRule(
                        IpProtocol="tcp",
                        FromPort=22,
                        ToPort=22,
                        CidrIp=Ref(self.AdminCidrBlock),
                    ),
                ],
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-CassandraPublicLBSG"),
            ))

        self.CassandraSG = self.template.add_resource(
            ec2.SecurityGroup(
                "CassandraSG",
                GroupDescription=
                "Allow communication between Cassandra Seed and Non-Seed Nodes",
                VpcId=Ref(self.VpcId),
                SecurityGroupIngress=[
                    ec2.SecurityGroupRule(
                        IpProtocol="tcp",
                        FromPort=22,
                        ToPort=22,
                        SourceSecurityGroupId=Ref(self.CassandraPublicLBSG),
                    ),
                ],
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-CassandraEc2SG"),
            ))

        self.CassandraSGInterNodeCommunicationIngress = self.template.add_resource(
            ec2.SecurityGroupIngress(
                "CassandraSGInterNodeCommunicationIngress",
                DependsOn=self.CassandraSG,
                GroupId=Ref(self.CassandraSG),
                IpProtocol="tcp",
                FromPort=7000,
                ToPort=7001,
                SourceSecurityGroupId=Ref(self.CassandraSG),
            ))

        self.CassandraSeedNetworkInterface = self.template.add_resource(
            ec2.NetworkInterface(
                "Eth0",
                Description="eth0",
                GroupSet=[Ref(self.CassandraSG)],
                SubnetId=Ref(self.RESTPrivSubnet1),
                PrivateIpAddress="10.0.1.132",
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-CassandraSeedNetworkInterface"),
            ))

        self.CassandraSeed1 = self.template.add_resource(
            ec2.Instance(
                "CassandraSeed1",
                ImageId=Ref(self.CassandraImageId),
                KeyName=Ref(self.CassandraServerKeyName),
                InstanceType=Ref(self.CassandraServerInstanceType),
                IamInstanceProfile=Ref(self.CassandraServerIAMInstanceProfile),
                NetworkInterfaces=[
                    ec2.NetworkInterfaceProperty(
                        NetworkInterfaceId=Ref(
                            self.CassandraSeedNetworkInterface),
                        DeviceIndex="0",
                    ),
                ],
                UserData=Base64(
                    Join('', [
                        "#!/bin/bash -x\n", "export NODE_IP=`hostname -I`\n",
                        "export SEED_LIST=\"10.0.1.132\"\n",
                        "export CASSANDRA_YML=\"/etc/cassandra/conf/cassandra.yaml\"\n",
                        "export CLUSTER_NAME=\"devops_cluster\"\n",
                        "export SNITCH_TYPE=\"Ec2Snitch\"\n",
                        "sed -i \"/cluster_name:/c\\cluster_name: \\'${CLUSTER_NAME}\\'\"  ${CASSANDRA_YML}\n",
                        "sed -i \"/- seeds:/c\\          - seeds: \\\"${SEED_LIST}\\\"\"     ${CASSANDRA_YML}\n",
                        "sed -i \"/listen_address:/c\\listen_address: ${NODE_IP}\"       ${CASSANDRA_YML}\n",
                        "sed -i \"/rpc_address:/c\\rpc_address: ${NODE_IP}\"             ${CASSANDRA_YML}\n",
                        "sed -i \"/endpoint_snitch:/c\\endpoint_snitch: ${SNITCH_TYPE}\" ${CASSANDRA_YML}\n",
                        "sed -i \"/authenticator: AllowAllAuthenticator/c\\authenticator: PasswordAuthenticator\" ${CASSANDRA_YML}\n"
                        "echo 'auto_bootstrap: false' >> ${CASSANDRA_YML}\n",
                        "service cassandra start\n"
                    ])),
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-CassandraSeed-1-Ec2"),
            ))

        self.CassandraPublicLoadBalancer = self.template.add_resource(
            elb.LoadBalancer(
                "CassandraPublicLoadBalancer",
                LoadBalancerName=self.
                environment_parameters["ClientEnvironmentKey"] +
                "-CassandraNonSeedPubLB",
                Scheme="internet-facing",
                Listeners=[
                    elb.Listener(
                        LoadBalancerPort="22",
                        InstancePort="22",
                        Protocol="TCP",
                        InstanceProtocol="TCP",
                    )
                ],
                Instances=[],
                SecurityGroups=[Ref(self.CassandraPublicLBSG)],
                Subnets=[Ref(self.RESTPubSubnet1)],
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-CassandraNonSeedPubLB"),
            ))

        self.CassandraNonSeedLaunchConfiguration = self.template.add_resource(
            LaunchConfiguration(
                "CassandraNonSeedLaunchConfiguration",
                ImageId=Ref(self.CassandraImageId),
                InstanceType=Ref(self.CassandraServerInstanceType),
                IamInstanceProfile=Ref(self.CassandraServerIAMInstanceProfile),
                KeyName=Ref(self.CassandraServerKeyName),
                SecurityGroups=[Ref(self.CassandraSG)],
                UserData=Base64(
                    Join('', [
                        "#!/bin/bash -x\n", "export NODE_IP=`hostname -I`\n",
                        "export SEED_LIST=\"10.0.1.132\"\n",
                        "export CASSANDRA_YML=\"/etc/cassandra/conf/cassandra.yaml\"\n",
                        "export CLUSTER_NAME=\"devoops_cluster\"\n",
                        "export SNITCH_TYPE=\"Ec2Snitch\"\n",
                        "sed -i \"/cluster_name:/c\\cluster_name: \\'${CLUSTER_NAME}\\'\"  ${CASSANDRA_YML}\n",
                        "sed -i \"/- seeds:/c\\          - seeds: \\\"${SEED_LIST}\\\"\"     ${CASSANDRA_YML}\n",
                        "sed -i \"/listen_address:/c\\listen_address: ${NODE_IP}\"       ${CASSANDRA_YML}\n",
                        "sed -i \"/rpc_address:/c\\rpc_address: ${NODE_IP}\"             ${CASSANDRA_YML}\n",
                        "sed -i \"/endpoint_snitch:/c\\endpoint_snitch: ${SNITCH_TYPE}\" ${CASSANDRA_YML}\n",
                        "sed -i \"/authenticator: AllowAllAuthenticator/c\\authenticator: PasswordAuthenticator\" ${CASSANDRA_YML}\n",
                        "echo 'auto_bootstrap: false' >> ${CASSANDRA_YML}\n",
                        "service cassandra start\n"
                    ])),
            ))

        self.CassandraNonSeedAutoScalingGroup = self.template.add_resource(
            AutoScalingGroup(
                "CassandraNonSeedAutoscalingGroup",
                AutoScalingGroupName=self.
                environment_parameters["ClientEnvironmentKey"] +
                "-CassandraNonSeedAutoScalingGroup",
                LaunchConfigurationName=Ref(
                    self.CassandraNonSeedLaunchConfiguration),
                LoadBalancerNames=[Ref(self.CassandraPublicLoadBalancer)],
                MaxSize="1",
                MinSize="1",
                DesiredCapacity="1",
                VPCZoneIdentifier=[Ref(self.RESTPrivSubnet1)],
                Tags=[
                    AutoScalingTag(
                        "Name",
                        self.environment_parameters["ClientEnvironmentKey"] +
                        "-CassandraNonSeedEc2", True),
                    AutoScalingTag(
                        "Environment",
                        self.environment_parameters["EnvironmentName"], True),
                    AutoScalingTag(
                        "ResourceOwner",
                        self.environment_parameters["ResourceOwner"], True),
                    AutoScalingTag(
                        "ClientCode",
                        self.environment_parameters["ClientEnvironmentKey"],
                        True),
                ],
            ))
Example #7
                IpProtocol="tcp",
                FromPort="22",
                ToPort="22",
                CidrIp=Ref(sshlocation_param),
            ),
        ],
    ))

eth0 = template.add_resource(
    ec2.NetworkInterface(
        "Eth0",
        Description="eth0",
        GroupSet=[
            Ref(ssh_sg),
        ],
        SourceDestCheck=True,
        SubnetId=Ref(subnetid_param),
        Tags=Tags(
            Name="Interface 0",
            Interface="eth0",
        ),
        SecondaryPrivateIpAddressCount=Ref(secondary_ip_param),
    ))

eipassoc1 = template.add_resource(
    ec2.EIPAssociation(
        "EIPAssoc1",
        NetworkInterfaceId=Ref(eth0),
        AllocationId=GetAtt("EIP1", "AllocationId"),
        PrivateIpAddress=GetAtt("Eth0", "PrimaryPrivateIpAddress"),
    ))
Example #8
File: nexus.py  Project: nxtlytics/ivy-rain
    def configure(self):
        """
        Returns a Nexus template
        """
        self.defaults = {'instance_type': 't3.xlarge'}

        self.service = 'nexus'
        self.set_description('Sets up Nexus repository manager servers')
        self.get_default_security_groups()
        self.get_standard_parameters()
        self.get_standard_policies()
        self.ami = self.add_parameter(
            Parameter('AMI',
                      Type='String',
                      Description='AMI ID for instances',
                      Default=get_latest_ami_id(
                          self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2',
                          'amazon')))

        config = constants.ENVIRONMENTS[self.env][self.service]

        # We want the preferred subnet only.
        subnet = self.get_subnets('private', _preferred_only=True)[0]

        # Add our security group
        security_group = self.add_resource(
            ec2.SecurityGroup(
                '{}SecurityGroup'.format(self.name),
                VpcId=self.vpc_id,
                GroupDescription='Security Group for {}'.format(self.name),
                SecurityGroupIngress=[
                    {
                        "IpProtocol": "tcp",
                        "FromPort": "80",
                        "ToPort": "80",
                        "CidrIp": constants.SUPERNET
                    },  # HTTP
                    {
                        "IpProtocol": "tcp",
                        "FromPort": "443",
                        "ToPort": "443",
                        "CidrIp": constants.SUPERNET
                    },  # HTTPS
                    # {"IpProtocol": "tcp", "FromPort": "8081", "ToPort": "8081", "CidrIp": constants.SUPERNET},  # NexusRM Direct (disabled!)
                ],
                SecurityGroupEgress=[{
                    "IpProtocol": "-1",
                    "FromPort": "-1",
                    "ToPort": "-1",
                    "CidrIp": "0.0.0.0/0"
                }]))

        # Add our EBS data volume
        data_volume = ec2.Volume(
            '{}DataVolume'.format(self.name),
            Size=config.get('data_volume_size', 20),
            VolumeType='gp2',
            AvailabilityZone=subnet['AvailabilityZone'],
            DeletionPolicy='Retain',
            Tags=self.get_tags(service_override=self.service,
                               role_override=self.name) +
            [ec2.Tag('Name', self.name + "-datavol")])
        self.add_resource(data_volume)
        self.add_iam_policy(
            iam.Policy(PolicyName='AttachVolume',
                       PolicyDocument={
                           'Statement': [{
                               'Effect':
                               'Allow',
                               'Resource':
                               '*',
                               'Action': [
                                   'ec2:AttachVolume', 'ec2:DeleteSnapshot',
                                   'ec2:DescribeTags',
                                   'ec2:DescribeVolumeAttribute',
                                   'ec2:DescribeVolumeStatus',
                                   'ec2:DescribeVolumes', 'ec2:DetachVolume'
                               ]
                           }]
                       }))

        # Add a ENI for static IP address
        eni = self.add_resource(
            ec2.NetworkInterface(
                '{}InstanceENI'.format(self.name),
                SubnetId=subnet['SubnetId'],
                Description='ENI for {}'.format(self.name),
                GroupSet=[Ref(security_group)] + self.security_groups,
                SourceDestCheck=True,
                Tags=self.get_tags(service_override=self.service,
                                   role_override=self.name)))
        self.get_eni_policies()

        # Add a route53 A record for the main Nexus host
        route53_zone = constants.ENVIRONMENTS[self.env]['route53_zone']
        private_dns = config.get('private_dns',
                                 'nexus.{}'.format(route53_zone))
        self.add_resource(
            route53.RecordSetGroup(
                '{}Route53'.format(self.name),
                HostedZoneName=route53_zone,
                RecordSets=[
                    route53.RecordSet(Name=private_dns,
                                      ResourceRecords=[
                                          GetAtt(eni,
                                                 'PrimaryPrivateIpAddress')
                                      ],
                                      Type='A',
                                      TTL=600)
                ]))
        # Add CNAME records for each repository, pointing to the main
        for repository in config['repositories']:
            self.add_resource(
                route53.RecordSetGroup(
                    '{}{}Route53'.format(self.name, self.cfn_name(repository)),
                    HostedZoneName=route53_zone,
                    RecordSets=[
                        route53.RecordSet(Name='{}.{}'.format(
                            repository, route53_zone),
                                          ResourceRecords=[private_dns],
                                          Type='CNAME',
                                          TTL=600)
                    ]))

        # Add S3 IAM role for nexus blobstore access
        self.add_iam_policy(
            iam.Policy(
                PolicyName='S3Access',
                PolicyDocument={
                    'Statement': [{
                        "Effect":
                        "Allow",
                        "Action": [
                            "s3:ListBucket", "s3:GetBucketLocation",
                            "s3:ListBucketMultipartUploads",
                            "s3:ListBucketVersions", "s3:GetBucketAcl",
                            "s3:GetLifecycleConfiguration",
                            "s3:PutLifecycleConfiguration"
                        ],
                        "Resource": [
                            'arn:{}:s3:::{}'.format(self.get_partition(),
                                                    config['s3_bucket'])
                        ]
                    }, {
                        "Effect":
                        "Allow",
                        "Action": [
                            "s3:GetObject", "s3:PutObject", "s3:DeleteObject",
                            "s3:AbortMultipartUpload",
                            "s3:ListMultipartUploadParts",
                            "s3:GetObjectTagging", "s3:PutObjectTagging",
                            "s3:GetObjectTagging", "s3:DeleteObjectTagging"
                        ],
                        "Resource": [
                            'arn:{}:s3:::{}/*'.format(self.get_partition(),
                                                      config['s3_bucket'])
                        ]
                    }]
                }))

        # Substitute the userdata template and feed it to CFN
        userdata_template = self.get_cloudinit_template(replacements=(
            ('__PROMPT_COLOR__', self.prompt_color()),
            ('__SERVICE__', self.service),
            ('__DEFAULT_DOMAIN__',
             route53_zone[:-1]),  # route53_zone has a trailing '.', strip it
            ('__TOP_DOMAIN__', constants.ROOT_ROUTE53_ZONE),
            # ('__REPOSITORIES__', " ".join(['"{}"'.format(x) for x in config['repositories']]))  # '"abc" "def" "ghi"'
        ))
        userdata = Sub(
            userdata_template.replace(
                '${', '${!')  # Replace bash brackets with CFN escaped style
            .replace(
                '{#', '${'
            ),  # Replace rain-style CFN escapes with proper CFN brackets
            {
                'CFN_ENI_ID': Ref(eni),
                'CFN_EBS_ID': Ref(data_volume)
            })

        launch_configuration = self.add_resource(
            autoscaling.LaunchConfiguration(
                '{}LaunchConfiguration'.format(self.name),
                AssociatePublicIpAddress=False,
                KeyName=Ref(self.keypair_name),
                ImageId=Ref(self.ami),
                InstanceType=Ref(self.instance_type),
                InstanceMonitoring=False,
                IamInstanceProfile=Ref(self.instance_profile),
                UserData=Base64(userdata)))
        self.add_resource(
            autoscaling.AutoScalingGroup(
                '{}ASGroup'.format(self.name),
                AvailabilityZones=[subnet['AvailabilityZone']],
                HealthCheckType='EC2',
                LaunchConfigurationName=Ref(launch_configuration),
                MinSize=0,
                MaxSize=1,
                DesiredCapacity=0,
                VPCZoneIdentifier=[subnet['SubnetId']],
                Tags=self.get_autoscaling_tags(service_override=self.service,
                                               role_override=self.name) +
                [autoscaling.Tag('Name', self.name, True)]))
######### Monitor VM Pre-setup ###########
eip1 = t.add_resource(ec2.EIP(
    "EIPMonitor",
    Domain="vpc",
))

eth0 = t.add_resource(
    ec2.NetworkInterface(
        "Eth0",
        Description="eth0",
        DependsOn=AllInternalAccessSecurityGroup.title,
        GroupSet=[
            Ref(AllInternalAccessSecurityGroup),
            Ref(MonitorSecurityGroup)
        ],  #Split(",", Join(",", [Join(",", Ref(SecurityGroupIdsParam)), Ref(MonitorSecurityGroup), ])),
        # SourceDestCheck=True,
        SubnetId=Select("0", Ref(PublicSubnetsToSpanParam)),
        Tags=Tags(
            Name="Interface 0",
            Interface="eth0",
        ),
    ))
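
# The EIP association that follows binds the allocated EIP to eth0's primary
# private IP, giving the monitor host a stable public address tied to the ENI
# rather than to any particular instance.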

eipassoc1 = t.add_resource(
    ec2.EIPAssociation(
        "EIPAssoc1",
        NetworkInterfaceId=Ref(eth0),
        AllocationId=GetAtt("EIPMonitor", "AllocationId"),
        PrivateIpAddress=GetAtt("Eth0", "PrimaryPrivateIpAddress"),
    ))
Example #10
def GenerateDockerRegistryLayer():
    t = Template()

    t.add_description("""\
    DockerRegistry Layer
    """)

    stackname_param = t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: test)",
            Type="String",
            Default="test",
        ))

    vpcid_param = t.add_parameter(
        Parameter(
            "VpcId",
            Type="String",
            Description="VpcId of your existing Virtual Private Cloud (VPC)",
            Default="vpc-fab00e9f"))

    subnets = t.add_parameter(
        Parameter(
            "Subnets",
            Type="CommaDelimitedList",
            Description=(
                "The list SubnetIds, for public subnets in the "
                "region and in your Virtual Private Cloud (VPC) - minimum one"
            ),
            Default="subnet-b68f3bef,subnet-9a6208ff,subnet-bfdd4fc8"))

    keypair_param = t.add_parameter(
        Parameter("KeyPair",
                  Description="Name of an existing EC2 KeyPair to enable SSH "
                  "access to the instance",
                  Type="String",
                  Default="glueteam"))

    registry_ami_id_param = t.add_parameter(
        Parameter("RegistryAmiId",
                  Description="Registry server AMI ID",
                  Type="String",
                  Default="ami-a10897d6"))

    iam_role_param = t.add_parameter(
        Parameter(
            "IamRole",
            Description="IAM Role name",
            Type="String",
        ))

    s3bucket_param = t.add_parameter(
        Parameter(
            "BucketName",
            Description="S3 Bucket Name (default: )",
            Type="String",
            Default="",
        ))

    # --------- Docker registry

    registry_sg = t.add_resource(
        ec2.SecurityGroup(
            'RegistrySG',
            GroupDescription='Security group for Registry host',
            VpcId=Ref(vpcid_param),
            Tags=Tags(Name=Join("", [Ref(stackname_param), "RegistrySG"])),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                ),
            ]))

    registry_eip = t.add_resource(ec2.EIP(
        'RegistryEIP',
        Domain='vpc',
    ))

    registry_eth0 = t.add_resource(
        ec2.NetworkInterface(
            "RegistryEth0",
            Description=Join("", [Ref(stackname_param), "Registry Eth0"]),
            GroupSet=[
                Ref(registry_sg),
            ],
            SourceDestCheck=True,
            SubnetId=Select(0, Ref(subnets)),
            Tags=Tags(
                Name=Join("", [Ref(stackname_param), "Registry Interface 0"]),
                Interface="eth0",
            )))

    registry_host = t.add_resource(
        ec2.Instance(
            'RegistryHost',
            ImageId=Ref(registry_ami_id_param),
            InstanceType='t2.micro',
            KeyName=Ref(keypair_param),
            IamInstanceProfile=Ref(iam_role_param),
            NetworkInterfaces=[
                ec2.NetworkInterfaceProperty(
                    NetworkInterfaceId=Ref(registry_eth0),
                    DeviceIndex="0",
                ),
            ],
            Tags=Tags(Name=Join("", [Ref(stackname_param), "Registry"]),
                      Id=Join("", [Ref(stackname_param), "Registry"])),
            UserData=Base64(
                Join('', [
                    '#!/bin/bash\n',
                    'yum update -y aws-cfn-bootstrap\n',
                    'mkdir -p /root/build/redis /root/build/registry\n',
                    'touch /root/build/redis/Dockerfile\n',
                    'touch /root/build/redis/redis.conf\n',
                    'touch /root/build/registry/Dockerfile\n',
                ])),
        ))

    registry_eip_assoc = t.add_resource(
        ec2.EIPAssociation(
            "RegistryEIPAssoc",
            NetworkInterfaceId=Ref(registry_eth0),
            AllocationId=GetAtt("RegistryEIP", "AllocationId"),
            PrivateIpAddress=GetAtt("RegistryEth0", "PrimaryPrivateIpAddress"),
        ))

    return t
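
A minimal usage sketch for the generator above (a hypothetical wrapper script; it assumes troposphere is installed and GenerateDockerRegistryLayer is in scope):

# render_registry.py (illustrative name) -- emit the layer as a CloudFormation JSON document
if __name__ == "__main__":
    print(GenerateDockerRegistryLayer().to_json())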
Example #11
def GenerateStepJenkinsLayer():
    t = Template()

    t.add_description("""\
    Jenkins for Step Hackathon Layer
    """)

    stackname_param = t.add_parameter(
        Parameter(
            "StackName",
            Description="Environment Name (default: hackathon)",
            Type="String",
            Default="hackathon",
        ))

    vpcid_param = t.add_parameter(
        Parameter(
            "VpcId",
            Type="String",
            Description="VpcId of your existing Virtual Private Cloud (VPC)",
            Default="vpc-fab00e9f"))

    subnets = t.add_parameter(
        Parameter(
            "Subnets",
            Type="CommaDelimitedList",
            Description=(
                "The list SubnetIds, for public subnets in the "
                "region and in your Virtual Private Cloud (VPC) - minimum one"
            ),
            Default="subnet-b68f3bef,subnet-9a6208ff,subnet-bfdd4fc8"))

    keypair_param = t.add_parameter(
        Parameter("KeyPair",
                  Description="Name of an existing EC2 KeyPair to enable SSH "
                  "access to the instance",
                  Type="String",
                  Default="glueteam"))

    jenkins_ami_id_param = t.add_parameter(
        Parameter("JenkinsAmiId",
                  Description="Jenkins server AMI ID (default: ami-f3641a84)",
                  Type="String",
                  Default="ami-f3641a84"))

    operations_subdomain_hosted_zone_param = t.add_parameter(
        Parameter("DashsoftHostedZoneParam",
                  Description="HostedZone (default: hackathon.operations.dk)",
                  Type="String",
                  Default="hackathon.operations.dk"))

    iam_role_param = t.add_parameter(
        Parameter(
            "IamRole",
            Description="IAM Role name",
            Type="String",
        ))

    # --------- Jenkins instance

    jenkins_sg = t.add_resource(
        ec2.SecurityGroup(
            'JenkinsSG',
            GroupDescription='Security group for Jenkins host',
            VpcId=Ref(vpcid_param),
            Tags=Tags(Name=Join("", [Ref(stackname_param), "SG"])),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="443",
                    ToPort="443",
                    CidrIp="0.0.0.0/0",
                ),
            ]))

    jenkins_eip = t.add_resource(ec2.EIP(
        'JenkinsEIP',
        Domain='vpc',
    ))

    jenkins_eth0 = t.add_resource(
        ec2.NetworkInterface(
            "JenkinsEth0",
            Description=Join("", [Ref(stackname_param), " Eth0"]),
            GroupSet=[
                Ref(jenkins_sg),
            ],
            SourceDestCheck=True,
            SubnetId=Select(0, Ref(subnets)),
            Tags=Tags(
                Name=Join("", [Ref(stackname_param), " Interface 0"]),
                Interface="eth0",
            )))

    jenkins_host = t.add_resource(
        ec2.Instance(
            'JenkinsHost',
            ImageId=Ref(jenkins_ami_id_param),
            InstanceType='m3.medium',
            KeyName=Ref(keypair_param),
            IamInstanceProfile=Ref(iam_role_param),
            NetworkInterfaces=[
                ec2.NetworkInterfaceProperty(
                    NetworkInterfaceId=Ref(jenkins_eth0),
                    DeviceIndex="0",
                ),
            ],
            Tags=Tags(Name=Ref(stackname_param), Id=Ref(stackname_param)),
            UserData=Base64(Join('', [
                '#!/bin/bash\n',
            ])),
        ))

    jenkins_eip_assoc = t.add_resource(
        ec2.EIPAssociation(
            "JenkinsEIPAssoc",
            NetworkInterfaceId=Ref(jenkins_eth0),
            AllocationId=GetAtt("JenkinsEIP", "AllocationId"),
            PrivateIpAddress=GetAtt("JenkinsEth0", "PrimaryPrivateIpAddress"),
        ))

    jenkins_host_cname = t.add_resource(
        route53.RecordSetType(
            "JenkinsHostCname",
            HostedZoneName=Join(
                "", [Ref(operations_subdomain_hosted_zone_param), "."]),
            Comment=Join("", ["Jenkins host CNAME for ",
                              Ref(stackname_param)]),
            Name=Join(
                "",
                ["jenkins.",
                 Ref(operations_subdomain_hosted_zone_param), "."]),
            Type="A",
            TTL="60",
            ResourceRecords=[GetAtt("JenkinsHost", "PublicIp")],
            DependsOn="JenkinsEIPAssoc"))

    return t
Example #12
File: bind.py Project: nxtlytics/ivy-rain
    def configure(self):
        """
        Returns a BIND template
        """
        self.defaults = {'instance_type': 't3.micro'}

        self.service = 'bind'
        self.set_description('Sets up BIND DNS servers')
        self.get_default_security_groups()
        self.get_standard_parameters()
        self.get_standard_policies()
        self.ami = self.add_parameter(
            Parameter('AMI',
                      Type='String',
                      Description='AMI ID for instances',
                      Default=get_latest_ami_id(
                          self.region, 'amzn2-ami-hvm-2.0.????????-x86_64-gp2',
                          'amazon')))

        config = constants.ENVIRONMENTS[self.env][self.service]

        # All subnets in public get a DNS server
        subnets = self.get_subnets('public')

        # Add our security group
        security_group = self.add_resource(
            ec2.SecurityGroup(
                '{}SecurityGroup'.format(self.name),
                VpcId=self.vpc_id,
                GroupDescription='Security Group for {}'.format(self.name),
                SecurityGroupIngress=[
                    {
                        "IpProtocol": "tcp",
                        "FromPort": "53",
                        "ToPort": "53",
                        "CidrIp": "0.0.0.0/0"
                    },  # DNS TCP
                    {
                        "IpProtocol": "udp",
                        "FromPort": "53",
                        "ToPort": "53",
                        "CidrIp": "0.0.0.0/0"
                    },  # DNS UDP
                ],
                SecurityGroupEgress=[{
                    "IpProtocol": "-1",
                    "FromPort": "-1",
                    "ToPort": "-1",
                    "CidrIp": "0.0.0.0/0"
                }]))

        route53_zone = constants.ENVIRONMENTS[self.env]['route53_zone']

        zonefile = ''
        for zone in config['forwarders']:
            zonefile += "\n" + self.make_bind_zone(zone)

        for subnet in subnets:
            subnet_name = subnet['AvailabilityZone']
            role = '{}-{}-{}'.format(self.env, self.service,
                                     subnet_name)  # myenv-bind-us-west-2a

            # Add the elastic IP and the ENI for it, then attach it.
            eip = self.add_resource(
                ec2.EIP('{}InstanceEIP'.format(self.cfn_name(role)),
                        Domain='vpc'))
            eni = self.add_resource(
                ec2.NetworkInterface(
                    '{}InstanceENI'.format(self.cfn_name(role)),
                    SubnetId=subnet['SubnetId'],
                    Description='ENI for {}'.format(role),
                    GroupSet=[Ref(security_group)] + self.security_groups,
                    SourceDestCheck=True,
                    Tags=self.get_tags(service_override=self.service,
                                       role_override=role)))
            self.get_eni_policies()

            self.add_resource(
                ec2.EIPAssociation('{}AssociateVPNInstanceENI'.format(
                    self.cfn_name(role)),
                                   AllocationId=GetAtt(eip, "AllocationId"),
                                   NetworkInterfaceId=Ref(eni)))

            # Add a route53 DNS name
            self.add_resource(
                route53.RecordSetGroup('{}Route53'.format(self.cfn_name(role)),
                                       HostedZoneName=route53_zone,
                                       RecordSets=[
                                           route53.RecordSet(
                                               Name="{}.{}".format(
                                                   role, route53_zone),
                                               ResourceRecords=[Ref(eip)],
                                               Type='A',
                                               TTL=600)
                                       ]))

            # Substitute the userdata template and feed it to CFN
            userdata_template = self.get_cloudinit_template(
                replacements=(('__PROMPT_COLOR__', self.prompt_color()),
                              ('__SERVICE__',
                               self.service), ('__BIND_ZONEFILE__', zonefile)))
            userdata = Sub(
                userdata_template.replace(
                    '${',
                    '${!')  # Replace bash brackets with CFN escaped style
                .replace(
                    '{#', '${'
                ),  # Replace rain-style CFN escapes with proper CFN brackets
                {'CFN_ENI_ID': Ref(eni)})

            launch_configuration = self.add_resource(
                autoscaling.LaunchConfiguration(
                    '{}LaunchConfiguration'.format(self.cfn_name(role)),
                    AssociatePublicIpAddress=True,
                    KeyName=Ref(self.keypair_name),
                    ImageId=Ref(self.ami),
                    InstanceType=Ref(self.instance_type),
                    InstanceMonitoring=False,
                    IamInstanceProfile=Ref(self.instance_profile),
                    UserData=Base64(userdata)))
            self.add_resource(
                autoscaling.AutoScalingGroup(
                    '{}ASGroup'.format(self.cfn_name(role)),
                    AvailabilityZones=[subnet['AvailabilityZone']],
                    HealthCheckType='EC2',
                    LaunchConfigurationName=Ref(launch_configuration),
                    MinSize=0,
                    MaxSize=1,
                    DesiredCapacity=0,
                    VPCZoneIdentifier=[subnet['SubnetId']],
                    Tags=self.get_autoscaling_tags(
                        service_override=self.service, role_override=role) +
                    [autoscaling.Tag('Name', role, True)]))
Example #13
    def configure(self):
        """
        Returns a cassandra template with seed nodes
        """
        self.add_description('Sets up Cassandra in all Zones')
        self.get_eni_policies()
        self.get_default_security_groups()
        self.get_standard_parameters()
        self.get_standard_policies()

        _global_config = constants.ENVIRONMENTS[self.env]

        self.ami = self.add_parameter(
            Parameter('AMI',
                      Type='String',
                      Description='AMI ID for instances',
                      Default=get_latest_ami_id(
                          self.region, 'ivy-cassandra',
                          _global_config.get('ami_owner', 'self'))))
        _cassandra_security_group = self.add_resource(
            ec2.SecurityGroup(
                '{}SecurityGroup'.format(self.name),
                VpcId=self.vpc_id,
                GroupDescription='Security Group for {} Instances'.format(
                    self.name),
                SecurityGroupIngress=[
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 7000,
                        'ToPort': 7001,
                        'CidrIp': self.vpc_cidr
                    },  # inter-node
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 7199,
                        'ToPort': 7199,
                        'CidrIp': self.vpc_cidr
                    },  # jmx
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 9042,
                        'ToPort': 9042,
                        'CidrIp': self.vpc_cidr
                    },  # client port
                    {
                        'IpProtocol': 'tcp',
                        'FromPort': 9160,
                        'ToPort': 9160,
                        'CidrIp': self.vpc_cidr
                    },  # client (thrift)
                ]))
        self.add_resource(
            ec2.SecurityGroupIngress(
                '{}IngressSecurityGroup'.format(self.name),
                GroupId=Ref(_cassandra_security_group),
                IpProtocol='-1',
                FromPort=-1,
                ToPort=-1,
                SourceSecurityGroupId=Ref(_cassandra_security_group
                                          )  # this allows members all traffic
            ))
        self.add_security_group(Ref(_cassandra_security_group))

        # Add support for creating EBS snapshots and tagging them
        self.add_iam_policy(
            iam.Policy(PolicyName='CassandraBackups',
                       PolicyDocument={
                           'Statement': [{
                               'Effect':
                               'Allow',
                               'Resource':
                               '*',
                               'Action': [
                                   'ec2:AttachVolume', 'ec2:CreateSnapshot',
                                   'ec2:CreateTags', 'ec2:DeleteSnapshot',
                                   'ec2:DescribeInstances',
                                   'ec2:DescribeSnapshots', 'ec2:DescribeTags',
                                   'ec2:DescribeVolumeAttribute',
                                   'ec2:DescribeVolumeStatus',
                                   'ec2:DescribeVolumes', 'ec2:DetachVolume'
                               ]
                           }]
                       }))

        for cluster in constants.ENVIRONMENTS[
                self.env]['cassandra']['clusters']:
            for _instance in cluster['instances']:

                subnet = [
                    s for s in self.get_subnets('private')
                    if netaddr.IPAddress(_instance['ip']) in netaddr.IPNetwork(
                        s['CidrBlock'])
                ][0]

                service = 'cassandra-{}'.format(cluster['name'])
                role = '-'.join([
                    self.name, cluster['name'], subnet['AvailabilityZone'],
                    _instance['ip']
                ])
                tags = self.get_tags(service_override=service,
                                     role_override=role)

                # Create ENI for this server, and hold onto a Ref for it so we can feed it into the userdata
                uniq_id = hashlib.md5(role.encode('utf-8')).hexdigest()[:10]
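                # The md5-of-role hash yields a short, deterministic suffix, so the
                # CloudFormation logical IDs built from it below (ENI, data volume,
                # launch configuration, ASG) stay stable across template regenerations.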
                eni = ec2.NetworkInterface(
                    self.name + cluster['name'] + "ENI" + uniq_id,
                    Description=
                    'Cassandra: Cluster: {} ENV: {} PrivateSubnet {}'.format(
                        cluster['name'], self.env, subnet['SubnetId']),
                    GroupSet=self.security_groups,
                    PrivateIpAddress=_instance['ip'],
                    SourceDestCheck=True,
                    SubnetId=subnet['SubnetId'],
                    Tags=tags,
                )
                self.add_resource(eni)

                # Add the rootfs
                _block_device_mapping = get_block_device_mapping(
                    self.parameters['InstanceType'].resource['Default'])
                _block_device_mapping += [
                    ec2.BlockDeviceMapping(DeviceName="/dev/xvda",
                                           Ebs=ec2.EBSBlockDevice(
                                               DeleteOnTermination=True,
                                               VolumeSize=cluster.get(
                                                   'rootfs_size', 20),
                                               VolumeType="gp2",
                                           ))
                ]
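                # Assumption: get_block_device_mapping() returns the instance-type
                # specific device mappings as a list; the root EBS volume for
                # /dev/xvda is appended to it here.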

                # Seed the cluster from one node in the remote DC, plus three nodes in this DC
                # We want to avoid making too many nodes into seeds
                if cluster.get('remote_seed'):
                    remote_env_name = cluster['remote_seed']['datacenter']
                    remote_cluster_name = cluster['remote_seed']['cluster']
                    remote_clusters = constants.ENVIRONMENTS[remote_env_name][
                        'cassandra']['clusters']
                    # filter to just the remote cluster in the remote DC and return that one only
                    remote_cluster = list(
                        filter(lambda x: x['name'] == remote_cluster_name,
                               remote_clusters))[0]
                    remote_seeds = [
                        i['ip'] for i in remote_cluster['instances']
                    ][:1]
                    local_seeds = [i['ip'] for i in cluster['instances']][:3]
                    seeds = ','.join(remote_seeds + local_seeds)
                else:
                    # Use the first three cassandra nodes as seeds
                    seeds = ','.join([i['ip']
                                      for i in cluster['instances']][:3])
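                # Illustrative only (hypothetical addresses): with a remote seed the
                # resulting string looks like '10.1.0.10,10.0.0.10,10.0.0.11,10.0.0.12';
                # without one it is simply the first three local instance IPs.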

                if cluster.get('data_volume_size'):
                    # Create the EBS volume
                    data_volume = ec2.Volume(
                        '{}{}DataVolume{}'.format(
                            self.name, cluster['name'], uniq_id
                        ),  # something like 'envnameCassandraappDataVolumec47145e176'
                        Size=cluster.get('data_volume_size', 20),
                        VolumeType='gp2',
                        AvailabilityZone=subnet['AvailabilityZone'],
                        DeletionPolicy='Retain',
                        Tags=tags + [ec2.Tag('Name', role + "-datavol")])
                    self.add_resource(data_volume)
                else:
                    data_volume = None

                # Create the user data in two phases
                # Phase 1: substitute from constants in Rain
                user_data_template = self.get_cloudinit_template(
                    cluster['cassandra_template'],
                    replacements=(('__PROMPT_COLOR__', self.prompt_color()),
                                  ('__CASSANDRA_CLUSTER__', cluster['name']),
                                  ('__CASSANDRA_CLUSTER_OVERRIDE__',
                                   cluster.get('cluster_name_override',
                                               "")), ('__CASSANDRA_SEEDS__',
                                                      seeds), ('__SERVICE__',
                                                               service)))
                # Phase 2: Allow AWS Cloudformation to further substitute Ref()'s in the userdata
                userdata = Base64(
                    Sub(
                        user_data_template.replace(
                            '${', '${!'
                        )  # Replace bash brackets with CFN escaped style
                        .replace(
                            '{#', '${'
                        ),  # Replace rain-style CFN escapes with proper CFN brackets
                        {
                            'CFN_ENI_ID':
                            Ref(eni),
                            'CFN_DATA_EBS_VOLUME_ID':
                            Ref(data_volume) if data_volume else ""
                        }))

                # Create the Launch Configuration / ASG
                _instance_type = cluster.get('instance_type',
                                             Ref(self.instance_type))
                launch_configuration = self.add_resource(
                    autoscaling.LaunchConfiguration(
                        '{}{}LaunchConfiguration{}'.format(
                            self.name, cluster['name'], uniq_id),
                        AssociatePublicIpAddress=False,
                        BlockDeviceMappings=_block_device_mapping,
                        EbsOptimized=True if _instance_type
                        in EBS_OPTIMIZED_INSTANCES else False,
                        ImageId=Ref(self.ami),
                        InstanceType=_instance_type,
                        InstanceMonitoring=False,
                        IamInstanceProfile=Ref(self.instance_profile),
                        KeyName=Ref(self.keypair_name),
                        SecurityGroups=self.security_groups,
                        UserData=userdata))
                self.add_resource(
                    autoscaling.AutoScalingGroup(
                        '{}{}ASGroup{}'.format(self.name, cluster['name'],
                                               uniq_id),
                        AvailabilityZones=[subnet['AvailabilityZone']],
                        HealthCheckType='EC2',
                        LaunchConfigurationName=Ref(launch_configuration),
                        MinSize=1,
                        MaxSize=1,
                        VPCZoneIdentifier=[subnet['SubnetId']],
                        Tags=self.get_autoscaling_tags(
                            service_override=service, role_override=role) +
                        [autoscaling.Tag('Name', role, True)]))