Example #1
 def add_launchconfig_spot(self):
     self.launchconfig_spot = self.template.add_resource(
         autoscaling.LaunchConfiguration(
             "LaunchConfigurationOnSpot",
             UserData=Base64(
                 Join('', [
                     "#!/bin/bash\n", "cfn-signal -e 0",
                     "    --resource AutoscalingGroup", "    --stack ",
                     Ref("AWS::StackName"), "    --region ",
                     Ref("AWS::Region"), "\n",
                     "yum -y install docker htop stress && service docker start\n",
                     "docker run -d -p 80:8080 stealthizer/docker-aws-info\n"
                 ])),
             ImageId=self.amiId,
             KeyName=self.keyname,
             SpotPrice=self.spot_price,
             IamInstanceProfile=Ref("InstanceProfile"),
             BlockDeviceMappings=[
                 ec2.BlockDeviceMapping(
                     DeviceName="/dev/xvda",
                     Ebs=ec2.EBSBlockDevice(VolumeSize="8")),
             ],
             SecurityGroups=[
                 Ref(self.securityGroup),
                 Ref(self.securityGroupLoadBalancer)
             ],
             InstanceType=self.instance_type,
         ))
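The method above comes from a larger stack class, so self.template, self.amiId, self.keyname and the other attributes are defined elsewhere. As a minimal, self-contained sketch of the same LaunchConfiguration/EBSBlockDevice pattern (every value below is a placeholder, not the original stack's):

from troposphere import Base64, Join, Template
from troposphere import autoscaling, ec2

template = Template()
template.add_resource(autoscaling.LaunchConfiguration(
    "LaunchConfigurationOnSpot",
    ImageId="ami-12345678",    # placeholder AMI
    InstanceType="t3.micro",   # placeholder instance type
    SpotPrice="0.05",          # placeholder bid
    UserData=Base64(Join('', ["#!/bin/bash\n", "yum -y install docker\n"])),
    BlockDeviceMappings=[
        ec2.BlockDeviceMapping(
            DeviceName="/dev/xvda",
            Ebs=ec2.EBSBlockDevice(VolumeSize="8")),
    ],
))
print(template.to_json())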
Example #2
def create_launch_template_resource(template,
                                    api_launch_template_name_variable,
                                    api_instance_class_parameter,
                                    ec2_instance_profile_resource,
                                    api_security_group_resource,
                                    ecs_cluster_resource):
    return template.add_resource(
        ec2.LaunchTemplate(
            'LaunchTemplate',
            LaunchTemplateName=api_launch_template_name_variable,
            LaunchTemplateData=ec2.LaunchTemplateData(
                ImageId='ami-0ae254c8a2d3346a7',
                InstanceType=Ref(api_instance_class_parameter),
                IamInstanceProfile=ec2.IamInstanceProfile(
                    Arn=GetAtt(ec2_instance_profile_resource, 'Arn')),
                InstanceInitiatedShutdownBehavior='terminate',
                Monitoring=ec2.Monitoring(Enabled=True),
                SecurityGroups=[Ref(api_security_group_resource)],
                BlockDeviceMappings=[
                    ec2.BlockDeviceMapping(DeviceName='/dev/xvdcz',
                                           Ebs=ec2.EBSBlockDevice(
                                               DeleteOnTermination=True,
                                               VolumeSize=22,
                                               VolumeType='gp2'))
                ],
                UserData=Base64(
                    Join('', [
                        '#!/bin/bash\n', 'echo ECS_CLUSTER=',
                        Ref(ecs_cluster_resource),
                        ' >> /etc/ecs/ecs.config;echo ECS_BACKEND_HOST= >> /etc/ecs/ecs.config;'
                    ])))))
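A rough usage sketch for the helper above; the template, parameter, and resources it expects are created here with hypothetical stand-ins (the real stack defines them elsewhere):

from troposphere import Parameter, Template
from troposphere import ec2, ecs, iam

t = Template()
instance_class = t.add_parameter(Parameter(
    'ApiInstanceClass', Type='String', Default='t3.small'))
profile = t.add_resource(iam.InstanceProfile(
    'ApiInstanceProfile', Roles=['hypothetical-ecs-instance-role']))
sg = t.add_resource(ec2.SecurityGroup(
    'ApiSecurityGroup', GroupDescription='API instances'))
cluster = t.add_resource(ecs.Cluster('ApiCluster'))

create_launch_template_resource(
    t, 'api-launch-template', instance_class, profile, sg, cluster)
print(t.to_json())

Depending on the troposphere release, LaunchTemplateData may validate BlockDeviceMappings against ec2.LaunchTemplateBlockDeviceMapping rather than ec2.BlockDeviceMapping, so the type used in the helper may need adjusting on newer versions.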
Example #3
    def create_template(self):
        t = self.template
        t.add_description("Acceptance Tests for cumulus scaling groups")

        # TODO fix
        # instance = self.name + self.context.environment['env']
        instance = "someinstance"
        # TODO: give to builder
        the_chain = chain.Chain()

        the_chain.add(ingress_rule.IngressRule(
            port_to_open="22",
            cidr="10.0.0.0/8"
        ))

        instance_profile_name = "InstanceProfile" + self.name

        the_chain.add(InstanceProfileRole(
            instance_profile_name=instance_profile_name,
            role=iam.Role(
                "SomeRoleName1",
                AssumeRolePolicyDocument=Policy(
                    Statement=[
                        Statement(
                            Effect=Allow,
                            Action=[AssumeRole],
                            Principal=Principal("Service", ["ec2.amazonaws.com", "s3.amazonaws.com"])
                        )
                    ]
                ),
            )))

        launchConfigName = 'lc' + self.name

        the_chain.add(launch_config.LaunchConfig(asg_name=self.name,
                                                 launch_config_name=launchConfigName,
                                                 meta_data=self.get_metadata(),
                                                 instance_profile_name=instance_profile_name))

        the_chain.add(block_device_data.BlockDeviceData(ec2.BlockDeviceMapping(
            DeviceName="/dev/xvda",
            Ebs=ec2.EBSBlockDevice(
                VolumeSize="40"
            ))))

        the_chain.add(scaling_group.ScalingGroup(
            launch_config_name=launchConfigName,
        ))

        chain_context = chaincontext.ChainContext(
            template=t,
            instance_name=instance
        )

        the_chain.run(chain_context)
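The Policy, Statement, Allow, Principal and AssumeRole names used for the role above match the awacs helper library that is commonly paired with troposphere; assuming that is their origin, the corresponding imports are:

from awacs.aws import Allow, Policy, Principal, Statement
from awacs.sts import AssumeRole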
Example #4
    def create_template(self):
        t = self.template
        t.add_description("Acceptance Tests for cumulus scaling groups")

        the_chain = chain.Chain()

        application_port = "8000"

        the_chain.add(
            launch_config.LaunchConfig(prefix='websitesimple',
                                       vpc_id=Ref('VpcId'),
                                       meta_data=self.get_metadata(),
                                       bucket_name=self.context.bucket_name))

        the_chain.add(
            ingress_rule.IngressRule(port_to_open="22", cidr="10.0.0.0/8"))

        the_chain.add(
            ingress_rule.IngressRule(port_to_open=application_port,
                                     cidr="10.0.0.0/8"))

        the_chain.add(
            block_device_data.BlockDeviceData(
                ec2.BlockDeviceMapping(
                    DeviceName="/dev/xvda",
                    Ebs=ec2.EBSBlockDevice(VolumeSize="40"))))

        the_chain.add(
            target_group.TargetGroup(port=application_port,
                                     vpc_id=Ref("VpcId")))

        the_chain.add(scaling_group.ScalingGroup())

        the_chain.add(
            dns.Dns(base_domain=Ref("BaseDomain"),
                    hosted_zone_id=Ref("AlbCanonicalHostedZoneID"),
                    target=Ref("AlbDNSName"),
                    dns_name=troposphere.Join('', [
                        self.context.namespace,
                        "-websitesimple",
                    ])))

        the_chain.add(alb_port.AlbPort(port_to_open=application_port, ))

        the_chain.add(
            listener_rule.ListenerRule(base_domain_name=Ref("BaseDomain"),
                                       alb_listener_rule=Ref("IAlbListener"),
                                       path_pattern="/*",
                                       priority="2"))

        chain_context = chaincontext.ChainContext(template=t, )

        the_chain.run(chain_context)
Example #5
def my_block_device_mappings_ebs(diskcount, devicenamebase, volumesize, volumetype):
    block_device_mappings_ebs = []
    # Root volume first, then one data volume per disk: /dev/sdb, /dev/sdc, ...
    block_device_mappings_ebs.append(
        my_block_device_mappings_root("/dev/sd", ref_disk_all_root_volumesize, "gp2"))
    for i in range(diskcount):  # range() instead of xrange() so this also runs on Python 3
        block_device_mappings_ebs.append(
            ec2.BlockDeviceMapping(
                DeviceName=devicenamebase + chr(i + 98),
                Ebs=ec2.EBSBlockDevice(
                    VolumeSize=volumesize,
                    VolumeType=volumetype,
                    DeleteOnTermination=True,
                )))
    return block_device_mappings_ebs
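The helper relies on my_block_device_mappings_root and ref_disk_all_root_volumesize, which are defined elsewhere in that template module. A sketch of how it composes, using a hypothetical stand-in for the root-volume helper:

from troposphere import ec2

# Hypothetical stand-in for the root-volume helper referenced above.
def my_block_device_mappings_root(devicenamebase, volumesize, volumetype):
    return ec2.BlockDeviceMapping(
        DeviceName=devicenamebase + "a",
        Ebs=ec2.EBSBlockDevice(VolumeSize=volumesize, VolumeType=volumetype))

ref_disk_all_root_volumesize = "50"

# Three data disks -> /dev/sdb, /dev/sdc, /dev/sdd, plus the root mapping.
mappings = my_block_device_mappings_ebs(3, "/dev/sd", "100", "gp2")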
Example #6
 def __generate_blockmap(self, blockmap=None):
     if blockmap is None:
         blockmap = []
     blockmap = [
         ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                Ebs=ec2.EBSBlockDevice(
                                    VolumeSize=Ref("RootVolSize"),
                                    VolumeType="gp2")),
     ]
     # list(...) keeps this working on Python 3, where dict.values() is a view
     app_config = list(self.config['apps'].values())[0]
     if 'mounts' in app_config:
         for mount in app_config['mounts']:
             blockmap.append(
                 ec2.BlockDeviceMapping(
                     DeviceName=mount['path'],
                     Ebs=ec2.EBSBlockDevice(
                         VolumeSize=Ref("{}VolSize".format(mount['name'])),
                         SnapshotId=If(
                             "{}NotRestoringFromSnapshot".format(
                                 mount['name']), Ref("AWS::NoValue"),
                             Ref("{}SnapID".format(mount['name']))),
                         VolumeType=mount.get('type', 'standard'),
                         DeleteOnTermination=True)))
     return blockmap
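The If(...NotRestoringFromSnapshot..., Ref("AWS::NoValue"), ...) construct above makes SnapshotId optional: when the "not restoring" condition is true, AWS::NoValue drops the property from the rendered template entirely; otherwise the volume is created from the referenced snapshot. A minimal sketch of that pattern with hypothetical names:

from troposphere import Equals, If, Parameter, Ref, Template
from troposphere import ec2

t = Template()
snap = t.add_parameter(Parameter('DataSnapID', Type='String', Default=''))
t.add_condition('DataNotRestoringFromSnapshot', Equals(Ref(snap), ''))

# Ready to drop into an instance's or launch configuration's BlockDeviceMappings list.
mapping = ec2.BlockDeviceMapping(
    DeviceName='/dev/xvdf',
    Ebs=ec2.EBSBlockDevice(
        VolumeSize='100',
        # Omit SnapshotId entirely unless a snapshot ID was supplied.
        SnapshotId=If('DataNotRestoringFromSnapshot',
                      Ref('AWS::NoValue'), Ref(snap))))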
Example #7
    def __init__(self, volume_count, pairing_ratio, backing_volume_type,
                 backing_volume_size, working_volume_type,
                 working_volume_size):

        assert volume_count <= 25, 'Too many volumes specified'

        self.volume_count = volume_count
        self.pairing_ratio = pairing_ratio
        self.backing_volume_type = backing_volume_type
        self.backing_volume_size = backing_volume_size
        self.working_volume_type = working_volume_type
        self.working_volume_size = working_volume_size

        # Integer division: keeps the count an int and lets the assert below
        # catch volume counts that do not split evenly across the pairing ratio.
        self.working_volume_count = self.volume_count // (self.pairing_ratio + 1)
        self.backing_volume_count = \
            self.working_volume_count * self.pairing_ratio
        assert self.volume_count == \
            self.working_volume_count + self.backing_volume_count, \
            'Not all volumes can be used based on the pairing ratio'
        assert self.backing_volume_count == 0 or (
            self.backing_volume_size is not None and self.backing_volume_type
            is not None), 'Backing volumes require type and size'

        if (self.backing_volume_size is not None):
            self.backing_device = ec2.EBSBlockDevice(
                VolumeType=self.backing_volume_type,
                VolumeSize=self.backing_volume_size,
                DeleteOnTermination=True,
            )

        self.working_device = ec2.EBSBlockDevice(
            VolumeType=self.working_volume_type,
            VolumeSize=self.working_volume_size,
            DeleteOnTermination=True,
        )
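The class above only prepares the EBSBlockDevice payloads; it does not show how they are attached to devices. One illustrative way (not taken from the original source) to pair them with sequential device names:

from troposphere import ec2

def to_block_device_mappings(layout, devicenamebase="/dev/sd"):
    """Pair each working/backing device with /dev/sdb, /dev/sdc, ... in order."""
    devices = [layout.working_device] * layout.working_volume_count
    if layout.backing_volume_count:
        devices += [layout.backing_device] * layout.backing_volume_count
    return [
        ec2.BlockDeviceMapping(DeviceName=devicenamebase + chr(ord('b') + i), Ebs=ebs)
        for i, ebs in enumerate(devices)
    ]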
Example #8
    'DNS:*.*.compute.internal',
    'DNS:*.ec2.internal',
    'IP:10.3.0.1',
]
(API_KEY_PEM, API_CERT_PEM, API_KEY,
 API_CERT) = create_cert('kube-apiserver',
                         san_list=api_san_list,
                         sign_key=CA_KEY,
                         sign_cert=CA_CERT)

LAUNCH_CONFIGURATION = TEMPLATE.add_resource(
    autoscaling.LaunchConfiguration(
        'LaunchConfiguration',
        BlockDeviceMappings=[
            ec2.BlockDeviceMapping(DeviceName='/dev/sdb',
                                   Ebs=ec2.EBSBlockDevice(
                                       VolumeSize=Ref(DOCKER_GRAPH_SIZE), )),
        ],
        IamInstanceProfile=Ref(INSTANCE_PROFILE),
        ImageId=FindInMap('RegionMap', Ref(AWS_REGION), 'AMI'),
        InstanceType=Ref(INSTANCE_TYPE),
        KeyName=Ref(KEY_NAME),
        SecurityGroups=[
            Ref(SECURITY_GROUP),
            Ref(API_SERVER_SECURITY_GROUP),
            Ref(CONSUL_HTTP_API_SECURITY_GROUP)
        ],
        UserData=Base64(
            Join('', [
                '#cloud-config\n\n',
                'coreos:\n',
                '  update:\n',
Example #9
    def create_template(self):
        t = self.template
        t.add_description("Acceptance Tests for cumulus scaling groups")

        # TODO fix
        # instance = self.name + self.context.environment['env']
        # TODO: give to builder
        the_chain = chain.Chain()

        application_port = "8000"

        instance_profile_name = "InstanceProfile" + self.name

        the_chain.add(
            InstanceProfileRole(
                instance_profile_name=instance_profile_name,
                role=iam.Role(
                    "SomeRoleName1",
                    AssumeRolePolicyDocument=Policy(Statement=[
                        Statement(Effect=Allow,
                                  Action=[AssumeRole],
                                  Principal=Principal("Service", [
                                      "ec2.amazonaws.com", "s3.amazonaws.com"
                                  ]))
                    ]),
                )))

        the_chain.add(
            launch_config.LaunchConfig(meta_data=self.get_metadata(),
                                       vpc_id=Ref("VpcId")))

        the_chain.add(
            block_device_data.BlockDeviceData(
                ec2.BlockDeviceMapping(
                    DeviceName="/dev/xvda",
                    Ebs=ec2.EBSBlockDevice(VolumeSize="40"))))

        the_chain.add(
            target_group.TargetGroup(port=application_port,
                                     vpc_id=Ref("VpcId")))

        the_chain.add(scaling_group.ScalingGroup())

        the_chain.add(
            ingress_rule.IngressRule(port_to_open="22", cidr="10.0.0.0/8"))

        the_chain.add(
            ingress_rule.IngressRule(port_to_open=application_port,
                                     cidr="10.0.0.0/8"))

        the_chain.add(
            dns.Dns(
                base_domain=Ref("BaseDomain"),
                hosted_zone_id=Ref("AlbCanonicalHostedZoneID"),
                dns_name=Ref("AlbDNSName"),
            ))

        the_chain.add(
            alb_port.AlbPort(
                port_to_open=application_port,
                alb_sg_name="AlbSg",
            ))

        the_chain.add(
            listener_rule.ListenerRule(base_domain_name=Ref("BaseDomain"),
                                       alb_listener_rule=Ref("IAlbListener"),
                                       path_pattern="/*",
                                       priority="2"))

        chain_context = chaincontext.ChainContext(
            template=t, instance_name=instance_profile_name)

        the_chain.run(chain_context)
Example #10
def create_instance():
    return ec2.Instance(
        'devserver',
        BlockDeviceMappings=[
            ec2.BlockDeviceMapping(
                DeviceName='/dev/xvda',
                Ebs=ec2.EBSBlockDevice(
                    VolumeSize=100,
                    VolumeType='gp2',
                    DeleteOnTermination=True,
                ),
            ),
        ],
        ImageId=Ref('amiId'),
        InstanceType='t2.medium',
        KeyName=Ref('keypair'),
        SecurityGroupIds=[Ref('secgrpDevServer')],
        SubnetId=Ref('subnetA'),
        Tags=_tags(),
        UserData=Base64(
            textwrap.dedent(r'''
            #!/bin/bash -ex

            exec > >(tee ~/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
            echo BEGIN
            date '+%Y-%m-%d %H:%M:%S'

            # --- System Config
            hostnamectl set-hostname gwa-dev
            yum install -y git jq tree vim

            # --- UX
            cat <<-EOT > /etc/profile.d/ux.sh
                alias vi='vim'
                alias tree='tree -C'
            EOT
            cat <<-EOT >> /etc/vimrc
                set autoindent
                set modeline
                set tabstop=4
                set listchars=tab:——
            EOT

            # --- Docker
            yum install -y docker
            systemctl enable docker
            systemctl start docker

            usermod -aG docker ec2-user

            docker network create geowave-admin

            # --- Jenkins
            sudo wget -O /etc/yum.repos.d/jenkins.repo https://pkg.jenkins.io/redhat-stable/jenkins.repo
            sudo rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key
            yum install -y jenkins java-1.8.0-openjdk

            usermod -aG docker jenkins

            systemctl enable jenkins
            systemctl start jenkins

            echo END
            date '+%Y-%m-%d %H:%M:%S'
        ''').lstrip()),
    )
Example #11
)


dhis2_instance = t.add_resource(
    ec2.Instance(
        "DHIS2Instance",

        # AdditionalInfo="",
        # Affinity="",
        # AvailabilityZone="",
        BlockDeviceMappings=[
            ec2.BlockDeviceMapping(
                "BlockDeviceMapping",
                DeviceName="/dev/sda1",
                Ebs=ec2.EBSBlockDevice(
                    "BootVolume",
                    VolumeSize="16",
                ),
            ),
        ],
        # CreditSpecification={},
        # DisableApiTermination=False,
        # EbsOptimized=False,
        # ElasticGpuSpecifications=[],
        # ElasticInferenceAccelerators=[],
        # HostId="",
        # IamInstanceProfile="",
        ImageId=Ref(imageid_param),
        # InstanceInitiatedShutdownBehavior="",
        InstanceType="t3.medium",
        # Ipv6AddressCount=0,
        # Ipv6Addresses=[],
Example #12
)

bastion_instance = ec2.Instance(
    "BastionInstance",
    template=template,
    ImageId=Ref(bastion_ami),
    InstanceType=Ref(bastion_instance_type),
    KeyName=Ref(bastion_key_name),
    SecurityGroupIds=[Ref(bastion_security_group)],
    SubnetId=Ref(public_subnet_a),
    BlockDeviceMappings=[
        ec2.BlockDeviceMapping(
            DeviceName="/dev/sda1",
            Ebs=ec2.EBSBlockDevice(
                VolumeType="gp2",
                VolumeSize=8,
                Encrypted=use_aes256_encryption,
                KmsKeyId=If(use_cmk_arn, Ref(cmk_arn), Ref("AWS::NoValue")),
            ),
        ),
    ],
    Condition=bastion_type_and_ami_set,
    Tags=[
        {
            "Key": "Name",
            "Value": Join("-", [Ref("AWS::StackName"), "bastion"]),
        },
        {
            "Key": "aws-web-stacks:role",
            "Value": "bastion",
        },
    ],
Example #13
def emit_configuration():
    # Parameters here
    jenkins_instance_class = template.add_parameter(
        Parameter(
            'JenkinsInstanceType',
            Type='String',
            Default='t2.micro',
            Description='Chef jenkins instance type',
            AllowedValues=cfn.usable_instances(),
            ConstraintDescription='Instance size must be a valid instance type'
        ))

    # jenkins IAM role
    jenkins_role_name = '.'.join(['jenkins', CLOUDNAME, CLOUDENV])
    jenkins_iam_role = template.add_resource(
        iam.Role('JenkinsIamRole',
                 AssumeRolePolicyDocument=ASSUME_ROLE_POLICY,
                 Path="/",
                 Policies=[
                     iam.Policy(PolicyName='JenkinsPolicy',
                                PolicyDocument=json.loads(
                                    cfn.load_template(
                                        "jenkins_policy.json.j2", {
                                            "env": CLOUDENV,
                                            "cloud": CLOUDNAME,
                                            "region": "us-east-1"
                                        }))),
                     iam.Policy(PolicyName='JenkinsDefaultPolicy',
                                PolicyDocument=json.loads(
                                    cfn.load_template(
                                        "default_policy.json.j2", {
                                            "env": CLOUDENV,
                                            "cloud": CLOUDNAME,
                                            "region": "us-east-1"
                                        })))
                 ],
                 DependsOn=cfn.vpcs[0].title))

    jenkins_instance_profile = template.add_resource(
        iam.InstanceProfile("JenkinsInstanceProfile",
                            Path="/",
                            Roles=[Ref(jenkins_iam_role)],
                            DependsOn=jenkins_iam_role.title))

    jenkins_user_data = cfn.load_template("default-init.bash.j2", {
        "env": CLOUDENV,
        "cloud": CLOUDNAME,
        "deploy": "jenkins"
    })

    ingress_rules = [
        ec2.SecurityGroupRule(IpProtocol=p[0],
                              CidrIp=DEFAULT_ROUTE,
                              FromPort=p[1],
                              ToPort=p[1])
        for p in [('tcp', 22), ('tcp', 80), ('tcp', 443)]
    ]

    security_group = template.add_resource(
        ec2.SecurityGroup(
            "JenkinsSecurityGroup",
            GroupDescription='Security Group for jenkins instances',
            VpcId=Ref(cfn.vpcs[0]),
            SecurityGroupIngress=ingress_rules,
            DependsOn=cfn.vpcs[0].title,
            Tags=Tags(Name='.'.join(['jenkins-sg', CLOUDNAME, CLOUDENV]))))

    launch_cfg = template.add_resource(
        autoscaling.LaunchConfiguration(
            "JenkinsLaunchConfiguration",
            ImageId=FindInMap('RegionMap', Ref("AWS::Region"),
                              int(cfn.Amis.EBS)),
            InstanceType=Ref(jenkins_instance_class),
            IamInstanceProfile=Ref(jenkins_instance_profile),
            AssociatePublicIpAddress=not USE_PRIVATE_SUBNETS,
            BlockDeviceMappings=[
                ec2.BlockDeviceMapping(
                    DeviceName='/dev/sda1',
                    Ebs=ec2.EBSBlockDevice(DeleteOnTermination=True))
            ],
            KeyName=Ref(cfn.keyname),
            SecurityGroups=[Ref(security_group)],
            DependsOn=[jenkins_instance_profile.title, security_group.title],
            UserData=Base64(jenkins_user_data)))

    asg_name = '.'.join(['jenkins', CLOUDNAME, CLOUDENV])
    asg = template.add_resource(
        autoscaling.AutoScalingGroup(
            "JenkinsASG",
            AvailabilityZones=cfn.get_asg_azs(),
            DesiredCapacity="1",
            LaunchConfigurationName=Ref(launch_cfg),
            MinSize="1",
            MaxSize="1",
            NotificationConfiguration=autoscaling.NotificationConfiguration(
                TopicARN=Ref(cfn.alert_topic),
                NotificationTypes=[
                    EC2_INSTANCE_TERMINATE, EC2_INSTANCE_TERMINATE_ERROR,
                    EC2_INSTANCE_LAUNCH, EC2_INSTANCE_LAUNCH_ERROR
                ]),
            VPCZoneIdentifier=[
                Ref(sn) for sn in cfn.get_vpc_subnets(cfn.vpcs[0],
                                                      cfn.SubnetTypes.PLATFORM)
            ]))
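The EC2_INSTANCE_* notification types referenced in the AutoScalingGroup are module-level constants in troposphere.autoscaling, so (assuming they are not redefined locally) the matching import is:

from troposphere.autoscaling import (
    EC2_INSTANCE_LAUNCH, EC2_INSTANCE_LAUNCH_ERROR,
    EC2_INSTANCE_TERMINATE, EC2_INSTANCE_TERMINATE_ERROR)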
Example #14
    Filters=[{'Name': 'tag-value','Values': ['UatDevPublicSecurityGroup']}]
    )

ec2_instance = template.add_resource(ec2.Instance(
    "NagiosServer",
    ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
    KeyName='ec2deployuatdev',
    SubnetId=subnetdata['Subnets'][0]['SubnetId'],
    SecurityGroupIds=[sgdata['SecurityGroups'][0]['GroupId'],],
    Tenancy='default',
    InstanceType="t2.micro",
    BlockDeviceMappings=[
        ec2.BlockDeviceMapping(
            DeviceName="/dev/sda1",
            Ebs=ec2.EBSBlockDevice(
                VolumeType="gp2",
                VolumeSize="20"
            )
        ),
    ],
    Tags=Tags(**{
        'Name': 'NagiosServer',
        'Environment': 'uatdev',
        })
))

template.add_output([
    Output(
        "InstanceId",
        Description="InstanceId of the newly created EC2 instance",
        Value=Ref(ec2_instance),
    ),
Example #15
    def instance(self, index):

        hosts = ", ".join(["\"node{node}.{stack_name}.{domain}:10101\"".format(node=node, stack_name='${AWS::StackName}', domain=self.domain) for node in range(self.cluster_size)])
        internal_hosts = ", ".join(["\"node{node}.{stack_name}.{domain}:12000\"".format(node=node, stack_name='${AWS::StackName}', domain=self.domain) for node in range(self.cluster_size)])
        config_file = dedent('''
            data-dir = "/home/{username}/pilosa/data1"
            bind = "node{node}.{stack_name}.{domain}:10101"
            log-path = "/home/ubuntu/pilosa.log"

            [cluster]
            replicas = {replicas}
            internal-port = 12000
            type = "http"
            hosts = [{hosts}]
            internal-hosts = [{internal_hosts}]
            ''')[1:].format(node=index, replicas=self.replicas, stack_name='${AWS::StackName}', domain=self.domain, username=self.username, hosts=hosts, internal_hosts=internal_hosts)

        user_data = dedent('''
                {common}

                # update open file limits
                cat >> /etc/security/limits.conf <<- EOF
                * soft nofile 262144
                * hard nofile 262144
                * hard memlock unlimited
                * soft memlock unlimited
                EOF

                # install pilosa
                go get -u github.com/pilosa/pilosa
                cd $GOPATH/src/github.com/pilosa/pilosa
                make install

                # set up pilosa config file
                cat > /etc/pilosa.cfg <<- EOF
                {config_file}
                EOF

                # clean up root's mess
                chown -R {username}:{username} /home/{username}

                # all output should go to pilosa.log - pilosa.out should be empty
                sudo -u {username} PATH=$PATH nohup pilosa server --config=/etc/pilosa.cfg &> /home/{username}/pilosa.out &
                '''[1:]).format(config_file=config_file, common=self.common_user_data, username=self.username)

        return ec2.Instance(
            'PilosaInstance{}'.format(index),
            ImageId = Ref(self.ami),
            BlockDeviceMappings=[ec2.BlockDeviceMapping(
                DeviceName="/dev/sda1",
                Ebs=ec2.EBSBlockDevice(VolumeSize=Ref(self.volume_size), VolumeType=Ref(self.volume_type))
            )],
            InstanceType = Ref(self.instance_type),
            KeyName = Ref(self.key_pair),
            IamInstanceProfile=Ref(self.instance_profile),
            NetworkInterfaces=[
                ec2.NetworkInterfaceProperty(
                    GroupSet=[Ref(self.instance_security_group.title)],
                    AssociatePublicIpAddress='true',
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(self.subnet))],

            UserData = Base64(Sub(user_data)),
        )
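The '${AWS::StackName}' placeholders embedded in config_file and user_data are deliberately left for CloudFormation: wrapping the final string in Sub, as the UserData line does, resolves them at deploy time. A minimal illustration:

from troposphere import Base64, Sub

user_data = Base64(Sub('#!/bin/bash\n'
                       'echo "running in ${AWS::StackName}" > /etc/stack-name\n'))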
Example #16
 def _launch_config(self):
     return LaunchConfiguration(
         "LaunchConfiguration",
         Metadata=autoscaling.Metadata(
             cloudformation.Init({
                 "config": cloudformation.InitConfig(
                     files=cloudformation.InitFiles({
                         '/etc/cfn/cfn-hup.conf': cloudformation.InitFile(
                             content=Join('', [
                                 '[main]\n',
                                 'stack=', self.ref_stack_id, '\n',
                                 'region=', self.ref_region, '\n',
                             ]),
                             mode='000400',
                             owner='root',
                             group='root'),
                         '/etc/cfn/hooks.d/cfn-auto-reloader.conf': cloudformation.InitFile(
                             content=Join('', [
                                 '[cfn-auto-reloader-hook]\n',
                                 'triggers=post.update\n',
                                 'path=Resources.WebServerInstance.Metadata.AWS::CloudFormation::Init\n',
                                 'action=/opt/aws/bin/cfn-init -v ',
                                 '         --stack ', self.ref_stack_name,
                                 '         --resource WebServerInstance ',
                                 '         --region ', self.ref_region, '\n',
                                 'runas=root\n',
                             ])),
                     }),
                     services={
                         "sysvinit": cloudformation.InitServices({
                             "rsyslog": cloudformation.InitService(
                                 enabled=True,
                                 ensureRunning=True,
                                 files=['/etc/rsyslog.d/20-somethin.conf']),
                         }),
                     }),
             })),
         UserData=Base64(Join('', self.config['app_instance_user_data'])),
         ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
         KeyName=self.config['sshkey'],
         IamInstanceProfile=Ref(self.instance_iam_role_instance_profile),
         BlockDeviceMappings=[
             ec2.BlockDeviceMapping(DeviceName=self.config['device_name'],
                                    Ebs=ec2.EBSBlockDevice(VolumeSize="8")),
         ],
         SecurityGroups=self.config['app_sg'],
         InstanceType=self.config['instance_type'],
     )
Example #17
    def configure(self):
        config = constants.ENVIRONMENTS[self.env]['mesos']['agent']
        self.defaults = {
            'instance_type': config.get('instance_type', 'r5.xlarge')
        }

        self.add_description('Sets up Mesos Agents in all Zones')
        self.get_standard_parameters()
        self.get_standard_policies()
        self.get_default_security_groups()

        _global_config = constants.ENVIRONMENTS[self.env]

        self.ami = self.add_parameter(
            Parameter(
                'AMI',
                Type='String',
                Description='AMI ID for instances',
                Default=get_latest_ami_id(self.region, 'ivy-mesos', _global_config.get('ami_owner', 'self'))
            )
        )

        # Mesos Agent Security Group
        self.mesos_agent_security_group = self.add_resource(
            ec2.SecurityGroup(
                'MesosAgentSecurityGroup',
                VpcId=self.vpc_id,
                GroupDescription='Security Group for MesosAgent Instances',
                SecurityGroupIngress=[
                    # public http via ELB
                    {'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80, 'CidrIp': self.vpc_cidr},
                    # internal service SSL direct
                    {'IpProtocol': 'tcp', 'FromPort': 443, 'ToPort': 443, 'CidrIp': self.vpc_cidr},
                    # host-network services (tcp)
                    {'IpProtocol': 'tcp', 'FromPort': 5000, 'ToPort': 5049, 'CidrIp': self.vpc_cidr},
                    # host-network services (udp)
                    {'IpProtocol': 'udp', 'FromPort': 5000, 'ToPort': 5049, 'CidrIp': self.vpc_cidr},
                    # mesos agent api
                    {'IpProtocol': 'tcp', 'FromPort': 5050, 'ToPort': 5051, 'CidrIp': self.vpc_cidr},
                    # internal http-alt direct
                    {'IpProtocol': 'tcp', 'FromPort': 8000, 'ToPort': 8000, 'CidrIp': self.vpc_cidr},
                    # internal http via ELB
                    {'IpProtocol': 'tcp', 'FromPort': 8080, 'ToPort': 8080, 'CidrIp': self.vpc_cidr},
                    # internal http-alt direct
                    {'IpProtocol': 'tcp', 'FromPort': 9090, 'ToPort': 9090, 'CidrIp': self.vpc_cidr},
                    # mesos tasks (udp)
                    {'IpProtocol': 'udp', 'FromPort': 31000, 'ToPort': 32000, 'CidrIp': self.vpc_cidr},
                    # mesos tasks (tcp)
                    {'IpProtocol': 'tcp', 'FromPort': 31000, 'ToPort': 32000, 'CidrIp': self.vpc_cidr}
                ]
            )
        )
        self.add_resource(
            ec2.SecurityGroupIngress(
                'MesosAgentIngressSecurityGroup',
                GroupId=Ref(self.mesos_agent_security_group),
                IpProtocol='-1',
                FromPort=-1,
                ToPort=-1,
                SourceSecurityGroupId=Ref(self.mesos_agent_security_group)
                # All Mesos agents can access all ports on each other
            )
        )
        self.add_security_group(Ref(self.mesos_agent_security_group))

        # Security group for the internet-facing (external) ELBs - not added to the mesos agents themselves
        self.elb_external_security_group = self.add_resource(
            ec2.SecurityGroup(
                'MesosAgentELBExternalSecurityGroup',
                VpcId=self.vpc_id,
                GroupDescription='External Security Group for MesosAgent ELB Instances',
                SecurityGroupIngress=[
                    {'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80, 'CidrIp': '0.0.0.0/0'},  # http
                    {'IpProtocol': 'tcp', 'FromPort': 443, 'ToPort': 443, 'CidrIp': '0.0.0.0/0'},  # https
                    {'IpProtocol': 'tcp', 'FromPort': 8443, 'ToPort': 8443, 'CidrIp': '0.0.0.0/0'},  # https-alt
                    {'IpProtocol': 'icmp', 'FromPort': -1, 'ToPort': -1, 'CidrIp': '0.0.0.0/0'}  # ping (health checks)
                ]
            )
        )

        #
        # Docker roles
        #

        # Allow assume /docker roles by ec2metaproxy
        self.add_iam_policy(
            iam.Policy(
                PolicyName='AssumeDockerRoles',
                PolicyDocument={
                    'Statement': [
                        {
                            'Effect': 'Allow',
                            'Action': ["sts:AssumeRole"],
                            "Resource": {
                                "Fn::Join": [
                                    "",
                                    ["arn:{}:iam::".format(self.get_partition()), {"Ref": "AWS::AccountId"}, ":role/docker/*"]
                                ]
                            },
                        }
                    ]
                }
            )
        )
        # Add docker roles to assumable roles list
        for r in self.generate_docker_roles():
            self.add_resource(r)

        #
        # Load Balancers
        #

        lb_type = config.get('lb_type', 'classic')
        elb_log_bucket = config.get('log_bucket', '{}-{}-logs'.format(constants.TAG, self.env))

        if lb_type == 'classic':
            internal_elb = self.add_resource(
                self.generate_load_balancer(
                    "{}MesosAgentInternalELB".format(self.env),
                    "internal",
                    8080,
                    constants.SSL_CERTIFICATES[config['private_elb_cert']]['Arn'],
                    elb_log_bucket
                )
            )

            external_elb = self.add_resource(
                self.generate_load_balancer(
                    "{}MesosAgentExternalELB".format(self.env),
                    "internet-facing",
                    80,
                    constants.SSL_CERTIFICATES[config['public_elb_cert']]['Arn'],
                    elb_log_bucket
                )
            )
        elif lb_type == 'application':
            internal_elb, internal_target_group = self.generate_app_load_balancer(
                "{}MesosAgentInternalALB".format(self.env),
                "internal",
                8080,
                constants.SSL_CERTIFICATES[config['private_elb_cert']]['Arn'],
                elb_log_bucket
            )
            self.add_resource(internal_elb)
            self.add_resource(internal_target_group)

            external_elb, external_target_group = self.generate_app_load_balancer(
                "{}MesosAgentExternalALB".format(self.env),
                "internet-facing",
                80,
                constants.SSL_CERTIFICATES[config['public_elb_cert']]['Arn'],
                elb_log_bucket
            )
            self.add_resource(external_elb)
            self.add_resource(external_target_group)

        # extra public load balancers (for SSL termination, ELB doesn't do SNI)
        extra_public_load_balancers = []
        for lb_config in config.get('extra_public_load_balancers', []):
            if lb_type == 'classic':
                extra_public_load_balancers.append(Ref(self.add_resource(
                    self.generate_load_balancer(
                        "{}{}MesosAgentExternalELB".format(self.env, lb_config['name']),
                        "internet-facing",
                        80,
                        constants.SSL_CERTIFICATES[lb_config['cert']]['Arn'],
                        elb_log_bucket
                    )
                )))
            elif lb_type == 'application':
                _extra_public_lb, _extra_external_tg = self.generate_app_load_balancer(
                    "{}{}MesosAgentExternalALB".format(self.env, lb_config['name']),
                    "internet-facing",
                    80,
                    constants.SSL_CERTIFICATES[lb_config['cert']]['Arn'],
                    elb_log_bucket
                )
                self.add_resource(_extra_public_lb)
                extra_public_load_balancers.append(Ref(self.add_resource(_extra_external_tg)))

        #
        # Instances
        #

        # Add docker volume
        block_device_mapping = get_block_device_mapping(self.parameters['InstanceType'].resource['Default'])
        block_device_mapping.extend([
            ec2.BlockDeviceMapping(
                DeviceName="/dev/xvda",  # rootfs
                Ebs=ec2.EBSBlockDevice(
                    DeleteOnTermination=True,
                    VolumeSize=config.get('rootfs_size', 50),
                    VolumeType="gp2"
                )
            ),
            ec2.BlockDeviceMapping(
                DeviceName="/dev/xvdb",
                Ebs=ec2.EBSBlockDevice(
                    DeleteOnTermination=True,
                    VolumeSize=config.get('dockervol_size', 100),
                    VolumeType=config.get('dockervol_type', 'gp2')
                )
            )
        ])

        # Launch configurations
        preferred_only = config.get('preferred_placement', False)

        if lb_type == 'classic':
            # Private ASG
            self.generate_asg("private",
                              count=config['count'].get('private', 2),
                              block_mapping=block_device_mapping,
                              load_balancers=[Ref(internal_elb), Ref(external_elb)] + extra_public_load_balancers,
                              preferred_subnets_only=preferred_only
                              )

            # Public ASG
            self.generate_asg("public",
                              count=config['count'].get('public', 0),
                              block_mapping=block_device_mapping,
                              load_balancers=[Ref(internal_elb), Ref(external_elb)] + extra_public_load_balancers,
                              preferred_subnets_only=preferred_only
                              )
        elif lb_type == 'application':
            # Private ASG
            self.generate_asg("private",
                              count=config['count'].get('private', 2),
                              block_mapping=block_device_mapping,
                              target_group_arns=[Ref(internal_target_group), Ref(external_target_group)] + extra_public_load_balancers,
                              preferred_subnets_only=preferred_only
                              )

            # Public ASG
            self.generate_asg("public",
                              count=config['count'].get('public', 0),
                              block_mapping=block_device_mapping,
                              target_group_arns=[Ref(internal_target_group), Ref(external_target_group)] + extra_public_load_balancers,
                              preferred_subnets_only=preferred_only
                              )

        #
        # DNS Records
        #

        if self.get_partition() != 'aws-us-gov':
            zone = constants.ENVIRONMENTS[self.env]['route53_zone']
            self.add_resource(
                route53.RecordSetGroup(
                    'ELBRoute53',
                    HostedZoneName=zone,
                    RecordSets=[
                        route53.RecordSet(
                            Name='internal.{}'.format(zone)[:-1],
                            ResourceRecords=[GetAtt(internal_elb, 'DNSName')],
                            Type='CNAME',
                            TTL=300
                        ),
                        route53.RecordSet(
                            Name='external.{}'.format(zone)[:-1],
                            ResourceRecords=[GetAtt(external_elb, 'DNSName')],
                            Type='CNAME',
                            TTL=300
                        )
                    ]
                )
            )
Example #18
def main():

    t = Template()
    t.add_version("2010-09-09")
    t.add_description(
        "Currently supporting RHEL/CentOS 7.5.  Setup IAM role and security groups, "
        "launch instance, create/attach 10 EBS volumes, install/fix ZFS "
        "(http://download.zfsonlinux.org/epel/zfs-release.el7_5.noarch.rpm), "
        "create zfs RAID6 pool, setup NFS server, export NFS share")

    InstUserData = [
        '#!/usr/bin/env bash\n',
        '\n',
        'set -x\n',
        '\n',
        '##exit 0\n',  # use this to disable all user-data and bring up files
        '\n',
        'zfs_pool_name="',
        Ref('ZfsPool'),
        '"\n',
        'zfs_mount_point="',
        Ref('ZfsMountPoint'),
        '"\n',
        'nfs_cidr_block="',
        Ref('NFSCidr'),
        '"\n',
        'nfs_opts="',
        Ref('NFSOpts'),
        '"\n',
        'my_wait_handle="',
        Ref('NFSInstanceWaitHandle'),
        '"\n',
        '\n',
    ]

    with open(
            '_include/Tropo_build_zfs_export_nfs.sh',
            'r',
    ) as ud_file:
        user_data_file = ud_file.readlines()

    for l in user_data_file:
        InstUserData.append(l)

    t.add_metadata({
        'AWS::CloudFormation::Interface': {
            'ParameterGroups': [{
                'Label': {
                    'default': 'Instance Configuration'
                },
                'Parameters': [
                    "OperatingSystem", "VPCId", "Subnet", "UsePublicIp",
                    "CreateElasticIP", "EC2KeyName", "NFSInstanceType",
                    "SshAccessCidr", "ExistingSecurityGroup",
                    "ExistingPlacementGroup", "S3BucketName"
                ]
            }, {
                'Label': {
                    'default': 'Storage Options - Required'
                },
                'Parameters': [
                    "RAIDLevel", "VolumeSize", "VolumeType", "EBSVolumeType",
                    "VolumeIops"
                ]
            }, {
                'Label': {
                    'default': 'ZFS Pool and FS Options - Required'
                },
                'Parameters': ["ZfsPool", "ZfsMountPoint"]
            }, {
                'Label': {
                    'default': 'NFS Options - Required'
                },
                'Parameters': ["NFSCidr", "NFSOpts"]
            }],
            'ParameterLabels': {
                'OperatingSystem': {
                    'default': 'Operating System of AMI'
                },
                'VPCId': {
                    'default': 'VPC ID'
                },
                'Subnet': {
                    'default': 'Subnet ID'
                },
                'UsePublicIp': {
                    'default': 'Assign a Public IP '
                },
                'CreateElasticIP': {
                    'default': 'Create and use an EIP '
                },
                'EC2KeyName': {
                    'default': 'EC2 Key Name'
                },
                'NFSInstanceType': {
                    'default': 'Instance Type'
                },
                'SshAccessCidr': {
                    'default': 'SSH Access CIDR Block'
                },
                'ExistingSecurityGroup': {
                    'default': 'OPTIONAL:  Existing Security Group'
                },
                'ExistingPlacementGroup': {
                    'default': 'OPTIONAL:  Existing Placement Group'
                },
                'S3BucketName': {
                    'default': 'Optional S3 Bucket Name'
                },
                'RAIDLevel': {
                    'default': 'RAID Level'
                },
                'VolumeSize': {
                    'default': 'Volume size of the EBS vol'
                },
                'VolumeType': {
                    'default': 'Volume type of the EBS vol'
                },
                'EBSVolumeType': {
                    'default': 'Volume type of the EBS vol'
                },
                'VolumeIops': {
                    'default': 'IOPS for each EBS vol (only for io1)'
                },
                'ZfsPool': {
                    'default': 'ZFS pool name'
                },
                'ZfsMountPoint': {
                    'default': 'Mount Point'
                },
                'NFSCidr': {
                    'default': 'NFS CIDR block for mounts'
                },
                'NFSOpts': {
                    'default': 'NFS options'
                },
            }
        }
    })

    EC2KeyName = t.add_parameter(
        Parameter(
            'EC2KeyName',
            Type="AWS::EC2::KeyPair::KeyName",
            Description=
            "Name of an existing EC2 KeyPair to enable SSH access to the instance.",
            ConstraintDescription="REQUIRED: Must be a valud EC2 key pair"))

    OperatingSystem = t.add_parameter(
        Parameter('OperatingSystem',
                  Type="String",
                  Description="Operating System",
                  Default="centos7",
                  AllowedValues=[
                      "alinux2",
                      "centos7",
                      "rhel7",
                  ],
                  ConstraintDescription="Must be: alinux2, centos7, rhel7"))

    NFSInstanceType = t.add_parameter(
        Parameter(
            'NFSInstanceType',
            Type="String",
            Description="NFS instance type",
            Default="r4.16xlarge",
            AllowedValues=[
                "m4.16xlarge", "m4.10xlarge", "r4.16xlarge", "c8.8xlarge"
            ],
            ConstraintDescription="Must an EC2 instance type from the list"))

    VolumeType = t.add_parameter(
        Parameter(
            'VolumeType',
            Type="String",
            Description="Type of EBS volume",
            Default="EBS",
            AllowedValues=["EBS", "InstanceStore"],
            ConstraintDescription="Volume type has to EBS or InstanceStore"))

    EBSVolumeType = t.add_parameter(
        Parameter('EBSVolumeType',
                  Description="Type of EBS volumes to create",
                  Type="String",
                  Default="io1",
                  ConstraintDescription="Must be a either: io1, gp2, st1",
                  AllowedValues=["io1", "gp2", "st1"]))

    VolumelSize = t.add_parameter(
        Parameter('VolumeSize',
                  Type="Number",
                  Default="500",
                  Description="Volume size in GB"))

    VolumeIops = t.add_parameter(
        Parameter('VolumeIops',
                  Type="Number",
                  Default="20000",
                  Description="IOPS for the EBS volume"))

    RAIDLevel = t.add_parameter(
        Parameter(
            'RAIDLevel',
            Description="RAID Level, currently only 6 (8+2p) is supported",
            Type="String",
            Default="0",
            AllowedValues=["0"],
            ConstraintDescription="Must be 0"))

    ZfsPool = t.add_parameter(
        Parameter('ZfsPool',
                  Description="ZFS pool name",
                  Type="String",
                  Default="v01"))

    ZfsMountPoint = t.add_parameter(
        Parameter(
            'ZfsMountPoint',
            Description=
            "ZFS mount point, absolute path will be /pool_name/mount_point (e.g. /v01/testzfs)",
            Type="String",
            Default="testzfs"))

    VPCId = t.add_parameter(
        Parameter('VPCId',
                  Type="AWS::EC2::VPC::Id",
                  Description="VPC Id for this instance"))

    ExistingPlacementGroup = t.add_parameter(
        Parameter('ExistingPlacementGroup',
                  Type="String",
                  Description="OPTIONAL:  Existing placement group"))

    Subnet = t.add_parameter(
        Parameter('Subnet',
                  Type="AWS::EC2::Subnet::Id",
                  Description="Subnet IDs"))

    ExistingSecurityGroup = t.add_parameter(
        Parameter(
            'ExistingSecurityGroup',
            Type="AWS::EC2::SecurityGroup::Id",
            Description=
            "OPTIONAL: Choose an existing Security Group ID, e.g. sg-abcd1234")
    )

    UsePublicIp = t.add_parameter(
        Parameter(
            'UsePublicIp',
            Type="String",
            Description="Should a public IP address be given to the instance",
            Default="true",
            ConstraintDescription="true/talse",
            AllowedValues=["true", "false"]))

    CreateElasticIP = t.add_parameter(
        Parameter(
            'CreateElasticIP',
            Type="String",
            Description=
            "Create an Elasic IP address, that will be assinged to an instance",
            Default="true",
            ConstraintDescription="true/false",
            AllowedValues=["true", "false"]))

    S3BucketName = t.add_parameter(
        Parameter('S3BucketName',
                  Type="String",
                  Description="S3 bucket to allow this instance read access."))

    SshAccessCidr = t.add_parameter(
        Parameter(
            'SshAccessCidr',
            Type="String",
            Description="CIDR Block for SSH access, default 0.0.0.0/0",
            Default="0.0.0.0/0",
            AllowedPattern=
            "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))

    NFSCidr = t.add_parameter(
        Parameter(
            'NFSCidr',
            Type="String",
            Description=
            "CIDR for NFS Security Group and NFS clients, to allow all access use 0.0.0.0/0",
            Default="10.0.0.0/16",
            AllowedPattern=
            "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))

    NFSOpts = t.add_parameter(
        Parameter(
            'NFSOpts',
            Description="NFS export options",
            Type="String",
            Default="(rw,async,no_root_squash,wdelay,no_subtree_check,no_acl)")
    )

    VarLogMessagesFile = t.add_parameter(
        Parameter(
            'VarLogMessagesFile',
            Type="String",
            Description=
            "S3 bucket and file name for log CloudWatch config (e.g. s3://jouser-logs/var-log-message.config)"
        ))

    RootRole = t.add_resource(
        iam.Role("RootRole",
                 AssumeRolePolicyDocument={
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 },
                 Policies=[
                     iam.Policy(PolicyName="s3bucketaccess",
                                PolicyDocument={
                                    "Statement": [{
                                        "Effect": "Allow",
                                        "Action": ["s3:GetObject"],
                                        "Resource": {
                                            "Fn::Join": [
                                                "",
                                                [
                                                    "arn:aws:s3:::", {
                                                        "Ref": "S3BucketName"
                                                    }, "/*"
                                                ]
                                            ]
                                        }
                                    }, {
                                        "Effect": "Allow",
                                        "Action": ["s3:ListBucket"],
                                        "Resource": {
                                            "Fn::Join": [
                                                "",
                                                [
                                                    "arn:aws:s3:::", {
                                                        "Ref": "S3BucketName"
                                                    }
                                                ]
                                            ]
                                        }
                                    }],
                                }),
                 ]))

    NFSSecurityGroup = t.add_resource(
        SecurityGroup("NFSSecurityGroup",
                      VpcId=Ref(VPCId),
                      GroupDescription="NFS Secuirty group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="2049",
                              ToPort="2049",
                              CidrIp=Ref(NFSCidr),
                          ),
                      ]))

    SshSecurityGroup = t.add_resource(
        SecurityGroup("SshSecurityGroup",
                      VpcId=Ref(VPCId),
                      GroupDescription="SSH Secuirty group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="22",
                              ToPort="22",
                              CidrIp=Ref(SshAccessCidr),
                          ),
                      ]))

    RootInstanceProfile = t.add_resource(
        InstanceProfile("RootInstanceProfile", Roles=[Ref(RootRole)]))

    EIPAddress = t.add_resource(
        EIP('EIPAddress', Domain='vpc', Condition="create_elastic_ip"))

    tags = Tags(Name=Ref("AWS::StackName"))

    NFSInstance = t.add_resource(
        ec2.Instance(
            'NFSInstance',
            ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"),
                              Ref(OperatingSystem)),
            KeyName=Ref(EC2KeyName),
            InstanceType=(Ref(NFSInstanceType)),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=If("not_existing_sg",
                                [Ref(NFSSecurityGroup),
                                 Ref(SshSecurityGroup)], [
                                     Ref(NFSSecurityGroup),
                                     Ref(SshSecurityGroup),
                                     Ref(ExistingSecurityGroup)
                                 ]),
                    AssociatePublicIpAddress=Ref(UsePublicIp),
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(Subnet))
            ],
            IamInstanceProfile=(Ref(RootInstanceProfile)),
            PlacementGroupName=(Ref(ExistingPlacementGroup)),
            BlockDeviceMappings=If(
                'vol_type_ebs',
                # One identical EBS volume per device, /dev/sdh through /dev/sdm.
                [
                    ec2.BlockDeviceMapping(
                        DeviceName="/dev/sd{}".format(letter),
                        Ebs=ec2.EBSBlockDevice(
                            VolumeSize=Ref(VolumelSize),
                            DeleteOnTermination="True",
                            Iops=Ref(VolumeIops),
                            VolumeType=Ref(EBSVolumeType)))
                    for letter in "hijklm"
                ],
                {"Ref": "AWS::NoValue"},
            ),
            UserData=Base64(Join('', InstUserData)),
        ))
    # End of NFSInstance

    t.add_mapping(
        'AWSRegionAMI', {
            "ap-northeast-1": {
                "centos7": "ami-8e8847f1",
                "rhel7": "ami-6b0d5f0d"
            },
            "ap-northeast-2": {
                "centos7": "ami-bf9c36d1",
                "rhel7": "ami-3eee4150"
            },
            "ap-south-1": {
                "centos7": "ami-1780a878",
                "rhel7": "ami-5b673c34"
            },
            "ap-southeast-1": {
                "centos7": "ami-8e0205f2",
                "rhel7": "ami-76144b0a"
            },
            "ap-southeast-2": {
                "centos7": "ami-d8c21dba",
                "rhel7": "ami-67589505"
            },
            "ca-central-1": {
                "centos7": "ami-e802818c",
                "rhel7": "ami-49f0762d"
            },
            "eu-central-1": {
                "centos7": "ami-dd3c0f36",
                "rhel7": "ami-c86c3f23"
            },
            "eu-west-1": {
                "centos7": "ami-3548444c",
                "rhel7": "ami-7c491f05"
            },
            "eu-west-2": {
                "centos7": "ami-00846a67",
                "rhel7": "ami-7c1bfd1b"
            },
            "eu-west-3": {
                "centos7": "ami-262e9f5b",
                "rhel7": "ami-5026902d"
            },
            "sa-east-1": {
                "centos7": "ami-cb5803a7",
                "rhel7": "ami-b0b7e3dc"
            },
            "us-east-1": {
                "centos7": "ami-9887c6e7",
                "rhel7": "ami-6871a115"
            },
            "us-east-2": {
                "centos7": "ami-9c0638f9",
                "rhel7": "ami-03291866"
            },
            "us-west-1": {
                "centos7": "ami-4826c22b",
                "rhel7": "ami-18726478"
            },
            "us-west-2": {
                "centos7": "ami-3ecc8f46",
                "rhel7": "ami-28e07e50"
            }
        })

    t.add_condition("not_existing_sg", Equals(Ref(ExistingSecurityGroup), ""))

    t.add_condition("vol_type_ebs", Equals(Ref(VolumeType), "EBS"))

    t.add_condition("Has_Public_Ip", Equals(Ref(UsePublicIp), "True"))

    t.add_condition("Has_Bucket", Not(Equals(Ref(S3BucketName), "")))

    t.add_condition("create_elastic_ip", Equals(Ref(CreateElasticIP), "True"))

    nfswaithandle = t.add_resource(
        WaitConditionHandle('NFSInstanceWaitHandle'))

    nfswaitcondition = t.add_resource(
        WaitCondition("NFSInstanceWaitCondition",
                      Handle=Ref(nfswaithandle),
                      Timeout="1500",
                      DependsOn="NFSInstance"))

    t.add_output([
        Output("ElasticIP",
               Description="Elastic IP address for the instance",
               Value=Ref(EIPAddress),
               Condition="create_elastic_ip")
    ])

    t.add_output([
        Output("InstanceID", Description="Instance ID", Value=Ref(NFSInstance))
    ])

    t.add_output([
        Output("InstancePrivateIP", Value=GetAtt('NFSInstance', 'PrivateIp'))
    ])

    t.add_output([
        Output("InstancePublicIP",
               Value=GetAtt('NFSInstance', 'PublicIp'),
               Condition="Has_Public_Ip")
    ])

    t.add_output([
        Output("ElasticPublicIP",
               Value=GetAtt('NFSInstance', 'PublicIp'),
               Condition="create_elastic_ip")
    ])

    t.add_output([
        Output("PrivateMountPoint",
               Description="Mount point on private network",
               Value=Join("", [GetAtt('NFSInstance', 'PrivateIp'), ":/fs1"]))
    ])

    t.add_output([
        Output("ExampleClientMountCommands",
               Description="Example commands to mount NFS on the clients",
               Value=Join("", [
                   "sudo mkdir /nfs1; sudo mount ",
                   GetAtt('NFSInstance', 'PrivateIp'), ":/",
                   Ref("ZfsPool"), "/",
                   Ref("ZfsMountPoint"), " /nfs1"
               ]))
    ])

    t.add_output([
        Output("S3BucketName",
               Value=(Ref("S3BucketName")),
               Condition="Has_Bucket")
    ])

    #    "Volume01" : { "Value" : { "Ref" : "Volume01" } },
    #    "Volume02" : { "Value" : { "Ref" : "Volume02" } },
    #    "Volume03" : { "Value" : { "Ref" : "Volume03" } },
    #    "Volume04" : { "Value" : { "Ref" : "Volume04" } },
    #    "Volume05" : { "Value" : { "Ref" : "Volume05" } },
    #    "Volume06" : { "Value" : { "Ref" : "Volume06" } },
    #    "Volume07" : { "Value" : { "Ref" : "Volume07" } },
    #    "Volume08" : { "Value" : { "Ref" : "Volume08" } },
    #    "Volume09" : { "Value" : { "Ref" : "Volume09" } },
    #    "Volume10" : { "Value" : { "Ref" : "Volume10" } }

    print(t.to_json(indent=2))
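
Since this script only prints the rendered JSON to stdout, a natural follow-up (not part of the original example) is to validate and launch it with boto3. A minimal sketch, assuming the JSON has been saved to a file and AWS credentials are configured; the stack name and file name are placeholders:

import boto3

# JSON emitted by the example above, captured to a file beforehand.
template_body = open("nfs_template.json").read()

cfn = boto3.client("cloudformation")
cfn.validate_template(TemplateBody=template_body)  # raises ClientError on a malformed template
cfn.create_stack(
    StackName="nfs-server",                        # placeholder stack name
    TemplateBody=template_body,
    Capabilities=["CAPABILITY_IAM"],               # needed if the template creates IAM resources
)
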
Example #19
def gen_template(config):

    num_couchbase_servers = config.server_number
    couchbase_instance_type = config.server_type

    num_sync_gateway_servers = config.sync_gateway_number
    sync_gateway_server_type = config.sync_gateway_type

    num_gateloads = config.load_number
    gateload_instance_type = config.load_type

    num_lbs = config.lb_number
    lb_instance_type = config.lb_type

    t = Template()
    t.add_description(
        'An EC2-Classic stack with Couchbase Server, Sync Gateway + load testing tools'
    )

    def createCouchbaseSecurityGroups(t):

        # Couchbase security group
        secGrpCouchbase = ec2.SecurityGroup('CouchbaseSecurityGroup')
        secGrpCouchbase.GroupDescription = "Allow access to Couchbase Server"
        secGrpCouchbase.SecurityGroupIngress = [
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="22",
                ToPort="22",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="8091",
                ToPort="8091",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # sync gw user port
                IpProtocol="tcp",
                FromPort="4984",
                ToPort="4984",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # sync gw admin port
                IpProtocol="tcp",
                FromPort="4985",
                ToPort="4985",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # expvars
                IpProtocol="tcp",
                FromPort="9876",
                ToPort="9876",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="4369",
                ToPort="4369",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="5984",
                ToPort="5984",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="8092",
                ToPort="8092",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="11209",
                ToPort="11209",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="11210",
                ToPort="11210",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="11211",
                ToPort="11211",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="21100",
                ToPort="21299",
                CidrIp="0.0.0.0/0",
            )
        ]

        # Add security group to template
        t.add_resource(secGrpCouchbase)

        return secGrpCouchbase

    #
    # Parameters
    #
    keyname_param = t.add_parameter(
        Parameter(
            'KeyName',
            Type='String',
            Description='Name of an existing EC2 KeyPair to enable SSH access')
    )

    secGrpCouchbase = createCouchbaseSecurityGroups(t)

    # Create an IAM Role to give the EC2 instance permissions to
    # push Cloudwatch Logs, which avoids the need to bake in the
    # AWS_KEY + AWS_SECRET_KEY into an ~/.aws/credentials file or
    # env variables
    mobileTestKitRole = iam.Role(
        'MobileTestKit',
        ManagedPolicyArns=['arn:aws:iam::aws:policy/CloudWatchFullAccess'],
        AssumeRolePolicyDocument={
            'Version':
            '2012-10-17',
            'Statement': [{
                'Action': 'sts:AssumeRole',
                'Principal': {
                    'Service': 'ec2.amazonaws.com'
                },
                'Effect': 'Allow',
            }]
        })
    t.add_resource(mobileTestKitRole)

    # The InstanceProfile instructs the EC2 instance to use
    # the mobileTestKitRole created above.  It will be referenced
    # in the instance.IamInstanceProfile property for all EC2 instances created
    instanceProfile = iam.InstanceProfile(
        'EC2InstanceProfile',
        Roles=[Ref(mobileTestKitRole)],
    )
    t.add_resource(instanceProfile)

    # Couchbase Server Instances
    for i in range(num_couchbase_servers):
        name = "couchbaseserver{}".format(i)
        instance = ec2.Instance(name)
        instance.ImageId = "ami-6d1c2007"  # centos7
        instance.InstanceType = couchbase_instance_type
        instance.SecurityGroups = [Ref(secGrpCouchbase)]
        instance.KeyName = Ref(keyname_param)
        instance.Tags = Tags(Name=name, Type="couchbaseserver")
        instance.IamInstanceProfile = Ref(instanceProfile)

        instance.BlockDeviceMappings = [
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(
                                       DeleteOnTermination=True,
                                       VolumeSize=200,
                                       VolumeType="gp2"))
        ]
        t.add_resource(instance)

    # Sync Gateway instances (centos7 ami)
    for i in range(num_sync_gateway_servers):
        name = "syncgateway{}".format(i)
        instance = ec2.Instance(name)
        instance.ImageId = "ami-6d1c2007"  # centos7
        instance.InstanceType = sync_gateway_server_type
        instance.SecurityGroups = [Ref(secGrpCouchbase)]
        instance.KeyName = Ref(keyname_param)
        instance.IamInstanceProfile = Ref(instanceProfile)
        instance.BlockDeviceMappings = [
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(
                                       DeleteOnTermination=True,
                                       VolumeSize=200,
                                       VolumeType="gp2"))
        ]

        # Make syncgateway0 a cache writer, and the rest cache readers
        # See https://github.com/couchbase/sync_gateway/wiki/Distributed-channel-cache-design-notes
        if i == 0:
            instance.Tags = Tags(Name=name,
                                 Type="syncgateway",
                                 CacheType="writer")
        else:
            instance.Tags = Tags(Name=name, Type="syncgateway")

        t.add_resource(instance)

    # Gateload instances (centos7 ami)
    for i in range(num_gateloads):
        name = "gateload{}".format(i)
        instance = ec2.Instance(name)
        instance.ImageId = "ami-6d1c2007"  # centos7
        instance.InstanceType = gateload_instance_type
        instance.SecurityGroups = [Ref(secGrpCouchbase)]
        instance.KeyName = Ref(keyname_param)
        instance.IamInstanceProfile = Ref(instanceProfile)
        instance.Tags = Tags(Name=name, Type="gateload")
        instance.BlockDeviceMappings = [
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(
                                       DeleteOnTermination=True,
                                       VolumeSize=200,
                                       VolumeType="gp2"))
        ]

        t.add_resource(instance)

    # Load balancer instances (centos7 ami)
    for i in range(num_lbs):
        name = "loadbalancer{}".format(i)
        instance = ec2.Instance(name)
        instance.ImageId = "ami-6d1c2007"  # centos7
        instance.InstanceType = lb_instance_type
        instance.SecurityGroups = [Ref(secGrpCouchbase)]
        instance.KeyName = Ref(keyname_param)
        instance.IamInstanceProfile = Ref(instanceProfile)
        instance.Tags = Tags(Name=name, Type="loadbalancer")
        instance.BlockDeviceMappings = [
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(
                                       DeleteOnTermination=True,
                                       VolumeSize=200,
                                       VolumeType="gp2"))
        ]

        t.add_resource(instance)

    return t.to_json()
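
gen_template only reads counts and instance types from its config argument (server_number, server_type, sync_gateway_number, sync_gateway_type, load_number, load_type, lb_number, lb_type). A minimal driver sketch, using a SimpleNamespace as a stand-in for whatever config class the surrounding project actually passes in:

from types import SimpleNamespace

# Hypothetical stand-in for the project's real config object; only the
# attributes read by gen_template above are supplied.
config = SimpleNamespace(
    server_number=3, server_type="m3.medium",
    sync_gateway_number=2, sync_gateway_type="m3.medium",
    load_number=1, load_type="m3.medium",
    lb_number=1, lb_type="m3.medium",
)

with open("couchbase_stack.json", "w") as fh:
    fh.write(gen_template(config))
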
Example #20
    def add_instance(self,
                     stack_name,
                     template,
                     provision_refs,
                     instance_num,
                     version=None):
        instance = ec2.Instance(f'node{instance_num}')
        instance.IamInstanceProfile = Ref(provision_refs.instance_profile)
        instance.ImageId = self.app.config.get('provision', 'aws_ec2_ami_id')
        instance.InstanceType = self.app.config.get('provision',
                                                    'aws_ec2_instance_type')
        instance.KeyName = self.app.config.get('provision', 'aws_ec2_key_name')
        instance.NetworkInterfaces = [
            ec2.NetworkInterfaceProperty(GroupSet=[
                provision_refs.security_group_ec2,
            ],
                                         AssociatePublicIpAddress='true',
                                         DeviceIndex='0',
                                         DeleteOnTermination='true',
                                         SubnetId=provision_refs.random_subnet)
        ]

        instance.EbsOptimized = 'true'
        if 'i3' not in instance.InstanceType:
            instance.BlockDeviceMappings = [
                # Set root volume size to 500gb
                ec2.BlockDeviceMapping(DeviceName='/dev/sda1',
                                       Ebs=ec2.EBSBlockDevice(VolumeSize='500',
                                                              VolumeType='io1',
                                                              Iops='1000'))
            ]
        instance.Tags = Tags(Name=f'{stack_name}-node{instance_num}')
        version_flag = f' --version={version}' if version else ''
        join_network_arguments = f'--name={stack_name}{version_flag} --set-default --install --no-configure'

        instance.UserData = Base64(
            Join(
                '',
                [
                    '#!/bin/bash -xe\n',
                    'apt update -y -q\n',
                    'UCF_FORCE_CONFOLD=1 DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -qq -y install python3-pip\n',
                    'apt install -y -q htop tmux zsh jq libssl-dev libleveldb-dev || true\n',
                    'ln -s /usr/lib/x86_64-linux-gnu/libleveldb.so /usr/lib/x86_64-linux-gnu/libleveldb.so.1\n',
                    'mkdir /data\n',
                ] + ([
                    # If type i3, mount NVMe drive at /data
                    'DEV=/dev/$(lsblk | grep nvme | awk \'{print $1}\')\n',
                    'mkfs -t xfs $DEV\n',
                    'UUID=$(blkid -s UUID -o value $DEV)\n',
                    'echo "UUID=$UUID /data xfs defaults 0 0" >> /etc/fstab\n',
                    'mount -a\n'
                ] if 'i3' in instance.InstanceType else []) + [
                    # Install hydra
                    'pip3 install cement colorlog\n',
                    f'pip3 install {self.app.config.get("provision", "pip_install") % self.app.config["hydra"]}\n',
                    'chown ubuntu:ubuntu /data\n',
                    'su -l -c "hydra info" ubuntu\n',  # Generate default hydra.yml
                    "sed -i 's/workdir: .*/workdir: \\/data/' /home/ubuntu/.hydra.yml\n",  # Change workdir to /data
                    f'su -l -c "hydra client join-network {join_network_arguments}" ubuntu\n'
                ]))
        template.add_resource(instance)
        template.add_output([
            Output(
                f"ID{instance_num}",
                Description="InstanceId of the newly created EC2 instance",
                Value=Ref(instance),
            ),
            Output(
                f"IP{instance_num}",
                Description=
                "Public IP address of the newly created EC2 instance",
                Value=GetAtt(instance, "PublicIp"),
            ),
        ])
        return instance
Example #21
        },
        "eu-west-1": {
            "AMI": "ami-24506250"
        },
        "sa-east-1": {
            "AMI": "ami-3e3be423"
        },
        "ap-southeast-1": {
            "AMI": "ami-74dda626"
        },
        "ap-northeast-1": {
            "AMI": "ami-dcfa4edd"
        }
    })
ebs = ec2.EBSBlockDevice(VolumeSize=20,
                         VolumeType="gp2",
                         DeletionPolicy="Snapshot")
ec2_instance = template.add_resource(
    ec2.Instance("Ec2Instance",
                 ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
                 InstanceType="t1.micro",
                 KeyName=Ref(keyname_param),
                 SecurityGroups=["default"],
                 UserData=Base64("80"),
                 BlockDeviceMappings=[
                     ec2.BlockDeviceMapping(DeviceName="/dev/sdf", Ebs=ebs)
                 ]))

template.add_output([
    Output(
        "InstanceId",
Example #22
securityGroup = ec2.SecurityGroup('InstanceSecurityGroup')
securityGroup.GroupDescription = 'SSH and HTTP Access'
securityGroup.SecurityGroupIngress = [
    ec2.SecurityGroupRule(IpProtocol='tcp', FromPort=22, ToPort=22, CidrIp=Ref(sshLocation)),
    ec2.SecurityGroupRule(IpProtocol='tcp', FromPort=80, ToPort=80, CidrIp='0.0.0.0/0'),
    ec2.SecurityGroupRule(IpProtocol='tcp', FromPort=443, ToPort=443, CidrIp='0.0.0.0/0'),
]

instance = ec2.Instance("ec2instance", ImageId="ami-cd0f5cb6", InstanceType="t2.micro")
instance.SecurityGroups = [Ref(securityGroup)]
instance.KeyName = Ref(keyName)
instance.BlockDeviceMappings = [
    ec2.BlockDeviceMapping(
        DeviceName='/dev/sda1',
        Ebs=ec2.EBSBlockDevice(
            VolumeSize=30
        )
    )
]
instance.Tags = [ec2.Tag('Name', Ref(name))]

ipAddress = ec2.EIP('IPAddress')
ipAssociation = ec2.EIPAssociation(
    'EIPAssociation',
    InstanceId=Ref(instance),
    EIP=Ref(ipAddress)
)

# It would be nice to generate the route53 record here as well, but a
# different account has the Hosted Zone configured :(
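
For reference, if the Hosted Zone did live in the same account, the record could be generated alongside the EIP with troposphere's route53 module. A sketch under that assumption; the zone and record names are placeholders:

from troposphere import route53

dnsRecord = route53.RecordSetType(
    'DnsRecord',
    HostedZoneName='example.com.',        # placeholder zone (must end with a dot)
    Name='app.example.com.',              # placeholder record name
    Type='A',
    TTL='300',
    ResourceRecords=[Ref(ipAddress)],     # Ref of an EIP resolves to its public IP
)
# dnsRecord would then be added to the template like the other resources.
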
Example #23
def buildStack(bootstrap, env):
    t = Template()

    t.add_description("""\
    Configures autoscaling group for hello world app""")

    vpcCidr = t.add_parameter(
        Parameter(
            "VPCCidr",
            Type="String",
            Description="VPC cidr (x.x.x.x/xx)",
        ))

    publicSubnet1 = t.add_parameter(
        Parameter(
            "PublicSubnet1",
            Type="String",
            Description="A public VPC subnet ID for the api app load balancer.",
        ))

    publicSubnet2 = t.add_parameter(
        Parameter(
            "PublicSubnet2",
            Type="String",
            Description="A public VPC subnet ID for the api load balancer.",
        ))

    dbName = t.add_parameter(
        Parameter(
            "DBName",
            Default="HelloWorldApp",
            Description="The database name",
            Type="String",
            MinLength="1",
            MaxLength="64",
            AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
            ConstraintDescription=("must begin with a letter and contain only"
                                   " alphanumeric characters.")))

    dbUser = t.add_parameter(
        Parameter(
            "DBUser",
            NoEcho=True,
            Description="The database admin account username",
            Type="String",
            MinLength="1",
            MaxLength="16",
            AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
            ConstraintDescription=("must begin with a letter and contain only"
                                   " alphanumeric characters.")))

    dbPassword = t.add_parameter(
        Parameter(
            "DBPassword",
            NoEcho=True,
            Description="The database admin account password",
            Type="String",
            MinLength="8",
            MaxLength="41",
            AllowedPattern="[a-zA-Z0-9]*",
            ConstraintDescription="must contain only alphanumeric characters.")
    )

    dbType = t.add_parameter(
        Parameter(
            "DBType",
            Default="db.t2.medium",
            Description="Database instance class",
            Type="String",
            AllowedValues=[
                "db.m5.large", "db.m5.xlarge", "db.m5.2xlarge",
                "db.m5.4xlarge", "db.m5.12xlarge", "db.m5.24xlarge",
                "db.m4.large", "db.m4.xlarge", "db.m4.2xlarge",
                "db.m4.4xlarge", "db.m4.10xlarge", "db.m4.16xlarge",
                "db.r4.large", "db.r4.xlarge", "db.r4.2xlarge",
                "db.r4.4xlarge", "db.r4.8xlarge", "db.r4.16xlarge",
                "db.x1e.xlarge", "db.x1e.2xlarge", "db.x1e.4xlarge",
                "db.x1e.8xlarge", "db.x1e.16xlarge", "db.x1e.32xlarge",
                "db.x1.16xlarge", "db.x1.32xlarge", "db.r3.large",
                "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge",
                "db.r3.8xlarge", "db.t2.micro", "db.t2.small", "db.t2.medium",
                "db.t2.large", "db.t2.xlarge", "db.t2.2xlarge"
            ],
            ConstraintDescription="must select a valid database instance type.",
        ))

    dbAllocatedStorage = t.add_parameter(
        Parameter(
            "DBAllocatedStorage",
            Default="5",
            Description="The size of the database (Gb)",
            Type="Number",
            MinValue="5",
            MaxValue="1024",
            ConstraintDescription="must be between 5 and 1024Gb.",
        ))

    whitelistedCIDR = t.add_parameter(
        Parameter(
            "WhitelistedCIDR",
            Description="CIDR whitelisted to be open on public instances",
            Type="String",
        ))

    #### NETWORK SECTION ####
    vpc = t.add_resource(
        VPC("VPC", CidrBlock=Ref(vpcCidr), EnableDnsHostnames=True))

    subnet1 = t.add_resource(
        Subnet("Subnet1",
               CidrBlock=Ref(publicSubnet1),
               AvailabilityZone="eu-west-1a",
               VpcId=Ref(vpc)))
    subnet2 = t.add_resource(
        Subnet("Subnet2",
               CidrBlock=Ref(publicSubnet2),
               AvailabilityZone="eu-west-1b",
               VpcId=Ref(vpc)))

    internetGateway = t.add_resource(InternetGateway('InternetGateway'))

    gatewayAttachment = t.add_resource(
        VPCGatewayAttachment('AttachGateway',
                             VpcId=Ref(vpc),
                             InternetGatewayId=Ref(internetGateway)))

    routeTable = t.add_resource(RouteTable('RouteTable', VpcId=Ref(vpc)))

    route = t.add_resource(
        Route(
            'Route',
            DependsOn='AttachGateway',
            GatewayId=Ref('InternetGateway'),
            DestinationCidrBlock='0.0.0.0/0',
            RouteTableId=Ref(routeTable),
        ))

    subnetRouteTableAssociation = t.add_resource(
        SubnetRouteTableAssociation(
            'SubnetRouteTableAssociation',
            SubnetId=Ref(subnet1),
            RouteTableId=Ref(routeTable),
        ))

    subnetRouteTableAssociation2 = t.add_resource(
        SubnetRouteTableAssociation(
            'SubnetRouteTableAssociation2',
            SubnetId=Ref(subnet2),
            RouteTableId=Ref(routeTable),
        ))

    #### SECURITY GROUP ####
    loadBalancerSg = t.add_resource(
        ec2.SecurityGroup(
            "LoadBalancerSecurityGroup",
            VpcId=Ref(vpc),
            GroupDescription="Enable SSH access via port 22",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                ),
            ],
        ))

    instanceSg = t.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            VpcId=Ref(vpc),
            GroupDescription="Enable SSH access via port 22",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp=Ref(whitelistedCIDR),
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="8000",
                    ToPort="8000",
                    SourceSecurityGroupId=Ref(loadBalancerSg),
                ),
            ],
        ))

    rdsSg = t.add_resource(
        SecurityGroup("RDSSecurityGroup",
                      GroupDescription="Security group for RDS DB Instance.",
                      VpcId=Ref(vpc),
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="5432",
                              ToPort="5432",
                              SourceSecurityGroupId=Ref(instanceSg),
                          ),
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="5432",
                              ToPort="5432",
                              CidrIp=Ref(whitelistedCIDR),
                          ),
                      ]))

    #### DATABASE SECTION ####
    subnetGroup = t.add_resource(
        DBSubnetGroup(
            "SubnetGroup",
            DBSubnetGroupDescription=
            "Subnets available for the RDS DB Instance",
            SubnetIds=[Ref(subnet1), Ref(subnet2)],
        ))

    db = t.add_resource(
        DBInstance(
            "RDSHelloWorldApp",
            DBName=Join("", [Ref(dbName), env]),
            DBInstanceIdentifier=Join("", [Ref(dbName), env]),
            EnableIAMDatabaseAuthentication=True,
            PubliclyAccessible=True,
            AllocatedStorage=Ref(dbAllocatedStorage),
            DBInstanceClass=Ref(dbType),
            Engine="postgres",
            EngineVersion="10.4",
            MasterUsername=Ref(dbUser),
            MasterUserPassword=Ref(dbPassword),
            DBSubnetGroupName=Ref(subnetGroup),
            VPCSecurityGroups=[Ref(rdsSg)],
        ))

    t.add_output(
        Output("RDSConnectionString",
               Description="Connection string for database",
               Value=GetAtt("RDSHelloWorldApp", "Endpoint.Address")))

    if bootstrap:
        return t

    #### INSTANCE SECTION ####
    keyName = t.add_parameter(
        Parameter(
            "KeyName",
            Type="String",
            Description="Name of an existing EC2 KeyPair to enable SSH access",
            MinLength="1",
            AllowedPattern="[\x20-\x7E]*",
            MaxLength="255",
            ConstraintDescription="can contain only ASCII characters.",
        ))

    scaleCapacityMin = t.add_parameter(
        Parameter(
            "ScaleCapacityMin",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))

    scaleCapacityMax = t.add_parameter(
        Parameter(
            "ScaleCapacityMax",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))

    scaleCapacityDesired = t.add_parameter(
        Parameter(
            "ScaleCapacityDesired",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))

    amiId = t.add_parameter(
        Parameter(
            "AmiId",
            Type="String",
            Default="ami-09693313102a30b2c",
            Description="The AMI id for the api instances",
        ))

    instanceType = t.add_parameter(
        Parameter("InstanceType",
                  Description="WebServer EC2 instance type",
                  Type="String",
                  Default="t2.medium",
                  AllowedValues=[
                      "t2.nano", "t2.micro", "t2.small", "t2.medium",
                      "t2.large", "m3.medium", "m3.large", "m3.xlarge",
                      "m3.2xlarge", "m4.large", "m4.xlarge", "m4.2xlarge",
                      "m4.4xlarge", "m4.10xlarge", "c4.large", "c4.xlarge",
                      "c4.2xlarge", "c4.4xlarge", "c4.8xlarge"
                  ],
                  ConstraintDescription="must be a valid EC2 instance type."))

    assumeRole = t.add_resource(
        Role("AssumeRole",
             AssumeRolePolicyDocument=json.loads("""\
{
  "Version": "2012-10-17",
  "Statement": [
    {
    "Action": "sts:AssumeRole",
    "Principal": {
      "Service": "ec2.amazonaws.com"
    },
    "Effect": "Allow",
    "Sid": ""
    }
  ]
}\
""")))

    instanceProfile = t.add_resource(
        InstanceProfile("InstanceProfile", Roles=[Ref(assumeRole)]))

    rolePolicyType = t.add_resource(
        PolicyType("RolePolicyType",
                   Roles=[Ref(assumeRole)],
                   PolicyName=Join("", ["CloudWatchHelloWorld", "-", env]),
                   PolicyDocument=json.loads("""\
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": [
        "logs:CreateLogGroup",
        "logs:CreateLogStream",
        "logs:DescribeLogStreams",
        "logs:PutLogEvents"
      ],
    "Effect": "Allow",
    "Resource": [
        "arn:aws:logs:*:*:*"
      ]
    }
  ]
}\
""")))

    appPassword = t.add_parameter(
        Parameter(
            "AppPassword",
            NoEcho=True,
            Description="The Password for the app user",
            Type="String",
            MinLength="8",
            MaxLength="41",
            AllowedPattern="[a-zA-Z0-9]*",
            ConstraintDescription="must contain only alphanumeric characters.")
    )

    launchConfig = t.add_resource(
        LaunchConfiguration(
            "LaunchConfiguration",
            Metadata=autoscaling.Metadata(
                cloudformation.Init({
                    "config":
                    cloudformation.InitConfig(files=cloudformation.InitFiles({
                        "/home/app/environment":
                        cloudformation.InitFile(content=Join(
                            "", [
                                "SPRING_DATASOURCE_URL=", "jdbc:postgresql://",
                                GetAtt("RDSHelloWorldApp", "Endpoint.Address"),
                                ":5432/HelloWorldApp" + env +
                                "?currentSchema=hello_world", "\n",
                                "SPRING_DATASOURCE_USERNAME=app", "\n",
                                "SPRING_DATASOURCE_PASSWORD="******"\n",
                                "SPRING_PROFILES_ACTIVE=", env, "\n"
                            ]),
                                                mode="000600",
                                                owner="app",
                                                group="app")
                    }), )
                }), ),
            UserData=Base64(
                Join('', [
                    "#!/bin/bash\n", "/opt/aws/bin/cfn-init",
                    "    --resource LaunchConfiguration", "    --stack ",
                    Ref("AWS::StackName"), "    --region ",
                    Ref("AWS::Region"), "\n", "/opt/aws/bin/cfn-signal -e $? ",
                    "         --stack ", {
                        "Ref": "AWS::StackName"
                    }, "         --resource AutoscalingGroup ",
                    "         --region ", {
                        "Ref": "AWS::Region"
                    }, "\n"
                ])),
            ImageId=Ref(amiId),
            KeyName=Ref(keyName),
            IamInstanceProfile=Ref(instanceProfile),
            BlockDeviceMappings=[
                ec2.BlockDeviceMapping(DeviceName="/dev/xvda",
                                       Ebs=ec2.EBSBlockDevice(VolumeSize="8")),
            ],
            SecurityGroups=[Ref(instanceSg)],
            InstanceType=Ref(instanceType),
            AssociatePublicIpAddress='True',
        ))

    applicationElasticLB = t.add_resource(
        elb.LoadBalancer("ApplicationElasticLB",
                         Name="ApplicationElasticLB-" + env,
                         Scheme="internet-facing",
                         Type="application",
                         SecurityGroups=[Ref(loadBalancerSg)],
                         Subnets=[Ref(subnet1), Ref(subnet2)]))

    targetGroup = t.add_resource(
        elb.TargetGroup("TargetGroupHelloWorld",
                        HealthCheckProtocol="HTTP",
                        HealthCheckTimeoutSeconds="15",
                        HealthyThresholdCount="5",
                        Matcher=elb.Matcher(HttpCode="200,404"),
                        Port="8000",
                        Protocol="HTTP",
                        UnhealthyThresholdCount="3",
                        TargetGroupAttributes=[
                            elb.TargetGroupAttribute(
                                Key="deregistration_delay.timeout_seconds",
                                Value="120",
                            )
                        ],
                        VpcId=Ref(vpc)))

    listener = t.add_resource(
        elb.Listener("Listener",
                     Port="80",
                     Protocol="HTTP",
                     LoadBalancerArn=Ref(applicationElasticLB),
                     DefaultActions=[
                         elb.Action(Type="forward",
                                    TargetGroupArn=Ref(targetGroup))
                     ]))

    t.add_output(
        Output("URL",
               Description="URL of the sample website",
               Value=Join("",
                          ["http://",
                           GetAtt(applicationElasticLB, "DNSName")])))

    autoScalingGroup = t.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup",
            DesiredCapacity=Ref(scaleCapacityDesired),
            LaunchConfigurationName=Ref(launchConfig),
            MinSize=Ref(scaleCapacityMin),
            MaxSize=Ref(scaleCapacityMax),
            VPCZoneIdentifier=[Ref(subnet1), Ref(subnet2)],
            TargetGroupARNs=[Ref(targetGroup)],
            HealthCheckType="ELB",
            HealthCheckGracePeriod=360,
            UpdatePolicy=UpdatePolicy(
                AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
                    WillReplace=True, ),
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    PauseTime='PT5M',
                    MinInstancesInService="1",
                    MaxBatchSize='1',
                    WaitOnResourceSignals=True)),
            CreationPolicy=CreationPolicy(ResourceSignal=ResourceSignal(
                Timeout="PT15M", Count=Ref(scaleCapacityDesired)))))

    # print(t.to_json())
    return t
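
buildStack returns early, with only the network, security group and database resources, when bootstrap is true; the instance, load balancer and autoscaling resources are added only on a full build. A minimal driver sketch for that two-phase flow; the environment name and output file names are placeholders:

# Hypothetical driver for the two-phase flow implied by the bootstrap flag.
for bootstrap, filename in [(True, "helloworld-bootstrap.json"),
                            (False, "helloworld-full.json")]:
    stack = buildStack(bootstrap, "staging")
    with open(filename, "w") as fh:
        fh.write(stack.to_json())
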
Example #24
    def add_resources(self):
        """Add resources to template."""
        template = self.template
        variables = self.get_variables()

        vpnrole = template.add_resource(
            iam.Role(
                'VPNRole',
                AssumeRolePolicyDocument=iam_policies.assumerolepolicy('ec2'),
                ManagedPolicyArns=variables['VPNManagedPolicies'].ref,
                Path='/',
                Policies=[
                    iam.Policy(
                        PolicyName=Join('-', [
                            'customer-vpn-server-role',
                            variables['EnvironmentName'].ref,
                            variables['CustomerName'].ref
                        ]),
                        PolicyDocument=Policy(
                            Version='2012-10-17',
                            Statement=[
                                # ModifyInstanceAttribute is for src/dst check
                                Statement(Action=[
                                    awacs.ec2.DescribeRouteTables,
                                    awacs.ec2.DescribeAddresses,
                                    awacs.ec2.AssociateAddress,
                                    awacs.ec2.CreateRoute,
                                    awacs.ec2.ReplaceRoute,
                                    awacs.ec2.ModifyInstanceAttribute
                                ],
                                          Effect=Allow,
                                          Resource=['*']),
                                Statement(
                                    Action=[
                                        awacs.aws.Action('s3', 'Get*'),
                                        awacs.aws.Action('s3', 'List*'),
                                        awacs.aws.Action('s3', 'Put*')
                                    ],
                                    Effect=Allow,
                                    Resource=[
                                        Join(
                                            '',
                                            [
                                                'arn:aws:s3:::',
                                                variables['ChefDataBucketName']
                                                .ref,  # noqa pylint: disable=line-too-long
                                                '/',
                                                variables['EnvironmentName'].
                                                ref,
                                                '/',
                                                variables['BucketKey'].ref,
                                                '/*'
                                            ])
                                    ]),
                                Statement(
                                    Action=[awacs.s3.ListBucket],
                                    Effect=Allow,
                                    Resource=[
                                        Join('', [
                                            'arn:aws:s3:::',
                                            variables['ChefDataBucketName'].ref
                                        ])  # noqa pylint: disable=line-too-long
                                    ],
                                    Condition=Condition(
                                        StringLike(
                                            's3:prefix',
                                            [
                                                Join(
                                                    '',
                                                    [
                                                        variables[
                                                            'EnvironmentName'].
                                                        ref,  # noqa pylint: disable=line-too-long
                                                        '/',
                                                        variables['BucketKey'].
                                                        ref,  # noqa pylint: disable=line-too-long
                                                        '/*'
                                                    ])
                                            ])))
                            ]))
                ]))
        vpninstanceprofile = template.add_resource(
            iam.InstanceProfile('VPNInstanceProfile',
                                Path='/',
                                Roles=[Ref(vpnrole)]))

        amiid = template.add_resource(
            cfn_custom_classes.AMIId(
                'AMIId',
                Condition='MissingVPNAMI',
                Platform=variables['VPNOS'].ref,
                Region=Ref('AWS::Region'),
                ServiceToken=variables['AMILookupArn'].ref))

        # Lookup subnets from core VPC stack
        subnetlookuplambdarole = template.add_resource(
            iam.Role(
                'SubnetLookupLambdaRole',
                Condition='PrivateSubnetCountOmitted',
                AssumeRolePolicyDocument=iam_policies.assumerolepolicy(
                    'lambda'),
                ManagedPolicyArns=[
                    IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole'
                ],
                Policies=[
                    iam.Policy(
                        PolicyName=Join('-', [
                            'subnetlookup-lambda-role',
                            variables['EnvironmentName'].ref,
                            variables['CustomerName'].ref
                        ]),
                        PolicyDocument=Policy(
                            Version='2012-10-17',
                            Statement=[
                                Statement(
                                    Action=[
                                        awacs.aws.Action(
                                            'cloudformation',
                                            'DescribeStack*'),
                                        awacs.aws.Action(
                                            'cloudformation', 'Get*')
                                    ],
                                    Effect=Allow,
                                    Resource=[
                                        Join('', [
                                            'arn:aws:cloudformation:',
                                            Ref('AWS::Region'), ':',
                                            Ref('AWS::AccountId'), ':stack/',
                                            variables['CoreVPCStack'].ref, '/*'
                                        ])
                                    ])
                            ]))
                ]))

        cfncustomresourcesubnetlookup = template.add_resource(
            awslambda.Function(
                'CFNCustomResourceSubnetLookup',
                Condition='PrivateSubnetCountOmitted',
                Description='Find subnets created by core stack',
                Code=awslambda.Code(
                    ZipFile=variables['SubnetLookupLambdaFunction']),
                Handler='index.handler',
                Role=GetAtt(subnetlookuplambdarole, 'Arn'),
                Runtime='python2.7',
                Timeout=10))

        subnetlookup = template.add_resource(
            cfn_custom_classes.SubnetLookup(
                'SubnetLookup',
                Condition='PrivateSubnetCountOmitted',
                CoreVPCStack=variables['CoreVPCStack'].ref,
                Region=Ref('AWS::Region'),
                ServiceToken=GetAtt(cfncustomresourcesubnetlookup, 'Arn')))

        common_userdata_prefix = [
            "#cloud-config\n",
            "package_update: true\n",
            "package_upgrade: false\n",
            "write_files:\n",
            "  - path: /usr/local/bin/update_vpn_routes.sh\n",
            "    permissions: '0755'\n",
            "    content: |\n",
            "      #!/bin/bash\n",
            "      \n",
            "      export AWS_DEFAULT_REGION=\"",
            Ref('AWS::Region'),
            "\"\n",
            "      my_instance_id=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)\n",  # noqa pylint: disable=line-too-long
            "      \n",
            "      publicroutetableid=",
            If(
                'PrivateSubnetCountOmitted',
                GetAtt(subnetlookup.title, 'PublicRouteTableId'),
                If(
                    'PublicRouteTableSpecified',
                    variables['PublicRouteTable'].ref,
                    ImportValue(
                        Sub("${%s}-PublicRouteTable" %
                            variables['CoreVPCStack'].name)))),  # noqa pylint: disable=line-too-long
            "\n",
            "      private_route_tables=(",
            If(
                'PrivateSubnetCountOmitted',
                GetAtt(subnetlookup.title, 'PrivateRouteTables'),
                If(
                    '3PrivateSubnetsCreated',
                    If(
                        'PublicRouteTableSpecified',
                        Join(' ', [
                            variables['PrivateRouteTable1'].ref,
                            variables['PrivateRouteTable2'].ref,
                            variables['PrivateRouteTable3'].ref
                        ]),
                        Join(
                            ' ',
                            [
                                ImportValue(
                                    Sub("${%s}-PrivateRouteTable1" %
                                        variables['CoreVPCStack'].name)),  # noqa pylint: disable=line-too-long
                                ImportValue(
                                    Sub("${%s}-PrivateRouteTable2" %
                                        variables['CoreVPCStack'].name)),  # noqa pylint: disable=line-too-long
                                ImportValue(
                                    Sub("${%s}-PrivateRouteTable3" %
                                        variables['CoreVPCStack'].name))
                            ])),  # noqa pylint: disable=line-too-long
                    If(
                        '2PrivateSubnetsCreated',
                        If(
                            'PublicRouteTableSpecified',
                            Join(' ', [
                                variables['PrivateRouteTable1'].ref,
                                variables['PrivateRouteTable2'].ref
                            ]),
                            Join(
                                ' ',
                                [
                                    ImportValue(
                                        Sub("${%s}-PrivateRouteTable1" %
                                            variables['CoreVPCStack'].name)),  # noqa pylint: disable=line-too-long
                                    ImportValue(
                                        Sub("${%s}-PrivateRouteTable2" %
                                            variables['CoreVPCStack'].name))
                                ])),  # noqa pylint: disable=line-too-long,
                        If(
                            'PublicRouteTableSpecified',
                            variables['PrivateRouteTable1'].ref,
                            ImportValue(
                                Sub("${%s}-PrivateRouteTable1" %
                                    variables['CoreVPCStack'].name)))))),  # noqa pylint: disable=line-too-long
            ")\n",
            "\n",
            "      openvpnroutepubdest=",
            variables['VPNSubnet'].ref,
            "\n",
            "      \n",
            "      # Disabling sourceDestCheck\n",
            "      aws ec2 modify-instance-attribute --instance-id ${my_instance_id} --source-dest-check \"{\\\"Value\\\": false}\"\n",  # noqa pylint: disable=line-too-long
            "      \n",
            "      if aws ec2 describe-route-tables | grep ${openvpnroutepubdest}; then\n",  # noqa pylint: disable=line-too-long
            "          # Update 'OpenVPNRoutePub' to point to this instance\n",  # noqa pylint: disable=line-too-long
            "          aws ec2 replace-route --route-table-id ${publicroutetableid} --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",  # noqa pylint: disable=line-too-long
            "          # Update private routes\n",
            "          for i in \"${private_route_tables[@]}\"\n",
            "          do\n",
            "              aws ec2 replace-route --route-table-id $i --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",  # noqa pylint: disable=line-too-long
            "          done\n",
            "      else\n",
            "          # Create 'OpenVPNRoutePub'\n",
            "          aws ec2 create-route --route-table-id ${publicroutetableid} --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",  # noqa pylint: disable=line-too-long
            "          # Create private routes\n",
            "          for i in \"${private_route_tables[@]}\"\n",
            "          do\n",
            "              aws ec2 create-route --route-table-id $i --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",  # noqa pylint: disable=line-too-long
            "          done\n",
            "      fi\n",
            "      \n",
            "\n",
            "  - path: /etc/chef/sync_cookbooks.sh\n",
            "    permissions: '0755'\n",
            "    owner: 'root'\n",
            "    group: 'root'\n",
            "    content: |\n",
            "      #!/bin/bash\n",
            "      set -e -o pipefail\n",
            "      \n",
            "      aws --region ",
            Ref('AWS::Region'),
            " s3 sync s3://",
            variables['ChefBucketName'].ref,
            "/",
            variables['EnvironmentName'].ref,
            "/",
            variables['BucketKey'].ref,
            "/ /etc/chef/\n",
            "      if compgen -G \"/etc/chef/cookbooks-*.tar.gz\" > /dev/null; then\n",  # noqa pylint: disable=line-too-long
            "          echo \"Cookbook archive found.\"\n",
            "          if [ -d \"/etc/chef/cookbooks\" ]; then\n",
            "              echo \"Removing previously extracted cookbooks.\"\n",  # noqa pylint: disable=line-too-long
            "              rm -r /etc/chef/cookbooks\n",
            "          fi\n",
            "          echo \"Extracting highest numbered cookbook archive.\"\n",  # noqa pylint: disable=line-too-long
            "          cbarchives=(/etc/chef/cookbooks-*.tar.gz)\n",
            "          tar -zxf \"${cbarchives[@]: -1}\" -C /etc/chef\n",
            "          chown -R root:root /etc/chef\n",
            "      fi\n",
            "      \n",
            "\n",
            "  - path: /etc/chef/perform_chef_run.sh\n",
            "    permissions: '0755'\n",
            "    owner: 'root'\n",
            "    group: 'root'\n",
            "    content: |\n",
            "      #!/bin/bash\n",
            "      set -e -o pipefail\n",
            "      \n",
            "      chef-client -z -r '",
            If('ChefRunListSpecified', variables['ChefRunList'].ref,
               Join('', ['recipe[', variables['CustomerName'].ref, '_vpn]'])),
            "' -c /etc/chef/client.rb -E ",
            variables['EnvironmentName'].ref,
            " --force-formatter --no-color -F min\n",
            "\n",
            "  - path: /etc/chef/client.rb\n",
            "    permissions: '0644'\n",
            "    owner: 'root'\n",
            "    group: 'root'\n",
            "    content: |\n",
            "      log_level :info\n",
            "      log_location '/var/log/chef/client.log'\n",
            "      ssl_verify_mode :verify_none\n",
            "      cookbook_path '/etc/chef/cookbooks'\n",
            "      node_path '/etc/chef/nodes'\n",
            "      role_path '/etc/chef/roles'\n",
            "      data_bag_path '/etc/chef/data_bags'\n",
            "      environment_path '/etc/chef/environments'\n",
            "      local_mode 'true'\n",
            "\n",
            "  - path: /etc/chef/environments/",
            variables['EnvironmentName'].ref,
            ".json\n",
            "    permissions: '0644'\n",
            "    owner: 'root'\n",
            "    group: 'root'\n",
            "    content: |\n",
            "      {\n",
            "        \"name\": \"",
            variables['EnvironmentName'].ref,
            "\",\n",
            "        \"default_attributes\": {\n",
            "          \"sturdy\": {\n",
            "            \"openvpn\": {\n",
            "              \"core_vpc_cidr\": \"",
            variables['VpcCidr'].ref,
            "\",\n",
            "              \"vpn_elastic_ip\": \"",
            variables['VpnEipPublicIp'].ref,
            "\",\n",
            "              \"vpn_subnet_cidr\": \"",
            variables['VPNSubnet'].ref,
            "\",\n",
            "              \"chef_data_bucket_name\": \"",
            variables['ChefDataBucketName'].ref,
            "\",\n",
            "              \"chef_data_bucket_folder\": \"",
            variables['EnvironmentName'].ref,
            "/",
            variables['BucketKey'].ref,
            "\",\n",
            "              \"chef_data_bucket_region\": \"",
            Ref('AWS::Region'),
            "\"\n",
            "            }\n",
            "          }\n",
            "        },\n",
            "        \"json_class\": \"Chef::Environment\",\n",
            "        \"description\": \"",
            variables['EnvironmentName'].ref,
            " environment\",\n",
            "        \"chef_type\": \"environment\"\n",
            "      }\n",
            "\n",
            "runcmd:\n",
            "  - set -euf\n",
            "  - echo 'Attaching EIP'\n",
            "  - pip install aws-ec2-assign-elastic-ip\n",
            # Allowing this command to fail (with ||true) as sturdy_openvpn
            # 2.3.0+ can handle this association instead. This will be removed
            # entirely in the next major release of this module (at which time
            # use of the updated sturdy_openvpn cookbook will be required)
            "  - aws-ec2-assign-elastic-ip --region ",
            Ref('AWS::Region'),
            " --valid-ips ",
            variables['VpnEipPublicIp'].ref,
            " || true\n",
            "  - echo 'Updating Routes'\n",
            "  - /usr/local/bin/update_vpn_routes.sh\n",
            "  - echo 'Installing Chef'\n",
            "  - curl --max-time 10 --retry-delay 5 --retry 5 -L https://www.chef.io/chef/install.sh | bash -s -- -v ",  # noqa pylint: disable=line-too-long
            variables['ChefClientVersion'].ref,
            "\n",
            "  - echo 'Configuring Chef'\n",
            "  - mkdir -p /var/log/chef /etc/chef/data_bags /etc/chef/nodes /etc/chef/roles\n",  # noqa pylint: disable=line-too-long
            "  - chmod 0755 /etc/chef\n",
            "  - /etc/chef/sync_cookbooks.sh\n",
            "  - /etc/chef/perform_chef_run.sh\n"
        ]

        vpnserverlaunchconfig = template.add_resource(
            autoscaling.LaunchConfiguration(
                'VpnServerLaunchConfig',
                AssociatePublicIpAddress=True,
                BlockDeviceMappings=[
                    # CentOS AMIs don't include this by default
                    ec2.BlockDeviceMapping(
                        DeviceName='/dev/sda1',
                        Ebs=ec2.EBSBlockDevice(DeleteOnTermination=True))
                ],
                IamInstanceProfile=Ref(vpninstanceprofile),
                ImageId=If('MissingVPNAMI', GetAtt(amiid, 'ImageId'),
                           variables['VPNAMI'].ref),
                InstanceType=variables['ManagementInstanceType'].ref,
                InstanceMonitoring=False,  # extra granularity not worth cost
                KeyName=If('SSHKeySpecified', variables['KeyName'].ref,
                           Ref('AWS::NoValue')),
                PlacementTenancy=variables['VpcInstanceTenancy'].ref,
                SecurityGroups=variables['VPNSecurityGroups'].ref,
                UserData=If(
                    'RHELUserData',
                    Base64(
                        Join(
                            '',
                            common_userdata_prefix + [
                                "yum_repos:\n",
                                "  epel:\n",
                                "    name: Extra Packages for $releasever - $basearch\n",  # noqa pylint: disable=line-too-long
                                "    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch\n",  # noqa pylint: disable=line-too-long
                                "    enabled: true\n",
                                "    failovermethod: priority\n",
                                "    gpgcheck: true\n",
                                "    gpgkey: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7\n",  # noqa pylint: disable=line-too-long
                                "packages:\n",
                                "  - awscli\n",
                                "  - python-pip\n",
                                "  - python2-boto\n",
                                "  - python2-boto3\n"
                            ])),
                    Base64(
                        Join(
                            '', common_userdata_prefix + [
                                "packages:\n", "  - awscli\n",
                                "  - python-pip\n", "  - python-boto\n",
                                "  - python-boto3\n"
                            ])))))

        template.add_resource(
            autoscaling.AutoScalingGroup(
                'VPNServerASG',
                MinSize=1,
                MaxSize=1,
                LaunchConfigurationName=Ref(vpnserverlaunchconfig),
                Tags=[
                    autoscaling.Tag(
                        'Name',
                        Join('-', [
                            variables['CustomerName'].ref, 'vpn',
                            variables['EnvironmentName'].ref
                        ]), True),
                    autoscaling.Tag('environment',
                                    variables['EnvironmentName'].ref, True),
                    autoscaling.Tag('customer', variables['CustomerName'].ref,
                                    True)
                ],
                VPCZoneIdentifier=If(
                    'PublicSubnetsOmitted',
                    GetAtt(subnetlookup.title, 'PublicSubnetList'),
                    variables['PublicSubnets'].ref)))
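The launch configuration above references template conditions ('MissingVPNAMI', 'SSHKeySpecified', 'RHELUserData', 'PublicSubnetsOmitted') whose definitions sit outside this excerpt. A minimal, self-contained sketch of how such conditions can be declared in troposphere follows; only the condition names come from the snippet, while the parameters and comparison expressions are assumptions for illustration.

# Hypothetical condition declarations: the condition names are taken from the
# snippet above; the parameters and comparisons are illustrative only.
from troposphere import Equals, Not, Parameter, Ref, Template

template = Template()
key_name = template.add_parameter(Parameter('KeyName', Type='String', Default=''))
vpn_ami = template.add_parameter(Parameter('VPNAMI', Type='String', Default=''))

# An empty KeyName means "launch without an SSH key pair".
template.add_condition('SSHKeySpecified', Not(Equals(Ref(key_name), '')))
# An empty VPNAMI means "fall back to the AMI lookup resource".
template.add_condition('MissingVPNAMI', Equals(Ref(vpn_ami), ''))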
def my_block_device_mappings_root(devicenamebase, volumesize, volumetype):
    return ec2.BlockDeviceMapping(
        DeviceName=devicenamebase + "a1",
        Ebs=ec2.EBSBlockDevice(VolumeSize=volumesize, VolumeType=volumetype))
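A brief usage sketch for the helper above; the device-name base, volume settings, and AMI id are placeholders rather than values taken from the surrounding examples.

# Hypothetical usage: build a 50 GiB gp2 root mapping for /dev/sda1 and
# attach it to an instance definition.
from troposphere import ec2

root_mapping = my_block_device_mappings_root("/dev/sd", 50, "gp2")

app_server = ec2.Instance(
    "AppServer",
    ImageId="ami-00000000",      # placeholder AMI id
    InstanceType="t3.micro",
    BlockDeviceMappings=[root_mapping])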
def build_template(instance_type="m3.xlarge",
                   keypair=None,
                   groups=["default"],
                   setup_script="instance-setup.sh",
                   iops=1000,
                   enterprise=False):
    print("## Building CloudFormation Template")

    template = Template()

    ## CF Template Parameters for Community vs. Standard
    if enterprise is False:
        repopath = "/etc/yum.repos.d/mongodb.repo"
        repocontent = [
            "[MongoDB]\n", "name=MongoDB Repository\n",
            "baseurl=http://downloads-distro.mongodb.org/repo/redhat/os/x86_64\n",
            "gpgcheck=0\n", "enabled=1\n"
        ]
        shortname = "MongoDBInstance"
        longname = "MongoDB {version} {iops} IOPS".format(
            version=MONGODB_VERSION, iops=iops)
    else:
        repopath = "/etc/yum.repos.d/mongodb-enterprise.repo"
        repocontent = [
            "[MongoDB-Enterprise]\n", "name=MongoDB Enterprise Repository\n",
            "baseurl=https://repo.mongodb.com/yum/redhat/6/mongodb-enterprise/stable/$basearch/\n",
            "gpgcheck=0\n", "enabled=1\n"
        ]
        shortname = "MongoDBStandardInstance"
        longname = "MongoDB Standard {version} {iops} IOPS".format(
            version=MONGODB_VERSION, iops=iops)

    ## CF Template UserData script
    ## Hack up UserData a bit so refs are setup correctly
    user_data = [
        "#!/bin/bash\n", "yum update -y aws-cfn-bootstrap\n",
        "/opt/aws/bin/cfn-init -v -s ",
        Ref("AWS::StackName"), " -r ", shortname, " --region ",
        Ref("AWS::Region"), " > /tmp/cfn-init.log 2>&1\n"
    ]
    with open(setup_script) as lines:
        for line in lines:
            user_data.append(line)

    ## CF Template Block Device Mappings
    block_device_mappings = []
    for device_name, purpose in {
            "/dev/xvdf": "data",
            "/dev/xvdg": "journal",
            "/dev/xvdh": "log"
    }.items():
        block_device_mappings.append(
            ec2.BlockDeviceMapping(
                DeviceName=device_name,
                Ebs=ec2.EBSBlockDevice(
                    VolumeSize=STORAGE_MAP[iops][purpose]["size"],
                    Iops=STORAGE_MAP[iops][purpose]["iops"],
                    VolumeType="io1",
                    DeleteOnTermination=False,
                ),
            ))

    ## CF Template Region-AMI-Mapping
    template.add_mapping("RegionAMIMap", REGION_AMI_MAP)

    ## CF Template EC2 Instance
    mongodb_instance = template.add_resource(
        ec2.Instance(
            shortname,
            ImageId=FindInMap("RegionAMIMap", Ref("AWS::Region"), "AMI"),
            InstanceType=instance_type,
            KeyName=keypair,
            SecurityGroups=groups,
            EbsOptimized=True,
            BlockDeviceMappings=block_device_mappings,
            Metadata={
                "AWS::CloudFormation::Init": {
                    "config": {
                        "files": {
                            repopath: {
                                "content": {
                                    "Fn::Join": ["", repocontent]
                                },
                                "mode": "000644",
                                "owner": "root",
                                "group": "root"
                            }
                        }
                    }
                }
            },
            UserData=Base64(Join("", user_data)),
            Tags=Tags(Name=longname, ),
        ))

    ## CF Template Outputs
    template.add_output([
        Output("MongoDBInstanceID",
               Description="MongoDBInstance ID",
               Value=Ref(mongodb_instance)),
        Output("MongoDBInstanceDNS",
               Description="MongoDBInstance Public DNS",
               Value=GetAtt(mongodb_instance, "PublicDnsName"))
    ])

    print("## CloudFormation Template Built")

    return template.to_json()
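A small driver sketch for build_template above; the key pair name, security group, and output file name are placeholders, not values from the original example.

# Hypothetical driver: render the community template at 1000 IOPS and write
# the resulting JSON to disk for use with the AWS CLI or console.
if __name__ == "__main__":
    template_json = build_template(
        instance_type="m3.xlarge",
        keypair="my-keypair",          # placeholder EC2 key pair name
        groups=["mongodb-sg"],         # placeholder security group name
        setup_script="instance-setup.sh",
        iops=1000,
        enterprise=False)
    with open("mongodb-template.json", "w") as out:
        out.write(template_json)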
                [
                    "#!/bin/bash\n",
                    "cfn-signal -e 0",
                    "    --resource AutoscalingGroup",
                    "    --stack ",
                    Ref("AWS::StackName"),
                    "    --region ",
                    Ref("AWS::Region"),
                    "\n",
                ],
            )),
        ImageId=Ref(AmiId),
        KeyName=Ref(KeyName),
        BlockDeviceMappings=[
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(VolumeSize="8")),
        ],
        SecurityGroups=[Ref(SecurityGroup)],
        InstanceType="m1.small",
    ))

LoadBalancer = t.add_resource(
    elb.LoadBalancer(
        "LoadBalancer",
        ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
            Enabled=True,
            Timeout=120,
        ),
        Subnets=[Ref(PublicSubnet1), Ref(PublicSubnet2)],
        HealthCheck=elb.HealthCheck(
            Target="HTTP:80/",
Example #28
0
    def create_asg(
            self,
            layer_name,
            instance_profile,
            instance_type=None,
            ami_name='ubuntu1404LtsAmiId',
            ec2_key=None,
            user_data=None,
            default_instance_type=None,
            security_groups=None,
            min_size=1,
            max_size=1,
            root_volume_size=None,
            root_volume_type=None,
            include_ephemerals=True,
            number_ephemeral_vols=2,
            ebs_data_volumes=None,  # e.g. [{'size': '100', 'type': 'io1', 'iops': 4000, 'delete_on_termination': True}]
            custom_tags=None,
            load_balancer=None,
            instance_monitoring=False,
            subnet_type='private',
            launch_config_metadata=None,
            creation_policy=None,
            update_policy=None,
            depends_on=None):
        '''
        Wrapper method used to create an EC2 Launch Configuration and Auto Scaling group.
        A usage sketch follows the method body.
        @param layer_name [string] friendly name of the set of instances being created - will be set as the name for instances deployed
        @param instance_profile [Troposphere.iam.InstanceProfile] IAM Instance Profile object to be applied to instances launched within this Auto Scaling group
        @param instance_type [Troposphere.Parameter | string] reference to the AWS EC2 Instance Type to deploy
        @param ami_name [string] name of the AMI to deploy as defined within the RegionMap lookup for the deployed region
        @param ec2_key [Troposphere.Parameter | Troposphere.Ref(Troposphere.Parameter)] input parameter used to gather the name of the EC2 key to use to secure access to instances launched within this Auto Scaling group
        @param user_data [string[]] array of strings (lines of bash script) to be set as the user data as a bootstrap script for instances launched within this Auto Scaling group
        @param default_instance_type [string - AWS Instance Type] AWS instance type to set as the default for the input parameter defining the instance type for this layer_name
        @param security_groups [Troposphere.ec2.SecurityGroup[]] array of security groups to be applied to instances within this Auto Scaling group
        @param min_size [int] minimum number of instances for the Auto Scaling group
        @param max_size [int] maximum number of instances for the Auto Scaling group
        @param root_volume_size [int] size (in GiB) to assign to the root volume of the launched instance
        @param root_volume_type [string] EBS volume type (e.g. 'gp2') to assign to the root volume
        @param include_ephemerals [Boolean] indicates that ephemeral volumes should be included in the block device mapping of the Launch Configuration
        @param number_ephemeral_vols [int] number of ephemeral volumes to attach within the block device mapping Launch Configuration
        @param ebs_data_volumes [dict[]] list of dictionaries describing additional EBS volumes to attach (keys: 'size', 'type', 'iops', 'snapshot_id', 'delete_on_termination')
        @param custom_tags [Troposphere.autoscaling.Tag[]] collection of Auto Scaling tags to be assigned to the Auto Scaling group
        @param load_balancer [Troposphere.elasticloadbalancing.LoadBalancer] object reference to an ELB to be assigned to this Auto Scaling group
        @param instance_monitoring [Boolean] indicates that detailed monitoring should be turned on for all instances launched within this Auto Scaling group
        @param subnet_type [string {'public', 'private'}] string indicating which type of subnet (public or private) instances should be launched into
        @param launch_config_metadata [dict] optional AWS::CloudFormation::Init metadata to attach to the Launch Configuration
        @param creation_policy [dict] optional CreationPolicy to set on the Auto Scaling group resource
        @param update_policy [dict] optional UpdatePolicy to set on the Auto Scaling group resource
        @param depends_on [string | string[]] optional resource name(s) the Auto Scaling group should depend on
        '''
        if subnet_type not in ['public', 'private']:
            raise RuntimeError(
                'Unable to determine which type of subnet instances should be launched into. '
                + str(subnet_type) + ' is not one of ["public", "private"].')

        if ec2_key is not None and not isinstance(ec2_key, Ref):
            ec2_key = Ref(ec2_key)
        elif ec2_key is None:
            ec2_key = Ref(self.template.parameters['ec2Key'])

        if default_instance_type is None:
            default_instance_type = 'm1.small'

        if not isinstance(instance_type, str):
            instance_type = Ref(instance_type)

        sg_list = []
        for sg in security_groups:
            if isinstance(sg, Ref):
                sg_list.append(sg)
            else:
                sg_list.append(Ref(sg))

        launch_config_obj = autoscaling.LaunchConfiguration(
            layer_name + 'LaunchConfiguration',
            IamInstanceProfile=Ref(instance_profile),
            ImageId=FindInMap('RegionMap', Ref('AWS::Region'), ami_name),
            InstanceType=instance_type,
            SecurityGroups=sg_list,
            KeyName=ec2_key,
            Metadata=(launch_config_metadata or None),
            InstanceMonitoring=instance_monitoring)

        if user_data is not None:
            launch_config_obj.UserData = user_data

        block_devices = []
        if root_volume_type is not None and root_volume_size is not None:
            ebs_device = ec2.EBSBlockDevice(
                VolumeSize=root_volume_size,
                VolumeType=root_volume_type)

            block_devices.append(
                ec2.BlockDeviceMapping(DeviceName='/dev/sda1', Ebs=ebs_device))

        device_names = ['/dev/sd%s' % c for c in 'bcdefghijklmnopqrstuvwxyz']

        if ebs_data_volumes:
            for ebs_volume in ebs_data_volumes:
                device_name = device_names.pop()
                ebs_block_device = ec2.EBSBlockDevice(
                    DeleteOnTermination=ebs_volume.get('delete_on_termination',
                                                       True),
                    VolumeSize=ebs_volume.get('size', '100'),
                    VolumeType=ebs_volume.get('type', 'gp2'))

                if 'iops' in ebs_volume:
                    ebs_block_device.Iops = int(ebs_volume.get('iops'))
                if 'snapshot_id' in ebs_volume:
                    ebs_block_device.SnapshotId = ebs_volume.get('snapshot_id')

                block_devices.append(
                    ec2.BlockDeviceMapping(DeviceName=device_name,
                                           Ebs=ebs_block_device))

        if include_ephemerals and number_ephemeral_vols > 0:
            device_names.reverse()
            for x in range(0, number_ephemeral_vols):
                device_name = device_names.pop()
                block_devices.append(
                    ec2.BlockDeviceMapping(DeviceName=device_name,
                                           VirtualName='ephemeral' + str(x)))

        if len(block_devices) > 0:
            launch_config_obj.BlockDeviceMappings = block_devices

        launch_config = self.template.add_resource(launch_config_obj)

        if depends_on:
            auto_scaling_obj = autoscaling.AutoScalingGroup(
                layer_name + 'AutoScalingGroup',
                AvailabilityZones=self.azs,
                LaunchConfigurationName=Ref(launch_config),
                MaxSize=max_size,
                MinSize=min_size,
                DesiredCapacity=min(min_size, max_size),
                VPCZoneIdentifier=self.subnets[subnet_type.lower()],
                TerminationPolicies=[
                    'OldestLaunchConfiguration', 'ClosestToNextInstanceHour',
                    'Default'
                ],
                DependsOn=depends_on)
        else:
            auto_scaling_obj = autoscaling.AutoScalingGroup(
                layer_name + 'AutoScalingGroup',
                AvailabilityZones=self.azs,
                LaunchConfigurationName=Ref(launch_config),
                MaxSize=max_size,
                MinSize=min_size,
                DesiredCapacity=min(min_size, max_size),
                VPCZoneIdentifier=self.subnets[subnet_type.lower()],
                TerminationPolicies=[
                    'OldestLaunchConfiguration', 'ClosestToNextInstanceHour',
                    'Default'
                ])

        lb_tmp = []

        if load_balancer is not None:
            try:
                if type(load_balancer) is dict:
                    for lb in load_balancer:
                        lb_tmp.append(Ref(load_balancer[lb]))
                elif type(load_balancer) is not Ref:
                    for lb in load_balancer:
                        lb_tmp.append(Ref(lb))
                else:
                    lb_tmp.append(load_balancer)
            except TypeError:
                lb_tmp.append(Ref(load_balancer))
        else:
            lb_tmp = None

        if lb_tmp is not None and len(lb_tmp) > 0:
            auto_scaling_obj.LoadBalancerNames = lb_tmp

        if creation_policy is not None:
            auto_scaling_obj.resource['CreationPolicy'] = creation_policy

        if update_policy is not None:
            auto_scaling_obj.resource['UpdatePolicy'] = update_policy

        if custom_tags:
            if not isinstance(custom_tags, list):
                custom_tags = [custom_tags]
            auto_scaling_obj.Tags = custom_tags
        else:
            auto_scaling_obj.Tags = []

        auto_scaling_obj.Tags.append(autoscaling.Tag('Name', layer_name, True))
        return self.template.add_resource(auto_scaling_obj)
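A usage sketch for the create_asg wrapper above, written as a call from inside the same class; it is an illustrative fragment rather than a standalone script, and the instance profile, parameter, and security group objects are placeholders assumed to be defined elsewhere in the template.

# Hypothetical call: a small 'web' layer in the private subnets with a
# 50 GiB gp2 root volume and no ephemeral or extra EBS data volumes.
web_asg = self.create_asg(
    layer_name='web',
    instance_profile=web_instance_profile,      # placeholder iam.InstanceProfile added elsewhere
    instance_type=web_instance_type_parameter,  # placeholder Parameter (wrapped in Ref by the method)
    security_groups=[web_security_group],       # placeholder ec2.SecurityGroup added elsewhere
    min_size=1,
    max_size=3,
    root_volume_size=50,
    root_volume_type='gp2',
    include_ephemerals=False,
    subnet_type='private')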
Example #29
0
    ]
))

eip = template.add_resource(ec2.EIP("Eip"))

ec2_instance = template.add_resource(ec2.Instance(
    'Ec2Instance',
    ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
    InstanceType=Ref(instance_type),
    KeyName=Ref(key_name),
    SecurityGroups=[Ref(security_group)],
    BlockDeviceMappings=[
        ec2.BlockDeviceMapping(
            DeviceName="/dev/sda1",
            Ebs=ec2.EBSBlockDevice(
                VolumeSize=Ref(root_size),
            )
        ),
    ],
    UserData=Base64(Join('', [
        '#!/bin/bash\n',
        'sudo apt-get update\n',
        'sudo apt-get install -y build-essential\n',
        'wget https://raw.githubusercontent.com/dokku/dokku/', Ref(dokku_version), '/bootstrap.sh\n',
        'sudo',
        ' DOKKU_TAG=', Ref(dokku_version),
        ' DOKKU_VHOST_ENABLE=', Ref(dokku_vhost_enable),
        ' DOKKU_WEB_CONFIG=', Ref(dokku_web_config),
        ' DOKKU_HOSTNAME=', Ref(dokku_hostname),
        ' DOKKU_KEY_FILE=/home/ubuntu/.ssh/authorized_keys',  # use the key configured by key_name
        ' bash bootstrap.sh\n',
Example #30
0
        LaunchTemplateName=Ref(api_launch_template_name),
        LaunchTemplateData=ec2.LaunchTemplateData(
            ImageId='ami-066826c6a40879d75',
            InstanceType=Ref(api_instance_class),
            IamInstanceProfile=ec2.IamInstanceProfile(
                Arn=GetAtt(ec2_instance_profile, 'Arn')
            ),
            InstanceInitiatedShutdownBehavior='terminate',
            Monitoring=ec2.Monitoring(Enabled=True),
            SecurityGroups=[Ref(api_security_group)],
            BlockDeviceMappings=[
                ec2.BlockDeviceMapping(
                    DeviceName='/dev/xvdcz',
                    Ebs=ec2.EBSBlockDevice(
                        DeleteOnTermination=True,
                        VolumeSize=22,
                        VolumeType='gp2'
                    )
                )
            ],
            UserData=Base64(
                Join('', [
                    '#!/bin/bash\n',
                    'echo ECS_CLUSTER=',
                    Ref(ecs_cluster),
                    ' >> /etc/ecs/ecs.config;echo ECS_BACKEND_HOST= >> /etc/ecs/ecs.config;'
                ])
            )
        )
    )
)