Example #1
def generate_json():
    r = {}
    t = Template()
    # t.add_description(Join('', ["DOMjudge Cluster - ", Ref('AWS::StackName')]))
    t.add_description("DOMjudge Cluster")

    r['notify_topic'] = Select(0, Ref("AWS::NotificationARNs"))

    t.add_mapping('SizeMap', {
        'nano': {
            'RDSInstanceType': 'db.t2.micro',
            'WebInstanceType': 't2.micro',
            'WebASGMinSize': 1,
            'WebASGMaxSize': 4,
            'JudgeASGMinSize': 1,
            'JudgeASGMaxSize': 4,
        },
        'small': {
            'RDSInstanceType': 'db.t2.micro',
            'WebInstanceType': 't2.micro',
            'WebASGMinSize': 1,
            'WebASGMaxSize': 4,
            'JudgeASGMinSize': 1,
            'JudgeASGMaxSize': 4,
        },
        'medium': {
            'RDSInstanceType': 'db.t2.micro',
            'WebInstanceType': 't2.micro',
            'WebASGMinSize': 1,
            'WebASGMaxSize': 4,
            'JudgeASGMinSize': 1,
            'JudgeASGMaxSize': 4,
        },
        'large': {
            'RDSInstanceType': 'db.t2.micro',
            'WebInstanceType': 't2.micro',
            'WebASGMinSize': 1,
            'WebASGMaxSize': 4,
            'JudgeASGMinSize': 1,
            'JudgeASGMaxSize': 4,
        },
    })

    parameters.init(t, r)
    dynamodb.init(t, r)
    iam.init(t, r)
    securitygroups.init(t, r)
    rds.init(t, r)
    webserver.init(t, r)
    judgehost.init(t, r)

    return t.to_json()
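A minimal consumption sketch (not from the original snippet): the SizeMap mapping above is typically read back with FindInMap keyed by a size parameter; the "ClusterSize" parameter and the abbreviated map below are hypothetical.

from troposphere import FindInMap, Parameter, Ref, Template

t = Template()
# Abbreviated stand-in for the SizeMap defined in generate_json() above.
t.add_mapping('SizeMap', {'small': {'RDSInstanceType': 'db.t2.micro'}})
size = t.add_parameter(Parameter(
    "ClusterSize",           # hypothetical parameter, not in the original code
    Type="String",
    Default="small",
    AllowedValues=["nano", "small", "medium", "large"],
))
# Resolved by CloudFormation at deploy time.
rds_instance_type = FindInMap("SizeMap", Ref(size), "RDSInstanceType")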
Example #2
    def output_template(self):
        template = Template()
        for parameter in self.parameters:
            template.add_parameter(parameter)

        for mapping in self.mappings:
            template.add_mapping(mapping[0], mapping[1])

        for resource in self.resources:
            template.add_resource(resource)

        for output in self.outputs:
            template.add_output(output)

        print template.to_json()
        return
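A hedged sketch of the attribute layout the loops above expect; the container class and its contents are inferred from the method, not taken from the original project. (Note that `print template.to_json()` is Python 2 syntax.)

class TemplateHolder(object):
    """Hypothetical container matching what output_template() iterates over."""
    def __init__(self):
        self.parameters = []   # troposphere Parameter objects
        self.mappings = [      # (name, dict) pairs passed to add_mapping(name, mapping)
            ("RegionMap", {"us-east-1": {"AMI": "ami-12345678"}}),
        ]
        self.resources = []    # troposphere resource objects
        self.outputs = []      # troposphere Output objects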
Example #3
    def InstanceVolumeTemplate(self):
        self.stack_name = "volumeTest{0}".format(int(time.time()))
        template = Template()
        keyname_param = template.add_parameter(
            Parameter(
                "KeyName",
                Description="Name of an existing EC2 KeyPair "
                "to enable SSH access to the instance",
                Type="String",
            ))
        template.add_mapping('RegionMap',
                             {"": {
                                 "AMI": self.tester.get_emi().id
                             }})
        for i in xrange(2):
            ec2_instance = template.add_resource(
                ec2.Instance("Instance{0}".format(i),
                             ImageId=FindInMap("RegionMap", Ref("AWS::Region"),
                                               "AMI"),
                             InstanceType="t1.micro",
                             KeyName=Ref(keyname_param),
                             SecurityGroups=[self.group.name],
                             UserData=Base64("80")))
            vol = template.add_resource(
                ec2.Volume("Volume{0}".format(i),
                           Size="8",
                           AvailabilityZone=GetAtt("Instance{0}".format(i),
                                                   "AvailabilityZone")))
            mount = template.add_resource(
                ec2.VolumeAttachment("MountPt{0}".format(i),
                                     InstanceId=Ref("Instance{0}".format(i)),
                                     VolumeId=Ref("Volume{0}".format(i)),
                                     Device="/dev/vdc"))
        stack = self.tester.create_stack(self.stack_name,
                                         template.to_json(),
                                         parameters=[("KeyName",
                                                      self.keypair.name)])

        def stack_completed():
            return self.tester.cloudformation.describe_stacks(
                self.stack_name).status == "CREATE_COMPLETE"

        self.tester.wait_for_result(stack_completed, True, timeout=600)
        self.tester.delete_stack(self.stack_name)
Example #4
def create_cft():
    """Creates the CFT."""
    template = Template()
    template.add_version("2010-09-09")
    template.add_description("CFT to create merch cube user")

    # Add Parameters
    for param in parameters:
        template.add_parameter(param)

    # Add Maps
    template.add_mapping("EnvGroupToEnv", env_group_to_env)

    # Add Resources
    template.add_resource(managed_policy)

    template.add_resource(merch_cube_user)

    return template
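A minimal driver sketch, assuming the module-level names referenced above (parameters, env_group_to_env, managed_policy, merch_cube_user) are defined elsewhere in the same file:

if __name__ == "__main__":
    print(create_cft().to_json())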
Example #5
 def InstanceVolumeTemplate(self):
     self.stack_name = "volumeTest{0}".format(int(time.time()))
     template = Template()
     keyname_param = template.add_parameter(Parameter("KeyName", Description="Name of an existing EC2 KeyPair "
                                                                             "to enable SSH access to the instance",
                                                      Type="String",))
     template.add_mapping('RegionMap', {"": {"AMI": self.tester.get_emi().id}})
     for i in xrange(2):
         ec2_instance = template.add_resource(ec2.Instance("Instance{0}".format(i),
                                                           ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
                                                           InstanceType="t1.micro", KeyName=Ref(keyname_param),
                                                           SecurityGroups=[self.group.name], UserData=Base64("80")))
         vol = template.add_resource(ec2.Volume("Volume{0}".format(i), Size="8",
                                                AvailabilityZone=GetAtt("Instance{0}".format(i), "AvailabilityZone")))
         mount = template.add_resource(ec2.VolumeAttachment("MountPt{0}".format(i), InstanceId=Ref("Instance{0}".format(i)),
                                                            VolumeId=Ref("Volume{0}".format(i)), Device="/dev/vdc"))
     stack = self.tester.create_stack(self.stack_name, template.to_json(), parameters=[("KeyName",self.keypair.name)])
     def stack_completed():
         return self.tester.cloudformation.describe_stacks(self.stack_name).status == "CREATE_COMPLETE"
     self.tester.wait_for_result(stack_completed, True, timeout=600)
     self.tester.delete_stack(self.stack_name)
Example #6
class Stack(object):
    def __init__(self):
        self.template = Template()
        self.template.add_version("2010-09-09")
        self.template.add_description("Creates resources for a Ethereum node")

        parameters = Parameters()
        vpn = Vpn(parameters=parameters)

        for key, value in Mappings().mappings.iteritems():
            self.template.add_mapping(key, value)

        for param in parameters.values():
            self.template.add_parameter(param)

        for res in vpn.values():
            self.template.add_resource(res)

        self.template.add_metadata({
            "AWS::CloudFormation::Interface": {
                "ParameterGroups": [
                    {
                        "Label": {
                            "default": "EC2"
                        },
                        "Parameters": [
                            "InstanceImage", "InstanceKeyPair",
                            "InstanceStorageData", "InstanceStorageOS",
                            "InstanceType"
                        ]
                    },
                    {
                        "Label": {
                            "default": "VPC"
                        },
                        "Parameters": ["VPC"]
                    },
                ]
            }
        })
Example #7
class DevDeployment(AbstractDeployment):
    def __init__(self, template=None):
        super(DevDeployment, self).__init__('dev')
        if template:
            self.template = template
        else:
            self.template = Template()
            self.template.add_version("2010-09-09")
            self.template.add_description("DevDeployment EC2 instance")

            self.template.add_parameter(
                Parameter("Os",
                          Description="Chosen operating system",
                          Type="String",
                          Default="AmazonLinux2",
                          AllowedValues=[
                              "AmazonLinux2",
                              "Windows2016Base",
                          ]))

            self.template.add_mapping(
                'AmiByOs', {
                    "us-east-1": {
                        "Windows2016Base": "ami-06bee8e1000e44ca4",
                        "AmazonLinux2": "ami-0c6b1d09930fac512",
                    },
                    "us-west-2": {
                        "Windows2016Base": "ami-07f35a597a32e470d",
                        "AmazonLinux2": "ami-0cb72367e98845d43",
                    },
                })

            self.template.add_resource(
                ec2.Instance(
                    "DevEc2",
                    ImageId=FindInMap("AmiByOs", Ref("AWS::Region"),
                                      Ref("Os")),
                    InstanceType="t2.small",
                ))
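A hedged usage sketch (the call site is an assumption): rendering the DevDeployment template resolves the AmiByOs lookup via FindInMap("AmiByOs", Ref("AWS::Region"), Ref("Os")) at deploy time.

deployment = DevDeployment()
print(deployment.template.to_json())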
Example #8
    def base_template(self):

        t = Template()

        t.add_mapping("AWSRegion2AMI", {"eu-west-1": {"AMI": "ami-00d88f77"}})

        if "vpc" in self.data:
            t.add_mapping("SubnetConfig", {"VPC": self.data["vpc"]})
        else:
            t.add_mapping(
                "SubnetConfig",
                {
                    "VPC": {
                        "CIDR": "10.0.0.0/16",
                        "SubnetA": "10.0.0.0/20",
                        "SubnetB": "10.0.16.0/20",
                        "SubnetC": "10.0.32.0/20",
                    }
                },
            )

        return t
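A consumption sketch, not part of the original class: values in the SubnetConfig mapping are normally read back with FindInMap when declaring the VPC and subnets in the same template. Here `t` is assumed to be the Template returned by base_template().

from troposphere import FindInMap
import troposphere.ec2 as ec2

vpc = t.add_resource(ec2.VPC(
    "VPC",
    CidrBlock=FindInMap("SubnetConfig", "VPC", "CIDR"),
))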
Example #9
              ],
              ConstraintDescription='must be a environment type.'))

t.add_mapping(
    'prod', {
        'cidr': {
            'Vpc': '10.0.0.0/16',
            'Public': '10.0.0.0/24',
            'Private': '10.0.1.0/24'
        },
        'tags': {
            'Vpc': 'vpc',
            'Igw': 'igw',
            'PublicSubnet': 'prod-PublicSubnet',
            'PrivateSubnet': 'prod-PrivateSubnet',
            'PublicRouteTable': 'prod-PublicRoute',
            'PrivateRouteTable': 'prod-PrivateRoute',
            'NatGateway': 'prod-NatGateway'
        },
        'output': {
            'Vpc': 'prod-vpc-id',
            'Igw': 'prod-igw-id',
            'PublicSubnet': 'prod-PublicSubnet-id',
            'PrivateSubnet': 'prod-PrivateSubnet-id',
            'PublicRouteTable': 'prod-PublicRoute-id',
            'PrivateRouteTable': 'prod-PrivateRoute-id',
            'NatGateway': 'prod-NatGateway-id'
        }
    })

t.add_mapping(
    'veri', {
Example #10
private_servers = []
db_servers = []

template = Template()

template.add_description('This template for AWS Cloud Formation. Ilevel Project')

keyname_param = template.add_parameter(Parameter(
    "KeyName",
    Description="Name of an existing EC2 KeyPair to enable SSH "
                "access to the instance",
    Type="String",
))

template.add_mapping('RegionMap', {
    "us-west-2":      {"AMI": "ami-83a5bce2"},
})


security_groups = [template.add_resource(ec2.SecurityGroup(
    "WEB",
    GroupDescription='For WEB servers',
    SecurityGroupIngress=[{'CidrIp': '0.0.0.0/0',
                           'FromPort': '22',
                           'IpProtocol': 'tcp',
                           'ToPort': '22'},
                          {'CidrIp': '0.0.0.0/0',
                           'FromPort': '80',
                           'IpProtocol': 'tcp',
                           'ToPort': '80'}],
)),
Example #11
# Converted from EC2InstanceSample.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/

import troposphere.ec2 as ec2
from troposphere import (Base64, cloudformation, FindInMap, GetAtt, GetAZs,
                         Join, Parameter, Output, Ref, Tags, Template)

template = Template()

template.add_mapping('RegionMap', {
    "us-east-1": {
        "AMI": "ami-4d87fc5a"
    },
})

ec2_instance_1 = template.add_resource(
    ec2.Instance("Ec2Instance1",
                 ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
                 InstanceType="t1.micro",
                 KeyName="mcheriyath",
                 SecurityGroups=["mcheriyath-sg"],
                 Tags=Tags(**{
                     'Name': 'DevOpsDenver',
                     'Owner': '*****@*****.**'
                 })))

ec2_instance_2 = template.add_resource(
    ec2.Instance("Ec2Instance2",
                 ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
                 InstanceType="t1.micro",
                 KeyName="mcheriyath",
Example #12
                    Cookies=cloudfront.Cookies(
                        Forward=
                        'all',  # Don't do this. Done here to validate cookie-removal logic
                    ),
                ),
            ),
            ViewerCertificate=cloudfront.ViewerCertificate(
                AcmCertificateArn=Ref(acm_cert),
                SslSupportMethod='sni-only',
            ),
        ),
        Tags=GetAtt(cloudformation_tags, 'TagList'),
    ))

hosted_zone_map = "HostedZoneMap"
template.add_mapping(hosted_zone_map, cfnutils.mappings.r53_hosted_zone_id())

template.add_resource(
    route53.RecordSetType(
        "DomainA",
        AliasTarget=route53.AliasTarget(
            DNSName=GetAtt(example_distribution, 'DomainName'),
            HostedZoneId=FindInMap(hosted_zone_map, Ref(AWS_REGION),
                                   'CloudFront'),
        ),
        Comment=Sub('DNS for ${AWS::StackName}'),
        HostedZoneName=Join('', [Ref(param_hosted_zone_name), '.']),
        Name=domain_name,
        Type='A',
    ))
template.add_resource(
Example #13
 def test_multiple_mappings(self):
     template = Template()
     template.add_mapping("map", {"k1": {"n1": "v1"}})
     template.add_mapping("map", {"k2": {"n2": "v2"}})
     json = template.to_json()
     self.assertEqual(multiple_mappings, json)
Example #14
    Roles=[Ref(AccessRoleAmbari)],
))

CFNRolePolicies = t.add_resource(iam.PolicyType(
    "CFNRolePolicies",
    PolicyName="CFNaccess",
    PolicyDocument={ "Statement": [{ "Action": "cloudformation:Describe*", "Resource": "*", "Effect": "Allow" }] },
    Roles=[Ref("AccessRoleAmbari")],
))

t.add_mapping("CENTOS7", {
    "eu-west-1": {"AMI": "ami-33734044"},
    "ap-southeast-1": {"AMI": "ami-2a7b6b78"},
    "ap-southeast-2": {"AMI": "ami-d38dc6e9"},
    "eu-central-1": {"AMI": "ami-e68f82fb"},
    "ap-northeast-1": {"AMI": "ami-b80b6db8"},
    "us-east-1": {"AMI": "ami-61bbf104"},
    "sa-east-1": {"AMI": "ami-fd0197e0"},
    "us-west-1": {"AMI": "ami-f77fbeb3"},
    "us-west-2": {"AMI": "ami-d440a6e7"}
})



waitHandleAmbari = t.add_resource(WaitConditionHandle("waitHandleAmbari"))

waitConditionAmbari = t.add_resource(
    WaitCondition(
        "waitConditionAmbari",
        Handle=Ref(waitHandleAmbari),
        Timeout="2700",
Example #15
File: rds.py Project: honeybe/code-sample
# -*- coding: utf-8 -*-
from troposphere import FindInMap, Ref
from troposphere import Template, Tags
import troposphere.rds as rds

t = Template()
t.add_version("2010-09-09")
t.add_description("rds sample")

t.add_mapping("RDSConfig", {
    "Sample": {
        "TagName": "RDSSample",
        "username": "******",
        "password": "******",
        "dbname": "sample",
        "subnetids": [
            "subnet-xxxxxxxa",
            "subnet-xxxxxxxc"
        ]
    },
})

# db_subnet_group
subnetgroup = t.add_resource(rds.DBSubnetGroup(
    "subnetgroup",
    DBSubnetGroupDescription="SubnetGroup sample",
    SubnetIds=FindInMap("RDSConfig", "Sample", "subnetids"),
    Tags=Tags(
        Name=FindInMap("RDSConfig", "Sample", "TagName"),
        Application=Ref("AWS::StackName")
    )
Example #16
# -*- coding: utf-8 -*-
from troposphere import FindInMap, Ref
from troposphere import Template, Tags
import troposphere.ec2 as ec2

t = Template()
t.add_version("2010-09-09")
t.add_description("VPC 120 with multiple subnets")

t.add_mapping("SubnetConfig", {
    "VPC": {"CIDR": "10.120.0.0/24"},
    "PublicA": {"CIDR": "10.120.0.0/26", "AvailabilityZone": "ap-northeast-1a"},
    "PublicB": {"CIDR": "10.120.0.64/26", "AvailabilityZone": "ap-northeast-1c"},
    "PrivateA": {"CIDR": "10.120.0.128/26", "AvailabilityZone": "ap-northeast-1a"},
    "PrivateB": {"CIDR": "10.120.0.192/26", "AvailabilityZone": "ap-northeast-1c"},
})
t.add_mapping("SecurityGroupConfig", {
    "sg": {"CIDR": "127.0.0.1/32"},
})

# VPC構築
VPC = t.add_resource(ec2.VPC(
    "VPC",
    CidrBlock=FindInMap("SubnetConfig", "VPC", "CIDR"),
    EnableDnsSupport=True,
    EnableDnsHostnames=True,
    Tags=Tags(
        Application=Ref("AWS::StackName"),
        Network="Public"
    )
))
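A hypothetical continuation (not in the original excerpt) showing how the per-subnet CIDR and AvailabilityZone entries in SubnetConfig would typically be consumed:

PublicSubnetA = t.add_resource(ec2.Subnet(
    "PublicSubnetA",
    VpcId=Ref(VPC),
    CidrBlock=FindInMap("SubnetConfig", "PublicA", "CIDR"),
    AvailabilityZone=FindInMap("SubnetConfig", "PublicA", "AvailabilityZone"),
    Tags=Tags(Application=Ref("AWS::StackName"), Network="Public"),
))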
Example #17
    "SSHLocation",
    Description="The IP address range that can be used to SSH to the "
                "EC2 instances",
    Type="String",
    MinLength="9",
    MaxLength="18",
    Default="0.0.0.0/0",
    AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})"
                   "/(\\d{1,2})",
    ConstraintDescription="must be a valid IP CIDR range of the "
                          "form x.x.x.x/x."
))
  
template.add_mapping('RegionMap', {
    "us-east-1": {"AMI": "ami-76817c1e"},
    "us-west-1": {"AMI": "ami-f0d3d4b5"},
    "us-west-2": {"AMI": "ami-d13845e1"},
    "eu-west-1": {"AMI": "ami-892fe1fe"}
})
### Security Groups Stuff ###
###########  To Be Implemented later  ####################
kafka_sg = template.add_resource(ec2.SecurityGroup(
    "KafkaSecurityGroup",
    VpcId=Ref(vpcid_param),
    GroupDescription="Enable SSH, Kafka access via ports 22, bla bla bla",
    SecurityGroupIngress=[
        ec2.SecurityGroupRule(
            IpProtocol="tcp",
            FromPort="22",
            ToPort="22",
            CidrIp=Ref(sshlocation_param),
        ),
Example #18
t.add_mapping('AWSInstanceType2Arch', {
    't1.micro': {'Arch': 'PV64'},
    't2.micro': {'Arch': 'HVM64'},
    't2.small': {'Arch': 'HVM64'},
    't2.medium': {'Arch': 'HVM64'},
    'm1.small': {'Arch': 'PV64'},
    'm1.medium': {'Arch': 'PV64'},
    'm1.large': {'Arch': 'PV64'},
    'm1.xlarge': {'Arch': 'PV64'},
    'm2.xlarge': {'Arch': 'PV64'},
    'm2.2xlarge': {'Arch': 'PV64'},
    'm2.4xlarge': {'Arch': 'PV64'},
    'm3.medium': {'Arch': 'HVM64'},
    'm3.large': {'Arch': 'HVM64'},
    'm3.xlarge': {'Arch': 'HVM64'},
    'm3.2xlarge': {'Arch': 'HVM64'},
    'c1.medium': {'Arch': 'PV64'},
    'c1.xlarge': {'Arch': 'PV64'},
    'c3.large': {'Arch': 'HVM64'},
    'c3.xlarge': {'Arch': 'HVM64'},
    'c3.2xlarge': {'Arch': 'HVM64'},
    'c3.4xlarge': {'Arch': 'HVM64'},
    'c3.8xlarge': {'Arch': 'HVM64'},
    'g2.2xlarge': {'Arch': 'HVMG2'},
    'r3.large': {'Arch': 'HVM64'},
    'r3.xlarge': {'Arch': 'HVM64'},
    'r3.2xlarge': {'Arch': 'HVM64'},
    'r3.4xlarge': {'Arch': 'HVM64'},
    'r3.8xlarge': {'Arch': 'HVM64'},
    'i2.xlarge': {'Arch': 'HVM64'},
    'i2.2xlarge': {'Arch': 'HVM64'},
    'i2.4xlarge': {'Arch': 'HVM64'},
    'i2.8xlarge': {'Arch': 'HVM64'},
    'hi1.4xlarge': {'Arch': 'HVM64'},
    'hs1.8xlarge': {'Arch': 'HVM64'},
    'cr1.8xlarge': {'Arch': 'HVM64'},
    'cc2.8xlarge': {'Arch': 'HVM64'},
})
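A sketch of the two-step lookup this Arch table normally enables; the companion "AWSRegionArch2AMI" mapping and the "InstanceType" parameter are assumptions based on the standard AWS sample templates.

from troposphere import FindInMap, Ref

image_id = FindInMap(
    "AWSRegionArch2AMI",                                     # assumed companion mapping
    Ref("AWS::Region"),
    FindInMap("AWSInstanceType2Arch", Ref("InstanceType"), "Arch"),
)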
Example #19
def create(AMIMap, instanceProfile, snsTopic, dnsCheckerDDB):
	# Create checker stack
	checker = Template()

	checker.add_description("Stack defining the checker instance for dnsChecker implementation")

	# Create AMI Map
	checker.add_mapping("AMIMap",AMIMap)

	# Create checker VPC
	checkerVPC = checker.add_resource(
		VPC(
			"checkerVPC",
			CidrBlock="10.0.0.0/16",
			Tags=Tags(
				Name="checkerVPC"
			)
		)
	)

	# Create checker IGW
	checkerIGW = checker.add_resource(
		InternetGateway(
			"checkerIGW"
		)
	)

	# Attach IGW to VPC
	checkerIGWAttachment = checker.add_resource(
		VPCGatewayAttachment(
			"checkerIGWAttachment",
			VpcId=Ref(checkerVPC),
			InternetGatewayId=Ref(checkerIGW)
		)
	)

	# Create checker Subnet
	checkerSubnet = checker.add_resource(
		Subnet(
			"checkerSubnet",
			CidrBlock="10.0.0.0/24",
			VpcId=Ref(checkerVPC)
		)
	)

	# Create checker RTB
	checkerRTB = checker.add_resource(
		RouteTable(
			"checkerRTB",
			VpcId=Ref(checkerVPC)
		)
	)

	# Create route to IGW
	checkerDefaultRoute = checker.add_resource(
		Route(
			"checkerDefaultRoute",
			DependsOn="checkerIGWAttachment",
			GatewayId=Ref(checkerIGW),
			DestinationCidrBlock="0.0.0.0/0",
			RouteTableId=Ref(checkerRTB)
		)
	)

	# Associate RTB with Subnet
	checkerSubnetRTBAssociation = checker.add_resource(
		SubnetRouteTableAssociation(
			"checkerSubnetRTBAssociation",
			SubnetId=Ref(checkerSubnet),
			RouteTableId=Ref(checkerRTB)
		)
	)

	# Create checker Security Group

	checkerSecurityGroup = checker.add_resource(
		SecurityGroup(
			"checkerSecurityGroup",
			GroupDescription="Allow inbound access on port 80",
			SecurityGroupIngress=[
				SecurityGroupRule(
					IpProtocol="tcp",
					FromPort="80",
					ToPort="80",
					CidrIp="0.0.0.0/0"
				)
			],
			VpcId=Ref(checkerVPC)
		)
	)

	# Create checker instance metadata
	checkerInstanceMetadata = Metadata(
		Init(
			# Use ConfigSets to ensure docker service is running before trying to run containers
			# (since cfn-init runs "services" block last)
			InitConfigSets(
				ordered=["first","second"]
			),
			first=InitConfig(
				packages={
					"yum": {
						"docker": [],
						"curl": []
					}
				},
				files=InitFiles(
					{
						"/etc/cfn/cfn-hup.conf": InitFile(
							content=Join("",
								[
									"[main]\n",
									"stack=",Ref("AWS::StackName"),"\n",
									"region=",Ref("AWS::Region"),"\n"
								]
							),
							mode="000400",
							owner="root",
							group="root"
						),
						"/etc/cfn/hooks.d/cfn-auto-reloader.conf": InitFile(
							content=Join("",
								[
									"[cfn-auto-reloader-hook]\n",
									"triggers=post.update\n",
									"path=Resources.checkerInstance.Metadata\n",
									"action=/opt/aws/bin/cfn-init -v --stack ", Ref("AWS::StackName"), " ",
									"--resource checkerInstance ",
									"--region ", Ref("AWS::Region"), " ",
									"-c ordered\n",
									"runas=root\n"
								]
							),
							mode="000400",
							owner="root",
							group="root"
						),
						"/tmp/checker.conf": InitFile(
							content=Join("",
								[
									"{\n",
									"\"dnsCheckerDDB\" : \""+dnsCheckerDDB+"\",\n",
									"\"region\" : \"", Ref("AWS::Region"), "\"\n",
									"}"
								]
							),
							mode="000400",
							owner="root",
							group="root"
						)
					}
				),
				services={
					"sysvinit": InitServices(
						{
							"docker": InitService(
								enabled=True,
								ensureRunning=True
							),
							"cfn-hup": InitService(
								enabled=True,
								ensureRunning=True,
								files=[
									"/etc/cfn/cfn-hup.conf",
									"/etc/cfn/hooks.d/cfn-auto-reloader.conf"
								]
							)
						}
					)
				}
			),
			second=InitConfig(
				commands={
					"02runNginxContainer": {
						"command" : Join("",
							[
								"docker run -dit --name nginx -v /var/log/nginx/:/var/log/nginx ",
								"-v /tmp/:/tmp -p 80:80 kelledro/dnschecker_nginx"
							]
						)
					},
					"01runUwsgiContainer": {
						"command" : "docker run -dit --name uwsgi -v /tmp:/tmp kelledro/dnschecker_uwsgi"
					},
					"50subscribeToSNS": {
						"command": Join("",
							[
								"aws sns subscribe --protocol http --topic-arn ", snsTopic, " ",
								"--notification-endpoint http://$(curl -s 169.254.169.254/latest/meta-data/public-ipv4) ",
								"--region us-west-2"
							]
						)
					}
				}
			)
		)
	)

	# Create checker Instance
	checkerInstance = checker.add_resource(
		Instance(
			"checkerInstance",
			ImageId=FindInMap("AMIMap",Ref("AWS::Region"),"id"),
			InstanceType="t2.micro",
			KeyName="kelledy", # TODO remove this after testing
			IamInstanceProfile=instanceProfile,
			Metadata=checkerInstanceMetadata,
			UserData=Base64(
				Join("",
					[
						"#!/bin/bash\n",
						"/opt/aws/bin/cfn-init -v ",
						"--stack ", Ref("AWS::StackName"), " ",
						"--resource checkerInstance ",
						"--region ", Ref("AWS::Region"), " ",
						"-c ordered"
					]
				)
			),
			NetworkInterfaces=[
				NetworkInterfaceProperty(
					GroupSet=[
						Ref(checkerSecurityGroup)
					],
					AssociatePublicIpAddress="true",
					DeviceIndex="0",
					DeleteOnTermination="true",
					SubnetId=Ref(checkerSubnet),

				)
			],
			Tags=Tags(
				Name="checkerInstance"
			)
		)
	)
	return checker
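A hedged call-site sketch (all argument values are placeholders): create() expects an AMIMap whose second-level key is "id", matching the FindInMap("AMIMap", Ref("AWS::Region"), "id") lookup above.

checker_template = create(
    AMIMap={"us-west-2": {"id": "ami-xxxxxxxx"}},                # placeholder AMI
    instanceProfile="checkerInstanceProfile",                    # placeholder name
    snsTopic="arn:aws:sns:us-west-2:123456789012:dnsChecker",    # placeholder ARN
    dnsCheckerDDB="dnsCheckerTable",                             # placeholder table
)
print(checker_template.to_json())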
Example #20
class CreateTemplate(object):
    def __init__(self):
        self.start_template()
        self.add_mappings()
        self.add_parameters()
        self.add_security_groups()
        self.add_kippo_rds()
        self.add_kippo_sensors()

    def start_template(self):
        self.template = Template()
        self.template.add_version('2010-09-09')
        self.template.add_description('Kippo cluster CloudFormation stack')

    def add_mappings(self):
        # Ubuntu Trusty 14.04 LTS amd64
        self.template.add_mapping('Ec2AmiMap', {
            'ap-northeast-1': {'AmiId': 'ami-c011d4c0'},
            'ap-southeast-1': {'AmiId': 'ami-76546924'},
            'eu-central-1': {'AmiId': 'ami-00dae61d'},
            'eu-west-1': {'AmiId': 'ami-2396f654'},
            'sa-east-1': {'AmiId': 'ami-75b23768'},
            'us-east-1': {'AmiId': 'ami-f63b3e9e'},
            'us-west-1': {'AmiId': 'ami-057f9d41'},
            'cn-north-1': {'AmiId': 'ami-78d84541'},
            'us-gov-west-1': {'AmiId': 'ami-85fa9ba6'},
        })

    def add_parameters(self):

        self.template.add_parameter(Parameter(
            'Ec2InstanceType',
            Default='t2.micro',
            Description='Instance type of the EC2 instances',
            Type='String',
        ))
        self.template.add_parameter(Parameter(
            'Ec2SubnetIdList',
            Description='List of subnet IDs in which to create the EC2 instances',
            Type='List<AWS::EC2::Subnet::Id>',
        ))
        self.template.add_parameter(Parameter(
            'ElbSubnetIdList',
            Description='List of subnet IDs in which to create the ELB',
            Type='List<AWS::EC2::Subnet::Id>',
        ))
        self.template.add_parameter(Parameter(
            'KeyName',
            Description='Name of the keypair to install on the EC2 instances',
            Type='AWS::EC2::KeyPair::KeyName',
        ))
        self.template.add_parameter(Parameter(
            'KippoSensorCount',
            Default='1',
            Description='Number of kippo sensors to create',
            Type='Number',
        ))
        self.template.add_parameter(Parameter(
            'RdsInstanceType',
            Default='t2.micro',
            Description='Instance type of the RDS instance',
            Type='String',
        ))
        self.template.add_parameter(Parameter(
            'RdsRootPassword',
            Description='Password to use for the root RDS user',
            Type='String',
        ))
        self.template.add_parameter(Parameter(
            'RdsStorage',
            Default='20',
            Description='Amount of storage (GB) for the RDS instance',
            Type='Number',
        ))
        self.template.add_parameter(Parameter(
            'RdsSubnetIdList',
            Description='List of subnet IDs in which to create the RDS instance',
            Type='List<AWS::EC2::Subnet::Id>',
        ))
        self.template.add_parameter(Parameter(
            'RealSshPort',
            Description='Port number to use for the real SSH service',
            Type='Number',
        ))
        self.template.add_parameter(Parameter(
            'VpcId',
            Description='ID of the VPC in which to create the kippo cluster',
            Type='AWS::EC2::VPC::Id',
        ))

    def add_security_groups(self):
        # kippo sensor EC2 security group
        self.template.add_resource(SecurityGroup(
            'Ec2SecurityGroup',
            GroupDescription='Security group for the kippo sensor ASG',
            SecurityGroupIngress=[
                # Allow SSH (to kippo) from anywhere
                SecurityGroupRule(
                    CidrIp='0.0.0.0/0',
                    FromPort=22,
                    ToPort=22,
                    IpProtocol='tcp',
                ),
                # Allow real SSH from anywhere
                SecurityGroupRule(
                    CidrIp='0.0.0.0/0',
                    FromPort=Ref('RealSshPort'),
                    ToPort=Ref('RealSshPort'),
                    IpProtocol='tcp',
                ),
                # Allow HTTP from the kippo ELB
                SecurityGroupRule(
                    FromPort=80,
                    ToPort=80,
                    IpProtocol='tcp',
                    SourceSecurityGroupId=Ref('ElbSecurityGroup'),
                ),
                # Allow HTTPS from the kippo ELB
                SecurityGroupRule(
                    FromPort=443,
                    ToPort=443,
                    IpProtocol='tcp',
                    SourceSecurityGroupId=Ref('ElbSecurityGroup'),
                ),
            ],
            VpcId=Ref('VpcId'),
        ))

        # kippo sensor ELB security group
        self.template.add_resource(SecurityGroup(
            'ElbSecurityGroup',
            GroupDescription='Security group for the kippo sensor ELB',
            SecurityGroupIngress=[
                # Allow HTTP from anywhere
                SecurityGroupRule(
                    CidrIp='0.0.0.0/0',
                    FromPort=80,
                    ToPort=80,
                    IpProtocol='tcp',
                ),
                # Allow HTTPS from anywhere
                SecurityGroupRule(
                    CidrIp='0.0.0.0/0',
                    FromPort=443,
                    ToPort=443,
                    IpProtocol='tcp',
                ),
            ],
            VpcId=Ref('VpcId'),
        ))

        # RDS security group
        self.template.add_resource(SecurityGroup(
            'RdsSecurityGroup',
            VpcId=Ref('VpcId'),
            GroupDescription='Security group for the kippo RDS instance',
            SecurityGroupIngress=[
                # Allow MySQL from kippo EC2 instances
                SecurityGroupRule(
                    FromPort=3306,
                    ToPort=3306,
                    IpProtocol='tcp',
                    SourceSecurityGroupId=Ref('Ec2SecurityGroup'),
                ),
            ],
        ))

    def add_kippo_rds(self):
        self.template.add_resource(DBSubnetGroup(
            'RdsSubnetGroup',
            DBSubnetGroupDescription='Subnet group for the kippo RDS instance',
            SubnetIds=Ref('RdsSubnetIdList'),
        ))

        self.template.add_resource(DBInstance(
            'RdsInstance',
            AllocatedStorage=Ref('RdsStorage'),
            DBInstanceClass=Ref('RdsInstanceType'),
            DBInstanceIdentifier='kippo-database',
            DBSubnetGroupName=Ref('RdsSubnetGroup'),
            Engine='MySQL',
            EngineVersion='5.6.22',
            MasterUsername='******',
            MasterUserPassword=Ref('RdsRootPassword'),
            MultiAZ=True,
            Port=3306,
            VPCSecurityGroups=[Ref('RdsSecurityGroup')],
        ))

        self.template.add_output(Output(
            'RdsEndpoint',
            Description='RDS endpoint address',
            Value=GetAtt('RdsInstance', 'Endpoint.Address'),
        ))

    def add_kippo_sensors(self):
        # Create the ELB
        self.template.add_resource(LoadBalancer(
            'Elb',
            Listeners=[
                Listener(
                    InstancePort=80,
                    LoadBalancerPort=80,
                    Protocol='http',
                ),
                Listener(
                    InstancePort=443,
                    LoadBalancerPort=443,
                    Protocol='tcp',    # Plain TCP forwarding for HTTPS/SSL
                ),
            ],
            CrossZone=True,
            Subnets=Ref('ElbSubnetIdList'),
            SecurityGroups=[Ref('ElbSecurityGroup')],
            Scheme='internet-facing',
            HealthCheck=HealthCheck(
                Target='HTTP:80/kippo-graph/',
                HealthyThreshold=2,
                UnhealthyThreshold=5,
                Interval=120,
                Timeout=60,
            ),
        ))

        self.template.add_output(Output(
            'ElbEndpoint',
            Description='ELB endpoint address',
            Value=GetAtt('Elb', 'DNSName'),
        ))

        self.template.add_resource(LaunchConfiguration(
            'LaunchConfiguration',
            KeyName=Ref('KeyName'),
            ImageId=FindInMap('Ec2AmiMap', Ref('AWS::Region'), 'AmiId'),
            InstanceType=Ref('Ec2InstanceType'),
            SecurityGroups=[Ref('Ec2SecurityGroup')],
            AssociatePublicIpAddress=True,
            UserData=Base64(Join('\n', [
                '#cloud-config',
                'repo_upgrade: security',
                'runcmd:',
                ' - "/usr/bin/wget -O /tmp/configure_kippo_sensor.sh https://raw.githubusercontent.com/cdodd/aws-kippo-cluster/master/bootstrap/configure_kippo_sensor.sh"',
                Join(
                    '',
                    [
                        ' - "bash /tmp/configure_kippo_sensor.sh',
                        ' ',
                        GetAtt('RdsInstance', 'Endpoint.Address'),
                        ' ',
                        Ref('RdsRootPassword'),
                        ' ',
                        Ref('RealSshPort'),
                        '"',
                    ],
                ),
            ])),
        ))

        self.template.add_resource(AutoScalingGroup(
            'Asg',
            DesiredCapacity=Ref('KippoSensorCount'),
            HealthCheckGracePeriod=1800,
            HealthCheckType='ELB',
            LaunchConfigurationName=Ref('LaunchConfiguration'),
            LoadBalancerNames=[Ref('Elb')],
            MaxSize=Ref('KippoSensorCount'),
            MinSize=Ref('KippoSensorCount'),
            Tags=[Tag(key='Name', value='kippo-sensor', propogate='true')],
            VPCZoneIdentifier=Ref('Ec2SubnetIdList'),
        ))

    def output_template(self):
        return self.template.to_json()
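A minimal driver sketch (assumed usage; the troposphere imports used by the class are expected at module level):

if __name__ == '__main__':
    stack = CreateTemplate()
    print(stack.output_template())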
Example #21
region = RegionInfo()
region.endpoint = "CLC IP"
region.name = "eucalyptus"
stack_name = "test-stack-1"

tester = boto.connect_cloudformation(region=region, port=8773, path="/services/CloudFormation", is_secure=False,
                                     aws_access_key_id="your access key",
                                     aws_secret_access_key="your secret key")

template = Template()

keyname_param = template.add_parameter(Parameter("KeyName",
                                                 Description="Name of an existing EC2 KeyPair to enable SSH access to the instance",
                                                 Type="String", ))

template.add_mapping('RegionMap', {"": {"AMI": "emi to use"}})

for i in xrange(2):
    ec2_instance = template.add_resource(ec2.Instance("Instance{0}".format(i),
                                                      ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
                                                      InstanceType="t1.micro", KeyName=Ref(keyname_param),
                                                      SecurityGroups=["default"], UserData=Base64("80")))
    vol = template.add_resource(ec2.Volume("Volume{0}".format(i), Size="1",
                                           AvailabilityZone=GetAtt("Instance{0}".format(i), "AvailabilityZone")))
    mount = template.add_resource(ec2.VolumeAttachment("MountPt{0}".format(i), InstanceId=Ref("Instance{0}".format(i)),
                                                       VolumeId=Ref("Volume{0}".format(i)), Device="/dev/vdc"))
# tester.delete_stack(stack_name)

stack = tester.create_stack(stack_name, template.to_json(), parameters=[("KeyName", "your key name")])
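An optional follow-up sketch mirroring the wait/cleanup pattern in the InstanceVolumeTemplate examples above; it assumes boto 2's CloudFormation connection, where describe_stacks() returns Stack objects exposing stack_status.

import time

# Poll until the stack reports CREATE_COMPLETE, then clean up.
while tester.describe_stacks(stack_name)[0].stack_status != "CREATE_COMPLETE":
    time.sleep(15)
tester.delete_stack(stack_name)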
Example #22
t.add_mapping('Profile', {
    "prod" : {
        "InstanceType": "m3.2xlarge",
        "ClusterSize": "4",
        "MultiAZ" : True,
        "DBAllocatedStorage" : "50",
        "DBInstanceType" : "db.m3.2xlarge",
        "DBBackupRetentionPeriod": 7

        },
    "stress" : {
        "InstanceType": "m3.xlarge",
        "ClusterSize": "4",
        "MultiAZ" : False,
        "DBAllocatedStorage" : "10",
        "DBInstanceType" : "db.m3.xlarge",
        "DBBackupRetentionPeriod": 0
    },
    "preprod" : {
        "InstanceType": "m3.large",
        "ClusterSize": "3",
        "MultiAZ" : False,
        "DBAllocatedStorage" : "10",
        "DBInstanceType" : "db.m3.large",
        "DBBackupRetentionPeriod": 1
        },
    "test" : {
        "InstanceType": "m3.large",
        "ClusterSize": "3",
        "MultiAZ" : False,
        "DBAllocatedStorage" : "5",
        "DBInstanceType" : "db.m3.large",
        "DBBackupRetentionPeriod": 0
        }
    })
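A sketch (assumption, not from the original file): the Profile mapping above is usually keyed off an environment parameter; the "EnvProfile" parameter name below is hypothetical.

from troposphere import FindInMap, Parameter, Ref

env = t.add_parameter(Parameter(
    "EnvProfile",            # hypothetical parameter, not in the original code
    Type="String",
    Default="test",
    AllowedValues=["prod", "stress", "preprod", "test"],
))
instance_type = FindInMap("Profile", Ref(env), "InstanceType")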
Example #23

template = Template()

keyname_param = template.add_parameter(Parameter(
    "KeyName",
    Description="Name of an existing EC2 KeyPair to enable SSH "
                "access to the instance",
    Type="String",
))

template.add_mapping('RegionMap', {
    "us-east-1": {"AMI": "ami-7f418316"},
    "us-west-1": {"AMI": "ami-951945d0"},
    "us-west-2": {"AMI": "ami-16fd7026"},
    "eu-west-1": {"AMI": "ami-24506250"},
    "sa-east-1": {"AMI": "ami-3e3be423"},
    "ap-southeast-1": {"AMI": "ami-74dda626"},
    "ap-northeast-1": {"AMI": "ami-dcfa4edd"}
})
# Note: DeletionPolicy is a stack-level resource attribute, not a valid
# ec2.EBSBlockDevice property, so it is omitted here.
ebs = ec2.EBSBlockDevice(VolumeSize=20, VolumeType="gp2")
ec2_instance = template.add_resource(ec2.Instance(
    "Ec2Instance",
    ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
    InstanceType="t1.micro",
    KeyName=Ref(keyname_param),
    SecurityGroups=["default"],
    UserData=Base64("80"),
    BlockDeviceMappings=[
      ec2.BlockDeviceMapping(DeviceName="/dev/sdf",
                                 Ebs=ebs)
Example #24
security_groups_param = template.add_parameter(Parameter(
    "SecurityGroupsList",
    Description="List of one or more existing SecurityGroups allowing connectivity "
                "to the server, in the form \"Group1\", ... \"GroupN\"",
    Type="CommaDelimitedList",
))

template.add_mapping('RegionMap', {
        "us-east-2"   : { "AMI" : "ami-2a0f324f"},
        "us-east-1"   : { "AMI" : "ami-afd15ed0"},
        "us-west-1"   : { "AMI" : "ami-00d8c660"},
        "us-west-2"   : { "AMI" : "ami-31394949"},
        "ap-south-1"   : { "AMI" : "ami-7d95b612"},
        "ap-northeast-2"   : { "AMI" : "ami-d117bebf"},
        "ap-southeast-1"   : { "AMI" : "ami-a7f0c4db"},
        "ap-southeast-2"   : { "AMI" : "ami-c267b0a0"},
        "ap-northeast-1"   : { "AMI" : "ami-2724cf58"},
        "ca-central-1"   : { "AMI" : "ami-c59818a1"},
        "eu-central-1"   : { "AMI" : "ami-43eec3a8"},
        "eu-west-1"   : { "AMI" : "ami-921423eb"},
        "eu-west-2"   : { "AMI" : "ami-924aa8f5"},
        "eu-west-3"   : { "AMI" : "ami-a88233d5"},
        "sa-east-1"   : { "AMI" : "ami-4fd48923"}
})


delimiter = ''
user_data_script = (
        "#!/bin/bash\n",
        'sudo yum update -y\n',
        'wget https://packages.chef.io/files/stable/chefdk/2.5.3/sles/12/chefdk-2.5.3-1.sles12.x86_64.rpm\n',
Example #25
def main():

    t = Template()
    t.set_description("test instance launch")
    t.set_version("2010-09-09")

    InstUserData = [
        '#!/usr/bin/env bash\n',
        '\n',
        'set -x\n',
        '\n',
        'my_wait_handle="',
        Ref('InstanceWaitHandle'),
        '"\n',
        'curl -X PUT -H \'Content-Type:\' --data-binary \'{ "Status" : "SUCCESS",  "Reason" : "Instance launched",  "UniqueId" : "launch001",  "Data" : "Instance launched."}\'  "${my_wait_handle}"',
        '\n',
        '\n',
    ]

    EC2KeyName = t.add_parameter(
        Parameter(
            'EC2KeyName',
            Type="AWS::EC2::KeyPair::KeyName",
            Description=
            "Name of an existing EC2 KeyPair to enable SSH access to the instance.",
            ConstraintDescription="REQUIRED: Must be a valid EC2 key pair",
        ))

    OperatingSystem = t.add_parameter(
        Parameter('OperatingSystem',
                  Type="String",
                  Description="Operating System",
                  Default="centos7",
                  AllowedValues=[
                      "alinux2",
                      "centos7",
                      "rhel7",
                  ],
                  ConstraintDescription="Must be: alinux2, centos7, rhel7"))

    myInstanceType = t.add_parameter(
        Parameter(
            'MyInstanceType',
            Type="String",
            Description="Instance type",
            Default="m5.2xlarge",
        ))

    VpcId = t.add_parameter(
        Parameter(
            'VpcId',
            Type="AWS::EC2::VPC::Id",
            Description="VPC Id for this instance",
        ))

    Subnet = t.add_parameter(
        Parameter('Subnet',
                  Type="AWS::EC2::Subnet::Id",
                  Description="Subnet IDs"))

    ExistingSecurityGroup = t.add_parameter(
        Parameter(
            'ExistingSecurityGroup',
            Type="AWS::EC2::SecurityGroup::Id",
            Description=
            "OPTIONAL: Choose an existing Security Group ID, e.g. sg-abcd1234")
    )

    UsePublicIp = t.add_parameter(
        Parameter(
            'UsePublicIp',
            Type="String",
            Description="Should a public IP address be given to the instance",
            Default="true",
            ConstraintDescription="true/false",
            AllowedValues=["true", "false"]))

    SshAccessCidr = t.add_parameter(
        Parameter(
            'SshAccessCidr',
            Type="String",
            Description="CIDR Block for SSH access, default 127.0.0.1/32",
            Default="127.0.0.1/32",
            AllowedPattern=
            "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))

    RootRole = t.add_resource(
        iam.Role("RootRole",
                 AssumeRolePolicyDocument={
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 }))

    SshSecurityGroup = t.add_resource(
        SecurityGroup("SshSecurityGroup",
                      VpcId=Ref(VpcId),
                      GroupDescription="SSH Security group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="22",
                              ToPort="22",
                              CidrIp=Ref(SshAccessCidr),
                          ),
                      ]))

    RootInstanceProfile = t.add_resource(
        InstanceProfile("RootInstanceProfile", Roles=[Ref(RootRole)]))

    tags = Tags(Name=Ref("AWS::StackName"))

    myInstance = t.add_resource(
        ec2.Instance(
            'MyInstance',
            ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"),
                              Ref(OperatingSystem)),
            KeyName=Ref(EC2KeyName),
            InstanceType=(Ref(myInstanceType)),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=If(
                        "not_existing_sg", [Ref(SshSecurityGroup)],
                        [Ref(SshSecurityGroup),
                         Ref(ExistingSecurityGroup)]),
                    AssociatePublicIpAddress=Ref(UsePublicIp),
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(Subnet))
            ],
            IamInstanceProfile=(Ref(RootInstanceProfile)),
            UserData=Base64(Join('', InstUserData)),
        ))

    t.add_mapping(
        'AWSRegionAMI', {
            "ap-northeast-1": {
                "centos7": "ami-8e8847f1",
                "rhel7": "ami-6b0d5f0d"
            },
            "ap-northeast-2": {
                "centos7": "ami-bf9c36d1",
                "rhel7": "ami-3eee4150"
            },
            "ap-south-1": {
                "centos7": "ami-1780a878",
                "rhel7": "ami-5b673c34"
            },
            "ap-southeast-1": {
                "centos7": "ami-8e0205f2",
                "rhel7": "ami-76144b0a"
            },
            "ap-southeast-2": {
                "centos7": "ami-d8c21dba",
                "rhel7": "ami-67589505"
            },
            "ca-central-1": {
                "centos7": "ami-e802818c",
                "rhel7": "ami-49f0762d"
            },
            "eu-central-1": {
                "centos7": "ami-dd3c0f36",
                "rhel7": "ami-c86c3f23"
            },
            "eu-west-1": {
                "centos7": "ami-3548444c",
                "rhel7": "ami-7c491f05"
            },
            "eu-west-2": {
                "centos7": "ami-00846a67",
                "rhel7": "ami-7c1bfd1b"
            },
            "eu-west-3": {
                "centos7": "ami-262e9f5b",
                "rhel7": "ami-5026902d"
            },
            "sa-east-1": {
                "centos7": "ami-cb5803a7",
                "rhel7": "ami-b0b7e3dc"
            },
            "us-east-1": {
                "centos7": "ami-9887c6e7",
                "rhel7": "ami-6871a115"
            },
            "us-east-2": {
                "centos7": "ami-9c0638f9",
                "rhel7": "ami-03291866"
            },
            "us-west-1": {
                "centos7": "ami-4826c22b",
                "rhel7": "ami-18726478"
            },
            "us-west-2": {
                "centos7": "ami-3ecc8f46",
                "rhel7": "ami-28e07e50"
            }
        })

    t.add_condition("not_existing_sg", Equals(Ref(ExistingSecurityGroup), ""))

    t.add_condition("Has_Public_Ip", Equals(Ref(UsePublicIp), "true"))

    mywaithandle = t.add_resource(WaitConditionHandle('InstanceWaitHandle'))

    mywaitcondition = t.add_resource(
        WaitCondition("InstanceWaitCondition",
                      Handle=Ref(mywaithandle),
                      Timeout="1500",
                      DependsOn="MyInstance"))

    t.add_output([
        Output("InstanceID", Description="Instance ID", Value=Ref(myInstance))
    ])

    t.add_output(
        [Output("InstancePrivateIP", Value=GetAtt('MyInstance', 'PrivateIp'))])

    t.add_output([
        Output("InstancePublicIP",
               Value=GetAtt('MyInstance', 'PublicIp'),
               Condition="Has_Public_Ip")
    ])

    ##print(t.to_yaml())
    print(t.to_json(indent=2))
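The usual entry point for a script like this (an assumption, since the original excerpt ends here):

if __name__ == "__main__":
    main()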
Example #26
# SEE: https://coreos.com/os/docs/latest/booting-on-ec2.html#beta
TEMPLATE.add_mapping(
    'RegionMap', {
        EU_CENTRAL_1: {
            'AMI': 'ami-e83ddb87'
        },
        AP_NORTHEAST_1: {
            'AMI': 'ami-67e9fd09'
        },
        SA_EAST_1: {
            'AMI': 'ami-9666eafa'
        },
        AP_SOUTHEAST_2: {
            'AMI': 'ami-9a7d5ef9'
        },
        AP_SOUTHEAST_1: {
            'AMI': 'ami-b8d319db'
        },
        US_EAST_1: {
            'AMI': 'ami-cfaba5a5'
        },
        US_WEST_2: {
            'AMI': 'ami-141df674'
        },
        US_WEST_1: {
            'AMI': 'ami-6c037e0c'
        },
        EU_WEST_1: {
            'AMI': 'ami-d149cea2'
        },
    })
Example #27
instance_type = template.add_parameter(Parameter(
    "InstanceType",
    Description = "EC2 instance type to launch for Application servers",
    Type = "String",
    Default = "m1.medium",
    AllowedValues = [ "m1.medium", "m1.large", "m1.xlarge", "m2.xlarge", "m2.2xlarge", "m2.4xlarge", "m3.xlarge", "m3.2xlarge", "c1.medium", "c1.xlarge", "cg1.4xlarge" ],
    ConstraintDescription = "must be a valid EC2 instance type"
))


template.add_mapping('RegionMap', {
    "us-east-1":      {"AMI": "ami-99247ff0"},
    "us-west-1":      {"AMI": "ami-ae0234eb"},
    "us-west-2":      {"AMI": "ami-f40991c4"},
    "eu-west-1":      {"AMI": "ami-c1c527b6"},
    "sa-east-1":      {"AMI": "ami-df45e3c2"},
    "ap-southeast-1": {"AMI": "ami-2a9cc978"},
    "ap-southeast-2": {"AMI": "ami-1970ec23"},
    "ap-northeast-1": {"AMI": "ami-91d3b690"}
})

role = template.add_resource(Role('EurekaRole',
    AssumeRolePolicyDocument = {
        "Statement": [{
            "Effect": "Allow",
            "Principal":{
                "Service":["ec2.amazonaws.com"]
            },
            "Action":["sts:AssumeRole"]
        }]
    },
Example #28
desInstances_param = t.add_parameter(
    Parameter(
        "DesNumInstances",
        Description=
        "Number of instances that need to be running before creation is marked as complete.",
        ConstraintDescription=
        "Must be in the range specified by MinNumInstances and MaxNumInstances.",
        Default="1",
        Type="Number"))

## Mappings
region_map = t.add_mapping(
    'RegionMap', {
        "us-east-1": {
            "AMIid": ami_id,
            "SGid": security_groups_ids,
            "SNETid": subnet_ids,
            "ELBName": elb_name,
        }
    })

## Resources
iam_role_resource = t.add_resource(
    Role("IAMRole",
         Path="/",
         AssumeRolePolicyDocument={
             "Version":
             "2012-10-17",
             "Statement": [{
                 "Action": ["sts:AssumeRole"],
                 "Effect": "Allow",
Example #29
def create():

    es = Template()

    es.add_description("Stack defining the elasticsearch instance")

    # Get latest AMIs
    def getAMI(region):
        AMIMap = {}
        print("Getting latest AMZN linux AMI in %s" % region)
        ec2conn = boto.ec2.connect_to_region(region)
        images = ec2conn.get_all_images(
            owners=["amazon"], filters={"name": "amzn-ami-hvm-*.x86_64-gp2"})
        latestDate = ""
        latestAMI = ""
        for image in images:
            if image.creationDate > latestDate:
                latestDate = image.creationDate
                latestAMI = image.id
        AMIMap[region] = {"id": latestAMI}
        return AMIMap

    # Create AMI Map
    es.add_mapping("AMIMap", getAMI(region))

    # Create es VPC
    esVPC = es.add_resource(
        VPC("esVPC", CidrBlock="10.0.0.0/16", Tags=Tags(Name="esVPC")))

    # Create es IGW
    esIGW = es.add_resource(InternetGateway("esIGW"))

    # Attach IGW to VPC
    esIGWAttachment = es.add_resource(
        VPCGatewayAttachment("esIGWAttachment",
                             VpcId=Ref(esVPC),
                             InternetGatewayId=Ref(esIGW)))

    # Create es Subnet
    esSubnet = es.add_resource(
        Subnet("esSubnet", CidrBlock="10.0.0.0/24", VpcId=Ref(esVPC)))

    # Create es RTB
    esRTB = es.add_resource(RouteTable("esRTB", VpcId=Ref(esVPC)))

    # Create route to IGW
    esDefaultRoute = es.add_resource(
        Route("esDefaultRoute",
              DependsOn="esIGWAttachment",
              GatewayId=Ref(esIGW),
              DestinationCidrBlock="0.0.0.0/0",
              RouteTableId=Ref(esRTB)))

    # Associate RTB with Subnet
    esSubnetRTBAssociation = es.add_resource(
        SubnetRouteTableAssociation("esSubnetRTBAssociation",
                                    SubnetId=Ref(esSubnet),
                                    RouteTableId=Ref(esRTB)))

    # Create es Security Group
    esSecurityGroup = es.add_resource(
        SecurityGroup("esSecurityGroup",
                      GroupDescription="Allow inbound access on port 9200",
                      SecurityGroupIngress=[
                          SecurityGroupRule(IpProtocol="tcp",
                                            FromPort="9200",
                                            ToPort="9200",
                                            CidrIp="0.0.0.0/0")
                      ],
                      VpcId=Ref(esVPC)))

    # Create es instance metadata
    esInstanceMetadata = Metadata(
        Init(
            # Use ConfigSets to ensure GPG key and repo file are in place
            # before trying to install elasticsearch
            InitConfigSets(ordered=["first", "second"]),
            first=InitConfig(
                files=InitFiles({
                    # cfn-hup notices when the cloudformation stack is changed
                    "/etc/cfn/cfn-hup.conf":
                    InitFile(content=Join("", [
                        "[main]\n", "stack=",
                        Ref("AWS::StackName"), "\n", "region=",
                        Ref("AWS::Region"), "\n"
                    ]),
                             mode="000400",
                             owner="root",
                             group="root"),
                    # cfn-hup will then trigger cfn-init to run.
                    # This lets us update the instance just by updating the stack
                    "/etc/cfn/hooks.d/cfn-auto-reloader.conf":
                    InitFile(content=Join("", [
                        "[cfn-auto-reloader-hook]\n", "triggers=post.update\n",
                        "path=Resources.esInstance.Metadata\n",
                        "action=/opt/aws/bin/cfn-init -v --stack ",
                        Ref("AWS::StackName"), " ", "--resource esInstance ",
                        "--region ",
                        Ref("AWS::Region"), " ", "-c ordered\n"
                        "runas=root\n"
                    ]),
                             mode="000400",
                             owner="root",
                             group="root"),
                    # repo file for elastic search
                    "/etc/yum.repos.d/elasticsearch.repo":
                    InitFile(content=Join("", [
                        "[elasticsearch-2.x]\n",
                        "name=Elasticsearch repository for 2.x packages\n",
                        "baseurl=http://packages.elastic.co/elasticsearch/2.x/centos\n",
                        "gpgcheck=1\n",
                        "gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch\n",
                        "enabled=1\n"
                    ]),
                             mode="000400",
                             owner="root",
                             group="root")
                }),
                commands={
                    # Install elasticsearch key so package will install
                    "installGPG": {
                        "command":
                        "rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch"
                    }
                }),
            second=InitConfig(
                packages={
                    "yum": {
                        # Install elasticsearch
                        "elasticsearch": [],
                    }
                },
                commands={
                    # Enable external access to elasticsearch
                    "listenOnAllinterfaces": {
                        "command":
                        "echo \"network.host: 0.0.0.0\" >> /etc/elasticsearch/elasticsearch.yml"
                    }
                },
                services={
                    "sysvinit":
                    InitServices({
                        "elasticsearch":
                        InitService(enabled=True, ensureRunning=True),
                        "cfn-hup":
                        InitService(
                            enabled=True,
                            ensureRunning=True,
                            files=[
                                "/etc/cfn/cfn-hup.conf",
                                "/etc/cfn/hooks.d/cfn-auto-reloader.conf"
                            ])
                    })
                })))

    # Create es Instance
    esInstance = es.add_resource(
        Instance(
            "esInstance",
            ImageId=FindInMap("AMIMap", Ref("AWS::Region"), "id"),
            InstanceType="t2.micro",
            Metadata=esInstanceMetadata,
            UserData=Base64(
                Join("", [
                    "#!/bin/bash\n", "/opt/aws/bin/cfn-init -v ", "--stack ",
                    Ref("AWS::StackName"), " ", "--resource esInstance ",
                    "--region ",
                    Ref("AWS::Region"), " ", "-c ordered"
                ])),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=[Ref(esSecurityGroup)],
                    AssociatePublicIpAddress="true",
                    DeviceIndex="0",
                    DeleteOnTermination="true",
                    SubnetId=Ref(esSubnet),
                )
            ],
            Tags=Tags(Name="esInstance")))

    # Output address
    es.add_output([
        Output("esAddress",
               Description="Elastic Search address",
               Value=Join(
                   "", ["http://",
                        GetAtt("esInstance", "PublicIp"), ":9200/"]))
    ])
    return es
Example #30
0
keyname_param = t.add_parameter(Parameter(
    "SshKeyName",
    Description="Name of an existing EC2 KeyPair to enable SSH "
                "access to NAT instances. If not specified no key is used.",
    Default="AWS::NoValue",
    Type="AWS::EC2::KeyPair::KeyName",
))
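
# Added note (not in the original snippet): Default="AWS::NoValue" above is a
# plain string, not the AWS::NoValue pseudo parameter, and a default that is
# not the name of a real key pair will fail validation for the
# AWS::EC2::KeyPair::KeyName parameter type. A common pattern for an optional
# key is a String parameter with an empty default plus a condition, sketched
# here as comments only ("HasSshKey" is a hypothetical name):
#
#   from troposphere import Equals, If, Not
#   t.add_condition("HasSshKey", Not(Equals(Ref(keyname_param), "")))
#   # ...and on each NAT instance:
#   #   KeyName=If("HasSshKey", Ref(keyname_param), Ref("AWS::NoValue")),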


# These are standard freely available Amazon NAT AMIs
t.add_mapping('RegionMap', {
    'us-west-1': {
        'NATAMI': 'ami-ada746e9',
    },
    'us-west-2': {
        'NATAMI': 'ami-75ae8245',
    },
    'us-east-1': {
        'NATAMI': 'ami-b0210ed8',
    }
})

VPC = t.add_resource(VPC(
    "VPC",
    EnableDnsSupport="true",
    CidrBlock=vpc_cidr,
    EnableDnsHostnames="true",
    Tags=Tags(
        Name=Ref("AWS::StackName"),
        Application=Ref("AWS::StackName"),
        Network="VPN Connected VPC",
Example #31
0
 def test_single_mapping(self):
     template = Template()
     template.add_mapping("map", {"n": "v"})
     json = template.to_json()
     self.assertEqual(single_mapping, json)
Example #32
0
        "InstanceType",
        Default="t2.micro",
        Type="String",
        AllowedValues=["t2.micro", "m1.small"],
    ))

ExistingSecurityGroups = t.add_parameter(
    Parameter(
        "ExistingSecurityGroups",
        Type="List<AWS::EC2::SecurityGroup::Id>",
    ))

t.add_mapping("AWSInstanceType2Arch", {
    u'm1.small': {
        u'Arch': u'PV64'
    },
    u't2.micro': {
        u'Arch': u'HVM64'
    }
})

t.add_mapping(
    "AWSRegionArch2AMI", {
        u'ap-northeast-1': {
            u'HVM64': u'ami-cbf90ecb',
            u'PV64': u'ami-27f90e27'
        },
        u'ap-southeast-1': {
            u'HVM64': u'ami-68d8e93a',
            u'PV64': u'ami-acd9e8fe'
        },
        u'ap-southeast-2': {
Example #33
0
File: client.py  Project: doerodney/cloud
def generate_stack_template():
    template = Template()

    generate_description(template)

    generate_version(template)

    # ---Parameters------------------------------------------------------------
    param_vpc_id = Parameter(
        'VpcIdentifer',
        Description='The identity of the VPC (vpc-abcdwxyz) in which this stack shall be created.',
        Type='AWS::EC2::VPC::Id',
    )
    template.add_parameter(param_vpc_id)

    param_vpc_security_group = Parameter(
        'VpcSecurityGroup',
        Description='The security group (sg-abcdwxyz) to apply to the resources created by this stack.',
        Type='AWS::EC2::SecurityGroup::Id',
    )
    template.add_parameter(param_vpc_security_group)

    param_webserver_instance_subnet_id = Parameter(
        'VpcSubnetIdentifer',
        Description='The identity of the public subnet (subnet-abcdwxyz) in which the web server shall be created.',
        Type='AWS::EC2::Subnet::Id',
    )
    template.add_parameter(param_webserver_instance_subnet_id)

    param_keyname = Parameter(
        'PemKeyName',
        Description='Name of an existing EC2 KeyPair file (.pem) to use to create EC2 instances',
        Type='AWS::EC2::KeyPair::KeyName'
    )
    template.add_parameter(param_keyname)

    param_instance_type = Parameter(
        'EC2InstanceType',
        Description='EC2 instance type, reference this parameter to ensure consistency',
        Type='String',
        Default='t2.medium',  # Prices from (2015-12-03) (Windows, us-west (North CA))
        AllowedValues=[  # Source :  https://aws.amazon.com/ec2/pricing/
            't2.small',  # $0.044/hour
            't2.micro',  # $0.022/hour
            't2.medium',  # $0.088/hour
            't2.large',  # $0.166/hour
            'm3.medium',  # $0.140/hour
            'm3.large',  # $0.28/hour
            'c4.large'   # $0.221/hour
        ],
        ConstraintDescription='Must be a valid EC2 instance type'
    )
    template.add_parameter(param_instance_type)


    #---Mappings---------------------------------------------------------------
    mapping_environment_attribute_map = template.add_mapping(
        'EnvironmentAttributeMap',
        {
            'ap-southeast-1': {
                'WebServerAmi': 'ami-1ddc0b7e'
            },
            'ap-southeast-2': {
                'WebServerAmi': 'ami-0c95b86f'
            },
            'us-east-1': {
                'WebServerAmi': 'ami-a4827dc9'
            },
            'us-west-1': {
                'WebServerAmi': 'ami-f5f41398'
            }
        }
    )

    # ---Resources-------------------------------------------------------------
    ref_region = Ref('AWS::Region')
    ref_stack_name = Ref('AWS::StackName')

    # Create the metadata for the server instance.
    name_web_server = 'WebServer'
    webserver_instance_metadata = cloudformation.Metadata(
        cloudformation.Init({
            'config': cloudformation.InitConfig(
                packages={
                    'yum': {
                        'nginx': [],
                        'git': []
                    }
                },
                files=cloudformation.InitFiles({
                    # cfn-hup.conf initialization
                    '/etc/cfn/authorapp.conf': cloudformation.InitFile(
                        content=Join('',
                        [
                            'server {', '\n',
                            '	listen 3030 ssl http2;', '\n',
                            '	root /var/www/authorapp;', '\n',
                            '\n',
                            '	ssl_certificate       /vagrant/ssl/ca.crt;', '\n',
                            '	ssl_certificate_key   /vagrant/ssl/ca.key;', '\n',
                            '\n',
                            '	location / {', '\n',
                            '	}', '\n',
                            '\n',
                            '	location /api {', '\n',
                            '		proxy_pass http://10.50.50.1:3000;', '\n',
                            '	}', '\n',
                            '}', '\n',
                        ]),
                        mode='000400',
                        owner='root',
                        group='root'
                    ),

                }),
                services=dict(
                    sysvinit=cloudformation.InitServices(
                        {
                            # start cfn-hup service -
                            # required for CloudFormation stack update
                            'cfn-hup': cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                                files=[
                                    '/etc/cfn/cfn-hup.conf',
                                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                                ]
                            ),
                            # Disable sendmail service - not required.
                            'sendmail': cloudformation.InitService(
                                enabled=False,
                                ensureRunning=False
                            )
                        }
                    )
                )
            )
        })
    )

    resource_web_server = ec2.Instance(
        name_web_server,
        Metadata=webserver_instance_metadata,
        ImageId=FindInMap('EnvironmentAttributeMap', ref_region, 'WebServerAmi'),
        InstanceType=Ref(param_instance_type),
        KeyName=Ref(param_keyname),
        NetworkInterfaces=[
            ec2.NetworkInterfaceProperty(
                AssociatePublicIpAddress=str(True),
                DeleteOnTermination=str(True),
                Description='Network interface for web server',
                DeviceIndex=str(0),
                GroupSet=[Ref(param_vpc_security_group)],
                SubnetId=Ref(param_webserver_instance_subnet_id),
            )
        ],
        Tags=Tags(Name=name_web_server, VPC=Ref(param_vpc_id)),
        UserData=Base64(
            Join(
                '',
                [
                    '#!/bin/bash -xe\n',
                    'yum update -y aws-cfn-bootstrap\n',

                    'yum update -y', '\n'

                    '/opt/aws/bin/cfn-init --verbose ',
                    ' --stack ', ref_stack_name,
                    ' --resource %s ' % name_web_server,
                    ' --region ', ref_region, '\n',

                    '/opt/aws/bin/cfn-signal --exit-code $? ',
                    ' --stack ', ref_stack_name,
                    ' --resource ',
                    name_web_server,
                    '\n'
                ]
            )
        )
    )
    template.add_resource(resource_web_server)
    template.add_output(
        Output('WebServer',
               Description='Web Server',
               Value=GetAtt(name_web_server, 'PublicIp')
        )
    )

    return template
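
# Added sketch (not part of the original client.py): one way the template
# built above might be rendered and saved; the output filename is an
# assumption.
if __name__ == '__main__':
    with open('webserver-stack.template', 'w') as handle:
        handle.write(generate_stack_template().to_json())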
Example #34
0
from troposphere.sns import Subscription, Topic
from troposphere.s3 import Bucket, PublicRead, WebsiteConfiguration
from os.path import expanduser

home = expanduser("~")

filename = home + "/outputs/network.template"

t = Template()

t.add_description(
    "Network Stack",
)
t.add_mapping(
    'AWSRegion2AMI', {
        "us-east-1"      : { "AMI" : "ami-b66ed3de" },
        "us-west-2"      : { "AMI" : "ami-b5a7ea85" }
    })
t.add_mapping(
     "AWSInstanceType2Arch", {
      "t2.micro"    : { "Arch" : "64" },
      "t2.medium"   : { "Arch" : "64" },
      "m3.medium"   : { "Arch" : "64" },
      "m3.large"    : { "Arch" : "64" },
      "m3.xlarge"   : { "Arch" : "64" }
    }
)
t.add_mapping(
    "EnvValues", {
        "prod": {"cidr": "0"},
        "qa": {"cidr": "1"},
    })

AmbariUseEBS = t.add_parameter(Parameter(
    "AmbariUseEBS",
    Default="no",
    ConstraintDescription="Must be yes or no only.",
    Type="String",
    Description="Use EBS Volumes for the Ambari Node",
    AllowedValues=["yes", "no"],
))


AmbariUseEBSBool = t.add_condition("AmbariUseEBSBool", Equals(Ref(AmbariUseEBS),"yes"))

t.add_mapping("SubnetConfig",
    {'Public': {'CIDR': '10.0.0.0/24'}, 'VPC': {'CIDR': '10.0.0.0/16'}}
)

t.add_mapping("CENTOS7", {
    "eu-west-1": {"AMI": "ami-33734044"},
    "ap-southeast-1": {"AMI": "ami-2a7b6b78"},
    "ap-southeast-2": {"AMI": "ami-d38dc6e9"},
    "eu-central-1": {"AMI": "ami-e68f82fb"},
    "ap-northeast-1": {"AMI": "ami-b80b6db8"},
    "us-east-1": {"AMI": "ami-61bbf104"},
    "sa-east-1": {"AMI": "ami-fd0197e0"},
    "us-west-1": {"AMI": "ami-f77fbeb3"},
    "us-west-2": {"AMI": "ami-d440a6e7"}
})

VPC = t.add_resource(ec2.VPC(
Example #36
0
def main():
    """
    Create a ElastiCache Redis Node and EC2 Instance
    """

    template = Template()

    # Description
    template.add_description(
        'AWS CloudFormation Sample Template ElastiCache_Redis: '
        'Sample template showing how to create an Amazon '
        'ElastiCache Redis Cluster. **WARNING** This template '
        'creates an Amazon EC2 Instance and an Amazon ElastiCache '
        'Cluster. You will be billed for the AWS resources used '
        'if you create a stack from this template.')

    # Mappings
    template.add_mapping('AWSInstanceType2Arch', {
        't1.micro':     {'Arch': 'PV64'},
        't2.micro':     {'Arch': 'HVM64'},
        't2.small':     {'Arch': 'HVM64'},
        't2.medium':    {'Arch': 'HVM64'},
        'm1.small':     {'Arch': 'PV64'},
        'm1.medium':    {'Arch': 'PV64'},
        'm1.large':     {'Arch': 'PV64'},
        'm1.xlarge':    {'Arch': 'PV64'},
        'm2.xlarge':    {'Arch': 'PV64'},
        'm2.2xlarge':   {'Arch': 'PV64'},
        'm2.4xlarge':   {'Arch': 'PV64'},
        'm3.medium':    {'Arch': 'HVM64'},
        'm3.large':     {'Arch': 'HVM64'},
        'm3.xlarge':    {'Arch': 'HVM64'},
        'm3.2xlarge':   {'Arch': 'HVM64'},
        'c1.medium':    {'Arch': 'PV64'},
        'c1.xlarge':    {'Arch': 'PV64'},
        'c3.large':     {'Arch': 'HVM64'},
        'c3.xlarge':    {'Arch': 'HVM64'},
        'c3.2xlarge':   {'Arch': 'HVM64'},
        'c3.4xlarge':   {'Arch': 'HVM64'},
        'c3.8xlarge':   {'Arch': 'HVM64'},
        'c4.large':     {'Arch': 'HVM64'},
        'c4.xlarge':    {'Arch': 'HVM64'},
        'c4.2xlarge':   {'Arch': 'HVM64'},
        'c4.4xlarge':   {'Arch': 'HVM64'},
        'c4.8xlarge':   {'Arch': 'HVM64'},
        'g2.2xlarge':   {'Arch': 'HVMG2'},
        'r3.large':     {'Arch': 'HVM64'},
        'r3.xlarge':    {'Arch': 'HVM64'},
        'r3.2xlarge':   {'Arch': 'HVM64'},
        'r3.4xlarge':   {'Arch': 'HVM64'},
        'r3.8xlarge':   {'Arch': 'HVM64'},
        'i2.xlarge':    {'Arch': 'HVM64'},
        'i2.2xlarge':   {'Arch': 'HVM64'},
        'i2.4xlarge':   {'Arch': 'HVM64'},
        'i2.8xlarge':   {'Arch': 'HVM64'},
        'd2.xlarge':    {'Arch': 'HVM64'},
        'd2.2xlarge':   {'Arch': 'HVM64'},
        'd2.4xlarge':   {'Arch': 'HVM64'},
        'd2.8xlarge':   {'Arch': 'HVM64'},
        'hi1.4xlarge':  {'Arch': 'HVM64'},
        'hs1.8xlarge':  {'Arch': 'HVM64'},
        'cr1.8xlarge':  {'Arch': 'HVM64'},
        'cc2.8xlarge':  {'Arch': 'HVM64'}
        })

    template.add_mapping('AWSRegionArch2AMI', {
        'us-east-1': {'PV64': 'ami-0f4cfd64',
                      'HVM64': 'ami-0d4cfd66',
                      'HVMG2': 'ami-5b05ba30'},
        'us-west-2': {'PV64': 'ami-d3c5d1e3',
                      'HVM64': 'ami-d5c5d1e5',
                      'HVMG2': 'ami-a9d6c099'},
        'us-west-1': {'PV64': 'ami-85ea13c1',
                      'HVM64': 'ami-87ea13c3',
                      'HVMG2': 'ami-37827a73'},
        'eu-west-1': {'PV64': 'ami-d6d18ea1',
                      'HVM64': 'ami-e4d18e93',
                      'HVMG2': 'ami-72a9f105'},
        'eu-central-1': {'PV64': 'ami-a4b0b7b9',
                         'HVM64': 'ami-a6b0b7bb',
                         'HVMG2': 'ami-a6c9cfbb'},
        'ap-northeast-1': {'PV64': 'ami-1a1b9f1a',
                           'HVM64': 'ami-1c1b9f1c',
                           'HVMG2': 'ami-f644c4f6'},
        'ap-southeast-1': {'PV64': 'ami-d24b4280',
                           'HVM64': 'ami-d44b4286',
                           'HVMG2': 'ami-12b5bc40'},
        'ap-southeast-2': {'PV64': 'ami-ef7b39d5',
                           'HVM64': 'ami-db7b39e1',
                           'HVMG2': 'ami-b3337e89'},
        'sa-east-1': {'PV64': 'ami-5b098146',
                      'HVM64': 'ami-55098148',
                      'HVMG2': 'NOT_SUPPORTED'},
        'cn-north-1': {'PV64': 'ami-bec45887',
                       'HVM64': 'ami-bcc45885',
                       'HVMG2': 'NOT_SUPPORTED'}
        })

    template.add_mapping('Region2Principal', {
        'us-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'us-west-2': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'us-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'eu-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-southeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-northeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-southeast-2': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'sa-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'cn-north-1': {'EC2Principal': 'ec2.amazonaws.com.cn',
                       'OpsWorksPrincipal': 'opsworks.amazonaws.com.cn'},
        'eu-central-1': {'EC2Principal': 'ec2.amazonaws.com',
                         'OpsWorksPrincipal': 'opsworks.amazonaws.com'}
        })

    # Parameters
    cachenodetype = template.add_parameter(Parameter(
        'ClusterNodeType',
        Description='The compute and memory capacity of the nodes in the Redis'
                    ' Cluster',
        Type='String',
        Default='cache.m1.small',
        AllowedValues=['cache.m1.small',
                       'cache.m1.large',
                       'cache.m1.xlarge',
                       'cache.m2.xlarge',
                       'cache.m2.2xlarge',
                       'cache.m2.4xlarge',
                       'cache.c1.xlarge'],
        ConstraintDescription='must select a valid Cache Node type.',
        ))

    instancetype = template.add_parameter(Parameter(
        'InstanceType',
        Description='WebServer EC2 instance type',
        Type='String',
        Default='t2.micro',
        AllowedValues=['t1.micro',
                       't2.micro',
                       't2.small',
                       't2.medium',
                       'm1.small',
                       'm1.medium',
                       'm1.large',
                       'm1.xlarge',
                       'm2.xlarge',
                       'm2.2xlarge',
                       'm2.4xlarge',
                       'm3.medium',
                       'm3.large',
                       'm3.xlarge',
                       'm3.2xlarge',
                       'c1.medium',
                       'c1.xlarge',
                       'c3.large',
                       'c3.xlarge',
                       'c3.2xlarge',
                       'c3.4xlarge',
                       'c3.8xlarge',
                       'c4.large',
                       'c4.xlarge',
                       'c4.2xlarge',
                       'c4.4xlarge',
                       'c4.8xlarge',
                       'g2.2xlarge',
                       'r3.large',
                       'r3.xlarge',
                       'r3.2xlarge',
                       'r3.4xlarge',
                       'r3.8xlarge',
                       'i2.xlarge',
                       'i2.2xlarge',
                       'i2.4xlarge',
                       'i2.8xlarge',
                       'd2.xlarge',
                       'd2.2xlarge',
                       'd2.4xlarge',
                       'd2.8xlarge',
                       'hi1.4xlarge',
                       'hs1.8xlarge',
                       'cr1.8xlarge',
                       'cc2.8xlarge',
                       'cg1.4xlarge'],
        ConstraintDescription='must be a valid EC2 instance type.',
        ))

    keyname = template.add_parameter(Parameter(
        'KeyName',
        Description='Name of an existing EC2 KeyPair to enable SSH access'
                    ' to the instance',
        Type='AWS::EC2::KeyPair::KeyName',
        ConstraintDescription='must be the name of an existing EC2 KeyPair.',
        ))

    sshlocation = template.add_parameter(Parameter(
        'SSHLocation',
        Description='The IP address range that can be used to SSH to'
                    ' the EC2 instances',
        Type='String',
        MinLength='9',
        MaxLength='18',
        Default='0.0.0.0/0',
        AllowedPattern='(\\d{1,3})\\.(\\d{1,3})\\.'
                       '(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})',
        ConstraintDescription='must be a valid IP CIDR range of the'
                              ' form x.x.x.x/x.'
        ))

    # Resources
    webserverrole = template.add_resource(iam.Role(
        'WebServerRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[AssumeRole],
                    Principal=Principal('Service',
                                        [FindInMap('Region2Principal',
                                                   Ref('AWS::Region'),
                                                   'EC2Principal')]),
                    )
                ]
            ),
        Path='/',
    ))

    template.add_resource(iam.PolicyType(
        'WebServerRolePolicy',
        PolicyName='WebServerRole',
        PolicyDocument=awacs.aws.Policy(
            Statement=[awacs.aws.Statement(
                Action=[awacs.aws.Action("elasticache",
                        "DescribeCacheClusters")],
                Resource=["*"],
                Effect=awacs.aws.Allow
            )]
        ),
        Roles=[Ref(webserverrole)],
    ))

    webserverinstanceprofile = template.add_resource(iam.InstanceProfile(
        'WebServerInstanceProfile',
        Path='/',
        Roles=[Ref(webserverrole)],
    ))

    webserversg = template.add_resource(ec2.SecurityGroup(
        'WebServerSecurityGroup',
        GroupDescription='Enable HTTP and SSH access',
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='22',
                ToPort='22',
                CidrIp=Ref(sshlocation),
                ),
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='80',
                ToPort='80',
                CidrIp='0.0.0.0/0',
                )
            ]
        ))

    webserverinstance = template.add_resource(ec2.Instance(
        'WebServerInstance',
        Metadata=cloudformation.Metadata(
            cloudformation.Init({
                'config': cloudformation.InitConfig(
                    packages={
                        'yum': {
                            'httpd':     [],
                            'php':       [],
                            'php-devel': [],
                            'gcc':       [],
                            'make':      []
                            }
                        },

                    files=cloudformation.InitFiles({
                        '/var/www/html/index.php': cloudformation.InitFile(
                            content=Join('', [
                                '<?php\n',
                                'echo \"<h1>AWS CloudFormation sample'
                                ' application for Amazon ElastiCache'
                                ' Redis Cluster</h1>\";\n',
                                '\n',
                                '$cluster_config = json_decode('
                                'file_get_contents(\'/tmp/cacheclusterconfig\''
                                '), true);\n',
                                '$endpoint = $cluster_config[\'CacheClusters'
                                '\'][0][\'CacheNodes\'][0][\'Endpoint\'][\'Add'
                                'ress\'];\n',
                                '$port = $cluster_config[\'CacheClusters\'][0]'
                                '[\'CacheNodes\'][0][\'Endpoint\'][\'Port\'];'
                                '\n',
                                '\n',
                                'echo \"<p>Connecting to Redis Cache Cluster '
                                'node \'{$endpoint}\' on port {$port}</p>\";'
                                '\n',
                                '\n',
                                '$redis=new Redis();\n',
                                '$redis->connect($endpoint, $port);\n',
                                '$redis->set(\'testkey\', \'Hello World!\');'
                                '\n',
                                '$return = $redis->get(\'testkey\');\n',
                                '\n',
                                'echo \"<p>Retrieved value: $return</p>\";'
                                '\n',
                                '?>\n'
                                ]),
                            mode='000644',
                            owner='apache',
                            group='apache'
                            ),
                        '/etc/cron.d/get_cluster_config':
                            cloudformation.InitFile(
                                content='*/5 * * * * root'
                                        ' /usr/local/bin/get_cluster_config',
                                mode='000644',
                                owner='root',
                                group='root'
                                ),
                        '/usr/local/bin/get_cluster_config':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '#! /bin/bash\n',
                                    'aws elasticache describe-cache-clusters ',
                                    '         --cache-cluster-id ',
                                    Ref('RedisCluster'),
                                    '         --show-cache-node-info'
                                    ' --region ', Ref('AWS::Region'),
                                    ' > /tmp/cacheclusterconfig\n'
                                    ]),
                                mode='000755',
                                owner='root',
                                group='root'
                                ),
                        '/usr/local/bin/install_phpredis':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '#! /bin/bash\n',
                                    'cd /tmp\n',
                                    'wget https://github.com/nicolasff/'
                                    'phpredis/zipball/master -O phpredis.zip'
                                    '\n',
                                    'unzip phpredis.zip\n',
                                    'cd nicolasff-phpredis-*\n',
                                    'phpize\n',
                                    './configure\n',
                                    'make && make install\n',
                                    'touch /etc/php.d/redis.ini\n',
                                    'echo extension=redis.so > /etc/php.d/'
                                    'redis.ini\n'
                                    ]),
                                mode='000755',
                                owner='root',
                                group='root'
                                ),
                        '/etc/cfn/cfn-hup.conf': cloudformation.InitFile(
                            content=Join('', [
                                '[main]\n',
                                'stack=', Ref('AWS::StackId'), '\n',
                                'region=', Ref('AWS::Region'), '\n'
                                ]),
                            mode='000400',
                            owner='root',
                            group='root'
                            ),
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '[cfn-auto-reloader-hook]\n',
                                    'triggers=post.update\n',
                                    'path=Resources.WebServerInstance.Metadata'
                                    '.AWS::CloudFormation::Init\n',
                                    'action=/opt/aws/bin/cfn-init -v ',
                                    '         --stack ', Ref('AWS::StackName'),
                                    '         --resource WebServerInstance ',
                                    '         --region ', Ref('AWS::Region'),
                                    '\n',
                                    'runas=root\n'
                                    ]),
                                # Why doesn't the Amazon template have this?
                                # mode='000400',
                                # owner='root',
                                # group='root'
                                ),
                        }),

                    commands={
                        '01-install_phpredis': {
                            'command': '/usr/local/bin/install_phpredis'
                            },
                        '02-get-cluster-config': {
                            'command': '/usr/local/bin/get_cluster_config'
                            }
                        },

                    services={
                        "sysvinit": cloudformation.InitServices({
                            "httpd": cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                                ),
                            "cfn-hup": cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                                files=['/etc/cfn/cfn-hup.conf',
                                       '/etc/cfn/hooks.d/'
                                       'cfn-auto-reloader.conf']
                                ),
                            }),
                        },
                    )
                })
            ),
        ImageId=FindInMap('AWSRegionArch2AMI', Ref('AWS::Region'),
                          FindInMap('AWSInstanceType2Arch',
                                    Ref(instancetype), 'Arch')),
        InstanceType=Ref(instancetype),
        SecurityGroups=[Ref(webserversg)],
        KeyName=Ref(keyname),
        IamInstanceProfile=Ref(webserverinstanceprofile),
        UserData=Base64(Join('', [
            '#!/bin/bash -xe\n',
            'yum update -y aws-cfn-bootstrap\n',

            '# Setup the PHP sample application\n',
            '/opt/aws/bin/cfn-init -v ',
            '         --stack ', Ref('AWS::StackName'),
            '         --resource WebServerInstance ',
            '         --region ', Ref('AWS::Region'), '\n',

            '# Signal the status of cfn-init\n',
            '/opt/aws/bin/cfn-signal -e $? ',
            '         --stack ', Ref('AWS::StackName'),
            '         --resource WebServerInstance ',
            '         --region ', Ref('AWS::Region'), '\n'
            ])),
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M')
            ),
        Tags=Tags(Application=Ref('AWS::StackId'),
                  Details='Created using Troposphere')
        ))

    redisclustersg = template.add_resource(elasticache.SecurityGroup(
        'RedisClusterSecurityGroup',
        Description='Lock the cluster down',
        ))

    template.add_resource(elasticache.SecurityGroupIngress(
        'RedisClusterSecurityGroupIngress',
        CacheSecurityGroupName=Ref(redisclustersg),
        EC2SecurityGroupName=Ref(webserversg),
        ))

    template.add_resource(elasticache.CacheCluster(
        'RedisCluster',
        Engine='redis',
        CacheNodeType=Ref(cachenodetype),
        NumCacheNodes='1',
        CacheSecurityGroupNames=[Ref(redisclustersg)],
        ))

    # Outputs
    template.add_output([
        Output(
            'WebsiteURL',
            Description='Application URL',
            Value=Join('', [
                'http://',
                GetAtt(webserverinstance, 'PublicDnsName'),

                ])
            )
        ])

    # Print CloudFormation Template
    print(template.to_json())
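
    # Added sketch (assumption, not from the original example): instead of
    # only printing the JSON, the rendered body could be deployed with boto3.
    # The stack name and key pair below are placeholders; CAPABILITY_IAM is
    # required because the template creates IAM resources.
    #
    #   import boto3
    #   boto3.client('cloudformation').create_stack(
    #       StackName='elasticache-redis-sample',
    #       TemplateBody=template.to_json(),
    #       Parameters=[{'ParameterKey': 'KeyName',
    #                    'ParameterValue': 'my-keypair'}],
    #       Capabilities=['CAPABILITY_IAM'],
    #   )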
from troposphere import Base64, FindInMap, Parameter, Output, Ref, Template
import troposphere.ec2 as ec2

template = Template()

keyname_param = template.add_parameter(Parameter(
    "KeyName",
    Type="String",
    Description="Name of an existing EC2 KeyPair to enable SSH",
    Default="pduleba-eu-west-1-key-pair",
))

template.add_mapping('RegionMap', {
    "eu-west-1": {"AMI": "ami-01720b5f421cf0179"},
    "eu-west-2": {"AMI": "ami-0e80a462ede03e653"},
    "eu-west-3": {"AMI": "ami-000bdaf1845abe910"},
    "eu-north-1": {"AMI": "ami-01d2b64cb4bdfe5db"},
    "eu-south-1": {"AMI": "ami-04a490111f1f73f3a"},
    "eu-central-1": {"AMI": "ami-03c3a7e4263fd998c"}
})

ec2_instance = template.add_resource(ec2.Instance(
    "Ec2Instance",
    ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
    InstanceType="t1.micro",
    KeyName=Ref(keyname_param),
    SecurityGroups=["default"],
    UserData=Base64("80")
))

template.add_output([
    Output(
template.add_mapping(
    'RegionMap', {
        "ap-south-1": {
            "AMI": "ami-0937dcc711d38ef3f"
        },
        "eu-west-3": {
            "AMI": "ami-0854d53ce963f69d8"
        },
        "eu-north-1": {
            "AMI": "ami-6d27a913"
        },
        "eu-west-2": {
            "AMI": "ami-0664a710233d7c148"
        },
        "eu-west-1": {
            "AMI": "ami-0fad7378adf284ce0"
        },
        "ap-northeast-2": {
            "AMI": "ami-018a9a930060d38aa"
        },
        "ap-northeast-1": {
            "AMI": "ami-0d7ed3ddb85b521a6"
        },
        "sa-east-1": {
            "AMI": "ami-0b04450959586da29"
        },
        "ca-central-1": {
            "AMI": "ami-0de8b8e4bc1f125fe"
        },
        "ap-southeast-1": {
            "AMI": "ami-04677bdaa3c2b6e24"
        },
        "ap-southeast-2": {
            "AMI": "ami-0c9d48b5db609ad6e"
        },
        "eu-central-1": {
            "AMI": "ami-0eaec5838478eb0ba"
        },
        "us-east-1": {
            "AMI": "ami-035be7bafff33b6b6"
        },
        "us-east-2": {
            "AMI": "ami-04328208f4f0cf1fe"
        },
        "us-west-1": {
            "AMI": "ami-0799ad445b5727125"
        },
        "us-west-2": {
            "AMI": "ami-032509850cf9ee54e"
        }
    })
t.add_mapping(
    "CENTOS7", {
        "eu-west-1": {
            "AMI": "ami-33734044"
        },
        "ap-southeast-1": {
            "AMI": "ami-2a7b6b78"
        },
        "ap-southeast-2": {
            "AMI": "ami-d38dc6e9"
        },
        "eu-central-1": {
            "AMI": "ami-e68f82fb"
        },
        "ap-northeast-1": {
            "AMI": "ami-b80b6db8"
        },
        "us-east-1": {
            "AMI": "ami-61bbf104"
        },
        "sa-east-1": {
            "AMI": "ami-fd0197e0"
        },
        "us-west-1": {
            "AMI": "ami-f77fbeb3"
        },
        "us-west-2": {
            "AMI": "ami-d440a6e7"
        }
    })
t.add_mapping(
    'RegionMap', {
        "us-east-1": {
            "hostedzoneID": "Z3AQBSTGFYJSTF",
            "websiteendpoint": "s3-website-us-east-1.amazonaws.com"
        },
        "us-west-1": {
            "hostedzoneID": "Z2F56UZL2M1ACD",
            "websiteendpoint": "s3-website-us-west-1.amazonaws.com"
        },
        "us-west-2": {
            "hostedzoneID": "Z3BJ6K6RIION7M",
            "websiteendpoint": "s3-website-us-west-2.amazonaws.com"
        },
        "eu-west-1": {
            "hostedzoneID": "Z1BKCTXD74EZPE",
            "websiteendpoint": "s3-website-eu-west-1.amazonaws.com"
        },
        "ap-southeast-1": {
            "hostedzoneID": "Z3O0J2DXBE1FTB",
            "websiteendpoint": "s3-website-ap-southeast-1.amazonaws.com"
        },
        "ap-southeast-2": {
            "hostedzoneID": "Z1WCIGYICN2BYD",
            "websiteendpoint": "s3-website-ap-southeast-2.amazonaws.com"
        },
        "ap-northeast-1": {
            "hostedzoneID": "Z2M4EHUR26P7ZW",
            "websiteendpoint": "s3-website-ap-northeast-1.amazonaws.com"
        },
        "sa-east-1": {
            "hostedzoneID": "Z31GFT0UA1I2HV",
            "websiteendpoint": "s3-website-sa-east-1.amazonaws.com"
        },
        "cloudfront": {
            "hostedzoneID": "Z2FDTNDATAQYW2"
        }
    })
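
# Added sketch (assumption, not from the original snippet): one way a mapping
# like the one above is typically consumed, aliasing a Route 53 record to the
# regional S3 website endpoint. The hosted zone and record names below are
# placeholders.
from troposphere import FindInMap, Ref
from troposphere.route53 import AliasTarget, RecordSetType

t.add_resource(RecordSetType(
    "SiteAliasRecord",
    HostedZoneName="example.com.",
    Name="www.example.com.",
    Type="A",
    AliasTarget=AliasTarget(
        HostedZoneId=FindInMap("RegionMap", Ref("AWS::Region"), "hostedzoneID"),
        DNSName=FindInMap("RegionMap", Ref("AWS::Region"), "websiteendpoint"),
    ),
))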
Example #41
0
def create():

    es = Template()

    es.add_description("Stack defining the elasticsearch instance")

    # Get latest AMIs
    def getAMI(region):
        AMIMap = {}
        print("Getting latest AMZN linux AMI in %s" % region)
        ec2conn = boto.ec2.connect_to_region(region)
        images = ec2conn.get_all_images(owners=["amazon"], filters={"name": "amzn-ami-hvm-*.x86_64-gp2"})
        latestDate = ""
        latestAMI = ""
        for image in images:
            if image.creationDate > latestDate:
                latestDate = image.creationDate
                latestAMI = image.id
        AMIMap[region] = {"id": latestAMI}
        return AMIMap

    # Create AMI Map
    es.add_mapping("AMIMap",getAMI(region))

    # Create es VPC
    esVPC = es.add_resource(
        VPC(
            "esVPC",
            CidrBlock="10.0.0.0/16",
            Tags=Tags(
                Name="esVPC"
            )
        )
    )

    # Create es IGW
    esIGW = es.add_resource(
        InternetGateway(
            "esIGW"
        )
    )

    # Attach IGW to VPC
    esIGWAttachment = es.add_resource(
        VPCGatewayAttachment(
            "esIGWAttachment",
            VpcId=Ref(esVPC),
            InternetGatewayId=Ref(esIGW)
        )
    )

    # Create es Subnet
    esSubnet = es.add_resource(
        Subnet(
            "esSubnet",
            CidrBlock="10.0.0.0/24",
            VpcId=Ref(esVPC)
        )
    )

    # Create es RTB
    esRTB = es.add_resource(
        RouteTable(
            "esRTB",
            VpcId=Ref(esVPC)
        )
    )

    # Create route to IGW
    esDefaultRoute = es.add_resource(
        Route(
            "esDefaultRoute",
            DependsOn="esIGWAttachment",
            GatewayId=Ref(esIGW),
            DestinationCidrBlock="0.0.0.0/0",
            RouteTableId=Ref(esRTB)
        )
    )

    # Associate RTB with Subnet
    esSubnetRTBAssociation = es.add_resource(
        SubnetRouteTableAssociation(
            "esSubnetRTBAssociation",
            SubnetId=Ref(esSubnet),
            RouteTableId=Ref(esRTB)
        )
    )

    # Create es Security Group
    esSecurityGroup = es.add_resource(
        SecurityGroup(
            "esSecurityGroup",
            GroupDescription="Allow inbound access on port 9200",
            SecurityGroupIngress=[
                SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="9200",
                    ToPort="9200",
                    CidrIp="0.0.0.0/0"
                )
            ],
            VpcId=Ref(esVPC)
        )
    )

    # Create es instance metadata
    esInstanceMetadata = Metadata(
        Init(
            # Use ConfigSets to ensure GPG key and repo file are in place
            # before trying to install elasticsearch
            InitConfigSets(
                ordered=["first","second"]
            ),
            first=InitConfig(
                files=InitFiles(
                    {
                        # cfn-hup notices when the cloudformation stack is changed
                        "/etc/cfn/cfn-hup.conf": InitFile(
                            content=Join("",
                                [
                                    "[main]\n",
                                    "stack=",Ref("AWS::StackName"),"\n",
                                    "region=",Ref("AWS::Region"),"\n"
                                ]
                            ),
                            mode="000400",
                            owner="root",
                            group="root"
                        ),
                        # cfn-hup will then trigger cfn-init to run.
                        # This lets us update the instance just by updating the stack
                        "/etc/cfn/hooks.d/cfn-auto-reloader.conf": InitFile(
                            content=Join("",
                                [
                                    "[cfn-auto-reloader-hook]\n",
                                    "triggers=post.update\n",
                                    "path=Resources.esInstance.Metadata\n",
                                    "action=/opt/aws/bin/cfn-init -v --stack ", Ref("AWS::StackName"), " ",
                                    "--resource esInstance ",
                                    "--region ", Ref("AWS::Region"), " ",
                                    "--c ordered\n"
                                    "runas=root\n"
                                ]
                            ),
                            mode="000400",
                            owner="root",
                            group="root"
                        ),
                        # repo file for elastic search
                        "/etc/yum.repos.d/elasticsearch.repo": InitFile(
                            content=Join("",
                                [
                                    "[elasticsearch-2.x]\n",
                                    "name=Elasticsearch repository for 2.x packages\n",
                                    "baseurl=http://packages.elastic.co/elasticsearch/2.x/centos\n",
                                    "gpgcheck=1\n",
                                    "gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch\n",
                                    "enabled=1\n"
                                ]
                            ),
                            mode="000400",
                            owner="root",
                            group="root"
                        )
                    }
                ),
                commands={
                    # Install elasticsearch key so package will install
                    "installGPG": {
                        "command": "rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch"
                    }
                }
            ),
            second=InitConfig(
                packages={
                    "yum": {
                        # Install elasticsearch
                        "elasticsearch": [],
                    }
                },
                commands={
                    # Enable external access to elasticsearch
                    "listenOnAllinterfaces": {
                        "command": "echo \"network.host: 0.0.0.0\" >> /etc/elasticsearch/elasticsearch.yml"
                    }
                },
                services={
                    "sysvinit": InitServices(
                        {
                            "elasticsearch": InitService(
                                enabled=True,
                                ensureRunning=True
                            ),
                            "cfn-hup": InitService(
                                enabled=True,
                                ensureRunning=True,
                                files=[
                                    "/etc/cfn/cfn-hup.conf",
                                    "/etc/cfn/hooks.d/cfn-auto-reloader.conf"
                                ]
                            )
                        }
                    )
                }
            )
        )
    )

    # Create es Instance
    esInstance = es.add_resource(
        Instance(
            "esInstance",
            ImageId=FindInMap("AMIMap",Ref("AWS::Region"),"id"),
            InstanceType="t2.micro",
            Metadata=esInstanceMetadata,
            UserData=Base64(
                Join("",
                    [
                        "#!/bin/bash\n",
                        "/opt/aws/bin/cfn-init -v ",
                        "--stack ", Ref("AWS::StackName"), " ",
                        "--resource esInstance ",
                        "--region ", Ref("AWS::Region"), " ",
                        "-c ordered"
                    ]
                )
            ),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=[
                        Ref(esSecurityGroup)
                    ],
                    AssociatePublicIpAddress="true",
                    DeviceIndex="0",
                    DeleteOnTermination="true",
                    SubnetId=Ref(esSubnet),

                )
            ],
            Tags=Tags(
                Name="esInstance"
            )
        )
    )

    # Output address
    es.add_output([
        Output(
            "esAddress",
            Description="Elastic Search address",
            Value=Join("", [
                "http://", GetAtt("esInstance", "PublicIp"), ":9200/"
            ])
        )
    ])
    return es
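
# Added note (assumption): the getAMI() helper above depends on a module-level
# `region` variable and the boto EC2 bindings; a hypothetical way to render
# the stack would be:
#
#   import boto.ec2
#   region = "us-east-1"
#   print(create().to_json())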
Example #42
0
    ConstraintDescription = "must be a valid EC2 instance type"
))

java_license = template.add_parameter(Parameter(
    "OracleJava",
    Description = "Type 'yes' to accept the Oracle Java license found here: http://www.oracle.com/technetwork/java/javase/terms/license/index.html",
    Type = "String",
    AllowedValues = [ "yes", "YES", "'yes'", "Yes" ],
    ConstraintDescription = "Type 'yes' to agree to the license"
))

template.add_mapping('RegionMap', {
    "us-east-1":      {"AMI": "ami-7724131e"},
    "us-west-1":      {"AMI": "ami-3cdcef79"},
    "us-west-2":      {"AMI": "ami-a86f0998"},
    "eu-west-1":      {"AMI": "ami-a8e10bdf"},
    "sa-east-1":      {"AMI": "ami-4bf85856"},
    "ap-southeast-1": {"AMI": "ami-149fc846"},
    "ap-southeast-2": {"AMI": "ami-e5d749df"},
    "ap-northeast-1": {"AMI": "ami-8f39568e"}
})

# Create a security group
sg = template.add_resource(ec2.SecurityGroup('AsgardSecurityGroup'))
sg.GroupDescription = 'Access to Asgard Instance'
sg.SecurityGroupIngress = [
    ec2.SecurityGroupRule(
        IpProtocol = 'tcp',
        FromPort = '22',
        ToPort = '22',
        CidrIp = '0.0.0.0/0'
    ),
Example #43
0
def flocker_docker_template(cluster_size, client_ami_map, node_ami_map):
    """
    :param int cluster_size: The number of nodes to create in the Flocker
        cluster (including control service node).
    :param dict client_ami_map: A map between AWS region name and AWS AMI ID
        for the client.
    :param dict node_ami_map: A map between AWS region name and AWS AMI ID
        for the node.
    :returns: a CloudFormation template for a Flocker + Docker + Docker Swarm
        cluster.
    """
    # Base JSON template.
    template = Template()

    # Keys corresponding to CloudFormation user Inputs.
    access_key_id_param = template.add_parameter(
        Parameter(
            "AmazonAccessKeyID",
            Description="Required: Your Amazon AWS access key ID",
            Type="String",
            NoEcho=True,
            AllowedPattern="[\w]+",
            MinLength="16",
            MaxLength="32",
        ))
    secret_access_key_param = template.add_parameter(
        Parameter(
            "AmazonSecretAccessKey",
            Description="Required: Your Amazon AWS secret access key",
            Type="String",
            NoEcho=True,
            MinLength="1",
        ))
    keyname_param = template.add_parameter(
        Parameter(
            "EC2KeyPair",
            Description=
            "Required: Name of an existing EC2 KeyPair to enable SSH "
            "access to the instance",
            Type="AWS::EC2::KeyPair::KeyName",
        ))
    template.add_parameter(
        Parameter(
            "S3AccessPolicy",
            Description="Required: Is current IAM user allowed to access S3? "
            "S3 access is required to distribute Flocker and Docker "
            "configuration amongst stack nodes. Reference: "
            "http://docs.aws.amazon.com/IAM/latest/UserGuide/"
            "access_permissions.html Stack creation will fail if user "
            "cannot access S3",
            Type="String",
            AllowedValues=["Yes"],
        ))
    volumehub_token = template.add_parameter(
        Parameter(
            "VolumeHubToken",
            Description=("Optional: Your Volume Hub token. "
                         "You'll find the token at "
                         "https://volumehub.clusterhq.com/v1/token."),
            Type="String",
            Default="",
        ))

    template.add_mapping('RegionMapClient',
                         {k: {
                             "AMI": v
                         }
                          for k, v in client_ami_map.items()})
    template.add_mapping('RegionMapNode',
                         {k: {
                             "AMI": v
                         }
                          for k, v in node_ami_map.items()})

    # Select a random AvailabilityZone within given AWS Region.
    zone = Select(0, GetAZs(""))

    # S3 bucket to hold {Flocker, Docker, Swarm} configuration for distribution
    # between nodes.
    s3bucket = Bucket('ClusterConfig', DeletionPolicy='Retain')
    template.add_resource(s3bucket)

    # Create SecurityGroup for cluster instances.
    instance_sg = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription=(
                "Enable ingress access on all protocols and ports."),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol=protocol,
                    FromPort="0",
                    ToPort="65535",
                    CidrIp="0.0.0.0/0",
                ) for protocol in ('tcp', 'udp')
            ]))

    # Base for post-boot {Flocker, Docker, Swarm} configuration on the nodes.
    base_user_data = [
        '#!/bin/bash\n',
        'aws_region="',
        Ref("AWS::Region"),
        '"\n',
        'aws_zone="',
        zone,
        '"\n',
        'access_key_id="',
        Ref(access_key_id_param),
        '"\n',
        'secret_access_key="',
        Ref(secret_access_key_param),
        '"\n',
        's3_bucket="',
        Ref(s3bucket),
        '"\n',
        'stack_name="',
        Ref("AWS::StackName"),
        '"\n',
        'volumehub_token="',
        Ref(volumehub_token),
        '"\n',
        'node_count="{}"\n'.format(cluster_size),
        'apt-get update\n',
    ]

    # XXX Flocker agents are indexed from 1 while the nodes overall are indexed
    # from 0.
    flocker_agent_number = 1

    # Gather WaitConditions
    wait_condition_names = []

    for i in range(cluster_size):
        if i == 0:
            node_name = CONTROL_NODE_NAME
        else:
            node_name = AGENT_NODE_NAME_TEMPLATE.format(index=i)

        # Create an EC2 instance for the {Agent, Control} Node.
        ec2_instance = ec2.Instance(node_name,
                                    ImageId=FindInMap("RegionMapNode",
                                                      Ref("AWS::Region"),
                                                      "AMI"),
                                    InstanceType="m3.large",
                                    KeyName=Ref(keyname_param),
                                    SecurityGroups=[Ref(instance_sg)],
                                    AvailabilityZone=zone,
                                    Tags=Tags(Name=node_name))

        # WaitCondition and corresponding Handler to signal completion
        # of {Flocker, Docker, Swarm} configuration on the node.
        wait_condition_handle = WaitConditionHandle(
            INFRA_WAIT_HANDLE_TEMPLATE.format(node=node_name))
        template.add_resource(wait_condition_handle)
        wait_condition = WaitCondition(
            INFRA_WAIT_CONDITION_TEMPLATE.format(node=node_name),
            Handle=Ref(wait_condition_handle),
            Timeout=NODE_CONFIGURATION_TIMEOUT,
        )
        template.add_resource(wait_condition)

        # Gather WaitConditions
        wait_condition_names.append(wait_condition.name)

        user_data = base_user_data[:]
        user_data += [
            'node_number="{}"\n'.format(i),
            'node_name="{}"\n'.format(node_name),
            'wait_condition_handle="',
            Ref(wait_condition_handle),
            '"\n',
        ]

        # Setup S3 utilities to push/pull node-specific data to/from S3 bucket.
        user_data += _sibling_lines(S3_SETUP)

        if i == 0:
            # Control Node configuration.
            control_service_instance = ec2_instance
            user_data += ['flocker_node_type="control"\n']
            user_data += _sibling_lines(FLOCKER_CONFIGURATION_GENERATOR)
            user_data += _sibling_lines(DOCKER_SWARM_CA_SETUP)
            user_data += _sibling_lines(DOCKER_SETUP)

            # Setup Swarm 1.0.1
            user_data += _sibling_lines(SWARM_MANAGER_SETUP)
            template.add_output([
                Output(
                    "ControlNodeIP",
                    Description="Public IP of Flocker Control and "
                    "Swarm Manager.",
                    Value=GetAtt(ec2_instance, "PublicIp"),
                )
            ])
        else:
            # Agent Node configuration.
            ec2_instance.DependsOn = control_service_instance.name
            user_data += [
                'flocker_node_type="agent"\n',
                'flocker_agent_number="{}"\n'.format(flocker_agent_number)
            ]
            flocker_agent_number += 1
            user_data += _sibling_lines(DOCKER_SETUP)

            # Setup Swarm 1.0.1
            user_data += _sibling_lines(SWARM_NODE_SETUP)
            template.add_output([
                Output(
                    "AgentNode{}IP".format(i),
                    Description=(
                        "Public IP of Agent Node for Flocker and Swarm."),
                    Value=GetAtt(ec2_instance, "PublicIp"),
                )
            ])

        user_data += _sibling_lines(FLOCKER_CONFIGURATION_GETTER)
        user_data += _sibling_lines(VOLUMEHUB_SETUP)
        user_data += _sibling_lines(SIGNAL_CONFIG_COMPLETION)
        ec2_instance.UserData = Base64(Join("", user_data))
        template.add_resource(ec2_instance)

    # Client Node creation.
    client_instance = ec2.Instance(CLIENT_NODE_NAME,
                                   ImageId=FindInMap("RegionMapClient",
                                                     Ref("AWS::Region"),
                                                     "AMI"),
                                   InstanceType="m3.medium",
                                   KeyName=Ref(keyname_param),
                                   SecurityGroups=[Ref(instance_sg)],
                                   AvailabilityZone=zone,
                                   Tags=Tags(Name=CLIENT_NODE_NAME))
    wait_condition_handle = WaitConditionHandle(CLIENT_WAIT_HANDLE)
    template.add_resource(wait_condition_handle)
    wait_condition = WaitCondition(
        CLIENT_WAIT_CONDITION,
        Handle=Ref(wait_condition_handle),
        Timeout=NODE_CONFIGURATION_TIMEOUT,
    )
    template.add_resource(wait_condition)

    # Client Node {Flockerctl, Docker-compose} configuration.
    user_data = base_user_data[:]
    user_data += [
        'wait_condition_handle="',
        Ref(wait_condition_handle),
        '"\n',
        'node_number="{}"\n'.format("-1"),
    ]
    user_data += _sibling_lines(S3_SETUP)
    user_data += _sibling_lines(CLIENT_SETUP)
    user_data += _sibling_lines(SIGNAL_CONFIG_COMPLETION)
    client_instance.UserData = Base64(Join("", user_data))

    # Start Client Node after Control Node and Agent Nodes are
    # up and running Flocker, Docker, Swarm stack.
    client_instance.DependsOn = wait_condition_names
    template.add_resource(client_instance)

    # List of Output fields upon successful creation of the stack.
    template.add_output([
        Output(
            "ClientNodeIP",
            Description="Public IP address of the client node.",
            Value=GetAtt(client_instance, "PublicIp"),
        )
    ])
    template.add_output(
        Output(
            "ClientConfigDockerSwarmHost",
            Value=Join("", [
                "export DOCKER_HOST=tcp://",
                GetAtt(control_service_instance, "PublicIp"), ":2376"
            ]),
            Description="Client config: Swarm Manager's DOCKER_HOST setting."))
    template.add_output(
        Output("ClientConfigDockerTLS",
               Value="export DOCKER_TLS_VERIFY=1",
               Description="Client config: Enable TLS client for Swarm."))
    return template.to_json()
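
# Added sketch (assumption, not from the original module): a hypothetical call
# showing the required arguments. The AMI IDs are placeholders, and the
# module-level constants/helpers referenced above (CONTROL_NODE_NAME,
# _sibling_lines, ...) must be importable alongside this function.
if __name__ == '__main__':
    demo_amis = {'us-east-1': 'ami-00000000', 'us-west-2': 'ami-11111111'}
    print(flocker_docker_template(cluster_size=3,
                                  client_ami_map=demo_amis,
                                  node_ami_map=demo_amis))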
Example #44
0

template = Template()

keyname_param = template.add_parameter(Parameter(
    "KeyName",
    Description="Name of an existing EC2 KeyPair to enable SSH "
                "access to the instance",
    Type="String",
))

template.add_mapping('RegionMap', {
    "us-east-1": {"AMI": "ami-7f418316"},
    "us-west-1": {"AMI": "ami-951945d0"},
    "us-west-2": {"AMI": "ami-16fd7026"},
    "eu-west-1": {"AMI": "ami-24506250"},
    "sa-east-1": {"AMI": "ami-3e3be423"},
    "ap-southeast-1": {"AMI": "ami-74dda626"},
    "ap-northeast-1": {"AMI": "ami-dcfa4edd"}
})

ec2_instance = template.add_resource(ec2.Instance(
    "Ec2Instance",
    ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
    InstanceType="t1.micro",
    KeyName=Ref(keyname_param),
    SecurityGroups=["default"],
    UserData=Base64("80")
))

instanceSecurityGroup = template.add_resource(
Example #45
0
from troposphere import Ref, Template, Parameter, FindInMap

################ Create Template ########################################
base_template = Template()

############### Create Mappings and Parameters ###########################
ubuntu_images = [
    "ami-07d0cf3af28718ef8", "ami-0cfee17793b08a293", "ami-00d4e9ff62bc40e03"
]
'''map ami types'''
base_template.add_mapping(
    "Images", {
        "18.04": {
            "Ubuntu": ubuntu_images[0]
        },
        "16.04": {
            "Ubuntu": ubuntu_images[1]
        },
        "14.04": {
            "Ubuntu": ubuntu_images[2]
        }
    })
'''map instance types'''
base_template.add_mapping(
    "InstanceType", {
        "18.04": {
            "type": "t2.medium"
        },
        "16.04": {
            "type": "t2.medium"
        },
        "14.04": {
    ),
    Type="String",
    Description="Public subnet 1 CIDR block.",
    ConstraintDescription=(
        "Must be a valid IP CIDR range of the form x.x.x.x/x and subnet of "
        "VPC."
    )
))

t.add_mapping(
    "AWSNATAMI",
    {
        "ap-northeast-1": {"AMI": "ami-03cf3903"},
        "ap-southeast-1": {"AMI": "ami-b49dace6"},
        "ap-southeast-2": {"AMI": "ami-e7ee9edd"},
        "eu-central-1": {"AMI": "ami-46073a5b"},
        "eu-west-1": {"AMI": "ami-6975eb1e"},
        "sa-east-1": {"AMI": "ami-fbfa41e6"},
        "us-east-1": {"AMI": "ami-303b1458"},
        "us-west-1": {"AMI": "ami-7da94839"},
        "us-west-2": {"AMI": "ami-69ae8259"}
    }
)

vpc = t.add_resource(VPC(
    "vpc",
    InstanceTenancy="default",
    EnableDnsSupport="true",
    CidrBlock=Ref(vpcCidr),
    EnableDnsHostnames="true"
))
		"remote_user = ec2-user",
		"become = true",
		"become_method = sudo",
		"become_user = root",
		"EOF",
		AnsiblePullCmd
	])
)


t.add_mapping(
	'RegionMap', {
		"us-west-1": {"AMI": "ami-951945d0"},
		"us-west-2": {"AMI": "ami-16fd7026"},
		"eu-west-1": {"AMI": "ami-24506250"},
		"sa-east-1": {"AMI": "ami-3e3be423"},
		"ap-southeast-1": {"AMI": "ami-74dda626"},
		"ap-northeast-1": {"AMI": "ami-dcfa4edd"},
		"ap-northeast-2": {"AMI": "ami-0d097db2fb6e0f05e"}
	}
)

ec2_instance=t.add_resource(
	ec2.Instance(
		"instance",
		ImageId=FindInMap("RegionMap",Ref("AWS::Region"),"AMI"),
		InstanceType="t2.micro",
		KeyName=Ref(keyname_param),
		SecurityGroupIds=[Ref(security_param)],
		SubnetId=SubnetID,
		UserData=ud,
UseEBS = t.add_parameter(
    Parameter(
        "UseEBS",
        Default="no",
        ConstraintDescription="Must be yes or no only.",
        Type="String",
        Description="Use EBS Volumes for the Worker Node",
        AllowedValues=["yes", "no"],
    ))

UseEBSBool = t.add_condition("UseEBSBool", Equals(Ref(UseEBS), "yes"))
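
# Aside (not part of the original snippet): a minimal sketch of how a
# condition such as UseEBSBool is typically consumed via Fn::If, building a
# block device mapping that only materialises when UseEBS is "yes". The
# device name and volume size below are illustrative assumptions.
from troposphere import If, Ref
import troposphere.ec2 as ec2

worker_block_devices = If(
    "UseEBSBool",
    [ec2.BlockDeviceMapping(
        DeviceName="/dev/xvdb",
        Ebs=ec2.EBSBlockDevice(VolumeSize="100", VolumeType="gp2"))],
    Ref("AWS::NoValue"),
)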

t.add_mapping("SubnetConfig", {
    'Public': {
        'CIDR': '10.0.0.0/24'
    },
    'VPC': {
        'CIDR': '10.0.0.0/16'
    }
})

t.add_mapping(
    "RHEL66", {
        'ap-northeast-1': {
            'AMI': 'ami-a15666a0'
        },
        'ap-southeast-1': {
            'AMI': 'ami-3813326a'
        },
        'ap-southeast-2': {
            'AMI': 'ami-55e38e6f'
        },
Example #49
0
def test_max_mappings(self):
    template = Template()
    for i in range(0, MAX_MAPPINGS):
        template.add_mapping(str(i), {"n": "v"})
    with self.assertRaises(ValueError):
        template.add_mapping("mapping", {"n": "v"})
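
A brief sketch of the same limit outside a test case, assuming MAX_MAPPINGS is
importable from the troposphere package as the test above implies; the mapping
names and payload are placeholders.

from troposphere import MAX_MAPPINGS, Template

template = Template()
for i in range(MAX_MAPPINGS):
    template.add_mapping("Map{}".format(i), {"n": "v"})

try:
    template.add_mapping("OneTooMany", {"n": "v"})
except ValueError as error:
    # add_mapping refuses to exceed the CloudFormation mapping limit.
    print("Mapping limit reached: {}".format(error))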
Example #50
0
def flocker_docker_template(cluster_size, client_ami_map, node_ami_map):
    """
    :param int cluster_size: The number of nodes to create in the Flocker
        cluster (including control service node).
    :param dict client_ami_map: A map between AWS region name and AWS AMI ID
        for the client.
    :param dict node_ami_map: A map between AWS region name and AWS AMI ID
        for the node.
    :returns: a CloudFormation template for a Flocker + Docker + Docker Swarm
        cluster.
    """
    # Base JSON template.
    template = Template()

    # Keys corresponding to CloudFormation user Inputs.
    access_key_id_param = template.add_parameter(Parameter(
        "AmazonAccessKeyID",
        Description="Required: Your Amazon AWS access key ID",
        Type="String",
        NoEcho=True,
        AllowedPattern=r"[\w]+",
        MinLength="16",
        MaxLength="32",
    ))
    secret_access_key_param = template.add_parameter(Parameter(
        "AmazonSecretAccessKey",
        Description="Required: Your Amazon AWS secret access key",
        Type="String",
        NoEcho=True,
        MinLength="1",
    ))
    keyname_param = template.add_parameter(Parameter(
        "EC2KeyPair",
        Description="Required: Name of an existing EC2 KeyPair to enable SSH "
                    "access to the instance",
        Type="AWS::EC2::KeyPair::KeyName",
    ))
    template.add_parameter(Parameter(
        "S3AccessPolicy",
        Description="Required: Is current IAM user allowed to access S3? "
                    "S3 access is required to distribute Flocker and Docker "
                    "configuration amongst stack nodes. Reference: "
                    "http://docs.aws.amazon.com/IAM/latest/UserGuide/"
                    "access_permissions.html Stack creation will fail if user "
                    "cannot access S3",
        Type="String",
        AllowedValues=["Yes"],
    ))
    volumehub_token = template.add_parameter(Parameter(
        "VolumeHubToken",
        Description=(
            "Optional: Your Volume Hub token. "
            "You'll find the token at "
            "https://volumehub.clusterhq.com/v1/token."
        ),
        Type="String",
        Default="",
    ))

    # Base AMIs pre-baked with the following products:
    # Docker 1.9.1
    # Flocker 1.9.0.dev1+1221.gde4c49f
    # Please update the version fields above when new AMIs are generated.
    template.add_mapping(
        'RegionMap', {
            'client': client_ami_map,
            'node': node_ami_map,
        }
    )

    # Select the first AvailabilityZone within the given AWS Region.
    zone = Select(0, GetAZs(""))

    # S3 bucket to hold {Flocker, Docker, Swarm} configuration for distribution
    # between nodes.
    s3bucket = Bucket('ClusterConfig',
                      DeletionPolicy='Retain')
    template.add_resource(s3bucket)

    # Create SecurityGroup for cluster instances.
    instance_sg = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription=(
                "Enable ingress access on all protocols and ports."
            ),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol=protocol,
                    FromPort="0",
                    ToPort="65535",
                    CidrIp="0.0.0.0/0",
                ) for protocol in ('tcp', 'udp')
            ]
        )
    )

    # Base for post-boot {Flocker, Docker, Swarm} configuration on the nodes.
    base_user_data = [
        '#!/bin/bash\n',
        'aws_region="', Ref("AWS::Region"), '"\n',
        'aws_zone="', zone, '"\n',
        'access_key_id="', Ref(access_key_id_param), '"\n',
        'secret_access_key="', Ref(secret_access_key_param), '"\n',
        's3_bucket="', Ref(s3bucket), '"\n',
        'stack_name="', Ref("AWS::StackName"), '"\n',
        'volumehub_token="', Ref(volumehub_token), '"\n',
        'node_count="{}"\n'.format(cluster_size),
        'apt-get update\n',
    ]

    # XXX Flocker agents are indexed from 1 while the nodes overall are indexed
    # from 0.
    flocker_agent_number = 1

    # Gather WaitConditions
    wait_condition_names = []

    for i in range(cluster_size):
        if i == 0:
            node_name = CONTROL_NODE_NAME
        else:
            node_name = AGENT_NODE_NAME_TEMPLATE.format(index=i)

        # Create an EC2 instance for the {Agent, Control} Node.
        ec2_instance = ec2.Instance(
            node_name,
            ImageId=FindInMap("RegionMap", "node", Ref("AWS::Region")),
            InstanceType="m3.large",
            KeyName=Ref(keyname_param),
            SecurityGroups=[Ref(instance_sg)],
            AvailabilityZone=zone,
            Tags=Tags(Name=node_name))

        # WaitCondition and corresponding Handler to signal completion
        # of {Flocker, Docker, Swarm} configuration on the node.
        wait_condition_handle = WaitConditionHandle(
            INFRA_WAIT_HANDLE_TEMPLATE.format(node=node_name))
        template.add_resource(wait_condition_handle)
        wait_condition = WaitCondition(
            INFRA_WAIT_CONDITION_TEMPLATE.format(node=node_name),
            Handle=Ref(wait_condition_handle),
            Timeout=NODE_CONFIGURATION_TIMEOUT,
        )
        template.add_resource(wait_condition)

        # Gather WaitConditions
        wait_condition_names.append(wait_condition.name)

        user_data = base_user_data[:]
        user_data += [
            'node_number="{}"\n'.format(i),
            'node_name="{}"\n'.format(node_name),
            'wait_condition_handle="', Ref(wait_condition_handle), '"\n',
        ]

        # Setup S3 utilities to push/pull node-specific data to/from S3 bucket.
        user_data += _sibling_lines(S3_SETUP)

        if i == 0:
            # Control Node configuration.
            control_service_instance = ec2_instance
            user_data += ['flocker_node_type="control"\n']
            user_data += _sibling_lines(FLOCKER_CONFIGURATION_GENERATOR)
            user_data += _sibling_lines(DOCKER_SWARM_CA_SETUP)
            user_data += _sibling_lines(DOCKER_SETUP)

            # Setup Swarm 1.0.1
            user_data += _sibling_lines(SWARM_MANAGER_SETUP)
            template.add_output([
                Output(
                    "ControlNodeIP",
                    Description="Public IP of Flocker Control and "
                                "Swarm Manager.",
                    Value=GetAtt(ec2_instance, "PublicIp"),
                )
            ])
        else:
            # Agent Node configuration.
            ec2_instance.DependsOn = control_service_instance.name
            user_data += [
                'flocker_node_type="agent"\n',
                'flocker_agent_number="{}"\n'.format(
                    flocker_agent_number
                )
            ]
            flocker_agent_number += 1
            user_data += _sibling_lines(DOCKER_SETUP)

            # Setup Swarm 1.0.1
            user_data += _sibling_lines(SWARM_NODE_SETUP)
            template.add_output([
                Output(
                    "AgentNode{}IP".format(i),
                    Description=(
                        "Public IP of Agent Node for Flocker and Swarm."
                    ),
                    Value=GetAtt(ec2_instance, "PublicIp"),
                )
            ])

        user_data += _sibling_lines(FLOCKER_CONFIGURATION_GETTER)
        user_data += _sibling_lines(VOLUMEHUB_SETUP)
        user_data += _sibling_lines(SIGNAL_CONFIG_COMPLETION)
        ec2_instance.UserData = Base64(Join("", user_data))
        template.add_resource(ec2_instance)

    # Client Node creation.
    client_instance = ec2.Instance(
        CLIENT_NODE_NAME,
        ImageId=FindInMap("RegionMap", "client", Ref("AWS::Region")),
        InstanceType="m3.medium",
        KeyName=Ref(keyname_param),
        SecurityGroups=[Ref(instance_sg)],
        AvailabilityZone=zone,
        Tags=Tags(Name=CLIENT_NODE_NAME))
    wait_condition_handle = WaitConditionHandle(CLIENT_WAIT_HANDLE)
    template.add_resource(wait_condition_handle)
    wait_condition = WaitCondition(
        CLIENT_WAIT_CONDITION,
        Handle=Ref(wait_condition_handle),
        Timeout=NODE_CONFIGURATION_TIMEOUT,
    )
    template.add_resource(wait_condition)

    # Client Node {Flockerctl, Docker-compose} configuration.
    user_data = base_user_data[:]
    user_data += [
        'wait_condition_handle="', Ref(wait_condition_handle), '"\n',
        'node_number="{}"\n'.format("-1"),
    ]
    user_data += _sibling_lines(S3_SETUP)
    user_data += _sibling_lines(CLIENT_SETUP)
    user_data += _sibling_lines(SIGNAL_CONFIG_COMPLETION)
    client_instance.UserData = Base64(Join("", user_data))

    # Start Client Node after Control Node and Agent Nodes are
    # up and running Flocker, Docker, Swarm stack.
    client_instance.DependsOn = wait_condition_names
    template.add_resource(client_instance)

    # List of Output fields upon successful creation of the stack.
    template.add_output([
        Output(
            "ClientNodeIP",
            Description="Public IP address of the client node.",
            Value=GetAtt(client_instance, "PublicIp"),
        )
    ])
    template.add_output(Output(
        "ClientConfigDockerSwarmHost",
        Value=Join("",
                   ["export DOCKER_HOST=tcp://",
                    GetAtt(control_service_instance, "PublicIp"), ":2376"]),
        Description="Client config: Swarm Manager's DOCKER_HOST setting."
    ))
    template.add_output(Output(
        "ClientConfigDockerTLS",
        Value="export DOCKER_TLS_VERIFY=1",
        Description="Client config: Enable TLS client for Swarm."
    ))
    return template.to_json()
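
A hedged usage sketch for the generator above. The region key and AMI
identifiers are placeholders rather than real ClusterHQ images, and the output
file name is arbitrary; note that the AMI maps are flat region-to-AMI
dictionaries, so FindInMap("RegionMap", "node", Ref("AWS::Region")) resolves
directly to a single AMI ID.

if __name__ == "__main__":
    # Hypothetical AMI maps keyed by AWS region name.
    client_amis = {"us-east-1": "ami-00000000"}
    node_amis = {"us-east-1": "ami-11111111"}

    # One control node plus two agent nodes.
    stack_json = flocker_docker_template(
        cluster_size=3,
        client_ami_map=client_amis,
        node_ami_map=node_amis,
    )
    with open("flocker-swarm-cluster.json", "w") as handle:
        handle.write(stack_json)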
Example #51
0
ssh_cidr = template.add_parameter(Parameter(
    "SshCidr",
    Description="CIDR block from which to allow SSH access. Restrict this to your IP, if possible.",
    Type="String",
    Default="0.0.0.0/0",
))

# "16.04 hvm ssd" AMIs from https://cloud-images.ubuntu.com/locator/ec2/
template.add_mapping('RegionMap', {
    "ap-northeast-1": {"AMI": "ami-0417e362"},
    "ap-northeast-2": {"AMI": "ami-536ab33d"},
    "ap-south-1": {"AMI": "ami-df413bb0"},
    "ap-southeast-1": {"AMI": "ami-9f28b3fc"},
    "ap-southeast-2": {"AMI": "ami-bb1901d8"},
    "ca-central-1": {"AMI": "ami-a9c27ccd"},
    "eu-central-1": {"AMI": "ami-958128fa"},
    "eu-west-1": {"AMI": "ami-674cbc1e"},
    "eu-west-2": {"AMI": "ami-03998867"},
    "sa-east-1": {"AMI": "ami-a41869c8"},
    "us-east-1": {"AMI": "ami-1d4e7a66"},
    "us-east-2": {"AMI": "ami-dbbd9dbe"},
    "us-west-1": {"AMI": "ami-969ab1f6"},
    "us-west-2": {"AMI": "ami-8803e0f0"},
})

security_group = template.add_resource(ec2.SecurityGroup(
    'SecurityGroup',
    GroupDescription='Allows SSH access from SshCidr and HTTP/HTTPS access from anywhere.',
    SecurityGroupIngress=[
        ec2.SecurityGroupRule(
            IpProtocol='tcp',
            FromPort=22,
Example #52
0
import troposphere.elasticloadbalancing as elb
import argparse

# Depends on already created VPC.

t = Template()

vpcid_param = t.add_parameter(Parameter("VpcId", Description="VpcId of the existing VPC", Type="String"))

subnetid_param = t.add_parameter(
    Parameter(
        "SubnetId", Description="SubnetId of an existing subnet (for the primary network) in the VPC", Type="String"
    )
)

t.add_mapping("RegionMap", {"us-east-1": {"AMI": "ami-7f418316"}, "us-west-1": {"AMI": "ami-951945d0"}})

frontend_ec2_sg = t.add_resource(
    ec2.SecurityGroup(
        "rzienertHttpSecurityGroup",
        VpcId=Ref(vpcid_param),
        GroupDescription="Enable HTTP traffic for frontend class servers",
        SecurityGroupIngress=[ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="80", ToPort="80", CidrIp="0.0.0.0/0")],
    )
)

ec2_instance = t.add_resource(
    ec2.Instance(
        "rzienertEC2Instance",
        ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
        InstanceType="t1.micro",
Example #53
0
def generate_stack_template():
    template = Template()

    generate_description(template)

    generate_version(template)

    # ---Parameters------------------------------------------------------------
    param_vpc_id = Parameter(
        'VpcIdentifer',
        Description='The identity of the VPC (vpc-abcdwxyz) in which this stack shall be created.',
        Type='AWS::EC2::VPC::Id',
    )
    template.add_parameter(param_vpc_id)

    param_vpc_cidr_block = Parameter(
        'VpcCidrBlock',
        Description='The CIDR block of the VPC (w.x.y.z/n) in which this stack shall be created.',
        Type='String',
        Default='10.0.0.0/16'
    )
    template.add_parameter(param_vpc_cidr_block)

    param_database_instance_subnet_id = Parameter(
        'VpcSubnetIdentifer',
        Description='The identity of the private subnet (subnet-abcdwxyz) in which the database server shall be created.',
        Type='AWS::EC2::Subnet::Id',
    )
    template.add_parameter(param_database_instance_subnet_id)

    param_keyname = Parameter(
        'PemKeyName',
        Description='Name of an existing EC2 KeyPair file (.pem) to use to create EC2 instances',
        Type='AWS::EC2::KeyPair::KeyName'
    )
    template.add_parameter(param_keyname)

    param_instance_type = Parameter(
        'EC2InstanceType',
        Description='EC2 instance type, reference this parameter to insure consistency',
        Type='String',
        Default='t2.medium',  # Prices as of 2015-12-03 (Windows, us-west / Northern California)
        AllowedValues=[  # Source :  https://aws.amazon.com/ec2/pricing/
            't2.small',  # $0.044/hour
            't2.micro',  # $0.022/hour
            't2.medium',  # $0.088/hour
            't2.large',  # $0.166/hour
            'm3.medium',  # $0.140/hour
            'm3.large',  # $0.28/hour
            'c4.large'   # $0.221/hour
        ],
        ConstraintDescription='Must be a valid EC2 instance type'
    )
    template.add_parameter(param_instance_type)

    param_s3_bucket = Parameter(
        'S3Bucket',
        Description='The bucket in which applicable content can be found.',
        Type='String',
        Default='author-it-deployment-test-us-east-1'
    )
    template.add_parameter(param_s3_bucket)

    param_s3_key = Parameter(
        'S3Key',
        Description='The key within the bucket in which relevant files are located.',
        Type='String',
        Default='source/database/postgresql/single'
    )
    template.add_parameter(param_s3_key)

    param_database_admin_password = Parameter(
        'PostgresAdminPassword',
        Description='The password to be used by user postgres.',
        Type='String',
        NoEcho=True
    )
    template.add_parameter(param_database_admin_password)

    #---Mappings---------------------------------------------------------------
    mapping_environment_attribute_map = template.add_mapping(
        'EnvironmentAttributeMap',
        {
            'ap-southeast-1': {
                'DatabaseServerAmi': 'ami-1ddc0b7e'
            },
            'ap-southeast-2': {
                'DatabaseServerAmi': 'ami-0c95b86f'
            },
            'us-east-1': {
                'DatabaseServerAmi': 'ami-a4827dc9'
            },
            'us-west-1': {
                'DatabaseServerAmi': 'ami-f5f41398'
            }
        }
    )

    # ---Resources-------------------------------------------------------------
    ref_stack_id = Ref('AWS::StackId')
    ref_region = Ref('AWS::Region')
    ref_stack_name = Ref('AWS::StackName')
    path_database_admin_script = 'usr/ec2-user/postgresql/set_admin_password.sql'
    name_database_server_wait_handle = 'DatabaseServerWaitHandle'

    cmd_postgresql_initdb = dict(
        command='service postgresql-95 initdb'
    )

    cmd_start_postgresql_service = dict(
        command='service postgresql-95 start'
    )

    cmd_set_postgres_user_password = dict(
        command='psql -U postgres -f %s' % path_database_admin_script
    )

    cmd_start_postgresql_on_startup = dict(
        command='chkconfig postgresql on'
    )

    cmd_signal_success = dict(
        command='cfn-signal --exit-code $?'
    )

    # Create an instance of AWS::IAM::Role for the instance.
    # This allows:
    # - Access to S3 bucket content.
    # - Stack updates
    resource_instance_role = template.add_resource(iam.Role(
        'InstanceRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[
                Statement(
                    Action=[AssumeRole],
                    Effect=Allow,
                    Principal=Principal(
                        'Service', ['ec2.amazonaws.com']
                    )
                )
            ]
        ),
        Path='/'
    ))

    # Create the S3 policy and attach it to the role.
    template.add_resource(iam.PolicyType(
        'InstanceS3DownloadPolicy',
        PolicyName='S3Download',
        PolicyDocument={
            'Statement':[
                {
                    'Effect': 'Allow',
                    'Action': ['s3:GetObject'],
                    'Resource': Join('', [
                        'arn:aws:s3:::',
                        Ref(param_s3_bucket),
                        '/*'
                    ])
                },
                {
                    'Effect': 'Allow',
                    'Action': ['cloudformation:DescribeStacks', 'ec2:DescribeInstances'],
                    'Resource': '*'
                }
            ]
        },
        Roles=[Ref(resource_instance_role)]
    ))

    # Create the CloudFormation stack update policy and attach it to the role.
    template.add_resource(iam.PolicyType(
        'InstanceStackUpdatePolicy',
        PolicyName='StackUpdate',
        PolicyDocument={
            'Statement':[
                {
                    "Effect" : "Allow",
                    "Action" : "Update:*",
                    "Resource" : "*"
                }
            ]
        },
        Roles=[Ref(resource_instance_role)]
    ))

    # Create the AWS::IAM::InstanceProfile from the role for reference in the
    # database server instance definition.
    resource_instance_profile = template.add_resource(iam.InstanceProfile(
        'InstanceProfile',
        Path='/',
        Roles=[Ref(resource_instance_role)]
    ))


    # Create a security group for the postgresql instance.
    # This must be internal to the VPC only.
    name_security_group_database = 'VpcDatabaseSecurityGroup'
    resource_database_security_group = ec2.SecurityGroup(
        name_security_group_database,
        GroupDescription=Join(' ', ['Security group for VPC database', Ref(param_vpc_id)]),
        Tags=Tags(Name=name_security_group_database),
        VpcId=Ref(param_vpc_id)
    )
    template.add_resource(resource_database_security_group)

    template.add_output(
        Output(
            'SecurityGroupForDatabase',
            Description='Security group created for database in VPC.',
            Value=Ref(resource_database_security_group)
        )
    )

    # Add ingress rule from VPC to database security group for database traffic.
    database_port = 5432
    ssh_port = 22
    template.add_resource(ec2.SecurityGroupIngress(
        'DatabaseSecurityGroupDatabaseIngress',
        CidrIp=Ref(param_vpc_cidr_block),
        FromPort=str(database_port),
        GroupId=Ref(resource_database_security_group),
        IpProtocol='tcp',
        ToPort=str(database_port)
    ))

    # Add ingress rule from VPC to database security group for ssh traffic.
    ssh_port = 22
    template.add_resource(ec2.SecurityGroupIngress(
        'DatabaseSecurityGroupSshIngress',
        CidrIp=Ref(param_vpc_cidr_block),
        FromPort=str(ssh_port),
        GroupId=Ref(resource_database_security_group),
        IpProtocol='tcp',
        ToPort=str(ssh_port)
    ))

    # Create the metadata for the database instance.
    name_database_server = 'DatabaseServer'
    database_instance_metadata = cloudformation.Metadata(
        cloudformation.Init({
            'config': cloudformation.InitConfig(
                packages={
                    'rpm': {
                        'postgresql': 'https://download.postgresql.org/pub/repos/yum/9.5/redhat/rhel-6-x86_64/pgdg-ami201503-95-9.5-2.noarch.rpm'
                    },
                    'yum': {
                        'postgresql95': [],
                        'postgresql95-libs': [],
                        'postgresql95-server': [],
                        'postgresql95-devel': [],
                        'postgresql95-contrib': [],
                        'postgresql95-docs': []
                    }
                },
                files=cloudformation.InitFiles({
                    # cfn-hup.conf initialization
                    '/etc/cfn/cfn-hup.conf': cloudformation.InitFile(
                        content=Join('',
                        [
                            '[main]\n',
                            'stack=', ref_stack_id, '\n',
                            'region=', ref_region, '\n',
                            'interval=2', '\n',
                            'verbose=true', '\n'

                        ]),
                        mode='000400',
                        owner='root',
                        group='root'
                    ),
                    # cfn-auto-reloader.conf initialization
                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf': cloudformation.InitFile(
                        content=Join('', [
                            '[cfn-auto-reloader-hook]\n',
                            'triggers=post.update\n',
                            'path=Resources.%s.Metadata.AWS::CloudFormation::Init\n' % name_database_server,
                            'action=cfn-init.exe ',
                            ' --verbose '
                            ' --stack ', ref_stack_name,
                            ' --resource %s ' % name_database_server,  # resource that defines the Metadata
                            ' --region ', ref_region, '\n'
                        ]),
                        mode='000400',
                        owner='root',
                        group='root'
                    ),
                    #
                    # pg_hba.conf retrieval from S3
                    '/var/lib/pgsql9/data/pg_hba.conf': cloudformation.InitFile(
                        source=Join('/', [
                            # Join('', ['https://s3-', ref_region, '.', 'amazonaws.com']),
                            'https://s3.amazonaws.com',
                            Ref(param_s3_bucket),
                            Ref(param_s3_key),
                            'conf',
                            'pg_hba.conf'
                        ]),
                        mode='000400',
                        owner='root',
                        group='root'
                    ),
                    # postgresql.conf retrieval from S3
                    '/var/lib/pgsql9/data/postgresql.conf': cloudformation.InitFile(
                        source=Join('/', [
                            #Join('', ['https://s3-', ref_region, '.', 'amazonaws.com']),
                            'https://s3.amazonaws.com',
                            Ref(param_s3_bucket),
                            Ref(param_s3_key),
                            'conf',
                            'postgresql.conf'
                        ]),
                        mode='000400',
                        owner='root',
                        group='root'
                    ),
                    # pg_ident.conf retrieval from S3
                    '/var/lib/pgsql9/data/pg_ident.conf': cloudformation.InitFile(
                        source=Join('/', [
                            #Join('', ['https://s3-', ref_region, '.', 'amazonaws.com']),
                            'https://s3.amazonaws.com',
                            Ref(param_s3_bucket),
                            Ref(param_s3_key),
                            'conf',
                            'pg_ident.conf'
                        ]),
                        mode='000400',
                        owner='root',
                        group='root'
                    ),
                    # script to set postgresql admin password.
                    # (admin user = '******')
                    path_database_admin_script: cloudformation.InitFile(
                        source=Join('', [
                            'ALTER USER postgres WITH PASSWORD ',
                            Ref(param_database_admin_password),
                            ';',
                            '\n'
                        ])
                    )
                }),
                commands={
                    '10-postgresql_initdb': cmd_postgresql_initdb,
                    '20-start_postgresql_service': cmd_start_postgresql_service,
                    '30-set-postgres-user-password': cmd_set_postgres_user_password,
                    '40-start-postgresql-on-startup': cmd_start_postgresql_on_startup,
                    #'99-signal-success': cmd_signal_success
                },
                services=dict(
                    sysvinit=cloudformation.InitServices(
                        {
                            # start cfn-hup service -
                            # required for CloudFormation stack update
                            'cfn-hup': cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                                files=[
                                    '/etc/cfn/cfn-hup.conf',
                                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                                ]
                            ),
                            # start postgresql service
                            'postgresql-9.5': cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True
                            ),
                            # Disable sendmail service - not required.
                            'sendmail': cloudformation.InitService(
                                enabled=False,
                                ensureRunning=False
                            )
                        }
                    )
                )
            )
        }),
        cloudformation.Authentication({
            'S3AccessCredentials': cloudformation.AuthenticationBlock(
                buckets=[Ref(param_s3_bucket)],
                roleName=Ref(resource_instance_role),
                type='S3'
            )
        })
    )


    # Add a wait handle to receive the completion signal.
    #resource_database_server_wait_handle = template.add_resource(
    #    cloudformation.WaitConditionHandle(
    #        name_database_server_wait_handle
    #    )
    # )

    #template.add_resource(
    #    cloudformation.WaitCondition(
    #        'DatabaseServerWaitCondition',
    #        DependsOn=name_database_server,
    #        Handle=Ref(resource_database_server_wait_handle),
    #        Timeout=300,
    #    )
    #)

    resource_database_server = ec2.Instance(
        name_database_server,
        DependsOn=name_security_group_database,
        IamInstanceProfile=Ref(resource_instance_profile),
        Metadata=database_instance_metadata,
        ImageId=FindInMap('EnvironmentAttributeMap', ref_region, 'DatabaseServerAmi'),
        InstanceType=Ref(param_instance_type),
        KeyName=Ref(param_keyname),
        SecurityGroupIds=[Ref(resource_database_security_group)],
        SubnetId=Ref(param_database_instance_subnet_id),
        Tags=Tags(Name=name_database_server, VPC=Ref(param_vpc_id)),
        UserData=Base64(
            Join(
                '',
                [
                    '#!/bin/bash -xe\n',
                    'yum update -y aws-cfn-bootstrap\n',

                    '/opt/aws/bin/cfn-init --verbose ',
                    ' --stack ', ref_stack_name,
                    ' --resource DatabaseServer ',
                    ' --region ', ref_region, '\n',

                    '/opt/aws/bin/cfn-signal --exit-code $? ',
                    ' --stack ', ref_stack_name,
                    ' --resource ',
                    name_database_server,
                    '\n'
                ]
            )
        )
    )
    template.add_resource(resource_database_server)
    template.add_output(
        Output('DatabaseServer',
               Description='PostgreSQL single instance database server',
               Value=Ref(resource_database_server)
        )
    )

    return template
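
generate_stack_template() returns a troposphere Template rather than a JSON
string, so the caller still has to render it. A small sketch, assuming boto3
credentials are configured, of rendering the template and asking
CloudFormation to validate the result; the output file name is arbitrary.

import boto3

template = generate_stack_template()
template_body = template.to_json()

# Keep a rendered copy alongside the code for review.
with open("postgresql-single-instance.json", "w") as handle:
    handle.write(template_body)

# Server-side syntax check; raises botocore.exceptions.ClientError if the
# template is invalid. Bodies over 51,200 bytes must be uploaded to S3 and
# passed via TemplateURL instead.
cloudformation = boto3.client("cloudformation")
response = cloudformation.validate_template(TemplateBody=template_body)
print(response.get("Parameters", []))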
Example #54
0
instance_type = template.add_parameter(Parameter(
    "InstanceType",
    Description = "EC2 instance type to launch for Application servers",
    Type = "String",
    Default = "m1.medium",
    AllowedValues = [ "m1.medium", "m1.large", "m1.xlarge", "m2.xlarge", "m2.2xlarge", "m2.4xlarge", "m3.xlarge", "m3.2xlarge", "c1.medium", "c1.xlarge", "cg1.4xlarge" ],
    ConstraintDescription = "must be a valid EC2 instance type"
))


template.add_mapping('RegionMap', {
    "us-east-1":      {"AMI": "ami-8f4118e6"},
    "us-west-1":      {"AMI": "ami-905761d5"},
    "us-west-2":      {"AMI": "ami-6ebe265e"},
    "eu-west-1":      {"AMI": "ami-eb0ae99c"},
    "sa-east-1":      {"AMI": "ami-1b9a3c06"},
    "ap-southeast-1": {"AMI": "ami-8e4114dc"},
    "ap-southeast-2": {"AMI": "ami-5b5bc761"},
    "ap-northeast-1": {"AMI": "ami-91395c90"}
})

# Create a security group
sg = template.add_resource(ec2.SecurityGroup('AsgardSecurityGroup'))
sg.GroupDescription = 'Access to Asgard Instance'
sg.SecurityGroupIngress = [
    ec2.SecurityGroupRule(
        IpProtocol = 'tcp',
        FromPort = '22',
        ToPort = '22',
        CidrIp = '0.0.0.0/0'
    ),
Example #55
0
File: stack.py  Project: FuncPro/funcpro
from troposphere import Template, Ref, Output, Parameter, Join, GetAtt, FindInMap
from troposphere.route53 import RecordSetType, RecordSet, RecordSetGroup, AliasTarget
from troposphere.s3 import Bucket, PublicRead, BucketOwnerFullControl, WebsiteConfiguration, RedirectAllRequestsTo

import boto3
import botocore


t = Template()


t.add_mapping("RegionMap", {
            "us-east-1" : { "S3hostedzoneID" : "Z3AQBSTGFYJSTF", "websiteendpoint" : "s3-website-us-east-1.amazonaws.com" },
            "us-west-1" : { "S3hostedzoneID" : "Z2F56UZL2M1ACD", "websiteendpoint" : "s3-website-us-west-1.amazonaws.com" },
            "us-west-2" : { "S3hostedzoneID" : "Z3BJ6K6RIION7M", "websiteendpoint" : "s3-website-us-west-2.amazonaws.com" },            
            "eu-west-1" : { "S3hostedzoneID" : "Z1BKCTXD74EZPE", "websiteendpoint" : "s3-website-eu-west-1.amazonaws.com" },
            "ap-southeast-1" : { "S3hostedzoneID" : "Z3O0J2DXBE1FTB", "websiteendpoint" : "s3-website-ap-southeast-1.amazonaws.com" },
            "ap-southeast-2" : { "S3hostedzoneID" : "Z1WCIGYICN2BYD", "websiteendpoint" : "s3-website-ap-southeast-2.amazonaws.com" },
            "ap-northeast-1" : { "S3hostedzoneID" : "Z2M4EHUR26P7ZW", "websiteendpoint" : "s3-website-ap-northeast-1.amazonaws.com" },
            "sa-east-1" : { "S3hostedzoneID" : "Z31GFT0UA1I2HV", "websiteendpoint" : "s3-website-sa-east-1.amazonaws.com" }
        })


hostedzone = t.add_parameter(Parameter(
    "HostedZone",
    Description="The DNS name of an existing Amazon Route 53 hosted zone",
    Type="String",
))


root_bucket = t.add_resource(
    Bucket("RootBucket",
t.add_mapping(
    "Region2Principal",
    {
        "ap-northeast-1": {
            "EC2Principal": "ec2.amazonaws.com",
            "OpsWorksPrincipal": "opsworks.amazonaws.com",
        },
        "ap-southeast-1": {
            "EC2Principal": "ec2.amazonaws.com",
            "OpsWorksPrincipal": "opsworks.amazonaws.com",
        },
        "ap-southeast-2": {
            "EC2Principal": "ec2.amazonaws.com",
            "OpsWorksPrincipal": "opsworks.amazonaws.com",
        },
        "cn-north-1": {
            "EC2Principal": "ec2.amazonaws.com.cn",
            "OpsWorksPrincipal": "opsworks.amazonaws.com.cn",
        },
        "eu-central-1": {
            "EC2Principal": "ec2.amazonaws.com",
            "OpsWorksPrincipal": "opsworks.amazonaws.com",
        },
        "eu-west-1": {
            "EC2Principal": "ec2.amazonaws.com",
            "OpsWorksPrincipal": "opsworks.amazonaws.com",
        },
        "sa-east-1": {
            "EC2Principal": "ec2.amazonaws.com",
            "OpsWorksPrincipal": "opsworks.amazonaws.com",
        },
        "us-east-1": {
            "EC2Principal": "ec2.amazonaws.com",
            "OpsWorksPrincipal": "opsworks.amazonaws.com",
        },
        "us-west-1": {
            "EC2Principal": "ec2.amazonaws.com",
            "OpsWorksPrincipal": "opsworks.amazonaws.com",
        },
        "us-west-2": {
            "EC2Principal": "ec2.amazonaws.com",
            "OpsWorksPrincipal": "opsworks.amazonaws.com",
        },
    },
)
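
A short sketch of how a Region2Principal mapping is typically consumed,
assuming t is the same Template object the mapping above was added to: the
service principal in an assume-role policy then resolves to
ec2.amazonaws.com.cn in cn-north-1 and ec2.amazonaws.com elsewhere. The
logical name and inline policy document are illustrative, not taken from the
original stack.

from troposphere import FindInMap, Ref
import troposphere.iam as iam

instance_role = t.add_resource(iam.Role(
    "RegionAwareInstanceRole",  # hypothetical logical name
    Path="/",
    AssumeRolePolicyDocument={
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Action": ["sts:AssumeRole"],
            # Resolved per region from the mapping defined above.
            "Principal": {"Service": [FindInMap(
                "Region2Principal", Ref("AWS::Region"), "EC2Principal")]},
        }],
    },
))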