def test_add_or_get_returns_with_out_adding_duplicate(self):
    """get_or_add_parameter on an already-added parameter returns the
    existing object without adding a duplicate entry."""
    t = Template()
    p = Parameter("param", Type="String", Default="foo")
    t.add_parameter(p)

    result = t.get_or_add_parameter(p)

    # assertEquals is a deprecated alias (removed in Python 3.12);
    # use the canonical assertEqual.
    self.assertEqual(t.parameters["param"], p)
    self.assertEqual(result, p)
    self.assertEqual(len(t.parameters), 1)
def template():
    """Assemble a Template from the module-level ``parameters``,
    ``conditions`` and ``resources`` mappings and return it."""
    tpl = Template()
    for param in parameters.values():
        tpl.add_parameter(param)
    for name, condition in conditions.items():
        tpl.add_condition(name, condition)
    for resource in resources.values():
        tpl.add_resource(resource)
    return tpl
def test_parameter_label_replace(self):
    """Setting a label twice for the same parameter keeps only the
    most recent label in the template metadata."""
    t = Template()
    first = t.add_parameter(Parameter("Foo"))
    t.add_parameter(Parameter("Bar"))

    # Label via the Parameter object first, then overwrite via its title.
    t.set_parameter_label(first, "Foo label")
    t.set_parameter_label("Foo", "Bar label")

    expected = {
        "AWS::CloudFormation::Interface": {
            "ParameterLabels": {
                "Foo": {"default": "Bar label"},
            },
        },
    }
    self.assertEqual(t.metadata, expected)
def sceptre_handler(sceptre_user_data):
    """Sceptre entry point: build a VPC with an attached internet
    gateway and return the rendered template as a JSON string."""
    template = Template()

    cidr = template.add_parameter(Parameter(
        "CidrBlock",
        Type="String",
        Default="10.0.0.0/16",
    ))

    # DNS support and hostnames enabled so instances get resolvable names.
    vpc = template.add_resource(VPC(
        "VirtualPrivateCloud",
        CidrBlock=Ref(cidr),
        InstanceTenancy="default",
        EnableDnsSupport=True,
        EnableDnsHostnames=True,
    ))

    gateway = template.add_resource(InternetGateway(
        "InternetGateway",
    ))
    template.add_resource(VPCGatewayAttachment(
        "IGWAttachment",
        VpcId=Ref(vpc),
        InternetGatewayId=Ref(gateway),
    ))

    template.add_output(Output(
        "VpcId",
        Description="New VPC ID",
        Value=Ref(vpc),
    ))

    return template.to_json()
def output_template(self):
    """Assemble a template from this object's parameters, mappings,
    resources and outputs, then print it as JSON."""
    template = Template()
    for parameter in self.parameters:
        template.add_parameter(parameter)
    for mapping in self.mappings:
        # Each mapping is a (name, mapping_dict) pair.
        template.add_mapping(mapping[0], mapping[1])
    for resource in self.resources:
        template.add_resource(resource)
    for output in self.outputs:
        template.add_output(output)
    # Call form works on both Python 2 and 3; the original used the
    # Python 2 `print` statement, which is a SyntaxError under Python 3.
    print(template.to_json())
def render(context):
    """Render the EC2/RDS/DNS CloudFormation template for *context*
    and return it as a JSON string."""
    secgroup = ec2_security(context)
    instance = ec2instance(context)

    template = Template()
    template.add_resource(secgroup)
    template.add_resource(instance)
    template.add_parameter(Parameter(KEYPAIR, **{
        "Type": "String",
        "Description": "EC2 KeyPair that enables SSH access to this instance",
    }))

    cfn_outputs = outputs()

    # dict.has_key() was removed in Python 3; `in` works on both versions.
    if 'rds' in context['project']['aws']:
        # map() is lazy on Python 3, so the original
        # map(template.add_resource, ...) silently added nothing;
        # use an explicit loop for the side effects.
        for resource in rdsinstance(context):
            template.add_resource(resource)
        cfn_outputs.extend([
            mkoutput("RDSHost", "Connection endpoint for the DB cluster",
                     (RDS_TITLE, "Endpoint.Address")),
            mkoutput("RDSPort",
                     "The port number on which the database accepts connections",
                     (RDS_TITLE, "Endpoint.Port")),
        ])

    if 'ext' in context['project']['aws']:
        for resource in ext_volume(context):
            template.add_resource(resource)

    if context['hostname']:  # None if one couldn't be generated
        template.add_resource(external_dns(context))
        template.add_resource(internal_dns(context))
        cfn_outputs.extend([
            mkoutput("DomainName",
                     "Domain name of the newly created EC2 instance",
                     Ref(R53_EXT_TITLE)),
            mkoutput("IntDomainName",
                     "Domain name of the newly created EC2 instance",
                     Ref(R53_INT_TITLE)),
        ])

    for output in cfn_outputs:
        template.add_output(output)
    return template.to_json()
def test_parameter_group(self):
    """Parameters can be added to a group either by object or by title."""
    t = Template()
    foo = t.add_parameter(Parameter("Foo"))
    t.add_parameter(Parameter("Bar"))

    t.add_parameter_to_group(foo, "gr")
    t.add_parameter_to_group("Bar", "gr")

    expected = {
        "AWS::CloudFormation::Interface": {
            "ParameterGroups": [
                {
                    "Label": {"default": "gr"},
                    "Parameters": ["Foo", "Bar"],
                },
            ],
        },
    }
    self.assertEqual(t.metadata, expected)
def InstanceVolumeTemplate(self):
    """Create a stack of two t1.micro instances, each with an attached
    8 GiB EBS volume, wait for CREATE_COMPLETE, then delete the stack."""
    self.stack_name = "volumeTest{0}".format(int(time.time()))
    template = Template()
    keyname_param = template.add_parameter(Parameter(
        "KeyName",
        Description="Name of an existing EC2 KeyPair "
                    "to enable SSH access to the instance",
        Type="String",))
    # NOTE(review): the mapping key is the empty string while FindInMap is
    # queried with Ref("AWS::Region") below -- confirm this is intended.
    template.add_mapping('RegionMap', {"": {"AMI": self.tester.get_emi().id}})
    # range() instead of the Python-2-only xrange(); identical behavior here.
    for i in range(2):
        template.add_resource(ec2.Instance(
            "Instance{0}".format(i),
            ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
            InstanceType="t1.micro",
            KeyName=Ref(keyname_param),
            SecurityGroups=[self.group.name],
            UserData=Base64("80")))
        template.add_resource(ec2.Volume(
            "Volume{0}".format(i),
            Size="8",
            AvailabilityZone=GetAtt("Instance{0}".format(i),
                                    "AvailabilityZone")))
        template.add_resource(ec2.VolumeAttachment(
            "MountPt{0}".format(i),
            InstanceId=Ref("Instance{0}".format(i)),
            VolumeId=Ref("Volume{0}".format(i)),
            Device="/dev/vdc"))
    self.tester.create_stack(self.stack_name, template.to_json(),
                             parameters=[("KeyName", self.keypair.name)])

    def stack_completed():
        # Poll helper: true once CloudFormation reports the stack created.
        return self.tester.cloudformation.describe_stacks(
            self.stack_name).status == "CREATE_COMPLETE"

    self.tester.wait_for_result(stack_completed, True, timeout=600)
    self.tester.delete_stack(self.stack_name)
def GenerateGlobalLayer():
    """Build the global-layer template: a DynamoDB crontab table, the
    stack-name parameter, and an output exposing the table name."""
    template = Template()
    template.add_description("""\
Global Layer
""")
    template.add_parameter(Parameter(
        "StackName",
        Description="Environment Name (default: StepGlobals)",
        Type="String",
        Default="StepGlobals",
    ))
    # Schedule table keyed solely on the task name.
    table = template.add_resource(dynamodb.Table(
        "scheduleTable",
        AttributeDefinitions=[dynamodb.AttributeDefinition("taskname", "S")],
        KeySchema=[dynamodb.Key("taskname", "HASH")],
        ProvisionedThroughput=dynamodb.ProvisionedThroughput(1, 1),
    ))
    template.add_output([
        Output(
            "crontabtablename",
            Description="Crontab Table Name",
            Value=Ref(table),
        ),
    ])
    return template
from troposphere.redshift import AmazonRedshiftParameter, ClusterSubnetGroup
from troposphere.ec2 import VPC, Subnet, InternetGateway, VPCGatewayAttachment
from troposphere.ec2 import SecurityGroup, SecurityGroupIngress

t = Template()

t.add_version("2010-09-09")

t.add_description(
    "AWS CloudFormation Sample Template: Redshift cluster in a VPC")

dbname = t.add_parameter(Parameter(
    "DatabaseName",
    Description="The name of the first database to be created when the "
                "redshift cluster is created",
    Type="String",
    Default="defaultdb",
    AllowedPattern="([a-z]|[0-9])+",
))

clustertype = t.add_parameter(Parameter(
    "ClusterType",
    Description="The type of the cluster",
    Type="String",
    Default="single-node",
    # "multi-mode" was a typo: Redshift cluster types are "single-node"
    # and "multi-node", so the multi-node option was unselectable.
    AllowedValues=[
        "single-node",
        "multi-node"
    ],
))
import template_utils as utils import troposphere.autoscaling as asg import troposphere.cloudwatch as cw import troposphere.elasticloadbalancing as elb t = Template() t.add_version('2010-09-09') t.add_description('An application server stack for the nyc-trees project.') # # Parameters # color_param = t.add_parameter(Parameter( 'StackColor', Type='String', Description='Stack color', AllowedValues=['Blue', 'Green'], ConstraintDescription='must be either Blue or Green' )) vpc_param = t.add_parameter(Parameter( 'VpcId', Type='String', Description='Name of an existing VPC' )) keyname_param = t.add_parameter(Parameter( 'KeyName', Type='String', Default='nyc-trees-stg', Description='Name of an existing EC2 key pair' )) notification_arn_param = t.add_parameter(Parameter( 'GlobalNotificationsARN', Type='String', Description='Physical resource ID of an AWS::SNS::Topic for notifications'
LaunchConfiguration, ScalingPolicy, ) from troposphere.cloudwatch import ( Alarm, MetricDimension, ) from awacs.sts import AssumeRole t = Template() t.add_description('ElasticSearch Template') t.add_parameter(Parameter("VpcId", Type="AWS::EC2::VPC::Id", Description="VPC")) t.add_parameter( Parameter("PrivateSubnet", Description="PrivateSubnet", Type="List<AWS::EC2::Subnet::Id>", ConstraintDescription="PrivateSubnet")) t.add_parameter( Parameter( "InstanceType", Type="String", Description="instance type", Default="t2.small.elasticsearch", AllowedValues=[ "t2.small.elasticsearch",
ref_java_version = Ref('JavaVersion') # now the work begins t = Template() t.add_version("2010-09-09") t.add_description("""\ CloudFormation template to Deploy Hortonworks Data Platform on VPC with a public subnet""") ## Parameters AmbariInstanceType = t.add_parameter(Parameter( "AmbariInstanceType", Default="r3.2xlarge", ConstraintDescription="Must be a valid EC2 instance type.", Type="String", Description="Instance type for Ambari node", )) JavaProvider = t.add_parameter(Parameter( "JavaProvider", Default="open", Type="String", Description="Provider of Java packages: open or oracle", AllowedValues=['open','oracle'], ConstraintDescription="open or oracle", )) JavaVersion = t.add_parameter(Parameter( "JavaVersion",
from troposphere import Template, Parameter, Ref, Equals from troposphere import If, Output, Join, GetAtt from troposphere.redshift import Cluster, ClusterParameterGroup from troposphere.redshift import AmazonRedshiftParameter t = Template() t.add_version("2010-09-09") t.set_description("AWS CloudFormation Sample Template: Redshift cluster") dbname = t.add_parameter( Parameter( "DatabaseName", Description="The name of the first database to be created when the " "redshift cluster is created", Type="String", Default="defaultdb", AllowedPattern="([a-z]|[0-9])+", )) clustertype = t.add_parameter( Parameter( "ClusterType", Description="The type of the cluster", Type="String", Default="single-node", AllowedValues=["single-node", "multi-node"], )) numberofnodes = t.add_parameter(
from troposphere.elasticloadbalancing import LoadBalancer, HealthCheck from troposphere.opsworks import App, ElasticLoadBalancerAttachment, Instance from troposphere.opsworks import Layer, Stack from troposphere.opsworks import Source, Recipes, VolumeConfiguration template = Template() template.add_version("2010-09-09") ServiceRole = template.add_parameter(Parameter( "ServiceRole", Default="aws-opsworks-service-role", Description="The OpsWorks service role", Type="String", MinLength="1", MaxLength="64", AllowedPattern="[a-zA-Z][a-zA-Z0-9-]*", ConstraintDescription="must begin with a letter and contain only " + "alphanumeric characters.", )) InstanceRole = template.add_parameter(Parameter( "InstanceRole", Default="aws-opsworks-ec2-role", Description="The OpsWorks instance role", Type="String", MinLength="1", MaxLength="64", AllowedPattern="[a-zA-Z][a-zA-Z0-9-]*", ConstraintDescription="must begin with a letter and contain only " +
ref_region = Ref('AWS::Region') ref_stack_name = Ref('AWS::StackName') ref_ambariserver = GetAtt('AmbariNode', 'PrivateDnsName') ref_java_provider = Ref('JavaProvider') t = Template() t.add_version("2010-09-09") t.add_description("""\ CloudFormation template to Deploy Hortonworks Data Platform on VPC with a public subnet""") AmbariInstanceType = t.add_parameter(Parameter( "AmbariInstanceType", Default="m3.large", ConstraintDescription="Must be a valid EC2 instance type.", Type="String", Description="Instance type for Ambari node", )) WorkerInstanceCount = t.add_parameter(Parameter( "WorkerInstanceCount", Default="2", Type="Number", Description="Number of Worker instances", MaxValue="99", MinValue="1", )) JavaProvider = t.add_parameter(Parameter( "JavaProvider",
def main(): """ Create a ElastiCache Redis Node and EC2 Instance """ template = Template() # Description template.add_description( 'AWS CloudFormation Sample Template ElastiCache_Redis:' 'Sample template showing how to create an Amazon' 'ElastiCache Redis Cluster. **WARNING** This template' 'creates an Amazon EC2 Instance and an Amazon ElastiCache' 'Cluster. You will be billed for the AWS resources used' 'if you create a stack from this template.') # Mappings template.add_mapping('AWSInstanceType2Arch', { 't1.micro': {'Arch': 'PV64'}, 't2.micro': {'Arch': 'HVM64'}, 't2.small': {'Arch': 'HVM64'}, 't2.medium': {'Arch': 'HVM64'}, 'm1.small': {'Arch': 'PV64'}, 'm1.medium': {'Arch': 'PV64'}, 'm1.large': {'Arch': 'PV64'}, 'm1.xlarge': {'Arch': 'PV64'}, 'm2.xlarge': {'Arch': 'PV64'}, 'm2.2xlarge': {'Arch': 'PV64'}, 'm2.4xlarge': {'Arch': 'PV64'}, 'm3.medium': {'Arch': 'HVM64'}, 'm3.large': {'Arch': 'HVM64'}, 'm3.xlarge': {'Arch': 'HVM64'}, 'm3.2xlarge': {'Arch': 'HVM64'}, 'c1.medium': {'Arch': 'PV64'}, 'c1.xlarge': {'Arch': 'PV64'}, 'c3.large': {'Arch': 'HVM64'}, 'c3.xlarge': {'Arch': 'HVM64'}, 'c3.2xlarge': {'Arch': 'HVM64'}, 'c3.4xlarge': {'Arch': 'HVM64'}, 'c3.8xlarge': {'Arch': 'HVM64'}, 'c4.large': {'Arch': 'HVM64'}, 'c4.xlarge': {'Arch': 'HVM64'}, 'c4.2xlarge': {'Arch': 'HVM64'}, 'c4.4xlarge': {'Arch': 'HVM64'}, 'c4.8xlarge': {'Arch': 'HVM64'}, 'g2.2xlarge': {'Arch': 'HVMG2'}, 'r3.large': {'Arch': 'HVM64'}, 'r3.xlarge': {'Arch': 'HVM64'}, 'r3.2xlarge': {'Arch': 'HVM64'}, 'r3.4xlarge': {'Arch': 'HVM64'}, 'r3.8xlarge': {'Arch': 'HVM64'}, 'i2.xlarge': {'Arch': 'HVM64'}, 'i2.2xlarge': {'Arch': 'HVM64'}, 'i2.4xlarge': {'Arch': 'HVM64'}, 'i2.8xlarge': {'Arch': 'HVM64'}, 'd2.xlarge': {'Arch': 'HVM64'}, 'd2.2xlarge': {'Arch': 'HVM64'}, 'd2.4xlarge': {'Arch': 'HVM64'}, 'd2.8xlarge': {'Arch': 'HVM64'}, 'hi1.4xlarge': {'Arch': 'HVM64'}, 'hs1.8xlarge': {'Arch': 'HVM64'}, 'cr1.8xlarge': {'Arch': 'HVM64'}, 'cc2.8xlarge': {'Arch': 'HVM64'} }) 
template.add_mapping('AWSRegionArch2AMI', { 'us-east-1': {'PV64': 'ami-0f4cfd64', 'HVM64': 'ami-0d4cfd66', 'HVMG2': 'ami-5b05ba30'}, 'us-west-2': {'PV64': 'ami-d3c5d1e3', 'HVM64': 'ami-d5c5d1e5', 'HVMG2': 'ami-a9d6c099'}, 'us-west-1': {'PV64': 'ami-85ea13c1', 'HVM64': 'ami-87ea13c3', 'HVMG2': 'ami-37827a73'}, 'eu-west-1': {'PV64': 'ami-d6d18ea1', 'HVM64': 'ami-e4d18e93', 'HVMG2': 'ami-72a9f105'}, 'eu-central-1': {'PV64': 'ami-a4b0b7b9', 'HVM64': 'ami-a6b0b7bb', 'HVMG2': 'ami-a6c9cfbb'}, 'ap-northeast-1': {'PV64': 'ami-1a1b9f1a', 'HVM64': 'ami-1c1b9f1c', 'HVMG2': 'ami-f644c4f6'}, 'ap-southeast-1': {'PV64': 'ami-d24b4280', 'HVM64': 'ami-d44b4286', 'HVMG2': 'ami-12b5bc40'}, 'ap-southeast-2': {'PV64': 'ami-ef7b39d5', 'HVM64': 'ami-db7b39e1', 'HVMG2': 'ami-b3337e89'}, 'sa-east-1': {'PV64': 'ami-5b098146', 'HVM64': 'ami-55098148', 'HVMG2': 'NOT_SUPPORTED'}, 'cn-north-1': {'PV64': 'ami-bec45887', 'HVM64': 'ami-bcc45885', 'HVMG2': 'NOT_SUPPORTED'} }) template.add_mapping('Region2Principal', { 'us-east-1': {'EC2Principal': 'ec2.amazonaws.com', 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 'us-west-2': {'EC2Principal': 'ec2.amazonaws.com', 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 'us-west-1': {'EC2Principal': 'ec2.amazonaws.com', 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 'eu-west-1': {'EC2Principal': 'ec2.amazonaws.com', 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 'ap-southeast-1': {'EC2Principal': 'ec2.amazonaws.com', 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 'ap-northeast-1': {'EC2Principal': 'ec2.amazonaws.com', 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 'ap-southeast-2': {'EC2Principal': 'ec2.amazonaws.com', 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 'sa-east-1': {'EC2Principal': 'ec2.amazonaws.com', 'OpsWorksPrincipal': 'opsworks.amazonaws.com'}, 'cn-north-1': {'EC2Principal': 'ec2.amazonaws.com.cn', 'OpsWorksPrincipal': 'opsworks.amazonaws.com.cn'}, 'eu-central-1': {'EC2Principal': 'ec2.amazonaws.com', 'OpsWorksPrincipal': 
'opsworks.amazonaws.com'} }) # Parameters cachenodetype = template.add_parameter(Parameter( 'ClusterNodeType', Description='The compute and memory capacity of the nodes in the Redis' ' Cluster', Type='String', Default='cache.m1.small', AllowedValues=['cache.m1.small', 'cache.m1.large', 'cache.m1.xlarge', 'cache.m2.xlarge', 'cache.m2.2xlarge', 'cache.m2.4xlarge', 'cache.c1.xlarge'], ConstraintDescription='must select a valid Cache Node type.', )) instancetype = template.add_parameter(Parameter( 'InstanceType', Description='WebServer EC2 instance type', Type='String', Default='t2.micro', AllowedValues=['t1.micro', 't2.micro', 't2.small', 't2.medium', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'c1.medium', 'c1.xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'g2.2xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge', 'hi1.4xlarge', 'hs1.8xlarge', 'cr1.8xlarge', 'cc2.8xlarge', 'cg1.4xlarge'], ConstraintDescription='must be a valid EC2 instance type.', )) keyname = template.add_parameter(Parameter( 'KeyName', Description='Name of an existing EC2 KeyPair to enable SSH access' ' to the instance', Type='AWS::EC2::KeyPair::KeyName', ConstraintDescription='must be the name of an existing EC2 KeyPair.', )) sshlocation = template.add_parameter(Parameter( 'SSHLocation', Description='The IP address range that can be used to SSH to' ' the EC2 instances', Type='String', MinLength='9', MaxLength='18', Default='0.0.0.0/0', AllowedPattern='(\\d{1,3})\\.(\\d{1,3})\\.' '(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})', ConstraintDescription='must be a valid IP CIDR range of the' ' form x.x.x.x/x.' 
)) # Resources webserverrole = template.add_resource(iam.Role( 'WebServerRole', AssumeRolePolicyDocument=Policy( Statement=[ Statement( Effect=Allow, Action=[AssumeRole], Principal=Principal('Service', [FindInMap('Region2Principal', Ref('AWS::Region'), 'EC2Principal')]), ) ] ), Path='/', )) template.add_resource(iam.PolicyType( 'WebServerRolePolicy', PolicyName='WebServerRole', PolicyDocument=awacs.aws.Policy( Statement=[awacs.aws.Statement( Action=[awacs.aws.Action("elasticache", "DescribeCacheClusters")], Resource=["*"], Effect=awacs.aws.Allow )] ), Roles=[Ref(webserverrole)], )) webserverinstanceprofile = template.add_resource(iam.InstanceProfile( 'WebServerInstanceProfile', Path='/', Roles=[Ref(webserverrole)], )) webserversg = template.add_resource(ec2.SecurityGroup( 'WebServerSecurityGroup', GroupDescription='Enable HTTP and SSH access', SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol='tcp', FromPort='22', ToPort='22', CidrIp=Ref(sshlocation), ), ec2.SecurityGroupRule( IpProtocol='tcp', FromPort='80', ToPort='80', CidrIp='0.0.0.0/0', ) ] )) webserverinstance = template.add_resource(ec2.Instance( 'WebServerInstance', Metadata=cloudformation.Metadata( cloudformation.Init({ 'config': cloudformation.InitConfig( packages={ 'yum': { 'httpd': [], 'php': [], 'php-devel': [], 'gcc': [], 'make': [] } }, files=cloudformation.InitFiles({ '/var/www/html/index.php': cloudformation.InitFile( content=Join('', [ '<?php\n', 'echo \"<h1>AWS CloudFormation sample' ' application for Amazon ElastiCache' ' Redis Cluster</h1>\";\n', '\n', '$cluster_config = json_decode(' 'file_get_contents(\'/tmp/cacheclusterconfig\'' '), true);\n', '$endpoint = $cluster_config[\'CacheClusters' '\'][0][\'CacheNodes\'][0][\'Endpoint\'][\'Add' 'ress\'];\n', '$port = $cluster_config[\'CacheClusters\'][0]' '[\'CacheNodes\'][0][\'Endpoint\'][\'Port\'];' '\n', '\n', 'echo \"<p>Connecting to Redis Cache Cluster ' 'node \'{$endpoint}\' on port {$port}</p>\";' '\n', '\n', '$redis=new Redis();\n', 
'$redis->connect($endpoint, $port);\n', '$redis->set(\'testkey\', \'Hello World!\');' '\n', '$return = $redis->get(\'testkey\');\n', '\n', 'echo \"<p>Retrieved value: $return</p>\";' '\n', '?>\n' ]), mode='000644', owner='apache', group='apache' ), '/etc/cron.d/get_cluster_config': cloudformation.InitFile( content='*/5 * * * * root' ' /usr/local/bin/get_cluster_config', mode='000644', owner='root', group='root' ), '/usr/local/bin/get_cluster_config': cloudformation.InitFile( content=Join('', [ '#! /bin/bash\n', 'aws elasticache describe-cache-clusters ', ' --cache-cluster-id ', Ref('RedisCluster'), ' --show-cache-node-info' ' --region ', Ref('AWS::Region'), ' > /tmp/cacheclusterconfig\n' ]), mode='000755', owner='root', group='root' ), '/usr/local/bin/install_phpredis': cloudformation.InitFile( content=Join('', [ '#! /bin/bash\n', 'cd /tmp\n', 'wget https://github.com/nicolasff/' 'phpredis/zipball/master -O phpredis.zip' '\n', 'unzip phpredis.zip\n', 'cd nicolasff-phpredis-*\n', 'phpize\n', './configure\n', 'make && make install\n', 'touch /etc/php.d/redis.ini\n', 'echo extension=redis.so > /etc/php.d/' 'redis.ini\n' ]), mode='000755', owner='root', group='root' ), '/etc/cfn/cfn-hup.conf': cloudformation.InitFile( content=Join('', [ '[main]\n', 'stack=', Ref('AWS::StackId'), '\n', 'region=', Ref('AWS::Region'), '\n' ]), mode='000400', owner='root', group='root' ), '/etc/cfn/hooks.d/cfn-auto-reloader.conf': cloudformation.InitFile( content=Join('', [ '[cfn-auto-reloader-hook]\n', 'triggers=post.update\n', 'path=Resources.WebServerInstance.Metadata' '.AWS::CloudFormation::Init\n', 'action=/opt/aws/bin/cfn-init -v ', ' --stack ', Ref('AWS::StackName'), ' --resource WebServerInstance ', ' --region ', Ref('AWS::Region'), '\n', 'runas=root\n' ]), # Why doesn't the Amazon template have this? 
# mode='000400', # owner='root', # group='root' ), }), commands={ '01-install_phpredis': { 'command': '/usr/local/bin/install_phpredis' }, '02-get-cluster-config': { 'command': '/usr/local/bin/get_cluster_config' } }, services={ "sysvinit": cloudformation.InitServices({ "httpd": cloudformation.InitService( enabled=True, ensureRunning=True, ), "cfn-hup": cloudformation.InitService( enabled=True, ensureRunning=True, files=['/etc/cfn/cfn-hup.conf', '/etc/cfn/hooks.d/' 'cfn-auto-reloader.conf'] ), }), }, ) }) ), ImageId=FindInMap('AWSRegionArch2AMI', Ref('AWS::Region'), FindInMap('AWSInstanceType2Arch', Ref(instancetype), 'Arch')), InstanceType=Ref(instancetype), SecurityGroups=[Ref(webserversg)], KeyName=Ref(keyname), IamInstanceProfile=Ref(webserverinstanceprofile), UserData=Base64(Join('', [ '#!/bin/bash -xe\n', 'yum update -y aws-cfn-bootstrap\n', '# Setup the PHP sample application\n', '/opt/aws/bin/cfn-init -v ', ' --stack ', Ref('AWS::StackName'), ' --resource WebServerInstance ', ' --region ', Ref('AWS::Region'), '\n', '# Signal the status of cfn-init\n', '/opt/aws/bin/cfn-signal -e $? 
', ' --stack ', Ref('AWS::StackName'), ' --resource WebServerInstance ', ' --region ', Ref('AWS::Region'), '\n' ])), CreationPolicy=CreationPolicy( ResourceSignal=ResourceSignal(Timeout='PT15M') ), Tags=Tags(Application=Ref('AWS::StackId'), Details='Created using Troposhpere') )) redisclustersg = template.add_resource(elasticache.SecurityGroup( 'RedisClusterSecurityGroup', Description='Lock the cluster down', )) template.add_resource(elasticache.SecurityGroupIngress( 'RedisClusterSecurityGroupIngress', CacheSecurityGroupName=Ref(redisclustersg), EC2SecurityGroupName=Ref(webserversg), )) template.add_resource(elasticache.CacheCluster( 'RedisCluster', Engine='redis', CacheNodeType=Ref(cachenodetype), NumCacheNodes='1', CacheSecurityGroupNames=[Ref(redisclustersg)], )) # Outputs template.add_output([ Output( 'WebsiteURL', Description='Application URL', Value=Join('', [ 'http://', GetAtt(webserverinstance, 'PublicDnsName'), ]) ) ]) # Print CloudFormation Template print(template.to_json())
def main():
    """Build and print an ELB sample template: two EC2 instances
    (web + api) behind an internet-facing application load balancer,
    with /api/* traffic routed to the api instance."""
    template = Template()
    template.set_version("2010-09-09")
    template.set_description(
        "AWS CloudFormation Sample Template: ELB with 2 EC2 instances"
    )
    AddAMI(template)

    # Add the Parameters
    keyname_param = template.add_parameter(
        Parameter(
            "KeyName",
            Type="String",
            Default="mark",
            Description="Name of an existing EC2 KeyPair to "
            "enable SSH access to the instance",
        )
    )
    template.add_parameter(
        Parameter(
            "InstanceType",
            Type="String",
            Description="WebServer EC2 instance type",
            Default="m1.small",
            AllowedValues=[
                "t1.micro",
                "m1.small",
                "m1.medium",
                "m1.large",
                "m1.xlarge",
                "m2.xlarge",
                "m2.2xlarge",
                "m2.4xlarge",
                "c1.medium",
                "c1.xlarge",
                "cc1.4xlarge",
                "cc2.8xlarge",
                "cg1.4xlarge",
            ],
            ConstraintDescription="must be a valid EC2 instance type.",
        )
    )
    # TCP ports the two backend servers listen on.
    webport_param = template.add_parameter(
        Parameter(
            "WebServerPort",
            Type="String",
            Default="8888",
            Description="TCP/IP port of the web server",
        )
    )
    apiport_param = template.add_parameter(
        Parameter(
            "ApiServerPort",
            Type="String",
            Default="8889",
            Description="TCP/IP port of the api server",
        )
    )
    # NOTE(review): hard-coded subnet/VPC defaults are account-specific.
    subnetA = template.add_parameter(
        Parameter("subnetA", Type="String", Default="subnet-096fd06d")
    )
    subnetB = template.add_parameter(
        Parameter("subnetB", Type="String", Default="subnet-1313ef4b")
    )
    VpcId = template.add_parameter(
        Parameter("VpcId", Type="String", Default="vpc-82c514e6")
    )

    # Define the instance security group: SSH plus both app ports,
    # all open to the world.
    instance_sg = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription="Enable SSH and HTTP access on the inbound port",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=Ref(webport_param),
                    ToPort=Ref(webport_param),
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=Ref(apiport_param),
                    ToPort=Ref(apiport_param),
                    CidrIp="0.0.0.0/0",
                ),
            ],
        )
    )

    # Add the web server instance; UserData carries its port number.
    WebInstance = template.add_resource(
        ec2.Instance(
            "WebInstance",
            SecurityGroups=[Ref(instance_sg)],
            KeyName=Ref(keyname_param),
            InstanceType=Ref("InstanceType"),
            ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
            UserData=Base64(Ref(webport_param)),
        )
    )

    # Add the api server instance
    ApiInstance = template.add_resource(
        ec2.Instance(
            "ApiInstance",
            SecurityGroups=[Ref(instance_sg)],
            KeyName=Ref(keyname_param),
            InstanceType=Ref("InstanceType"),
            ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
            UserData=Base64(Ref(apiport_param)),
        )
    )

    # Add the application ELB
    ApplicationElasticLB = template.add_resource(
        elb.LoadBalancer(
            "ApplicationElasticLB",
            Name="ApplicationElasticLB",
            Scheme="internet-facing",
            Subnets=[Ref(subnetA), Ref(subnetB)],
        )
    )

    # Target group for the web instance (receives default traffic).
    TargetGroupWeb = template.add_resource(
        elb.TargetGroup(
            "TargetGroupWeb",
            HealthCheckIntervalSeconds="30",
            HealthCheckProtocol="HTTP",
            HealthCheckTimeoutSeconds="10",
            HealthyThresholdCount="4",
            Matcher=elb.Matcher(HttpCode="200"),
            Name="WebTarget",
            Port=Ref(webport_param),
            Protocol="HTTP",
            Targets=[
                elb.TargetDescription(Id=Ref(WebInstance),
                                      Port=Ref(webport_param))
            ],
            UnhealthyThresholdCount="3",
            VpcId=Ref(VpcId),
        )
    )

    # Target group for the api instance (receives /api/* traffic).
    TargetGroupApi = template.add_resource(
        elb.TargetGroup(
            "TargetGroupApi",
            HealthCheckIntervalSeconds="30",
            HealthCheckProtocol="HTTP",
            HealthCheckTimeoutSeconds="10",
            HealthyThresholdCount="4",
            Matcher=elb.Matcher(HttpCode="200"),
            Name="ApiTarget",
            Port=Ref(apiport_param),
            Protocol="HTTP",
            Targets=[
                elb.TargetDescription(Id=Ref(ApiInstance),
                                      Port=Ref(apiport_param))
            ],
            UnhealthyThresholdCount="3",
            VpcId=Ref(VpcId),
        )
    )

    # HTTP listener on port 80; forwards to the web group by default.
    Listener = template.add_resource(
        elb.Listener(
            "Listener",
            Port="80",
            Protocol="HTTP",
            LoadBalancerArn=Ref(ApplicationElasticLB),
            DefaultActions=[
                elb.Action(Type="forward", TargetGroupArn=Ref(TargetGroupWeb))
            ],
        )
    )

    # Path rule: requests matching /api/* go to the api target group.
    template.add_resource(
        elb.ListenerRule(
            "ListenerRuleApi",
            ListenerArn=Ref(Listener),
            Conditions=[elb.Condition(Field="path-pattern",
                                      Values=["/api/*"])],
            Actions=[
                elb.ListenerRuleAction(
                    Type="forward",
                    TargetGroupArn=Ref(TargetGroupApi)
                )
            ],
            Priority="1",
        )
    )

    template.add_output(
        Output(
            "URL",
            Description="URL of the sample website",
            Value=Join("", ["http://",
                            GetAtt(ApplicationElasticLB, "DNSName")]),
        )
    )

    print(template.to_json())
Unit="PERCENT", Dimensions=[ emr.MetricDimension('my.custom.master.property', 'my.custom.master.value') ]))) ] return rules template = Template() template.add_description( "Sample CloudFormation template for creating an EMR cluster") keyname = template.add_parameter( Parameter("KeyName", Description="Name of an existing EC2 KeyPair to enable SSH " "to the instances", Type=KEY_PAIR_NAME)) subnet = template.add_parameter( Parameter("Subnet", Description="Subnet ID for creating the EMR cluster", Type=SUBNET_ID)) spot = template.add_parameter( Parameter("SpotPrice", Description="Spot price (or use 0 for 'on demand' instance)", Type=NUMBER, Default="0.1")) withSpotPrice = "WithSpotPrice"
def test_parameter(self):
    """Adding the same parameter twice must raise ValueError."""
    template = Template()
    param = Parameter("MyParameter", Type="String")
    template.add_parameter(param)
    with self.assertRaises(ValueError):
        template.add_parameter(param)
def test_invalid_parameter_property_in_template(self):
    """AllowedPattern is not valid for a Number parameter; serialization
    must reject the template."""
    template = Template()
    bad_param = Parameter("BasicNumber", Type="Number", AllowedPattern=".*")
    template.add_parameter(bad_param)
    with self.assertRaises(ValueError):
        template.to_json()
from troposphere import Template, Parameter, Tags, Sub
import troposphere.ec2 as ec2

template = Template()

# String parameters interpolated into the VPC Name tag below.
account_code = template.add_parameter(Parameter('AccountCode', Type='String'))
region_code = template.add_parameter(Parameter('RegionCode', Type='String'))
application = template.add_parameter(Parameter('Application', Type='String'))

# Single VPC whose Name tag is composed from the three parameters.
vpc = template.add_resource(
    ec2.VPC('VPC',
            CidrBlock='10.0.0.0/16',
            Tags=Tags(
                Application='demo-app',
                Name=Sub('${AccountCode}-${RegionCode}-${Application}-vpc'))))

print(template.to_yaml())
from troposphere.policies import (
    AutoScalingReplacingUpdate, AutoScalingRollingUpdate, UpdatePolicy
)
import troposphere.ec2 as ec2
import troposphere.elasticloadbalancing as elb
from troposphere.cloudwatch import Alarm, MetricDimension

t = Template()

t.add_description("""\
Configures autoscaling group for api app""")

# Security group (by ID) applied to the API instances.
SecurityGroup = t.add_parameter(Parameter(
    "SecurityGroup",
    Type="String",
    Description="Security group for api instances.",
))

# S3 bucket holding the deployment/CloudFormation scripts.
DeployBucket = t.add_parameter(Parameter(
    "DeployBucket",
    Type="String",
    Description="The S3 bucket with the cloudformation scripts.",
))

# Certificate for HTTPS termination at the load balancer.
SSLCertificateId = t.add_parameter(Parameter(
    "SSLCertificateId",
    Type="String",
    Description="SSL certificate for load balancer.",
))
import datetime

# AMI ID produced by packer, passed on the command line.
ami_id = sys.argv[1]

# %m (month), not %M (minute): the original "%Y%M%d%H%M" repeated the
# minute where the month belongs, producing non-chronological stack names.
stack_name = 'testing%s' % datetime.datetime.now().strftime("%Y%m%d%H%M")

t = Template()

t.add_version('2010-09-09')

t.add_description("""Test template to take in new ami id from packer""")

keyname_param = t.add_parameter(
    Parameter(
        'KeyName',
        ConstraintDescription='must be the name of an existing EC2 KeyPair.',
        Description='Name of an existing EC2 KeyPair to enable SSH access to \
the instance',
        Type='AWS::EC2::KeyPair::KeyName',
        Default='keyname'
    ))

amiid_param = t.add_parameter(
    Parameter(
        'AMIID',
        ConstraintDescription='AMI ID of the new image',
        Description='AMI ID of the new image',
        Type='String',
        Default=ami_id
    ))
ref_wait_ambari = Ref('waitHandleAmbari') # now the work begins t = Template() t.add_version("2010-09-09") t.add_description("""\ CloudFormation template to Deploy Hortonworks Data Platform on VPC with a public subnet""") ## Parameters BootDiskSize = t.add_parameter(Parameter( "BootDiskSize", Type="Number", Default="80", MinValue=10, MaxValue=2000, Description="Size of boot disk.", )) InstanceType = t.add_parameter(Parameter( "InstanceType", Default="m4.xlarge", ConstraintDescription="Must be a valid EC2 instance type.", Type="String", Description="Instance type for all hosts", )) AmbariPass = t.add_parameter(Parameter(
from cfn_encrypt import Encrypt, EncryptionContext, SecureParameter, GetSsmValue from troposphere import (Template, iam, GetAtt, Join, Ref, logs, Output, Sub, Parameter, awslambda, Base64, Export) from sys import argv do_example = False for arg in argv: if '-we' in arg: do_example = True t = Template() kms_key_arn = t.add_parameter( Parameter( "KmsKeyArn", Type="String", Description="KMS alias ARN for lambda", )) if do_example: plain_text = t.add_parameter( Parameter("PlainText", Type="String", Description="Text that you want to encrypt ( Hello World )", Default="Hello World", NoEcho=True)) # Create loggroup log_group_ssm = t.add_resource( logs.LogGroup( "LogGroupSsm",
# load config cfg = yaml.load(resource_string('config', 'generic_emr_config.yml')) networking_resources = utils.get_stack_resources( stack_name=cfg['networking_stack_name']) STACK_NAME = cfg['stack_name'] template = Template() description = 'Stack containing EMR with conda in all nodes' template.add_description(description) template.add_version('2010-09-09') instances = template.add_parameter( Parameter('Instances', Type='Number', Description='Number of core instances', MaxValue='10')) cluster = template.add_resource( emr.Cluster( 'Cluster', Name='Generic Cluster', ReleaseLabel=cfg['version'], JobFlowRole='GenericEMRInstanceProfile', ServiceRole='GenericEMRServiceRole', Instances=emr.JobFlowInstancesConfig( Ec2KeyName=cfg['ssh_key'], Ec2SubnetId=networking_resources['GenericEMRSubnet'], MasterInstanceGroup=emr.InstanceGroupConfigProperty( Name='Master Instance',
t.set_version("2010-09-09") t.set_description("""\ AWS CloudFormation Sample Template VPC_With_VPN_Connection.template: \ Sample template showing how to create a private subnet with a VPN connection \ using static routing to an existing VPN endpoint. NOTE: The VPNConnection \ created will define the configuration you need yonk the tunnels to your VPN \ endpoint - you can get the VPN Gateway configuration from the AWS Management \ console. You will be billed for the AWS resources used if you create a stack \ from this template.""") VPNAddress = t.add_parameter( Parameter( "VPNAddress", Type="String", Description="IP Address of your VPN device", MinLength="7", AllowedPattern=r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})", MaxLength="15", ConstraintDescription="must be a valid IP address of the form x.x.x.x", )) OnPremiseCIDR = t.add_parameter( Parameter( "OnPremiseCIDR", ConstraintDescription=( "must be a valid IP CIDR range of the form x.x.x.x/x."), Description="IP Address range for your existing infrastructure", Default="10.0.0.0/16", MinLength="9", AllowedPattern=r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})", MaxLength="18",
#!/usr/bin/env python from troposphere import Template, Parameter, Ref, Output from troposphere.cloudwatch import Alarm, MetricDimension t = Template() rds_instance = t.add_parameter(Parameter( 'RdsInstance', Type='String', Description='Instance to monitor' )) up_threshold = t.add_parameter( Parameter( 'UpThreshold', Type='String' ) ) up_evaluations = t.add_parameter( Parameter( 'UpEvaluations', Type='String' ) ) down_threshold = t.add_parameter( Parameter( 'DownThreshold', Type='String'
def test_creating_all_in_one(self):
    '''
    Create VPC, Subnets, IGW, Route, SecurityGroup, Instance at once.
    '''
    test_stack_name = 'TestStack'
    # Start from a clean slate: delete any stack left over from a previous
    # run and block until the deletion has fully completed.
    cf_client.delete_stack(StackName=test_stack_name)
    cf_client.get_waiter('stack_delete_complete').wait(
        StackName=test_stack_name)
    ###
    t = Template()
    # SSH key pair name, supplied by the caller at stack-creation time.
    keyname_param = t.add_parameter(
        Parameter(
            "KeyName",
            Description=
            "Name of an existing EC2 KeyPair to enable SSH access to the instance",
            Type="String",
        ))
    # Region -> AMI id lookup, consumed by FindInMap when building the
    # instance below.
    t.add_mapping(
        'RegionMap', {
            "us-east-1": {
                "AMI": "ami-7f418316"
            },
            "us-east-2": {
                "AMI": "ami-0c55b159cbfafe1f0"
            },
            "us-west-1": {
                "AMI": "ami-951945d0"
            },
            "us-west-2": {
                "AMI": "ami-16fd7026"
            },
            "eu-west-1": {
                "AMI": "ami-24506250"
            },
            "sa-east-1": {
                "AMI": "ami-3e3be423"
            },
            "ap-southeast-1": {
                "AMI": "ami-74dda626"
            },
            "ap-northeast-1": {
                "AMI": "ami-dcfa4edd"
            }
        })
    t.add_resource(
        VPC("VPC",
            EnableDnsSupport="true",
            CidrBlock="10.100.0.0/16",
            EnableDnsHostnames="true",
            Tags=Tags(
                Application=Ref("AWS::StackName"),
                Developer="cisco::haoru",
            )))
    t.add_resource(
        InternetGateway("InternetGateway",
                        Tags=Tags(
                            Application=Ref("AWS::StackName"),
                            Developer="cisco::haoru",
                        )))
    # The IGW must be attached to the VPC before the default route can
    # reference it (see DependsOn on the Route below).
    t.add_resource(
        VPCGatewayAttachment(
            "IGWAttachment",
            VpcId=Ref("VPC"),
            InternetGatewayId=Ref("InternetGateway"),
        ))
    t.add_resource(
        RouteTable("RouteTable",
                   VpcId=Ref("VPC"),
                   Tags=Tags(
                       Application=Ref("AWS::StackName"),
                       Developer="cisco::haoru",
                   )))
    # Default route: send all non-local traffic out through the IGW.
    t.add_resource(
        Route(
            "IGWRoute",
            DependsOn='IGWAttachment',
            GatewayId=Ref("InternetGateway"),
            DestinationCidrBlock="0.0.0.0/0",
            RouteTableId=Ref("RouteTable"),
        ))
    # loop through usable availability zones for the aws account and create a subnet for each zone
    for i, az in list(enumerate(get_azs(), start=1)):
        t.add_resource(
            Subnet("PublicSubnet{0}".format(i),
                   VpcId=Ref("VPC"),
                   CidrBlock="10.100.{0}.0/24".format(i),
                   AvailabilityZone="{0}".format(az),
                   MapPublicIpOnLaunch=True,
                   Tags=Tags(
                       Application=Ref("AWS::StackName"),
                       Developer="cisco::haoru",
                   )))
        # Associate each subnet with our route table explicitly; otherwise the
        # subnet falls back to the VPC's default (main) route table, which
        # only has the single local route.
        t.add_resource(
            SubnetRouteTableAssociation(
                "SubnetRouteTableAssociation{0}".format(i),
                SubnetId=Ref("PublicSubnet{0}".format(i)),
                RouteTableId=Ref("RouteTable"),
            ))
    # Wide-open TCP ingress — acceptable only because this is a throwaway
    # test stack.
    t.add_resource(
        SecurityGroup("SecurityGroup",
                      GroupDescription="Enable all ingress",
                      VpcId=Ref('VPC'),
                      SecurityGroupIngress=[
                          SecurityGroupRule(IpProtocol='tcp',
                                            CidrIp="0.0.0.0/0",
                                            FromPort=0,
                                            ToPort=65535),
                      ],
                      Tags=Tags(
                          Application=Ref("AWS::StackName"),
                          Developer="cisco::haoru",
                      )))
    ec2_instance = t.add_resource(
        Instance(
            "Instance",
            SecurityGroupIds=[Ref('SecurityGroup')],
            SubnetId=Ref('PublicSubnet1'),
            KeyName=Ref(keyname_param),
            InstanceType="m4.xlarge",
            ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
            Tags=Tags(
                Name="aws test troposphere",
                Application=Ref("AWS::StackName"),
                Developer="cisco::haoru",
            ),
        ))
    # Expose the instance's identifiers and addresses as stack outputs so the
    # test can look them up after creation.
    t.add_output([
        Output(
            "InstanceId",
            Description="InstanceId of the newly created EC2 instance",
            Value=Ref(ec2_instance),
        ),
        Output(
            "AZ",
            Description=
            "Availability Zone of the newly created EC2 instance",
            Value=GetAtt(ec2_instance, "AvailabilityZone"),
        ),
        Output(
            "PublicIP",
            Description=
            "Public IP address of the newly created EC2 instance",
            Value=GetAtt(ec2_instance, "PublicIp"),
        ),
        Output(
            "PrivateIP",
            Description=
            "Private IP address of the newly created EC2 instance",
            Value=GetAtt(ec2_instance, "PrivateIp"),
        ),
        Output(
            "PublicDNS",
            Description="Public DNSName of the newly created EC2 instance",
            Value=GetAtt(ec2_instance, "PublicDnsName"),
        ),
        Output(
            "PrivateDNS",
            Description="Private DNSName of the newly created EC2 instance",
            Value=GetAtt(ec2_instance, "PrivateDnsName"),
        ),
    ])
    dump_template(t, True)
    # Create the stack from the rendered YAML and wait for completion.
    cf_client.create_stack(
        StackName=test_stack_name,
        TemplateBody=t.to_yaml(),
        Parameters=[{
            'ParameterKey': 'KeyName',
            'ParameterValue': KEY  # Change this value as you wish
        }])
    cf_client.get_waiter('stack_create_complete').wait(
        StackName=test_stack_name)
    # Pull the instance's public IP out of the stack outputs.
    public_ip = key_find(
        cf_client.describe_stacks(
            StackName=test_stack_name)['Stacks'][0]['Outputs'], 'OutputKey',
        'PublicIP')['OutputValue']
    time.sleep(5)
    # Smoke-test connectivity: SSH in and ask an external service for our
    # public address. (run ssh-add <KEY> beforehand)
    run(f"ssh -o 'StrictHostKeyChecking no' ubuntu@{public_ip} curl -s ifconfig.me"
        )
def _sibling_lines(filename): """ Read file content into an output string. """ dirname = os.path.dirname(__file__) path = os.path.join(dirname, filename) with open(path, 'r') as f: return f.readlines() # Base JSON template. template = Template() # Keys corresponding to CloudFormation user Inputs. keyname_param = template.add_parameter(Parameter( "KeyPair", Description="Name of an existing EC2 KeyPair to enable SSH " "access to the instance", Type="String", )) access_key_id_param = template.add_parameter(Parameter( "AccessKeyID", Description="Your Amazon AWS access key ID", Type="String", )) secret_access_key_param = template.add_parameter(Parameter( "SecretAccessKey", Description="Your Amazon AWS secret access key.", Type="String", )) volumehub_token = template.add_parameter(Parameter( "VolumeHubToken",
from troposphere.cloudfront import TrustedSigners t = Template() t.set_description( "AWS CloudFormation Sample Template CloudFront_S3: Sample template " "showing how to create an Amazon CloudFront Streaming distribution " "using an S3 origin. " "**WARNING** This template creates a CloudFront distribution. " "You will be billed for the AWS resources used if you create " "a stack from this template.") s3dnsname = t.add_parameter( Parameter( "S3DNSName", Description="The DNS name of an existing S3 bucket to use as the " "Cloudfront distribution origin", Type="String", )) myDistribution = t.add_resource( StreamingDistribution( "myDistribution", StreamingDistributionConfig=StreamingDistributionConfig( Comment="Streaming distribution", Enabled=True, S3Origin=S3Origin(DomainName=Ref(s3dnsname)), TrustedSigners=TrustedSigners(Enabled=False, ), ))) t.add_output([
def test_attaching_ebs_to_instance(self):
    """Attach/detach an extra EBS volume via a stack condition, then check
    basic filesystem use, raw I/O throughput, and snapshot creation."""
    test_stack_name = 'TestAttachAdditionalEbsVolume'
    init_cf_env(test_stack_name)
    ###
    t = Template()
    # Drives the "Attached" condition below; flipping this parameter between
    # stack updates attaches or detaches the volume.
    attached_param = t.add_parameter(
        Parameter(
            "AttachVolume",
            Description="Should the volume be attached?",
            Type="String",
            Default="yes",
            AllowedValues=['yes', 'no'],
        ))
    attached_condition = t.add_condition(
        "Attached", Equals(Ref(attached_param), 'yes'))
    sg = ts_add_security_group(t)
    instance = ts_add_instance_with_public_ip(t, Ref(sg), tag='test ebs')
    # The volume must be created in the same AZ as the instance it attaches to.
    volume = t.add_resource(
        Volume(
            "MyVolume",
            AvailabilityZone=GetAtt(instance, "AvailabilityZone"),
            Size=8,  # 8G
            VolumeType='gp2',
        ))
    # The attachment only exists while the "Attached" condition is true.
    t.add_resource(
        VolumeAttachment("MyVolumeAttachment",
                         Condition="Attached",
                         Device='/dev/xvdh',
                         InstanceId=Ref(instance),
                         VolumeId=Ref(volume)))
    t.add_output([
        Output(
            "PublicIP",
            Value=GetAtt(instance, "PublicIp"),
        ),
        Output(
            "VolumeId",
            Description="InstanceId of the created ebs volume",
            Value=Ref(volume),
        ),
    ])
    dump_template(t, True)
    create_stack(test_stack_name, t)
    outputs = get_stack_outputs(test_stack_name)
    public_ip = get_output_value(outputs, 'PublicIP')
    volume_id = get_output_value(outputs, 'VolumeId')
    # The device should be visible on the instance while attached.
    stdout = run(f'ssh {SSH_OPTIONS} ec2-user@{public_ip} sudo fdisk -l')
    self.assertIn('/dev/xvdh', stdout)
    # The first time you use a newly created EBS volume, you must create a filesystem.
    run(f'ssh {SSH_OPTIONS} ec2-user@{public_ip} sudo mkfs -t ext4 /dev/xvdh'
        )
    # After the filesystem has been created, you can mount the device:
    run(f'ssh {SSH_OPTIONS} ec2-user@{public_ip} sudo mkdir /mnt/volume/')
    run(f'ssh {SSH_OPTIONS} ec2-user@{public_ip} sudo mount /dev/xvdh /mnt/volume/'
        )
    run(f'ssh {SSH_OPTIONS} ec2-user@{public_ip} df -h')
    run(f'ssh {SSH_OPTIONS} ec2-user@{public_ip} sudo touch /mnt/volume/testfile'
        )
    # Unmount before detaching; otherwise the EBS volume's status stays
    # 'busy' after detachment.
    run(
        f'ssh {SSH_OPTIONS} ec2-user@{public_ip} sudo umount /mnt/volume/'
    )
    # Detach the volume by flipping the stack parameter to 'no'.
    update_stack(test_stack_name, t, [{
        'ParameterKey': 'AttachVolume',
        'ParameterValue': 'no'
    }])
    stdout = run(f'ssh {SSH_OPTIONS} ec2-user@{public_ip} sudo fdisk -l')
    self.assertNotIn('/dev/xvdh', stdout)
    # attach again
    update_stack(test_stack_name, t, [{
        'ParameterKey': 'AttachVolume',
        'ParameterValue': 'yes'
    }])
    run(f'ssh {SSH_OPTIONS} ec2-user@{public_ip} sudo mount /dev/xvdh /mnt/volume/'
        )
    stdout = run(
        f'ssh {SSH_OPTIONS} ec2-user@{public_ip} ls /mnt/volume/testfile')
    # Data written before the detach must survive the reattach.
    self.assertIn('testfile', stdout)  # file still there
    with self.subTest("Performance"):
        # writing performance
        run(f'ssh {SSH_OPTIONS} ec2-user@{public_ip} sudo dd if=/dev/zero of=/mnt/volume/tempfile bs=1M count=1024'
            )  # write 1 MB, 1024 times
        # flush caches so the read below hits the device, not the page cache
        run(
            f'ssh {SSH_OPTIONS} ec2-user@{public_ip} "echo 3 | sudo tee /proc/sys/vm/drop_caches"',
            True)
        # reading performance
        run(f'ssh {SSH_OPTIONS} ec2-user@{public_ip} sudo dd if=/mnt/volume/tempfile of=/dev/null bs=1M count=1024'
            )  # read 1 MB, 1024 times
    with self.subTest("Creating snapshot"):
        '''
        Creating a snapshot of an attached, mounted volume is possible, but can cause
        problems with writes that aren't flushed to disk. You should either detach the
        volume from your instance or stop the instance first. If you absolutely must
        create a snapshot while the volume is in use, you can `freeze` it first.
        Unfreeze the volume as soon as the snapshot reaches the state pending. You
        don't have to wait until the snapshot has finished.
        '''
        run(f'ssh {SSH_OPTIONS} ec2-user@{public_ip} sudo fsfreeze -f /mnt/volume'
            )  # Freeze all writes on the vm
        volume_resource = boto3.resource('ec2').Volume(volume_id)
        snapshot = volume_resource.create_snapshot()
        snapshot.wait_until_completed()
        run(f'ssh {SSH_OPTIONS} ec2-user@{public_ip} sudo fsfreeze -u /mnt/volume'
            )  # Unfreeze to resume writes on the vm
        run(f'aws ec2 describe-snapshots --snapshot-ids {snapshot.id}')
        # Create a fresh volume from the snapshot to prove it is usable, then
        # clean up both the new volume and the snapshot.
        new_volume_id = ec2_client.create_volume(
            AvailabilityZone=get_azs()[0],
            SnapshotId=snapshot.id,
        )['VolumeId']
        run(f'aws ec2 describe-volumes --volume-ids {new_volume_id}')
        boto3.resource('ec2').Volume(new_volume_id).delete()
        snapshot.delete()
def gen_template(config) -> dict:
    """Generates a Cloud Formation template to make a device stack on EC2
    based on the passed configuration

    Arguments:
    config -- The configuration to use when generating the template
              (specifies things like number of server instances, etc)

    Returns:
    The generated template as a JSON object
    """
    num_couchbase_servers = config.server_number
    couchbase_instance_type = config.server_type
    num_sync_gateway_servers = config.sync_gateway_number
    sync_gateway_server_type = config.sync_gateway_type
    t = Template()
    t.set_description(
        'An Ec2-classic stack with Couchbase Server + Sync Gateway')

    def createCouchbaseSecurityGroups(t):
        # Build the single security group shared by Couchbase Server and Sync
        # Gateway nodes: public service ports open to the world, node-to-node
        # ports open only to the 172.31.0.0/16 range.
        # Couchbase security group
        secGrpCouchbase = ec2.SecurityGroup('CouchbaseSecurityGroup')
        secGrpCouchbase.GroupDescription = "Allow access to Couchbase Server"
        secGrpCouchbase.SecurityGroupIngress = [
            # SSH
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="22",
                ToPort="22",
                CidrIp="0.0.0.0/0",
            ),
            # Sync Gatway Ports
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="4984",
                ToPort="4985",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # expvars
                IpProtocol="tcp",
                FromPort="9876",
                ToPort="9876",
                CidrIp="0.0.0.0/0",
            ),
            # Couchbase Server Client-To-Node Ports
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="8091",
                ToPort="8096",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="11207",
                ToPort="11207",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="11210",
                ToPort="11211",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="18091",
                ToPort="18096",
                CidrIp="0.0.0.0/0",
            ),
            # Couchbase Server Node-To-Node Ports
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="4369",
                ToPort="4369",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="9100",
                ToPort="9105",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="9110",
                ToPort="9118",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="9120",
                ToPort="9122",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="9130",
                ToPort="9130",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="9999",
                ToPort="9999",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="11209",
                ToPort="11210",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="19130",
                ToPort="19130",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="21100",
                ToPort="21100",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="21150",
                ToPort="21150",
                CidrIp="172.31.0.0/16",
            )
        ]
        # Add security group to template
        t.add_resource(secGrpCouchbase)
        return secGrpCouchbase

    keyname_param = t.add_parameter(
        Parameter(
            'KeyName',
            Type='String',
            Description='Name of an existing EC2 KeyPair to enable SSH access')
    )
    secGrpCouchbase = createCouchbaseSecurityGroups(t)
    # Couchbase Server Instances
    for i in range(num_couchbase_servers):
        name = "{}{}".format(config.couchbase_server_prefix, i)
        instance = ec2.Instance(name)
        instance.ImageId = "ami-6d1c2007"  # centos7
        instance.InstanceType = couchbase_instance_type
        instance.SecurityGroups = [Ref(secGrpCouchbase)]
        instance.KeyName = Ref(keyname_param)
        instance.Tags = Tags(Name=name, Type="couchbaseserver")
        # 200 GB gp2 root volume, reclaimed when the instance terminates.
        instance.BlockDeviceMappings = [
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(
                                       DeleteOnTermination=True,
                                       VolumeSize=200,
                                       VolumeType="gp2"))
        ]
        t.add_resource(instance)
    # Sync Gateway instances.
    # NOTE(review): the original comment claimed "(ubuntu ami)" but the
    # ImageId below is the same centos7 AMI used for the servers — confirm
    # which is intended.
    for i in range(num_sync_gateway_servers):
        name = "{}{}".format(config.sync_gateway_prefix, i)
        instance = ec2.Instance(name)
        instance.ImageId = "ami-6d1c2007"  # centos7
        instance.InstanceType = sync_gateway_server_type
        instance.SecurityGroups = [Ref(secGrpCouchbase)]
        instance.KeyName = Ref(keyname_param)
        instance.BlockDeviceMappings = [
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(
                                       DeleteOnTermination=True,
                                       VolumeSize=200,
                                       VolumeType="gp2"))
        ]
        # Make syncgateway0 a cache writer, and the rest cache readers
        # See https://github.com/couchbase/sync_gateway/wiki/Distributed-channel-cache-design-notes
        if i == 0:
            instance.Tags = Tags(Name=name,
                                 Type="syncgateway",
                                 CacheType="writer")
        else:
            instance.Tags = Tags(Name=name, Type="syncgateway")
        t.add_resource(instance)
    return t.to_json()
sg.GroupDescription = "Allow access through ports 80 and 22" sg.SecurityGroupIngress = [ ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="80", ToPort="80", CidrIp="0.0.0.0/0"), ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="0.0.0.0/0") ] t.add_resource(sg) keypair = t.add_parameter( Parameter( "KeyName", Description= "Name of the ssh keky pair that will be used to access the instance", Type="String")) t.add_mapping( 'RegionMap', { "us-east-1": { "AMI": "ami-035be7bafff33b6b6" }, "us-west-1": { "AMI": "ami-951945d0" }, "us-west-2": { "AMI": "ami-16fd7026" }, "eu-west-1": {
# Optionally load the Lambda handler source from a file named on the command
# line, so it can be embedded inline in the template via Code(ZipFile=...).
if len(sys.argv) > 2:
    # Use a context manager so the file handle is always closed (the original
    # leaked it).
    with open(sys.argv[2], "r") as source_file:
        source = source_file.read()
    # Reclaim a few bytes (inline Lambda code is capped at 4096 chars!) by
    # converting four-space indents to single-space indents.
    indent_re = re.compile(r"^((?: ){1,})", re.MULTILINE)
    # NOTE: floor division is required here. The original used "/", which
    # returns a float on Python 3 and makes `" " * n` raise TypeError.
    source = indent_re.sub(lambda m: " " * (len(m.group(1)) // 4), source)
else:
    source = None

t = Template()
t.add_description("Chaos Lambda")

if source is None:
    # No inline source supplied: deploy the zip from S3 instead, with the
    # bucket and key provided as stack parameters.
    s3_bucket = t.add_parameter(
        Parameter(
            "S3Bucket",
            Description="Name of the S3 bucket containing the Lambda zip file",
            Type="String",
        ))
    s3_key = t.add_parameter(
        Parameter(
            "S3Key",
            Description="Path to the Lambda zip file under the bucket",
            Default="chaos-lambda.zip",
            Type="String",
        ))
    lambda_code = Code(S3Bucket=Ref(s3_bucket), S3Key=Ref(s3_key))
    module_name = "chaos"
else:
    # Inline deployment: CloudFormation names the generated module "index".
    lambda_code = Code(ZipFile=source)
    module_name = "index"
# This is not strictly needed, but it takes the pain out of writing a # cloudformation template by hand. It also allows for DRY approaches # to maintaining cloudformation templates. from troposphere import Ref, Template, Parameter, Output, Join, GetAtt, Tags import troposphere.ec2 as ec2 import configuration t = Template() t.add_description( 'Couchbase Servers' ) keynameparameter = t.add_parameter(Parameter( 'KeyNameParameter', Type='AWS::EC2::KeyPair::KeyName', Description='KeyName' )) subnetid1parameter = t.add_parameter(Parameter( 'SubnetId1Parameter', Type='AWS::EC2::Subnet::Id', Description='SubnetId' )) subnetid2parameter = t.add_parameter(Parameter( 'SubnetId2Parameter', Type='AWS::EC2::Subnet::Id', Description='SubnetId' )) securitygroupidparameter = t.add_parameter(Parameter( 'SecurityGroupIdParameter', Type='AWS::EC2::SecurityGroup::Id', Description='SecurityGroupId'
t = Template() t.add_version() t.set_description( "AWS CloudFormation Sample Template ElasticBeanstalk_Nodejs_Sample: " "Configure and launch the AWS Elastic Beanstalk sample application. " "**WARNING** This template creates one or more Amazon EC2 instances. You " "will be billed for the AWS resources used if you create a stack from " "this template.") keyname = t.add_parameter( Parameter( "KeyName", Description="Name of an existing EC2 KeyPair to enable SSH access to " "the AWS Elastic Beanstalk instance", Type="AWS::EC2::KeyPair::KeyName", ConstraintDescription="must be the name of an existing EC2 KeyPair.")) t.add_mapping( "Region2Principal", { 'ap-northeast-1': { 'EC2Principal': 'ec2.amazonaws.com', 'OpsWorksPrincipal': 'opsworks.amazonaws.com' }, 'ap-southeast-1': { 'EC2Principal': 'ec2.amazonaws.com', 'OpsWorksPrincipal': 'opsworks.amazonaws.com' }, 'ap-southeast-2': {
# Converted from EC2InstanceSample.template located at: # http://aws.amazon.com/cloudformation/aws-cloudformation-templates/ from troposphere import Base64, FindInMap, GetAtt from troposphere import Parameter, Output, Ref, Template import troposphere.ec2 as ec2 template = Template() keyname_param = template.add_parameter(Parameter( "KeyName", Description="Name of an existing EC2 KeyPair to enable SSH " "access to the instance", Type="String", )) template.add_mapping('RegionMap', { "us-east-1": {"AMI": "ami-7f418316"}, "us-west-1": {"AMI": "ami-951945d0"}, "us-west-2": {"AMI": "ami-16fd7026"}, "eu-west-1": {"AMI": "ami-24506250"}, "sa-east-1": {"AMI": "ami-3e3be423"}, "ap-southeast-1": {"AMI": "ami-74dda626"}, "ap-northeast-1": {"AMI": "ami-dcfa4edd"} }) ebs=ec2.EBSBlockDevice(VolumeSize=20,VolumeType="gp2",DeletionPolicy="Snapshot") ec2_instance = template.add_resource(ec2.Instance( "Ec2Instance", ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), InstanceType="t1.micro",
from troposphere.cloudfront import Origin, DefaultCacheBehavior from troposphere.cloudfront import ForwardedValues from troposphere.cloudfront import S3Origin from troposphere.certificatemanager import Certificate, DomainValidationOption from troposphere.s3 import Bucket, BucketPolicy from troposphere.route53 import RecordSetType, AliasTarget t = Template() t.add_description('Serving static content from an S3 bucket with CloudFront') # www.myawesomesite.com domain_name = t.add_parameter(Parameter( 'domainName', Description = 'Domain name for your site', Type = 'String' )) # awesomesite.com zone_apex = t.add_parameter(Parameter( 'zoneApex', Description = 'Root domain name www.[example.com]', Type = 'String' )) origin_access_id = t.add_parameter(Parameter( 'originAccessIdentity', Description = 'Origin Access Identity ID', Type = 'String' ))
secGrpCbIngress = ec2.SecurityGroupIngress(name) secGrpCbIngress.GroupName = Ref(secGrpCouchbase) secGrpCbIngress.IpProtocol = "tcp" secGrpCbIngress.FromPort = from_port secGrpCbIngress.ToPort = to_port secGrpCbIngress.SourceSecurityGroupName = Ref(secGrpCouchbase) t.add_resource(secGrpCbIngress) return secGrpCouchbase # # Parameters # keyname_param = t.add_parameter(Parameter( 'KeyName', Type='String', Description='Name of an existing EC2 KeyPair to enable SSH access' )) secGrpCouchbase = createCouchbaseSecurityGroups(t) # Couchbase Server Instances for i in xrange(NUM_COUCHBASE_SERVERS): name = "couchbaseserver{}".format(i) instance = ec2.Instance(name) instance.ImageId = "ami-96a818fe" # centos7 instance.InstanceType = COUCHBASE_INSTANCE_TYPE instance.SecurityGroups = [Ref(secGrpCouchbase)] instance.KeyName = Ref(keyname_param) instance.Tags=Tags(Name=name, Type="couchbaseserver") instance.BlockDeviceMappings = [
"S3hostedzoneID": "Z1WCIGYICN2BYD", "websiteendpoint": "s3-website-ap-southeast-2.amazonaws.com" }, "ap-northeast-1": { "S3hostedzoneID": "Z2M4EHUR26P7ZW", "websiteendpoint": "s3-website-ap-northeast-1.amazonaws.com" }, "sa-east-1": { "S3hostedzoneID": "Z31GFT0UA1I2HV", "websiteendpoint": "s3-website-sa-east-1.amazonaws.com" } }) hostedzone = t.add_parameter( Parameter( "HostedZone", Description="The DNS name of an existing Amazon Route 53 hosted zone", Type="String", )) root_bucket = t.add_resource( Bucket("RootBucket", BucketName=Ref(hostedzone), AccessControl=PublicRead, WebsiteConfiguration=WebsiteConfiguration( IndexDocument="index.html", ))) www_bucket = t.add_resource( Bucket("WWWBucket", BucketName=Join('.', ['www', Ref(hostedzone)]), AccessControl=PublicRead, WebsiteConfiguration=WebsiteConfiguration( RedirectAllRequestsTo=RedirectAllRequestsTo(
def main():
    """Build and print a CloudFormation template: an application load
    balancer fronting two EC2 instances (web + api) with path-based routing."""
    template = Template()
    template.add_version("2010-09-09")
    template.add_description(
        "AWS CloudFormation Sample Template: ELB with 2 EC2 instances")
    # Populate the RegionMap AMI mapping used by FindInMap below.
    AddAMI(template)

    # Add the Parameters
    keyname_param = template.add_parameter(Parameter(
        "KeyName",
        Type="String",
        Default="mark",
        Description="Name of an existing EC2 KeyPair to "
                    "enable SSH access to the instance",
    ))
    template.add_parameter(Parameter(
        "InstanceType",
        Type="String",
        Description="WebServer EC2 instance type",
        Default="m1.small",
        AllowedValues=[
            "t1.micro", "m1.small", "m1.medium", "m1.large", "m1.xlarge",
            "m2.xlarge", "m2.2xlarge", "m2.4xlarge", "c1.medium", "c1.xlarge",
            "cc1.4xlarge", "cc2.8xlarge", "cg1.4xlarge"
        ],
        ConstraintDescription="must be a valid EC2 instance type.",
    ))
    webport_param = template.add_parameter(Parameter(
        "WebServerPort",
        Type="String",
        Default="8888",
        Description="TCP/IP port of the web server",
    ))
    apiport_param = template.add_parameter(Parameter(
        "ApiServerPort",
        Type="String",
        Default="8889",
        Description="TCP/IP port of the api server",
    ))
    # Pre-existing network identifiers; defaults point at a specific account.
    subnetA = template.add_parameter(Parameter(
        "subnetA",
        Type="String",
        Default="subnet-096fd06d"
    ))
    subnetB = template.add_parameter(Parameter(
        "subnetB",
        Type="String",
        Default="subnet-1313ef4b"
    ))
    VpcId = template.add_parameter(Parameter(
        "VpcId",
        Type="String",
        Default="vpc-82c514e6"
    ))

    # Define the instance security group
    instance_sg = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription="Enable SSH and HTTP access on the inbound port",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=Ref(webport_param),
                    ToPort=Ref(webport_param),
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=Ref(apiport_param),
                    ToPort=Ref(apiport_param),
                    CidrIp="0.0.0.0/0",
                ),
            ]
        )
    )

    # Add the web server instance; UserData carries the port so the instance
    # can configure itself at boot.
    WebInstance = template.add_resource(ec2.Instance(
        "WebInstance",
        SecurityGroups=[Ref(instance_sg)],
        KeyName=Ref(keyname_param),
        InstanceType=Ref("InstanceType"),
        ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
        UserData=Base64(Ref(webport_param)),
    ))

    # Add the api server instance
    ApiInstance = template.add_resource(ec2.Instance(
        "ApiInstance",
        SecurityGroups=[Ref(instance_sg)],
        KeyName=Ref(keyname_param),
        InstanceType=Ref("InstanceType"),
        ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
        UserData=Base64(Ref(apiport_param)),
    ))

    # Add the application ELB, spanning both subnets.
    ApplicationElasticLB = template.add_resource(elb.LoadBalancer(
        "ApplicationElasticLB",
        Name="ApplicationElasticLB",
        Scheme="internet-facing",
        Subnets=[Ref(subnetA), Ref(subnetB)]
    ))
    # One target group per backend instance, each health-checked over HTTP.
    TargetGroupWeb = template.add_resource(elb.TargetGroup(
        "TargetGroupWeb",
        HealthCheckIntervalSeconds="30",
        HealthCheckProtocol="HTTP",
        HealthCheckTimeoutSeconds="10",
        HealthyThresholdCount="4",
        Matcher=elb.Matcher(
            HttpCode="200"),
        Name="WebTarget",
        Port=Ref(webport_param),
        Protocol="HTTP",
        Targets=[elb.TargetDescription(
            Id=Ref(WebInstance),
            Port=Ref(webport_param))],
        UnhealthyThresholdCount="3",
        VpcId=Ref(VpcId)
    ))
    TargetGroupApi = template.add_resource(elb.TargetGroup(
        "TargetGroupApi",
        HealthCheckIntervalSeconds="30",
        HealthCheckProtocol="HTTP",
        HealthCheckTimeoutSeconds="10",
        HealthyThresholdCount="4",
        Matcher=elb.Matcher(
            HttpCode="200"),
        Name="ApiTarget",
        Port=Ref(apiport_param),
        Protocol="HTTP",
        Targets=[elb.TargetDescription(
            Id=Ref(ApiInstance),
            Port=Ref(apiport_param))],
        UnhealthyThresholdCount="3",
        VpcId=Ref(VpcId)
    ))
    # Listener on port 80 forwards to the web target group by default; the
    # rule below overrides that for /api/* paths.
    Listener = template.add_resource(elb.Listener(
        "Listener",
        Port="80",
        Protocol="HTTP",
        LoadBalancerArn=Ref(ApplicationElasticLB),
        DefaultActions=[elb.Action(
            Type="forward",
            TargetGroupArn=Ref(TargetGroupWeb)
        )]
    ))
    template.add_resource(elb.ListenerRule(
        "ListenerRuleApi",
        ListenerArn=Ref(Listener),
        Conditions=[elb.Condition(
            Field="path-pattern",
            Values=["/api/*"])],
        Actions=[elb.Action(
            Type="forward",
            TargetGroupArn=Ref(TargetGroupApi)
        )],
        Priority="1"
    ))
    template.add_output(Output(
        "URL",
        Description="URL of the sample website",
        Value=Join("", ["http://", GetAtt(ApplicationElasticLB, "DNSName")])
    ))
    print(template.to_json())
def test_max_parameters(self):
    """Adding one parameter beyond MAX_PARAMETERS must raise ValueError."""
    t = Template()
    # Fill the template right up to the CloudFormation parameter limit.
    for index in range(MAX_PARAMETERS):
        t.add_parameter(Parameter(str(index), Type='String'))
    # The very next add must be rejected.
    with self.assertRaises(ValueError):
        t.add_parameter(Parameter("parameter", Type='String'))
#!/usr/bin/python # Asgard CloudFormation template from troposphere import Template, Parameter, Join, Ref, FindInMap, Output, GetAtt import troposphere.ec2 as ec2 template = Template() template.add_description('NetflixOSS Asgard 1.4.1 - Template by Answers for AWS') keyname = template.add_parameter(Parameter( "KeyPairName", Description = "Name of an existing EC2 KeyPair to enable SSH access to the instance", Type = "String", MinLength = "1", MaxLength = "64", AllowedPattern = "[-_ a-zA-Z0-9]*", ConstraintDescription = "can contain only alphanumeric characters, spaces, dashes and underscores." )) ip_address = template.add_parameter(Parameter( "YourIpAddress", Description = "Your IP address", Type = "String", )) instance_type = template.add_parameter(Parameter( "InstanceType", Description = "EC2 instance type to launch for Application servers", Type = "String",
from troposphere import Base64, Join, GetAtt from troposphere import Parameter, Ref, Template from troposphere import ec2, iam from troposphere.autoscaling import AutoScalingGroup, Tag from troposphere.autoscaling import LaunchConfiguration from troposphere.route53 import RecordSet, RecordSetGroup, AliasTarget import troposphere.elasticloadbalancing as elb templ = Template() templ.add_description('Kibana cloudformation template') instance_type = templ.add_parameter(Parameter( 'InstanceType', Type='String', Description='Instande type for instances', Default='m3.medium' )) key_name = templ.add_parameter(Parameter( 'KeyName', Type='AWS::EC2::KeyPair::KeyName', Description='Name of an existing EC2 KeyPair to enable SSH access', )) cluster_name = templ.add_parameter(Parameter( 'ClusterName', Type='String', Description='Name for the Elasticserach cluster', Default='elasticsearch-cluster' ))
import json from troposphere import Template, Ref, Parameter from troposphere.logs import MetricFilter, MetricTransformation METRIC_NAMESPACE = "BBC/CHAOS-LAMBDA" t = Template() log_group = t.add_parameter( Parameter( "LambdaLogGroupName", Description="The name of the log group for the lambda function.", Type="String", ) ) t.add_description( "Metrics and filters for Chaos Lambda" ) lambda_metrics = { "liveliness": { "FilterPattern": ( "[datetime, event=\"triggered\", ...]" ), "MetricTransformations": [ MetricTransformation( MetricNamespace=METRIC_NAMESPACE, MetricName="triggered", MetricValue="1", )
class NetworkTemplateBuilder:
    """Build troposphere CFN templates for VPC creation."""

    def __init__(
        self,
        vpc_configuration: VPCConfig,
        existing_vpc: bool = False,
        availability_zone: str = None,
        description="Network build by NetworkTemplateBuilder",
    ):
        self.__template = Template()
        self.__template.set_version("2010-09-09")
        self.__template.set_description(description)
        # If no AZ is supplied, it becomes an optional template parameter.
        self.__availability_zone = self.__get_availability_zone(
            availability_zone)
        self.__vpc_config = vpc_configuration
        # Either a parameter Ref (existing VPC) or a new VPC resource.
        self.__vpc = self.__get_vpc(existing_vpc)
        self.__vpc_subnets = vpc_configuration.subnets
        self.__gateway_id = self.__get_gateway_id()
        # Create the IGW only when no gateway id was provided.
        self.__create_ig = self.__template.add_condition(
            "CreateInternetGateway", Equals(self.__gateway_id, ""))
        self.__existing_ig = self.__template.add_condition(
            # can't negate above condition with Not()
            "ExistingInternetGateway", Not(Equals(self.__gateway_id, "")))

    def __get_vpc(self, existing_vpc):
        # Returns a "VpcId" parameter when reusing an existing VPC, otherwise
        # creates a new VPC resource in the template.
        if existing_vpc:
            return self.__add_parameter(name="VpcId",
                                        description="The vpc id",
                                        expected_input_type="String")
        else:
            return self.__build_vpc()

    def __get_availability_zone(self, availability_zone):
        # A caller-supplied AZ is used verbatim; otherwise it is exposed as an
        # optional "AvailabilityZone" template parameter.
        if availability_zone:
            return availability_zone
        else:
            return Ref(
                self.__template.add_parameter(
                    Parameter(
                        "AvailabilityZone",
                        Description=
                        "(Optional) The zone in which you want to create your subnet(s)",
                        Type="String",
                        Default="",
                    )))

    def build(self):
        """Build the template."""
        self.__build_template()
        return self.__template

    def __build_template(self):
        # Assemble gateway, subnets, and route tables in dependency order.
        internet_gateway = self.__build_internet_gateway(self.__vpc)
        nat_gateway = None
        subnet_refs = []
        for subnet in self.__vpc_subnets:
            subnet_ref = self.__build_subnet(subnet, self.__vpc)
            subnet_refs.append(subnet_ref)
            # NOTE(review): only the last subnet flagged has_nat_gateway wins;
            # a single NAT gateway reference is kept for all route tables.
            if subnet.has_nat_gateway:
                nat_gateway = self.__build_nat_gateway(subnet, subnet_ref)
        for subnet, subnet_ref in zip(self.__vpc_subnets, subnet_refs):
            self.__build_route_table(subnet, subnet_ref, self.__vpc,
                                     internet_gateway, nat_gateway)

    def __build_vpc(self):
        # Create the VPC resource from the supplied VPCConfig and export its
        # id as a stack output.
        vpc = self.__template.add_resource(
            VPC(
                self.__vpc_config.name,
                CidrBlock=self.__vpc_config.cidr,
                EnableDnsSupport=self.__vpc_config.enable_dns_support,
                EnableDnsHostnames=self.__vpc_config.enable_dns_hostnames,
                Tags=self.__vpc_config.tags,
            ))
        self.__template.add_output(
            Output("VpcId", Value=Ref(vpc), Description="The Vpc Id"))
        return vpc

    def __build_internet_gateway(self, vpc: VPC):
        # Gateway and its attachment are both guarded by the
        # CreateInternetGateway condition (only built when none was given).
        internet_gateway = self.__template.add_resource(
            InternetGateway(
                "InternetGateway",
                Tags=Tags(Name=TAGS_PREFIX + "IG", Stack=Ref("AWS::StackId")),
                Condition=self.__create_ig,
            ))
        self.__template.add_resource(
            VPCGatewayAttachment(
                "VPCGatewayAttachment",
                VpcId=Ref(vpc),
                InternetGatewayId=Ref(internet_gateway),
                Condition=self.__create_ig,
            ))
        return Ref(internet_gateway)

    def __get_gateway_id(self):
        # Optional parameter: empty string means "create the IGW for me".
        return Ref(
            self.__template.add_parameter(
                Parameter(
                    "InternetGatewayId",
                    Description=
                    "(Optional) The id of the gateway (will be created if not specified)",
                    Type="String",
                    Default="",
                )))

    def __build_subnet(self, subnet_config: SubnetConfig, vpc: VPC):
        # A subnet without a configured CIDR gets one as a template parameter
        # constrained to a /16–/32 dotted-quad CIDR.
        if not subnet_config.cidr:
            cidr = Ref(
                self.__template.add_parameter(
                    Parameter(
                        f"{subnet_config.name}CIDR",
                        Description=f"The CIDR of the {subnet_config.name}",
                        Type="String",
                        AllowedPattern=
                        r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/(1[6-9]|2[0-9]|3[0-2])$",
                    )))
        else:
            cidr = subnet_config.cidr
        subnet = Subnet(
            subnet_config.name,
            CidrBlock=cidr,
            VpcId=Ref(vpc),
            MapPublicIpOnLaunch=subnet_config.map_public_ip_on_launch,
            Tags=subnet_config.tags(),
            AvailabilityZone=self.__availability_zone,
        )
        self.__template.add_resource(subnet)
        self.__template.add_output(
            Output(subnet_config.name + "SubnetId", Value=Ref(subnet)))
        return subnet

    def __build_nat_gateway(self, subnet_config: SubnetConfig,
                            subnet_ref: Subnet):
        # Each NAT gateway needs its own Elastic IP allocation.
        nat_eip = self.__template.add_resource(
            EIP("NatEIP" + subnet_config.name, Domain="vpc"))
        return self.__template.add_resource(
            NatGateway(
                "NatGateway" + subnet_config.name,
                AllocationId=GetAtt(nat_eip, "AllocationId"),
                SubnetId=Ref(subnet_ref),
            ))

    def __build_route_table(self,
                            subnet_config: SubnetConfig,
                            subnet_ref: Subnet,
                            vpc: VPC,
                            internet_gateway,
                            nat_gateway: NatGateway):
        # Pick the created IGW or the user-supplied gateway id at deploy time.
        internet_gateway = If(self.__create_ig, internet_gateway,
                              self.__gateway_id)
        route_table = self.__template.add_resource(
            RouteTable(
                "RouteTable" + subnet_config.name,
                VpcId=Ref(vpc),
                Tags=Tags(Name=TAGS_PREFIX + "RouteTable" + subnet_config.name,
                          Stack=Ref("AWS::StackId")),
            ))
        self.__template.add_resource(
            SubnetRouteTableAssociation("RouteAssociation" +
                                        subnet_config.name,
                                        SubnetId=Ref(subnet_ref),
                                        RouteTableId=Ref(route_table)))
        if subnet_config.default_gateway == Gateways.INTERNET_GATEWAY:
            # Two mutually-exclusive routes: DependsOn is only valid when the
            # gateway attachment is created by this template, so a second
            # route without DependsOn covers the pre-existing-gateway case.
            self.__template.add_resource(
                Route(
                    "DefaultRouteDependsOn" + subnet_config.name,
                    RouteTableId=Ref(route_table),
                    DestinationCidrBlock="0.0.0.0/0",
                    GatewayId=internet_gateway,
                    DependsOn="VPCGatewayAttachment",
                    Condition=self.__create_ig,
                ))
            self.__template.add_resource(
                Route(
                    "DefaultRouteNoDependsOn" + subnet_config.name,
                    RouteTableId=Ref(route_table),
                    DestinationCidrBlock="0.0.0.0/0",
                    GatewayId=internet_gateway,
                    Condition=self.__existing_ig,  # cant use Not()
                ))
        elif subnet_config.default_gateway == Gateways.NAT_GATEWAY:
            self.__template.add_resource(
                Route(
                    "NatRoute" + subnet_config.name,
                    RouteTableId=Ref(route_table),
                    DestinationCidrBlock="0.0.0.0/0",
                    NatGatewayId=Ref(nat_gateway),
                ))

    def __add_parameter(self, name, description, expected_input_type):
        # Thin helper: register a parameter and return it.
        return self.__template.add_parameter(
            Parameter(name,
                      Description=description,
                      Type=expected_input_type))
def main():
    """Create an ElastiCache Redis node and an EC2 web-server instance.

    Builds the full CloudFormation template (mappings, parameters, resources,
    outputs) and prints it as JSON to stdout.
    """
    template = Template()

    # Description.
    # FIX: trailing spaces added to the literals; the original implicit string
    # concatenation ran words together ("AmazonElastiCache", "templatecreates").
    template.set_description(
        'AWS CloudFormation Sample Template ElastiCache_Redis: '
        'Sample template showing how to create an Amazon '
        'ElastiCache Redis Cluster. **WARNING** This template '
        'creates an Amazon EC2 Instance and an Amazon ElastiCache '
        'Cluster. You will be billed for the AWS resources used '
        'if you create a stack from this template.')

    # Mapping: instance type -> virtualisation architecture.
    template.add_mapping('AWSInstanceType2Arch', {
        't1.micro': {'Arch': 'PV64'},
        't2.micro': {'Arch': 'HVM64'},
        't2.small': {'Arch': 'HVM64'},
        't2.medium': {'Arch': 'HVM64'},
        'm1.small': {'Arch': 'PV64'},
        'm1.medium': {'Arch': 'PV64'},
        'm1.large': {'Arch': 'PV64'},
        'm1.xlarge': {'Arch': 'PV64'},
        'm2.xlarge': {'Arch': 'PV64'},
        'm2.2xlarge': {'Arch': 'PV64'},
        'm2.4xlarge': {'Arch': 'PV64'},
        'm3.medium': {'Arch': 'HVM64'},
        'm3.large': {'Arch': 'HVM64'},
        'm3.xlarge': {'Arch': 'HVM64'},
        'm3.2xlarge': {'Arch': 'HVM64'},
        'c1.medium': {'Arch': 'PV64'},
        'c1.xlarge': {'Arch': 'PV64'},
        'c3.large': {'Arch': 'HVM64'},
        'c3.xlarge': {'Arch': 'HVM64'},
        'c3.2xlarge': {'Arch': 'HVM64'},
        'c3.4xlarge': {'Arch': 'HVM64'},
        'c3.8xlarge': {'Arch': 'HVM64'},
        'c4.large': {'Arch': 'HVM64'},
        'c4.xlarge': {'Arch': 'HVM64'},
        'c4.2xlarge': {'Arch': 'HVM64'},
        'c4.4xlarge': {'Arch': 'HVM64'},
        'c4.8xlarge': {'Arch': 'HVM64'},
        'g2.2xlarge': {'Arch': 'HVMG2'},
        'r3.large': {'Arch': 'HVM64'},
        'r3.xlarge': {'Arch': 'HVM64'},
        'r3.2xlarge': {'Arch': 'HVM64'},
        'r3.4xlarge': {'Arch': 'HVM64'},
        'r3.8xlarge': {'Arch': 'HVM64'},
        'i2.xlarge': {'Arch': 'HVM64'},
        'i2.2xlarge': {'Arch': 'HVM64'},
        'i2.4xlarge': {'Arch': 'HVM64'},
        'i2.8xlarge': {'Arch': 'HVM64'},
        'd2.xlarge': {'Arch': 'HVM64'},
        'd2.2xlarge': {'Arch': 'HVM64'},
        'd2.4xlarge': {'Arch': 'HVM64'},
        'd2.8xlarge': {'Arch': 'HVM64'},
        'hi1.4xlarge': {'Arch': 'HVM64'},
        'hs1.8xlarge': {'Arch': 'HVM64'},
        'cr1.8xlarge': {'Arch': 'HVM64'},
        'cc2.8xlarge': {'Arch': 'HVM64'}
    })

    # Mapping: region -> AMI id per architecture.
    template.add_mapping('AWSRegionArch2AMI', {
        'us-east-1': {'PV64': 'ami-0f4cfd64',
                      'HVM64': 'ami-0d4cfd66',
                      'HVMG2': 'ami-5b05ba30'},
        'us-west-2': {'PV64': 'ami-d3c5d1e3',
                      'HVM64': 'ami-d5c5d1e5',
                      'HVMG2': 'ami-a9d6c099'},
        'us-west-1': {'PV64': 'ami-85ea13c1',
                      'HVM64': 'ami-87ea13c3',
                      'HVMG2': 'ami-37827a73'},
        'eu-west-1': {'PV64': 'ami-d6d18ea1',
                      'HVM64': 'ami-e4d18e93',
                      'HVMG2': 'ami-72a9f105'},
        'eu-central-1': {'PV64': 'ami-a4b0b7b9',
                         'HVM64': 'ami-a6b0b7bb',
                         'HVMG2': 'ami-a6c9cfbb'},
        'ap-northeast-1': {'PV64': 'ami-1a1b9f1a',
                           'HVM64': 'ami-1c1b9f1c',
                           'HVMG2': 'ami-f644c4f6'},
        'ap-southeast-1': {'PV64': 'ami-d24b4280',
                           'HVM64': 'ami-d44b4286',
                           'HVMG2': 'ami-12b5bc40'},
        'ap-southeast-2': {'PV64': 'ami-ef7b39d5',
                           'HVM64': 'ami-db7b39e1',
                           'HVMG2': 'ami-b3337e89'},
        'sa-east-1': {'PV64': 'ami-5b098146',
                      'HVM64': 'ami-55098148',
                      'HVMG2': 'NOT_SUPPORTED'},
        'cn-north-1': {'PV64': 'ami-bec45887',
                       'HVM64': 'ami-bcc45885',
                       'HVMG2': 'NOT_SUPPORTED'}
    })

    # Mapping: region -> IAM service principals (China partition differs).
    template.add_mapping('Region2Principal', {
        'us-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'us-west-2': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'us-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'eu-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-southeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-northeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-southeast-2': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'sa-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'cn-north-1': {'EC2Principal': 'ec2.amazonaws.com.cn',
                       'OpsWorksPrincipal': 'opsworks.amazonaws.com.cn'},
        'eu-central-1': {'EC2Principal': 'ec2.amazonaws.com',
                         'OpsWorksPrincipal': 'opsworks.amazonaws.com'}
    })

    # Parameters
    cachenodetype = template.add_parameter(Parameter(
        'ClusterNodeType',
        Description='The compute and memory capacity of the nodes in the Redis'
                    ' Cluster',
        Type='String',
        Default='cache.m1.small',
        AllowedValues=['cache.m1.small', 'cache.m1.large', 'cache.m1.xlarge',
                       'cache.m2.xlarge', 'cache.m2.2xlarge',
                       'cache.m2.4xlarge', 'cache.c1.xlarge'],
        ConstraintDescription='must select a valid Cache Node type.',
    ))

    instancetype = template.add_parameter(Parameter(
        'InstanceType',
        Description='WebServer EC2 instance type',
        Type='String',
        Default='t2.micro',
        AllowedValues=['t1.micro', 't2.micro', 't2.small', 't2.medium',
                       'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge',
                       'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium',
                       'm3.large', 'm3.xlarge', 'm3.2xlarge', 'c1.medium',
                       'c1.xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge',
                       'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge',
                       'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'g2.2xlarge',
                       'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge',
                       'r3.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge',
                       'i2.8xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge',
                       'd2.8xlarge', 'hi1.4xlarge', 'hs1.8xlarge',
                       'cr1.8xlarge', 'cc2.8xlarge', 'cg1.4xlarge'],
        ConstraintDescription='must be a valid EC2 instance type.',
    ))

    keyname = template.add_parameter(Parameter(
        'KeyName',
        Description='Name of an existing EC2 KeyPair to enable SSH access'
                    ' to the instance',
        Type='AWS::EC2::KeyPair::KeyName',
        ConstraintDescription='must be the name of an existing EC2 KeyPair.',
    ))

    sshlocation = template.add_parameter(Parameter(
        'SSHLocation',
        Description='The IP address range that can be used to SSH to'
                    ' the EC2 instances',
        Type='String',
        MinLength='9',
        MaxLength='18',
        Default='0.0.0.0/0',
        AllowedPattern='(\\d{1,3})\\.(\\d{1,3})\\.'
                       '(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})',
        ConstraintDescription='must be a valid IP CIDR range of the'
                              ' form x.x.x.x/x.'
    ))

    # Resources

    # Role the web server assumes; principal comes from the region mapping so
    # the template also works in the China partition.
    webserverrole = template.add_resource(iam.Role(
        'WebServerRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[AssumeRole],
                    Principal=Principal(
                        'Service',
                        [FindInMap('Region2Principal', Ref('AWS::Region'),
                                   'EC2Principal')]),
                )
            ]
        ),
        Path='/',
    ))

    # Lets the instance discover the cache cluster's endpoint at boot.
    template.add_resource(iam.PolicyType(
        'WebServerRolePolicy',
        PolicyName='WebServerRole',
        PolicyDocument=PolicyDocument(
            Statement=[awacs.aws.Statement(
                Action=[awacs.aws.Action("elasticache",
                                         "DescribeCacheClusters")],
                Resource=["*"],
                Effect=awacs.aws.Allow
            )]
        ),
        Roles=[Ref(webserverrole)],
    ))

    webserverinstanceprofile = template.add_resource(iam.InstanceProfile(
        'WebServerInstanceProfile',
        Path='/',
        Roles=[Ref(webserverrole)],
    ))

    webserversg = template.add_resource(ec2.SecurityGroup(
        'WebServerSecurityGroup',
        GroupDescription='Enable HTTP and SSH access',
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='22',
                ToPort='22',
                CidrIp=Ref(sshlocation),
            ),
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='80',
                ToPort='80',
                CidrIp='0.0.0.0/0',
            )
        ]
    ))

    webserverinstance = template.add_resource(ec2.Instance(
        'WebServerInstance',
        Metadata=cloudformation.Metadata(
            cloudformation.Init({
                'config': cloudformation.InitConfig(
                    packages={
                        'yum': {
                            'httpd': [],
                            'php': [],
                            'php-devel': [],
                            'gcc': [],
                            'make': []
                        }
                    },
                    files=cloudformation.InitFiles({
                        # Demo page: reads the cluster config dumped by the
                        # cron job below and talks to Redis via phpredis.
                        '/var/www/html/index.php': cloudformation.InitFile(
                            content=Join('', [
                                '<?php\n',
                                'echo \"<h1>AWS CloudFormation sample'
                                ' application for Amazon ElastiCache'
                                ' Redis Cluster</h1>\";\n',
                                '\n',
                                '$cluster_config = json_decode('
                                'file_get_contents(\'/tmp/cacheclusterconfig\''
                                '), true);\n',
                                '$endpoint = $cluster_config[\'CacheClusters'
                                '\'][0][\'CacheNodes\'][0][\'Endpoint\'][\'Add'
                                'ress\'];\n',
                                '$port = $cluster_config[\'CacheClusters\'][0]'
                                '[\'CacheNodes\'][0][\'Endpoint\'][\'Port\'];'
                                '\n',
                                '\n',
                                'echo \"<p>Connecting to Redis Cache Cluster '
                                'node \'{$endpoint}\' on port {$port}</p>\";'
                                '\n',
                                '\n',
                                '$redis=new Redis();\n',
                                '$redis->connect($endpoint, $port);\n',
                                '$redis->set(\'testkey\', \'Hello World!\');'
                                '\n',
                                '$return = $redis->get(\'testkey\');\n',
                                '\n',
                                'echo \"<p>Retrieved value: $return</p>\";'
                                '\n',
                                '?>\n'
                            ]),
                            mode='000644',
                            owner='apache',
                            group='apache'
                        ),
                        # Refresh the cached cluster description every 5 min.
                        '/etc/cron.d/get_cluster_config':
                            cloudformation.InitFile(
                                content='*/5 * * * * root'
                                        ' /usr/local/bin/get_cluster_config',
                                mode='000644',
                                owner='root',
                                group='root'
                            ),
                        '/usr/local/bin/get_cluster_config':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '#! /bin/bash\n',
                                    'aws elasticache describe-cache-clusters ',
                                    '         --cache-cluster-id ',
                                    Ref('RedisCluster'),
                                    '         --show-cache-node-info'
                                    ' --region ', Ref('AWS::Region'),
                                    ' > /tmp/cacheclusterconfig\n'
                                ]),
                                mode='000755',
                                owner='root',
                                group='root'
                            ),
                        '/usr/local/bin/install_phpredis':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '#! /bin/bash\n',
                                    'cd /tmp\n',
                                    'wget https://github.com/nicolasff/'
                                    'phpredis/zipball/master -O phpredis.zip'
                                    '\n',
                                    'unzip phpredis.zip\n',
                                    'cd nicolasff-phpredis-*\n',
                                    'phpize\n',
                                    './configure\n',
                                    'make && make install\n',
                                    'touch /etc/php.d/redis.ini\n',
                                    'echo extension=redis.so > /etc/php.d/'
                                    'redis.ini\n'
                                ]),
                                mode='000755',
                                owner='root',
                                group='root'
                            ),
                        '/etc/cfn/cfn-hup.conf': cloudformation.InitFile(
                            content=Join('', [
                                '[main]\n',
                                'stack=', Ref('AWS::StackId'), '\n',
                                'region=', Ref('AWS::Region'), '\n'
                            ]),
                            mode='000400',
                            owner='root',
                            group='root'
                        ),
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '[cfn-auto-reloader-hook]\n',
                                    'triggers=post.update\n',
                                    'path=Resources.WebServerInstance.Metadata'
                                    '.AWS::CloudFormation::Init\n',
                                    'action=/opt/aws/bin/cfn-init -v ',
                                    '         --stack ', Ref('AWS::StackName'),
                                    '         --resource WebServerInstance ',
                                    '         --region ', Ref('AWS::Region'),
                                    '\n',
                                    'runas=root\n'
                                ]),
                                # Why doesn't the Amazon template have this?
                                # mode='000400',
                                # owner='root',
                                # group='root'
                            ),
                    }),
                    commands={
                        '01-install_phpredis': {
                            'command': '/usr/local/bin/install_phpredis'
                        },
                        '02-get-cluster-config': {
                            'command': '/usr/local/bin/get_cluster_config'
                        }
                    },
                    services={
                        "sysvinit": cloudformation.InitServices({
                            "httpd": cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                            ),
                            "cfn-hup": cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                                files=['/etc/cfn/cfn-hup.conf',
                                       '/etc/cfn/hooks.d/'
                                       'cfn-auto-reloader.conf']
                            ),
                        }),
                    },
                )
            })
        ),
        ImageId=FindInMap('AWSRegionArch2AMI', Ref('AWS::Region'),
                          FindInMap('AWSInstanceType2Arch',
                                    Ref(instancetype), 'Arch')),
        InstanceType=Ref(instancetype),
        SecurityGroups=[Ref(webserversg)],
        KeyName=Ref(keyname),
        IamInstanceProfile=Ref(webserverinstanceprofile),
        UserData=Base64(Join('', [
            '#!/bin/bash -xe\n',
            'yum update -y aws-cfn-bootstrap\n',
            '# Setup the PHP sample application\n',
            '/opt/aws/bin/cfn-init -v ',
            '         --stack ', Ref('AWS::StackName'),
            '         --resource WebServerInstance ',
            '         --region ', Ref('AWS::Region'), '\n',
            '# Signal the status of cfn-init\n',
            '/opt/aws/bin/cfn-signal -e $? ',
            '         --stack ', Ref('AWS::StackName'),
            '         --resource WebServerInstance ',
            '         --region ', Ref('AWS::Region'), '\n'
        ])),
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M')
        ),
        # FIX: corrected tag value typo "Troposhpere" -> "Troposphere".
        Tags=Tags(Application=Ref('AWS::StackId'),
                  Details='Created using Troposphere')
    ))

    redisclustersg = template.add_resource(elasticache.SecurityGroup(
        'RedisClusterSecurityGroup',
        Description='Lock the cluster down',
    ))

    # Only the web server's security group may reach the cache cluster.
    template.add_resource(elasticache.SecurityGroupIngress(
        'RedisClusterSecurityGroupIngress',
        CacheSecurityGroupName=Ref(redisclustersg),
        EC2SecurityGroupName=Ref(webserversg),
    ))

    template.add_resource(elasticache.CacheCluster(
        'RedisCluster',
        Engine='redis',
        CacheNodeType=Ref(cachenodetype),
        NumCacheNodes='1',
        CacheSecurityGroupNames=[Ref(redisclustersg)],
    ))

    # Outputs
    template.add_output([
        Output(
            'WebsiteURL',
            Description='Application URL',
            Value=Join('', [
                'http://',
                GetAtt(webserverinstance, 'PublicDnsName'),
            ])
        )
    ])

    # Print CloudFormation Template
    print(template.to_json())
Domain, EBSOptions, ElasticsearchClusterConfig, ) t = Template() PublicCidrIp = str(ip_network(get_ip())) t.add_description('Effective DevOps in AWS: Elasticsearch') t.add_parameter(Parameter( "InstanceType", Type="String", Description="instance type", Default="t2.small.elasticsearch", AllowedValues=[ "t2.small.elasticsearch" ], )) t.add_parameter(Parameter( "InstanceCount", Default="2", Type="String", Description="Number instances in the cluster", )) t.add_parameter(Parameter( "VolumeSize", Default="10",
#!/usr/bin/env python from troposphere import Ref, Template import troposphere.ec2 as ec2 from troposphere import GetAZs, Parameter, Output, Ref, GetAtt, FindInMap import sys t = Template() count = 0 #Define some things, I think environmentName = t.add_parameter(Parameter("EnvironmentName", Description="Environment Name", Default="Demo", Type="String")) vpn_sg = t.add_parameter(Parameter("vpnsg", Description = "VPN SG", Type = "String")) web_sg = t.add_parameter(Parameter("websg", Description="Web ELB SG", Type="String")) keyname = t.add_parameter(Parameter("demokey", Description="Demo description", Type="String")) mgmtkey = t.add_parameter(Parameter("mgmtkey", Description = "VPN ssh key", Type = "String"))
) sys.path.append(os.path.abspath(os.path.dirname(__file__)) + '/../config') from config import * from pprint import pprint t = Template() t.add_description("Network general stack") """ Cloudformation Parameters """ t.add_parameter( Parameter( "stackName", Type="String", Description="Stack name", )) t.add_parameter( Parameter( "vpcCidr", Type="String", Description="VPC Cidr Block", Default="10.10.0.0/16", )) """ Cloudformation Resources """ vpc = t.add_resource(
def generate_stack_template():
    """Build the CloudFormation template for a single PostgreSQL 9.5 server.

    Creates parameters (VPC/subnet, key pair, instance type, S3 source,
    admin password), an IAM role/profile, a VPC-internal security group and
    the EC2 instance bootstrapped via cfn-init.  Returns the Template.
    """
    template = Template()

    generate_description(template)
    generate_version(template)

    # ---Parameters------------------------------------------------------------
    param_vpc_id = Parameter(
        'VpcIdentifer',
        Description='The identity of the VPC (vpc-abcdwxyz) in which this stack shall be created.',
        Type='AWS::EC2::VPC::Id',
    )
    template.add_parameter(param_vpc_id)

    param_vpc_cidr_block = Parameter(
        'VpcCidrBlock',
        Description='The CIDR block of the VPC (w.x.y.z/n) in which this stack shall be created.',
        Type='String',
        Default='10.0.0.0/16'
    )
    template.add_parameter(param_vpc_cidr_block)

    param_database_instance_subnet_id = Parameter(
        'VpcSubnetIdentifer',
        Description='The identity of the private subnet (subnet-abcdwxyz) in which the database server shall be created.',
        Type='AWS::EC2::Subnet::Id',
    )
    template.add_parameter(param_database_instance_subnet_id)

    param_keyname = Parameter(
        'PemKeyName',
        Description='Name of an existing EC2 KeyPair file (.pem) to use to create EC2 instances',
        Type='AWS::EC2::KeyPair::KeyName'
    )
    template.add_parameter(param_keyname)

    param_instance_type = Parameter(
        'EC2InstanceType',
        Description='EC2 instance type, reference this parameter to insure consistency',
        Type='String',
        Default='t2.medium',
        # Prices from (2015-12-03) (Windows, us-west (North CA))
        AllowedValues=[  # Source : https://aws.amazon.com/ec2/pricing/
            't2.small',   # $0.044/hour
            't2.micro',   # $0.022/hour
            't2.medium',  # $0.088/hour
            't2.large',   # $0.166/hour
            'm3.medium',  # $0.140/hour
            'm3.large',   # $0.28/hour
            'c4.large'    # $0.221/hour
        ],
        ConstraintDescription='Must be a valid EC2 instance type'
    )
    template.add_parameter(param_instance_type)

    param_s3_bucket = Parameter(
        'S3Bucket',
        Description='The bucket in which applicable content can be found.',
        Type='String',
        Default='author-it-deployment-test-us-east-1'
    )
    template.add_parameter(param_s3_bucket)

    param_s3_key = Parameter(
        'S3Key',
        Description='The key within the bucket in which relevant files are located.',
        Type='String',
        Default='source/database/postgresql/single'
    )
    template.add_parameter(param_s3_key)

    param_database_admin_password = Parameter(
        'PostgresAdminPassword',
        Description='The password to be used by user postgres.',
        Type='String',
        NoEcho=True
    )
    template.add_parameter(param_database_admin_password)

    # ---Mappings---------------------------------------------------------------
    mapping_environment_attribute_map = template.add_mapping(
        'EnvironmentAttributeMap',
        {
            'ap-southeast-1': {
                'DatabaseServerAmi': 'ami-1ddc0b7e'
            },
            'ap-southeast-2': {
                'DatabaseServerAmi': 'ami-0c95b86f'
            },
            'us-east-1': {
                'DatabaseServerAmi': 'ami-a4827dc9'
            },
            'us-west-1': {
                'DatabaseServerAmi': 'ami-f5f41398'
            }
        }
    )

    # ---Resources-------------------------------------------------------------
    ref_stack_id = Ref('AWS::StackId')
    ref_region = Ref('AWS::Region')
    ref_stack_name = Ref('AWS::StackName')
    path_database_admin_script = 'usr/ec2-user/postgresql/set_admin_password.sql'
    name_database_server_wait_handle = 'DatabaseServerWaitHandle'

    cmd_postgresql_initdb = dict(
        command='service postgresql-95 initdb'
    )
    cmd_start_postgresql_service = dict(
        command='service postgresql-95 start'
    )
    cmd_set_postgres_user_password = dict(
        command='psql -U postgres -f %s' % path_database_admin_script
    )
    # NOTE(review): elsewhere the service is "postgresql-95"; confirm
    # "chkconfig postgresql on" targets the right service name.
    cmd_start_postgresql_on_startup = dict(
        command='chkconfig postgresql on'
    )
    cmd_signal_success = dict(
        command='cfn-signal --exit-code $?'
    )

    # Create an instance of AWS::IAM::Role for the instance.
    # This allows:
    # - Access to S3 bucket content.
    # - Stack updates
    resource_instance_role = template.add_resource(iam.Role(
        'InstanceRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[
                Statement(
                    Action=[AssumeRole],
                    Effect=Allow,
                    Principal=Principal(
                        'Service', ['ec2.amazonaws.com']
                    )
                )
            ]
        ),
        Path='/'
    ))

    # Create the S3 policy and attach it to the role.
    template.add_resource(iam.PolicyType(
        'InstanceS3DownloadPolicy',
        PolicyName='S3Download',
        PolicyDocument={
            'Statement': [
                {
                    'Effect': 'Allow',
                    'Action': ['s3:GetObject'],
                    'Resource': Join('', [
                        'arn:aws:s3:::',
                        Ref(param_s3_bucket),
                        '/*'
                    ])
                },
                {
                    'Effect': 'Allow',
                    'Action': ['cloudformation:DescribeStacks',
                               'ec2:DescribeInstances'],
                    'Resource': '*'
                }
            ]
        },
        Roles=[Ref(resource_instance_role)]
    ))

    # Create the CloudFormation stack update policy and attach it to the role.
    template.add_resource(iam.PolicyType(
        'InstanceStackUpdatePolicy',
        PolicyName='StackUpdate',
        PolicyDocument={
            'Statement': [
                {
                    "Effect": "Allow",
                    "Action": "Update:*",
                    "Resource": "*"
                }
            ]
        },
        Roles=[Ref(resource_instance_role)]
    ))

    # Create the AWS::IAM::InstanceProfile from the role for reference in the
    # database server instance definition.
    resource_instance_profile = template.add_resource(iam.InstanceProfile(
        'InstanceProfile',
        Path='/',
        Roles=[Ref(resource_instance_role)]
    ))

    # Create a security group for the postgresql instance.
    # This must be internal to the VPC only.
    name_security_group_database = 'VpcDatabaseSecurityGroup'
    resource_database_security_group = ec2.SecurityGroup(
        name_security_group_database,
        GroupDescription=Join(' ', ['Security group for VPC database',
                                    Ref(param_vpc_id)]),
        Tags=Tags(Name=name_security_group_database),
        VpcId=Ref(param_vpc_id)
    )
    template.add_resource(resource_database_security_group)

    template.add_output(
        Output(
            'SecurityGroupForDatabase',
            Description='Security group created for database in VPC.',
            Value=Ref(resource_database_security_group)
        )
    )

    # Ingress rules, VPC-internal only.
    # FIX: removed a duplicate `ssh_port = 22` assignment present in the
    # original (it was set both before and after the database ingress rule).
    database_port = 5432
    ssh_port = 22

    # Add ingress rule from VPC to database security group for database traffic.
    template.add_resource(ec2.SecurityGroupIngress(
        'DatabaseSecurityGroupDatabaseIngress',
        CidrIp=Ref(param_vpc_cidr_block),
        FromPort=str(database_port),
        GroupId=Ref(resource_database_security_group),
        IpProtocol='tcp',
        ToPort=str(database_port)
    ))

    # Add ingress rule from VPC to database security group for ssh traffic.
    template.add_resource(ec2.SecurityGroupIngress(
        'DatabaseSecurityGroupSshIngress',
        CidrIp=Ref(param_vpc_cidr_block),
        FromPort=str(ssh_port),
        GroupId=Ref(resource_database_security_group),
        IpProtocol='tcp',
        ToPort=str(ssh_port)
    ))

    # Create the metadata for the database instance.
    name_database_server = 'DatabaseServer'
    database_instance_metadata = cloudformation.Metadata(
        cloudformation.Init({
            'config': cloudformation.InitConfig(
                packages={
                    'rpm': {
                        'postgresql': 'https://download.postgresql.org/pub/repos/yum/9.5/redhat/rhel-6-x86_64/pgdg-ami201503-95-9.5-2.noarch.rpm'
                    },
                    'yum': {
                        'postgresql95': [],
                        'postgresql95-libs': [],
                        'postgresql95-server': [],
                        'postgresql95-devel': [],
                        'postgresql95-contrib': [],
                        'postgresql95-docs': []
                    }
                },
                files=cloudformation.InitFiles({
                    # cfn-hup.conf initialization
                    '/etc/cfn/cfn-hup.conf': cloudformation.InitFile(
                        content=Join('', [
                            '[main]\n',
                            'stack=', ref_stack_id, '\n',
                            'region=', ref_region, '\n',
                            'interval=2', '\n',
                            'verbose=true', '\n'
                        ]),
                        mode='000400',
                        owner='root',
                        group='root'
                    ),
                    # cfn-auto-reloader.conf initialization
                    # FIX: written under /etc/cfn/hooks.d/ — the cfn-hup
                    # service below watches that path; the original wrote the
                    # file to /etc/cfn/ so the hook was never picked up.
                    # NOTE(review): 'action=cfn-init.exe' looks like a Windows
                    # binary name on a Linux AMI — confirm.
                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf':
                        cloudformation.InitFile(
                            content=Join('', [
                                '[cfn-auto-reloader-hook]\n',
                                'triggers=post.update\n',
                                'path=Resources.%s.Metadata.AWS::CloudFormation::Init\n' % name_database_server,
                                'action=cfn-init.exe ',
                                ' --verbose '
                                ' --stack ', ref_stack_name,
                                ' --resource %s ' % name_database_server,  # resource that defines the Metadata
                                ' --region ', ref_region, '\n'
                            ]),
                            mode='000400',
                            owner='root',
                            group='root'
                        ),
                    # pg_hba.conf retrieval from S3
                    # FIX: 'conf' and the file name are separate path segments;
                    # the original's adjacent string literals concatenated
                    # into 'confpg_hba.conf' (no separator).
                    '/var/lib/pgsql9/data/pg_hba.conf': cloudformation.InitFile(
                        source=Join('/', [
                            # Join('', ['https://s3-', ref_region, '.', 'amazonaws.com']),
                            'https://s3.amazonaws.com',
                            Ref(param_s3_bucket),
                            Ref(param_s3_key),
                            'conf',
                            'pg_hba.conf'
                        ]),
                        mode='000400',
                        owner='root',
                        group='root'
                    ),
                    # postgresql.conf retrieval from S3 (same FIX as above)
                    '/var/lib/pgsql9/data/postgresql.conf':
                        cloudformation.InitFile(
                            source=Join('/', [
                                # Join('', ['https://s3-', ref_region, '.', 'amazonaws.com']),
                                'https://s3.amazonaws.com',
                                Ref(param_s3_bucket),
                                Ref(param_s3_key),
                                'conf',
                                'postgresql.conf'
                            ]),
                            mode='000400',
                            owner='root',
                            group='root'
                        ),
                    # pg_ident.conf retrieval from S3 (same FIX as above)
                    '/var/lib/pgsql9/data/pg_ident.conf':
                        cloudformation.InitFile(
                            source=Join('/', [
                                # Join('', ['https://s3-', ref_region, '.', 'amazonaws.com']),
                                'https://s3.amazonaws.com',
                                Ref(param_s3_bucket),
                                Ref(param_s3_key),
                                'conf',
                                'pg_ident.conf'
                            ]),
                            mode='000400',
                            owner='root',
                            group='root'
                        ),
                    # script to set postgresql admin password.
                    # (admin user = '******')
                    # FIX: inline SQL is file *content*, not a *source* URL —
                    # the original passed it as source=, which cfn-init would
                    # try to download.
                    # NOTE(review): the password value is interpolated unquoted
                    # into the SQL — confirm it should be wrapped in quotes.
                    path_database_admin_script: cloudformation.InitFile(
                        content=Join('', [
                            'ALTER USER postgres WITH PASSWORD ',
                            Ref(param_database_admin_password),
                            ';',
                            '\n'
                        ])
                    )
                }),
                commands={
                    '10-postgresql_initdb': cmd_postgresql_initdb,
                    '20-start_postgresql_service': cmd_start_postgresql_service,
                    '30-set-postgres-user-password': cmd_set_postgres_user_password,
                    '40-start-postgresql-on-startup': cmd_start_postgresql_on_startup,
                    # '99-signal-success': cmd_signal_success
                },
                services=dict(
                    sysvinit=cloudformation.InitServices(
                        {
                            # start cfn-hup service -
                            # required for CloudFormation stack update
                            'cfn-hup': cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                                files=[
                                    '/etc/cfn/cfn-hup.conf',
                                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                                ]
                            ),
                            # start postgresql service
                            'postgresql-9.5': cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True
                            ),
                            # Disable sendmail service - not required.
                            'sendmail': cloudformation.InitService(
                                enabled=False,
                                ensureRunning=False
                            )
                        }
                    )
                )
            )
        }),
        cloudformation.Authentication({
            'S3AccessCredentials': cloudformation.AuthenticationBlock(
                buckets=[Ref(param_s3_bucket)],
                roleName=Ref(resource_instance_role),
                type='S3'
            )
        })
    )

    # Add a wait handle to receive the completion signal.
    # resource_database_server_wait_handle = template.add_resource(
    #     cloudformation.WaitConditionHandle(
    #         name_database_server_wait_handle
    #     )
    # )
    # template.add_resource(
    #     cloudformation.WaitCondition(
    #         'DatabaseServerWaitCondition',
    #         DependsOn=name_database_server,
    #         Handle=Ref(resource_database_server_wait_handle),
    #         Timeout=300,
    #     )
    # )

    resource_database_server = ec2.Instance(
        name_database_server,
        DependsOn=name_security_group_database,
        IamInstanceProfile=Ref(resource_instance_profile),
        Metadata=database_instance_metadata,
        ImageId=FindInMap('EnvironmentAttributeMap', ref_region,
                          'DatabaseServerAmi'),
        InstanceType=Ref(param_instance_type),
        KeyName=Ref(param_keyname),
        SecurityGroupIds=[Ref(resource_database_security_group)],
        SubnetId=Ref(param_database_instance_subnet_id),
        Tags=Tags(Name=name_database_server, VPC=Ref(param_vpc_id)),
        UserData=Base64(
            Join(
                '',
                [
                    '#!/bin/bash -xe\n',
                    'yum update -y aws-cfn-bootstrap\n',
                    '/opt/aws/bin/cfn-init --verbose ',
                    ' --stack ', ref_stack_name,
                    ' --resource DatabaseServer ',
                    ' --region ', ref_region, '\n',
                    '/opt/aws/bin/cfn-signal --exit-code $? ',
                    ' --stack ', ref_stack_name,
                    ' --resource ', name_database_server, '\n'
                ]
            )
        )
    )
    template.add_resource(resource_database_server)

    template.add_output(
        Output('DatabaseServer',
               Description='PostgreSQL single instance database server',
               Value=Ref(resource_database_server)
               )
    )

    return template
AnsiblePullCmd = \ "/usr/local/bin/ansible-pull -U {} Chapter04/ansible/{}.yml -i localhost".format( GithubAnsibleURL, ApplicationName ) PublicCidrIp = str(ip_network(get_ip())) t = Template() t.set_description("Effective DevOps in AWS: HelloWorld web application") t.add_parameter( Parameter( "KeyPair", Description="Name of an existing EC2 KeyPair to SSH", Type="AWS::EC2::KeyPair::KeyName", ConstraintDescription="must be the name of an existing EC2 KeyPair.", )) t.add_resource( ec2.SecurityGroup( "SecurityGroup", GroupDescription="Allow SSH and TCP/{} access".format(ApplicationPort), SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp=PublicCidrIp, ),
from troposphere.elasticbeanstalk import ApplicationVersion, OptionSettings from troposphere.elasticbeanstalk import SourceBundle t = Template() t.add_description( "AWS CloudFormation Sample Template ElasticBeanstalk_Python_Sample: " "Configure and launch the AWS Elastic Beanstalk Python sample " "application. **WARNING** This template creates one or more Amazon EC2 " "instances. You will be billed for the AWS resources used if you create " "a stack from this template.") keyname = t.add_parameter(Parameter( "KeyName", Description="Name of an existing EC2 KeyPair to enable SSH access " "to the AWS Elastic Beanstalk instance", Type="String", )) sampleApp = t.add_resource(Application( "sampleApplication", Description="AWS Elastic Beanstalk Python Sample Application", ApplicationVersions=[ ApplicationVersion( VersionLabel="Initial Version", Description="Version 1.0", SourceBundle=SourceBundle( S3Bucket=Join( '-', ["elasticbeanstalk-samples", Ref("AWS::Region")]), S3Key="python-sample.zip" )
AnsiblePullCmd = \ "/usr/local/bin/ansible-pull -U {} {}.yml -i localhost".format( GithubAnsibleURL, ApplicationName ) PublicCidrIp = str(ip_network(get_ip())) t = Template() t.add_description("Effective DevOps in AWS: HelloWorld web application") t.add_parameter( Parameter( "KeyPair", Description="Name of an existing EC2 KeyPair to SSH", Type="AWS::EC2::KeyPair::KeyName", ConstraintDescription="must be the name of an existing EC2 KeyPair.", )) t.add_parameter(Parameter("VpcId", Type="AWS::EC2::VPC::Id", Description="VPC")) t.add_parameter( Parameter("PublicSubnet", Description="PublicSubnet", Type="List<AWS::EC2::Subnet::Id>", ConstraintDescription="PublicSubnet")) t.add_parameter( Parameter(
t.add_mapping("RegionMap", { "us-east-1" : { "S3hostedzoneID" : "Z3AQBSTGFYJSTF", "websiteendpoint" : "s3-website-us-east-1.amazonaws.com" }, "us-west-1" : { "S3hostedzoneID" : "Z2F56UZL2M1ACD", "websiteendpoint" : "s3-website-us-west-1.amazonaws.com" }, "us-west-2" : { "S3hostedzoneID" : "Z3BJ6K6RIION7M", "websiteendpoint" : "s3-website-us-west-2.amazonaws.com" }, "eu-west-1" : { "S3hostedzoneID" : "Z1BKCTXD74EZPE", "websiteendpoint" : "s3-website-eu-west-1.amazonaws.com" }, "ap-southeast-1" : { "S3hostedzoneID" : "Z3O0J2DXBE1FTB", "websiteendpoint" : "s3-website-ap-southeast-1.amazonaws.com" }, "ap-southeast-2" : { "S3hostedzoneID" : "Z1WCIGYICN2BYD", "websiteendpoint" : "s3-website-ap-southeast-2.amazonaws.com" }, "ap-northeast-1" : { "S3hostedzoneID" : "Z2M4EHUR26P7ZW", "websiteendpoint" : "s3-website-ap-northeast-1.amazonaws.com" }, "sa-east-1" : { "S3hostedzoneID" : "Z31GFT0UA1I2HV", "websiteendpoint" : "s3-website-sa-east-1.amazonaws.com" } }) hostedzone = t.add_parameter(Parameter( "HostedZone", Description="The DNS name of an existing Amazon Route 53 hosted zone", Type="String", )) root_bucket = t.add_resource( Bucket("RootBucket", BucketName=Ref(hostedzone), AccessControl=PublicRead, WebsiteConfiguration=WebsiteConfiguration( IndexDocument="index.html", ) )) www_bucket = t.add_resource( Bucket("WWWBucket", BucketName=Join('.', ['www', Ref(hostedzone)]),
def create_template(): template = Template(Description="ECR image tagger utility") deployment_id = template.add_parameter( Parameter( "DeploymentId", Type="String", ) ) artifact_repository = template.add_parameter( Parameter( "ArtifactRepository", Type="String", ) ) image_digest = template.add_parameter( Parameter( "ImageDigest", Type="String", ) ) desired_image_tag = template.add_parameter( Parameter( "DesiredImageTag", Type="String", ) ) image_uri = template.add_parameter( Parameter( "ImageUri", Type="String", ) ) role = template.add_resource( Role( "Role", AssumeRolePolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[sts.AssumeRole], Principal=Principal("Service", "lambda.amazonaws.com"), ), ], ), ) ) function, alias = common.add_versioned_lambda( template, Ref(deployment_id), Function( "Function", MemorySize=256, Timeout=30, Role=GetAtt(role, "Arn"), PackageType="Image", Code=Code( ImageUri=Ref(image_uri), ), ImageConfig=ImageConfig( Command=[ Join(":", (handler.__module__, handler.__name__)), ], ), ), ) log_group = template.add_resource( LogGroup( "LogGroup", LogGroupName=Join("/", ["/aws/lambda", Ref(function)]), RetentionInDays=common.LOG_RETENTION_DAYS, ) ) policy = template.add_resource( PolicyType( "Policy", PolicyName=Ref(role), PolicyDocument=PolicyDocument( Version="2012-10-17", Statement=[ Statement( Effect=Allow, Action=[logs.PutLogEvents, logs.CreateLogStream], Resource=[GetAtt(log_group, "Arn")], ), Statement( Effect=Allow, Action=[ecr.BatchGetImage, ecr.PutImage], # TODO scope down Resource=["*"], ), ], ), Roles=[Ref(role)], ) ) template.add_resource( CustomResource( "ImageTag", ServiceToken=Ref(alias), DeploymentId=Ref(deployment_id), RepositoryName=Ref(artifact_repository), ImageDigest=Ref(image_digest), ImageTag=Ref(desired_image_tag), DependsOn=[policy], ) ) return template