def setup_parameters(self):
    """Add every parameter declared in ``PARAMETERS`` to the template.

    ``PARAMETERS`` maps a CloudFormation parameter name to an attribute
    dict with keys ``type`` (required), ``description`` (optional) and
    ``default`` (optional).
    """
    t = self.template
    # Default to {} so a class that never defines PARAMETERS takes the
    # "no parameters" path instead of raising AttributeError (the bare
    # getattr(self, 'PARAMETERS') did).
    parameters = getattr(self, 'PARAMETERS', {})
    if not parameters:
        logger.debug("No parameters defined.")
        return
    for param, attrs in parameters.items():
        p = Parameter(param,
                      Type=attrs.get('type'),
                      Description=attrs.get('description', ''))
        # Only set Default when explicitly provided; a parameter with no
        # Default is required at stack-creation time.
        if 'default' in attrs:
            p.Default = attrs['default']
        t.add_parameter(p)
def setup_parameters(self):
    """Register the declared CloudFormation parameters on the template.

    Prefers the ``CF_PARAMETERS`` attribute and falls back to the legacy
    ``PARAMETERS`` attribute for backwards compatibility. Each entry maps
    a parameter name to a dict with ``type``, optional ``description``
    and optional ``default`` keys.
    """
    template = self.template
    declared = getattr(self, 'CF_PARAMETERS',
                       getattr(self, 'PARAMETERS', {}))
    if not declared:
        logger.debug("No parameters defined.")
        return
    for name, attrs in declared.items():
        parameter = Parameter(
            name,
            Type=attrs.get('type'),
            Description=attrs.get('description', ''))
        # A missing 'default' key leaves the parameter required.
        if 'default' in attrs:
            parameter.Default = attrs['default']
        template.add_parameter(parameter)
def set_up_stack(self):
    """Build the MMW application-server stack.

    Reads stack inputs, declares all CloudFormation parameters, then
    delegates to helper methods for security groups, load balancers and
    auto-scaling resources, and finally exports the load-balancer
    endpoints and hosted-zone IDs as stack outputs.
    """
    super(Application, self).set_up_stack()

    # Inputs supplied by the deployment tooling.
    self.default_tags = self.get_input('Tags').copy()
    self.region = self.get_input('Region')

    self.add_description('Application server stack for MMW')

    # Parameters
    self.color = self.add_parameter(
        Parameter('StackColor', Type='String',
                  Description='Stack color',
                  AllowedValues=['Blue', 'Green']), 'StackColor')

    self.keyname = self.add_parameter(
        Parameter('KeyName', Type='String',
                  Description='Name of an existing EC2 key pair'),
        'KeyName')

    self.availability_zones = self.add_parameter(
        Parameter(
            'AvailabilityZones', Type='CommaDelimitedList',
            Description='Comma delimited list of availability zones'),
        'AvailabilityZones')

    # NoEcho keeps secret values out of console/API describe output.
    self.rds_password = self.add_parameter(
        Parameter(
            'RDSPassword', Type='String', NoEcho=True,
            Description='Database password',
        ), 'RDSPassword')

    self.app_server_instance_type = self.add_parameter(
        Parameter(
            'AppServerInstanceType', Type='String', Default='t2.small',
            Description='Application server EC2 instance type',
            AllowedValues=EC2_INSTANCE_TYPES,
            ConstraintDescription='must be a valid EC2 instance type.'),
        'AppServerInstanceType')

    # Default AMI is resolved at template-generation time.
    self.app_server_ami = self.add_parameter(
        Parameter('AppServerAMI', Type='String',
                  Default=self.get_recent_app_server_ami(),
                  Description='Application server AMI'), 'AppServerAMI')

    self.app_server_instance_profile = self.add_parameter(
        Parameter('AppServerInstanceProfile', Type='String',
                  Default='AppServerInstanceProfile',
                  Description='Application server instance profile'),
        'AppServerInstanceProfile')

    # AutoScalingGroup sizing.
    self.app_server_auto_scaling_desired = self.add_parameter(
        Parameter(
            'AppServerAutoScalingDesired', Type='String', Default='1',
            Description='Application server AutoScalingGroup desired'),
        'AppServerAutoScalingDesired')

    self.app_server_auto_scaling_min = self.add_parameter(
        Parameter(
            'AppServerAutoScalingMin', Type='String', Default='1',
            Description='Application server AutoScalingGroup minimum'),
        'AppServerAutoScalingMin')

    self.app_server_auto_scaling_max = self.add_parameter(
        Parameter(
            'AppServerAutoScalingMax', Type='String', Default='1',
            Description='Application server AutoScalingGroup maximum'),
        'AppServerAutoScalingMax')

    # Scheduled ASG capacity changes (cron-style recurrence strings).
    self.app_server_auto_scaling_schedule_start_recurrence = self.add_parameter(  # NOQA
        Parameter(
            'AppServerAutoScalingScheduleStartRecurrence', Type='String',
            Default='0 12 * * 1-5',
            Description='Application server ASG schedule start recurrence'
        ), 'AppServerAutoScalingScheduleStartRecurrence')

    self.app_server_auto_scaling_schedule_start_capacity = self.add_parameter(  # NOQA
        Parameter(
            'AppServerAutoScalingScheduleStartCapacity', Type='String',
            Default='1',
            Description='Application server ASG schedule start capacity'),
        'AppServerAutoScalingScheduleStartCapacity')

    self.app_server_auto_scaling_schedule_end_recurrence = self.add_parameter(  # NOQA
        Parameter(
            'AppServerAutoScalingScheduleEndRecurrence', Type='String',
            Default='0 0 * * *',
            Description='Application server ASG schedule end recurrence'),
        'AppServerAutoScalingScheduleEndRecurrence')

    self.app_server_auto_scaling_schedule_end_capacity = self.add_parameter(  # NOQA
        Parameter(
            'AppServerAutoScalingScheduleEndCapacity', Type='String',
            Default='1',
            Description='Application server ASG schedule end capacity'),
        'AppServerAutoScalingScheduleEndCapacity')

    self.ssl_certificate_arn = self.add_parameter(
        Parameter('SSLCertificateARN', Type='String',
                  Description='ARN for a SSL certificate stored in IAM'),
        'SSLCertificateARN')

    self.backward_compat_ssl_certificate_arn = self.add_parameter(
        Parameter('BackwardCompatSSLCertificateARN', Type='String',
                  Description='ARN for a SSL certificate stored in IAM'),
        'BackwardCompatSSLCertificateARN')

    self.public_subnets = self.add_parameter(
        Parameter('PublicSubnets', Type='CommaDelimitedList',
                  Description='A list of public subnets'),
        'PublicSubnets')

    self.private_subnets = self.add_parameter(
        Parameter('PrivateSubnets', Type='CommaDelimitedList',
                  Description='A list of private subnets'),
        'PrivateSubnets')

    self.public_hosted_zone_name = self.add_parameter(
        Parameter('PublicHostedZoneName', Type='String',
                  Description='Route 53 public hosted zone name'),
        'PublicHostedZoneName')

    self.vpc_id = self.add_parameter(
        Parameter('VpcId', Type='String', Description='VPC ID'), 'VpcId')

    self.notification_topic_arn = self.add_parameter(
        Parameter(
            'GlobalNotificationsARN', Type='String',
            Description='ARN for an SNS topic to broadcast notifications'),
        'GlobalNotificationsARN')

    # Blue/green CloudFront endpoints for the tile servers.
    self.blue_tile_distribution_endpoint = self.add_parameter(
        Parameter(
            'BlueTileServerDistributionEndpoint', Type='String',
            Description='Endpoint for blue tile CloudFront distribution'),
        'BlueTileServerDistributionEndpoint')

    self.green_tile_distribution_endpoint = self.add_parameter(
        Parameter(
            'GreenTileServerDistributionEndpoint', Type='String',
            Description='Endpoint for green tile CloudFront distribution'),
        'GreenTileServerDistributionEndpoint')

    # Third-party integration settings (secrets are NoEcho).
    self.itsi_base_url = self.add_parameter(
        Parameter('ITSIBaseURL', Type='String',
                  Description='Base URL for ITSI portal'), 'ITSIBaseURL')

    self.itsi_secret_key = self.add_parameter(
        Parameter('ITSISecretKey', Type='String', NoEcho=True,
                  Description='Secret key for ITSI portal integration'),
        'ITSISecretKey')

    self.concord_secret_key = self.add_parameter(
        Parameter('ConcordSecretKey', Type='String', NoEcho=True,
                  Description='Secret key for Concord OAuth integration'),
        'ConcordSecretKey')

    self.hydroshare_base_url = self.add_parameter(
        Parameter('HydroShareBaseURL', Type='String',
                  Description='Base URL for HydroShare portal'),
        'HydroShareBaseURL')

    self.hydroshare_secret_key = self.add_parameter(
        Parameter(
            'HydroShareSecretKey', Type='String', NoEcho=True,
            Description='Secret key for HydroShare portal integration'),
        'HydroShareSecretKey')

    self.srat_catchment_api_url = self.add_parameter(
        Parameter('SRATCatchmentAPIURL', Type='String',
                  Description='URL for the SRAT Catchment API'),
        'SRATCatchmentAPIURL')

    self.srat_catchment_api_key = self.add_parameter(
        Parameter('SRATCatchmentAPIKey', Type='String', NoEcho=True,
                  Description='API key for the SRAT Catchment API'),
        'SRATCatchmentAPIKey')

    self.client_app_user_password = self.add_parameter(
        Parameter(
            'ClientAppUserPassword', Type='String', NoEcho=True,
            Description='Password for the client apps django account',
        ), 'ClientAppUserPassword')

    # Papertrail log-forwarding destination.
    self.papertrail_host = self.add_parameter(
        Parameter(
            'PapertrailHost', Type='String',
            Description='Hostname for Papertrail log destination',
        ), 'PapertrailHost')

    self.papertrail_port = self.add_parameter(
        Parameter(
            'PapertrailPort', Type='String',
            Description='Port for Papertrail log destination',
        ), 'PapertrailPort')

    # Resource creation is delegated to helpers: the LB security group
    # feeds the load balancers, the instance security group feeds the
    # auto-scaling resources.
    app_server_lb_security_group, \
        app_server_security_group = self.create_security_groups()
    app_server_lb, \
        backward_compat_app_server_lb = self.create_load_balancers(
            app_server_lb_security_group)

    self.create_auto_scaling_resources(app_server_security_group,
                                       app_server_lb,
                                       backward_compat_app_server_lb)

    # Export LB endpoints and hosted-zone IDs for other stacks.
    self.add_output(
        Output('AppServerLoadBalancerEndpoint',
               Value=GetAtt(app_server_lb, 'DNSName')))
    self.add_output(
        Output('AppServerLoadBalancerHostedZoneNameID',
               Value=GetAtt(app_server_lb, 'CanonicalHostedZoneNameID')))
    self.add_output(
        Output('BackwardCompatAppServerLoadBalancerEndpoint',
               Value=GetAtt(backward_compat_app_server_lb, 'DNSName')))
    self.add_output(
        Output('BackwardCompatAppServerLoadBalancerHostedZoneNameID',
               Value=GetAtt(backward_compat_app_server_lb,
                            'CanonicalHostedZoneNameID')))
import sys import troposphere.elasticloadbalancing as elb from troposphere.elasticloadbalancing import Policy as ELBPolicy from troposphere.autoscaling import AutoScalingGroup, Tag from troposphere.autoscaling import LaunchConfiguration t = Template() # Take an existing VPC and a subnet having access to an S3 endpoint # Existing VPC input VPCIDParam = t.add_parameter(Parameter( "VPCID", Description="The VPC ID you wish to deploy in", Type="AWS::EC2::VPC::Id", )) # Subnet with S3 endpoint SubnetsWithS3EndpointParam = t.add_parameter(Parameter( "SubnetsWithS3Endpoint", Description="The private subnets with a configured S3 endpoint. Recommended to be spread across multiple AZ's.", Type="List<AWS::EC2::Subnet::Id>", )) # Key pair for autoscaling NAT instances KeyPairNameParam = t.add_parameter(Parameter( "KeyPairName", Description="Name of an existing EC2 KeyPair to enable SSH access to the instances", Type="AWS::EC2::KeyPair::KeyName",
cache_node_type = template.add_parameter( Parameter( "CacheNodeType", Default="cache.t2.micro", Description="Cache instance type", Type="String", AllowedValues=[ dont_create_value, 'cache.t2.micro', 'cache.t2.small', 'cache.t2.medium', 'cache.m3.medium', 'cache.m3.large', 'cache.m3.xlarge', 'cache.m3.2xlarge', 'cache.m4.large', 'cache.m4.xlarge', 'cache.m4.2xlarge', 'cache.m4.4xlarge', 'cache.m4.10xlarge', 'cache.r3.large', 'cache.r3.xlarge', 'cache.r3.2xlarge', 'cache.r3.4xlarge', 'cache.r3.8xlarge', ], ConstraintDescription="must select a valid cache node type.", ), group="Cache", label="Instance Type",
def main():
    """Build and print (JSON, stdout) a sample CloudFormation template:
    a network load balancer fronting a single EC2 web instance."""
    template = Template()
    template.add_version("2010-09-09")

    template.add_description(
        "AWS CloudFormation Sample Template: NLB with 1 EC2 instance")

    # Registers the region -> AMI mapping consumed by FindInMap below.
    AddAMI(template)

    # Add the Parameters
    keyname_param = template.add_parameter(
        Parameter(
            "KeyName",
            Type="String",
            Default="mark",
            Description="Name of an existing EC2 KeyPair to "
            "enable SSH access to the instance",
        ))

    template.add_parameter(
        Parameter(
            "InstanceType",
            Type="String",
            Description="WebServer EC2 instance type",
            Default="m1.small",
            AllowedValues=[
                "t1.micro", "m1.small", "m1.medium", "m1.large",
                "m1.xlarge", "m2.xlarge", "m2.2xlarge", "m2.4xlarge",
                "c1.medium", "c1.xlarge", "cc1.4xlarge", "cc2.8xlarge",
                "cg1.4xlarge"
            ],
            ConstraintDescription="must be a valid EC2 instance type.",
        ))

    webport_param = template.add_parameter(
        Parameter(
            "WebServerPort",
            Type="String",
            Default="8888",
            Description="TCP/IP port of the web server",
        ))

    # Hard-coded sample network defaults; override at stack creation.
    subnetA = template.add_parameter(
        Parameter("subnetA", Type="String", Default="subnet-096fd06d"))
    subnetB = template.add_parameter(
        Parameter("subnetB", Type="String", Default="subnet-1313ef4b"))
    VpcId = template.add_parameter(
        Parameter("VpcId", Type="String", Default="vpc-82c514e6"))

    # Define the instance security group: SSH plus the web port, open
    # to the world.
    instance_sg = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription="Enable SSH and HTTP access on the inbound port",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=Ref(webport_param),
                    ToPort=Ref(webport_param),
                    CidrIp="0.0.0.0/0",
                ),
            ]))

    # One Elastic IP per subnet for the NLB's static addresses.
    eipA = template.add_resource(ec2.EIP(
        'eipA',
        Domain='vpc',
    ))
    eipB = template.add_resource(ec2.EIP(
        'eipB',
        Domain='vpc',
    ))

    # Add the web server instance
    WebInstance = template.add_resource(
        ec2.Instance(
            "WebInstance",
            SecurityGroups=[Ref(instance_sg)],
            KeyName=Ref(keyname_param),
            InstanceType=Ref("InstanceType"),
            ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
            # Passes the web port to the instance via user data.
            UserData=Base64(Ref(webport_param)),
        ))

    # Add the network LB
    NetworkLB = template.add_resource(
        elb.LoadBalancer(
            "NetworkLB",
            Name="NetworkLB",
            Scheme="internet-facing",
            SubnetMappings=[
                elb.SubnetMapping(AllocationId=GetAtt(eipA, 'AllocationId'),
                                  SubnetId=Ref(subnetA)),
                elb.SubnetMapping(AllocationId=GetAtt(eipB, 'AllocationId'),
                                  SubnetId=Ref(subnetB))
            ],
            Type='network'))

    # Target group health-checks the instance over HTTP and registers
    # the single web instance on the web port.
    TargetGroupWeb = template.add_resource(
        elb.TargetGroup(
            "TargetGroupWeb",
            HealthCheckIntervalSeconds="30",
            HealthCheckProtocol="HTTP",
            HealthCheckTimeoutSeconds="10",
            HealthyThresholdCount="4",
            Matcher=elb.Matcher(HttpCode="200"),
            Name="WebTarget",
            Port=Ref(webport_param),
            Protocol="HTTP",
            Targets=[
                elb.TargetDescription(Id=Ref(WebInstance),
                                      Port=Ref(webport_param))
            ],
            UnhealthyThresholdCount="3",
            VpcId=Ref(VpcId)))

    # Listener on port 80 forwards to the web target group.
    template.add_resource(
        elb.Listener(
            "Listener",
            Port="80",
            Protocol="HTTP",
            LoadBalancerArn=Ref(NetworkLB),
            DefaultActions=[
                elb.Action(Type="forward",
                           TargetGroupArn=Ref(TargetGroupWeb))
            ]))

    template.add_output(
        Output("URL",
               Description="URL of the sample website",
               Value=Join("", ["http://", GetAtt(NetworkLB, "DNSName")])))

    print(template.to_json())
'AutoAssignPublicIp': { 'default': 'Auto Assign Public IP' }, }, } }) # # Parameters # param_number_of_azs = t.add_parameter( Parameter('NumberOfAZs', Description='Number of Availability Zones to use in the VPC. ' 'This must match your selections in the list of ' 'Availability Zones parameter.', Default='2', AllowedValues=['2', '3', '4'], Type='String')) param_availability_zones = t.add_parameter( Parameter('AvailabilityZones', Description= 'List of Availability Zones to use for the subnets in the VPC.' 'Note: The logical order is preserved.', Type='List<AWS::EC2::AvailabilityZone::Name>')) param_vpc_cidr = t.add_parameter( Parameter( 'VpcCidr', Description='CIDR block for the VPC',
from __future__ import print_function from troposphere import (Template, Parameter, Ref, Condition, Equals, And, Or, Not, If) from troposphere import ec2 parameters = { "One": Parameter( "One", Type="String", ), "Two": Parameter( "Two", Type="String", ), "Three": Parameter( "Three", Type="String", ), "Four": Parameter( "Four", Type="String", ), "SshKeyName": Parameter( "SshKeyName", Type="String", ) } conditions = { "OneEqualsFoo":
def set_up_stack(self):
    """Sets up the stack.

    Declares parameters, security groups and their cross-group rules,
    an ELB with backend-error CloudWatch alarms, an auto-scaling group
    with CPU- or network-based scaling alarms (depending on
    STACK_NAME_PREFIX), a Route 53 alias record, and load-balancer
    outputs.

    Raises:
        MKInputError: if the subclass does not define INPUTS,
            STACK_NAME_PREFIX and HEALTH_ENDPOINT.
    """
    if not self.INPUTS or not self.STACK_NAME_PREFIX or not self.HEALTH_ENDPOINT:
        raise MKInputError(
            'Must define INPUTS, STACK_NAME_PREFIX, and HEALTH_ENDPOINT')

    super(AppServerStack, self).set_up_stack()

    tags = self.get_input('Tags').copy()
    self.add_description('{} App Server Stack for Cac'.format(
        self.STACK_NAME_PREFIX))
    assert isinstance(tags, dict), 'tags must be a dictionary'
    self.availability_zones = get_availability_zones()

    tags.update({'StackType': 'AppServer'})
    self.default_tags = tags

    # Parameters. Each is bound to a named input via `source=`.
    self.app_server_instance_type_parameter = self.add_parameter(
        Parameter(
            'AppServerInstanceType', Type='String', Default='t2.medium',
            Description='NAT EC2 instance type',
            AllowedValues=EC2_INSTANCE_TYPES,
            ConstraintDescription='must be a valid EC2 instance type.'),
        source='AppServerInstanceType')

    self.param_app_server_iam_profile = self.add_parameter(
        Parameter('AppServerIAMProfile', Type='String',
                  Description='IAM Profile for instances'),
        source='AppServerIAMProfile')

    self.app_server_ami = self.add_parameter(Parameter(
        'AppServerAMI', Type='String',
        Description='{} Server EC2 AMI'.format(self.STACK_NAME_PREFIX)),
        source='AppServerAMI')

    self.keyname_parameter = self.add_parameter(Parameter(
        'KeyName', Type='String', Default='cac',
        Description='Name of an existing EC2 key pair'),
        source='KeyName')

    self.param_color = self.add_parameter(Parameter(
        'StackColor', Type='String',
        Description='Stack color',
        AllowedValues=['Blue', 'Green', 'Orange']),
        source='StackColor')

    self.param_stacktype = self.add_parameter(Parameter(
        'StackType', Type='String',
        Description='Stack type',
        AllowedValues=['Development', 'Staging', 'Production']),
        source='StackType')

    self.param_public_hosted_zone_name = self.add_parameter(
        Parameter('PublicHostedZoneName', Type='String',
                  Description='Public hosted zone name'),
        source='PublicHostedZoneName')

    self.param_vpc = self.add_parameter(Parameter(
        'VpcId', Type='String', Description='Name of an existing VPC'),
        source='VpcId')

    self.param_notification_arn = self.add_parameter(
        Parameter(
            'GlobalNotificationsARN', Type='String',
            Description='Physical resource ID on an AWS::SNS::Topic for '
                        'notifications'),
        source='GlobalNotificationsARN')

    self.param_ssl_certificate_arn = self.add_parameter(
        Parameter('SSLCertificateARN', Type='String',
                  Description=
                  'Physical resource ID on an AWS::IAM::ServerCertificate '
                  'for the application server load balancer'),
        source='SSLCertificateARN')

    self.param_public_subnets = self.add_parameter(
        Parameter('PublicSubnets', Type='CommaDelimitedList',
                  Description='A list of public subnets'),
        source='AppServerPublicSubnets')

    self.param_private_subnets = self.add_parameter(
        Parameter('PrivateSubnets', Type='CommaDelimitedList',
                  Description='A list of private subnets'),
        source='AppServerPrivateSubnets')

    self.param_bastion_security_group = self.add_parameter(
        Parameter('BastionSecurityGroup', Type='String',
                  Description='The ID of the bastion security group'),
        source='BastionSecurityGroup')

    self.param_database_security_group = self.add_parameter(
        Parameter('DatabaseSecurityGroup', Type='String',
                  Description='The ID of the database security group'),
        source='DatabaseSecurityGroup')

    self.param_nat_security_group = self.add_parameter(
        Parameter('NATSecurityGroup', Type='String',
                  Description='The ID of the NAT security group'),
        source='NATSecurityGroup')

    self.param_min_size = self.add_parameter(Parameter(
        'ASGMinSize', Type='Number', Default='1',
        Description='Min size of ASG'),
        source='ASGMinSize')

    self.param_max_size = self.add_parameter(Parameter(
        'ASGMaxSize', Type='Number', Default='1',
        Description='Max size of ASG'),
        source='ASGMaxSize')

    self.param_desired_capacity = self.add_parameter(
        Parameter('ASGDesiredCapacity', Type='Number', Default='1',
                  Description='Desired capacity of ASG'),
        source='ASGDesiredCapacity')

    #
    # Security Group
    #
    # Load balancer accepts HTTP/HTTPS from anywhere.
    app_server_load_balancer_security_group = self.add_resource(
        ec2.SecurityGroup(
            'sgAppServerLoadBalancer',
            GroupDescription=
            'Enables access to app servers via a load balancer',
            VpcId=Ref(self.param_vpc),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(IpProtocol='tcp',
                                      CidrIp=ALLOW_ALL_CIDR,
                                      FromPort=p,
                                      ToPort=p) for p in [80, 443]
            ],
            Tags=Tags(Name='sgAppServerLoadBalancer',
                      Color=Ref(self.param_color))))

    # App servers accept 22/80/443 from within the VPC, plus 80 and 443
    # from the load balancer's security group.
    app_server_security_group = self.add_resource(
        ec2.SecurityGroup(
            'sgAppServer',
            GroupDescription='Enables access to App Servers',
            VpcId=Ref(self.param_vpc),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(IpProtocol='tcp',
                                      CidrIp=VPC_CIDR,
                                      FromPort=p,
                                      ToPort=p) for p in [22, 80, 443]
            ] + [
                ec2.SecurityGroupRule(IpProtocol='tcp',
                                      SourceSecurityGroupId=Ref(sg),
                                      FromPort=80,
                                      ToPort=80)
                for sg in [app_server_load_balancer_security_group]
            ] + [
                ec2.SecurityGroupRule(IpProtocol='tcp',
                                      SourceSecurityGroupId=Ref(sg),
                                      FromPort=443,
                                      ToPort=443)
                for sg in [app_server_load_balancer_security_group]
            ],
            SecurityGroupEgress=[
                ec2.SecurityGroupRule(IpProtocol='tcp',
                                      CidrIp=ALLOW_ALL_CIDR,
                                      FromPort=p,
                                      ToPort=p)
                for p in [80, 443, PAPERTRAIL_PORT]
            ],
            Tags=Tags(Name='sgAppServer', Color=Ref(self.param_color))))

    # ELB to App Server
    self.add_resource(
        ec2.SecurityGroupEgress(
            'sgEgressELBtoAppHTTP',
            GroupId=Ref(app_server_load_balancer_security_group),
            DestinationSecurityGroupId=Ref(app_server_security_group),
            IpProtocol='tcp',
            FromPort=80,
            ToPort=80))

    self.add_resource(
        ec2.SecurityGroupEgress(
            'sgEgressELBtoAppHTTPS',
            GroupId=Ref(app_server_load_balancer_security_group),
            DestinationSecurityGroupId=Ref(app_server_security_group),
            IpProtocol='tcp',
            FromPort=443,
            ToPort=443))

    # Bastion to App Server, app server to db, app server to inet
    # Each (source, destination, ports) tuple yields a paired egress
    # rule on the source and ingress rule on the destination per port.
    rules = [(self.param_bastion_security_group,
              app_server_security_group,
              [80, 443, 22]),
             (app_server_security_group,
              self.param_database_security_group,
              [POSTGRES]),
             (app_server_security_group,
              self.param_nat_security_group,
              [80, 443, 22, 587, PAPERTRAIL_PORT])]
    for num, (srcsg, destsg, ports) in enumerate(rules):
        for port in ports:
            self.add_resource(
                ec2.SecurityGroupEgress(
                    'sgEgress{}p{}'.format(num, port),
                    GroupId=Ref(srcsg),
                    DestinationSecurityGroupId=Ref(destsg),
                    IpProtocol='tcp',
                    FromPort=port,
                    ToPort=port))
            self.add_resource(
                ec2.SecurityGroupIngress(
                    'sgIngress{}p{}'.format(num, port),
                    GroupId=Ref(destsg),
                    SourceSecurityGroupId=Ref(srcsg),
                    IpProtocol='tcp',
                    FromPort=port,
                    ToPort=port))

    #
    # ELB
    #
    app_server_load_balancer = self.add_resource(
        elb.LoadBalancer(
            'elbAppServer',
            ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                Enabled=True, Timeout=300),
            CrossZone=True,
            SecurityGroups=[Ref(app_server_load_balancer_security_group)],
            Listeners=[
                elb.Listener(LoadBalancerPort='80',
                             Protocol='HTTP',
                             InstancePort='80',
                             InstanceProtocol='HTTP'),
                # HTTPS listener forwards to instance port 443 but
                # speaks plain HTTP to the instance.
                elb.Listener(LoadBalancerPort='443',
                             Protocol='HTTPS',
                             InstancePort='443',
                             InstanceProtocol='HTTP',
                             SSLCertificateId=Ref(
                                 self.param_ssl_certificate_arn))
            ],
            HealthCheck=elb.HealthCheck(
                Target=self.HEALTH_ENDPOINT,
                HealthyThreshold='3',
                UnhealthyThreshold='2',
                Interval='30',
                Timeout='5',
            ),
            Subnets=Ref(self.param_public_subnets),
            Tags=Tags(Name='elbAppServer', Color=Ref(self.param_color))))

    # Notify when backend 4XX responses exceed 5 in a 5-minute period.
    self.add_resource(
        cw.Alarm('alarmAppServerBackend4xx',
                 AlarmActions=[Ref(self.param_notification_arn)],
                 Statistic='Sum',
                 Period=300,
                 Threshold='5',
                 EvaluationPeriods=1,
                 ComparisonOperator='GreaterThanThreshold',
                 MetricName='HTTPCode_Backend_4XX',
                 Namespace='AWS/ELB',
                 Dimensions=[
                     cw.MetricDimension(
                         'metricLoadBalancerName',
                         Name='LoadBalancerName',
                         Value=Ref(app_server_load_balancer))
                 ]))

    # Notify on any backend 5XX response within a 1-minute period.
    self.add_resource(
        cw.Alarm('alarmAppServerBackend5xx',
                 AlarmActions=[Ref(self.param_notification_arn)],
                 Statistic='Sum',
                 Period=60,
                 Threshold='0',
                 EvaluationPeriods=1,
                 ComparisonOperator='GreaterThanThreshold',
                 MetricName='HTTPCode_Backend_5XX',
                 Namespace='AWS/ELB',
                 Dimensions=[
                     cw.MetricDimension(
                         'metricLoadBalancerName',
                         Name='LoadBalancerName',
                         Value=Ref(app_server_load_balancer))
                 ]))

    #
    # ASG
    #
    app_server_launch_config = self.add_resource(
        asg.LaunchConfiguration(
            'lcAppServer',
            ImageId=Ref(self.app_server_ami),
            IamInstanceProfile=Ref(self.param_app_server_iam_profile),
            InstanceType=Ref(self.app_server_instance_type_parameter),
            KeyName=Ref(self.keyname_parameter),
            SecurityGroups=[Ref(app_server_security_group)]))

    autoscaling_group = self.add_resource(
        asg.AutoScalingGroup(
            'asgAppServer',
            AvailabilityZones=self.get_input(
                'AppServerAvailabilityZones').split(','),
            Cooldown=300,
            DesiredCapacity=Ref(self.param_desired_capacity),
            HealthCheckGracePeriod=1000,
            HealthCheckType='ELB',
            LaunchConfigurationName=Ref(app_server_launch_config),
            LoadBalancerNames=[Ref(app_server_load_balancer)],
            MaxSize=Ref(self.param_max_size),
            MinSize=Ref(self.param_min_size),
            NotificationConfigurations=[
                asg.NotificationConfigurations(
                    TopicARN=Ref(self.param_notification_arn),
                    NotificationTypes=[
                        asg.EC2_INSTANCE_LAUNCH,
                        asg.EC2_INSTANCE_LAUNCH_ERROR,
                        asg.EC2_INSTANCE_TERMINATE,
                        asg.EC2_INSTANCE_TERMINATE_ERROR
                    ])
            ],
            VPCZoneIdentifier=Ref(self.param_private_subnets),
            Tags=[
                asg.Tag('Name',
                        '{}Server'.format(self.STACK_NAME_PREFIX), True),
                asg.Tag('Color', Ref(self.param_color), True)
            ]))

    # autoscaling policies
    autoscaling_policy_add = self.add_resource(
        asg.ScalingPolicy('scalingPolicyAddAppServer',
                          AdjustmentType='ChangeInCapacity',
                          AutoScalingGroupName=Ref(autoscaling_group),
                          Cooldown=600,
                          ScalingAdjustment='1'))
    autoscaling_policy_remove = self.add_resource(
        asg.ScalingPolicy('scalingPolicyRemoveAppServer',
                          AdjustmentType='ChangeInCapacity',
                          AutoScalingGroupName=Ref(autoscaling_group),
                          Cooldown=600,
                          ScalingAdjustment='-1'))

    # Otp stacks scale on CPU; all other stacks scale on network out.
    if self.STACK_NAME_PREFIX == 'Otp':
        # trigger scale down if CPU avg usage < 10% for 3 consecutive 5 min periods
        self.add_resource(
            cw.Alarm('alarmAppServerLowCPU',
                     AlarmActions=[Ref(autoscaling_policy_remove)],
                     Statistic='Average',
                     Period=300,
                     Threshold='10',
                     EvaluationPeriods=3,
                     ComparisonOperator='LessThanThreshold',
                     MetricName='CPUUtilization',
                     Namespace='AWS/EC2',
                     Dimensions=[
                         cw.MetricDimension('metricAutoScalingGroupName',
                                            Name='AutoScalingGroupName',
                                            Value=Ref(autoscaling_group))
                     ]))
        # trigger scale up if CPU avg usage >= 30% for a 5 min period
        self.add_resource(
            cw.Alarm('alarmAppServerHighCPU',
                     AlarmActions=[
                         Ref(self.param_notification_arn),
                         Ref(autoscaling_policy_add)
                     ],
                     Statistic='Average',
                     Period=300,
                     Threshold='30',
                     EvaluationPeriods=1,
                     ComparisonOperator='GreaterThanOrEqualToThreshold',
                     MetricName='CPUUtilization',
                     Namespace='AWS/EC2',
                     Dimensions=[
                         cw.MetricDimension('metricAutoScalingGroupName',
                                            Name='AutoScalingGroupName',
                                            Value=Ref(autoscaling_group))
                     ]))
    else:
        # scale web servers based on network usage
        self.add_resource(
            cw.Alarm('alarmAppServerLowNetworkUsage',
                     AlarmActions=[Ref(autoscaling_policy_remove)],
                     Statistic='Average',
                     Period=300,
                     Threshold='4000000',
                     EvaluationPeriods=3,
                     ComparisonOperator='LessThanThreshold',
                     MetricName='NetworkOut',
                     Namespace='AWS/EC2',
                     Dimensions=[
                         cw.MetricDimension('metricAutoScalingGroupName',
                                            Name='AutoScalingGroupName',
                                            Value=Ref(autoscaling_group))
                     ]))
        self.add_resource(
            cw.Alarm('alarmAppServerHighNetworkUsage',
                     AlarmActions=[
                         Ref(self.param_notification_arn),
                         Ref(autoscaling_policy_add)
                     ],
                     Statistic='Average',
                     Period=300,
                     Threshold='10000000',
                     EvaluationPeriods=1,
                     ComparisonOperator='GreaterThanOrEqualToThreshold',
                     MetricName='NetworkOut',
                     Namespace='AWS/EC2',
                     Dimensions=[
                         cw.MetricDimension('metricAutoScalingGroupName',
                                            Name='AutoScalingGroupName',
                                            Value=Ref(autoscaling_group))
                     ]))

    #
    # DNS name
    #
    # Record name is <Color>.<StackType>.<prefix>.<zone>, aliased to
    # the ELB.
    self.create_resource(
        route53.RecordSetType(
            'dnsName',
            Name=Join('.', [
                Ref(self.param_color),
                Ref(self.param_stacktype), self.STACK_NAME_PREFIX,
                Ref(self.param_public_hosted_zone_name)
            ]),
            Type='A',
            AliasTarget=route53.AliasTarget(
                GetAtt(app_server_load_balancer,
                       'CanonicalHostedZoneNameID'),
                GetAtt(app_server_load_balancer, 'DNSName')),
            HostedZoneName=Ref(self.param_public_hosted_zone_name)))

    self.add_output([
        Output('{}ServerLoadBalancerEndpoint'.format(
            self.STACK_NAME_PREFIX),
            Description='Application server endpoint',
            Value=GetAtt(app_server_load_balancer, 'DNSName')),
        Output('{}ServerLoadBalancerHostedZoneNameID'.format(
            self.STACK_NAME_PREFIX),
            Description='ID of canonical hosted zone name for ELB',
            Value=GetAtt(app_server_load_balancer,
                         'CanonicalHostedZoneNameID'))
    ])
from troposphere import Output, Ref, Template, Parameter, GetAtt from troposphere import ec2 t = Template() t.set_description( "AWS CloudFormation Sample Template NatGateway: Sample template showing " "how to create a public NAT gateway. " "**WARNING** This template creates an Amazon NAT gateway. " "You will be billed for the AWS resources used if you create " "a stack from this template.") vpc_cidr = t.add_parameter( Parameter( 'VPCCIDR', Default='172.18.0.0/16', Description='The IP address space for this VPC, in CIDR notation', Type='String', )) public_subnet = t.add_parameter( Parameter( 'PublicSubnetCidr', Type='String', Description='Public Subnet CIDR', Default='172.18.0.0/22', )) private_subnet = t.add_parameter( Parameter( 'PrivateSubnetCidr', Type='String',
# Hand-declared AWSObject — presumably the installed troposphere
# version does not ship AWS::Cognito::UserPoolDomain; verify before
# removing.
class UserPoolDomain(AWSObject):
    """Represents an AWS::Cognito::UserPoolDomain resource."""
    resource_type = "AWS::Cognito::UserPoolDomain"

    # prop name -> (expected type, required?)
    props = {
        'Domain': (str, True),
        'UserPoolId': (str, True),
        'CustomDomainConfig': (CustomDomainConfig, False),
    }


template = Template(Description='Cognito with passwordless e-mail auth')
# SAM transform so the template can use AWS::Serverless::* resources.
template.set_transform('AWS::Serverless-2016-10-31')

# Stack-name and domain parameters with environment-specific defaults.
dns_stack = template.add_parameter(Parameter(
    'DnsStack',
    Type=constants.STRING,
    Default='spunt-punt-be-dns',
))
core_stack = template.add_parameter(Parameter(
    'CoreStack',
    Type=constants.STRING,
    Default='spunt-core',
))
domain_name = template.add_parameter(Parameter(
    'DomainName',
    Type=constants.STRING,
    Default='login.spunt.be',
))
t.add_version("2010-09-09") # Create the template t.add_description(""" AWS Cloudformation template for GzipToSnappy: This template creates a an S3 bucket and a Lambda function to trigger when gzip files are loaded on the S3 bucket. **WARNING**: You will be billed for AWS resources created. """) # Create parameters inputBucketName = t.add_parameter( Parameter("InputBucketName", Description="The name of the input bucket", Type="String")) inputKeyPrefix = t.add_parameter( Parameter("InputKeyPrefix", Description="The S3 input folder for incoming files", Type="String", Default="stg/input-gz/")) outputBucketName = t.add_parameter( Parameter( "OutputBucketName", Description="The name of the output bucket. This is required", Type="String", ))
from troposphere.route53 import RecordSetType t = Template() t.set_description( "AWS CloudFormation Sample Template Route53_A: " "Sample template showing how to create an Amazon Route 53 A record that " "maps to the public IP address of an EC2 instance. It assumes that you " "already have a Hosted Zone registered with Amazon Route 53. **WARNING** " "This template creates an Amazon EC2 instance. You will be billed for " "the AWS resources used if you create a stack from this template.") hostedzone = t.add_parameter( Parameter( "HostedZone", Description="The DNS name of an existing Amazon Route 53 hosted zone", Type="String", )) t.add_mapping( "RegionMap", { "us-east-1": { "AMI": "ami-7f418316" }, "us-west-1": { "AMI": "ami-951945d0" }, "us-west-2": { "AMI": "ami-16fd7026" },
from stack_modules.common_modules.common import *
from troposphere import Parameter, Tags

# Assemble the ElasticSearch stack skeleton.
stackconfig = StackConfig()
mystack = Stack(stackconfig)
mystack.description('ElasticSearch Stack')

# Deployment environment and product version inputs, keyed for lookup.
parameters = {
    "env": Parameter(
        "DeploymentEnvironment",
        Type="String",
        Default="DEV",
        Description="Environment you are building (DEV,QA,STG,PROD)",
    ),
    "ver": Parameter(
        "ProductVersion",
        Type="String",
        Default="6.1",
        Description="Version deploying (e.g. 6.1)",
    ),
}

# Tags shared across the stack, resolved from the parameters above.
common_tags = Tags(env=Ref("DeploymentEnvironment"),
                   Version=Ref("ProductVersion"))

# Register every declared parameter on the underlying template.
for parameter in parameters.values():
    mystack.template.add_parameter(parameter)

mystack.elasticsearch_cluster('elasticsearch')
from troposphere.sqs import Queue t = Template() t.set_description( "AWS CloudFormation Sample Template SQS_With_CloudWatch_Alarms: Sample " "template showing how to create an SQS queue with AWS CloudWatch alarms " "on queue depth. **WARNING** This template creates an Amazon SQS Queue " "and one or more Amazon CloudWatch alarms. You will be billed for the " "AWS resources used if you create a stack from this template.") alarmemail = t.add_parameter( Parameter( "AlarmEmail", Default="*****@*****.**", Description="Email address to notify if there are any " "operational issues", Type="String", )) myqueue = t.add_resource(Queue("MyQueue")) alarmtopic = t.add_resource( Topic( "AlarmTopic", Subscription=[ Subscription(Endpoint=Ref(alarmemail), Protocol="email"), ], )) queuedepthalarm = t.add_resource(
def test_noproperty(self):
    """A parameter with only a Type serializes without a Properties key."""
    param = Parameter("MyParameter", Type="String")
    serialized = param.to_dict()
    with self.assertRaises(KeyError):
        serialized['Properties']
def test_ref_can_be_requested(self):
    """Parameter.ref() yields a Ref whose data targets the parameter title."""
    p = Parameter('title', Type='String')
    result = p.ref()
    self.assertIsInstance(result, Ref)
    self.assertDictEqual(result.data, {'Ref': 'title'})
import troposphere.policies as policies t = Template() t.add_version('2010-09-09') t.add_description('OpenAerialMap tiler API stack') ref_stack_id = Ref('AWS::StackId') ref_region = Ref('AWS::Region') ref_stack_name = Ref('AWS::StackName') # # Parameters # keyname_param = t.add_parameter(Parameter( 'KeyName', Type='AWS::EC2::KeyPair::KeyName', Default='hotosm', Description='Name of an existing EC2 key pair' )) role_param = t.add_parameter(Parameter( 'Role', Type='String', Default='OAMServer', Description='IAM Instance Role' )) target_bucket_param = t.add_parameter(Parameter( 'TargetBucket', Type='String', Default='oam-tiles', Description='Target S3 bucket for tiled imagery' )) tiler_ami_param = t.add_parameter(Parameter( 'CoreOSAMI', Type='String', Default='ami-05783d60', Description='CoreOS AMI'
t, vpcids = create_vpc(t) t, subnetids = create_subnet(t, vpcids) t, sg_output = create_sg(t, subnetids) sg_output_id_list = [] for x, y in sg_output.items(): sg_output_id_list.append(y) subnetids_output_id_list = [] for x, y in subnetids["SubnetIds"].items(): subnetids_output_id_list.append(y) ref_region = Ref('AWS::Region') # prints roleid and template to create role ClusterName = t.add_parameter( Parameter("ClusterName", Type="String", Description="Name of an eks", Default="test-eks")) eksResourcesVpcConfig = ResourcesVpcConfig(SecurityGroupIds=sg_output_id_list, SubnetIds=subnetids_output_id_list) eks = t.add_resource( Cluster("eks", Name=Ref(ClusterName), ResourcesVpcConfig=eksResourcesVpcConfig, RoleArn=role_id)) AmiId = t.add_parameter( Parameter( "AmiId", Type="String", Default="ami-0d3998d69ebe9b214",
from troposphere import Tags, ImportValue, Parameter, Sub, GetAtt, Ref, Join, Output, Export from troposphere import Template from troposphere import serverless, awslambda, s3, iam t = Template() t.add_version('2010-09-09') t.add_transform('AWS::Serverless-2016-10-31') # Parameters t.add_parameter(Parameter('CoreStack', Type='String')) t.add_parameter(Parameter('MySQLDbName', Type='String')) t.add_parameter(Parameter('MySQLUser', Type='String')) t.add_parameter(Parameter('MySQLPass', Type='String')) t.add_parameter(Parameter('NodeEnv', Type='String')) t.add_parameter(Parameter('KeycloakServerURL', Type='String')) t.add_parameter(Parameter('KeycloakRealm', Type='String')) t.add_parameter(Parameter('KeycloakClientID', Type='String')) t.add_parameter(Parameter('KeycloakClientSecret', Type='String')) t.add_parameter(Parameter('AwsKmsCmk', Type='String')) # Create S3 Bucket accountMediaBucket = t.add_resource(s3.Bucket( 'AccountMedia', CorsConfiguration = s3.CorsConfiguration(CorsRules = [ s3.CorsRules( AllowedHeaders = ['*'], AllowedMethods = ['GET', 'POST'], AllowedOrigins = ['*'], ) ]),
def main():
    """Build and print a CloudFormation template for an auto-scaled
    application behind an ALB, fronted by a Route53 alias record.

    NOTE(review): relies on module-level `t` (Template) and `AddAMI` —
    both defined outside this function; confirm they exist at call time.
    """
    # Meta
    t.add_version("2010-09-09")
    # NOTE(review): adjacent string literals here concatenate without
    # spaces in a few places (e.g. "...Application" + "load balancer",
    # "...Exports" + "which") — cosmetic only, left as-is.
    t.add_description("Template for auto-scaling in an Application"
                      "load balancer target group. "
                      "The ALB will be used as an A Alias target "
                      "for a specified Route53 hosted zone. "
                      "This template also showcases "
                      "Metadata Parameter Grouping, "
                      "Special AWS Parameter Types, "
                      "and Cloudformation Outputs with Exports"
                      "which can be imported into other templates.")
    t.add_metadata({
        "Author": "https://github.com/hmain/",
        "LastUpdated": "2017 01 31",
        "Version": "1",
    })
    # Parameter grouping: controls how the console groups the inputs.
    t.add_metadata({
        "AWS::CloudFormation::Interface": {
            "ParameterGroups": [{
                "Label": {
                    "default": "Global parameters"
                },
                "Parameters": ["environment"]
            }, {
                "Label": {
                    "default": "Application Loadbalancer"
                },
                "Parameters": [
                    "albSubnets", "loadbalancerPrefix", "loadBalancerArn",
                    "albPaths", "albPort"
                ]
            }, {
                "Label": {
                    "default": "VPC"
                },
                "Parameters": ["ec2Subnets", "VPC", "securityGroup"]
            }, {
                "Label": {
                    "default": "EC2"
                },
                "Parameters": ["ec2Name", "ec2Type", "ec2Key"]
            }, {
                "Label": {
                    "default": "Auto-scaling"
                },
                "Parameters": [
                    "asgCapacity", "asgMinSize", "asgMaxSize", "asgCooldown",
                    "asgHealthGrace"
                ]
            }, {
                "Label": {
                    "default": "Route53"
                },
                "Parameters": ["route53HostedZoneId", "route53HostedZoneName"]
            }]
        }
    })
    # Populates the region -> AMI mapping used by the launch configuration.
    AddAMI(t)
    # --- Parameters -------------------------------------------------------
    environment = t.add_parameter(
        Parameter(
            "environment",
            Default="dev",
            Type="String",
            Description="Development or Production environment",
            AllowedValues=["dev", "prod"],
            ConstraintDescription="dev or prod",
        ))
    route53_hosted_zone_id = t.add_parameter(
        Parameter("route53HostedZoneId",
                  Default="",
                  Type="AWS::Route53::HostedZone::Id",
                  Description="Route53 DNS zone ID"))
    route53_hosted_zone_name = t.add_parameter(
        Parameter("route53HostedZoneName",
                  Default="my.aws.dns.com",
                  Type="String",
                  Description="Route53 hosted zone name"))
    security_group = t.add_parameter(
        Parameter("securityGroup",
                  Default="",
                  Type="List<AWS::EC2::SecurityGroup::Id>",
                  Description="Which security groups to use"))
    alb_paths = t.add_parameter(
        Parameter(
            "albPaths",
            Default="/",
            Type="CommaDelimitedList",
            Description="Path-patterns you want the loadbalancer to point to in "
            "your application"))
    albPort = t.add_parameter(
        Parameter("albPort",
                  Default="80",
                  Type="Number",
                  Description="Which loadbalancer port to use"))
    ec2_subnets = t.add_parameter(
        Parameter("ec2Subnets",
                  Default="",
                  Type="List<AWS::EC2::Subnet::Id>",
                  Description="Private subnets for the instances."))
    alb_subnets = t.add_parameter(
        Parameter("albSubnets",
                  Default="",
                  Type="List<AWS::EC2::Subnet::Id>",
                  Description="Public subnets for the load balancer."))
    loadbalancer_prefix = t.add_parameter(
        Parameter(
            "loadbalancerPrefix",
            Default="",
            Type="String",
            Description="Specify a prefix for your loadbalancer",
        ))
    vpc = t.add_parameter(
        Parameter("VPC",
                  Default="",
                  Type="AWS::EC2::VPC::Id",
                  Description="Environment VPC"))
    # Auto scaling group parameters
    asg_capacity = t.add_parameter(
        Parameter("asgCapacity",
                  Default="0",
                  Type="Number",
                  Description="Number of instances"))
    asg_min_size = t.add_parameter(
        Parameter("asgMinSize",
                  Default="0",
                  Type="Number",
                  Description="Minimum size of AutoScalingGroup"))
    asg_max_size = t.add_parameter(
        Parameter("asgMaxSize",
                  Default="1",
                  Type="Number",
                  Description="Maximum size of AutoScalingGroup"))
    asg_cooldown = t.add_parameter(
        Parameter(
            "asgCooldown",
            Default="300",
            Type="Number",
            Description="Cooldown before starting/stopping another instance"))
    asg_health_grace = t.add_parameter(
        Parameter(
            "asgHealthGrace",
            Default="300",
            Type="Number",
            Description="Wait before starting/stopping another instance"))
    # EC2 parameters
    ec2_name = t.add_parameter(
        Parameter("ec2Name",
                  Default="myApplication",
                  Type="String",
                  Description="Name of the instances"))
    ec2_type = t.add_parameter(
        Parameter("ec2Type",
                  Default="t2.large",
                  Type="String",
                  Description="Instance type."))
    ec2_key = t.add_parameter(
        Parameter("ec2Key",
                  Default="",
                  Type="AWS::EC2::KeyPair::KeyName",
                  Description="EC2 Key Pair"))
    # Launchconfiguration: AMI is resolved per-region via the "windowsAMI"
    # mapping (presumably filled in by AddAMI above — confirm).
    ec2_launchconfiguration = t.add_resource(
        autoscaling.LaunchConfiguration(
            "EC2LaunchConfiguration",
            ImageId=FindInMap("windowsAMI", Ref("AWS::Region"), "AMI"),
            KeyName=Ref(ec2_key),
            SecurityGroups=Ref(security_group),
            InstanceType=Ref(ec2_type),
            AssociatePublicIpAddress=False,
        ))
    # Application ELB: the first albPaths entry doubles as the health check.
    alb_target_group = t.add_resource(
        elb.TargetGroup("albTargetGroup",
                        HealthCheckPath=Select("0", Ref(alb_paths)),
                        HealthCheckIntervalSeconds="30",
                        HealthCheckProtocol="HTTP",
                        HealthCheckTimeoutSeconds="10",
                        HealthyThresholdCount="4",
                        Matcher=elb.Matcher(HttpCode="200"),
                        Name=Ref(ec2_name),
                        Port=80,
                        Protocol="HTTP",
                        UnhealthyThresholdCount="3",
                        VpcId=Ref(vpc)))
    # Auto scaling group, registered directly with the target group.
    t.add_resource(
        autoscaling.AutoScalingGroup(
            "autoScalingGroup",
            DesiredCapacity=Ref(asg_capacity),
            Tags=autoscaling.Tags(Environment=Ref(environment)),
            VPCZoneIdentifier=Ref(ec2_subnets),
            TargetGroupARNs=[Ref(alb_target_group)],
            MinSize=Ref(asg_min_size),
            MaxSize=Ref(asg_max_size),
            Cooldown=Ref(asg_cooldown),
            LaunchConfigurationName=Ref(ec2_launchconfiguration),
            HealthCheckGracePeriod=Ref(asg_health_grace),
            HealthCheckType="EC2",
        ))
    # Application Load Balancer (internet-facing, in the public subnets).
    application_load_balancer = t.add_resource(
        elb.LoadBalancer("applicationLoadBalancer",
                         Name=Ref(loadbalancer_prefix),
                         Scheme="internet-facing",
                         Subnets=Ref(alb_subnets),
                         SecurityGroups=Ref(security_group)))
    alb_listener = t.add_resource(
        elb.Listener("albListener",
                     Port=Ref(albPort),
                     Protocol="HTTP",
                     LoadBalancerArn=Ref(application_load_balancer),
                     DefaultActions=[
                         elb.Action(Type="forward",
                                    TargetGroupArn=Ref(alb_target_group))
                     ]))
    # Forward every configured path pattern to the same target group.
    t.add_resource(
        elb.ListenerRule("albListenerRule",
                         ListenerArn=Ref(alb_listener),
                         Conditions=[
                             elb.Condition(Field="path-pattern",
                                           Values=Ref(alb_paths))
                         ],
                         Actions=[
                             elb.Action(Type="forward",
                                        TargetGroupArn=Ref(alb_target_group))
                         ],
                         Priority="1"))
    # Route53: weighted A-alias record pointing at the ALB.
    t.add_resource(
        route53.RecordSetGroup(
            "route53RoundRobin",
            HostedZoneId=Ref(route53_hosted_zone_id),
            RecordSets=[
                route53.RecordSet(
                    Weight=1,
                    SetIdentifier=Join(".", [
                        Ref(environment),
                        Ref(route53_hosted_zone_name), "ELB"
                    ]),
                    Name=Join(
                        ".",
                        [Ref(environment),
                         Ref(route53_hosted_zone_name)]),
                    Type="A",
                    AliasTarget=route53.AliasTarget(
                        GetAtt(application_load_balancer,
                               "CanonicalHostedZoneID"),
                        GetAtt(application_load_balancer, "DNSName")))
            ]))
    # Export the site URL so other stacks can import it.
    t.add_output(
        Output(
            "URL",
            Description="URL of the website",
            Value=Join("", [
                "http://",
                GetAtt(application_load_balancer, "DNSName"),
                Select("0", Ref(alb_paths))
            ]),
            Export=Export(Sub("${AWS::StackName}-URL")),
        ))
    print(t.to_json())
def add_parameters(self):
    """Declare all template input parameters for this stack.

    Every parameter is a plain ``Type="String"`` CloudFormation parameter.
    Each one is registered on ``self.template`` and the resulting
    ``Parameter`` object is also stored as a same-named attribute on
    ``self`` (e.g. ``self.SharedServicesVpcId``), preserving the public
    attribute surface of the original hand-written version.
    """
    param_names = (
        # SharedServices* values are outputs of the VPC stack.
        "SharedServicesVpcId",
        "SharedServicesPrivSubnet1",
        "SharedServicesPrivSubnet2",
        "SharedServicesPubSubnet1",
        "SharedServicesPubSubnet2",
        # EKS cluster version and worker-node launch settings.
        "EksClusterVersion",
        "WorkerNodeImageId",
        "WorkerNodeKeyName",
        "WorkerNodeInstanceType",
        # Worker-node AutoScalingGroup sizing and health tuning.
        "WorkerNodeASGGroupMinSize",
        "WorkerNodeASGGroupDesiredSize",
        "WorkerNodeASGGroupMaxSize",
        "WorkerNodeASGHealthCheckType",
        "WorkerNodeASGHealthCheckGracePeriod",
        "WorkerNodeASGCoolDown",
    )
    # Replaces 15 identical copy-pasted blocks with one loop; behavior
    # (registration order and attribute names) is unchanged.
    for name in param_names:
        setattr(
            self, name,
            self.template.add_parameter(Parameter(name, Type="String")))
from troposphere.cloudwatch import (Alarm, MetricDimension) from troposphere.ecs import Cluster from troposphere.iam import (InstanceProfile, Role) PublicCidrIp = str(ip_network(get_ip())) t = Template() t.set_description("community-mother-api: ECS Cluster") t.add_parameter( Parameter( "KeyPair", Description="Name of an existing EC2 KeyPair to SSH", Type="AWS::EC2::KeyPair::KeyName", ConstraintDescription="must be the name of an existing EC2 KeyPair.", )) t.add_parameter(Parameter("VpcId", Type="AWS::EC2::VPC::Id", Description="VPC")) t.add_parameter( Parameter("PublicSubnet", Description="PublicSubnet", Type="List<AWS::EC2::Subnet::Id>", ConstraintDescription="PublicSubnet")) t.add_resource( ec2.SecurityGroup("SecurityGroup", GroupDescription="Allow SSH and private network access",
from troposphere import Parameter, Ref, Template, apigateway template = Template() template.add_description("Example API Gateway from Swagger") param_source_bucket = template.add_parameter(Parameter( "SourceBucket", Type="String", Description="Name of the bucket where Swagger file is stored" )) param_ile_name = template.add_parameter(Parameter( "FileName", Type="String", Description="Name of the Swagger file inside S3 bucket" )) api = template.add_resource(apigateway.RestApi( "API", Description="My API", Name="MyAPI", BodyS3Location=apigateway.S3Location( Bucket=Ref(param_source_bucket), Key=Ref(param_ile_name) ) )) api_deployment = template.add_resource(apigateway.Deployment( "APIDeployment", RestApiId=Ref(api),
PublicCidrIp = str(ip_network(get_ip())) AnsiblePullCmd = \ "/usr/bin/ansible-pull -U {} {}.yml -i localhost".format( GithubAnsibleURL, ApplicationName ) t = Template() t.add_description("Effective DevOps in AWS: HelloWorld web application") t.add_parameter( Parameter( "KeyPair", Description="Name of an existing EC2 KeyPair to SSH", Type="AWS::EC2::KeyPair::KeyName", ConstraintDescription="must be the name of an existing EC2 KeyPair.", )) t.add_resource( ec2.SecurityGroup( "SecurityGroup", GroupDescription="Allow SSH and TCP/{} access".format(ApplicationPort), SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp=PublicCidrIp, ), ec2.SecurityGroupRule(
def main(args):
    """Build a CloudFormation template creating up to 5 optional EBS
    volumes (new, from snapshot, or reusing existing volume IDs) and write
    it as JSON to ``args.target_path``.

    All per-volume inputs are CommaDelimitedLists indexed by volume slot;
    the sentinel string "NONE" in a slot means "not provided".
    """
    number_of_vol = 5
    t = Template()
    availability_zone = t.add_parameter(
        Parameter(
            "AvailabilityZone",
            Type="String",
            Description=
            "Availability Zone the cluster will launch into. THIS IS REQUIRED",
        ))
    volume_size = t.add_parameter(
        Parameter(
            "VolumeSize",
            Type="CommaDelimitedList",
            Description="Size of EBS volume in GB, if creating a new one"))
    volume_type = t.add_parameter(
        Parameter(
            "VolumeType",
            Type="CommaDelimitedList",
            Description="Type of volume to create either new or from snapshot")
    )
    volume_iops = t.add_parameter(
        Parameter(
            "VolumeIOPS",
            Type="CommaDelimitedList",
            Description=
            "Number of IOPS for volume type io1. Not used for other volume types.",
        ))
    ebs_encryption = t.add_parameter(
        Parameter(
            "EBSEncryption",
            Type="CommaDelimitedList",
            Description="Boolean flag to use EBS encryption for /shared volume. "
            "(Not to be used for snapshots)",
        ))
    ebs_kms_id = t.add_parameter(
        Parameter(
            "EBSKMSKeyId",
            Type="CommaDelimitedList",
            Description=
            "KMS ARN for customer created master key, will be used for EBS encryption",
        ))
    ebs_volume_id = t.add_parameter(
        Parameter("EBSVolumeId",
                  Type="CommaDelimitedList",
                  Description="Existing EBS volume Id"))
    ebs_snapshot_id = t.add_parameter(
        Parameter(
            "EBSSnapshotId",
            Type="CommaDelimitedList",
            Description=
            "Id of EBS snapshot if using snapshot as source for volume",
        ))
    ebs_vol_num = t.add_parameter(
        Parameter(
            "NumberOfEBSVol",
            Type="Number",
            Description="Number of EBS Volumes the user requested, up to %s" %
            number_of_vol,
        ))
    # Per-slot condition/resource handles, filled in by the loop below.
    use_vol = [None] * number_of_vol
    use_existing_ebs_volume = [None] * number_of_vol
    v = [None] * number_of_vol
    for i in range(number_of_vol):
        # UseVol<i+1> chains: slot i is in use iff NumberOfEBSVol != i AND
        # all earlier slots are in use. Slot 0 is always in use, so it has
        # no UseVol condition; CreateEBSVolume additionally requires that
        # no existing volume ID was supplied for the slot.
        if i == 0:
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                Equals(Select(str(i), Ref(ebs_volume_id)), "NONE"))
        elif i == 1:
            use_vol[i] = t.add_condition("UseVol%s" % (i + 1),
                                         Not(Equals(Ref(ebs_vol_num), str(i))))
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                And(Condition(use_vol[i]),
                    Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")),
            )
        else:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                And(Not(Equals(Ref(ebs_vol_num), str(i))),
                    Condition(use_vol[i - 1])))
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                And(Condition(use_vol[i]),
                    Equals(Select(str(i),
                                  Ref(ebs_volume_id)), "NONE")),
            )
        # Feature conditions for slot i: each checks the i-th entry of the
        # corresponding CommaDelimitedList parameter.
        use_ebs_iops = t.add_condition(
            "Vol%s_UseEBSPIOPS" % (i + 1),
            Equals(Select(str(i), Ref(volume_type)), "io1"))
        use_vol_size = t.add_condition(
            "Vol%s_UseVolumeSize" % (i + 1),
            Not(Equals(Select(str(i), Ref(volume_size)), "NONE")))
        use_vol_type = t.add_condition(
            "Vol%s_UseVolumeType" % (i + 1),
            Not(Equals(Select(str(i), Ref(volume_type)), "NONE")))
        use_ebs_encryption = t.add_condition(
            "Vol%s_UseEBSEncryption" % (i + 1),
            Equals(Select(str(i), Ref(ebs_encryption)), "true"))
        use_ebs_kms_key = t.add_condition(
            "Vol%s_UseEBSKMSKey" % (i + 1),
            And(Condition(use_ebs_encryption),
                Not(Equals(Select(str(i), Ref(ebs_kms_id)), "NONE"))),
        )
        use_ebs_snapshot = t.add_condition(
            "Vol%s_UseEBSSnapshot" % (i + 1),
            Not(Equals(Select(str(i), Ref(ebs_snapshot_id)), "NONE")))
        use_existing_ebs_volume[i] = t.add_condition(
            "Vol%s_UseExistingEBSVolume" % (i + 1),
            Not(Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")))
        # The volume resource itself; only created when create_vol holds.
        # Defaults: gp2, 20 GB when size/type not supplied.
        v[i] = t.add_resource(
            ec2.Volume(
                "Volume%s" % (i + 1),
                AvailabilityZone=Ref(availability_zone),
                VolumeType=If(use_vol_type, Select(str(i), Ref(volume_type)),
                              "gp2"),
                Size=If(use_vol_size, Select(str(i), Ref(volume_size)), "20"),
                SnapshotId=If(use_ebs_snapshot,
                              Select(str(i), Ref(ebs_snapshot_id)), NoValue),
                Iops=If(use_ebs_iops, Select(str(i), Ref(volume_iops)),
                        NoValue),
                Encrypted=If(use_ebs_encryption,
                             Select(str(i), Ref(ebs_encryption)), NoValue),
                KmsKeyId=If(use_ebs_kms_key, Select(str(i), Ref(ebs_kms_id)),
                            NoValue),
                Condition=create_vol,
            ))
    # Build the output value: for each slot, the existing volume ID if one
    # was supplied, else the created volume's Ref; then nest If()s so the
    # final output joins exactly the slots that are in use.
    outputs = [None] * number_of_vol
    vol_to_return = [None] * number_of_vol
    for i in range(number_of_vol):
        vol_to_return[i] = If(use_existing_ebs_volume[i],
                              Select(str(i), Ref(ebs_volume_id)), Ref(v[i]))
        if i == 0:
            outputs[i] = vol_to_return[i]
        else:
            outputs[i] = If(use_vol[i], Join(",", vol_to_return[:(i + 1)]),
                            outputs[i - 1])
    t.add_output(
        Output("Volumeids",
               Description="Volume IDs of the resulted EBS volumes",
               Value=outputs[number_of_vol - 1]))
    # NOTE(review): bare open/write/close — a `with open(...)` block would
    # guarantee the handle is closed if write() raises; confirm and fix.
    json_file_path = args.target_path
    output_file = open(json_file_path, "w")
    output_file.write(t.to_json())
    output_file.close()
def main():
    """Generate and print a CloudFormation template that deploys an ECS
    cluster into a given VPC/subnets via an Auto Scaling Group."""
    template = Template()
    template.add_version('2010-09-09')
    template.add_description(
        'This template deploys an ECS cluster to the ' +
        'provided VPC and subnets using an Auto Scaling Group')
    # Parameters
    # EnvironmentName: prefixed onto resource names (used in Sub() below).
    env_name_param = template.add_parameter(
        Parameter(
            'EnvironmentName',
            Type='String',
            Description=
            'An environment name that will be prefixed to resource names',
        ))
    # InstanceType: restricted to the t2 family.
    instance_type_param = template.add_parameter(
        Parameter(
            'InstanceType',
            Type='String',
            Default='t2.nano',
            Description=
            'Which instance type should we use to build the ECS cluster?',
            AllowedValues=[
                't2.nano',
                't2.micro',
                't2.small',
                't2.medium',
                't2.large',
                't2.xlarge',
                't2.2xlarge',
            ],
        ))
    # ClusterSize: used for ASG Min/Max/Desired, so the group is fixed-size.
    cluster_size_param = template.add_parameter(
        Parameter(
            'ClusterSize',
            Type='Number',
            Description='How many ECS hosts do you want to initially deploy?',
            Default='1',
        ))
    # VPC: registered for the template but not referenced in this function.
    template.add_parameter(
        Parameter(
            'VPC',
            Type='AWS::EC2::VPC::Id',
            Description=
            'Choose which VPC this ECS cluster should be deployed to',
        ))
    # Subnets
    subnets_param = template.add_parameter(
        Parameter(
            'Subnets',
            Type='List<AWS::EC2::Subnet::Id>',
            Description=
            'Choose which subnets this ECS cluster should be deployed to',
        ))
    # SecurityGroup
    sg_param = template.add_parameter(
        Parameter(
            'SecurityGroup',
            Type='AWS::EC2::SecurityGroup::Id',
            Description=
            'Select the Security Group to use for the ECS cluster hosts',
        ))
    # Mappings
    # AWSRegionToAMI: per-region ECS-host AMI IDs (hard-coded snapshot).
    template.add_mapping(
        'AWSRegionToAMI',
        {
            'us-east-1': {
                'AMI': 'ami-a58760b3'
            },
            'us-east-2': {
                'AMI': 'ami-a6e4bec3'
            },
            'us-west-1': {
                'AMI': 'ami-74cb9b14'
            },
            'us-west-2': {
                'AMI': 'ami-5b6dde3b'
            },
            'eu-west-1': {
                'AMI': 'ami-e3fbd290'
            },
            'eu-west-2': {
                'AMI': 'ami-77f6fc13'
            },
            'eu-central-1': {
                'AMI': 'ami-38dc1157'
            },
            'ap-northeast-1': {
                'AMI': 'ami-30bdce57'
            },
            'ap-southeast-1': {
                'AMI': 'ami-9f75ddfc'
            },
            'ap-southeast-2': {
                'AMI': 'ami-cf393cac'
            },
            'ca-central-1': {
                'AMI': 'ami-1b01b37f'
            },
        },
    )
    # Resources
    # IAM role the ECS hosts assume; grants the ECS/ECR/logs actions the
    # ECS agent needs.
    ecs_role = template.add_resource(
        Role(
            'ECSRole',
            Path='/',
            RoleName=Sub('${EnvironmentName}-ECSRole-${AWS::Region}'),
            AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[
                awacs.aws.Statement(
                    Effect=awacs.aws.Allow,
                    Action=[awacs.aws.Action('sts', 'AssumeRole')],
                    Principal=awacs.aws.Principal('Service',
                                                  ['ec2.amazonaws.com']),
                ),
            ]),
            Policies=[
                Policy(
                    PolicyName='ecs-service',
                    PolicyDocument=awacs.aws.Policy(Statement=[
                        awacs.aws.Statement(
                            Effect=awacs.aws.Allow,
                            Action=[
                                awacs.aws.Action('ecs', 'CreateCluster'),
                                awacs.aws.Action(
                                    'ecs', 'DeregisterContainerInstance'),
                                awacs.aws.Action('ecs',
                                                 'DiscoverPollEndpoint'),
                                awacs.aws.Action('ecs', 'Poll'),
                                awacs.aws.Action('ecs',
                                                 'RegisterContainerInstance'),
                                awacs.aws.Action('ecs',
                                                 'StartTelemetrySession'),
                                awacs.aws.Action('ecs', 'Submit*'),
                                awacs.aws.Action('logs', 'CreateLogStream'),
                                awacs.aws.Action(
                                    'ecr', 'BatchCheckLayerAvailability'),
                                awacs.aws.Action('ecr', 'BatchGetImage'),
                                awacs.aws.Action('ecr',
                                                 'GetDownloadUrlForLayer'),
                                awacs.aws.Action('ecr',
                                                 'GetAuthorizationToken'),
                            ],
                            Resource=['*'],
                        ),
                    ], ),
                ),
            ],
        ))
    ecs_instance_profile = template.add_resource(
        InstanceProfile(
            'ECSInstanceProfile',
            Path='/',
            Roles=[Ref(ecs_role)],
        ))
    # ECSCluster
    ecs_cluster = template.add_resource(
        Cluster(
            'ECSCluster',
            ClusterName=Ref(env_name_param),
        ))
    # cfn-init metadata for the launch configuration: registers the host
    # with the cluster and installs/starts cfn-hup for rolling updates.
    instance_metadata = Metadata(
        Init({
            'config':
            InitConfig(
                commands={
                    '01_add_instance_to_cluster': {
                        'command':
                        Join('', [
                            '#!/bin/bash\n', 'echo ECS_CLUSTER=',
                            Ref(ecs_cluster), ' >> /etc/ecs/ecs.config'
                        ])
                    },
                },
                files=InitFiles({
                    '/etc/cfn/cfn-hup.conf':
                    InitFile(
                        mode='000400',
                        owner='root',
                        group='root',
                        content=Join('', [
                            '[main]\n', 'stack=',
                            Ref('AWS::StackId'), '\n', 'region=',
                            Ref('AWS::Region'), '\n'
                        ]),
                    ),
                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf':
                    InitFile(
                        mode='000400',
                        owner='root',
                        group='root',
                        # NOTE: the two adjacent literals below concatenate
                        # (no comma) so 'path=...' and 'action=...' become
                        # consecutive lines of one hook-file string.
                        content=Join('', [
                            '[cfn-auto-reloader-hook]\n',
                            'triggers=post.update\n',
                            'path=Resources.ContainerInstances.Metadata.AWS::CloudFormation::Init\n'
                            'action=/opt/aws/bin/cfn-init -v --region ',
                            Ref('AWS::Region'), ' --stack ',
                            Ref('AWS::StackId'),
                            ' --resource ECSLaunchConfiguration\n'
                        ]),
                    )
                }),
                services=InitServices({
                    'cfn-hup':
                    InitService(enabled='true',
                                ensureRunning='true',
                                files=[
                                    '/etc/cfn/cfn-hup.conf',
                                    '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                                ])
                }),
            )
        }))
    # Launch configuration: runs cfn-init (applying the metadata above)
    # then signals the ASG's creation policy via cfn-signal.
    ecs_launch_config = template.add_resource(
        LaunchConfiguration(
            'ECSLaunchConfiguration',
            ImageId=FindInMap('AWSRegionToAMI', Ref('AWS::Region'), 'AMI'),
            InstanceType=Ref(instance_type_param),
            SecurityGroups=[Ref(sg_param)],
            IamInstanceProfile=Ref(ecs_instance_profile),
            UserData=Base64(
                Join('', [
                    '#!/bin/bash\n',
                    'yum install -y aws-cfn-bootstrap\n',
                    '/opt/aws/bin/cfn-init -v --region ',
                    Ref('AWS::Region'),
                    ' --stack ',
                    Ref('AWS::StackName'),
                    ' --resource ECSLaunchConfiguration\n',
                    '/opt/aws/bin/cfn-signal -e $? --region ',
                    Ref('AWS::Region'),
                    ' --stack ',
                    Ref('AWS::StackName'),
                    ' --resource ECSAutoScalingGroup\n',
                ])),
            Metadata=instance_metadata,
        ))
    # ECSAutoScalingGroup: fixed size (Min=Max=Desired=ClusterSize); waits
    # up to 15 minutes for instance signals, and rolls updates one at a time.
    template.add_resource(
        AutoScalingGroup(
            'ECSAutoScalingGroup',
            VPCZoneIdentifier=Ref(subnets_param),
            LaunchConfigurationName=Ref(ecs_launch_config),
            MinSize=Ref(cluster_size_param),
            MaxSize=Ref(cluster_size_param),
            DesiredCapacity=Ref(cluster_size_param),
            Tags=ASTags(Name=(Sub('${EnvironmentName} ECS host'), True)),
            CreationPolicy=CreationPolicy(
                ResourceSignal=ResourceSignal(Timeout='PT15M'), ),
            UpdatePolicy=UpdatePolicy(
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    MinInstancesInService='1',
                    MaxBatchSize='1',
                    PauseTime='PT15M',
                    WaitOnResourceSignals=True,
                )),
        ))
    # Output
    template.add_output(
        Output(
            'Cluster',
            Description='A reference to the ECS cluster',
            Value=Ref(ecs_cluster),
        ))
    print(template.to_json())
from troposphere.ec2 import VPC from troposphere.ec2 import Subnet from troposphere.ec2 import EIP from troposphere.ec2 import VPCGatewayAttachment from troposphere.ec2 import SecurityGroup t = Template() t.add_description("""\ This template deploys a VPC, with a pair of public and private subnets spread across two Availability Zones. It deploys an Internet Gateway, with a default route on the public subnets. It deploys a pair of NAT Gateways (one in each AZ), and default routes for them in the private subnets.""") EnvironmentName = t.add_parameter(Parameter( "EnvironmentName", Type="String", Default="vpc-stack", Description="An environment name that will be prefixed to resource names", )) PrivateSubnet1CIDR = t.add_parameter(Parameter( "PrivateSubnet1CIDR", Default="10.192.20.0/24", Type="String", Description="Please enter the IP range (CIDR notation) for the private subnet in the first Availability Zone", )) PrivateSubnet2CIDR = t.add_parameter(Parameter( "PrivateSubnet2CIDR", Default="10.192.21.0/24", Type="String", Description="Please enter the IP range (CIDR notation) for the private subnet in the second Availability Zone",
FromPort="22", ToPort="22", CidrIp="0.0.0.0/0"), ec2.SecurityGroupRule(IpProtocol="tcp", FromPort="80", ToPort="80", CidrIp="0.0.0.0/0") ] t.add_resource(sg) # This is the keypair that CloudFormation will ask you about when launching the stack keypair = t.add_parameter( Parameter( "KeyName", Description= "Name of the SSH key pair that will be used to access the instance", Type="String", )) ud = Base64( Join('\n', [ "#!/bin/bash", "sudo yum -y install apache", "chown -R ec2-user /var/www/html" "echo '<html><body><h1>Welcome to DevOps on AWS</h1></body></html>' > /var/www/html/test.html", "sudo service apache start", "sudo chkconfig apache on" ])) instance = ec2.Instance("Webserver") instance.ImageId = "ami-e689729e" instance.InstanceType = "t2.micro" instance.SecurityGroups = [Ref(sg)]
t = Template() t.add_version('2010-09-09') t.add_mapping('RegionMap', data["RegionMap"]) t.add_description("""\ AWS CloudFormation ECS Cluster\ """) # Parameters ecsClusterName_param = t.add_parameter(Parameter( "ECSClusterName", Description="ECS Cluster Name", Default=data['ClusterInfo']['Name'], Type="String" )) # Creating the ECS Cluster (using the name provided when running CloudFormation. ECSCluster = t.add_resource(Cluster( 'ECSCluster', ClusterName=Ref(ecsClusterName_param), )) # Policy Amazon EC2 Container Registry - Enable our ECS Cluster to work with the ECR PolicyEcr = t.add_resource(PolicyType( 'PolicyEcr', PolicyName='EcrPolicy',
from troposphere import Equals, Join, Parameter, Ref from .template import template domain_name = Ref( template.add_parameter( Parameter( "DomainName", Description="The fully-qualified domain name for the application.", Type="String", ), group="Global", label="Domain Name", )) domain_name_alternates = Ref( template.add_parameter( Parameter( "DomainNameAlternates", Description= "A comma-separated list of Alternate FQDNs to be included in " "the Subject Alternative Name extension of the SSL certificate.", Type="CommaDelimitedList", ), group="Global", label="Alternate Domain Names", )) no_alt_domains = "NoAlternateDomains" template.add_condition( no_alt_domains,
from troposphere.dynamodb import (Key, AttributeDefinition, ProvisionedThroughput, Projection) from troposphere.dynamodb import Table, GlobalSecondaryIndex template = Template() template.add_description("Create a dynamodb table with a global secondary " "index") # N.B. If you remove the provisioning section this works for # LocalSecondaryIndexes aswell. readunits = template.add_parameter( Parameter("ReadCapacityUnits", Description="Provisioned read throughput", Type="Number", Default="10", MinValue="5", MaxValue="10000", ConstraintDescription="should be between 5 and 10000")) writeunits = template.add_parameter( Parameter("WriteCapacityUnits", Description="Provisioned write throughput", Type="Number", Default="5", MinValue="5", MaxValue="10000", ConstraintDescription="should be between 5 and 10000")) tableIndexName = template.add_parameter( Parameter(
def test_property_default(self):
    """Default values must type-check against the declared parameter Type."""
    # Accepted (Type, Default) combinations validate cleanly.
    accepted = [
        ("String", "foo"),
        ("Number", 1),
        ("Number", 1.0),
        ("Number", 0.1),
        ("List<Number>", "1, 2, 3"),
        ("List<Number>", " 0.1 , 2 , 1.1 "),
    ]
    for ptype, default in accepted:
        param = Parameter("param", Type=ptype, Default=default)
        param.validate()

    # Mismatched value kinds raise ValueError.
    rejected_value = [
        ("String", 1),
        ("Number", "foo"),
        ("List<Number>", "foo"),
        ("List<Number>", "1, 2, foo"),
    ]
    for ptype, default in rejected_value:
        with self.assertRaises(ValueError):
            param = Parameter("param", Type=ptype, Default=default)
            param.validate()

    # Non-scalar defaults raise TypeError.
    rejected_type = [
        ("Number", ["foo"]),
        ("List<Number>", ["1", "2"]),
    ]
    for ptype, default in rejected_type:
        with self.assertRaises(TypeError):
            param = Parameter("param", Type=ptype, Default=default)
            param.validate()
# now the work begins t = Template() t.add_version("2010-09-09") t.add_description("""\ CloudFormation template to Deploy Hortonworks Data Platform on VPC with a public subnet""" ) ## Parameters AmbariInstanceType = t.add_parameter( Parameter( "AmbariInstanceType", Default="m3.large", ConstraintDescription="Must be a valid EC2 instance type.", Type="String", Description="Instance type for Ambari node", )) WorkerInstanceCount = t.add_parameter( Parameter( "WorkerInstanceCount", Default="2", Type="Number", MaxValue="99", MinValue="1", Description="Number of Worker instances", )) MasterInstanceCount = t.add_parameter(
def test_property_validator(self):
    """validate() rejects constraint attributes that don't fit the Type."""
    # MaxLength is valid for String parameters.
    Parameter("BasicString", Type="String", MaxLength=10).validate()

    # MaxValue is a Number-only constraint, invalid on a String.
    bad_string = Parameter("BasicString", Type="String", MaxValue=10)
    with self.assertRaises(ValueError):
        bad_string.validate()

    # MaxValue is valid for Number parameters.
    Parameter("BasicNumber", Type="Number", MaxValue=10).validate()

    # AllowedPattern is a String-only constraint, invalid on a Number.
    bad_number = Parameter("BasicNumber", Type="Number", AllowedPattern=".*")
    with self.assertRaises(ValueError):
        bad_number.validate()
from troposphere import Ref, Template, Parameter, Output
from troposphere.ec2 import SecurityGroup
import troposphere.ec2 as ec2

# Build a minimal template: one EC2 instance plus key/SG input parameters.
template = Template()

# SSH key pair used to access the instance.
keyname_param = template.add_parameter(
    Parameter(
        "KeyName",
        Description="EC2 Keypair",
        Type="String",
    ))

# Registered as a template input; the Parameter object is kept for
# symmetry even though nothing below references it yet.
securityGroup_param = template.add_parameter(
    Parameter("SecurityGroup",
              Description="Security Group ID",
              Type="String"))

# BUG FIX: the original called bare `Instance(...)`, but only the `ec2`
# module and `SecurityGroup` are imported, so it raised NameError at
# runtime. Qualify the class as `ec2.Instance`.
ec2_instance = template.add_resource(
    ec2.Instance(
        'JordansInstance',
        ImageId='ami-ebed508f',
        InstanceType='t2.micro',
        KeyName=Ref(keyname_param),
    ))

template.add_output([
    Output("InstanceId",
           Description="InstanceID of EC2 instance",
           Value=Ref(ec2_instance))
])

print(template.to_json())