def test_CreationPolicy(self):
    w = WaitCondition(
        "mycondition",
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M')),
    )
    w.validate()
def test_RequiredProps(self):
    handle = WaitConditionHandle("myWaitHandle")
    w = WaitCondition(
        "mycondition",
        Handle=Ref(handle),
        Timeout="300",
    )
    w.validate()
def test_CreationPolicyWithProps(self):
    w = WaitCondition(
        "mycondition",
        Count=10,
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M')),
    )
    with self.assertRaises(ValueError):
        w.validate()
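# A hedged companion test (not from the original suite): troposphere's
# WaitCondition validator treats CreationPolicy as mutually exclusive with the
# classic properties, so pairing Handle with CreationPolicy should raise a
# ValueError just as Count does above.
def test_CreationPolicyWithHandle(self):
    handle = WaitConditionHandle("myWaitHandle")
    w = WaitCondition(
        "mycondition",
        Handle=Ref(handle),
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M')),
    )
    with self.assertRaises(ValueError):
        w.validate()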
def create_template(self):
    v = self.get_variables()
    t = self.template
    base_name = "Dummy"
    for i in range(v["Count"]):
        name = "%s%s" % (base_name, i)
        last_name = None
        if i:
            last_name = "%s%s" % (base_name, i - 1)
        wch = WaitConditionHandle(name)
        if last_name is not None:
            wch.DependsOn = last_name
        t.add_resource(wch)

    self.add_output("OutputValue", str(v["OutputValue"]))
    self.add_output("WCHCount", str(v["Count"]))

    if v["BreakLast"]:
        t.add_resource(
            WaitCondition(
                "BrokenWaitCondition",
                Handle=wch.Ref(),
                # Timeout is made deliberately large so CF rejects it
                Timeout=2**32,
                Count=0))
def create_template(self) -> None:
    """Create template."""
    base_name = "Dummy"
    for i in range(self.variables["Count"]):
        name = "%s%s" % (base_name, i)
        last_name = None
        if i:
            last_name = "%s%s" % (base_name, i - 1)
        wch = WaitConditionHandle(name)
        if last_name is not None:
            wch.DependsOn = last_name
        self.template.add_resource(wch)
        if self.variables["BreakLast"] and i == self.variables["Count"] - 1:
            self.template.add_resource(
                WaitCondition(
                    "BrokenWaitCondition",
                    Handle=wch.Ref(),
                    # Timeout is made deliberately large so CF rejects it
                    Timeout=2**32,
                    Count=0,
                ))
    self.add_output("OutputValue", str(self.variables["OutputValue"]))
    self.add_output("WCHCount", str(self.variables["Count"]))
def create_template(self):
    t = self.template
    t.add_resource(WaitConditionHandle("BrokenDummy"))
    t.add_resource(WaitCondition(
        "BrokenWaitCondition",
        Handle=Ref("BrokenDummy"),
        # Timeout is made deliberately large so CF rejects it
        Timeout=2 ** 32,
        Count=0))
    t.add_output(Output("DummyId", Value="dummy-1234"))
def test_yaml_long_form(self):
    t = Template()
    t.add_resource(
        WaitCondition("MyWaitCondition", Timeout=30, Handle=Sub(cond_string))
    )
    self.assertEqual(cond_normal, t.to_yaml())
    self.assertEqual(cond_long, t.to_yaml(long_form=True))
    self.assertEqual(cond_long, t.to_yaml(False, True))
    self.assertEqual(cond_clean, t.to_yaml(clean_up=True))
    self.assertEqual(cond_clean, t.to_yaml(True))
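# Illustrative sketch (not from the test module, fixture values invented):
# troposphere delegates YAML conversion to cfn_flip, whose positional
# signature (clean_up, long_form) is what the two-argument calls above
# exercise. By default the output uses short-form tags such as !Sub, while
# long_form=True keeps the full function names such as Fn::Sub.
from cfn_flip import to_yaml as flip_to_yaml

snippet = '{"Handle": {"Fn::Sub": "${AWS::StackName}-handle"}}'
print(flip_to_yaml(snippet))                  # Handle: !Sub '${AWS::StackName}-handle'
print(flip_to_yaml(snippet, long_form=True))  # Handle:\n  Fn::Sub: '${AWS::StackName}-handle'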
def _wait_condition_data_to_s3_url(
    condition: cloudformation.WaitCondition, artifacts_bucket: s3.Bucket
) -> Sub:
    """Build a CloudFormation ``Sub`` structure that resolves to the S3 URL of
    the object whose key was reported to a wait condition.

    :param condition: Wait condition to reference
    :param artifacts_bucket: Bucket to reference

    """
    return Sub(
        f"https://${{{artifacts_bucket.title}.DomainName}}/${{key}}",
        {"key": Select(3, Split('"', condition.get_att("Data")))},
    )
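# Worked illustration (values invented): the wait condition's "Data" attribute
# resolves to a JSON object mapping each signal's UniqueId to its Data, e.g.
# '{"upload001": "artifacts/build.zip"}'. Splitting that string on '"' puts
# the signalled value at index 3, which is exactly what
# Select(3, Split('"', ...)) extracts above.
parts = '{"upload001": "artifacts/build.zip"}'.split('"')
assert parts == ['{', 'upload001', ': ', 'artifacts/build.zip', '}']
assert parts[3] == "artifacts/build.zip"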
def create_template(self) -> None:
    """Create template."""
    self.template.add_resource(WaitConditionHandle("BrokenDummy"))
    self.template.add_resource(
        WaitCondition(
            "BrokenWaitCondition",
            Handle=Ref("BrokenDummy"),
            # Timeout is made deliberately large so CF rejects it
            Timeout=2 ** 32,
            Count=0,
        )
    )
    self.add_output("DummyId", "dummy-1234")
def main():
    t = Template()

    AddAMIMap(t)

    t.set_version("2010-09-09")
    t.set_description(
        "DCV 2017 Remote Desktop with Xilinx Vivado (using AWS FPGA Developer AMI)"
    )

    tags = Tags(Name=Ref("AWS::StackName"))

    # user data
    InstUserData = [
        '#!/usr/bin/env bash\n',
        '\n',
        'set -x\n',
        '\n',
        '##exit 0\n',  # use this to disable all user-data and bring up files
        '\n',
        'my_wait_handle="', Ref('InstanceWaitHandle'), '"\n',
        'user_name="', Ref('UserName'), '"\n',
        'user_pass="******"\n',
        '\n',
    ]

    with open('_include/dcv-install.sh', 'r') as ud_file:
        user_data_file = ud_file.readlines()

    for line in user_data_file:
        InstUserData.append(line)

    VPCId = t.add_parameter(Parameter(
        'VPCId',
        Type="AWS::EC2::VPC::Id",
        Description="VPC ID for where the remote desktop instance should be launched"
    ))
    t.set_parameter_label(VPCId, "VPC ID")
    t.add_parameter_to_group(VPCId, "Instance Configuration")

    Subnet = t.add_parameter(Parameter(
        'Subnet',
        Type="AWS::EC2::Subnet::Id",
        Description="For the Subnet ID, you should choose one in the "
                    "Availability Zone where you want the instance launched"
    ))
    t.set_parameter_label(Subnet, "Subnet ID")
    t.add_parameter_to_group(Subnet, "Instance Configuration")

    ExistingSecurityGroup = t.add_parameter(Parameter(
        'ExistingSecurityGroup',
        Type="String",
        Default="NO_VALUE",
        Description="OPTIONAL: Needs to be an SG ID, for example sg-abcd1234efgh. "
                    "This is an already existing Security Group ID that is "
                    "in the same VPC; it is an addition to the security groups that "
                    "are automatically created to enable access to the remote desktop. "
                    "Leave as NO_VALUE if you choose not to use this."
    ))
    t.set_parameter_label(ExistingSecurityGroup,
                          "OPTIONAL: Existing Security Group (e.g. sg-abcd1234efgh)")
    t.add_parameter_to_group(ExistingSecurityGroup, "Instance Configuration")

    remoteDesktopInstanceType = t.add_parameter(Parameter(
        'remoteDesktopInstanceType',
        Type="String",
        Description="This is the instance type that will be used. As this is a "
                    "2D workstation, we are not supporting GPU instance types.",
        Default="m4.xlarge",
        AllowedValues=[
            "m4.large", "m4.xlarge", "m4.2xlarge", "m4.4xlarge", "m4.10xlarge",
            "m5.large", "m5.xlarge", "m5.2xlarge", "m5.4xlarge", "m5.12xlarge",
            "m5.24xlarge",
            "z1d.large", "z1d.xlarge", "z1d.2xlarge", "z1d.3xlarge",
            "z1d.6xlarge", "z1d.12xlarge", "z1d.metal"
        ],
        ConstraintDescription="Must be an EC2 instance type from the list"
    ))
    t.set_parameter_label(remoteDesktopInstanceType, "Remote Desktop Instance Type")
    t.add_parameter_to_group(remoteDesktopInstanceType, "Instance Configuration")

    EC2KeyName = t.add_parameter(Parameter(
        'EC2KeyName',
        Type="AWS::EC2::KeyPair::KeyName",
        Description="Name of an existing EC2 KeyPair to enable SSH access to the instance.",
        ConstraintDescription="REQUIRED: Must be a valid EC2 key pair"
    ))
    t.set_parameter_label(EC2KeyName, "EC2 Key Name")
    t.add_parameter_to_group(EC2KeyName, "Instance Configuration")

    OperatingSystem = t.add_parameter(Parameter(
        'OperatingSystem',
        Type="String",
        Description="Operating System of the AMI",
        Default="centos7",
        AllowedValues=["centos7"],
        ConstraintDescription="Must be: centos7"
    ))
    t.set_parameter_label(OperatingSystem, "Operating System of AMI")
    t.add_parameter_to_group(OperatingSystem, "Instance Configuration")

    StaticPrivateIpAddress = t.add_parameter(Parameter(
        'StaticPrivateIpAddress',
        Type="String",
        Default="NO_VALUE",
        Description="OPTIONAL: If you already have a private VPC address range, you can "
                    "specify the private IP address to use. Leave as NO_VALUE if you "
                    "choose not to use this.",
    ))
    t.set_parameter_label(StaticPrivateIpAddress, "OPTIONAL: Static Private IP Address")
    t.add_parameter_to_group(StaticPrivateIpAddress, "Instance Configuration")

    UsePublicIp = t.add_parameter(Parameter(
        'UsePublicIp',
        Type="String",
        Description="Should a public IP address be given to the instance; "
                    "this is overridden by CreateElasticIP=True",
        Default="True",
        ConstraintDescription="True/False",
        AllowedValues=["True", "False"]
    ))
    t.set_parameter_label(UsePublicIp, "Assign a public IP Address")
    t.add_parameter_to_group(UsePublicIp, "Instance Configuration")

    CreateElasticIP = t.add_parameter(Parameter(
        'CreateElasticIP',
        Type="String",
        Description="Should an Elastic IP address be created and assigned; "
                    "this allows for persistent IP address assignment",
        Default="True",
        ConstraintDescription="True/False",
        AllowedValues=["True", "False"]
    ))
    t.set_parameter_label(CreateElasticIP, "Create an Elastic IP address")
    t.add_parameter_to_group(CreateElasticIP, "Instance Configuration")

    S3BucketName = t.add_parameter(Parameter(
        'S3BucketName',
        Type="String",
        Default="NO_VALUE",
        Description="OPTIONAL: S3 bucket to allow this instance read access "
                    "(List and Get). Leave as NO_VALUE if you choose not to use this."
    ))
    t.set_parameter_label(S3BucketName, "OPTIONAL: S3 bucket for read access")
    t.add_parameter_to_group(S3BucketName, "Instance Configuration")

    AccessCidr = t.add_parameter(Parameter(
        'AccessCidr',
        Type="String",
        Description="This is the CIDR block for allowing remote access, for ports 22 and 8443",
        Default="111.222.333.444/32",
        AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
        ConstraintDescription="Must be a valid CIDR x.x.x.x/x"
    ))
    t.set_parameter_label(AccessCidr, "CIDR block for remote access (ports 22 and 8443)")
    t.add_parameter_to_group(AccessCidr, "Instance Configuration")

    UserName = t.add_parameter(Parameter(
        'UserName',
        Type="String",
        Description='User name for DCV remote desktop login, default is "simuser".',
        Default="simuser",
        MinLength="4",
    ))
    t.set_parameter_label(UserName, "User name for DCV login")
    t.add_parameter_to_group(UserName, "DCV Configuration")

    UserPass = t.add_parameter(Parameter(
        'UserPass',
        Type="String",
        Description="Password for DCV remote desktop login. The default password is Ch4ng3M3!",
        Default="Ch4ng3M3!",
        MinLength="8",
        AllowedPattern="^((?=.*[a-z])(?=.*[A-Z])(?=.*[\\d])|(?=.*[a-z])(?=.*[A-Z])(?=.*[\\W_])|(?=.*[a-z])(?=.*[\\d])(?=.*[\\W_])|(?=.*[A-Z])(?=.*[\\d])(?=.*[\\W_])).+$",
        ConstraintDescription="Password must contain at least one element from three "
                              "of the following sets: lowercase letters, uppercase "
                              "letters, base 10 digits, non-alphanumeric characters",
        NoEcho=True
    ))
    t.set_parameter_label(UserPass, "Password for DCV login")
    t.add_parameter_to_group(UserPass, "DCV Configuration")
    # end parameters

    RootRole = t.add_resource(iam.Role(
        "RootRole",
        AssumeRolePolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {"Service": ["ec2.amazonaws.com"]},
                "Action": ["sts:AssumeRole"]
            }]
        }
    ))

    dcvBucketPolicy = t.add_resource(PolicyType(
        "dcvBucketPolicy",
        PolicyName="dcvBucketPolicy",
        Roles=[Ref(RootRole)],
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Action": ["s3:GetObject"],
                "Resource": "arn:aws:s3:::dcv-license.us-east-1/*"
            }],
        },
    ))

    BucketPolicy = t.add_resource(PolicyType(
        "BucketPolicy",
        PolicyName="BucketPolicy",
        Roles=[Ref(RootRole)],
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": ["s3:GetObject"],
                    "Resource": {"Fn::Join": ["", ["arn:aws:s3:::",
                                                   {"Ref": "S3BucketName"}, "/*"]]}
                },
                {
                    "Effect": "Allow",
                    "Action": ["s3:ListBucket"],
                    "Resource": {"Fn::Join": ["", ["arn:aws:s3:::",
                                                   {"Ref": "S3BucketName"}]]}
                }
            ],
        },
        Condition="Has_Bucket"
    ))

    remoteDesktopSecurityGroup = t.add_resource(SecurityGroup(
        "remoteDesktopSecurityGroup",
        VpcId=Ref(VPCId),
        GroupDescription="Remote Desktop Security group",
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="8443",
                ToPort="8443",
                CidrIp=Ref(AccessCidr),
            ),
        ]
    ))

    SshSecurityGroup = t.add_resource(SecurityGroup(
        "SshSecurityGroup",
        VpcId=Ref(VPCId),
        GroupDescription="SSH Security group",
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="22",
                ToPort="22",
                CidrIp=Ref(AccessCidr),
            ),
        ]
    ))

    RootInstanceProfile = t.add_resource(InstanceProfile(
        "RootInstanceProfile",
        Roles=[Ref(RootRole)]
    ))

    remoteDesktopInstance = t.add_resource(ec2.Instance(
        'remoteDesktopInstance',
        ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"), Ref(OperatingSystem)),
        KeyName=Ref(EC2KeyName),
        InstanceType=Ref(remoteDesktopInstanceType),
        DisableApiTermination='false',
        NetworkInterfaces=[
            NetworkInterfaceProperty(
                SubnetId=Ref(Subnet),
                GroupSet=If(
                    "not_existing_sg",
                    [Ref(remoteDesktopSecurityGroup), Ref(SshSecurityGroup)],
                    [Ref(remoteDesktopSecurityGroup), Ref(SshSecurityGroup),
                     Ref(ExistingSecurityGroup)]
                ),
                AssociatePublicIpAddress=Ref(UsePublicIp),
                DeviceIndex='0',
                DeleteOnTermination='true',
                PrivateIpAddress=If(
                    "Has_Static_Private_IP",
                    Ref(StaticPrivateIpAddress),
                    Ref("AWS::NoValue"),
                )
            )
        ],
        IamInstanceProfile=Ref(RootInstanceProfile),
        UserData=Base64(Join('', InstUserData)),
    ))

    EIPAddress = t.add_resource(EIP(
        'EIPAddress',
        Domain='vpc',
        InstanceId=Ref(remoteDesktopInstance),
        Condition="create_elastic_ip"
    ))

    t.add_condition("not_existing_sg",
                    Equals(Ref(ExistingSecurityGroup), "NO_VALUE"))
    t.add_condition("Has_Public_Ip", Equals(Ref(UsePublicIp), "True"))
    t.add_condition("Has_Bucket", Not(Equals(Ref(S3BucketName), "NO_VALUE")))
    t.add_condition("create_elastic_ip", Equals(Ref(CreateElasticIP), "True"))
    t.add_condition("Has_Static_Private_IP",
                    Not(Equals(Ref(StaticPrivateIpAddress), "NO_VALUE")))

    waithandle = t.add_resource(WaitConditionHandle('InstanceWaitHandle'))

    instanceWaitCondition = t.add_resource(WaitCondition(
        "instanceWaitCondition",
        Handle=Ref(waithandle),
        Timeout="3600",
        DependsOn="remoteDesktopInstance"
    ))

    t.add_output([
        Output(
            "DCVConnectionLink",
            Description="Connect to the DCV Remote Desktop with this URL",
            Value=Join("", [
                "https://",
                GetAtt("remoteDesktopInstance", 'PublicIp'),
                ":8443"
            ])
        ),
        Output(
            "DCVUserName",
            Description="Login name for DCV session",
            Value=Ref(UserName)
        ),
        Output(
            "SSHTunnelCommand",
            Description='Command for setting up SSH tunnel to remote desktop, '
                        'use "localhost:18443" for DCV client',
            Value=Join("", [
                "ssh -i <file.pem> -L 18443:localhost:8443 -l centos ",
                GetAtt("remoteDesktopInstance", 'PublicIp')
            ])
        ),
    ])

    # print(t.to_json(indent=2))
    print(to_yaml(t.to_json(indent=2), clean_up=True))
def flocker_docker_template(cluster_size, client_ami_map, node_ami_map):
    """
    :param int cluster_size: The number of nodes to create in the Flocker
        cluster (including control service node).
    :param dict client_ami_map: A map between AWS region name and AWS AMI ID
        for the client.
    :param dict node_ami_map: A map between AWS region name and AWS AMI ID
        for the node.
    :returns: a CloudFormation template for a Flocker + Docker + Docker Swarm
        cluster.
    """
    # Base JSON template.
    template = Template()

    # Keys corresponding to CloudFormation user Inputs.
    access_key_id_param = template.add_parameter(
        Parameter(
            "AmazonAccessKeyID",
            Description="Required: Your Amazon AWS access key ID",
            Type="String",
            NoEcho=True,
            AllowedPattern="[\\w]+",
            MinLength="16",
            MaxLength="32",
        ))
    secret_access_key_param = template.add_parameter(
        Parameter(
            "AmazonSecretAccessKey",
            Description="Required: Your Amazon AWS secret access key",
            Type="String",
            NoEcho=True,
            MinLength="1",
        ))
    keyname_param = template.add_parameter(
        Parameter(
            "EC2KeyPair",
            Description="Required: Name of an existing EC2 KeyPair to enable "
                        "SSH access to the instance",
            Type="AWS::EC2::KeyPair::KeyName",
        ))
    template.add_parameter(
        Parameter(
            "S3AccessPolicy",
            Description="Required: Is current IAM user allowed to access S3? "
                        "S3 access is required to distribute Flocker and Docker "
                        "configuration amongst stack nodes. Reference: "
                        "http://docs.aws.amazon.com/IAM/latest/UserGuide/"
                        "access_permissions.html Stack creation will fail if user "
                        "cannot access S3",
            Type="String",
            AllowedValues=["Yes"],
        ))
    volumehub_token = template.add_parameter(
        Parameter(
            "VolumeHubToken",
            Description=("Optional: Your Volume Hub token. "
                         "You'll find the token at "
                         "https://volumehub.clusterhq.com/v1/token."),
            Type="String",
            Default="",
        ))

    template.add_mapping('RegionMapClient',
                         {k: {"AMI": v} for k, v in client_ami_map.items()})
    template.add_mapping('RegionMapNode',
                         {k: {"AMI": v} for k, v in node_ami_map.items()})

    # Select a random AvailabilityZone within given AWS Region.
    zone = Select(0, GetAZs(""))

    # S3 bucket to hold {Flocker, Docker, Swarm} configuration for
    # distribution between nodes.
    s3bucket = Bucket('ClusterConfig', DeletionPolicy='Retain')
    template.add_resource(s3bucket)

    # Create SecurityGroup for cluster instances.
    instance_sg = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription=(
                "Enable ingress access on all protocols and ports."),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol=protocol,
                    FromPort="0",
                    ToPort="65535",
                    CidrIp="0.0.0.0/0",
                ) for protocol in ('tcp', 'udp')
            ]))

    # Base for post-boot {Flocker, Docker, Swarm} configuration on the nodes.
    base_user_data = [
        '#!/bin/bash\n',
        'aws_region="', Ref("AWS::Region"), '"\n',
        'aws_zone="', zone, '"\n',
        'access_key_id="', Ref(access_key_id_param), '"\n',
        'secret_access_key="', Ref(secret_access_key_param), '"\n',
        's3_bucket="', Ref(s3bucket), '"\n',
        'stack_name="', Ref("AWS::StackName"), '"\n',
        'volumehub_token="', Ref(volumehub_token), '"\n',
        'node_count="{}"\n'.format(cluster_size),
        'apt-get update\n',
    ]

    # XXX Flocker agents are indexed from 1 while the nodes overall are
    # indexed from 0.
    flocker_agent_number = 1

    # Gather WaitConditions.
    wait_condition_names = []

    for i in range(cluster_size):
        if i == 0:
            node_name = CONTROL_NODE_NAME
        else:
            node_name = AGENT_NODE_NAME_TEMPLATE.format(index=i)

        # Create an EC2 instance for the {Agent, Control} Node.
        ec2_instance = ec2.Instance(
            node_name,
            ImageId=FindInMap("RegionMapNode", Ref("AWS::Region"), "AMI"),
            InstanceType="m3.large",
            KeyName=Ref(keyname_param),
            SecurityGroups=[Ref(instance_sg)],
            AvailabilityZone=zone,
            Tags=Tags(Name=node_name))

        # WaitCondition and corresponding Handler to signal completion
        # of {Flocker, Docker, Swarm} configuration on the node.
        wait_condition_handle = WaitConditionHandle(
            INFRA_WAIT_HANDLE_TEMPLATE.format(node=node_name))
        template.add_resource(wait_condition_handle)
        wait_condition = WaitCondition(
            INFRA_WAIT_CONDITION_TEMPLATE.format(node=node_name),
            Handle=Ref(wait_condition_handle),
            Timeout=NODE_CONFIGURATION_TIMEOUT,
        )
        template.add_resource(wait_condition)

        # Gather WaitConditions.
        wait_condition_names.append(wait_condition.name)

        user_data = base_user_data[:]
        user_data += [
            'node_number="{}"\n'.format(i),
            'node_name="{}"\n'.format(node_name),
            'wait_condition_handle="', Ref(wait_condition_handle), '"\n',
        ]

        # Setup S3 utilities to push/pull node-specific data to/from S3 bucket.
        user_data += _sibling_lines(S3_SETUP)

        if i == 0:
            # Control Node configuration.
            control_service_instance = ec2_instance
            user_data += ['flocker_node_type="control"\n']
            user_data += _sibling_lines(FLOCKER_CONFIGURATION_GENERATOR)
            user_data += _sibling_lines(DOCKER_SWARM_CA_SETUP)
            user_data += _sibling_lines(DOCKER_SETUP)

            # Setup Swarm 1.0.1.
            user_data += _sibling_lines(SWARM_MANAGER_SETUP)
            template.add_output([
                Output(
                    "ControlNodeIP",
                    Description="Public IP of Flocker Control and "
                                "Swarm Manager.",
                    Value=GetAtt(ec2_instance, "PublicIp"),
                )
            ])
        else:
            # Agent Node configuration.
            ec2_instance.DependsOn = control_service_instance.name
            user_data += [
                'flocker_node_type="agent"\n',
                'flocker_agent_number="{}"\n'.format(flocker_agent_number)
            ]
            flocker_agent_number += 1
            user_data += _sibling_lines(DOCKER_SETUP)

            # Setup Swarm 1.0.1.
            user_data += _sibling_lines(SWARM_NODE_SETUP)
            template.add_output([
                Output(
                    "AgentNode{}IP".format(i),
                    Description=(
                        "Public IP of Agent Node for Flocker and Swarm."),
                    Value=GetAtt(ec2_instance, "PublicIp"),
                )
            ])

        user_data += _sibling_lines(FLOCKER_CONFIGURATION_GETTER)
        user_data += _sibling_lines(VOLUMEHUB_SETUP)
        user_data += _sibling_lines(SIGNAL_CONFIG_COMPLETION)
        ec2_instance.UserData = Base64(Join("", user_data))
        template.add_resource(ec2_instance)

    # Client Node creation.
    client_instance = ec2.Instance(
        CLIENT_NODE_NAME,
        ImageId=FindInMap("RegionMapClient", Ref("AWS::Region"), "AMI"),
        InstanceType="m3.medium",
        KeyName=Ref(keyname_param),
        SecurityGroups=[Ref(instance_sg)],
        AvailabilityZone=zone,
        Tags=Tags(Name=CLIENT_NODE_NAME))
    wait_condition_handle = WaitConditionHandle(CLIENT_WAIT_HANDLE)
    template.add_resource(wait_condition_handle)
    wait_condition = WaitCondition(
        CLIENT_WAIT_CONDITION,
        Handle=Ref(wait_condition_handle),
        Timeout=NODE_CONFIGURATION_TIMEOUT,
    )
    template.add_resource(wait_condition)

    # Client Node {Flockerctl, Docker-compose} configuration.
    user_data = base_user_data[:]
    user_data += [
        'wait_condition_handle="', Ref(wait_condition_handle), '"\n',
        'node_number="{}"\n'.format("-1"),
    ]
    user_data += _sibling_lines(S3_SETUP)
    user_data += _sibling_lines(CLIENT_SETUP)
    user_data += _sibling_lines(SIGNAL_CONFIG_COMPLETION)
    client_instance.UserData = Base64(Join("", user_data))

    # Start Client Node after Control Node and Agent Nodes are
    # up and running Flocker, Docker, Swarm stack.
    client_instance.DependsOn = wait_condition_names
    template.add_resource(client_instance)

    # List of Output fields upon successful creation of the stack.
    template.add_output([
        Output(
            "ClientNodeIP",
            Description="Public IP address of the client node.",
            Value=GetAtt(client_instance, "PublicIp"),
        )
    ])
    template.add_output(
        Output(
            "ClientConfigDockerSwarmHost",
            Value=Join("", [
                "export DOCKER_HOST=tcp://",
                GetAtt(control_service_instance, "PublicIp"), ":2376"
            ]),
            Description="Client config: Swarm Manager's DOCKER_HOST setting."))
    template.add_output(
        Output("ClientConfigDockerTLS",
               Value="export DOCKER_TLS_VERIFY=1",
               Description="Client config: Enable TLS client for Swarm."))
    return template.to_json()
def main():
    t = Template()

    t.set_description("test instance launch")
    t.set_version("2010-09-09")

    InstUserData = [
        '#!/usr/bin/env bash\n',
        '\n',
        'set -x\n',
        '\n',
        'my_wait_handle="', Ref('InstanceWaitHandle'), '"\n',
        'curl -X PUT -H \'Content-Type:\' --data-binary \'{ "Status" : "SUCCESS", '
        '"Reason" : "Instance launched", "UniqueId" : "launch001", '
        '"Data" : "Instance launched."}\' "${my_wait_handle}"',
        '\n',
        '\n',
    ]

    EC2KeyName = t.add_parameter(
        Parameter(
            'EC2KeyName',
            Type="AWS::EC2::KeyPair::KeyName",
            Description="Name of an existing EC2 KeyPair to enable SSH access "
                        "to the instance.",
            ConstraintDescription="REQUIRED: Must be a valid EC2 key pair",
        ))

    OperatingSystem = t.add_parameter(
        Parameter('OperatingSystem',
                  Type="String",
                  Description="Operating System",
                  Default="centos7",
                  AllowedValues=[
                      "alinux2",
                      "centos7",
                      "rhel7",
                  ],
                  ConstraintDescription="Must be: alinux2, centos7, rhel7"))

    myInstanceType = t.add_parameter(
        Parameter(
            'MyInstanceType',
            Type="String",
            Description="Instance type",
            Default="m5.2xlarge",
        ))

    VpcId = t.add_parameter(
        Parameter(
            'VpcId',
            Type="AWS::EC2::VPC::Id",
            Description="VPC Id for this instance",
        ))

    Subnet = t.add_parameter(
        Parameter('Subnet',
                  Type="AWS::EC2::Subnet::Id",
                  Description="Subnet IDs"))

    ExistingSecurityGroup = t.add_parameter(
        Parameter(
            'ExistingSecurityGroup',
            Type="AWS::EC2::SecurityGroup::Id",
            Description="OPTIONAL: Choose an existing Security Group ID, "
                        "e.g. sg-abcd1234"))

    UsePublicIp = t.add_parameter(
        Parameter(
            'UsePublicIp',
            Type="String",
            Description="Should a public IP address be given to the instance",
            Default="true",
            ConstraintDescription="true/false",
            AllowedValues=["true", "false"]))

    SshAccessCidr = t.add_parameter(
        Parameter(
            'SshAccessCidr',
            Type="String",
            Description="CIDR Block for SSH access, default 127.0.0.1/32",
            Default="127.0.0.1/32",
            AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))

    RootRole = t.add_resource(
        iam.Role("RootRole",
                 AssumeRolePolicyDocument={
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 }))

    SshSecurityGroup = t.add_resource(
        SecurityGroup("SshSecurityGroup",
                      VpcId=Ref(VpcId),
                      GroupDescription="SSH Security group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="22",
                              ToPort="22",
                              CidrIp=Ref(SshAccessCidr),
                          ),
                      ]))

    RootInstanceProfile = t.add_resource(
        InstanceProfile("RootInstanceProfile", Roles=[Ref(RootRole)]))

    tags = Tags(Name=Ref("AWS::StackName"))

    myInstance = t.add_resource(
        ec2.Instance(
            'MyInstance',
            ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"),
                              Ref(OperatingSystem)),
            KeyName=Ref(EC2KeyName),
            InstanceType=Ref(myInstanceType),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=If(
                        "not_existing_sg",
                        [Ref(SshSecurityGroup)],
                        [Ref(SshSecurityGroup), Ref(ExistingSecurityGroup)]),
                    AssociatePublicIpAddress=Ref(UsePublicIp),
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(Subnet))
            ],
            IamInstanceProfile=Ref(RootInstanceProfile),
            UserData=Base64(Join('', InstUserData)),
        ))

    t.add_mapping(
        'AWSRegionAMI', {
            "ap-northeast-1": {"centos7": "ami-8e8847f1", "rhel7": "ami-6b0d5f0d"},
            "ap-northeast-2": {"centos7": "ami-bf9c36d1", "rhel7": "ami-3eee4150"},
            "ap-south-1": {"centos7": "ami-1780a878", "rhel7": "ami-5b673c34"},
            "ap-southeast-1": {"centos7": "ami-8e0205f2", "rhel7": "ami-76144b0a"},
            "ap-southeast-2": {"centos7": "ami-d8c21dba", "rhel7": "ami-67589505"},
            "ca-central-1": {"centos7": "ami-e802818c", "rhel7": "ami-49f0762d"},
            "eu-central-1": {"centos7": "ami-dd3c0f36", "rhel7": "ami-c86c3f23"},
            "eu-west-1": {"centos7": "ami-3548444c", "rhel7": "ami-7c491f05"},
            "eu-west-2": {"centos7": "ami-00846a67", "rhel7": "ami-7c1bfd1b"},
            "eu-west-3": {"centos7": "ami-262e9f5b", "rhel7": "ami-5026902d"},
            "sa-east-1": {"centos7": "ami-cb5803a7", "rhel7": "ami-b0b7e3dc"},
            "us-east-1": {"centos7": "ami-9887c6e7", "rhel7": "ami-6871a115"},
            "us-east-2": {"centos7": "ami-9c0638f9", "rhel7": "ami-03291866"},
            "us-west-1": {"centos7": "ami-4826c22b", "rhel7": "ami-18726478"},
            "us-west-2": {"centos7": "ami-3ecc8f46", "rhel7": "ami-28e07e50"}
        })

    t.add_condition("not_existing_sg", Equals(Ref(ExistingSecurityGroup), ""))
    t.add_condition("Has_Public_Ip", Equals(Ref(UsePublicIp), "true"))

    mywaithandle = t.add_resource(WaitConditionHandle('InstanceWaitHandle'))

    mywaitcondition = t.add_resource(
        WaitCondition("InstanceWaitCondition",
                      Handle=Ref(mywaithandle),
                      Timeout="1500",
                      DependsOn="MyInstance"))

    t.add_output([
        Output("InstanceID", Description="Instance ID", Value=Ref(myInstance))
    ])
    t.add_output(
        [Output("InstancePrivateIP", Value=GetAtt('MyInstance', 'PrivateIp'))])
    t.add_output([
        Output("InstancePublicIP",
               Value=GetAtt('MyInstance', 'PublicIp'),
               Condition="Has_Public_Ip")
    ])

    # print(t.to_yaml())
    print(t.to_json(indent=2))
"AMI": "ami-d1f482b1" }, "us-east-1": { "AMI": "ami-8fcee4e5" }, "us-west-2": { "AMI": "ami-63b25203" } }) waitHandleAmbari = t.add_resource(WaitConditionHandle("waitHandleAmbari")) waitConditionAmbari = t.add_resource( WaitCondition( "waitConditionAmbari", Handle=Ref(waitHandleAmbari), Timeout="3600", )) ## Functions to generate blockdevicemappings ## count: the number of devices to map ## devicenamebase: "/dev/sd" or "/dev/xvd" ## volumesize: "100" ## volumetype: "gp2" def my_block_device_mappings_root(devicenamebase, volumesize, volumetype): block_device_mappings_root = (ec2.BlockDeviceMapping( DeviceName=devicenamebase + "a1", Ebs=ec2.EBSBlockDevice(VolumeSize=volumesize, VolumeType=volumetype))) return block_device_mappings_root
"CidrIp": "10.0.0.0/16", "FromPort": 0 }], VpcId=Ref(VPC), GroupDescription="Allow access to things on the VPC", Tags=Tags(Name=Join(" - ", [ FindInMap("Environments", Ref("EnvironmentType"), "ValueTags"), "Perforce Helix VPC Group", Ref("AWS::StackName") ]), ), )) WaitCondition = t.add_resource( WaitCondition( "WaitCondition", Handle=Ref("WaitHandle"), Timeout="350", DependsOn=["P4D", "MainInstance", "AppInstance"], )) PerforceHelixIAMUser = t.add_resource( User("PerforceHelixIAMUser", Path="/", Policies=[ Policy(PolicyName="PerforceHelixR53DNSPolicy", PolicyDocument=awacs.aws.Policy(Statement=[ awacs.aws.Statement( Effect=awacs.aws.Allow, Action=[ awacs.aws.Action("route53", "ChangeResourceRecordSets") ],
"Example template showing how the WaitCondition and WaitConditionHandle " "are configured. With this template, the stack will not complete until " "either the WaitCondition timeout occurs, or you manually signal the " "WaitCondition object using the URL created by the WaitConditionHandle. " "You can use CURL or some other equivalent mechanism to signal the " "WaitCondition. To find the URL, use cfn-describe-stack-resources or " "the AWS Management Console to display the PhysicalResourceId of the " "WaitConditionHandle - this is the URL to use to signal. For details of " "the signal request see the AWS CloudFormation User Guide at " "http://docs.amazonwebservices.com/AWSCloudFormation/latest/UserGuide/") mywaithandle = t.add_resource(WaitConditionHandle("myWaitHandle")) mywaitcondition = t.add_resource( WaitCondition( "myWaitCondition", Handle=Ref(mywaithandle), Timeout="300", )) t.add_output([ Output( "ApplicationData", Value=GetAtt(mywaitcondition, "Data"), Description="The data passed back as part of signalling the " "WaitCondition", ) ]) print(t.to_json())
def main():
    t = Template()

    t.add_version("2010-09-09")
    t.add_description(
        "Currently supporting RHEL/CentOS 7.5. Setup IAM role and security groups, "
        "launch instance, create/attach 10 EBS volumes, install/fix ZFS "
        "(http://download.zfsonlinux.org/epel/zfs-release.el7_5.noarch.rpm), "
        "create zfs RAID6 pool, setup NFS server, export NFS share")

    InstUserData = [
        '#!/usr/bin/env bash\n',
        '\n',
        'set -x\n',
        '\n',
        '##exit 0\n',  # use this to disable all user-data and bring up files
        '\n',
        'zfs_pool_name="', Ref('ZfsPool'), '"\n',
        'zfs_mount_point="', Ref('ZfsMountPoint'), '"\n',
        'nfs_cidr_block="', Ref('NFSCidr'), '"\n',
        'nfs_opts="', Ref('NFSOpts'), '"\n',
        'my_wait_handle="', Ref('NFSInstanceWaitHandle'), '"\n',
        '\n',
    ]

    with open('_include/Tropo_build_zfs_export_nfs.sh', 'r') as ud_file:
        user_data_file = ud_file.readlines()

    for line in user_data_file:
        InstUserData.append(line)

    t.add_metadata({
        'AWS::CloudFormation::Interface': {
            'ParameterGroups': [{
                'Label': {'default': 'Instance Configuration'},
                'Parameters': [
                    "OperatingSystem", "VPCId", "Subnet", "UsePublicIp",
                    "CreateElasticIP", "EC2KeyName", "NFSInstanceType",
                    "SshAccessCidr", "ExistingSecurityGroup",
                    "ExistingPlacementGroup", "S3BucketName"
                ]
            }, {
                'Label': {'default': 'Storage Options - Required'},
                'Parameters': [
                    "RAIDLevel", "VolumeSize", "VolumeType", "EBSVolumeType",
                    "VolumeIops"
                ]
            }, {
                'Label': {'default': 'ZFS Pool and FS Options - Required'},
                'Parameters': ["ZfsPool", "ZfsMountPoint"]
            }, {
                'Label': {'default': 'NFS Options - Required'},
                'Parameters': ["NFSCidr", "NFSOpts"]
            }],
            'ParameterLabels': {
                'OperatingSystem': {'default': 'Operating System of AMI'},
                'VPCId': {'default': 'VPC ID'},
                'Subnet': {'default': 'Subnet ID'},
                'UsePublicIp': {'default': 'Assign a Public IP'},
                'CreateElasticIP': {'default': 'Create and use an EIP'},
                'EC2KeyName': {'default': 'EC2 Key Name'},
                'NFSInstanceType': {'default': 'Instance Type'},
                'SshAccessCidr': {'default': 'SSH Access CIDR Block'},
                'ExistingSecurityGroup': {'default': 'OPTIONAL: Existing Security Group'},
                'ExistingPlacementGroup': {'default': 'OPTIONAL: Existing Placement Group'},
                'S3BucketName': {'default': 'Optional S3 Bucket Name'},
                'RAIDLevel': {'default': 'RAID Level'},
                'VolumeSize': {'default': 'Volume size of the EBS vol'},
                'VolumeType': {'default': 'Volume type of the EBS vol'},
                'EBSVolumeType': {'default': 'Volume type of the EBS vol'},
                'VolumeIops': {'default': 'IOPS for each EBS vol (only for io1)'},
                'ZfsPool': {'default': 'ZFS pool name'},
                'ZfsMountPoint': {'default': 'Mount Point'},
                'NFSCidr': {'default': 'NFS CIDR block for mounts'},
                'NFSOpts': {'default': 'NFS options'},
            }
        }
    })

    EC2KeyName = t.add_parameter(
        Parameter(
            'EC2KeyName',
            Type="AWS::EC2::KeyPair::KeyName",
            Description="Name of an existing EC2 KeyPair to enable SSH access "
                        "to the instance.",
            ConstraintDescription="REQUIRED: Must be a valid EC2 key pair"))

    OperatingSystem = t.add_parameter(
        Parameter('OperatingSystem',
                  Type="String",
                  Description="Operating System",
                  Default="centos7",
                  AllowedValues=[
                      "alinux2",
                      "centos7",
                      "rhel7",
                  ],
                  ConstraintDescription="Must be: alinux2, centos7, rhel7"))

    NFSInstanceType = t.add_parameter(
        Parameter(
            'NFSInstanceType',
            Type="String",
            Description="NFS instance type",
            Default="r4.16xlarge",
            AllowedValues=[
                "m4.16xlarge", "m4.10xlarge", "r4.16xlarge", "c8.8xlarge"
            ],
            ConstraintDescription="Must be an EC2 instance type from the list"))

    VolumeType = t.add_parameter(
        Parameter(
            'VolumeType',
            Type="String",
            Description="Type of EBS volume",
            Default="EBS",
            AllowedValues=["EBS", "InstanceStore"],
            ConstraintDescription="Volume type has to be EBS or InstanceStore"))

    EBSVolumeType = t.add_parameter(
        Parameter('EBSVolumeType',
                  Description="Type of EBS volumes to create",
                  Type="String",
                  Default="io1",
                  ConstraintDescription="Must be either: io1, gp2, st1",
                  AllowedValues=["io1", "gp2", "st1"]))

    VolumeSize = t.add_parameter(
        Parameter('VolumeSize',
                  Type="Number",
                  Default="500",
                  Description="Volume size in GB"))

    VolumeIops = t.add_parameter(
        Parameter('VolumeIops',
                  Type="Number",
                  Default="20000",
                  Description="IOPS for the EBS volume"))

    RAIDLevel = t.add_parameter(
        Parameter(
            'RAIDLevel',
            Description="RAID Level, currently only 6 (8+2p) is supported",
            Type="String",
            Default="0",
            AllowedValues=["0"],
            ConstraintDescription="Must be 0"))

    ZfsPool = t.add_parameter(
        Parameter('ZfsPool',
                  Description="ZFS pool name",
                  Type="String",
                  Default="v01"))

    ZfsMountPoint = t.add_parameter(
        Parameter(
            'ZfsMountPoint',
            Description="ZFS mount point, absolute path will be "
                        "/pool_name/mount_point (e.g. /v01/testzfs)",
            Type="String",
            Default="testzfs"))

    VPCId = t.add_parameter(
        Parameter('VPCId',
                  Type="AWS::EC2::VPC::Id",
                  Description="VPC Id for this instance"))

    ExistingPlacementGroup = t.add_parameter(
        Parameter('ExistingPlacementGroup',
                  Type="String",
                  Description="OPTIONAL: Existing placement group"))

    Subnet = t.add_parameter(
        Parameter('Subnet',
                  Type="AWS::EC2::Subnet::Id",
                  Description="Subnet IDs"))

    ExistingSecurityGroup = t.add_parameter(
        Parameter(
            'ExistingSecurityGroup',
            Type="AWS::EC2::SecurityGroup::Id",
            Description="OPTIONAL: Choose an existing Security Group ID, "
                        "e.g. sg-abcd1234"))

    UsePublicIp = t.add_parameter(
        Parameter(
            'UsePublicIp',
            Type="String",
            Description="Should a public IP address be given to the instance",
            Default="true",
            ConstraintDescription="true/false",
            AllowedValues=["true", "false"]))

    CreateElasticIP = t.add_parameter(
        Parameter(
            'CreateElasticIP',
            Type="String",
            Description="Create an Elastic IP address that will be assigned "
                        "to an instance",
            Default="true",
            ConstraintDescription="true/false",
            AllowedValues=["true", "false"]))

    S3BucketName = t.add_parameter(
        Parameter('S3BucketName',
                  Type="String",
                  Description="S3 bucket to allow this instance read access."))

    SshAccessCidr = t.add_parameter(
        Parameter(
            'SshAccessCidr',
            Type="String",
            Description="CIDR Block for SSH access, default 0.0.0.0/0",
            Default="0.0.0.0/0",
            AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))

    NFSCidr = t.add_parameter(
        Parameter(
            'NFSCidr',
            Type="String",
            Description="CIDR for NFS Security Group and NFS clients; to "
                        "allow all access use 0.0.0.0/0",
            Default="10.0.0.0/16",
            AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
            ConstraintDescription="Must be a valid CIDR x.x.x.x/x"))

    NFSOpts = t.add_parameter(
        Parameter(
            'NFSOpts',
            Description="NFS export options",
            Type="String",
            Default="(rw,async,no_root_squash,wdelay,no_subtree_check,no_acl)"))

    VarLogMessagesFile = t.add_parameter(
        Parameter(
            'VarLogMessagesFile',
            Type="String",
            Description="S3 bucket and file name for log CloudWatch config "
                        "(e.g. s3://jouser-logs/var-log-message.config)"))

    RootRole = t.add_resource(
        iam.Role("RootRole",
                 AssumeRolePolicyDocument={
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 },
                 Policies=[
                     iam.Policy(
                         PolicyName="s3bucketaccess",
                         PolicyDocument={
                             "Statement": [{
                                 "Effect": "Allow",
                                 "Action": ["s3:GetObject"],
                                 "Resource": {
                                     "Fn::Join": [
                                         "",
                                         ["arn:aws:s3:::",
                                          {"Ref": "S3BucketName"}, "/*"]
                                     ]
                                 }
                             }, {
                                 "Effect": "Allow",
                                 "Action": ["s3:ListBucket"],
                                 "Resource": {
                                     "Fn::Join": [
                                         "",
                                         ["arn:aws:s3:::",
                                          {"Ref": "S3BucketName"}]
                                     ]
                                 }
                             }],
                         }),
                 ]))

    NFSSecurityGroup = t.add_resource(
        SecurityGroup("NFSSecurityGroup",
                      VpcId=Ref(VPCId),
                      GroupDescription="NFS Security group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="2049",
                              ToPort="2049",
                              CidrIp=Ref(NFSCidr),
                          ),
                      ]))

    SshSecurityGroup = t.add_resource(
        SecurityGroup("SshSecurityGroup",
                      VpcId=Ref(VPCId),
                      GroupDescription="SSH Security group",
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="22",
                              ToPort="22",
                              CidrIp=Ref(SshAccessCidr),
                          ),
                      ]))

    RootInstanceProfile = t.add_resource(
        InstanceProfile("RootInstanceProfile", Roles=[Ref(RootRole)]))

    EIPAddress = t.add_resource(
        EIP('EIPAddress', Domain='vpc', Condition="create_elastic_ip"))

    tags = Tags(Name=Ref("AWS::StackName"))

    NFSInstance = t.add_resource(
        ec2.Instance(
            'NFSInstance',
            ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"),
                              Ref(OperatingSystem)),
            KeyName=Ref(EC2KeyName),
            InstanceType=Ref(NFSInstanceType),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=If(
                        "not_existing_sg",
                        [Ref(NFSSecurityGroup), Ref(SshSecurityGroup)],
                        [Ref(NFSSecurityGroup), Ref(SshSecurityGroup),
                         Ref(ExistingSecurityGroup)]),
                    AssociatePublicIpAddress=Ref(UsePublicIp),
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref(Subnet))
            ],
            IamInstanceProfile=Ref(RootInstanceProfile),
            PlacementGroupName=Ref(ExistingPlacementGroup),
            BlockDeviceMappings=If(
                'vol_type_ebs',
                # One identical EBS volume per device letter, /dev/sdh
                # through /dev/sdm (behavior-equivalent to the original
                # six hand-written mappings).
                [
                    ec2.BlockDeviceMapping(
                        DeviceName="/dev/sd{}".format(letter),
                        Ebs=ec2.EBSBlockDevice(
                            VolumeSize=Ref(VolumeSize),
                            DeleteOnTermination="True",
                            Iops=Ref(VolumeIops),
                            VolumeType=Ref(EBSVolumeType)))
                    for letter in "hijklm"
                ],
                {"Ref": "AWS::NoValue"},
            ),
            UserData=Base64(Join('', InstUserData)),
        ))  # End of NFSInstance

    t.add_mapping(
        'AWSRegionAMI', {
            "ap-northeast-1": {"centos7": "ami-8e8847f1", "rhel7": "ami-6b0d5f0d"},
            "ap-northeast-2": {"centos7": "ami-bf9c36d1", "rhel7": "ami-3eee4150"},
            "ap-south-1": {"centos7": "ami-1780a878", "rhel7": "ami-5b673c34"},
            "ap-southeast-1": {"centos7": "ami-8e0205f2", "rhel7": "ami-76144b0a"},
            "ap-southeast-2": {"centos7": "ami-d8c21dba", "rhel7": "ami-67589505"},
            "ca-central-1": {"centos7": "ami-e802818c", "rhel7": "ami-49f0762d"},
            "eu-central-1": {"centos7": "ami-dd3c0f36", "rhel7": "ami-c86c3f23"},
            "eu-west-1": {"centos7": "ami-3548444c", "rhel7": "ami-7c491f05"},
            "eu-west-2": {"centos7": "ami-00846a67", "rhel7": "ami-7c1bfd1b"},
            "eu-west-3": {"centos7": "ami-262e9f5b", "rhel7": "ami-5026902d"},
            "sa-east-1": {"centos7": "ami-cb5803a7", "rhel7": "ami-b0b7e3dc"},
            "us-east-1": {"centos7": "ami-9887c6e7", "rhel7": "ami-6871a115"},
            "us-east-2": {"centos7": "ami-9c0638f9", "rhel7": "ami-03291866"},
            "us-west-1": {"centos7": "ami-4826c22b", "rhel7": "ami-18726478"},
            "us-west-2": {"centos7": "ami-3ecc8f46", "rhel7": "ami-28e07e50"}
        })

    t.add_condition("not_existing_sg", Equals(Ref(ExistingSecurityGroup), ""))
    t.add_condition("vol_type_ebs", Equals(Ref(VolumeType), "EBS"))
    # Compare against lowercase "true" to match the parameters' AllowedValues.
    t.add_condition("Has_Public_Ip", Equals(Ref(UsePublicIp), "true"))
    t.add_condition("Has_Bucket", Not(Equals(Ref(S3BucketName), "")))
    t.add_condition("create_elastic_ip", Equals(Ref(CreateElasticIP), "true"))

    nfswaithandle = t.add_resource(
        WaitConditionHandle('NFSInstanceWaitHandle'))

    nfswaitcondition = t.add_resource(
        WaitCondition("NFSInstanceWaitCondition",
                      Handle=Ref(nfswaithandle),
                      Timeout="1500",
                      DependsOn="NFSInstance"))

    t.add_output([
        Output("ElasticIP",
               Description="Elastic IP address for the instance",
               Value=Ref(EIPAddress),
               Condition="create_elastic_ip")
    ])
    t.add_output([
        Output("InstanceID", Description="Instance ID", Value=Ref(NFSInstance))
    ])
    t.add_output([
        Output("InstancePrivateIP", Value=GetAtt('NFSInstance', 'PrivateIp'))
    ])
    t.add_output([
        Output("InstancePublicIP",
               Value=GetAtt('NFSInstance', 'PublicIp'),
               Condition="Has_Public_Ip")
    ])
    t.add_output([
        Output("ElasticPublicIP",
               Value=GetAtt('NFSInstance', 'PublicIp'),
               Condition="create_elastic_ip")
    ])
    t.add_output([
        Output("PrivateMountPoint",
               Description="Mount point on private network",
               Value=Join("", [GetAtt('NFSInstance', 'PrivateIp'), ":/fs1"]))
    ])
    t.add_output([
        Output("ExampleClientMountCommands",
               Description="Example commands to mount NFS on the clients",
               Value=Join("", [
                   "sudo mkdir /nfs1; sudo mount ",
                   GetAtt('NFSInstance', 'PrivateIp'), ":/",
                   Ref("ZfsPool"), "/", Ref("ZfsMountPoint"), " /nfs1"
               ]))
    ])
    t.add_output([
        Output("S3BucketName",
               Value=Ref("S3BucketName"),
               Condition="Has_Bucket")
    ])

    # "Volume01" : { "Value" : { "Ref" : "Volume01" } },
    # "Volume02" : { "Value" : { "Ref" : "Volume02" } },
    # "Volume03" : { "Value" : { "Ref" : "Volume03" } },
    # "Volume04" : { "Value" : { "Ref" : "Volume04" } },
    # "Volume05" : { "Value" : { "Ref" : "Volume05" } },
    # "Volume06" : { "Value" : { "Ref" : "Volume06" } },
    # "Volume07" : { "Value" : { "Ref" : "Volume07" } },
    # "Volume08" : { "Value" : { "Ref" : "Volume08" } },
    # "Volume09" : { "Value" : { "Ref" : "Volume09" } },
    # "Volume10" : { "Value" : { "Ref" : "Volume10" } }

    print(t.to_json(indent=2))
node_name, ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "FlockerAMI"), InstanceType="m3.large", KeyName=Ref(keyname_param), SecurityGroups=[Ref(instance_sg)], AvailabilityZone=zone, Tags=Tags(Name=node_name)) # WaitCondition and corresponding Handler to signal completion # of {Flocker, Docker, Swarm} configuration on the node. wait_condition_handle = WaitConditionHandle( INFRA_WAIT_HANDLE_TEMPLATE.format(node=node_name)) template.add_resource(wait_condition_handle) wait_condition = WaitCondition( INFRA_WAIT_CONDITION_TEMPLATE.format(node=node_name), Handle=Ref(wait_condition_handle), Timeout="600", ) template.add_resource(wait_condition) user_data = base_user_data[:] user_data += [ 'node_number="{}"\n'.format(i), 'node_name="{}"\n'.format(node_name), 'wait_condition_handle="', Ref(wait_condition_handle), '"\n', ] # Setup S3 utilities to push/pull node-specific data to/from S3 bucket. user_data += _sibling_lines(S3_SETUP) if i == 0: