def generate_cf(self):
    """Build a CloudFormation template from the user-supplied (or default)
    config file.

    :return: tuple of (JSON template string, environment name), or the
        error dict produced by ``_readConfig`` if parsing failed.
    """
    # Read the machine list from the config file.
    machines = self._readConfig(self.configdata)
    if 'error' in machines:
        return machines

    template = Template()
    template.add_description(
        "%s: [%s]" % (self.owner, ", ".join(self.machinelist))
    )

    # Turn each configured machine into a CloudFormation instance plus
    # its DNS records (public machines also get a public record).
    for subnet, classes in machines.items():
        for mclass, machine in classes.items():
            template.add_resource(
                self._set_instance_value(machine, mclass, self.subnet[subnet])
            )
            template.add_resource(self._set_internal_resource_record(mclass))
            if subnet == 'public':
                template.add_resource(self._set_public_resource_record(mclass))

    # Render the assembled template as a JSON document.
    return template.to_json(), self.envname
def test_simple_table(self):
    """A SimpleTable with only a logical name should serialize cleanly."""
    template = Template()
    template.add_resource(SimpleTable("SomeTable"))
    template.to_json()
def test_mutualexclusion(self):
    """Setting mutually exclusive properties together must fail validation."""
    template = Template()
    fake = FakeAWSObject('fake', callcorrect=True, singlelist=[10])
    template.add_resource(fake)
    with self.assertRaises(ValueError):
        template.to_json()
def test_s3_filter(self):
    """An S3 event source with key prefix/suffix filters should serialize."""
    key_filter = Filter(S3Key=S3Key(
        Rules=[
            Rules(Name="prefix", Value="upload/"),
            Rules(Name="suffix", Value=".txt"),
        ],
    ))
    upload_event = S3Event(
        'FileUpload',
        Bucket="bucket",
        Events=['s3:ObjectCreated:*'],
        Filter=key_filter,
    )
    processor = Function(
        "ProcessorFunction",
        Handler='process_file.handler',
        CodeUri='.',
        Runtime='python3.6',
        Policies='AmazonS3FullAccess',
        Events={'FileUpload': upload_event},
    )
    template = Template()
    template.add_resource(processor)
    template.to_json()
def test_no_required(self):
    """A Stack missing its required properties must fail validation."""
    template = Template()
    template.add_resource(Stack("mystack"))
    with self.assertRaises(ValueError):
        template.to_json()
def test_api_no_definition(self):
    """An Api with just a StageName should serialize cleanly."""
    template = Template()
    template.add_resource(Api("SomeApi", StageName='test'))
    template.to_json()
def test_required_api_definitionbody(self):
    """An Api with StageName and an inline DefinitionBody should serialize."""
    template = Template()
    template.add_resource(Api(
        "SomeApi",
        StageName='test',
        DefinitionBody=self.swagger,
    ))
    template.to_json()
def test_required_api_definitionuri(self):
    """An Api with StageName and an S3 DefinitionUri should serialize."""
    template = Template()
    template.add_resource(Api(
        "SomeApi",
        StageName='test',
        DefinitionUri='s3://bucket/swagger.yml',
    ))
    template.to_json()
def test_required(self):
    """A Stack with all required properties should serialize cleanly."""
    opsworks_stack = Stack(
        "mystack",
        DefaultInstanceProfileArn="instancearn",
        Name="myopsworksname",
        ServiceRoleArn="arn",
    )
    template = Template()
    template.add_resource(opsworks_stack)
    template.to_json()
def test_required_function(self):
    """A serverless Function with all required properties should serialize."""
    handler = Function(
        "SomeHandler",
        Handler="index.handler",
        Runtime="nodejs",
        CodeUri="s3://bucket/handler.zip",
    )
    template = Template()
    template.add_resource(handler)
    template.to_json()
def test_optional_auto_publish_alias(self):
    """AutoPublishAlias is optional and should serialize when given."""
    handler = Function(
        "SomeHandler",
        Handler="index.handler",
        Runtime="nodejs",
        CodeUri="s3://bucket/handler.zip",
        AutoPublishAlias="alias",
    )
    template = Template()
    template.add_resource(handler)
    template.to_json()
def test_required_api_both(self):
    """DefinitionUri and DefinitionBody together must fail validation."""
    template = Template()
    template.add_resource(Api(
        "SomeApi",
        StageName='test',
        DefinitionUri='s3://bucket/swagger.yml',
        DefinitionBody=self.swagger,
    ))
    with self.assertRaises(ValueError):
        template.to_json()
def test_exclusive(self):
    """An awslambda Function using the Code property should serialize."""
    lookup_func = Function(
        "AMIIDLookup",
        Handler="index.handler",
        Role=GetAtt("LambdaExecutionRole", "Arn"),
        Code=Code(S3Bucket="lambda-functions", S3Key="amilookup.zip"),
        Runtime="nodejs",
        Timeout="25",
    )
    template = Template()
    template.add_resource(lookup_func)
    template.to_json()
def test_valid_data(self):
    """A ContainerDefinition built from a dict keeps its Links, and a
    TaskDefinition embedding it serializes cleanly."""
    t = Template()
    cd = ecs.ContainerDefinition.from_dict("mycontainer", self.d)
    # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    # use the canonical assertEqual.
    self.assertEqual(cd.Links[0], "containerA")
    td = ecs.TaskDefinition(
        "taskdef",
        ContainerDefinitions=[cd],
        Volumes=[ecs.Volume(Name="myvol")],
        TaskRoleArn=Ref(iam.Role("myecsrole")),
    )
    t.add_resource(td)
    t.to_json()
def test_s3_location(self):
    """CodeUri given as an S3Location object should serialize cleanly."""
    handler = Function(
        "SomeHandler",
        Handler="index.handler",
        Runtime="nodejs",
        CodeUri=S3Location(
            Bucket="mybucket",
            Key="mykey",
        ),
    )
    template = Template()
    template.add_resource(handler)
    template.to_json()
def test_optional_deployment_preference(self):
    """DeploymentPreference is optional and should serialize when given."""
    handler = Function(
        "SomeHandler",
        Handler="index.handler",
        Runtime="nodejs",
        CodeUri="s3://bucket/handler.zip",
        AutoPublishAlias="alias",
        DeploymentPreference=DeploymentPreference(Type="AllAtOnce"),
    )
    template = Template()
    template.add_resource(handler)
    template.to_json()
def test_DLQ(self):
    """A Function with an SNS DeadLetterQueue should serialize cleanly."""
    handler = Function(
        "SomeHandler",
        Handler="index.handler",
        Runtime="nodejs",
        CodeUri="s3://bucket/handler.zip",
        DeadLetterQueue=DeadLetterQueue(
            Type='SNS',
            TargetArn='arn:aws:sns:us-east-1:000000000000:SampleTopic',
        ),
    )
    template = Template()
    template.add_resource(handler)
    template.to_json()
def test_tags(self):
    """A Function carrying Tags should serialize cleanly."""
    handler = Function(
        "SomeHandler",
        Handler="index.handler",
        Runtime="nodejs",
        CodeUri="s3://bucket/handler.zip",
        Tags=Tags({
            'Tag1': 'TagValue1',
            'Tag2': 'TagValue2',
        }),
    )
    template = Template()
    template.add_resource(handler)
    template.to_json()
def test_exactly_one_code(self):
    """Supplying both CodeUri and InlineCode must fail validation."""
    handler = Function(
        "SomeHandler",
        Handler="index.handler",
        Runtime="nodejs",
        CodeUri=S3Location(
            Bucket="mybucket",
            Key="mykey",
        ),
        InlineCode="",
    )
    template = Template()
    template.add_resource(handler)
    with self.assertRaises(ValueError):
        template.to_json()
class CloudFormationTemplate(CloudTemplate):
    """Render the instance groups described in ``self.source`` as a
    CloudFormation template (JSON string stored on ``self.template``)."""

    def __init__(self):
        # Bug fix: super() must be given the class it is called from.
        # The original passed the *parent* (CloudTemplate), which skips
        # CloudTemplate.__init__ and dispatches to its base class instead.
        super(CloudFormationTemplate, self).__init__()

    def generate(self):
        """Build the template from ``self.source['instance_groups']`` and
        return it as a JSON string (also stored on ``self.template``)."""
        self.template = Template()
        for instance in self.source['instance_groups']:
            image_id = instance['image_id']
            instance_type = instance['type']
            key_pair = instance['key_pair']
            name = instance['name']
            # NOTE(review): every iteration registers the resource and the
            # outputs under the same logical IDs ("Ec2Instance",
            # "InstanceId", ...), so more than one instance group would
            # clobber or conflict with earlier ones — confirm whether
            # multiple groups are ever configured.
            ec2_instance = self.template.add_resource(ec2.Instance(
                "Ec2Instance",
                ImageId=image_id,
                InstanceType=instance_type,
                KeyName=key_pair,
                SecurityGroups=[name],
                UserData=Base64("80")
            ))
            self.template.add_output([
                Output(
                    "InstanceId",
                    Description="InstanceId of the newly created EC2 instance",
                    Value=Ref(ec2_instance),
                ),
                Output(
                    "AZ",
                    Description="Availability Zone of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "AvailabilityZone"),
                ),
                Output(
                    "PublicIP",
                    Description="Public IP address of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "PublicIp"),
                ),
                Output(
                    "PrivateIP",
                    Description="Private IP address of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "PrivateIp"),
                ),
                Output(
                    "PublicDNS",
                    Description="Public DNSName of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "PublicDnsName"),
                ),
                Output(
                    "PrivateDNS",
                    Description="Private DNSName of the newly created EC2 instance",
                    Value=GetAtt(ec2_instance, "PrivateDnsName"),
                ),
            ])
        self.template = self.template.to_json()
        return self.template
def main():
    """Build a jump-box single-instance stack inside a fresh VPC/subnet
    and print the resulting CloudFormation JSON."""
    my_vpc = ec2.VPC('MyVPC', CidrBlock='10.0.0.0/16')
    my_subnet = ec2.Subnet('MySubnet',
                           AvailabilityZone='ap-southeast-2a',
                           VpcId=Ref(my_vpc),
                           CidrBlock='10.0.1.0/24')
    template = Template()
    config = SingleInstanceConfig(
        keypair='INSERT_YOUR_KEYPAIR_HERE',
        si_image_id='ami-dc361ebf',
        si_instance_type='t2.micro',
        vpc=Ref(my_vpc),
        subnet=Ref(my_subnet),
        instance_dependencies=my_vpc.title,
        public_hosted_zone_name=None,
        sns_topic=None,
        is_nat=False,
        iam_instance_profile_arn=None,
        availability_zone='ap-southeast-2a'
    )
    SingleInstance(title='jump',
                   template=template,
                   single_instance_config=config)
    template.add_resource(my_vpc)
    template.add_resource(my_subnet)
    print(template.to_json(indent=2, separators=(',', ': ')))
def output_template(self):
    """Assemble the stored parameters, mappings, resources and outputs
    into a Template and print it as JSON.

    Fix: use the print() function instead of the Python-2-only print
    statement (a syntax error under Python 3); the call form works on
    both interpreters. The useless trailing bare ``return`` is dropped.
    """
    template = Template()
    for parameter in self.parameters:
        template.add_parameter(parameter)
    for mapping in self.mappings:
        template.add_mapping(mapping[0], mapping[1])
    for resource in self.resources:
        template.add_resource(resource)
    for output in self.outputs:
        template.add_output(output)
    print(template.to_json())
def test_layer_version(self):
    """ContentUri is required: present serializes, absent raises ValueError."""
    template = Template()
    template.add_resource(LayerVersion("SomeLayer", ContentUri="someuri"))
    template.to_json()

    template = Template()
    template.add_resource(LayerVersion("SomeLayer"))
    with self.assertRaises(ValueError):
        template.to_json()
def sceptre_handler(sceptre_user_data):
    """Return CloudFormation JSON for a VPC with an attached internet
    gateway; the CIDR block comes from ``sceptre_user_data``."""
    template = Template()
    new_vpc = template.add_resource(VPC(
        "VirtualPrivateCloud",
        CidrBlock=sceptre_user_data["cidr_block"],
        InstanceTenancy="default",
        EnableDnsSupport=True,
        EnableDnsHostnames=True,
    ))
    gateway = template.add_resource(InternetGateway("InternetGateway"))
    template.add_resource(VPCGatewayAttachment(
        "IGWAttachment",
        VpcId=Ref(new_vpc),
        InternetGatewayId=Ref(gateway),
    ))
    template.add_output(Output(
        "VpcId",
        Description="New VPC ID",
        Value=Ref(new_vpc),
    ))
    return template.to_json()
def main():
    """Build a test Lambda leaf stack and print its CloudFormation JSON."""
    template = Template()
    config = LambdaConfig(
        lambda_s3_bucket='smallest-bucket-in-history',
        lambda_s3_key='test_lambda.zip',
        lambda_description='test function',
        lambda_function_name='test_lambda',
        lambda_handler='test_lambda.lambda_handler',
        lambda_memory_size=128,
        lambda_role_arn='arn:aws:iam::123456789:role/lambda_basic_vpc_execution_with_s3',
        lambda_runtime='python2.7',
        lambda_timeout=1,
        lambda_schedule='rate(5 minutes)'
    )
    # Test Lambda
    LambdaLeaf(leaf_title='MyLambda',
               template=template,
               dependencies=['app1:80'],
               lambda_config=config,
               availability_zones=['ap-southeast-2a', 'ap-southeast-2b',
                                   'ap-southeast-2c'],
               public_cidr={'name': 'PublicIp', 'cidr': '0.0.0.0/0'},
               tree_name='tree')
    print(template.to_json(indent=2, separators=(',', ': ')))
def main():
    """Build a NAT single-instance stack (with SNS topic) inside a fresh
    VPC/subnet and print the resulting CloudFormation JSON."""
    my_vpc = ec2.VPC('MyVPC', CidrBlock='10.0.0.0/16')
    my_subnet = ec2.Subnet('MySubnet',
                           AvailabilityZone='ap-southeast-2a',
                           VpcId=Ref(my_vpc),
                           CidrBlock='10.0.1.0/24')
    template = Template()
    sns_topic = SNS(template)
    config = SingleInstanceConfig(
        keypair='INSERT_YOUR_KEYPAIR_HERE',
        si_image_id='ami-53371f30',
        si_instance_type='t2.micro',
        vpc=Ref(my_vpc),
        subnet=Ref(my_subnet),
        is_nat=True,
        instance_dependencies=my_vpc.title,
        public_hosted_zone_name=None,
        iam_instance_profile_arn=None,
        sns_topic=sns_topic,
        availability_zone='ap-southeast-2a',
        ec2_scheduled_shutdown=None,
        owner='*****@*****.**'
    )
    SingleInstance(title='nat1',
                   template=template,
                   single_instance_config=config)
    template.add_resource(my_vpc)
    template.add_resource(my_subnet)
    print(template.to_json(indent=2, separators=(',', ': ')))
def main():
    """Build a Postgres database leaf stack and print its CloudFormation
    JSON."""
    template = Template()
    config = DatabaseConfig(
        db_instance_type='db.t2.micro',
        db_engine='postgres',
        db_port='5432',
        db_name='myDb',
        db_hdd_size='5',
        db_snapshot_id=None,
        db_backup_window=None,
        db_backup_retention=None,
        db_maintenance_window=None,
        db_storage_type='gp2',
        owner='*****@*****.**'
    )
    DatabaseLeaf(leaf_title='MyDb',
                 tree_name='tree',
                 template=template,
                 database_config=config,
                 availability_zones=['ap-southeast-2a', 'ap-southeast-2b',
                                     'ap-southeast-2c'])
    print(template.to_json(indent=2, separators=(',', ': ')))
def render(context):
    """Build the CloudFormation JSON for a project's EC2 stack (security
    group, instance, optional RDS/external-volume/DNS extras).

    Fixes:
    - ``dict.has_key()`` was removed in Python 3; use the ``in`` operator
      (works identically on Python 2).
    - ``map()`` is lazy in Python 3, so mapping ``template.add_resource``
      / ``template.add_output`` over a list silently added *nothing*;
      replaced with explicit for loops.
    - Dropped the unused binding of the keypair parameter.
    """
    secgroup = ec2_security(context)
    instance = ec2instance(context)

    template = Template()
    template.add_resource(secgroup)
    template.add_resource(instance)

    template.add_parameter(Parameter(KEYPAIR, **{
        "Type": "String",
        "Description": "EC2 KeyPair that enables SSH access to this instance",
    }))

    cfn_outputs = outputs()

    if 'rds' in context['project']['aws']:
        for resource in rdsinstance(context):
            template.add_resource(resource)
        cfn_outputs.extend([
            mkoutput("RDSHost", "Connection endpoint for the DB cluster",
                     (RDS_TITLE, "Endpoint.Address")),
            mkoutput("RDSPort",
                     "The port number on which the database accepts connections",
                     (RDS_TITLE, "Endpoint.Port")),
        ])

    if 'ext' in context['project']['aws']:
        for resource in ext_volume(context):
            template.add_resource(resource)

    if context['hostname']:  # None if one couldn't be generated
        template.add_resource(external_dns(context))
        template.add_resource(internal_dns(context))
        cfn_outputs.extend([
            mkoutput("DomainName",
                     "Domain name of the newly created EC2 instance",
                     Ref(R53_EXT_TITLE)),
            mkoutput("IntDomainName",
                     "Domain name of the newly created EC2 instance",
                     Ref(R53_INT_TITLE)),
        ])

    for output in cfn_outputs:
        template.add_output(output)

    return template.to_json()
def test_s3_bucket_accelerate_configuration(self):
    """AccelerateConfiguration should round-trip into the rendered JSON."""
    bucket = Bucket(
        "s3Bucket",
        AccelerateConfiguration=AccelerateConfiguration(
            AccelerationStatus="Enabled"),
    )
    template = Template()
    template.add_resource(bucket)
    rendered = template.to_json()
    self.assertIn('"AccelerationStatus": "Enabled"', rendered)
def _generate_template(tms=1, within_vpc=False):
    """Build the Flink cluster template — one jobmanager plus *tms*
    taskmanagers — optionally inside a dedicated VPC, and return it as a
    JSON string."""
    template = Template()
    template.add_description(FLINK_TEMPLATE_DESCRIPTION)
    template.add_version(FLINK_TEMPLATE_VERSION)
    template.add_metadata(
        {'LastUpdated': datetime.datetime.now().strftime('%c')})

    # mappings and parameters
    mappings.add_mappings(template)
    parameters.add_parameters(template)

    vpc = subnet_pri = subnet_pub = None
    if within_vpc:
        # networking resources
        vpc, subnet_pri, subnet_pub = _define_vpc(template)

    # security groups
    sg_ssh = template.add_resource(securitygroups.ssh(
        parameters.ssh_location, vpc))
    sg_jobmanager = template.add_resource(securitygroups.jobmanager(
        parameters.http_location, vpc))
    sg_taskmanager = template.add_resource(
        securitygroups.taskmanager(None, vpc))

    # jobmanager instance plus its SSH and web-UI outputs
    jobmanager = template.add_resource(instances.jobmanager(
        0,
        [Ref(sg_ssh), Ref(sg_jobmanager)],
        within_vpc,
        subnet_pub
    ))
    template.add_output(outputs.ssh_to(jobmanager, "JobManager00"))
    template.add_output(Output(
        "FlinkWebGui",
        Description="Flink web interface",
        Value=Join("", [
            'http://', GetAtt(jobmanager, "PublicDnsName"), ':8081'
        ])
    ))

    # taskmanager instances, each reachable via the jobmanager bastion
    for index in range(0, tms):
        taskmanager = template.add_resource(instances.taskmanager(
            index,
            jobmanager,
            [Ref(sg_ssh), Ref(sg_taskmanager)],
            within_vpc,
            subnet_pri
        ))
        template.add_output(outputs.ssh_to(
            taskmanager, "TaskManager%2.2d" % index, bastion=jobmanager))

    return template.to_json()
def test_invalid_parameter_property_in_template(self):
    """AllowedPattern is invalid on a Number parameter; to_json must fail."""
    template = Template()
    template.add_parameter(
        Parameter("BasicNumber", Type="Number", AllowedPattern=".*"))
    with self.assertRaises(ValueError):
        template.to_json()
def test_required_title_error(self):
    """Validation failure messages should mention 'title:'.

    Fix: assertRaisesRegexp is a deprecated alias (removed in Python
    3.12); use assertRaisesRegex.
    """
    with self.assertRaisesRegex(ValueError, "title:"):
        t = Template()
        t.add_resource(Instance('ec2instance'))
        t.to_json()
PubliclyAccessible=True)) # Outputs template.add_output([ Output('RDSPostgres', Description='RDS Postgres Instance', Value=Ref(rds_postgres)), Output('RDSPostgresEndpointAddress', Description='RDS Postgres Instance', Value=GetAtt(rds_postgres, 'Endpoint.Address')), Output('RDSPostgresEndpointPort', Description='RDS Postgres Instance', Value=GetAtt(rds_postgres, 'Endpoint.Port')), ]) template_json = template.to_json(indent=4) print(template_json) stack_args = { 'StackName': STACK_NAME, 'TemplateBody': template_json, 'Parameters': [ { 'ParameterKey': 'SubnetAZ1', 'ParameterValue': networking_resources['GenericPublicSubnetEuWest1b'], }, { 'ParameterKey': 'SubnetAZ2',
def test_mutualexclusion(self):
    """Mutually exclusive properties set together must fail validation."""
    fake_object = FakeAWSObject('fake', callcorrect=True, singlelist=[10])
    template = Template()
    template.add_resource(fake_object)
    with self.assertRaises(ValueError):
        template.to_json()
def main(args):
    """Render the EFS + Mount Target CloudFormation template and write it
    as JSON to ``args.target_path``.

    EFSOptions parameter layout (comma separated, "NONE" == unset):
    [0 shared_dir, 1 efs_fs_id, 2 performance_mode, 3 efs_kms_key_id,
     4 provisioned_throughput, 5 encrypted, 6 throughput_mode,
     7 exists_valid_mt]

    Fix: the output file is now written through a ``with`` block so the
    handle is closed even if serialization raises.
    """
    t = Template()

    efs_options = t.add_parameter(
        Parameter(
            "EFSOptions",
            Type="CommaDelimitedList",
            Description="Comma separated list of efs related options, "
            "8 parameters in total",
        )
    )
    compute_security_group = t.add_parameter(
        Parameter("ComputeSecurityGroup", Type="String", Description="SecurityGroup for Mount Target")
    )
    subnet_id = t.add_parameter(Parameter("SubnetId", Type="String", Description="SubnetId for Mount Target"))

    # Create a new file system only when a shared dir is requested and no
    # existing file system id was supplied.
    create_efs = t.add_condition(
        "CreateEFS",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")), Equals(Select(str(1), Ref(efs_options)), "NONE")),
    )
    # Create a mount target only when a shared dir is requested and no
    # valid mount target already exists.
    create_mt = t.add_condition(
        "CreateMT",
        And(Not(Equals(Select(str(0), Ref(efs_options)), "NONE")), Equals(Select(str(7), Ref(efs_options)), "NONE")),
    )
    use_performance_mode = t.add_condition("UsePerformanceMode", Not(Equals(Select(str(2), Ref(efs_options)), "NONE")))
    use_efs_encryption = t.add_condition("UseEFSEncryption", Equals(Select(str(5), Ref(efs_options)), "true"))
    use_efs_kms_key = t.add_condition(
        "UseEFSKMSKey",
        And(Condition(use_efs_encryption), Not(Equals(Select(str(3), Ref(efs_options)), "NONE")))
    )
    use_throughput_mode = t.add_condition("UseThroughputMode", Not(Equals(Select(str(6), Ref(efs_options)), "NONE")))
    use_provisioned = t.add_condition("UseProvisioned", Equals(Select(str(6), Ref(efs_options)), "provisioned"))
    use_provisioned_throughput = t.add_condition(
        "UseProvisionedThroughput",
        And(Condition(use_provisioned), Not(Equals(Select(str(4), Ref(efs_options)), "NONE"))),
    )

    fs = t.add_resource(
        FileSystem(
            "EFSFS",
            PerformanceMode=If(use_performance_mode, Select(str(2), Ref(efs_options)), NoValue),
            ProvisionedThroughputInMibps=If(use_provisioned_throughput, Select(str(4), Ref(efs_options)), NoValue),
            ThroughputMode=If(use_throughput_mode, Select(str(6), Ref(efs_options)), NoValue),
            Encrypted=If(use_efs_encryption, Select(str(5), Ref(efs_options)), NoValue),
            KmsKeyId=If(use_efs_kms_key, Select(str(3), Ref(efs_options)), NoValue),
            Condition=create_efs,
        )
    )
    t.add_resource(
        MountTarget(
            "EFSMT",
            FileSystemId=If(create_efs, Ref(fs), Select(str(1), Ref(efs_options))),
            SecurityGroups=[Ref(compute_security_group)],
            SubnetId=Ref(subnet_id),
            Condition=create_mt,
        )
    )
    t.add_output(
        Output(
            "FileSystemId",
            Description="ID of the FileSystem",
            Value=If(create_efs, Ref(fs), Select("1", Ref(efs_options))),
        )
    )

    # Write the rendered template to the requested path; the context
    # manager guarantees the handle is closed.
    with open(args.target_path, "w") as output_file:
        output_file.write(t.to_json())
class StandUp:
    """Build, write out, and optionally launch a Windows 2012 disk-IO test
    stack. All configuration lives in class-level attributes."""
    # --- stack / instance configuration defaults ---
    region = "ap-southeast-2"
    subnet = "subnet-cb5facae"
    type = "m3.xlarge"
    ami = "ami-e95c31d3"
    stackname = "TestIOStack"
    name = "windows-2012-test"
    instance_template = "TestIO"
    timezone = "Australia/Brisbane"
    environment = "Development"
    comment = "Comments"
    keypair = "IOTEST"
    iamrole = "IAM-EC2-Default"
    securitygroups = ["sg-02b36667"]
    monitoring = False
    rollback = False
    # placeholders filled in later (connections, built template)
    volume = ""
    ec2conn = ""
    cfnconn = ""
    template = ""

    def __init__(self):
        # Construction has side effects: builds the template and writes
        # standup.json immediately.
        self.build()
        self.output_to_file()

    def output_to_file(self):
        # Serialize the built template to standup.json and echo it.
        output = self.template.to_json()
        fd = open('standup.json', 'w')
        fd.write(output)
        fd.close()
        print(output)

    def metadata(self):
        """Build the AWS::CloudFormation::Init metadata consumed by
        cfn-init on the instance: diskpart scripts that stripe the EBS /
        ephemeral disks, plus Python and the DiskRobIOt test tool."""
        m = MetadataObject()
        # diskpart scripts dropped onto the instance for RAID-0 striping.
        m.add_configkeys(
            'AWS::CloudFormation::Init',
            'InitRAID',
            'files',
            {
                "C:\\cfn\\scripts\\striperaidebs.txt": {
                    "content": """select disk 1
clean
convert dynamic
select disk 2
clean
convert dynamic
create volume stripe disk=1,2
list volume
select volume 2
assign letter=e
format fs=ntfs quick"""
                },
                "C:\\cfn\\scripts\\striperaidephemeral.txt": {
                    "content": """select disk 2
clean
convert dynamic
select disk 3
clean
convert dynamic
create volume stripe disk=2,3
select volume 2
assign letter=E
format fs=ntfs quick"""
                }
            })
        # Run the EBS striping script once the files are in place.
        m.add_configkeys(
            'AWS::CloudFormation::Init',
            'InitRAID',
            'commands',
            {
                "1-initialize-raid-1": {
                    "command": """diskpart /s C:\\cfn\\scripts\\striperaidebs.txt""",
                    "waitAfterCompletion": 0
                }
            })
        # Install Python 3.4 via MSI for the test tooling.
        m.add_configkeys(
            'AWS::CloudFormation::Init',
            'TestIO',
            'packages',
            {
                "msi": {
                    "python": """https://www.python.org/ftp/python/3.4.2/python-3.4.2.amd64.msi"""
                }
            })
        m.add_configkeys(
            'AWS::CloudFormation::Init',
            'TestIO',
            'files',
            {
                "C:\\cfn\\scripts\\DiskRobIOt.py": """https://raw.githubusercontent.com/monk-ee/DiskRobIOt/master/DiskRobIOt.py"""
            })
        # Put Python on PATH, then run the disk test against the striped
        # E: volume.
        m.add_configkeys(
            'AWS::CloudFormation::Init',
            'TestIO',
            'commands',
            {
                "1-python-path": {
                    "command": """setx path "%path%;C:\\Python34" """,
                    "waitAfterCompletion": 0
                },
                "2-run-disktest": {
                    "command": """c:\\cfn\\scripts\DiskRobIOt.py --path e:\\ """,
                    "waitAfterCompletion": 0
                }
            })
        # configSets: run InitRAID before TestIO.
        m.add_configkeys('AWS::CloudFormation::Init',
                         'configSets',
                         'config',
                         ["InitRAID", "TestIO"])
        return m

    def build(self):
        """Assemble the CloudFormation template: the tagged EC2 instance
        (with cfn-init UserData and striped block devices), an EIP, and
        the usual instance outputs."""
        self.template = Template()
        self.template.add_version()
        self.template.add_description(self.comment)
        m = self.metadata()
        ec2_instance = self.template.add_resource(
            ec2.Instance(
                self.instance_template,
                ImageId=self.ami,
                InstanceType=self.type,
                KeyName=self.keypair,
                SubnetId=self.subnet,
                SecurityGroupIds=self.securitygroups,
                Monitoring=self.monitoring,
                IamInstanceProfile=self.iamrole,
                # Kick off cfn-init with the 'config' configSet built in
                # metadata() above.
                UserData=Base64("""<script>
cfn-init -v -s """ + self.stackname + """ -r """ + self.instance_template + """ --region """ + self.region + """ --configset config</script>"""),
                Metadata=m.JSONrepr(),
                BlockDeviceMappings=[
                    ec2.BlockDeviceMapping(
                        DeviceName="/dev/xvdca",
                        VirtualName="ephemeral0",
                    ),
                    ec2.BlockDeviceMapping(
                        DeviceName="/dev/xvdcb",
                        VirtualName="ephemeral1",
                    ),
                    ec2.BlockDeviceMapping(
                        DeviceName="/dev/xvdb",
                        Ebs=ec2.EBSBlockDevice(DeleteOnTermination=True,
                                               VolumeSize="45",
                                               VolumeType="gp2"),
                    ),
                    ec2.BlockDeviceMapping(
                        DeviceName="/dev/xvdc",
                        Ebs=ec2.EBSBlockDevice(DeleteOnTermination=True,
                                               VolumeSize="45",
                                               VolumeType="gp2"),
                    )
                ],
                Tags=Tags(
                    Name=self.name,
                    Environment=self.environment,
                    Comment=self.comment,
                    Role=self.iamrole,
                ),
            ))
        self.template.add_resource(
            ec2.EIP(
                "EIP",
                InstanceId=Ref(ec2_instance),
                Domain='vpc',
            ))
        self.template.add_output([
            Output(
                "InstanceId",
                Description="InstanceId of the newly created EC2 instance",
                Value=Ref(ec2_instance),
            ),
            Output(
                "PrivateIP",
                Description=
                "Private IP address of the newly created EC2 instance",
                Value=GetAtt(ec2_instance, "PrivateIp"),
            ),
            Output(
                "PrivateDNS",
                Description="Private DNSName of the newly created EC2 instance",
                Value=GetAtt(ec2_instance, "PrivateDnsName"),
            )
        ])

    def cloudform(self):
        """Launch the built template as a CloudFormation stack and return
        its JSON. NOTE(review): self.rollback (False) is passed as
        disable_rollback, i.e. rollback stays *enabled* — confirm intent."""
        try:
            self.cfnconn.create_stack(self.stackname,
                                      template_body=self.template.to_json(),
                                      disable_rollback=self.rollback)
            output = self.template.to_json()
            return output
        except Exception as e:
            # Re-raised unchanged; the binding to `e` is unused.
            raise

    def ec2_connect_to_region(self):
        # boto (v2-style) regional connection helpers; errors propagate.
        try:
            self.ec2conn = boto.ec2.connect_to_region(self.region)
        except:
            raise

    def vpc_connect_to_region(self):
        try:
            self.vpcconn = boto.vpc.connect_to_region(self.region)
        except:
            raise

    def iam_connect_to_region(self):
        try:
            self.iamconn = boto.iam.connect_to_region(self.region)
        except:
            raise

    def cfn_connect_to_region(self):
        try:
            self.cfnconn = boto.cloudformation.connect_to_region(self.region)
        except:
            raise
def main():
    """Build and print a sample template: an application load balancer in
    front of two EC2 instances (web + api), with path-based routing of
    /api/* to the api target group."""
    template = Template()
    template.set_version("2010-09-09")

    template.set_description(
        "AWS CloudFormation Sample Template: ELB with 2 EC2 instances"
    )

    # Region -> AMI mapping supplied by a helper elsewhere in the module.
    AddAMI(template)

    # Add the Parameters
    keyname_param = template.add_parameter(
        Parameter(
            "KeyName",
            Type="String",
            Default="mark",
            Description="Name of an existing EC2 KeyPair to "
            "enable SSH access to the instance",
        )
    )

    template.add_parameter(
        Parameter(
            "InstanceType",
            Type="String",
            Description="WebServer EC2 instance type",
            Default="m1.small",
            AllowedValues=[
                "t1.micro",
                "m1.small",
                "m1.medium",
                "m1.large",
                "m1.xlarge",
                "m2.xlarge",
                "m2.2xlarge",
                "m2.4xlarge",
                "c1.medium",
                "c1.xlarge",
                "cc1.4xlarge",
                "cc2.8xlarge",
                "cg1.4xlarge",
            ],
            ConstraintDescription="must be a valid EC2 instance type.",
        )
    )

    webport_param = template.add_parameter(
        Parameter(
            "WebServerPort",
            Type="String",
            Default="8888",
            Description="TCP/IP port of the web server",
        )
    )

    apiport_param = template.add_parameter(
        Parameter(
            "ApiServerPort",
            Type="String",
            Default="8889",
            Description="TCP/IP port of the api server",
        )
    )

    # Pre-existing network the ELB spans; defaults are sample values.
    subnetA = template.add_parameter(
        Parameter("subnetA", Type="String", Default="subnet-096fd06d")
    )
    subnetB = template.add_parameter(
        Parameter("subnetB", Type="String", Default="subnet-1313ef4b")
    )
    VpcId = template.add_parameter(
        Parameter("VpcId", Type="String", Default="vpc-82c514e6")
    )

    # Define the instance security group
    instance_sg = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription="Enable SSH and HTTP access on the inbound port",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=Ref(webport_param),
                    ToPort=Ref(webport_param),
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=Ref(apiport_param),
                    ToPort=Ref(apiport_param),
                    CidrIp="0.0.0.0/0",
                ),
            ],
        )
    )

    # Add the web server instance; its port is passed in via UserData.
    WebInstance = template.add_resource(
        ec2.Instance(
            "WebInstance",
            SecurityGroups=[Ref(instance_sg)],
            KeyName=Ref(keyname_param),
            InstanceType=Ref("InstanceType"),
            ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
            UserData=Base64(Ref(webport_param)),
        )
    )

    # Add the api server instance
    ApiInstance = template.add_resource(
        ec2.Instance(
            "ApiInstance",
            SecurityGroups=[Ref(instance_sg)],
            KeyName=Ref(keyname_param),
            InstanceType=Ref("InstanceType"),
            ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
            UserData=Base64(Ref(apiport_param)),
        )
    )

    # Add the application ELB
    ApplicationElasticLB = template.add_resource(
        elb.LoadBalancer(
            "ApplicationElasticLB",
            Name="ApplicationElasticLB",
            Scheme="internet-facing",
            Subnets=[Ref(subnetA), Ref(subnetB)],
        )
    )

    # One target group per backend, each health-checked over HTTP.
    TargetGroupWeb = template.add_resource(
        elb.TargetGroup(
            "TargetGroupWeb",
            HealthCheckIntervalSeconds="30",
            HealthCheckProtocol="HTTP",
            HealthCheckTimeoutSeconds="10",
            HealthyThresholdCount="4",
            Matcher=elb.Matcher(HttpCode="200"),
            Name="WebTarget",
            Port=Ref(webport_param),
            Protocol="HTTP",
            Targets=[
                elb.TargetDescription(Id=Ref(WebInstance), Port=Ref(webport_param))
            ],
            UnhealthyThresholdCount="3",
            VpcId=Ref(VpcId),
        )
    )

    TargetGroupApi = template.add_resource(
        elb.TargetGroup(
            "TargetGroupApi",
            HealthCheckIntervalSeconds="30",
            HealthCheckProtocol="HTTP",
            HealthCheckTimeoutSeconds="10",
            HealthyThresholdCount="4",
            Matcher=elb.Matcher(HttpCode="200"),
            Name="ApiTarget",
            Port=Ref(apiport_param),
            Protocol="HTTP",
            Targets=[
                elb.TargetDescription(Id=Ref(ApiInstance), Port=Ref(apiport_param))
            ],
            UnhealthyThresholdCount="3",
            VpcId=Ref(VpcId),
        )
    )

    # Port-80 listener: web target group by default, /api/* routed to the
    # api target group by the rule below.
    Listener = template.add_resource(
        elb.Listener(
            "Listener",
            Port="80",
            Protocol="HTTP",
            LoadBalancerArn=Ref(ApplicationElasticLB),
            DefaultActions=[
                elb.Action(Type="forward", TargetGroupArn=Ref(TargetGroupWeb))
            ],
        )
    )

    template.add_resource(
        elb.ListenerRule(
            "ListenerRuleApi",
            ListenerArn=Ref(Listener),
            Conditions=[elb.Condition(Field="path-pattern", Values=["/api/*"])],
            Actions=[
                elb.ListenerRuleAction(
                    Type="forward",
                    TargetGroupArn=Ref(TargetGroupApi)
                )
            ],
            Priority="1",
        )
    )

    template.add_output(
        Output(
            "URL",
            Description="URL of the sample website",
            Value=Join("", ["http://", GetAtt(ApplicationElasticLB, "DNSName")]),
        )
    )

    print(template.to_json())
def create_cloudformation_stack(args): print("Hello AWS!") # Connect to EC2. Get a cloudformation client session = boto3.Session(profile_name='f_project') client = session.client('cloudformation', region_name='sa-east-1') stack_name = args.stack_name ssh_key = args.ssh_key_name sec_group_name = 'TesisSecurityGroup' # Create stack template. cloudformation_template = Template() # Add parameters -> SSH key ssh_key_parameter = cloudformation_template.add_parameter( ssh_parameter(ssh_key)) cloudformation_template.add_output( cf_output("SSHKey", "SSH Key to log into instances", 'KeyName')) # Add roles and policies (bucket) policy_name = 'RolePolicies' role_name = 'InstanceRole' profile_name = 'InstanceProfile' ref_stack_id = Ref('AWS::StackId') ref_region = Ref('AWS::Region') ref_stack_name = Ref('AWS::StackName') cloudformation_template, subnet = set_cloudformation_settings( cloudformation_template, ref_stack_id) # Add security group ssh_sec_group = add_ssh_security_group(sec_group_name) cloudformation_template.add_resource(ssh_sec_group) # Read the environment information from the config file cfg_parser = ConfigObj(CFG_FILE) instance_size = cfg_parser['aws_config']['INSTANCE_SIZE'] bucket_name = cfg_parser['aws_config']['BUCKET'] # Add bucket access policies cloudformation_template.add_resource( allow_bucket_access_role(role_name)) # 1 cloudformation_template.add_resource( bucket_access_policy(policy_name, role_name, bucket_name)) # 2 cloudformation_template.add_resource( instance_profile_bucket(profile_name, role_name)) # 3 for instance_id in cfg_parser["Instances"]: instance = cfg_parser["Instances"][instance_id] name = instance['name'] print("Instance name:", name) ami_id = instance['ami_id'] ip = instance['ip'] bootstrap_file = instance['local_bootstrap_file'] bootstrap_path = os.path.join(os.getcwd(), BOOTSTRAP_FOLDER, name, bootstrap_file) aws_instance = EnvInstance(name) aws_instance.create_instance_template( ami_id, instance_size, ip, ssh_key_parameter, 
bootstrap_path, sec_group_name, # TODO: unused subnet) aws_instance.set_bucket_access(role_name, profile_name, bucket_name) aws_instance.add_to_security_group(ssh_sec_group) # aws_instance.add_to_security_group(ref_stack_id) cloudformation_template.add_resource(aws_instance) cloudformation_template.add_output( cf_output("%sInstance" % name, "%s: IP %s" % (name, ip), name)) print("Instance added to template!") try: # Create stack client.create_stack(StackName=stack_name, TemplateBody=cloudformation_template.to_json(), Capabilities=['CAPABILITY_IAM']) # Wait until stack is created while client.describe_stacks( StackName=stack_name )["Stacks"][0]["StackStatus"] != StackState.created: # Add timeout -> and delete stack print("Creating Environment...") time.sleep(15) else: print("CloudFormation Stack created") except ClientError: formatted_lines = traceback.format_exc().splitlines() print(traceback.format_exc()) print(formatted_lines[0]) print(formatted_lines[-1]) print("CloudFormation Stack could not be created!")
def gerando_cloudformation(self, resource):
    """Wrap *resource* in a fresh Template and return the rendered
    template parsed back into a dict."""
    template = Template()
    template.add_resource(resource)
    return json.loads(template.to_json())
def generate(env='pilot'):
    """Build the spider pipeline template for *env*: source bucket ->
    SNS topic -> SQS queue (with DLQ) -> Lambda, plus a results bucket,
    and return it as a JSON string.

    Fix: the spider source file is now read through a ``with`` block so
    the handle is closed instead of leaking.
    """
    template = Template()
    template.set_version("2010-09-09")

    # ExistingVPC = template.add_parameter(Parameter(
    #     "ExistingVPC",
    #     Type="AWS::EC2::VPC::Id",
    #     Description=(
    #         "The VPC ID that includes the security groups in the"
    #         "ExistingSecurityGroups parameter."
    #     ),
    # ))
    #
    # ExistingSecurityGroups = template.add_parameter(Parameter(
    #     "ExistingSecurityGroups",
    #     Type="List<AWS::EC2::SecurityGroup::Id>",
    # ))

    param_spider_lambda_memory_size = template.add_parameter(
        Parameter(
            'SpiderLambdaMemorySize',
            Type=NUMBER,
            Description='Amount of memory to allocate to the Lambda Function',
            Default='128',
            AllowedValues=MEMORY_VALUES
        )
    )
    param_spider_lambda_timeout = template.add_parameter(
        Parameter(
            'SpiderLambdaTimeout',
            Type=NUMBER,
            Description='Timeout in seconds for the Lambda function',
            Default='60'
        )
    )

    # Dead-letter queue: tasks that fail twice land here (14-day retention).
    spider_tasks_queue_dlq_name = f'{env}-spider-tasks-dlq'
    spider_tasks_queue_dlq = template.add_resource(
        Queue(
            "SpiderTasksDLQ",
            QueueName=spider_tasks_queue_dlq_name,
            MessageRetentionPeriod=(60 * 60 * 24 * 14),
        )
    )

    spider_tasks_queue_name = f"{env}-spider-tasks"
    spider_tasks_queue = template.add_resource(
        Queue(
            "SpiderTasksQueue",
            QueueName=spider_tasks_queue_name,
            MessageRetentionPeriod=(60 * 60 * 24 * 14),
            VisibilityTimeout=300,
            RedrivePolicy=RedrivePolicy(
                deadLetterTargetArn=GetAtt(spider_tasks_queue_dlq, "Arn"),
                maxReceiveCount=2,
            ),
            DependsOn=[spider_tasks_queue_dlq],
        )
    )

    # Broad logs/s3/sqs permissions for the spider Lambda.
    spider_lambda_role = template.add_resource(
        Role(
            "SpiderLambdaRole",
            Path="/",
            Policies=[
                Policy(
                    PolicyName="root",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Id="root",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[
                                    Action("logs", "*")
                                ]
                            ),
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[
                                    Action("s3", "*")
                                ]
                            ),
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[
                                    Action("sqs", "*")
                                ]
                            ),
                        ]
                    ),
                )
            ],
            AssumeRolePolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Action": ["sts:AssumeRole"],
                    "Effect": "Allow",
                    "Principal": {
                        "Service":
                            ["lambda.amazonaws.com"]
                    }
                }]
            },
        )
    )

    spider_file_path = './spider/index.js'
    # NOTE(review): spider_code is only referenced by the commented-out
    # inline ZipFile alternative below — confirm whether the read (and its
    # FileNotFoundError side effect) is still wanted.
    with open(spider_file_path, 'r') as spider_file:
        spider_code = spider_file.readlines()

    spider_lambda = template.add_resource(
        Function(
            "SpiderLambda",
            Code=Code(
                S3Bucket='spider-lambda',
                S3Key=f'{env}.zip',
                # ZipFile=Join("", spider_code)
            ),
            Handler="index.handler",
            Role=GetAtt(spider_lambda_role, "Arn"),
            Runtime="nodejs12.x",
            Layers=['arn:aws:lambda:us-east-1:342904801388:layer:spider-node-browser:1'],
            MemorySize=Ref(param_spider_lambda_memory_size),
            Timeout=Ref(param_spider_lambda_timeout),
            DependsOn=[spider_tasks_queue],
        )
    )

    # AllSecurityGroups = template.add_resource(CustomResource(
    #     "AllSecurityGroups",
    #     List=Ref(ExistingSecurityGroups),
    #     AppendedItem=Ref("SecurityGroup"),
    #     ServiceToken=GetAtt(spider_lambda, "Arn"),
    # ))
    #
    # SecurityGroup = template.add_resource(SecurityGroup(
    #     "SecurityGroup",
    #     SecurityGroupIngress=[
    #         {"ToPort": "80", "IpProtocol": "tcp", "CidrIp": "0.0.0.0/0",
    #          "FromPort": "80"}],
    #     VpcId=Ref(ExistingVPC),
    #     GroupDescription="Allow HTTP traffic to the host",
    #     SecurityGroupEgress=[
    #         {"ToPort": "80", "IpProtocol": "tcp", "CidrIp": "0.0.0.0/0",
    #          "FromPort": "80"}],
    # ))
    #
    # AllSecurityGroups = template.add_output(Output(
    #     "AllSecurityGroups",
    #     Description="Security Groups that are associated with the EC2 instance",
    #     Value=Join(", ", GetAtt(AllSecurityGroups, "Value")),
    # ))

    # SNS topic that fans S3 object-created events into the task queue.
    source_sns_name = f'{env}-source-sns-topic'
    source_sns_topic = template.add_resource(
        Topic(
            "SNSSource",
            TopicName=source_sns_name,
            Subscription=[
                Subscription(
                    Endpoint=GetAtt(spider_tasks_queue, "Arn"),
                    Protocol='sqs',
                )
            ],
            DependsOn=[spider_tasks_queue]
        )
    )
    source_sns_topic_policy = template.add_resource(
        TopicPolicy(
            "SourceForwardingTopicPolicy",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Id="AllowS3PutMessageInSNS",
                Statement=[
                    Statement(
                        Sid="AllowS3PutMessages",
                        Principal=Principal("Service", "s3.amazonaws.com"),
                        Effect=Allow,
                        Action=[
                            Action("sns", "Publish"),
                        ],
                        Resource=["*"],
                    )
                ]
            ),
            Topics=[Ref(source_sns_topic)],
        )
    )
    sns_sqs_policy = template.add_resource(
        QueuePolicy(
            "AllowSNSPutMessagesInSQS",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Id="AllowSNSPutMessagesInSQS",
                Statement=[
                    Statement(
                        Sid="AllowSNSPutMessagesInSQS2",
                        Principal=Principal("*"),
                        Effect=Allow,
                        Action=[
                            Action("sqs", "SendMessage"),
                        ],
                        Resource=["*"],
                    )
                ]
            ),
            Queues=[Ref(spider_tasks_queue)],
            DependsOn=[spider_tasks_queue],
        )
    )

    # Buckets
    source_bucket_name = f'{env}-source-bucket'
    source_bucket = template.add_resource(
        Bucket(
            "SourceBucket",
            BucketName=source_bucket_name,
            NotificationConfiguration=NotificationConfiguration(
                TopicConfigurations=[
                    TopicConfigurations(
                        Topic=Ref(source_sns_topic),
                        Event="s3:ObjectCreated:*",
                    )
                ],
            ),
            DependsOn=[source_sns_topic_policy],
        )
    )

    results_bucket_name = f'{env}-results-bucket'
    results_bucket = template.add_resource(
        Bucket(
            "ResultsBucket",
            BucketName=results_bucket_name,
        )
    )

    # Lambda trigger
    template.add_resource(
        EventSourceMapping(
            "TriggerLambdaSpiderFromSQS",
            EventSourceArn=GetAtt(spider_tasks_queue, "Arn"),
            FunctionName=Ref(spider_lambda),
            BatchSize=1,  # Default process tasks one by one
        )
    )

    return template.to_json()
class EnvironmentTemplate:
    """Builds a CloudFormation "Service VPC" template for one environment.

    Wraps a troposphere ``Template`` plus helpers that add a VPC, an
    internet gateway (with attachment), and a network ACL with one inbound
    (HTTPS) rule and one outbound rule.  Call order matters:
    ``create_vpc()`` must run before ``create_gateway()`` /
    ``create_network()`` because both reference ``self.vpc``.

    Fix over the original: each method's description was written as a bare
    string *before* its ``def`` in the class body; such strings are
    evaluated and thrown away, never attached as docstrings.  They are now
    real docstrings inside the methods.
    """

    # NOTE: the Env default is evaluated once, at class-definition time;
    # changing the ENV variable afterwards does not affect it.
    def __init__(self, Env=os.environ.get('ENV', 'Development')):
        self.env = Env
        url_config = os.getcwd() + '/config/config.ini'
        p = MyParser()
        self.config = p.readconfig(url_config, self.env)
        self.template = Template()
        self.template.set_description("Service VPC")
        self.template.set_metadata({
            "DependsOn": [],
            "Environment": Env,
            "StackName": "%s-VPC" % Env
        })
        self.vpc = None
        self.gateway = None
        self.gateway_attachment = None

    def __set_tags(self, name):
        """Private helper: build the Environment/Name tag pair for a resource."""
        tags = [{
            "Key": "Environment",
            "Value": self.env
        }, {
            "Key": "Name",
            "Value": "%s-%s" % (self.env, name)
        }]
        return tags

    def create_gateway(self, name=GATEWAY):
        """Create an internet gateway within the VPC and attach it."""
        self.gateway = self.template.add_resource(
            ec2.InternetGateway(name, Tags=self.__set_tags("InternetGateway")))
        self.template.add_output(Output(
            GATEWAY_ID,
            Value=self.gateway.Ref(),
        ))
        self.gateway_attachment = self.template.add_resource(
            ec2.VPCGatewayAttachment(
                VPC_GATEWAYATTACHMENT,
                VpcId=self.vpc.Ref(),
                InternetGatewayId=self.gateway.Ref(),
            ))

    def create_network(self):
        """Create the VPC network access list with inbound and outbound rules."""
        self.vpc_nw_acl = self.template.add_resource(
            ec2.NetworkAcl(VPC_NETWORK_ACCESS_LIST,
                           VpcId=self.vpc.Ref(),
                           Tags=self.__set_tags("NetworkAcl")))
        # Inbound: allow HTTPS (443) from anywhere.
        self.inbound_rule = self.template.add_resource(
            ec2.NetworkAclEntry(VPC_NETWORK_ACL_INBOUND_RULE,
                                NetworkAclId=self.vpc_nw_acl.Ref(),
                                RuleNumber=100,
                                Protocol="6",
                                PortRange=PortRange(To="443", From="443"),
                                Egress="false",
                                RuleAction="allow",
                                CidrBlock="0.0.0.0/0"))
        # Outbound: allow all TCP to anywhere (no PortRange restriction).
        self.outbound_rule = self.template.add_resource(
            ec2.NetworkAclEntry(VPC_NETWORK_ACL_OUTBOUND_RULE,
                                NetworkAclId=self.vpc_nw_acl.Ref(),
                                RuleNumber=200,
                                Protocol="6",
                                Egress="true",
                                RuleAction="allow",
                                CidrBlock="0.0.0.0/0"))

    def create_vpc(self, name=VPC_NAME):
        """Create the VPC itself and publish its id as a stack output."""
        self.vpc = self.template.add_resource(
            ec2.VPC(name,
                    CidrBlock=self.config['vpc_cidrblock'],
                    EnableDnsSupport=True,
                    EnableDnsHostnames=True,
                    InstanceTenancy="default",
                    Tags=self.__set_tags("ServiceVPC")))
        # Just about everything needs this, so storing it on the object
        self.template.add_output(Output(VPC_ID, Value=self.vpc.Ref()))

    def write_to_file(self, filename):
        """Write the template as pretty-printed, key-sorted JSON to *filename*."""
        with open(filename, 'w') as f:
            f.write(
                json.dumps(json.loads(self.template.to_json()),
                           indent=2,
                           sort_keys=True))
import troposphere.ec2 as ec2

t = Template()

# Authenticate connecting clients against an AWS Directory Service directory.
directory_auth = ec2.ClientAuthenticationRequest(
    Type="directory-service-authentication",
    ActiveDirectory=ec2.DirectoryServiceAuthenticationRequest(
        DirectoryId="d-926example"),
)

# Client VPN endpoint: UDP transport, connection logging disabled,
# tagged for production use.
endpoint = ec2.ClientVpnEndpoint(
    "myClientVpnEndpoint",
    AuthenticationOptions=[directory_auth],
    ClientCidrBlock="10.0.0.0/22",
    ConnectionLogOptions=ec2.ConnectionLogOptions(Enabled=False),
    Description="My Client VPN Endpoint",
    DnsServers=["11.11.0.1"],
    ServerCertificateArn=("arn:aws:acm:us-east-1:111122223333:certificate/"
                          "12345678-1234-1234-1234-123456789012"),
    TagSpecifications=[
        ec2.TagSpecifications(
            ResourceType="client-vpn-endpoint",
            Tags=Tags(Purpose="Production"),
        )
    ],
    TransportProtocol="udp",
)
t.add_resource(endpoint)

print(t.to_json())
def test_badrequired(self):
    # An Instance missing its required properties must fail validation
    # when the template is serialized to JSON.
    template = Template()
    template.add_resource(Instance('ec2instance'))
    with self.assertRaises(ValueError):
        template.to_json()
"AmbariSSH", Description="SSH to the Ambari Node", Value=Join("", ["ssh ec2-user@", GetAtt('AmbariNode', 'PublicDnsName')]), ), Output("AmbariServiceInstanceId", Description="The Ambari Servers Instance-Id", Value=Ref('AmbariNode')), Output("Region", Description="AWS Region", Value=ref_region), ]) if __name__ == '__main__': template_compressed = "\n".join( [line.strip() for line in t.to_json().split("\n")]) try: cfcon = boto.cloudformation.connect_to_region('us-west-2') cfcon.validate_template(template_compressed) except boto.exception.BotoServerError, e: sys.stderr.write( "FATAL: CloudFormation Template Validation Error:\n%s\n" % e.message) else: sys.stderr.write("Successfully validated template!\n") with open('generated/cfn-ambari-jumpstart.template-uncompressed.json', 'w') as f: f.write(t.to_json()) print( 'Uncompressed template written to generated/cfn-ambari-jumpstart.template-uncompressed.json'
def gen_template(config) -> dict:
    """Generates a Cloud Formation template to make a device stack on EC2
    based on the passed configuration

    Arguments:
    config -- The configuration to use when generating the template
    (specifies things like number of server instances, etc).  Expected
    attributes (read below): server_number, server_type,
    sync_gateway_number, sync_gateway_type, couchbase_server_prefix,
    sync_gateway_prefix.

    Returns:
    The generated template as a JSON string (the ``to_json()`` output,
    not a parsed object, despite the ``-> dict`` annotation).
    """

    num_couchbase_servers = config.server_number
    couchbase_instance_type = config.server_type

    num_sync_gateway_servers = config.sync_gateway_number
    sync_gateway_server_type = config.sync_gateway_type

    t = Template()
    t.set_description(
        'An Ec2-classic stack with Couchbase Server + Sync Gateway')

    # Builds one shared security group used by every instance in the stack:
    # public access for SSH/Sync Gateway/Couchbase client ports, and
    # VPC-internal-only access (172.31.0.0/16) for node-to-node ports.
    def createCouchbaseSecurityGroups(t):
        # Couchbase security group
        secGrpCouchbase = ec2.SecurityGroup('CouchbaseSecurityGroup')
        secGrpCouchbase.GroupDescription = "Allow access to Couchbase Server"
        secGrpCouchbase.SecurityGroupIngress = [
            # SSH
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="22",
                ToPort="22",
                CidrIp="0.0.0.0/0",
            ),
            # Sync Gateway Ports
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="4984",
                ToPort="4985",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # expvars
                IpProtocol="tcp",
                FromPort="9876",
                ToPort="9876",
                CidrIp="0.0.0.0/0",
            ),
            # Couchbase Server Client-To-Node Ports
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="8091",
                ToPort="8096",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="11207",
                ToPort="11207",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="11210",
                ToPort="11211",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="18091",
                ToPort="18096",
                CidrIp="0.0.0.0/0",
            ),
            # Couchbase Server Node-To-Node Ports (VPC-internal only)
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="4369",
                ToPort="4369",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="9100",
                ToPort="9105",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="9110",
                ToPort="9118",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="9120",
                ToPort="9122",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="9130",
                ToPort="9130",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="9999",
                ToPort="9999",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="11209",
                ToPort="11210",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="19130",
                ToPort="19130",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="21100",
                ToPort="21100",
                CidrIp="172.31.0.0/16",
            ),
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="21150",
                ToPort="21150",
                CidrIp="172.31.0.0/16",
            )
        ]

        # Add security group to template
        t.add_resource(secGrpCouchbase)

        return secGrpCouchbase

    keyname_param = t.add_parameter(
        Parameter(
            'KeyName',
            Type='String',
            Description='Name of an existing EC2 KeyPair to enable SSH access')
    )

    secGrpCouchbase = createCouchbaseSecurityGroups(t)

    # Couchbase Server Instances
    for i in range(num_couchbase_servers):
        name = "{}{}".format(config.couchbase_server_prefix, i)
        instance = ec2.Instance(name)
        instance.ImageId = "ami-6d1c2007"  # centos7
        instance.InstanceType = couchbase_instance_type
        instance.SecurityGroups = [Ref(secGrpCouchbase)]
        instance.KeyName = Ref(keyname_param)
        instance.Tags = Tags(Name=name, Type="couchbaseserver")
        instance.BlockDeviceMappings = [
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(
                                       DeleteOnTermination=True,
                                       VolumeSize=200,
                                       VolumeType="gp2"))
        ]

        t.add_resource(instance)

    # Sync Gw instances (ubuntu ami)
    # NOTE(review): comment says "ubuntu ami" but the same centos7 AMI as
    # the Couchbase servers is used — confirm which is intended.
    for i in range(num_sync_gateway_servers):
        name = "{}{}".format(config.sync_gateway_prefix, i)
        instance = ec2.Instance(name)
        instance.ImageId = "ami-6d1c2007"  # centos7
        instance.InstanceType = sync_gateway_server_type
        instance.SecurityGroups = [Ref(secGrpCouchbase)]
        instance.KeyName = Ref(keyname_param)
        instance.BlockDeviceMappings = [
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(
                                       DeleteOnTermination=True,
                                       VolumeSize=200,
                                       VolumeType="gp2"))
        ]

        # Make syncgateway0 a cache writer, and the rest cache readers
        # See https://github.com/couchbase/sync_gateway/wiki/Distributed-channel-cache-design-notes
        if i == 0:
            instance.Tags = Tags(Name=name, Type="syncgateway",
                                 CacheType="writer")
        else:
            instance.Tags = Tags(Name=name, Type="syncgateway")

        t.add_resource(instance)

    return t.to_json()
Key(Ref(tableIndexName), "HASH") ], ProvisionedThroughput=ProvisionedThroughput( Ref(readunits), Ref(writeunits) ), GlobalSecondaryIndexes=[ GlobalSecondaryIndex( "SecondaryIndex", [ Key(Ref(secondaryIndexHashName), "HASH"), Key(Ref(secondaryIndexRangeName), "RANGE") ], Projection("ALL"), ProvisionedThroughput( Ref(readunits), Ref(writeunits) ) ) ] )) template.add_output(Output( "GSITable", Value=Ref(GSITable), Description="Table with a Global Secondary Index", )) print template.to_json()
def test_badrequired(self):
    # LaunchTemplateData missing its required properties must fail
    # validation when the template is serialized to JSON.
    template = Template()
    template.add_resource(LaunchTemplateData('launchtemplatedata'))
    with self.assertRaises(ValueError):
        template.to_json()
def provision(self):
    """Create a new CloudFormation-backed network stack and poll it to completion.

    Builds the troposphere template, launches the stack, then loops on the
    stack status: on a ROLLBACK_* state the user is prompted to delete or
    keep the stack; on CREATE_COMPLETE the node IPs are collected and
    registered, and bootstrap data is fetched best-effort.

    Returns:
        True on successful creation; None when aborted (missing
        aws_ec2_key_name config or stack rollback).
    """
    size = int(self.app.pargs.size or 1)
    name = self.app.pargs.name or '%s-network-%s' % (self.app.project,
                                                     str(uuid.uuid4())[:6])
    # Guard clause: the EC2 key pair name is mandatory.
    if 'aws_ec2_key_name' not in self.app.config['provision']:
        self.app.log.error(
            'You need to set provision.aws_ec2_key_name in the config')
        return
    self.app.log.info('Starting new network: %s' % name)
    template = Template()
    sg, subnet, vpc = self.app.networks.sg_subnet_vpc(template)
    for i in range(size):
        self.app.networks.add_instance(name, template, i, sg, subnet)
    tpl = template.to_json()
    cf = self.app.networks.get_boto().resource('cloudformation')
    stack = cf.create_stack(StackName=name, TemplateBody=tpl)
    self.app.log.info('Waiting for cloudformation: %s' % name)
    while True:
        stack.reload()
        print('Status: ', stack.stack_status)
        # Registry entry is re-written on every poll so observers see the
        # latest stack status.
        REGISTRY = {
            'bootstrapped': datetime.utcnow().strftime('%c'),
            'size': size,
            'status': stack.stack_status
        }
        self.app.networks.register(name, REGISTRY)
        if stack.stack_status.startswith('ROLLBACK_'):
            p = shell.Prompt(
                'Error deploying cloudformation, what do you want to do?',
                options=['Delete It', 'Leave It'],
                numbered=True)
            if p.prompt() == 'Delete It':
                stack.delete()
            return
        if stack.stack_status == 'CREATE_COMPLETE':
            if self.app.pargs.default:
                with open(self.app.utils.path('.hydra_network'), 'w+') as fh:
                    fh.write(name)
            outputs = {
                o['OutputKey']: o['OutputValue']
                for o in stack.outputs
            }
            ips = [outputs['IP%s' % i] for i in range(size)]
            REGISTRY['outputs'], REGISTRY['ips'] = outputs, ips
            self.app.networks.register(name, REGISTRY)
            for ip in ips:
                self.app.log.info('Node IP: %s' % ip)
            self.app.log.info(
                'Creation complete, pausing for a minute while the software installs...'
            )
            # Retry for up to ~5 minutes (10 x 30s): the nodes need time to
            # finish installing before bootstrap data is available.
            for i in range(10):
                time.sleep(30)
                try:
                    REGISTRY['node_data'] = {
                        ip: self.get_bootstrap_data(ip, name)
                        for ip in ips
                    }
                    self.app.networks.register(name, REGISTRY)
                    break
                except Exception:
                    # Fix: was a bare `except:` which also swallowed
                    # SystemExit/KeyboardInterrupt; the deliberate
                    # best-effort retry behavior is preserved.
                    pass
            self.app.log.info('Stack launch success!')
            return True
        time.sleep(10)
def main():
    """
    Build a CloudFormation template that creates an Amazon ElastiCache
    Redis cluster plus an EC2 web server instance connecting to it, and
    print the template JSON to stdout.  Nothing is deployed here.
    """
    template = Template()

    # Description
    template.set_description(
        'AWS CloudFormation Sample Template ElastiCache_Redis:'
        'Sample template showing how to create an Amazon'
        'ElastiCache Redis Cluster. **WARNING** This template'
        'creates an Amazon EC2 Instance and an Amazon ElastiCache'
        'Cluster. You will be billed for the AWS resources used'
        'if you create a stack from this template.')

    # Mappings
    # Instance type -> virtualization architecture (PV64 / HVM64 / HVMG2),
    # used below to pick the matching AMI.
    template.add_mapping('AWSInstanceType2Arch', {
        't1.micro': {'Arch': 'PV64'},
        't2.micro': {'Arch': 'HVM64'},
        't2.small': {'Arch': 'HVM64'},
        't2.medium': {'Arch': 'HVM64'},
        'm1.small': {'Arch': 'PV64'},
        'm1.medium': {'Arch': 'PV64'},
        'm1.large': {'Arch': 'PV64'},
        'm1.xlarge': {'Arch': 'PV64'},
        'm2.xlarge': {'Arch': 'PV64'},
        'm2.2xlarge': {'Arch': 'PV64'},
        'm2.4xlarge': {'Arch': 'PV64'},
        'm3.medium': {'Arch': 'HVM64'},
        'm3.large': {'Arch': 'HVM64'},
        'm3.xlarge': {'Arch': 'HVM64'},
        'm3.2xlarge': {'Arch': 'HVM64'},
        'c1.medium': {'Arch': 'PV64'},
        'c1.xlarge': {'Arch': 'PV64'},
        'c3.large': {'Arch': 'HVM64'},
        'c3.xlarge': {'Arch': 'HVM64'},
        'c3.2xlarge': {'Arch': 'HVM64'},
        'c3.4xlarge': {'Arch': 'HVM64'},
        'c3.8xlarge': {'Arch': 'HVM64'},
        'c4.large': {'Arch': 'HVM64'},
        'c4.xlarge': {'Arch': 'HVM64'},
        'c4.2xlarge': {'Arch': 'HVM64'},
        'c4.4xlarge': {'Arch': 'HVM64'},
        'c4.8xlarge': {'Arch': 'HVM64'},
        'g2.2xlarge': {'Arch': 'HVMG2'},
        'r3.large': {'Arch': 'HVM64'},
        'r3.xlarge': {'Arch': 'HVM64'},
        'r3.2xlarge': {'Arch': 'HVM64'},
        'r3.4xlarge': {'Arch': 'HVM64'},
        'r3.8xlarge': {'Arch': 'HVM64'},
        'i2.xlarge': {'Arch': 'HVM64'},
        'i2.2xlarge': {'Arch': 'HVM64'},
        'i2.4xlarge': {'Arch': 'HVM64'},
        'i2.8xlarge': {'Arch': 'HVM64'},
        'd2.xlarge': {'Arch': 'HVM64'},
        'd2.2xlarge': {'Arch': 'HVM64'},
        'd2.4xlarge': {'Arch': 'HVM64'},
        'd2.8xlarge': {'Arch': 'HVM64'},
        'hi1.4xlarge': {'Arch': 'HVM64'},
        'hs1.8xlarge': {'Arch': 'HVM64'},
        'cr1.8xlarge': {'Arch': 'HVM64'},
        'cc2.8xlarge': {'Arch': 'HVM64'}
    })

    # Region -> AMI id per architecture.
    template.add_mapping('AWSRegionArch2AMI', {
        'us-east-1': {'PV64': 'ami-0f4cfd64',
                      'HVM64': 'ami-0d4cfd66',
                      'HVMG2': 'ami-5b05ba30'},
        'us-west-2': {'PV64': 'ami-d3c5d1e3',
                      'HVM64': 'ami-d5c5d1e5',
                      'HVMG2': 'ami-a9d6c099'},
        'us-west-1': {'PV64': 'ami-85ea13c1',
                      'HVM64': 'ami-87ea13c3',
                      'HVMG2': 'ami-37827a73'},
        'eu-west-1': {'PV64': 'ami-d6d18ea1',
                      'HVM64': 'ami-e4d18e93',
                      'HVMG2': 'ami-72a9f105'},
        'eu-central-1': {'PV64': 'ami-a4b0b7b9',
                         'HVM64': 'ami-a6b0b7bb',
                         'HVMG2': 'ami-a6c9cfbb'},
        'ap-northeast-1': {'PV64': 'ami-1a1b9f1a',
                           'HVM64': 'ami-1c1b9f1c',
                           'HVMG2': 'ami-f644c4f6'},
        'ap-southeast-1': {'PV64': 'ami-d24b4280',
                           'HVM64': 'ami-d44b4286',
                           'HVMG2': 'ami-12b5bc40'},
        'ap-southeast-2': {'PV64': 'ami-ef7b39d5',
                           'HVM64': 'ami-db7b39e1',
                           'HVMG2': 'ami-b3337e89'},
        'sa-east-1': {'PV64': 'ami-5b098146',
                      'HVM64': 'ami-55098148',
                      'HVMG2': 'NOT_SUPPORTED'},
        'cn-north-1': {'PV64': 'ami-bec45887',
                       'HVM64': 'ami-bcc45885',
                       'HVMG2': 'NOT_SUPPORTED'}
    })

    # Region -> IAM service principals (China regions use the .cn suffix).
    template.add_mapping('Region2Principal', {
        'us-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'us-west-2': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'us-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'eu-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-southeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-northeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-southeast-2': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'sa-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'cn-north-1': {'EC2Principal': 'ec2.amazonaws.com.cn',
                       'OpsWorksPrincipal': 'opsworks.amazonaws.com.cn'},
        'eu-central-1': {'EC2Principal': 'ec2.amazonaws.com',
                         'OpsWorksPrincipal': 'opsworks.amazonaws.com'}
    })

    # Parameters
    cachenodetype = template.add_parameter(Parameter(
        'ClusterNodeType',
        Description='The compute and memory capacity of the nodes in the Redis'
                    ' Cluster',
        Type='String',
        Default='cache.m1.small',
        AllowedValues=['cache.m1.small',
                       'cache.m1.large',
                       'cache.m1.xlarge',
                       'cache.m2.xlarge',
                       'cache.m2.2xlarge',
                       'cache.m2.4xlarge',
                       'cache.c1.xlarge'],
        ConstraintDescription='must select a valid Cache Node type.',
    ))

    # NOTE(review): 'cg1.4xlarge' below has no entry in AWSInstanceType2Arch,
    # so selecting it would fail the FindInMap at stack creation — confirm.
    instancetype = template.add_parameter(Parameter(
        'InstanceType',
        Description='WebServer EC2 instance type',
        Type='String',
        Default='t2.micro',
        AllowedValues=['t1.micro',
                       't2.micro',
                       't2.small',
                       't2.medium',
                       'm1.small',
                       'm1.medium',
                       'm1.large',
                       'm1.xlarge',
                       'm2.xlarge',
                       'm2.2xlarge',
                       'm2.4xlarge',
                       'm3.medium',
                       'm3.large',
                       'm3.xlarge',
                       'm3.2xlarge',
                       'c1.medium',
                       'c1.xlarge',
                       'c3.large',
                       'c3.xlarge',
                       'c3.2xlarge',
                       'c3.4xlarge',
                       'c3.8xlarge',
                       'c4.large',
                       'c4.xlarge',
                       'c4.2xlarge',
                       'c4.4xlarge',
                       'c4.8xlarge',
                       'g2.2xlarge',
                       'r3.large',
                       'r3.xlarge',
                       'r3.2xlarge',
                       'r3.4xlarge',
                       'r3.8xlarge',
                       'i2.xlarge',
                       'i2.2xlarge',
                       'i2.4xlarge',
                       'i2.8xlarge',
                       'd2.xlarge',
                       'd2.2xlarge',
                       'd2.4xlarge',
                       'd2.8xlarge',
                       'hi1.4xlarge',
                       'hs1.8xlarge',
                       'cr1.8xlarge',
                       'cc2.8xlarge',
                       'cg1.4xlarge'],
        ConstraintDescription='must be a valid EC2 instance type.',
    ))

    keyname = template.add_parameter(Parameter(
        'KeyName',
        Description='Name of an existing EC2 KeyPair to enable SSH access'
                    ' to the instance',
        Type='AWS::EC2::KeyPair::KeyName',
        ConstraintDescription='must be the name of an existing EC2 KeyPair.',
    ))

    sshlocation = template.add_parameter(Parameter(
        'SSHLocation',
        Description='The IP address range that can be used to SSH to'
                    ' the EC2 instances',
        Type='String',
        MinLength='9',
        MaxLength='18',
        Default='0.0.0.0/0',
        AllowedPattern='(\\d{1,3})\\.(\\d{1,3})\\.'
                       '(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})',
        ConstraintDescription='must be a valid IP CIDR range of the'
                              ' form x.x.x.x/x.'
    ))

    # Resources
    # IAM role assumed by the web server instance (region-aware principal).
    webserverrole = template.add_resource(iam.Role(
        'WebServerRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[AssumeRole],
                    Principal=Principal('Service',
                                        [FindInMap('Region2Principal',
                                                   Ref('AWS::Region'),
                                                   'EC2Principal')]),
                )
            ]
        ),
        Path='/',
    ))

    # Lets the instance discover the Redis endpoint via the CLI.
    template.add_resource(iam.PolicyType(
        'WebServerRolePolicy',
        PolicyName='WebServerRole',
        PolicyDocument=PolicyDocument(
            Statement=[awacs.aws.Statement(
                Action=[awacs.aws.Action("elasticache",
                                         "DescribeCacheClusters")],
                Resource=["*"],
                Effect=awacs.aws.Allow
            )]
        ),
        Roles=[Ref(webserverrole)],
    ))

    webserverinstanceprofile = template.add_resource(iam.InstanceProfile(
        'WebServerInstanceProfile',
        Path='/',
        Roles=[Ref(webserverrole)],
    ))

    webserversg = template.add_resource(ec2.SecurityGroup(
        'WebServerSecurityGroup',
        GroupDescription='Enable HTTP and SSH access',
        SecurityGroupIngress=[
            # SSH restricted to the SSHLocation CIDR.
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='22',
                ToPort='22',
                CidrIp=Ref(sshlocation),
            ),
            # HTTP open to the world.
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='80',
                ToPort='80',
                CidrIp='0.0.0.0/0',
            )
        ]
    ))

    # Web server bootstrapped via cfn-init: installs Apache/PHP, a sample
    # PHP page talking to Redis, a phpredis build script, a cron job that
    # refreshes the cluster config, and cfn-hup for metadata updates.
    webserverinstance = template.add_resource(ec2.Instance(
        'WebServerInstance',
        Metadata=cloudformation.Metadata(
            cloudformation.Init({
                'config': cloudformation.InitConfig(
                    packages={
                        'yum': {
                            'httpd': [],
                            'php': [],
                            'php-devel': [],
                            'gcc': [],
                            'make': []
                        }
                    },
                    files=cloudformation.InitFiles({
                        '/var/www/html/index.php': cloudformation.InitFile(
                            content=Join('', [
                                '<?php\n',
                                'echo \"<h1>AWS CloudFormation sample'
                                ' application for Amazon ElastiCache'
                                ' Redis Cluster</h1>\";\n',
                                '\n',
                                '$cluster_config = json_decode('
                                'file_get_contents(\'/tmp/cacheclusterconfig\''
                                '), true);\n',
                                '$endpoint = $cluster_config[\'CacheClusters'
                                '\'][0][\'CacheNodes\'][0][\'Endpoint\'][\'Add'
                                'ress\'];\n',
                                '$port = $cluster_config[\'CacheClusters\'][0]'
                                '[\'CacheNodes\'][0][\'Endpoint\'][\'Port\'];'
                                '\n',
                                '\n',
                                'echo \"<p>Connecting to Redis Cache Cluster '
                                'node \'{$endpoint}\' on port {$port}</p>\";'
                                '\n',
                                '\n',
                                '$redis=new Redis();\n',
                                '$redis->connect($endpoint, $port);\n',
                                '$redis->set(\'testkey\', \'Hello World!\');'
                                '\n',
                                '$return = $redis->get(\'testkey\');\n',
                                '\n',
                                'echo \"<p>Retrieved value: $return</p>\";'
                                '\n',
                                '?>\n'
                            ]),
                            mode='000644',
                            owner='apache',
                            group='apache'
                        ),
                        '/etc/cron.d/get_cluster_config':
                            cloudformation.InitFile(
                                content='*/5 * * * * root'
                                        ' /usr/local/bin/get_cluster_config',
                                mode='000644',
                                owner='root',
                                group='root'
                            ),
                        '/usr/local/bin/get_cluster_config':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '#! /bin/bash\n',
                                    'aws elasticache describe-cache-clusters ',
                                    '         --cache-cluster-id ',
                                    Ref('RedisCluster'),
                                    '         --show-cache-node-info'
                                    ' --region ', Ref('AWS::Region'),
                                    ' > /tmp/cacheclusterconfig\n'
                                ]),
                                mode='000755',
                                owner='root',
                                group='root'
                            ),
                        '/usr/local/bin/install_phpredis':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '#! /bin/bash\n',
                                    'cd /tmp\n',
                                    'wget https://github.com/nicolasff/'
                                    'phpredis/zipball/master -O phpredis.zip'
                                    '\n',
                                    'unzip phpredis.zip\n',
                                    'cd nicolasff-phpredis-*\n',
                                    'phpize\n',
                                    './configure\n',
                                    'make && make install\n',
                                    'touch /etc/php.d/redis.ini\n',
                                    'echo extension=redis.so > /etc/php.d/'
                                    'redis.ini\n'
                                ]),
                                mode='000755',
                                owner='root',
                                group='root'
                            ),
                        '/etc/cfn/cfn-hup.conf': cloudformation.InitFile(
                            content=Join('', [
                                '[main]\n',
                                'stack=', Ref('AWS::StackId'), '\n',
                                'region=', Ref('AWS::Region'), '\n'
                            ]),
                            mode='000400',
                            owner='root',
                            group='root'
                        ),
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '[cfn-auto-reloader-hook]\n',
                                    'triggers=post.update\n',
                                    'path=Resources.WebServerInstance.Metadata'
                                    '.AWS::CloudFormation::Init\n',
                                    'action=/opt/aws/bin/cfn-init -v ',
                                    '         --stack ', Ref('AWS::StackName'),
                                    '         --resource WebServerInstance ',
                                    '         --region ', Ref('AWS::Region'),
                                    '\n',
                                    'runas=root\n'
                                ]),
                                # Why doesn't the Amazon template have this?
                                # mode='000400',
                                # owner='root',
                                # group='root'
                            ),
                    }),
                    commands={
                        '01-install_phpredis': {
                            'command': '/usr/local/bin/install_phpredis'
                        },
                        '02-get-cluster-config': {
                            'command': '/usr/local/bin/get_cluster_config'
                        }
                    },
                    services={
                        "sysvinit": cloudformation.InitServices({
                            "httpd": cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                            ),
                            "cfn-hup": cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                                files=[
                                    '/etc/cfn/cfn-hup.conf',
                                    '/etc/cfn/hooks.d/'
                                    'cfn-auto-reloader.conf'
                                ]
                            ),
                        }),
                    },
                )
            })
        ),
        # AMI chosen by region + instance architecture via the two mappings.
        ImageId=FindInMap('AWSRegionArch2AMI', Ref('AWS::Region'),
                          FindInMap('AWSInstanceType2Arch',
                                    Ref(instancetype), 'Arch')),
        InstanceType=Ref(instancetype),
        SecurityGroups=[Ref(webserversg)],
        KeyName=Ref(keyname),
        IamInstanceProfile=Ref(webserverinstanceprofile),
        UserData=Base64(Join('', [
            '#!/bin/bash -xe\n',
            'yum update -y aws-cfn-bootstrap\n',
            '# Setup the PHP sample application\n',
            '/opt/aws/bin/cfn-init -v ',
            '         --stack ', Ref('AWS::StackName'),
            '         --resource WebServerInstance ',
            '         --region ', Ref('AWS::Region'), '\n',
            '# Signal the status of cfn-init\n',
            '/opt/aws/bin/cfn-signal -e $? ',
            '         --stack ', Ref('AWS::StackName'),
            '         --resource WebServerInstance ',
            '         --region ', Ref('AWS::Region'), '\n'
        ])),
        # Stack creation waits up to 15 minutes for the cfn-signal above.
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M')
        ),
        Tags=Tags(Application=Ref('AWS::StackId'),
                  Details='Created using Troposhpere')
    ))

    redisclustersg = template.add_resource(elasticache.SecurityGroup(
        'RedisClusterSecurityGroup',
        Description='Lock the cluster down',
    ))

    # Only the web server's security group may reach the cache cluster.
    template.add_resource(elasticache.SecurityGroupIngress(
        'RedisClusterSecurityGroupIngress',
        CacheSecurityGroupName=Ref(redisclustersg),
        EC2SecurityGroupName=Ref(webserversg),
    ))

    template.add_resource(elasticache.CacheCluster(
        'RedisCluster',
        Engine='redis',
        CacheNodeType=Ref(cachenodetype),
        NumCacheNodes='1',
        CacheSecurityGroupNames=[Ref(redisclustersg)],
    ))

    # Outputs
    template.add_output([
        Output(
            'WebsiteURL',
            Description='Application URL',
            Value=Join('', [
                'http://',
                GetAtt(webserverinstance, 'PublicDnsName'),
            ])
        )
    ])

    # Print CloudFormation Template
    print(template.to_json())
# Instance profile wiring the externally defined "Role" to the instance.
t.add_resource(
    InstanceProfile("InstanceProfile", Path="/", Roles=[Ref("Role")]))

t.add_resource(
    ec2.Instance("instance",
                 ImageId="ami-f5f41398",
                 InstanceType="t2.micro",
                 SecurityGroups=[Ref("SecurityGroup")],
                 KeyName=Ref("KeyPair"),
                 UserData=ud,
                 IamInstanceProfile=Ref("InstanceProfile")))

t.add_output(
    Output(
        "InstancePublicIp",
        Description="Public IP of our instance.",
        Value=GetAtt("instance", "PublicIp"),
    ))

t.add_output(
    Output(
        "WebUrl",
        Description="Application endpoint",
        Value=Join("", [
            "http://",
            GetAtt("instance", "PublicDnsName"), ":", ApplicationPort
        ]),
    ))

# Fix: use the print() function form — valid with identical output under
# Python 2 (single parenthesized expression) and required under Python 3 —
# instead of the Python-2-only print statement.
print(t.to_json())
def main(args):
    """Generate a CloudFormation template that conditionally creates up to
    five EBS volumes and write it to ``args.target_path`` as JSON.

    Each volume slot i gets a chain of template Conditions driven by the
    comma-delimited stack parameters ("NONE" marks an unused entry):
    create-vs-reuse an existing volume, optional size/type/IOPS,
    optional encryption with a customer KMS key, and an optional source
    snapshot.  The single "Volumeids" output joins the ids of however many
    volumes were actually requested.

    Fix over the original: the output file is now written inside a
    ``with`` block so the handle is closed even if the write fails.
    """
    number_of_vol = 5

    t = Template()
    availability_zone = t.add_parameter(
        Parameter(
            "AvailabilityZone",
            Type="String",
            Description=
            "Availability Zone the cluster will launch into. THIS IS REQUIRED",
        ))
    volume_size = t.add_parameter(
        Parameter(
            "VolumeSize",
            Type="CommaDelimitedList",
            Description="Size of EBS volume in GB, if creating a new one"))
    volume_type = t.add_parameter(
        Parameter(
            "VolumeType",
            Type="CommaDelimitedList",
            Description="Type of volume to create either new or from snapshot")
    )
    volume_iops = t.add_parameter(
        Parameter(
            "VolumeIOPS",
            Type="CommaDelimitedList",
            Description=
            "Number of IOPS for volume type io1. Not used for other volume types.",
        ))
    ebs_encryption = t.add_parameter(
        Parameter(
            "EBSEncryption",
            Type="CommaDelimitedList",
            Description="Boolean flag to use EBS encryption for /shared volume. "
            "(Not to be used for snapshots)",
        ))
    ebs_kms_id = t.add_parameter(
        Parameter(
            "EBSKMSKeyId",
            Type="CommaDelimitedList",
            Description=
            "KMS ARN for customer created master key, will be used for EBS encryption",
        ))
    ebs_volume_id = t.add_parameter(
        Parameter("EBSVolumeId",
                  Type="CommaDelimitedList",
                  Description="Existing EBS volume Id"))
    ebs_snapshot_id = t.add_parameter(
        Parameter(
            "EBSSnapshotId",
            Type="CommaDelimitedList",
            Description=
            "Id of EBS snapshot if using snapshot as source for volume",
        ))
    ebs_vol_num = t.add_parameter(
        Parameter(
            "NumberOfEBSVol",
            Type="Number",
            Description="Number of EBS Volumes the user requested, up to %s" %
            number_of_vol,
        ))

    use_vol = [None] * number_of_vol
    use_existing_ebs_volume = [None] * number_of_vol
    v = [None] * number_of_vol

    for i in range(number_of_vol):
        # UseVol conditions chain so slot i is active only when all earlier
        # slots are (volume 1 is always active and has no UseVol condition).
        if i == 0:
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                Equals(Select(str(i), Ref(ebs_volume_id)), "NONE"))
        elif i == 1:
            use_vol[i] = t.add_condition("UseVol%s" % (i + 1),
                                         Not(Equals(Ref(ebs_vol_num), str(i))))
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                And(Condition(use_vol[i]),
                    Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")),
            )
        else:
            use_vol[i] = t.add_condition(
                "UseVol%s" % (i + 1),
                And(Not(Equals(Ref(ebs_vol_num), str(i))),
                    Condition(use_vol[i - 1])))
            create_vol = t.add_condition(
                "Vol%s_CreateEBSVolume" % (i + 1),
                And(Condition(use_vol[i]),
                    Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")),
            )
        use_ebs_iops = t.add_condition(
            "Vol%s_UseEBSPIOPS" % (i + 1),
            Equals(Select(str(i), Ref(volume_type)), "io1"))
        use_vol_size = t.add_condition(
            "Vol%s_UseVolumeSize" % (i + 1),
            Not(Equals(Select(str(i), Ref(volume_size)), "NONE")))
        use_vol_type = t.add_condition(
            "Vol%s_UseVolumeType" % (i + 1),
            Not(Equals(Select(str(i), Ref(volume_type)), "NONE")))
        use_ebs_encryption = t.add_condition(
            "Vol%s_UseEBSEncryption" % (i + 1),
            Equals(Select(str(i), Ref(ebs_encryption)), "true"))
        use_ebs_kms_key = t.add_condition(
            "Vol%s_UseEBSKMSKey" % (i + 1),
            And(Condition(use_ebs_encryption),
                Not(Equals(Select(str(i), Ref(ebs_kms_id)), "NONE"))),
        )
        use_ebs_snapshot = t.add_condition(
            "Vol%s_UseEBSSnapshot" % (i + 1),
            Not(Equals(Select(str(i), Ref(ebs_snapshot_id)), "NONE")))
        use_existing_ebs_volume[i] = t.add_condition(
            "Vol%s_UseExistingEBSVolume" % (i + 1),
            Not(Equals(Select(str(i), Ref(ebs_volume_id)), "NONE")))
        # Size is omitted when restoring from a snapshot (snapshot defines
        # it); otherwise user-supplied size or 20 GB default.
        v[i] = t.add_resource(
            ec2.Volume(
                "Volume%s" % (i + 1),
                AvailabilityZone=Ref(availability_zone),
                VolumeType=If(use_vol_type, Select(str(i), Ref(volume_type)),
                              "gp2"),
                Size=If(
                    use_ebs_snapshot, NoValue,
                    If(use_vol_size, Select(str(i), Ref(volume_size)), "20")),
                SnapshotId=If(use_ebs_snapshot,
                              Select(str(i), Ref(ebs_snapshot_id)), NoValue),
                Iops=If(use_ebs_iops, Select(str(i), Ref(volume_iops)),
                        NoValue),
                Encrypted=If(use_ebs_encryption,
                             Select(str(i), Ref(ebs_encryption)), NoValue),
                KmsKeyId=If(use_ebs_kms_key, Select(str(i), Ref(ebs_kms_id)),
                            NoValue),
                Condition=create_vol,
            ))

    # Output value: for each slot, either the pre-existing volume id or the
    # newly created one; nested Ifs truncate the joined list at the number
    # of volumes actually requested.
    outputs = [None] * number_of_vol
    vol_to_return = [None] * number_of_vol
    for i in range(number_of_vol):
        vol_to_return[i] = If(use_existing_ebs_volume[i],
                              Select(str(i), Ref(ebs_volume_id)), Ref(v[i]))
        if i == 0:
            outputs[i] = vol_to_return[i]
        else:
            outputs[i] = If(use_vol[i], Join(",", vol_to_return[:(i + 1)]),
                            outputs[i - 1])

    t.add_output(
        Output("Volumeids",
               Description="Volume IDs of the resulted EBS volumes",
               Value=outputs[number_of_vol - 1]))

    # Fix: context manager guarantees the file is closed on error
    # (the original used open()/write()/close()).
    json_file_path = args.target_path
    with open(json_file_path, "w") as output_file:
        output_file.write(t.to_json())
Output( "AmbariServiceInstanceId", Description="The Ambari Servers Instance-Id", Value=Ref('AmbariNode') ), Output( "Region", Description="AWS Region", Value=ref_region ), ]) if __name__ == '__main__': template_compressed="\n".join([line.strip() for line in t.to_json().split("\n")]) try: cfcon = boto.cloudformation.connect_to_region('us-west-2') cfcon.validate_template(template_compressed) except boto.exception.BotoServerError, e: sys.stderr.write("FATAL: CloudFormation Template Validation Error:\n%s\n" % e.message) else: sys.stderr.write("Successfully validated template!\n") with open('generated/cfn-ambari.template-uncompressed.json', 'w') as f: f.write(t.to_json()) print('Uncompressed template written to generated/cfn-ambari.template-uncompressed.json') with open('generated/cfn-ambari.template.json', 'w') as f: f.write(template_compressed) print('Compressed template written to generated/cfn-ambari.template.json')
class SFXTemplate:
    """Builds and deploys the "SFX-Test" CloudFormation stack.

    The stack nests VPC and Subnet sub-stacks from S3 templates, then adds
    a security group (SSH/HTTP/HTTPS), an EC2 instance, an Elastic IP with
    association, and a Route53 A record.  ``upload()`` creates or updates
    the stack through boto3 using the named AWS profile.

    Typical use: construct, call buildParameters() then buildTemplate(),
    then upload(); str(obj) returns the template JSON.
    """

    def __init__(self, profile):
        # profile: name of the AWS credentials profile used by upload().
        self.template = Template()
        self.profile = profile

    def buildParameters(self):
        """Add the KeyName and ImageId stack parameters."""
        self.paramKeyName = self.template.add_parameter(
            Parameter("KeyName",
                      Description='SSH Keyname to start',
                      Type='AWS::EC2::KeyPair::KeyName'))
        self.paramImageId = self.template.add_parameter(
            Parameter(
                "ImageId",
                Description='Image ID of the AMI used to create the Instance',
                Type='AWS::EC2::Image::Id',
            ))

    def buildTemplate(self):
        """Add all stack resources; requires buildParameters() to run first."""
        # Nested VPC stack; its outputs feed the subnet and security group.
        self.vpc = self.template.add_resource(
            Stack('Vpc',
                  TemplateURL=
                  "https://s3.amazonaws.com/msel-cf-templates/vpc.template",
                  Parameters={'Department': 'LAG'}))
        self.subnet = self.template.add_resource(
            Stack('Subnet',
                  TemplateURL=
                  'https://s3.amazonaws.com/msel-cf-templates/subnet.template',
                  Parameters={
                      'VPCID': GetAtt(self.vpc, 'Outputs.VpcId'),
                      'CidrBlock': '10.0.1.0/24',
                      'Department': 'LAG',
                      'MapPublicIP': 'True',
                      'RouteTableId': GetAtt(self.vpc, 'Outputs.RouteTableId'),
                  }))
        securitygroup = self.template.add_resource(
            SecurityGroup(
                "SecurityGroup",
                GroupDescription='SFX ports SSH, HTTP, HTTPS',
                VpcId=GetAtt(self.vpc, 'Outputs.VpcId'),
                SecurityGroupIngress=[
                    SecurityGroupRule(
                        CidrIp='0.0.0.0/0',
                        Description='Allow SSH (port 22) from all',
                        FromPort='22',
                        ToPort='22',
                        IpProtocol='tcp',
                    ),
                    SecurityGroupRule(
                        CidrIp='0.0.0.0/0',
                        Description='Allow HTTP (port 80) from all',
                        FromPort='80',
                        ToPort='80',
                        IpProtocol='tcp',
                    ),
                    SecurityGroupRule(
                        CidrIp='0.0.0.0/0',
                        Description='Allow HTTPS (port 443) from all',
                        FromPort='443',
                        ToPort='443',
                        IpProtocol='tcp',
                    ),
                ]))
        self.eip = self.template.add_resource(EIP('EIP'))
        # NOTE(review): self.userdata is referenced below but never assigned
        # anywhere in this class — buildTemplate() would raise
        # AttributeError unless a caller sets it first; confirm intent.
        self.instance = self.template.add_resource(
            Instance(
                'SFXInstance',
                ImageId=Ref(self.paramImageId),
                KeyName=Ref(self.paramKeyName),
                SubnetId=GetAtt(self.subnet, 'Outputs.SubnetId'),
                SecurityGroupIds=[Ref(securitygroup)],
                Tags=Tags(
                    Name='SFX Test Instance',
                    Department='LAG',
                    Project='SFX',
                ),
                UserData=Base64(Ref(self.userdata)),
            ))
        self.template.add_resource(
            EIPAssociation(
                "EIPAssoc",
                EIP=Ref(self.eip),
                InstanceId=Ref(self.instance),
            ))
        # DNS A record pointing at the instance's public IP.
        self.template.add_resource(
            RecordSetType(
                "dnsRecord",
                HostedZoneName=Join("", ['cloud.library.jhu.edu', '.']),
                Comment="SFX-Test",
                Name=Join('', ['sfx-test', ".", "cloud.library.jhu.edu"]),
                Type="A",
                TTL="300",
                ResourceRecords=[GetAtt(self.instance, 'PublicIp')],
            ))

    def upload(self):
        """Create or update the 'SFX-Test' stack and block until it settles.

        Updates reuse the previous parameter values; creates supply fixed
        defaults for KeyName and ImageId.
        """
        import sys  # NOTE(review): appears unused in this method
        session = boto3.Session(profile_name=self.profile)
        cf_client = session.client('cloudformation')
        stacks = cf_client.describe_stacks()
        datestamp = f"{datetime.datetime.now():%Y%m%d%H%M%S}"
        update = False
        # Update only when a stack with this exact name already exists.
        for stack in stacks['Stacks']:
            if stack['StackName'] == 'SFX-Test':
                update = True
        if update:
            waiter = cf_client.get_waiter('stack_update_complete')
            createdict = cf_client.update_stack(
                StackName='SFX-Test',
                TemplateBody=self.template.to_json(),
                Parameters=[
                    {
                        'ParameterKey': 'KeyName',
                        'UsePreviousValue': True
                    },
                    {
                        'ParameterKey': 'ImageId',
                        'UsePreviousValue': True
                    },
                ],
                ClientRequestToken='sfxtest-clouformation-update-' + datestamp)
            print('Updating...')
        else:
            waiter = cf_client.get_waiter('stack_create_complete')
            createdict = cf_client.create_stack(
                StackName='SFX-Test',
                TemplateBody=self.template.to_json(),
                Parameters=[
                    {
                        'ParameterKey': 'KeyName',
                        'ParameterValue': 'operations',
                    },
                    {
                        'ParameterKey': 'ImageId',
                        'ParameterValue': 'ami-6f3f4915',
                    },
                ],
                ClientRequestToken='sfxtest-clouformation-create-' + datestamp)
            print('Creating...')
        # Block until CloudFormation reports the operation complete.
        waiter.wait(StackName='SFX-Test')

    def __str__(self):
        """Return the stack template as a JSON string."""
        return (self.template.to_json())
Value=Ref(root_bucket), Description="Name of S3 bucket to hold website content")) #print(t.to_json()) #print(t.outputs['BucketName'].Value.data) domain = sys.argv[1] stack_name = domain.replace('.', '') client = boto3.client('cloudformation', region_name='eu-west-1') try: response = client.describe_stacks(StackName=stack_name, ) except botocore.exceptions.ClientError: client.create_stack( StackName=stack_name, TemplateBody=t.to_json(), Parameters=[ { 'ParameterKey': 'HostedZone', 'ParameterValue': domain }, ], ) waiter = client.get_waiter('stack_create_complete') waiter.wait(StackName=stack_name) response = client.describe_stacks(StackName=stack_name, ) try: client.update_stack(StackName=stack_name, TemplateBody=t.to_json(), Parameters=[
def main(**params):
    """Build the SOCA compute-node CloudFormation template.

    Constructs a LaunchTemplate (with a bash bootstrap UserData script),
    then either a SpotFleet or an AutoScalingGroup depending on the spot
    configuration, plus optional FSx for Lustre and an anonymous-metrics
    custom resource.

    :param params: scheduler/job configuration dict (InstanceType, BaseOS,
        SpotPrice, DesiredCapacity, FSxLustreConfiguration, ...).
    :return: dict {'success': bool, 'output': YAML template or error text}.

    NOTE(review): this function was reconstructed from whitespace-mangled
    source; the embedded shell script's exact line breaks are restored on a
    best-effort basis.
    """
    try:
        # Metadata
        t = Template()
        t.set_version("2010-09-09")
        t.set_description("(SOCA) - Base template to deploy compute nodes.")
        allow_anonymous_data_collection = params["MetricCollectionAnonymous"]
        debug = False
        mip_usage = False
        # "c5.large+m5.large" style lists are split on '+'.
        instances_list = params["InstanceType"].split("+")
        asg_lt = asg_LaunchTemplate()
        ltd = LaunchTemplateData("NodeLaunchTemplateData")
        mip = MixedInstancesPolicy()
        stack_name = Ref("AWS::StackName")

        # Begin LaunchTemplateData
        # Bootstrap script: installs awscli, exports SOCA_* job metadata to
        # /etc/environment, mounts EFS, configures chrony, then hands off to
        # ComputeNode.sh. Rendered through Sub(), so ${AWS::StackName} and
        # ${AWS::Region} are CloudFormation substitutions.
        UserData = '''#!/bin/bash -xe
export PATH=$PATH:/usr/local/bin
if [[ "''' + params['BaseOS'] + '''" == "centos7" ]] || [[ "''' + params['BaseOS'] + '''" == "rhel7" ]];
then
    EASY_INSTALL=$(which easy_install-2.7)
    $EASY_INSTALL pip
    PIP=$(which pip2.7)
    $PIP install awscli
    yum install -y nfs-utils # enforce install of nfs-utils
else
    # Upgrade awscli on ALI (do not use yum)
    EASY_INSTALL=$(which easy_install-2.7)
    $EASY_INSTALL pip
    PIP=$(which pip)
    $PIP install awscli --upgrade
fi
if [[ "''' + params['BaseOS'] + '''" == "amazonlinux2" ]];
then
    /usr/sbin/update-motd --disable
fi

GET_INSTANCE_TYPE=$(curl http://169.254.169.254/latest/meta-data/instance-type)
echo export "SOCA_CONFIGURATION="''' + str(params['ClusterId']) + '''"" >> /etc/environment
echo export "SOCA_BASE_OS="''' + str(params['BaseOS']) + '''"" >> /etc/environment
echo export "SOCA_JOB_QUEUE="''' + str(params['JobQueue']) + '''"" >> /etc/environment
echo export "SOCA_JOB_OWNER="''' + str(params['JobOwner']) + '''"" >> /etc/environment
echo export "SOCA_JOB_NAME="''' + str(params['JobName']) + '''"" >> /etc/environment
echo export "SOCA_JOB_PROJECT="''' + str(params['JobProject']) + '''"" >> /etc/environment
echo export "SOCA_VERSION="''' + str(params['Version']) + '''"" >> /etc/environment
echo export "SOCA_JOB_EFA="''' + str(params['Efa']).lower() + '''"" >> /etc/environment
echo export "SOCA_JOB_ID="''' + str(params['JobId']) + '''"" >> /etc/environment
echo export "SOCA_SCRATCH_SIZE=''' + str(params['ScratchSize']) + '''" >> /etc/environment
echo export "SOCA_INSTALL_BUCKET="''' + str(params['S3Bucket']) + '''"" >> /etc/environment
echo export "SOCA_INSTALL_BUCKET_FOLDER="''' + str(params['S3InstallFolder']) + '''"" >> /etc/environment
echo export "SOCA_FSX_LUSTRE_BUCKET="''' + str(params['FSxLustreConfiguration']['fsx_lustre']).lower() + '''"" >> /etc/environment
echo export "SOCA_FSX_LUSTRE_DNS="''' + str(params['FSxLustreConfiguration']['existing_fsx']).lower() + '''"" >> /etc/environment
echo export "SOCA_INSTANCE_TYPE=$GET_INSTANCE_TYPE" >> /etc/environment
echo export "SOCA_INSTANCE_HYPERTHREADING="''' + str(params['ThreadsPerCore']).lower() + '''"" >> /etc/environment
echo export "SOCA_HOST_SYSTEM_LOG="/apps/soca/''' + str(params['ClusterId']) + '''/cluster_node_bootstrap/logs/''' + str(params['JobId']) + '''/$(hostname -s)"" >> /etc/environment
echo export "AWS_STACK_ID=${AWS::StackName}" >> /etc/environment
echo export "AWS_DEFAULT_REGION=${AWS::Region}" >> /etc/environment
source /etc/environment
AWS=$(which aws)

# Give yum permission to the user on this specific machine
echo "''' + params['JobOwner'] + ''' ALL=(ALL) /bin/yum" >> /etc/sudoers

mkdir -p /apps
mkdir -p /data

# Mount EFS
echo "''' + params['EFSDataDns'] + ''':/ /data nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 0 0" >> /etc/fstab
echo "''' + params['EFSAppsDns'] + ''':/ /apps nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 0 0" >> /etc/fstab
mount -a

# Configure NTP
yum remove -y ntp
yum install -y chrony
mv /etc/chrony.conf /etc/chrony.conf.original
echo -e """
# use the local instance NTP service, if available
server 169.254.169.123 prefer iburst minpoll 4 maxpoll 4

# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
# !!! [BEGIN] SOCA REQUIREMENT
# You will need to open UDP egress traffic on your security group if you want to enable public pool
#pool 2.amazon.pool.ntp.org iburst
# !!! [END] SOCA REQUIREMENT

# Record the rate at which the system clock gains/losses time.
driftfile /var/lib/chrony/drift

# Allow the system clock to be stepped in the first three updates
# if its offset is larger than 1 second.
makestep 1.0 3

# Specify file containing keys for NTP authentication.
keyfile /etc/chrony.keys

# Specify directory for log files.
logdir /var/log/chrony

# save data between restarts for fast re-load
dumponexit
dumpdir /var/run/chrony
""" > /etc/chrony.conf
systemctl enable chronyd

# Prepare Log folder
mkdir -p $SOCA_HOST_SYSTEM_LOG
echo "@reboot /bin/bash /apps/soca/$SOCA_CONFIGURATION/cluster_node_bootstrap/ComputeNodePostReboot.sh >> $SOCA_HOST_SYSTEM_LOG/ComputeNodePostReboot.log 2>&1" | crontab -
$AWS s3 cp s3://$SOCA_INSTALL_BUCKET/$SOCA_INSTALL_BUCKET_FOLDER/scripts/config.cfg /root/
/bin/bash /apps/soca/$SOCA_CONFIGURATION/cluster_node_bootstrap/ComputeNode.sh ''' + params['SchedulerHostname'] + ''' >> $SOCA_HOST_SYSTEM_LOG/ComputeNode.sh.log 2>&1'''

        # A SpotFleet is used (instead of an ASG) when a spot price is set
        # AND more than one instance is requested or more than one instance
        # type is allowed.
        SpotFleet = True if ((params["SpotPrice"] is not False) and
                             (int(params["DesiredCapacity"]) > 1 or len(instances_list) > 1)) else False
        ltd.EbsOptimized = True
        for instance in instances_list:
            if "t2." in instance:
                # t2 instances do not support EBS optimization.
                ltd.EbsOptimized = False
            else:
                # metal + t2 does not support CpuOptions
                if "metal" not in instance and (SpotFleet is False or len(instances_list) == 1):
                    # Spotfleet with multiple instance types doesn't support CpuOptions
                    # So we can't add CpuOptions if SpotPrice is specified and when multiple instances are specified
                    # NOTE(review): assumes 2 hardware threads per core when
                    # hyperthreading is requested — confirm for the instance
                    # families in use.
                    ltd.CpuOptions = CpuOptions(
                        CoreCount=int(params["CoreCount"]),
                        ThreadsPerCore=1 if params["ThreadsPerCore"] is False else 2)

        ltd.IamInstanceProfile = IamInstanceProfile(
            Arn=params["ComputeNodeInstanceProfileArn"])
        ltd.KeyName = params["SSHKeyPair"]
        ltd.ImageId = params["ImageId"]
        if params["SpotPrice"] is not False and params["SpotAllocationCount"] is False:
            # Pure spot via the launch template (no mixed allocation).
            ltd.InstanceMarketOptions = InstanceMarketOptions(
                MarketType="spot",
                SpotOptions=SpotOptions(
                    MaxPrice=Ref("AWS::NoValue")
                    if params["SpotPrice"] == "auto"
                    else str(params["SpotPrice"])  # auto -> cap at OD price
                ))
        ltd.InstanceType = instances_list[0]
        ltd.NetworkInterfaces = [
            NetworkInterfaces(InterfaceType="efa"
                              if params["Efa"] is not False
                              else Ref("AWS::NoValue"),
                              DeleteOnTermination=True,
                              DeviceIndex=0,
                              Groups=[params["SecurityGroupId"]])
        ]
        ltd.UserData = Base64(Sub(UserData))
        # Root volume; device name differs between Amazon Linux 2 and others.
        ltd.BlockDeviceMappings = [
            BlockDeviceMapping(
                DeviceName="/dev/xvda"
                if params["BaseOS"] == "amazonlinux2" else "/dev/sda1",
                Ebs=EBSBlockDevice(VolumeSize=params["RootSize"],
                                   VolumeType="gp2",
                                   DeleteOnTermination="false"
                                   if params["KeepEbs"] is True else "true",
                                   Encrypted=True))
        ]
        if int(params["ScratchSize"]) > 0:
            # Optional scratch volume; io1 only when provisioned IOPS requested.
            ltd.BlockDeviceMappings.append(
                BlockDeviceMapping(
                    DeviceName="/dev/xvdbx",
                    Ebs=EBSBlockDevice(
                        VolumeSize=params["ScratchSize"],
                        VolumeType="io1"
                        if int(params["VolumeTypeIops"]) > 0 else "gp2",
                        Iops=params["VolumeTypeIops"]
                        if int(params["VolumeTypeIops"]) > 0
                        else Ref("AWS::NoValue"),
                        DeleteOnTermination="false"
                        if params["KeepEbs"] is True else "true",
                        Encrypted=True)))
        # _soca_ prefixes are rewritten to "soca:" just before returning.
        ltd.TagSpecifications = [
            ec2.TagSpecifications(
                ResourceType="instance",
                Tags=base_Tags(Name=str(params["ClusterId"]) +
                               "-compute-job-" + str(params["JobId"]),
                               _soca_JobId=str(params["JobId"]),
                               _soca_JobName=str(params["JobName"]),
                               _soca_JobQueue=str(params["JobQueue"]),
                               _soca_StackId=stack_name,
                               _soca_JobOwner=str(params["JobOwner"]),
                               _soca_JobProject=str(params["JobProject"]),
                               _soca_KeepForever=str(
                                   params["KeepForever"]).lower(),
                               _soca_ClusterId=str(params["ClusterId"]),
                               _soca_NodeType="soca-compute-node"))
        ]
        # End LaunchTemplateData

        # Begin Launch Template Resource
        lt = LaunchTemplate("NodeLaunchTemplate")
        lt.LaunchTemplateName = params["ClusterId"] + "-" + str(params["JobId"])
        lt.LaunchTemplateData = ltd
        t.add_resource(lt)
        # End Launch Template Resource

        if SpotFleet is True:
            # SpotPrice is defined and DesiredCapacity > 1 or need to try more than 1 instance_type
            # Create SpotFleet

            # Begin SpotFleetRequestConfigData Resource
            sfrcd = ec2.SpotFleetRequestConfigData()
            sfrcd.AllocationStrategy = params["SpotAllocationStrategy"]
            sfrcd.ExcessCapacityTerminationPolicy = "noTermination"
            sfrcd.IamFleetRole = params["SpotFleetIAMRoleArn"]
            sfrcd.InstanceInterruptionBehavior = "terminate"
            if params["SpotPrice"] != "auto":
                sfrcd.SpotPrice = str(params["SpotPrice"])
            sfrcd.TargetCapacity = params["DesiredCapacity"]
            sfrcd.Type = "maintain"
            sfltc = ec2.LaunchTemplateConfigs()
            sflts = ec2.LaunchTemplateSpecification(
                LaunchTemplateId=Ref(lt),
                Version=GetAtt(lt, "LatestVersionNumber"))
            sfltc.LaunchTemplateSpecification = sflts
            sfltc.Overrides = []
            # One override per (subnet, instance type) combination.
            for subnet in params["SubnetId"]:
                for instance in instances_list:
                    sfltc.Overrides.append(
                        ec2.LaunchTemplateOverrides(InstanceType=instance,
                                                    SubnetId=subnet))
            sfrcd.LaunchTemplateConfigs = [sfltc]
            # NOTE(review): this TagSpecifications value is built but never
            # attached to sfrcd — confirm whether that is intentional.
            TagSpecifications = ec2.SpotFleetTagSpecification(
                ResourceType="spot-fleet-request",
                Tags=base_Tags(Name=str(params["ClusterId"]) +
                               "-compute-job-" + str(params["JobId"]),
                               _soca_JobId=str(params["JobId"]),
                               _soca_JobName=str(params["JobName"]),
                               _soca_JobQueue=str(params["JobQueue"]),
                               _soca_StackId=stack_name,
                               _soca_JobOwner=str(params["JobOwner"]),
                               _soca_JobProject=str(params["JobProject"]),
                               _soca_KeepForever=str(
                                   params["KeepForever"]).lower(),
                               _soca_ClusterId=str(params["ClusterId"]),
                               _soca_NodeType="soca-compute-node"))
            # End SpotFleetRequestConfigData Resource

            # Begin SpotFleet Resource
            spotfleet = ec2.SpotFleet("SpotFleet")
            spotfleet.SpotFleetRequestConfigData = sfrcd
            t.add_resource(spotfleet)
            # End SpotFleet Resource
        else:
            asg_lt.LaunchTemplateSpecification = LaunchTemplateSpecification(
                LaunchTemplateId=Ref(lt),
                Version=GetAtt(lt, "LatestVersionNumber"))
            asg_lt.Overrides = []
            for instance in instances_list:
                asg_lt.Overrides.append(
                    LaunchTemplateOverrides(InstanceType=instance))

            # Begin InstancesDistribution
            if params["SpotPrice"] is not False and \
                    params["SpotAllocationCount"] is not False and \
                    (int(params["DesiredCapacity"]) - int(params["SpotAllocationCount"])) > 0:
                mip_usage = True
                idistribution = InstancesDistribution()
                idistribution.OnDemandAllocationStrategy = "prioritized"  # only supported value
                idistribution.OnDemandBaseCapacity = params[
                    "DesiredCapacity"] - params["SpotAllocationCount"]
                idistribution.OnDemandPercentageAboveBaseCapacity = "0"  # force the other instances to be SPOT
                idistribution.SpotMaxPrice = Ref(
                    "AWS::NoValue") if params["SpotPrice"] == "auto" else str(
                        params["SpotPrice"])
                idistribution.SpotAllocationStrategy = params[
                    'SpotAllocationStrategy']
                mip.InstancesDistribution = idistribution
            # End MixedPolicyInstance

            # Begin AutoScalingGroup Resource
            asg = AutoScalingGroup("AutoScalingComputeGroup")
            asg.DependsOn = "NodeLaunchTemplate"
            if mip_usage is True or instances_list.__len__() > 1:
                mip.LaunchTemplate = asg_lt
                asg.MixedInstancesPolicy = mip
            else:
                asg.LaunchTemplate = LaunchTemplateSpecification(
                    LaunchTemplateId=Ref(lt),
                    Version=GetAtt(lt, "LatestVersionNumber"))
            # Fixed-size group: min == max == desired.
            asg.MinSize = int(params["DesiredCapacity"])
            asg.MaxSize = int(params["DesiredCapacity"])
            asg.VPCZoneIdentifier = params["SubnetId"]

            if params["PlacementGroup"] is True:
                pg = PlacementGroup("ComputeNodePlacementGroup")
                pg.Strategy = "cluster"
                t.add_resource(pg)
                asg.PlacementGroup = Ref(pg)

            asg.Tags = Tags(Name=str(params["ClusterId"]) + "-compute-job-" +
                            str(params["JobId"]),
                            _soca_JobId=str(params["JobId"]),
                            _soca_JobName=str(params["JobName"]),
                            _soca_JobQueue=str(params["JobQueue"]),
                            _soca_StackId=stack_name,
                            _soca_JobOwner=str(params["JobOwner"]),
                            _soca_JobProject=str(params["JobProject"]),
                            _soca_KeepForever=str(params["KeepForever"]).lower(),
                            _soca_ClusterId=str(params["ClusterId"]),
                            _soca_NodeType="soca-compute-node")
            t.add_resource(asg)
            # End AutoScalingGroup Resource

        # Begin FSx for Lustre
        if params["FSxLustreConfiguration"]["fsx_lustre"] is not False:
            if params["FSxLustreConfiguration"]["existing_fsx"] is False:
                fsx_lustre = FileSystem("FSxForLustre")
                fsx_lustre.FileSystemType = "LUSTRE"
                fsx_lustre.StorageCapacity = params["FSxLustreConfiguration"][
                    "capacity"]
                fsx_lustre.SecurityGroupIds = [params["SecurityGroupId"]]
                fsx_lustre.SubnetIds = params["SubnetId"]
                if params["FSxLustreConfiguration"]["s3_backend"] is not False:
                    # import_path overrides the raw s3_backend; export goes to
                    # <s3_backend>/<cluster>-fsxoutput/job-<id>/ otherwise.
                    fsx_lustre_configuration = LustreConfiguration()
                    fsx_lustre_configuration.ImportPath = params[
                        "FSxLustreConfiguration"]["import_path"] if params[
                            "FSxLustreConfiguration"][
                                "import_path"] is not False else params[
                                    "FSxLustreConfiguration"]["s3_backend"]
                    fsx_lustre_configuration.ExportPath = params[
                        "FSxLustreConfiguration"]["import_path"] if params[
                            "FSxLustreConfiguration"][
                                "import_path"] is not False else params[
                                    "FSxLustreConfiguration"][
                                        "s3_backend"] + "/" + params[
                                            "ClusterId"] + "-fsxoutput/job-" + params[
                                                "JobId"] + "/"
                    fsx_lustre.LustreConfiguration = fsx_lustre_configuration
                fsx_lustre.Tags = base_Tags(
                    # False disable PropagateAtLaunch
                    Name=str(params["ClusterId"] + "-compute-job-" +
                             params["JobId"]),
                    _soca_JobId=str(params["JobId"]),
                    _soca_JobName=str(params["JobName"]),
                    _soca_JobQueue=str(params["JobQueue"]),
                    _soca_StackId=stack_name,
                    _soca_JobOwner=str(params["JobOwner"]),
                    _soca_JobProject=str(params["JobProject"]),
                    _soca_KeepForever=str(params["KeepForever"]).lower(),
                    _soca_FSx="true",
                    _soca_ClusterId=str(params["ClusterId"]),
                )
                t.add_resource(fsx_lustre)
        # End FSx For Lustre

        # Begin Custom Resource
        # Change Mapping to No if you want to disable this
        if allow_anonymous_data_collection is True:
            metrics = CustomResourceSendAnonymousMetrics("SendAnonymousData")
            metrics.ServiceToken = params["SolutionMetricLambda"]
            metrics.DesiredCapacity = str(params["DesiredCapacity"])
            metrics.InstanceType = str(params["InstanceType"])
            metrics.Efa = str(params["Efa"])
            metrics.ScratchSize = str(params["ScratchSize"])
            metrics.RootSize = str(params["RootSize"])
            metrics.SpotPrice = str(params["SpotPrice"])
            metrics.BaseOS = str(params["BaseOS"])
            metrics.StackUUID = str(params["StackUUID"])
            metrics.KeepForever = str(params["KeepForever"])
            metrics.FsxLustre = str(params["FSxLustreConfiguration"])
            t.add_resource(metrics)
        # End Custom Resource

        if debug is True:
            print(t.to_json())

        # Tags must use "soca:<Key>" syntax
        template_output = t.to_yaml().replace("_soca_", "soca:")
        return {'success': True, 'output': template_output}

    except Exception as e:
        # Return the error (with file/line context) instead of raising, so
        # the scheduler hook can report it.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        return {
            'success': False,
            'output': 'cloudformation_builder.py: ' +
            (str(e) + ': error :' + str(exc_type) + ' ' + str(fname) + ' ' +
             str(exc_tb.tb_lineno))
        }
# Export the VPC id, security group id, and subnet lists so other stacks can
# Fn::ImportValue them by "<stackname>-..." name.
# Subnets are named S0..S11: even indices are public, odd are private.
t.add_output([
    Output('vpc',
           Value=Ref("vpc"),
           Export=Export(f"{stackname}-VPCID")),
    Output('sgid',
           Value=Ref("CustomSg"),
           Export=Export(f"{stackname}-SecurityGroupID")),
    Output('PublicSubnets',
           Value=Join(", ", [Ref(f"S{i}") for i in range(0, 12, 2)]),
           Export=Export(f"{stackname}-PublicSubnetsList")),
    # Fix: output logical ID was misspelled 'PrivateSubnests'; the Export
    # name (which importers reference) is unchanged.
    Output('PrivateSubnets',
           Value=Join(", ", [Ref(f"S{i}") for i in range(1, 12, 2)]),
           Export=Export(f"{stackname}-PrivateSubnetsList"))
])

# to be removed
cfn = client('cloudformation')  # hoisted: was called once per statement below
cfn.validate_template(TemplateBody=t.to_json())
stack_kwargs = {'StackName': stackname, 'TemplateBody': t.to_yaml()}
print(cfn.create_stack(**stack_kwargs)['StackId'])
"sudo cp /root/work/python-troposphere/config/cpNginx.conf /etc/nginx/conf.d/ &>>/root/user-data.log\n", "sudo systemctl start nginx &>>/root/user-data.log" ])))) # Associate the EIP to the webServer EC2 instance webServerEIPAssociation = t.add_resource( EIPAssociation("webServerEIPAssociation", AllocationId=GetAtt(webServerEIP, 'AllocationId'), InstanceId=Ref(pubInst01))) #### Outputs #### t.add_output([ Output("webServerEIP", Description="Elastic IP of the web server EC2", Value=GetAtt(webServerEIP, 'AllocationId')) ]) print(t.to_json()) # Delete output file if already exists os.remove( '/Users/camelia.pohoata/IdeaProjects/FlyPikachu/python-troposphere/templates/template01.json' ) # Write template to file fhandle = open( '/Users/camelia.pohoata/IdeaProjects/FlyPikachu/python-troposphere/templates/template01.json', 'w') fhandle.write(t.to_json()) fhandle.close()
# CloudWatch Events rule that fires the Chaos Lambda on the configured
# schedule expression.
chaos_lambda_rule = t.add_resource(
    Rule("ChaosLambdaRule",
         Description="Trigger Chaos Lambda according to a schedule",
         State="ENABLED",
         ScheduleExpression=Ref(chaos_schedule),
         Targets=[
             Target(Arn=GetAtt(lambda_function, "Arn"),
                    Id="ChaosLambdaRuleTarget")
         ]))

# Grant the events service permission to invoke the function from that rule.
t.add_resource(
    Permission("ChaosLambdaRulePermission",
               FunctionName=GetAtt(lambda_function, "Arn"),
               SourceArn=GetAtt(chaos_lambda_rule, "Arn"),
               Principal="events.amazonaws.com",
               Action="lambda:InvokeFunction"))

t.add_output(
    Output("ChaosLambdaFunctionOutput",
           Value=Ref(lambda_function),
           Description="The Chaos Lambda Function"))
t.add_output(
    Output("ChaosLambdaRuleOutput",
           Value=Ref(chaos_lambda_rule),
           Description="Rule used to trigger the Chaos Lambda"))

# Write the rendered template to the file named on the command line, or to
# stdout when no argument is given.
template = t.to_json()
if len(sys.argv) > 1:
    # Fix: open(...).write(...) leaked the file handle; the context manager
    # guarantees flush + close.
    with open(sys.argv[1], "w") as out:
        out.write(template + "\n")
else:
    print(template)
# Execution role assumed by the port-allocator Lambda function.
lambda_role = Role(
    'ECSPortAllocatorLambdaRole',
    Path='/',
    AssumeRolePolicyDocument=assume_role_policy_document,
    Policies=[lambda_policy],
)

# The Lambda function itself; its code bundle location comes from the S3
# bucket/key template parameters.
ecs_port_allocator = Function(
    "ECSPortAllocator",
    Handler="ecs_cfn_port_allocator.lambda_handler",
    Role=GetAtt(lambda_role, "Arn"),
    Code=Code(
        S3Bucket=Ref(s3_bucket_param),
        S3Key=Ref(s3_key_param),
    ),
    Runtime="python2.7",
    Timeout="25",
)

# Expose the function ARN so callers of the stack can locate it.
ecs_port_allocator_output = Output(
    'ECSPortAllocatorLambda',
    Description='ECSPortAllocatorLambda',
    Value=GetAtt(ecs_port_allocator, 'Arn'),
)

# Register everything on the shared template.
for parameter in (s3_bucket_param, s3_key_param):
    template.add_parameter(parameter)
for resource in (lambda_role, ecs_port_allocator):
    template.add_resource(resource)
template.add_output(ecs_port_allocator_output)

if __name__ == '__main__':
    print(template.to_json())
"PublicIP", Value=GetAtt(ec2_instance, "PublicIp")
),
# NOTE(review): the fragment above is the tail of an Output inside a
# t.add_output([...]) call that begins before this chunk; kept verbatim.
Output(
    "PrivateIP",
    Value=GetAtt(ec2_instance, "PrivateIp")
),
Output(
    "PublicDNS",
    Value=GetAtt(elb, "DNSName")
),
Output(
    "PrivateDNS",
    Value=GetAtt(ec2_instance, "PrivateDnsName")
)
])

# AWS credentials are supplied on the command line rather than read from
# the environment.
parser = argparse.ArgumentParser()
parser.add_argument("--access-key")
parser.add_argument("--secret-key")
args = parser.parse_args()

# Legacy boto (v2) CloudFormation client.
conn = boto.cloudformation.connect_to_region(
    "us-east-1",
    aws_access_key_id=args.access_key,
    aws_secret_access_key=args.secret_key)

# Create the stack from the rendered template and print its id.
stack_id = conn.create_stack(
    "rzienertTroposphereTest", template_body=t.to_json())

print(stack_id)