def add_instance(self):
    t = self.template
    self.openvpn_instance = t.add_resource(Instance(
        'OpenVpnInstance',
        ImageId=self.sceptre_user_data['ami_map'][self.sceptre_user_data['region']],
        InstanceType=self.sceptre_user_data['instance_type'],
        KeyName=self.sceptre_user_data['key_pair'],
        NetworkInterfaces=[
            NetworkInterfaceProperty(
                AssociatePublicIpAddress=True,
                DeviceIndex=0,
                GroupSet=[self.sceptre_user_data['openvpn_sg']],
                SubnetId=self.sceptre_user_data['subnets']['public_1']
            )
        ],
        UserData=Base64(Join("", [
            "admin_user=", "******", "\n",  # value redacted in the source
            "admin_pw=", self.sceptre_user_data['vpn_admin_pw'], "\n",
            "reroute_gw=1\n",
            "reroute_dns=1\n"
        ])),
        Tags=self.DEFAULT_TAGS + [Tag('Name', self.sceptre_user_data['application'] + '-Instance')]
    ))
    return 0
def ts_add_instance_with_public_ip(t,
                                   security_group,
                                   name='MyInstance',
                                   image_id=None,
                                   subnet_id=None,
                                   tag="aws test instance",
                                   public=True):
    if image_id is None:
        image_id = get_linux2_image_id()
    if subnet_id is None:
        subnet_id = get_first_subnet()
    return t.add_resource(
        Instance(
            name,
            KeyName=get_my_key(),
            InstanceType="m4.xlarge",
            ImageId=image_id,
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    AssociatePublicIpAddress=public,
                    DeviceIndex=0,
                    DeleteOnTermination=True,
                    # associates the security group(s) with the primary interface
                    GroupSet=security_group if isinstance(security_group, list) else [security_group],
                    SubnetId=subnet_id),
            ],
            Tags=Tags(
                Name=tag,
                Application=Ref("AWS::StackName"),
                Developer="cisco::haoru",
            ),
        ))
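# A minimal usage sketch for ts_add_instance_with_public_ip above (not from the
# original source): it assumes the same test helpers (get_my_key,
# get_linux2_image_id, get_first_subnet) are available, and the security group
# ID below is a placeholder.
from troposphere import Template

def demo_ts_add_instance():
    t = Template()
    ts_add_instance_with_public_ip(t, "sg-0123456789abcdef0", name="DemoInstance")
    print(t.to_yaml())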
def addSearchGuard(template, role, subnet, keyname, secgroup, profilename):
    profile = InstanceProfile("sgprofile" + profilename, Path="/", Roles=[Ref(role)])
    template.add_resource(profile)
    instance = Instance(
        "sg" + profilename,
        InstanceType="m4.xlarge",
        ImageId=FindInMap("RegionToAmi", Ref("AWS::Region"), "stable"),
        DisableApiTermination=False,
        IamInstanceProfile=Ref(profile),
        KeyName=Ref(keyname),
        Monitoring=False,
        InstanceInitiatedShutdownBehavior="stop",
        UserData=userdata.from_file("src/bootstrap.sh"),
        NetworkInterfaces=[
            NetworkInterfaceProperty(DeviceIndex=0,
                                     Description="Primary network interface",
                                     SubnetId=Ref(subnet),
                                     DeleteOnTermination=True,
                                     AssociatePublicIpAddress=True,
                                     GroupSet=[Ref(secgroup)])
        ],
        Tags=[
            Tag("Name", "Search Guard " + profilename),
            Tag("sgnodetag", profilename)
        ],
        EbsOptimized=False,
        BlockDeviceMappings=[
            BlockDeviceMapping(DeviceName="/dev/sda1",
                               Ebs=EBSBlockDevice(VolumeSize=25))
        ])
    template.add_resource(instance)
    return instance
def NetworkInterfaces(self):
    return [
        NetworkInterfaceProperty(
            AssociatePublicIpAddress=True,
            DeviceIndex='0',
            GroupSet=[Ref(self.template.sg())],
            SubnetId=Ref(self.template.subnet()),
        )
    ]
def add_instance(self, stack_name, t, i, sg, subnet):
    from troposphere import Base64, FindInMap, GetAtt, Join, Output
    from troposphere import Ref, Tags, Template, Parameter, ec2
    from troposphere.ec2 import PortRange, NetworkAcl, Route, \
        VPCGatewayAttachment, SubnetRouteTableAssociation, Subnet, RouteTable, \
        VPC, NetworkInterfaceProperty, NetworkAclEntry, \
        SubnetNetworkAclAssociation, EIP, Instance, InternetGateway, \
        SecurityGroupRule, SecurityGroup
    from troposphere.policies import CreationPolicy, ResourceSignal
    from troposphere.cloudformation import Init, InitFile, InitFiles, \
        InitConfig, InitService, InitServices

    instance = ec2.Instance("node%s" % i)
    instance.ImageId = self.app.config.get('provision', 'aws_ec2_ami_id')
    instance.InstanceType = self.app.config.get('provision', 'aws_ec2_instance_type')
    instance.KeyName = self.app.config.get('provision', 'aws_ec2_key_name')
    instance.NetworkInterfaces = [
        NetworkInterfaceProperty(GroupSet=[sg],
                                 AssociatePublicIpAddress='true',
                                 DeviceIndex='0',
                                 DeleteOnTermination='true',
                                 SubnetId=subnet)
    ]
    instance.UserData = Base64(
        Join('', [
            '#!/bin/bash -xe\n',
            'apt update -y -q\n',
            'apt install -y -q python3-pip\n',
            'apt install -y -q htop tmux zsh jq || true\n',
            'apt remove -y -q python3-yaml\n',
            'pip3 install cement colorlog\n',
            'pip3 install %s\n' % (self.app.config.get('provision', 'pip_install') % self.app.config['hydra']),
            'su -l -c "hydra client join-network --name=%s --set-default --install" ubuntu\n' % stack_name
        ]))
    t.add_resource(instance)
    t.add_output([
        Output(
            "ID%s" % i,
            Description="InstanceId of the newly created EC2 instance",
            Value=Ref(instance),
        ),
        Output(
            "IP%s" % i,
            Description="Public IP address of the newly created EC2 instance",
            Value=GetAtt(instance, "PublicIp"),
        ),
    ])
def ec2_instance(name, ami_id, keyname, instance_type, sg_ids, subnet_id):
    """Build an EC2 instance resource with a single public network interface."""
    # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html
    resource_name = '{}EC2Instance'.format(sanitize_resource_name(name))
    return Instance(
        resource_name,
        ImageId=ami_id,
        InstanceType=instance_type,
        KeyName=keyname,
        NetworkInterfaces=[NetworkInterfaceProperty(
            AssociatePublicIpAddress=True,
            DeviceIndex=0,
            GroupSet=sg_ids,
            SubnetId=subnet_id,
        )],
        Tags=tags(name)
    )
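# A minimal usage sketch for ec2_instance() above (not from the original
# source): all IDs below are placeholders, and sanitize_resource_name()/tags()
# are assumed to come from the same module.
from troposphere import Template

def demo_ec2_instance_template():
    t = Template()
    t.add_resource(ec2_instance(
        name='demo-app',
        ami_id='ami-0123456789abcdef0',
        keyname='my-keypair',
        instance_type='t3.micro',
        sg_ids=['sg-0123456789abcdef0'],
        subnet_id='subnet-0123456789abcdef0',
    ))
    return t.to_json()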
def emit_configuration():
    vpc = cfn.vpcs[0]
    region = Ref("AWS::Region")

    chefserver_instance_class = template.add_parameter(
        Parameter(
            'ChefServerInstanceType',
            Type='String',
            Default='t2.medium',
            Description='Chef Server instance type',
            AllowedValues=cfn.usable_instances(),
            ConstraintDescription='Instance size must be a valid instance type'
        ))

    # Create IAM role for the chefserver instance
    # load the policies
    default_policy = json.loads(
        cfn.load_template("default_policy.json.j2", {
            "env": CLOUDENV,
            "cloud": CLOUDNAME,
            "region": "us-east-1"
        }))

    chefserver_role_name = '.'.join(['chefserver', CLOUDNAME, CLOUDENV])
    chefserver_iam_role = template.add_resource(
        Role("ChefServerIamRole",
             AssumeRolePolicyDocument=ASSUME_ROLE_POLICY,
             Path="/",
             Policies=[
                 Policy(PolicyName="ChefServerPolicy",
                        PolicyDocument=json.loads(
                            cfn.load_template(
                                "chefserver_policy.json.j2", {
                                    "env": CLOUDENV,
                                    "cloud": CLOUDNAME,
                                    "region": "us-east-1"
                                }))),
                 Policy(PolicyName="ChefserverDefaultPolicy",
                        PolicyDocument=default_policy)
             ],
             DependsOn=vpc.title))

    chefserver_instance_profile = template.add_resource(
        InstanceProfile("chefserverInstanceProfile",
                        Path="/",
                        Roles=[Ref(chefserver_iam_role)],
                        DependsOn=chefserver_iam_role.title))

    chefserver_user_data = cfn.load_template("chefserver-init.bash.j2", {
        "env": CLOUDENV,
        "cloud": CLOUDNAME,
        "deploy": "chefserver"
    })

    chefserver_ingress_rules = [
        SecurityGroupRule(IpProtocol=p[0],
                          CidrIp='{0}.0.0/16'.format(CIDR_PREFIX),
                          FromPort=p[1],
                          ToPort=p[1]) for p in [('tcp', 80), ('tcp', 443)]
    ]

    chefserver_sg = template.add_resource(
        SecurityGroup("ChefServer",
                      GroupDescription="Security Group for the Chef server",
                      VpcId=Ref(vpc),
                      SecurityGroupIngress=chefserver_ingress_rules,
                      DependsOn=vpc.title))

    chefserver_name = cfn.sanitize_id("ChefServer", CLOUDNAME, CLOUDENV)
    chefserver_instance = template.add_resource(
        Instance(chefserver_name,
                 DependsOn=vpc.title,
                 InstanceType=Ref(chefserver_instance_class),
                 KeyName=Ref(cfn.keyname),
                 SourceDestCheck=False,
                 ImageId=FindInMap('RegionMap', region, int(cfn.Amis.EBS)),
                 NetworkInterfaces=[
                     NetworkInterfaceProperty(
                         Description='Network interface for {0}'.format(chefserver_name),
                         GroupSet=[Ref(chefserver_sg)],
                         SubnetId=Ref(cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.PLATFORM)[0]),
                         AssociatePublicIpAddress=True,
                         DeviceIndex=0,
                         DeleteOnTermination=True)
                 ],
                 BlockDeviceMappings=[
                     BlockDeviceMapping(DeviceName="/dev/sda1",
                                        Ebs=EBSBlockDevice(
                                            VolumeSize=50,
                                            DeleteOnTermination=False))
                 ]))
                CidrIp='0.0.0.0/0'
            )
        ],
        VpcId=Ref(vpc)
    )
)

web_instance = t.add_resource(
    ec2.Instance(
        "TestWebInstance",
        ImageId='ami-0bea7fd38fabe821a',
        InstanceType='t2.micro',
        NetworkInterfaces=[
            NetworkInterfaceProperty(
                GroupSet=[Ref(instance_sg)],
                AssociatePublicIpAddress='true',
                DeviceIndex='0',
                SubnetId=Ref(subnet1)
            )
        ]
    )
)

api_instance = t.add_resource(
    ec2.Instance(
        "TestApiInstance",
        ImageId='ami-0bea7fd38fabe821a',
        InstanceType='t2.micro',
        NetworkInterfaces=[
            NetworkInterfaceProperty(
                GroupSet=[Ref(instance_sg)],
                AssociatePublicIpAddress='true',
def main(): t = Template() t.add_version("2010-09-09") t.add_description( "Currently supporting RHEL/CentOS 7.5. Setup IAM role and security groups, " "launch instance, create/attach 10 EBS volumes, install/fix ZFS " "(http://download.zfsonlinux.org/epel/zfs-release.el7_5.noarch.rpm), " "create zfs RAID6 pool, setup NFS server, export NFS share") InstUserData = list() InstUserData = [ '#!/usr/bin/env bash\n', '\n', 'set -x\n', '\n', '##exit 0\n', # use this to disable all user-data and bring up files '\n', 'zfs_pool_name="', Ref('ZfsPool'), '"\n', 'zfs_mount_point="', Ref('ZfsMountPoint'), '"\n', 'nfs_cidr_block="', Ref('NFSCidr'), '"\n', 'nfs_opts="', Ref('NFSOpts'), '"\n', 'my_wait_handle="', Ref('NFSInstanceWaitHandle'), '"\n', '\n', ] with open( '_include/Tropo_build_zfs_export_nfs.sh', 'r', ) as ud_file: user_data_file = ud_file.readlines() for l in user_data_file: InstUserData.append(l) t.add_metadata({ 'AWS::CloudFormation::Interface': { 'ParameterGroups': [{ 'Label': { 'default': 'Instance Configuration' }, 'Parameters': [ "OperatingSystem", "VPCId", "Subnet", "UsePublicIp", "CreateElasticIP", "EC2KeyName", "NFSInstanceType", "SshAccessCidr", "ExistingSecurityGroup", "ExistingPlacementGroup", "S3BucketName" ] }, { 'Label': { 'default': 'Storage Options - Required' }, 'Parameters': [ "RAIDLevel", "VolumeSize", "VolumeType", "EBSVolumeType", "VolumeIops" ] }, { 'Label': { 'default': 'ZFS Pool and FS Options - Required' }, 'Parameters': ["ZfsPool", "ZfsMountPoint"] }, { 'Label': { 'default': 'NFS Options - Required' }, 'Parameters': ["NFSCidr", "NFSOpts"] }], 'ParameterLabels': { 'OperatingSystem': { 'default': 'Operating System of AMI' }, 'VPCId': { 'default': 'VPC ID' }, 'Subnet': { 'default': 'Subnet ID' }, 'UsePublicIp': { 'default': 'Assign a Public IP ' }, 'CreateElasticIP': { 'default': 'Create and use an EIP ' }, 'EC2KeyName': { 'default': 'EC2 Key Name' }, 'NFSInstanceType': { 'default': 'Instance Type' }, 'SshAccessCidr': { 'default': 'SSH Access CIDR Block' }, 'ExistingSecurityGroup': { 'default': 'OPTIONAL: Existing Security Group' }, 'ExistingPlacementGroup': { 'default': 'OPTIONAL: Existing Placement Group' }, 'S3BucketName': { 'default': 'Optional S3 Bucket Name' }, 'RAIDLevel': { 'default': 'RAID Level' }, 'VolumeSize': { 'default': 'Volume size of the EBS vol' }, 'VolumeType': { 'default': 'Volume type of the EBS vol' }, 'EBSVolumeType': { 'default': 'Volume type of the EBS vol' }, 'VolumeIops': { 'default': 'IOPS for each EBS vol (only for io1)' }, 'ZfsPool': { 'default': 'ZFS pool name' }, 'ZfsMountPoint': { 'default': 'Mount Point' }, 'NFSCidr': { 'default': 'NFS CIDR block for mounts' }, 'NFSOpts': { 'default': 'NFS options' }, } } }) EC2KeyName = t.add_parameter( Parameter( 'EC2KeyName', Type="AWS::EC2::KeyPair::KeyName", Description= "Name of an existing EC2 KeyPair to enable SSH access to the instance.", ConstraintDescription="REQUIRED: Must be a valud EC2 key pair")) OperatingSystem = t.add_parameter( Parameter('OperatingSystem', Type="String", Description="Operating System", Default="centos7", AllowedValues=[ "alinux2", "centos7", "rhel7", ], ConstraintDescription="Must be: alinux2, centos7, rhel7")) NFSInstanceType = t.add_parameter( Parameter( 'NFSInstanceType', Type="String", Description="NFS instance type", Default="r4.16xlarge", AllowedValues=[ "m4.16xlarge", "m4.10xlarge", "r4.16xlarge", "c8.8xlarge" ], ConstraintDescription="Must an EC2 instance type from the list")) VolumeType = t.add_parameter( Parameter( 'VolumeType', Type="String", 
Description="Type of EBS volume", Default="EBS", AllowedValues=["EBS", "InstanceStore"], ConstraintDescription="Volume type has to EBS or InstanceStore")) EBSVolumeType = t.add_parameter( Parameter('EBSVolumeType', Description="Type of EBS volumes to create", Type="String", Default="io1", ConstraintDescription="Must be a either: io1, gp2, st1", AllowedValues=["io1", "gp2", "st1"])) VolumelSize = t.add_parameter( Parameter('VolumeSize', Type="Number", Default="500", Description="Volume size in GB")) VolumeIops = t.add_parameter( Parameter('VolumeIops', Type="Number", Default="20000", Description="IOPS for the EBS volume")) RAIDLevel = t.add_parameter( Parameter( 'RAIDLevel', Description="RAID Level, currently only 6 (8+2p) is supported", Type="String", Default="0", AllowedValues=["0"], ConstraintDescription="Must be 0")) ZfsPool = t.add_parameter( Parameter('ZfsPool', Description="ZFS pool name", Type="String", Default="v01")) ZfsMountPoint = t.add_parameter( Parameter( 'ZfsMountPoint', Description= "ZFS mount point, absolute path will be /pool_name/mount_point (e.g. /v01/testzfs)", Type="String", Default="testzfs")) VPCId = t.add_parameter( Parameter('VPCId', Type="AWS::EC2::VPC::Id", Description="VPC Id for this instance")) ExistingPlacementGroup = t.add_parameter( Parameter('ExistingPlacementGroup', Type="String", Description="OPTIONAL: Existing placement group")) Subnet = t.add_parameter( Parameter('Subnet', Type="AWS::EC2::Subnet::Id", Description="Subnet IDs")) ExistingSecurityGroup = t.add_parameter( Parameter( 'ExistingSecurityGroup', Type="AWS::EC2::SecurityGroup::Id", Description= "OPTIONAL: Choose an existing Security Group ID, e.g. sg-abcd1234") ) UsePublicIp = t.add_parameter( Parameter( 'UsePublicIp', Type="String", Description="Should a public IP address be given to the instance", Default="true", ConstraintDescription="true/talse", AllowedValues=["true", "false"])) CreateElasticIP = t.add_parameter( Parameter( 'CreateElasticIP', Type="String", Description= "Create an Elasic IP address, that will be assinged to an instance", Default="true", ConstraintDescription="true/false", AllowedValues=["true", "false"])) S3BucketName = t.add_parameter( Parameter('S3BucketName', Type="String", Description="S3 bucket to allow this instance read access.")) SshAccessCidr = t.add_parameter( Parameter( 'SshAccessCidr', Type="String", Description="CIDR Block for SSH access, default 0.0.0.0/0", Default="0.0.0.0/0", AllowedPattern= "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", ConstraintDescription="Must be a valid CIDR x.x.x.x/x")) NFSCidr = t.add_parameter( Parameter( 'NFSCidr', Type="String", Description= "CIDR for NFS Security Group and NFS clients, to allow all access use 0.0.0.0/0", Default="10.0.0.0/16", AllowedPattern= "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", ConstraintDescription="Must be a valid CIDR x.x.x.x/x")) NFSOpts = t.add_parameter( Parameter( 'NFSOpts', Description="NFS export options", Type="String", Default="(rw,async,no_root_squash,wdelay,no_subtree_check,no_acl)") ) VarLogMessagesFile = t.add_parameter( Parameter( 'VarLogMessagesFile', Type="String", Description= "S3 bucket and file name for log CloudWatch config (e.g. 
s3://jouser-logs/var-log-message.config)" )) RootRole = t.add_resource( iam.Role("RootRole", AssumeRolePolicyDocument={ "Statement": [{ "Effect": "Allow", "Principal": { "Service": ["ec2.amazonaws.com"] }, "Action": ["sts:AssumeRole"] }] }, Policies=[ iam.Policy(PolicyName="s3bucketaccess", PolicyDocument={ "Statement": [{ "Effect": "Allow", "Action": ["s3:GetObject"], "Resource": { "Fn::Join": [ "", [ "arn:aws:s3:::", { "Ref": "S3BucketName" }, "/*" ] ] } }, { "Effect": "Allow", "Action": ["s3:ListBucket"], "Resource": { "Fn::Join": [ "", [ "arn:aws:s3:::", { "Ref": "S3BucketName" } ] ] } }], }), ])) NFSSecurityGroup = t.add_resource( SecurityGroup("NFSSecurityGroup", VpcId=Ref(VPCId), GroupDescription="NFS Secuirty group", SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="2049", ToPort="2049", CidrIp=Ref(NFSCidr), ), ])) SshSecurityGroup = t.add_resource( SecurityGroup("SshSecurityGroup", VpcId=Ref(VPCId), GroupDescription="SSH Secuirty group", SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp=Ref(SshAccessCidr), ), ])) RootInstanceProfile = t.add_resource( InstanceProfile("RootInstanceProfile", Roles=[Ref(RootRole)])) EIPAddress = t.add_resource( EIP('EIPAddress', Domain='vpc', Condition="create_elastic_ip")) tags = Tags(Name=Ref("AWS::StackName")) NFSInstance = t.add_resource( ec2.Instance( 'NFSInstance', ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"), Ref(OperatingSystem)), KeyName=Ref(EC2KeyName), InstanceType=(Ref(NFSInstanceType)), NetworkInterfaces=[ NetworkInterfaceProperty( GroupSet=If("not_existing_sg", [Ref(NFSSecurityGroup), Ref(SshSecurityGroup)], [ Ref(NFSSecurityGroup), Ref(SshSecurityGroup), Ref(ExistingSecurityGroup) ]), AssociatePublicIpAddress=Ref(UsePublicIp), DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(Subnet)) ], IamInstanceProfile=(Ref(RootInstanceProfile)), PlacementGroupName=(Ref(ExistingPlacementGroup)), BlockDeviceMappings=If( 'vol_type_ebs', [ ec2.BlockDeviceMapping( DeviceName="/dev/sdh", Ebs=ec2.EBSBlockDevice( VolumeSize=(Ref(VolumelSize)), DeleteOnTermination="True", Iops=(Ref(VolumeIops)), VolumeType=(Ref(EBSVolumeType)))), ec2.BlockDeviceMapping( DeviceName="/dev/sdi", Ebs=ec2.EBSBlockDevice( VolumeSize=(Ref(VolumelSize)), DeleteOnTermination="True", Iops=(Ref(VolumeIops)), VolumeType=(Ref(EBSVolumeType)))), ec2.BlockDeviceMapping( DeviceName="/dev/sdj", Ebs=ec2.EBSBlockDevice( VolumeSize=(Ref(VolumelSize)), DeleteOnTermination="True", Iops=(Ref(VolumeIops)), VolumeType=(Ref(EBSVolumeType)))), ec2.BlockDeviceMapping( DeviceName="/dev/sdk", Ebs=ec2.EBSBlockDevice( VolumeSize=(Ref(VolumelSize)), DeleteOnTermination="True", Iops=(Ref(VolumeIops)), VolumeType=(Ref(EBSVolumeType)))), ec2.BlockDeviceMapping( DeviceName="/dev/sdl", Ebs=ec2.EBSBlockDevice( VolumeSize=(Ref(VolumelSize)), DeleteOnTermination="True", Iops=(Ref(VolumeIops)), VolumeType=(Ref(EBSVolumeType)))), ec2.BlockDeviceMapping( DeviceName="/dev/sdm", Ebs=ec2.EBSBlockDevice( VolumeSize=(Ref(VolumelSize)), DeleteOnTermination="True", Iops=(Ref(VolumeIops)), VolumeType=(Ref(EBSVolumeType)))), ], {"Ref": "AWS::NoValue"}, ), UserData=Base64(Join('', InstUserData)), )) # End of NFSInstance t.add_mapping( 'AWSRegionAMI', { "ap-northeast-1": { "centos7": "ami-8e8847f1", "rhel7": "ami-6b0d5f0d" }, "ap-northeast-2": { "centos7": "ami-bf9c36d1", "rhel7": "ami-3eee4150" }, "ap-south-1": { "centos7": "ami-1780a878", "rhel7": "ami-5b673c34" }, "ap-southeast-1": { "centos7": "ami-8e0205f2", "rhel7": "ami-76144b0a" }, 
"ap-southeast-2": { "centos7": "ami-d8c21dba", "rhel7": "ami-67589505" }, "ca-central-1": { "centos7": "ami-e802818c", "rhel7": "ami-49f0762d" }, "eu-central-1": { "centos7": "ami-dd3c0f36", "rhel7": "ami-c86c3f23" }, "eu-west-1": { "centos7": "ami-3548444c", "rhel7": "ami-7c491f05" }, "eu-west-2": { "centos7": "ami-00846a67", "rhel7": "ami-7c1bfd1b" }, "eu-west-3": { "centos7": "ami-262e9f5b", "rhel7": "ami-5026902d" }, "sa-east-1": { "centos7": "ami-cb5803a7", "rhel7": "ami-b0b7e3dc" }, "us-east-1": { "centos7": "ami-9887c6e7", "rhel7": "ami-6871a115" }, "us-east-2": { "centos7": "ami-9c0638f9", "rhel7": "ami-03291866" }, "us-west-1": { "centos7": "ami-4826c22b", "rhel7": "ami-18726478" }, "us-west-2": { "centos7": "ami-3ecc8f46", "rhel7": "ami-28e07e50" } }) t.add_condition("not_existing_sg", Equals(Ref(ExistingSecurityGroup), "")) t.add_condition("vol_type_ebs", Equals(Ref(VolumeType), "EBS")) t.add_condition("Has_Public_Ip", Equals(Ref(UsePublicIp), "True")) t.add_condition("Has_Bucket", Not(Equals(Ref(S3BucketName), ""))) t.add_condition("create_elastic_ip", Equals(Ref(CreateElasticIP), "True")) nfswaithandle = t.add_resource( WaitConditionHandle('NFSInstanceWaitHandle')) nfswaitcondition = t.add_resource( WaitCondition("NFSInstanceWaitCondition", Handle=Ref(nfswaithandle), Timeout="1500", DependsOn="NFSInstance")) t.add_output([ Output("ElasticIP", Description="Elastic IP address for the instance", Value=Ref(EIPAddress), Condition="create_elastic_ip") ]) t.add_output([ Output("InstanceID", Description="Instance ID", Value=Ref(NFSInstance)) ]) t.add_output([ Output("InstancePrivateIP", Value=GetAtt('NFSInstance', 'PrivateIp')) ]) t.add_output([ Output("InstancePublicIP", Value=GetAtt('NFSInstance', 'PublicIp'), Condition="Has_Public_Ip") ]) t.add_output([ Output("ElasticPublicIP", Value=GetAtt('NFSInstance', 'PublicIp'), Condition="create_elastic_ip") ]) t.add_output([ Output("PrivateMountPoint", Description="Mount point on private network", Value=Join("", [GetAtt('NFSInstance', 'PrivateIp'), ":/fs1"])) ]) t.add_output([ Output("ExampleClientMountCommands", Description="Example commands to mount NFS on the clients", Value=Join("", [ "sudo mkdir /nfs1; sudo mount ", GetAtt('NFSInstance', 'PrivateIp'), ":/", Ref("ZfsPool"), "/", Ref("ZfsMountPoint"), " /nfs1" ])) ]) t.add_output([ Output("S3BucketName", Value=(Ref("S3BucketName")), Condition="Has_Bucket") ]) # "Volume01" : { "Value" : { "Ref" : "Volume01" } }, # "Volume02" : { "Value" : { "Ref" : "Volume02" } }, # "Volume03" : { "Value" : { "Ref" : "Volume03" } }, # "Volume04" : { "Value" : { "Ref" : "Volume04" } }, # "Volume05" : { "Value" : { "Ref" : "Volume05" } }, # "Volume06" : { "Value" : { "Ref" : "Volume06" } }, # "Volume07" : { "Value" : { "Ref" : "Volume07" } }, # "Volume08" : { "Value" : { "Ref" : "Volume08" } }, # "Volume09" : { "Value" : { "Ref" : "Volume09" } }, # "Volume10" : { "Value" : { "Ref" : "Volume10" } } print(t.to_json(indent=2))
def main(): t = Template() AddAMIMap(t) t.set_version("2010-09-09") t.set_description( "DCV 2017 Remote Desktop with Xilinx Vivado (using AWS FPGA Developer AMI)" ) tags = Tags(Name=Ref("AWS::StackName")) # user data InstUserData = list() InstUserData = [ '#!/usr/bin/env bash\n', '\n', 'set -x\n', '\n', '##exit 0\n', # use this to disable all user-data and bring up files '\n', 'my_wait_handle="', Ref('InstanceWaitHandle'), '"\n', 'user_name="', Ref('UserName'), '"\n', 'user_pass="******"\n', '\n', ] with open('_include/dcv-install.sh', 'r',) as ud_file: user_data_file = ud_file.readlines() for l in user_data_file: InstUserData.append(l) VPCId = t.add_parameter(Parameter( 'VPCId', Type="AWS::EC2::VPC::Id", Description="VPC ID for where the remote desktop instance should be launched" )) t.set_parameter_label(VPCId, "VPC ID") t.add_parameter_to_group(VPCId, "Instance Configuration") Subnet = t.add_parameter(Parameter( 'Subnet', Type="AWS::EC2::Subnet::Id", Description="For the Subnet ID, you should choose one in the " "Availability Zone where you want the instance launched" )) t.set_parameter_label(Subnet, "Subnet ID") t.add_parameter_to_group(Subnet, "Instance Configuration") ExistingSecurityGroup = t.add_parameter(Parameter( 'ExistingSecurityGroup', Type="String", Default="NO_VALUE", Description="OPTIONAL: Needs to be a SG ID, for example sg-abcd1234efgh. " "This is an already existing Security Group ID that is " "in the same VPC, this is an addition to the security groups that " "are automatically created to enable access to the remote desktop," "leave as NO_VALUE if you choose not to use this" )) t.set_parameter_label(ExistingSecurityGroup, "OPTIONAL: Existing Security Group (e.g. sg-abcd1234efgh)") t.add_parameter_to_group(ExistingSecurityGroup, "Instance Configuration") remoteDesktopInstanceType = t.add_parameter(Parameter( 'remoteDesktopInstanceType', Type="String", Description="This is the instance type that will be used. 
As this is a " "2D workstation, we are not supporting GPU instance types.", Default="m4.xlarge", AllowedValues=[ "m4.large", "m4.xlarge", "m4.2xlarge", "m4.4xlarge", "m4.10xlarge", "m5.large", "m5.xlarge", "m5.2xlarge", "m5.4xlarge", "m5.12xlarge", "m5.24xlarge", "z1d.large", "z1d.xlarge", "z1d.2xlarge", "z1d.3xlarge", "z1d.6xlarge", "z1d.12xlarge", "z1d.metal" ], ConstraintDescription= "Must an EC2 instance type from the list" )) t.set_parameter_label(remoteDesktopInstanceType, "Remote Desktop Instance Type") t.add_parameter_to_group(remoteDesktopInstanceType, "Instance Configuration") EC2KeyName = t.add_parameter(Parameter( 'EC2KeyName', Type="AWS::EC2::KeyPair::KeyName", Description="Name of an existing EC2 KeyPair to enable SSH access to the instance.", ConstraintDescription="REQUIRED: Must be a valid EC2 key pair" )) t.set_parameter_label(EC2KeyName, "EC2 Key Name") t.add_parameter_to_group(EC2KeyName, "Instance Configuration") OperatingSystem = t.add_parameter(Parameter( 'OperatingSystem', Type="String", Description="Operating System of the AMI", Default="centos7", AllowedValues=[ "centos7" ], ConstraintDescription="Must be: centos7" )) t.set_parameter_label(OperatingSystem, "Operating System of AMI") t.add_parameter_to_group(OperatingSystem, "Instance Configuration") StaticPrivateIpAddress = t.add_parameter(Parameter( 'StaticPrivateIpAddress', Type="String", Default="NO_VALUE", Description="OPTIONAL: If you already have a private VPC address range, you can " "specify the private IP address to use, leave as NO_VALUE if you choose not to use this", )) t.set_parameter_label(StaticPrivateIpAddress, "OPTIONAL: Static Private IP Address") t.add_parameter_to_group(StaticPrivateIpAddress, "Instance Configuration") UsePublicIp = t.add_parameter(Parameter( 'UsePublicIp', Type="String", Description="Should a public IP address be given to the instance, " "this is overridden by CreateElasticIP=True", Default="True", ConstraintDescription="True/False", AllowedValues=[ "True", "False" ] )) t.set_parameter_label(UsePublicIp, "Assign a public IP Address") t.add_parameter_to_group(UsePublicIp, "Instance Configuration") CreateElasticIP = t.add_parameter(Parameter( 'CreateElasticIP', Type="String", Description="Should an Elastic IP address be created and assigned, " "this allows for persistent IP address assignment", Default="True", ConstraintDescription="True/False", AllowedValues=[ "True", "False" ] )) t.set_parameter_label(CreateElasticIP, "Create an Elastic IP address") t.add_parameter_to_group(CreateElasticIP, "Instance Configuration") S3BucketName = t.add_parameter(Parameter( 'S3BucketName', Type="String", Default="NO_VALUE", Description="OPTIONAL: S3 bucket to allow this instance read access (List and Get)," "leave as NO_VALUE if you choose not to use this" )) t.set_parameter_label(S3BucketName, "OPTIONAL: S3 bucket for read access") t.add_parameter_to_group(S3BucketName, "Instance Configuration") AccessCidr = t.add_parameter(Parameter( 'AccessCidr', Type="String", Description="This is the CIDR block for allowing remote access, for ports 22 and 8443", Default="111.222.333.444/32", AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", ConstraintDescription="Must be a valid CIDR x.x.x.x/x" )) t.set_parameter_label(AccessCidr, "CIDR block for remote access (ports 22 and 8443)") t.add_parameter_to_group(AccessCidr, "Instance Configuration") UserName = t.add_parameter(Parameter( 'UserName', Type="String", Description="User name for DCV remote desktop login, default is 
\"simuser\".", Default="simuser", MinLength="4", )) t.set_parameter_label(UserName, "User name for DCV login") t.add_parameter_to_group(UserName, "DCV Configuration") UserPass = t.add_parameter(Parameter( 'UserPass', Type="String", Description="Password for DCV remote desktop login. The default password is Ch4ng3M3!", Default="Ch4ng3M3!", MinLength="8", AllowedPattern="^((?=.*[a-z])(?=.*[A-Z])(?=.*[\\d])|(?=.*[a-z])(?=.*[A-Z])(?=.*[\\W_])|(?=.*[a-z])(?=.*[\\d])(?=.*[\\W_])|(?=.*[A-Z])(?=.*[\\d])(?=.*[\\W_])).+$", ConstraintDescription="Password must contain at least one element from three of the following sets: lowercase letters, uppercase letters, base 10 digits, non-alphanumeric characters", NoEcho=True )) t.set_parameter_label(UserPass, "Password for DCV login") t.add_parameter_to_group(UserPass, "DCV Configuration") # end parameters RootRole = t.add_resource(iam.Role( "RootRole", AssumeRolePolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": ["ec2.amazonaws.com"], }, "Action": ["sts:AssumeRole"] }] } )) dcvBucketPolicy= t.add_resource(PolicyType( "dcvBucketPolicy", PolicyName="dcvBucketPolicy", Roles=[Ref(RootRole)], PolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": ["s3:GetObject"], "Resource": "arn:aws:s3:::dcv-license.us-east-1/*" } ], }, )), BucketPolicy= t.add_resource(PolicyType( "BucketPolicy", PolicyName="BucketPolicy", Roles=[Ref(RootRole)], PolicyDocument={ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": ["s3:GetObject"], "Resource": {"Fn::Join":["", ["arn:aws:s3:::", {"Ref": "S3BucketName"},"/*"]]} }, { "Effect": "Allow", "Action": [ "s3:ListBucket"], "Resource": {"Fn::Join":["", ["arn:aws:s3:::", {"Ref": "S3BucketName"}]]} } ], }, Condition="Has_Bucket" )), remoteDesktopSecurityGroup = t.add_resource(SecurityGroup( "remoteDesktopSecurityGroup", VpcId = Ref(VPCId), GroupDescription = "Remote Desktop Secuirty group", SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="8443", ToPort="8443", CidrIp=Ref(AccessCidr), ), ] )) SshSecurityGroup = t.add_resource(SecurityGroup( "SshSecurityGroup", VpcId = Ref(VPCId), GroupDescription = "SSH Secuirty group", SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp=Ref(AccessCidr), ), ] )) RootInstanceProfile = t.add_resource(InstanceProfile( "RootInstanceProfile", Roles=[Ref(RootRole)] )) remoteDesktopInstance = t.add_resource(ec2.Instance( 'remoteDesktopInstance', ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"), Ref(OperatingSystem)), KeyName=Ref(EC2KeyName), InstanceType=(Ref(remoteDesktopInstanceType)), DisableApiTermination='false', NetworkInterfaces=[ NetworkInterfaceProperty( SubnetId=Ref(Subnet), GroupSet=If( "not_existing_sg", [Ref(remoteDesktopSecurityGroup), Ref(SshSecurityGroup)], [Ref(remoteDesktopSecurityGroup), Ref(SshSecurityGroup), Ref(ExistingSecurityGroup)] ), AssociatePublicIpAddress=Ref(UsePublicIp), DeviceIndex='0', DeleteOnTermination='true', PrivateIpAddress=If( "Has_Static_Private_IP", Ref(StaticPrivateIpAddress), Ref("AWS::NoValue"), ) ) ], IamInstanceProfile=(Ref(RootInstanceProfile)), UserData=Base64(Join('', InstUserData)), )) EIPAddress = t.add_resource(EIP( 'EIPAddress', Domain='vpc', InstanceId=Ref(remoteDesktopInstance), Condition="create_elastic_ip" )) t.add_condition( "not_existing_sg", Equals(Ref(ExistingSecurityGroup), "NO_VALUE") ) t.add_condition( "Has_Public_Ip", Equals(Ref(UsePublicIp), "True") ) t.add_condition( 
"Has_Bucket", Not(Equals(Ref(S3BucketName), "NO_VALUE")) ) t.add_condition( "create_elastic_ip", Equals(Ref(CreateElasticIP), "True") ) t.add_condition( "Has_Static_Private_IP", Not(Equals(Ref(StaticPrivateIpAddress), "NO_VALUE")) ) waithandle = t.add_resource(WaitConditionHandle('InstanceWaitHandle')) instanceWaitCondition = t.add_resource(WaitCondition( "instanceWaitCondition", Handle=Ref(waithandle), Timeout="3600", DependsOn="remoteDesktopInstance" )) t.add_output([ Output( "DCVConnectionLink", Description="Connect to the DCV Remote Desktop with this URL", Value=Join("", [ "https://", GetAtt("remoteDesktopInstance", 'PublicIp'), ":8443" ]) ), Output( "DCVUserName", Description="Login name for DCV session", Value=(Ref(UserName)) ), Output( "SSHTunnelCommand", Description='Command for setting up SSH tunnel to remote desktop, use "localhost:18443" for DCV client', Value=Join("", [ "ssh -i <file.pem> -L 18443:localhost:8443 -l centos ", GetAtt("remoteDesktopInstance", 'PublicIp') ]) ), ]) #print(t.to_json(indent=2)) print(to_yaml(t.to_json(indent=2), clean_up=True))
"AMI": "ami-5ec1673e" }, "us-west-1": { "AMI": "ami-23e8a343" }, }) mongo_instance_name = "MongoDB" mongo_instance = Instance( mongo_instance_name, template=template, KeyName=Ref(secret_key), NetworkInterfaces=[ NetworkInterfaceProperty( AssociatePublicIpAddress=True, SubnetId=Ref(public_subnet), DeviceIndex="0", GroupSet=[Ref(instance_security_group)], ) ], ImageId=FindInMap("InstanceRegionMap", Ref(AWS_REGION), "AMI"), # SecurityGroupIds=[Ref(default_security_group)], InstanceType=instance_type, UserData=Base64( Join('', [ "#!/bin/bash -xe\n", "sleep 30s\n", "touch /tmp/init.log\n", "yum update -y\n", "echo update-done >> /tmp/init.log\n", "yum install -y docker \n", "echo docker-install-done >> /tmp/init.log\n", "usermod -a -G docker ec2-user \n", "service docker start\n", "echo docker-start-done >> /tmp/init.log\n", "mkdir -p /data\n", "docker run --name mongo -v /data:/data/db -p 27017:27017 -d mongo --auth\n",
Instance("jenkinsInstance1", ImageId=FindInMap( mappings.ubuntu_14_AWSRegionArch2AMI[mappings.logicalName], ref_region, FindInMap(mappings.AWSInstanceType2Arch[mappings.logicalName], Ref(jenkins_instance_type_param), 'Arch')), InstanceType=Ref(jenkins_instance_type_param), IamInstanceProfile=Ref(cfninstanceprofile), KeyName=Ref(ssh_key_param), UserData=user_data.jenkins_userData(Ref(jenkins_password_param), Ref('AWS::Region'), Ref('AWS::StackId')), NetworkInterfaces=[ NetworkInterfaceProperty(GroupSet=[Ref(public_tools_sg)], AssociatePublicIpAddress='true', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(public_tools_subnet)) ], Tags=Tags(Name="Jenkins Server", CandidateName=candidate_name), DependsOn=internetGateway.title)) if add_elk_instance: elk_instance1 = t.add_resource( Instance( "elkInstance1", ImageId=FindInMap( mappings.ubuntu_14_AWSRegionArch2AMI[mappings.logicalName], ref_region, FindInMap(mappings.AWSInstanceType2Arch[mappings.logicalName], Ref(jenkins_instance_type_param), 'Arch')),
def create_template(num_masters, num_agents, num_publicAgents): #outfilename = "test.json" outfilename = "cf_" + str(num_masters) + "." + str(num_agents) + "." + str( num_publicAgents) + ".json" # Create the Template t = Template() t.add_version('2010-09-09') t.add_description('Creates a set of Servers for DC/OS using CentOS 7.3 AMI. Creates a boot server to host the DC/OS installer and a NAT Instance for outbound connections from private agents. Creates ' + str(num_masters) + ' Master(s), ' \ + str(num_agents) + ' Private Agent(s), and ' + str(num_publicAgents) + ' Public Agent(s). After creating the Stack; Log into the boot server and run the DCOS Bash Script installer for AWS') # Amazon Linux AMI 2016.09.1.20170119 x86_64 VPC NAT HVM EBS # amzn-ami-vpc-nat-hvm-2016.09.1.20170119-x86_64-ebs - # ami-dd3dd7cb us-east-1 (N. Virginia) # ami-564b6e33 us-east-2 (Ohio) # ami-7d54061d us-west-1 (N. Cal) # ami-3b6fd05b us-west-2 (Oregon) t.add_mapping( 'NATAmi', { 'us-east-1': { 'default': 'ami-dd3dd7cb' }, 'us-east-2': { 'default': 'ami-564b6e33' }, 'us-west-1': { 'default': 'ami-7d54061d' }, 'us-west-2': { 'default': 'ami-3b6fd05b' }, }) # The c73 AMI pre created and deployed on each region t.add_mapping( 'c73Ami', { 'us-east-1': { 'default': 'ami-46c1b650' }, 'us-east-2': { 'default': 'ami-18f8df7d' }, 'us-west-1': { 'default': 'ami-f5d7f195' }, 'us-west-2': { 'default': 'ami-f4533694' }, }) # CloudFormation Parameters # Sometimes when I deployed stack on us-east-1; it would fail on av zone us-east-1c with error messages instance type not support on this AZ. I added this parameter to fix all of the components in on AZ for now avzone_param = t.add_parameter( Parameter( "AVZoneName", ConstraintDescription='Must be the name of an an Availability Zone', Description='Name of an Availability Zone', Type='AWS::EC2::AvailabilityZone::Name', )) # Every agent will get a data drive of this size dataDriveSizeGB_param = t.add_parameter( Parameter( "dataDriveSizeGB", Default="100", MinValue=20, MaxValue=1000, Description= 'Size of data drive to add to private agents from 20 to 1000GB', Type='Number')) # The key will be added to the centos user so you can login to centos using the key keyname_param = t.add_parameter( Parameter( "KeyName", ConstraintDescription= 'Must be the name of an existing EC2 KeyPair.', Description= 'Name of an existing EC2 KeyPair to enable SSH access to the instance', Type='AWS::EC2::KeyPair::KeyName', )) # While you can allow everyone it's more secure to just allow a single machine or subnet of machines; web port will also be opened to this CIDR sshlocation_param = t.add_parameter( Parameter( "sshlocation", Type="String", Description= "Subnet allowed to ssh to these servers. 
0.0.0.0/0 to allow all.")) # Instance type for Master instanceTypeMaster_param = t.add_parameter( Parameter( 'InstanceTypeMaster', Type='String', Description='EC2 instance type for ' + str(num_masters) + ' Masters(s)', Default='m4.xlarge', AllowedValues=[ 't2.xlarge', 't2.2xlarge', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', ], ConstraintDescription='Must be a valid EC2 instance type.', )) # Instance type for Agents instanceTypeAgent_param = t.add_parameter( Parameter( 'InstanceTypeAgent', Type='String', Description='EC2 instance type for ' + str(num_agents) + ' Private Agent(s)', Default='m4.2xlarge', AllowedValues=[ 't2.xlarge', 't2.2xlarge', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', ], ConstraintDescription='Must be a valid EC2 instance type.', )) # Instance type for Public Agents instanceTypePublicAgent_param = t.add_parameter( Parameter( 'InstanceTypePublicAgent', Type='String', Description='EC2 instance type for ' + str(num_publicAgents) + ' Public Agent(s)', Default='m4.xlarge', AllowedValues=[ 't2.xlarge', 't2.2xlarge', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge', 'c4.xlarge', 'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', ], ConstraintDescription='Must be a valid EC2 instance type.', )) # Adding Resources ref_stack_id = Ref('AWS::StackId') ref_region = Ref('AWS::Region') ref_stack_name = Ref('AWS::StackName') # Create VPC nm = 'vpc' vpc = t.add_resource( VPC(nm, CidrBlock='10.10.0.0/16', EnableDnsSupport=True, EnableDnsHostnames=True, Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) # Create Subnet for Masters nm = 'mastersSubnet' subnetMasters = t.add_resource( Subnet(nm, AvailabilityZone=Ref(avzone_param), CidrBlock='10.10.0.0/24', VpcId=Ref(vpc), Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) # Create Subnet for Agents nm = 'agentsSubnet' subnetAgents = t.add_resource( Subnet(nm, AvailabilityZone=Ref(avzone_param), CidrBlock='10.10.16.0/24', VpcId=Ref(vpc), Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) # Create Subnet for Public Agents nm = 'publicAgentsSubnet' subnetPublicAgents = t.add_resource( Subnet(nm, AvailabilityZone=Ref(avzone_param), CidrBlock='10.10.32.0/24', VpcId=Ref(vpc), Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) # Create Gateway; route to the outside world (Internet) nm = 'ig' internetGateway = t.add_resource( InternetGateway(nm, Tags=Tags(Application=ref_stack_id, Name=Join( "", [Ref('AWS::StackName'), "-", nm])))) # Attach Gateway to VPC nm = 'gatewayAttachment' gatewayAttachment = t.add_resource( VPCGatewayAttachment(nm, VpcId=Ref(vpc), InternetGatewayId=Ref(internetGateway))) # Create Route Table nm = 'routeTable' routeTable = t.add_resource( RouteTable(nm, VpcId=Ref(vpc), Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) # Add Routes # Allow all outbound traffic nm = 'route' route = t.add_resource( Route( nm, DependsOn=gatewayAttachment.title, GatewayId=Ref(internetGateway), DestinationCidrBlock='0.0.0.0/0', RouteTableId=Ref(routeTable), )) # Associate RouteTable to Master and Public Subnets nm = 'subnetRTAMasters' subnetRouteTableAssociation = t.add_resource( SubnetRouteTableAssociation( nm, SubnetId=Ref(subnetMasters), RouteTableId=Ref(routeTable), )) nm = 'subnetRTAPublicAgents' subnetRouteTableAssociation = 
t.add_resource( SubnetRouteTableAssociation( nm, SubnetId=Ref(subnetPublicAgents), RouteTableId=Ref(routeTable), )) # Create Security Group (General access to ssh and internal connectionsn between masters, agents, and public agents) nm = 'securityGroup' securityGroup = t.add_resource( SecurityGroup(nm, GroupDescription='Security Group', SecurityGroupIngress=[ SecurityGroupRule(IpProtocol='tcp', FromPort='22', ToPort='22', CidrIp=Ref(sshlocation_param)), SecurityGroupRule(IpProtocol='-1', CidrIp='10.10.0.0/16') ], VpcId=Ref(vpc), Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) # Create Security Group Public Agents nm = 'securityGroupPublicAgents' publicAgentsSG = t.add_resource( SecurityGroup(nm, GroupDescription='Security Group Public Agents', SecurityGroupIngress=[ SecurityGroupRule(IpProtocol='tcp', FromPort='80', ToPort='80', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='443', ToPort='443', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='10000', ToPort='10010', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='9090', ToPort='9090', CidrIp='0.0.0.0/0') ], VpcId=Ref(vpc), Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) # Create Security Group Masters Allow Access from sshlocation param as test nm = 'securityGroupMasters' mastersSG = t.add_resource( SecurityGroup(nm, GroupDescription='Security Group Masters', SecurityGroupIngress=[ SecurityGroupRule(IpProtocol='tcp', FromPort='80', ToPort='80', CidrIp=Ref(sshlocation_param)), SecurityGroupRule(IpProtocol='tcp', FromPort='443', ToPort='443', CidrIp=Ref(sshlocation_param)) ], VpcId=Ref(vpc), Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) if useNatInstance: # **** Also change in natRoute **** # Create NAT instance; This allows private agents to get out to the Internet nm = 'nat' nat = t.add_resource( Instance( nm, SourceDestCheck="false", ImageId=FindInMap("NATAmi", Ref("AWS::Region"), "default"), InstanceType="m4.large", AvailabilityZone=Ref(avzone_param), KeyName=Ref(keyname_param), DependsOn=internetGateway.title, NetworkInterfaces=[ NetworkInterfaceProperty(GroupSet=[Ref(securityGroup)], AssociatePublicIpAddress='true', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(subnetMasters), PrivateIpAddress='10.10.0.9') ], BlockDeviceMappings=[ BlockDeviceMapping(DeviceName="/dev/xvda", Ebs=EBSBlockDevice( DeleteOnTermination='true', )) ], Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) else: # Create Elastic IP for NatGateay nm = 'natIP' nat_eip = t.add_resource(EIP( nm, Domain="vpc", )) # Create NAT Gateway nm = 'natGateway' nat = t.add_resource( NatGateway( nm, AllocationId=GetAtt(nat_eip, 'AllocationId'), SubnetId=Ref(subnetMasters), )) # Create Route Table for NAT nm = 'natRouteTable' routeTableNAT = t.add_resource( RouteTable(nm, VpcId=Ref(vpc), Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) # Associate Agent Subnet to NAT nm = 'subnetRTAAgents' subnetRouteTableAssociation = t.add_resource( SubnetRouteTableAssociation( nm, SubnetId=Ref(subnetAgents), RouteTableId=Ref(routeTableNAT), )) # Add Routes (Agents can reach out anywhere) nm = 'natRoute' if useNatInstance: route = t.add_resource( Route( nm, RouteTableId=Ref(routeTableNAT), DestinationCidrBlock='0.0.0.0/0', InstanceId=Ref(nat), )) else: route = t.add_resource( Route( nm, RouteTableId=Ref(routeTableNAT), 
DestinationCidrBlock='0.0.0.0/0', NatGatewayId=Ref(nat), )) # **************************************** # NOTE: I am using static PrivateIPAddresses; this may not be a good choice; however, it simplified the install script. The range of IP's for the master and agents are limited to 24 subnet and I start at 11 # With this configuration the max number of agents is around 240. # **************************************** # Create boot instance # Installs on AWS so far have taken longer than on Azure. Takes about 10 minutes for the boot server to configure. # Tried several InstanceType from t2.micro to m4.large; all take about 10 minutes for boot to load. The docker start of mesosphere/dcos-genconf seems to be taking longer than it did on azure. nm = 'boot' boot = t.add_resource( Instance(nm, ImageId=FindInMap("c73Ami", Ref("AWS::Region"), "default"), InstanceType="m4.xlarge", AvailabilityZone=Ref(avzone_param), KeyName=Ref(keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty(GroupSet=[Ref(securityGroup)], AssociatePublicIpAddress='true', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(subnetMasters), PrivateIpAddress='10.10.0.10') ], BlockDeviceMappings=[ BlockDeviceMapping(DeviceName="/dev/sda1", Ebs=EBSBlockDevice( VolumeSize="100", DeleteOnTermination='true', )) ], Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) # Create master instance(s) masters = [] i = 1 while i <= num_masters: nm = 'm' + str(i) private_ip = "10.10.0." + str(i + 10) instance = t.add_resource( Instance(nm, ImageId=FindInMap("c73Ami", Ref("AWS::Region"), "default"), InstanceType=Ref(instanceTypeMaster_param), AvailabilityZone=Ref(avzone_param), KeyName=Ref(keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty( GroupSet=[Ref(securityGroup), Ref(mastersSG)], AssociatePublicIpAddress='true', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(subnetMasters), PrivateIpAddress=private_ip) ], BlockDeviceMappings=[ BlockDeviceMapping(DeviceName="/dev/sda1", Ebs=EBSBlockDevice( VolumeSize="100", DeleteOnTermination='true', )) ], Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) masters.append(instance) i += 1 # Create agent instance(s) i = 1 while i <= num_agents: nm = 'a' + str(i) private_ip = "10.10.16." + str(i + 10) instance = t.add_resource( Instance( nm, ImageId=FindInMap("c73Ami", Ref("AWS::Region"), "default"), InstanceType=Ref(instanceTypeAgent_param), AvailabilityZone=Ref(avzone_param), KeyName=Ref(keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty(GroupSet=[Ref(securityGroup)], AssociatePublicIpAddress='false', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(subnetAgents), PrivateIpAddress=private_ip) ], BlockDeviceMappings=[ BlockDeviceMapping(DeviceName="/dev/sda1", Ebs=EBSBlockDevice( VolumeSize="100", DeleteOnTermination='true', )) ], Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) volume = t.add_resource( Volume(nm + "data", AvailabilityZone=Ref(avzone_param), Size=Ref(dataDriveSizeGB_param), Tags=Tags( Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm + "data"])))) volattach = t.add_resource( VolumeAttachment(nm + "dataattach", InstanceId=Ref(instance), VolumeId=Ref(volume), Device="/dev/sdc")) i += 1 # Create public agent instance(s) publicAgents = [] i = 1 nm = "p1" while i <= num_publicAgents: nm = 'p' + str(i) private_ip = "10.10.32." 
+ str(i + 10) instance = t.add_resource( Instance( nm, ImageId=FindInMap("c73Ami", Ref("AWS::Region"), "default"), InstanceType=Ref(instanceTypePublicAgent_param), AvailabilityZone=Ref(avzone_param), KeyName=Ref(keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty( GroupSet=[Ref(securityGroup), Ref(publicAgentsSG)], AssociatePublicIpAddress='true', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(subnetPublicAgents), PrivateIpAddress=private_ip) ], BlockDeviceMappings=[ BlockDeviceMapping(DeviceName="/dev/sda1", Ebs=EBSBlockDevice( VolumeSize="100", DeleteOnTermination='true', )) ], Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) publicAgents.append(instance) i += 1 # Load Balancer Masters nm = "masters" elasticLBMasters = t.add_resource( elb.LoadBalancer( nm, Instances=[Ref(r) for r in masters], Subnets=[Ref(subnetMasters)], SecurityGroups=[Ref(mastersSG)], CrossZone=False, Listeners=[ elb.Listener( LoadBalancerPort="80", InstancePort="80", Protocol="TCP", ), elb.Listener( LoadBalancerPort="443", InstancePort="443", Protocol="TCP", ), ], # Health Checking on port 80 which should be there after DCOS has been installed. HealthCheck=elb.HealthCheck( Target="TCP:80", HealthyThreshold="2", UnhealthyThreshold="2", Interval="30", Timeout="5", ), Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) # Load Balancer Public Agents nm = "publicagents" elasticLBPublicAgents = t.add_resource( elb.LoadBalancer( nm, #AvailabilityZones=GetAZs(""), Instances=[Ref(r) for r in publicAgents], Subnets=[Ref(subnetPublicAgents)], SecurityGroups=[Ref(publicAgentsSG)], CrossZone=False, Listeners=[ elb.Listener( LoadBalancerPort="10000", InstancePort="10000", Protocol="TCP", ), elb.Listener( LoadBalancerPort="10001", InstancePort="10001", Protocol="TCP", ), elb.Listener( LoadBalancerPort="10002", InstancePort="10002", Protocol="TCP", ), elb.Listener( LoadBalancerPort="10003", InstancePort="10003", Protocol="TCP", ), elb.Listener( LoadBalancerPort="10004", InstancePort="10004", Protocol="TCP", ), elb.Listener( LoadBalancerPort="10005", InstancePort="10005", Protocol="TCP", ), elb.Listener( LoadBalancerPort="10006", InstancePort="10006", Protocol="TCP", ), elb.Listener( LoadBalancerPort="10007", InstancePort="10007", Protocol="TCP", ), elb.Listener( LoadBalancerPort="10008", InstancePort="10008", Protocol="TCP", ), elb.Listener( LoadBalancerPort="10009", InstancePort="10009", Protocol="TCP", ), elb.Listener( LoadBalancerPort="10010", InstancePort="10010", Protocol="TCP", ), elb.Listener( LoadBalancerPort="9090", InstancePort="9090", Protocol="TCP", ), elb.Listener( LoadBalancerPort="80", InstancePort="80", Protocol="TCP", ), elb.Listener( LoadBalancerPort="443", InstancePort="443", Protocol="TCP", ) ], # I've added health check for port 9090; becomes healthy after Marathon-LB is installed. 
HealthCheck=elb.HealthCheck( Target="TCP:9090", HealthyThreshold="2", UnhealthyThreshold="2", Interval="30", Timeout="5", ), Tags=Tags(Application=ref_stack_id, Name=Join("", [Ref('AWS::StackName'), "-", nm])))) # Outputs t.add_output( Output("BootServer", Description="Name/IP of Boot Server", Value=Join( "/", [GetAtt(boot, "PublicDnsName"), GetAtt(boot, "PublicIp")]))) t.add_output( Output("MastersURL", Description="URL of the Masters", Value=Join( "", ["http://", GetAtt(elasticLBMasters, "DNSName")]))) t.add_output( Output( "PublicAgentsURL", Description="URL of the Public Agents haproxy stats.", Value=Join("", [ "http://", GetAtt(elasticLBPublicAgents, "DNSName"), ":9090/haproxy?stats" ]))) # Write json to file jsonStr = t.to_json() fout = open(outfilename, "w") fout.write(jsonStr) fout.close() # Print the json to screen print(jsonStr)
        ],
        Tags=Tags(IoCluster=Ref("AWS::StackName"),
                  Name=Join("-", [Ref("AWS::StackName"), "SecurityGroup"]))),

    # Start an instance in Public Subnet 1
    "BastionHost": ec2.Instance(
        "BastionHost",
        ImageId="ami-ffbdd790",
        InstanceType=Ref("InstanceType"),
        KeyName=Ref("EC2KeyPairName"),
        # SubnetId=Ref("PublicSubnet1"),
        NetworkInterfaces=[
            NetworkInterfaceProperty(GroupSet=[Ref("InstanceSecurityGroup")],
                                     AssociatePublicIpAddress="True",
                                     DeviceIndex="0",
                                     DeleteOnTermination="True",
                                     SubnetId=Ref("PublicSubnet1"))
        ],
        Tags=Tags(IoCluster=Ref("AWS::StackName"),
                  Name=Join("-", [Ref("AWS::StackName"), "BastionHost"])))
}

t = Template()

for p in parameters.values():
    t.add_parameter(p)

for k in conditions:
    t.add_condition(k, conditions[k])
def generate_template(service_name):
    t = Template()
    t.add_version('2010-09-09')
    t.add_description("""\
AWS CloudFormation Template for AWS Exploitation Lab
""")
    t.add_mapping("PublicRegionMap", ami_public_mapping)
    t.add_mapping("PrivateRegionMap", ami_private_mapping)

    keyname_param = t.add_parameter(
        Parameter(
            'KeyName',
            ConstraintDescription='must be the name of an existing EC2 KeyPair.',
            Description='Name of an existing EC2 KeyPair to enable SSH access to the instance',
            Type='AWS::EC2::KeyPair::KeyName',
        ))
    sshlocation_param = t.add_parameter(
        Parameter(
            'SSHLocation',
            Description='The IP address range that can be used to SSH to the EC2 instances',
            Type='String',
            MinLength='9',
            MaxLength='18',
            Default='0.0.0.0/32',
            AllowedPattern=r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
            ConstraintDescription=(
                "must be a valid IP CIDR range of the form x.x.x.x/x."),
        ))
    instanceType_param = t.add_parameter(
        Parameter(
            'InstanceType',
            Type='String',
            Description='WebServer EC2 instance type',
            Default='t2.micro',
            AllowedValues=[
                't2.micro',
                't2.small',
                't2.medium',
                'm3.medium',
                'm3.large',
                'm3.xlarge',
                'm3.2xlarge',
            ],
            ConstraintDescription='must be a valid EC2 instance type.',
        ))

    ref_stack_id = Ref('AWS::StackId')

    ec2_role = t.add_resource(
        Role("%sEC2Role" % service_name,
             AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[
                 awacs.aws.Statement(
                     Effect=awacs.aws.Allow,
                     Action=[awacs.aws.Action("sts", "AssumeRole")],
                     Principal=awacs.aws.Principal("Service", ["ec2.amazonaws.com"]))
             ])))
    ec2_role.ManagedPolicyArns = ["arn:aws:iam::aws:policy/ReadOnlyAccess"]

    ec2_snapshot_policy_document = awacs.aws.Policy(Statement=[
        awacs.aws.Statement(Sid="PermitEC2Snapshots",
                            Effect=awacs.aws.Allow,
                            Action=[
                                awacs.aws.Action("ec2", "CreateSnapshot"),
                                awacs.aws.Action("ec2", "ModifySnapshotAttribute"),
                            ],
                            Resource=["*"])
    ])
    ec2_snapshot_policy = Policy(PolicyName="EC2SnapshotPermissions",
                                 PolicyDocument=ec2_snapshot_policy_document)

    priv_ec2_role = t.add_resource(
        Role("%sPrivEC2Role" % service_name,
             AssumeRolePolicyDocument=awacs.aws.Policy(Statement=[
                 awacs.aws.Statement(
                     Effect=awacs.aws.Allow,
                     Action=[awacs.aws.Action("sts", "AssumeRole")],
                     Principal=awacs.aws.Principal("Service", ["ec2.amazonaws.com"]))
             ]),
             Policies=[ec2_snapshot_policy]))
    priv_ec2_role.ManagedPolicyArns = [
        "arn:aws:iam::aws:policy/ReadOnlyAccess"
    ]

    VPC_ref = t.add_resource(
        VPC('VPC',
            CidrBlock='10.0.0.0/16',
            Tags=Tags(Application=ref_stack_id)))

    instanceProfile = t.add_resource(
        InstanceProfile("InstanceProfile",
                        InstanceProfileName="%sInstanceRole" % (service_name),
                        Roles=[Ref(ec2_role)]))
    privInstanceProfile = t.add_resource(
        InstanceProfile("PrivInstanceProfile",
                        InstanceProfileName="%sPrivInstanceRole" % (service_name),
                        Roles=[Ref(priv_ec2_role)]))

    public_subnet = t.add_resource(
        Subnet('%sSubnetPublic' % service_name,
               MapPublicIpOnLaunch=True,
               CidrBlock='10.0.1.0/24',
               VpcId=Ref(VPC_ref),
               Tags=Tags(Application=ref_stack_id,
                         Name="%sSubnet_public" % (service_name))))
    private_subnet = t.add_resource(
        Subnet('%sSubnetPrivate' % service_name,
               MapPublicIpOnLaunch=False,
               CidrBlock='10.0.2.0/24',
               VpcId=Ref(VPC_ref),
               Tags=Tags(Application=ref_stack_id,
                         Name="%sSubnet_private" % (service_name))))

    internetGateway = t.add_resource(
        InternetGateway('InternetGateway',
                        Tags=Tags(Application=ref_stack_id,
                                  Name="%sInternetGateway" % service_name)))
    gatewayAttachment = t.add_resource(
        VPCGatewayAttachment('AttachGateway',
                             VpcId=Ref(VPC_ref),
                             InternetGatewayId=Ref(internetGateway)))
    routeTable = t.add_resource(
        RouteTable('RouteTable',
                   VpcId=Ref(VPC_ref),
                   Tags=Tags(Application=ref_stack_id,
                             Name="%sRouteTable" % service_name)))
    route = t.add_resource(
        Route(
            'Route',
            DependsOn='AttachGateway',
            GatewayId=Ref('InternetGateway'),
            DestinationCidrBlock='0.0.0.0/0',
            RouteTableId=Ref(routeTable),
        ))

    # Only associate this Route Table with the public subnet
    subnetRouteTableAssociation = t.add_resource(
        SubnetRouteTableAssociation(
            'SubnetRouteTableAssociation',
            SubnetId=Ref(public_subnet),
            RouteTableId=Ref(routeTable),
        ))

    instanceSecurityGroup = t.add_resource(
        SecurityGroup(
            'InstanceSecurityGroup',
            GroupDescription='%sSecurityGroup' % service_name,
            SecurityGroupIngress=[
                SecurityGroupRule(IpProtocol='tcp',
                                  FromPort='22',
                                  ToPort='22',
                                  CidrIp=Ref(sshlocation_param)),
                SecurityGroupRule(IpProtocol='tcp',
                                  FromPort='80',
                                  ToPort='80',
                                  CidrIp='0.0.0.0/0'),
                SecurityGroupRule(IpProtocol='tcp',
                                  FromPort='443',
                                  ToPort='443',
                                  CidrIp='0.0.0.0/0'),
                SecurityGroupRule(IpProtocol='tcp',
                                  FromPort='0',
                                  ToPort='65535',
                                  CidrIp="10.0.0.0/8"),
            ],
            VpcId=Ref(VPC_ref),
        ))

    public_instance = t.add_resource(
        Instance(
            "Public%sInstance" % service_name,
            ImageId=FindInMap("PublicRegionMap", Ref("AWS::Region"), "AMI"),
            InstanceType=Ref(instanceType_param),
            KeyName=Ref(keyname_param),
            NetworkInterfaces=[
                NetworkInterfaceProperty(GroupSet=[Ref(instanceSecurityGroup)],
                                         AssociatePublicIpAddress='true',
                                         DeviceIndex='0',
                                         DeleteOnTermination='true',
                                         SubnetId=Ref(public_subnet))
            ],
            UserData=Base64(public_instance_userdata),
            Tags=Tags(Application=ref_stack_id,
                      Name='%sPublicInstance' % (service_name))))

    private_instance = t.add_resource(
        Instance(
            "Private%sInstance" % service_name,
            ImageId=FindInMap("PrivateRegionMap", Ref("AWS::Region"), "AMI"),
            InstanceType=Ref(instanceType_param),
            KeyName=Ref(keyname_param),
            NetworkInterfaces=[
                NetworkInterfaceProperty(GroupSet=[Ref(instanceSecurityGroup)],
                                         DeviceIndex='0',
                                         DeleteOnTermination='true',
                                         SubnetId=Ref(private_subnet))
            ],
            UserData=Base64(private_instance_userdata),
            Tags=Tags(Application=ref_stack_id,
                      Name='%sPrivateInstance' % (service_name)),
            IamInstanceProfile="%sPrivInstanceRole" % (service_name)))

    outputs = []
    outputs.append(
        Output(
            "PublicIP",
            Description="IP Address of Public Instance",
            Value=GetAtt(public_instance, "PublicIp"),
        ))
    t.add_output(outputs)

    # Set up S3 Bucket and CloudTrail
    S3Bucket = t.add_resource(Bucket("S3Bucket", DeletionPolicy="Retain"))
    S3PolicyDocument = awacs.aws.PolicyDocument(
        Id='EnforceServersideEncryption',
        Version='2012-10-17',
        Statement=[
            awacs.aws.Statement(
                Sid='PermitCTBucketPut',
                Action=[s3.PutObject],
                Effect=awacs.aws.Allow,
                Principal=awacs.aws.Principal("Service", ["cloudtrail.amazonaws.com"]),
                Resource=[Join('', [s3.ARN(''), Ref(S3Bucket), "/*"])],
            ),
            awacs.aws.Statement(
                Sid='PermitCTBucketACLRead',
                Action=[s3.GetBucketAcl],
                Effect=awacs.aws.Allow,
                Principal=awacs.aws.Principal("Service", ["cloudtrail.amazonaws.com"]),
                Resource=[Join('', [s3.ARN(''), Ref(S3Bucket)])],
            )
        ])
    S3BucketPolicy = t.add_resource(
        BucketPolicy("BucketPolicy",
                     PolicyDocument=S3PolicyDocument,
                     Bucket=Ref(S3Bucket),
                     DependsOn=[S3Bucket]))
    myTrail = t.add_resource(
        Trail(
            "CloudTrail",
            IsLogging=True,
            S3BucketName=Ref(S3Bucket),
            DependsOn=["BucketPolicy"],
        ))
    myTrail.IsMultiRegionTrail = True
    myTrail.IncludeGlobalServiceEvents = True

    return t.to_json()
def test_running_script_on_startup_using_user_data(self): test_stack_name = 'TestRunningUserData' init_cf_env(test_stack_name) ### now = str(datetime.datetime.now()) t = Template() security_group = t.add_resource( SecurityGroup("SecurityGroup", GroupDescription="Enable all ingress", VpcId=get_default_vpc(), SecurityGroupIngress=[ SecurityGroupRule(IpProtocol='tcp', CidrIp="0.0.0.0/0", FromPort=0, ToPort=65535), ], Tags=Tags( Application=Ref("AWS::StackName"), Developer="cisco::haoru", ))) instance = t.add_resource( Instance( "MyInstance", KeyName=KEY, InstanceType="m4.xlarge", ImageId=get_linux2_image_id( ), # linux2 has /opt/aws/bin/cfn-signal preinstalled NetworkInterfaces=[ NetworkInterfaceProperty(AssociatePublicIpAddress=True, DeviceIndex=0, GroupSet=[Ref(security_group)], SubnetId=get_first_subnet()), ], UserData=Base64( Join( '', [ '#!/bin/bash -xe\n', # user data that begins with shebang will be executed f'echo "{now}" > /tmp/now\n', '/opt/aws/bin/cfn-signal -e $? ', # send signal to let cloud formation know it is ready ' --stack ', Ref("AWS::StackName"), ' --resource MyInstance ', ' --region ', Ref("AWS::Region"), '\n' ])), CreationPolicy=CreationPolicy(ResourceSignal=ResourceSignal( Timeout='PT10M') # expect to receive signal in 10 mins ), Tags=Tags( Name="aws test user data", Application=Ref("AWS::StackName"), Developer="cisco::haoru", ), )) t.add_output([ Output( "InstanceId", Description="InstanceId of the newly created EC2 instance", Value=Ref(instance), ), Output( "PublicIP", Description= "Public IP address of the newly created EC2 instance", Value=GetAtt(instance, "PublicIp"), ), ]) dump_template(t, True) cf_client.create_stack(StackName=test_stack_name, TemplateBody=t.to_yaml()) cf_client.get_waiter('stack_create_complete').wait( StackName=test_stack_name) public_ip = key_find( cf_client.describe_stacks( StackName=test_stack_name)['Stacks'][0]['Outputs'], 'OutputKey', 'PublicIP')['OutputValue'] actual = run( f"ssh -o 'StrictHostKeyChecking no' ec2-user@{public_ip} cat /tmp/now" ) self.assertEqual(actual, now)
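# The test above relies on small helpers (key_find, run) that are defined elsewhere in its
# suite; a minimal sketch of what they might look like, under the assumption that run()
# simply executes a shell command and returns its trimmed stdout.
import subprocess

def key_find(items, key, value):
    """Return the first dict in `items` whose `key` equals `value`."""
    return next(item for item in items if item.get(key) == value)

def run(command):
    """Run a shell command and return its stdout without the trailing newline."""
    return subprocess.run(
        command, shell=True, check=True, capture_output=True, text=True
    ).stdout.strip()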
UserData=Base64(Join("", my_bootstrap_script('AmbariNode','true','true','127.0.0.1'))), ImageId=FindInMap("RHEL66", Ref("AWS::Region"), "AMI"), BlockDeviceMappings=[my_block_device_mappings_root("/dev/sd","100","gp2")], CreationPolicy=CreationPolicy( ResourceSignal=ResourceSignal( Count=1, Timeout="PT30M" )), KeyName=Ref(KeyName), IamInstanceProfile=Ref(AmbariInstanceProfile), InstanceType=Ref(AmbariInstanceType), NetworkInterfaces=[ NetworkInterfaceProperty( DeleteOnTermination="true", DeviceIndex="0", SubnetId=Ref(PublicSubnet), GroupSet=[Ref(AmbariSecurityGroup)], AssociatePublicIpAddress="true", ), ], )) WorkerNodeLaunchConfig = t.add_resource(LaunchConfiguration( "WorkerNodeLaunchConfig", UserData=Base64(Join("", my_bootstrap_script('WorkerNodes','true','false',ref_ambariserver))), ImageId=FindInMap("RHEL66", Ref("AWS::Region"), "AMI"), BlockDeviceMappings=If( "WorkerUseEBSBool", my_block_device_mappings_ebs(9,"/dev/sd","1000","gp2"), my_block_device_mappings_ephemeral(24,"/dev/sd") ), KeyName=Ref(KeyName),
def __init__(self): u""" Infrastructure Class Contructor """ self.aws = AWS() self.ami = AMI() self.ref_stack_id = Ref('AWS::StackId') self.ami_id = self.ami.minimal_linux_ami() # NOTE: Troposphere doesn't have a template feature to make KeyPairs # So handle this ad-hoc for now. self.keypair_name = 'test-deploy-keypair' if self.keypair_doesnt_exist(): self.create_keypair(self.keypair_name) self.deployment_bucket_prefix = 'test-deploy-bucket-' self.deployment_bucket_name = '{}{}'.format( self.deployment_bucket_prefix, uuid.uuid4().hex[:12].lower()) self.deployment_bucket_location = None if self.deploy_bucket_doesnt_exist(): self.deployment_bucket_location = self.create_deploy_bucket( self.deployment_bucket_name) else: self.deployment_bucket_location = self.get_bucket_url( self.deployment_bucket_name) self.server_certificate_name = 'test-deploy-certificate' self.server_certificate_arn = None if self.server_certificate_doesnt_exist(): self.server_certificate_arn = self.upload_server_certificate() self.template = Template() self.template.add_version('2010-09-09') self.template.add_description( 'AWS Cloudformation Template for autoscaled, load balance controlled EC2 service' ) self.template.add_parameter( Parameter('KeyName', Description='Name of an existing EC2 KeyPair', Default=self.keypair_name, Type='String')) self.template.add_parameter( Parameter('AmiId', Description='Lastest Minimal Linux AMI', Default=self.ami_id, Type='String')) self.template.add_parameter( Parameter('DeployBucketName', Description='Name of the deployment_bucket', Default=self.deployment_bucket_name, Type='String')) self.template.add_parameter( Parameter('DeployBucketLocation', Description='Location of the deployment_bucket', Default=self.deployment_bucket_location, Type='String')) self.template.add_parameter( Parameter('ServerCertificateArn', Description='Certificate ARN for the Load Balancer', Default=self.server_certificate_arn, Type='String')) self.sshlocation = self.template.add_parameter( Parameter( 'SSHLocation', Description= 'The IP address range that can be used to SSH to the EC2 instances', Type='String', MinLength='9', MaxLength='18', Default='0.0.0.0/0', AllowedPattern= r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})", ConstraintDescription=( "must be a valid IP CIDR range of the form x.x.x.x/x."))) self.vpc = self.template.add_resource( VPC('TestDeployVpc', CidrBlock='10.0.0.0/16', Tags=Tags(Application=self.ref_stack_id))) self.subnet = self.template.add_resource( Subnet('TestDeploySubnet', VpcId=Ref(self.vpc), CidrBlock='10.0.0.0/24', Tags=Tags(Application=self.ref_stack_id))) self.gateway = self.template.add_resource( InternetGateway('TestDeployGateway', Tags=Tags(Application=self.ref_stack_id))) self.gatewayattach = self.template.add_resource( VPCGatewayAttachment('AttachGateway', VpcId=Ref(self.vpc), InternetGatewayId=Ref(self.gateway))) self.route_table = self.template.add_resource( RouteTable('RouteTable', VpcId=Ref(self.vpc), Tags=Tags(Application=self.ref_stack_id))) self.route = self.template.add_resource( Route('Route', DependsOn='AttachGateway', GatewayId=Ref('TestDeployGateway'), DestinationCidrBlock='0.0.0.0/0', RouteTableId=Ref(self.route_table))) self.subnet_route_association = self.template.add_resource( SubnetRouteTableAssociation( 'SubnetRouteTableAssociation', SubnetId=Ref(self.subnet), RouteTableId=Ref(self.route_table), DependsOn=['TestDeploySubnet', 'RouteTable'])) self.network_acl = self.template.add_resource( NetworkAcl('NetworkAcl', VpcId=Ref(self.vpc), 
Tags=Tags(Application=self.ref_stack_id))) self.inbound_private_http = self.template.add_resource( NetworkAclEntry('InboundHTTP', NetworkAclId=Ref(self.network_acl), RuleNumber='100', Protocol='6', PortRange=PortRange(To='80', From='80'), Egress='false', RuleAction='allow', CidrBlock='0.0.0.0/0')) self.inbound_private_http_alt = self.template.add_resource( NetworkAclEntry('InboundHTTPAlt', NetworkAclId=Ref(self.network_acl), RuleNumber='101', Protocol='6', PortRange=PortRange(To='8000', From='8000'), Egress='false', RuleAction='allow', CidrBlock='0.0.0.0/0')) self.inbound_private_https = self.template.add_resource( NetworkAclEntry('InboundHTTPS', NetworkAclId=Ref(self.network_acl), RuleNumber='102', Protocol='6', PortRange=PortRange(To='443', From='443'), Egress='false', RuleAction='allow', CidrBlock='0.0.0.0/0')) self.inbound_ssh = self.template.add_resource( NetworkAclEntry('InboundSSH', NetworkAclId=Ref(self.network_acl), RuleNumber='103', Protocol='6', PortRange=PortRange(To='22', From='22'), Egress='false', RuleAction='allow', CidrBlock='0.0.0.0/0')) self.inbound_response = self.template.add_resource( NetworkAclEntry('InboundResponsePorts', NetworkAclId=Ref(self.network_acl), RuleNumber='104', Protocol='6', PortRange=PortRange(To='65535', From='1024'), Egress='false', RuleAction='allow', CidrBlock='0.0.0.0/0')) self.outbound_http = self.template.add_resource( NetworkAclEntry('OutboundHTTP', NetworkAclId=Ref(self.network_acl), RuleNumber='100', Protocol='6', PortRange=PortRange(To='80', From='80'), Egress='true', RuleAction='allow', CidrBlock='0.0.0.0/0')) self.outbound_https = self.template.add_resource( NetworkAclEntry('OutboundHTTPS', NetworkAclId=Ref(self.network_acl), RuleNumber='101', Protocol='6', PortRange=PortRange(To='443', From='443'), Egress='true', RuleAction='allow', CidrBlock='0.0.0.0/0')) self.outbound_response = self.template.add_resource( NetworkAclEntry('OutboundResponsePorts', NetworkAclId=Ref(self.network_acl), RuleNumber='102', Protocol='6', PortRange=PortRange(To='65535', From='1024'), Egress='true', RuleAction='allow', CidrBlock='0.0.0.0/0')) self.subnet_network_association = self.template.add_resource( SubnetNetworkAclAssociation( 'SubnetNetworkACLAssociation', SubnetId=Ref(self.subnet), NetworkAclId=Ref(self.network_acl), DependsOn=['TestDeploySubnet', 'NetworkAcl'])) self.instance_security_group = self.template.add_resource( SecurityGroup('InstanceSecurityGroup', GroupDescription='Open all ports', SecurityGroupIngress=[ SecurityGroupRule(IpProtocol='tcp', FromPort='22', ToPort='22', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='1024', ToPort='65535', CidrIp='0.0.0.0/0') ], SecurityGroupEgress=[ SecurityGroupRule(IpProtocol='tcp', FromPort='1', ToPort='65535', CidrIp='0.0.0.0/0') ], VpcId=Ref(self.vpc))) self.instance = self.template.add_resource( Instance( 'TestDeployInstance', ImageId=Ref('AmiId'), InstanceType='t2.micro', KeyName=Ref('KeyName'), NetworkInterfaces=[ NetworkInterfaceProperty( GroupSet=[Ref('InstanceSecurityGroup')], AssociatePublicIpAddress='true', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref('TestDeploySubnet')) ], UserData=Base64( Join('', [ "#!/bin/bash\n", "apt-get update\n", "apt-get -y install python python-pip python-setuptools\n", "mkdir aws-cfn-bootstrap-latest\n", "curl https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz | tar xz -C aws-cfn-bootstrap-latest --strip-components 1\n", "easy_install aws-cfn-bootstrap-latest\n", "/usr/local/bin/cfn-init --stack ", { "Ref": 
"AWS::StackName" }, " --resource TestDeployInstance", " --region ", { "Ref": "AWS::Region" }, "\n", "/usr/local/bin/cfn-signal --exit-code $? '", { "Ref": "WaitHandle" }, "'\n" "\n", "python -m SimpleHTTPServer 8000 2>&1 >/dev/null &\n", ])), DependsOn=['InstanceSecurityGroup', 'TestDeploySubnet'], Tags=Tags(Application=self.ref_stack_id)))
def create_template(key_pair, region, type_instance): """ Create the Cloud Formation Template """ # Create references for the CFT ref_stack_id = Ref('AWS::StackId') ref_region = Ref('AWS::Region') ref_stack_name = Ref('AWS:StackName') # Add the template header t = Template() t.add_version('2010-09-09') t.add_description(PROJECT + ": " +\ 'AWS Cloud Formation Template for creating a custom VPC with '\ 'public subnet to host a simple web server on a single EC2 instance '\ 'with a security group that allows public HTTP and SSH traffic.') # Add the VPC vpc = t.add_resource( VPC('VPC', CidrBlock=VPC_CIDR, Tags=Tags(Application=ref_stack_id, Name=NAME_PREFIX + 'vpc', Project=PROJECT))) # Add a public Subnet for our web server public_subnet = t.add_resource( Subnet('Subnet', CidrBlock=SUBNET_CIDR, VpcId=Ref(vpc), Tags=Tags(Application=ref_stack_id, Name=NAME_PREFIX + 'public-subnet', Project=PROJECT))) # Add an IGW for public access internet_gateway = t.add_resource( InternetGateway('InternetGateway', Tags=Tags(Application=ref_stack_id, Name=NAME_PREFIX + 'igw', Project=PROJECT))) # Attach our IGW to our VPC gateway_attachment = t.add_resource( VPCGatewayAttachment('AttachGateway', VpcId=Ref(vpc), InternetGatewayId=Ref(internet_gateway))) # Add a custom Route Table for public access route_table = t.add_resource( RouteTable('RouteTable', VpcId=Ref(vpc), Tags=Tags(Application=ref_stack_id, Name=NAME_PREFIX + 'publicroutetable', Project=PROJECT))) # Add a public Route to our Route Table through our IGW public_route = t.add_resource( Route('Route', DependsOn='AttachGateway', GatewayId=Ref(internet_gateway), DestinationCidrBlock='0.0.0.0/0', RouteTableId=Ref(route_table))) # Associate our public Subnet with our public Route Table subnet_route_table_assoc = t.add_resource( SubnetRouteTableAssociation('SubnetRouteTableAssociation', SubnetId=Ref(public_subnet), RouteTableId=Ref(route_table))) # Add a new Network ACL for our public Subnet network_acl = t.add_resource( NetworkAcl('NetworkAcl', VpcId=Ref(vpc), Tags=Tags(Application=ref_stack_id, Name=NAME_PREFIX + 'networkacl', Project=PROJECT))) # Inbound ACL Rule for HTTP inboundHttpAclEntry = t.add_resource( NetworkAclEntry('InboundHTTPNetworkAclEntry', NetworkAclId=Ref(network_acl), RuleNumber='100', Protocol=6, PortRange=PortRange(To='80', From='80'), Egress='false', RuleAction='allow', CidrBlock='0.0.0.0/0')) # Inbound ACL Rule for HTTPS inboundHttpsAclEntry = t.add_resource( NetworkAclEntry('InboundHTTPSNetworkAclEntry', NetworkAclId=Ref(network_acl), RuleNumber='200', Protocol=6, PortRange=PortRange(To='443', From='443'), Egress='false', RuleAction='allow', CidrBlock='0.0.0.0/0')) # Inbound ACL Rule for SSH inboundSshAclEntry = t.add_resource( NetworkAclEntry('InboundSSHNetworkAclEntry', NetworkAclId=Ref(network_acl), RuleNumber='300', Protocol=6, PortRange=PortRange(To='22', From='22'), Egress='false', RuleAction='allow', CidrBlock='0.0.0.0/0')) # Inbound ACL Rule for Ephemeral Response Ports inboundResponsePortsAclEntry = t.add_resource( NetworkAclEntry('InboundResponsePortsNetworkAclEntry', NetworkAclId=Ref(network_acl), RuleNumber='400', Protocol=6, PortRange=PortRange(To='65535', From='1024'), Egress='false', RuleAction='allow', CidrBlock='0.0.0.0/0')) # Outbound ACL Rule for HTTP outboundHttpAclEntry = t.add_resource( NetworkAclEntry('OutboundHTTPNetworkAclEntry', NetworkAclId=Ref(network_acl), RuleNumber='100', Protocol=6, PortRange=PortRange(To='80', From='80'), Egress='true', RuleAction='allow', CidrBlock='0.0.0.0/0')) # Outbound ACL Rule 
for HTTPS outboundHttpsAclEntry = t.add_resource( NetworkAclEntry('OutboundHTTPSNetworkAclEntry', NetworkAclId=Ref(network_acl), RuleNumber='200', Protocol=6, PortRange=PortRange(To='443', From='443'), Egress='true', RuleAction='allow', CidrBlock='0.0.0.0/0')) # Outbound ACL Rule for SSH outboundSshAclEntry = t.add_resource( NetworkAclEntry('OutboundSSHNetworkAclEntry', NetworkAclId=Ref(network_acl), RuleNumber='300', Protocol=6, PortRange=PortRange(To='22', From='22'), Egress='true', RuleAction='allow', CidrBlock='0.0.0.0/0')) # Outbound ACL Rule for Ephemeral Response Ports outboundResponsePortsAclEntry = t.add_resource( NetworkAclEntry('OutboundResponsePortsNetworkAclEntry', NetworkAclId=Ref(network_acl), RuleNumber='400', Protocol=6, PortRange=PortRange(To='65535', From='1024'), Egress='true', RuleAction='allow', CidrBlock='0.0.0.0/0')) # Associate our public Subnet with our new Network ACL subnet_network_acl_assoc = t.add_resource( SubnetNetworkAclAssociation('SubnetNetworkAclAssociation', SubnetId=Ref(public_subnet), NetworkAclId=Ref(network_acl))) # Add a Security Group for our Web Server webserver_security_group = t.add_resource( SecurityGroup('SecurityGroup', GroupDescription='Web Server SG', SecurityGroupIngress=[ SecurityGroupRule(IpProtocol='tcp', ToPort='80', FromPort='80', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', ToPort='22', FromPort='22', CidrIp='0.0.0.0/0') ], VpcId=Ref(vpc))) # Add the Web Server Instance instance = t.add_resource( Instance( 'WebServerInstance', ImageId=supported_regions_ami_map[region], InstanceType=type_instance, KeyName=key_pair, NetworkInterfaces= [ NetworkInterfaceProperty( GroupSet=[Ref(webserver_security_group)], AssociatePublicIpAddress='true', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(public_subnet)) ], UserData=Base64( Join( '\n', [ '#!/bin/bash -x', 'yum install httpd -y', 'yum update -y', 'echo "<html><h1>Automation for the People</h1></html>" ' \ '> /var/www/html/index.html', 'service httpd start', 'chkconfig httpd on' ])), Tags=Tags( Application=ref_stack_id, Name=NAME_PREFIX+'webserver', Project=PROJECT))) # Add Outputs to the Template for the VPC ID and URL t.add_output([ Output('VPC', Description='VPC ID', Value=Ref(vpc)), Output('URL', Description='Web Server URL', Value=Join('', ['http://', GetAtt('WebServerInstance', 'PublicIp')])) ]) return t
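# Illustrative usage sketch: rendering the Template object returned by create_template()
# to disk so it can be reviewed or passed to the CLI. The key pair, region and instance
# type values below are placeholders, and the region must be a key of supported_regions_ami_map.
if __name__ == "__main__":
    template = create_template(
        key_pair="my-keypair",       # assumed existing EC2 key pair name
        region="us-east-1",          # must exist in supported_regions_ami_map
        type_instance="t2.micro",
    )
    with open("webserver-stack.json", "w") as handle:
        handle.write(template.to_json())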
# }, Tags=Tags(Name=Join(" - ", [ FindInMap("Environments", Ref("EnvironmentType"), "ValueTags"), "Perforce Helix App Server", Ref("AWS::StackName") ]), ), ImageId=FindInMap( "AWSRegionArch2AMI", Ref("AWS::Region"), FindInMap("AWSInstanceType2Arch", Ref("InstanceType"), "Arch")), KeyName=Ref("KeyName"), InstanceType=Ref("InstanceType"), NetworkInterfaces=[ NetworkInterfaceProperty( DeviceIndex="0", GroupSet=[Ref("VPCGroup")], DeleteOnTermination="true", AssociatePublicIpAddress="true", SubnetId=Ref("PublicSubnet"), PrivateIpAddress="10.0.0.51", ), ], DependsOn=["MainInstance"], )) HostRecord = t.add_resource( RecordSetType( "HostRecord", Comment="DNS name for my instance.", Name=Join("", [Ref("SiteName"), ".", Ref("HostedZone"), "."]), HostedZoneName=Join("", [Ref("HostedZone"), "."]), ResourceRecords=[GetAtt("MainInstance", "PublicIp")],
def _create_template(vpc_id, subnet_public_id, internetgateway_id, network_interface_id, allocation_id): """returns Template instance""" t = Template() t.add_version("2010-09-09") t.add_description("""Template to create SLM instance and bind to passed in Newtork Interface\ """) keyname_param = t.add_parameter( Parameter( "KeyName", ConstraintDescription= "must be the name of an existing EC2 KeyPair.", Description= "Name of an existing EC2 KeyPair to enable SSH access to \ the instance", Type="AWS::EC2::KeyPair::KeyName", )) sshlocation_param = t.add_parameter( Parameter( "RDPLocation", Description= " The IP address range that can be used to RDP to the EC2 \ instances", Type="String", MinLength="9", MaxLength="18", Default="0.0.0.0/0", AllowedPattern= r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})", ConstraintDescription=( "must be a valid IP CIDR range of the form x.x.x.x/x."), )) instanceType_param = t.add_parameter( Parameter( "InstanceType", Type="String", Description="SLM Server EC2 instance type", Default="t2.micro", AllowedValues=[ "t1.micro", "t2.micro", "t2.small", "t2.medium", "t3.micro", "t3.small", "t3.medium", "t3.large", "t3.xlarge", ], ConstraintDescription="must be a valid EC2 instance type.", )) t.add_mapping( "AWSInstanceType2Arch", { "t1.micro": { "Arch": "PV64" }, "t2.micro": { "Arch": "HVM64" }, "t2.small": { "Arch": "HVM64" }, "t2.medium": { "Arch": "HVM64" }, "t3.micro": { "Arch": "HVM64" }, "t3.small": { "Arch": "HVM64" }, "t3.medium": { "Arch": "HVM64" }, "t3.large": { "Arch": "HVM64" }, "t3.xlarge": { "Arch": "HVM64" }, }, ) # Windows_Server-2016-English-Full-Base-2018.09.15 t.add_mapping("AWSRegionArch2AMI", {"us-east-1": { "HVM64": "ami-01945499792201081" }}) ref_stack_id = Ref("AWS::StackId") ref_region = Ref("AWS::Region") ref_stack_name = Ref("AWS::StackName") routeTable = t.add_resource( RouteTable("RouteTable", VpcId=vpc_id, Tags=Tags(Application=ref_stack_id))) route = t.add_resource( Route( "Route", # DependsOn='AttachGateway', GatewayId=internetgateway_id, DestinationCidrBlock="0.0.0.0/0", RouteTableId=Ref(routeTable), )) subnetRouteTableAssociation = t.add_resource( SubnetRouteTableAssociation( "SubnetRouteTableAssociation", SubnetId=subnet_public_id, RouteTableId=Ref(routeTable), )) """ networkAcl = t.add_resource( NetworkAcl( 'NetworkAcl', VpcId=vpc_id, Tags=Tags( Application=ref_stack_id), )) inBoundPrivateNetworkAclEntry = t.add_resource( NetworkAclEntry( 'InboundHTTPNetworkAclEntry', NetworkAclId=Ref(networkAcl), RuleNumber='100', Protocol='6', PortRange=PortRange(To='80', From='80'), Egress='false', RuleAction='allow', CidrBlock='0.0.0.0/0', )) inboundSSHNetworkAclEntry = t.add_resource( NetworkAclEntry( 'InboundSSHNetworkAclEntry', NetworkAclId=Ref(networkAcl), RuleNumber='101', Protocol='6', PortRange=PortRange(To='22', From='22'), Egress='false', RuleAction='allow', CidrBlock='0.0.0.0/0', )) inboundResponsePortsNetworkAclEntry = t.add_resource( NetworkAclEntry( 'InboundResponsePortsNetworkAclEntry', NetworkAclId=Ref(networkAcl), RuleNumber='102', Protocol='6', PortRange=PortRange(To='65535', From='1024'), Egress='false', RuleAction='allow', CidrBlock='0.0.0.0/0', )) outBoundHTTPNetworkAclEntry = t.add_resource( NetworkAclEntry( 'OutBoundHTTPNetworkAclEntry', NetworkAclId=Ref(networkAcl), RuleNumber='100', Protocol='6', PortRange=PortRange(To='80', From='80'), Egress='true', RuleAction='allow', CidrBlock='0.0.0.0/0', )) outBoundHTTPSNetworkAclEntry = t.add_resource( NetworkAclEntry( 'OutBoundHTTPSNetworkAclEntry', 
NetworkAclId=Ref(networkAcl), RuleNumber='101', Protocol='6', PortRange=PortRange(To='443', From='443'), Egress='true', RuleAction='allow', CidrBlock='0.0.0.0/0', )) outBoundResponsePortsNetworkAclEntry = t.add_resource( NetworkAclEntry( 'OutBoundResponsePortsNetworkAclEntry', NetworkAclId=Ref(networkAcl), RuleNumber='102', Protocol='6', PortRange=PortRange(To='65535', From='1024'), Egress='true', RuleAction='allow', CidrBlock='0.0.0.0/0', )) subnetNetworkAclAssociation = t.add_resource( SubnetNetworkAclAssociation( 'SubnetNetworkAclAssociation', SubnetId=subnet_public_id, NetworkAclId=Ref(networkAcl), )) """ instanceSecurityGroup = t.add_resource( SecurityGroup( "InstanceSecurityGroup", GroupDescription="Enable RDP access via port 3389", SecurityGroupIngress=[ SecurityGroupRule( IpProtocol="tcp", FromPort="3389", ToPort="3389", CidrIp=Ref(sshlocation_param), ) ], VpcId=vpc_id, )) """ { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "s3:Get*", "s3:List*" ], "Resource": "*" } ] } """ slm_iam_role = t.add_resource( iam.Role( "SLMServerRole", AssumeRolePolicyDocument=Policy(Statement=[ Statement( Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["ec2.amazonaws.com"]), ) ]), Policies=[ iam.Policy( PolicyName="SLMServerPolicy", PolicyDocument=Policy(Statement=[ Statement( Effect=Allow, Action=[ Action("s3", "List*"), Action("s3", "Get*"), ], Resource=["arn:aws:s3:::*"], ) ]), ) ], )) slm_instanceprofile = t.add_resource( iam.InstanceProfile("SLMServerInstanceProfile", Roles=[Ref(slm_iam_role)])) # instance_metadata = Metadata() instance = t.add_resource( Instance( "SLMServerInstance", # Metadata=instance_metadata, ImageId=FindInMap( "AWSRegionArch2AMI", Ref("AWS::Region"), FindInMap("AWSInstanceType2Arch", Ref(instanceType_param), "Arch"), ), InstanceType=Ref(instanceType_param), KeyName=Ref(keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty( NetworkInterfaceId=network_interface_id, DeviceIndex="0") ], IamInstanceProfile=Ref(slm_instanceprofile), UserData=Base64( Join( "", [ "<powershell>\n", '$ErrorActionPreference = "Stop"\n', "net user Administrator %(password)s\n" % dict(password=ADMIN_PASSWORD), r"Read-S3Object -BucketName %(bucket_name)s -Key SLMLockInfo.zip -File \Users\Administrtor\Desktop\SLMLockInfo.zip\n" % dict(bucket_name=BUCKET_NAME), 'Rename-Computer -NewName "SLMServer" -Restart\n' "</powershell>\n", ], )), Tags=Tags(Name="FOQUS_SLMServer", Application=ref_stack_id), )) """ ipAddress = t.add_resource( EIP('IPAddress', DependsOn='AttachGateway', Domain='vpc', InstanceId=Ref(instance) )) """ t.add_output([ Output( "URL", Description="Newly created application URL", Value=Join( "", ["http://", GetAtt("SLMServerInstance", "PublicIp")]), ) ]) return t
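# Illustrative sketch of how _create_template() might be driven: the resource IDs below are
# placeholders for networking pieces created elsewhere (for example by a separate VPC stack),
# and the stack name and key pair are assumptions for the example.
import boto3

def launch_slm_stack():
    template = _create_template(
        vpc_id="vpc-0123456789abcdef0",
        subnet_public_id="subnet-0123456789abcdef0",
        internetgateway_id="igw-0123456789abcdef0",
        network_interface_id="eni-0123456789abcdef0",
        allocation_id="eipalloc-0123456789abcdef0",
    )
    boto3.client("cloudformation").create_stack(
        StackName="slm-server",
        TemplateBody=template.to_json(),
        Capabilities=["CAPABILITY_IAM"],   # the template creates an IAM role and instance profile
        Parameters=[{"ParameterKey": "KeyName", "ParameterValue": "my-keypair"}],
    )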
AllocationId=GetAtt(firewalldPublicNATEIP, "AllocationId"), NetworkInterfaceId=Ref(firewalldPublicNATInterface))) firewalldInstance = t.add_resource( Instance( 'Firewalld', AvailabilityZone=az, ImageId=FindInMap('RegionToInstanceToAMI', Ref('AWS::Region'), 'firewalld'), InstanceType="t2.medium", KeyName="Default key 3-16-16", UserData= "IyEvYmluL2Jhc2gKc2V0IC14CmV4ZWMgPiA+KHRlZSAvdmFyL2xvZy91c2VyLWRhdGEubG9nICkgMj4mMQplY2hvICJuZXQuaXB2NC5pcF9mb3J3YXJkID0gMSIgPj4gL2V0Yy9zeXNjdGwuY29uZgplY2hvICJuZXQuaXB2NC5jb25mLmFsbC5ycF9maWx0ZXIgPSAwIiA+PiAvZXRjL3N5c2N0bC5jb25mCmVjaG8gIm5ldC5pcHY0LmNvbmYuZGVmYXVsdC5ycF9maWx0ZXIgPSAwIiA+PiAvZXRjL3N5c2N0bC5jb25mCmVjaG8gIm5ldC5pcHY0LmNvbmYuZXRoMC5ycF9maWx0ZXIgPSAwIiA+PiAvZXRjL3N5c2N0bC5jb25mCmVjaG8gIm5ldC5pcHY0LmNvbmYuZXRoMS5ycF9maWx0ZXIgPSAwIiA+PiAvZXRjL3N5c2N0bC5jb25mCmVjaG8gIm5ldC5pcHY0LmNvbmYuZXRoMi5ycF9maWx0ZXIgPSAwIiA+PiAvZXRjL3N5c2N0bC5jb25mCmVjaG8gIm5ldC5pcHY0LmNvbmYubG8ucnBfZmlsdGVyID0gMCIgPj4gL2V0Yy9zeXNjdGwuY29uZgplY2hvIEdBVEVXQVk9MTAuMC4xLjEgPj4gL2V0Yy9zeXNjb25maWcvbmV0d29yawpzeXNjdGwgLXAKCmNwIC9ldGMvc3lzY29uZmlnL25ldHdvcmstc2NyaXB0cy9pZmNmZy1ldGgwIC9ldGMvc3lzY29uZmlnL25ldHdvcmstc2NyaXB0cy9pZmNmZy1ldGgxCnNlZCAtaSAncy8iZXRoMCIvImV0aDEiLycgL2V0Yy9zeXNjb25maWcvbmV0d29yay1zY3JpcHRzL2lmY2ZnLWV0aDEKZWNobyAiSFdBRERSPSQoY2F0IC9zeXMvY2xhc3MvbmV0L2V0aDEvYWRkcmVzcyB8IHRyIC1kICdcbicgfCB0ciAtZCAnICcpIiA+PiAvZXRjL3N5c2NvbmZpZy9uZXR3b3JrLXNjcmlwdHMvaWZjZmctZXRoMQpjcCAvZXRjL3N5c2NvbmZpZy9uZXR3b3JrLXNjcmlwdHMvaWZjZmctZXRoMCAvZXRjL3N5c2NvbmZpZy9uZXR3b3JrLXNjcmlwdHMvaWZjZmctZXRoMgpzZWQgLWkgJ3MvImV0aDAiLyJldGgyIi8nIC9ldGMvc3lzY29uZmlnL25ldHdvcmstc2NyaXB0cy9pZmNmZy1ldGgyCmVjaG8gIkhXQUREUj0kKGNhdCAvc3lzL2NsYXNzL25ldC9ldGgyL2FkZHJlc3MgfCB0ciAtZCAnXG4nIHwgdHIgLWQgJyAnKSIgPj4gL2V0Yy9zeXNjb25maWcvbmV0d29yay1zY3JpcHRzL2lmY2ZnLWV0aDIKZWNobyBaT05FPWludGVybmFsID4+IC9ldGMvc3lzY29uZmlnL25ldHdvcmstc2NyaXB0cy9pZmNmZy1ldGgwCmVjaG8gWk9ORT1leHRlcm5hbCA+PiAvZXRjL3N5c2NvbmZpZy9uZXR3b3JrLXNjcmlwdHMvaWZjZmctZXRoMQplY2hvIFpPTkU9cHVibGljID4+IC9ldGMvc3lzY29uZmlnL25ldHdvcmstc2NyaXB0cy9pZmNmZy1ldGgyCnNlcnZpY2UgbmV0d29yayByZXN0YXJ0Cnl1bSBpbnN0YWxsIC15IGZpcmV3YWxsZAoKIyBpcHRhYmxlcyAtdCBtYW5nbGUgLUkgUFJFUk9VVElORyAtaSBldGgwIC1wIHRjcCAtcyAxMC4wLjIuNTAgLWogTUFSSyAtLXNldC1tYXJrIDIKIyBpcHRhYmxlcyAtdCBuYXQgLUEgUE9TVFJPVVRJTkcgISAtcyAxMC4wLjIuNTAgLWogTUFTUVVFUkFERQojIGlwdGFibGVzIC10IG5hdCAtSSBQT1NUUk9VVElORyAtcyAxMC4wLjIuNTAgLWogU05BVCAtLXRvLXNvdXJjZSAxMC4wLjEuMTAgCiMgaXB0YWJsZXMgLXQgbmF0IC1JIFBSRVJPVVRJTkcgLWkgZXRoMSAtaiBETkFUIC0tdG8gMTAuMC4yLjUwCgpmaXJld2FsbC1vZmZsaW5lLWNtZCAtLXpvbmU9ZXh0ZXJuYWwgLS1hZGQtc2VydmljZT1kaGNwdjYtY2xpZW50CmZpcmV3YWxsLW9mZmxpbmUtY21kIC0tem9uZT1wdWJsaWMgLS1hZGQtbWFzcXVlcmFkZQpmaXJld2FsbC1vZmZsaW5lLWNtZCAtLXpvbmU9cHVibGljIC0tYWRkLWZvcndhcmQtcG9ydD1wb3J0PTIyMjI6cHJvdG89dGNwOnRvYWRkcj0xMC4wLjIuMjA6dG9wb3J0PTIyCmZpcmV3YWxsLW9mZmxpbmUtY21kIC0tZGlyZWN0IC0tYWRkLXJ1bGUgaXB2NCBuYXQgUFJFUk9VVElORyAwIC1pIGV0aDEgLWogRE5BVCAtLXRvLWRlc3RpbmF0aW9uIDEwLjAuMi41MApmaXJld2FsbC1vZmZsaW5lLWNtZCAtLWRpcmVjdCAtLWFkZC1ydWxlIGlwdjQgbmF0IFBPU1RST1VUSU5HIDAgLXMgMTAuMC4yLjUwIC1qIFNOQVQgLS10by1zb3VyY2UgMTAuMC4xLjEwCmZpcmV3YWxsLW9mZmxpbmUtY21kIC0tZGlyZWN0IC0tYWRkLXJ1bGUgaXB2NCBtYW5nbGUgUFJFUk9VVElORyAwIC1pIGV0aDAgLXMgMTAuMC4yLjUwIC1qIE1BUksgLS1zZXQtbWFyayAyCmZpcmV3YWxsLW9mZmxpbmUtY21kIC0tZGlyZWN0IC0tYWRkLXJ1bGUgaXB2NCBmaWx0ZXIgRk9SV0FSRCAwIC1pIGV0aDEgLW8gZXRoMCAtaiBBQ0NFUFQKZmlyZXdhbGwtb2ZmbGluZS1jbWQgLS1kaXJlY3QgLS1hZGQtcnVsZSBpcHY0IGZpbHRlciBGT1JXQVJEIDAgLWkgZXRoMCAtbyBldGgxIC1qIEFDQ0VQVApzeXN0ZW1jdGwgcmVzdGFydCBmaXJld2FsbGQuc2VydmljZQpzeXN0ZW1jdGwgcmVzdGFydCBuZXR3b3JrLnNlcnZpY2UKCmlwIHJ1bGUgYWRkIGZ3bWFy
ayAyIHRhYmxlIDMKaXAgcm91dGUgYWRkIGRlZmF1bHQgdmlhIDEwLjAuMS4xIGRldiBldGgxIHRhYmxlIDMKaXAgcm91dGUgZmx1c2ggY2FjaGUKc2VkIC1pICckaWlwIHJ1bGUgYWRkIGZ3bWFyayAyIHRhYmxlIDNcbmlwIHJvdXRlIGFkZCBkZWZhdWx0IHZpYSAxMC4wLjEuMSBkZXYgZXRoMSB0YWJsZSAzXG5pcCByb3V0ZSBmbHVzaCBjYWNoZScgL2V0Yy9yYy5kL3JjLmxvY2FsCmNobW9kICt4IC9ldGMvcmMuZC9yYy5sb2NhbApzZWQgLWkgJ3MvUGFzc3dvcmRBdXRoZW50aWNhdGlvbiBuby9QYXNzd29yZEF1dGhlbnRpY2F0aW9uIHllcy8nIC9ldGMvc3NoL3NzaGRfY29uZmlnCnNlZCAtaSAncy9yb290OlteOl0qL3Jvb3Q6JDEkWGxReUFsbnYkbVlhSjNIN0hsNXpqeFY5ZW5wWDJNMS8nIC9ldGMvc2hhZG93CnNlZCAtaSAncy9jZW50b3M6W146XSovY2VudG9zOiQxJFhsUXlBbG52JG1ZYUozSDdIbDV6anhWOWVucFgyTTEvJyAvZXRjL3NoYWRvdwpybSAtcmYgL2hvbWUvY2VudG9zLy5zc2gKcm0gLXJmIC9yb290Ly5zc2gKcm0gLXJmIC9ldGMvaG9zdG5hbWUKZWNobyBmaXJld2FsbCA+PiAvZXRjL2hvc3RuYW1lCnJlYm9vdA==", NetworkInterfaces=[ NetworkInterfaceProperty( DeviceIndex='0', NetworkInterfaceId=Ref(firewalldPrivateSideInterface)), NetworkInterfaceProperty( DeviceIndex='1', NetworkInterfaceId=Ref(firewalldPublicWebInterface)), NetworkInterfaceProperty( DeviceIndex='2', NetworkInterfaceId=Ref(firewalldPublicNATInterface)) ], Tags=Tags(Name=Join(" ", ["Team", team_number, "Firewalld Instance"]), Team=team_number))) privateSubnetDefaultRoute = t.add_resource( Route( 'PrivateSubnetDefaultRoute', NetworkInterfaceId=Ref(firewalldPrivateSideInterface),
Instance( 'amazonNATInstance1', ImageId=FindInMap( mappings.ami_nat_instanceAWSRegionArch2AMI[mappings.logicalName], ref_region, FindInMap('AWSInstanceType2Arch', Ref(amazon_NAT_instance_type_param), 'Arch')), InstanceType=Ref(amazon_NAT_instance_type_param), KeyName=Ref(ssh_key_param), SourceDestCheck=False, Tags=Tags(Name="NAT Server", ), NetworkInterfaces=[ NetworkInterfaceProperty( GroupSet=[Ref(hadoop_cluser_sg), Ref(public_api_sg)], AssociatePublicIpAddress='true', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(public_api_subnet)) ], # SecurityGroupIds=[Ref(hadoop_cluser_sg), Ref(pulic_api_sg)], # SubnetId=Ref(public_api_subnet), DependsOn=internetGateway.title)) # Private Route Table needed only if using # NAT Instance to provide 'Internet' access private_route_table = t.add_resource( Route( 'privateInternetRoute1', DependsOn='AttachGateway',
def create(): from troposphere.ec2 import PortRange, NetworkAcl, Route, \ VPCGatewayAttachment, SubnetRouteTableAssociation, Subnet, RouteTable, \ VPC, NetworkInterfaceProperty mydb = mysql.connector.connect(host="localhost", user="******", passwd="AmazingTheory62", database="cloud_formation") mycursor = mydb.cursor() mycursor.execute("SELECT * FROM customize_table") myresult = (mycursor.fetchone()) sname = myresult[0] instance1 = myresult[1] instancetype1 = myresult[2] instance2 = myresult[3] instancetype2 = myresult[4] dbname = myresult[5] dbuser = myresult[6] dbpassword = myresult[7] dbstorage = myresult[8] dbinstance = myresult[9] vpcname = myresult[10] subnetname = myresult[11] t = Template() t.add_version('2010-09-09') t.set_description("""\ AWS CloudFormation Sample Template VPC_Single_Instance_In_Subnet: Sample \ template showing how to create a VPC and add an EC2 instance with an Elastic \ IP address and a security group. \ **WARNING** This template creates an Amazon EC2 instance. You will be billed \ for the AWS resources used if you create a stack from this template.""") keyname_param = t.add_parameter( Parameter( 'KeyName', ConstraintDescription= 'must be the name of an existing EC2 KeyPair.', Description= 'Name of an existing EC2 KeyPair to enable SSH access to \ the instance', Type='AWS::EC2::KeyPair::KeyName', Default='jayaincentiuskey', )) sshlocation_param = t.add_parameter( Parameter( 'SSHLocation', Description= ' The IP address range that can be used to SSH to the EC2 \ instances', Type='String', MinLength='9', MaxLength='18', Default='0.0.0.0/0', AllowedPattern= r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})", ConstraintDescription=( "must be a valid IP CIDR range of the form x.x.x.x/x."), )) instanceType_param = t.add_parameter( Parameter( 'InstanceType', Type='String', Description='WebServer EC2 instance type', Default=instancetype1, AllowedValues=[ 't1.micro', 't2.micro', 't2.small', 't2.medium', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'c1.medium', 'c1.xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'g2.2xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'hi1.4xlarge', 'hs1.8xlarge', 'cr1.8xlarge', 'cc2.8xlarge', 'cg1.4xlarge', ], ConstraintDescription='must be a valid EC2 instance type.', )) instanceType_param1 = t.add_parameter( Parameter( 'SecindInstanceType', Type='String', Description='WebServer EC2 instance type', Default=instancetype2, AllowedValues=[ 't1.micro', 't2.micro', 't2.small', 't2.medium', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge', 'c1.medium', 'c1.xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'g2.2xlarge', 'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'hi1.4xlarge', 'hs1.8xlarge', 'cr1.8xlarge', 'cc2.8xlarge', 'cg1.4xlarge', ], ConstraintDescription='must be a valid EC2 instance type.', )) t.add_mapping( 'AWSInstanceType2Arch', { 't1.micro': { 'Arch': 'PV64' }, 't2.micro': { 'Arch': 'HVM64' }, 't2.small': { 'Arch': 'HVM64' }, 't2.medium': { 'Arch': 'HVM64' }, 'm1.small': { 'Arch': 'PV64' }, 'm1.medium': { 'Arch': 'PV64' }, 'm1.large': { 'Arch': 'PV64' }, 'm1.xlarge': { 'Arch': 'PV64' }, 'm2.xlarge': { 'Arch': 'PV64' }, 'm2.2xlarge': 
{ 'Arch': 'PV64' }, 'm2.4xlarge': { 'Arch': 'PV64' }, 'm3.medium': { 'Arch': 'HVM64' }, 'm3.large': { 'Arch': 'HVM64' }, 'm3.xlarge': { 'Arch': 'HVM64' }, 'm3.2xlarge': { 'Arch': 'HVM64' }, 'c1.medium': { 'Arch': 'PV64' }, 'c1.xlarge': { 'Arch': 'PV64' }, 'c3.large': { 'Arch': 'HVM64' }, 'c3.xlarge': { 'Arch': 'HVM64' }, 'c3.2xlarge': { 'Arch': 'HVM64' }, 'c3.4xlarge': { 'Arch': 'HVM64' }, 'c3.8xlarge': { 'Arch': 'HVM64' }, 'g2.2xlarge': { 'Arch': 'HVMG2' }, 'r3.large': { 'Arch': 'HVM64' }, 'r3.xlarge': { 'Arch': 'HVM64' }, 'r3.2xlarge': { 'Arch': 'HVM64' }, 'r3.4xlarge': { 'Arch': 'HVM64' }, 'r3.8xlarge': { 'Arch': 'HVM64' }, 'i2.xlarge': { 'Arch': 'HVM64' }, 'i2.2xlarge': { 'Arch': 'HVM64' }, 'i2.4xlarge': { 'Arch': 'HVM64' }, 'i2.8xlarge': { 'Arch': 'HVM64' }, 'hi1.4xlarge': { 'Arch': 'HVM64' }, 'hs1.8xlarge': { 'Arch': 'HVM64' }, 'cr1.8xlarge': { 'Arch': 'HVM64' }, 'cc2.8xlarge': { 'Arch': 'HVM64' }, }) t.add_mapping( 'AWSRegionArch2AMI', { 'us-east-1': { 'PV64': 'ami-50842d38', 'HVM64': 'ami-08842d60', 'HVMG2': 'ami-3a329952' }, 'us-west-2': { 'PV64': 'ami-af86c69f', 'HVM64': 'ami-8786c6b7', 'HVMG2': 'ami-47296a77' }, 'us-west-1': { 'PV64': 'ami-c7a8a182', 'HVM64': 'ami-cfa8a18a', 'HVMG2': 'ami-331b1376' }, 'eu-west-1': { 'PV64': 'ami-aa8f28dd', 'HVM64': 'ami-748e2903', 'HVMG2': 'ami-00913777' }, 'ap-southeast-1': { 'PV64': 'ami-20e1c572', 'HVM64': 'ami-d6e1c584', 'HVMG2': 'ami-fabe9aa8' }, 'ap-northeast-1': { 'PV64': 'ami-21072820', 'HVM64': 'ami-35072834', 'HVMG2': 'ami-5dd1ff5c' }, 'ap-southeast-2': { 'PV64': 'ami-8b4724b1', 'HVM64': 'ami-fd4724c7', 'HVMG2': 'ami-e98ae9d3' }, 'sa-east-1': { 'PV64': 'ami-9d6cc680', 'HVM64': 'ami-956cc688', 'HVMG2': 'NOT_SUPPORTED' }, 'cn-north-1': { 'PV64': 'ami-a857c591', 'HVM64': 'ami-ac57c595', 'HVMG2': 'NOT_SUPPORTED' }, 'eu-central-1': { 'PV64': 'ami-a03503bd', 'HVM64': 'ami-b43503a9', 'HVMG2': 'ami-b03503ad' }, }) ref_stack_id = Ref('AWS::StackId') ref_region = Ref('AWS::Region') ref_stack_name = Ref('AWS::StackName') VPC = t.add_resource( VPC('VPC', CidrBlock='10.0.0.0/16', Tags=Tags(Name=vpcname, Application=ref_stack_id))) subnet = t.add_resource( Subnet('publicSubnet', CidrBlock='10.0.1.0/24', AvailabilityZone='us-west-2b', VpcId=Ref(VPC), Tags=Tags(Name=subnetname, Application=ref_stack_id))) subnet1 = t.add_resource( Subnet('publicSubnet1', CidrBlock='10.0.3.0/24', AvailabilityZone='us-west-2a', VpcId=Ref(VPC), Tags=Tags(Name=subnetname, Application=ref_stack_id))) publicsubnet = t.add_resource( Subnet('PrivateSubnet', CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a', VpcId=Ref(VPC), Tags=Tags(Name=subnetname, Application=ref_stack_id))) publicsubnet1 = t.add_resource( Subnet('PrivateSubnet1', CidrBlock='10.0.2.0/24', AvailabilityZone='us-west-2b', VpcId=Ref(VPC), Tags=Tags(Name=subnetname, Application=ref_stack_id))) internetGateway = t.add_resource( InternetGateway('InternetGateway', Tags=Tags(Application=ref_stack_id))) gatewayAttachment = t.add_resource( VPCGatewayAttachment('AttachGateway', VpcId=Ref(VPC), InternetGatewayId=Ref(internetGateway))) routeTable = t.add_resource( RouteTable('RouteTable', VpcId=Ref(VPC), Tags=Tags(Application=ref_stack_id))) route = t.add_resource( Route( 'Route', DependsOn='AttachGateway', GatewayId=Ref('InternetGateway'), DestinationCidrBlock='0.0.0.0/0', RouteTableId=Ref(routeTable), )) routeTable1 = t.add_resource( RouteTable('PrivateRouteTable', VpcId=Ref(VPC), Tags=Tags(Application=ref_stack_id))) # route1 = t.add_resource( # Route( # 'PublicRoute', # DependsOn='AttachGateway', # 
GatewayId=Ref('InternetGateway'), # DestinationCidrBlock='0.0.0.0/0', # RouteTableId=Ref(routeTable1), # )) subnetRouteTableAssociation = t.add_resource( SubnetRouteTableAssociation( 'SubnetRouteTableAssociation', SubnetId=Ref(subnet), RouteTableId=Ref(routeTable), )) subnetRouteTableAssociation1 = t.add_resource( SubnetRouteTableAssociation( 'SubnetRouteTableAssociation1', SubnetId=Ref(subnet1), RouteTableId=Ref(routeTable), )) subnetRouteTableAssociation2 = t.add_resource( SubnetRouteTableAssociation( 'SubnetRouteTable1Association2', SubnetId=Ref(publicsubnet), RouteTableId=Ref(routeTable1), )) subnetRouteTableAssociation3 = t.add_resource( SubnetRouteTableAssociation( 'SubnetRouteTable1Association3', SubnetId=Ref(publicsubnet1), RouteTableId=Ref(routeTable1), )) # # subnetRouteTableAssociation4= t.add_resource( # SubnetRouteTableAssociation( # 'SubnetRouteTable1Association4', # SubnetId=Ref(subnet1), # RouteTableId=Ref(routeTable1), # )) instanceSecurityGroup = t.add_resource( SecurityGroup( 'InstanceSecurityGroup', GroupDescription='Enable SSH access via port 22', SecurityGroupIngress=[ SecurityGroupRule(IpProtocol='tcp', FromPort='22', ToPort='22', CidrIp=Ref(sshlocation_param)), SecurityGroupRule(IpProtocol='tcp', FromPort='80', ToPort='80', CidrIp='0.0.0.0/0') ], VpcId=Ref(VPC), )) instance = t.add_resource( Instance( 'WebServerInstance', #Metadata=instance_metadata, ImageId=FindInMap( 'AWSRegionArch2AMI', Ref('AWS::Region'), FindInMap('AWSInstanceType2Arch', Ref(instanceType_param), 'Arch')), InstanceType=Ref(instanceType_param), KeyName=Ref(keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty(GroupSet=[Ref(instanceSecurityGroup)], AssociatePublicIpAddress='true', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(subnet)) ], Tags=Tags(Name=instance1, Application=ref_stack_id), )) instance1 = t.add_resource( Instance( 'WebServerInstance1', #Metadata=instance_metadata, ImageId=FindInMap( 'AWSRegionArch2AMI', Ref('AWS::Region'), FindInMap('AWSInstanceType2Arch', Ref(instanceType_param1), 'Arch')), InstanceType=Ref(instanceType_param1), KeyName=Ref(keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty(GroupSet=[Ref(instanceSecurityGroup)], AssociatePublicIpAddress='true', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(subnet1)) ], Tags=Tags(Name=instance2, Application=ref_stack_id), )) # rdssubnet = t.add_parameter(Parameter( # "Subnets", # Type="CommaDelimitedList", # Default=Ref(publicsubnet1), # Description=( # "The list of SubnetIds, for at least two Availability Zones in the " # "region in your Virtual Private Cloud (VPC)") # )) mydbsubnetgroup = t.add_resource( DBSubnetGroup( "MyDBSubnetGroup", DBSubnetGroupDescription= "Subnets available for the RDS DB Instance", # Type="CommaDelimitedList", SubnetIds=[Ref(publicsubnet), Ref(publicsubnet1)], )) mydb = t.add_resource( DBInstance( "MyDB", DBName=dbname, AllocatedStorage=dbstorage, DBInstanceClass=dbinstance, Engine="MySQL", EngineVersion="5.5", MasterUsername=dbuser, MasterUserPassword=dbpassword, DBSubnetGroupName=Ref(mydbsubnetgroup), )) t.add_output( Output("JDBCConnectionString", Description="JDBC connection string for database", Value=Join("", [ "jdbc:mysql://", GetAtt("MyDB", "Endpoint.Address"), ":", GetAtt("MyDB", "Endpoint.Port"), "/", "customizedb" ]))) # print(t.to_json()) file = open('customizejson.json', 'w') file.write(t.to_json()) file.close() os.system('aws cloudformation create-stack --stack-name ' + sname + ' --template-body file://customizejson.json') # create()
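# The snippet above shells out to the AWS CLI; a boto3-based sketch of the same step, shown
# only as an alternative. The helper name is an assumption; the file name mirrors the one
# written above.
import boto3

def create_stack_with_boto3(stack_name, template_path="customizejson.json"):
    with open(template_path) as handle:
        body = handle.read()
    cf = boto3.client("cloudformation")
    cf.create_stack(StackName=stack_name, TemplateBody=body)
    cf.get_waiter("stack_create_complete").wait(StackName=stack_name)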
# customUserData = customUserData.replace("hostnamekey",natAG_hostname) # customUserData = customUserData.replace("s3commonbucket",config['S3_COMMON']) instance = t.add_resource( Instance( config_file['ec2']['name'], ImageId=FindInMap( 'AWSRegionArch2AMI', config_file['vpc']['region'], FindInMap('AWSInstanceType2Arch', config_file['ec2']['type'], 'Arch')), InstanceType=config_file['ec2']['type'], KeyName=Ref(keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty(GroupSet=[Ref(instanceSecurityGroup)], AssociatePublicIpAddress='true', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(subnet)) ], CreationPolicy=CreationPolicy(ResourceSignal=ResourceSignal( Timeout='PT15M')), Tags=Tags(Application=ref_stack_id), )) ipAddress = t.add_resource( EIP('IPAddress', DependsOn='AttachGateway', Domain='vpc', InstanceId=Ref(instance))) # WRITE TEMPLATE
def generate_template(): template = Template() ref_stack_id = Ref('AWS::StackId') ref_region = Ref('AWS::Region') ref_stack_name = Ref('AWS::StackName') template.add_description( 'Counter Strike Source Dedicated Server instances Stack implementing a linux ' + 'server and installing the dedicated server on it') aws_access_key = template.add_parameter( Parameter('AWSAccessKey', Type='String', Description='AWS Access Key')) aws_secret_key = template.add_parameter( Parameter('AWSSecretKey', Type='String', Description='AWS Secret Key')) css_instance_name = template.add_parameter( Parameter('CSSInstanceName', Default='css-server', Type='String', Description='The Name tag for the CSS Server instance.')) ami_id_linux = template.add_parameter( Parameter('AmiIdLinux', Default='ami-82f4dae7', Type='AWS::EC2::Image::Id', Description='Instances in the DMZ will use this AMI.')) instance_type = template.add_parameter( Parameter( 'InstanceType', Type='String', Description='Instances launched will use this EC2 Instance type.', AllowedValues=[ 't2.nano', 't2.micro', 't2.small', 't2.medium', 'c3.large', 'c3.xlarge', 'c3.2xlarge', 'c4.large', 'c4.xlarge', 'c4.2xlarge', 'm4.large' ], ConstraintDescription='must be a supported EC2 Instance type')) vpc_id = template.add_parameter(Parameter( 'VPCId', Type='String', )) public_subnet = template.add_parameter( Parameter( 'PublicSubnet1', Type='String', )) iam_role = template.add_parameter( Parameter('IAMRole', Type='String', Description='The IAM role associated with the instances.')) keyname = template.add_parameter( Parameter( 'KeyName', Type='AWS::EC2::KeyPair::KeyName', Description= 'Instances in the Auto Scaling Group will use this ssh key.')) css_init_config_script = template.add_parameter( Parameter("CSSInitConfigScript", Type="String", Description="File containing initial configuration script")) css_install_script = template.add_parameter( Parameter( "CSSInstallScript", Type="String", Description="File containing installation script for CSS server")) css_mods_tgz = template.add_parameter( Parameter("CSSModsTgz", Type="String", Description="File containing mods of the CSS server")) css_mapcycle_txt = template.add_parameter( Parameter("CSSMapcycleTxt", Type="String", Description="mapcycle.txt of the CSS server")) css_server_cfg = template.add_parameter( Parameter("CSSServerCfg", Type="String", Description="server.cfg of the CSS server")) css_rcon_password = template.add_parameter( Parameter("CSSRconPassword", Type="String", Description="RCON password of the CSS server")) bucket_name = template.add_parameter( Parameter("BucketName", Type="String", Description="Name of the S3 Bucket")) # Create Security Groups sshlocation_param = template.add_parameter( Parameter( 'SSHLocation', Description= ' The IP address range that can be used to SSH to the EC2 instances', Type='String', MinLength='9', MaxLength='18', Default='0.0.0.0/0', AllowedPattern= "(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})", ConstraintDescription=( "must be a valid IP CIDR range of the form x.x.x.x/x."), )) public_security_group = template.add_resource( SecurityGroup( 'PublicSecurityGroup', GroupDescription='Security group for instances in the DMZ', SecurityGroupIngress=[ SecurityGroupRule(IpProtocol='icmp', FromPort='8', ToPort='-1', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='22', ToPort='22', CidrIp=Ref(sshlocation_param)), SecurityGroupRule(IpProtocol='tcp', FromPort='1200', ToPort='1200', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='udp', FromPort='1200', 
ToPort='1200', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='udp', FromPort='27005', ToPort='27005', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='27015', ToPort='27015', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='udp', FromPort='27015', ToPort='27015', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='udp', FromPort='27020', ToPort='27020', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='udp', FromPort='26901', ToPort='26901', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='icmpv6', FromPort='-1', ToPort='-1', CidrIpv6='::/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='22', ToPort='22', CidrIpv6='::/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='1200', ToPort='1200', CidrIpv6='::/0'), SecurityGroupRule(IpProtocol='udp', FromPort='1200', ToPort='1200', CidrIpv6='::/0'), SecurityGroupRule(IpProtocol='udp', FromPort='27005', ToPort='27005', CidrIpv6='::/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='27015', ToPort='27015', CidrIpv6='::/0'), SecurityGroupRule(IpProtocol='udp', FromPort='27015', ToPort='27015', CidrIpv6='::/0'), SecurityGroupRule(IpProtocol='udp', FromPort='27020', ToPort='27020', CidrIpv6='::/0'), SecurityGroupRule(IpProtocol='udp', FromPort='26901', ToPort='26901', CidrIpv6='::/0') ], SecurityGroupEgress=[ SecurityGroupRule(IpProtocol='icmp', FromPort='8', ToPort='-1', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='80', ToPort='80', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='443', ToPort='443', CidrIp='0.0.0.0/0'), SecurityGroupRule(IpProtocol='icmpv6', FromPort='-1', ToPort='-1', CidrIpv6='::/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='80', ToPort='80', CidrIpv6='::/0'), SecurityGroupRule(IpProtocol='tcp', FromPort='443', ToPort='443', CidrIpv6='::/0') ], VpcId=Ref(vpc_id), )) # Create CSS Server instance in the public subnet css_server_instance = template.add_resource( Instance( 'CSSServerInstance', ImageId=Ref(ami_id_linux), InstanceType=Ref(instance_type), KeyName=Ref(keyname), IamInstanceProfile=Ref(iam_role), NetworkInterfaces=[ NetworkInterfaceProperty(GroupSet=[Ref(public_security_group)], AssociatePublicIpAddress='false', DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(public_subnet)) ], Tags=Tags(Name=Ref(css_instance_name), Application=ref_stack_id), UserData=Base64( Join('', [ '#!/bin/bash -xe\n', 'echo LC_ALL="en_US.UTF-8" >> /etc/environment\n', 'export LC_ALL="en_US.UTF-8"\n', 'apt-get update\n', 'apt-get -y install python-minimal\n', 'echo -e "import sys\nreload(sys)\nsys.setdefaultencoding(\'utf8\')" > /usr/local/lib/python2.7/dist-packages/setEncoding.py\n' 'echo PYTHONSTARTUP=/usr/local/lib/python2.7/dist-packages/setEncoding.py >> /etc/environment\n', 'export PYTHONSTARTUP=/usr/local/lib/python2.7/dist-packages/setEncoding.py\n', 'curl https://bootstrap.pypa.io/get-pip.py > /tmp/get-pip.py\n', 'python /tmp/get-pip.py\n', 'pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n', '/usr/local/bin/cfn-init -v ', ' --stack ', ref_stack_name, ' --resource CSSServerInstance ', ' --region ', ref_region, '\n' ])), Metadata=Metadata( Authentication({ "S3AccessCreds": AuthenticationBlock(type="S3", accessKeyId=Ref(aws_access_key), secretKey=Ref(aws_secret_key), buckets=Ref(bucket_name)) }), Init({ "config": InitConfig( sources={ '/tmp/mods': Ref(css_mods_tgz), }, files={ '/tmp/init-config.sh': { 'source': Ref(css_init_config_script), 'authentication': 'S3AccessCreds', 'mode': '000755', 'owner': 'root', 'group': 
'root' }, '/tmp/css-install-script.sh': { 'source': Ref(css_install_script), 'authentication': 'S3AccessCreds', 'mode': '000755', 'owner': 'root', 'group': 'root' }, '/tmp/cfg/mapcycle.txt': { 'source': Ref(css_mapcycle_txt), 'authentication': 'S3AccessCreds' }, '/tmp/cfg/server.cfg': { 'source': Ref(css_server_cfg), 'authentication': 'S3AccessCreds' } }, commands={ '1_run_init-config.sh': { 'command': '/tmp/init-config.sh', 'cwd': '~', 'env': { 'RCON_PASSWORD': Ref(css_rcon_password) } } }, ) })))) css_server_instance_ip_address = template.add_resource( EIP('IPAddress', Domain='vpc', InstanceId=Ref(css_server_instance))) template.add_output( Output( 'InstanceIp', Value=Ref(css_server_instance_ip_address), Description='Linux Instance IP', )) return template
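# Illustrative sketch (not from the original): reading the InstanceIp output declared by the
# template above once a stack built from it has finished creating. The function name is an
# assumption for the example.
import boto3

def get_css_server_ip(stack_name):
    stacks = boto3.client("cloudformation").describe_stacks(StackName=stack_name)
    outputs = stacks["Stacks"][0].get("Outputs", [])
    return next(
        (entry["OutputValue"] for entry in outputs if entry["OutputKey"] == "InstanceIp"),
        None,
    )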
def build(ssh_keypair_name): template = Template() template.set_version("2010-09-09") keyname_param = template.add_parameter( Parameter( "KeyName", ConstraintDescription="must be the name of an existing EC2 KeyPair.", Description="Name of an existing EC2 KeyPair to enable SSH access to \ the instance", Type="AWS::EC2::KeyPair::KeyName", Default=ssh_keypair_name, ) ) sshlocation_param = template.add_parameter( Parameter( "SSHLocation", Description=" The IP address range that can be used to SSH to the EC2 \ instances", Type="String", MinLength="9", MaxLength="18", Default="0.0.0.0/0", AllowedPattern=r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})", ConstraintDescription=("must be a valid IP CIDR range of the form x.x.x.x/x."), ) ) instanceType_param = template.add_parameter( Parameter( "InstanceType", Type="String", Description="WebServer EC2 instance type", Default="t3a.small", AllowedValues=[ "t2.micro", "t2.small", "t2.medium", "t3a.small", "m3.medium", "m3.large", "m3.xlarge", "m3.2xlarge", "c3.large", "c3.xlarge", "c3.2xlarge", "c3.4xlarge", "c3.8xlarge", "g2.2xlarge", "r3.large", "r3.xlarge", "r3.2xlarge", "r3.4xlarge", "r3.8xlarge", "i2.xlarge", "i2.2xlarge", "i2.4xlarge", "i2.8xlarge", "hi1.4xlarge", "hs1.8xlarge", "cr1.8xlarge", "cc2.8xlarge", ], ConstraintDescription="must be a valid EC2 instance type.", ) ) template.add_mapping( "AWSRegion2AMI", { "us-east-1": {"image": "ami-0d915a031cabac0e0"}, "us-east-2": {"image": "ami-0b97435028ca44fcc"}, "us-west-1": {"image": "ami-068d0753a46192935"}, "us-west-2": {"image": "ami-0c457f229774da543"}, "eu-west-1": {"image": "ami-046c6a0123bf94619"}, "eu-west-2": {"image": "ami-0dbe8ba0cd21ea12b"}, "eu-west-3": {"image": "ami-041bf9180061ce7ea"}, "eu-central-1": {"image": "ami-0f8184e6f30cc0c33"}, "eu-north-1": {"image": "ami-08dd1b893371bcaac"}, "ap-south-1": {"image": "ami-0ff23052091536db2"}, "ap-southeast-1": {"image": "ami-0527e82bae7c51958"}, "ap-southeast-2": {"image": "ami-0bae8773e653a32ec"}, "ap-northeast-1": {"image": "ami-060741a96307668be"}, "ap-northeast-2": {"image": "ami-0d991ac4f545a6b34"}, "sa-east-1": {"image": "ami-076f350d5a5ec448d"}, "ca-central-1": {"image": "ami-0071deaa12b66d1bf"}, }, ) vpc = template.add_resource(VPC("VPC", CidrBlock="10.0.0.0/16")) subnet = template.add_resource(Subnet("Subnet", CidrBlock="10.0.0.0/24", VpcId=Ref(vpc))) internet_gateway = template.add_resource(InternetGateway("InternetGateway")) attach_gateway = template.add_resource( VPCGatewayAttachment("AttachGateway", VpcId=Ref(vpc), InternetGatewayId=Ref(internet_gateway)) ) route_table = template.add_resource(RouteTable("RouteTable", VpcId=Ref(vpc))) template.add_resource( Route( "Route", DependsOn=attach_gateway, GatewayId=Ref(internet_gateway), DestinationCidrBlock="0.0.0.0/0", RouteTableId=Ref(route_table), ) ) template.add_resource( SubnetRouteTableAssociation("SubnetRouteTableAssociation", SubnetId=Ref(subnet), RouteTableId=Ref(route_table),) ) network_acl = template.add_resource(NetworkAcl("NetworkAcl", VpcId=Ref(vpc),)) template.add_resource( NetworkAclEntry( "InboundHTTPNetworkAclEntry", NetworkAclId=Ref(network_acl), RuleNumber="100", Protocol="6", PortRange=PortRange(To="80", From="80"), Egress="false", RuleAction="allow", CidrBlock="0.0.0.0/0", ) ) template.add_resource( NetworkAclEntry( "InboundSSHNetworkAclEntry", NetworkAclId=Ref(network_acl), RuleNumber="101", Protocol="6", PortRange=PortRange(To="22", From="22"), Egress="false", RuleAction="allow", CidrBlock="0.0.0.0/0", ) ) template.add_resource( NetworkAclEntry( 
"InboundResponsePortsNetworkAclEntry", NetworkAclId=Ref(network_acl), RuleNumber="102", Protocol="6", PortRange=PortRange(To="65535", From="1024"), Egress="false", RuleAction="allow", CidrBlock="0.0.0.0/0", ) ) template.add_resource( NetworkAclEntry( "OutBoundHTTPNetworkAclEntry", NetworkAclId=Ref(network_acl), RuleNumber="100", Protocol="6", PortRange=PortRange(To="80", From="80"), Egress="true", RuleAction="allow", CidrBlock="0.0.0.0/0", ) ) template.add_resource( NetworkAclEntry( "OutBoundHTTPSNetworkAclEntry", NetworkAclId=Ref(network_acl), RuleNumber="101", Protocol="6", PortRange=PortRange(To="443", From="443"), Egress="true", RuleAction="allow", CidrBlock="0.0.0.0/0", ) ) template.add_resource( NetworkAclEntry( "OutBoundResponsePortsNetworkAclEntry", NetworkAclId=Ref(network_acl), RuleNumber="102", Protocol="6", PortRange=PortRange(To="65535", From="1024"), Egress="true", RuleAction="allow", CidrBlock="0.0.0.0/0", ) ) template.add_resource( SubnetNetworkAclAssociation("SubnetNetworkAclAssociation", SubnetId=Ref(subnet), NetworkAclId=Ref(network_acl),) ) instance_security_group = template.add_resource( SecurityGroup( "InstanceSecurityGroup", GroupDescription="Enable SSH access via port 22", SecurityGroupIngress=[ SecurityGroupRule(IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp=Ref(sshlocation_param)), SecurityGroupRule(IpProtocol="tcp", FromPort="80", ToPort="80", CidrIp="0.0.0.0/0"), ], VpcId=Ref(vpc), ) ) server_instance = template.add_resource( Instance( "ServerInstance", ImageId=FindInMap("AWSRegion2AMI", Ref("AWS::Region"), "image"), InstanceType=Ref(instanceType_param), KeyName=Ref(keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty( GroupSet=[Ref(instance_security_group)], AssociatePublicIpAddress="true", DeviceIndex="0", DeleteOnTermination="true", SubnetId=Ref(subnet), ) ], ) ) template.add_output([Output("ServerIP", Value=GetAtt(server_instance, "PublicIp"))]) return template
"AccessCIDR", Description="The CIDR IP range that is permitted to access Services in this VPC. Use 0.0.0.0/0 if you want public access from the internet.", Default="0.0.0.0/0", ConstraintDescription="Must be a valid IP CIDR range of the form x.x.x.x/x.", MaxLength=18, AllowedPattern="(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", MinLength=9, Type="String", )) Bastion = t.add_resource(Instance( "Bastion", NetworkInterfaces=[ NetworkInterfaceProperty( SubnetId=Ref(Subnet), DeviceIndex="0", GroupSet=[Ref("SecurityGroup")], AssociatePublicIpAddress=True, ), ], Tags=Tags( Name="Bastion for Atlassian Product VPC", ), KeyName=Ref(KeyName), InstanceType="t2.micro", ImageId=Ref(LatestAmiId), )) SecurityGroup = t.add_resource(SecurityGroup( "SecurityGroup", SecurityGroupIngress=[{ "ToPort": 22, "IpProtocol": "tcp", "CidrIp": Ref(AccessCIDR), "FromPort": 22 }], VpcId=Ref(VPC),
def main(): t = Template() t.set_description("test instance launch") t.set_version("2010-09-09") InstUserData = [ '#!/usr/bin/env bash\n', '\n', 'set -x\n', '\n', 'my_wait_handle="', Ref('InstanceWaitHandle'), '"\n', 'curl -X PUT -H \'Content-Type:\' --data-binary \'{ "Status" : "SUCCESS", "Reason" : "Instance launched", "UniqueId" : "launch001", "Data" : "Instance launched."}\' "${my_wait_handle}"', '\n', '\n', ] EC2KeyName = t.add_parameter( Parameter( 'EC2KeyName', Type="AWS::EC2::KeyPair::KeyName", Description= "Name of an existing EC2 KeyPair to enable SSH access to the instance.", ConstraintDescription="REQUIRED: Must be a valud EC2 key pair", )) OperatingSystem = t.add_parameter( Parameter('OperatingSystem', Type="String", Description="Operating System", Default="centos7", AllowedValues=[ "alinux2", "centos7", "rhel7", ], ConstraintDescription="Must be: alinux2, centos7, rhel7")) myInstanceType = t.add_parameter( Parameter( 'MyInstanceType', Type="String", Description="Instance type", Default="m5.2xlarge", )) VpcId = t.add_parameter( Parameter( 'VpcId', Type="AWS::EC2::VPC::Id", Description="VPC Id for this instance", )) Subnet = t.add_parameter( Parameter('Subnet', Type="AWS::EC2::Subnet::Id", Description="Subnet IDs")) ExistingSecurityGroup = t.add_parameter( Parameter( 'ExistingSecurityGroup', Type="AWS::EC2::SecurityGroup::Id", Description= "OPTIONAL: Choose an existing Security Group ID, e.g. sg-abcd1234") ) UsePublicIp = t.add_parameter( Parameter( 'UsePublicIp', Type="String", Description="Should a public IP address be given to the instance", Default="true", ConstraintDescription="true/false", AllowedValues=["true", "false"])) SshAccessCidr = t.add_parameter( Parameter( 'SshAccessCidr', Type="String", Description="CIDR Block for SSH access, default 127.0.0.1/32", Default="127.0.0.1/32", AllowedPattern= "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", ConstraintDescription="Must be a valid CIDR x.x.x.x/x")) RootRole = t.add_resource( iam.Role("RootRole", AssumeRolePolicyDocument={ "Statement": [{ "Effect": "Allow", "Principal": { "Service": ["ec2.amazonaws.com"] }, "Action": ["sts:AssumeRole"] }] })) SshSecurityGroup = t.add_resource( SecurityGroup("SshSecurityGroup", VpcId=Ref(VpcId), GroupDescription="SSH Secuirty group", SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp=Ref(SshAccessCidr), ), ])) RootInstanceProfile = t.add_resource( InstanceProfile("RootInstanceProfile", Roles=[Ref(RootRole)])) tags = Tags(Name=Ref("AWS::StackName")) myInstance = t.add_resource( ec2.Instance( 'MyInstance', ImageId=FindInMap("AWSRegionAMI", Ref("AWS::Region"), Ref(OperatingSystem)), KeyName=Ref(EC2KeyName), InstanceType=(Ref(myInstanceType)), NetworkInterfaces=[ NetworkInterfaceProperty( GroupSet=If( "not_existing_sg", [Ref(SshSecurityGroup)], [Ref(SshSecurityGroup), Ref(ExistingSecurityGroup)]), AssociatePublicIpAddress=Ref(UsePublicIp), DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(Subnet)) ], IamInstanceProfile=(Ref(RootInstanceProfile)), UserData=Base64(Join('', InstUserData)), )) t.add_mapping( 'AWSRegionAMI', { "ap-northeast-1": { "centos7": "ami-8e8847f1", "rhel7": "ami-6b0d5f0d" }, "ap-northeast-2": { "centos7": "ami-bf9c36d1", "rhel7": "ami-3eee4150" }, "ap-south-1": { "centos7": "ami-1780a878", "rhel7": "ami-5b673c34" }, "ap-southeast-1": { "centos7": "ami-8e0205f2", "rhel7": "ami-76144b0a" }, "ap-southeast-2": { "centos7": "ami-d8c21dba", "rhel7": "ami-67589505" }, "ca-central-1": { "centos7": 
"ami-e802818c", "rhel7": "ami-49f0762d" }, "eu-central-1": { "centos7": "ami-dd3c0f36", "rhel7": "ami-c86c3f23" }, "eu-west-1": { "centos7": "ami-3548444c", "rhel7": "ami-7c491f05" }, "eu-west-2": { "centos7": "ami-00846a67", "rhel7": "ami-7c1bfd1b" }, "eu-west-3": { "centos7": "ami-262e9f5b", "rhel7": "ami-5026902d" }, "sa-east-1": { "centos7": "ami-cb5803a7", "rhel7": "ami-b0b7e3dc" }, "us-east-1": { "centos7": "ami-9887c6e7", "rhel7": "ami-6871a115" }, "us-east-2": { "centos7": "ami-9c0638f9", "rhel7": "ami-03291866" }, "us-west-1": { "centos7": "ami-4826c22b", "rhel7": "ami-18726478" }, "us-west-2": { "centos7": "ami-3ecc8f46", "rhel7": "ami-28e07e50" } }) t.add_condition("not_existing_sg", Equals(Ref(ExistingSecurityGroup), "")) t.add_condition("Has_Public_Ip", Equals(Ref(UsePublicIp), "true")) mywaithandle = t.add_resource(WaitConditionHandle('InstanceWaitHandle')) mywaitcondition = t.add_resource( WaitCondition("InstanceWaitCondition", Handle=Ref(mywaithandle), Timeout="1500", DependsOn="MyInstance")) t.add_output([ Output("InstanceID", Description="Instance ID", Value=Ref(myInstance)) ]) t.add_output( [Output("InstancePrivateIP", Value=GetAtt('MyInstance', 'PrivateIp'))]) t.add_output([ Output("InstancePublicIP", Value=GetAtt('MyInstance', 'PublicIp'), Condition="Has_Public_Ip") ]) ##print(t.to_yaml()) print(t.to_json(indent=2))
# - 2 in the public subnet # - 2 in the private subnet privInst01 = t.add_resource( Instance( 'cpPrivInstance01', ImageId=ImageID, InstanceType='t2.micro', Tags=[{ "Key": "Name", "Value": "cp_priv_instance_1" }], KeyName=Ref(ssh_keyname_param), NetworkInterfaces=[ NetworkInterfaceProperty(GroupSet=[Ref(backendSecurityGroup)], DeviceIndex='0', DeleteOnTermination='true', SubnetId=Ref(privateSubnet)) ], UserData=Base64( Join('', [ '#!/bin/bash -v\n', "sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-$(rpm -E '%{rhel}').noarch.rpm &>/root/user-data.log\n", 'sudo yum install -y python-pip &>>/root/user-data.log\n', 'sudo pip install Flask &>>/root/user-data.log\n', 'sudo yum install -y git &>>/root/user-data.log\n', 'sudo mkdir -p /root/work && cd /root/work/ && git clone -v https://github.com/cjurju/pikachuFly.git . &>>/root/user-data.log\n', 'sudo python /root/work/python-troposphere/HelloWorld.py &>>/root/user-data.log &' ])))) pubInst01 = t.add_resource( Instance(