def create_launch_template_resource(template, api_launch_template_name_variable, api_instance_class_parameter, ec2_instance_profile_resource, api_security_group_resource, ecs_cluster_resource):
    """Add an EC2 launch template for ECS container instances to *template*.

    Returns the troposphere LaunchTemplate resource that was added.
    """
    # Boot script registers the instance with the given ECS cluster.
    bootstrap_script = Base64(Join('', [
        '#!/bin/bash\n',
        'echo ECS_CLUSTER=',
        Ref(ecs_cluster_resource),
        ' >> /etc/ecs/ecs.config;echo ECS_BACKEND_HOST= >> /etc/ecs/ecs.config;',
    ]))
    # Dedicated 22 GiB gp2 volume, discarded with the instance.
    ecs_volume = ec2.BlockDeviceMapping(
        DeviceName='/dev/xvdcz',
        Ebs=ec2.EBSBlockDevice(DeleteOnTermination=True,
                               VolumeSize=22,
                               VolumeType='gp2'))
    launch_data = ec2.LaunchTemplateData(
        ImageId='ami-0ae254c8a2d3346a7',
        InstanceType=Ref(api_instance_class_parameter),
        IamInstanceProfile=ec2.IamInstanceProfile(
            Arn=GetAtt(ec2_instance_profile_resource, 'Arn')),
        InstanceInitiatedShutdownBehavior='terminate',
        Monitoring=ec2.Monitoring(Enabled=True),
        SecurityGroups=[Ref(api_security_group_resource)],
        BlockDeviceMappings=[ecs_volume],
        UserData=bootstrap_script)
    return template.add_resource(
        ec2.LaunchTemplate(
            'LaunchTemplate',
            LaunchTemplateName=api_launch_template_name_variable,
            LaunchTemplateData=launch_data))
def add_launchconfig_spot(self):
    """Create the spot-priced LaunchConfiguration and register it on the template."""
    # Bootstrap: signal CloudFormation, then install and start the demo container.
    boot_script = Base64(Join('', [
        "#!/bin/bash\n",
        "cfn-signal -e 0",
        " --resource AutoscalingGroup",
        " --stack ", Ref("AWS::StackName"),
        " --region ", Ref("AWS::Region"), "\n",
        "yum -y install docker htop stress && service docker start\n",
        "docker run -d -p 80:8080 stealthizer/docker-aws-info\n",
    ]))
    root_volume = ec2.BlockDeviceMapping(
        DeviceName="/dev/xvda",
        Ebs=ec2.EBSBlockDevice(VolumeSize="8"))
    self.launchconfig_spot = self.template.add_resource(
        autoscaling.LaunchConfiguration(
            "LaunchConfigurationOnSpot",
            UserData=boot_script,
            ImageId=self.amiId,
            KeyName=self.keyname,
            SpotPrice=self.spot_price,
            IamInstanceProfile=Ref("InstanceProfile"),
            BlockDeviceMappings=[root_volume],
            SecurityGroups=[Ref(self.securityGroup),
                            Ref(self.securityGroupLoadBalancer)],
            InstanceType=self.instance_type))
def my_block_device_mappings_ephemeral(diskcount, devicenamebase):
    """Build block-device mappings: a gp2 root volume plus *diskcount* ephemeral disks.

    Ephemeral device names are *devicenamebase* plus successive letters
    starting at 'b' (e.g. "/dev/sdb", "/dev/sdc", ...).
    """
    mappings = [
        # Root volume first; size comes from the module-level ref_boot_disk_size.
        my_block_device_mappings_root("/dev/sd", ref_boot_disk_size, "gp2")
    ]
    for i in range(diskcount):  # range (not xrange): Python-3 compatible
        mappings.append(
            ec2.BlockDeviceMapping(
                # chr(ord('b') + i) replaces the magic chr(i + 98)
                DeviceName=devicenamebase + chr(ord('b') + i),
                VirtualName="ephemeral" + str(i)))
    return mappings
def create_template(self):
    """Assemble the acceptance-test template for cumulus scaling groups."""
    template = self.template
    template.add_description("Acceptance Tests for cumulus scaling groups")
    # TODO fix
    # instance = self.name + self.context.environment['env']
    instance = "someinstance"
    # TODO: give to builder
    build_chain = chain.Chain()
    build_chain.add(ingress_rule.IngressRule(port_to_open="22",
                                             cidr="10.0.0.0/8"))
    instance_profile_name = "InstanceProfile" + self.name
    # Role assumable by EC2 and S3 service principals.
    assume_policy = Policy(Statement=[
        Statement(Effect=Allow,
                  Action=[AssumeRole],
                  Principal=Principal("Service", ["ec2.amazonaws.com",
                                                  "s3.amazonaws.com"]))
    ])
    build_chain.add(InstanceProfileRole(
        instance_profile_name=instance_profile_name,
        role=iam.Role("SomeRoleName1",
                      AssumeRolePolicyDocument=assume_policy)))
    launch_config_name = 'lc' + self.name
    build_chain.add(launch_config.LaunchConfig(
        asg_name=self.name,
        launch_config_name=launch_config_name,
        meta_data=self.get_metadata(),
        instance_profile_name=instance_profile_name))
    build_chain.add(block_device_data.BlockDeviceData(
        ec2.BlockDeviceMapping(DeviceName="/dev/xvda",
                               Ebs=ec2.EBSBlockDevice(VolumeSize="40"))))
    build_chain.add(scaling_group.ScalingGroup(
        launch_config_name=launch_config_name))
    build_chain.run(chaincontext.ChainContext(template=template,
                                              instance_name=instance))
def get_block_device_mapping(instanceType):
    """Return ephemeral-disk mappings for *instanceType* (empty list if none needed)."""
    count = INSTANCETYPE_TO_BLOCKDEVICEMAPPING.get(instanceType, 0)
    # this needs to wrap over to /dev/sdaa if we ever use d2.8xl instances
    return [
        ec2.BlockDeviceMapping(
            DeviceName='/dev/sd{}'.format(chr(ord('m') + idx)),
            VirtualName='ephemeral{}'.format(idx))
        for idx in range(count)
    ]
def create_template(self):
    """Assemble the websitesimple acceptance-test template for cumulus scaling groups."""
    template = self.template
    template.add_description("Acceptance Tests for cumulus scaling groups")
    pipeline = chain.Chain()
    application_port = "8000"
    pipeline.add(launch_config.LaunchConfig(
        prefix='websitesimple',
        vpc_id=Ref('VpcId'),
        meta_data=self.get_metadata(),
        bucket_name=self.context.bucket_name))
    # SSH and the application port are opened to the internal network only.
    pipeline.add(ingress_rule.IngressRule(port_to_open="22",
                                          cidr="10.0.0.0/8"))
    pipeline.add(ingress_rule.IngressRule(port_to_open=application_port,
                                          cidr="10.0.0.0/8"))
    pipeline.add(block_device_data.BlockDeviceData(
        ec2.BlockDeviceMapping(DeviceName="/dev/xvda",
                               Ebs=ec2.EBSBlockDevice(VolumeSize="40"))))
    pipeline.add(target_group.TargetGroup(port=application_port,
                                          vpc_id=Ref("VpcId")))
    pipeline.add(scaling_group.ScalingGroup())
    site_name = troposphere.Join('', [self.context.namespace,
                                      "-websitesimple"])
    pipeline.add(dns.Dns(base_domain=Ref("BaseDomain"),
                         hosted_zone_id=Ref("AlbCanonicalHostedZoneID"),
                         target=Ref("AlbDNSName"),
                         dns_name=site_name))
    pipeline.add(alb_port.AlbPort(port_to_open=application_port))
    pipeline.add(listener_rule.ListenerRule(
        base_domain_name=Ref("BaseDomain"),
        alb_listener_rule=Ref("IAlbListener"),
        path_pattern="/*",
        priority="2"))
    pipeline.run(chaincontext.ChainContext(template=template))
def my_block_device_mappings_ebs(diskcount, devicenamebase, volumesize, volumetype):
    """Build mappings: a gp2 root volume plus *diskcount* EBS data volumes.

    Data volumes are named *devicenamebase* + 'b', 'c', ... and are deleted
    when the instance terminates.
    """
    mappings = [
        # Root volume first; size comes from module-level ref_disk_all_root_volumesize.
        my_block_device_mappings_root("/dev/sd", ref_disk_all_root_volumesize, "gp2")
    ]
    for i in range(diskcount):  # range (not xrange): Python-3 compatible
        mappings.append(
            ec2.BlockDeviceMapping(
                # chr(ord('b') + i) replaces the magic chr(i + 98)
                DeviceName=devicenamebase + chr(ord('b') + i),
                Ebs=ec2.EBSBlockDevice(
                    VolumeSize=volumesize,
                    VolumeType=volumetype,
                    DeleteOnTermination=True)))
    return mappings
def get_block_device_mappings(self):
    '''Create a troposphere mapping for each device'''
    # The first `working_volume_count` slots get the working device; the
    # remaining slots up to `volume_count` get the backing device.
    mappings = []
    for slot in range(self.volume_count):
        chosen = (self.working_device
                  if slot < self.working_volume_count
                  else self.backing_device)
        name = self._device_name_from_slot_index(slot)
        mappings.append(ec2.BlockDeviceMapping(DeviceName=name, Ebs=chosen))
    return mappings
def __generate_blockmap(self, blockmap=None):
    """Build the instance block-device mapping list from the app config.

    The *blockmap* parameter is accepted for backward compatibility but is
    ignored: the original code overwrote it unconditionally, so the mapping
    is always rebuilt here.
    """
    # Root volume; size is parameterised via the RootVolSize template parameter.
    blockmap = [
        ec2.BlockDeviceMapping(
            DeviceName="/dev/sda1",
            Ebs=ec2.EBSBlockDevice(VolumeSize=Ref("RootVolSize"),
                                   VolumeType="gp2")),
    ]
    # next(iter(...)) instead of .values()[0]: dict.values() is a
    # non-indexable view on Python 3, so the original indexing fails there.
    app_config = next(iter(self.config['apps'].values()))
    if 'mounts' in app_config:
        for mount in app_config['mounts']:
            # One sized volume per mount; restored from a snapshot unless
            # the "<name>NotRestoringFromSnapshot" condition is true.
            blockmap.append(
                ec2.BlockDeviceMapping(
                    DeviceName=mount['path'],
                    Ebs=ec2.EBSBlockDevice(
                        VolumeSize=Ref("{}VolSize".format(mount['name'])),
                        SnapshotId=If(
                            "{}NotRestoringFromSnapshot".format(mount['name']),
                            Ref("AWS::NoValue"),
                            Ref("{}SnapID".format(mount['name']))),
                        VolumeType=mount.get('type', 'standard'),
                        DeleteOnTermination=True)))
    return blockmap
def create_instance(template, parameters=None, user_data_script=None, assign_public_ip=None, volume=None, profile=None):
    """Add an EC2 instance resource to *template* and return it.

    *parameters* is a required mapping with 'image_id', 'key_name',
    'sg_ids', 'instance_type' and 'subnet_id' entries (template parameter
    names).  Optional extras: a user-data shell script path, a public IP,
    an EBS root volume and an IAM instance profile.

    Raises TypeError when *parameters* is missing.
    """
    # Fail fast with a clear message instead of an opaque subscript error.
    if parameters is None:
        raise TypeError("create_instance: 'parameters' mapping is required")
    image_id = parameters['image_id']
    key_name = parameters['key_name']
    sg_ids = parameters['sg_ids']
    instance_type = parameters['instance_type']
    subnet_id = parameters['subnet_id']

    user_data = None
    if user_data_script:
        with open(user_data_script, 'r') as f:
            user_data = Base64(Join('', f.readlines()))

    ec2_instance = template.add_resource(
        ec2.Instance("Ec2Instance",
                     ImageId=Ref(image_id),
                     InstanceType=Ref(instance_type),
                     KeyName=Ref(key_name)))

    if assign_public_ip:
        # A public IP requires subnet/SGs to live on the network interface.
        ec2_instance.NetworkInterfaces = [
            ec2.NetworkInterfaceProperty(AssociatePublicIpAddress=True,
                                         DeviceIndex=0,
                                         GroupSet=Ref(sg_ids),
                                         SubnetId=Ref(subnet_id))
        ]
    else:
        ec2_instance.SecurityGroupIds = Ref(sg_ids)
        ec2_instance.SubnetId = Ref(subnet_id)

    if volume:
        ec2_instance.BlockDeviceMappings = [
            ec2.BlockDeviceMapping(DeviceName='/dev/sda1', Ebs=volume)
        ]
    if profile:
        ec2_instance.IamInstanceProfile = Ref(profile)
    if user_data:
        ec2_instance.UserData = user_data
    # Return the resource so callers can reference it (previously returned None).
    return ec2_instance
def _launch_config(self): return LaunchConfiguration( "LaunchConfiguration", Metadata=autoscaling.Metadata( cloudformation.Init({ "config": cloudformation.InitConfig(files=cloudformation.InitFiles({ '/etc/cfn/cfn-hup.conf': cloudformation.InitFile(content=Join( '', [ '[main]\n', 'stack=', self.ref_stack_id, '\n', 'region=', self.ref_region, '\n', ]), mode='000400', owner='root', group='root'), '/etc/cfn/hooks.d/cfn-auto-reloader.conf': cloudformation.InitFile( content=Join('', [ '[cfn-auto-reloader-hook]\n', 'triggers=post.update\n', 'path=Resources.WebServerInstance.\ Metadata.AWS::CloudFormation::Init\n', 'action=/opt/aws/bin/cfn-init -v ', ' --stack ', self.ref_stack_name, ' --resource WebServerInstance ', ' --region ', self.ref_region, '\n', 'runas=root\n', ])) }), services={ "sysvinit": cloudformation.InitServices({ "rsyslog": cloudformation. InitService( enabled=True, ensureRunning=True, files=[ '/etc/rsyslog.d/20-somethin.conf' ]) }) }) })), UserData=Base64(Join('', self.config['app_instance_user_data'])), ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), KeyName=self.config['sshkey'], IamInstanceProfile=Ref(self.instance_iam_role_instance_profile), BlockDeviceMappings=[ ec2.BlockDeviceMapping(DeviceName=self.config['device_name'], Ebs=ec2.EBSBlockDevice(VolumeSize="8")), ], SecurityGroups=self.config['app_sg'], InstanceType=self.config['instance_type'], )
) bastion_instance = ec2.Instance( "BastionInstance", template=template, ImageId=Ref(bastion_ami), InstanceType=Ref(bastion_instance_type), KeyName=Ref(bastion_key_name), SecurityGroupIds=[Ref(bastion_security_group)], SubnetId=Ref(public_subnet_a), BlockDeviceMappings=[ ec2.BlockDeviceMapping( DeviceName="/dev/sda1", Ebs=ec2.EBSBlockDevice( VolumeType="gp2", VolumeSize=8, Encrypted=use_aes256_encryption, KmsKeyId=If(use_cmk_arn, Ref(cmk_arn), Ref("AWS::NoValue")), ), ), ], Condition=bastion_type_and_ami_set, Tags=[ { "Key": "Name", "Value": Join("-", [Ref("AWS::StackName"), "bastion"]), }, { "Key": "aws-web-stacks:role", "Value": "bastion", },
def my_block_device_mappings_root(devicenamebase, volumesize, volumetype):
    """Return the BlockDeviceMapping for the root volume (*devicenamebase* + "a1")."""
    root_ebs = ec2.EBSBlockDevice(VolumeSize=volumesize,
                                  VolumeType=volumetype)
    return ec2.BlockDeviceMapping(DeviceName=devicenamebase + "a1",
                                  Ebs=root_ebs)
def emit_configuration():
    """Emit the Jenkins stack: parameter, IAM role/profile, security group,
    launch configuration and a single-instance auto scaling group.

    Relies on module-level `template`, `cfn`, CLOUDNAME/CLOUDENV and the
    EC2_INSTANCE_* notification constants.
    """
    # Parameters here
    jenkins_instance_class = template.add_parameter(
        Parameter(
            'JenkinsInstanceType',
            Type='String',
            Default='t2.micro',
            Description='Chef jenkins instance type',
            AllowedValues=cfn.usable_instances(),
            ConstraintDescription='Instance size must be a valid instance type'
        ))

    # jenkins IAM role
    # NOTE(review): jenkins_role_name is computed but never used below.
    jenkins_role_name = '.'.join(['jenkins', CLOUDNAME, CLOUDENV])
    jenkins_iam_role = template.add_resource(
        iam.Role('JenkinsIamRole',
                 AssumeRolePolicyDocument=ASSUME_ROLE_POLICY,
                 Path="/",
                 # Policies are rendered from Jinja templates and parsed as JSON.
                 Policies=[
                     iam.Policy(PolicyName='JenkinsPolicy',
                                PolicyDocument=json.loads(
                                    cfn.load_template(
                                        "jenkins_policy.json.j2", {
                                            "env": CLOUDENV,
                                            "cloud": CLOUDNAME,
                                            "region": "us-east-1"
                                        }))),
                     iam.Policy(PolicyName='JenkinsDefaultPolicy',
                                PolicyDocument=json.loads(
                                    cfn.load_template(
                                        "default_policy.json.j2", {
                                            "env": CLOUDENV,
                                            "cloud": CLOUDNAME,
                                            "region": "us-east-1"
                                        })))
                 ],
                 DependsOn=cfn.vpcs[0].title))

    jenkins_instance_profile = template.add_resource(
        iam.InstanceProfile("JenkinsInstanceProfile",
                            Path="/",
                            Roles=[Ref(jenkins_iam_role)],
                            DependsOn=jenkins_iam_role.title))

    # Bootstrap script rendered from the shared init template.
    jenkins_user_data = cfn.load_template("default-init.bash.j2", {
        "env": CLOUDENV,
        "cloud": CLOUDNAME,
        "deploy": "jenkins"
    })

    # Allow SSH, HTTP and HTTPS from the default route.
    ingress_rules = [
        ec2.SecurityGroupRule(IpProtocol=p[0],
                              CidrIp=DEFAULT_ROUTE,
                              FromPort=p[1],
                              ToPort=p[1])
        for p in [('tcp', 22), ('tcp', 80), ('tcp', 443)]
    ]

    security_group = template.add_resource(
        ec2.SecurityGroup(
            "JenkinsSecurityGroup",
            GroupDescription='Security Group for jenkins instances',
            VpcId=Ref(cfn.vpcs[0]),
            SecurityGroupIngress=ingress_rules,
            DependsOn=cfn.vpcs[0].title,
            Tags=Tags(Name='.'.join(['jenkins-sg', CLOUDNAME, CLOUDENV]))))

    launch_cfg = template.add_resource(
        autoscaling.LaunchConfiguration(
            "JenkinsLaunchConfiguration",
            ImageId=FindInMap('RegionMap', Ref("AWS::Region"),
                              int(cfn.Amis.EBS)),
            InstanceType=Ref(jenkins_instance_class),
            IamInstanceProfile=Ref(jenkins_instance_profile),
            AssociatePublicIpAddress=not USE_PRIVATE_SUBNETS,
            BlockDeviceMappings=[
                ec2.BlockDeviceMapping(
                    DeviceName='/dev/sda1',
                    Ebs=ec2.EBSBlockDevice(DeleteOnTermination=True))
            ],
            KeyName=Ref(cfn.keyname),
            SecurityGroups=[Ref(security_group)],
            DependsOn=[jenkins_instance_profile.title, security_group.title],
            UserData=Base64(jenkins_user_data)))

    # Single-instance ASG (min = max = 1) with scaling-event notifications.
    # NOTE(review): asg_name is computed but never used below.
    asg_name = '.'.join(['jenkins', CLOUDNAME, CLOUDENV])
    asg = template.add_resource(
        autoscaling.AutoScalingGroup(
            "JenkinsASG",
            AvailabilityZones=cfn.get_asg_azs(),
            DesiredCapacity="1",
            LaunchConfigurationName=Ref(launch_cfg),
            MinSize="1",
            MaxSize="1",
            NotificationConfiguration=autoscaling.NotificationConfiguration(
                TopicARN=Ref(cfn.alert_topic),
                NotificationTypes=[
                    EC2_INSTANCE_TERMINATE, EC2_INSTANCE_TERMINATE_ERROR,
                    EC2_INSTANCE_LAUNCH, EC2_INSTANCE_LAUNCH_ERROR
                ]),
            VPCZoneIdentifier=[
                Ref(sn) for sn in cfn.get_vpc_subnets(
                    cfn.vpcs[0], cfn.SubnetTypes.PLATFORM)
            ]))
Filters=[{'Name': 'tag-value','Values': ['UatDevPublicSecurityGroup']}] ) ec2_instance = template.add_resource(ec2.Instance( "NagiosServer", ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), KeyName='ec2deployuatdev', SubnetId=subnetdata['Subnets'][0]['SubnetId'], SecurityGroupIds=[sgdata['SecurityGroups'][0]['GroupId'],], Tenancy='default', InstanceType="t2.micro", BlockDeviceMappings=[ ec2.BlockDeviceMapping( DeviceName="/dev/sda1", Ebs=ec2.EBSBlockDevice( VolumeType="gp2", VolumeSize="20" ) ), ], Tags=Tags(**{ 'Name': 'NagiosServer', 'Environment': 'uatdev', }) )) template.add_output([ Output( "InstanceId", Description="InstanceId of the newly created EC2 instance", Value=Ref(ec2_instance),
def build_template(instance_type="m3.xlarge", keypair=None, groups=None,
                   setup_script="instance-setup.sh", iops=1000,
                   enterprise=False):
    """Build the single-instance MongoDB CloudFormation template.

    @param instance_type EC2 instance type for the MongoDB host
    @param keypair       name of the EC2 key pair for SSH access
    @param groups        security groups (defaults to ["default"])
    @param setup_script  local shell script appended to the instance user data
    @param iops          provisioned-IOPS tier used to size the EBS volumes
    @param enterprise    True selects the MongoDB Enterprise yum repo
    @return the rendered template as a JSON string
    """
    # Avoid the mutable-default-argument pitfall: materialise the default here.
    if groups is None:
        groups = ["default"]

    print("## Building CloudFormation Template")
    template = Template()

    ## CF Template Parameters for Community vs. Standard
    if enterprise is False:
        repopath = "/etc/yum.repos.d/mongodb.repo"
        repocontent = [
            "[MongoDB]\n", "name=MongoDB Repository\n",
            "baseurl=http://downloads-distro.mongodb.org/repo/redhat/os/x86_64\n",
            "gpgcheck=0\n", "enabled=1\n"
        ]
        shortname = "MongoDBInstance"
        longname = "MongoDB {version} {iops} IOPS".format(
            version=MONGODB_VERSION, iops=iops)
    else:
        repopath = "/etc/yum.repos.d/mongodb-enterprise.repo"
        repocontent = [
            "[MongoDB-Enterprise]\n", "name=MongoDB Enterprise Repository\n",
            "baseurl=https://repo.mongodb.com/yum/redhat/6/mongodb-enterprise/stable/$basearch/\n",
            "gpgcheck=0\n", "enabled=1\n"
        ]
        shortname = "MongoDBStandardInstance"
        longname = "MongoDB Standard {version} {iops} IOPS".format(
            version=MONGODB_VERSION, iops=iops)

    ## CF Template UserData script
    ## Hack up UserData a bit so refs are setup correctly
    user_data = [
        "#!/bin/bash\n", "yum update -y aws-cfn-bootstrap\n",
        "/opt/aws/bin/cfn-init -v -s ",
        Ref("AWS::StackName"), " -r ", shortname, " --region ",
        Ref("AWS::Region"), " > /tmp/cfn-init.log 2>&1\n"
    ]
    with open(setup_script) as lines:
        for line in lines:
            user_data.append(line)

    ## CF Template Block Device Mappings
    # One io1 volume each for data, journal and log; size and IOPS come
    # from the STORAGE_MAP tier selected by *iops*.
    block_device_mappings = []
    for mount, volume_role in {  # `volume_role` (was `type`): don't shadow the builtin
            "/dev/xvdf": "data",
            "/dev/xvdg": "journal",
            "/dev/xvdh": "log"
    }.items():
        block_device_mappings.append(
            ec2.BlockDeviceMapping(
                DeviceName=mount,
                Ebs=ec2.EBSBlockDevice(
                    VolumeSize=STORAGE_MAP[iops][volume_role]["size"],
                    Iops=STORAGE_MAP[iops][volume_role]["iops"],
                    VolumeType="io1",
                    DeleteOnTermination=False,
                ),
            ))

    ## CF Template Region-AMI-Mapping
    template.add_mapping("RegionAMIMap", REGION_AMI_MAP)

    ## CF Template EC2 Instance
    mongodb_instance = template.add_resource(
        ec2.Instance(
            shortname,
            ImageId=FindInMap("RegionAMIMap", Ref("AWS::Region"), "AMI"),
            InstanceType=instance_type,
            KeyName=keypair,
            SecurityGroups=groups,
            EbsOptimized=True,
            BlockDeviceMappings=block_device_mappings,
            # cfn-init metadata writes the yum repo file chosen above.
            Metadata={
                "AWS::CloudFormation::Init": {
                    "config": {
                        "files": {
                            repopath: {
                                "content": {
                                    "Fn::Join": ["", repocontent]
                                },
                                "mode": "000644",
                                "owner": "root",
                                "group": "root"
                            }
                        }
                    }
                }
            },
            UserData=Base64(Join("", user_data)),
            Tags=Tags(Name=longname, ),
        ))

    ## CF Template Outputs
    template.add_output([
        Output("MongoDBInstanceID",
               Description="MongoDBInstance ID",
               Value=Ref(mongodb_instance)),
        Output("MongoDBInstanceDNS",
               Description="MongoDBInstance Public DNS",
               Value=GetAtt(mongodb_instance, "PublicDnsName"))
    ])

    print("## CloudFormation Template Built")
    return template.to_json()
"ap-northeast-1": { "AMI": "ami-dcfa4edd" } }) ebs = ec2.EBSBlockDevice(VolumeSize=20, VolumeType="gp2", DeletionPolicy="Snapshot") ec2_instance = template.add_resource( ec2.Instance("Ec2Instance", ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), InstanceType="t1.micro", KeyName=Ref(keyname_param), SecurityGroups=["default"], UserData=Base64("80"), BlockDeviceMappings=[ ec2.BlockDeviceMapping(DeviceName="/dev/sdf", Ebs=ebs) ])) template.add_output([ Output( "InstanceId", Description="InstanceId of the newly created EC2 instance", Value=Ref(ec2_instance), ), Output( "AZ", Description="Availability Zone of the newly created EC2 instance", Value=GetAtt(ec2_instance, "AvailabilityZone"), ), Output( "PublicIP",
# Allow ingress to the HDF Service Node ec2.SecurityGroupRule(IpProtocol='tcp', FromPort=80, ToPort=80, CidrIp='0.0.0.0/0') ])) ec2_instance = t.add_resource( ec2.Instance( "HdsfServer", AvailabilityZone=Ref(az_param), ImageId=Ref(ami_param), InstanceType="m4.large", BlockDeviceMappings=[ ec2.BlockDeviceMapping("RootVolume", DeviceName='/dev/xvda', Ebs=ec2.EBSBlockDevice(VolumeSize=32)) ], # This doesn't work, but it seems like we need to fix a problem in # https://github.com/cloudtools/troposphere/blob/2dc788dbc89c15ce5984f9c40b143494336a2348/troposphere/ec2.py#L311 # It only works if the profile exists outside of the template # Til this is investigated you have to manually edit the generated json file and # remove the \" s IamInstanceProfile='{"Ref": "ServiceProfile"}', KeyName=Ref(keyname_param), SecurityGroups=[Ref(security_group)], UserData=Base64( Join("\n", [ "#!/bin/bash", "yum update -y", "amazon-linux-extras install docker", "service docker start", "usermod -a -G docker ec2-user",
'DNS:*.*.compute.internal', 'DNS:*.ec2.internal', 'IP:10.3.0.1', ] (API_KEY_PEM, API_CERT_PEM, API_KEY, API_CERT) = create_cert('kube-apiserver', san_list=api_san_list, sign_key=CA_KEY, sign_cert=CA_CERT) LAUNCH_CONFIGURATION = TEMPLATE.add_resource( autoscaling.LaunchConfiguration( 'LaunchConfiguration', BlockDeviceMappings=[ ec2.BlockDeviceMapping(DeviceName='/dev/sdb', Ebs=ec2.EBSBlockDevice( VolumeSize=Ref(DOCKER_GRAPH_SIZE), )), ], IamInstanceProfile=Ref(INSTANCE_PROFILE), ImageId=FindInMap('RegionMap', Ref(AWS_REGION), 'AMI'), InstanceType=Ref(INSTANCE_TYPE), KeyName=Ref(KEY_NAME), SecurityGroups=[ Ref(SECURITY_GROUP), Ref(API_SERVER_SECURITY_GROUP), Ref(CONSUL_HTTP_API_SECURITY_GROUP) ], UserData=Base64( Join('', [ '#cloud-config\n\n', 'coreos:\n',
def instance(self, index):
    """Build the ec2.Instance for pilosa cluster node *index*.

    Every node's config lists all cluster members, so the full host lists
    are rendered into the config file up front.
    """
    # "node<N>.<stack>.<domain>:PORT" entries for every node in the cluster.
    hosts = ", ".join(["\"node{node}.{stack_name}.{domain}:10101\"".format(node=node, stack_name='${AWS::StackName}', domain=self.domain) for node in range(self.cluster_size)])
    internal_hosts = ", ".join(["\"node{node}.{stack_name}.{domain}:12000\"".format(node=node, stack_name='${AWS::StackName}', domain=self.domain) for node in range(self.cluster_size)])
    # Pilosa TOML config; the [1:] slice drops the leading newline left by dedent().
    config_file = dedent('''
        data-dir = "/home/{username}/pilosa/data1"
        bind = "node{node}.{stack_name}.{domain}:10101"
        log-path = "/home/ubuntu/pilosa.log"
        [cluster]
        replicas = {replicas}
        internal-port = 12000
        type = "http"
        hosts = [{hosts}]
        internal-hosts = [{internal_hosts}]
    ''')[1:].format(node=index, replicas=self.replicas, stack_name='${AWS::StackName}', domain=self.domain, username=self.username, hosts=hosts, internal_hosts=internal_hosts)
    # Cloud-init shell script: raise open-file limits, build pilosa from
    # source, write the config and start the server in the background.
    user_data = dedent('''
        {common}
        # update open file limits
        cat >> /etc/security/limits.conf <<- EOF
            * soft nofile 262144
            * hard nofile 262144
            * hard memlock unlimited
            * soft memlock unlimited
        EOF
        # install pilosa
        go get -u github.com/pilosa/pilosa
        cd $GOPATH/src/github.com/pilosa/pilosa
        make install
        # set up pilosa config file
        cat > /etc/pilosa.cfg <<- EOF
        {config_file}
        EOF
        # clean up root's mess
        chown -R {username}:{username} /home/{username}
        # all output should go to pilosa.log - pilosa.out should be empty
        sudo -u {username} PATH=$PATH nohup pilosa server --config=/etc/pilosa.cfg &> /home/{username}/pilosa.out &
    '''[1:]).format(config_file=config_file, common=self.common_user_data, username=self.username)
    # User data goes through Sub() so ${AWS::StackName} is resolved at deploy time.
    return ec2.Instance(
        'PilosaInstance{}'.format(index),
        ImageId = Ref(self.ami),
        BlockDeviceMappings=[ec2.BlockDeviceMapping(
            DeviceName="/dev/sda1",
            Ebs=ec2.EBSBlockDevice(VolumeSize=Ref(self.volume_size),
                                   VolumeType=Ref(self.volume_type))
        )],
        InstanceType = Ref(self.instance_type),
        KeyName = Ref(self.key_pair),
        IamInstanceProfile=Ref(self.instance_profile),
        NetworkInterfaces=[
            ec2.NetworkInterfaceProperty(
                GroupSet=[Ref(self.instance_security_group.title)],
                AssociatePublicIpAddress='true',
                DeviceIndex='0',
                DeleteOnTermination='true',
                SubnetId=Ref(self.subnet))],
        UserData = Base64(Sub(user_data)),
    )
def create_template(self):
    """Assemble the ALB-fronted acceptance-test template for cumulus scaling groups."""
    template = self.template
    template.add_description("Acceptance Tests for cumulus scaling groups")
    # TODO fix
    # instance = self.name + self.context.environment['env']
    # TODO: give to builder
    build_chain = chain.Chain()
    application_port = "8000"
    instance_profile_name = "InstanceProfile" + self.name
    # Role assumable by EC2 and S3 service principals.
    assume_policy = Policy(Statement=[
        Statement(Effect=Allow,
                  Action=[AssumeRole],
                  Principal=Principal("Service", ["ec2.amazonaws.com",
                                                  "s3.amazonaws.com"]))
    ])
    build_chain.add(InstanceProfileRole(
        instance_profile_name=instance_profile_name,
        role=iam.Role("SomeRoleName1",
                      AssumeRolePolicyDocument=assume_policy)))
    build_chain.add(launch_config.LaunchConfig(
        meta_data=self.get_metadata(), vpc_id=Ref("VpcId")))
    build_chain.add(block_device_data.BlockDeviceData(
        ec2.BlockDeviceMapping(DeviceName="/dev/xvda",
                               Ebs=ec2.EBSBlockDevice(VolumeSize="40"))))
    build_chain.add(target_group.TargetGroup(port=application_port,
                                             vpc_id=Ref("VpcId")))
    build_chain.add(scaling_group.ScalingGroup())
    build_chain.add(ingress_rule.IngressRule(port_to_open="22",
                                             cidr="10.0.0.0/8"))
    build_chain.add(ingress_rule.IngressRule(port_to_open=application_port,
                                             cidr="10.0.0.0/8"))
    build_chain.add(dns.Dns(base_domain=Ref("BaseDomain"),
                            hosted_zone_id=Ref("AlbCanonicalHostedZoneID"),
                            dns_name=Ref("AlbDNSName")))
    build_chain.add(alb_port.AlbPort(port_to_open=application_port,
                                     alb_sg_name="AlbSg"))
    build_chain.add(listener_rule.ListenerRule(
        base_domain_name=Ref("BaseDomain"),
        alb_listener_rule=Ref("IAlbListener"),
        path_pattern="/*",
        priority="2"))
    build_chain.run(chaincontext.ChainContext(
        template=template, instance_name=instance_profile_name))
# Security group: SSH restricted to the sshLocation parameter; HTTP/HTTPS
# open to the world.
securityGroup = ec2.SecurityGroup('InstanceSecurityGroup')
securityGroup.GroupDescription = 'SSH and HTTP Access'
securityGroup.SecurityGroupIngress = [
    ec2.SecurityGroupRule(IpProtocol='tcp', FromPort=22, ToPort=22,
                          CidrIp=Ref(sshLocation)),
    ec2.SecurityGroupRule(IpProtocol='tcp', FromPort=80, ToPort=80,
                          CidrIp='0.0.0.0/0'),
    ec2.SecurityGroupRule(IpProtocol='tcp', FromPort=443, ToPort=443,
                          CidrIp='0.0.0.0/0'),
]

# Single web instance with a fixed AMI and a 30 GiB root volume.
instance = ec2.Instance("ec2instance",
                        ImageId="ami-cd0f5cb6",
                        InstanceType="t2.micro")
instance.SecurityGroups = [Ref(securityGroup)]
instance.KeyName = Ref(keyName)
instance.BlockDeviceMappings = [
    ec2.BlockDeviceMapping(
        DeviceName='/dev/sda1',
        Ebs=ec2.EBSBlockDevice(
            VolumeSize=30
        )
    )
]
instance.Tags = [ec2.Tag('Name', Ref(name))]

# Stable public address: an Elastic IP bound to the instance.
ipAddress = ec2.EIP('IPAddress')
ipAssociation = ec2.EIPAssociation(
    'EIPAssociation',
    InstanceId=Ref(instance),
    EIP=Ref(ipAddress)
)
# It would be nice to generate the route53 record here as well, but a
# different account has the Hosted Zone configured :(
'LaunchTemplate', LaunchTemplateName=Ref(api_launch_template_name), LaunchTemplateData=ec2.LaunchTemplateData( ImageId='ami-066826c6a40879d75', InstanceType=Ref(api_instance_class), IamInstanceProfile=ec2.IamInstanceProfile( Arn=GetAtt(ec2_instance_profile, 'Arn') ), InstanceInitiatedShutdownBehavior='terminate', Monitoring=ec2.Monitoring(Enabled=True), SecurityGroups=[Ref(api_security_group)], BlockDeviceMappings=[ ec2.BlockDeviceMapping( DeviceName='/dev/xvdcz', Ebs=ec2.EBSBlockDevice( DeleteOnTermination=True, VolumeSize=22, VolumeType='gp2' ) ) ], UserData=Base64( Join('', [ '#!/bin/bash\n', 'echo ECS_CLUSTER=', Ref(ecs_cluster), ' >> /etc/ecs/ecs.config;echo ECS_BACKEND_HOST= >> /etc/ecs/ecs.config;' ]) ) ) ) )
def create_asg(self,
               layer_name,
               instance_profile,
               instance_type=None,
               ami_name='ubuntu1404LtsAmiId',
               ec2_key=None,
               user_data=None,
               default_instance_type=None,
               security_groups=None,
               min_size=1,
               max_size=1,
               root_volume_size=None,
               root_volume_type=None,
               include_ephemerals=True,
               number_ephemeral_vols=2,
               ebs_data_volumes=None,  # [{'size':'100', 'type':'gp2', 'delete_on_termination': True, 'iops': 4000, 'volume_type': 'io1'}]
               custom_tags=None,
               load_balancer=None,
               instance_monitoring=False,
               subnet_type='private',
               launch_config_metadata=None,
               creation_policy=None,
               update_policy=None,
               depends_on=None):
    '''
    Wrapper method used to create an EC2 Launch Configuration and Auto Scaling group

    @param layer_name [string] friendly name of the set of instances being created - will be set as the name for instances deployed
    @param instance_profile [Troposphere.iam.InstanceProfile] IAM Instance Profile object to be applied to instances launched within this Auto Scaling group
    @param instance_type [Troposphere.Parameter | string] Reference to the AWS EC2 Instance Type to deploy
    @param ami_name [string] Name of the AMI to deploy as defined within the RegionMap lookup for the deployed region
    @param ec2_key [Troposphere.Parameter | Troposphere.Ref(Troposphere.Parameter)] Input parameter used to gather the name of the EC2 key to use to secure access to instances launched within this Auto Scaling group
    @param user_data [string[]] Array of strings (lines of bash script) to be set as the user data as a bootstrap script for instances launched within this Auto Scaling group
    @param default_instance_type [string - AWS Instance Type] AWS instance type to set as the default for the input parameter defining the instance type for this layer_name
    @param security_groups [Troposphere.ec2.SecurityGroup[]] array of security groups to be applied to instances within this Auto Scaling group
    @param min_size [int] value to set as the minimum number of instances for the Auto Scaling group
    @param max_size [int] value to set as the maximum number of instances for the Auto Scaling group
    @param root_volume_size [int] size (in GiB) to assign to the root volume of the launched instance
    @param include_ephemerals [Boolean] indicates that ephemeral volumes should be included in the block device mapping of the Launch Configuration
    @param number_ephemeral_vols [int] number of ephemeral volumes to attach within the block device mapping Launch Configuration
    @param ebs_data_volumes [list] dictionary pair of size and type data properties in a list used to create ebs volume attachments
    @param custom_tags [Troposphere.autoscaling.Tag[]] Collection of Auto Scaling tags to be assigned to the Auto Scaling Group
    @param load_balancer [Troposphere.elasticloadbalancing.LoadBalancer] Object reference to an ELB to be assigned to this auto scaling group
    @param instance_monitoring [Boolean] indicates that detailed monitoring should be turned on for all instances launched within this Auto Scaling group
    @param subnet_type [string {'public', 'private'}] string indicating which type of subnet (public or private) instances should be launched into
    '''
    if subnet_type not in ['public', 'private']:
        raise RuntimeError(
            'Unable to determine which type of subnet instances should be launched into. '
            + str(subnet_type) + ' is not one of ["public", "private"].')

    # Normalise the EC2 key into a Ref (identity/isinstance checks instead
    # of the original `!= None` / `type(...) != Ref` comparisons).
    if ec2_key is not None and not isinstance(ec2_key, Ref):
        ec2_key = Ref(ec2_key)
    elif ec2_key is None:
        ec2_key = Ref(self.template.parameters['ec2Key'])

    if default_instance_type is None:
        default_instance_type = 'm1.small'  # NOTE(review): currently unused; kept for compatibility

    # A plain string is used verbatim; anything else is treated as a Parameter.
    if not isinstance(instance_type, str):
        instance_type = Ref(instance_type)

    # Security groups may arrive as objects or Refs; normalise to Refs.
    sg_list = [sg if isinstance(sg, Ref) else Ref(sg)
               for sg in security_groups]

    launch_config_obj = autoscaling.LaunchConfiguration(
        layer_name + 'LaunchConfiguration',
        IamInstanceProfile=Ref(instance_profile),
        ImageId=FindInMap('RegionMap', Ref('AWS::Region'), ami_name),
        InstanceType=instance_type,
        SecurityGroups=sg_list,
        KeyName=ec2_key,
        Metadata=(launch_config_metadata or None),
        InstanceMonitoring=instance_monitoring)

    if user_data is not None:
        launch_config_obj.UserData = user_data

    block_devices = []
    # Optional explicitly-sized root volume.
    if root_volume_type is not None and root_volume_size is not None:
        ebs_device = ec2.EBSBlockDevice(VolumeSize=root_volume_size)
        if root_volume_type is not None:
            ebs_device.VolumeType = root_volume_type
        block_devices.append(
            ec2.BlockDeviceMapping(DeviceName='/dev/sda1', Ebs=ebs_device))

    # Candidate device names. EBS data volumes pop() from the END ('z'
    # first); the list is then reversed so ephemerals pop 'b' upward —
    # this ordering is preserved from the original implementation.
    device_names = ['/dev/sd%s' % c for c in 'bcdefghijklmnopqrstuvwxyz']

    if ebs_data_volumes is not None and len(ebs_data_volumes) > 0:
        for ebs_volume in ebs_data_volumes:
            device_name = device_names.pop()
            ebs_block_device = ec2.EBSBlockDevice(
                DeleteOnTermination=ebs_volume.get('delete_on_termination',
                                                   True),
                VolumeSize=ebs_volume.get('size', '100'),
                VolumeType=ebs_volume.get('type', 'gp2'))
            if 'iops' in ebs_volume:
                ebs_block_device.Iops = int(ebs_volume.get('iops'))
            if 'snapshot_id' in ebs_volume:
                ebs_block_device.SnapshotId = ebs_volume.get('snapshot_id')
            block_devices.append(
                ec2.BlockDeviceMapping(DeviceName=device_name,
                                       Ebs=ebs_block_device))

    if include_ephemerals and number_ephemeral_vols > 0:
        device_names.reverse()
        for x in range(0, number_ephemeral_vols):
            device_name = device_names.pop()
            block_devices.append(
                ec2.BlockDeviceMapping(DeviceName=device_name,
                                       VirtualName='ephemeral' + str(x)))

    if len(block_devices) > 0:
        launch_config_obj.BlockDeviceMappings = block_devices

    launch_config = self.template.add_resource(launch_config_obj)

    # Build the ASG once; DependsOn is added only when requested (the
    # original duplicated the entire constructor call for this case).
    asg_kwargs = dict(
        AvailabilityZones=self.azs,
        LaunchConfigurationName=Ref(launch_config),
        MaxSize=max_size,
        MinSize=min_size,
        DesiredCapacity=min(min_size, max_size),
        VPCZoneIdentifier=self.subnets[subnet_type.lower()],
        TerminationPolicies=['OldestLaunchConfiguration',
                             'ClosestToNextInstanceHour', 'Default'])
    if depends_on:
        asg_kwargs['DependsOn'] = depends_on
    auto_scaling_obj = autoscaling.AutoScalingGroup(
        layer_name + 'AutoScalingGroup', **asg_kwargs)

    # Normalise the load balancer argument (dict of LBs, iterable of LBs,
    # a single Ref, or a single non-iterable LB object) into Refs.
    lb_tmp = []
    if load_balancer is not None:
        try:
            if isinstance(load_balancer, dict):
                for lb in load_balancer:
                    lb_tmp.append(Ref(load_balancer[lb]))
            elif not isinstance(load_balancer, Ref):
                for lb in load_balancer:
                    lb_tmp.append(Ref(lb))
            else:
                lb_tmp.append(load_balancer)
        except TypeError:
            # A bare, non-iterable load balancer object.
            lb_tmp.append(Ref(load_balancer))
    else:
        lb_tmp = None

    if lb_tmp is not None and len(lb_tmp) > 0:
        auto_scaling_obj.LoadBalancerNames = lb_tmp

    # Policies are attached to the raw resource dict (troposphere internal).
    if creation_policy is not None:
        auto_scaling_obj.resource['CreationPolicy'] = creation_policy
    if update_policy is not None:
        auto_scaling_obj.resource['UpdatePolicy'] = update_policy

    if custom_tags is not None and len(custom_tags) > 0:
        if not isinstance(custom_tags, list):
            custom_tags = [custom_tags]
        auto_scaling_obj.Tags = custom_tags
    else:
        auto_scaling_obj.Tags = []
    # Every instance is tagged with the layer name (propagated at launch).
    auto_scaling_obj.Tags.append(autoscaling.Tag('Name', layer_name, True))

    return self.template.add_resource(auto_scaling_obj)
def add_instance(self, stack_name, template, provision_refs, instance_num, version=None):
    """Add one EC2 node resource (plus ID/IP outputs) to the CloudFormation template.

    Builds an ``ec2.Instance`` named ``node<instance_num>`` whose AMI, instance
    type and key pair come from the app's ``provision`` config section, attaches
    it to a random subnet / security group taken from ``provision_refs``, and
    injects a bash user-data script that installs dependencies and joins the
    hydra network.

    :param stack_name: CloudFormation stack name; used in the Name tag and the
        hydra ``join-network --name`` argument.
    :param template: troposphere ``Template`` the instance and outputs are added to.
    :param provision_refs: holder of previously created resources
        (``instance_profile``, ``security_group_ec2``, ``random_subnet``).
        NOTE(review): exact type is defined elsewhere — confirm attribute names.
    :param instance_num: integer suffix that makes resource/output names unique.
    :param version: optional software version; when given, passed to hydra as
        ``--version=<version>``.
    :returns: the created ``ec2.Instance`` (already added to ``template``).
    """
    instance = ec2.Instance(f'node{instance_num}')
    instance.IamInstanceProfile = Ref(provision_refs.instance_profile)
    instance.ImageId = self.app.config.get('provision', 'aws_ec2_ami_id')
    instance.InstanceType = self.app.config.get('provision', 'aws_ec2_instance_type')
    instance.KeyName = self.app.config.get('provision', 'aws_ec2_key_name')
    instance.NetworkInterfaces = [
        ec2.NetworkInterfaceProperty(GroupSet=[
            provision_refs.security_group_ec2,
        ],
                                     AssociatePublicIpAddress='true',
                                     DeviceIndex='0',
                                     DeleteOnTermination='true',
                                     SubnetId=provision_refs.random_subnet)
    ]
    instance.EbsOptimized = 'true'
    # i3 instance types use their local NVMe disk for /data (mounted in user
    # data below), so the oversized EBS root volume is only added otherwise.
    if 'i3' not in instance.InstanceType:
        instance.BlockDeviceMappings = [
            # Set root volume size to 500gb
            ec2.BlockDeviceMapping(DeviceName='/dev/sda1',
                                   Ebs=ec2.EBSBlockDevice(VolumeSize='500',
                                                          VolumeType='io1',
                                                          Iops='1000'))
        ]
    instance.Tags = Tags(Name=f'{stack_name}-node{instance_num}')
    version_flag = f' --version={version}' if version else ''
    join_network_arguments = f'--name={stack_name}{version_flag} --set-default --install --no-configure'
    # User data: base packages, optional NVMe mount, then hydra install + join.
    instance.UserData = Base64(
        Join(
            '',
            [
                '#!/bin/bash -xe\n',
                'apt update -y -q\n',
                'UCF_FORCE_CONFOLD=1 DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -qq -y install python3-pip\n',
                'apt install -y -q htop tmux zsh jq libssl-dev libleveldb-dev || true\n',
                'ln -s /usr/lib/x86_64-linux-gnu/libleveldb.so /usr/lib/x86_64-linux-gnu/libleveldb.so.1\n',
                'mkdir /data\n',
            ] + ([
                # If type i3, mount NVMe drive at /data
                'DEV=/dev/$(lsblk | grep nvme | awk \'{print $1}\')\n',
                'mkfs -t xfs $DEV\n',
                'UUID=$(blkid -s UUID -o value $DEV)\n',
                'echo "UUID=$UUID /data xfs defaults 0 0" >> /etc/fstab\n',
                'mount -a\n'
            ] if 'i3' in instance.InstanceType else []) + [
                # Install hydra
                'pip3 install cement colorlog\n',
                # pip_install is a %-format template filled from the hydra
                # config section — TODO confirm expected placeholder keys.
                f'pip3 install {self.app.config.get("provision", "pip_install") % self.app.config["hydra"]}\n',
                'chown ubuntu:ubuntu /data\n',
                'su -l -c "hydra info" ubuntu\n',
                # Generate default hydra.yml
                "sed -i 's/workdir: .*/workdir: \\/data/' /home/ubuntu/.hydra.yml\n",  # Change workdir to /data
                f'su -l -c "hydra client join-network {join_network_arguments}" ubuntu\n'
            ]))
    template.add_resource(instance)
    # Export instance id and public IP so callers can find the node post-deploy.
    template.add_output([
        Output(
            f"ID{instance_num}",
            Description="InstanceId of the newly created EC2 instance",
            Value=Ref(instance),
        ),
        Output(
            f"IP{instance_num}",
            Description=
            "Public IP address of the newly created EC2 instance",
            Value=GetAtt(instance, "PublicIp"),
        ),
    ])
    return instance
def gen_template(config):
    """Generate an EC2-classic CloudFormation stack for Couchbase load testing.

    Creates a shared security group, an IAM role/instance profile for
    CloudWatch Logs access, and four fleets of EC2 instances (Couchbase
    Server, Sync Gateway, Gateload, load balancers), all sized by ``config``.

    :param config: object exposing ``server_number``/``server_type``,
        ``sync_gateway_number``/``sync_gateway_type``, ``load_number``/
        ``load_type`` and ``lb_number``/``lb_type`` attributes.
    :returns: the rendered template as a JSON string.
    """
    num_couchbase_servers = config.server_number
    couchbase_instance_type = config.server_type

    num_sync_gateway_servers = config.sync_gateway_number
    sync_gateway_server_type = config.sync_gateway_type

    num_gateloads = config.load_number
    gateload_instance_type = config.load_type

    num_lbs = config.lb_number
    lb_instance_type = config.lb_type

    t = Template()
    t.add_description(
        'An Ec2-classic stack with Couchbase Server, Sync Gateway + load testing tools '
    )

    def createCouchbaseSecurityGroups(t):
        """Create and register the security group shared by every instance."""
        # Couchbase security group
        secGrpCouchbase = ec2.SecurityGroup('CouchbaseSecurityGroup')
        secGrpCouchbase.GroupDescription = "Allow access to Couchbase Server"
        secGrpCouchbase.SecurityGroupIngress = [
            ec2.SecurityGroupRule(  # ssh
                IpProtocol="tcp",
                FromPort="22",
                ToPort="22",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase web console
                IpProtocol="tcp",
                FromPort="8091",
                ToPort="8091",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # sync gw user port
                IpProtocol="tcp",
                FromPort="4984",
                ToPort="4984",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # sync gw admin port
                IpProtocol="tcp",
                FromPort="4985",
                ToPort="4985",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # expvars
                IpProtocol="tcp",
                FromPort="9876",
                ToPort="9876",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="4369",
                ToPort="4369",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="5984",
                ToPort="5984",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="8092",
                ToPort="8092",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="11209",
                ToPort="11209",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="11210",
                ToPort="11210",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="11211",
                ToPort="11211",
                CidrIp="0.0.0.0/0",
            ),
            ec2.SecurityGroupRule(  # couchbase server
                IpProtocol="tcp",
                FromPort="21100",
                ToPort="21299",
                CidrIp="0.0.0.0/0",
            )
        ]
        # Add security group to template
        t.add_resource(secGrpCouchbase)
        return secGrpCouchbase

    #
    # Parameters
    #
    keyname_param = t.add_parameter(
        Parameter(
            'KeyName',
            Type='String',
            Description='Name of an existing EC2 KeyPair to enable SSH access')
    )

    secGrpCouchbase = createCouchbaseSecurityGroups(t)

    # Create an IAM Role to give the EC2 instance permissions to
    # push Cloudwatch Logs, which avoids the need to bake in the
    # AWS_KEY + AWS_SECRET_KEY into an ~/.aws/credentials file or
    # env variables
    mobileTestKitRole = iam.Role(
        'MobileTestKit',
        ManagedPolicyArns=['arn:aws:iam::aws:policy/CloudWatchFullAccess'],
        AssumeRolePolicyDocument={
            'Version': '2012-10-17',
            'Statement': [{
                'Action': 'sts:AssumeRole',
                'Principal': {
                    'Service': 'ec2.amazonaws.com'
                },
                'Effect': 'Allow',
            }]
        })
    t.add_resource(mobileTestKitRole)

    # The InstanceProfile instructs the EC2 instance to use
    # the mobileTestKitRole created above. It will be referenced
    # in the instance.IamInstanceProfile property for all EC2 instances created
    instanceProfile = iam.InstanceProfile(
        'EC2InstanceProfile',
        Roles=[Ref(mobileTestKitRole)],
    )
    t.add_resource(instanceProfile)

    # Couchbase Server Instances
    # FIX: xrange is Python-2-only (NameError on Python 3) -> range.
    for i in range(num_couchbase_servers):
        name = "couchbaseserver{}".format(i)
        instance = ec2.Instance(name)
        instance.ImageId = "ami-6d1c2007"  # centos7
        instance.InstanceType = couchbase_instance_type
        instance.SecurityGroups = [Ref(secGrpCouchbase)]
        instance.KeyName = Ref(keyname_param)
        instance.Tags = Tags(Name=name, Type="couchbaseserver")
        instance.IamInstanceProfile = Ref(instanceProfile)
        instance.BlockDeviceMappings = [
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(
                                       DeleteOnTermination=True,
                                       VolumeSize=200,
                                       VolumeType="gp2"))
        ]
        t.add_resource(instance)

    # Sync Gw instances (centos7 ami)
    for i in range(num_sync_gateway_servers):
        name = "syncgateway{}".format(i)
        instance = ec2.Instance(name)
        instance.ImageId = "ami-6d1c2007"  # centos7
        instance.InstanceType = sync_gateway_server_type
        instance.SecurityGroups = [Ref(secGrpCouchbase)]
        instance.KeyName = Ref(keyname_param)
        instance.IamInstanceProfile = Ref(instanceProfile)
        instance.BlockDeviceMappings = [
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(
                                       DeleteOnTermination=True,
                                       VolumeSize=200,
                                       VolumeType="gp2"))
        ]

        # Make syncgateway0 a cache writer, and the rest cache readers
        # See https://github.com/couchbase/sync_gateway/wiki/Distributed-channel-cache-design-notes
        if i == 0:
            instance.Tags = Tags(Name=name,
                                 Type="syncgateway",
                                 CacheType="writer")
        else:
            instance.Tags = Tags(Name=name, Type="syncgateway")

        t.add_resource(instance)

    # Gateload instances (centos7 ami)
    for i in range(num_gateloads):
        name = "gateload{}".format(i)
        instance = ec2.Instance(name)
        instance.ImageId = "ami-6d1c2007"  # centos7
        instance.InstanceType = gateload_instance_type
        instance.SecurityGroups = [Ref(secGrpCouchbase)]
        instance.KeyName = Ref(keyname_param)
        instance.IamInstanceProfile = Ref(instanceProfile)
        instance.Tags = Tags(Name=name, Type="gateload")
        instance.BlockDeviceMappings = [
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(
                                       DeleteOnTermination=True,
                                       VolumeSize=200,
                                       VolumeType="gp2"))
        ]
        t.add_resource(instance)

    # Load Balancer instances (centos7 ami)
    for i in range(num_lbs):
        name = "loadbalancer{}".format(i)
        instance = ec2.Instance(name)
        instance.ImageId = "ami-6d1c2007"  # centos7
        instance.InstanceType = lb_instance_type
        instance.SecurityGroups = [Ref(secGrpCouchbase)]
        instance.KeyName = Ref(keyname_param)
        instance.IamInstanceProfile = Ref(instanceProfile)
        instance.Tags = Tags(Name=name, Type="loadbalancer")
        instance.BlockDeviceMappings = [
            ec2.BlockDeviceMapping(DeviceName="/dev/sda1",
                                   Ebs=ec2.EBSBlockDevice(
                                       DeleteOnTermination=True,
                                       VolumeSize=200,
                                       VolumeType="gp2"))
        ]
        t.add_resource(instance)

    return t.to_json()
Stack=Ref("AWS::StackName")))) for f in labels: LaunchConfig = template.add_resource( LaunchConfiguration( "LaunchConfiguration" + f, ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), InstanceType=instanceTypeWorker, KeyName=Ref(keyPar_param), IamInstanceProfile=Ref(rootInstanceProfile), SecurityGroups=[Ref(instanceSecurityWorkerGroup)], UserData=Base64( USER_DATA_WORKER.replace("[ipPrivateList]", ipPrivateList).replace("[label]", f)), BlockDeviceMappings=[ ec2.BlockDeviceMapping(DeviceName="/dev/xvda", Ebs=ec2.EBSBlockDevice(VolumeSize="8")) ])) AutoscalingGroupX = template.add_resource( AutoScalingGroup( "AutoscalingGroup" + f, Cooldown=300, HealthCheckGracePeriod=300, DesiredCapacity=DesiredCapacity, MinSize=MinSize, MaxSize=MaxSize, Tags=[ Tag("Name", environmentString + "AutoscalingGroup" + f, True) ], LaunchConfigurationName=Ref(LaunchConfig), VPCZoneIdentifier=subnetsList,
def buildStack(bootstrap, env):
    """Build the hello-world app stack template.

    Creates VPC networking, security groups and a PostgreSQL RDS instance;
    when ``bootstrap`` is truthy the template stops there. Otherwise it also
    creates the IAM role/profile, launch configuration, application load
    balancer and autoscaling group for the app instances.

    :param bootstrap: when truthy, return after the network/database section.
    :param env: environment suffix mixed into resource names (e.g. "prod").
    :returns: the troposphere ``Template`` object.
    """
    t = Template()

    t.add_description("""\
Configures autoscaling group for hello world app""")

    vpcCidr = t.add_parameter(
        Parameter(
            "VPCCidr",
            Type="String",
            Description="VPC cidr (x.x.x.x/xx)",
        ))

    publicSubnet1 = t.add_parameter(
        Parameter(
            "PublicSubnet1",
            Type="String",
            Description="A public VPC subnet ID for the api app load balancer.",
        ))

    publicSubnet2 = t.add_parameter(
        Parameter(
            "PublicSubnet2",
            Type="String",
            Description="A public VPC subnet ID for the api load balancer.",
        ))

    dbName = t.add_parameter(
        Parameter(
            "DBName",
            Default="HelloWorldApp",
            Description="The database name",
            Type="String",
            MinLength="1",
            MaxLength="64",
            AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
            ConstraintDescription=("must begin with a letter and contain only"
                                   " alphanumeric characters.")))

    dbUser = t.add_parameter(
        Parameter(
            "DBUser",
            NoEcho=True,
            Description="The database admin account username",
            Type="String",
            MinLength="1",
            MaxLength="16",
            AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
            ConstraintDescription=("must begin with a letter and contain only"
                                   " alphanumeric characters.")))

    dbPassword = t.add_parameter(
        Parameter(
            "DBPassword",
            NoEcho=True,
            Description="The database admin account password",
            Type="String",
            MinLength="8",
            MaxLength="41",
            AllowedPattern="[a-zA-Z0-9]*",
            ConstraintDescription="must contain only alphanumeric characters.")
    )

    dbType = t.add_parameter(
        Parameter(
            "DBType",
            Default="db.t2.medium",
            Description="Database instance class",
            Type="String",
            AllowedValues=[
                "db.m5.large", "db.m5.xlarge", "db.m5.2xlarge",
                "db.m5.4xlarge", "db.m5.12xlarge", "db.m5.24xlarge",
                "db.m4.large", "db.m4.xlarge", "db.m4.2xlarge",
                "db.m4.4xlarge", "db.m4.10xlarge", "db.m4.16xlarge",
                "db.r4.large", "db.r4.xlarge", "db.r4.2xlarge",
                "db.r4.4xlarge", "db.r4.8xlarge", "db.r4.16xlarge",
                "db.x1e.xlarge", "db.x1e.2xlarge", "db.x1e.4xlarge",
                "db.x1e.8xlarge", "db.x1e.16xlarge", "db.x1e.32xlarge",
                "db.x1.16xlarge", "db.x1.32xlarge", "db.r3.large",
                "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge",
                "db.r3.8xlarge", "db.t2.micro", "db.t2.small",
                "db.t2.medium", "db.t2.large", "db.t2.xlarge", "db.t2.2xlarge"
            ],
            ConstraintDescription="must select a valid database instance type.",
        ))

    dbAllocatedStorage = t.add_parameter(
        Parameter(
            "DBAllocatedStorage",
            Default="5",
            Description="The size of the database (Gb)",
            Type="Number",
            MinValue="5",
            MaxValue="1024",
            ConstraintDescription="must be between 5 and 1024Gb.",
        ))

    whitelistedCIDR = t.add_parameter(
        Parameter(
            "WhitelistedCIDR",
            Description="CIDR whitelisted to be open on public instances",
            Type="String",
        ))

    #### NETWORK SECTION ####
    vpc = t.add_resource(
        VPC("VPC", CidrBlock=Ref(vpcCidr), EnableDnsHostnames=True))

    subnet1 = t.add_resource(
        Subnet("Subnet1",
               CidrBlock=Ref(publicSubnet1),
               AvailabilityZone="eu-west-1a",
               VpcId=Ref(vpc)))

    subnet2 = t.add_resource(
        Subnet("Subnet2",
               CidrBlock=Ref(publicSubnet2),
               AvailabilityZone="eu-west-1b",
               VpcId=Ref(vpc)))

    internetGateway = t.add_resource(InternetGateway('InternetGateway'))

    gatewayAttachment = t.add_resource(
        VPCGatewayAttachment('AttachGateway',
                             VpcId=Ref(vpc),
                             InternetGatewayId=Ref(internetGateway)))

    routeTable = t.add_resource(RouteTable('RouteTable', VpcId=Ref(vpc)))

    route = t.add_resource(
        Route(
            'Route',
            DependsOn='AttachGateway',
            GatewayId=Ref('InternetGateway'),
            DestinationCidrBlock='0.0.0.0/0',
            RouteTableId=Ref(routeTable),
        ))

    subnetRouteTableAssociation = t.add_resource(
        SubnetRouteTableAssociation(
            'SubnetRouteTableAssociation',
            SubnetId=Ref(subnet1),
            RouteTableId=Ref(routeTable),
        ))

    subnetRouteTableAssociation2 = t.add_resource(
        SubnetRouteTableAssociation(
            'SubnetRouteTableAssociation2',
            SubnetId=Ref(subnet2),
            RouteTableId=Ref(routeTable),
        ))

    #### SECURITY GROUP ####
    loadBalancerSg = t.add_resource(
        ec2.SecurityGroup(
            "LoadBalancerSecurityGroup",
            VpcId=Ref(vpc),
            # NOTE(review): description says SSH/22 but the rule opens
            # HTTP/80; left unchanged because updating GroupDescription
            # forces security-group replacement on deploy.
            GroupDescription="Enable SSH access via port 22",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                ),
            ],
        ))

    instanceSg = t.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            VpcId=Ref(vpc),
            GroupDescription="Enable SSH access via port 22",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp=Ref(whitelistedCIDR),
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="8000",
                    ToPort="8000",
                    SourceSecurityGroupId=Ref(loadBalancerSg),
                ),
            ],
        ))

    rdsSg = t.add_resource(
        SecurityGroup("RDSSecurityGroup",
                      GroupDescription="Security group for RDS DB Instance.",
                      VpcId=Ref(vpc),
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="5432",
                              ToPort="5432",
                              SourceSecurityGroupId=Ref(instanceSg),
                          ),
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="5432",
                              ToPort="5432",
                              CidrIp=Ref(whitelistedCIDR),
                          ),
                      ]))

    #### DATABASE SECTION ####
    subnetGroup = t.add_resource(
        DBSubnetGroup(
            "SubnetGroup",
            DBSubnetGroupDescription=
            "Subnets available for the RDS DB Instance",
            SubnetIds=[Ref(subnet1), Ref(subnet2)],
        ))

    db = t.add_resource(
        DBInstance(
            "RDSHelloWorldApp",
            DBName=Join("", [Ref(dbName), env]),
            DBInstanceIdentifier=Join("", [Ref(dbName), env]),
            EnableIAMDatabaseAuthentication=True,
            PubliclyAccessible=True,
            AllocatedStorage=Ref(dbAllocatedStorage),
            DBInstanceClass=Ref(dbType),
            Engine="postgres",
            EngineVersion="10.4",
            MasterUsername=Ref(dbUser),
            MasterUserPassword=Ref(dbPassword),
            DBSubnetGroupName=Ref(subnetGroup),
            VPCSecurityGroups=[Ref(rdsSg)],
        ))

    t.add_output(
        Output("RDSConnectionString",
               Description="Connection string for database",
               Value=GetAtt("RDSHelloWorldApp", "Endpoint.Address")))

    # Bootstrap mode: only network + database, no app instances.
    if bootstrap:
        return t

    #### INSTANCE SECTION ####
    keyName = t.add_parameter(
        Parameter(
            "KeyName",
            Type="String",
            Description="Name of an existing EC2 KeyPair to enable SSH access",
            MinLength="1",
            AllowedPattern="[\x20-\x7E]*",
            MaxLength="255",
            ConstraintDescription="can contain only ASCII characters.",
        ))

    scaleCapacityMin = t.add_parameter(
        Parameter(
            "ScaleCapacityMin",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))

    scaleCapacityMax = t.add_parameter(
        Parameter(
            "ScaleCapacityMax",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))

    scaleCapacityDesired = t.add_parameter(
        Parameter(
            "ScaleCapacityDesired",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))

    amiId = t.add_parameter(
        Parameter(
            "AmiId",
            Type="String",
            Default="ami-09693313102a30b2c",
            Description="The AMI id for the api instances",
        ))

    instanceType = t.add_parameter(
        Parameter("InstanceType",
                  Description="WebServer EC2 instance type",
                  Type="String",
                  Default="t2.medium",
                  AllowedValues=[
                      "t2.nano", "t2.micro", "t2.small", "t2.medium",
                      "t2.large", "m3.medium", "m3.large", "m3.xlarge",
                      "m3.2xlarge", "m4.large", "m4.xlarge", "m4.2xlarge",
                      "m4.4xlarge", "m4.10xlarge", "c4.large", "c4.xlarge",
                      "c4.2xlarge", "c4.4xlarge", "c4.8xlarge"
                  ],
                  ConstraintDescription="must be a valid EC2 instance type."))

    assumeRole = t.add_resource(
        Role("AssumeRole",
             AssumeRolePolicyDocument=json.loads("""\
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Action": "sts:AssumeRole",
            "Principal": {
                "Service": "ec2.amazonaws.com"
            },
            "Effect": "Allow",
            "Sid": ""
        }
    ]
}\
""")))

    instanceProfile = t.add_resource(
        InstanceProfile("InstanceProfile", Roles=[Ref(assumeRole)]))

    rolePolicyType = t.add_resource(
        PolicyType("RolePolicyType",
                   Roles=[Ref(assumeRole)],
                   PolicyName=Join("", ["CloudWatchHelloWorld", "-", env]),
                   PolicyDocument=json.loads("""\
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Action": [
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:DescribeLogStreams",
                "logs:PutLogEvents"
            ],
            "Effect": "Allow",
            "Resource": [
                "arn:aws:logs:*:*:*"
            ]
        }
    ]
}\
""")))

    appPassword = t.add_parameter(
        Parameter(
            "AppPassword",
            NoEcho=True,
            Description="The Password for the app user",
            Type="String",
            MinLength="8",
            MaxLength="41",
            AllowedPattern="[a-zA-Z0-9]*",
            ConstraintDescription="must contain only alphanumeric characters.")
    )

    launchConfig = t.add_resource(
        LaunchConfiguration(
            "LaunchConfiguration",
            Metadata=autoscaling.Metadata(
                cloudformation.Init({
                    "config":
                    cloudformation.InitConfig(
                        files=cloudformation.InitFiles({
                            "/home/app/environment":
                            cloudformation.InitFile(
                                content=Join(
                                    "",
                                    [
                                        "SPRING_DATASOURCE_URL=",
                                        "jdbc:postgresql://",
                                        GetAtt("RDSHelloWorldApp",
                                               "Endpoint.Address"),
                                        ":5432/HelloWorldApp" + env +
                                        "?currentSchema=hello_world",
                                        "\n",
                                        "SPRING_DATASOURCE_USERNAME=app",
                                        "\n",
                                        # FIX: the password value had been
                                        # redacted into invalid syntax; wire
                                        # it to the AppPassword parameter.
                                        "SPRING_DATASOURCE_PASSWORD=",
                                        Ref(appPassword),
                                        "\n",
                                        "SPRING_PROFILES_ACTIVE=",
                                        env,
                                        "\n"
                                    ]),
                                mode="000600",
                                owner="app",
                                group="app")
                        }),
                    )
                }),
            ),
            UserData=Base64(
                Join('', [
                    "#!/bin/bash\n",
                    "/opt/aws/bin/cfn-init",
                    " --resource LaunchConfiguration",
                    " --stack ",
                    Ref("AWS::StackName"),
                    " --region ",
                    Ref("AWS::Region"),
                    "\n",
                    "/opt/aws/bin/cfn-signal -e $? ",
                    " --stack ",
                    Ref("AWS::StackName"),
                    " --resource AutoscalingGroup ",
                    " --region ",
                    Ref("AWS::Region"),
                    "\n"
                ])),
            ImageId=Ref(amiId),
            KeyName=Ref(keyName),
            IamInstanceProfile=Ref(instanceProfile),
            BlockDeviceMappings=[
                ec2.BlockDeviceMapping(DeviceName="/dev/xvda",
                                       Ebs=ec2.EBSBlockDevice(VolumeSize="8")),
            ],
            SecurityGroups=[Ref(instanceSg)],
            InstanceType=Ref(instanceType),
            AssociatePublicIpAddress='True',
        ))

    applicationElasticLB = t.add_resource(
        elb.LoadBalancer("ApplicationElasticLB",
                         Name="ApplicationElasticLB-" + env,
                         Scheme="internet-facing",
                         Type="application",
                         SecurityGroups=[Ref(loadBalancerSg)],
                         Subnets=[Ref(subnet1), Ref(subnet2)]))

    targetGroup = t.add_resource(
        elb.TargetGroup("TargetGroupHelloWorld",
                        HealthCheckProtocol="HTTP",
                        HealthCheckTimeoutSeconds="15",
                        HealthyThresholdCount="5",
                        Matcher=elb.Matcher(HttpCode="200,404"),
                        Port="8000",
                        Protocol="HTTP",
                        UnhealthyThresholdCount="3",
                        TargetGroupAttributes=[
                            elb.TargetGroupAttribute(
                                Key="deregistration_delay.timeout_seconds",
                                Value="120",
                            )
                        ],
                        VpcId=Ref(vpc)))

    listener = t.add_resource(
        elb.Listener("Listener",
                     Port="80",
                     Protocol="HTTP",
                     LoadBalancerArn=Ref(applicationElasticLB),
                     DefaultActions=[
                         elb.Action(Type="forward",
                                    TargetGroupArn=Ref(targetGroup))
                     ]))

    t.add_output(
        Output("URL",
               Description="URL of the sample website",
               Value=Join("",
                          ["http://",
                           GetAtt(applicationElasticLB, "DNSName")])))

    autoScalingGroup = t.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup",
            DesiredCapacity=Ref(scaleCapacityDesired),
            LaunchConfigurationName=Ref(launchConfig),
            MinSize=Ref(scaleCapacityMin),
            MaxSize=Ref(scaleCapacityMax),
            VPCZoneIdentifier=[Ref(subnet1), Ref(subnet2)],
            TargetGroupARNs=[Ref(targetGroup)],
            HealthCheckType="ELB",
            HealthCheckGracePeriod=360,
            UpdatePolicy=UpdatePolicy(
                AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
                    WillReplace=True, ),
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    PauseTime='PT5M',
                    MinInstancesInService="1",
                    MaxBatchSize='1',
                    WaitOnResourceSignals=True)),
            CreationPolicy=CreationPolicy(ResourceSignal=ResourceSignal(
                Timeout="PT15M", Count=Ref(scaleCapacityDesired)))))
    # print(t.to_json())
    return t
), ] )) eip = template.add_resource(ec2.EIP("Eip")) ec2_instance = template.add_resource(ec2.Instance( 'Ec2Instance', ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"), InstanceType=Ref(instance_type), KeyName=Ref(key_name), SecurityGroups=[Ref(security_group)], BlockDeviceMappings=[ ec2.BlockDeviceMapping( DeviceName="/dev/sda1", Ebs=ec2.EBSBlockDevice( VolumeSize=Ref(root_size), ) ), ], UserData=Base64(Join('', [ '#!/bin/bash\n', 'sudo apt-get update\n', 'sudo apt-get install -y build-essential\n, 'wget https://raw.githubusercontent.com/dokku/dokku/', Ref(dokku_version), '/bootstrap.sh\n', 'sudo', ' DOKKU_TAG=', Ref(dokku_version), ' DOKKU_VHOST_ENABLE=', Ref(dokku_vhost_enable), ' DOKKU_WEB_CONFIG=', Ref(dokku_web_config), ' DOKKU_HOSTNAME=', Ref(dokku_hostname), ' DOKKU_KEY_FILE=/home/ubuntu/.ssh/authorized_keys', # use the key configured by key_name ' bash bootstrap.sh\n',
def create_instance(): return ec2.Instance( 'devserver', BlockDeviceMappings=[ ec2.BlockDeviceMapping( DeviceName='/dev/xvda', Ebs=ec2.EBSBlockDevice( VolumeSize=100, VolumeType='gp2', DeleteOnTermination=True, ), ), ], ImageId=Ref('amiId'), InstanceType='t2.medium', KeyName=Ref('keypair'), SecurityGroupIds=[Ref('secgrpDevServer')], SubnetId=Ref('subnetA'), Tags=_tags(), UserData=Base64( textwrap.dedent(r''' #!/bin/bash -ex exec > >(tee ~/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 echo BEGIN date '+%Y-%m-%d %H:%M:%S' # --- System Config hostnamectl set-hostname gwa-dev yum install -y git jq tree vim # --- UX cat <<-EOT > /etc/profile.d/ux.sh alias vi='vim' alias tree='tree -C' EOT cat <<-EOT >> /etc/vimrc set autoindent set modeline set tabstop=4 set listchars=tab:—— EOT # --- Docker yum install -y docker systemctl enable docker systemctl start docker usermod -aG docker ec2-user docker network create geowave-admin # --- Jenkins sudo wget -O /etc/yum.repos.d/jenkins.repo https://pkg.jenkins.io/redhat-stable/jenkins.repo sudo rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key yum install -y jenkins java-1.8.0-openjdk usermod -aG docker jenkins systemctl enable jenkins systemctl start jenkins echo END date '+%Y-%m-%d %H:%M:%S' ''').lstrip()), )