def Meta(name, private_ip):
    """Build cfn-init Metadata for a web host.

    Two configs run in order: ``SetupHost`` writes /etc/hostname and a
    script that propagates the hostname into the network config and
    /etc/hosts, then ``SetupWebsite`` installs httpd and unpacks the
    site archive from the template's S3 bucket.

    Args:
        name: Unqualified host name; joined with the hosted zone to
            form the FQDN written to /etc/hostname.
        private_ip: Private IP written to /etc/hosts next to the
            hostname.

    Returns:
        A ``cfn.Metadata`` instance with S3 authentication and the
        two-config Init block.
    """
    return cfn.Metadata(
        cfn.Authentication({
            # Let cfn-init fetch the site archive from the bucket using
            # the instance role's credentials.
            "default": cfn.AuthenticationBlock(
                type="S3",
                roleName=Ref(InstanceRole),
                buckets=[Ref(Bucket)])
        }),
        cfn.Init(
            cfn.InitConfigSets(default=['SetupHost', 'SetupWebsite']),
            SetupHost=cfn.InitConfig(
                files={
                    "/etc/hostname": {
                        "content": Join(".", [name, Ref(HostedZone)])
                    },
                    "/root/set-hostname.sh": {
                        "content": Join("", [
                            "#!/bin/bash\n",
                            "hostname --file /etc/hostname\n",
                            "h=$(cat /etc/hostname)\n",
                            "sed -i s/HOSTNAME=.*/HOSTNAME=$h/g /etc/sysconfig/network\n",
                            "echo ", private_ip, " $h >>/etc/hosts \n",
                            "service network restart"
                        ]),
                        # BUG FIX: cfn-init expects a six-digit octal mode
                        # string; the previous value was "755".
                        "mode": "000755"
                    },
                },
                commands={
                    "1-set-hostname": {
                        "command": "/root/set-hostname.sh "
                    }
                }
            ),
            SetupWebsite=cfn.InitConfig(
                packages={"yum": {'httpd': []}},
                sources={
                    # cfn-init downloads and unpacks the zip into the
                    # web root using the S3 credentials above.
                    "/var/www/html/": Join("", [
                        "https://", Ref(Bucket),
                        ".s3.amazonaws.com/3dstreetartindia.zip"
                    ])
                },
                services={
                    "sysvinit": {
                        "httpd": {
                            "enabled": "true",
                            "ensureRunning": "true"
                        },
                    }
                }
            )
        ))
def create_server_metadata(metadata):
    """Wrap a metadata mapping in a single-config cfn-init Metadata.

    ``metadata`` must provide the keys ``'packages'``, ``'sources'``,
    ``'files'`` and ``'commands'``; each value is forwarded verbatim to
    the ``InitConfig`` under the sole ``'config'`` key.
    """
    init_config = cloudformation.InitConfig(
        packages=metadata['packages'],
        sources=metadata['sources'],
        files=metadata['files'],
        commands=metadata['commands'],
    )
    return cloudformation.Metadata(
        cloudformation.Init({'config': init_config}))
def get_instance_metadata(instance_name):
    """Build cfn-hup bootstrap metadata for *instance_name*.

    Installs httpd, writes /etc/cfn/cfn-hup.conf plus an auto-reloader
    hook so that metadata updates re-run cfn-init, and keeps both httpd
    and cfn-hup running.
    """
    hup_conf = cfn.InitFile(
        content=Join('', [
            '[main]\n',
            'stack=', Ref('AWS::StackName'), '\n',
            'region=', Ref('AWS::Region'), '\n',
        ]),
        mode='000400',
        owner='root',
        group='root')
    # Re-run cfn-init for this resource whenever its metadata changes.
    reloader_conf = cfn.InitFile(
        content=Join('', [
            '[cfn-auto-reloader-hook]\n',
            'triggers=post.update\n',
            'path=Resources.', instance_name,
            '.Metadata.AWS::CloudFormation::Init\n',
            'action=/opt/aws/bin/cfn-init -v ',
            ' --stack=', Ref('AWS::StackName'),
            ' --resource=', instance_name,
            ' --region=', Ref('AWS::Region'), '\n',
            'runas=root\n',
        ]))
    config = cfn.InitConfig(
        packages={'yum': {'httpd': []}},
        files=cfn.InitFiles({
            '/etc/cfn/cfn-hup.conf': hup_conf,
            '/etc/cfn/hooks.d/cfn-auto-reloader.conf': reloader_conf,
        }),
        services={
            'sysvinit': cfn.InitServices({
                'httpd': cfn.InitService(
                    enabled=True,
                    ensureRunning=True),
                'cfn-hup': cfn.InitService(
                    enabled=True,
                    ensureRunning=True,
                    # Restart cfn-hup if either config file changes.
                    files=[
                        '/etc/cfn/cfn-hup.conf',
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                    ])
            })
        })
    return autoscaling.Metadata(cfn.Init({'config': config}))
def get_metadata(self):
    """Return cfn-init metadata whose default config set runs a single
    'echo hello world' command."""
    startup = cloudformation.InitConfig(commands={
        '01-startup': {
            'command': 'echo hello world'
        },
    })
    return cloudformation.Metadata(
        cloudformation.Init(
            cloudformation.InitConfigSets(default=['install_and_run']),
            install_and_run=startup))
def htpasswd(filename):
    """InitConfig ('htpasswd') that writes an htpasswd file at *filename*.

    The content is a placeholder 'user:password_hash' entry; the file is
    owned by root and group-readable by 'docker' only.
    """
    entry = {
        'content': 'user:password_hash',
        'mode': '000660',
        'owner': 'root',
        'group': 'docker',
    }
    return cf.InitConfig('htpasswd', files={filename: entry})
def get_metadata(self):
    """Return cfn-init metadata that backgrounds a SimpleHTTPServer on
    port 8000 via the default 'install_and_run' config set."""
    serve_command = {
        '01-startup': {
            'command': 'nohup python -m SimpleHTTPServer 8000 &'
        },
    }
    return cloudformation.Metadata(
        cloudformation.Init(
            cloudformation.InitConfigSets(default=['install_and_run']),
            install_and_run=cloudformation.InitConfig(
                commands=serve_command)))
def configure_for_follower(instance, counter):
    # Configure a follower StorReduce host: chain DependsOn so hosts come
    # up one after another, spread followers across the private subnets,
    # and install/run the connect-srr.sh cluster-join script via cfn-init.
    #
    # NOTE(review): assumes followers are numbered from 2 (counter == 2 is
    # the first follower, depending on "MonitorInstance") — confirm callers.
    subnet_index = counter % NUM_AZS
    if counter == 2:
        instance.DependsOn = "MonitorInstance"
    else:
        # Each subsequent follower waits on its predecessor by name.
        instance.DependsOn = BASE_NAME + str(counter - 1)  #base_instance.title
    # Round-robin this follower over the subnets supplied as a parameter.
    instance.SubnetId = Select(str(subnet_index), Ref(PrivateSubnetsToSpanParam))
    # instance.AvailabilityZone = Select(str(subnet_index), Ref(AvailabilityZonesParam))
    instance.Metadata = cloudformation.Metadata(
        cloudformation.Authentication({
            # S3 role credentials so cfn-init can download the script from
            # the quick-start bucket.
            "S3AccessCreds": cloudformation.AuthenticationBlock(
                type="S3",
                roleName=Ref(StorReduceHostRole),  #Ref(HostRoleParam),
                buckets=[Ref(QSS3BucketNameParam)])
        }),
        cloudformation.Init({
            "config": cloudformation.InitConfig(
                files=cloudformation.InitFiles({
                    "/home/ec2-user/connect-srr.sh": cloudformation.InitFile(
                        source=Sub(
                            "https://${" + QSS3BucketNameParam.title +
                            "}.${QSS3Region}.amazonaws.com/${" +
                            QSS3KeyPrefixParam.title + "}scripts/connect-srr.sh",
                            **{
                                # GovCloud buckets live behind a different
                                # S3 endpoint host.
                                "QSS3Region": If("GovCloudCondition",
                                                 "s3-us-gov-west-1",
                                                 "s3")
                            }),
                        mode="000550",
                        owner="root",
                        group="root")
                }),
                commands={
                    # Join the follower to the cluster; arguments are
                    # individually quoted for the shell.
                    "connect-srr": {
                        "command": Join("", [
                            "/home/ec2-user/connect-srr.sh \"",
                            GetAtt(base_instance, "PrivateDnsName"),
                            "\" \'",
                            Ref(StorReducePasswordParam),
                            "\' ",
                            "\"", Ref(ShardsNumParam), "\" ",
                            "\"", Ref(ReplicaShardsNumParam), "\" ",
                            "\"", Ref(elasticLB), "\" ",
                            "\"", Ref("AWS::Region"), "\" ",
                            "\"", GetAtt("Eth0", "PrimaryPrivateIpAddress"), "\" ",
                            "\"", Ref(NumSRRHostsParam), "\""
                        ])
                    }
                })
        }))
    instance.Tags = [{"Key": "Name", "Value": "StorReduce-QS-Host"}]
def add_cfn_init():
    """Metadata with two config sets ('ascending' and 'descending'), each
    pointing at the same single 'config' InitConfig, which writes the
    CFNTEST environment value into text.txt in the home directory."""
    write_test_file = {
        'command': 'echo "$CFNTEST" > text.txt',
        'env': {
            'CFNTEST': 'I come from config.'
        },
        'cwd': '~'
    }
    return cloudformation.Metadata(
        cloudformation.Init(
            cloudformation.InitConfigSets(
                ascending=['config'],
                descending=['config']),
            config=cloudformation.InitConfig(
                commands={'test': write_test_file})))
def docker():
    """InitConfig ('Docker'): installs docker via yum, grants ec2-user
    access to the docker group, installs docker-compose via pip, and
    keeps the docker daemon enabled and running."""
    yum_packages = {'yum': {'docker': []}}
    setup_commands = {
        'docker_user': {
            'command': 'usermod -aG docker ec2-user'
        },
        'install_compose': {
            'command': 'pip install docker-compose'
        },
    }
    daemon = {'docker': {'enabled': True, 'ensureRunning': True}}
    return cf.InitConfig(
        'Docker',
        packages=yum_packages,
        commands=setup_commands,
        services={'sysvinit': daemon})
def docker_compose(name, compose_yml):
    """Build an InitConfig that writes *compose_yml* under /opt/<name>/
    and brings the stack up with docker-compose.

    The name is lowercased for the path and title-cased for the config
    name ('Compose<Name>'). Returns (InitConfig, compose_file_path).
    """
    name = name.lower()
    target = '/opt/' + name + '/docker-compose.yml'
    compose_entry = {
        'content': compose_yml,
        'mode': '000664',
        'owner': 'root',
        'group': 'docker',
    }
    up_command = '/usr/local/bin/docker-compose -f ' + target + ' up -d'
    config = cf.InitConfig(
        'Compose' + name.title(),
        files={target: compose_entry},
        commands={'up': {'command': up_command}})
    return config, target
def to_cloudformation_template(self, base_template):
    # Add this server to *base_template* as a t2.nano EC2 instance.
    # When bootstrap contents exist, wire user data plus New Relic
    # cfn-init metadata; when SSH is among the open ports, add a key
    # pair parameter and a security group. Finally attach the standard
    # instance outputs and return the mutated template.
    instance = ec2.Instance(self.name)
    instance.InstanceType = 't2.nano'
    instance.ImageId = IMAGE_ID
    base_template.add_resource(instance)
    if self.bootstrap_file_contents:
        newrelic_license_param = base_template.add_parameter(Parameter(
            "NewRelicLicenseKey",
            Description="Value of your New Relic License Key",
            Type="String",
        ))
        instance.UserData = CloudFormationHelper.create_user_data(
            self.bootstrap_file_contents, self.name)
        instance.Metadata = cloudformation.Metadata(
            cloudformation.Init(
                cloudformation.InitConfigSets(
                    default=['new_relic']
                ),
                new_relic=cloudformation.InitConfig(
                    commands={
                        # Record which dollop version provisioned the box.
                        'write_dollop_version': {
                            'command': 'echo "$DOLLOP_VERSION" > dollop_init.txt',
                            'env': {
                                'DOLLOP_VERSION': __version__
                            },
                            'cwd': '~'
                        },
                        'configure_new_relic': {
                            'command': Join('', [
                                'nrsysmond-config --set license_key=',
                                Ref(newrelic_license_param)])
                        }
                    },
                    services={
                        'sysvinit': {
                            'newrelic-sysmond': {
                                'enabled': 'true',
                                'ensureRunning': 'true'
                            }
                        }
                    }
                )
            )
        )
    if SSH_PORT in self.open_ports:
        # SSH requested: expose a key pair parameter and attach a
        # security group built from the configured open ports.
        keyname_param = base_template.add_parameter(Parameter(
            "EC2KeyPair",
            Description="Name of an existing EC2 KeyPair to enable SSH access to the instance",
            Type="AWS::EC2::KeyPair::KeyName",
        ))
        security_group = base_template.add_resource(ec2.SecurityGroup(
            'DollopServerSecurityGroup',
            GroupDescription='Dollop-generated port access rules for an EC2 instance',
            SecurityGroupIngress=CloudFormationHelper.create_security_group_rules(
                self.open_ports),
            Tags=Tags(Name='ops.dollop.resource.sg'))
        )
        instance.KeyName = Ref(keyname_param)
        instance.SecurityGroups = [Ref(security_group)]
    # Template Output
    base_template.add_output([
        Output(
            "InstanceId",
            Description="InstanceId of the newly created EC2 instance",
            Value=Ref(instance),
        ),
        Output(
            "AZ",
            Description="Availability Zone of the newly created EC2 instance",
            Value=GetAtt(instance, "AvailabilityZone"),
        ),
        Output(
            "PublicIP",
            Description="Public IP address of the newly created EC2 instance",
            Value=GetAtt(instance, "PublicIp"),
        ),
        Output(
            "PrivateIP",
            Description="Private IP address of the newly created EC2 instance",
            Value=GetAtt(instance, "PrivateIp"),
        ),
        Output(
            "PublicDNS",
            Description="Public DNS Name of the newly created EC2 instance",
            Value=GetAtt(instance, "PublicDnsName"),
        ),
        Output(
            "PrivateDNS",
            Description="Private DNS Name of the newly created EC2 instance",
            Value=GetAtt(instance, "PrivateDnsName"),
        ),
    ])
    instance.Tags = Tags(Name=DollopServer.DefaultTag)
    return base_template
def certbot(domain, email, conf_dir='/opt/certs/', copy_to=None, pre_hook=None, post_hook=None):
    """Build an InitConfig ('Certbot') that installs certbot-auto,
    obtains a standalone certificate for *domain*, and installs a daily
    renewal cron job.

    Args:
        domain: Domain to request the certificate for.
        email: Registration/recovery email passed to certbot.
        conf_dir: Certbot config directory (certs live under
            <conf_dir>/live/<domain>/).
        copy_to: Optional directory; when set, *.pem files are copied
            there after issuance and on every renewal.
        pre_hook: Optional shell command run before each renewal.
        post_hook: Optional shell command run after each renewal
            (combined with the cert copy when both are present).

    Returns:
        cf.InitConfig writing /opt/certbot-auto and
        /etc/cron.daily/certbot_renew, plus the issuance commands.
    """
    script_name = '/opt/certbot-auto'
    commands = {
        '1_get_cert': {
            'command': Join(' ', [
                script_name, 'certonly',
                '--config-dir', conf_dir,
                '--standalone --debug --agree-tos --non-interactive',
                '-d', domain,
                '--email', email,
            ])
        }
    }
    # Daily renewal script content (list of fragments for Join below).
    # BUG FIX: the shebang was '#/bin/bash -e' (missing '!'), so the
    # cron script was not guaranteed to run under bash with -e.
    renew_script = [
        '#!/bin/bash -e\n',
        'unset PYTHON_INSTALL_LAYOUT\n',
        script_name + ' renew --config-dir ' + conf_dir,
        ' --debug --non-interactive',
    ]
    if pre_hook:
        renew_script.append(' --pre-hook="' + pre_hook + '"')
    copy_certs = None
    if copy_to:
        copy_certs = Join('', [
            'cp ' + conf_dir.rstrip('/') + '/live/', domain, '/*.pem ',
            copy_to
        ])
        commands.update({
            '2_certs_dest': {
                'command': 'mkdir -p ' + copy_to,
            },
            '3_copy_certs': {
                'command': copy_certs,
            },
        })
    # Copy certificates and/or run a custom post-hook on renewal.
    if copy_certs or post_hook:
        hook = [' --post-hook="']
        if copy_certs:
            hook.append(copy_certs)
        if post_hook:
            hook.extend([' && ', post_hook])
        hook.append('"')
        # BUG FIX: this was renew_script.append(hook), which nested the
        # hook *list* inside renew_script and produced an invalid nested
        # list for the Join('') below. extend() splices the fragments in.
        renew_script.extend(hook)
    return cf.InitConfig(
        'Certbot',
        files={
            script_name: {
                'source': 'https://dl.eff.org/certbot-auto',
                'mode': '000755',
                'owner': 'root',
                'group': 'root',
            },
            '/etc/cron.daily/certbot_renew': {
                'content': Join('', renew_script),
                'mode': '000755',
                'owner': 'root',
                'group': 'root',
            },
        },
        commands=commands)
def attach(self):
    """Attaches a bootstrapped Chef Node EC2 instance to an AWS
    CloudFormation template and returns the template.

    Adds EC2 parameters/resources, a security group (SSH from the
    SSHLocation parameter, HTTP 80/8080 from anywhere), and an instance
    whose user data installs cfn bootstrap tooling and whose cfn-init
    metadata (config set 'InstallAndRun') installs docker/cfn-hup,
    CloudWatch Logs, Chef, and finally runs the Chef first boot.
    """
    parameters = ec2_parameters.EC2Parameters(self.template)
    parameters.attach()
    resources = ec2_resources.EC2Resources(self.template)
    resources.attach()
    security_group = self.template.add_resource(ec2.SecurityGroup(
        'SecurityGroup',
        GroupDescription='Allows SSH access from anywhere',
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort=22,
                ToPort=22,
                CidrIp=Ref(self.template.parameters['SSHLocation'])
            ),
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort=80,
                ToPort=80,
                CidrIp='0.0.0.0/0'
            ),
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort=8080,
                ToPort=8080,
                CidrIp='0.0.0.0/0'
            )
        ],
        VpcId=ImportValue("prod2-VPCID"),
        Tags=Tags(
            Name='{0}SecurityGroup'.format(EC2_INSTANCE_NAME)
        )
    ))
    self.template.add_resource(ec2.Instance(
        EC2_INSTANCE_NAME,
        # AMI is chosen per-region and per-instance-arch; the
        # 'IsCentos7' condition switches between the CentOS 7 and
        # Amazon Linux mappings.
        ImageId=If(
            'IsCentos7',
            FindInMap(
                "AWSRegionArch2Centos7LinuxAMI",
                Ref("AWS::Region"),
                FindInMap("AWSInstanceType2Arch",
                          Ref(self.template.parameters['InstanceType']),
                          "Arch")),
            FindInMap(
                "AWSRegionArch2AmazonLinuxAMI",
                Ref("AWS::Region"),
                FindInMap("AWSInstanceType2Arch",
                          Ref(self.template.parameters['InstanceType']),
                          "Arch"))
        ),
        InstanceType=Ref(self.template.parameters['InstanceType']),
        KeyName=FindInMap('Region2KeyPair', Ref('AWS::Region'), 'key'),
        SecurityGroupIds=[Ref(security_group)],
        SubnetId=ImportValue("prod2-SubnetPublicAZ2"),
        IamInstanceProfile=Ref(
            self.template.resources['InstanceProfileResource']),
        # User data: OS-specific bootstrap (CentOS needs cfn-bootstrap
        # installed by hand), then run cfn-init and signal its status.
        UserData=Base64(Join('', [
            If('IsCentos7',
               Join('\n', [
                   '#!/bin/bash ',
                   'sudo yum update -y ',
                   'sudo yum install -y vim ',
                   'sudo yum install -y epel-release ',
                   'sudo yum install -y awscli ',
                   '# Install CFN-BootStrap ',
                   ('/usr/bin/easy_install --script-dir /opt/aws/bin '
                    'https://s3.amazonaws.com/cloudformation-examples/'
                    'aws-cfn-bootstrap-latest.tar.gz '),
                   ('cp -v /usr/lib/python2*/site-packages/aws_cfn_'
                    'bootstrap*/init/redhat/cfn-hup /etc/init.d '),
                   'chmod +x /etc/init.d/cfn-hup ',
               ]),
               Join('\n', [
                   '#!/bin/bash -xe ',
                   'yum update -y ',
                   '# Update CFN-BootStrap ',
                   'yum update -y aws-cfn-bootstrap',
                   'sudo yum install -y awslogs ',
               ])),
            Join('', [
                '# Install the files and packages from the metadata\n'
                '/opt/aws/bin/cfn-init -v ',
                ' --stack ', Ref('AWS::StackName'),
                ' --resource ', EC2_INSTANCE_NAME,
                ' --configsets InstallAndRun',
                ' --region ', Ref('AWS::Region'),
                ' --role ', Ref(self.template.resources['RoleResource']),
                '\n',
                '# Signal the status from cfn-init\n',
                '/opt/aws/bin/cfn-signal -e $? '
                ' --stack ', Ref('AWS::StackName'),
                ' --resource ', EC2_INSTANCE_NAME,
                ' --region ', Ref('AWS::Region'),
                ' --role ', Ref(self.template.resources['RoleResource']),
                '\n'
            ]),
        ]
        )
        ),
        Metadata=cloudformation.Metadata(
            cloudformation.Init(
                cloudformation.InitConfigSets(
                    InstallAndRun=['Install', 'InstallLogs',
                                   'InstallChef', 'Configure']
                ),
                # Install: docker/stress packages plus cfn-hup so that
                # metadata updates re-run cfn-init automatically.
                # The {{...}} placeholders are Mustache variables filled
                # from each file's 'context' mapping by cfn-init.
                Install=cloudformation.InitConfig(
                    packages={
                        'yum': {
                            'stress': [],
                            'docker': []
                        }
                    },
                    files={
                        '/etc/cfn/cfn-hup.conf': {
                            'content': Join('\n', [
                                '[main]',
                                'stack={{stackid}}',
                                'region={{region}}',
                                'interval=1'
                            ]),
                            'context': {
                                'stackid': Ref('AWS::StackId'),
                                'region': Ref('AWS::Region')
                            },
                            'mode': '000400',
                            'owner': 'root',
                            'group': 'root'
                        },
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf': {
                            'content': Join('\n', [
                                '[cfn-auto-reloader-hook]',
                                'triggers=post.update',
                                ('path=Resources.{{instance_name}}'
                                 '.Metadata'
                                 '.AWS::CloudFormation::Init'),
                                ('action=/opt/aws/bin/cfn-init -v '
                                 ' --stack {{stack_name}} '
                                 ' --resource {{instance_name}} '
                                 ' --configsets {{config_sets}} '
                                 ' --region {{region}} '),
                                'runas={{run_as}}'
                            ]),
                            'context': {
                                'instance_name': EC2_INSTANCE_NAME,
                                'stack_name': Ref('AWS::StackName'),
                                'region': Ref('AWS::Region'),
                                'config_sets': 'InstallAndRun',
                                'run_as': 'root'
                            }
                        }
                    },
                    services={
                        'sysvinit': {
                            'docker': {
                                'enabled': 'true',
                                'ensureRunning': 'true'
                            },
                            'cfn-hup': {
                                'enabled': 'true',
                                'ensureRunning': 'true'
                            }
                        }
                    },
                    commands={
                        '01_test': {
                            'command': 'echo "$CFNTEST" > Install.txt',
                            'env': {
                                'CFNTEST': 'I come from Install.'
                            },
                            'cwd': '~'
                        }
                    }
                ),
                # InstallLogs: CloudWatch Logs agent config shipping the
                # cloud-init/cfn/httpd logs to the stack's log group.
                InstallLogs=cloudformation.InitConfig(
                    files={
                        '/etc/awslogs/awslogs.conf': {
                            'content': Join('\n', [
                                '[general]',
                                ('state_file= /var/awslogs/'
                                 'state/agent-state'),
                                '',
                                '[/var/log/cloud-init.log]',
                                'file = /var/log/cloud-init.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cloud-init.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/cloud-init-output.log]',
                                'file = /var/log/cloud-init-output.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cloud-init-output.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/cfn-init.log]',
                                'file = /var/log/cfn-init.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cfn-init.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/cfn-hup.log]',
                                'file = /var/log/cfn-hup.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cfn-hup.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/cfn-wire.log]',
                                'file = /var/log/cfn-wire.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cfn-wire.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/httpd]',
                                'file = /var/log/httpd/*',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/httpd'),
                                'datetime_format = {{datetime_format}}'
                            ]),
                            'context': {
                                'log_group_name': Ref(
                                    self.template.resources[
                                        'LogGroupResource']),
                                'datetime_format': '%d/%b/%Y:%H:%M:%S'
                            }
                        },
                        '/etc/awslogs/awscli.conf': {
                            'content': Join('\n', [
                                '[plugins]',
                                'cwlogs = cwlogs',
                                '[default]',
                                'region = {{region}}'
                            ]),
                            'context': {
                                'region': Ref('AWS::Region')
                            },
                            'mode': '000444',
                            'owner': 'root',
                            'group': 'root'
                        }
                    },
                    commands={
                        '01_create_state_directory': {
                            'command': 'mkdir -p /var/awslogs/state'
                        },
                        '02_test': {
                            'command': 'echo "$CFNTEST" > InstallLogs.txt',
                            'env': {
                                'CFNTEST': 'I come from install_logs.'
                            },
                            'cwd': '~'
                        },
                        # CentOS has no awslogs yum package; install the
                        # agent from the setup script instead.
                        '03_install_aws_logs_if_centos': {
                            'command': If('IsCentos7',
                                          Join('\n', [
                                              ('curl https://s3.amazonaws.com/aws-'
                                               'cloudwatch/downloads/latest/awslogs-'
                                               'agent-setup.py -O'),
                                              Join('', [
                                                  'sudo python ./awslogs-agent-setup.py',
                                                  ' --configfile /etc/awslogs/awslogs',
                                                  '.conf --non-interactive --region ',
                                                  Ref('AWS::Region')])
                                          ]),
                                          Join('', [
                                              'echo "not installing awslogs from ',
                                              'from source"'
                                          ]))
                        }
                    },
                    services={
                        'sysvinit': {
                            'awslogs': {
                                'enabled': 'true',
                                'ensureRunning': 'true',
                                'files': ['/etc/awslogs/awslogs.conf']
                            }
                        }
                    }
                ),
                # InstallChef: omnitruck install plus the client config,
                # validator key and first-run attributes pulled from S3
                # using the role credentials declared below.
                InstallChef=cloudformation.InitConfig(
                    commands={
                        '01_invoke_omnitruck_install': {
                            'command': (
                                'curl -L '
                                'https://omnitruck.chef.io/install.sh | '
                                'bash'
                            ),
                        }
                    },
                    files={
                        '/etc/chef/client.rb': {
                            'source': S3_CLIENT_RB,
                            'mode': '000600',
                            'owner': 'root',
                            'group': 'root',
                            'authentication': 'S3AccessCreds'
                        },
                        '/etc/chef/jasondebolt-validator.pem': {
                            'source': S3_VALIDATOR_PEM,
                            'mode': '000600',
                            'owner': 'root',
                            'group': 'root',
                            'authentication': 'S3AccessCreds'
                        },
                        '/etc/chef/first-run.json': {
                            'source': S3_FIRST_RUN,
                            'mode': '000600',
                            'owner': 'root',
                            'group': 'root',
                            'authentication': 'S3AccessCreds'
                        }
                    }
                ),
                # Configure: run the Chef client with the first-run
                # attributes to converge the node.
                Configure=cloudformation.InitConfig(
                    commands={
                        '01_test': {
                            'command': 'echo "$CFNTEST" > Configure.txt',
                            'env': {
                                'CFNTEST': 'I come from Configure.'
                            },
                            'cwd': '~'
                        },
                        '02_chef_bootstrap': {
                            'command': (
                                'chef-client -j '
                                '/etc/chef/first-run.json'
                            )
                        }
                    }
                )
            ),
            cloudformation.Authentication({
                # Instance-role credentials used by the InstallChef
                # file downloads above.
                'S3AccessCreds': cloudformation.AuthenticationBlock(
                    type='S3',
                    roleName=Ref(self.template.resources['RoleResource']))
            })
        ),
        Tags=Tags(
            Name=Ref('AWS::StackName'),
            env='ops'
        )
    ))
    self.template.add_output(Output(
        'PublicIp',
        Description='Public IP of the newly created EC2 instance',
        Value=GetAtt(EC2_INSTANCE_NAME, 'PublicIp')
    ))
    self.template.add_output(Output(
        'LinuxType',
        Description='The linux type of the EC2 instance.',
        Value=If('IsCentos7', 'centos_7', 'amazon_linux')
    ))
    return self.template
def add_launch_template(template, hosts_sg):
    """Function to create a launch template.

    :param template: ECS Cluster template
    :type template: troposphere.Template
    :param hosts_sg: security group for the EC2 hosts
    :type hosts_sg: troposphere.ec2.SecurityGroup
    :return: launch_template
    :rtype: troposphere.ec2.LaunchTemplate
    """
    # from troposphere.cloudformation import (
    #     WaitCondition, WaitConditionHandle
    # )
    # Deactivated conditions given you could run with no EC2 at all.
    # Tricky condition to do as the WaitCondition and Handler cannot be created on a CFN Update, but only at the
    # very creation of the stack.
    # wait_handle = WaitConditionHandle(
    #     'BootstrapHandle',
    #     template=template
    # )
    # WaitCondition(
    #     'BootStrapCondition',
    #     template=template,
    #     DependsOn=[hosts_role],
    #     Handle=Ref(wait_handle),
    #     Timeout='900'
    # )
    launch_template = LaunchTemplate(
        "LaunchTemplate",
        template=template,
        # cfn-init metadata: packages, docker daemon config, ECS agent
        # config, then services — run in that order by the default set.
        Metadata=cloudformation.Metadata(
            cloudformation.Init(
                cloudformation.InitConfigSets(
                    default=["awspackages", "dockerconfig", "ecsconfig", "awsservices"]
                ),
                awspackages=cloudformation.InitConfig(
                    packages={"yum": {"awslogs": [], "amazon-ssm-agent": []}},
                    commands={
                        "001-check-packages": {"command": "rpm -qa | grep amazon"},
                        "002-check-packages": {"command": "rpm -qa | grep aws"},
                    },
                ),
                awsservices=cloudformation.InitConfig(
                    services={
                        "sysvinit": {
                            "amazon-ssm-agent": {"enabled": True, "ensureRunning": True}
                        }
                    }
                ),
                dockerconfig=cloudformation.InitConfig(
                    commands={
                        "001-stop-docker": {"command": "systemctl stop docker"},
                        "098-reload-systemd": {"command": "systemctl daemon-reload"},
                    },
                    files={
                        "/etc/sysconfig/docker": {
                            "owner": "root",
                            "group": "root",
                            "mode": "644",
                            "content": Join(
                                "\n",
                                [
                                    "DAEMON_MAXFILES=1048576",
                                    Join(
                                        " ",
                                        ["OPTIONS=--default-ulimit nofile=1024:4096"],
                                    ),
                                    "DAEMON_PIDFILE_TIMEOUT=10",
                                    "#EOF",
                                    "",
                                ],
                            ),
                        }
                    },
                    services={
                        "sysvinit": {
                            "docker": {
                                "enabled": True,
                                "ensureRunning": True,
                                "files": ["/etc/sysconfig/docker"],
                                "commands": ["098-reload-systemd"],
                            }
                        }
                    },
                ),
                # ECS agent configuration; ECS_CLUSTER is resolved at
                # deploy time via Sub (f-string doubles the braces so
                # '${...}' reaches CloudFormation intact).
                ecsconfig=cloudformation.InitConfig(
                    files={
                        "/etc/ecs/ecs.config": {
                            "owner": "root",
                            "group": "root",
                            "mode": "644",
                            "content": Join(
                                "\n",
                                [
                                    Sub(f"ECS_CLUSTER=${{{CLUSTER_NAME_T}}}"),
                                    "ECS_ENABLE_TASK_IAM_ROLE=true",
                                    "ECS_ENABLE_SPOT_INSTANCE_DRAINING=true",
                                    "ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST=true",
                                    "ECS_ENABLE_CONTAINER_METADATA=true",
                                    "ECS_ENABLE_UNTRACKED_IMAGE_CLEANUP=true",
                                    "ECS_UPDATES_ENABLED=true",
                                    "ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=15m",
                                    "ECS_IMAGE_CLEANUP_INTERVAL=10m",
                                    "ECS_NUM_IMAGES_DELETE_PER_CYCLE=100",
                                    "ECS_ENABLE_TASK_ENI=true",
                                    "ECS_AWSVPC_BLOCK_IMDS=true",
                                    "ECS_TASK_METADATA_RPS_LIMIT=300,400",
                                    "ECS_ENABLE_AWSLOGS_EXECUTIONROLE_OVERRIDE=true",
                                    'ECS_AVAILABLE_LOGGING_DRIVERS=["awslogs", "json-file"]',
                                    "#EOF",
                                ],
                            ),
                        }
                    },
                    commands={
                        "0001-restartecs": {
                            "command": "systemctl --no-block restart ecs"
                        }
                    },
                ),
            )
        ),
        LaunchTemplateData=LaunchTemplateData(
            BlockDeviceMappings=[
                LaunchTemplateBlockDeviceMapping(
                    DeviceName="/dev/xvda",
                    Ebs=EBSBlockDevice(DeleteOnTermination=True, Encrypted=True),
                )
            ],
            ImageId=Ref(compute_params.ECS_AMI_ID),
            InstanceInitiatedShutdownBehavior="terminate",
            IamInstanceProfile=IamInstanceProfile(
                Arn=Sub(f"${{{HOST_PROFILE_T}.Arn}}")
            ),
            TagSpecifications=[
                TagSpecifications(
                    ResourceType="instance",
                    Tags=Tags(
                        Name=Sub(f"EcsNodes-${{{CLUSTER_NAME_T}}}"),
                        StackName=Ref("AWS::StackName"),
                        StackId=Ref("AWS::StackId"),
                    ),
                )
            ],
            InstanceType="m5a.large",
            Monitoring=Monitoring(Enabled=True),
            SecurityGroupIds=[GetAtt(hosts_sg, "GroupId")],
            # Bootstrap: ensure cfn-bootstrap exists, then run cfn-init
            # against this launch template's metadata above.
            UserData=Base64(
                Join(
                    "\n",
                    [
                        "#!/usr/bin/env bash",
                        "export PATH=$PATH:/opt/aws/bin",
                        "cfn-init -v || yum install aws-cfn-bootstrap -y",
                        Sub(
                            f"cfn-init --region ${{AWS::Region}} -r LaunchTemplate -s ${{AWS::StackName}}"
                        ),
                        # 'if [ $? -ne 0 ]; then',
                        # Sub(f'cfn-signal -e 1 -r "Failed to bootstrap" \'${{{wait_handle.title}}}\''),
                        # 'halt',
                        # 'else',
                        # Sub(f'cfn-signal -e 0 -r "Successfully bootstrapped" \'${{{wait_handle.title}}}\''),
                        # 'fi',
                        "# EOF",
                    ],
                )
            ),
        ),
        LaunchTemplateName=Ref(CLUSTER_NAME_T),
    )
    return launch_template
def cfn_elasticsearch():
    # Build the cfn-init config mapping for an Elasticsearch 6.x + Kibana
    # host: yum repositories first, then packages/plugins, then runtime
    # configuration and service startup.
    init_args = {}
    init_args.update({
        'REPOSITORIES': cfm.InitConfig(
            files={
                '/etc/yum.repos.d/elasticsearch.repo': {
                    'content': Join('', [
                        '[elasticsearch-6.x]\n',
                        'name=Elasticsearch repository for 6.x packages\n',
                        'baseurl=https://artifacts.elastic.co/packages/6.x/yum\n',
                        'gpgcheck=1\n',
                        'gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch\n',
                        'enabled=1\n'
                    ])
                }
            },
            commands={
                # 'test' makes each command idempotent: it only runs when
                # the shell test succeeds.
                '10_epel-repo': {
                    'command': 'yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm',
                    'test': '! test -e /etc/yum.repos.d/epel.repo'
                },
                '20_elastic.co-signature': {
                    'command': 'rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch',
                    'test': '! rpm -qi gpg-pubkey-d88e42b4-52371eca'
                },
                '40_repo-set-options': {
                    'command': 'yum-config-manager --save --setopt=\*.skip_if_unavailable=1'
                },
            }
        ),
        'PACKAGES': cfm.InitConfig(
            packages={
                'yum': {
                    'elasticsearch': [],
                    'kibana': [],
                    'java-11-amazon-corretto-headless': [],
                    'jq': [],
                }
            },
            commands={
                '01_disable-epel': {
                    'command': 'yum-config-manager --disable epel >/dev/null'
                },
                '02_install-ES-plugin-discovery-ec2': {
                    'cwd': '/usr/share/elasticsearch/bin',
                    'command': './elasticsearch-plugin install --batch discovery-ec2',
                    'test': 'test ! -e /usr/share/elasticsearch/plugins/discovery-ec2'
                },
                '03_install-ES-plugin-repository-s3': {
                    'cwd': '/usr/share/elasticsearch/bin',
                    'command': './elasticsearch-plugin install --batch repository-s3',
                    'test': 'test ! -e /usr/share/elasticsearch/plugins/repository-s3'
                },
                '06_install-ES-plugin-analysis-icu': {
                    'cwd': '/usr/share/elasticsearch/bin',
                    'command': './elasticsearch-plugin install --batch analysis-icu',
                    'test': 'test ! -e /usr/share/elasticsearch/plugins/analysis-icu'
                },
                # Make Kibana listen on all interfaces, not just localhost.
                '07_configure_kibana': {
                    'command': 'sed -i \'/^#server.host/ s/localhost/0.0.0.0/;/^#server.host/ s/^#//\' /etc/kibana/kibana.yml'
                }
            },
            # Services stay disabled here; the SERVICES config below
            # starts them once their configuration is in place.
            services={
                'sysvinit': {
                    'elasticsearch': {
                        'enabled': 'false'
                    },
                    'kibana': {
                        'enabled': 'false'
                    }
                }
            }
        ),
        'SERVICES': cfm.InitConfig(
            files={
                '/etc/elasticsearch/elasticsearch.yml': {
                    'content': Join('', [
                        'cluster.name: ' + cfg.Brand + '-elasticsearch\n',
                        'network.host: [\'_local:ipv4_\', \'_site:ipv4_\']\n',
                        # NOTE: the '\n' literals below with no following
                        # comma use Python's adjacent-string concatenation
                        # — intentional, do not "fix" by adding commas.
                        'discovery.ec2.groups: ', Ref('SecurityGroupInstancesRules'), '\n'
                        'path.data: /var/lib/elasticsearch\n',
                        'path.logs: /var/log/elasticsearch\n',
                        'discovery.zen.hosts_provider: ec2\n',
                        'discovery.ec2.endpoint: ', Sub('ec2.${AWS::Region}.amazonaws.com'), '\n'
                    ]),
                    'group': 'elasticsearch'
                },
            },
            commands={
                # Size the JVM heap to ~50% of RAM at instance boot.
                '10-etc_sysconfig_elasticsearch': {
                    'command': Join('', [
                        'export MY_ES_HEAP_SIZE=$(expr $(grep "MemTotal:" /proc/meminfo | egrep -o "([[:digit:]]+)") / 1000 / 2)m;',
                        'echo -e "# Heap size defaults to 256m min, 1g max\n',
                        '# Set ES_HEAP_SIZE to 50% of available RAM, but no more than 31g\n',
                        'ES_JAVA_OPTS=\\"-Xms${MY_ES_HEAP_SIZE} -Xmx${MY_ES_HEAP_SIZE} -XX:NewRatio=6\\"" > /etc/sysconfig/elasticsearch',
                        ' && chgrp elasticsearch /etc/sysconfig/elasticsearch'
                    ])
                }
            },
            services={
                'sysvinit': {
                    'elasticsearch': {
                        'ensureRunning': 'true',
                        'files': [
                            '/etc/elasticsearch/elasticsearch.yml',
                            '/etc/sysconfig/elasticsearch'
                        ]
                    },
                    'kibana': {
                        'ensureRunning': 'true',
                    },
                }
            }
        )
    })
    return init_args
# TODO: figure out how to reinstall Dokku if the version is changed (?) install_dokku=cloudformation.InitConfig( commands={ '01_fetch': { 'command': Join('', [ 'wget https://raw.githubusercontent.com/dokku/dokku/', Ref(dokku_version), '/bootstrap.sh', ]), 'cwd': '~', }, '02_install': { # docker-ce fails to install with this error if bootstrap.sh is run without sudo: # "debconf: delaying package configuration, since apt-utils is not installed" 'command': 'sudo -E bash bootstrap.sh', # use -E to make sure bash gets our env 'env': { 'DOKKU_TAG': Ref(dokku_version), 'DOKKU_VHOST_ENABLE': Ref(dokku_vhost_enable), 'DOKKU_WEB_CONFIG': Ref(dokku_web_config), 'DOKKU_HOSTNAME': domain_name, 'DOKKU_KEY_FILE': '/home/ubuntu/.ssh/authorized_keys', # use the key configured by key_name 'DOKKU_SKIP_KEY_FILE': 'false', # should be the default, but be explicit just in case }, 'cwd': '~', }, }, ), set_dokku_env=cloudformation.InitConfig(
def buildInstance(t, args):
    """Add the OpenEMR webserver instance and its supporting resources
    (security groups, IAM role/profile, docker volume) to template *t*.

    Args:
        t: troposphere Template to mutate.
        args: parsed CLI options; ``args.dev`` opens SSH to the world and
            makes the docker volume deletable, ``args.recovery`` wires the
            recovery bucket and a manual-setup compose file.

    Returns:
        The mutated template.
    """
    t.add_resource(
        ec2.SecurityGroup('WebserverIngressSG',
                          GroupDescription='Global Webserver Access',
                          VpcId=Ref('VPC'),
                          Tags=Tags(Name='Global Webserver Access')))
    t.add_resource(
        ec2.SecurityGroupIngress('WebserverIngressSG80',
                                 GroupId=Ref('WebserverIngressSG'),
                                 IpProtocol='tcp',
                                 CidrIp='0.0.0.0/0',
                                 FromPort='80',
                                 ToPort='80'))
    t.add_resource(
        ec2.SecurityGroupIngress('WebserverIngress443',
                                 GroupId=Ref('WebserverIngressSG'),
                                 IpProtocol='tcp',
                                 CidrIp='0.0.0.0/0',
                                 FromPort='443',
                                 ToPort='443'))
    t.add_resource(
        ec2.SecurityGroup('SysAdminAccessSG',
                          GroupDescription='System Administrator Access',
                          VpcId=Ref('VPC'),
                          Tags=Tags(Name='System Administrator Access')))
    if (args.dev):
        # Dev stacks only: world-open SSH for convenience.
        t.add_resource(
            ec2.SecurityGroupIngress('DevSysadminIngress22',
                                     GroupId=Ref('SysAdminAccessSG'),
                                     IpProtocol='tcp',
                                     CidrIp='0.0.0.0/0',
                                     FromPort='22',
                                     ToPort='22'))
    # Instance role: backup bucket access plus KMS for volume/backup keys.
    rolePolicyStatements = [{
        "Sid": "Stmt1500699052003",
        "Effect": "Allow",
        "Action": ["s3:ListBucket"],
        "Resource": [Join("", ["arn:aws:s3:::", Ref('S3Bucket')])]
    }, {
        "Sid": "Stmt1500699052000",
        "Effect": "Allow",
        "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject"],
        "Resource": [Join("", ["arn:aws:s3:::", Ref('S3Bucket'), '/Backup/*'])]
    }, {
        "Sid": "Stmt1500612724002",
        "Effect": "Allow",
        "Action": ["kms:Encrypt", "kms:Decrypt", "kms:GenerateDataKey*"],
        "Resource": [OpenEMRKeyARN]
    }]
    if (args.recovery):
        # Recovery stacks read (never write) the source stack's backups.
        rolePolicyStatements.extend([
            {
                "Sid": "Stmt1500699052004",
                "Effect": "Allow",
                "Action": ["s3:ListBucket"],
                "Resource": [Join(
                    "", ["arn:aws:s3:::", Ref('RecoveryS3Bucket')])]
            },
            {
                "Sid": "Stmt1500699052005",
                "Effect": "Allow",
                "Action": [
                    "s3:GetObject",
                ],
                "Resource": [
                    Join("", [
                        "arn:aws:s3:::", Ref('RecoveryS3Bucket'), '/Backup/*'
                    ])
                ]
            },
        ])
    t.add_resource(
        iam.ManagedPolicy('WebserverPolicy',
                          Description='Policy for webserver instance',
                          PolicyDocument={
                              "Version": "2012-10-17",
                              "Statement": rolePolicyStatements
                          }))
    t.add_resource(
        iam.Role('WebserverRole',
                 AssumeRolePolicyDocument={
                     "Version": "2012-10-17",
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 },
                 Path='/',
                 ManagedPolicyArns=[Ref('WebserverPolicy')]))
    t.add_resource(
        iam.InstanceProfile('WebserverInstanceProfile',
                            Path='/',
                            Roles=[Ref('WebserverRole')]))
    t.add_resource(
        ec2.Volume('DockerVolume',
                   DeletionPolicy='Delete' if args.dev else 'Snapshot',
                   Size=Ref('PracticeStorage'),
                   AvailabilityZone=Select("0", GetAZs("")),
                   VolumeType='gp2',
                   Encrypted=True,
                   KmsKeyId=OpenEMRKeyID,
                   Tags=Tags(Name="OpenEMR Practice")))
    # User data: run cfn-init (configset 'Setup') and signal the result
    # so the CreationPolicy below can gate stack completion.
    bootstrapScript = [
        "#!/bin/bash -x\n",
        "exec > /var/log/openemr-cfn-bootstrap 2>&1\n",
        "cfn-init -v ",
        " --stack ", ref_stack_name,
        " --resource WebserverInstance ",
        " --configsets Setup ",
        " --region ", ref_region, "\n",
        "cfn-signal -e $? ",
        " --stack ", ref_stack_name,
        " --resource WebserverInstance ",
        " --region ", ref_region, "\n"
    ]
    setupScript = [
        "#!/bin/bash -xe\n",
        "exec > /tmp/cloud-setup.log 2>&1\n",
        "/root/openemr-devops/packages/standard/ami/ami-configure.sh\n"
    ]
    stackPassthroughFile = [
        "S3=", Ref('S3Bucket'), "\n",
        "KMS=", OpenEMRKeyID, "\n"
    ]
    if (args.recovery):
        stackPassthroughFile.extend([
            "RECOVERYS3=", Ref('RecoveryS3Bucket'), "\n",
            "RECOVERY_NEWRDS=", GetAtt('RDSInstance', 'Endpoint.Address'), "\n",
        ])
    if (args.recovery):
        # Recovery: bring OpenEMR up unconfigured; restore happens later.
        dockerComposeFile = [
            "version: '3.1'\n",
            "services:\n",
            "  openemr:\n",
            "    restart: always\n",
            "    image: openemr/openemr", docker_version, "\n",
            "    ports:\n",
            "    - 80:80\n",
            "    - 443:443\n",
            "    volumes:\n",
            "    - logvolume01:/var/log\n",
            "    - sitevolume:/var/www/localhost/htdocs/openemr/sites\n",
            "    environment:\n",
            "      MANUAL_SETUP: 1\n",
            "volumes:\n",
            "  logvolume01: {}\n",
            "  sitevolume: {}\n"
        ]
    else:
        # BUG FIX: the three *_PASS entries below had been mangled
        # (secret-redaction left invalid tokens like '******'RDSPassword');
        # reconstructed as Ref(...) splices matching the MYSQL_HOST line.
        dockerComposeFile = [
            "version: '3.1'\n",
            "services:\n",
            "  openemr:\n",
            "    restart: always\n",
            "    image: openemr/openemr", docker_version, "\n",
            "    ports:\n",
            "    - 80:80\n",
            "    - 443:443\n",
            "    volumes:\n",
            "    - logvolume01:/var/log\n",
            "    - sitevolume:/var/www/localhost/htdocs/openemr/sites\n",
            "    environment:\n",
            "      MYSQL_HOST: '", GetAtt('RDSInstance', 'Endpoint.Address'), "'\n",
            "      MYSQL_ROOT_USER: openemr\n",
            "      MYSQL_ROOT_PASS: '", Ref('RDSPassword'), "'\n",
            "      MYSQL_USER: openemr\n",
            "      MYSQL_PASS: '", Ref('RDSPassword'), "'\n",
            "      OE_USER: admin\n",
            "      OE_PASS: '", Ref('AdminPassword'), "'\n",
            "volumes:\n",
            "  logvolume01: {}\n",
            "  sitevolume: {}\n"
        ]
    bootstrapInstall = cloudformation.InitConfig(
        files={
            "/root/cloud-setup.sh": {
                "content": Join("", setupScript),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            },
            "/root/cloud-variables": {
                "content": Join("", stackPassthroughFile),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            },
            "/root/openemr-devops/packages/standard/docker-compose.yaml": {
                "content": Join("", dockerComposeFile),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            }
        },
        commands={"01_setup": {
            "command": "/root/cloud-setup.sh"
        }})
    bootstrapMetadata = cloudformation.Metadata(
        cloudformation.Init(cloudformation.InitConfigSets(Setup=['Install']),
                            Install=bootstrapInstall))
    t.add_resource(
        ec2.Instance('WebserverInstance',
                     Metadata=bootstrapMetadata,
                     ImageId=FindInMap('RegionData', ref_region,
                                       'OpenEMRMktPlaceAMI'),
                     InstanceType=Ref('WebserverInstanceSize'),
                     NetworkInterfaces=[
                         ec2.NetworkInterfaceProperty(
                             AssociatePublicIpAddress=True,
                             DeviceIndex="0",
                             GroupSet=[
                                 Ref('ApplicationSecurityGroup'),
                                 Ref('WebserverIngressSG'),
                                 Ref('SysAdminAccessSG')
                             ],
                             SubnetId=Ref('PublicSubnet1'))
                     ],
                     KeyName=Ref('EC2KeyPair'),
                     IamInstanceProfile=Ref('WebserverInstanceProfile'),
                     Volumes=[{
                         "Device": "/dev/sdd",
                         "VolumeId": Ref('DockerVolume')
                     }],
                     Tags=Tags(Name='OpenEMR Cloud Standard'),
                     InstanceInitiatedShutdownBehavior='stop',
                     UserData=Base64(Join('', bootstrapScript)),
                     # Wait up to 15 minutes for the cfn-signal above.
                     CreationPolicy={"ResourceSignal": {
                         "Timeout": "PT15M"
                     }}))
    return t
def buildStack(bootstrap, env):
    """Build the CloudFormation template for the hello-world app.

    Always emits the network (VPC, subnets, IGW, routing), security groups,
    and a PostgreSQL RDS instance.  When ``bootstrap`` is truthy the function
    returns early with only that base infrastructure; otherwise it adds the
    EC2 autoscaling tier (launch configuration, ALB, target group, listener,
    autoscaling group).

    :param bootstrap: truthy -> emit DB/network-only "bootstrap" template.
    :param env: environment suffix mixed into resource names (e.g. DB name,
        ALB name, IAM policy name, Spring profile).
    :returns: a troposphere ``Template``.
    """
    t = Template()
    t.add_description("""\
Configures autoscaling group for hello world app""")

    # ---- Parameters ----
    vpcCidr = t.add_parameter(
        Parameter(
            "VPCCidr",
            Type="String",
            Description="VPC cidr (x.x.x.x/xx)",
        ))
    publicSubnet1 = t.add_parameter(
        Parameter(
            "PublicSubnet1",
            Type="String",
            Description="A public VPC subnet ID for the api app load balancer.",
        ))
    publicSubnet2 = t.add_parameter(
        Parameter(
            "PublicSubnet2",
            Type="String",
            Description="A public VPC subnet ID for the api load balancer.",
        ))
    dbName = t.add_parameter(
        Parameter(
            "DBName",
            Default="HelloWorldApp",
            Description="The database name",
            Type="String",
            MinLength="1",
            MaxLength="64",
            AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
            ConstraintDescription=("must begin with a letter and contain only"
                                   " alphanumeric characters.")))
    dbUser = t.add_parameter(
        Parameter(
            "DBUser",
            NoEcho=True,
            Description="The database admin account username",
            Type="String",
            MinLength="1",
            MaxLength="16",
            AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
            ConstraintDescription=("must begin with a letter and contain only"
                                   " alphanumeric characters.")))
    dbPassword = t.add_parameter(
        Parameter(
            "DBPassword",
            NoEcho=True,
            Description="The database admin account password",
            Type="String",
            MinLength="8",
            MaxLength="41",
            AllowedPattern="[a-zA-Z0-9]*",
            ConstraintDescription="must contain only alphanumeric characters.")
    )
    dbType = t.add_parameter(
        Parameter(
            "DBType",
            Default="db.t2.medium",
            Description="Database instance class",
            Type="String",
            AllowedValues=[
                "db.m5.large", "db.m5.xlarge", "db.m5.2xlarge",
                "db.m5.4xlarge", "db.m5.12xlarge", "db.m5.24xlarge",
                "db.m4.large", "db.m4.xlarge", "db.m4.2xlarge",
                "db.m4.4xlarge", "db.m4.10xlarge", "db.m4.16xlarge",
                "db.r4.large", "db.r4.xlarge", "db.r4.2xlarge",
                "db.r4.4xlarge", "db.r4.8xlarge", "db.r4.16xlarge",
                "db.x1e.xlarge", "db.x1e.2xlarge", "db.x1e.4xlarge",
                "db.x1e.8xlarge", "db.x1e.16xlarge", "db.x1e.32xlarge",
                "db.x1.16xlarge", "db.x1.32xlarge", "db.r3.large",
                "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge",
                "db.r3.8xlarge", "db.t2.micro", "db.t2.small",
                "db.t2.medium", "db.t2.large", "db.t2.xlarge",
                "db.t2.2xlarge"
            ],
            ConstraintDescription="must select a valid database instance type.",
        ))
    dbAllocatedStorage = t.add_parameter(
        Parameter(
            "DBAllocatedStorage",
            Default="5",
            Description="The size of the database (Gb)",
            Type="Number",
            MinValue="5",
            MaxValue="1024",
            ConstraintDescription="must be between 5 and 1024Gb.",
        ))
    whitelistedCIDR = t.add_parameter(
        Parameter(
            "WhitelistedCIDR",
            Description="CIDR whitelisted to be open on public instances",
            Type="String",
        ))

    #### NETWORK SECTION ####
    # One VPC, two public subnets (hard-coded to eu-west-1a/b), an internet
    # gateway and a single route table with a default route shared by both.
    vpc = t.add_resource(
        VPC("VPC", CidrBlock=Ref(vpcCidr), EnableDnsHostnames=True))
    subnet1 = t.add_resource(
        Subnet("Subnet1",
               CidrBlock=Ref(publicSubnet1),
               AvailabilityZone="eu-west-1a",
               VpcId=Ref(vpc)))
    subnet2 = t.add_resource(
        Subnet("Subnet2",
               CidrBlock=Ref(publicSubnet2),
               AvailabilityZone="eu-west-1b",
               VpcId=Ref(vpc)))
    internetGateway = t.add_resource(InternetGateway('InternetGateway'))
    gatewayAttachment = t.add_resource(
        VPCGatewayAttachment('AttachGateway',
                             VpcId=Ref(vpc),
                             InternetGatewayId=Ref(internetGateway)))
    routeTable = t.add_resource(RouteTable('RouteTable', VpcId=Ref(vpc)))
    route = t.add_resource(
        Route(
            'Route',
            DependsOn='AttachGateway',
            GatewayId=Ref('InternetGateway'),
            DestinationCidrBlock='0.0.0.0/0',
            RouteTableId=Ref(routeTable),
        ))
    subnetRouteTableAssociation = t.add_resource(
        SubnetRouteTableAssociation(
            'SubnetRouteTableAssociation',
            SubnetId=Ref(subnet1),
            RouteTableId=Ref(routeTable),
        ))
    subnetRouteTableAssociation2 = t.add_resource(
        SubnetRouteTableAssociation(
            'SubnetRouteTableAssociation2',
            SubnetId=Ref(subnet2),
            RouteTableId=Ref(routeTable),
        ))

    #### SECURITY GROUP ####
    # NOTE(review): GroupDescription says "SSH access via port 22" but this
    # SG actually opens HTTP :80 to the world (it fronts the ALB) — the
    # description is misleading; confirm before renaming (stack update
    # replaces SGs when the description changes).
    loadBalancerSg = t.add_resource(
        ec2.SecurityGroup(
            "LoadBalancerSecurityGroup",
            VpcId=Ref(vpc),
            GroupDescription="Enable SSH access via port 22",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                ),
            ],
        ))
    # Instance SG: SSH from the whitelisted CIDR, app port 8000 from the ALB.
    instanceSg = t.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            VpcId=Ref(vpc),
            GroupDescription="Enable SSH access via port 22",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp=Ref(whitelistedCIDR),
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="8000",
                    ToPort="8000",
                    SourceSecurityGroupId=Ref(loadBalancerSg),
                ),
            ],
        ))
    # RDS SG: Postgres (5432) from instances AND from the whitelisted CIDR
    # (the DB is PubliclyAccessible below, so the CIDR rule is reachable).
    rdsSg = t.add_resource(
        SecurityGroup("RDSSecurityGroup",
                      GroupDescription="Security group for RDS DB Instance.",
                      VpcId=Ref(vpc),
                      SecurityGroupIngress=[
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="5432",
                              ToPort="5432",
                              SourceSecurityGroupId=Ref(instanceSg),
                          ),
                          ec2.SecurityGroupRule(
                              IpProtocol="tcp",
                              FromPort="5432",
                              ToPort="5432",
                              CidrIp=Ref(whitelistedCIDR),
                          ),
                      ]))

    #### DATABASE SECTION ####
    subnetGroup = t.add_resource(
        DBSubnetGroup(
            "SubnetGroup",
            DBSubnetGroupDescription=
            "Subnets available for the RDS DB Instance",
            SubnetIds=[Ref(subnet1), Ref(subnet2)],
        ))
    db = t.add_resource(
        DBInstance(
            "RDSHelloWorldApp",
            # DB name/identifier are suffixed with the env so several
            # environments can share an account.
            DBName=Join("", [Ref(dbName), env]),
            DBInstanceIdentifier=Join("", [Ref(dbName), env]),
            EnableIAMDatabaseAuthentication=True,
            PubliclyAccessible=True,
            AllocatedStorage=Ref(dbAllocatedStorage),
            DBInstanceClass=Ref(dbType),
            Engine="postgres",
            EngineVersion="10.4",
            MasterUsername=Ref(dbUser),
            MasterUserPassword=Ref(dbPassword),
            DBSubnetGroupName=Ref(subnetGroup),
            VPCSecurityGroups=[Ref(rdsSg)],
        ))
    t.add_output(
        Output("RDSConnectionString",
               Description="Connection string for database",
               Value=GetAtt("RDSHelloWorldApp", "Endpoint.Address")))
    # Bootstrap mode stops here: network + DB only, no compute tier.
    if (bootstrap):
        return t

    #### INSTANCE SECTION ####
    keyName = t.add_parameter(
        Parameter(
            "KeyName",
            Type="String",
            Description="Name of an existing EC2 KeyPair to enable SSH access",
            MinLength="1",
            AllowedPattern="[\x20-\x7E]*",
            MaxLength="255",
            ConstraintDescription="can contain only ASCII characters.",
        ))
    scaleCapacityMin = t.add_parameter(
        Parameter(
            "ScaleCapacityMin",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))
    scaleCapacityMax = t.add_parameter(
        Parameter(
            "ScaleCapacityMax",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))
    scaleCapacityDesired = t.add_parameter(
        Parameter(
            "ScaleCapacityDesired",
            Default="1",
            Type="String",
            Description="Number of api servers to run",
        ))
    amiId = t.add_parameter(
        Parameter(
            "AmiId",
            Type="String",
            Default="ami-09693313102a30b2c",
            Description="The AMI id for the api instances",
        ))
    instanceType = t.add_parameter(
        Parameter("InstanceType",
                  Description="WebServer EC2 instance type",
                  Type="String",
                  Default="t2.medium",
                  AllowedValues=[
                      "t2.nano", "t2.micro", "t2.small", "t2.medium",
                      "t2.large", "m3.medium", "m3.large", "m3.xlarge",
                      "m3.2xlarge", "m4.large", "m4.xlarge", "m4.2xlarge",
                      "m4.4xlarge", "m4.10xlarge", "c4.large", "c4.xlarge",
                      "c4.2xlarge", "c4.4xlarge", "c4.8xlarge"
                  ],
                  ConstraintDescription="must be a valid EC2 instance type."))
    # IAM: role assumable by EC2, instance profile, and a policy allowing
    # the instances to ship logs to CloudWatch Logs.
    assumeRole = t.add_resource(
        Role("AssumeRole",
             AssumeRolePolicyDocument=json.loads("""\
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Action": "sts:AssumeRole",
            "Principal": {
                "Service": "ec2.amazonaws.com"
            },
            "Effect": "Allow",
            "Sid": ""
        }
    ]
}\
""")))
    instanceProfile = t.add_resource(
        InstanceProfile("InstanceProfile", Roles=[Ref(assumeRole)]))
    rolePolicyType = t.add_resource(
        PolicyType("RolePolicyType",
                   Roles=[Ref(assumeRole)],
                   PolicyName=Join("", ["CloudWatchHelloWorld", "-", env]),
                   PolicyDocument=json.loads("""\
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Action": [
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:DescribeLogStreams",
                "logs:PutLogEvents"
            ],
            "Effect": "Allow",
            "Resource": [
                "arn:aws:logs:*:*:*"
            ]
        }
    ]
}\
""")))
    appPassword = t.add_parameter(
        Parameter(
            "AppPassword",
            NoEcho=True,
            Description="The Password for the app user",
            Type="String",
            MinLength="8",
            MaxLength="41",
            AllowedPattern="[a-zA-Z0-9]*",
            ConstraintDescription="must contain only alphanumeric characters.")
    )
    # Launch configuration: cfn-init writes /home/app/environment (Spring
    # datasource settings) on boot, then cfn-signal reports back to the ASG.
    launchConfig = t.add_resource(
        LaunchConfiguration(
            "LaunchConfiguration",
            Metadata=autoscaling.Metadata(
                cloudformation.Init({
                    "config":
                    cloudformation.InitConfig(files=cloudformation.InitFiles({
                        "/home/app/environment":
                        cloudformation.InitFile(content=Join(
                            "",
                            [
                                "SPRING_DATASOURCE_URL=",
                                "jdbc:postgresql://",
                                GetAtt("RDSHelloWorldApp", "Endpoint.Address"),
                                ":5432/HelloWorldApp" + env +
                                "?currentSchema=hello_world",
                                "\n",
                                "SPRING_DATASOURCE_USERNAME=app",
                                "\n",
                                # NOTE(review): value was redacted in the
                                # source paste ("******"); the original most
                                # likely interpolated Ref(appPassword) here.
                                # Restore before use — as written this line
                                # is not valid Python.
                                "SPRING_DATASOURCE_PASSWORD="******"\n",
                                "SPRING_PROFILES_ACTIVE=",
                                env,
                                "\n"
                            ]),
                                                mode="000600",
                                                owner="app",
                                                group="app")
                    }), )
                }), ),
            UserData=Base64(
                Join('', [
                    "#!/bin/bash\n", "/opt/aws/bin/cfn-init",
                    " --resource LaunchConfiguration", " --stack ",
                    Ref("AWS::StackName"), " --region ", Ref("AWS::Region"),
                    "\n", "/opt/aws/bin/cfn-signal -e $? ", " --stack ", {
                        "Ref": "AWS::StackName"
                    }, " --resource AutoscalingGroup ", " --region ", {
                        "Ref": "AWS::Region"
                    }, "\n"
                ])),
            ImageId=Ref(amiId),
            KeyName=Ref(keyName),
            IamInstanceProfile=Ref(instanceProfile),
            BlockDeviceMappings=[
                ec2.BlockDeviceMapping(DeviceName="/dev/xvda",
                                       Ebs=ec2.EBSBlockDevice(VolumeSize="8")),
            ],
            SecurityGroups=[Ref(instanceSg)],
            InstanceType=Ref(instanceType),
            AssociatePublicIpAddress='True',
        ))
    # ALB -> target group (port 8000) -> listener on :80.
    applicationElasticLB = t.add_resource(
        elb.LoadBalancer("ApplicationElasticLB",
                         Name="ApplicationElasticLB-" + env,
                         Scheme="internet-facing",
                         Type="application",
                         SecurityGroups=[Ref(loadBalancerSg)],
                         Subnets=[Ref(subnet1), Ref(subnet2)]))
    targetGroup = t.add_resource(
        elb.TargetGroup("TargetGroupHelloWorld",
                        HealthCheckProtocol="HTTP",
                        HealthCheckTimeoutSeconds="15",
                        HealthyThresholdCount="5",
                        # 404 accepted as healthy: app may not serve "/".
                        Matcher=elb.Matcher(HttpCode="200,404"),
                        Port="8000",
                        Protocol="HTTP",
                        UnhealthyThresholdCount="3",
                        TargetGroupAttributes=[
                            elb.TargetGroupAttribute(
                                Key="deregistration_delay.timeout_seconds",
                                Value="120",
                            )
                        ],
                        VpcId=Ref(vpc)))
    listener = t.add_resource(
        elb.Listener("Listener",
                     Port="80",
                     Protocol="HTTP",
                     LoadBalancerArn=Ref(applicationElasticLB),
                     DefaultActions=[
                         elb.Action(Type="forward",
                                    TargetGroupArn=Ref(targetGroup))
                     ]))
    t.add_output(
        Output("URL",
               Description="URL of the sample website",
               Value=Join("",
                          ["http://",
                           GetAtt(applicationElasticLB, "DNSName")])))
    # ASG: rolling update keeps one instance in service; replacing update
    # allowed; creation waits for cfn-signal from each desired instance.
    autoScalingGroup = t.add_resource(
        AutoScalingGroup(
            "AutoscalingGroup",
            DesiredCapacity=Ref(scaleCapacityDesired),
            LaunchConfigurationName=Ref(launchConfig),
            MinSize=Ref(scaleCapacityMin),
            MaxSize=Ref(scaleCapacityMax),
            VPCZoneIdentifier=[Ref(subnet1), Ref(subnet2)],
            TargetGroupARNs=[Ref(targetGroup)],
            HealthCheckType="ELB",
            HealthCheckGracePeriod=360,
            UpdatePolicy=UpdatePolicy(
                AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
                    WillReplace=True, ),
                AutoScalingRollingUpdate=AutoScalingRollingUpdate(
                    PauseTime='PT5M',
                    MinInstancesInService="1",
                    MaxBatchSize='1',
                    WaitOnResourceSignals=True)),
            CreationPolicy=CreationPolicy(ResourceSignal=ResourceSignal(
                Timeout="PT15M", Count=Ref(scaleCapacityDesired)))))
    # print(t.to_json())
    return t
# cfn-init config: pull an rsyslog drop-in from the deploy bucket (S3
# authentication via the "DeployUserAuth" auth block declared elsewhere)
# and keep the rsyslog service running, restarting it when that file
# changes on a cfn-hup triggered update.
cloudformation.InitConfig(
    files=cloudformation.InitFiles({
        "/etc/rsyslog.d/20-somethin.conf":
        cloudformation.InitFile(
            # Fetched from S3 rather than inlined; path is keyed by the
            # root stack name so each stack gets its own copy.
            source=Join(
                "",
                [
                    "http://",
                    Ref(DeployBucket),
                    ".s3.amazonaws.com/stacks/",
                    Ref(RootStackName),
                    "/env/etc/rsyslog.d/20-somethin.conf",
                ],
            ),
            mode="000644",
            owner="root",
            group="root",
            authentication="DeployUserAuth",
        )
    }),
    services={
        "sysvinit":
        cloudformation.InitServices({
            "rsyslog":
            cloudformation.InitService(
                enabled=True,
                ensureRunning=True,
                # Restart rsyslog whenever the drop-in above is rewritten.
                files=["/etc/rsyslog.d/20-somethin.conf"],
            )
        })
    },
)
def _launch_config(self):
    """Build the LaunchConfiguration for the web-server autoscaling tier.

    The cfn-init metadata writes the two cfn-hup files so that updating the
    stack's ``AWS::CloudFormation::Init`` metadata re-runs cfn-init on the
    running instances; instance identity (AMI, key, profile, SGs, type,
    user-data) comes from ``self.config`` and stack pseudo-parameter refs.

    :returns: a troposphere ``LaunchConfiguration`` (not yet attached to a
        template).
    """
    return LaunchConfiguration(
        "LaunchConfiguration",
        Metadata=autoscaling.Metadata(
            cloudformation.Init({
                "config":
                cloudformation.InitConfig(
                    files=cloudformation.InitFiles({
                        # cfn-hup main config: which stack/region to poll.
                        '/etc/cfn/cfn-hup.conf':
                        cloudformation.InitFile(content=Join(
                            '', [
                                '[main]\n',
                                'stack=', self.ref_stack_id, '\n',
                                'region=', self.ref_region, '\n',
                            ]),
                                                mode='000400',
                                                owner='root',
                                                group='root'),
                        # Hook: on metadata change, re-run cfn-init for the
                        # WebServerInstance resource.
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf':
                        cloudformation.InitFile(
                            content=Join('', [
                                '[cfn-auto-reloader-hook]\n',
                                'triggers=post.update\n',
                                # BUGFIX: the original built this path with a
                                # backslash line-continuation *inside* the
                                # string literal, which spliced the next
                                # line's leading whitespace into the rendered
                                # path ("...WebServerInstance. Metadata...")
                                # so cfn-hup could never match the resource
                                # metadata key.  Adjacent literals concatenate
                                # with no stray whitespace.
                                'path=Resources.WebServerInstance.'
                                'Metadata.AWS::CloudFormation::Init\n',
                                'action=/opt/aws/bin/cfn-init -v ',
                                ' --stack ', self.ref_stack_name,
                                ' --resource WebServerInstance ',
                                ' --region ', self.ref_region, '\n',
                                'runas=root\n',
                            ]))
                    }),
                    services={
                        "sysvinit":
                        cloudformation.InitServices({
                            # NOTE(review): this InitConfig does not itself
                            # create /etc/rsyslog.d/20-somethin.conf — it is
                            # assumed to be provisioned by another config
                            # (see the rsyslog InitConfig elsewhere in this
                            # file); confirm both run in the same configset.
                            "rsyslog":
                            cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                                files=[
                                    '/etc/rsyslog.d/20-somethin.conf'
                                ])
                        })
                    })
            })),
        UserData=Base64(Join('', self.config['app_instance_user_data'])),
        ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
        KeyName=self.config['sshkey'],
        IamInstanceProfile=Ref(self.instance_iam_role_instance_profile),
        BlockDeviceMappings=[
            ec2.BlockDeviceMapping(DeviceName=self.config['device_name'],
                                   Ebs=ec2.EBSBlockDevice(VolumeSize="8")),
        ],
        SecurityGroups=self.config['app_sg'],
        InstanceType=self.config['instance_type'],
    )
def generate_stack_template():
    """Assemble the single-web-server stack template.

    Declares VPC/subnet/SG/keypair/instance-type parameters, a per-region
    AMI mapping, an nginx+git EC2 instance bootstrapped via cfn-init, and a
    PublicIp output.  Returns the populated troposphere ``Template``.
    """
    template = Template()
    generate_description(template)
    generate_version(template)

    # ---Parameters------------------------------------------------------------
    param_vpc_id = Parameter(
        'VpcIdentifer',
        Description=
        'The identity of the VPC (vpc-abcdwxyz) in which this stack shall be created.',
        Type='AWS::EC2::VPC::Id',
    )
    template.add_parameter(param_vpc_id)
    param_vpc_security_group = Parameter(
        'VpcSecurityGroup',
        Description=
        'The security group (sg-abcdwxyz) to apply to the resources created by this stack.',
        Type='AWS::EC2::SecurityGroup::Id',
    )
    template.add_parameter(param_vpc_security_group)
    param_webserver_instance_subnet_id = Parameter(
        'VpcSubnetIdentifer',
        Description=
        'The identity of the public subnet (subnet-abcdwxyz) in which the web server shall be created.',
        Type='AWS::EC2::Subnet::Id',
    )
    template.add_parameter(param_webserver_instance_subnet_id)
    param_keyname = Parameter(
        'PemKeyName',
        Description=
        'Name of an existing EC2 KeyPair file (.pem) to use to create EC2 instances',
        Type='AWS::EC2::KeyPair::KeyName')
    template.add_parameter(param_keyname)
    param_instance_type = Parameter(
        'EC2InstanceType',
        Description=
        'EC2 instance type, reference this parameter to insure consistency',
        Type='String',
        Default=
        't2.medium',  # Prices from (2015-12-03) (Windows, us-west (North CA))
        AllowedValues=[  # Source : https://aws.amazon.com/ec2/pricing/
            't2.small',  # $0.044/hour
            't2.micro',  # $0.022/hour
            't2.medium',  # $0.088/hour
            't2.large',  # $0.166/hour
            'm3.medium',  # $0.140/hour
            'm3.large',  # $0.28/hour
            'c4.large'  # $0.221/hour
        ],
        ConstraintDescription='Must be a valid EC2 instance type')
    template.add_parameter(param_instance_type)

    #---Mappings---------------------------------------------------------------
    # Region -> AMI id for the web server (dated AMIs; update when rebaking).
    mapping_environment_attribute_map = template.add_mapping(
        'EnvironmentAttributeMap', {
            'ap-southeast-1': {
                'WebServerAmi': 'ami-1ddc0b7e'
            },
            'ap-southeast-2': {
                'WebServerAmi': 'ami-0c95b86f'
            },
            'us-east-1': {
                'WebServerAmi': 'ami-a4827dc9'
            },
            'us-west-1': {
                'WebServerAmi': 'ami-f5f41398'
            }
        })

    # ---Resources-------------------------------------------------------------
    ref_region = Ref('AWS::Region')
    ref_stack_name = Ref('AWS::StackName')
    # Create the metadata for the server instance.
    name_web_server = 'WebServer'
    webserver_instance_metadata = cloudformation.Metadata(
        cloudformation.Init({
            'config':
            cloudformation.InitConfig(
                packages={'yum': {
                    'nginx': [],
                    'git': []
                }},
                files=cloudformation.InitFiles({
                    # cfn-hup.conf initialization
                    # NOTE(review): despite the comment above, this writes an
                    # nginx site config, not cfn-hup.conf — and the cfn-hup
                    # service below lists /etc/cfn/cfn-hup.conf and the
                    # auto-reloader hook, neither of which is created by this
                    # InitConfig.  Confirm whether the AMI pre-bakes them.
                    # NOTE(review): leading whitespace inside these literals
                    # was collapsed in this paste; exact original indentation
                    # of the nginx config body is unverified.
                    '/etc/cfn/authorapp.conf':
                    cloudformation.InitFile(content=Join(
                        '',
                        [
                            'server {', '\n',
                            ' listen 3030 ssl http2;', '\n',
                            ' root /var/www/authorapp;', '\n',
                            '\n',
                            ' ssl_certificate /vagrant/ssl/ca.crt;', '\n',
                            ' ssl_certificate_key /vagrant/ssl/ca.key;', '\n',
                            '\n',
                            ' location / {', '\n',
                            ' }', '\n',
                            '\n',
                            ' location /api {', '\n',
                            ' proxy_pass http://10.50.50.1:3000;', '\n',
                            ' }', '\n',
                            '}', '\n',
                        ]),
                                            mode='000400',
                                            owner='root',
                                            group='root'),
                }),
                services=dict(sysvinit=cloudformation.InitServices({
                    # start cfn-hup service -
                    # required for CloudFormation stack update
                    'cfn-hup':
                    cloudformation.InitService(
                        enabled=True,
                        ensureRunning=True,
                        files=[
                            '/etc/cfn/cfn-hup.conf',
                            '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                        ]),
                    # Disable sendmail service - not required.
                    'sendmail':
                    cloudformation.InitService(enabled=False,
                                               ensureRunning=False)
                })))
        }))
    resource_web_server = ec2.Instance(
        name_web_server,
        Metadata=webserver_instance_metadata,
        ImageId=FindInMap('EnvironmentAttributeMap', ref_region,
                          'WebServerAmi'),
        InstanceType=Ref(param_instance_type),
        KeyName=Ref(param_keyname),
        NetworkInterfaces=[
            ec2.NetworkInterfaceProperty(
                AssociatePublicIpAddress=str(True),
                DeleteOnTermination=str(True),
                Description='Network interface for web server',
                DeviceIndex=str(0),
                GroupSet=[Ref(param_vpc_security_group)],
                SubnetId=Ref(param_webserver_instance_subnet_id),
            )
        ],
        Tags=Tags(Name=name_web_server, VPC=Ref(param_vpc_id)),
        UserData=Base64(
            Join('', [
                '#!/bin/bash -xe\n',
                'yum update -y aws-cfn-bootstrap\n',
                # NOTE(review): missing comma after '\n' — the next two
                # literals are implicitly concatenated.  Harmless here
                # (Join('') concatenates everything anyway) but easy to
                # misread.
                'yum update -y', '\n'
                '/opt/aws/bin/cfn-init --verbose ',
                ' --stack ', ref_stack_name,
                ' --resource %s ' % name_web_server,
                ' --region ', ref_region, '\n',
                # NOTE(review): cfn-signal is called without --region here,
                # unlike cfn-init above; confirm the default region on the
                # AMI matches the stack region.
                '/opt/aws/bin/cfn-signal --exit-code $? ',
                ' --stack ', ref_stack_name,
                ' --resource ', name_web_server,
                '\n'
            ])))
    template.add_resource(resource_web_server)
    template.add_output(
        Output('WebServer',
               Description='Web Server',
               Value=GetAtt(name_web_server, 'PublicIp')))
    return template
# cfn-init config block (keyword argument of an enclosing Init/InitConfigSets
# call): installs the cfn-hup daemon configuration so the instance polls the
# stack every 5 minutes and re-runs cfn-init (configset "Update") when the
# target resource's AWS::CloudFormation::Init metadata changes.
ConfigCFNTools=cloudformation.InitConfig(
    files={
        '/etc/cfn/cfn-hup.conf': {
            'content': Sub(
                '[main]\n'
                'stack=${AWS::StackId}\n'
                'region=${AWS::Region}\n'
                'interval=5\n'
                'verbose=false\n'
            ),
            'mode': '000400',
            'owner': 'root',
            'group': 'root',
        },
        '/etc/cfn/hooks.d/cfn-auto-reloader.conf': {
            # %-formatting injects the Python-side resource name; ${...}
            # placeholders are left for CloudFormation's Sub to resolve.
            'content': Sub(
                '[cfn-auto-reloader-hook]\n'
                'triggers=post.update\n'
                'path=Resources.%(INSTANCE_NAME)s.Metadata.AWS::CloudFormation::Init\n'
                'action=/opt/aws/bin/cfn-init -v'
                ' --stack ${AWS::StackName}'
                ' --resource %(INSTANCE_NAME)s'
                ' --configsets Update'
                ' --region ${AWS::Region}'
                '\n'
                'runas=root\n' % \
                {'INSTANCE_NAME': instance_resource_name}
            ),
        },
    },
    services={
        'sysvinit': cloudformation.InitServices(
            {
                # Keep cfn-hup running; restart it if either config changes.
                'cfn-hup': cloudformation.InitService(
                    enabled=True,
                    ensureRunning=True,
                    files=[
                        '/etc/cfn/cfn-hup.conf',
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf',
                    ]
                ),
            }
        )
    },
),
"yum install pystache python-daemon -y\n", "/bin/rpm -U https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.amzn1.noarch.rpm\n", "/opt/aws/bin/cfn-init ", " --stack ", { "Ref": "AWS::StackName" }, " --resource myLaunchConfig", " --configsets InstallandRun", " --region ", { "Ref": "AWS::Region" }, "\n" ])), Metadata=Metadata( cf.Init({ "configsets": cf.InitConfigSets(InstallandRun=["install", "config"]), "install": cf.InitConfig(packages={"yum": { "git": [], "wget": [] }}), "config": cf.InitConfig(files=cf.InitFiles({ "/tmp/example.txt": cf.InitFile(content=Join('', [ "This is a file example.\n", "See another examples in:\n", "https://github.com/rabeloo/cf-templates\n" ]), owner="root", group="root", mode="000600") }), ), }))))
def _add_ec2_auto_scaling(self):
    """Add the ECS host tier: security groups, launch configuration (with
    cfn-hup metadata and ECS cluster registration), autoscaling group and a
    simple scale-up policy.

    Mutates ``self.template`` and stores the created SGs / ASG / policy on
    ``self`` for use by other builder methods.
    """
    instance_profile = self._add_instance_profile()
    # ALB SG (no ingress rules here — presumably added elsewhere; confirm).
    self.sg_alb = SecurityGroup(
        "SecurityGroupAlb",
        VpcId=Ref(self.vpc),
        GroupDescription=Sub("${AWS::StackName}-alb"))
    self.template.add_resource(self.sg_alb)
    # Host SG: all traffic from the ALB SG.
    self.sg_hosts = SecurityGroup(
        "SecurityGroupEc2Hosts",
        SecurityGroupIngress=[{
            'SourceSecurityGroupId': Ref(self.sg_alb),
            'IpProtocol': -1
        }],
        VpcId=Ref(self.vpc),
        GroupDescription=Sub("${AWS::StackName}-hosts"))
    self.template.add_resource(self.sg_hosts)
    # Self-referencing rule: hosts may talk to each other on any port
    # (separate resource to avoid a circular SG definition).
    sg_host_ingress = SecurityGroupIngress("SecurityEc2HostsIngress",
                                           SourceSecurityGroupId=Ref(
                                               self.sg_hosts),
                                           IpProtocol="-1",
                                           GroupId=Ref(self.sg_hosts),
                                           FromPort="-1",
                                           ToPort="-1")
    self.template.add_resource(sg_host_ingress)
    # DB SG: all traffic from the hosts.
    database_security_group = SecurityGroup(
        "SecurityGroupDatabases",
        SecurityGroupIngress=[{
            'SourceSecurityGroupId': Ref(self.sg_hosts),
            'IpProtocol': -1
        }],
        VpcId=Ref(self.vpc),
        GroupDescription=Sub("${AWS::StackName}-databases"))
    self.template.add_resource(database_security_group)
    # Boot script: run cfn-init, signal the ASG, then install/start the SSM
    # agent.  Note cfn-signal reports the cfn-init exit status.
    user_data = Base64(
        Sub('\n'.join([
            "#!/bin/bash",
            "yum update -y",
            "yum install -y aws-cfn-bootstrap",
            "/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource LaunchConfiguration",
            "/opt/aws/bin/cfn-signal -e $? --region ${AWS::Region} --stack ${AWS::StackName} --resource AutoScalingGroup",
            "yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm",
            "systemctl enable amazon-ssm-agent",
            "systemctl start amazon-ssm-agent", ""
        ])))
    lc_metadata = cloudformation.Init({
        "config":
        cloudformation.InitConfig(
            files=cloudformation.InitFiles({
                "/etc/cfn/cfn-hup.conf":
                cloudformation.InitFile(
                    content=Sub('\n'.join([
                        '[main]', 'stack=${AWS::StackId}',
                        'region=${AWS::Region}', ''
                    ])),
                    mode='256',  # TODO: Why 256
                    owner="root",
                    group="root"),
                # NOTE(review): the hook's path targets a resource named
                # "ContainerInstances" but this template names the resource
                # "LaunchConfiguration" — the auto-reloader likely never
                # fires; confirm and align.
                "/etc/cfn/hooks.d/cfn-auto-reloader.conf":
                cloudformation.InitFile(content=Sub('\n'.join([
                    '[cfn-auto-reloader-hook]', 'triggers=post.update',
                    'path=Resources.ContainerInstances.Metadata.AWS::CloudFormation::Init',
                    'action=/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource LaunchConfiguration',
                    ''
                ])), )
            }),
            services={
                "sysvinit":
                cloudformation.InitServices({
                    "cfn-hup":
                    cloudformation.InitService(
                        enabled=True,
                        ensureRunning=True,
                        files=[
                            '/etc/cfn/cfn-hup.conf',
                            '/etc/cfn/hooks.d/cfn-auto-reloader.conf'
                        ])
                })
            },
            commands={
                # Register the host with the ECS cluster and reserve memory
                # for the agent/OS.
                '01_add_instance_to_cluster': {
                    'command':
                    Sub('echo "ECS_CLUSTER=${Cluster}\nECS_RESERVED_MEMORY=256" > /etc/ecs/ecs.config'
                        )
                }
            })
    })
    launch_configuration = LaunchConfiguration(
        'LaunchConfiguration',
        UserData=user_data,
        IamInstanceProfile=Ref(instance_profile),
        SecurityGroups=[Ref(self.sg_hosts)],
        InstanceType=Ref('InstanceType'),
        ImageId=FindInMap("AWSRegionToAMI", Ref("AWS::Region"), "AMI"),
        Metadata=lc_metadata,
        KeyName=Ref(self.key_pair))
    self.template.add_resource(launch_configuration)
    # , PauseTime='PT15M', WaitOnResourceSignals=True, MaxBatchSize=1, MinInstancesInService=1)
    # NOTE(review): AutoScalingRollingUpdate is constructed with only a
    # positional title and none of the commented-out tuning kwargs above —
    # i.e. an effectively empty rolling-update policy; confirm intent.
    up = AutoScalingRollingUpdate('AutoScalingRollingUpdate')
    # TODO: clean up
    subnets = list(self.private_subnets)
    self.auto_scaling_group = AutoScalingGroup(
        "AutoScalingGroup",
        UpdatePolicy=up,
        DesiredCapacity=self.desired_instances,
        Tags=[{
            'PropagateAtLaunch': True,
            'Value': Sub('${AWS::StackName} - ECS Host'),
            'Key': 'Name'
        }],
        MinSize=Ref('MinSize'),
        MaxSize=Ref('MaxSize'),
        # Exactly two subnets are consumed here; assumes
        # self.private_subnets has >= 2 entries — TODO confirm.
        VPCZoneIdentifier=[Ref(subnets.pop()), Ref(subnets.pop())],
        LaunchConfigurationName=Ref(launch_configuration),
        CreationPolicy=CreationPolicy(ResourceSignal=ResourceSignal(
            Timeout='PT15M')))
    self.template.add_resource(self.auto_scaling_group)
    # Simple +1 scale-up policy; no scale-down counterpart in this method.
    self.cluster_scaling_policy = ScalingPolicy(
        'AutoScalingPolicy',
        AdjustmentType='ChangeInCapacity',
        AutoScalingGroupName=Ref(self.auto_scaling_group),
        Cooldown=300,
        PolicyType='SimpleScaling',
        ScalingAdjustment=1)
    self.template.add_resource(self.cluster_scaling_policy)
# Define our launch configuration (AWS compatibility resource) launch_config = template.add_resource(autoscaling.LaunchConfiguration( "MyLaunchConfig", KeyName="bootstrap", InstanceType="t1.micro", ImageId="Ubuntu", SecurityGroups=[Ref(security_group)], Metadata=cloudformation.Init({ "config": cloudformation.InitConfig( files=cloudformation.InitFiles({ "file1": cloudformation.InitFile( content=Join('\n', [ "This is a", "test file" ]), mode="000755", owner="root", group="root", context=cloudformation.InitFileContext({ "security_group_id": Ref(security_group) }) ) }) ) }), UserData=Base64(Join('\n', [ "#!/bin/bash", "echo \"Upgrade started at $(date)\"", "apt-get update", "apt-get -y upgrade", "echo \"Upgrade complete at $(date)\"", ]))
def buildInstance(t, args):
    """Add the OpenEMR Express Plus web server and its supporting resources
    to template ``t``.

    Creates: a world-open web/SSH security group, IAM policy/role/profile
    granting scoped S3 backup and KMS access, an encrypted EBS data volume
    for Docker, and the EC2 instance whose cfn-init "Setup" configset runs
    /root/cloud-setup.sh.

    :param t: troposphere ``Template`` to mutate.
    :param args: parsed CLI args; ``args.dev`` selects volume DeletionPolicy
        Delete (dev) vs Snapshot (prod).
    :returns: the same template ``t``.
    """
    # SG + three ingress rules: SSH (22), HTTP (80), HTTPS (443), all from
    # 0.0.0.0/0.
    t.add_resource(
        ec2.SecurityGroup('WebserverSG',
                          GroupDescription='Global Webserver Access',
                          VpcId=Ref('VPC'),
                          Tags=Tags(Name='Global Webserver Access')))
    t.add_resource(
        ec2.SecurityGroupIngress('WebserverSGIngress1',
                                 GroupId=Ref('WebserverSG'),
                                 IpProtocol='tcp',
                                 CidrIp='0.0.0.0/0',
                                 FromPort='22',
                                 ToPort='22'))
    t.add_resource(
        ec2.SecurityGroupIngress('WebserverSGIngress2',
                                 GroupId=Ref('WebserverSG'),
                                 IpProtocol='tcp',
                                 CidrIp='0.0.0.0/0',
                                 FromPort='80',
                                 ToPort='80'))
    t.add_resource(
        ec2.SecurityGroupIngress('WebserverSGIngress3',
                                 GroupId=Ref('WebserverSG'),
                                 IpProtocol='tcp',
                                 CidrIp='0.0.0.0/0',
                                 FromPort='443',
                                 ToPort='443'))
    # Least-privilege statements: list the backup bucket, read/write only
    # under /Backup/, and use the OpenEMR KMS key for envelope encryption.
    rolePolicyStatements = [{
        "Sid": "Stmt1500699052003",
        "Effect": "Allow",
        "Action": ["s3:ListBucket"],
        "Resource": [Join("", ["arn:aws:s3:::", Ref('S3Bucket')])]
    }, {
        "Sid": "Stmt1500699052000",
        "Effect": "Allow",
        "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject"],
        "Resource":
        [Join("", ["arn:aws:s3:::", Ref('S3Bucket'), '/Backup/*'])]
    }, {
        "Sid": "Stmt1500612724002",
        "Effect": "Allow",
        "Action": ["kms:Encrypt", "kms:Decrypt", "kms:GenerateDataKey*"],
        "Resource": [OpenEMRKeyARN]
    }]
    t.add_resource(
        iam.ManagedPolicy('WebserverPolicy',
                          Description='Policy for webserver instance',
                          PolicyDocument={
                              "Version": "2012-10-17",
                              "Statement": rolePolicyStatements
                          }))
    t.add_resource(
        iam.Role('WebserverRole',
                 AssumeRolePolicyDocument={
                     "Version": "2012-10-17",
                     "Statement": [{
                         "Effect": "Allow",
                         "Principal": {
                             "Service": ["ec2.amazonaws.com"]
                         },
                         "Action": ["sts:AssumeRole"]
                     }]
                 },
                 Path='/',
                 ManagedPolicyArns=[Ref('WebserverPolicy')]))
    t.add_resource(
        iam.InstanceProfile('WebserverInstanceProfile',
                            Path='/',
                            Roles=[Ref('WebserverRole')]))
    # Encrypted data volume; kept as a snapshot on stack deletion unless
    # running in dev mode.
    t.add_resource(
        ec2.Volume('DockerVolume',
                   DeletionPolicy='Delete' if args.dev else 'Snapshot',
                   Size=Ref('PracticeStorage'),
                   AvailabilityZone=Select("0", GetAZs("")),
                   VolumeType='gp2',
                   Encrypted=True,
                   KmsKeyId=OpenEMRKeyID,
                   Tags=Tags(Name="OpenEMR Practice")))
    # First-boot user-data: install cfn helpers, run the Setup configset,
    # then signal the instance CreationPolicy.
    bootstrapScript = [
        "#!/bin/bash -x\n", "exec > /tmp/part-001.log 2>&1\n",
        "apt-get -y update\n", "apt-get -y install python-pip\n",
        "pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n",
        "cfn-init -v ", " --stack ", ref_stack_name,
        " --resource WebserverInstance ", " --configsets Setup ",
        " --region ", ref_region, "\n", "cfn-signal -e $? ", " --stack ",
        ref_stack_name, " --resource WebserverInstance ", " --region ",
        ref_region, "\n"
    ]
    # cloud-setup.sh: dist-upgrade, format/mount the Docker volume, record
    # S3/KMS identifiers, generate and store the backup passphrase in S3
    # (KMS-encrypted), then fetch and run the OpenEMR launcher.
    setupScript = [
        "#!/bin/bash -xe\n", "exec > /tmp/cloud-setup.log 2>&1\n",
        "DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade -y -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" --force-yes\n",
        "mkfs -t ext4 /dev/xvdd\n", "mkdir /mnt/docker\n",
        "cat /root/fstab.append >> /etc/fstab\n", "mount /mnt/docker\n",
        "ln -s /mnt/docker /var/lib/docker\n",
        "apt-get -y install python-boto awscli\n", "S3=", Ref('S3Bucket'),
        "\n", "KMS=", OpenEMRKeyID, "\n",
        "touch /root/cloud-backups-enabled\n",
        "echo $S3 > /root/.cloud-s3.txt\n",
        "echo $KMS > /root/.cloud-kms.txt\n", "touch /tmp/mypass\n",
        "chmod 500 /tmp/mypass\n",
        "openssl rand -base64 32 >> /tmp/mypass\n",
        "aws s3 cp /tmp/mypass s3://$S3/Backup/passphrase.txt --sse aws:kms --sse-kms-key-id $KMS\n",
        "rm /tmp/mypass\n",
        "curl -L https://raw.githubusercontent.com/openemr/openemr-devops/master/packages/lightsail/launch.sh > /root/launch.sh\n",
        "chmod +x /root/launch.sh && /root/launch.sh -s 0\n"
    ]
    # nofail so boot still succeeds if the volume is not yet attached.
    fstabFile = ["/dev/xvdd /mnt/docker ext4 defaults,nofail 0 0\n"]
    bootstrapInstall = cloudformation.InitConfig(
        files={
            "/root/cloud-setup.sh": {
                "content": Join("", setupScript),
                "mode": "000500",
                "owner": "root",
                "group": "root"
            },
            "/root/fstab.append": {
                "content": Join("", fstabFile),
                "mode": "000400",
                "owner": "root",
                "group": "root"
            }
        },
        commands={"01_setup": {
            "command": "/root/cloud-setup.sh"
        }})
    bootstrapMetadata = cloudformation.Metadata(
        cloudformation.Init(cloudformation.InitConfigSets(Setup=['Install']),
                            Install=bootstrapInstall))
    t.add_resource(
        ec2.Instance('WebserverInstance',
                     Metadata=bootstrapMetadata,
                     ImageId=FindInMap('RegionData', ref_region, 'UbuntuAMI'),
                     InstanceType=Ref('InstanceSize'),
                     NetworkInterfaces=[
                         ec2.NetworkInterfaceProperty(
                             AssociatePublicIpAddress=True,
                             DeviceIndex="0",
                             GroupSet=[Ref('WebserverSG')],
                             SubnetId=Ref('PublicSubnet1'))
                     ],
                     KeyName=Ref('EC2KeyPair'),
                     IamInstanceProfile=Ref('WebserverInstanceProfile'),
                     # Attached as /dev/sdd; seen by the OS as /dev/xvdd
                     # (the device the setup script formats).
                     Volumes=[{
                         "Device": "/dev/sdd",
                         "VolumeId": Ref('DockerVolume')
                     }],
                     Tags=Tags(Name='OpenEMR Express Plus'),
                     InstanceInitiatedShutdownBehavior='stop',
                     UserData=Base64(Join('', bootstrapScript)),
                     # Stack creation blocks until cfn-signal (or 25 min).
                     CreationPolicy={"ResourceSignal": {
                         "Timeout": "PT25M"
                     }}))
    return t
def cfn_ecs_cluster():
    """Build the cfn-init config dict for ECS cluster hosts.

    Returns a dict of named ``cfm.InitConfig`` blocks — REPOSITORIES,
    PACKAGES, SERVICES — where all GPU-specific pieces (nvidia repos,
    drivers, docker runtime, kernel-module reload) are gated behind the
    "GPUInstance" template condition via ``If(..., Ref("AWS::NoValue"))``.
    Optionally wires in Postfix-disable plumbing when ``cfg.Postfix`` exists.
    """
    init_args = {}
    init_args.update({
        "REPOSITORIES":
        cfm.InitConfig(
            commands=If(
                "GPUInstance",
                {
                    # "test" keys make each command idempotent: skip when the
                    # repo file already exists.
                    "10-epel-repo": {
                        "command": "amazon-linux-extras install -y epel",
                        "test": "! test -e /etc/yum.repos.d/epel.repo",
                    },
                    # 'cuda-repo': {
                    #     'command': 'yum install -y '
                    #     'http://developer.download.nvidia.com'
                    #     '/compute/cuda/repos/rhel7/x86_64/'
                    #     'cuda-repo-rhel7-10.0.130-1.x86_64.rpm',
                    #     'test': 'test ! -e /etc/yum.repos.d/cuda.repo'
                    # },
                    "nvidia-docker": {
                        "command": "curl -s -L "
                        "https://nvidia.github.io"
                        "/nvidia-docker/amzn2/nvidia-docker.repo | "
                        "tee /etc/yum.repos.d/nvidia-docker.repo",
                        "test": "test ! -e /etc/yum.repos.d/nvidia-docker.repo",
                    },
                },
                Ref("AWS::NoValue"),
            ),
            files=If(
                "GPUInstance",
                {
                    "/etc/yum.repos.d/epel-nvidia.repo": {
                        "content": Join(
                            "\n",
                            [
                                "[epel-nvidia]",
                                "name=negativo17 - Nvidia",
                                "baseurl=https://negativo17.org"
                                "/repos/nvidia/epel-7/$basearch/",
                                "enabled=1",
                                "skip_if_unavailable=1",
                                "gpgcheck=1",
                                "gpgkey=https://negativo17.org"
                                "/repos/RPM-GPG-KEY-slaanesh",
                                "enabled_metadata=1",
                                "metadata_expire=6h",
                                "type=rpm-md",
                                "repo_gpgcheck=0",
                            ],
                        )
                    }
                },
                Ref("AWS::NoValue"),
            ),
        ),
        "PACKAGES":
        cfm.InitConfig(
            packages={
                "yum": {
                    "nfs-utils": [],
                    # Build toolchain + nvidia stack only on GPU instances.
                    "make": If("GPUInstance", [], Ref("AWS::NoValue")),
                    "automake": If("GPUInstance", [], Ref("AWS::NoValue")),
                    "gcc": If("GPUInstance", [], Ref("AWS::NoValue")),
                    "gcc-c++": If("GPUInstance", [], Ref("AWS::NoValue")),
                    "kernel-devel": If("GPUInstance", [],
                                       Ref("AWS::NoValue")),
                    # 'cuda': If('GPUInstance', [], Ref('AWS::NoValue')),
                    "dkms-nvidia": If("GPUInstance", [],
                                      Ref("AWS::NoValue")),
                    "nvidia-driver-cuda": If("GPUInstance", [],
                                             Ref("AWS::NoValue")),
                    "nvidia-docker2": If("GPUInstance", [],
                                         Ref("AWS::NoValue")),
                }
            }),
        "SERVICES":
        cfm.InitConfig(
            files={
                # ECS agent config: cluster name plus project-configured
                # agent settings, plus privileged containers on GPU hosts.
                "/etc/ecs/ecs.config": {
                    "content": Join(
                        "\n",
                        [Sub("ECS_CLUSTER=${Cluster}")] + [
                            # NOTE(review): the "${1M}" placeholder looks
                            # garbled/paste-damaged — confirm the intended
                            # get_subvalue() substitution token against the
                            # helper's definition.
                            get_subvalue(
                                "%s=${1M}" % camel_to_snake(n),
                                f"ECSClusterBaseAgentCfg{n}",
                            ) for n in cfg.ECSClusterBaseAgentCfg
                        ] + [
                            If(
                                "GPUInstance",
                                "ECS_DISABLE_PRIVILEGED=false",
                                Ref("AWS::NoValue"),
                            ),
                        ],
                    )
                },
                # GPU hosts default docker to the nvidia runtime.
                "/etc/docker/daemon.json": If(
                    "GPUInstance",
                    {
                        "content": Join(
                            "\n",
                            [
                                "{",
                                ' "default-runtime": "nvidia",',
                                ' "runtimes": {',
                                ' "nvidia": {',
                                ' "path": "/usr/bin/nvidia-container-runtime",',
                                ' "runtimeArgs": []',
                                " }",
                                " }",
                                "}",
                            ],
                        )
                    },
                    Ref("AWS::NoValue"),
                ),
            },
            commands={
                # Reload nvidia kernel modules; trailing "true" makes the
                # command succeed even when nothing was loaded.
                "01-kern-modules": If(
                    "GPUInstance",
                    {
                        "command": "rmmod nvidia_modeset;rmmod nvidia_uvm;"
                        "rmmod nvidia;modprobe nvidia;modprobe nvidia_uvm;true"
                    },
                    Ref("AWS::NoValue"),
                ),
                # SIGHUP makes dockerd re-read daemon.json.
                "02-restart-docker": If(
                    "GPUInstance",
                    {"command": "pkill -SIGHUP dockerd"},
                    Ref("AWS::NoValue"),
                ),
            },
        ),
    })

    # aws ecs ami by default run postfix, use yaml cfg Postfix: false to
    # stop it
    # Feature probe: attribute access raises when the cfg key is absent, in
    # which case the whole Postfix block is skipped (try/except/else).
    try:
        cfg.Postfix
    except Exception:
        pass
    else:
        # Parameter
        P_Postfix = Parameter("Postfix")
        P_Postfix.Description = "Disable Postfix Service"
        # Condition
        C_Postfix = get_condition("Postfix", "equals", "no")
        # Output
        O_Postfix = Output("Postfix")
        O_Postfix.Value = get_endvalue("Postfix")
        add_obj([
            P_Postfix,
            C_Postfix,
            O_Postfix,
        ])
        # Attach the conditional postfix service to the SERVICES block built
        # above.
        init_args["SERVICES"].services = {
            "sysvinit": {
                "postfix": If(
                    "Postfix",
                    {
                        "enabled": "false",
                        "ensureRunning": "false"
                    },
                    Ref("AWS::NoValue"),
                )
            }
        }
    return init_args
def init_template(self):
    """Populate ``self.template`` with an ECS cluster, the instance IAM
    role/profile/policy, a cfn-init configured launch configuration, an
    auto scaling group, and scale up/down policies with their CloudWatch
    alarms.

    Fix: ``.iteritems()`` is Python-2-only and raises ``AttributeError``
    on Python 3; ``.items()`` behaves the same on both.
    """
    self.template.add_description(self.TEMPLATE_DESCRIPTION)

    ecs_cluster = self.template.add_resource(Cluster(self.CLUSTER_NAME))

    # IAM role assumed by the EC2 container instances.
    ecs_instance_role = self.template.add_resource(
        Role('sitInstanceRole',
             Path='/',
             AssumeRolePolicyDocument={
                 "Statement": [{
                     "Effect": "Allow",
                     "Principal": {
                         "Service": ["ec2.amazonaws.com"]
                     },
                     "Action": ["sts:AssumeRole"]
                 }]
             }))
    ecs_instance_profile = self.template.add_resource(
        InstanceProfile('sitInstanceProfile',
                        Path='/',
                        Roles=[Ref(ecs_instance_role)]))
    # Permissions needed by the ECS agent, ECR pulls and awslogs.
    ecs_instance_policy = self.template.add_resource(
        PolicyType('sitInstancePolicy',
                   PolicyName='ecs-policy',
                   Roles=[Ref(ecs_instance_role)],
                   PolicyDocument={
                       "Statement": [{
                           "Effect": "Allow",
                           "Action": [
                               "ecs:CreateCluster",
                               "ecs:RegisterContainerInstance",
                               "ecs:DeregisterContainerInstance",
                               "ecs:DiscoverPollEndpoint",
                               "ecs:Submit*",
                               "ecs:Poll",
                               "ecs:StartTelemetrySession",
                               "ecr:GetAuthorizationToken",
                               "ecr:BatchCheckLayerAvailability",
                               "ecr:GetDownloadUrlForLayer",
                               "ecr:BatchGetImage",
                               "logs:CreateLogStream",
                               "logs:PutLogEvents"
                           ],
                           "Resource": "*"
                       }],
                   }))

    # Register the instance with the cluster.  The '\n' inside the bash
    # $"..." string is a real newline, so two config lines are appended.
    commands = {
        '01_add_instance_to_cluster': {
            'command': Join('', [
                '#!/bin/bash\n',
                'echo ECS_CLUSTER=',
                Ref(ecs_cluster),
                '$"\n"ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=',
                self.ECS_TASK_CLEANUP_WAIT,
                ' >> /etc/ecs/ecs.config'
            ])
        }
    }
    # cfn-hup config + auto-reloader hook so metadata updates re-run
    # cfn-init on the running instances.
    files = {
        "/etc/cfn/cfn-hup.conf": {
            "content": Join("", [
                "[main]\n",
                "stack=", Ref("AWS::StackId"), "\n",
                "region=", Ref("AWS::Region"), "\n"
            ]),
            "mode": "000400",
            "owner": "root",
            "group": "root"
        },
        "/etc/cfn/hooks.d/cfn-auto-reloader.conf": {
            "content": Join("", [
                "[cfn-auto-reloader-hook]\n",
                "triggers=post.update\n",
                "path=Resources.{0}.Metadata.AWS::CloudFormation::Init\n".
                format(self.LAUNCH_CONFIGURATION_NAME),
                "action=/opt/aws/bin/cfn-init -v ",
                " --stack ", Ref("AWS::StackName"),
                " --resource {0}".format(self.LAUNCH_CONFIGURATION_NAME),
                " --region ", Ref("AWS::Region"), "\n",
                "runas=root\n"
            ])
        }
    }
    services = {
        "sysvinit": {
            "cfn-hup": {
                "enabled": "true",
                "ensureRunning": "true",
                "files": [
                    "/etc/cfn/cfn-hup.conf",
                    "/etc/cfn/hooks.d/cfn-auto-reloader.conf"
                ]
            }
        }
    }

    launch_configuration = self.template.add_resource(
        LaunchConfiguration(self.LAUNCH_CONFIGURATION_NAME,
                            ImageId=self.AMI_ID,
                            IamInstanceProfile=Ref(ecs_instance_profile),
                            InstanceType=self.INSTANCE_TYPE,
                            UserData=self.user_data.get_base64_data(),
                            AssociatePublicIpAddress=False,
                            SecurityGroups=self.SECURITY_GROUPS,
                            KeyName=self.KEY_NAME,
                            Metadata=autoscaling.Metadata(
                                cloudformation.Init({
                                    "config": cloudformation.InitConfig(
                                        commands=commands,
                                        files=files,
                                        services=services)
                                })),
                            BlockDeviceMappings=[
                                autoscaling.BlockDeviceMapping(
                                    DeviceName=self.EBS_DEVICE_NAME,
                                    Ebs=autoscaling.EBSBlockDevice(
                                        DeleteOnTermination=True,
                                        VolumeSize=self.EBS_VOLUME_SIZE,
                                        VolumeType='gp2'))
                            ]))

    auto_scaling_group = self.template.add_resource(
        AutoScalingGroup(self.AUTOSCALING_GROUP_NAME,
                         MaxSize=self.MAX_SIZE,
                         MinSize=self.MIN_SIZE,
                         Cooldown=60,
                         LaunchConfigurationName=Ref(launch_configuration),
                         VPCZoneIdentifier=[self.SUBNET]))

    """ Scale UP Policy """
    scaling_up_policy = self.template.add_resource(
        ScalingPolicy('{0}ScaleUpPolicy'.format(
            self.AUTOSCALING_GROUP_NAME),
            AdjustmentType='ChangeInCapacity',
            AutoScalingGroupName=Ref(auto_scaling_group),
            Cooldown=60,
            ScalingAdjustment='1'))
    # .items() instead of Python-2-only .iteritems()
    for alarm_name, alarm in self.AUTOSCALE_UP_ALARMS.items():
        """ Cloud Watch Alarm """
        self.template.add_resource(
            Alarm('{0}ScaleUp{1}'.format(self.AUTOSCALING_GROUP_NAME,
                                         alarm_name),
                  ActionsEnabled=True,
                  Namespace='AWS/ECS',
                  MetricName=alarm['scaling_metric'],
                  ComparisonOperator='GreaterThanOrEqualToThreshold',
                  Threshold=alarm['scale_up_threshold'],
                  EvaluationPeriods=1,
                  Statistic=alarm['statistic'],
                  Period=alarm['period'],
                  AlarmActions=[Ref(scaling_up_policy)],
                  Dimensions=[
                      MetricDimension(Name='ClusterName',
                                      Value=Ref(ecs_cluster))
                  ]))

    """ Scale DOWN Policy """
    scaling_down_policy = self.template.add_resource(
        ScalingPolicy('{0}ScaleDownPolicy'.format(
            self.AUTOSCALING_GROUP_NAME),
            AdjustmentType='ChangeInCapacity',
            AutoScalingGroupName=Ref(auto_scaling_group),
            Cooldown=60,
            ScalingAdjustment='-1'))
    # .items() instead of Python-2-only .iteritems()
    for alarm_name, alarm in self.AUTOSCALE_DOWN_ALARMS.items():
        """ Cloud Watch Alarm """
        self.template.add_resource(
            Alarm('{0}ScaleDown{1}'.format(self.AUTOSCALING_GROUP_NAME,
                                           alarm_name),
                  ActionsEnabled=True,
                  Namespace='AWS/ECS',
                  MetricName=alarm['scaling_metric'],
                  ComparisonOperator='LessThanOrEqualToThreshold',
                  Threshold=alarm['scale_down_threshold'],
                  EvaluationPeriods=1,
                  Statistic=alarm['statistic'],
                  Period=alarm['period'],
                  AlarmActions=[Ref(scaling_down_policy)],
                  Dimensions=[
                      MetricDimension(Name='ClusterName',
                                      Value=Ref(ecs_cluster))
                  ]))
dict(config=cloudformation.InitConfig( commands=dict(register_cluster=dict(command=Join( "", [ "#!/bin/bash\n", # Register the cluster "echo ECS_CLUSTER=", Ref(main_cluster), " >> /etc/ecs/ecs.config\n", # Enable CloudWatch docker logging 'echo \'ECS_AVAILABLE_LOGGING_DRIVERS=', '["json-file","awslogs"]\'', " >> /etc/ecs/ecs.config\n", ]))), files=cloudformation.InitFiles({ "/etc/cfn/cfn-hup.conf": cloudformation.InitFile( content=Join("", [ "[main]\n", "template=", Ref(AWS_STACK_ID), "\n", "region=", Ref(AWS_REGION), "\n", ]), mode="000400", owner="root", group="root", ), "/etc/cfn/hooks.d/cfn-auto-reload.conf": cloudformation.InitFile( content=Join("", [ "[cfn-auto-reloader-hook]\n", "triggers=post.update\n", "path=Resources.%s." % container_instance_configuration_name, "Metadata.AWS::CloudFormation::Init\n", "action=/opt/aws/bin/cfn-init -v ", " --stack", Ref(AWS_STACK_NAME), " --resource %s" % container_instance_configuration_name, " --region ", Ref("AWS::Region"), "\n", "runas=root\n", ])) }), services=dict(sysvinit=cloudformation.InitServices({ 'cfn-hup': cloudformation.InitService( enabled=True, ensureRunning=True, files=[ "/etc/cfn/cfn-hup.conf", "/etc/cfn/hooks.d/cfn-auto-reloader.conf", ]), })))))),
def main():
    """Create an ElastiCache Redis node and an EC2 web server instance,
    then print the resulting CloudFormation template as JSON."""
    template = Template()

    # Description.  The literal pieces are concatenated by Python, so
    # each keeps a trailing space (the original ran the words together:
    # "...Redis:Sample template...AmazonElastiCache...").
    template.set_description(
        'AWS CloudFormation Sample Template ElastiCache_Redis: '
        'Sample template showing how to create an Amazon '
        'ElastiCache Redis Cluster. **WARNING** This template '
        'creates an Amazon EC2 Instance and an Amazon ElastiCache '
        'Cluster. You will be billed for the AWS resources used '
        'if you create a stack from this template.')

    # Mappings
    template.add_mapping('AWSInstanceType2Arch', {
        't1.micro': {'Arch': 'PV64'},
        't2.micro': {'Arch': 'HVM64'},
        't2.small': {'Arch': 'HVM64'},
        't2.medium': {'Arch': 'HVM64'},
        'm1.small': {'Arch': 'PV64'},
        'm1.medium': {'Arch': 'PV64'},
        'm1.large': {'Arch': 'PV64'},
        'm1.xlarge': {'Arch': 'PV64'},
        'm2.xlarge': {'Arch': 'PV64'},
        'm2.2xlarge': {'Arch': 'PV64'},
        'm2.4xlarge': {'Arch': 'PV64'},
        'm3.medium': {'Arch': 'HVM64'},
        'm3.large': {'Arch': 'HVM64'},
        'm3.xlarge': {'Arch': 'HVM64'},
        'm3.2xlarge': {'Arch': 'HVM64'},
        'c1.medium': {'Arch': 'PV64'},
        'c1.xlarge': {'Arch': 'PV64'},
        'c3.large': {'Arch': 'HVM64'},
        'c3.xlarge': {'Arch': 'HVM64'},
        'c3.2xlarge': {'Arch': 'HVM64'},
        'c3.4xlarge': {'Arch': 'HVM64'},
        'c3.8xlarge': {'Arch': 'HVM64'},
        'c4.large': {'Arch': 'HVM64'},
        'c4.xlarge': {'Arch': 'HVM64'},
        'c4.2xlarge': {'Arch': 'HVM64'},
        'c4.4xlarge': {'Arch': 'HVM64'},
        'c4.8xlarge': {'Arch': 'HVM64'},
        'g2.2xlarge': {'Arch': 'HVMG2'},
        'r3.large': {'Arch': 'HVM64'},
        'r3.xlarge': {'Arch': 'HVM64'},
        'r3.2xlarge': {'Arch': 'HVM64'},
        'r3.4xlarge': {'Arch': 'HVM64'},
        'r3.8xlarge': {'Arch': 'HVM64'},
        'i2.xlarge': {'Arch': 'HVM64'},
        'i2.2xlarge': {'Arch': 'HVM64'},
        'i2.4xlarge': {'Arch': 'HVM64'},
        'i2.8xlarge': {'Arch': 'HVM64'},
        'd2.xlarge': {'Arch': 'HVM64'},
        'd2.2xlarge': {'Arch': 'HVM64'},
        'd2.4xlarge': {'Arch': 'HVM64'},
        'd2.8xlarge': {'Arch': 'HVM64'},
        'hi1.4xlarge': {'Arch': 'HVM64'},
        'hs1.8xlarge': {'Arch': 'HVM64'},
        'cr1.8xlarge': {'Arch': 'HVM64'},
        'cc2.8xlarge': {'Arch': 'HVM64'}
    })

    template.add_mapping('AWSRegionArch2AMI', {
        'us-east-1': {'PV64': 'ami-0f4cfd64',
                      'HVM64': 'ami-0d4cfd66',
                      'HVMG2': 'ami-5b05ba30'},
        'us-west-2': {'PV64': 'ami-d3c5d1e3',
                      'HVM64': 'ami-d5c5d1e5',
                      'HVMG2': 'ami-a9d6c099'},
        'us-west-1': {'PV64': 'ami-85ea13c1',
                      'HVM64': 'ami-87ea13c3',
                      'HVMG2': 'ami-37827a73'},
        'eu-west-1': {'PV64': 'ami-d6d18ea1',
                      'HVM64': 'ami-e4d18e93',
                      'HVMG2': 'ami-72a9f105'},
        'eu-central-1': {'PV64': 'ami-a4b0b7b9',
                         'HVM64': 'ami-a6b0b7bb',
                         'HVMG2': 'ami-a6c9cfbb'},
        'ap-northeast-1': {'PV64': 'ami-1a1b9f1a',
                           'HVM64': 'ami-1c1b9f1c',
                           'HVMG2': 'ami-f644c4f6'},
        'ap-southeast-1': {'PV64': 'ami-d24b4280',
                           'HVM64': 'ami-d44b4286',
                           'HVMG2': 'ami-12b5bc40'},
        'ap-southeast-2': {'PV64': 'ami-ef7b39d5',
                           'HVM64': 'ami-db7b39e1',
                           'HVMG2': 'ami-b3337e89'},
        'sa-east-1': {'PV64': 'ami-5b098146',
                      'HVM64': 'ami-55098148',
                      'HVMG2': 'NOT_SUPPORTED'},
        'cn-north-1': {'PV64': 'ami-bec45887',
                      'HVM64': 'ami-bcc45885',
                      'HVMG2': 'NOT_SUPPORTED'}
    })

    template.add_mapping('Region2Principal', {
        'us-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'us-west-2': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'us-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'eu-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-southeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-northeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-southeast-2': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'sa-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'cn-north-1': {'EC2Principal': 'ec2.amazonaws.com.cn',
                       'OpsWorksPrincipal': 'opsworks.amazonaws.com.cn'},
        'eu-central-1': {'EC2Principal': 'ec2.amazonaws.com',
                         'OpsWorksPrincipal': 'opsworks.amazonaws.com'}
    })

    # Parameters
    cachenodetype = template.add_parameter(Parameter(
        'ClusterNodeType',
        Description='The compute and memory capacity of the nodes in the Redis'
                    ' Cluster',
        Type='String',
        Default='cache.m1.small',
        AllowedValues=['cache.m1.small', 'cache.m1.large', 'cache.m1.xlarge',
                       'cache.m2.xlarge', 'cache.m2.2xlarge',
                       'cache.m2.4xlarge', 'cache.c1.xlarge'],
        ConstraintDescription='must select a valid Cache Node type.',
    ))

    instancetype = template.add_parameter(Parameter(
        'InstanceType',
        Description='WebServer EC2 instance type',
        Type='String',
        Default='t2.micro',
        AllowedValues=['t1.micro', 't2.micro', 't2.small', 't2.medium',
                       'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge',
                       'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'm3.medium',
                       'm3.large', 'm3.xlarge', 'm3.2xlarge', 'c1.medium',
                       'c1.xlarge', 'c3.large', 'c3.xlarge', 'c3.2xlarge',
                       'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge',
                       'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'g2.2xlarge',
                       'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge',
                       'r3.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge',
                       'i2.8xlarge', 'd2.xlarge', 'd2.2xlarge', 'd2.4xlarge',
                       'd2.8xlarge', 'hi1.4xlarge', 'hs1.8xlarge',
                       'cr1.8xlarge', 'cc2.8xlarge', 'cg1.4xlarge'],
        ConstraintDescription='must be a valid EC2 instance type.',
    ))

    keyname = template.add_parameter(Parameter(
        'KeyName',
        Description='Name of an existing EC2 KeyPair to enable SSH access'
                    ' to the instance',
        Type='AWS::EC2::KeyPair::KeyName',
        ConstraintDescription='must be the name of an existing EC2 KeyPair.',
    ))

    sshlocation = template.add_parameter(Parameter(
        'SSHLocation',
        Description='The IP address range that can be used to SSH to'
                    ' the EC2 instances',
        Type='String',
        MinLength='9',
        MaxLength='18',
        Default='0.0.0.0/0',
        AllowedPattern='(\\d{1,3})\\.(\\d{1,3})\\.'
                       '(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})',
        ConstraintDescription='must be a valid IP CIDR range of the'
                              ' form x.x.x.x/x.'
    ))

    # Resources
    webserverrole = template.add_resource(iam.Role(
        'WebServerRole',
        AssumeRolePolicyDocument=PolicyDocument(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[AssumeRole],
                    Principal=Principal('Service',
                                        [FindInMap('Region2Principal',
                                                   Ref('AWS::Region'),
                                                   'EC2Principal')]),
                )
            ]
        ),
        Path='/',
    ))

    # Allow the web server to discover the Redis endpoint at runtime.
    template.add_resource(iam.PolicyType(
        'WebServerRolePolicy',
        PolicyName='WebServerRole',
        PolicyDocument=PolicyDocument(
            Statement=[awacs.aws.Statement(
                Action=[awacs.aws.Action("elasticache",
                                         "DescribeCacheClusters")],
                Resource=["*"],
                Effect=awacs.aws.Allow
            )]
        ),
        Roles=[Ref(webserverrole)],
    ))

    webserverinstanceprofile = template.add_resource(iam.InstanceProfile(
        'WebServerInstanceProfile',
        Path='/',
        Roles=[Ref(webserverrole)],
    ))

    webserversg = template.add_resource(ec2.SecurityGroup(
        'WebServerSecurityGroup',
        GroupDescription='Enable HTTP and SSH access',
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='22',
                ToPort='22',
                CidrIp=Ref(sshlocation),
            ),
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='80',
                ToPort='80',
                CidrIp='0.0.0.0/0',
            )
        ]
    ))

    webserverinstance = template.add_resource(ec2.Instance(
        'WebServerInstance',
        Metadata=cloudformation.Metadata(
            cloudformation.Init({
                'config': cloudformation.InitConfig(
                    packages={
                        'yum': {
                            'httpd': [],
                            'php': [],
                            'php-devel': [],
                            'gcc': [],
                            'make': []
                        }
                    },
                    files=cloudformation.InitFiles({
                        # Sample PHP page exercising the Redis cluster.
                        '/var/www/html/index.php': cloudformation.InitFile(
                            content=Join('', [
                                '<?php\n',
                                'echo \"<h1>AWS CloudFormation sample'
                                ' application for Amazon ElastiCache'
                                ' Redis Cluster</h1>\";\n',
                                '\n',
                                '$cluster_config = json_decode('
                                'file_get_contents(\'/tmp/cacheclusterconfig\''
                                '), true);\n',
                                '$endpoint = $cluster_config[\'CacheClusters'
                                '\'][0][\'CacheNodes\'][0][\'Endpoint\'][\'Add'
                                'ress\'];\n',
                                '$port = $cluster_config[\'CacheClusters\'][0]'
                                '[\'CacheNodes\'][0][\'Endpoint\'][\'Port\'];'
                                '\n',
                                '\n',
                                'echo \"<p>Connecting to Redis Cache Cluster '
                                'node \'{$endpoint}\' on port {$port}</p>\";'
                                '\n',
                                '\n',
                                '$redis=new Redis();\n',
                                '$redis->connect($endpoint, $port);\n',
                                '$redis->set(\'testkey\', \'Hello World!\');'
                                '\n',
                                '$return = $redis->get(\'testkey\');\n',
                                '\n',
                                'echo \"<p>Retrieved value: $return</p>\";'
                                '\n',
                                '?>\n'
                            ]),
                            mode='000644',
                            owner='apache',
                            group='apache'
                        ),
                        # Refresh the cached cluster config every 5 minutes.
                        '/etc/cron.d/get_cluster_config':
                        cloudformation.InitFile(
                            content='*/5 * * * * root'
                                    ' /usr/local/bin/get_cluster_config',
                            mode='000644',
                            owner='root',
                            group='root'
                        ),
                        '/usr/local/bin/get_cluster_config':
                        cloudformation.InitFile(
                            content=Join('', [
                                '#! /bin/bash\n',
                                'aws elasticache describe-cache-clusters ',
                                '        --cache-cluster-id ',
                                Ref('RedisCluster'),
                                '        --show-cache-node-info'
                                ' --region ', Ref('AWS::Region'),
                                ' > /tmp/cacheclusterconfig\n'
                            ]),
                            mode='000755',
                            owner='root',
                            group='root'
                        ),
                        # Build the phpredis extension from source.
                        '/usr/local/bin/install_phpredis':
                        cloudformation.InitFile(
                            content=Join('', [
                                '#! /bin/bash\n',
                                'cd /tmp\n',
                                'wget https://github.com/nicolasff/'
                                'phpredis/zipball/master -O phpredis.zip'
                                '\n',
                                'unzip phpredis.zip\n',
                                'cd nicolasff-phpredis-*\n',
                                'phpize\n',
                                './configure\n',
                                'make && make install\n',
                                'touch /etc/php.d/redis.ini\n',
                                'echo extension=redis.so > /etc/php.d/'
                                'redis.ini\n'
                            ]),
                            mode='000755',
                            owner='root',
                            group='root'
                        ),
                        '/etc/cfn/cfn-hup.conf': cloudformation.InitFile(
                            content=Join('', [
                                '[main]\n',
                                'stack=', Ref('AWS::StackId'), '\n',
                                'region=', Ref('AWS::Region'), '\n'
                            ]),
                            mode='000400',
                            owner='root',
                            group='root'
                        ),
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf':
                        cloudformation.InitFile(
                            content=Join('', [
                                '[cfn-auto-reloader-hook]\n',
                                'triggers=post.update\n',
                                'path=Resources.WebServerInstance.Metadata'
                                '.AWS::CloudFormation::Init\n',
                                'action=/opt/aws/bin/cfn-init -v ',
                                '        --stack ', Ref('AWS::StackName'),
                                '        --resource WebServerInstance ',
                                '        --region ', Ref('AWS::Region'), '\n',
                                'runas=root\n'
                            ]),
                            # Why doesn't the Amazon template have this?
                            # mode='000400',
                            # owner='root',
                            # group='root'
                        ),
                    }),
                    commands={
                        '01-install_phpredis': {
                            'command': '/usr/local/bin/install_phpredis'
                        },
                        '02-get-cluster-config': {
                            'command': '/usr/local/bin/get_cluster_config'
                        }
                    },
                    services={
                        "sysvinit": cloudformation.InitServices({
                            "httpd": cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                            ),
                            "cfn-hup": cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                                files=['/etc/cfn/cfn-hup.conf',
                                       '/etc/cfn/hooks.d/'
                                       'cfn-auto-reloader.conf']
                            ),
                        }),
                    },
                )
            })
        ),
        ImageId=FindInMap('AWSRegionArch2AMI', Ref('AWS::Region'),
                          FindInMap('AWSInstanceType2Arch',
                                    Ref(instancetype), 'Arch')),
        InstanceType=Ref(instancetype),
        SecurityGroups=[Ref(webserversg)],
        KeyName=Ref(keyname),
        IamInstanceProfile=Ref(webserverinstanceprofile),
        UserData=Base64(Join('', [
            '#!/bin/bash -xe\n',
            'yum update -y aws-cfn-bootstrap\n',
            '# Setup the PHP sample application\n',
            '/opt/aws/bin/cfn-init -v ',
            '         --stack ', Ref('AWS::StackName'),
            '         --resource WebServerInstance ',
            '         --region ', Ref('AWS::Region'), '\n',
            '# Signal the status of cfn-init\n',
            '/opt/aws/bin/cfn-signal -e $? ',
            '         --stack ', Ref('AWS::StackName'),
            '         --resource WebServerInstance ',
            '         --region ', Ref('AWS::Region'), '\n'
        ])),
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M')
        ),
        # Typo fixed: was 'Created using Troposhpere'
        Tags=Tags(Application=Ref('AWS::StackId'),
                  Details='Created using Troposphere')
    ))

    redisclustersg = template.add_resource(elasticache.SecurityGroup(
        'RedisClusterSecurityGroup',
        Description='Lock the cluster down',
    ))

    # Only the web server's security group may reach the cluster.
    template.add_resource(elasticache.SecurityGroupIngress(
        'RedisClusterSecurityGroupIngress',
        CacheSecurityGroupName=Ref(redisclustersg),
        EC2SecurityGroupName=Ref(webserversg),
    ))

    template.add_resource(elasticache.CacheCluster(
        'RedisCluster',
        Engine='redis',
        CacheNodeType=Ref(cachenodetype),
        NumCacheNodes='1',
        CacheSecurityGroupNames=[Ref(redisclustersg)],
    ))

    # Outputs
    template.add_output([
        Output(
            'WebsiteURL',
            Description='Application URL',
            Value=Join('', [
                'http://',
                GetAtt(webserverinstance, 'PublicDnsName'),
            ])
        )
    ])

    # Print CloudFormation Template
    print(template.to_json())