def buildTemplate(self):
    """Build a basic VPC template: the VPC itself, an internet gateway
    (attached to the VPC), a route table, and a default route to the
    internet, plus VpcId/RouteTableId outputs.

    Returns:
        The populated troposphere Template (``self.t``).
    """
    self.t.add_metadata({
        "Comments": "This will build a standard VPC with public and private subnets",
        "Version": VERSION,
        "Author": "Derek Belrose <*****@*****.**>",
    })

    # Back reference to the stack name pseudo parameter (used in tags).
    # (Removed unused ref_stack_id / ref_region locals.)
    ref_stack_name = Ref('AWS::StackName')

    # --- Parameters -----------------------------------------------------
    self.t.add_parameter(Parameter(
        "AvailabilityZone",
        Description="Default availablility zone to run this in",
        Type="String",
        Default="us-east-1",
        # NOTE(review): these values are region names, not AZ names
        # (e.g. "us-east-1a") -- confirm intent. "us-west-3" does not
        # exist and was removed from the allowed values.
        AllowedValues=[
            "us-east-1",
            "us-east-2",
            "us-west-1",
            "us-west-2",
        ],
        # Fixed typo: "valud" -> "valid".
        ConstraintDescription="must be a valid AWS AZ in the US"
    ))

    paramCIDR = self.t.add_parameter(Parameter(
        "VPCCidrBlock",
        Description="The CIDR block that the VPC will contain",
        Type="String",
        Default="10.0.0.0/16",
    ))

    paramDepartment = self.t.add_parameter(Parameter(
        "Department",
        Description="Department for Tags",
        Type="String",
        Default="OPS",
        AllowedValues=[
            'OPS',
            'LAG',
            'DMS',
            'GIS',
        ]
    ))

    # --- Resources ------------------------------------------------------
    vpc = self.t.add_resource(ec2VPC(
        "VPC",
        CidrBlock=Ref(paramCIDR),
        EnableDnsSupport="True",
        EnableDnsHostnames="True",
        Tags=Tags(
            Name=Join('', [ref_stack_name, '-VPC']),
            Department=Ref(paramDepartment)
        )
    ))

    ig = self.t.add_resource(InternetGateway(
        "InternetGateway",
        Tags=Tags(
            Name=Join('', [ref_stack_name, '-ig']),
            Department=Ref(paramDepartment)
        )
    ))

    vpciga = self.t.add_resource(VPCGatewayAttachment(
        "GatewayAttachment",
        InternetGatewayId=Ref(ig),
        VpcId=Ref(vpc)
    ))

    rt = self.t.add_resource(RouteTable(
        "RouteTable",
        VpcId=Ref(vpc)
    ))

    # BUG FIX: a route whose target is an internet gateway must depend on
    # the gateway attachment, otherwise stack creation can fail
    # intermittently with "route table ... and network gateway ... not
    # attached".
    eroute = self.t.add_resource(Route(
        "ExternalRoute",
        RouteTableId=Ref(rt),
        DestinationCidrBlock='0.0.0.0/0',
        GatewayId=Ref(ig),
        DependsOn=vpciga.title
    ))

    # --- Outputs --------------------------------------------------------
    self.t.add_output([
        Output(
            "VpcId",
            Description="VPC ID of the created VPC",
            Value=Ref(vpc)
        ),
        Output(
            "RouteTableId",
            Description="RouteTable Id",
            Value=Ref(rt)
        ),
    ])

    return self.t
def create_network(self):
    """Create the VPC network layer.

    Adds a default network ACL and NAT security groups, then for each
    requested availability zone creates one public and one private
    subnet, each with its own route table, association, and default
    route.  Public subnets route 0.0.0.0/0 through the internet gateway
    (and host a NAT instance); private subnets route through either a
    NAT instance or a NAT gateway, chosen by the "UseNatInstances" /
    "UseNatGateway" conditions.  Finally emits comma-separated subnet-id
    outputs per type and the list of AZs used.
    """
    t = self.template
    self.create_gateway()
    vpc_id = Ref("VPC")
    t.add_resource(ec2.NetworkAcl('DefaultACL', VpcId=vpc_id))
    self.create_nat_security_groups()
    subnets = {'public': [], 'private': []}
    net_types = subnets.keys()
    zones = []
    # One (public, private) subnet pair per availability zone.
    for i in range(self.local_parameters["AZCount"]):
        az = Select(i, GetAZs(""))
        zones.append(az)
        name_suffix = i
        for net_type in net_types:
            name_prefix = net_type.capitalize()
            subnet_name = "%sSubnet%s" % (name_prefix, name_suffix)
            subnets[net_type].append(subnet_name)
            # Subnet CIDRs come from the "PublicSubnets"/"PrivateSubnets"
            # list parameters, indexed by AZ number.
            t.add_resource(
                ec2.Subnet(subnet_name,
                           AvailabilityZone=az,
                           VpcId=vpc_id,
                           DependsOn=GW_ATTACH,
                           CidrBlock=Select(i, Ref("%sSubnets" % name_prefix)),
                           Tags=Tags(type=net_type)))
            route_table_name = "%sRouteTable%s" % (name_prefix, name_suffix)
            t.add_resource(
                ec2.RouteTable(route_table_name,
                               VpcId=vpc_id,
                               Tags=[ec2.Tag('type', net_type)]))
            t.add_resource(
                ec2.SubnetRouteTableAssociation(
                    "%sRouteTableAssociation%s" % (name_prefix, name_suffix),
                    SubnetId=Ref(subnet_name),
                    RouteTableId=Ref(route_table_name)))
            route_name = '%sRoute%s' % (name_prefix, name_suffix)
            if net_type == 'public':
                # the public subnets are where the NAT instances live,
                # so their default route needs to go to the AWS
                # Internet Gateway
                t.add_resource(
                    ec2.Route(route_name,
                              RouteTableId=Ref(route_table_name),
                              DestinationCidrBlock="0.0.0.0/0",
                              GatewayId=Ref(GATEWAY)))
                self.create_nat_instance(i, subnet_name)
            else:
                # Private subnets are where actual instances will live
                # so their gateway needs to be through the nat instances
                # (exactly one of InstanceId/NatGatewayId resolves; the
                # other collapses to AWS::NoValue).
                t.add_resource(
                    ec2.Route(route_name,
                              RouteTableId=Ref(route_table_name),
                              DestinationCidrBlock='0.0.0.0/0',
                              InstanceId=If(
                                  "UseNatInstances",
                                  Ref(NAT_INSTANCE_NAME % name_suffix),
                                  Ref("AWS::NoValue")),
                              NatGatewayId=If(
                                  "UseNatGateway",
                                  Ref(NAT_GATEWAY_NAME % name_suffix),
                                  Ref("AWS::NoValue"))))
    # Export the created subnet ids (comma-joined, grouped by type).
    for net_type in net_types:
        t.add_output(
            Output("%sSubnets" % net_type.capitalize(),
                   Value=Join(",", [Ref(sn) for sn in subnets[net_type]])))
    self.template.add_output(
        Output("AvailabilityZones", Value=Join(",", zones)))
def attach(self):
    """Attach a bootstrapped Chef-node EC2 instance to the template.

    Adds EC2 parameters/resources via the helper attach()ers, a security
    group (SSH restricted to the SSHLocation parameter; 80/8080 open), and
    an instance whose AMI is chosen by the 'IsCentos7' condition.  The
    instance's cfn-init metadata installs packages, CloudWatch Logs, and
    Chef, then runs a first chef-client.  Returns the template.
    """
    parameters = ec2_parameters.EC2Parameters(self.template)
    parameters.attach()
    resources = ec2_resources.EC2Resources(self.template)
    resources.attach()
    security_group = self.template.add_resource(ec2.SecurityGroup(
        'SecurityGroup',
        GroupDescription='Allows SSH access from anywhere',
        SecurityGroupIngress=[
            # SSH limited to the caller-supplied CIDR parameter.
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort=22,
                ToPort=22,
                CidrIp=Ref(self.template.parameters['SSHLocation'])
            ),
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort=80,
                ToPort=80,
                CidrIp='0.0.0.0/0'
            ),
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort=8080,
                ToPort=8080,
                CidrIp='0.0.0.0/0'
            )
        ],
        VpcId=ImportValue("prod2-VPCID"),
        Tags=Tags(
            Name='{0}SecurityGroup'.format(EC2_INSTANCE_NAME)
        )
    ))
    self.template.add_resource(ec2.Instance(
        EC2_INSTANCE_NAME,
        # AMI: CentOS 7 or Amazon Linux map, keyed by region and the
        # architecture derived from the instance type.
        ImageId=If(
            'IsCentos7',
            FindInMap(
                "AWSRegionArch2Centos7LinuxAMI",
                Ref("AWS::Region"),
                FindInMap("AWSInstanceType2Arch",
                          Ref(self.template.parameters['InstanceType']),
                          "Arch")),
            FindInMap(
                "AWSRegionArch2AmazonLinuxAMI",
                Ref("AWS::Region"),
                FindInMap("AWSInstanceType2Arch",
                          Ref(self.template.parameters['InstanceType']),
                          "Arch"))
        ),
        InstanceType=Ref(self.template.parameters['InstanceType']),
        KeyName=FindInMap('Region2KeyPair', Ref('AWS::Region'), 'key'),
        SecurityGroupIds=[Ref(security_group)],
        SubnetId=ImportValue("prod2-SubnetPublicAZ2"),
        IamInstanceProfile=Ref(
            self.template.resources['InstanceProfileResource']),
        # User data: OS-specific bootstrap (CentOS needs cfn-bootstrap
        # installed by hand), then cfn-init + cfn-signal.
        UserData=Base64(Join('', [
            If('IsCentos7',
               Join('\n', [
                   '#!/bin/bash ',
                   'sudo yum update -y ',
                   'sudo yum install -y vim ',
                   'sudo yum install -y epel-release ',
                   'sudo yum install -y awscli ',
                   '# Install CFN-BootStrap ',
                   ('/usr/bin/easy_install --script-dir /opt/aws/bin '
                    'https://s3.amazonaws.com/cloudformation-examples/'
                    'aws-cfn-bootstrap-latest.tar.gz '),
                   ('cp -v /usr/lib/python2*/site-packages/aws_cfn_'
                    'bootstrap*/init/redhat/cfn-hup /etc/init.d '),
                   'chmod +x /etc/init.d/cfn-hup ',
               ]),
               Join('\n', [
                   '#!/bin/bash -xe ',
                   'yum update -y ',
                   '# Update CFN-BootStrap ',
                   'yum update -y aws-cfn-bootstrap',
                   'sudo yum install -y awslogs ',
               ])),
            Join('', [
                '# Install the files and packages from the metadata\n'
                '/opt/aws/bin/cfn-init -v ',
                ' --stack ',
                Ref('AWS::StackName'),
                ' --resource ',
                EC2_INSTANCE_NAME,
                ' --configsets InstallAndRun',
                ' --region ',
                Ref('AWS::Region'),
                ' --role ',
                Ref(self.template.resources['RoleResource']),
                '\n',
                '# Signal the status from cfn-init\n',
                '/opt/aws/bin/cfn-signal -e $? '
                ' --stack ',
                Ref('AWS::StackName'),
                ' --resource ',
                EC2_INSTANCE_NAME,
                ' --region ',
                Ref('AWS::Region'),
                ' --role ',
                Ref(self.template.resources['RoleResource']),
                '\n'
            ]),
        ])),
        Metadata=cloudformation.Metadata(
            cloudformation.Init(
                # Config set order: packages -> logs -> chef -> configure.
                cloudformation.InitConfigSets(
                    InstallAndRun=['Install', 'InstallLogs', 'InstallChef',
                                   'Configure']
                ),
                Install=cloudformation.InitConfig(
                    packages={
                        'yum': {
                            'stress': [],
                            'docker': []
                        }
                    },
                    files={
                        # cfn-hup poll config (1-minute interval).
                        '/etc/cfn/cfn-hup.conf': {
                            'content': Join('\n', [
                                '[main]',
                                'stack={{stackid}}',
                                'region={{region}}',
                                'interval=1'
                            ]),
                            'context': {
                                'stackid': Ref('AWS::StackId'),
                                'region': Ref('AWS::Region')
                            },
                            'mode': '000400',
                            'owner': 'root',
                            'group': 'root'
                        },
                        # Re-run cfn-init whenever instance metadata changes.
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf': {
                            'content': Join('\n', [
                                '[cfn-auto-reloader-hook]',
                                'triggers=post.update',
                                ('path=Resources.{{instance_name}}'
                                 '.Metadata'
                                 '.AWS::CloudFormation::Init'),
                                ('action=/opt/aws/bin/cfn-init -v '
                                 ' --stack {{stack_name}} '
                                 ' --resource {{instance_name}} '
                                 ' --configsets {{config_sets}} '
                                 ' --region {{region}} '),
                                'runas={{run_as}}'
                            ]),
                            'context': {
                                'instance_name': EC2_INSTANCE_NAME,
                                'stack_name': Ref('AWS::StackName'),
                                'region': Ref('AWS::Region'),
                                'config_sets': 'InstallAndRun',
                                'run_as': 'root'
                            }
                        }
                    },
                    services={
                        'sysvinit': {
                            'docker': {
                                'enabled': 'true',
                                'ensureRunning': 'true'
                            },
                            'cfn-hup': {
                                'enabled': 'true',
                                'ensureRunning': 'true'
                            }
                        }
                    },
                    commands={
                        # Smoke-test marker file for this config step.
                        '01_test': {
                            'command': 'echo "$CFNTEST" > Install.txt',
                            'env': {
                                'CFNTEST': 'I come from Install.'
                            },
                            'cwd': '~'
                        }
                    }
                ),
                InstallLogs=cloudformation.InitConfig(
                    files={
                        # Ship cloud-init / cfn / httpd logs to CloudWatch.
                        '/etc/awslogs/awslogs.conf': {
                            'content': Join('\n', [
                                '[general]',
                                ('state_file= /var/awslogs/'
                                 'state/agent-state'),
                                '',
                                '[/var/log/cloud-init.log]',
                                'file = /var/log/cloud-init.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cloud-init.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/cloud-init-output.log]',
                                'file = /var/log/cloud-init-output.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cloud-init-output.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/cfn-init.log]',
                                'file = /var/log/cfn-init.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cfn-init.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/cfn-hup.log]',
                                'file = /var/log/cfn-hup.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cfn-hup.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/cfn-wire.log]',
                                'file = /var/log/cfn-wire.log',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/cfn-wire.log'),
                                'datetime_format = {{datetime_format}}',
                                '',
                                '[/var/log/httpd]',
                                'file = /var/log/httpd/*',
                                'log_group_name = {{log_group_name}}',
                                ('log_stream_name = '
                                 '{instance_id}/httpd'),
                                'datetime_format = {{datetime_format}}'
                            ]),
                            'context': {
                                'log_group_name': Ref(
                                    self.template.resources[
                                        'LogGroupResource']),
                                'datetime_format': '%d/%b/%Y:%H:%M:%S'
                            }
                        },
                        '/etc/awslogs/awscli.conf': {
                            'content': Join('\n', [
                                '[plugins]',
                                'cwlogs = cwlogs',
                                '[default]',
                                'region = {{region}}'
                            ]),
                            'context': {
                                'region': Ref('AWS::Region')
                            },
                            'mode': '000444',
                            'owner': 'root',
                            'group': 'root'
                        }
                    },
                    commands={
                        '01_create_state_directory': {
                            'command': 'mkdir -p /var/awslogs/state'
                        },
                        '02_test': {
                            'command': 'echo "$CFNTEST" > InstallLogs.txt',
                            'env': {
                                'CFNTEST': 'I come from install_logs.'
                            },
                            'cwd': '~'
                        },
                        # CentOS has no awslogs yum package; install the
                        # agent from the setup script instead.
                        '03_install_aws_logs_if_centos': {
                            'command': If(
                                'IsCentos7',
                                Join('\n', [
                                    ('curl https://s3.amazonaws.com/aws-'
                                     'cloudwatch/downloads/latest/awslogs-'
                                     'agent-setup.py -O'),
                                    Join('', [
                                        'sudo python ./awslogs-agent-setup.py',
                                        ' --configfile /etc/awslogs/awslogs',
                                        '.conf --non-interactive --region ',
                                        Ref('AWS::Region')])
                                ]),
                                Join('', [
                                    'echo "not installing awslogs from ',
                                    'from source"'
                                ]))
                        }
                    },
                    services={
                        'sysvinit': {
                            'awslogs': {
                                'enabled': 'true',
                                'ensureRunning': 'true',
                                'files': ['/etc/awslogs/awslogs.conf']
                            }
                        }
                    }
                ),
                InstallChef=cloudformation.InitConfig(
                    commands={
                        # Official omnitruck installer for chef-client.
                        '01_invoke_omnitruck_install': {
                            'command': (
                                'curl -L '
                                'https://omnitruck.chef.io/install.sh | '
                                'bash'
                            ),
                        }
                    },
                    files={
                        # Chef config + validator key pulled from S3 using
                        # the instance role credentials (S3AccessCreds).
                        '/etc/chef/client.rb': {
                            'source': S3_CLIENT_RB,
                            'mode': '000600',
                            'owner': 'root',
                            'group': 'root',
                            'authentication': 'S3AccessCreds'
                        },
                        '/etc/chef/jasondebolt-validator.pem': {
                            'source': S3_VALIDATOR_PEM,
                            'mode': '000600',
                            'owner': 'root',
                            'group': 'root',
                            'authentication': 'S3AccessCreds'
                        },
                        '/etc/chef/first-run.json': {
                            'source': S3_FIRST_RUN,
                            'mode': '000600',
                            'owner': 'root',
                            'group': 'root',
                            'authentication': 'S3AccessCreds'
                        }
                    }
                ),
                Configure=cloudformation.InitConfig(
                    commands={
                        '01_test': {
                            'command': 'echo "$CFNTEST" > Configure.txt',
                            'env': {
                                'CFNTEST': 'I come from Configure.'
                            },
                            'cwd': '~'
                        },
                        # First chef-client run with the downloaded run list.
                        '02_chef_bootstrap': {
                            'command': (
                                'chef-client -j '
                                '/etc/chef/first-run.json'
                            )
                        }
                    }
                )
            ),
            cloudformation.Authentication({
                'S3AccessCreds': cloudformation.AuthenticationBlock(
                    type='S3',
                    roleName=Ref(self.template.resources['RoleResource']))
            })
        ),
        Tags=Tags(
            Name=Ref('AWS::StackName'),
            env='ops'
        )
    ))
    self.template.add_output(Output(
        'PublicIp',
        Description='Public IP of the newly created EC2 instance',
        Value=GetAtt(EC2_INSTANCE_NAME, 'PublicIp')
    ))
    self.template.add_output(Output(
        'LinuxType',
        Description='The linux type of the EC2 instance.',
        Value=If('IsCentos7', 'centos_7', 'amazon_linux')
    ))
    return self.template
def add_resources(self):
    """Add the three tiered security groups (VPN, all-servers,
    internal-servers) to the template, exporting each one's id as
    "<stack>-<resource title>"."""
    template = self.template
    variables = self.get_variables()

    def open_egress():
        # Fresh wide-open egress rule list; identical for every group.
        return [
            ec2.SecurityGroupRule(IpProtocol='-1',
                                  FromPort='0',
                                  ToPort='65535',
                                  CidrIp='0.0.0.0/0')
        ]

    def name_tag(purpose):
        # "<customer>-<purpose>-<environment>" Name tag.
        return Tags(Name=Join('-', [
            variables['CustomerName'].ref, purpose,
            variables['EnvironmentName'].ref
        ]))

    def export_group(group, description):
        # Cross-stack export keyed on the resource title.
        template.add_output(
            Output(group.title,
                   Description=description,
                   Export=Export(
                       Sub('${AWS::StackName}-%s' % group.title)),
                   Value=Ref(group)))

    vpn_group = template.add_resource(
        ec2.SecurityGroup(
            'VPNSecurityGroup',
            GroupDescription=Join(
                '-', [variables['CustomerName'].ref, 'vpn-servers']),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol='udp',
                    FromPort='1194',  # OpenVPN server
                    ToPort='1194',
                    CidrIp='0.0.0.0/0')
            ],
            SecurityGroupEgress=open_egress(),
            Tags=name_tag('vpn-servers'),
            VpcId=Ref('VpcId')))
    export_group(vpn_group, 'Security group for VPN servers')

    # Everything in this group accepts any traffic from the VPN servers.
    all_group = template.add_resource(
        ec2.SecurityGroup(
            'AllSecurityGroup',
            GroupDescription=Join(
                '-', [variables['CustomerName'].ref, 'all-servers']),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol='-1',
                    FromPort='0',
                    ToPort='65535',
                    SourceSecurityGroupId=Ref(vpn_group))
            ],
            SecurityGroupEgress=open_egress(),
            Tags=name_tag('all-servers'),
            VpcId=Ref('VpcId')))
    export_group(all_group, 'Security group for all servers')

    # No ingress: members can initiate traffic but accept none by default.
    internal_group = template.add_resource(
        ec2.SecurityGroup(
            'InternalSecurityGroup',
            GroupDescription=Join(
                '-', [variables['CustomerName'].ref, 'internal-servers']),
            SecurityGroupIngress=[],
            SecurityGroupEgress=open_egress(),
            Tags=name_tag('internal-servers'),
            VpcId=Ref('VpcId')))
    export_group(internal_group, 'Security group for internal servers')
Interval="20", Timeout="15", ), ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy( Enabled=True, Timeout=10, ), CrossZone=True, Subnets=Ref("PublicSubnet"), SecurityGroups=[Ref("LoadBalancerSecurityGroup")], )) ud = Base64( Join('\n', [ "#!/bin/bash", "yum install --enablerepo=epel -y git", "pip install ansible", AnsiblePullCmd, "echo '*/10 * * * * {}' > /etc/cron.d/ansible-pull".format( AnsiblePullCmd) ])) t.add_resource( Role("Role", AssumeRolePolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["ec2.amazonaws.com"])) ]))) t.add_resource( InstanceProfile("InstanceProfile", Path="/", Roles=[Ref("Role")])) t.add_resource(
def main():
    """Function: Generates the Cloudformation template"""
    template = Template()
    template.set_description("Server Stack")

    key_param = template.add_parameter(
        Parameter(
            'KeyName',
            Description='An existing EC2 KeyPair.',
            ConstraintDescription='An existing EC2 KeyPair.',
            Type='AWS::EC2::KeyPair::KeyName',
        )
    )

    # Per-region AMI ids used by the instance below.
    region_amis = {
        'eu-north-1': {'ami': 'ami-a536bedb'},
        'ap-south-1': {'ami': 'ami-00b2a5e29f669c903'},
        'eu-west-3': {'ami': 'ami-0d8581d2794d7df68'},
        'eu-west-2': {'ami': 'ami-02369579484abae2e'},
        'eu-west-1': {'ami': 'ami-0c17a2bccea3e36f9'},
        'ap-northeast-2': {'ami': 'ami-05daa9d0230f30d79'},
        'ap-northeast-1': {'ami': 'ami-03a90fe15b63befea'},
        'sa-east-1': {'ami': 'ami-0c04bf4cfbf3e9dbe'},
        'ca-central-1': {'ami': 'ami-013d2a414e834a144'},
        'ap-southeast-1': {'ami': 'ami-07ed1f021e2eea7cb'},
        'ap-southeast-2': {'ami': 'ami-068e6346d66ed62c8'},
        'eu-central-1': {'ami': 'ami-00aa61be0e9a8f948'},
        'us-east-1': {'ami': 'ami-0dd925351e231e8c7'},
        'us-east-2': {'ami': 'ami-06cb7cbcc0e8e90e8'},
        'us-west-1': {'ami': 'ami-0d8e4e7b60cd5f225'},
        'us-west-2': {'ami': 'ami-06ad92f74f2c20787'},
    }
    template.add_mapping('RegionMap', region_amis)

    # SSH/HTTP/HTTPS open to the world.
    ingress_rules = [
        ec2.SecurityGroupRule(
            IpProtocol='tcp',
            FromPort=port,
            ToPort=port,
            CidrIp='0.0.0.0/0',
            Description=label)
        for port, label in (('22', 'SSH'), ('80', 'HTTP'), ('443', 'HTTPS'))
    ]
    security_group = template.add_resource(
        ec2.SecurityGroup(
            'EC2SecurityGroup',
            Tags=[{'Key': 'Name', 'Value': Ref('AWS::StackName')}],
            GroupDescription='EC2 Security Group',
            SecurityGroupIngress=ingress_rules,
        )
    )

    # Debian/Ubuntu bootstrap: upgrade, install cfn-bootstrap, then signal
    # the CreationPolicy below.
    boot_script = [
        '#!/bin/bash -x\n',
        'exec > /tmp/user-data.log 2>&1\n',
        'unset UCF_FORCE_CONFFOLD\n',
        'export UCF_FORCE_CONFFNEW=YES\n',
        'ucf --purge /boot/grub/menu.lst\n',
        'export DEBIAN_FRONTEND=noninteractive\n',
        'apt-get update\n',
        'apt-get -o Dpkg::Options::="--force-confnew" --force-yes -fuy upgrade\n',
        'apt-get install -y python-pip\n',
        'pip install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz\n',
        '# Signal Cloudformation when set up is complete\n',
        '/usr/local/bin/cfn-signal -e $? --resource=Instance --region=',
        Ref('AWS::Region'),
        ' --stack=',
        Ref('AWS::StackName'),
        '\n',
    ]

    server = template.add_resource(
        ec2.Instance(
            'Instance',
            Metadata=Metadata(
                Init({
                    "config": InitConfig(
                        files=InitFiles({
                            "/tmp/instance.txt": InitFile(
                                content=Ref('AWS::StackName'),
                                mode="000644",
                                owner="root",
                                group="root"
                            )
                        }),
                    )
                }),
            ),
            # Wait up to 15 minutes for the cfn-signal in user data.
            CreationPolicy=CreationPolicy(
                ResourceSignal=ResourceSignal(Timeout='PT15M')
            ),
            Tags=[{'Key': 'Name', 'Value': Ref('AWS::StackName')}],
            ImageId=FindInMap('RegionMap', Ref('AWS::Region'), 'ami'),
            InstanceType='t2.2xlarge',
            KeyName=Ref(key_param),
            SecurityGroups=[Ref(security_group)],
            UserData=Base64(Join('', boot_script)),
        )
    )

    template.add_resource(
        ec2.EIP(
            'ElasticIP',
            InstanceId=Ref(server),
            Domain='vpc'
        )
    )

    template.add_output([
        Output(
            'InstanceDnsName',
            Description='PublicDnsName',
            Value=GetAtt(server, 'PublicDnsName'),
        ),
    ])

    print(template.to_yaml())
def emit_configuration():
    """Build the reporting Postgres RDS stack.

    Adds parameters for the instance name, credentials, class, and
    storage; a DB subnet group; a security group opening Postgres
    (5432/tcp); the Postgres instance; and a "host:port" connection
    string output.
    """
    vpc = cfn.vpcs[0]
    # (Removed unused `region` local.)

    dbname = template.add_parameter(
        Parameter(
            "RDSDatabaseInstanceName",
            Default="reporting{0}".format(CLOUDENV),
            Description="Postgres Instance Name",
            Type="String",
            MinLength="1",
            MaxLength="63",
            AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
            ConstraintDescription=
            "Must begin with a letter and contain only alphanumeric characters"
        ))

    dbuser = template.add_parameter(
        Parameter(
            "RDSDatabaseUser",
            Default="sa",
            Description="The database admin account username",
            Type="String",
            MinLength="1",
            MaxLength="63",
            AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
            # Fixed typo: "Must being" -> "Must begin".
            ConstraintDescription="Must begin with a letter and be alphanumeric"
        ))

    # SECURITY(review): a hard-coded default on a NoEcho password parameter
    # defeats NoEcho -- deployments should always override this value.
    dbpassword = template.add_parameter(
        Parameter(
            "RDSDatabasePassword",
            NoEcho=True,
            Description="The database admin account password",
            Type="String",
            MinLength="1",
            MaxLength="41",
            AllowedPattern="[a-zA-Z0-9]*",
            ConstraintDescription="Must contain only alphanumeric characters.",
            Default="LeafLeaf123"))

    dbclass = template.add_parameter(
        Parameter("RDSInstanceClass",
                  Default="db.t2.medium",
                  Description="Database instance size",
                  Type="String",
                  AllowedValues=[
                      "db.t2.small", "db.t2.medium", "db.m3.medium",
                      "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge",
                      "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge",
                      "db.r3.4xlarge", "db.r3.8xlarge"
                  ]))

    allocated_storage = template.add_parameter(
        Parameter("RDSAllocatedStorage",
                  Default="100",
                  Description="The size of the Postgres Database (GB)",
                  Type="Number",
                  MinValue="5",
                  MaxValue="512",
                  ConstraintDescription="Must be between 5 and 512 GB"))

    # Hoisted: the database subnets are needed by both the subnet group
    # and the DB instance's DependsOn list (was computed three times).
    db_subnets = cfn.get_vpc_subnets(vpc, cfn.SubnetTypes.DATABASE)

    db_subnet_group = template.add_resource(
        DBSubnetGroup(
            "RDSSubnetGroup",
            DBSubnetGroupDescription="Subnets available for RDS in {0}".format(
                CLOUDNAME),
            SubnetIds=[Ref(sn) for sn in db_subnets],
            DependsOn=[sn.title for sn in db_subnets]))

    # Allow Postgres (5432/tcp) from the default route CIDR.
    ingress_rules = [
        SecurityGroupRule(IpProtocol=p[0],
                          CidrIp=DEFAULT_ROUTE,
                          FromPort=p[1],
                          ToPort=p[1])
        for p in [('tcp', 5432)]
    ]

    security_group = template.add_resource(
        SecurityGroup("RDSDatabaseSecurityGroup",
                      GroupDescription="Security group for Postgres Instances",
                      VpcId=Ref(vpc),
                      SecurityGroupIngress=ingress_rules,
                      DependsOn=vpc.title))

    database = template.add_resource(
        DBInstance(
            "RDSPostgresInstance",
            DBInstanceIdentifier=Ref(dbname),
            AllocatedStorage=Ref(allocated_storage),
            DBInstanceClass=Ref(dbclass),
            Engine="postgres",
            EngineVersion="9.3.6",
            MasterUsername=Ref(dbuser),
            MasterUserPassword=Ref(dbpassword),
            DBSubnetGroupName=Ref(db_subnet_group),
            VPCSecurityGroups=[Ref(security_group)],
            DependsOn=[sn.title for sn in db_subnets]))

    # BUG FIX: host and port were concatenated with no separator
    # (yielding e.g. "myhost5432"); join them with ":" so the output is a
    # usable endpoint.
    template.add_output(
        Output("ConnectionString",
               Description="JDBC connection string for Postgres",
               Value=Join("", [
                   GetAtt("RDSPostgresInstance", "Endpoint.Address"),
                   ":",
                   GetAtt("RDSPostgresInstance", "Endpoint.Port")
               ])))
def add_resources(self):
    """Add the VPN server resources to the template.

    Creates: an IAM role/instance profile granting route-table updates
    and S3 access to the Chef data bucket; an optional AMI-lookup custom
    resource (when no AMI was supplied); an optional subnet-lookup
    Lambda + custom resource (when private subnet count was omitted); a
    cloud-config userdata script that installs/updates VPN routes and
    bootstraps Chef; and finally a launch configuration plus a
    single-instance auto scaling group for the VPN server.
    """
    template = self.template
    variables = self.get_variables()
    vpnrole = template.add_resource(
        iam.Role(
            'VPNRole',
            AssumeRolePolicyDocument=iam_policies.assumerolepolicy('ec2'),
            ManagedPolicyArns=variables['VPNManagedPolicies'].ref,
            Path='/',
            Policies=[
                iam.Policy(
                    PolicyName=Join('-', [
                        'customer-vpn-server-role',
                        variables['EnvironmentName'].ref,
                        variables['CustomerName'].ref
                    ]),
                    PolicyDocument=Policy(
                        Version='2012-10-17',
                        Statement=[
                            # ModifyInstanceAttribute is for src/dst check
                            Statement(Action=[
                                awacs.ec2.DescribeRouteTables,
                                awacs.ec2.DescribeAddresses,
                                awacs.ec2.AssociateAddress,
                                awacs.ec2.CreateRoute,
                                awacs.ec2.ReplaceRoute,
                                awacs.ec2.ModifyInstanceAttribute
                            ],
                                      Effect=Allow,
                                      Resource=['*']),
                            # Read/write access to this customer's folder of
                            # the Chef data bucket.
                            Statement(
                                Action=[
                                    awacs.aws.Action('s3', 'Get*'),
                                    awacs.aws.Action('s3', 'List*'),
                                    awacs.aws.Action('s3', 'Put*')
                                ],
                                Effect=Allow,
                                Resource=[
                                    Join(
                                        '',
                                        [
                                            'arn:aws:s3:::',
                                            variables['ChefDataBucketName']
                                            .ref,
                                            '/',
                                            variables['EnvironmentName'].
                                            ref,
                                            '/',
                                            variables['BucketKey'].ref,
                                            '/*'
                                        ])
                                ]),
                            # Bucket listing restricted to the same prefix.
                            Statement(
                                Action=[awacs.s3.ListBucket],
                                Effect=Allow,
                                Resource=[
                                    Join('', [
                                        'arn:aws:s3:::',
                                        variables['ChefDataBucketName'].ref
                                    ])
                                ],
                                Condition=Condition(
                                    StringLike(
                                        's3:prefix',
                                        [
                                            Join(
                                                '',
                                                [
                                                    variables[
                                                        'EnvironmentName'].
                                                    ref,
                                                    '/',
                                                    variables['BucketKey'].
                                                    ref,
                                                    '/*'
                                                ])
                                        ])))
                        ]))
            ]))
    vpninstanceprofile = template.add_resource(
        iam.InstanceProfile('VPNInstanceProfile',
                            Path='/',
                            Roles=[Ref(vpnrole)]))
    # Custom resource: look up an AMI only when one was not provided.
    amiid = template.add_resource(
        cfn_custom_classes.AMIId(
            'AMIId',
            Condition='MissingVPNAMI',
            Platform=variables['VPNOS'].ref,
            Region=Ref('AWS::Region'),
            ServiceToken=variables['AMILookupArn'].ref))

    # Lookup subnets from core VPC stack
    subnetlookuplambdarole = template.add_resource(
        iam.Role(
            'SubnetLookupLambdaRole',
            Condition='PrivateSubnetCountOmitted',
            AssumeRolePolicyDocument=iam_policies.assumerolepolicy(
                'lambda'),
            ManagedPolicyArns=[
                IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole'
            ],
            Policies=[
                iam.Policy(
                    PolicyName=Join('-', [
                        'subnetlookup-lambda-role',
                        variables['EnvironmentName'].ref,
                        variables['CustomerName'].ref
                    ]),
                    PolicyDocument=Policy(
                        Version='2012-10-17',
                        Statement=[
                            # Read-only CloudFormation access, scoped to
                            # the core VPC stack.
                            Statement(
                                Action=[
                                    awacs.aws.Action(
                                        'cloudformation',
                                        'DescribeStack*'),
                                    awacs.aws.Action(
                                        'cloudformation', 'Get*')
                                ],
                                Effect=Allow,
                                Resource=[
                                    Join('', [
                                        'arn:aws:cloudformation:',
                                        Ref('AWS::Region'), ':',
                                        Ref('AWS::AccountId'), ':stack/',
                                        variables['CoreVPCStack'].ref,
                                        '/*'
                                    ])
                                ])
                        ]))
            ]))
    # NOTE(review): python2.7 Lambda runtime is deprecated -- confirm
    # before redeploying.
    cfncustomresourcesubnetlookup = template.add_resource(
        awslambda.Function(
            'CFNCustomResourceSubnetLookup',
            Condition='PrivateSubnetCountOmitted',
            Description='Find subnets created by core stack',
            Code=awslambda.Code(
                ZipFile=variables['SubnetLookupLambdaFunction']),
            Handler='index.handler',
            Role=GetAtt(subnetlookuplambdarole, 'Arn'),
            Runtime='python2.7',
            Timeout=10))
    subnetlookup = template.add_resource(
        cfn_custom_classes.SubnetLookup(
            'SubnetLookup',
            Condition='PrivateSubnetCountOmitted',
            CoreVPCStack=variables['CoreVPCStack'].ref,
            Region=Ref('AWS::Region'),
            ServiceToken=GetAtt(cfncustomresourcesubnetlookup, 'Arn')))

    # cloud-config userdata shared by both the RHEL and non-RHEL launch
    # configurations; OS-specific package sections are appended later.
    common_userdata_prefix = [
        "#cloud-config\n",
        "package_update: true\n",
        "package_upgrade: false\n",
        "write_files:\n",
        # Script: point the public/private route tables' VPN subnet route
        # at this instance (create or replace as appropriate).
        " - path: /usr/local/bin/update_vpn_routes.sh\n",
        " permissions: '0755'\n",
        " content: |\n",
        " #!/bin/bash\n",
        " \n",
        " export AWS_DEFAULT_REGION=\"",
        Ref('AWS::Region'),
        "\"\n",
        " my_instance_id=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)\n",
        " \n",
        # Route table ids come from the lookup custom resource, an explicit
        # parameter, or a cross-stack import -- in that order of precedence.
        " publicroutetableid=",
        If(
            'PrivateSubnetCountOmitted',
            GetAtt(subnetlookup.title, 'PublicRouteTableId'),
            If(
                'PublicRouteTableSpecified',
                variables['PublicRouteTable'].ref,
                ImportValue(
                    Sub("${%s}-PublicRouteTable" %
                        variables['CoreVPCStack'].name)))),
        "\n",
        " private_route_tables=(",
        If(
            'PrivateSubnetCountOmitted',
            GetAtt(subnetlookup.title, 'PrivateRouteTables'),
            If(
                '3PrivateSubnetsCreated',
                If(
                    'PublicRouteTableSpecified',
                    Join(' ', [
                        variables['PrivateRouteTable1'].ref,
                        variables['PrivateRouteTable2'].ref,
                        variables['PrivateRouteTable3'].ref
                    ]),
                    Join(
                        ' ',
                        [
                            ImportValue(
                                Sub("${%s}-PrivateRouteTable1" %
                                    variables['CoreVPCStack'].name)),
                            ImportValue(
                                Sub("${%s}-PrivateRouteTable2" %
                                    variables['CoreVPCStack'].name)),
                            ImportValue(
                                Sub("${%s}-PrivateRouteTable3" %
                                    variables['CoreVPCStack'].name))
                        ])),
                If(
                    '2PrivateSubnetsCreated',
                    If(
                        'PublicRouteTableSpecified',
                        Join(' ', [
                            variables['PrivateRouteTable1'].ref,
                            variables['PrivateRouteTable2'].ref
                        ]),
                        Join(
                            ' ',
                            [
                                ImportValue(
                                    Sub("${%s}-PrivateRouteTable1" %
                                        variables['CoreVPCStack'].name)),
                                ImportValue(
                                    Sub("${%s}-PrivateRouteTable2" %
                                        variables['CoreVPCStack'].name))
                            ])),
                    If(
                        'PublicRouteTableSpecified',
                        variables['PrivateRouteTable1'].ref,
                        ImportValue(
                            Sub("${%s}-PrivateRouteTable1" %
                                variables['CoreVPCStack'].name)))))),
        ")\n",
        "\n",
        " openvpnroutepubdest=",
        variables['VPNSubnet'].ref,
        "\n",
        " \n",
        " # Disabling sourceDestCheck\n",
        " aws ec2 modify-instance-attribute --instance-id ${my_instance_id} --source-dest-check \"{\\\"Value\\\": false}\"\n",
        " \n",
        " if aws ec2 describe-route-tables | grep ${openvpnroutepubdest}; then\n",
        " # Update 'OpenVPNRoutePub' to point to this instance\n",
        " aws ec2 replace-route --route-table-id ${publicroutetableid} --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",
        " # Update private routes\n",
        " for i in \"${private_route_tables[@]}\"\n",
        " do\n",
        " aws ec2 replace-route --route-table-id $i --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",
        " done\n",
        " else\n",
        " # Create 'OpenVPNRoutePub'\n",
        " aws ec2 create-route --route-table-id ${publicroutetableid} --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",
        " # Create private routes\n",
        " for i in \"${private_route_tables[@]}\"\n",
        " do\n",
        " aws ec2 create-route --route-table-id $i --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",
        " done\n",
        " fi\n",
        " \n",
        "\n",
        # Script: pull cookbooks from S3 and extract the newest archive.
        " - path: /etc/chef/sync_cookbooks.sh\n",
        " permissions: '0755'\n",
        " owner: 'root'\n",
        " group: 'root'\n",
        " content: |\n",
        " #!/bin/bash\n",
        " set -e -o pipefail\n",
        " \n",
        " aws --region ",
        Ref('AWS::Region'),
        " s3 sync s3://",
        variables['ChefBucketName'].ref,
        "/",
        variables['EnvironmentName'].ref,
        "/",
        variables['BucketKey'].ref,
        "/ /etc/chef/\n",
        " if compgen -G \"/etc/chef/cookbooks-*.tar.gz\" > /dev/null; then\n",
        " echo \"Cookbook archive found.\"\n",
        " if [ -d \"/etc/chef/cookbooks\" ]; then\n",
        " echo \"Removing previously extracted cookbooks.\"\n",
        " rm -r /etc/chef/cookbooks\n",
        " fi\n",
        " echo \"Extracting highest numbered cookbook archive.\"\n",
        " cbarchives=(/etc/chef/cookbooks-*.tar.gz)\n",
        " tar -zxf \"${cbarchives[@]: -1}\" -C /etc/chef\n",
        " chown -R root:root /etc/chef\n",
        " fi\n",
        " \n",
        "\n",
        # Script: local-mode chef-client run with the configured run list.
        " - path: /etc/chef/perform_chef_run.sh\n",
        " permissions: '0755'\n",
        " owner: 'root'\n",
        " group: 'root'\n",
        " content: |\n",
        " #!/bin/bash\n",
        " set -e -o pipefail\n",
        " \n",
        " chef-client -z -r '",
        If('ChefRunListSpecified',
           variables['ChefRunList'].ref,
           Join('', ['recipe[', variables['CustomerName'].ref, '_vpn]'])),
        "' -c /etc/chef/client.rb -E ",
        variables['EnvironmentName'].ref,
        " --force-formatter --no-color -F min\n",
        "\n",
        # Chef client configuration (local mode).
        " - path: /etc/chef/client.rb\n",
        " permissions: '0644'\n",
        " owner: 'root'\n",
        " group: 'root'\n",
        " content: |\n",
        " log_level :info\n",
        " log_location '/var/log/chef/client.log'\n",
        " ssl_verify_mode :verify_none\n",
        " cookbook_path '/etc/chef/cookbooks'\n",
        " node_path '/etc/chef/nodes'\n",
        " role_path '/etc/chef/roles'\n",
        " data_bag_path '/etc/chef/data_bags'\n",
        " environment_path '/etc/chef/environments'\n",
        " local_mode 'true'\n",
        "\n",
        # Chef environment JSON with the VPN attributes.
        " - path: /etc/chef/environments/",
        variables['EnvironmentName'].ref,
        ".json\n",
        " permissions: '0644'\n",
        " owner: 'root'\n",
        " group: 'root'\n",
        " content: |\n",
        " {\n",
        " \"name\": \"",
        variables['EnvironmentName'].ref,
        "\",\n",
        " \"default_attributes\": {\n",
        " \"sturdy\": {\n",
        " \"openvpn\": {\n",
        " \"core_vpc_cidr\": \"",
        variables['VpcCidr'].ref,
        "\",\n",
        " \"vpn_elastic_ip\": \"",
        variables['VpnEipPublicIp'].ref,
        "\",\n",
        " \"vpn_subnet_cidr\": \"",
        variables['VPNSubnet'].ref,
        "\",\n",
        " \"chef_data_bucket_name\": \"",
        variables['ChefDataBucketName'].ref,
        "\",\n",
        " \"chef_data_bucket_folder\": \"",
        variables['EnvironmentName'].ref,
        "/",
        variables['BucketKey'].ref,
        "\",\n",
        " \"chef_data_bucket_region\": \"",
        Ref('AWS::Region'),
        "\"\n",
        " }\n",
        " }\n",
        " },\n",
        " \"json_class\": \"Chef::Environment\",\n",
        " \"description\": \"",
        variables['EnvironmentName'].ref,
        " environment\",\n",
        " \"chef_type\": \"environment\"\n",
        " }\n",
        "\n",
        "runcmd:\n",
        " - set -euf\n",
        " - echo 'Attaching EIP'\n",
        " - pip install aws-ec2-assign-elastic-ip\n",
        # Allowing this command to fail (with ||true) as sturdy_openvpn
        # 2.3.0+ can handle this association instead. This will be removed
        # entirely in the next major release of this module (at which time
        # use of the updated sturdy_openvpn cookbook will be required)
        " - aws-ec2-assign-elastic-ip --region ",
        Ref('AWS::Region'),
        " --valid-ips ",
        variables['VpnEipPublicIp'].ref,
        " || true\n",
        " - echo 'Updating Routes'\n",
        " - /usr/local/bin/update_vpn_routes.sh\n",
        " - echo 'Installing Chef'\n",
        " - curl --max-time 10 --retry-delay 5 --retry 5 -L https://www.chef.io/chef/install.sh | bash -s -- -v ",
        variables['ChefClientVersion'].ref,
        "\n",
        " - echo 'Configuring Chef'\n",
        " - mkdir -p /var/log/chef /etc/chef/data_bags /etc/chef/nodes /etc/chef/roles\n",
        " - chmod 0755 /etc/chef\n",
        " - /etc/chef/sync_cookbooks.sh\n",
        " - /etc/chef/perform_chef_run.sh\n"
    ]

    vpnserverlaunchconfig = template.add_resource(
        autoscaling.LaunchConfiguration(
            'VpnServerLaunchConfig',
            AssociatePublicIpAddress=True,
            BlockDeviceMappings=[
                # CentOS AMIs don't include this by default
                ec2.BlockDeviceMapping(
                    DeviceName='/dev/sda1',
                    Ebs=ec2.EBSBlockDevice(DeleteOnTermination=True))
            ],
            IamInstanceProfile=Ref(vpninstanceprofile),
            ImageId=If('MissingVPNAMI',
                       GetAtt(amiid, 'ImageId'),
                       variables['VPNAMI'].ref),
            InstanceType=variables['ManagementInstanceType'].ref,
            InstanceMonitoring=False,  # extra granularity not worth cost
            KeyName=If('SSHKeySpecified',
                       variables['KeyName'].ref,
                       Ref('AWS::NoValue')),
            PlacementTenancy=variables['VpcInstanceTenancy'].ref,
            SecurityGroups=variables['VPNSecurityGroups'].ref,
            # RHEL-family hosts additionally get the EPEL repo and
            # python2-prefixed boto packages.
            UserData=If(
                'RHELUserData',
                Base64(
                    Join(
                        '',
                        common_userdata_prefix + [
                            "yum_repos:\n",
                            " epel:\n",
                            " name: Extra Packages for $releasever - $basearch\n",
                            " baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch\n",
                            " enabled: true\n",
                            " failovermethod: priority\n",
                            " gpgcheck: true\n",
                            " gpgkey: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7\n",
                            "packages:\n",
                            " - awscli\n",
                            " - python-pip\n",
                            " - python2-boto\n",
                            " - python2-boto3\n"
                        ])),
                Base64(
                    Join(
                        '',
                        common_userdata_prefix + [
                            "packages:\n",
                            " - awscli\n",
                            " - python-pip\n",
                            " - python-boto\n",
                            " - python-boto3\n"
                        ])))))
    # Single VPN instance kept alive by an ASG (min = max = 1).
    template.add_resource(
        autoscaling.AutoScalingGroup(
            'VPNServerASG',
            MinSize=1,
            MaxSize=1,
            LaunchConfigurationName=Ref(vpnserverlaunchconfig),
            Tags=[
                autoscaling.Tag(
                    'Name',
                    Join('-', [
                        variables['CustomerName'].ref, 'vpn',
                        variables['EnvironmentName'].ref
                    ]), True),
                autoscaling.Tag('environment',
                                variables['EnvironmentName'].ref, True),
                autoscaling.Tag('customer',
                                variables['CustomerName'].ref, True)
            ],
            VPCZoneIdentifier=If(
                'PublicSubnetsOmitted',
                GetAtt(subnetlookup.title, 'PublicSubnetList'),
                variables['PublicSubnets'].ref)))
"S3Bucket", DeletionPolicy="Retain" )) t.add_resource(BucketPolicy( "BucketPolicy", Bucket=Ref("S3Bucket"), PolicyDocument={ "Statement": [{ "Action": "s3:GetBucketAcl", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Resource": Join("", [ "arn:aws:s3:::", Ref("S3Bucket") ]) }, { "Action": "s3:PutObject", "Effect": "Allow", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Resource": Join("", [ "arn:aws:s3:::", Ref("S3Bucket"), "/AWSLogs/", Ref("AWS::AccountId"), "/*" ]), "Condition": {
"Creates an AWS WAF configuration that protects against common attacks") WebACLName = t.add_parameter( Parameter( "WebACLName", Default="CommonAttackProtection", Type="String", Description="Enter the name you want to use for the WebACL. " "This value is also added as a prefix for the names of the rules, " "conditions, and CloudWatch metrics created by this template.", )) SqliMatchSet = t.add_resource( SqlInjectionMatchSet( "SqliMatchSet", Name=Join("", [Ref(WebACLName), "SqliMatch"]), SqlInjectionMatchTuples=[ SqlInjectionMatchTuples( FieldToMatch=FieldToMatch(Type="QUERY_STRING"), TextTransformation="URL_DECODE"), SqlInjectionMatchTuples( FieldToMatch=FieldToMatch(Type="QUERY_STRING"), TextTransformation="HTML_ENTITY_DECODE"), SqlInjectionMatchTuples(FieldToMatch=FieldToMatch(Type="BODY"), TextTransformation="URL_DECODE"), SqlInjectionMatchTuples(FieldToMatch=FieldToMatch(Type="BODY"), TextTransformation="HTML_ENTITY_DECODE"), SqlInjectionMatchTuples(FieldToMatch=FieldToMatch(Type="URI"), TextTransformation="URL_DECODE") ]))
def create_template(self):
    """Build the dependency template (main function called by Stacker).

    Creates the AWS-log and artifact S3 buckets (with outputs), and,
    when AuthAtEdge is enabled, the Cognito user pool / app client.
    """
    template = self.template
    variables = self.get_variables()
    template.set_version("2010-09-09")
    template.set_description("Static Website - Dependencies")

    # Private, versioned bucket that receives AWS access logs.
    log_bucket = template.add_resource(
        s3.Bucket(
            "AWSLogBucket",
            AccessControl=s3.Private,
            VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
        )
    )
    template.add_output(
        Output(
            "AWSLogBucketName",
            Description="Name of bucket storing AWS logs",
            Value=log_bucket.ref(),
        )
    )

    # Grant the account root permission to put log objects in the bucket.
    log_writer_principal = AWSPrincipal(Join(":", ["arn:aws:iam:", AccountId, "root"]))
    log_objects_arn = Join("", ["arn:aws:s3:::", log_bucket.ref(), "/*"])
    template.add_resource(
        s3.BucketPolicy(
            "AllowAWSLogWriting",
            Bucket=log_bucket.ref(),
            PolicyDocument=Policy(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Action=[awacs.s3.PutObject],
                        Effect=Allow,
                        Principal=log_writer_principal,
                        Resource=[log_objects_arn],
                    )
                ],
            ),
        )
    )

    # Versioned artifact bucket; superseded object versions expire after 90 days.
    expire_old_versions = s3.LifecycleRule(
        NoncurrentVersionExpirationInDays=90, Status="Enabled"
    )
    artifact_bucket = template.add_resource(
        s3.Bucket(
            "Artifacts",
            AccessControl=s3.Private,
            LifecycleConfiguration=s3.LifecycleConfiguration(
                Rules=[expire_old_versions]
            ),
            VersioningConfiguration=s3.VersioningConfiguration(Status="Enabled"),
        )
    )
    template.add_output(
        Output(
            "ArtifactsBucketName",
            Description="Name of bucket storing artifacts",
            Value=artifact_bucket.ref(),
        )
    )

    if variables["AuthAtEdge"]:
        # Callback URLs gathered earlier by the pre-build hook.
        callback_urls = self.context.hook_data["aae_callback_url_retriever"][
            "callback_urls"
        ]

        if variables["CreateUserPool"]:
            pool = template.add_resource(cognito.UserPool("AuthAtEdgeUserPool"))
            pool_id = pool.ref()
            template.add_output(
                Output(
                    "AuthAtEdgeUserPoolId",
                    Description="Cognito User Pool App Client for Auth @ Edge",
                    Value=pool_id,
                )
            )
        else:
            # Reuse a pre-existing pool resolved by the hook.
            pool_id = self.context.hook_data["aae_user_pool_id_retriever"]["id"]

        pool_client = template.add_resource(
            cognito.UserPoolClient(
                "AuthAtEdgeClient",
                AllowedOAuthFlows=["code"],
                CallbackURLs=callback_urls,
                UserPoolId=pool_id,
                AllowedOAuthScopes=variables["OAuthScopes"],
            )
        )
        template.add_output(
            Output(
                "AuthAtEdgeClient",
                Description="Cognito User Pool App Client for Auth @ Edge",
                Value=pool_client.ref(),
            )
        )
Output, Parameter, Ref, Template ) from troposphere.ecr import Repository t = Template() t.set_description("community-mother-api: ECR Repository") t.add_parameter(Parameter( "RepoName", Type="String", Description="Name of the ECR repository to create" )) t.add_resource(Repository( "Repository", RepositoryName=Ref("RepoName") )) t.add_output(Output( "Repository", Description="ECR repository", Value=Ref("RepoName"), Export=Export(Join("-", [Ref("RepoName"), "repo"])), )) print(t.to_json())
IpProtocol="tcp", FromPort=ApplicationPort, ToPort=ApplicationPort, CidrIp="0.0.0.0/0", ), ], )) ud = Base64(Join('', [ "#!/bin/bash\n", "yum -y update\n", "yum -y install epel-release\n", "yum install --enablerepo=epel -y git\n", "yum -y install python-pip\n", "pip install --upgrade\n", "pip-2.7 install ansible\n", AnsiblePullCmd, "\n", "npm install -g n\n", "n stable\n", "npm install -g mocha\n", "echo '*/10 * * * * {}' > /etc/cron.d/ansible-pull".format(AnsiblePullCmd) ])) t.add_resource(Role( "Role", AssumeRolePolicyDocument=Policy( Statement=[ Statement( Effect=Allow, Action=[AssumeRole],
Parameter( 'TeamNumber', ConstraintDescription= 'must be a single number indicating the team number.', Description='Team number', Type='Number', )) team_number = Ref(team_number_param) az = Select(0, GetAZs()) VPC = t.add_resource( VPC('VPC', CidrBlock='10.0.0.0/16', Tags=Tags(Name=Join(" ", ["VPC For Team", team_number])))) internetGateway = t.add_resource( InternetGateway('InternetGateway', Tags=Tags(Name=Join(" ", ["Igw for team", team_number]), Team=team_number))) gatewayAttachment = t.add_resource( VPCGatewayAttachment('AttachGatewayToVPC', VpcId=Ref(VPC), InternetGatewayId=Ref(internetGateway))) Public_DMZ_Subnet = t.add_resource( Subnet('PublicDMZSubnet', AvailabilityZone=az, CidrBlock='10.0.1.0/24',
t.add_description("Effective DevOps in AWS: ECS service - Helloworld") t.add_parameter( Parameter("Tag", Type="String", Default="latest", Description="Tag to deploy")) t.add_resource( TaskDefinition( "task", ContainerDefinitions=[ ContainerDefinition( Image=Join("", [ Ref("AWS::AccountId"), ".dkr.ecr.", Ref("AWS::Region"), ".amazonaws.com", "/", ImportValue("helloworld-repo"), ":", Ref("Tag") ]), Memory=32, Cpu=256, Name="helloworld", PortMappings=[ecs.PortMapping(ContainerPort=3000)]) ], )) t.add_resource( Role( "ServiceRole", AssumeRolePolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Action=[AssumeRole],
Subnet("PrivateSubnet", VpcId=Ref("VPC"), CidrBlock=Ref(SubnetCIDR), Tags=Tags( Application=Ref("AWS::StackName"), Network="VPN Connected Subnet", ))) CustomerGateway = t.add_resource( CustomerGateway("CustomerGateway", BgpAsn="65000", IpAddress=Ref(VPNAddress), Type="ipsec.1", Tags=Tags( Application=Ref("AWS::StackName"), VPN=Join( "", ["Gateway to ", Ref(VPNAddress)]), ))) VPNConnectionRoute = t.add_resource( VPNConnectionRoute( "VPNConnectionRoute", VpnConnectionId=Ref("VPNConnection"), DestinationCidrBlock=Ref(OnPremiseCIDR), )) PrivateRouteTable = t.add_resource( RouteTable("PrivateRouteTable", VpcId=Ref("VPC"), Tags=Tags( Application=Ref("AWS::StackName"), Network="VPN Connected Subnet",
Tags=tags('ServiceVPC'), )) # Internet Gateway internet_gateway = t.add_resource( InternetGateway("InternetGateway", Tags=tags('InternetGateway'))) t.add_resource( VPCGatewayAttachment("VpcGatewayAttachment", InternetGatewayId=Ref(internet_gateway), VpcId=Ref(vpc))) # VPC DHCP Options dhcp_opts = t.add_resource( DHCPOptions("DhcpOptions", DomainName=Join("", [Ref("AWS::Region"), ".compute.internal"]), DomainNameServers=["AmazonProvidedDNS"], Tags=tags('DhcpOptions'))) dhcp_opts_assoc = t.add_resource( VPCDHCPOptionsAssociation("VpcDhcpOptionsAssociation", DhcpOptionsId=Ref(dhcp_opts), VpcId=Ref(vpc))) # Security Group sg = t.add_resource( SecurityGroup("BastionSG", GroupDescription="Used for source/dest rules", VpcId=Ref(vpc), Tags=tags('VPC-Bastion-SG')))
def create_template(self) -> None:
    """Create template (main function called by Stacker).

    Builds the AWS-log and artifact S3 buckets with their outputs, then,
    when AuthAtEdge is enabled, assembles the Cognito user pool / app
    client parameters (order-dependent dict build below) and creates the
    pool client.
    """
    template = self.template
    template.set_version("2010-09-09")
    template.set_description("Static Website - Dependencies")

    # Resources
    # Private, versioned bucket receiving AWS logs.
    awslogbucket = template.add_resource(
        s3.Bucket(
            "AWSLogBucket",
            AccessControl=s3.Private,
            VersioningConfiguration=s3.VersioningConfiguration(
                Status="Enabled"),
        ))
    template.add_output(
        Output(
            "AWSLogBucketName",
            Description="Name of bucket storing AWS logs",
            Value=awslogbucket.ref(),
        ))
    # Let the account root write log objects into the bucket.
    template.add_resource(
        s3.BucketPolicy(
            "AllowAWSLogWriting",
            Bucket=awslogbucket.ref(),
            PolicyDocument=Policy(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Action=[awacs.s3.PutObject],
                        Effect=Allow,
                        Principal=AWSPrincipal(
                            Join(":", ["arn:aws:iam:", AccountId, "root"])),
                        Resource=[
                            Join("", [
                                "arn:aws:s3:::",
                                awslogbucket.ref(),
                                "/*"
                            ])
                        ],
                    )
                ],
            ),
        ))
    # Versioned artifact bucket; old object versions expire after 90 days.
    artifacts = template.add_resource(
        s3.Bucket(
            "Artifacts",
            AccessControl=s3.Private,
            LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                 Status="Enabled")
            ]),
            VersioningConfiguration=s3.VersioningConfiguration(
                Status="Enabled"),
        ))
    template.add_output(
        Output(
            "ArtifactsBucketName",
            Description="Name of bucket storing artifacts",
            Value=artifacts.ref(),
        ))

    if self.variables["AuthAtEdge"]:
        # Base parameters for the Cognito app client; extended below
        # depending on whether custom aliases are configured.
        userpool_client_params = {
            "AllowedOAuthFlows": ["code"],
            "AllowedOAuthScopes": self.variables["OAuthScopes"],
        }
        if self.variables["Aliases"]:
            userpool_client_params[
                "AllowedOAuthFlowsUserPoolClient"] = True
            userpool_client_params[
                "SupportedIdentityProviders"] = self.variables[
                    "SupportedIdentityProviders"]

            # Redirects cover the site aliases plus any extra domains,
            # each normalized to include a URL scheme.
            redirect_domains = [
                add_url_scheme(x) for x in self.variables["Aliases"]
            ] + [
                add_url_scheme(x)
                for x in self.variables["AdditionalRedirectDomains"]
            ]

            redirect_uris = get_redirect_uris(
                redirect_domains,
                self.variables["RedirectPathSignIn"],
                self.variables["RedirectPathSignOut"],
            )

            userpool_client_params["CallbackURLs"] = redirect_uris[
                "sign_in"]
            userpool_client_params["LogoutURLs"] = redirect_uris[
                "sign_out"]
        else:
            # No aliases: fall back to the callback URLs gathered by the
            # pre-build hook.
            userpool_client_params[
                "CallbackURLs"] = self.context.hook_data[
                    "aae_callback_url_retriever"]["callback_urls"]

        if self.variables["CreateUserPool"]:
            user_pool = template.add_resource(
                cognito.UserPool("AuthAtEdgeUserPool"))
            user_pool_id = user_pool.ref()
            # NOTE(review): description says "App Client" but this output
            # is the pool id — looks copy-pasted; confirm intended text.
            template.add_output(
                Output(
                    "AuthAtEdgeUserPoolId",
                    Description=
                    "Cognito User Pool App Client for Auth @ Edge",
                    Value=user_pool_id,
                ))
        else:
            # Reuse an existing pool resolved by the hook.
            user_pool_id = self.context.hook_data[
                "aae_user_pool_id_retriever"]["id"]

        userpool_client_params["UserPoolId"] = user_pool_id

        client = template.add_resource(
            cognito.UserPoolClient("AuthAtEdgeClient",
                                   **userpool_client_params))

        template.add_output(
            Output(
                "AuthAtEdgeClient",
                Description="Cognito User Pool App Client for Auth @ Edge",
                Value=client.ref(),
            ))
ToPort="22", CidrIp="1.1.1.1/32", ), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort=ApplicationPort, ToPort=ApplicationPort, CidrIp="0.0.0.0/0", ), ], )) ud = Base64( Join('\n', [ "#!/bin/bash", "sudo yum install --enablerepo=epel -y nodejs", "wget http://bit.ly/2vESNuc -O /home/ec2-user/helloworld.js", "wget http://bit.ly/2vVvT18 -O /etc/init/helloworld.conf", "start helloworld" ])) t.add_resource( ec2.Instance( "instance", ImageId="ami-922914f7", InstanceType="t2.micro", SecurityGroups=[Ref("SecurityGroup")], KeyName=Ref("KeyPair"), UserData=ud, )) t.add_output( Output(
" try:", " response = requests.put(responseUrl,", " data=json_responseBody,", " headers=headers)", " log.info(\"Status code: \" + str(response.reason))", " return SUCCESS", " ", " except Exception as e:", " log.error(\"Error sending response: \" + str(e))", " return FAILED", ] CodeBuildInitFunction = t.add_resource(awslambda.Function( "CodeBuildInitFunction", Code=awslambda.Code( ZipFile=Join("\n", code) ), Handler="index.handler", Role=GetAtt("LambdaExecutionRole", "Arn"), Runtime="python3.6", MemorySize="128", Timeout="180", )) CodeBuildInit = t.add_resource(CustomCodeBuildTrigger( "CodeBuildInit", ServiceToken=GetAtt(CodeBuildInitFunction, 'Arn'), ProjectName="ghost-pipeline-init-codecommit", DependsOn=BuildProject ))
ToPort="22", CidrIp=PublicCidrIp, ), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort=ApplicationPort, ToPort=ApplicationPort, CidrIp="0.0.0.0/0", ), ], )) ud = Base64(Join('\n', [ "#!/bin/bash", "sudo yum install --enablerepo=epel -y git", "sudo pip install ansible", AnsiblePullCmd, "echo '*/10 * * * * {}' > /etc/cron.d/ansible-pull".format(AnsiblePullCmd) ])) t.add_resource(ec2.Instance( "instance", ImageId="ami-0ff8a91507f77f867", InstanceType="t2.micro", SecurityGroups=[Ref("SecurityGroup")], KeyName=Ref("KeyPair"), UserData=ud, )) t.add_output(Output( "InstancePublicIp",
CidrIp=PublicCidrIp, ), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort=ApplicationPort, ToPort=ApplicationPort, CidrIp="0.0.0.0/0", ), ], )) ud = Base64( Join('\n', [ "#!/bin/bash", "yum install --enablerepo=epel -y git", "pip install ansible", "curl --silent --location https://rpm.nodesource.com/setup_7.x | bash -", "yum -y install nodejs npm", AnsiblePullCmd, "echo '*/10 * * * * {}' > /etc/cron.d/ansible-pull".format( AnsiblePullCmd) ])) t.add_resource( Role("Role", AssumeRolePolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["ec2.amazonaws.com"])) ]))) t.add_resource( InstanceProfile("InstanceProfile", Path="/", Roles=[Ref("Role")]))
def add_resources(self):
    """Create all Kong resources on ``self.template``.

    Adds: public/private ALBs with their security groups, the five
    target groups and matching listeners, the Kong EC2 security group,
    an ingress rule into the Cassandra SG, the ASG-update IAM
    role/profile, the SSM parameters read by the bootstrap script, and
    the Kong launch configuration + auto scaling group.

    Fixes vs. previous revision: "8000m" typo in a GroupDescription,
    a duplicated 8443-8444 ingress rule in KongEC2SecurityGroup, and a
    broken bootstrap shell script (missing space before `]`, `mkdir - p`
    split across two lines, wrong-case `$bootStrapRepository`).
    """
    env_key = self.environment_parameters["ClientEnvironmentKey"]

    # Public (internet-facing) LB security group: admin API 8000-8001
    # and SSH from the admin CIDR, TLS proxy/admin 8443-8444 from the
    # proxy CIDR.
    self.KongPublicLoadBalancerSecurityGroup = self.template.add_resource(
        ec2.SecurityGroup(
            "KongPublicLoadBalancerSecurityGroup",
            GroupDescription=
            "Enable HTTP access on port 8000 and 8001 for Admin",
            VpcId=Ref(self.VpcId),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8000,
                    ToPort=8001,
                    CidrIp=Ref(self.KongAdminAccess),
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8443,
                    ToPort=8444,
                    CidrIp=Ref(self.KongProxyAccess),
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=22,
                    ToPort=22,
                    CidrIp=Ref(self.KongAdminAccess),
                )
            ],
            Tags=self.base_tags + Tags(Name=env_key + "-KongPublicLBSG"),
        ))

    # Internal LB security group: same rule set as the public one.
    self.KongPrivateLoadBalancerSecurityGroup = self.template.add_resource(
        ec2.SecurityGroup(
            "KongPrivateLoadBalancerSecurityGroup",
            GroupDescription="Enable HTTP access on port 8000 and 8001",
            VpcId=Ref(self.VpcId),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8000,
                    ToPort=8001,
                    CidrIp=Ref(self.KongAdminAccess),
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8443,
                    ToPort=8444,
                    CidrIp=Ref(self.KongProxyAccess),
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=22,
                    ToPort=22,
                    CidrIp=Ref(self.KongAdminAccess),
                )
            ],
            Tags=self.base_tags + Tags(Name=env_key + "-KongPrivateLBSG"),
        ))

    # Internet-facing ALB in the public REST subnets.
    self.KongPublicLoadBalancer = self.template.add_resource(
        elb.LoadBalancer(
            "KongPublicLoadBalancer",
            Name=env_key + "-KongPubLB",
            Scheme="internet-facing",
            Subnets=[Ref(self.RESTPubSubnet1), Ref(self.RESTPubSubnet2)],
            SecurityGroups=[Ref(self.KongPublicLoadBalancerSecurityGroup)],
            Tags=self.base_tags + Tags(Name=env_key + "-KongPubLB"),
        ))

    # Target groups: 30s health-check interval, 4 healthy / 3 unhealthy
    # thresholds, matcher HTTP 200.
    self.KongPublicSecureProxyTargetGroup = self.template.add_resource(
        elb.TargetGroup(
            "KongPublicSecureProxyTargetGroup",
            HealthCheckIntervalSeconds="30",
            # NOTE(review): all sibling TGs health-check over HTTPS —
            # confirm HTTP here is intentional.
            HealthCheckProtocol="HTTP",
            HealthCheckTimeoutSeconds="10",
            HealthyThresholdCount="4",
            Matcher=elb.Matcher(HttpCode="200"),
            Name=env_key + "-KongPubSecProxyTG",
            Port="8443",
            Protocol="HTTPS",
            UnhealthyThresholdCount="3",
            VpcId=Ref(self.VpcId),
        ))
    self.KongPublicSecureAdminTargetGroup = self.template.add_resource(
        elb.TargetGroup(
            "KongPublicSecureAdminTargetGroup",
            HealthCheckIntervalSeconds="30",
            HealthCheckProtocol="HTTPS",
            HealthCheckTimeoutSeconds="10",
            HealthyThresholdCount="4",
            Matcher=elb.Matcher(HttpCode="200"),
            Name=env_key + "-KongPubSecAdminTG",
            Port="8444",
            Protocol="HTTPS",
            UnhealthyThresholdCount="3",
            VpcId=Ref(self.VpcId),
        ))
    self.KongPrivateSecureProxyTargetGroup = self.template.add_resource(
        elb.TargetGroup(
            "KongPrivateSecureProxyTargetGroup",
            HealthCheckIntervalSeconds="30",
            HealthCheckProtocol="HTTPS",
            HealthCheckTimeoutSeconds="10",
            HealthyThresholdCount="4",
            Matcher=elb.Matcher(HttpCode="200"),
            Name=env_key + "-KongPrivateSecProxyTG",
            Port="8443",
            Protocol="HTTPS",
            UnhealthyThresholdCount="3",
            VpcId=Ref(self.VpcId),
        ))
    self.KongPrivateSecureAdminTargetGroup = self.template.add_resource(
        elb.TargetGroup(
            "KongPrivateSecureAdminTargetGroup",
            HealthCheckIntervalSeconds="30",
            HealthCheckProtocol="HTTPS",
            HealthCheckTimeoutSeconds="10",
            HealthyThresholdCount="4",
            Matcher=elb.Matcher(HttpCode="200"),
            Name=env_key + "-KongPrivSecAdminTG",
            Port="8444",
            Protocol="HTTPS",
            UnhealthyThresholdCount="3",
            VpcId=Ref(self.VpcId),
        ))
    self.KongPublicNonSecureAdminTargetGroup = self.template.add_resource(
        elb.TargetGroup(
            "KongPublicNonSecureAdminTargetGroup",
            HealthCheckIntervalSeconds="30",
            HealthCheckProtocol="HTTP",
            HealthCheckTimeoutSeconds="10",
            HealthyThresholdCount="4",
            Matcher=elb.Matcher(HttpCode="200"),
            Name=env_key + "-KongPubNonSecAdminTG",
            Port="8001",
            Protocol="HTTP",
            UnhealthyThresholdCount="3",
            VpcId=Ref(self.VpcId),
        ))

    # Internal ALB in the private REST subnets.
    self.KongPrivateLoadBalancer = self.template.add_resource(
        elb.LoadBalancer(
            "KongPrivateLoadBalancer",
            Name=env_key + "-KongPrivLB",
            Scheme="internal",
            Subnets=[Ref(self.RESTPrivSubnet1), Ref(self.RESTPrivSubnet2)],
            SecurityGroups=[
                Ref(self.KongPrivateLoadBalancerSecurityGroup)
            ],
            Tags=Tags(Name=env_key + "-KongPrivLB") + self.base_tags,
        ))

    # HTTPS listeners (proxy 8443, admin 8444) on both LBs, plus a plain
    # HTTP admin listener (8001) on the public LB.
    self.KongPublicSecureProxyListener = self.template.add_resource(
        elb.Listener(
            "KongPublicSecureProxyListener",
            Port="8443",
            Protocol="HTTPS",
            SslPolicy="ELBSecurityPolicy-2016-08",
            LoadBalancerArn=Ref(self.KongPublicLoadBalancer),
            Certificates=[
                Certificate(CertificateArn=Ref(self.KongSSLCertificate))
            ],
            DefaultActions=[
                elb.Action(Type="forward",
                           TargetGroupArn=Ref(
                               self.KongPublicSecureProxyTargetGroup))
            ]))
    self.KongPublicSecureAdminListener = self.template.add_resource(
        elb.Listener(
            "KongPublicSecureAdminListener",
            Port="8444",
            Protocol="HTTPS",
            SslPolicy="ELBSecurityPolicy-2016-08",
            LoadBalancerArn=Ref(self.KongPublicLoadBalancer),
            Certificates=[
                Certificate(CertificateArn=Ref(self.KongSSLCertificate))
            ],
            DefaultActions=[
                elb.Action(Type="forward",
                           TargetGroupArn=Ref(
                               self.KongPublicSecureAdminTargetGroup))
            ]))
    self.KongPrivateSecureProxyListener = self.template.add_resource(
        elb.Listener(
            "KongPrivateSecureProxyListener",
            Port="8443",
            Protocol="HTTPS",
            SslPolicy="ELBSecurityPolicy-2016-08",
            LoadBalancerArn=Ref(self.KongPrivateLoadBalancer),
            Certificates=[
                Certificate(CertificateArn=Ref(self.KongSSLCertificate))
            ],
            DefaultActions=[
                elb.Action(Type="forward",
                           TargetGroupArn=Ref(
                               self.KongPrivateSecureProxyTargetGroup))
            ]))
    self.KongPrivateSecureAdminListener = self.template.add_resource(
        elb.Listener(
            "KongPrivateSecureAdminListener",
            Port="8444",
            Protocol="HTTPS",
            SslPolicy="ELBSecurityPolicy-2016-08",
            LoadBalancerArn=Ref(self.KongPrivateLoadBalancer),
            Certificates=[
                Certificate(CertificateArn=Ref(self.KongSSLCertificate))
            ],
            DefaultActions=[
                elb.Action(Type="forward",
                           TargetGroupArn=Ref(
                               self.KongPrivateSecureAdminTargetGroup))
            ]))
    self.KongPublicNonSecureAdminListener = self.template.add_resource(
        elb.Listener("KongPublicNonSecureAdminListener",
                     Port="8001",
                     Protocol="HTTP",
                     LoadBalancerArn=Ref(self.KongPublicLoadBalancer),
                     DefaultActions=[
                         elb.Action(
                             Type="forward",
                             TargetGroupArn=Ref(
                                 self.KongPublicNonSecureAdminTargetGroup))
                     ]))

    # Instance security group: only the two LB security groups may reach
    # the Kong ports. (A verbatim duplicate of the private 8443-8444
    # rule was removed here.)
    self.KongEC2SecurityGroup = self.template.add_resource(
        ec2.SecurityGroup(
            "KongEC2SecurityGroup",
            GroupDescription=
            "Enable SSH access and HTTP access on the inbound Port",
            VpcId=Ref(self.VpcId),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8000,
                    ToPort=8001,
                    SourceSecurityGroupId=Ref(
                        self.KongPublicLoadBalancerSecurityGroup),
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8443,
                    ToPort=8444,
                    SourceSecurityGroupId=Ref(
                        self.KongPublicLoadBalancerSecurityGroup),
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8000,
                    ToPort=8001,
                    SourceSecurityGroupId=Ref(
                        self.KongPrivateLoadBalancerSecurityGroup),
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort=8443,
                    ToPort=8444,
                    SourceSecurityGroupId=Ref(
                        self.KongPrivateLoadBalancerSecurityGroup),
                )
            ],
            Tags=self.base_tags + Tags(Name=env_key + "-KongEc2SG"),
        ))

    # Let Kong instances reach Cassandra on its configured port.
    self.CassandraSGKongCommunicationIngress = self.template.add_resource(
        ec2.SecurityGroupIngress(
            "CassandraSGKongCommunicationIngress",
            DependsOn=self.KongEC2SecurityGroup,
            GroupId=Ref(self.CassandraSG),
            IpProtocol="tcp",
            FromPort=Ref(self.CassandraPort),
            ToPort=Ref(self.CassandraPort),
            SourceSecurityGroupId=Ref(self.KongEC2SecurityGroup),
        ))

    # IAM role/profile letting instances describe EC2 and their CFN
    # stack resource (used by cfn helpers).
    self.ASGUpdateRole = self.template.add_resource(
        Role("ASGUpdateRole",
             AssumeRolePolicyDocument=Policy(Statement=[
                 Statement(Effect=Allow,
                           Action=[AssumeRole],
                           Principal=Principal("Service",
                                               ["ec2.amazonaws.com"]))
             ]),
             Policies=[
                 iam.Policy(
                     PolicyName="ASGUpdateRole",
                     PolicyDocument={
                         "Version": "2012-10-17",
                         "Statement": [{
                             "Action": [
                                 "ec2:Describe*",
                                 "cloudformation:DescribeStackResource"
                             ],
                             "Effect": "Allow",
                             "Resource": "*"
                         }]
                     })
             ]))
    self.ASGUpdateProfile = self.template.add_resource(
        InstanceProfile(
            "ASGUpdateProfile",
            Path="/",
            Roles=[Ref(self.ASGUpdateRole)],
        ))

    # SSM parameters read by the bootstrap/provision script.
    self.KongVersionSSMParameter = self.template.add_resource(
        SSMParameter(
            "KongVersionSSMParameter",
            Description="The Kong Version",
            Name=env_key + "-KongVersion",
            Type="String",
            Value=Ref(self.KongVersionSSMParameterValue),
        ))
    self.KongExecuteMigrationsSSMParameter = self.template.add_resource(
        SSMParameter(
            "KongExecuteMigrationsSSMParameter",
            Description=
            "Flag to determine if Kong should Execute DB migrations",
            Name=env_key + "-KongExecuteMigrations",
            Type="String",
            Value=Ref(self.KongExecuteMigrationsSSMParameterValue),
        ))
    self.CassandraSeedListSSMParameter = self.template.add_resource(
        SSMParameter(
            "CassandraSeedListSSMParameter",
            Description="The Cassandra Seed Node List",
            Name=env_key + "-CassandraSeedList",
            Type="String",
            Value=Ref(self.CassandraSeedListSSMParameterValue),
        ))

    # Launch configuration: bootstrap script syncs provisioning assets
    # from the repository named in SSM and runs kong-provision.sh.
    self.KongLaunchConfig = self.template.add_resource(
        LaunchConfiguration(
            "KongLaunchConfig",
            ImageId="ami-a4c7edb2",
            AssociatePublicIpAddress="false",
            InstanceType=Ref(self.KongInstanceType),
            IamInstanceProfile=Ref(self.KongIAMInstanceProfile),
            KeyName=Ref(self.KongKeyName),
            SecurityGroups=[Ref(self.KongEC2SecurityGroup)],
            UserData=Base64(
                Join('', [
                    "#!/bin/bash\n",
                    "yum update -y \n",
                    "pip install --upgrade pip \n",
                    "pip install --upgrade awscli \n",
                    "ClientCode=\"" + env_key + "\" \n",
                    "REGION=\"" +
                    self.environment_parameters["EnvironmentRegion"] +
                    "\" \n",
                    "BootstrapRepositorySSMKey=\"-bootstrapRepository\" \n",
                    "BootstrapRepositorySSMKey=${ClientCode}${BootstrapRepositorySSMKey} \n",
                    "echo $BootstrapRepositorySSMKey \n",
                    "bootstrapRepository=\"$(aws ssm get-parameter --name ${BootstrapRepositorySSMKey} --region ${REGION} --output text --query 'Parameter.Value')\" \n",
                    "echo $bootstrapRepository \n",
                    "keyprefix=\"/bootstrap/rest/bash\" \n",
                    "localpath=\"/tmp/bootstrap\" \n",
                    # Fixed: space required before "]", and "mkdir -p"
                    # takes the path on the same command line (was
                    # "mkdir - p" followed by a stray path line).
                    "if [ ! -d \"${localpath}\" ]; then \n",
                    "mkdir -p \"${localpath}\" \n",
                    "fi \n",
                    "FullS3Path=${bootstrapRepository}${keyprefix} \n",
                    "echo $FullS3Path \n",
                    "aws s3 sync s3://$FullS3Path $localpath \n",
                    "chmod u+x /tmp/bootstrap/kong-provision.sh \n",
                    "/bin/bash /tmp/bootstrap/kong-provision.sh \n",
                ]))))

    # Single-instance ASG registered with all five target groups.
    self.KongScalingGroup = self.template.add_resource(
        AutoScalingGroup(
            "KongScalingGroup",
            AutoScalingGroupName=env_key + "-KongAutoScalingGroup",
            AvailabilityZones=["us-east-1a"],
            LaunchConfigurationName=Ref(self.KongLaunchConfig),
            VPCZoneIdentifier=[Ref(self.RESTPrivSubnet1)],
            MinSize="1",
            MaxSize="1",
            DesiredCapacity="1",
            TargetGroupARNs=[
                Ref(self.KongPublicNonSecureAdminTargetGroup),
                Ref(self.KongPrivateSecureAdminTargetGroup),
                Ref(self.KongPrivateSecureProxyTargetGroup),
                Ref(self.KongPublicSecureAdminTargetGroup),
                Ref(self.KongPublicSecureProxyTargetGroup)
            ],
            Tags=[
                AutoScalingTag("Name", env_key + "-Kong-Ec2", True),
                AutoScalingTag(
                    "Environment",
                    self.environment_parameters["EnvironmentName"], True),
                AutoScalingTag(
                    "ResourceOwner",
                    self.environment_parameters["ResourceOwner"], True),
                AutoScalingTag("ClientCode", env_key, True),
            ],
        ))
Split, Sub, Template, ec2 ) t = Template() t.add_description("Ganesh Effective DevOps in AWS: ALB for the ECS Cluster") t.add_resource(ec2.SecurityGroup( "LoadBalancerSecurityGroup", GroupDescription="Web load balancer security group.", VpcId=ImportValue( Join( "-", [Select(0, Split("-", Ref("AWS::StackName"))), "cluster-vpc-id"] ) ), SecurityGroupIngress=[ ec2.SecurityGroupRule( IpProtocol="tcp", FromPort="3000", ToPort="3000", CidrIp="0.0.0.0/0", ), ], )) t.add_resource(elb.LoadBalancer( "LoadBalancer", Scheme="internet-facing",
def create_template(self) -> None:
    """Create template (main function called by Stacker).

    Defines the EKS control plane: its security group, the EKS service
    role (with the ELB service-linked-role permissions EKS needs), and
    the cluster itself, plus exported outputs consumed by downstream
    stacks (worker nodes, kubectl configuration).
    """
    template = self.template
    # set_version/set_description are the non-deprecated spellings of
    # add_version/add_description, matching the other blueprints here.
    template.set_version("2010-09-09")
    template.set_description("Kubernetes Master via EKS - V1.0.0")

    # Resources
    # Security group shared between the control plane and worker nodes;
    # the kubernetes.io/cluster tag marks it as owned by this cluster.
    ccpsecuritygroup = template.add_resource(
        ec2.SecurityGroup(
            "ClusterControlPlaneSecurityGroup",
            GroupDescription="Cluster communication with worker nodes",
            Tags=[
                {
                    "Key": Sub("kubernetes.io/cluster/${EksClusterName}"),
                    "Value": "owned",
                },
                {
                    "Key": "Product",
                    "Value": "Kubernetes"
                },
                {
                    "Key": "Project",
                    "Value": "eks"
                },
                {
                    "Key": "Name",
                    "Value": Sub("${EksClusterName}-sg-worker-nodes")
                },
            ],
            VpcId=self.variables["VPC"].ref,
        ))
    template.add_output(
        Output(
            ccpsecuritygroup.title,
            Description="Cluster communication with worker nodes",
            Export=Export(
                Sub("${AWS::StackName}-ControlPlaneSecurityGroup")),
            Value=ccpsecuritygroup.ref(),
        ))

    # Service role assumed by EKS; also allowed to create the ELB
    # service-linked role so the cluster can provision load balancers.
    eksservicerole = template.add_resource(
        iam.Role(
            "EksServiceRole",
            AssumeRolePolicyDocument=make_simple_assume_policy(
                "eks.amazonaws.com"),
            ManagedPolicyArns=[
                IAM_POLICY_ARN_PREFIX + "AmazonEKSClusterPolicy"
            ],
            Policies=[
                iam.Policy(
                    PolicyName="EksServiceRolePolicy",
                    PolicyDocument=PolicyDocument(Statement=[
                        Statement(
                            Action=[
                                awacs.iam.CreateServiceLinkedRole,
                                awacs.iam.PutRolePolicy,
                            ],
                            Condition=Condition(
                                StringLike(
                                    "iam:AWSServiceName",
                                    "elasticloadbalancing.amazonaws.com",
                                )),
                            Effect=Allow,
                            Resource=[
                                Sub("arn:aws:iam::${AWS::AccountId}:role/"
                                    "aws-service-role/"
                                    "elasticloadbalancing.amazonaws.com/"
                                    "AWSServiceRoleForElasticLoadBalancing*"
                                    )
                            ],
                        )
                    ]),
                )
            ],
        ))

    # The EKS cluster itself, placed in the configured VPC subnets.
    ekscluster = template.add_resource(
        eks.Cluster(
            "EksCluster",
            Name=self.variables["EksClusterName"].ref,
            Version=self.variables["EksVersion"].ref,
            RoleArn=eksservicerole.get_att("Arn"),
            ResourcesVpcConfig=eks.ResourcesVpcConfig(
                SecurityGroupIds=[ccpsecuritygroup.ref()],
                SubnetIds=self.variables["EksSubnets"].ref,
            ),
        ))
    template.add_output(
        Output(
            "%sName" % ekscluster.title,
            Description="EKS Cluster Name",
            Export=Export(
                Sub("${AWS::StackName}-%sName" % ekscluster.title)),
            Value=ekscluster.ref(),
        ))
    template.add_output(
        Output(
            "%sEndpoint" % ekscluster.title,
            Description="EKS Cluster Endpoint",
            Export=Export(
                Sub("${AWS::StackName}-%sEndpoint" % ekscluster.title)),
            Value=ekscluster.get_att("Endpoint"),
        ))

    # Additional Outputs
    template.add_output(
        Output(
            "VpcId",
            Description="EKS Cluster VPC Id",
            Export=Export(Sub("${AWS::StackName}-VpcId")),
            Value=self.variables["VPC"].ref,
        ))
    template.add_output(
        Output(
            "Subnets",
            Description="EKS Cluster Subnets",
            Export=Export(Sub("${AWS::StackName}-Subnets")),
            Value=Join(",", self.variables["EksSubnets"].ref),
        ))
def _create_project_stack(self):
    """Create or update the project's base CloudFormation stack.

    Probes for an existing stack via describe_stacks: if it exists the
    stack is updated, otherwise created. The template defines the
    project S3 bucket, an HTTP API Gateway with a default passthrough
    lambda, its integration/route/stage/deployment, and exports the API
    id for service stacks to import.
    """
    update = True
    try:
        self._cloudformation_client.describe_stacks(StackName=self._stack_name())
    except ClientError as e:
        # Any error other than "stack does not exist" is unexpected.
        # Bare `raise` preserves the original traceback (was `raise e`).
        if 'does not exist' not in str(e):
            raise
        update = False
    self.info('Creating project stack')

    template = Template()
    template.set_version('2010-09-09')

    # Tunables surfaced as stack parameters.
    memory_size = template.add_parameter(Parameter(
        f'{self._stack_name()}MemorySize',
        Type=NUMBER,
        Default='3008',
    ))
    timeout_gateway = template.add_parameter(Parameter(
        f'{self._stack_name()}GatewayTimeout',
        Type=NUMBER,
        Default='30'
    ))

    # Project bucket; objects under tmp/ expire after one day.
    template.add_resource(Bucket(
        inflection.camelize(inflection.underscore(self._bucket)),
        BucketName=self._bucket,
        AccessControl='Private',
        LifecycleConfiguration=LifecycleConfiguration(Rules=[
            LifecycleRule(Prefix='tmp', Status='Enabled', ExpirationInDays=1)
        ])
    ))

    # HTTP API with a default lambda that echoes the event back.
    api = template.add_resource(Api(
        self._rest_api_name(),
        Name=f'{inflection.humanize(self._project)} {inflection.humanize(self._env)} API',
        ProtocolType='HTTP'
    ))

    role_title = f'{self._rest_api_name()}Role'
    self._add_role(role_title, template)

    default_lambda = template.add_resource(Function(
        f'{self._rest_api_name()}Function',
        FunctionName=self._rest_api_name(),
        Code=Code(
            ZipFile='\n'.join([
                'def handler(event, context):',
                ' return event'
            ])
        ),
        Handler='index.handler',
        Role=GetAtt(role_title, 'Arn'),
        Runtime='python3.7',
        MemorySize=Ref(memory_size),
        Timeout=Ref(timeout_gateway)
    ))

    # Proxy integration routing every request to the default lambda.
    integration = template.add_resource(Integration(
        self._integration_name(),
        ApiId=Ref(api),
        IntegrationType='AWS_PROXY',
        PayloadFormatVersion='2.0',
        IntegrationUri=Join('', [
            'arn:aws:lambda:',
            self._region,
            ':',
            self._account_id,
            ':function:',
            Ref(default_lambda),
        ]),
        DependsOn=f'{self._rest_api_name()}Function'
    ))
    template.add_resource(Route(
        self._route_name(),
        ApiId=Ref(api),
        RouteKey='$default',
        AuthorizationType='NONE',
        Target=Join('/', ['integrations', Ref(integration)]),
        DependsOn=[integration]
    ))
    template.add_resource(Stage(
        f'{self._rest_api_name()}Stage',
        StageName='v2',
        ApiId=Ref(api),
        AutoDeploy=True
    ))
    template.add_resource(Deployment(
        f'{self._rest_api_name()}Deployment',
        ApiId=Ref(api),
        StageName='v2',
        DependsOn=[
            f'{self._rest_api_name()}Stage',
            self._route_name(),
            self._integration_name(),
            self._rest_api_name(),
        ]
    ))

    # Export the API id so per-service stacks can ImportValue it.
    template.add_output([
        Output(
            self._rest_api_reference(),
            Export=Export(self._rest_api_reference()),
            Value=Ref(api)
        ),
    ])

    if update:
        self._update_stack(self._stack_name(), template)
    else:
        self._create_stack(self._stack_name(), template)
"set -o errexit\n", "export postscript='", ref_postscript, ",\n", "export region='", ref_region, "'\n", "export stack='", ref_stack_name, "'\n", "export resource='", resource ,"'\n", "export ambari_server='", ambari_server ,"'\n", "export java_provider=", ref_java_provider ,"\n", "export java_version=", ref_java_version ,"\n", "export install_ambari_agent=", install_ambari_agent ,"\n", "export install_ambari_server=", install_ambari_server ,"\n", ] return exports + bootstrap_script_body.splitlines(True) AmbariNode = t.add_resource(ec2.Instance( "AmbariNode", UserData=Base64(Join("", my_bootstrap_script('AmbariNode','true','true','127.0.0.1'))), ImageId=FindInMap("CENTOS7", Ref("AWS::Region"), "AMI"), BlockDeviceMappings=If( "AmbariUseEBSBool", my_block_device_mappings_ebs(ref_disk_ambari_ebs_diskcount,"/dev/sd",ref_disk_ambari_ebs_volumesize,"gp2"), my_block_device_mappings_ephemeral(24,"/dev/sd")), CreationPolicy=CreationPolicy( ResourceSignal=ResourceSignal( Count=1, Timeout="PT30M" )), KeyName=Ref(KeyName), IamInstanceProfile=Ref(AmbariInstanceProfile), InstanceType=Ref(AmbariInstanceType), NetworkInterfaces=[ ec2.NetworkInterfaceProperty( DeleteOnTermination="true",
def _deploy_service(self, service: ff.Service):
    """Build and deploy the CloudFormation stack for a single service.

    Packages the service code, then assembles a template containing:
    sync + async lambdas, API Gateway routes/integration, optional error
    alarms with an SNS alert topic, an SQS queue + DLQ with an async
    event-source mapping, and SNS subscriptions wiring this context's
    queue to its own and other contexts' topics. Finally creates or
    updates the stack and runs DDL.
    """
    context = self._context_map.get_context(service.name)
    self._package_and_deploy_code(context)

    template = Template()
    template.set_version('2010-09-09')

    # Lambda tunables exposed as stack parameters (defaults: max memory,
    # 30s for the gateway-facing lambda, 15 min for the async worker).
    memory_size = template.add_parameter(Parameter(
        f'{self._lambda_resource_name(service.name)}MemorySize',
        Type=NUMBER,
        Default='3008'
    ))
    timeout_gateway = template.add_parameter(Parameter(
        f'{self._lambda_resource_name(service.name)}GatewayTimeout',
        Type=NUMBER,
        Default='30'
    ))
    timeout_async = template.add_parameter(Parameter(
        f'{self._lambda_resource_name(service.name)}AsyncTimeout',
        Type=NUMBER,
        Default='900'
    ))

    role_title = f'{self._lambda_resource_name(service.name)}ExecutionRole'
    self._add_role(role_title, template)

    # Sync (API-facing) lambda. VpcConfig is attached only when both
    # security groups and subnets are configured.
    params = {
        'FunctionName': f'{self._service_name(service.name)}Sync',
        'Code': Code(
            S3Bucket=self._bucket,
            S3Key=self._code_key
        ),
        'Handler': 'handlers.main',
        'Role': GetAtt(role_title, 'Arn'),
        'Runtime': 'python3.7',
        'MemorySize': Ref(memory_size),
        'Timeout': Ref(timeout_gateway),
        'Environment': self._lambda_environment(context)
    }
    if self._security_group_ids and self._subnet_ids:
        params['VpcConfig'] = VPCConfig(
            SecurityGroupIds=self._security_group_ids,
            SubnetIds=self._subnet_ids
        )
    api_lambda = template.add_resource(Function(
        f'{self._lambda_resource_name(service.name)}Sync',
        **params
    ))

    # Route prefix derived from the context name; proxy_route forwards
    # all sub-paths to the same lambda.
    route = inflection.dasherize(context.name)
    proxy_route = f'{route}/{{proxy+}}'

    # Allow API Gateway (any stage/method under this route) to invoke
    # the sync lambda.
    template.add_resource(Permission(
        f'{self._lambda_resource_name(service.name)}SyncPermission',
        Action='lambda:InvokeFunction',
        FunctionName=f'{self._service_name(service.name)}Sync',
        Principal='apigateway.amazonaws.com',
        SourceArn=Join('', [
            'arn:aws:execute-api:',
            self._region,
            ':',
            self._account_id,
            ':',
            ImportValue(self._rest_api_reference()),
            '/*/*/',
            route,
            '*'
        ]),
        DependsOn=api_lambda
    ))

    # Async (queue-driven) lambda: same code bundle, longer timeout.
    params = {
        'FunctionName': f'{self._service_name(service.name)}Async',
        'Code': Code(
            S3Bucket=self._bucket,
            S3Key=self._code_key
        ),
        'Handler': 'handlers.main',
        'Role': GetAtt(role_title, 'Arn'),
        'Runtime': 'python3.7',
        'MemorySize': Ref(memory_size),
        'Timeout': Ref(timeout_async),
        'Environment': self._lambda_environment(context)
    }
    if self._security_group_ids and self._subnet_ids:
        params['VpcConfig'] = VPCConfig(
            SecurityGroupIds=self._security_group_ids,
            SubnetIds=self._subnet_ids
        )
    async_lambda = template.add_resource(Function(
        f'{self._lambda_resource_name(service.name)}Async',
        **params
    ))

    # Proxy integration into the project-level HTTP API (imported from
    # the project stack's export).
    integration = template.add_resource(Integration(
        self._integration_name(context.name),
        ApiId=ImportValue(self._rest_api_reference()),
        PayloadFormatVersion='2.0',
        IntegrationType='AWS_PROXY',
        IntegrationUri=Join('', [
            'arn:aws:lambda:',
            self._region,
            ':',
            self._account_id,
            ':function:',
            Ref(api_lambda),
        ]),
    ))
    # Base route (exact prefix) and proxy route (all sub-paths) both
    # target the same integration.
    template.add_resource(Route(
        f'{self._route_name(context.name)}Base',
        ApiId=ImportValue(self._rest_api_reference()),
        RouteKey=f'ANY /{route}',
        AuthorizationType='NONE',
        Target=Join('/', ['integrations', Ref(integration)]),
        DependsOn=integration
    ))
    template.add_resource(Route(
        f'{self._route_name(context.name)}Proxy',
        ApiId=ImportValue(self._rest_api_reference()),
        RouteKey=f'ANY /{proxy_route}',
        AuthorizationType='NONE',
        Target=Join('/', ['integrations', Ref(integration)]),
        DependsOn=integration
    ))

    # Error alarms / subscriptions
    # Optional: only when 'errors' is configured. Creates an SNS topic,
    # error alarms for both lambdas, and an email subscription if set.
    # NOTE(review): alarm names use context.name while the lambda
    # FunctionNames above use service.name — confirm these are the same
    # value for every service, otherwise the alarms watch the wrong name.
    if 'errors' in self._aws_config:
        alerts_topic = template.add_resource(Topic(
            self._alert_topic_name(service.name),
            TopicName=self._alert_topic_name(service.name)
        ))
        self._add_error_alarm(template, f'{self._service_name(context.name)}Sync', context.name, alerts_topic)
        self._add_error_alarm(template, f'{self._service_name(context.name)}Async', context.name, alerts_topic)

        if 'email' in self._aws_config.get('errors'):
            template.add_resource(SubscriptionResource(
                self._alarm_subscription_name(context.name),
                Protocol='email',
                Endpoint=self._aws_config.get('errors').get('email').get('recipients'),
                TopicArn=self._alert_topic_arn(context.name),
                DependsOn=[alerts_topic]
            ))

    # Queues / Topics
    # Group declared subscriptions by the context that publishes them.
    subscriptions = {}
    for subscription in self._get_subscriptions(context):
        if subscription['context'] not in subscriptions:
            subscriptions[subscription['context']] = []
        subscriptions[subscription['context']].append(subscription)

    # Dead-letter queue; 905s visibility matches the async lambda's 900s
    # timeout plus slack, retention is the SQS maximum (14 days).
    dlq = template.add_resource(Queue(
        f'{self._queue_name(context.name)}Dlq',
        QueueName=f'{self._queue_name(context.name)}Dlq',
        VisibilityTimeout=905,
        ReceiveMessageWaitTimeSeconds=20,
        MessageRetentionPeriod=1209600
    ))
    self._queue_policy(template, dlq, f'{self._queue_name(context.name)}Dlq', subscriptions)

    # Main work queue with redrive to the DLQ.
    queue = template.add_resource(Queue(
        self._queue_name(context.name),
        QueueName=self._queue_name(context.name),
        VisibilityTimeout=905,
        ReceiveMessageWaitTimeSeconds=20,
        MessageRetentionPeriod=1209600,
        RedrivePolicy=RedrivePolicy(
            deadLetterTargetArn=GetAtt(dlq, 'Arn'),
            maxReceiveCount=1000
        ),
        DependsOn=dlq
    ))
    self._queue_policy(template, queue, self._queue_name(context.name), subscriptions)

    # Drive the async lambda from the work queue, one message at a time.
    template.add_resource(EventSourceMapping(
        f'{self._lambda_resource_name(context.name)}AsyncMapping',
        BatchSize=1,
        Enabled=True,
        EventSourceArn=GetAtt(queue, 'Arn'),
        FunctionName=f'{self._service_name(service.name)}Async',
        DependsOn=[queue, async_lambda]
    ))
    topic = template.add_resource(Topic(
        self._topic_name(context.name),
        TopicName=self._topic_name(context.name)
    ))

    # Subscribe this context's queue to each relevant topic, filtered by
    # message name. Own-context subscriptions depend on the topic created
    # above; foreign-context topics are created on demand.
    for context_name, list_ in subscriptions.items():
        if context_name == context.name and len(list_) > 0:
            template.add_resource(SubscriptionResource(
                self._subscription_name(context_name),
                Protocol='sqs',
                Endpoint=GetAtt(queue, 'Arn'),
                TopicArn=self._topic_arn(context.name),
                FilterPolicy={
                    '_name': [x['name'] for x in list_],
                },
                RedrivePolicy={
                    'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                },
                DependsOn=[queue, dlq, topic]
            ))
        elif len(list_) > 0:
            if context_name not in self._context_map.contexts:
                self._find_or_create_topic(context_name)
            template.add_resource(SubscriptionResource(
                self._subscription_name(context.name, context_name),
                Protocol='sqs',
                Endpoint=GetAtt(queue, 'Arn'),
                TopicArn=self._topic_arn(context_name),
                FilterPolicy={
                    '_name': [x['name'] for x in list_]
                },
                RedrivePolicy={
                    'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                },
                DependsOn=[queue, dlq]
            ))

    # Create or update the stack: describe_stacks raising "does not
    # exist" means create; any other ClientError is propagated.
    self.info('Deploying stack')
    stack_name = self._stack_name(context.name)
    try:
        self._cloudformation_client.describe_stacks(StackName=stack_name)
        self._update_stack(self._stack_name(context.name), template)
    except ClientError as e:
        if f'Stack with id {stack_name} does not exist' in str(e):
            self._create_stack(self._stack_name(context.name), template)
        else:
            raise e

    self._execute_ddl(context)
    self.info('Done')
CidrIp=PublicCidrIp, ), ec2.SecurityGroupRule( IpProtocol="tcp", FromPort=ApplicationPort, ToPort=ApplicationPort, CidrIp="0.0.0.0/0", ), ], )) ud = Base64( Join('\n', [ "#!/bin/bash", "yum remove java-1.7.0-openjdk -y", "yum install java-1.8.0-openjdk -y", "yum install --enablerepo=epel -y git", "pip install ansible", AnsiblePullCmd, "echo '*/10 * * * * {}' > /etc/cron.d/ansible-pull". format(AnsiblePullCmd) ])) t.add_resource( Role("Role", AssumeRolePolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Action=[AssumeRole], Principal=Principal("Service", ["ec2.amazonaws.com"])) ]))) t.add_resource( InstanceProfile("InstanceProfile", Path="/", Roles=[Ref("Role")]))
"AWS CloudFormation Sample Template RDS_Snapshot_On_Delete: Sample " "template showing how to create an RDS DBInstance that is snapshotted on " "stack deletion. **WARNING** This template creates an Amazon RDS database " "instance. When the stack is deleted a database snpshot will be left in " "your account. You will be billed for the AWS resources used if you " "create a stack from this template.") MyDB = t.add_resource( DBInstance( "MyDB", Engine="MySQL", MasterUsername="******", MasterUserPassword="******", AllocatedStorage="5", DBInstanceClass="db.m1.small", DBName="MyDatabase", )) JDBCConnectionString = t.add_output( Output( "JDBCConnectionString", Description="JDBC connection string for the database", Value=Join("", [ "jdbc:mysql://", GetAtt(MyDB, "Endpoint.Address"), ":", GetAtt(MyDB, "Endpoint.Port"), "/MyDatabase" ]), )) print(t.to_json()) write_to_file("templates/rds-mysql.yaml", t.to_yaml(clean_up=True))