def test_parameterized_codec_b64(self):
    """parameterized_codec with base64=True must return a Base64-wrapped Join."""
    result = parameterized_codec(u'Test {{Interpolation}} Here', True)
    # The returned object's type itself must be Base64, not merely
    # something that renders equivalently.
    self.assertEqual(Base64, result.__class__)
    reference = Base64(
        Join(u'', [u'Test ', {u'Ref': u'Interpolation'}, u' Here']))
    self.assertTemplateEqual(reference, result)
def test_parameterized_codec_plain(self):
    """Un-encoded codec output should serialize to a plain Fn::Join dict."""
    result = parameterized_codec('Test {{Interpolation}} Here', False)
    reference = {
        'Fn::Join': ['', ['Test ', {'Ref': 'Interpolation'}, ' Here']]
    }
    # Compare the raw .data payload rather than the helper object.
    self.assertEqual(reference, result.data)
def test_parameterized_codec_b64(self):
    """Base64-encoded codec output should nest the Join inside Fn::Base64."""
    result = parameterized_codec('Test {{Interpolation}} Here', True)
    reference = {
        'Fn::Base64': {
            'Fn::Join': ['', ['Test ', {'Ref': 'Interpolation'}, ' Here']]
        }
    }
    # Compare the raw .data payload rather than the helper object.
    self.assertEqual(reference, result.data)
def test_parameterized_codec_plain_no_interpolation(self):
    """Template text without {{...}} markers should pass through unchanged."""
    raw = u'Test Without Interpolation Here'
    result = parameterized_codec(raw, False)
    # Still wrapped in GenericHelperFn even with nothing to interpolate.
    self.assertEqual(GenericHelperFn, result.__class__)
    self.assertTemplateEqual(raw, result)
def test_parameterized_codec_plain(self):
    """Un-encoded codec output is a GenericHelperFn wrapping a Join."""
    result = parameterized_codec(u'Test {{Interpolation}} Here', False)
    # Without base64 encoding the wrapper type is GenericHelperFn.
    self.assertEqual(GenericHelperFn, result.__class__)
    reference = Join(u'', [u'Test ', {u'Ref': u'Interpolation'}, u' Here'])
    self.assertTemplateEqual(reference, result)
class AmiLookup(Blueprint):
    """Extends Stacker Blueprint class.

    Deploys a Lambda-backed AMI-lookup custom resource: an IAM role allowing
    ec2:DescribeImages, the Lambda function itself, and a managed policy
    (exported) permitting other stacks to invoke it.
    """

    # Inline Lambda source, read at class-definition (import) time.
    # NOTE(review): the open() handle is never closed — harmless for a
    # one-shot read at import, but worth confirming against project style.
    ami_lookup_src = parameterized_codec(
        open(path.join(AWS_LAMBDA_DIR, 'ami_lookup', 'index.py'), 'r').read(),
        False  # disable base64 encoding
    )

    VARIABLES = {
        'AMILookupLambdaFunction': {'type': AWSHelperFn,
                                    'description': 'Lambda function code',
                                    'default': ami_lookup_src},
        'CustomerName': {'type': CFNString,
                         'description': 'The nickname for the new customer. '
                                        'Must be all lowercase letters, '
                                        'should not contain spaces or special '
                                        'characters, nor should it include '
                                        'any part of EnvironmentName.',
                         'allowed_pattern': '[-_ a-z]*',
                         'default': ''},
        'EnvironmentName': {'type': CFNString,
                            'description': 'Name of Environment',
                            'default': 'common'}
    }

    def add_resources_and_outputs(self):
        """Add resources and outputs to template."""
        template = self.template
        variables = self.get_variables()

        # Execution role for the lookup Lambda: basic logging plus
        # read-only DescribeImages access.
        amilookuplambdarole = template.add_resource(
            iam.Role(
                'AMILookupLambdaRole',
                AssumeRolePolicyDocument=iam_policies.assumerolepolicy(
                    'lambda'
                ),
                ManagedPolicyArns=[
                    IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole'
                ],
                Policies=[
                    iam.Policy(
                        PolicyName=Join('-', ['amilookup-lambda-role',
                                              variables['EnvironmentName'].ref,
                                              variables['CustomerName'].ref]),
                        PolicyDocument=Policy(
                            Version='2012-10-17',
                            Statement=[
                                Statement(
                                    Action=[awacs.ec2.DescribeImages],
                                    Effect=Allow,
                                    Resource=['*'],
                                    Sid='AMIAccess'
                                )
                            ]
                        )
                    )
                ]
            )
        )

        # If uploaded to S3 via stacker hook, use that URL; otherwise fall back
        # to the inline code
        if ('lambda' in self.context.hook_data and
                'CoreAMILookup' in self.context.hook_data['lambda']):
            code = self.context.hook_data['lambda']['CoreAMILookup']
        else:
            code = awslambda.Code(
                ZipFile=variables['AMILookupLambdaFunction']
            )

        amilookup = template.add_resource(
            awslambda.Function(
                'AMILookup',
                Description='Find latest AMI for given platform',
                Code=code,
                Handler='index.handler',
                Role=GetAtt(amilookuplambdarole, 'Arn'),
                Runtime='python2.7',
                Timeout=60
            )
        )

        # Export name/ARN so other stacks can wire up the custom resource.
        template.add_output(
            Output(
                'FunctionName',
                Description='AMI lookup function name',
                Export=Export(
                    Sub('${AWS::StackName}-FunctionName')
                ),
                Value=Ref(amilookup)
            )
        )
        template.add_output(
            Output(
                'FunctionArn',
                Description='AMI lookup function Arn',
                Export=Export(
                    Sub('${AWS::StackName}-FunctionArn')
                ),
                Value=GetAtt(amilookup, 'Arn')
            )
        )
        template.add_output(
            Output(
                'FunctionRegion',
                Description='AMI lookup function region',
                Value=Ref('AWS::Region')
            )
        )

        # IAM Instance Roles and Profiles
        amilookupaccesspolicy = template.add_resource(
            iam.ManagedPolicy(
                'AmiLookupAccessPolicy',
                Description='Allows invocation of the AMI lookup lambda '
                            'function.',
                Path='/',
                PolicyDocument=Policy(
                    Version='2012-10-17',
                    Statement=[
                        Statement(
                            Action=[awacs.awslambda.InvokeFunction],
                            Effect=Allow,
                            Resource=[GetAtt(amilookup, 'Arn')]
                        )
                    ]
                )
            )
        )
        template.add_output(
            Output(
                'AccessPolicy',
                Description='Policy allowing use of the AMI lookup lambda '
                            'function',
                Export=Export(
                    Sub('${AWS::StackName}-%s' % 'AccessPolicy')
                ),
                Value=Ref(amilookupaccesspolicy)
            )
        )

    def create_template(self):
        """Create template (main function called by Stacker)."""
        self.template.add_version('2010-09-09')
        self.template.add_description(
            'Onica Platform - Core'
            ' - AMI Lookup - {}'.format(version.version())
        )
        self.add_resources_and_outputs()
class VpnServer(Blueprint):
    """Blueprint for setting up Sturdy Networks core AWS environment.

    Builds a single-instance VPN server ASG: IAM role/profile, optional
    AMI + subnet lookup custom resources, cloud-config userdata that
    bootstraps Chef and VPC routing, a LaunchConfiguration, and the ASG.
    """

    # Inline Lambda source for the subnet-lookup custom resource, read at
    # class-definition (import) time.
    subnet_lookup_src = parameterized_codec(
        open(path.join(AWS_LAMBDA_DIR, 'subnet_lookup.py'), 'r').read(),
        False  # disable base64 encoding
    )

    VARIABLES = {
        'SubnetLookupLambdaFunction': {
            'type': AWSHelperFn,
            'description': 'Lambda function code',
            'default': subnet_lookup_src
        },
        'AMILookupArn': {
            'type': CFNString,
            'description': 'ARN of the Lambda function providing '
                           'AMI ids; unused if a value is '
                           'provided for the "VPNAMI" parameter.',
            'default': ''
        },
        # Not using EC2KeyPairKeyName to allow KeyName to be optional
        'KeyName': {
            'type': CFNString,
            'description': 'Name of an existing EC2-VPC KeyPair',
            'default': ''
        },
        'BucketKey': {
            'type': CFNString,
            'description': 'S3 prefix for chef cookbook archives '
                           'and artifacts. The environment name '
                           'will be prepended to this. E.g. if '
                           '"ChefBucketName" is "foo", '
                           '"BucketKey" is "bar", '
                           '"EnvironmentName" is "dev", and '
                           '"ChefDataBucketName" is "citadel", '
                           'instances will pull configuration '
                           'tarballs from s3://foo/dev/bar/ and be '
                           'able to access files (e.g. secrets/'
                           'artifacts) in s3://citadel/dev/bar/',
            'default': 'vpnservers'
        },
        'ChefBucketName': {
            'type': CFNString,
            'description': 'Name of bucket storing core Chef '
                           'configuration',
            'default': 'common'
        },
        'ChefDataBucketName': {
            'type': CFNString,
            'description': 'Name of bucket storing extra '
                           'Chef data',
            'default': 'citadel'
        },
        'CustomerName': {
            'type': CFNString,
            'description': 'The nickname for the new customer. '
                           'Must be all lowercase letters, '
                           'should not contain spaces or special '
                           'characters, nor should it include '
                           'any part of EnvironmentName.',
            'allowed_pattern': '[-_ a-z]*',
            'default': ''
        },
        'EnvironmentName': {
            'type': CFNString,
            'description': 'Name of Environment',
            'default': 'common'
        },
        'VpcCidr': {
            'type': CFNString,
            'description': 'VPC CIDR block (required for creating NAT '
                           'security group rules for NATing traffic).',
            'default': '10.12.0.0/21'
        },
        'CoreVPCStack': {
            'type': CFNString,
            'description': 'Core VPC CFN stack name (used to '
                           'lookup subnets).'
        },
        # Not using EC2ImageId to allow VPNAMI to be optional
        'VPNAMI': {
            'type': CFNString,
            'description': 'AMI ID for VPN instance (leave blank to '
                           ' use the "VPNOS" parameter)',
            'default': ''
        },
        'VPNManagedPolicies': {
            'type': CFNCommaDelimitedList,
            'description': 'Managed policy ARNs to apply '
                           'to the VPN instances.'
        },
        'VPNOS': {
            'type': CFNString,
            'description': 'OS to deploy on the VPN server (can be '
                           'overridden with the "VPNAMI" parameter). '
                           'Also used to determine instance userdata '
                           'configuration (i.e. yum vs apt package '
                           'management).',
            'default': 'ubuntu-16.04',
            'allowed_values': ['centos-7', 'ubuntu-16.04', '']
        },
        'VpnEipPublicIp': {
            'type': CFNString,
            'description': 'Elastic IP for the VPN Instance'
        },
        'ManagementInstanceType': {
            'type': CFNString,
            'description': 'Type of the management '
                           'instances. T2 not allowed '
                           'in dedicated tenancy.',
            'default': 'm3.medium'
        },
        'VPNSecurityGroups': {
            'type': EC2SecurityGroupIdList,
            'description': 'Security groups to apply to the '
                           'VPN instances.'
        },
        'VpcId': {
            'type': EC2VPCId,
            'description': 'VPC id.'
        },
        'VpcInstanceTenancy': {
            'type': CFNString,
            'description': 'Tenancy of the VPC',
            'default': 'dedicated',
            'allowed_values': ['dedicated', 'default']
        },
        'VPNSubnet': {
            'type': CFNString,
            'description': 'Address range for a VPN subnet.',
            'default': '10.12.14.0/24'
        },
        'ChefClientVersion': {
            'type': CFNString,
            'description': 'Version of chef-client to '
                           'install.',
            'default': '12.19.36'
        },
        'PublicRouteTable': {
            'type': CFNString,
            'description': '(typically unused) Public route '
                           'table id.',
            'default': ''
        },
        'PublicSubnets': {
            'type': CFNCommaDelimitedList,
            'description': '(typically unused) Public subnets '
                           'in which to deploy the VPN server.',
            'default': ''
        },
        'PrivateRouteTable1': {
            'type': CFNString,
            'description': '(typically unused) Private '
                           'route table 1 id.',
            'default': ''
        },
        'PrivateRouteTable2': {
            'type': CFNString,
            'description': '(typically unused) Private '
                           'route table 2 id.',
            'default': ''
        },
        'PrivateRouteTable3': {
            'type': CFNString,
            'description': '(typically unused) Private '
                           'route table 3 id.',
            'default': ''
        },
        'PrivateSubnetCount': {
            'type': CFNString,
            'description': 'Optional number of private '
                           'subnets to reference in VPC '
                           'stack. Leave at 0 to look this '
                           'up dynamically via Lambda.',
            'default': '0'
        },
        'ChefRunList': {
            'type': CFNString,
            'description': 'Optional override for the Chef recipe '
                           'name; leave blank to default to '
                           '"recipe[CUSTOMERNAME_vpn]".',
            'default': ''
        }
    }

    def add_conditions(self):
        """Set up template conditions."""
        template = self.template
        variables = self.get_variables()
        # Optional parameters are treated as unset when blank or the
        # literal string 'undefined'.
        template.add_condition(
            'SSHKeySpecified',
            And(Not(Equals(variables['KeyName'].ref, '')),
                Not(Equals(variables['KeyName'].ref, 'undefined'))))
        template.add_condition(
            'MissingVPNAMI',
            Or(Equals(variables['VPNAMI'].ref, ''),
               Equals(variables['VPNAMI'].ref, 'undefined')))
        # Anything that isn't ubuntu gets the yum-based userdata variant.
        template.add_condition(
            'RHELUserData',
            Not(Equals(variables['VPNOS'].ref, 'ubuntu-16.04')))
        template.add_condition(
            'ChefRunListSpecified',
            And(Not(Equals(variables['ChefRunList'].ref, '')),
                Not(Equals(variables['ChefRunList'].ref, 'undefined'))))
        template.add_condition(
            'PublicRouteTableSpecified',
            And(Not(Equals(variables['PublicRouteTable'].ref, '')),
                Not(Equals(variables['PublicRouteTable'].ref,
                           'undefined'))))
        template.add_condition(
            'PublicSubnetsOmitted',
            Equals(Join('', variables['PublicSubnets'].ref), ''))
        # One condition per possible private subnet count (1..AZS).
        for i in range(AZS):
            template.add_condition(
                '%iPrivateSubnetsCreated' % (i + 1),
                Equals(variables['PrivateSubnetCount'].ref, str(i + 1)))
        template.add_condition(
            'PrivateSubnetCountOmitted',
            Equals(variables['PrivateSubnetCount'].ref, '0'))

    def add_resources(self):
        """Add resources to template."""
        template = self.template
        variables = self.get_variables()

        # Instance role: route manipulation plus read access to the Chef
        # data bucket scoped to this environment/key prefix.
        vpnrole = template.add_resource(
            iam.Role(
                'VPNRole',
                AssumeRolePolicyDocument=iam_policies.assumerolepolicy('ec2'),
                ManagedPolicyArns=variables['VPNManagedPolicies'].ref,
                Path='/',
                Policies=[
                    iam.Policy(
                        PolicyName=Join('-', [
                            'customer-vpn-server-role',
                            variables['EnvironmentName'].ref,
                            variables['CustomerName'].ref
                        ]),
                        PolicyDocument=Policy(
                            Version='2012-10-17',
                            Statement=[
                                # ModifyInstanceAttribute is for src/dst check
                                Statement(
                                    Action=[
                                        awacs.ec2.DescribeRouteTables,
                                        awacs.ec2.DescribeAddresses,
                                        awacs.ec2.AssociateAddress,
                                        awacs.ec2.CreateRoute,
                                        awacs.ec2.ReplaceRoute,
                                        awacs.ec2.ModifyInstanceAttribute
                                    ],
                                    Effect=Allow,
                                    Resource=['*']),
                                Statement(
                                    Action=[
                                        awacs.aws.Action('s3', 'Get*'),
                                        awacs.aws.Action('s3', 'List*'),
                                        awacs.aws.Action('s3', 'Put*')
                                    ],
                                    Effect=Allow,
                                    Resource=[
                                        Join('', [
                                            'arn:aws:s3:::',
                                            variables['ChefDataBucketName'].ref,  # noqa pylint: disable=line-too-long
                                            '/',
                                            variables['EnvironmentName'].ref,
                                            '/',
                                            variables['BucketKey'].ref,
                                            '/*'
                                        ])
                                    ]),
                                Statement(
                                    Action=[awacs.s3.ListBucket],
                                    Effect=Allow,
                                    Resource=[
                                        Join('', [
                                            'arn:aws:s3:::',
                                            variables['ChefDataBucketName'].ref
                                        ])  # noqa pylint: disable=line-too-long
                                    ],
                                    Condition=Condition(
                                        StringLike(
                                            's3:prefix',
                                            [
                                                Join('', [
                                                    variables['EnvironmentName'].ref,  # noqa pylint: disable=line-too-long
                                                    '/',
                                                    variables['BucketKey'].ref,  # noqa pylint: disable=line-too-long
                                                    '/*'
                                                ])
                                            ])))
                            ]))
                ]))
        vpninstanceprofile = template.add_resource(
            iam.InstanceProfile('VPNInstanceProfile',
                                Path='/',
                                Roles=[Ref(vpnrole)]))

        # Custom resource resolving the AMI id only when VPNAMI is blank.
        amiid = template.add_resource(
            cfn_custom_classes.AMIId(
                'AMIId',
                Condition='MissingVPNAMI',
                Platform=variables['VPNOS'].ref,
                Region=Ref('AWS::Region'),
                ServiceToken=variables['AMILookupArn'].ref))

        # Lookup subnets from core VPC stack
        subnetlookuplambdarole = template.add_resource(
            iam.Role(
                'SubnetLookupLambdaRole',
                Condition='PrivateSubnetCountOmitted',
                AssumeRolePolicyDocument=iam_policies.assumerolepolicy(
                    'lambda'),
                ManagedPolicyArns=[
                    IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole'
                ],
                Policies=[
                    iam.Policy(
                        PolicyName=Join('-', [
                            'subnetlookup-lambda-role',
                            variables['EnvironmentName'].ref,
                            variables['CustomerName'].ref
                        ]),
                        PolicyDocument=Policy(
                            Version='2012-10-17',
                            Statement=[
                                Statement(
                                    Action=[
                                        awacs.aws.Action(
                                            'cloudformation',
                                            'DescribeStack*'),
                                        awacs.aws.Action(
                                            'cloudformation', 'Get*')
                                    ],
                                    Effect=Allow,
                                    Resource=[
                                        Join('', [
                                            'arn:aws:cloudformation:',
                                            Ref('AWS::Region'), ':',
                                            Ref('AWS::AccountId'),
                                            ':stack/',
                                            variables['CoreVPCStack'].ref,
                                            '/*'
                                        ])
                                    ])
                            ]))
                ]))
        cfncustomresourcesubnetlookup = template.add_resource(
            awslambda.Function(
                'CFNCustomResourceSubnetLookup',
                Condition='PrivateSubnetCountOmitted',
                Description='Find subnets created by core stack',
                Code=awslambda.Code(
                    ZipFile=variables['SubnetLookupLambdaFunction']),
                Handler='index.handler',
                Role=GetAtt(subnetlookuplambdarole, 'Arn'),
                Runtime='python2.7',
                Timeout=10))
        subnetlookup = template.add_resource(
            cfn_custom_classes.SubnetLookup(
                'SubnetLookup',
                Condition='PrivateSubnetCountOmitted',
                CoreVPCStack=variables['CoreVPCStack'].ref,
                Region=Ref('AWS::Region'),
                ServiceToken=GetAtt(cfncustomresourcesubnetlookup, 'Arn')))

        # cloud-config userdata shared by both OS variants. Route table ids
        # come from (in priority order): the subnet-lookup custom resource,
        # explicit parameters, or cross-stack ImportValue exports.
        # NOTE(review): indentation inside these literals was reconstructed
        # to valid cloud-config/YAML — verify against deployed userdata.
        common_userdata_prefix = [
            "#cloud-config\n",
            "package_update: true\n",
            "package_upgrade: false\n",
            "write_files:\n",
            "  - path: /usr/local/bin/update_vpn_routes.sh\n",
            "    permissions: '0755'\n",
            "    content: |\n",
            "      #!/bin/bash\n",
            "      \n",
            "      export AWS_DEFAULT_REGION=\"", Ref('AWS::Region'), "\"\n",
            "      my_instance_id=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)\n",  # noqa pylint: disable=line-too-long
            "      \n",
            "      publicroutetableid=",
            If(
                'PrivateSubnetCountOmitted',
                GetAtt(subnetlookup.title, 'PublicRouteTableId'),
                If(
                    'PublicRouteTableSpecified',
                    variables['PublicRouteTable'].ref,
                    ImportValue(
                        Sub("${%s}-PublicRouteTable" %
                            variables['CoreVPCStack'].name)))),  # noqa pylint: disable=line-too-long
            "\n",
            "      private_route_tables=(",
            If(
                'PrivateSubnetCountOmitted',
                GetAtt(subnetlookup.title, 'PrivateRouteTables'),
                If(
                    '3PrivateSubnetsCreated',
                    If(
                        'PublicRouteTableSpecified',
                        Join(' ', [
                            variables['PrivateRouteTable1'].ref,
                            variables['PrivateRouteTable2'].ref,
                            variables['PrivateRouteTable3'].ref
                        ]),
                        Join(
                            ' ',
                            [
                                ImportValue(
                                    Sub("${%s}-PrivateRouteTable1" %
                                        variables['CoreVPCStack'].name)),  # noqa pylint: disable=line-too-long
                                ImportValue(
                                    Sub("${%s}-PrivateRouteTable2" %
                                        variables['CoreVPCStack'].name)),  # noqa pylint: disable=line-too-long
                                ImportValue(
                                    Sub("${%s}-PrivateRouteTable3" %
                                        variables['CoreVPCStack'].name))
                            ])),  # noqa pylint: disable=line-too-long
                    If(
                        '2PrivateSubnetsCreated',
                        If(
                            'PublicRouteTableSpecified',
                            Join(' ', [
                                variables['PrivateRouteTable1'].ref,
                                variables['PrivateRouteTable2'].ref
                            ]),
                            Join(
                                ' ',
                                [
                                    ImportValue(
                                        Sub("${%s}-PrivateRouteTable1" %
                                            variables['CoreVPCStack'].name)),  # noqa pylint: disable=line-too-long
                                    ImportValue(
                                        Sub("${%s}-PrivateRouteTable2" %
                                            variables['CoreVPCStack'].name))
                                ])),  # noqa pylint: disable=line-too-long
                        If(
                            'PublicRouteTableSpecified',
                            variables['PrivateRouteTable1'].ref,
                            ImportValue(
                                Sub("${%s}-PrivateRouteTable1" %
                                    variables['CoreVPCStack'].name)))))),  # noqa pylint: disable=line-too-long
            ")\n",
            "\n",
            "      openvpnroutepubdest=", variables['VPNSubnet'].ref, "\n",
            "      \n",
            "      # Disabling sourceDestCheck\n",
            "      aws ec2 modify-instance-attribute --instance-id ${my_instance_id} --source-dest-check \"{\\\"Value\\\": false}\"\n",  # noqa pylint: disable=line-too-long
            "      \n",
            "      if aws ec2 describe-route-tables | grep ${openvpnroutepubdest}; then\n",  # noqa pylint: disable=line-too-long
            "          # Update 'OpenVPNRoutePub' to point to this instance\n",  # noqa pylint: disable=line-too-long
            "          aws ec2 replace-route --route-table-id ${publicroutetableid} --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",  # noqa pylint: disable=line-too-long
            "          # Update private routes\n",
            "          for i in \"${private_route_tables[@]}\"\n",
            "          do\n",
            "              aws ec2 replace-route --route-table-id $i --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",  # noqa pylint: disable=line-too-long
            "          done\n",
            "      else\n",
            "          # Create 'OpenVPNRoutePub'\n",
            "          aws ec2 create-route --route-table-id ${publicroutetableid} --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",  # noqa pylint: disable=line-too-long
            "          # Create private routes\n",
            "          for i in \"${private_route_tables[@]}\"\n",
            "          do\n",
            "              aws ec2 create-route --route-table-id $i --destination-cidr-block ${openvpnroutepubdest} --instance-id ${my_instance_id}\n",  # noqa pylint: disable=line-too-long
            "          done\n",
            "      fi\n",
            "      \n",
            "\n",
            "  - path: /etc/chef/sync_cookbooks.sh\n",
            "    permissions: '0755'\n",
            "    owner: 'root'\n",
            "    group: 'root'\n",
            "    content: |\n",
            "      #!/bin/bash\n",
            "      set -e -o pipefail\n",
            "      \n",
            "      aws --region ", Ref('AWS::Region'),
            " s3 sync s3://", variables['ChefBucketName'].ref,
            "/", variables['EnvironmentName'].ref,
            "/", variables['BucketKey'].ref,
            "/ /etc/chef/\n",
            "      if compgen -G \"/etc/chef/cookbooks-*.tar.gz\" > /dev/null; then\n",  # noqa pylint: disable=line-too-long
            "          echo \"Cookbook archive found.\"\n",
            "          if [ -d \"/etc/chef/cookbooks\" ]; then\n",
            "              echo \"Removing previously extracted cookbooks.\"\n",  # noqa pylint: disable=line-too-long
            "              rm -r /etc/chef/cookbooks\n",
            "          fi\n",
            "          echo \"Extracting highest numbered cookbook archive.\"\n",  # noqa pylint: disable=line-too-long
            "          cbarchives=(/etc/chef/cookbooks-*.tar.gz)\n",
            "          tar -zxf \"${cbarchives[@]: -1}\" -C /etc/chef\n",
            "          chown -R root:root /etc/chef\n",
            "      fi\n",
            "      \n",
            "\n",
            "  - path: /etc/chef/perform_chef_run.sh\n",
            "    permissions: '0755'\n",
            "    owner: 'root'\n",
            "    group: 'root'\n",
            "    content: |\n",
            "      #!/bin/bash\n",
            "      set -e -o pipefail\n",
            "      \n",
            "      chef-client -z -r '",
            If('ChefRunListSpecified',
               variables['ChefRunList'].ref,
               Join('', ['recipe[', variables['CustomerName'].ref, '_vpn]'])),
            "' -c /etc/chef/client.rb -E ",
            variables['EnvironmentName'].ref,
            " --force-formatter --no-color -F min\n",
            "\n",
            "  - path: /etc/chef/client.rb\n",
            "    permissions: '0644'\n",
            "    owner: 'root'\n",
            "    group: 'root'\n",
            "    content: |\n",
            "      log_level :info\n",
            "      log_location '/var/log/chef/client.log'\n",
            "      ssl_verify_mode :verify_none\n",
            "      cookbook_path '/etc/chef/cookbooks'\n",
            "      node_path '/etc/chef/nodes'\n",
            "      role_path '/etc/chef/roles'\n",
            "      data_bag_path '/etc/chef/data_bags'\n",
            "      environment_path '/etc/chef/environments'\n",
            "      local_mode 'true'\n",
            "\n",
            "  - path: /etc/chef/environments/",
            variables['EnvironmentName'].ref, ".json\n",
            "    permissions: '0644'\n",
            "    owner: 'root'\n",
            "    group: 'root'\n",
            "    content: |\n",
            "      {\n",
            "        \"name\": \"", variables['EnvironmentName'].ref, "\",\n",
            "        \"default_attributes\": {\n",
            "          \"sturdy\": {\n",
            "            \"openvpn\": {\n",
            "              \"core_vpc_cidr\": \"",
            variables['VpcCidr'].ref, "\",\n",
            "              \"vpn_elastic_ip\": \"",
            variables['VpnEipPublicIp'].ref, "\",\n",
            "              \"vpn_subnet_cidr\": \"",
            variables['VPNSubnet'].ref, "\",\n",
            "              \"chef_data_bucket_name\": \"",
            variables['ChefDataBucketName'].ref, "\",\n",
            "              \"chef_data_bucket_folder\": \"",
            variables['EnvironmentName'].ref, "/",
            variables['BucketKey'].ref, "\",\n",
            "              \"chef_data_bucket_region\": \"",
            Ref('AWS::Region'), "\"\n",
            "            }\n",
            "          }\n",
            "        },\n",
            "        \"json_class\": \"Chef::Environment\",\n",
            "        \"description\": \"",
            variables['EnvironmentName'].ref, " environment\",\n",
            "        \"chef_type\": \"environment\"\n",
            "      }\n",
            "\n",
            "runcmd:\n",
            "  - set -euf\n",
            "  - echo 'Attaching EIP'\n",
            "  - pip install aws-ec2-assign-elastic-ip\n",
            # Allowing this command to fail (with ||true) as sturdy_openvpn
            # 2.3.0+ can handle this association instead. This will be removed
            # entirely in the next major release of this module (at which time
            # use of the updated sturdy_openvpn cookbook will be required)
            "  - aws-ec2-assign-elastic-ip --region ", Ref('AWS::Region'),
            " --valid-ips ", variables['VpnEipPublicIp'].ref, " || true\n",
            "  - echo 'Updating Routes'\n",
            "  - /usr/local/bin/update_vpn_routes.sh\n",
            "  - echo 'Installing Chef'\n",
            "  - curl --max-time 10 --retry-delay 5 --retry 5 -L https://www.chef.io/chef/install.sh | bash -s -- -v ",  # noqa pylint: disable=line-too-long
            variables['ChefClientVersion'].ref, "\n",
            "  - echo 'Configuring Chef'\n",
            "  - mkdir -p /var/log/chef /etc/chef/data_bags /etc/chef/nodes /etc/chef/roles\n",  # noqa pylint: disable=line-too-long
            "  - chmod 0755 /etc/chef\n",
            "  - /etc/chef/sync_cookbooks.sh\n",
            "  - /etc/chef/perform_chef_run.sh\n"
        ]

        vpnserverlaunchconfig = template.add_resource(
            autoscaling.LaunchConfiguration(
                'VpnServerLaunchConfig',
                AssociatePublicIpAddress=True,
                BlockDeviceMappings=[
                    # CentOS AMIs don't include this by default
                    ec2.BlockDeviceMapping(
                        DeviceName='/dev/sda1',
                        Ebs=ec2.EBSBlockDevice(DeleteOnTermination=True))
                ],
                IamInstanceProfile=Ref(vpninstanceprofile),
                ImageId=If('MissingVPNAMI',
                           GetAtt(amiid, 'ImageId'),
                           variables['VPNAMI'].ref),
                InstanceType=variables['ManagementInstanceType'].ref,
                InstanceMonitoring=False,  # extra granularity not worth cost
                KeyName=If('SSHKeySpecified',
                           variables['KeyName'].ref,
                           Ref('AWS::NoValue')),
                PlacementTenancy=variables['VpcInstanceTenancy'].ref,
                SecurityGroups=variables['VPNSecurityGroups'].ref,
                # RHEL-family hosts additionally get the EPEL repo and
                # python2-named packages; ubuntu uses the apt equivalents.
                UserData=If(
                    'RHELUserData',
                    Base64(
                        Join(
                            '',
                            common_userdata_prefix + [
                                "yum_repos:\n",
                                "  epel:\n",
                                "    name: Extra Packages for $releasever - $basearch\n",  # noqa pylint: disable=line-too-long
                                "    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch\n",  # noqa pylint: disable=line-too-long
                                "    enabled: true\n",
                                "    failovermethod: priority\n",
                                "    gpgcheck: true\n",
                                "    gpgkey: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7\n",  # noqa pylint: disable=line-too-long
                                "packages:\n",
                                "  - awscli\n",
                                "  - python-pip\n",
                                "  - python2-boto\n",
                                "  - python2-boto3\n"
                            ])),
                    Base64(
                        Join(
                            '',
                            common_userdata_prefix + [
                                "packages:\n",
                                "  - awscli\n",
                                "  - python-pip\n",
                                "  - python-boto\n",
                                "  - python-boto3\n"
                            ])))))

        # Single-instance ASG keeps exactly one VPN server alive.
        template.add_resource(
            autoscaling.AutoScalingGroup(
                'VPNServerASG',
                MinSize=1,
                MaxSize=1,
                LaunchConfigurationName=Ref(vpnserverlaunchconfig),
                Tags=[
                    autoscaling.Tag(
                        'Name',
                        Join('-', [
                            variables['CustomerName'].ref,
                            'vpn',
                            variables['EnvironmentName'].ref
                        ]),
                        True),
                    autoscaling.Tag('environment',
                                    variables['EnvironmentName'].ref,
                                    True),
                    autoscaling.Tag('customer',
                                    variables['CustomerName'].ref,
                                    True)
                ],
                VPCZoneIdentifier=If(
                    'PublicSubnetsOmitted',
                    GetAtt(subnetlookup.title, 'PublicSubnetList'),
                    variables['PublicSubnets'].ref)))

    def create_template(self):
        """Boilerplate for CFN Template."""
        self.template.add_version('2010-09-09')
        self.template.add_description('Onica Platform - Core'
                                      ' - VPN Server - {}'.format(
                                          version.version()))
        self.add_conditions()
        self.add_resources()
class Pipeline(Blueprint):
    """Stacker blueprint for app building components.

    Creates a CodeCommit repo, S3 artifact bucket, CodeBuild project,
    CodePipeline, and a scheduled Lambda that prunes stale ECR images.
    """

    # Inline Lambda source and buildspec, read at class-definition time.
    # NOTE(review): the open() handles are never closed — harmless for a
    # one-shot read at import, but worth confirming against project style.
    cleanup_ecr_src = parameterized_codec(
        open(path.join(AWS_LAMBDA_DIR, 'cleanup_ecr.py'), 'r').read(),
        False  # disable base64 encoding
    )

    build_proj_spec = parameterized_codec(
        open(
            path.join(path.dirname(path.realpath(__file__)),
                      'build_project_buildspec.yml'), 'r').read(),
        False  # disable base64 encoding
    )

    VARIABLES = {
        'ECRCleanupLambdaFunction': {
            'type': AWSHelperFn,
            'description': 'Lambda function code',
            'default': cleanup_ecr_src
        },
        'BuildProjectBuildSpec': {
            'type': AWSHelperFn,
            'description': 'Inline buildspec code',
            'default': build_proj_spec
        },
        'AppPrefix': {
            'type': CFNString,
            'description': 'Application prefix (for roles, etc)'
        },
        'EcrRepoName': {
            'type': CFNString,
            'description': 'Name of ECR repo'
        },
        'RolePermissionsBoundaryName': {
            'type': CFNString,
            'description': 'Roles\' boundary '
                           'name'
        },
    }

    def create_template(self):
        """Create template (main function called by Stacker)."""
        template = self.template
        variables = self.get_variables()
        template.set_version('2010-09-09')
        template.set_description('App - Build Pipeline')

        # Resources
        # Every role below is created under this permissions boundary.
        boundary_arn = Join('', [
            'arn:', Partition, ':iam::', AccountId, ':policy/',
            variables['RolePermissionsBoundaryName'].ref
        ])

        # Repo image limit is 1000 by default; this lambda function will prune
        # old images
        image_param_path = Join(
            '', ['/', variables['AppPrefix'].ref, '/current-hash'])
        image_param_arn = Join('', [
            'arn:', Partition, ':ssm:', Region, ':', AccountId, ':parameter',
            image_param_path
        ])
        ecr_repo_arn = Join('', [
            'arn:', Partition, ':ecr:', Region, ':', AccountId,
            ':repository/', variables['EcrRepoName'].ref
        ])
        cleanuplambdarole = template.add_resource(
            iam.Role(
                'CleanupLambdaRole',
                AssumeRolePolicyDocument=make_simple_assume_policy(
                    'lambda.amazonaws.com'),
                ManagedPolicyArns=[
                    IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole'
                ],
                PermissionsBoundary=boundary_arn,
                Policies=[
                    iam.Policy(
                        PolicyName=Join(
                            '', [variables['AppPrefix'].ref, '-ecrcleanup']),
                        PolicyDocument=PolicyDocument(
                            Version='2012-10-17',
                            Statement=[
                                Statement(Action=[awacs.ssm.GetParameter],
                                          Effect=Allow,
                                          Resource=[image_param_arn]),
                                Statement(Action=[
                                    awacs.ecr.DescribeImages,
                                    awacs.ecr.BatchDeleteImage
                                ],
                                          Effect=Allow,
                                          Resource=[ecr_repo_arn])
                            ]))
                ]))
        cleanupfunction = template.add_resource(
            awslambda.Function(
                'CleanupFunction',
                Description='Cleanup stale ECR images',
                Code=awslambda.Code(
                    ZipFile=variables['ECRCleanupLambdaFunction']),
                Environment=awslambda.Environment(
                    Variables={
                        'ECR_REPO_NAME': variables['EcrRepoName'].ref,
                        'SSM_PARAM': image_param_path
                    }),
                Handler='index.handler',
                Role=cleanuplambdarole.get_att('Arn'),
                Runtime='python3.6',
                Timeout=120))
        # Weekly schedule driving the cleanup Lambda.
        cleanuprule = template.add_resource(
            events.Rule('CleanupRule',
                        Description='Regularly invoke CleanupFunction',
                        ScheduleExpression='rate(7 days)',
                        State='ENABLED',
                        Targets=[
                            events.Target(Arn=cleanupfunction.get_att('Arn'),
                                          Id='CleanupFunction')
                        ]))
        template.add_resource(
            awslambda.Permission(
                'AllowCWLambdaInvocation',
                FunctionName=cleanupfunction.ref(),
                Action=awacs.awslambda.InvokeFunction.JSONrepr(),
                Principal='events.amazonaws.com',
                SourceArn=cleanuprule.get_att('Arn')))
        appsource = template.add_resource(
            codecommit.Repository(
                'AppSource',
                RepositoryName=Join('-',
                                    [variables['AppPrefix'].ref, 'source'])))
        for i in ['Name', 'Arn']:
            template.add_output(
                Output("AppRepo%s" % i,
                       Description="%s of app source repo" % i,
                       Value=appsource.get_att(i)))
        bucket = template.add_resource(
            s3.Bucket(
                'Bucket',
                AccessControl=s3.Private,
                LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                    s3.LifecycleRule(NoncurrentVersionExpirationInDays=90,
                                     Status='Enabled')
                ]),
                VersioningConfiguration=s3.VersioningConfiguration(
                    Status='Enabled')))
        template.add_output(
            Output('PipelineBucketName',
                   Description='Name of pipeline bucket',
                   Value=bucket.ref()))

        # This list must be kept in sync between the CodeBuild project and its
        # role
        build_name = Join('', [variables['AppPrefix'].ref, '-build'])
        build_role = template.add_resource(
            iam.Role(
                'BuildRole',
                AssumeRolePolicyDocument=make_simple_assume_policy(
                    'codebuild.amazonaws.com'),
                PermissionsBoundary=boundary_arn,
                Policies=[
                    iam.Policy(
                        PolicyName=Join('', [build_name, '-policy']),
                        PolicyDocument=PolicyDocument(
                            Version='2012-10-17',
                            Statement=[
                                Statement(
                                    Action=[awacs.s3.GetObject],
                                    Effect=Allow,
                                    Resource=[
                                        Join('',
                                             [bucket.get_att('Arn'), '/*'])
                                    ]),
                                Statement(
                                    Action=[awacs.ecr.GetAuthorizationToken],
                                    Effect=Allow,
                                    Resource=['*']),
                                Statement(Action=[
                                    awacs.ecr.BatchCheckLayerAvailability,
                                    awacs.ecr.BatchGetImage,
                                    awacs.ecr.CompleteLayerUpload,
                                    awacs.ecr.DescribeImages,
                                    awacs.ecr.GetDownloadUrlForLayer,
                                    awacs.ecr.InitiateLayerUpload,
                                    awacs.ecr.PutImage,
                                    awacs.ecr.UploadLayerPart
                                ],
                                          Effect=Allow,
                                          Resource=[ecr_repo_arn]),
                                Statement(Action=[
                                    awacs.ssm.GetParameter,
                                    awacs.ssm.PutParameter
                                ],
                                          Effect=Allow,
                                          Resource=[image_param_arn]),
                                # Both the log group and its streams.
                                Statement(Action=[
                                    awacs.logs.CreateLogGroup,
                                    awacs.logs.CreateLogStream,
                                    awacs.logs.PutLogEvents
                                ],
                                          Effect=Allow,
                                          Resource=[
                                              Join('', [
                                                  'arn:', Partition, ':logs:',
                                                  Region, ':', AccountId,
                                                  ':log-group:/aws/codebuild/',
                                                  build_name
                                              ] + x)
                                              for x in [[':*'], [':*/*']]
                                          ])
                            ]))
                ]))
        buildproject = template.add_resource(
            codebuild.Project(
                'BuildProject',
                Artifacts=codebuild.Artifacts(Type='CODEPIPELINE'),
                Environment=codebuild.Environment(
                    ComputeType='BUILD_GENERAL1_SMALL',
                    EnvironmentVariables=[
                        codebuild.EnvironmentVariable(
                            Name='AWS_DEFAULT_REGION',
                            Type='PLAINTEXT',
                            Value=Region),
                        codebuild.EnvironmentVariable(Name='AWS_ACCOUNT_ID',
                                                      Type='PLAINTEXT',
                                                      Value=AccountId),
                        codebuild.EnvironmentVariable(
                            Name='IMAGE_REPO_NAME',
                            Type='PLAINTEXT',
                            Value=variables['EcrRepoName'].ref),
                    ],
                    Image='aws/codebuild/docker:18.09.0',
                    Type='LINUX_CONTAINER'),
                Name=build_name,
                ServiceRole=build_role.get_att('Arn'),
                Source=codebuild.Source(
                    Type='CODEPIPELINE',
                    BuildSpec=variables['BuildProjectBuildSpec'])))
        pipelinerole = template.add_resource(
            iam.Role(
                'PipelineRole',
                AssumeRolePolicyDocument=make_simple_assume_policy(
                    'codepipeline.amazonaws.com'),
                PermissionsBoundary=boundary_arn,
                Policies=[
                    iam.Policy(
                        PolicyName=Join('', [build_name, '-pipeline-policy']),
                        PolicyDocument=PolicyDocument(
                            Version='2012-10-17',
                            Statement=[
                                Statement(
                                    Action=[
                                        awacs.codecommit.GetBranch,
                                        awacs.codecommit.GetCommit,
                                        awacs.codecommit.UploadArchive,
                                        awacs.codecommit.GetUploadArchiveStatus,  # noqa
                                        awacs.codecommit.CancelUploadArchive
                                    ],  # noqa
                                    Effect=Allow,
                                    Resource=[appsource.get_att('Arn')]),
                                Statement(
                                    Action=[awacs.s3.GetBucketVersioning],
                                    Effect=Allow,
                                    Resource=[bucket.get_att('Arn')]),
                                Statement(
                                    Action=[
                                        awacs.s3.GetObject,
                                        awacs.s3.PutObject
                                    ],
                                    Effect=Allow,
                                    Resource=[
                                        Join('',
                                             [bucket.get_att('Arn'), '/*'])
                                    ]),
                                Statement(
                                    Action=[
                                        awacs.codebuild.BatchGetBuilds,
                                        awacs.codebuild.StartBuild
                                    ],
                                    Effect=Allow,
                                    Resource=[buildproject.get_att('Arn')])
                            ]))
                ]))
        # Two-stage pipeline: CodeCommit source -> CodeBuild build.
        template.add_resource(
            codepipeline.Pipeline(
                'Pipeline',
                ArtifactStore=codepipeline.ArtifactStore(
                    Location=bucket.ref(), Type='S3'),
                Name=build_name,
                RoleArn=pipelinerole.get_att('Arn'),
                Stages=[
                    codepipeline.Stages(
                        Name='Source',
                        Actions=[
                            codepipeline.Actions(
                                Name='CodeCommit',
                                ActionTypeId=codepipeline.ActionTypeId(
                                    Category='Source',
                                    Owner='AWS',
                                    Provider='CodeCommit',
                                    Version='1'),
                                Configuration={
                                    'RepositoryName': appsource.get_att('Name'),  # noqa
                                    'BranchName': 'master'
                                },
                                OutputArtifacts=[
                                    codepipeline.OutputArtifacts(
                                        Name='CodeCommitRepo')
                                ]),
                        ]),
                    codepipeline.Stages(
                        Name='Build',
                        Actions=[
                            codepipeline.Actions(
                                Name='Build',
                                ActionTypeId=codepipeline.ActionTypeId(
                                    Category='Build',
                                    Owner='AWS',
                                    Provider='CodeBuild',
                                    Version='1'),
                                Configuration={
                                    'ProjectName': buildproject.ref()
                                },
                                InputArtifacts=[
                                    codepipeline.InputArtifacts(
                                        Name='CodeCommitRepo')
                                ])
                        ])
                ]))
def _read_file(file_path):
    """Return the text contents of *file_path*.

    Uses a context manager so the file handle is closed promptly instead of
    being leaked (the bare ``open(...).read()`` idiom leaves the handle to the
    garbage collector and triggers ``ResourceWarning``).
    """
    with open(file_path, 'r') as handle:
        return handle.read()


class Pipeline(Blueprint):  # pylint: disable=too-few-public-methods
    """Stacker blueprint for app building components.

    Creates a CodeCommit source repo, an S3 artifact bucket, a CodeBuild
    project, a CodePipeline wiring them together, and a scheduled Lambda
    function that prunes stale ECR images.
    """

    # Inline Lambda source for the ECR cleanup function; base64 encoding is
    # disabled because the code is embedded as plain text via ZipFile.
    cleanup_ecr_src = parameterized_codec(
        _read_file(path.join(AWS_LAMBDA_DIR, 'cleanup_ecr.py')),
        False,  # disable base64 encoding
    )
    # Inline buildspec for the CodeBuild project, loaded from a file that
    # lives next to this blueprint module.
    build_proj_spec = parameterized_codec(
        _read_file(
            path.join(path.dirname(path.realpath(__file__)),
                      'build_project_buildspec.yml')
        ),
        False,  # disable base64 encoding
    )

    VARIABLES = {
        'ECRCleanupLambdaFunction': {
            'type': AWSHelperFn,
            'description': 'Lambda function code',
            'default': cleanup_ecr_src,
        },
        'BuildProjectBuildSpec': {
            'type': AWSHelperFn,
            'description': 'Inline buildspec code',
            'default': build_proj_spec,
        },
        'AppPrefix': {
            'type': CFNString,
            'description': 'Application prefix (for roles, etc)',
        },
        'EcrRepoName': {
            'type': CFNString,
            'description': 'Name of ECR repo',
        },
        'RolePermissionsBoundaryName': {
            'type': CFNString,
            'description': "Roles' boundary name",
        },
    }

    def create_template(self):
        """Create template (main function called by Stacker)."""
        template = self.template
        variables = self.get_variables()
        template.set_version('2010-09-09')
        template.set_description('App - Build Pipeline')

        # Resources

        # All IAM roles below are constrained by this permissions boundary.
        boundary_arn = Join(
            '',
            [
                'arn:',
                Partition,
                ':iam::',
                AccountId,
                ':policy/',
                variables['RolePermissionsBoundaryName'].ref,
            ],
        )

        # Repo image limit is 1000 by default; this lambda function will prune
        # old images
        image_param_path = Join(
            '', ['/', variables['AppPrefix'].ref, '/current-hash']
        )
        image_param_arn = Join(
            '',
            [
                'arn:',
                Partition,
                ':ssm:',
                Region,
                ':',
                AccountId,
                ':parameter',
                image_param_path,
            ],
        )
        ecr_repo_arn = Join(
            '',
            [
                'arn:',
                Partition,
                ':ecr:',
                Region,
                ':',
                AccountId,
                ':repository/',
                variables['EcrRepoName'].ref,
            ],
        )
        cleanuplambdarole = template.add_resource(
            iam.Role(
                'CleanupLambdaRole',
                AssumeRolePolicyDocument=make_simple_assume_policy(
                    'lambda.amazonaws.com'
                ),
                ManagedPolicyArns=[
                    IAM_ARN_PREFIX + 'AWSLambdaBasicExecutionRole'
                ],
                PermissionsBoundary=boundary_arn,
                Policies=[
                    iam.Policy(
                        PolicyName=Join(
                            '', [variables['AppPrefix'].ref, '-ecrcleanup']
                        ),
                        PolicyDocument=PolicyDocument(
                            Version='2012-10-17',
                            Statement=[
                                # Read the current-hash SSM param so the
                                # cleanup function knows which image is live.
                                Statement(
                                    Action=[awacs.ssm.GetParameter],
                                    Effect=Allow,
                                    Resource=[image_param_arn],
                                ),
                                Statement(
                                    Action=[
                                        awacs.ecr.DescribeImages,
                                        awacs.ecr.BatchDeleteImage,
                                    ],
                                    Effect=Allow,
                                    Resource=[ecr_repo_arn],
                                ),
                            ],
                        ),
                    )
                ],
            )
        )

        cleanupfunction = template.add_resource(
            awslambda.Function(
                'CleanupFunction',
                Description='Cleanup stale ECR images',
                Code=awslambda.Code(
                    ZipFile=variables['ECRCleanupLambdaFunction']
                ),
                Environment=awslambda.Environment(
                    Variables={
                        'ECR_REPO_NAME': variables['EcrRepoName'].ref,
                        'SSM_PARAM': image_param_path,
                    }
                ),
                Handler='index.handler',
                Role=cleanuplambdarole.get_att('Arn'),
                Runtime='python3.6',
                Timeout=120,
            )
        )

        # Invoke the cleanup function on a fixed weekly schedule.
        cleanuprule = template.add_resource(
            events.Rule(
                'CleanupRule',
                Description='Regularly invoke CleanupFunction',
                ScheduleExpression='rate(7 days)',
                State='ENABLED',
                Targets=[
                    events.Target(
                        Arn=cleanupfunction.get_att('Arn'),
                        Id='CleanupFunction',
                    )
                ],
            )
        )

        # Allow CloudWatch Events to invoke the Lambda function.
        template.add_resource(
            awslambda.Permission(
                'AllowCWLambdaInvocation',
                FunctionName=cleanupfunction.ref(),
                Action=awacs.awslambda.InvokeFunction.JSONrepr(),
                Principal='events.amazonaws.com',
                SourceArn=cleanuprule.get_att('Arn'),
            )
        )

        appsource = template.add_resource(
            codecommit.Repository(
                'AppSource',
                RepositoryName=Join(
                    '-', [variables['AppPrefix'].ref, 'source']
                ),
            )
        )
        for i in ['Name', 'Arn']:
            template.add_output(
                Output(
                    'AppRepo%s' % i,
                    Description='%s of app source repo' % i,
                    Value=appsource.get_att(i),
                )
            )

        bucket = template.add_resource(
            s3.Bucket(
                'Bucket',
                AccessControl=s3.Private,
                LifecycleConfiguration=s3.LifecycleConfiguration(
                    Rules=[
                        s3.LifecycleRule(
                            NoncurrentVersionExpirationInDays=90,
                            Status='Enabled',
                        )
                    ]
                ),
                VersioningConfiguration=s3.VersioningConfiguration(
                    Status='Enabled'
                ),
            )
        )
        template.add_output(
            Output(
                'PipelineBucketName',
                Description='Name of pipeline bucket',
                Value=bucket.ref(),
            )
        )

        # This list must be kept in sync between the CodeBuild project and its
        # role
        build_name = Join('', [variables['AppPrefix'].ref, '-build'])
        build_role = template.add_resource(
            iam.Role(
                'BuildRole',
                AssumeRolePolicyDocument=make_simple_assume_policy(
                    'codebuild.amazonaws.com'
                ),
                PermissionsBoundary=boundary_arn,
                Policies=[
                    iam.Policy(
                        PolicyName=Join('', [build_name, '-policy']),
                        PolicyDocument=PolicyDocument(
                            Version='2012-10-17',
                            Statement=[
                                Statement(
                                    Action=[awacs.s3.GetObject],
                                    Effect=Allow,
                                    Resource=[
                                        Join(
                                            '',
                                            [bucket.get_att('Arn'), '/*'],
                                        )
                                    ],
                                ),
                                Statement(
                                    Action=[
                                        awacs.ecr.GetAuthorizationToken
                                    ],
                                    Effect=Allow,
                                    Resource=['*'],
                                ),
                                Statement(
                                    Action=[
                                        awacs.ecr.BatchCheckLayerAvailability,
                                        awacs.ecr.BatchGetImage,
                                        awacs.ecr.CompleteLayerUpload,
                                        awacs.ecr.DescribeImages,
                                        awacs.ecr.GetDownloadUrlForLayer,
                                        awacs.ecr.InitiateLayerUpload,
                                        awacs.ecr.PutImage,
                                        awacs.ecr.UploadLayerPart,
                                    ],
                                    Effect=Allow,
                                    Resource=[ecr_repo_arn],
                                ),
                                Statement(
                                    Action=[
                                        awacs.ssm.GetParameter,
                                        awacs.ssm.PutParameter,
                                    ],
                                    Effect=Allow,
                                    Resource=[image_param_arn],
                                ),
                                # Grant access to both the log group and the
                                # log streams beneath it.
                                Statement(
                                    Action=[
                                        awacs.logs.CreateLogGroup,
                                        awacs.logs.CreateLogStream,
                                        awacs.logs.PutLogEvents,
                                    ],
                                    Effect=Allow,
                                    Resource=[
                                        Join(
                                            '',
                                            [
                                                'arn:',
                                                Partition,
                                                ':logs:',
                                                Region,
                                                ':',
                                                AccountId,
                                                ':log-group:/aws/codebuild/',
                                                build_name,
                                            ]
                                            + x,
                                        )
                                        for x in [[':*'], [':*/*']]
                                    ],
                                ),
                            ],
                        ),
                    )
                ],
            )
        )

        buildproject = template.add_resource(
            codebuild.Project(
                'BuildProject',
                Artifacts=codebuild.Artifacts(Type='CODEPIPELINE'),
                Environment=codebuild.Environment(
                    ComputeType='BUILD_GENERAL1_SMALL',
                    EnvironmentVariables=[
                        codebuild.EnvironmentVariable(
                            Name='AWS_DEFAULT_REGION',
                            Type='PLAINTEXT',
                            Value=Region,
                        ),
                        codebuild.EnvironmentVariable(
                            Name='AWS_ACCOUNT_ID',
                            Type='PLAINTEXT',
                            Value=AccountId,
                        ),
                        codebuild.EnvironmentVariable(
                            Name='IMAGE_REPO_NAME',
                            Type='PLAINTEXT',
                            Value=variables['EcrRepoName'].ref,
                        ),
                    ],
                    Image='aws/codebuild/docker:18.09.0',
                    Type='LINUX_CONTAINER',
                ),
                Name=build_name,
                ServiceRole=build_role.get_att('Arn'),
                Source=codebuild.Source(
                    Type='CODEPIPELINE',
                    BuildSpec=variables['BuildProjectBuildSpec'],
                ),
            )
        )

        pipelinerole = template.add_resource(
            iam.Role(
                'PipelineRole',
                AssumeRolePolicyDocument=make_simple_assume_policy(
                    'codepipeline.amazonaws.com'
                ),
                PermissionsBoundary=boundary_arn,
                Policies=[
                    iam.Policy(
                        PolicyName=Join(
                            '', [build_name, '-pipeline-policy']
                        ),
                        PolicyDocument=PolicyDocument(
                            Version='2012-10-17',
                            Statement=[
                                Statement(
                                    Action=[
                                        awacs.codecommit.GetBranch,
                                        awacs.codecommit.GetCommit,
                                        awacs.codecommit.UploadArchive,
                                        awacs.codecommit.GetUploadArchiveStatus,  # noqa
                                        awacs.codecommit.CancelUploadArchive,
                                    ],
                                    Effect=Allow,
                                    Resource=[appsource.get_att('Arn')],
                                ),
                                Statement(
                                    Action=[awacs.s3.GetBucketVersioning],
                                    Effect=Allow,
                                    Resource=[bucket.get_att('Arn')],
                                ),
                                Statement(
                                    Action=[
                                        awacs.s3.GetObject,
                                        awacs.s3.PutObject,
                                    ],
                                    Effect=Allow,
                                    Resource=[
                                        Join(
                                            '',
                                            [bucket.get_att('Arn'), '/*'],
                                        )
                                    ],
                                ),
                                Statement(
                                    Action=[
                                        awacs.codebuild.BatchGetBuilds,
                                        awacs.codebuild.StartBuild,
                                    ],
                                    Effect=Allow,
                                    Resource=[
                                        buildproject.get_att('Arn')
                                    ],
                                ),
                            ],
                        ),
                    )
                ],
            )
        )

        template.add_resource(
            codepipeline.Pipeline(
                'Pipeline',
                ArtifactStore=codepipeline.ArtifactStore(
                    Location=bucket.ref(), Type='S3'
                ),
                Name=build_name,
                RoleArn=pipelinerole.get_att('Arn'),
                Stages=[
                    codepipeline.Stages(
                        Name='Source',
                        Actions=[
                            codepipeline.Actions(
                                Name='CodeCommit',
                                ActionTypeId=codepipeline.ActionTypeId(
                                    Category='Source',
                                    Owner='AWS',
                                    Provider='CodeCommit',
                                    Version='1',
                                ),
                                Configuration={
                                    'RepositoryName': appsource.get_att('Name'),  # noqa
                                    'BranchName': 'master',
                                },
                                OutputArtifacts=[
                                    codepipeline.OutputArtifacts(
                                        Name='CodeCommitRepo'
                                    )
                                ],
                            ),
                        ],
                    ),
                    codepipeline.Stages(
                        Name='Build',
                        Actions=[
                            codepipeline.Actions(
                                Name='Build',
                                ActionTypeId=codepipeline.ActionTypeId(
                                    Category='Build',
                                    Owner='AWS',
                                    Provider='CodeBuild',
                                    Version='1',
                                ),
                                Configuration={
                                    'ProjectName': buildproject.ref()
                                },
                                InputArtifacts=[
                                    codepipeline.InputArtifacts(
                                        Name='CodeCommitRepo'
                                    )
                                ],
                            )
                        ],
                    ),
                ],
            )
        )