def test_join_unjoinable(self):
    """Both a bare delimiter and a delimiter with a single part must raise."""
    for call_args in ((".",), (".", "x")):
        with self.assertRaises(exceptions.IntrinsicFuncInputError) as ctx:
            functions.join(*call_args)
        self.assertEqual(ctx.exception.message,
                         functions.join._errmsg_needinput)
def test_join_unjoinable(self):
    """join() must reject calls that supply nothing joinable."""
    # Delimiter alone: nothing to join.
    with self.assertRaises(exceptions.IntrinsicFuncInputError) as no_parts:
        functions.join('.')
    self.assertEqual(no_parts.exception.message,
                     functions.join._errmsg_needinput)
    # Delimiter plus a single part: still nothing to join.
    with self.assertRaises(exceptions.IntrinsicFuncInputError) as one_part:
        functions.join('.', 'x')
    self.assertEqual(one_part.exception.message,
                     functions.join._errmsg_needinput)
def test_join(self):
    """A delimiter plus several parts produces an Fn::Join structure."""
    joined = functions.join('.', 'x', 'y', 'z')
    expected = ['.', ['x', 'y', 'z']]
    self.assertEqual(joined['Fn::Join'], expected)
def main():
    """Build and print the CloudFormation template for a refinery web stack.

    Loads the deployment configuration, composes a CloudInit userdata
    script from config values plus bootstrap.sh and aws.sh, declares the
    RDS, EBS, EC2, IAM, and SMTP-user resources, and writes the rendered
    template to stdout (progress messages go to stderr).
    """
    config, config_yaml = load_config()
    derive_config(config)
    # Timestamp suffix keeps per-deployment names unique.
    unique_suffix = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M")
    # We discover the current git branch/commit
    # so that the deployment script can use it
    # to clone the same commit.
    commit = os.popen("""git rev-parse HEAD""").read().rstrip()
    assert commit
    # SITE_NAME is embedded in single quotes in the userdata script below,
    # so it must not itself contain a single quote.
    assert "'" not in config['SITE_NAME']
    instance_tags = tags.load()
    # Set the `Name` as it appears on the EC2 web UI.
    instance_tags.append({'Key': 'Name',
                          'Value': "refinery-web-" + unique_suffix})
    config['tags'] = instance_tags
    config_uri = save_s3_config(config, unique_suffix)
    sys.stderr.write("Configuration saved to {}\n".format(config_uri))
    # The userdata script is executed via CloudInit.
    # It's made by concatenating a block of parameter variables,
    # with the bootstrap.sh script,
    # and the aws.sh script.
    user_data_script = functions.join(
        "",
        "#!/bin/sh\n",
        "CONFIG_YAML=", base64.b64encode(config_yaml), "\n",
        "CONFIG_JSON=", base64.b64encode(json.dumps(config)), "\n",
        "AWS_DEFAULT_REGION=", functions.ref("AWS::Region"), "\n",
        "RDS_ID=", functions.ref('RDSInstance'), "\n",
        "RDS_ENDPOINT_ADDRESS=",
        functions.get_att('RDSInstance', 'Endpoint.Address'), "\n",
        "RDS_ENDPOINT_PORT=",
        functions.get_att('RDSInstance', 'Endpoint.Port'), "\n",
        # NOTE(review): value redacted in source ("******") — restore from VCS.
        "RDS_SUPERUSER_PASSWORD="******"\n",
        "RDS_ROLE=", config['RDS_ROLE'], "\n",
        "ADMIN=", config['ADMIN'], "\n",
        "DEFAULT_FROM_EMAIL=", config['DEFAULT_FROM_EMAIL'], "\n",
        "SERVER_EMAIL=", config['SERVER_EMAIL'], "\n",
        # NOTE(review): value redacted in source ("******") — restore from VCS.
        "IAM_SMTP_USER="******"\n",
        "S3_CONFIG_URI=", config['S3_CONFIG_URI'], "\n",
        "SITE_URL=", config['SITE_URL'], "\n",
        # May contain spaces, but can't contain "'"
        "SITE_NAME='", config['SITE_NAME'], "'\n",
        "GIT_BRANCH=", commit, "\n",
        "\n",
        open('bootstrap.sh').read(),
        open('aws.sh').read())
    cft = core.CloudFormationTemplate(description="refinery platform.")
    rds_properties = {
        "AllocatedStorage": "5",
        "AvailabilityZone": config['AVAILABILITY_ZONE'],
        "BackupRetentionPeriod": "0",
        "DBInstanceClass": "db.t2.small",  # todo:?
        "DBInstanceIdentifier": config['RDS_NAME'],
        "Engine": "postgres",
        "EngineVersion": "9.3.10",
        # "KmsKeyId" ?
        # NOTE(review): credentials redacted in source — restore from VCS.
        "MasterUsername": "******",
        "MasterUserPassword": "******",
        "MultiAZ": False,
        "Port": "5432",
        "PubliclyAccessible": False,
        "StorageType": "gp2",
        "Tags": instance_tags,  # todo: Should be different?
    }
    if 'RDS_SNAPSHOT' in config:
        rds_properties['DBSnapshotIdentifier'] = config['RDS_SNAPSHOT']
    cft.resources.rds_instance = core.Resource(
        'RDSInstance', 'AWS::RDS::DBInstance',
        core.Properties(rds_properties),
        core.DeletionPolicy("Snapshot"),
    )
    volume_properties = {
        'AvailabilityZone': config['AVAILABILITY_ZONE'],
        'Encrypted': True,
        'Size': config['DATA_VOLUME_SIZE'],
        'Tags': tags.load(),
        'VolumeType': config['DATA_VOLUME_TYPE'],
    }
    if 'DATA_SNAPSHOT' in config:
        volume_properties['SnapshotId'] = config['DATA_SNAPSHOT']
    # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-ebs-volume.html
    cft.resources.ebs = core.Resource(
        'RefineryData', 'AWS::EC2::Volume',
        core.Properties(volume_properties),
        core.DeletionPolicy("Snapshot"),
    )
    cft.resources.ec2_instance = core.Resource(
        'WebInstance', 'AWS::EC2::Instance',
        core.Properties({
            'AvailabilityZone': config['AVAILABILITY_ZONE'],
            'ImageId': 'ami-d05e75b8',
            'InstanceType': 'm3.medium',
            'UserData': functions.base64(user_data_script),
            'KeyName': config['KEY_NAME'],
            'IamInstanceProfile': functions.ref('WebInstanceProfile'),
            'Tags': instance_tags,
        }),
        # The userdata script reads the RDS endpoint, so the database
        # must exist before the web instance boots.
        core.DependsOn('RDSInstance'),
    )
    cft.resources.instance_profile = core.Resource(
        'WebInstanceProfile', 'AWS::IAM::InstanceProfile',
        core.Properties({
            'Path': '/',
            'Roles': [
                functions.ref('WebInstanceRole')
            ]
        })
    )
    cft.resources.web_role = core.Resource(
        'WebInstanceRole', 'AWS::IAM::Role',
        core.Properties({
            # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html#cfn-iam-role-templateexamples
            "AssumeRolePolicyDocument": {
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["ec2.amazonaws.com"]
                    },
                    "Action": ["sts:AssumeRole"]
                }]
            },
            'ManagedPolicyArns': [
                'arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess',
                'arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess',
                'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
            ],
            'Path': '/',
            'Policies': [
                {
                    'PolicyName': "CreateAccessKey",
                    'PolicyDocument': {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": [
                                    "iam:CreateAccessKey"
                                ],
                                "Resource": [
                                    "*"
                                ]
                            }
                        ]
                    }
                },
                {
                    'PolicyName': "CreateTags",
                    'PolicyDocument': {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": [
                                    "ec2:CreateTags"
                                ],
                                "Resource": "*"
                            }
                        ]
                    }
                }
            ]
        })
    )
    cft.resources.smtp_user = core.Resource(
        'RefinerySMTPUser', 'AWS::IAM::User',
        core.Properties({
            'Policies': [{
                'PolicyName': "SESSendingAccess",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": "ses:SendRawEmail",
                        "Resource": "*"
                    }]
                }
            }]
        })
    )
    cft.resources.mount = core.Resource(
        'RefineryVolume', 'AWS::EC2::VolumeAttachment',
        core.Properties({
            'Device': '/dev/xvdr',
            'InstanceId': functions.ref('WebInstance'),
            'VolumeId': functions.ref('RefineryData'),
        })
    )
    # Emit the rendered template on stdout.
    print(str(cft))
def make_template(config, config_yaml):
    """Make a fresh CloudFormation template object and return it.

    Builds the complete Refinery Platform stack: template Parameters,
    a CloudInit userdata script, RDS/EBS/EC2/IAM/SMTP resources,
    load-balancer and security-group wiring, Cognito identity pool
    resources for S3 uploads, and the ELB access-log bucket policy.

    Args:
        config: deployment configuration mapping (mutated: 'tags' is set).
        config_yaml: raw YAML form of the config, embedded in userdata.

    Returns:
        A ``core.CloudFormationTemplate`` ready to be rendered.
    """
    stack_name = config['STACK_NAME']
    # We discover the current git branch/commit so that the deployment script
    # can use it to clone the same commit
    commit = os.popen("""git rev-parse HEAD""").read().rstrip()
    assert commit
    # SITE_NAME is single-quoted in the userdata script below.
    assert "'" not in config['SITE_NAME']
    instance_tags = load_tags()
    # Stack Name is also used for instances.
    instance_tags.append({'Key': 'Name', 'Value': stack_name})
    # This tag is variable and can be specified by
    # template Parameter.
    instance_tags.append({
        'Key': functions.ref('SnapshotSchedulerTag'),
        'Value': 'default'
    })
    config['tags'] = instance_tags
    config_uri = save_s3_config(config)
    sys.stdout.write("Configuration saved to {}\n".format(config_uri))
    tls_rewrite = "false"
    if 'TLS_CERTIFICATE' in config:
        tls_rewrite = "true"
    # The userdata script is executed via CloudInit
    # It's made by concatenating a block of parameter variables,
    # with the bootstrap.sh script, and the aws.sh script
    user_data_script = functions.join(
        "",
        "#!/bin/sh\n",
        "CONFIG_YAML=", base64.b64encode(config_yaml), "\n",
        "CONFIG_JSON=", base64.b64encode(json.dumps(config)), "\n",
        "AWS_DEFAULT_REGION=", functions.ref("AWS::Region"), "\n",
        "RDS_ENDPOINT_ADDRESS=",
        functions.get_att('RDSInstance', 'Endpoint.Address'), "\n",
        "RDS_ENDPOINT_PORT=",
        functions.get_att('RDSInstance', 'Endpoint.Port'), "\n",
        # NOTE(review): value redacted in source ("******") — restore from VCS.
        "RDS_SUPERUSER_PASSWORD="******"\n",
        "RDS_ROLE=", config['RDS_ROLE'], "\n",
        "ADMIN=", config['ADMIN'], "\n",
        "DEFAULT_FROM_EMAIL=", config['DEFAULT_FROM_EMAIL'], "\n",
        "SERVER_EMAIL=", config['SERVER_EMAIL'], "\n",
        # NOTE(review): value redacted in source ("******") — restore from VCS.
        "IAM_SMTP_USER="******"\n",
        "export FACTER_TLS_REWRITE=", tls_rewrite, "\n",
        "S3_CONFIG_URI=", config['S3_CONFIG_URI'], "\n",
        "SITE_URL=", config['SITE_URL'], "\n",
        # May contain spaces, but can't contain "'"
        "SITE_NAME='", config['SITE_NAME'], "'\n",
        "GIT_BRANCH=", commit, "\n",
        "\n",
        open('bootstrap.sh').read(),
        open('aws.sh').read())
    cft = core.CloudFormationTemplate(description="Refinery Platform main")
    # This parameter tags the EC2 instances, and is intended to be used
    # with the AWS Reference Implementation EBS Snapshot Scheduler:
    # http://docs.aws.amazon.com/solutions/latest/ebs-snapshot-scheduler/welcome.html
    cft.parameters.add(
        core.Parameter(
            'SnapshotSchedulerTag', 'String', {
                'Default': 'scheduler:ebs-snapshot',
                'Description': "Tag added to EC2 Instances so that "
                               "the EBS Snapshot Scheduler will recognise them.",
            }))
    cft.parameters.add(
        core.Parameter(
            'IdentityPoolName', 'String', {
                'Default': 'Refinery Platform',
                'Description': 'Name of Cognito identity pool for S3 uploads',
            }))
    cft.parameters.add(
        core.Parameter(
            'DeveloperProviderName', 'String', {
                'Default': 'login.refinery',
                'Description': '"domain" by which Cognito will refer to users',
                'AllowedPattern': '[a-z\-\.]+',
                'ConstraintDescription':
                    'must only contain lower case letters, periods, '
                    'underscores, and hyphens'
            }))
    cft.parameters.add(
        core.Parameter(
            'StorageStackName', 'String', {
                'Default': '${AWS::StackName}Storage',
                'Description': 'Name of the S3 storage stack for Django '
                               'static and media files',
            }))
    rds_properties = {
        "AllocatedStorage": "5",
        "AutoMinorVersionUpgrade": False,
        "BackupRetentionPeriod": "15",
        "CopyTagsToSnapshot": True,
        "DBInstanceClass": "db.t2.small",  # todo:?
        "DBInstanceIdentifier": config['RDS_NAME'],
        "Engine": "postgres",
        "EngineVersion": "9.3.14",
        # "KmsKeyId" ?
        # NOTE(review): username redacted in source — restore from VCS.
        "MasterUsername": "******",
        "MasterUserPassword": config['RDS_SUPERUSER_PASSWORD'],
        "MultiAZ": False,
        "Port": "5432",
        "PubliclyAccessible": False,
        "StorageType": "gp2",
        "Tags": instance_tags,  # todo: Should be different?
        "VPCSecurityGroups": [functions.get_att('RDSSecurityGroup',
                                                'GroupId')],
    }
    if 'RDS_SNAPSHOT' in config:
        rds_properties['DBSnapshotIdentifier'] = config['RDS_SNAPSHOT']
    cft.resources.rds_instance = core.Resource(
        'RDSInstance', 'AWS::RDS::DBInstance',
        core.Properties(rds_properties),
        core.DeletionPolicy("Snapshot"),
    )
    volume_properties = {
        'Encrypted': True,
        'Size': config['DATA_VOLUME_SIZE'],
        'Tags': load_tags(),
        # Volume must live in the same AZ as the instance it attaches to.
        'AvailabilityZone': functions.get_att('WebInstance',
                                              'AvailabilityZone'),
        'VolumeType': config['DATA_VOLUME_TYPE'],
    }
    if 'DATA_SNAPSHOT' in config:
        volume_properties['SnapshotId'] = config['DATA_SNAPSHOT']
    # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-ebs-volume.html
    cft.resources.ebs = core.Resource(
        'RefineryData', 'AWS::EC2::Volume',
        core.Properties(volume_properties),
        core.DeletionPolicy("Snapshot"),
    )
    cft.resources.ec2_instance = core.Resource(
        'WebInstance', 'AWS::EC2::Instance',
        core.Properties({
            'ImageId': 'ami-d05e75b8',
            'InstanceType': 'm3.medium',
            'UserData': functions.base64(user_data_script),
            'KeyName': config['KEY_NAME'],
            'IamInstanceProfile': functions.ref('WebInstanceProfile'),
            'SecurityGroups': [functions.ref("InstanceSecurityGroup")],
            'Tags': instance_tags,
        }),
        # Userdata reads the RDS endpoint, so the DB must exist first.
        core.DependsOn(['RDSInstance']),
    )
    cft.resources.instance_profile = core.Resource(
        'WebInstanceProfile', 'AWS::IAM::InstanceProfile',
        core.Properties({
            'Path': '/',
            'Roles': [functions.ref('WebInstanceRole')]
        }))
    cft.resources.web_role = core.Resource(
        'WebInstanceRole', 'AWS::IAM::Role',
        core.Properties({
            # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html#cfn-iam-role-templateexamples
            "AssumeRolePolicyDocument": {
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["ec2.amazonaws.com"]
                    },
                    "Action": ["sts:AssumeRole"]
                }]
            },
            'ManagedPolicyArns': [
                'arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess',
                'arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess',
                'arn:aws:iam::aws:policy/AmazonS3FullAccess'
            ],
            'Path': '/',
            'Policies': [{
                'PolicyName': "CreateAccessKey",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": ["iam:CreateAccessKey"],
                        "Resource": ["*"]
                    }]
                },
            }, {
                'PolicyName': "CreateSnapshot",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": ["ec2:CreateSnapshot"],
                        "Resource": ["*"]
                    }]
                }
            }, {
                'PolicyName': "CreateDBSnapshot",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": ["rds:CreateDBSnapshot"],
                        "Resource": ["*"]
                    }]
                }
            }, {
                'PolicyName': "CreateTags",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": ["ec2:CreateTags"],
                        "Resource": "*"
                    }]
                }
            }, {
                "PolicyName": "CognitoAccess",
                "PolicyDocument": {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": [
                            "cognito-identity:ListIdentityPools",
                        ],
                        "Resource": "arn:aws:cognito-identity:*"
                    }, {
                        "Effect": "Allow",
                        "Action": [
                            "cognito-identity:"
                            "GetOpenIdTokenForDeveloperIdentity"
                        ],
                        "Resource": {
                            "Fn::Sub": [
                                "arn:aws:cognito-identity:"
                                "${AWS::Region}:${AWS::AccountId}:"
                                "identitypool/${Pool}",
                                {
                                    "Pool": functions.ref('IdentityPool')
                                }
                            ]
                        }
                    }]
                }
            }]
        }))
    cft.resources.smtp_user = core.Resource(
        'RefinerySMTPUser', 'AWS::IAM::User',
        core.Properties({
            'Policies': [{
                'PolicyName': "SESSendingAccess",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": "ses:SendRawEmail",
                        "Resource": "*"
                    }]
                }
            }]
        }))
    cft.resources.mount = core.Resource(
        'RefineryVolume', 'AWS::EC2::VolumeAttachment',
        core.Properties({
            'Device': '/dev/xvdr',
            'InstanceId': functions.ref('WebInstance'),
            'VolumeId': functions.ref('RefineryData'),
        }))
    # Security Group for Elastic Load Balancer
    # (public facing).
    cft.resources.elbsg = core.Resource(
        'ELBSecurityGroup', 'AWS::EC2::SecurityGroup',
        core.Properties({
            'GroupDescription': "Refinery ELB",
            # Egress Rule defined via
            # AWS::EC2::SecurityGroupEgress resource,
            # to avoid circularity (below).
            # See http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group.html  # noqa: E501
            'SecurityGroupIngress': [
                {
                    "IpProtocol": "tcp",
                    "FromPort": "80",
                    "ToPort": "80",
                    "CidrIp": "0.0.0.0/0",
                },
                {
                    "IpProtocol": "tcp",
                    "FromPort": "443",
                    "ToPort": "443",
                    "CidrIp": "0.0.0.0/0",
                },
            ],
        }))
    cft.resources.elbegress = core.Resource(
        'ELBEgress', 'AWS::EC2::SecurityGroupEgress',
        core.Properties({
            "GroupId": functions.get_att('ELBSecurityGroup', 'GroupId'),
            "IpProtocol": "tcp",
            "FromPort": "80",
            "ToPort": "80",
            "DestinationSecurityGroupId":
                functions.get_att('InstanceSecurityGroup', 'GroupId'),
        }))
    # Security Group for EC2- instance.
    cft.resources.instancesg = core.Resource(
        'InstanceSecurityGroup', 'AWS::EC2::SecurityGroup',
        core.Properties({
            'GroupDescription': "Refinery EC2 Instance",
            'SecurityGroupEgress': [],
            'SecurityGroupIngress': [
                {
                    "IpProtocol": "tcp",
                    "FromPort": "22",
                    "ToPort": "22",
                    "CidrIp": "0.0.0.0/0",
                },
                {
                    "IpProtocol": "tcp",
                    "FromPort": "80",
                    "ToPort": "80",
                    # "CidrIp": "0.0.0.0/0",
                    # Only accept connections from the ELB.
                    "SourceSecurityGroupId":
                        functions.get_att('ELBSecurityGroup', 'GroupId'),
                },
            ],
        }))
    # Security Group for RDS instance.
    cft.resources.rdssg = core.Resource(
        'RDSSecurityGroup', 'AWS::EC2::SecurityGroup',
        core.Properties({
            'GroupDescription': "Refinery RDS",
            'SecurityGroupEgress': [
                # We would like to remove all egress rules here,
                # but you can't do that with this version
                # of CloudFormation.
                # We decided that the hacky workarounds are
                # not worth it.
            ],
            'SecurityGroupIngress': [
                {
                    "IpProtocol": "tcp",
                    "FromPort": "5432",
                    "ToPort": "5432",
                    # Only accept connections from the
                    # Instance Security Group.
                    "SourceSecurityGroupId":
                        functions.get_att('InstanceSecurityGroup',
                                          'GroupId'),
                },
            ],
        }))
    # ELB per
    # http://cfn-pyplates.readthedocs.io/en/latest/examples/options/template.html
    # Insecure, Port 80, HTTP listener
    http_listener = {
        'LoadBalancerPort': '80',
        'Protocol': 'HTTP',
        'InstanceProtocol': 'HTTP',
        'InstancePort': '80',
        'PolicyNames': []
    }
    listeners = [http_listener]
    if 'TLS_CERTIFICATE' in config:
        # Secure, Port 443, HTTPS listener
        https_listener = {
            'LoadBalancerPort': '443',
            'Protocol': 'HTTPS',
            'InstanceProtocol': 'HTTP',
            'InstancePort': '80',
            'PolicyNames': [],
            'SSLCertificateId': config['TLS_CERTIFICATE']
        }
        listeners.append(https_listener)
    cft.resources.elb = core.Resource(
        'LoadBalancer', 'AWS::ElasticLoadBalancing::LoadBalancer', {
            'AccessLoggingPolicy': {
                'EmitInterval': functions.ref('LogInterval'),
                'Enabled': True,
                'S3BucketName': config['S3_LOG_BUCKET'],
                # 'S3BucketPrefix' unused
            },
            'AvailabilityZones':
                [functions.get_att('WebInstance', 'AvailabilityZone')],
            'ConnectionSettings': {
                'IdleTimeout': 1800  # seconds
            },
            'HealthCheck': {
                'HealthyThreshold': '2',
                'Interval': '30',
                'Target': 'HTTP:80/',
                'Timeout': '5',
                'UnhealthyThreshold': '4'
            },
            'Instances': [functions.ref('WebInstance')],
            'LoadBalancerName': config['STACK_NAME'],
            'Listeners': listeners,
            'SecurityGroups':
                [functions.get_att('ELBSecurityGroup', 'GroupId')],
            'Tags': load_tags(),
        })
    cft.parameters.add(
        core.Parameter(
            'LogInterval', 'Number', {
                'Default': 60,
                'Description':
                    "How often, in minutes, the ELB emits its logs to the "
                    "configured S3 bucket. The ELB log facility restricts "
                    "this to be 5 or 60.",
            }))
    # Cognito Identity Pool for Developer Authenticated Identities Authflow
    # http://docs.aws.amazon.com/cognito/latest/developerguide/authentication-flow.html
    cft.resources.add(
        core.Resource(
            'IdentityPool', 'AWS::Cognito::IdentityPool',
            core.Properties({
                'IdentityPoolName': functions.ref('IdentityPoolName'),
                'AllowUnauthenticatedIdentities': False,
                'DeveloperProviderName':
                    functions.ref('DeveloperProviderName'),
            })))
    cft.resources.add(
        core.Resource(
            'IdentityPoolAuthenticatedRole',
            'AWS::Cognito::IdentityPoolRoleAttachment',
            core.Properties({
                'IdentityPoolId': functions.ref('IdentityPool'),
                'Roles': {
                    'authenticated':
                        functions.get_att('CognitoS3UploadRole', 'Arn'),
                }
            })))
    upload_role_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {
                "Federated": "cognito-identity.amazonaws.com"
            },
            "Action": "sts:AssumeRoleWithWebIdentity",
            "Condition": {
                "StringEquals": {
                    "cognito-identity.amazonaws.com:aud":
                        functions.ref('IdentityPool')
                },
                "ForAnyValue:StringLike": {
                    "cognito-identity.amazonaws.com:amr": "authenticated"
                }
            }
        }]
    }
    upload_access_policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Action": ["cognito-identity:*"],
            "Resource": "*"
        }, {
            "Action": ["s3:PutObject", "s3:AbortMultipartUpload"],
            "Effect": "Allow",
            "Resource": {
                "Fn::Sub": [
                    "arn:aws:s3:::${MediaBucket}/uploads/"
                    "${!cognito-identity.amazonaws.com:sub}/*",
                    {
                        "MediaBucket": {
                            "Fn::ImportValue": {
                                "Fn::Sub": "${StorageStackName}Media"
                            }
                        }
                    }
                ]
            }
        }]
    }
    cft.resources.add(
        core.Resource(
            'CognitoS3UploadRole', 'AWS::IAM::Role',
            core.Properties({
                'AssumeRolePolicyDocument': upload_role_trust_policy,
                'Policies': [{
                    'PolicyName': 'AuthenticatedS3UploadPolicy',
                    'PolicyDocument': upload_access_policy,
                }]
            })))
    # See http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy  # noqa: E501
    # for full list of region--principal identifiers.
    cft.mappings.region = core.Mapping(
        'Region', {'us-east-1': {
            'ELBPrincipal': '127311923021'
        }})
    cft.resources.log_policy = core.Resource(
        'LogBucketPolicy', 'AWS::S3::BucketPolicy',
        core.Properties({
            'Bucket': config['S3_LOG_BUCKET'],
            'PolicyDocument': {
                'Statement': [{
                    "Action": ["s3:PutObject"],
                    "Effect": "Allow",
                    "Resource": functions.join(
                        "", "arn:aws:s3:::", config['S3_LOG_BUCKET'],
                        "/AWSLogs/", functions.ref("AWS::AccountId"),
                        "/*"),
                    "Principal": {
                        "AWS": [
                            functions.find_in_map(
                                'Region', functions.ref("AWS::Region"),
                                'ELBPrincipal'),
                        ]
                    }
                }]
            }
        }))
    return cft
def templated_read(file_handle, context=None):
    """
    This function reads content from a file handle and processes as a template

    The Jinja2 templating engine is used, and the supplied context is
    provided. Once Jinja template processed, the document is split to extract
    CFN functions, e.g. Ref and Fn::Join etc, and the whole lot is returned
    Fn::Joined together (using the cfn_pyplates `join` function) ready to
    place in a UserData argument.

    This process is required in order that the Cloudformation functions are
    not embedded in strings where they would not be correctly evaluated at
    the time the template is processed by Cloudformation.

    Args:
        file_handle: any file-like object
        context: a dictionary of keys to use in the template, or None for
            an empty context

    Example
    -------

    File template:

        # snippet of script...
        $CFN_ROOT/cfn-init -s {{ stack_id }} -r {{ resource_name }} \
        --region {{ aws_region }} || error_exit 'Failed to run cfn-init'

    In the PyPlates code:

        ...
        'UserData': templated_read(
            open('my_template_script.sh', 'rt'),
            {'resource_name': 'MyWebServer',
             'stack_id': ref('AWS::StackId'),
             'aws_region': ref('AWS::Region')
             }),
        ...

    After processing, in the Cloudformation template:

        "UserData": {
            "Fn::Base64": {
                "Fn::Join": [
                    "",
                    [
                        "$CFN_ROOT/cfn-init -s ",
                        { "Ref": "AWS::StackId" },
                        " -r MyWebServer --region ",
                        { "Ref": "AWS::Region" },
                        " || error_exit 'Failed to run cfn-init'"
                    ]
                ]
            }
        },
    """
    # Fix: the previous signature used a mutable default (`context={}`),
    # which is shared across calls and can leak state between callers.
    if context is None:
        context = {}
    template = Template(file_handle.read())
    rendered = template.render(**context)
    # Split the rendered text around embedded CFN intrinsic-function
    # markers so each piece can be evaluated (or kept as plain text).
    tokens = FN_MATCH.split(rendered)
    return join("", *[_selective_eval(s) for s in tokens])
def main():
    """Build and print the CloudFormation template for a refinery web stack.

    Variant that restores an existing deployment: the RDS name and EBS
    data volume come from config, and the instance is pinned to the
    volume's Availability Zone.
    """
    config = load_config()
    # The Availability Zone of the new instance needs to match
    # the availability zone of the existing EBS.
    derive_config(config)
    # Timestamp suffix keeps per-deployment names unique.
    unique_suffix = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M")
    # We discover the current git branch/commit
    # so that the deployment script can use it
    # to clone the same commit.
    commit = os.popen("""git rev-parse HEAD""").read().rstrip()
    assert commit
    # SITE_NAME is single-quoted in the userdata script below.
    assert "'" not in config['SITE_NAME']
    instance_tags = tags.load()
    # Set the `Name` as it appears on the EC2 web UI.
    instance_tags.append({'Key': 'Name',
                          'Value': "refinery-web-" + unique_suffix})
    config['tags'] = instance_tags
    config_uri = save_s3_config(config, unique_suffix)
    sys.stderr.write("Configuration saved to {}\n".format(config_uri))
    # The userdata script is executed via CloudInit.
    # It's made by concatenating a block of parameter variables,
    # with the bootstrap.sh script,
    # and the aws.sh script.
    user_data_script = functions.join(
        "",
        "#!/bin/sh\n",
        "AWS_DEFAULT_REGION=", functions.ref("AWS::Region"), "\n",
        "RDS_NAME=", config['RDS_NAME'], "\n",
        # NOTE(review): value redacted in source ("******") — restore from VCS.
        "RDS_SUPERUSER_PASSWORD="******"\n",
        "RDS_ROLE=", config['RDS_ROLE'], "\n",
        "ADMIN=", config['ADMIN'], "\n",
        "DEFAULT_FROM_EMAIL=", config['DEFAULT_FROM_EMAIL'], "\n",
        "SERVER_EMAIL=", config['SERVER_EMAIL'], "\n",
        # NOTE(review): value redacted in source ("******") — restore from VCS.
        "IAM_SMTP_USER="******"\n",
        "S3_CONFIG_URI=", config['S3_CONFIG_URI'], "\n",
        "SITE_URL=", config['SITE_URL'], "\n",
        # May contain spaces, but can't contain "'"
        "SITE_NAME='", config['SITE_NAME'], "'\n",
        "GIT_BRANCH=", commit, "\n",
        "\n",
        open('bootstrap.sh').read(),
        open('aws.sh').read())
    cft = core.CloudFormationTemplate(description="refinery platform.")
    rds_properties = {
        "AllocatedStorage": "5",
        "AvailabilityZone": config['AVAILABILITY_ZONE'],
        "BackupRetentionPeriod": "0",
        "DBInstanceClass": "db.t2.small",  # todo:?
        "DBInstanceIdentifier": config['RDS_NAME'],
        "Engine": "postgres",
        "EngineVersion": "9.3.10",
        # "KmsKeyId" ?
        # NOTE(review): credentials redacted in source — restore from VCS.
        "MasterUsername": "******",
        "MasterUserPassword": "******",
        "MultiAZ": False,
        "Port": "5432",
        "PubliclyAccessible": False,
        "StorageType": "gp2",
        "Tags": instance_tags,  # todo: Should be different?
    }
    if 'RDS_SNAPSHOT' in config:
        rds_properties['DBSnapshotIdentifier'] = config['RDS_SNAPSHOT']
    cft.resources.rds_instance = core.Resource(
        'RDSInstance', 'AWS::RDS::DBInstance',
        core.Properties(rds_properties),
        core.DeletionPolicy("Snapshot"),
    )
    volume_properties = {
        'AvailabilityZone': config['AVAILABILITY_ZONE'],
        'Encrypted': True,
        'Size': config['DATA_VOLUME_SIZE'],
        'Tags': tags.load(),
        'VolumeType': config['DATA_VOLUME_TYPE'],
    }
    if 'DATA_SNAPSHOT' in config:
        volume_properties['SnapshotId'] = config['DATA_SNAPSHOT']
    # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-ebs-volume.html
    cft.resources.ebs = core.Resource(
        'RefineryData', 'AWS::EC2::Volume',
        core.Properties(volume_properties),
        core.DeletionPolicy("Snapshot"),
    )
    cft.resources.ec2_instance = core.Resource(
        'WebInstance', 'AWS::EC2::Instance',
        core.Properties({
            'AvailabilityZone': config['AVAILABILITY_ZONE'],
            'ImageId': 'ami-d05e75b8',
            'InstanceType': 'm3.medium',
            'UserData': functions.base64(user_data_script),
            'KeyName': config['KEY_NAME'],
            'IamInstanceProfile': functions.ref('WebInstanceProfile'),
            'Tags': instance_tags,
        })
    )
    cft.resources.instance_profile = core.Resource(
        'WebInstanceProfile', 'AWS::IAM::InstanceProfile',
        core.Properties({
            'Path': '/',
            'Roles': [
                functions.ref('WebInstanceRole')
            ]
        })
    )
    cft.resources.web_role = core.Resource(
        'WebInstanceRole', 'AWS::IAM::Role',
        core.Properties({
            # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html#cfn-iam-role-templateexamples
            "AssumeRolePolicyDocument": {
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["ec2.amazonaws.com"]
                    },
                    "Action": ["sts:AssumeRole"]
                }]
            },
            'ManagedPolicyArns': [
                'arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess',
                'arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess',
                'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
            ],
            'Path': '/',
            'Policies': [
                {
                    'PolicyName': "CreateAccessKey",
                    'PolicyDocument': {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": [
                                    "iam:CreateAccessKey"
                                ],
                                "Resource": [
                                    "*"
                                ]
                            }
                        ]
                    }
                },
                {
                    'PolicyName': "CreateTags",
                    'PolicyDocument': {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": [
                                    "ec2:CreateTags"
                                ],
                                "Resource": "*"
                            }
                        ]
                    }
                }
            ]
        })
    )
    cft.resources.smtp_user = core.Resource(
        'RefinerySMTPUser', 'AWS::IAM::User',
        core.Properties({
            'Policies': [{
                'PolicyName': "SESSendingAccess",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": "ses:SendRawEmail",
                        "Resource": "*"
                    }]
                }
            }]
        })
    )
    cft.resources.mount = core.Resource(
        'RefineryVolume', 'AWS::EC2::VolumeAttachment',
        core.Properties({
            'Device': '/dev/xvdr',
            'InstanceId': functions.ref('WebInstance'),
            'VolumeId': functions.ref('RefineryData'),
        })
    )
    # Emit the rendered template on stdout.
    print(str(cft))
def build_template(args):
    """ Build a CloudFormation template allowing for secure CloudTrail log
    aggregation and fine grained access control to SNS topics for
    notifications of new CloudTrail logs

    The reason that we create IAM roles for each client AWS account in order
    to enable clients to read their own CloudTrail logs, instead of merely
    delegating access to them in an S3 bucket policy is that "Bucket owner
    account can delegate permissions to users in its own account, but it
    cannot delegate permissions to other AWS accounts, because cross-account
    delegation is not supported." :
    http://docs.aws.amazon.com/AmazonS3/latest/dev/example-walkthroughs-managing-access-example4.html
    As a consequence we *can* delegate bucket permissions to client AWS
    accounts but we *can not* delegate object permissions (the log files
    themselves) to client AWS accounts.

    Example config :

    AccountRootARNs:
    - arn:aws:iam::012345678901:root  # Sales
    - arn:aws:iam::123456789012:root  # HR
    - arn:aws:iam::234567890123:root  # Marketing
    CloudTrailLogConsumers:
    - arn:aws:iam::345678901234:user/security_team  # Security team user
    - TrustedARN: arn:aws:iam::456789012343:root  # CloudCo Third Party
      TrustingARNs:
      - arn:aws:iam::012345678901:root  # Sales
      - arn:aws:iam::234567890123:root  # Marketing
    - TrustedARN: arn:aws:iam::567890123434:root  # Other.com Third Party
      TrustingARNs:
      - arn:aws:iam::123456789012:root  # HR
    ForeignAccountStatusSubscribers:
    - arn:aws:iam::345678901234:root  # Security Team
    """
    config = args.config
    # Tolerate a missing or malformed AccountRootARNs entry.
    account_root_arns = (
        config['AccountRootARNs']
        if 'AccountRootARNs' in config
        and isinstance(config['AccountRootARNs'], list) else [])
    cft = CloudFormationTemplate(
        description="AWS CloudTrail Storage Account S3 Storage Bucket")
    # Create the bucket
    cft.resources.add(
        Resource("S3Bucket", "AWS::S3::Bucket",
                 {"BucketName": args.bucketname},
                 DeletionPolicy("Retain")))
    # Build the s3 bucket policy statement list
    bucket_policy_statements = []
    # Allow the CloudTrail system to GetBucketAcl on the CloudTrail storage
    # bucket
    bucket_policy_statements.append({
        "Sid": "AWSCloudTrailAclCheck",
        "Effect": "Allow",
        "Principal": {
            "Service": "cloudtrail.amazonaws.com"
        },
        "Action": ["s3:GetBucketAcl"],
        "Resource": join("", "arn:aws:s3:::", ref("S3Bucket"))
    })
    # Allow each account to read it's own logs
    for account_arn in account_root_arns:
        account_id = get_account_id_from_arn(account_arn)
        # Per-account reader role is created via a nested stack.
        cft.resources.add(
            Resource(
                "CloudTrailLogReaderRole%s" % account_id,
                "AWS::CloudFormation::Stack", {
                    "TemplateURL":
                        "https://s3.amazonaws.com/infosec-cloudformation-templates/manage_iam_role.json",
                    "Parameters": {
                        "RoleName": "CloudTrailLogReader%s" % account_id,
                        "TrustedEntities": get_consumer_arns(
                            account_arn, config)
                    },
                    "TimeoutInMinutes": "5"
                }))
        # The reader role may only fetch objects under its own account's
        # AWSLogs prefix.
        cft.resources.add(
            Resource(
                "CloudTrailLogReaderPolicy%s" % account_id,
                "AWS::IAM::Policy", {
                    "PolicyName":
                        "CloudTrailLogReaderPolicy%s" % account_id,
                    "PolicyDocument": {
                        "Version": "2012-10-17",
                        "Statement": [{
                            "Effect": "Allow",
                            "Action": "s3:GetObject",
                            "Resource": join(
                                "", "arn:aws:s3:::", ref("S3Bucket"),
                                "/AWSLogs/%s/*" % account_id)
                        }]
                    },
                    "Roles": ["CloudTrailLogReader%s" % account_id]
                },
                DependsOn("CloudTrailLogReaderRole%s" % account_id)))
    # Bucket-level (not object-level) read access, shared by all the
    # per-account reader roles.
    cft.resources.add(
        Resource(
            "ReadCloudTrailBucket", "AWS::IAM::ManagedPolicy", {
                "Description": "ReadCloudTrailBucket",
                "PolicyDocument": {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": ["s3:ListAllMyBuckets",
                                   "s3:GetBucketLocation"],
                        "Resource": "*"
                    }, {
                        "Effect": "Allow",
                        "Action": [
                            "s3:GetBucketAcl",
                            "s3:ListBucket",
                            "s3:GetBucketTagging"
                        ],
                        "Resource": join("", "arn:aws:s3:::",
                                         ref("S3Bucket"))
                    }]
                },
                "Roles": [
                    "CloudTrailLogReader%s"
                    % get_account_id_from_arn(account_arn)
                    for account_arn in account_root_arns
                ]
            },
            DependsOn([
                "CloudTrailLogReaderRole%s"
                % get_account_id_from_arn(account_arn)
                for account_arn in account_root_arns
            ])))
    # Allow CloudTrail itself to deliver log objects into the bucket.
    bucket_policy_statements.append({
        # "Sid":"AWSCloudTrailWrite%s" % get_account_id_from_arn(account_arn),
        "Effect": "Allow",
        "Principal": {
            "Service": "cloudtrail.amazonaws.com"
        },
        "Action": ["s3:PutObject"],
        "Resource": join("", "arn:aws:s3:::", ref("S3Bucket"),
                         "/AWSLogs/*"),
        "Condition": {
            "StringEquals": {
                "s3:x-amz-acl": "bucket-owner-full-control"
            }
        }
    })
    # Apply the bucket policy to the bucket
    cft.resources.add(
        Resource(
            "BucketPolicy", "AWS::S3::BucketPolicy", {
                "Bucket": ref("S3Bucket"),
                "PolicyDocument": {
                    "Id": "BucketPolicyDocument",
                    "Version": "2012-10-17",
                    "Statement": bucket_policy_statements
                }
            }))
    # Create a single SNS Topic that each AWS account can publish to to report
    # on the CloudFormation progress
    cft.resources.add(
        Resource(
            "ForeignAccountStatusTopic", "AWS::SNS::Topic", {
                "DisplayName":
                    "Topic for foreign accounts to publish status information to",
                "TopicName": "ForeignAccountStatus"
            }))
    cft.resources.add(
        Resource(
            "ForeignAccountStatusTopicPolicy", "AWS::SNS::TopicPolicy", {
                "Topics": [ref("ForeignAccountStatusTopic")],
                "PolicyDocument": {
                    "Version": "2012-10-17",
                    "Id": "ForeignAccountStatusPolicy",
                    "Statement": [{
                        "Sid": "ForeignAccountStatusPublisher",
                        "Effect": "Allow",
                        "Principal": {
                            "AWS": account_root_arns
                        },
                        "Action": "SNS:Publish",
                        "Resource": ref("ForeignAccountStatusTopic"),
                    }, {
                        "Sid": "ForeignAccountStatusSubscriber",
                        "Effect": "Allow",
                        "Principal": {
                            "AWS":
                                config['ForeignAccountStatusSubscribers']
                        },
                        "Action": [
                            "SNS:GetTopicAttributes",
                            "SNS:ListSubscriptionsByTopic",
                            "SNS:Subscribe"
                        ],
                        "Resource": ref("ForeignAccountStatusTopic"),
                    }]
                }
            }))
    # Create SNS Topics for each AWS account and grant those accounts rights
    # to publish and subscribe to those topics
    for account_arn in account_root_arns:
        account_id = get_account_id_from_arn(account_arn)
        cft.resources.add(
            Resource(
                "Topic%s" % account_id, "AWS::SNS::Topic", {
                    "DisplayName":
                        "Mozilla CloudTrail Logs Topic for Account %s"
                        % account_id,
                    "TopicName": "MozillaCloudTrailLogs%s" % account_id
                }))
        # http://docs.aws.amazon.com/sns/latest/dg/AccessPolicyLanguage_UseCases_Sns.html#AccessPolicyLanguage_UseCase4_Sns
        cft.resources.add(
            Resource(
                "TopicPolicy%s" % account_id, "AWS::SNS::TopicPolicy", {
                    "Topics": [ref("Topic%s" % account_id)],
                    "PolicyDocument": {
                        "Version": "2012-10-17",
                        "Id": "AWSCloudTrailSNSPolicy%s" % account_id,
                        "Statement": [{
                            "Sid": "CloudTrailSNSPublish%s" % account_id,
                            "Effect": "Allow",
                            "Principal": {
                                "Service": "cloudtrail.amazonaws.com"
                            },
                            "Action": "SNS:Publish",
                            "Resource": ref("Topic%s" % account_id)
                        }, {
                            "Sid":
                                "CloudTrailSNSSubscribe%s" % account_id,
                            "Effect": "Allow",
                            "Principal": {
                                "AWS": account_arn
                            },
                            "Action": [
                                "SNS:GetTopicAttributes",
                                "SNS:ListSubscriptionsByTopic",
                                "SNS:Subscribe"
                            ],
                            "Resource": join(
                                ":", "arn:aws:sns", ref("AWS::Region"),
                                ref("AWS::AccountId"),
                                "MozillaCloudTrailLogs%s" % account_id)
                        }]
                    }
                }))
    return cft
def _get_autoscale(self, name, extra_security_groups=None,
                   extra_cloud_config='', extra_props_autoscale=None,
                   extra_props_launch=None, extra_attrs_launch=None,
                   config_min_size=3, config_max_size=3):
    """Build the AutoScalingGroup and LaunchConfiguration resources for
    the server role *name*.

    ``name`` is used to derive the resource names
    (``%sServerAutoScale`` / ``%sServerLaunchConfig``), the AMI lookup
    key in ``RegionMap``, the ``%sInstanceType`` parameter ref, and the
    ``Name``/``Role`` tags.

    :param extra_security_groups: extra security-group resource names
        referenced in addition to ``SSHFromBastionSecurityGroup``.
    :param extra_cloud_config: content appended to the user cloud
        config before it is joined into ``UserData``.
    :param extra_props_autoscale: overrides merged into the
        AutoScalingGroup properties.
    :param extra_props_launch: overrides merged into the
        LaunchConfiguration properties.
    :param extra_attrs_launch: extra attributes for the
        LaunchConfiguration resource.
    :param config_min_size: autoscale group minimum (stringified).
    :param config_max_size: autoscale group maximum (stringified).
    :return: ``[AutoScalingGroup Resource, LaunchConfiguration Resource]``
    """
    # Fix: the defaults used to be mutable objects ([] / {}) created once
    # at definition time and shared across every call -- the classic
    # Python mutable-default pitfall.  Use None sentinels and build fresh
    # containers per call; observable behavior for all callers is
    # unchanged.
    if extra_security_groups is None:
        extra_security_groups = []
    if extra_props_autoscale is None:
        extra_props_autoscale = {}
    if extra_props_launch is None:
        extra_props_launch = {}
    if extra_attrs_launch is None:
        extra_attrs_launch = []
    # general configs
    autoscale_name = '%sServerAutoScale' % name
    autoscale_launch_config = '%sServerLaunchConfig' % name
    # autoscaling configs
    props_autoscale = {
        "AvailabilityZones": {"Fn::GetAZs": {"Ref": "AWS::Region"}},
        "LaunchConfigurationName": {"Ref": autoscale_launch_config},
        "MinSize": "%s" % config_min_size,
        "MaxSize": "%s" % config_max_size,
        # PropagateAtLaunch copies the tags onto each launched instance.
        "Tags": [{
            "Key": "Name",
            "Value": name,
            "PropagateAtLaunch": True
        }, {
            "Key": "Role",
            "Value": name,
            "PropagateAtLaunch": True
        }]
    }
    props_autoscale.update(extra_props_autoscale)
    # launch configs: bastion SSH group first, then any extras.
    sec_groups = [{"Ref": sec_group} for sec_group in
                  ["SSHFromBastionSecurityGroup"] + extra_security_groups]
    cloud_config = self.get_user_cloud_config()
    cloud_config += extra_cloud_config
    props_launch = {
        "ImageId": {
            "Fn::FindInMap": ["RegionMap", {"Ref": "AWS::Region"}, name]
        },
        "InstanceType": {"Ref": "%sInstanceType" % name},
        "SecurityGroups": sec_groups,
        "UserData": {"Fn::Base64": join('', *cloud_config)}
    }
    props_launch.update(extra_props_launch)
    attrs_launch = extra_attrs_launch
    return [
        Resource(autoscale_name, "AWS::AutoScaling::AutoScalingGroup",
                 Properties(props_autoscale)),
        Resource(autoscale_launch_config,
                 "AWS::AutoScaling::LaunchConfiguration",
                 Properties(props_launch), attributes=attrs_launch)
    ]
def test_join(self):
    """Fn::Join output is {"Fn::Join": [delimiter, [parts...]]}."""
    joined = functions.join(".", "x", "y", "z")
    expected_payload = [".", ["x", "y", "z"]]
    self.assertEqual(joined["Fn::Join"], expected_payload)
def build_template(args):
    """ Build a CloudFormation template allowing for secure CloudTrail
    log aggregation and fine grained access control to SNS topics for
    notifications of new CloudTrail logs

    The reason that we create IAM roles for each client AWS account in
    order to enable clients to read their own CloudTrail logs, instead
    of merely delegating access to them in an S3 bucket policy is that
    "Bucket owner account can delegate permissions to users in its own
    account, but it cannot delegate permissions to other AWS accounts,
    because cross-account delegation is not supported." :
    http://docs.aws.amazon.com/AmazonS3/latest/dev/example-walkthroughs-managing-access-example4.html
    As a consequence we *can* delegate bucket permissions to client AWS
    accounts but we *can not* delegate object permissions (the log
    files themselves) to client AWS accounts.

    Example config :

    AccountRootARNs:
    - arn:aws:iam::012345678901:root # Sales
    - arn:aws:iam::123456789012:root # HR
    - arn:aws:iam::234567890123:root # Marketing
    CloudTrailLogConsumers:
    - arn:aws:iam::345678901234:user/security_team # Security team user
    - TrustedARN: arn:aws:iam::456789012343:root # CloudCo Third Party
      TrustingARNs:
      - arn:aws:iam::012345678901:root # Sales
      - arn:aws:iam::234567890123:root # Marketing
    - TrustedARN: arn:aws:iam::567890123434:root # Other.com Third Party
      TrustingARNs:
      - arn:aws:iam::123456789012:root # HR
    ForeignAccountStatusSubscribers:
    - arn:aws:iam::345678901234:root # Security Team
    """
    # NOTE(review): relies on helpers defined elsewhere in this file
    # (CloudFormationTemplate, Resource, ref, join, DeletionPolicy,
    # DependsOn, get_account_id_from_arn, get_consumer_arns) and assumes
    # `args` carries .config (dict) and .bucketname — confirm at caller.
    config = args.config
    # Only honor AccountRootARNs when it is present AND a list;
    # anything else degrades to an empty account set.
    account_root_arns = (config['AccountRootARNs']
                         if 'AccountRootARNs' in config
                         and isinstance(config['AccountRootARNs'], list)
                         else [])
    cft = CloudFormationTemplate(
        description="AWS CloudTrail Storage Account S3 Storage Bucket")
    # Create the bucket; Retain so logs survive stack deletion.
    cft.resources.add(Resource("S3Bucket", "AWS::S3::Bucket",
                               {"BucketName": args.bucketname},
                               DeletionPolicy("Retain")))
    # Build the s3 bucket policy statement list
    bucket_policy_statements = []
    # Allow the CloudTrail system to GetBucketAcl on the CloudTrail
    # storage bucket
    bucket_policy_statements.append({
        "Sid": "AWSCloudTrailAclCheck",
        "Effect": "Allow",
        "Principal": {"Service": "cloudtrail.amazonaws.com"},
        "Action": ["s3:GetBucketAcl"],
        "Resource": join("", "arn:aws:s3:::", ref("S3Bucket"))
    })
    # Allow each account to read its own logs: one IAM role (created via
    # a nested stack) plus one inline policy per client account.
    for account_arn in account_root_arns:
        account_id = get_account_id_from_arn(account_arn)
        # Nested stack that creates the CloudTrailLogReader<id> role with
        # the trust policy returned by get_consumer_arns.
        cft.resources.add(
            Resource(
                "CloudTrailLogReaderRole%s" % account_id,
                "AWS::CloudFormation::Stack",
                {
                    "TemplateURL": "https://s3.amazonaws.com/infosec-cloudformation-templates/manage_iam_role.json",
                    "Parameters": {
                        "RoleName": "CloudTrailLogReader%s" % account_id,
                        "TrustedEntities": get_consumer_arns(account_arn,
                                                             config)
                    },
                    "TimeoutInMinutes": "5"
                }
            )
        )
        # GetObject is scoped to this account's own /AWSLogs/<id>/ prefix.
        cft.resources.add(
            Resource(
                "CloudTrailLogReaderPolicy%s" % account_id,
                "AWS::IAM::Policy",
                {
                    "PolicyName": "CloudTrailLogReaderPolicy%s" % account_id,
                    "PolicyDocument": {
                        "Version": "2012-10-17",
                        "Statement": [{
                            "Effect": "Allow",
                            "Action": "s3:GetObject",
                            "Resource": join(
                                "",
                                "arn:aws:s3:::",
                                ref("S3Bucket"),
                                "/AWSLogs/%s/*" % account_id)}]},
                    "Roles": ["CloudTrailLogReader%s" % account_id]
                },
                DependsOn("CloudTrailLogReaderRole%s" % account_id)
            )
        )
    # Single managed policy granting bucket-level (not object-level)
    # read access, attached to every per-account reader role.
    cft.resources.add(
        Resource("ReadCloudTrailBucket",
                 "AWS::IAM::ManagedPolicy",
                 {"Description": "ReadCloudTrailBucket",
                  "PolicyDocument": {
                      "Version": "2012-10-17",
                      "Statement": [
                          {"Effect": "Allow",
                           "Action": ["s3:ListAllMyBuckets",
                                      "s3:GetBucketLocation"],
                           "Resource": "*"},
                          {"Effect": "Allow",
                           "Action": ["s3:GetBucketAcl",
                                      "s3:ListBucket",
                                      "s3:GetBucketTagging"],
                           "Resource": join("", "arn:aws:s3:::",
                                            ref("S3Bucket"))}
                      ]},
                  "Roles": ["CloudTrailLogReader%s" %
                            get_account_id_from_arn(account_arn)
                            for account_arn in account_root_arns]},
                 DependsOn(["CloudTrailLogReaderRole%s" %
                            get_account_id_from_arn(account_arn)
                            for account_arn in account_root_arns])
                 ))
    # Let the CloudTrail service write any account's logs into the
    # bucket, provided it grants the bucket owner full control.
    bucket_policy_statements.append(
        {
            # "Sid":"AWSCloudTrailWrite%s" % get_account_id_from_arn(account_arn),
            "Effect": "Allow",
            "Principal": {"Service": "cloudtrail.amazonaws.com"},
            "Action": ["s3:PutObject"],
            "Resource": join("", "arn:aws:s3:::", ref("S3Bucket"),
                             "/AWSLogs/*"),
            "Condition": {
                "StringEquals": {
                    "s3:x-amz-acl": "bucket-owner-full-control"
                }
            }
        })
    # Apply the bucket policy to the bucket
    cft.resources.add(
        Resource(
            "BucketPolicy",
            "AWS::S3::BucketPolicy",
            {
                "Bucket": ref("S3Bucket"),
                "PolicyDocument": {
                    "Id": "BucketPolicyDocument",
                    "Version": "2012-10-17",
                    "Statement": bucket_policy_statements
                }
            }
        )
    )
    # Create a single SNS Topic that each AWS account can publish to, to
    # report on the CloudFormation progress
    cft.resources.add(
        Resource("ForeignAccountStatusTopic",
                 "AWS::SNS::Topic",
                 {
                     "DisplayName": "Topic for foreign accounts to publish status information to",
                     "TopicName": "ForeignAccountStatus"
                 }
                 )
        )
    # Foreign accounts may publish; the configured subscribers may read
    # attributes, list subscriptions, and subscribe.
    cft.resources.add(
        Resource("ForeignAccountStatusTopicPolicy",
                 "AWS::SNS::TopicPolicy",
                 {
                     "Topics": [ref("ForeignAccountStatusTopic")],
                     "PolicyDocument": {
                         "Version": "2012-10-17",
                         "Id": "ForeignAccountStatusPolicy",
                         "Statement": [
                             {
                                 "Sid": "ForeignAccountStatusPublisher",
                                 "Effect": "Allow",
                                 "Principal": {"AWS": account_root_arns},
                                 "Action": "SNS:Publish",
                                 "Resource": ref("ForeignAccountStatusTopic"),
                             },
                             {
                                 "Sid": "ForeignAccountStatusSubscriber",
                                 "Effect": "Allow",
                                 "Principal": {
                                     "AWS": config['ForeignAccountStatusSubscribers']
                                 },
                                 "Action": [
                                     "SNS:GetTopicAttributes",
                                     "SNS:ListSubscriptionsByTopic",
                                     "SNS:Subscribe"
                                 ],
                                 "Resource": ref("ForeignAccountStatusTopic"),
                             }
                         ]
                     }
                 }
                 )
        )
    # Create SNS Topics for each AWS account and grant those accounts
    # rights to publish and subscribe to those topics
    for account_arn in account_root_arns:
        account_id = get_account_id_from_arn(account_arn)
        cft.resources.add(
            Resource(
                "Topic%s" % account_id,
                "AWS::SNS::Topic",
                {
                    "DisplayName": "Mozilla CloudTrail Logs Topic for Account %s" % account_id,
                    "TopicName": "MozillaCloudTrailLogs%s" % account_id
                }
            )
        )
        # Policy layout follows AWS use case 4:
        # http://docs.aws.amazon.com/sns/latest/dg/AccessPolicyLanguage_UseCases_Sns.html#AccessPolicyLanguage_UseCase4_Sns
        cft.resources.add(
            Resource(
                "TopicPolicy%s" % account_id,
                "AWS::SNS::TopicPolicy",
                {
                    "Topics": [ref("Topic%s" % account_id)],
                    "PolicyDocument": {
                        "Version": "2012-10-17",
                        "Id": "AWSCloudTrailSNSPolicy%s" % account_id,
                        "Statement": [
                            {
                                "Sid": "CloudTrailSNSPublish%s" % account_id,
                                "Effect": "Allow",
                                "Principal": {
                                    "Service": "cloudtrail.amazonaws.com"
                                },
                                "Action": "SNS:Publish",
                                "Resource": ref("Topic%s" % account_id)
                            },
                            {
                                "Sid": "CloudTrailSNSSubscribe%s" % account_id,
                                "Effect": "Allow",
                                "Principal": {"AWS": account_arn},
                                "Action": [
                                    "SNS:GetTopicAttributes",
                                    "SNS:ListSubscriptionsByTopic",
                                    "SNS:Subscribe"
                                ],
                                # Topic ARN built by hand because the topic
                                # name (not the ref) is what is scoped here.
                                "Resource": join(
                                    ":",
                                    "arn:aws:sns",
                                    ref("AWS::Region"),
                                    ref("AWS::AccountId"),
                                    "MozillaCloudTrailLogs%s" % account_id)
                            }
                        ]
                    }
                }
            )
        )
    return cft
def main():
    """Build the refinery-platform CloudFormation template.

    Loads and derives the deployment config, saves it to S3, assembles a
    CloudInit userdata script, declares the RDS/EBS/EC2/IAM/ELB
    resources, and writes the finished template to stdout.
    """
    config, config_yaml = load_config()
    derive_config(config)
    unique_suffix = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M")
    # We discover the current git branch/commit
    # so that the deployment script can use it
    # to clone the same commit.
    commit = os.popen("""git rev-parse HEAD""").read().rstrip()
    assert commit
    # SITE_NAME is embedded in a single-quoted shell assignment below,
    # so a literal "'" would break the userdata script.
    assert "'" not in config['SITE_NAME']
    instance_tags = tags.load()
    # Set the `Name` as it appears on the EC2 web UI.
    instance_tags.append({'Key': 'Name',
                          'Value': "refinery-web-" + unique_suffix})
    config['tags'] = instance_tags
    config_uri = save_s3_config(config, unique_suffix)
    sys.stderr.write("Configuration saved to {}\n".format(config_uri))
    # TLS rewrite is enabled iff a certificate is configured; exported
    # to the instance as a Facter fact below.
    tls_rewrite = "false"
    if 'TLS_CERTIFICATE' in config:
        tls_rewrite = "true"
    # The userdata script is executed via CloudInit.
    # It's made by concatenating a block of parameter variables,
    # with the bootstrap.sh script,
    # and the aws.sh script.
    user_data_script = functions.join(
        "",
        "#!/bin/sh\n",
        "CONFIG_YAML=", base64.b64encode(config_yaml), "\n",
        "CONFIG_JSON=", base64.b64encode(json.dumps(config)), "\n",
        "AWS_DEFAULT_REGION=", functions.ref("AWS::Region"), "\n",
        "RDS_ID=", functions.ref('RDSInstance'), "\n",
        "RDS_ENDPOINT_ADDRESS=",
        functions.get_att('RDSInstance', 'Endpoint.Address'), "\n",
        "RDS_ENDPOINT_PORT=",
        functions.get_att('RDSInstance', 'Endpoint.Port'), "\n",
        # NOTE(review): value redacted in source ("******" is not valid
        # Python here); original presumably interpolated the superuser
        # password from config — restore before use.
        "RDS_SUPERUSER_PASSWORD="******"\n",
        "RDS_ROLE=", config['RDS_ROLE'], "\n",
        "ADMIN=", config['ADMIN'], "\n",
        "DEFAULT_FROM_EMAIL=", config['DEFAULT_FROM_EMAIL'], "\n",
        "SERVER_EMAIL=", config['SERVER_EMAIL'], "\n",
        # NOTE(review): value redacted in source — see note above for
        # RDS_SUPERUSER_PASSWORD; restore the original expression.
        "IAM_SMTP_USER="******"\n",
        "export FACTER_TLS_REWRITE=", tls_rewrite, "\n",
        "S3_CONFIG_URI=", config['S3_CONFIG_URI'], "\n",
        "SITE_URL=", config['SITE_URL'], "\n",
        # May contain spaces, but can't contain "'"
        "SITE_NAME='", config['SITE_NAME'], "'\n",
        "GIT_BRANCH=", commit, "\n",
        "\n",
        open('bootstrap.sh').read(),
        open('aws.sh').read())
    cft = core.CloudFormationTemplate(description="refinery platform.")
    rds_properties = {
        "AllocatedStorage": "5",
        "AutoMinorVersionUpgrade": False,
        "AvailabilityZone": config['AVAILABILITY_ZONE'],
        "BackupRetentionPeriod": "0",
        "DBInstanceClass": "db.t2.small",  # todo:?
        "DBInstanceIdentifier": config['RDS_NAME'],
        "Engine": "postgres",
        "EngineVersion": "9.3.14",
        # "KmsKeyId" ?
        # NOTE(review): credentials masked in source as "******".
        "MasterUsername": "******",
        "MasterUserPassword": "******",
        "MultiAZ": False,
        "Port": "5432",
        "PubliclyAccessible": False,
        "StorageType": "gp2",
        "Tags": instance_tags,  # todo: Should be different?
    }
    # Restore from snapshot when one is configured.
    if 'RDS_SNAPSHOT' in config:
        rds_properties['DBSnapshotIdentifier'] = config['RDS_SNAPSHOT']
    cft.resources.rds_instance = core.Resource(
        'RDSInstance', 'AWS::RDS::DBInstance',
        core.Properties(rds_properties),
        core.DeletionPolicy("Snapshot"),
    )
    volume_properties = {
        'AvailabilityZone': config['AVAILABILITY_ZONE'],
        'Encrypted': True,
        'Size': config['DATA_VOLUME_SIZE'],
        'Tags': tags.load(),
        'VolumeType': config['DATA_VOLUME_TYPE'],
    }
    if 'DATA_SNAPSHOT' in config:
        volume_properties['SnapshotId'] = config['DATA_SNAPSHOT']
    # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-ebs-volume.html
    cft.resources.ebs = core.Resource(
        'RefineryData', 'AWS::EC2::Volume',
        core.Properties(volume_properties),
        core.DeletionPolicy("Snapshot"),
    )
    # Web instance depends on RDS so the userdata script can read the
    # endpoint refs it interpolates above.
    cft.resources.ec2_instance = core.Resource(
        'WebInstance', 'AWS::EC2::Instance',
        core.Properties({
            'AvailabilityZone': config['AVAILABILITY_ZONE'],
            'ImageId': 'ami-d05e75b8',
            'InstanceType': 'm3.medium',
            'UserData': functions.base64(user_data_script),
            'KeyName': config['KEY_NAME'],
            'IamInstanceProfile': functions.ref('WebInstanceProfile'),
            'Tags': instance_tags,
        }),
        core.DependsOn('RDSInstance'),
    )
    cft.resources.instance_profile = core.Resource(
        'WebInstanceProfile', 'AWS::IAM::InstanceProfile',
        core.Properties({
            'Path': '/',
            'Roles': [
                functions.ref('WebInstanceRole')
            ]
        })
    )
    # Role assumed by the web instance: read-only managed policies plus
    # narrowly-scoped inline policies for key/snapshot/tag creation.
    cft.resources.web_role = core.Resource(
        'WebInstanceRole', 'AWS::IAM::Role',
        core.Properties({
            # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html#cfn-iam-role-templateexamples
            "AssumeRolePolicyDocument": {
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Principal": {"Service": ["ec2.amazonaws.com"]},
                    "Action": ["sts:AssumeRole"]
                }]
            },
            'ManagedPolicyArns': [
                'arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess',
                'arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess',
                'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
            ],
            'Path': '/',
            'Policies': [
                {
                    'PolicyName': "CreateAccessKey",
                    'PolicyDocument': {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": [
                                    "iam:CreateAccessKey"
                                ],
                                "Resource": [
                                    "*"
                                ]
                            }
                        ]
                    },
                },
                {
                    'PolicyName': "CreateSnapshot",
                    'PolicyDocument': {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": [
                                    "ec2:CreateSnapshot"
                                ],
                                "Resource": [
                                    "*"
                                ]
                            }
                        ]
                    }
                },
                {
                    'PolicyName': "CreateDBSnapshot",
                    'PolicyDocument': {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": [
                                    "rds:CreateDBSnapshot"
                                ],
                                "Resource": [
                                    "*"
                                ]
                            }
                        ]
                    }
                },
                {
                    'PolicyName': "CreateTags",
                    'PolicyDocument': {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": [
                                    "ec2:CreateTags"
                                ],
                                "Resource": "*"
                            }
                        ]
                    }
                }
            ]
        })
    )
    # IAM user whose keys the instance uses for SES SMTP sending.
    cft.resources.smtp_user = core.Resource(
        'RefinerySMTPUser', 'AWS::IAM::User',
        core.Properties({
            'Policies': [{
                'PolicyName': "SESSendingAccess",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": "ses:SendRawEmail",
                        "Resource": "*"
                    }]
                }
            }]
        })
    )
    # Attach the data volume to the web instance.
    cft.resources.mount = core.Resource(
        'RefineryVolume', 'AWS::EC2::VolumeAttachment',
        core.Properties({
            'Device': '/dev/xvdr',
            'InstanceId': functions.ref('WebInstance'),
            'VolumeId': functions.ref('RefineryData'),
        })
    )
    cft.resources.elbsg = core.Resource(
        'ELBSecurityGroup', 'AWS::EC2::SecurityGroup',
        core.Properties({
            'GroupDescription': "Refinery ELB",
            'SecurityGroupEgress': [],
            'SecurityGroupIngress': [
                {
                    "IpProtocol": "tcp",
                    "FromPort": "80",
                    "ToPort": "80",
                    "CidrIp": "0.0.0.0/0",
                },
                {
                    "IpProtocol": "tcp",
                    "FromPort": "443",
                    "ToPort": "443",
                    "CidrIp": "0.0.0.0/0",
                },
            ],
        })
    )
    # ELB per
    # http://cfn-pyplates.readthedocs.io/en/latest/examples/options/template.html
    # Insecure, Port 80, HTTP listener
    http_listener = {
        'LoadBalancerPort': '80',
        'Protocol': 'HTTP',
        'InstanceProtocol': 'HTTP',
        'InstancePort': '80',
        'PolicyNames': []
    }
    listeners = [http_listener]
    if 'TLS_CERTIFICATE' in config:
        # Secure, Port 443, HTTPS listener; TLS terminates at the ELB
        # and traffic continues to the instance over plain HTTP.
        https_listener = {
            'LoadBalancerPort': '443',
            'Protocol': 'HTTPS',
            'InstanceProtocol': 'HTTP',
            'InstancePort': '80',
            'PolicyNames': [],
            'SSLCertificateId': config['TLS_CERTIFICATE']
        }
        listeners.append(https_listener)
    cft.resources.elb = core.Resource(
        'LoadBalancer', 'AWS::ElasticLoadBalancing::LoadBalancer',
        {
            'AvailabilityZones': [config['AVAILABILITY_ZONE']],
            'HealthCheck': {
                'HealthyThreshold': '2',
                'Interval': '30',
                'Target': 'HTTP:80/',
                'Timeout': '5',
                'UnhealthyThreshold': '4'
            },
            'Instances': [functions.ref('WebInstance')],
            'Listeners': listeners,
            'SecurityGroups': [
                functions.get_att('ELBSecurityGroup', 'GroupId')],
            "Tags": instance_tags,  # todo: Should be different?
        })
    # Emit the finished template to stdout.
    sys.stdout.write(str(cft))