def create_key_pair_param(self, name, key_pair):
    """Register a '<name>KeyPair' String template parameter and return a
    CloudFormation Ref to it.

    The parameter's Default is the supplied *key_pair* value, so callers
    may override it at stack-creation time.
    """
    param_name = name + 'KeyPair'
    self.parameters.add(
        core.Parameter(param_name, 'String', {
            'Default': key_pair,
            'Description': 'Key Pair for ' + name,
        }))
    # Hand back a Ref so the caller can embed the parameter in resources.
    return functions.ref(param_name)
def create_iam_role_param(self, name, iam_role):
    """Register a '<name>IamRole' String template parameter and return a
    CloudFormation Ref to it.

    The parameter's Default is the supplied *iam_role* value.
    """
    param_name = name + 'IamRole'
    self.parameters.add(
        core.Parameter(param_name, 'String', {
            'Default': iam_role,
            'Description': 'IAM Role for ' + name,
        }))
    # Hand back a Ref so the caller can embed the parameter in resources.
    return functions.ref(param_name)
def create_availability_zone_param(self, name, availability_zone):
    """Register a '<name>AvailabilityZone' String template parameter and
    return a CloudFormation Ref to it.

    The parameter's Default is the supplied *availability_zone* value.
    """
    param_name = name + "AvailabilityZone"
    self.parameters.add(
        core.Parameter(param_name, 'String', {
            'Default': availability_zone,
            'Description': 'Availability Zone for ' + name,
        }))
    # Hand back a Ref so the caller can embed the parameter in resources.
    return functions.ref(param_name)
def create_availability_zone_param(self, name, availability_zone):
    """Add an AvailabilityZone String parameter for *name*; return its Ref.

    Defaults to the *availability_zone* argument so the value can be
    overridden per stack.
    """
    zone_param = name + "AvailabilityZone"
    spec = {
        'Default': availability_zone,
        'Description': 'Availability Zone for ' + name,
    }
    self.parameters.add(core.Parameter(zone_param, 'String', spec))
    return functions.ref(zone_param)
def transform_reference(v):
    """
    Transform ref and ref_att in dictionary to CloudFormation ref or get_att
    """
    if v.startswith('ref('):
        # 'ref(Name)' -> {'Ref': 'Name'}
        target = v[len('ref('):-1].strip()
        return functions.ref(target)
    if v.startswith('get_att('):
        # 'get_att(Logical, Attr)' -> Fn::GetAtt structure
        pieces = [piece.strip()
                  for piece in v[len('get_att('):-1].split(',')]
        return functions.get_att(pieces[0], pieces[1])
    # Anything else passes through untouched.
    return v
def test_template_transform(self):
    """ Verify that templated_read returns correct output """
    template = StringIO.StringIO(
        dedent(
            """\
            this is a {{ string_value }} and this is a
            {{ function_value }}"""
        )
    )
    # Expected: plain text is joined around the CloudFormation Ref that
    # the function_value expands to.
    rendered = {"Fn::Join": [
        "",
        [u"this is a String Value and this is a\n",
         {"Ref": "AWS::Regions"},
         u""]
    ]}
    context = dict(string_value="String Value",
                   function_value=ref("AWS::Regions"))
    output = templated_read(template, context)
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(rendered, output)
def test_template_transform(self):
    """ Verify that templated_read returns correct output """
    template = StringIO.StringIO(
        dedent('''\
            this is a {{ string_value }} and this is a
            {{ function_value }}'''))
    # Expected: plain text joined around the Ref produced by
    # function_value.
    rendered = {
        'Fn::Join': [
            '',
            [
                u'this is a String Value and this is a\n',
                {
                    'Ref': 'AWS::Regions'
                },
                u''
            ]
        ]
    }
    context = dict(string_value="String Value",
                   function_value=ref('AWS::Regions'))
    output = templated_read(template, context)
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(rendered, output)
def test_ref(self):
    """functions.ref should wrap the given name in a {'Ref': ...} dict."""
    result = functions.ref('ThingName')
    self.assertEqual('ThingName', result['Ref'])
cft.resources.add(Resource( 'DHCPOptions', 'AWS::EC2::DHCPOptions', Properties({ # point to the onsite, IT-managed DNS servers 'DomainNameServers': [ "10.26.75.40", "10.26.75.41" ], 'Tags': [nametag('Releng Network Options')], }) )) cft.resources.add(Resource( 'DHCPOptionsAssociation', 'AWS::EC2::VPCDHCPOptionsAssociation', Properties({ 'VpcId': ref('RelengVPC'), 'DhcpOptionsId': ref('DHCPOptions'), }) )) # Internet Gateway cft.resources.add(Resource( 'IGW', 'AWS::EC2::InternetGateway', Properties({ 'Tags': [nametag('IGW for Releng VPC')], }) )) cft.resources.add(Resource( 'IGWAttachment', 'AWS::EC2::VPCGatewayAttachment',
def make_storage_template():
    """Build the Refinery Platform storage CloudFormation template.

    Defines S3 buckets for Django static and media files (with CORS),
    plus a Cognito identity pool and IAM role that let authenticated
    users upload directly into the media bucket.

    Returns:
        CloudFormationTemplate: the assembled template object.
    """
    cft = CloudFormationTemplate(description="Refinery Platform storage")
    # Parameters
    cft.parameters.add(
        Parameter('StaticBucketName', 'String', {
            'Description': 'Name of S3 bucket for Django static files',
        }))
    cft.parameters.add(
        Parameter(
            'MediaBucketName', 'String', {
                'Description': 'Name of S3 bucket for Django media files',
                # make names DNS-compliant without periods (".") for
                # compatibility with virtual-hosted-style access and S3
                # Transfer Acceleration
                'AllowedPattern': '[a-z0-9\-]+',
                'ConstraintDescription':
                    'must only contain lower case letters, numbers, and '
                    'hyphens',
            }))
    cft.parameters.add(
        Parameter(
            'IdentityPoolName', 'String', {
                'Default': 'Refinery Platform',
                'Description': 'Name of Cognito identity pool for S3 uploads',
            }))
    cft.parameters.add(
        Parameter(
            'DeveloperProviderName', 'String', {
                'Default': 'login.refinery',
                'Description': '"domain" by which Cognito will refer to users',
                # NOTE(review): ConstraintDescription mentions underscores
                # but the pattern does not allow them — confirm intended.
                'AllowedPattern': '[a-z\-\.]+',
                'ConstraintDescription':
                    'must only contain lower case letters, periods, '
                    'underscores, and hyphens'
            }))
    # Resources
    # Both buckets are retained on stack deletion (DeletionPolicy Retain)
    # so user data is not destroyed with the stack.
    cft.resources.add(
        Resource(
            'StaticStorageBucket', 'AWS::S3::Bucket', Properties({
                'BucketName': ref('StaticBucketName'),
                'AccessControl': 'PublicRead',
                'CorsConfiguration': {
                    'CorsRules': [{
                        'AllowedOrigins': ['*'],
                        'AllowedMethods': ['GET'],
                        'AllowedHeaders': ['Authorization'],
                        'MaxAge': 3000,
                    }]
                },
            }),
            DeletionPolicy('Retain'),
        ))
    cft.resources.add(
        Resource(
            'MediaStorageBucket', 'AWS::S3::Bucket', Properties({
                'BucketName': ref('MediaBucketName'),
                'AccessControl': 'PublicRead',
                'CorsConfiguration': {
                    'CorsRules': [{
                        'AllowedOrigins': ['*'],
                        'AllowedMethods': ['POST', 'PUT', 'DELETE'],
                        'AllowedHeaders': ['*'],
                        'ExposedHeaders': ['ETag'],
                        'MaxAge': 3000,
                    }]
                }
            }),
            DeletionPolicy('Retain'),
        ))
    # Cognito Identity Pool for Developer Authenticated Identities Authflow
    # http://docs.aws.amazon.com/cognito/latest/developerguide/authentication-flow.html
    cft.resources.add(
        Resource(
            'IdentityPool',
            'AWS::Cognito::IdentityPool', Properties({
                'IdentityPoolName': ref('IdentityPoolName'),
                'AllowUnauthenticatedIdentities': False,
                'DeveloperProviderName': ref('DeveloperProviderName'),
            })))
    # Map authenticated identities to the upload role defined below.
    cft.resources.add(
        Resource(
            'IdentityPoolAuthenticatedRole',
            'AWS::Cognito::IdentityPoolRoleAttachment', Properties({
                'IdentityPoolId': ref('IdentityPool'),
                'Roles': {
                    'authenticated': get_att('CognitoS3UploadRole', 'Arn'),
                }
            })))
    # Trust policy: only authenticated identities from this identity pool
    # may assume the upload role via web identity federation.
    upload_role_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {
                "Federated": "cognito-identity.amazonaws.com"
            },
            "Action": "sts:AssumeRoleWithWebIdentity",
            "Condition": {
                "StringEquals": {
                    "cognito-identity.amazonaws.com:aud": ref('IdentityPool')
                },
                "ForAnyValue:StringLike": {
                    "cognito-identity.amazonaws.com:amr": "authenticated"
                }
            }
        }]
    }
    # Access policy: uploads are confined to a per-identity prefix under
    # uploads/ in the media bucket (${!...} escapes the Cognito sub for
    # Fn::Sub).
    upload_access_policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Action": ["cognito-identity:*"],
            "Resource": "*"
        }, {
            "Action": ["s3:PutObject", "s3:AbortMultipartUpload"],
            "Effect": "Allow",
            "Resource": {
                "Fn::Sub": "arn:aws:s3:::${MediaStorageBucket}/uploads/"
                           "${!cognito-identity.amazonaws.com:sub}/*"
            }
        }]
    }
    cft.resources.add(
        Resource(
            'CognitoS3UploadRole', 'AWS::IAM::Role', Properties({
                'AssumeRolePolicyDocument': upload_role_trust_policy,
                'Policies': [{
                    'PolicyName': 'AuthenticatedS3UploadPolicy',
                    'PolicyDocument': upload_access_policy,
                }]
            })))
    # Outputs
    cft.outputs.add(
        Output('IdentityPoolId', ref('IdentityPool'),
               {'Fn::Sub': '${AWS::StackName}IdentityPoolId'},
               'Cognito identity pool ID'))
    return cft
def make_storage_template():
    """Build the Refinery Platform storage CloudFormation template.

    Defines two S3 buckets (Django static and media files) with CORS
    configured, and exports the media bucket name as a stack output.

    Returns:
        CloudFormationTemplate: the assembled template object.
    """
    cft = CloudFormationTemplate(description="Refinery Platform storage")
    # Parameters
    cft.parameters.add(
        Parameter('StaticBucketName', 'String', {
            'Description': 'Name of S3 bucket for Django static files',
        }))
    cft.parameters.add(
        Parameter(
            'MediaBucketName', 'String', {
                'Description': 'Name of S3 bucket for Django media files',
                # make names DNS-compliant without periods (".") for
                # compatibility with virtual-hosted-style access and S3
                # Transfer Acceleration
                # (raw string: '\-' is an invalid escape sequence in a
                # plain literal)
                'AllowedPattern': r'[a-z0-9\-]+',
                'ConstraintDescription':
                    'must only contain lower case letters, numbers, and '
                    'hyphens',
            }))
    # Resources — both buckets are retained on stack deletion so user
    # data is not destroyed with the stack.
    cft.resources.add(
        Resource(
            'StaticStorageBucket', 'AWS::S3::Bucket', Properties({
                'BucketName': ref('StaticBucketName'),
                'AccessControl': 'PublicRead',
                'CorsConfiguration': {
                    'CorsRules': [{
                        'AllowedOrigins': ['*'],
                        'AllowedMethods': ['GET'],
                        'AllowedHeaders': ['Authorization'],
                        'MaxAge': 3000,
                    }]
                },
            }),
            DeletionPolicy('Retain'),
        ))
    cft.resources.add(
        Resource(
            'MediaStorageBucket', 'AWS::S3::Bucket', Properties({
                'BucketName': ref('MediaBucketName'),
                'AccessControl': 'PublicRead',
                'CorsConfiguration': {
                    'CorsRules': [{
                        'AllowedOrigins': ['*'],
                        'AllowedMethods': ['POST', 'PUT', 'DELETE'],
                        'AllowedHeaders': ['*'],
                        'ExposedHeaders': ['ETag'],
                        'MaxAge': 3000,
                    }]
                }
            }),
            DeletionPolicy('Retain'),
        ))
    # Outputs
    cft.outputs.add(
        Output('MediaBucketName', ref('MediaStorageBucket'),
               {'Fn::Sub': '${AWS::StackName}Media'},
               'Name of S3 bucket for Django media files'))
    return cft
def main():
    """Assemble the refinery-platform CloudFormation template and print
    it to stdout.

    Reads local configuration, uploads it to S3, builds a CloudInit
    userdata script from config values plus bootstrap.sh/aws.sh, then
    declares RDS, EBS, EC2, IAM and SMTP-user resources.
    """
    config = load_config()
    # The Availability Zone of the new instance needs to match
    # the availability zone of the existing EBS.
    derive_config(config)
    # Timestamp suffix keeps per-deployment names/config URIs unique.
    unique_suffix = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M")
    # We discover the current git branch/commit
    # so that the deployment script can use it
    # to clone the same commit.
    commit = os.popen("""git rev-parse HEAD""").read().rstrip()
    assert commit
    # SITE_NAME is embedded inside single quotes in the userdata script
    # below, so it must not contain one.
    assert "'" not in config['SITE_NAME']
    instance_tags = tags.load()
    # Set the `Name` as it appears on the EC2 web UI.
    instance_tags.append({'Key': 'Name',
                          'Value': "refinery-web-" + unique_suffix})
    config['tags'] = instance_tags
    config_uri = save_s3_config(config, unique_suffix)
    sys.stderr.write("Configuration saved to {}\n".format(config_uri))
    # The userdata script is executed via CloudInit.
    # It's made by concatenating a block of parameter variables,
    # with the bootstrap.sh script,
    # and the aws.sh script.
    # NOTE(review): the '******' tokens below are redaction artifacts in
    # this copy (the secret values were masked out); the code is not
    # valid Python as written — restore from the original source.
    user_data_script = functions.join(
        "",
        "#!/bin/sh\n",
        "AWS_DEFAULT_REGION=", functions.ref("AWS::Region"), "\n",
        "RDS_NAME=", config['RDS_NAME'], "\n",
        "RDS_SUPERUSER_PASSWORD="******"\n",
        "RDS_ROLE=", config['RDS_ROLE'], "\n",
        "ADMIN=", config['ADMIN'], "\n",
        "DEFAULT_FROM_EMAIL=", config['DEFAULT_FROM_EMAIL'], "\n",
        "SERVER_EMAIL=", config['SERVER_EMAIL'], "\n",
        "IAM_SMTP_USER="******"\n",
        "S3_CONFIG_URI=", config['S3_CONFIG_URI'], "\n",
        "SITE_URL=", config['SITE_URL'], "\n",
        # May contain spaces, but can't contain "'"
        "SITE_NAME='", config['SITE_NAME'], "'\n",
        "GIT_BRANCH=", commit, "\n",
        "\n",
        open('bootstrap.sh').read(),
        open('aws.sh').read())
    cft = core.CloudFormationTemplate(description="refinery platform.")
    # RDS (PostgreSQL) instance properties.
    rds_properties = {
        "AllocatedStorage": "5",
        "AvailabilityZone": config['AVAILABILITY_ZONE'],
        "BackupRetentionPeriod": "0",
        "DBInstanceClass": "db.t2.small",  # todo:?
        "DBInstanceIdentifier": config['RDS_NAME'],
        "Engine": "postgres",
        "EngineVersion": "9.3.10",
        # "KmsKeyId" ?
        # NOTE(review): '******' values are redaction artifacts in this
        # copy — restore the real MasterUsername/MasterUserPassword
        # expressions from the original source.
        "MasterUsername": "******",
        "MasterUserPassword": "******",
        "MultiAZ": False,
        "Port": "5432",
        "PubliclyAccessible": False,
        "StorageType": "gp2",
        "Tags": instance_tags,  # todo: Should be different?
    }
    # Optionally restore the database from a snapshot.
    if 'RDS_SNAPSHOT' in config:
        rds_properties['DBSnapshotIdentifier'] = config['RDS_SNAPSHOT']
    cft.resources.rds_instance = core.Resource(
        'RDSInstance', 'AWS::RDS::DBInstance',
        core.Properties(rds_properties),
        core.DeletionPolicy("Snapshot"),
    )
    # Encrypted EBS data volume; snapshotted on stack deletion.
    volume_properties = {
        'AvailabilityZone': config['AVAILABILITY_ZONE'],
        'Encrypted': True,
        'Size': config['DATA_VOLUME_SIZE'],
        'Tags': tags.load(),
        'VolumeType': config['DATA_VOLUME_TYPE'],
    }
    if 'DATA_SNAPSHOT' in config:
        volume_properties['SnapshotId'] = config['DATA_SNAPSHOT']
    # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-ebs-volume.html
    cft.resources.ebs = core.Resource(
        'RefineryData', 'AWS::EC2::Volume',
        core.Properties(volume_properties),
        core.DeletionPolicy("Snapshot"),
    )
    # The web server EC2 instance, booted with the userdata script above.
    cft.resources.ec2_instance = core.Resource(
        'WebInstance', 'AWS::EC2::Instance', core.Properties({
            'AvailabilityZone': config['AVAILABILITY_ZONE'],
            'ImageId': 'ami-d05e75b8',
            'InstanceType': 'm3.medium',
            'UserData': functions.base64(user_data_script),
            'KeyName': config['KEY_NAME'],
            'IamInstanceProfile': functions.ref('WebInstanceProfile'),
            'Tags': instance_tags,
        })
    )
    # Instance profile linking the instance to its IAM role below.
    cft.resources.instance_profile = core.Resource(
        'WebInstanceProfile', 'AWS::IAM::InstanceProfile',
        core.Properties({
            'Path': '/',
            'Roles': [
                functions.ref('WebInstanceRole')
            ]
        })
    )
    cft.resources.web_role = core.Resource(
        'WebInstanceRole', 'AWS::IAM::Role',
        core.Properties({
            # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html#cfn-iam-role-templateexamples
            "AssumeRolePolicyDocument": {
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["ec2.amazonaws.com"]
                    },
                    "Action": ["sts:AssumeRole"]
                }]
            },
            'ManagedPolicyArns': [
                'arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess',
                'arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess',
                'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
            ],
            'Path': '/',
            # Inline policies for the few write actions the instance needs.
            'Policies': [
                {
                    'PolicyName': "CreateAccessKey",
                    'PolicyDocument': {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": [
                                    "iam:CreateAccessKey"
                                ],
                                "Resource": [
                                    "*"
                                ]
                            }
                        ]
                    }
                },
                {
                    'PolicyName': "CreateTags",
                    'PolicyDocument': {
                        "Version": "2012-10-17",
                        "Statement": [
                            {
                                "Effect": "Allow",
                                "Action": [
                                    "ec2:CreateTags"
                                ],
                                "Resource": "*"
                            }
                        ]
                    }
                }
            ]
        })
    )
    # IAM user whose credentials are used for SES email sending.
    cft.resources.smtp_user = core.Resource(
        'RefinerySMTPUser', 'AWS::IAM::User',
        core.Properties({
            'Policies': [{
                'PolicyName': "SESSendingAccess",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": "ses:SendRawEmail",
                        "Resource": "*"
                    }]
                }
            }]
        })
    )
    # Attach the data volume to the web instance.
    cft.resources.mount = core.Resource(
        'RefineryVolume', 'AWS::EC2::VolumeAttachment',
        core.Properties({
            'Device': '/dev/xvdr',
            'InstanceId': functions.ref('WebInstance'),
            'VolumeId': functions.ref('RefineryData'),
        })
    )
    # Emit the finished template JSON on stdout.
    print(str(cft))
def build_template(args):
    """
    Build a CloudFormation template allowing for secure CloudTrail log
    aggregation and fine grained access control to SNS topics for
    notifications of new CloudTrail logs

    The reason that we create IAM roles for each client AWS account in order
    to enable clients to read their own CloudTrail logs, instead of merely
    delegating access to them in an S3 bucket policy is that "Bucket owner
    account can delegate permissions to users in its own account, but it
    cannot delegate permissions to other AWS accounts, because cross-account
    delegation is not supported." :
    http://docs.aws.amazon.com/AmazonS3/latest/dev/example-walkthroughs-managing-access-example4.html
    As a consequence we *can* delegate bucket permissions to client AWS
    accounts but we *can not* delegate object permissions (the log files
    themselves) to client AWS accounts.

    Example config :

    AccountRootARNs:
    - arn:aws:iam::012345678901:root # Sales
    - arn:aws:iam::123456789012:root # HR
    - arn:aws:iam::234567890123:root # Marketing
    CloudTrailLogConsumers:
    - arn:aws:iam::345678901234:user/security_team # Security team user
    - TrustedARN: arn:aws:iam::456789012343:root # CloudCo Third Party
      TrustingARNs:
      - arn:aws:iam::012345678901:root # Sales
      - arn:aws:iam::234567890123:root # Marketing
    - TrustedARN: arn:aws:iam::567890123434:root # Other.com Third Party
      TrustingARNs:
      - arn:aws:iam::123456789012:root # HR
    ForeignAccountStatusSubscribers:
    - arn:aws:iam::345678901234:root # Security Team
    """
    config = args.config
    # Tolerate a missing or non-list AccountRootARNs entry.
    account_root_arns = (
        config['AccountRootARNs']
        if 'AccountRootARNs' in config
        and isinstance(config['AccountRootARNs'], list) else [])
    cft = CloudFormationTemplate(
        description="AWS CloudTrail Storage Account S3 Storage Bucket")
    # Create the bucket
    cft.resources.add(
        Resource("S3Bucket", "AWS::S3::Bucket",
                 {"BucketName": args.bucketname},
                 DeletionPolicy("Retain")))
    # Build the s3 bucket policy statement list
    bucket_policy_statements = []
    # Allow the CloudTrail system to GetBucketAcl on the CloudTrail storage
    # bucket
    bucket_policy_statements.append({
        "Sid": "AWSCloudTrailAclCheck",
        "Effect": "Allow",
        "Principal": {
            "Service": "cloudtrail.amazonaws.com"
        },
        "Action": ["s3:GetBucketAcl"],
        "Resource": join("", "arn:aws:s3:::", ref("S3Bucket"))
    })
    # Allow each account to read it's own logs
    for account_arn in account_root_arns:
        account_id = get_account_id_from_arn(account_arn)
        # Nested stack creates a per-account reader role trusted by the
        # account's configured log consumers.
        cft.resources.add(
            Resource(
                "CloudTrailLogReaderRole%s" % account_id,
                "AWS::CloudFormation::Stack", {
                    "TemplateURL":
                        "https://s3.amazonaws.com/infosec-cloudformation-templates/manage_iam_role.json",
                    "Parameters": {
                        "RoleName": "CloudTrailLogReader%s" % account_id,
                        "TrustedEntities": get_consumer_arns(
                            account_arn, config)
                    },
                    "TimeoutInMinutes": "5"
                }))
        # Reader policy grants GetObject only on the account's own
        # /AWSLogs/<account_id>/ prefix.
        cft.resources.add(
            Resource(
                "CloudTrailLogReaderPolicy%s" % account_id,
                "AWS::IAM::Policy", {
                    "PolicyName":
                        "CloudTrailLogReaderPolicy%s" % account_id,
                    "PolicyDocument": {
                        "Version": "2012-10-17",
                        "Statement": [{
                            "Effect": "Allow",
                            "Action": "s3:GetObject",
                            "Resource": join(
                                "", "arn:aws:s3:::", ref("S3Bucket"),
                                "/AWSLogs/%s/*" % account_id)
                        }]
                    },
                    "Roles": ["CloudTrailLogReader%s" % account_id]
                },
                DependsOn("CloudTrailLogReaderRole%s" % account_id)))
    # Shared managed policy: lets every reader role list/inspect the
    # bucket itself (object access stays per-account, above).
    cft.resources.add(
        Resource(
            "ReadCloudTrailBucket", "AWS::IAM::ManagedPolicy", {
                "Description": "ReadCloudTrailBucket",
                "PolicyDocument": {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": ["s3:ListAllMyBuckets",
                                   "s3:GetBucketLocation"],
                        "Resource": "*"
                    }, {
                        "Effect": "Allow",
                        "Action": [
                            "s3:GetBucketAcl",
                            "s3:ListBucket",
                            "s3:GetBucketTagging"
                        ],
                        "Resource": join("", "arn:aws:s3:::",
                                         ref("S3Bucket"))
                    }]
                },
                "Roles": [
                    "CloudTrailLogReader%s" % get_account_id_from_arn(
                        account_arn)
                    for account_arn in account_root_arns
                ]
            },
            DependsOn([
                "CloudTrailLogReaderRole%s" % get_account_id_from_arn(
                    account_arn)
                for account_arn in account_root_arns
            ])))
    # Allow CloudTrail to write logs, provided it grants the bucket owner
    # full control over the delivered objects.
    bucket_policy_statements.append({
        # "Sid":"AWSCloudTrailWrite%s" % get_account_id_from_arn(account_arn),
        "Effect": "Allow",
        "Principal": {
            "Service": "cloudtrail.amazonaws.com"
        },
        "Action": ["s3:PutObject"],
        "Resource": join("", "arn:aws:s3:::", ref("S3Bucket"),
                         "/AWSLogs/*"),
        "Condition": {
            "StringEquals": {
                "s3:x-amz-acl": "bucket-owner-full-control"
            }
        }
    })
    # Apply the bucket policy to the bucket
    cft.resources.add(
        Resource(
            "BucketPolicy", "AWS::S3::BucketPolicy", {
                "Bucket": ref("S3Bucket"),
                "PolicyDocument": {
                    "Id": "BucketPolicyDocument",
                    "Version": "2012-10-17",
                    "Statement": bucket_policy_statements
                }
            }))
    # Create a single SNS Topic that each AWS account can publish to to report
    # on the CloudFormation progress
    cft.resources.add(
        Resource(
            "ForeignAccountStatusTopic", "AWS::SNS::Topic", {
                "DisplayName":
                    "Topic for foreign accounts to publish status information to",
                "TopicName": "ForeignAccountStatus"
            }))
    # Foreign accounts may publish; configured subscribers may subscribe.
    cft.resources.add(
        Resource(
            "ForeignAccountStatusTopicPolicy", "AWS::SNS::TopicPolicy", {
                "Topics": [ref("ForeignAccountStatusTopic")],
                "PolicyDocument": {
                    "Version": "2012-10-17",
                    "Id": "ForeignAccountStatusPolicy",
                    "Statement": [{
                        "Sid": "ForeignAccountStatusPublisher",
                        "Effect": "Allow",
                        "Principal": {
                            "AWS": account_root_arns
                        },
                        "Action": "SNS:Publish",
                        "Resource": ref("ForeignAccountStatusTopic"),
                    }, {
                        "Sid": "ForeignAccountStatusSubscriber",
                        "Effect": "Allow",
                        "Principal": {
                            "AWS":
                                config['ForeignAccountStatusSubscribers']
                        },
                        "Action": [
                            "SNS:GetTopicAttributes",
                            "SNS:ListSubscriptionsByTopic",
                            "SNS:Subscribe"
                        ],
                        "Resource": ref("ForeignAccountStatusTopic"),
                    }]
                }
            }))
    # Create SNS Topics for each AWS account and grant those accounts rights
    # to publish and subscribe to those topics
    for account_arn in account_root_arns:
        account_id = get_account_id_from_arn(account_arn)
        cft.resources.add(
            Resource(
                "Topic%s" % account_id, "AWS::SNS::Topic", {
                    "DisplayName":
                        "Mozilla CloudTrail Logs Topic for Account %s" %
                        account_id,
                    "TopicName": "MozillaCloudTrailLogs%s" % account_id
                }))
        # http://docs.aws.amazon.com/sns/latest/dg/AccessPolicyLanguage_UseCases_Sns.html#AccessPolicyLanguage_UseCase4_Sns
        cft.resources.add(
            Resource(
                "TopicPolicy%s" % account_id, "AWS::SNS::TopicPolicy", {
                    "Topics": [ref("Topic%s" % account_id)],
                    "PolicyDocument": {
                        "Version": "2012-10-17",
                        "Id": "AWSCloudTrailSNSPolicy%s" % account_id,
                        "Statement": [{
                            "Sid": "CloudTrailSNSPublish%s" % account_id,
                            "Effect": "Allow",
                            "Principal": {
                                "Service": "cloudtrail.amazonaws.com"
                            },
                            "Action": "SNS:Publish",
                            "Resource": ref("Topic%s" % account_id)
                        }, {
                            "Sid": "CloudTrailSNSSubscribe%s" % account_id,
                            "Effect": "Allow",
                            "Principal": {
                                "AWS": account_arn
                            },
                            "Action": [
                                "SNS:GetTopicAttributes",
                                "SNS:ListSubscriptionsByTopic",
                                "SNS:Subscribe"
                            ],
                            "Resource": join(
                                ":", "arn:aws:sns", ref("AWS::Region"),
                                ref("AWS::AccountId"),
                                "MozillaCloudTrailLogs%s" % account_id)
                        }]
                    }
                }))
    return cft
def test_ref(self):
    """functions.ref should produce a {"Ref": name} mapping."""
    produced = functions.ref("ThingName")
    self.assertEqual("ThingName", produced["Ref"])
def make_template(config, config_yaml):
    """Make a fresh CloudFormation template object and return it

    Builds the full Refinery Platform main stack: RDS, EBS data volume,
    EC2 web instance (bootstrapped via CloudInit userdata), IAM roles,
    SES SMTP user, security groups, classic ELB with access logging,
    and a Cognito identity pool for direct-to-S3 uploads.

    ``config`` is the parsed configuration dict (mutated: a 'tags' key
    is added); ``config_yaml`` is the raw YAML text, embedded base64-
    encoded into the userdata script.
    """
    stack_name = config['STACK_NAME']
    # We discover the current git branch/commit so that the deployment script
    # can use it to clone the same commit
    commit = os.popen("""git rev-parse HEAD""").read().rstrip()
    assert commit
    # SITE_NAME is wrapped in single quotes in the userdata below, so it
    # must not contain one.
    assert "'" not in config['SITE_NAME']
    instance_tags = load_tags()
    # Stack Name is also used for instances.
    instance_tags.append({'Key': 'Name', 'Value': stack_name})
    # This tag is variable and can be specified by
    # template Parameter.
    instance_tags.append({
        'Key': functions.ref('SnapshotSchedulerTag'),
        'Value': 'default'
    })
    config['tags'] = instance_tags
    config_uri = save_s3_config(config)
    sys.stdout.write("Configuration saved to {}\n".format(config_uri))
    tls_rewrite = "false"
    if 'TLS_CERTIFICATE' in config:
        tls_rewrite = "true"
    # The userdata script is executed via CloudInit
    # It's made by concatenating a block of parameter variables,
    # with the bootstrap.sh script, and the aws.sh script
    # NOTE(review): the '******' tokens below are redaction artifacts in
    # this copy (masked secrets); the code is not valid Python as
    # written — restore from the original source.
    user_data_script = functions.join(
        "",
        "#!/bin/sh\n",
        "CONFIG_YAML=", base64.b64encode(config_yaml), "\n",
        "CONFIG_JSON=", base64.b64encode(json.dumps(config)), "\n",
        "AWS_DEFAULT_REGION=", functions.ref("AWS::Region"), "\n",
        "RDS_ENDPOINT_ADDRESS=",
        functions.get_att('RDSInstance', 'Endpoint.Address'), "\n",
        "RDS_ENDPOINT_PORT=",
        functions.get_att('RDSInstance', 'Endpoint.Port'), "\n",
        "RDS_SUPERUSER_PASSWORD="******"\n",
        "RDS_ROLE=", config['RDS_ROLE'], "\n",
        "ADMIN=", config['ADMIN'], "\n",
        "DEFAULT_FROM_EMAIL=", config['DEFAULT_FROM_EMAIL'], "\n",
        "SERVER_EMAIL=", config['SERVER_EMAIL'], "\n",
        "IAM_SMTP_USER="******"\n",
        "export FACTER_TLS_REWRITE=", tls_rewrite, "\n",
        "S3_CONFIG_URI=", config['S3_CONFIG_URI'], "\n",
        "SITE_URL=", config['SITE_URL'], "\n",
        # May contain spaces, but can't contain "'"
        "SITE_NAME='", config['SITE_NAME'], "'\n",
        "GIT_BRANCH=", commit, "\n",
        "\n",
        open('bootstrap.sh').read(),
        open('aws.sh').read())
    cft = core.CloudFormationTemplate(description="Refinery Platform main")
    # This parameter tags the EC2 instances, and is intended to be used
    # with the AWS Reference Implementation EBS Snapshot Scheduler:
    # http://docs.aws.amazon.com/solutions/latest/ebs-snapshot-scheduler/welcome.html
    cft.parameters.add(
        core.Parameter(
            'SnapshotSchedulerTag', 'String', {
                'Default': 'scheduler:ebs-snapshot',
                'Description': "Tag added to EC2 Instances so that "
                               "the EBS Snapshot Scheduler will recognise them.",
            }))
    cft.parameters.add(
        core.Parameter(
            'IdentityPoolName', 'String', {
                'Default': 'Refinery Platform',
                'Description':
                    'Name of Cognito identity pool for S3 uploads',
            }))
    cft.parameters.add(
        core.Parameter(
            'DeveloperProviderName', 'String', {
                'Default': 'login.refinery',
                'Description':
                    '"domain" by which Cognito will refer to users',
                # NOTE(review): ConstraintDescription mentions underscores
                # but the pattern does not allow them — confirm intended.
                'AllowedPattern': '[a-z\-\.]+',
                'ConstraintDescription':
                    'must only contain lower case letters, periods, '
                    'underscores, and hyphens'
            }))
    cft.parameters.add(
        core.Parameter(
            'StorageStackName', 'String', {
                'Default': '${AWS::StackName}Storage',
                'Description': 'Name of the S3 storage stack for Django '
                               'static and media files',
            }))
    # RDS (PostgreSQL) instance properties.
    rds_properties = {
        "AllocatedStorage": "5",
        "AutoMinorVersionUpgrade": False,
        "BackupRetentionPeriod": "15",
        "CopyTagsToSnapshot": True,
        "DBInstanceClass": "db.t2.small",  # todo:?
        "DBInstanceIdentifier": config['RDS_NAME'],
        "Engine": "postgres",
        "EngineVersion": "9.3.14",
        # "KmsKeyId" ?
        # NOTE(review): MasterUsername '******' is a redaction artifact
        # in this copy — restore from the original source.
        "MasterUsername": "******",
        "MasterUserPassword": config['RDS_SUPERUSER_PASSWORD'],
        "MultiAZ": False,
        "Port": "5432",
        "PubliclyAccessible": False,
        "StorageType": "gp2",
        "Tags": instance_tags,  # todo: Should be different?
        "VPCSecurityGroups": [functions.get_att('RDSSecurityGroup',
                                                'GroupId')],
    }
    # Optionally restore the database from a snapshot.
    if 'RDS_SNAPSHOT' in config:
        rds_properties['DBSnapshotIdentifier'] = config['RDS_SNAPSHOT']
    cft.resources.rds_instance = core.Resource(
        'RDSInstance', 'AWS::RDS::DBInstance',
        core.Properties(rds_properties),
        core.DeletionPolicy("Snapshot"),
    )
    # Encrypted EBS data volume, placed in the same AZ as the web
    # instance; snapshotted on stack deletion.
    volume_properties = {
        'Encrypted': True,
        'Size': config['DATA_VOLUME_SIZE'],
        'Tags': load_tags(),
        'AvailabilityZone': functions.get_att('WebInstance',
                                              'AvailabilityZone'),
        'VolumeType': config['DATA_VOLUME_TYPE'],
    }
    if 'DATA_SNAPSHOT' in config:
        volume_properties['SnapshotId'] = config['DATA_SNAPSHOT']
    # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-ebs-volume.html
    cft.resources.ebs = core.Resource(
        'RefineryData', 'AWS::EC2::Volume',
        core.Properties(volume_properties),
        core.DeletionPolicy("Snapshot"),
    )
    # Web server instance; created after RDS so the userdata can resolve
    # the database endpoint.
    cft.resources.ec2_instance = core.Resource(
        'WebInstance', 'AWS::EC2::Instance',
        core.Properties({
            'ImageId': 'ami-d05e75b8',
            'InstanceType': 'm3.medium',
            'UserData': functions.base64(user_data_script),
            'KeyName': config['KEY_NAME'],
            'IamInstanceProfile': functions.ref('WebInstanceProfile'),
            'SecurityGroups': [functions.ref("InstanceSecurityGroup")],
            'Tags': instance_tags,
        }),
        core.DependsOn(['RDSInstance']),
    )
    cft.resources.instance_profile = core.Resource(
        'WebInstanceProfile', 'AWS::IAM::InstanceProfile',
        core.Properties({
            'Path': '/',
            'Roles': [functions.ref('WebInstanceRole')]
        }))
    cft.resources.web_role = core.Resource(
        'WebInstanceRole', 'AWS::IAM::Role',
        core.Properties({
            # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html#cfn-iam-role-templateexamples
            "AssumeRolePolicyDocument": {
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["ec2.amazonaws.com"]
                    },
                    "Action": ["sts:AssumeRole"]
                }]
            },
            'ManagedPolicyArns': [
                'arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess',
                'arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess',
                'arn:aws:iam::aws:policy/AmazonS3FullAccess'
            ],
            'Path': '/',
            # Inline policies for the specific write actions the web
            # instance performs.
            'Policies': [{
                'PolicyName': "CreateAccessKey",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": ["iam:CreateAccessKey"],
                        "Resource": ["*"]
                    }]
                },
            }, {
                'PolicyName': "CreateSnapshot",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": ["ec2:CreateSnapshot"],
                        "Resource": ["*"]
                    }]
                }
            }, {
                'PolicyName': "CreateDBSnapshot",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": ["rds:CreateDBSnapshot"],
                        "Resource": ["*"]
                    }]
                }
            }, {
                'PolicyName': "CreateTags",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": ["ec2:CreateTags"],
                        "Resource": "*"
                    }]
                }
            }, {
                "PolicyName": "CognitoAccess",
                "PolicyDocument": {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": [
                            "cognito-identity:ListIdentityPools",
                        ],
                        "Resource": "arn:aws:cognito-identity:*"
                    }, {
                        "Effect": "Allow",
                        "Action": [
                            "cognito-identity:"
                            "GetOpenIdTokenForDeveloperIdentity"
                        ],
                        "Resource": {
                            "Fn::Sub": [
                                "arn:aws:cognito-identity:"
                                "${AWS::Region}:${AWS::AccountId}:"
                                "identitypool/${Pool}",
                                {
                                    "Pool": functions.ref('IdentityPool')
                                }
                            ]
                        }
                    }]
                }
            }]
        }))
    # IAM user whose credentials are used for SES email sending.
    cft.resources.smtp_user = core.Resource(
        'RefinerySMTPUser', 'AWS::IAM::User',
        core.Properties({
            'Policies': [{
                'PolicyName': "SESSendingAccess",
                'PolicyDocument': {
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": "Allow",
                        "Action": "ses:SendRawEmail",
                        "Resource": "*"
                    }]
                }
            }]
        }))
    # Attach the data volume to the web instance.
    cft.resources.mount = core.Resource(
        'RefineryVolume', 'AWS::EC2::VolumeAttachment',
        core.Properties({
            'Device': '/dev/xvdr',
            'InstanceId': functions.ref('WebInstance'),
            'VolumeId': functions.ref('RefineryData'),
        }))
    # Security Group for Elastic Load Balancer
    # (public facing).
    cft.resources.elbsg = core.Resource(
        'ELBSecurityGroup', 'AWS::EC2::SecurityGroup',
        core.Properties({
            'GroupDescription': "Refinery ELB",
            # Egress Rule defined via
            # AWS::EC2::SecurityGroupEgress resource,
            # to avoid circularity (below).
            # See http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group.html  # noqa: E501
            'SecurityGroupIngress': [
                {
                    "IpProtocol": "tcp",
                    "FromPort": "80",
                    "ToPort": "80",
                    "CidrIp": "0.0.0.0/0",
                },
                {
                    "IpProtocol": "tcp",
                    "FromPort": "443",
                    "ToPort": "443",
                    "CidrIp": "0.0.0.0/0",
                },
            ],
        }))
    # ELB may only send traffic to the instance security group on port 80.
    cft.resources.elbegress = core.Resource(
        'ELBEgress', 'AWS::EC2::SecurityGroupEgress',
        core.Properties({
            "GroupId": functions.get_att('ELBSecurityGroup', 'GroupId'),
            "IpProtocol": "tcp",
            "FromPort": "80",
            "ToPort": "80",
            "DestinationSecurityGroupId":
                functions.get_att('InstanceSecurityGroup', 'GroupId'),
        }))
    # Security Group for EC2- instance.
    cft.resources.instancesg = core.Resource(
        'InstanceSecurityGroup', 'AWS::EC2::SecurityGroup',
        core.Properties({
            'GroupDescription': "Refinery EC2 Instance",
            'SecurityGroupEgress': [],
            'SecurityGroupIngress': [
                {
                    "IpProtocol": "tcp",
                    "FromPort": "22",
                    "ToPort": "22",
                    "CidrIp": "0.0.0.0/0",
                },
                {
                    "IpProtocol": "tcp",
                    "FromPort": "80",
                    "ToPort": "80",
                    # "CidrIp": "0.0.0.0/0",
                    # Only accept connections from the ELB.
                    "SourceSecurityGroupId":
                        functions.get_att('ELBSecurityGroup', 'GroupId'),
                },
            ],
        }))
    # Security Group for RDS instance.
    cft.resources.rdssg = core.Resource(
        'RDSSecurityGroup', 'AWS::EC2::SecurityGroup',
        core.Properties({
            'GroupDescription': "Refinery RDS",
            'SecurityGroupEgress': [
                # We would like to remove all egress rules here,
                # but you can't do that with this version
                # of CloudFormation.
                # We decided that the hacky workarounds are
                # not worth it.
            ],
            'SecurityGroupIngress': [
                {
                    "IpProtocol": "tcp",
                    "FromPort": "5432",
                    "ToPort": "5432",
                    # Only accept connections from the
                    # Instance Security Group.
                    "SourceSecurityGroupId":
                        functions.get_att('InstanceSecurityGroup',
                                          'GroupId'),
                },
            ],
        }))
    # ELB per
    # http://cfn-pyplates.readthedocs.io/en/latest/examples/options/template.html
    # Insecure, Port 80, HTTP listener
    http_listener = {
        'LoadBalancerPort': '80',
        'Protocol': 'HTTP',
        'InstanceProtocol': 'HTTP',
        'InstancePort': '80',
        'PolicyNames': []
    }
    listeners = [http_listener]
    if 'TLS_CERTIFICATE' in config:
        # Secure, Port 443, HTTPS listener
        https_listener = {
            'LoadBalancerPort': '443',
            'Protocol': 'HTTPS',
            'InstanceProtocol': 'HTTP',
            'InstancePort': '80',
            'PolicyNames': [],
            'SSLCertificateId': config['TLS_CERTIFICATE']
        }
        listeners.append(https_listener)
    cft.resources.elb = core.Resource(
        'LoadBalancer', 'AWS::ElasticLoadBalancing::LoadBalancer',
        {
            'AccessLoggingPolicy': {
                'EmitInterval': functions.ref('LogInterval'),
                'Enabled': True,
                'S3BucketName': config['S3_LOG_BUCKET'],
                # 'S3BucketPrefix' unused
            },
            'AvailabilityZones': [functions.get_att('WebInstance',
                                                    'AvailabilityZone')],
            'ConnectionSettings': {
                'IdleTimeout': 1800  # seconds
            },
            'HealthCheck': {
                'HealthyThreshold': '2',
                'Interval': '30',
                'Target': 'HTTP:80/',
                'Timeout': '5',
                'UnhealthyThreshold': '4'
            },
            'Instances': [functions.ref('WebInstance')],
            'LoadBalancerName': config['STACK_NAME'],
            'Listeners': listeners,
            'SecurityGroups': [functions.get_att('ELBSecurityGroup',
                                                 'GroupId')],
            'Tags': load_tags(),
        })
    cft.parameters.add(
        core.Parameter(
            'LogInterval', 'Number', {
                'Default': 60,
                'Description':
                    "How often, in minutes, the ELB emits its logs to the "
                    "configured S3 bucket. The ELB log facility restricts "
                    "this to be 5 or 60.",
            }))
    # Cognito Identity Pool for Developer Authenticated Identities Authflow
    # http://docs.aws.amazon.com/cognito/latest/developerguide/authentication-flow.html
    cft.resources.add(
        core.Resource(
            'IdentityPool', 'AWS::Cognito::IdentityPool',
            core.Properties({
                'IdentityPoolName': functions.ref('IdentityPoolName'),
                'AllowUnauthenticatedIdentities': False,
                'DeveloperProviderName':
                    functions.ref('DeveloperProviderName'),
            })))
    # Map authenticated identities to the upload role defined below.
    cft.resources.add(
        core.Resource(
            'IdentityPoolAuthenticatedRole',
            'AWS::Cognito::IdentityPoolRoleAttachment',
            core.Properties({
                'IdentityPoolId': functions.ref('IdentityPool'),
                'Roles': {
                    'authenticated':
                        functions.get_att('CognitoS3UploadRole', 'Arn'),
                }
            })))
    # Trust policy: only authenticated identities from this pool may
    # assume the upload role via web identity federation.
    upload_role_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {
                "Federated": "cognito-identity.amazonaws.com"
            },
            "Action": "sts:AssumeRoleWithWebIdentity",
            "Condition": {
                "StringEquals": {
                    "cognito-identity.amazonaws.com:aud":
                        functions.ref('IdentityPool')
                },
                "ForAnyValue:StringLike": {
                    "cognito-identity.amazonaws.com:amr": "authenticated"
                }
            }
        }]
    }
    # Access policy: uploads confined to a per-identity prefix in the
    # media bucket imported from the storage stack.
    upload_access_policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Action": ["cognito-identity:*"],
            "Resource": "*"
        }, {
            "Action": ["s3:PutObject", "s3:AbortMultipartUpload"],
            "Effect": "Allow",
            "Resource": {
                "Fn::Sub": [
                    "arn:aws:s3:::${MediaBucket}/uploads/"
                    "${!cognito-identity.amazonaws.com:sub}/*",
                    {
                        "MediaBucket": {
                            "Fn::ImportValue": {
                                "Fn::Sub": "${StorageStackName}Media"
                            }
                        }
                    }
                ]
            }
        }]
    }
    cft.resources.add(
        core.Resource(
            'CognitoS3UploadRole', 'AWS::IAM::Role',
            core.Properties({
                'AssumeRolePolicyDocument': upload_role_trust_policy,
                'Policies': [{
                    'PolicyName': 'AuthenticatedS3UploadPolicy',
                    'PolicyDocument': upload_access_policy,
                }]
            })))
    # See http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy  # noqa: E501
    # for full list of region--principal identifiers.
    cft.mappings.region = core.Mapping(
        'Region', {'us-east-1': {
            'ELBPrincipal': '127311923021'
        }})
    # Allow the regional ELB log-delivery account to write access logs
    # into the configured log bucket.
    cft.resources.log_policy = core.Resource(
        'LogBucketPolicy', 'AWS::S3::BucketPolicy',
        core.Properties({
            'Bucket': config['S3_LOG_BUCKET'],
            'PolicyDocument': {
                'Statement': [{
                    "Action": ["s3:PutObject"],
                    "Effect": "Allow",
                    "Resource": functions.join(
                        "", "arn:aws:s3:::", config['S3_LOG_BUCKET'],
                        "/AWSLogs/", functions.ref("AWS::AccountId"),
                        "/*"),
                    "Principal": {
                        "AWS": [
                            functions.find_in_map(
                                'Region', functions.ref("AWS::Region"),
                                'ELBPrincipal'),
                        ]
                    }
                }]
            }
        }))
    return cft
def main(): config, config_yaml = load_config() derive_config(config) unique_suffix = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M") # We discover the current git branch/commit # so that the deployment script can use it # to clone the same commit. commit = os.popen("""git rev-parse HEAD""").read().rstrip() assert commit assert "'" not in config['SITE_NAME'] instance_tags = tags.load() # Set the `Name` as it appears on the EC2 web UI. instance_tags.append({'Key': 'Name', 'Value': "refinery-web-" + unique_suffix}) config['tags'] = instance_tags config_uri = save_s3_config(config, unique_suffix) sys.stderr.write("Configuration saved to {}\n".format(config_uri)) # The userdata script is executed via CloudInit. # It's made by concatenating a block of parameter variables, # with the bootstrap.sh script, # and the aws.sh script. user_data_script = functions.join( "", "#!/bin/sh\n", "CONFIG_YAML=", base64.b64encode(config_yaml), "\n", "CONFIG_JSON=", base64.b64encode(json.dumps(config)), "\n", "AWS_DEFAULT_REGION=", functions.ref("AWS::Region"), "\n", "RDS_ID=", functions.ref('RDSInstance'), "\n", "RDS_ENDPOINT_ADDRESS=", functions.get_att('RDSInstance', 'Endpoint.Address'), "\n", "RDS_ENDPOINT_PORT=", functions.get_att('RDSInstance', 'Endpoint.Port'), "\n", "RDS_SUPERUSER_PASSWORD="******"\n", "RDS_ROLE=", config['RDS_ROLE'], "\n", "ADMIN=", config['ADMIN'], "\n", "DEFAULT_FROM_EMAIL=", config['DEFAULT_FROM_EMAIL'], "\n", "SERVER_EMAIL=", config['SERVER_EMAIL'], "\n", "IAM_SMTP_USER="******"\n", "S3_CONFIG_URI=", config['S3_CONFIG_URI'], "\n", "SITE_URL=", config['SITE_URL'], "\n", # May contain spaces, but can't contain "'" "SITE_NAME='", config['SITE_NAME'], "'\n", "GIT_BRANCH=", commit, "\n", "\n", open('bootstrap.sh').read(), open('aws.sh').read()) cft = core.CloudFormationTemplate(description="refinery platform.") rds_properties = { "AllocatedStorage": "5", "AvailabilityZone": config['AVAILABILITY_ZONE'], "BackupRetentionPeriod": "0", "DBInstanceClass": 
"db.t2.small", # todo:? "DBInstanceIdentifier": config['RDS_NAME'], "Engine": "postgres", "EngineVersion": "9.3.10", # "KmsKeyId" ? "MasterUsername": "******", "MasterUserPassword": "******", "MultiAZ": False, "Port": "5432", "PubliclyAccessible": False, "StorageType": "gp2", "Tags": instance_tags, # todo: Should be different? } if 'RDS_SNAPSHOT' in config: rds_properties['DBSnapshotIdentifier'] = config['RDS_SNAPSHOT'] cft.resources.rds_instance = core.Resource( 'RDSInstance', 'AWS::RDS::DBInstance', core.Properties(rds_properties), core.DeletionPolicy("Snapshot"), ) volume_properties = { 'AvailabilityZone': config['AVAILABILITY_ZONE'], 'Encrypted': True, 'Size': config['DATA_VOLUME_SIZE'], 'Tags': tags.load(), 'VolumeType': config['DATA_VOLUME_TYPE'], } if 'DATA_SNAPSHOT' in config: volume_properties['SnapshotId'] = config['DATA_SNAPSHOT'] # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-ebs-volume.html cft.resources.ebs = core.Resource( 'RefineryData', 'AWS::EC2::Volume', core.Properties(volume_properties), core.DeletionPolicy("Snapshot"), ) cft.resources.ec2_instance = core.Resource( 'WebInstance', 'AWS::EC2::Instance', core.Properties({ 'AvailabilityZone': config['AVAILABILITY_ZONE'], 'ImageId': 'ami-d05e75b8', 'InstanceType': 'm3.medium', 'UserData': functions.base64(user_data_script), 'KeyName': config['KEY_NAME'], 'IamInstanceProfile': functions.ref('WebInstanceProfile'), 'Tags': instance_tags, }), core.DependsOn('RDSInstance'), ) cft.resources.instance_profile = core.Resource( 'WebInstanceProfile', 'AWS::IAM::InstanceProfile', core.Properties({ 'Path': '/', 'Roles': [ functions.ref('WebInstanceRole') ] }) ) cft.resources.web_role = core.Resource( 'WebInstanceRole', 'AWS::IAM::Role', core.Properties({ # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html#cfn-iam-role-templateexamples "AssumeRolePolicyDocument": { "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Principal": { 
"Service": ["ec2.amazonaws.com"] }, "Action": ["sts:AssumeRole"] }] }, 'ManagedPolicyArns': [ 'arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess', 'arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess', 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess' ], 'Path': '/', 'Policies': [ { 'PolicyName': "CreateAccessKey", 'PolicyDocument': { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "iam:CreateAccessKey" ], "Resource": [ "*" ] } ] } }, { 'PolicyName': "CreateTags", 'PolicyDocument': { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ec2:CreateTags" ], "Resource": "*" } ] } } ] }) ) cft.resources.smtp_user = core.Resource( 'RefinerySMTPUser', 'AWS::IAM::User', core.Properties({ 'Policies': [{ 'PolicyName': "SESSendingAccess", 'PolicyDocument': { "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": "ses:SendRawEmail", "Resource": "*" }] } }] }) ) cft.resources.mount = core.Resource( 'RefineryVolume', 'AWS::EC2::VolumeAttachment', core.Properties({ 'Device': '/dev/xvdr', 'InstanceId': functions.ref('WebInstance'), 'VolumeId': functions.ref('RefineryData'), }) ) print(str(cft))
def build_template(args):
    """
    Build a CloudFormation template allowing for secure CloudTrail log
    aggregation and fine grained access control to SNS topics for
    notifications of new CloudTrail logs.

    The reason that we create IAM roles for each client AWS account in order
    to enable clients to read their own CloudTrail logs, instead of merely
    delegating access to them in an S3 bucket policy, is that
    "Bucket owner account can delegate permissions to users in its own
    account, but it cannot delegate permissions to other AWS accounts,
    because cross-account delegation is not supported." :
    http://docs.aws.amazon.com/AmazonS3/latest/dev/example-walkthroughs-managing-access-example4.html

    As a consequence we *can* delegate bucket permissions to client AWS
    accounts but we *can not* delegate object permissions (the log files
    themselves) to client AWS accounts.

    Example config :

    AccountRootARNs:
    - arn:aws:iam::012345678901:root # Sales
    - arn:aws:iam::123456789012:root # HR
    - arn:aws:iam::234567890123:root # Marketing
    CloudTrailLogConsumers:
    - arn:aws:iam::345678901234:user/security_team # Security team user
    - TrustedARN: arn:aws:iam::456789012343:root # CloudCo Third Party
      TrustingARNs:
      - arn:aws:iam::012345678901:root # Sales
      - arn:aws:iam::234567890123:root # Marketing
    - TrustedARN: arn:aws:iam::567890123434:root # Other.com Third Party
      TrustingARNs:
      - arn:aws:iam::123456789012:root # HR
    ForeignAccountStatusSubscribers:
    - arn:aws:iam::345678901234:root # Security Team

    :param args: parsed arguments; must provide ``args.config`` (a dict
        like the example above) and ``args.bucketname`` (the S3 bucket
        name for aggregated CloudTrail logs).
    :return: the populated CloudFormationTemplate object.
    """
    config = args.config
    # Tolerate a missing or malformed AccountRootARNs entry by treating
    # it as an empty account list; every per-account loop below then
    # becomes a no-op.
    account_root_arns = (config['AccountRootARNs']
                         if 'AccountRootARNs' in config
                         and isinstance(config['AccountRootARNs'], list)
                         else [])
    cft = CloudFormationTemplate(
        description="AWS CloudTrail Storage Account S3 Storage Bucket")

    # Create the bucket.  Retain on stack deletion so the log archive
    # survives the stack.
    cft.resources.add(Resource("S3Bucket",
                               "AWS::S3::Bucket",
                               {"BucketName": args.bucketname},
                               DeletionPolicy("Retain")))

    # Build the s3 bucket policy statement list
    bucket_policy_statements = []

    # Allow the CloudTrail system to GetBucketAcl on the CloudTrail storage
    # bucket
    bucket_policy_statements.append({
        "Sid": "AWSCloudTrailAclCheck",
        "Effect": "Allow",
        "Principal": {
            "Service": "cloudtrail.amazonaws.com"
        },
        "Action": ["s3:GetBucketAcl"],
        "Resource": join("", "arn:aws:s3:::", ref("S3Bucket"))
    })

    # Allow each account to read its own logs: one reader role (created
    # via a nested stack) plus one policy scoped to that account's
    # /AWSLogs/<account_id>/ prefix.
    for account_arn in account_root_arns:
        account_id = get_account_id_from_arn(account_arn)
        # Nested stack that creates the CloudTrailLogReader<id> role and
        # trusts the ARNs returned by get_consumer_arns.
        cft.resources.add(
            Resource(
                "CloudTrailLogReaderRole%s" % account_id,
                "AWS::CloudFormation::Stack",
                {
                    "TemplateURL": "https://s3.amazonaws.com/infosec-cloudformation-templates/manage_iam_role.json",
                    "Parameters": {
                        "RoleName": "CloudTrailLogReader%s" % account_id,
                        "TrustedEntities": get_consumer_arns(account_arn,
                                                             config)
                    },
                    "TimeoutInMinutes": "5"
                }
            )
        )
        # Inline policy letting the reader role fetch only that
        # account's log objects.
        cft.resources.add(
            Resource(
                "CloudTrailLogReaderPolicy%s" % account_id,
                "AWS::IAM::Policy",
                {
                    "PolicyName": "CloudTrailLogReaderPolicy%s" % account_id,
                    "PolicyDocument": {
                        "Version": "2012-10-17",
                        "Statement": [{
                            "Effect": "Allow",
                            "Action": "s3:GetObject",
                            "Resource": join(
                                "",
                                "arn:aws:s3:::",
                                ref("S3Bucket"),
                                "/AWSLogs/%s/*" % account_id)}]},
                    "Roles": ["CloudTrailLogReader%s" % account_id]
                },
                DependsOn("CloudTrailLogReaderRole%s" % account_id)
            )
        )

    # Shared managed policy granting bucket-level (not object-level)
    # read access, attached to every reader role created above.
    cft.resources.add(
        Resource("ReadCloudTrailBucket",
                 "AWS::IAM::ManagedPolicy",
                 {"Description": "ReadCloudTrailBucket",
                  "PolicyDocument": {
                      "Version": "2012-10-17",
                      "Statement": [
                          {"Effect": "Allow",
                           "Action": ["s3:ListAllMyBuckets",
                                      "s3:GetBucketLocation"],
                           "Resource": "*"
                           },
                          {"Effect": "Allow",
                           "Action": ["s3:GetBucketAcl",
                                      "s3:ListBucket",
                                      "s3:GetBucketTagging"],
                           "Resource": join("",
                                            "arn:aws:s3:::",
                                            ref("S3Bucket"))
                           }
                      ]
                  },
                  "Roles": ["CloudTrailLogReader%s"
                            % get_account_id_from_arn(account_arn)
                            for account_arn in account_root_arns]
                  },
                 DependsOn(["CloudTrailLogReaderRole%s"
                            % get_account_id_from_arn(account_arn)
                            for account_arn in account_root_arns])
                 ))

    # Allow the CloudTrail service to deliver log objects for any
    # account under /AWSLogs/, requiring the canonical CloudTrail ACL.
    bucket_policy_statements.append(
        {
            # "Sid":"AWSCloudTrailWrite%s" % get_account_id_from_arn(account_arn),
            "Effect": "Allow",
            "Principal": {
                "Service": "cloudtrail.amazonaws.com"
            },
            "Action": ["s3:PutObject"],
            "Resource": join("",
                             "arn:aws:s3:::",
                             ref("S3Bucket"),
                             "/AWSLogs/*"),
            "Condition": {
                "StringEquals": {
                    "s3:x-amz-acl": "bucket-owner-full-control"
                }
            }
        })

    # Apply the bucket policy to the bucket
    cft.resources.add(
        Resource(
            "BucketPolicy",
            "AWS::S3::BucketPolicy",
            {
                "Bucket": ref("S3Bucket"),
                "PolicyDocument": {
                    "Id": "BucketPolicyDocument",
                    "Version": "2012-10-17",
                    "Statement": bucket_policy_statements
                }
            }
        )
    )

    # Create a single SNS Topic that each AWS account can publish to to report
    # on the CloudFormation progress
    cft.resources.add(
        Resource("ForeignAccountStatusTopic",
                 "AWS::SNS::Topic",
                 {
                     "DisplayName": "Topic for foreign accounts to publish "
                                    "status information to",
                     "TopicName": "ForeignAccountStatus"
                 }
                 )
    )
    # NOTE(review): config['ForeignAccountStatusSubscribers'] is read
    # unguarded (unlike AccountRootARNs above) -- a config without that
    # key raises KeyError here; confirm whether it is mandatory.
    cft.resources.add(
        Resource("ForeignAccountStatusTopicPolicy",
                 "AWS::SNS::TopicPolicy",
                 {
                     "Topics": [ref("ForeignAccountStatusTopic")],
                     "PolicyDocument": {
                         "Version": "2012-10-17",
                         "Id": "ForeignAccountStatusPolicy",
                         "Statement": [
                             {
                                 "Sid": "ForeignAccountStatusPublisher",
                                 "Effect": "Allow",
                                 "Principal": {"AWS": account_root_arns},
                                 "Action": "SNS:Publish",
                                 "Resource": ref("ForeignAccountStatusTopic"),
                             },
                             {
                                 "Sid": "ForeignAccountStatusSubscriber",
                                 "Effect": "Allow",
                                 "Principal": {
                                     "AWS":
                                         config['ForeignAccountStatusSubscribers']
                                 },
                                 "Action": [
                                     "SNS:GetTopicAttributes",
                                     "SNS:ListSubscriptionsByTopic",
                                     "SNS:Subscribe"
                                 ],
                                 "Resource": ref("ForeignAccountStatusTopic"),
                             }
                         ]
                     }
                 }
                 )
    )

    # Create SNS Topics for each AWS account and grant those accounts rights
    # to publish and subscribe to those topics
    for account_arn in account_root_arns:
        account_id = get_account_id_from_arn(account_arn)
        cft.resources.add(
            Resource(
                "Topic%s" % account_id,
                "AWS::SNS::Topic",
                {
                    "DisplayName": "Mozilla CloudTrail Logs Topic for "
                                   "Account %s" % account_id,
                    "TopicName": "MozillaCloudTrailLogs%s" % account_id
                }
            )
        )
        # http://docs.aws.amazon.com/sns/latest/dg/AccessPolicyLanguage_UseCases_Sns.html#AccessPolicyLanguage_UseCase4_Sns
        cft.resources.add(
            Resource(
                "TopicPolicy%s" % account_id,
                "AWS::SNS::TopicPolicy",
                {
                    "Topics": [ref("Topic%s" % account_id)],
                    "PolicyDocument": {
                        "Version": "2012-10-17",
                        "Id": "AWSCloudTrailSNSPolicy%s" % account_id,
                        "Statement": [
                            {
                                # CloudTrail itself may announce new logs.
                                "Sid": "CloudTrailSNSPublish%s" % account_id,
                                "Effect": "Allow",
                                "Principal": {
                                    "Service": "cloudtrail.amazonaws.com"
                                },
                                "Action": "SNS:Publish",
                                "Resource": ref("Topic%s" % account_id)
                            },
                            {
                                # The owning account may subscribe to its
                                # own topic (ARN built by convention to
                                # match the TopicName above).
                                "Sid": "CloudTrailSNSSubscribe%s" % account_id,
                                "Effect": "Allow",
                                "Principal": {
                                    "AWS": account_arn
                                },
                                "Action": [
                                    "SNS:GetTopicAttributes",
                                    "SNS:ListSubscriptionsByTopic",
                                    "SNS:Subscribe"
                                ],
                                "Resource": join(
                                    ":",
                                    "arn:aws:sns",
                                    ref("AWS::Region"),
                                    ref("AWS::AccountId"),
                                    "MozillaCloudTrailLogs%s" % account_id)
                            }
                        ]
                    }
                }
            )
        )
    return cft
def main(): config, config_yaml = load_config() derive_config(config) unique_suffix = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M") # We discover the current git branch/commit # so that the deployment script can use it # to clone the same commit. commit = os.popen("""git rev-parse HEAD""").read().rstrip() assert commit assert "'" not in config['SITE_NAME'] instance_tags = tags.load() # Set the `Name` as it appears on the EC2 web UI. instance_tags.append({'Key': 'Name', 'Value': "refinery-web-" + unique_suffix}) config['tags'] = instance_tags config_uri = save_s3_config(config, unique_suffix) sys.stderr.write("Configuration saved to {}\n".format(config_uri)) tls_rewrite = "false" if 'TLS_CERTIFICATE' in config: tls_rewrite = "true" # The userdata script is executed via CloudInit. # It's made by concatenating a block of parameter variables, # with the bootstrap.sh script, # and the aws.sh script. user_data_script = functions.join( "", "#!/bin/sh\n", "CONFIG_YAML=", base64.b64encode(config_yaml), "\n", "CONFIG_JSON=", base64.b64encode(json.dumps(config)), "\n", "AWS_DEFAULT_REGION=", functions.ref("AWS::Region"), "\n", "RDS_ID=", functions.ref('RDSInstance'), "\n", "RDS_ENDPOINT_ADDRESS=", functions.get_att('RDSInstance', 'Endpoint.Address'), "\n", "RDS_ENDPOINT_PORT=", functions.get_att('RDSInstance', 'Endpoint.Port'), "\n", "RDS_SUPERUSER_PASSWORD="******"\n", "RDS_ROLE=", config['RDS_ROLE'], "\n", "ADMIN=", config['ADMIN'], "\n", "DEFAULT_FROM_EMAIL=", config['DEFAULT_FROM_EMAIL'], "\n", "SERVER_EMAIL=", config['SERVER_EMAIL'], "\n", "IAM_SMTP_USER="******"\n", "export FACTER_TLS_REWRITE=", tls_rewrite, "\n", "S3_CONFIG_URI=", config['S3_CONFIG_URI'], "\n", "SITE_URL=", config['SITE_URL'], "\n", # May contain spaces, but can't contain "'" "SITE_NAME='", config['SITE_NAME'], "'\n", "GIT_BRANCH=", commit, "\n", "\n", open('bootstrap.sh').read(), open('aws.sh').read()) cft = core.CloudFormationTemplate(description="refinery platform.") rds_properties = { 
"AllocatedStorage": "5", "AutoMinorVersionUpgrade": False, "AvailabilityZone": config['AVAILABILITY_ZONE'], "BackupRetentionPeriod": "0", "DBInstanceClass": "db.t2.small", # todo:? "DBInstanceIdentifier": config['RDS_NAME'], "Engine": "postgres", "EngineVersion": "9.3.14", # "KmsKeyId" ? "MasterUsername": "******", "MasterUserPassword": "******", "MultiAZ": False, "Port": "5432", "PubliclyAccessible": False, "StorageType": "gp2", "Tags": instance_tags, # todo: Should be different? } if 'RDS_SNAPSHOT' in config: rds_properties['DBSnapshotIdentifier'] = config['RDS_SNAPSHOT'] cft.resources.rds_instance = core.Resource( 'RDSInstance', 'AWS::RDS::DBInstance', core.Properties(rds_properties), core.DeletionPolicy("Snapshot"), ) volume_properties = { 'AvailabilityZone': config['AVAILABILITY_ZONE'], 'Encrypted': True, 'Size': config['DATA_VOLUME_SIZE'], 'Tags': tags.load(), 'VolumeType': config['DATA_VOLUME_TYPE'], } if 'DATA_SNAPSHOT' in config: volume_properties['SnapshotId'] = config['DATA_SNAPSHOT'] # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-ebs-volume.html cft.resources.ebs = core.Resource( 'RefineryData', 'AWS::EC2::Volume', core.Properties(volume_properties), core.DeletionPolicy("Snapshot"), ) cft.resources.ec2_instance = core.Resource( 'WebInstance', 'AWS::EC2::Instance', core.Properties({ 'AvailabilityZone': config['AVAILABILITY_ZONE'], 'ImageId': 'ami-d05e75b8', 'InstanceType': 'm3.medium', 'UserData': functions.base64(user_data_script), 'KeyName': config['KEY_NAME'], 'IamInstanceProfile': functions.ref('WebInstanceProfile'), 'Tags': instance_tags, }), core.DependsOn('RDSInstance'), ) cft.resources.instance_profile = core.Resource( 'WebInstanceProfile', 'AWS::IAM::InstanceProfile', core.Properties({ 'Path': '/', 'Roles': [ functions.ref('WebInstanceRole') ] }) ) cft.resources.web_role = core.Resource( 'WebInstanceRole', 'AWS::IAM::Role', core.Properties({ # 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html#cfn-iam-role-templateexamples "AssumeRolePolicyDocument": { "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Principal": { "Service": ["ec2.amazonaws.com"] }, "Action": ["sts:AssumeRole"] }] }, 'ManagedPolicyArns': [ 'arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess', 'arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess', 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess' ], 'Path': '/', 'Policies': [ { 'PolicyName': "CreateAccessKey", 'PolicyDocument': { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "iam:CreateAccessKey" ], "Resource": [ "*" ] } ] }, }, { 'PolicyName': "CreateSnapshot", 'PolicyDocument': { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ec2:CreateSnapshot" ], "Resource": [ "*" ] } ] } }, { 'PolicyName': "CreateDBSnapshot", 'PolicyDocument': { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "rds:CreateDBSnapshot" ], "Resource": [ "*" ] } ] } }, { 'PolicyName': "CreateTags", 'PolicyDocument': { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ec2:CreateTags" ], "Resource": "*" } ] } } ] }) ) cft.resources.smtp_user = core.Resource( 'RefinerySMTPUser', 'AWS::IAM::User', core.Properties({ 'Policies': [{ 'PolicyName': "SESSendingAccess", 'PolicyDocument': { "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": "ses:SendRawEmail", "Resource": "*" }] } }] }) ) cft.resources.mount = core.Resource( 'RefineryVolume', 'AWS::EC2::VolumeAttachment', core.Properties({ 'Device': '/dev/xvdr', 'InstanceId': functions.ref('WebInstance'), 'VolumeId': functions.ref('RefineryData'), }) ) cft.resources.elbsg = core.Resource( 'ELBSecurityGroup', 'AWS::EC2::SecurityGroup', core.Properties({ 'GroupDescription': "Refinery ELB", 'SecurityGroupEgress': [], 'SecurityGroupIngress': [ { "IpProtocol": "tcp", "FromPort": "80", "ToPort": "80", "CidrIp": "0.0.0.0/0", }, { 
"IpProtocol": "tcp", "FromPort": "443", "ToPort": "443", "CidrIp": "0.0.0.0/0", }, ], }) ) # ELB per # http://cfn-pyplates.readthedocs.io/en/latest/examples/options/template.html # Insecure, Port 80, HTTP listener http_listener = { 'LoadBalancerPort': '80', 'Protocol': 'HTTP', 'InstanceProtocol': 'HTTP', 'InstancePort': '80', 'PolicyNames': [] } listeners = [http_listener] if 'TLS_CERTIFICATE' in config: # Secure, Port 443, HTTPS listener https_listener = { 'LoadBalancerPort': '443', 'Protocol': 'HTTPS', 'InstanceProtocol': 'HTTP', 'InstancePort': '80', 'PolicyNames': [], 'SSLCertificateId': config['TLS_CERTIFICATE'] } listeners.append(https_listener) cft.resources.elb = core.Resource( 'LoadBalancer', 'AWS::ElasticLoadBalancing::LoadBalancer', { 'AvailabilityZones': [config['AVAILABILITY_ZONE']], 'HealthCheck': { 'HealthyThreshold': '2', 'Interval': '30', 'Target': 'HTTP:80/', 'Timeout': '5', 'UnhealthyThreshold': '4' }, 'Instances': [functions.ref('WebInstance')], 'Listeners': listeners, 'SecurityGroups': [ functions.get_att('ELBSecurityGroup', 'GroupId')], "Tags": instance_tags, # todo: Should be different? }) sys.stdout.write(str(cft))