def create_api_user_resource(template, api_user_name_variable, uploads_bucket_resource):
    """Add the API's IAM user to *template* and return the added resource.

    The user carries a single inline policy granting full S3 access to the
    uploads bucket and to every object underneath it.
    """
    bucket_arn = GetAtt(uploads_bucket_resource, 'Arn')
    # One statement covering both the bucket itself and all keys below it.
    s3_statement = {
        'Action': 's3:*',
        'Effect': 'Allow',
        'Resource': [bucket_arn, Join('/', [bucket_arn, '*'])],
    }
    api_policy = iam.Policy(
        PolicyName='ApiUserPolicy',
        PolicyDocument={
            'Version': '2012-10-17',
            'Statement': [s3_statement],
        },
    )
    api_user = iam.User(
        'ApiUser',
        UserName=api_user_name_variable,
        Policies=[api_policy],
    )
    return template.add_resource(api_user)
def create_ci_user_resource(template, ci_user_name_variable):
    """Add the CI deployment IAM user to *template* and return the resource.

    The inline policy lets CI push container images (ECR), roll services
    (ECS UpdateService) and read deployment secrets (Secrets Manager).
    """
    statements = [
        {'Action': 'ecr:*', 'Effect': 'Allow', 'Resource': '*'},
        {'Action': 'ecs:UpdateService', 'Effect': 'Allow', 'Resource': '*'},
        {'Action': 'secretsmanager:GetSecretValue',
         'Effect': 'Allow',
         'Resource': '*'},
    ]
    ci_policy = iam.Policy(
        PolicyName='CiUserPolicy',
        PolicyDocument={
            'Version': '2012-10-17',
            'Statement': statements,
        },
    )
    return template.add_resource(
        iam.User('CiUser', UserName=ci_user_name_variable, Policies=[ci_policy]))
def generate(template):
    """Add the 'DeploymentRobot' IAM user and an active access key to *template*.

    Both the access-key id and its secret are exported as stack outputs so
    the deployment tooling can read them after stack creation.
    """
    robot = template.add_resource(
        iam.User(
            'DeploymentRobot',
            UserName="******",
            Groups=["Robots"],
        ))
    # NOTE(review): the logical id below misspells "Access" ("Acess").
    # Correcting it would replace the key resource on the next deploy,
    # so it is deliberately left untouched here.
    robot_key = template.add_resource(
        iam.AccessKey(
            'DeploymentRobotAcessKey',
            DependsOn=[robot],
            Serial=0,
            Status='Active',
            UserName=Ref(robot),
        ))
    for output in (
            Output('DeploymentRobotAccessKey', Value=Ref(robot_key)),
            Output('DeploymentRobotSecretAccessKey',
                   Value=GetAtt(robot_key, "SecretAccessKey")),
    ):
        template.add_output(output)
def create_template(self):
    """Build the functional-test IAM user, its access key and the outputs.

    The inline "Stacker" policy is scoped to the stacker bucket and to
    CloudFormation stacks under the stacker namespace; any action against
    this stack itself is explicitly denied.
    """
    t = self.template
    bucket_arn = Sub("arn:aws:s3:::${StackerBucket}*")
    cloudformation_scope = Sub(
        "arn:aws:cloudformation:${AWS::Region}:${AWS::AccountId}:"
        "stack/${StackerNamespace}-*")
    changeset_scope = "*"

    # Bucket-level permissions on the stacker artifact bucket.
    bucket_statement = Statement(
        Effect="Allow",
        Resource=[bucket_arn],
        Action=[
            awacs.s3.ListBucket,
            awacs.s3.GetBucketLocation,
            awacs.s3.CreateBucket,
        ])
    # Object-level read/write within the same bucket.
    object_statement = Statement(
        Effect="Allow",
        Resource=[bucket_arn],
        Action=[
            awacs.s3.GetObject,
            awacs.s3.GetObjectAcl,
            awacs.s3.PutObject,
            awacs.s3.PutObjectAcl,
        ])
    # Changeset operations cannot be scoped more narrowly than "*".
    changeset_statement = Statement(
        Effect="Allow",
        Resource=[changeset_scope],
        Action=[
            awacs.cloudformation.DescribeChangeSet,
            awacs.cloudformation.ExecuteChangeSet,
            awacs.cloudformation.DeleteChangeSet,
        ])
    # The test user must never operate on the stack it belongs to.
    own_stack_denial = Statement(
        Effect="Deny",
        Resource=[Ref("AWS::StackId")],
        Action=[awacs.cloudformation.Action("*")])
    stack_statement = Statement(
        Effect="Allow",
        Resource=[cloudformation_scope],
        Action=[
            awacs.cloudformation.GetTemplate,
            awacs.cloudformation.CreateChangeSet,
            awacs.cloudformation.DeleteChangeSet,
            awacs.cloudformation.DeleteStack,
            awacs.cloudformation.CreateStack,
            awacs.cloudformation.UpdateStack,
            awacs.cloudformation.DescribeStacks,
        ])

    # This represents the precise IAM permissions that stacker itself needs.
    stacker_policy = iam.Policy(
        PolicyName="Stacker",
        PolicyDocument=Policy(Statement=[
            bucket_statement,
            object_statement,
            changeset_statement,
            own_stack_denial,
            stack_statement,
        ]))

    user = t.add_resource(
        iam.User("FunctionalTestUser", Policies=[stacker_policy]))
    key = t.add_resource(
        iam.AccessKey("FunctionalTestKey", Serial=1, UserName=Ref(user)))
    t.add_output(Output("User", Value=Ref(user)))
    t.add_output(Output("AccessKeyId", Value=Ref(key)))
    t.add_output(
        Output("SecretAccessKey",
               Value=GetAtt("FunctionalTestKey", "SecretAccessKey")))
def _build_template(self, template):
    """Materialise this user definition into *template*.

    Adds the IAM user itself, optionally an access key (with its serial
    exposed as a template parameter so the key can be rotated by bumping
    the serial), optionally a console login profile, and finally binds any
    attached policies and managed-policy ARNs.
    """
    t = template
    user = t.add_resource(
        iam.User(self.name, Path="/", Policies=[], ManagedPolicyArns=[]))
    if self._login is not None:
        user.UserName = self._login
    if self._generate_key:
        # Changing this serial forces CloudFormation to rotate the key.
        key_serial_param = t.add_parameter(
            Parameter('Input{}IAMUserKeySerial'.format(self.name),
                      Type='String',
                      Default='1',
                      Description='Serial for User:{} key'.format(
                          self.name)))
        key = t.add_resource(
            iam.AccessKey('{}IAMAccessKey'.format(self.name),
                          Serial=Ref(key_serial_param),
                          Status='Active',
                          UserName=Ref(user)))
        # Export both halves of the credential pair.
        t.add_output([
            Output('{}IAMSecretKey'.format(self.name),
                   Value=GetAtt(key.title, 'SecretAccessKey'),
                   Description='IAM SecretKey for {}'.format(self.name)),
            Output('{}IAMAccessKey'.format(self.name),
                   Value=Ref(key),
                   Description='IAM AccessKey for {}'.format(self.name))
        ])
    if self._allow_console:
        # NOTE(review): a hard-coded default console password is embedded
        # here; it is only forced to change on first login -- confirm this
        # is acceptable for the deployment's security posture.
        def_passwd_param = t.add_parameter(
            Parameter('Input{}DefaultConsolePasswd'.format(self.name),
                      Type='String',
                      Default='D3fau1t9a55w0r6_c4ang3m3',
                      Description='Default console passwd for {}'.format(
                          self.name)))
        user.LoginProfile = iam.LoginProfile(
            Password=Ref(def_passwd_param), PasswordResetRequired=True)
    # Attach inline policies; _bind_role is presumably responsible for
    # adding the policy resources to the template -- confirm in its impl.
    for policy in self.policies:
        policy._bind_role(t, user)
    for arn in self.managed_policy_arns:
        user.ManagedPolicyArns.append(arn)
    t.add_output([
        Output('{}IAMUser'.format(self.name),
               Value=Ref(user),
               Description='{} User Output'.format(self.name))
    ])
def get_template():
    """Return a Template with four dummy parameters and one IAM user.

    The user is exposed through the ``output`` output so callers can
    reference it.
    """
    template = Template()
    # Parm2 is numeric; the remaining parameters are plain strings.
    for name, param_type in (("Parm1", "String"),
                             ("Parm2", "Number"),
                             ("Parm3", "String"),
                             ("Parm4", "String")):
        template.add_parameter(Parameter(name, Type=param_type))
    test_user = template.add_resource(
        iam.User("testIamUser", UserName="******"))
    template.add_output(Output("output", Value=Ref(test_user)))
    return template
def __init__(self, parameters, groups, roles):
    """Declare the account's IAM users, group memberships and the EC2
    baseline instance profile.

    Resources are held as attributes; surrounding code is presumably
    responsible for adding them to a template -- confirm at call site.
    """
    super(Users, self).__init__()
    # Console user; must change the shared default password at first login.
    self.DanielPilch = iam.User(
        "DanielPilch",
        Path="/",
        LoginProfile=iam.LoginProfile(Password=Ref(
            parameters.DefaultPassword.title),
                                      PasswordResetRequired=True),
    )
    # Headless CI user: no login profile, no path.
    self.CIUser = iam.User("CIUser", )
    # User to group memberships
    self.AWSEngineersMembership = iam.UserToGroupAddition(
        "AWSEngineersMembership",
        GroupName=Ref(groups.AWSEngineers),
        Users=[
            Ref(self.DanielPilch),
        ],
    )
    self.CIDeploymentMembership = iam.UserToGroupAddition(
        "CIDeploymentMembership",
        GroupName=Ref(groups.CIDeploymentServices),
        Users=[
            Ref(self.CIUser),
        ],
    )
    # EC2 Baseline Instance Profile
    self.EC2BaselineProfile = iam.InstanceProfile(
        "EC2BaselineProfile",
        Path="/",
        Roles=[Ref(roles.EC2Baseline)],
        InstanceProfileName="EC2BaselineProfile")
def IAM_Users(key):
    """Create an IAM user plus a companion role for every entry of ``cfg.<key>``.

    Group memberships and imported managed-policy ARNs are wrapped in
    per-group "equals yes" conditions so each attachment can be toggled
    from configuration.
    """
    for user_key, user_cfg in getattr(cfg, key).items():
        resname = f"{key}{user_key}"  # Ex. IAMUserPincoPalla
        if not user_cfg.get("IBOX_ENABLED", True):
            continue
        managed_arns = []
        group_memberships = []
        if "RoleGroups" in user_cfg:
            for group_name in user_cfg["RoleGroups"]:
                condname = f"{resname}RoleGroups{group_name}"
                # conditions
                add_obj(get_condition(condname, "equals", "yes"))
                # resources
                group_memberships.append(
                    If(condname, group_name, Ref("AWS::NoValue")))
                try:
                    policy_arns = cfg.IAMGroup[group_name]["ManagedPolicyArns"]
                except Exception:
                    # Group has no managed policies configured; nothing to attach.
                    pass
                else:
                    managed_arns.extend(
                        If(
                            condname,
                            ImportValue(f"IAMPolicy{p}"),
                            Ref("AWS::NoValue"),
                        ) for p in policy_arns)
        # resources
        r_Role = iam.Role(f"IAMRole{user_key}", ManagedPolicyArns=managed_arns)
        auto_get_props(
            r_Role,
            mapname="IAMRoleUser",
            linked_obj_name=resname,
            linked_obj_index=user_cfg["UserName"],
        )
        r_User = iam.User(resname, Groups=group_memberships)
        auto_get_props(r_User, indexname=user_key,
                       remapname=user_cfg["UserName"])
        add_obj([r_User, r_Role])
def r_iam_user(self):
    """Return the S3 bot IAM user with full access to objects in the bucket.

    Path and user name come from ``self.conf`` (both run through ``Sub``);
    the inline policy is scoped to ``<bucket>/*`` only, not the bucket
    resource itself.
    """
    object_arn = Join(
        "",
        ["arn:aws:s3:::", self.r_bucket.ref(), '/*'],
    )
    bot_policy = iam.Policy(
        'S3BotUserPolicy',
        PolicyName=Sub("${AWS::StackName}-policy"),
        PolicyDocument={
            "Statement": [{
                "Action": ['s3:*'],
                "Effect": "Allow",
                "Resource": object_arn,
            }]
        })
    return iam.User(
        'S3BotUser',
        Path=Sub(self.conf['path']),
        UserName=Sub(self.conf['username']),
        Policies=[bot_policy])
def create_template():
    """Build the CI bootstrap stack for deploying the Why82? Lambda code.

    Creates one CI IAM user whose inline policy is scoped to the AWS calls
    a serverless deploy performs (CloudFormation, S3, logs, IAM roles,
    Lambda, CloudWatch Events), plus an access key, and exports the
    user/key/secret as stack outputs.
    """
    cft = Template(
        Description=
        'A stack to set up the requirements needed for deploying Why82? Lambda code'
    )
    policy_doc = {
        'Statement': [
            # Stack lifecycle, restricted to stacks named after the service.
            {
                'Action': [
                    'cloudformation:DescribeStackResources',
                    'cloudformation:DescribeStackResource',
                    'cloudformation:DescribeStacks',
                    'cloudformation:DescribeStackEvents',
                    'cloudformation:CreateStack',
                    'cloudformation:DeleteStack',
                    'cloudformation:UpdateStack'
                ],
                'Effect': 'Allow',
                'Resource': Join(':', [
                    'arn', 'aws', 'cloudformation',
                    Ref('AWS::Region'),
                    Ref('AWS::AccountId'),
                    Join('/', ['stack', SERVICE_PROD, '*'])
                ])
            },
            # Bucket-level access to the serverless deployment bucket and
            # the service bucket.
            {
                'Action': [
                    's3:CreateBucket', 's3:ListBucket', 's3:DeleteBucket',
                    's3:PutBucketNotification'
                ],
                'Effect': 'Allow',
                'Resource': [
                    Join(':::', [
                        'arn:aws:s3',
                        ('%s-serverlessdeploymentbucket-*' % SERVICE_PROD)
                    ]),
                    Join(':::', ['arn:aws:s3', SERVICE_PROD])
                ]
            },
            # Object-level read/write/delete in both buckets.
            {
                'Action': ['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
                'Effect': 'Allow',
                'Resource': [
                    Join(':::', [
                        'arn:aws:s3',
                        ('%s-serverlessdeploymentbucket-*/*' % SERVICE_PROD)
                    ]),
                    Join(':::', ['arn:aws:s3', ('%s/*' % SERVICE_PROD)])
                ]
            },
            # CORS / ACL management on the service bucket.
            {
                'Action':
                ['s3:PutBucketCORS', 's3:GetBucketCORS', 's3:PutBucketAcl'],
                'Effect': 'Allow',
                'Resource': Join(':::', ['arn:aws:s3', SERVICE_PROD])
            },
            # Read access to the Lambda's CloudWatch log group.
            {
                'Action': ['logs:DescribeLogStreams', 'logs:FilterLogEvents'],
                'Effect': 'Allow',
                'Resource': Join(':', [
                    'arn', 'aws', 'logs',
                    Ref('AWS::Region'),
                    Ref('AWS::AccountId'), 'log-group',
                    ('/aws/lambda/%s' % SERVICE_PROD)
                ])
            },
            # Management of the Lambda execution role created by serverless.
            {
                'Action': [
                    'iam:CreateRole', 'iam:UpdateRole', 'iam:DeleteRole',
                    'iam:PutRolePolicy', 'iam:DeleteRolePolicy',
                    'iam:ListRolePolicies', 'iam:ListRoles', 'iam:PassRole',
                    'iam:GetRole'
                ],
                'Effect': 'Allow',
                'Resource': Join(':', [
                    'arn', 'aws', 'iam', '',
                    Ref('AWS::AccountId'),
                    ('role/%s-IamRoleLambdaExecution-*' % SERVICE_PROD)
                ])
            },
            {
                'Action': [
                    'lambda:GetFunction', 'lambda:CreateFunction',
                    'lambda:AddPermission', 'lambda:RemovePermission',
                    'lambda:DeleteFunction', 'lambda:InvokeFunction',
                    'lambda:GetFunctionConfiguration',
                    'lambda:UpdateFunctionConfiguration',
                    'lambda:UpdateFunctionCode'
                ],
                'Effect': 'Allow',
                # Currently, AWS Lambda doesn't support permissions for this particular action at the resource-level.
                # Therefore, the policy specifies a wildcard character (*) as the Resource value.
                # http://docs.aws.amazon.com/lambda/latest/dg/access-control-identity-based.html
                'Resource': '*'
            },
            # Scheduled-event rules created for the service.
            {
                'Action': [
                    'events:PutRule', 'events:PutTargets',
                    'events:RemoveTargets', 'events:DescribeRule',
                    'events:DeleteRule'
                ],
                'Effect': 'Allow',
                'Resource': Join(':', [
                    'arn', 'aws', 'events',
                    Ref('AWS::Region'),
                    Ref('AWS::AccountId'),
                    ('rule/%s-*' % SERVICE_PROD)
                ])
            }
        ]
    }
    user = iam.User(title='ciUser',
                    UserName=CI_USERNAME,
                    Policies=[
                        iam.Policy(PolicyDocument=policy_doc,
                                   PolicyName=('%s-cipolicy' % SERVICE))
                    ])
    key = iam.AccessKey(title='ciKey', UserName=Ref(user))
    cft.add_resource(user)
    cft.add_resource(key)
    # Export the credentials so CI can be configured from the stack outputs.
    cft.add_output([
        Output('CiUser',
               Description='The user that CI will use to do releases',
               Value=Ref(user)),
        Output('CiAccessKey',
               Description='The CI user\'s access key',
               Value=Ref(key)),
        Output('CiSecretKey',
               Description='The CI user\'s secret key',
               Value=GetAtt(key, 'SecretAccessKey'))
    ])
    return cft
def generate(account_list=None, region_list=None, file_location=None, output_keys=False, dry_run=False):
    """CloudFormation template generator for use in creating the resources required to capture logs in a centrally managed account per UCSD standards."""
    # The CLI layer may hand us a tuple; normalise to a list so the
    # per-account comprehension in the bucket policy works uniformly.
    if type(account_list) == tuple:
        account_list = list(account_list)
    parameter_groups = []
    # NOTE(review): region_list is computed here but never used below --
    # confirm whether it was meant to drive any resource.
    region_list = region_list if region_list else ['us-west-1', 'us-west-2', 'us-east-1', 'us-east-2']
    t = Template()
    t.add_version("2010-09-09")
    t.add_description("UCSD Log Target AWS CloudFormation Template - this CFn template configures a given account to receive logs from other accounts so as to aggregate and then optionally forward those logs on to the UCSD Splunk installation.")

    # Create Kinesis and IAM Roles
    log_stream_shard_count = t.add_parameter(
        Parameter("LogStreamShardCount",
                  Description="Number of shards to create within the AWS Kinesis stream created to handle CloudWatch Logs.",
                  Type="Number",
                  MinValue=1,
                  MaxValue=64,
                  Default=1))
    log_stream_retention_period = t.add_parameter(
        Parameter("LogStreamRetentionPeriod",
                  Description="Number of hours to retain logs in the Kinesis stream.",
                  Type="Number",
                  MinValue=24,
                  MaxValue=120,
                  Default=24))
    parameter_groups.append({'Label': {'default': 'Log Stream Inputs'},
                             'Parameters': [log_stream_shard_count.name,
                                            log_stream_retention_period.name]})
    log_stream = t.add_resource(
        k.Stream("LogStream",
                 RetentionPeriodHours=Ref(log_stream_retention_period),
                 ShardCount=Ref(log_stream_shard_count)))
    firehose_bucket = t.add_resource(s3.Bucket('LogS3DeliveryBucket'))
    # Role assumed by Firehose; the sts:ExternalId condition pins the
    # assumption to this account id (standard Firehose pattern).
    firehose_delivery_role = t.add_resource(
        iam.Role('LogS3DeliveryRole',
                 AssumeRolePolicyDocument=Policy(
                     Statement=[Statement(
                         Effect=Allow,
                         Action=[AssumeRole],
                         Principal=Principal('Service', 'firehose.amazonaws.com'),
                         Condition=Condition(StringEquals('sts:ExternalId', AccountId)))])))
    log_s3_delivery_policy = t.add_resource(
        iam.PolicyType('LogS3DeliveryPolicy',
                       Roles=[Ref(firehose_delivery_role)],
                       PolicyName='LogS3DeliveryPolicy',
                       PolicyDocument=Policy(
                           Statement=[
                               # Firehose write access to the delivery bucket.
                               Statement(
                                   Effect=Allow,
                                   Action=[as3.AbortMultipartUpload,
                                           as3.GetBucketLocation,
                                           as3.GetObject,
                                           as3.ListBucket,
                                           as3.ListBucketMultipartUploads,
                                           as3.PutObject],
                                   Resource=[
                                       Join('', ['arn:aws:s3:::', Ref(firehose_bucket)]),
                                       Join('', ['arn:aws:s3:::', Ref(firehose_bucket), '*'])]),
                               # Firehose read access to the source Kinesis stream.
                               Statement(
                                   Effect=Allow,
                                   Action=[akinesis.Action('Get*'),
                                           akinesis.DescribeStream,
                                           akinesis.ListStreams],
                                   Resource=[
                                       GetAtt(log_stream, 'Arn')
                                   ])])))
    s3_firehose = t.add_resource(
        fh.DeliveryStream('LogToS3DeliveryStream',
                          DependsOn=[log_s3_delivery_policy.name],
                          DeliveryStreamName='LogToS3DeliveryStream',
                          DeliveryStreamType='KinesisStreamAsSource',
                          KinesisStreamSourceConfiguration=fh.KinesisStreamSourceConfiguration(
                              KinesisStreamARN=GetAtt(log_stream, 'Arn'),
                              RoleARN=GetAtt(firehose_delivery_role, 'Arn')
                          ),
                          S3DestinationConfiguration=fh.S3DestinationConfiguration(
                              BucketARN=GetAtt(firehose_bucket, 'Arn'),
                              BufferingHints=fh.BufferingHints(
                                  IntervalInSeconds=300,
                                  SizeInMBs=50
                              ),
                              CompressionFormat='UNCOMPRESSED',
                              Prefix='firehose/',
                              RoleARN=GetAtt(firehose_delivery_role, 'Arn'),
                          )))
    t.add_output(Output('SplunkKinesisLogStream',
                        Value=GetAtt(log_stream, 'Arn'),
                        Description='ARN of the kinesis stream for log aggregation.'))

    # Generate Bucket with Lifecycle Policies
    ct_s3_key_prefix = t.add_parameter(
        Parameter('CloudTrailKeyPrefix',
                  Type='String',
                  Default='',
                  Description='Key name prefix for logs being sent to S3'))
    bucket_name = t.add_parameter(
        Parameter("BucketName",
                  Description="Name to assign to the central logging retention bucket",
                  Type="String",
                  AllowedPattern="([a-z]|[0-9])+",
                  MinLength=2,
                  MaxLength=64))
    glacier_migration_days = t.add_parameter(
        Parameter("LogMoveToGlacierInDays",
                  Description="Number of days until logs are expired from S3 and transitioned to Glacier",
                  Type="Number",
                  Default=365))
    glacier_deletion_days = t.add_parameter(
        Parameter("LogDeleteFromGlacierInDays",
                  Description="Number of days until logs are expired from Glacier and deleted",
                  Type="Number",
                  Default=365*7))
    parameter_groups.append({'Label': {'default': 'S3 Log Destination Parameters'},
                             'Parameters': [bucket_name.name,
                                            ct_s3_key_prefix.name,
                                            glacier_migration_days.name,
                                            glacier_deletion_days.name]})
    dead_letter_queue = t.add_resource(sqs.Queue('deadLetterQueue'))
    queue = t.add_resource(
        sqs.Queue('s3DeliveryQueue',
                  MessageRetentionPeriod=14*24*60*60,  # 14 d * 24 h * 60 m * 60 s
                  VisibilityTimeout=5*60,
                  RedrivePolicy=sqs.RedrivePolicy(
                      deadLetterTargetArn=GetAtt(dead_letter_queue, 'Arn'),
                      maxReceiveCount=10
                  )))  # 5 m * 60 s per Splunk docs here: http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWS#Configure_SQS
    t.add_output(Output('SplunkS3Queue',
                        Value=GetAtt(queue, 'Arn'),
                        Description='Queue for Splunk SQS S3 ingest'))
    t.add_output(Output('SplunkS3DeadLetterQueue',
                        Value=GetAtt(dead_letter_queue, 'Arn'),
                        Description="Dead letter queue for Splunk SQS S3 ingest"))
    # Allow S3 (the log delivery bucket) to publish object-created events.
    t.add_resource(
        sqs.QueuePolicy('s3DeliveryQueuePolicy',
                        PolicyDocument=Policy(
                            Statement=[Statement(
                                Effect=Allow,
                                Principal=Principal("AWS", "*"),
                                Action=[asqs.SendMessage],
                                Resource=[GetAtt(queue, 'Arn')],
                                Condition=Condition(
                                    ArnLike("aws:SourceArn",
                                            Join('', ["arn:aws:s3:*:*:", Ref(bucket_name)]))))]),
                        Queues=[Ref(queue)]))
    bucket = t.add_resource(
        s3.Bucket("LogDeliveryBucket",
                  DependsOn=[log_stream.name, queue.name],
                  BucketName=Ref(bucket_name),
                  AccessControl="LogDeliveryWrite",
                  NotificationConfiguration=s3.NotificationConfiguration(
                      QueueConfigurations=[s3.QueueConfigurations(
                          Event="s3:ObjectCreated:*",
                          Queue=GetAtt(queue, 'Arn'))]),
                  LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
                      s3.LifecycleRule(
                          Id="S3ToGlacierTransition",
                          Status="Enabled",
                          ExpirationInDays=Ref(glacier_deletion_days),
                          Transition=s3.LifecycleRuleTransition(
                              StorageClass="Glacier",
                              TransitionInDays=Ref(glacier_migration_days)))])))
    # Grant CloudTrail write access for every account in account_list.
    bucket_policy = t.add_resource(
        s3.BucketPolicy("LogDeliveryBucketPolicy",
                        Bucket=Ref(bucket),
                        PolicyDocument=Policy(
                            Statement=[
                                Statement(
                                    Effect="Allow",
                                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                                    Action=[GetBucketAcl],
                                    Resource=[GetAtt(bucket, 'Arn')]),
                                Statement(
                                    Effect="Allow",
                                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                                    Action=[PutObject],
                                    Condition=Condition(StringEquals({"s3:x-amz-acl": "bucket-owner-full-control"})),
                                    Resource=[Join('', [GetAtt(bucket, "Arn"),
                                                        Ref(ct_s3_key_prefix),
                                                        "/AWSLogs/", acct_id, "/*"])
                                              for acct_id in account_list])])))
    splunk_sqs_s3_user = t.add_resource(
        iam.User('splunkS3SQSUser', Path='/', UserName='******'))
    splunk_user_policy = t.add_resource(
        _generate_splunk_policy(users=[Ref(splunk_sqs_s3_user)]))
    t.add_output(Output('BucketName',
                        Description="Name of the bucket for CloudTrail log delivery",
                        Value=Ref(bucket)))

    # Log destination setup
    cwl_to_kinesis_role = t.add_resource(
        iam.Role('CWLtoKinesisRole',
                 AssumeRolePolicyDocument=Policy(
                     Statement=[Statement(
                         Effect=Allow,
                         Action=[AssumeRole],
                         Principal=Principal("Service",
                                             Join('', ["logs.", Region, ".amazonaws.com"])))])))
    cwl_to_kinesis_policy_link = t.add_resource(
        iam.PolicyType('CWLtoKinesisPolicy',
                       PolicyName='CWLtoKinesisPolicy',
                       Roles=[Ref(cwl_to_kinesis_role)],
                       PolicyDocument=Policy(
                           Statement=[
                               Statement(
                                   Effect=Allow,
                                   Resource=[GetAtt(log_stream, 'Arn')],
                                   Action=[akinesis.PutRecord]),
                               Statement(
                                   Effect=Allow,
                                   Resource=[GetAtt(cwl_to_kinesis_role, 'Arn')],
                                   Action=[IAMPassRole])])))
    log_destination = t.add_resource(
        cwl.Destination('CWLtoKinesisDestination',
                        DependsOn=[cwl_to_kinesis_policy_link.name],
                        DestinationName='CWLtoKinesisDestination',
                        DestinationPolicy=_generate_log_destination_policy_test('CWLtoKinesisDestination', account_list),
                        RoleArn=GetAtt(cwl_to_kinesis_role, 'Arn'),
                        TargetArn=GetAtt(log_stream, 'Arn')))
    t.add_output(Output('childAccountLogDeliveryDestinationArn',
                        Value=GetAtt(log_destination, 'Arn'),
                        Description='Log Destination to specify when deploying the source cloudformation template in other accounts.'))
    if output_keys:
        # Only mint (and output) long-lived credentials when explicitly asked.
        splunk_user_creds = t.add_resource(
            iam.AccessKey('splunkAccountUserCreds',
                          UserName=Ref(splunk_sqs_s3_user)))
        t.add_output(Output('splunkUserAccessKey',
                            Description='AWS Access Key for the user created for splunk to use when accessing logs',
                            Value=Ref(splunk_user_creds)))
        t.add_output(Output('splunkUserSecretKey',
                            Description='AWS Secret Access Key ID for the user created for splunk to use when accessing logs',
                            Value=GetAtt(splunk_user_creds, 'SecretAccessKey')))
    t.add_output(Output('splunkCWLRegion',
                        Description="The AWS region that contains the data. In aws_cloudwatch_logs_tasks.conf, enter the region ID.",
                        Value=Region))
    t.add_output(Output("DeploymentAccount",
                        Value=AccountId,
                        Description="Convenience Output for referencing AccountID of the log aggregation account"))
    t.add_metadata({"AWS::CloudFormation::Interface": {"ParameterGroups": parameter_groups}})
    if dry_run:
        print(t.to_json())
    else:
        save_path = file_location if file_location else os.path.join(log_aggregation_cf, 'log_targets.json')
        with open(save_path, 'w') as f:
            f.write(t.to_json())
notifications_queue_resource = template.add_resource( sqs.Queue('NotificationsQueue', QueueName=notifications_queue_name_variable)) search_queue_resource = template.add_resource( sqs.Queue('SearchQueue', QueueName=search_queue_name_variable)) ci_user_resource = template.add_resource( iam.User('CiUser', UserName=ci_user_name_variable, Policies=[ iam.Policy(PolicyName='CiUserPolicy', PolicyDocument={ 'Version': '2012-10-17', 'Statement': [{ 'Action': 'secretsmanager:GetSecretValue', 'Effect': 'Allow', 'Resource': '*' }] }) ])) api_user_resource = template.add_resource( iam.User('ApiUser', UserName=api_user_name_variable, Policies=[ iam.Policy(PolicyName='ApiUserPolicy', PolicyDocument={ 'Version': '2012-10-17',
def create_template(self):
    """Build the functional-test IAM role, user, access key and outputs.

    Extends the basic stacker policy with bucket deletion, object
    deletion, stack policy / event permissions and SNS topic management,
    and additionally creates an assumable role so tests can exercise
    stacker's profile support.
    """
    t = self.template
    bucket_arn = Sub("arn:aws:s3:::${StackerBucket}*")
    objects_arn = Sub("arn:aws:s3:::${StackerBucket}*/*")
    cloudformation_scope = Sub(
        "arn:aws:cloudformation:*:${AWS::AccountId}:"
        "stack/${StackerNamespace}-*")
    sns_scope = Sub("arn:aws:sns:*:${AWS::AccountId}:"
                    "${StackerNamespace}-*")
    changeset_scope = "*"
    # This represents the precise IAM permissions that stacker itself
    # needs.
    stacker_policy = iam.Policy(
        PolicyName="Stacker",
        PolicyDocument=Policy(Statement=[
            # Bucket discovery has to be account-wide.
            Statement(Effect="Allow",
                      Resource=["*"],
                      Action=[awacs.s3.ListAllMyBuckets]),
            # Bucket lifecycle on the stacker artifact bucket.
            Statement(Effect="Allow",
                      Resource=[bucket_arn],
                      Action=[
                          awacs.s3.ListBucket,
                          awacs.s3.GetBucketLocation,
                          awacs.s3.CreateBucket,
                          awacs.s3.DeleteBucket,
                      ]),
            Statement(Effect="Allow",
                      Resource=[bucket_arn],
                      Action=[
                          awacs.s3.GetObject,
                          awacs.s3.GetObjectAcl,
                          awacs.s3.PutObject,
                          awacs.s3.PutObjectAcl,
                      ]),
            # Object deletion is scoped to keys, not the bucket resource.
            Statement(Effect="Allow",
                      Resource=[objects_arn],
                      Action=[
                          awacs.s3.DeleteObject,
                      ]),
            # Changesets cannot be scoped more narrowly than "*".
            Statement(Effect="Allow",
                      Resource=[changeset_scope],
                      Action=[
                          awacs.cloudformation.DescribeChangeSet,
                          awacs.cloudformation.ExecuteChangeSet,
                          awacs.cloudformation.DeleteChangeSet,
                      ]),
            # Never allow the test principals to touch this stack itself.
            Statement(Effect="Deny",
                      Resource=[Ref("AWS::StackId")],
                      Action=[awacs.cloudformation.Action("*")]),
            Statement(
                Effect="Allow",
                Resource=[cloudformation_scope],
                Action=[
                    awacs.cloudformation.GetTemplate,
                    awacs.cloudformation.CreateChangeSet,
                    awacs.cloudformation.DeleteChangeSet,
                    awacs.cloudformation.DeleteStack,
                    awacs.cloudformation.CreateStack,
                    awacs.cloudformation.UpdateStack,
                    awacs.cloudformation.SetStackPolicy,
                    awacs.cloudformation.DescribeStacks,
                    awacs.cloudformation.DescribeStackEvents
                ]),
            # SNS topics under the stacker namespace.
            Statement(Effect="Allow",
                      Resource=[sns_scope],
                      Action=[
                          awacs.sns.CreateTopic,
                          awacs.sns.DeleteTopic,
                          awacs.sns.GetTopicAttributes
                      ])
        ]))
    # Anyone in this account may assume the test role.
    principal = AWSPrincipal(Ref("AWS::AccountId"))
    role = t.add_resource(
        iam.Role("FunctionalTestRole",
                 AssumeRolePolicyDocument=Policy(Statement=[
                     Statement(Effect="Allow",
                               Action=[awacs.sts.AssumeRole],
                               Principal=principal)
                 ]),
                 Policies=[stacker_policy]))
    # Lets the test user assume the role above.
    assumerole_policy = iam.Policy(
        PolicyName="AssumeRole",
        PolicyDocument=Policy(Statement=[
            Statement(Effect="Allow",
                      Resource=[GetAtt(role, "Arn")],
                      Action=[awacs.sts.AssumeRole])
        ]))
    user = t.add_resource(
        iam.User("FunctionalTestUser",
                 Policies=[stacker_policy, assumerole_policy]))
    key = t.add_resource(
        iam.AccessKey("FunctionalTestKey", Serial=1, UserName=Ref(user)))
    t.add_output(Output("User", Value=Ref(user)))
    t.add_output(Output("AccessKeyId", Value=Ref(key)))
    t.add_output(
        Output("SecretAccessKey",
               Value=GetAtt("FunctionalTestKey", "SecretAccessKey")))
    t.add_output(Output("FunctionalTestRole", Value=GetAtt(role, "Arn")))
# IAM user for the API service. NOTE(review): this is a bare expression --
# presumably it is assigned or passed to template.add_resource by the
# surrounding (not visible) code; confirm at the call site.
iam.User(
    'ApiUser',
    UserName=Ref(api_user_name),
    Policies=[
        iam.Policy(
            PolicyName='ApiUserPolicy',
            PolicyDocument={
                'Version': '2012-10-17',
                'Statement': [
                    # Full S3 access to the uploads bucket and its objects.
                    {
                        'Action': 's3:*',
                        'Effect': 'Allow',
                        'Resource': [
                            GetAtt(uploads_bucket, 'Arn'),
                            Join('/', [GetAtt(uploads_bucket, 'Arn'), '*'])
                        ]
                    },
                    # Full access to the default work queue.
                    {
                        'Action': 'sqs:*',
                        'Effect': 'Allow',
                        'Resource': GetAtt(default_queue, 'Arn')
                    },
                    # Full access to the notifications queue.
                    {
                        'Action': 'sqs:*',
                        'Effect': 'Allow',
                        'Resource': GetAtt(notifications_queue, 'Arn')
                    }
                ]
            }
        )
    ]
)
def generate(dry_run, file_location=None):
    """CloudFormation template generator to apply to all accounts which configures log sources to publish to the centralized log target(s) specified"""
    t = Template()
    t.add_version("2010-09-09")
    t.add_description(
        "UCSD Log Source AWS CloudFormation Template - this template is meant to be applied to pre-approved accounts and configures CloudWatch Logs to forward to the UCSD log aggregation process."
    )

    #
    # CloudWatch Logs setup - Set up shipping to 'centralized' account
    #

    # Parameters
    delivery_stream_arn = t.add_parameter(
        Parameter('LogDeliveryDestinationArn',
                  Type="String",
                  Default="",
                  Description="ARN of the Log Destination to send logs to."))
    # resources
    cwl_group_retention = t.add_parameter(
        Parameter("LogGroupRetentionInDays",
                  Type="Number",
                  Description=
                  "Number of days to retain logs in the CloudWatch Log Group",
                  MinValue=1,
                  MaxValue=14,
                  Default=1))
    cwl_group = t.add_resource(
        cwl.LogGroup('SecurityLogShippingGroup',
                     LogGroupName=security_log_shipping_group_name,
                     RetentionInDays=Ref(cwl_group_retention)))
    # Subscription filter that forwards everything (empty pattern) to the
    # central log destination.
    cwl_subscription = t.add_resource(
        cwl.SubscriptionFilter('SecurityLogShippingFilter',
                               DestinationArn=Ref(delivery_stream_arn),
                               LogGroupName=Ref(cwl_group),
                               FilterPattern=""))
    cwl_primary_stream = t.add_resource(
        cwl.LogStream('PrimaryLogStream',
                      LogGroupName=Ref(cwl_group),
                      LogStreamName='PrimaryLogStream'))
    # Create IAM role to allow VPC Flow Logs within this account to push data to CloudWatch Logs per https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html#flow-logs-iam
    vpc_flow_log_iam_role = t.add_resource(
        iam.Role('VPCFlowLogToCWLIAMRole',
                 AssumeRolePolicyDocument=Policy(Statement=[
                     Statement(Effect=Allow,
                               Action=[AssumeRole],
                               Principal=Principal(
                                   "Service", "vpc-flow-logs.amazonaws.com"))
                 ])))
    vpc_flow_log_policies = t.add_resource(
        iam.PolicyType(
            'VPCFlowLogToCWLPolicy',
            PolicyName='vpcflowlogtocwlpolicy20180213',
            Roles=[Ref(vpc_flow_log_iam_role)],
            PolicyDocument=Policy(Statement=[
                Statement(Effect=Allow,
                          Action=[
                              CreateLogGroup, CreateLogStream, PutLogEvents,
                              DescribeLogGroups, DescribeLogStreams
                          ],
                          Resource=["*"])
            ])))

    # outputs
    t.add_output(
        Output(
            'CloudWatchLogGroupName',
            Value=Ref(cwl_group),
            Description=
            "Name of the CloudWatch Log Group created to flow logs to the centralized logging stream."
        ))
    t.add_output(
        Output(
            'CloudWatchLogGroupARN',
            Value=GetAtt(cwl_group, "Arn"),
            Description=
            "ARN of the CloudWatch Log Group created to flow logs to the centralized logging stream."
        ))
    t.add_output(
        Output(
            'VPCFlowLogDeliveryLogsPermissionArn',
            Value=GetAtt(vpc_flow_log_iam_role, "Arn"),
            Description=
            "ARN of the IAM role for VPC Flow Logs to use within this account to ship VPC flow logs through."
        ))

    #
    # CloudTrail setup - ship to S3 in 'central account' as well as cloudtrail logs if it'll let us :)
    #

    # parameters
    ct_is_logging = t.add_parameter(
        Parameter(
            'CloudTrailIsLogging',
            Type="String",
            Default="false",
            AllowedValues=["true", "false"],
            Description=
            "Flag indicating that CloudTrail is configured to send logs."))
    ct_include_global = t.add_parameter(
        Parameter(
            'CloudTrailIncludeGlobal',
            Type="String",
            Default="true",
            AllowedValues=["true", "false"],
            Description=
            "Flag indicating that CloudTrail is configured to capture global service events."
        ))
    ct_multi_region = t.add_parameter(
        Parameter(
            'CloudTrailMultiRegion',
            Type="String",
            Default="true",
            AllowedValues=["true", "false"],
            Description=
            "Flag indicating that CloudTrail is to be configured in multi-region mode"
        ))
    ct_s3_key_prefix = t.add_parameter(
        Parameter('CloudTrailKeyPrefix',
                  Type='String',
                  Default='',
                  Description='Key name prefix for logs being sent to S3'))
    ct_bucket_name = t.add_parameter(
        Parameter(
            'CloudTrailBucketName',
            Type='String',
            Default='',
            Description='Name of the S3 Bucket for delivery of CloudTrail logs'
        ))
    # resources
    ct_trail = t.add_resource(
        ct.Trail("SecurityTrail",
                 TrailName=Join("-", ["SecurityTrail", Region]),
                 S3BucketName=Ref(ct_bucket_name),
                 S3KeyPrefix=Ref(ct_s3_key_prefix),
                 IncludeGlobalServiceEvents=Ref(ct_include_global),
                 IsMultiRegionTrail=Ref(ct_multi_region),
                 IsLogging=Ref(ct_is_logging)))

    # outputs
    t.add_output(
        Output(
            'CloudTrailARN',
            Description=
            "ARN of the CloudTrail Trail configured for this log source deployment.",
            Value=GetAtt(ct_trail, "Arn")))

    # Splunk Addon User and Policies per http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWSpermissions
    addon_user = t.add_resource(
        iam.User('SplunkAddonUser', UserName='******'))
    # NOTE(review): both Splunk policies below are attached to the VPC flow
    # log ROLE (Roles=[Ref(vpc_flow_log_iam_role)]) rather than to the
    # SplunkAddonUser just created -- this looks like a copy/paste mistake
    # (Users=[Ref(addon_user)] was probably intended); confirm before fixing.
    # http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWSpermissions#Configure_CloudTrail_permissions
    ct_splunk_user_policy = t.add_resource(
        iam.PolicyType('cloudtrailSplunkPolicy',
                       PolicyName='cloudtrailsplunkuser20180213',
                       Roles=[Ref(vpc_flow_log_iam_role)],
                       PolicyDocument=Policy(Statement=[
                           Statement(Effect=Allow,
                                     Action=[
                                         asqs.GetQueueAttributes,
                                         asqs.ListQueues, asqs.ReceiveMessage,
                                         asqs.GetQueueUrl, asqs.DeleteMessage,
                                         as3.Action('Get*'),
                                         as3.Action('List*'),
                                         as3.Action('Delete*')
                                     ],
                                     Resource=["*"])
                       ])))
    # http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWSpermissions#Configure_CloudWatch_permissions
    cw_splunk_user_policy = t.add_resource(
        iam.PolicyType('cloudwatchSplunkPolicy',
                       PolicyName='cloudwatchsplunkuser20180213',
                       Roles=[Ref(vpc_flow_log_iam_role)],
                       PolicyDocument=Policy(Statement=[
                           Statement(Effect=Allow,
                                     Action=[
                                         aas.Action("Describe*"),
                                         acw.Action("Describe*"),
                                         acw.Action("Get*"),
                                         acw.Action("List*"),
                                         asns.Action("Get*"),
                                         asns.Action("List*")
                                     ],
                                     Resource=['*'])
                       ])))
    if dry_run:
        print(t.to_json())
    else:
        save_path = file_location if file_location else os.path.join(
            log_aggregation_cf, 'log_sources.json')
        with open(save_path, 'w') as f:
            f.write(t.to_json())
# IAM user for the API: full S3 access to the uploads bucket (and every
# object below it) plus full access to the default SQS queue.
api_user_resource = template.add_resource(
    iam.User(
        'ApiUser',
        UserName=api_user_name_variable,
        Policies=[
            iam.Policy(
                PolicyName='ApiUserPolicy',
                PolicyDocument={
                    'Version': '2012-10-17',
                    'Statement': [
                        # Bucket itself plus all keys underneath it.
                        {
                            'Action': 's3:*',
                            'Effect': 'Allow',
                            'Resource': [
                                GetAtt(uploads_bucket_resource, 'Arn'),
                                Join('/', [GetAtt(uploads_bucket_resource, 'Arn'), '*'])
                            ]
                        },
                        # Default work queue.
                        {
                            'Action': 'sqs:*',
                            'Effect': 'Allow',
                            'Resource': GetAtt(default_queue_resource, 'Arn')
                        }
                    ]
                }
            )
        ]
    )
)
# Backup IAM user with a partition-aware inline S3 policy: the
# IsChinaRegion condition switches between the "aws" and "aws-cn"
# partitions in every resource ARN. NOTE(review): bare expression --
# presumably passed to add_resource by surrounding code; confirm.
iam.User(
    'BackupUser',
    Policies=[
        iam.Policy(
            PolicyName='S3',
            PolicyDocument={
                'Version': '2012-10-17',
                'Statement': [
                    # Bucket discovery across the whole partition.
                    {
                        'Effect': 'Allow',
                        'Action': ['s3:ListAllMyBuckets'],
                        'Resource': Sub(
                            'arn:${PARTITION}:s3:::*',
                            PARTITION=If('IsChinaRegion', 'aws-cn', 'aws'),
                            # NOTE(review): BUCKET_NAME is not referenced by
                            # this pattern -- looks like a copy/paste
                            # leftover; confirm before removing.
                            BUCKET_NAME=Ref(bucket),
                        ),
                    },
                    # Bucket-level operations on the backup bucket.
                    {
                        'Effect': 'Allow',
                        'Action': [
                            's3:ListBucket',
                            's3:ListBucketMultipartUploads',
                            's3:GetBucketLocation',
                        ],
                        'Resource': Sub(
                            'arn:${PARTITION}:s3:::${BUCKET_NAME}',
                            PARTITION=If('IsChinaRegion', 'aws-cn', 'aws'),
                            BUCKET_NAME=Ref(bucket),
                        ),
                    },
                    # Object-level operations, including multipart uploads.
                    {
                        'Effect': 'Allow',
                        'Action': [
                            's3:GetObject',
                            's3:PutObject',
                            's3:DeleteObject',
                            's3:ListMultipartUploadParts',
                            # 's3:CreateMultipartUpload',
                            's3:AbortMultipartUpload',
                        ],
                        'Resource': Sub(
                            'arn:${PARTITION}:s3:::${BUCKET_NAME}/*',
                            PARTITION=If('IsChinaRegion', 'aws-cn', 'aws'),
                            BUCKET_NAME=Ref(bucket),
                        ),
                    },
                ]
            }),
    ]))
MinLength='1')) # ================================================== # Variables. # ================================================== ci_user_name_variable = Join('-', ['ci-frontend', Ref(environment_parameter)]) # ================================================== # Resources. # ================================================== ci_user_resource = template.add_resource( iam.User('CiUser', UserName=ci_user_name_variable, Policies=[ iam.Policy(PolicyName='CiUserPolicy', PolicyDocument={ 'Version': '2012-10-17', 'Statement': [{ 'Action': 'secretsmanager:GetSecretValue', 'Effect': 'Allow', 'Resource': '*' }] }) ])) # ================================================== # Print the generated template in JSON. # ================================================== print(template.to_json())