def create_policy(self):
    """Attach the firehose-write and logs policies to externally supplied
    IAM roles/groups/users; do nothing when no principals are given."""
    fqn = self.context.get_fqn(self.name)
    template = self.template
    variables = self.get_variables()

    # An empty list collapses to AWS::NoValue so CloudFormation drops
    # the corresponding property from the policy resource.
    roles = variables.get("RoleNames") or Ref("AWS::NoValue")
    groups = variables.get("GroupNames") or Ref("AWS::NoValue")
    users = variables.get("UserNames") or Ref("AWS::NoValue")

    has_principals = any([
        variables["RoleNames"],
        variables["GroupNames"],
        variables["UserNames"],
    ])
    if not has_principals:
        return

    template.add_resource(
        iam.PolicyType(
            FIREHOSE_WRITE_POLICY,
            PolicyName='{}-firehose'.format(fqn),
            PolicyDocument=firehose_write_policy(),
            Roles=roles,
            Groups=groups,
            Users=users,
        ),
    )
    template.add_resource(
        iam.PolicyType(
            LOGS_POLICY,
            PolicyName='{}-logs'.format(fqn),
            PolicyDocument=logs_policy(),
            Roles=roles,
            Groups=groups,
            Users=users,
        ),
    )
def create_template(self):
    """Create the S3 buckets named in the "Buckets" variable, emit
    id/ARN/domain-name outputs for each, wire up static-website support
    where configured, and attach read-write / read-only IAM policies
    spanning all buckets when role lists are provided."""
    t = self.template
    variables = self.get_variables()
    # Refs collected here feed the bucket-wide IAM policies below.
    self.bucket_ids = []
    for title, attrs in variables["Buckets"].items():
        bucket_id = Ref(title)
        t.add_resource(s3.Bucket.from_dict(title, attrs))
        t.add_output(Output(title + "BucketId", Value=bucket_id))
        t.add_output(Output(title + "BucketArn", Value=s3_arn(bucket_id)))
        t.add_output(
            Output(title + "BucketDomainName",
                   Value=GetAtt(title, "DomainName")))
        if "WebsiteConfiguration" in attrs:
            # Website buckets additionally get the region->endpoint
            # mapping, a public-read bucket policy, and URL outputs.
            t.add_mapping("WebsiteEndpoints", S3_WEBSITE_ENDPOINTS)
            t.add_resource(
                s3.BucketPolicy(
                    title + "BucketPolicy",
                    Bucket=bucket_id,
                    PolicyDocument=static_website_bucket_policy(bucket_id),
                ))
            t.add_output(
                Output(title + "WebsiteUrl",
                       Value=GetAtt(title, "WebsiteURL")))
            t.add_output(
                Output(title + "WebsiteEndpoint",
                       Value=FindInMap("WebsiteEndpoints", Region,
                                       "endpoint")))
        self.bucket_ids.append(bucket_id)
    read_write_roles = variables["ReadWriteRoles"]
    if read_write_roles:
        t.add_resource(
            iam.PolicyType(
                "ReadWritePolicy",
                PolicyName=Sub("${AWS::StackName}ReadWritePolicy"),
                PolicyDocument=read_write_s3_bucket_policy(
                    self.bucket_ids),
                Roles=read_write_roles,
            ))
    read_only_roles = variables["ReadRoles"]
    if read_only_roles:
        t.add_resource(
            iam.PolicyType(
                "ReadPolicy",
                PolicyName=Sub("${AWS::StackName}ReadPolicy"),
                PolicyDocument=read_only_s3_bucket_policy(self.bucket_ids),
                Roles=read_only_roles,
            ))
def create_policy(self):
    """Template-condition variant of create_policy: the role/group/user
    lists arrive as CloudFormation parameters, so emptiness is tested at
    deploy time via conditions rather than in Python."""
    ns = self.context.namespace
    name_prefix = "%s-%s" % (ns, self.name)
    t = self.template
    # A list parameter is "empty" when joining it with "," yields "".
    t.add_condition(
        'ExternalRoles',
        Not(Equals(Join(",", Ref('RoleNames')), '')),
    )
    t.add_condition(
        'ExternalGroups',
        Not(Equals(Join(",", Ref('GroupNames')), '')),
    )
    t.add_condition(
        'ExternalUsers',
        Not(Equals(Join(",", Ref('UserNames')), '')),
    )
    # The policies are created only when at least one principal list is
    # non-empty.
    t.add_condition(
        'CreatePolicy',
        Or(
            TropoCondition("ExternalRoles"),
            TropoCondition("ExternalGroups"),
            TropoCondition("ExternalUsers"),
        ))
    t.add_resource(
        iam.PolicyType(
            FIREHOSE_WRITE_POLICY,
            PolicyName='{}-firehose'.format(name_prefix),
            PolicyDocument=firehose_write_policy(),
            # Empty lists collapse to AWS::NoValue so CFN drops the
            # property instead of attaching to nothing.
            Roles=If("ExternalRoles", Ref("RoleNames"),
                     Ref("AWS::NoValue")),
            Groups=If("ExternalGroups", Ref("GroupNames"),
                      Ref("AWS::NoValue")),
            Users=If("ExternalUsers", Ref("UserNames"),
                     Ref("AWS::NoValue")),
            Condition='CreatePolicy',
        ),
    )
    t.add_resource(
        iam.PolicyType(
            LOGS_POLICY,
            PolicyName='{}-logs'.format(name_prefix),
            PolicyDocument=logs_policy(),
            Roles=If("ExternalRoles", Ref("RoleNames"),
                     Ref("AWS::NoValue")),
            Groups=If("ExternalGroups", Ref("GroupNames"),
                      Ref("AWS::NoValue")),
            Users=If("ExternalUsers", Ref("UserNames"),
                     Ref("AWS::NoValue")),
            Condition='CreatePolicy',
        ),
    )
def create_policy(self, name=None):
    """Create an inline IAM policy from the generated statements and
    attach it to ``self.roles``.

    :param name: optional prefix distinguishing multiple policies in
        the same stack (logical name, physical name, and output title
        are all prefixed with it).
    :returns: the created policy resource, or None when there are no
        statements to attach.

    Bug fix: the output title was always "PolicyName" regardless of
    *name*, so calling this twice with different names produced two
    outputs with the same title — a duplicate-key collision in the
    template. The title is now prefixed like the logical name (matching
    the sibling create_policy(name) implementation).
    """
    statements = self.generate_policy_statements()
    if not statements:
        return
    t = self.template
    if name:
        logical_name = "{}Policy".format(name)
        policy_name = Sub("${AWS::StackName}-${Name}-policy", Name=name)
        output_name = "{}PolicyName".format(name)
    else:
        # Backward compatible: unnamed call keeps the original titles.
        logical_name = "Policy"
        policy_name = Sub("${AWS::StackName}-policy")
        output_name = "PolicyName"
    policy = t.add_resource(
        iam.PolicyType(
            logical_name,
            PolicyName=policy_name,
            PolicyDocument=Policy(Statement=statements, ),
            Roles=[Ref(role) for role in self.roles],
        ))
    t.add_output(Output(output_name, Value=Ref(policy)))
    self.policies.append(policy)
    return policy
def add_resources(self):
    """Create template (main function called by Stacker)."""
    template = self.template
    variables = self.get_variables()

    # Single statement granting read access to the configured bucket.
    statements = [
        Statement(
            Sid='AllowReadAccessToS3Bucket',
            Effect=Allow,
            Action=[
                awacs.s3.GetObject,
                awacs.s3.ListBucket
            ],
            Resource=[variables['S3BucketArn'].ref],
        ),
    ]

    # Inline policy attached to the supplied role; name embeds the
    # bucket name for readability in the console.
    template.add_resource(
        iam.PolicyType(
            'IamInlinePolicy',
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=statements,
            ),
            PolicyName=Join('', [
                'S3AccessToBucket-',
                variables['S3BucketName'].ref
            ]),
            Roles=[variables['IamRoleName'].ref]
        )
    )
def _generate_splunk_policy(policy_name='splunkAllAccessPolicy', roles=None, users=None):
    """Helper method to encapsulate the complexity of generating the 'all-in-one'
    policy document for Splunk AWS Plugin per
    http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWSpermissions#Configure_one_policy_containing_permissions_for_all_inputs

    :param policy_name: logical resource name; also prefixes the
        physical PolicyName (suffixed with a date stamp).
    :param roles: IAM roles to attach to (defaults to none).
    :param users: IAM users to attach to (defaults to none).

    Bug fix: ``roles=[]`` / ``users=[]`` were mutable default arguments
    shared across calls; replaced with ``None`` sentinels that yield a
    fresh empty list per call.
    """
    return iam.PolicyType(
        policy_name,
        PolicyName="%s20180224" % policy_name,
        Roles=[] if roles is None else roles,
        Users=[] if users is None else users,
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    # Read-mostly access across every input type the
                    # Splunk add-on supports (SQS, S3, Config, IAM,
                    # CloudWatch, SNS, logs, EC2, Lambda, RDS,
                    # CloudFront, ELB, Inspector, Kinesis, KMS, STS).
                    Action=[asqs.GetQueueAttributes,
                            asqs.ListQueues,
                            asqs.ReceiveMessage,
                            asqs.GetQueueUrl,
                            asqs.SendMessage,
                            asqs.DeleteMessage,
                            as3.ListBucket,
                            as3.GetObject,
                            as3.GetBucketLocation,
                            as3.ListAllMyBuckets,
                            as3.GetBucketTagging,
                            as3.GetAccelerateConfiguration,
                            as3.GetBucketLogging,
                            as3.GetLifecycleConfiguration,
                            as3.GetBucketCORS,
                            aconfig.DeliverConfigSnapshot,
                            aconfig.DescribeConfigRules,
                            aconfig.DescribeConfigRuleEvaluationStatus,
                            aconfig.GetComplianceDetailsByConfigRule,
                            aconfig.GetComplianceSummaryByConfigRule,
                            aiam.GetUser,
                            aiam.ListUsers,
                            aiam.GetAccountPasswordPolicy,
                            aiam.ListAccessKeys,
                            aiam.GetAccessKeyLastUsed,
                            aas.Action('Describe*'),
                            acw.Action('Describe*'),
                            acw.Action('Get*'),
                            acw.Action('List*'),
                            asns.Action('Get*'),
                            asns.Action('List*'),
                            asns.Publish,
                            alogs.DescribeLogGroups,
                            alogs.DescribeLogStreams,
                            alogs.GetLogEvents,
                            aec2.DescribeInstances,
                            aec2.DescribeReservedInstances,
                            aec2.DescribeSnapshots,
                            aec2.DescribeRegions,
                            aec2.DescribeKeyPairs,
                            aec2.DescribeNetworkAcls,
                            aec2.DescribeSecurityGroups,
                            aec2.DescribeSubnets,
                            aec2.DescribeVolumes,
                            aec2.DescribeVpcs,
                            aec2.DescribeImages,
                            aec2.DescribeAddresses,
                            al.ListFunctions,
                            ards.DescribeDBInstances,
                            acf.ListDistributions,
                            aelb.DescribeLoadBalancers,
                            aelb.DescribeInstanceHealth,
                            aelb.DescribeTags,
                            aelb.DescribeTargetGroups,
                            aelb.DescribeTargetHealth,
                            aelb.DescribeListeners,
                            ainspector.Action('Describe*'),
                            ainspector.Action('List*'),
                            akinesis.Action('Get*'),
                            akinesis.DescribeStream,
                            akinesis.ListStreams,
                            akms.Decrypt,
                            asts.AssumeRole],
                    Resource=["*"])]))
def attach_ssm_policy(self, myrole):
    """Attach an inline SSM policy to *myrole*: DescribeParameters on
    everything, GetParameters only on parameters under this stack's
    prefix and under the shared "globals" prefix.

    :param myrole: logical name of the IAM role to attach to.
    :returns: the added AWS::IAM::Policy resource.
    """
    return super(NCTemplate, self).add_resource(
        iam.PolicyType(
            'policyssm',
            PolicyName='ssmpolicy',
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=[
                    # Listing parameter metadata cannot be scoped, so "*".
                    Statement(Effect=Allow,
                              Action=[Action("ssm", "DescribeParameters")],
                              Resource=["*"]),
                    Statement(
                        Effect=Allow,
                        Action=[Action("ssm", "GetParameters")],
                        Resource=[
                            # arn:aws:ssm:<region>:<account>:parameter/<stack>/*
                            Join("", [
                                "arn:aws:ssm", ":",
                                Ref("AWS::Region"), ":",
                                Ref("AWS::AccountId"), ":",
                                "parameter/",
                                Ref("AWS::StackName"), "/*"
                            ]),
                            # arn:aws:ssm:<region>:<account>:parameter/globals/*
                            Join("", [
                                "arn:aws:ssm", ":",
                                Ref("AWS::Region"), ":",
                                Ref("AWS::AccountId"), ":",
                                "parameter/",
                                "globals", "/*"
                            ]),
                        ]),
                ]),
            Roles=[Ref(myrole)]))
def create_template(self):
    """Create a private S3 bucket plus an inline IAM policy granting the
    stack's roles get/list/put access to the bucket and its objects.

    Bug fix: the object ARN was previously built with ``os.path.join``,
    which uses the platform path separator — on Windows that produced
    "arn:aws:s3:::bucket\\*". ARNs always use "/".
    """
    bucket_name = self.local_parameters['BucketName']
    self.template.add_resource(s3.Bucket(
        BUCKET,
        BucketName=bucket_name,
    ),
    )
    statements = [
        Statement(
            Effect=Allow,
            Action=[
                awacs.s3.GetObject,
                awacs.s3.ListBucket,
                awacs.s3.PutObject,
            ],
            Resource=[
                # Bucket itself (needed for ListBucket) and all objects.
                awacs.s3.ARN(bucket_name),
                awacs.s3.ARN(bucket_name + '/*'),
            ],
        ),
    ]
    self.template.add_resource(
        iam.PolicyType(
            BUCKET_POLICY,
            PolicyName='PrivateBucketPolicy',
            PolicyDocument=Policy(Statement=statements, ),
            Roles=Ref('Roles'),
        ),
    )
def create_policy(self):
    """Attach the bot's firehose-write and logs policies to the stack's
    'Role' resource."""
    template = self.template
    namespace = self.context.namespace

    # Both policies attach to the same role; only name/document differ.
    for logical_id, suffix, document in (
            (FIREHOSE_WRITE_POLICY, 'firehose', firehose_write_policy()),
            (LOGS_POLICY, 'logs', logs_policy())):
        template.add_resource(
            iam.PolicyType(
                logical_id,
                PolicyName='{}-bot-{}'.format(namespace, suffix),
                PolicyDocument=document,
                Roles=[Ref('Role')],
            ),
        )
def create_policy(self):
    """Attach the CloudFront role policy to the stack's 'Role' resource."""
    template = self.template
    namespace = self.context.namespace
    policy = iam.PolicyType(
        ROLE_POLICY,
        PolicyName='{}-cloudfront'.format(namespace),
        PolicyDocument=self._role_policy(),
        Roles=[Ref('Role')],
    )
    template.add_resource(policy)
def handle(self, chain_context):
    """Add this step's inline policy to the chain's template, attached
    to the shared EC2 role."""
    policy = iam.PolicyType(
        self.name,
        PolicyName=self.name,
        PolicyDocument=self.policy,
        Roles=[Ref(EC2_ROLE_NAME)],
    )
    chain_context.template.add_resource(policy)
def create_template(self):
    """Create the Kinesis streams from the "Streams" variable, export
    their ids and ARNs as outputs, and attach read-write / read-only
    IAM policies spanning all streams when role lists are provided."""
    t = self.template
    variables = self.get_variables()
    streams = variables["Streams"]
    stream_ids = []
    for stream in streams:
        s = t.add_resource(stream)
        t.add_output(Output("%sStreamId" % stream.title, Value=s.Ref()))
        t.add_output(Output("%sStreamArn" % stream.title,
                            Value=s.GetAtt("Arn")))
        stream_ids.append(s.Ref())
    # The policies below cover every stream in the stack at once.
    stream_arns = [kinesis_stream_arn(stream) for stream in stream_ids]
    read_write_roles = variables["ReadWriteRoles"]
    if read_write_roles:
        t.add_resource(
            iam.PolicyType(
                "ReadWritePolicy",
                PolicyName=Sub("${AWS::StackName}ReadWritePolicy"),
                PolicyDocument=read_write_kinesis_stream_policy(
                    stream_arns
                ),
                Roles=read_write_roles,
            )
        )
    read_only_roles = variables["ReadRoles"]
    if read_only_roles:
        t.add_resource(
            iam.PolicyType(
                "ReadPolicy",
                PolicyName=Sub("${AWS::StackName}ReadPolicy"),
                PolicyDocument=read_only_kinesis_stream_policy(
                    stream_arns
                ),
                Roles=read_only_roles,
            )
        )
def create_template(self):
    """Create the S3 buckets from the "Buckets" variable with id/ARN/
    domain-name outputs, and attach read-write / read-only IAM policies
    over all buckets when role lists are provided.

    Unlike the sibling website-enabled variant, website buckets here
    only get a WebsiteUrl output (no bucket policy or endpoint map)."""
    t = self.template
    variables = self.get_variables()
    # Policy names are prefixed with the stack's fully-qualified name.
    policy_prefix = self.context.get_fqn(self.name)
    bucket_ids = []
    for title, attrs in variables["Buckets"].items():
        t.add_resource(s3.Bucket.from_dict(title, attrs))
        t.add_output(Output(title + "BucketId", Value=Ref(title)))
        t.add_output(Output(title + "BucketArn", Value=s3_arn(Ref(title))))
        t.add_output(
            Output(title + "BucketDomainName",
                   Value=GetAtt(title, "DomainName")))
        if "WebsiteConfiguration" in attrs:
            t.add_output(
                Output(title + "WebsiteUrl",
                       Value=GetAtt(title, "WebsiteURL")))
        bucket_ids.append(Ref(title))
    read_write_roles = variables["ReadWriteRoles"]
    if read_write_roles:
        t.add_resource(
            iam.PolicyType(
                "ReadWritePolicy",
                PolicyName=policy_prefix + "ReadWritePolicy",
                PolicyDocument=read_write_s3_bucket_policy(bucket_ids),
                Roles=read_write_roles,
            ))
    read_only_roles = variables["ReadRoles"]
    if read_only_roles:
        t.add_resource(
            iam.PolicyType(
                "ReadPolicy",
                PolicyName=policy_prefix + "ReadPolicy",
                PolicyDocument=read_only_s3_bucket_policy(bucket_ids),
                Roles=read_only_roles,
            ))
def create_task_execution_role_policy(self):
    """Create the inline policy for the ECS task execution role and
    attach it.

    Bug fix: the physical policy name was misspelled
    "task-exeuction-role-policy"; corrected to "task-execution". The
    logical resource name is unchanged, so CloudFormation updates the
    same resource in place.
    """
    t = self.template
    policy_name = Sub("${AWS::StackName}-task-execution-role-policy")
    self.task_execution_role_policy = t.add_resource(
        iam.PolicyType(
            "TaskExecutionRolePolicy",
            PolicyName=policy_name,
            PolicyDocument=self.generate_task_execution_policy(),
            Roles=[self.task_execution_role.Ref()],
        ))
def bucket_access_policy(policy_name, role_name, bucket_name):
    """Build an inline IAM policy granting *role_name* read access to
    every object in *bucket_name*.

    :param policy_name: logical resource name for the AWS::IAM::Policy.
    :param role_name: logical name of the role the policy attaches to.
    :param bucket_name: plain S3 bucket name (no "arn:" prefix).
    :returns: an un-added iam.PolicyType resource.

    Cleanup: removed dead commented-out awacs.aws.Policy line.
    """
    return iam.PolicyType(
        policy_name,
        PolicyName="S3Download",
        PolicyDocument={
            "Statement": [{
                # NOTE(review): "kms:decrypt" scoped to an S3 object ARN
                # has no effect — KMS actions must target a key ARN.
                # Confirm whether decrypting SSE-KMS objects works here.
                "Action": ["s3:GetObject", "kms:decrypt"],
                "Effect": "Allow",
                "Resource": "arn:aws:s3:::{0}/*".format(bucket_name)
            }]
        },
        Roles=[Ref(role_name)])
def create_policy(self):
    """Build the stack policy from the generated statements, attach it
    to the role, and export its physical name as an output."""
    template = self.template
    statements = self.generate_policy_statements()
    self.policy = template.add_resource(
        iam.PolicyType(
            "Policy",
            PolicyName=Sub("${AWS::StackName}-policy"),
            PolicyDocument=Policy(Statement=statements),
            Roles=[self.role.Ref()],
        ))
    template.add_output(Output("PolicyName", Value=Ref(self.policy)))
def create_roles_policy(self):
    """Grant the configured roles the allowed actions on every path
    under the Elasticsearch domain's ARN."""
    template = self.template
    variables = self.get_variables()
    # <DomainArn>/* — covers all indices/paths within the domain.
    domain_wildcard = Join("/", [GetAtt(ES_DOMAIN, "DomainArn"), "*"])
    access_statement = Statement(
        Effect=Allow,
        Action=self.get_allowed_actions(),
        Resource=[domain_wildcard])
    template.add_resource(
        iam.PolicyType(POLICY_NAME,
                       PolicyName=POLICY_NAME,
                       PolicyDocument=Policy(Statement=[access_statement]),
                       Roles=variables["Roles"]))
def _create_code_build_role(self):
    """Create the CodeBuild service role plus its inline policy
    (CloudWatch Logs write and S3 artifact get/put) and return the role."""
    code_build_role = iam.Role(
        'CodeBuildRole',
        AssumeRolePolicyDocument=Policy(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[
                        _sts.AssumeRole,
                    ],
                    # Only the CodeBuild service may assume this role.
                    Principal=Principal(
                        'Service', 'codebuild.amazonaws.com'
                    )
                )
            ]
        )
    )
    self._t.add_resource(code_build_role)
    code_build_policy = iam.PolicyType(
        'CodeBuildPolicy',
        PolicyName='CodeBuildPolicy',
        PolicyDocument=Policy(
            Version='2012-10-17',
            Statement=[
                # Write build logs to CloudWatch Logs.
                Statement(
                    Action=[
                        _logs.CreateLogGroup,
                        _logs.CreateLogStream,
                        _logs.PutLogEvents,
                    ],
                    Effect=Allow,
                    Resource=['*'],
                ),
                # Fetch sources / write artifacts (any bucket).
                Statement(
                    Effect=Allow,
                    Action=[
                        _s3.GetObject,
                        _s3.GetObjectVersion,
                        _s3.PutObject,
                    ],
                    Resource=[_s3.ARN('*')],
                ),
            ]
        ),
        Roles=[Ref(code_build_role)],
    )
    self._t.add_resource(code_build_policy)
    return code_build_role
def attach_ssm_key_policy(self, myrole):
    """Attach an inline policy to *myrole* allowing kms:Decrypt with the
    KMS key exported by the EncryptLambdaStack stack.

    :param myrole: logical name of the IAM role to attach to.
    :returns: the added AWS::IAM::Policy resource.
    """
    return super(NCTemplate, self).add_resource(
        iam.PolicyType(
            'policyssmkey',
            PolicyName='ssmkeypolicy',
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[Action("kms", "Decrypt")],
                        Resource=[
                            # Cross-stack export "<EncryptLambdaStack>-KmsKeyArn".
                            ImportValue(
                                Sub("${EncryptLambdaStack}-KmsKeyArn"))
                        ])
                ]),
            Roles=[Ref(myrole)]))
def create_policy(self):
    """Build the stack's inline policy from the generated statements,
    attach it to self.role, and export its physical name."""
    template = self.template
    fqn = self.context.get_fqn(self.name)
    policy = iam.PolicyType(
        "Policy",
        PolicyName="%s-policy" % fqn,
        PolicyDocument=Policy(
            Statement=self.generate_policy_statements()
        ),
        Roles=[Ref(self.role)],
    )
    self.policy = template.add_resource(policy)
    template.add_output(Output("PolicyName", Value=Ref(self.policy)))
def create_policy(self):
    """Attach a policy to 'Role' permitting lambda:InvokeFunction on
    any function (Resource=Everybody)."""
    template = self.template
    namespace = self.context.namespace
    policy_name = 'ServerlessPolicy'

    invoke_statement = Statement(
        Effect=Allow,
        Action=[awacs.awslambda.InvokeFunction],
        Resource=[Everybody],
    )
    template.add_resource(
        iam.PolicyType(
            policy_name,
            PolicyName='{}-{}'.format(namespace, policy_name),
            PolicyDocument=Policy(Statement=[invoke_statement]),
            Roles=[Ref('Role')],
        ))
def add_resources(self):
    """Create template (main function called by Stacker).

    Builds one inline IAM policy (from the PolicyName variable) with two
    statements — S3 bucket read access and list/pass/assume on the
    CloudFormation service role — attached to the Roles variable."""
    template = self.template
    variables = self.get_variables()

    # Resources
    # build the CFN template with the specified permissions
    iam_policy_statements = []
    # NOTE(review): this Sid also grants iam:ListRoles/PassRole and
    # sts:AssumeRole against the bucket ARN — confirm that is intended.
    iam_policy_statements.append(
        Statement(Sid='AllowReadAccessToS3Bucket',
                  Action=[
                      Action('iam', 'ListRoles'),
                      Action('iam', 'PassRole'),
                      Action('sts', 'AssumeRole'),
                      Action('s3', 'GetObject'),
                      Action('s3', 'ListBucket'),
                  ],
                  Effect=Allow,
                  Resource=[variables['S3BucketArn'].ref]))
    # allow Admin role to list, pass and assume Cloudformation service role
    iam_policy_statements.append(
        Statement(Sid='AllowListPassAssumeToRole',
                  Action=[
                      Action('iam', 'ListRoles'),
                      Action('iam', 'PassRole'),
                      Action('sts', 'AssumeRole'),
                  ],
                  Effect=Allow,
                  Resource=[variables['AssumeRoleArn'].ref]))
    template.add_resource(
        iam.PolicyType(
            'IamPolicy',
            PolicyName=variables['PolicyName'].ref,
            PolicyDocument=Policy(
                Version='2012-10-17',
                Statement=iam_policy_statements,
            ),
            # pick one of the 3 below (Groups, Roles or Users)
            #Groups=variables['Groups'].ref
            Roles=variables['Roles'].ref
            #Users=variables['Users'].ref
        ))
def create_policy(self, name):
    """Create the *name*-prefixed inline policy from generated
    statements, attach it to self.roles, export its physical name, and
    record it in self.policies. No-op when there are no statements."""
    statements = self.generate_policy_statements()
    if not statements:
        return
    template = self.template
    fqn = self.context.get_fqn(self.name)
    policy = template.add_resource(
        iam.PolicyType(
            "{}Policy".format(name),
            PolicyName="{}-{}-policy".format(fqn, name),
            PolicyDocument=Policy(
                Statement=statements,
            ),
            Roles=[Ref(role) for role in self.roles],
        )
    )
    template.add_output(Output(name + "PolicyName", Value=Ref(policy)))
    self.policies.append(policy)
def create_policy(t, domain):
    """Add an inline policy to *t* granting the 'Role' resource HTTP
    get/head/post/delete access to the Elasticsearch domain.

    :param t: troposphere Template the policy is added to.
    :param domain: unused — the domain is referenced via the hard-coded
        'Domain' logical name. NOTE(review): either use this parameter
        or drop it; confirm with callers.
    """
    policy_name = 'ESDomainReadAccess'
    statements = [
        Statement(
            Effect=Allow,
            Action=[
                awacs.es.Action('HttpGet'),
                awacs.es.Action('HttpHead'),
                awacs.es.Action('HttpPost'),
                awacs.es.Action('HttpDelete'),
            ],
            # Every path under the domain: <DomainArn>/*
            Resource=[Join('/', [GetAtt('Domain', 'DomainArn'), '*'])],
        ),
    ]
    t.add_resource(
        iam.PolicyType(
            policy_name,
            PolicyName=policy_name,
            PolicyDocument=Policy(Statement=statements),
            Roles=[Ref('Role')],
        ),
    )
iam.PolicyType(
    "CodeBuildServiceRolePolicy",
    PolicyName="CodeBuildServiceRolePolicy",
    # Raw JSON policy document (not awacs): CloudWatch Logs write,
    # CodeCommit pull, S3 artifact get/put, and ECR access scoped to
    # the stack's repository (token acquisition must be "*").
    PolicyDocument={
        "Version": "2012-10-17",
        "Statement": [
            {
                "Sid": "CloudWatchLogsPolicy",
                "Effect": "Allow",
                "Action": [
                    "logs:CreateLogGroup",
                    "logs:CreateLogStream",
                    "logs:PutLogEvents"
                ],
                "Resource": ["*"]
            },
            {
                "Sid": "CodeCommitPolicy",
                "Effect": "Allow",
                "Action": ["codecommit:GitPull"],
                "Resource": ["*"]
            },
            {
                "Sid": "S3GetObjectPolicy",
                "Effect": "Allow",
                "Action": ["s3:GetObject", "s3:GetObjectVersion"],
                "Resource": ["*"]
            },
            {
                "Sid": "S3PutObjectPolicy",
                "Effect": "Allow",
                "Action": ["s3:PutObject"],
                "Resource": ["*"]
            },
            {
                'Action': ['ecr:GetAuthorizationToken'],
                'Resource': ['*'],
                'Effect': 'Allow'
            },
            {
                # arn:aws:ecr:<region>:<account>:repository/<Repository>
                'Action': ['ecr:*'],
                'Resource': [
                    Join("", [
                        "arn:aws:ecr:",
                        Ref(AWS_REGION), ":",
                        Ref(AWS_ACCOUNT_ID),
                        ":repository/",
                        Ref(Repository)
                    ]),
                ],
                'Effect': 'Allow'
            },
        ]
    },
    Roles=[Ref(ServiceRole)],
))
def generate(account_list=None, region_list=None, file_location=None, output_keys=False, dry_run=False):
    """CloudFormation template generator for use in creating the resources
    required to capture logs in a centrally managed account per UCSD
    standards.

    :param account_list: child account IDs allowed to deliver logs.
    :param region_list: regions to support (defaults to the four US ones).
    :param file_location: where to write the JSON template (defaults to
        log_targets.json under log_aggregation_cf).
    :param output_keys: also create an access key for the Splunk user and
        output its credentials.
    :param dry_run: print the template instead of writing it.
    """
    # Click passes multi-value options as tuples; normalize to a list.
    if type(account_list) == tuple:
        account_list = list(account_list)
    parameter_groups = []
    region_list = region_list if region_list else ['us-west-1', 'us-west-2', 'us-east-1', 'us-east-2']
    t = Template()
    t.add_version("2010-09-09")
    t.add_description("UCSD Log Target AWS CloudFormation Template - this CFn template configures a given account to receive logs from other accounts so as to aggregate and then optionally forward those logs on to the UCSD Splunk installation.")

    # Create Kinesis and IAM Roles
    log_stream_shard_count = t.add_parameter(Parameter(
        "LogStreamShardCount",
        Description="Number of shards to create within the AWS Kinesis stream created to handle CloudWatch Logs.",
        Type="Number",
        MinValue=1,
        MaxValue=64,
        Default=1))
    log_stream_retention_period = t.add_parameter(Parameter(
        "LogStreamRetentionPeriod",
        Description="Number of hours to retain logs in the Kinesis stream.",
        Type="Number",
        MinValue=24,
        MaxValue=120,
        Default=24))
    parameter_groups.append({'Label': {'default': 'Log Stream Inputs'},
                             'Parameters': [log_stream_shard_count.name, log_stream_retention_period.name]})
    log_stream = t.add_resource(k.Stream(
        "LogStream",
        RetentionPeriodHours=Ref(log_stream_retention_period),
        ShardCount=Ref(log_stream_shard_count)))
    firehose_bucket = t.add_resource(s3.Bucket('LogS3DeliveryBucket'))
    # Role assumed by Firehose to deliver into the S3 bucket; the
    # ExternalId condition restricts it to this account.
    firehose_delivery_role = t.add_resource(iam.Role(
        'LogS3DeliveryRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[AssumeRole],
                Principal=Principal('Service', 'firehose.amazonaws.com'),
                Condition=Condition(StringEquals('sts:ExternalId', AccountId)))])))
    log_s3_delivery_policy = t.add_resource(iam.PolicyType(
        'LogS3DeliveryPolicy',
        Roles=[Ref(firehose_delivery_role)],
        PolicyName='LogS3DeliveryPolicy',
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[as3.AbortMultipartUpload,
                            as3.GetBucketLocation,
                            as3.GetObject,
                            as3.ListBucket,
                            as3.ListBucketMultipartUploads,
                            as3.PutObject],
                    Resource=[
                        Join('', ['arn:aws:s3:::', Ref(firehose_bucket)]),
                        Join('', ['arn:aws:s3:::', Ref(firehose_bucket), '*'])]),
                Statement(
                    Effect=Allow,
                    Action=[akinesis.Action('Get*'), akinesis.DescribeStream, akinesis.ListStreams],
                    Resource=[
                        GetAtt(log_stream, 'Arn')
                    ])])))
    # Firehose: Kinesis stream -> S3 (buffered, uncompressed).
    s3_firehose = t.add_resource(fh.DeliveryStream(
        'LogToS3DeliveryStream',
        DependsOn=[log_s3_delivery_policy.name],
        DeliveryStreamName='LogToS3DeliveryStream',
        DeliveryStreamType='KinesisStreamAsSource',
        KinesisStreamSourceConfiguration=fh.KinesisStreamSourceConfiguration(
            KinesisStreamARN=GetAtt(log_stream, 'Arn'),
            RoleARN=GetAtt(firehose_delivery_role, 'Arn')
        ),
        S3DestinationConfiguration=fh.S3DestinationConfiguration(
            BucketARN=GetAtt(firehose_bucket, 'Arn'),
            BufferingHints=fh.BufferingHints(
                IntervalInSeconds=300,
                SizeInMBs=50
            ),
            CompressionFormat='UNCOMPRESSED',
            Prefix='firehose/',
            RoleARN=GetAtt(firehose_delivery_role, 'Arn'),
        )))
    t.add_output(Output(
        'SplunkKinesisLogStream',
        Value=GetAtt(log_stream, 'Arn'),
        Description='ARN of the kinesis stream for log aggregation.'))

    # Generate Bucket with Lifecycle Policies
    ct_s3_key_prefix = t.add_parameter(Parameter(
        'CloudTrailKeyPrefix',
        Type='String',
        Default='',
        Description='Key name prefix for logs being sent to S3'))
    bucket_name = t.add_parameter(Parameter(
        "BucketName",
        Description="Name to assign to the central logging retention bucket",
        Type="String",
        AllowedPattern="([a-z]|[0-9])+",
        MinLength=2,
        MaxLength=64))
    glacier_migration_days = t.add_parameter(Parameter(
        "LogMoveToGlacierInDays",
        Description="Number of days until logs are expired from S3 and transitioned to Glacier",
        Type="Number",
        Default=365))
    glacier_deletion_days = t.add_parameter(Parameter(
        "LogDeleteFromGlacierInDays",
        Description="Number of days until logs are expired from Glacier and deleted",
        Type="Number",
        Default=365*7))
    parameter_groups.append({'Label': {'default': 'S3 Log Destination Parameters'},
                             'Parameters': [bucket_name.name, ct_s3_key_prefix.name, glacier_migration_days.name, glacier_deletion_days.name]})
    dead_letter_queue = t.add_resource(sqs.Queue('deadLetterQueue'))
    queue = t.add_resource(sqs.Queue(
        's3DeliveryQueue',
        MessageRetentionPeriod=14*24*60*60,  # 14 d * 24 h * 60 m * 60 s
        # 5 m * 60 s per Splunk docs here: http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWS#Configure_SQS
        VisibilityTimeout=5*60,
        RedrivePolicy=sqs.RedrivePolicy(
            deadLetterTargetArn=GetAtt(dead_letter_queue, 'Arn'),
            maxReceiveCount=10
        )))
    t.add_output(Output('SplunkS3Queue',
                        Value=GetAtt(queue, 'Arn'),
                        Description='Queue for Splunk SQS S3 ingest'))
    t.add_output(Output('SplunkS3DeadLetterQueue',
                        Value=GetAtt(dead_letter_queue, 'Arn'),
                        Description="Dead letter queue for Splunk SQS S3 ingest"))
    # Let S3 (any account/region, restricted by SourceArn) publish
    # object-created notifications to the delivery queue.
    t.add_resource(sqs.QueuePolicy(
        's3DeliveryQueuePolicy',
        PolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal("AWS", "*"),
                Action=[asqs.SendMessage],
                Resource=[GetAtt(queue, 'Arn')],
                Condition=Condition(ArnLike("aws:SourceArn", Join('', ["arn:aws:s3:*:*:", Ref(bucket_name)]))))]),
        Queues=[Ref(queue)]))
    bucket = t.add_resource(s3.Bucket(
        "LogDeliveryBucket",
        DependsOn=[log_stream.name, queue.name],
        BucketName=Ref(bucket_name),
        AccessControl="LogDeliveryWrite",
        NotificationConfiguration=s3.NotificationConfiguration(
            QueueConfigurations=[s3.QueueConfigurations(
                Event="s3:ObjectCreated:*",
                Queue=GetAtt(queue, 'Arn'))]),
        LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
            s3.LifecycleRule(
                Id="S3ToGlacierTransition",
                Status="Enabled",
                ExpirationInDays=Ref(glacier_deletion_days),
                Transition=s3.LifecycleRuleTransition(
                    StorageClass="Glacier",
                    TransitionInDays=Ref(glacier_migration_days)))])))
    # Grant CloudTrail from each child account write access to its own
    # /AWSLogs/<account>/ prefix.
    bucket_policy = t.add_resource(s3.BucketPolicy(
        "LogDeliveryBucketPolicy",
        Bucket=Ref(bucket),
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect="Allow",
                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                    Action=[GetBucketAcl],
                    Resource=[GetAtt(bucket, 'Arn')]),
                Statement(
                    Effect="Allow",
                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                    Action=[PutObject],
                    Condition=Condition(StringEquals({"s3:x-amz-acl": "bucket-owner-full-control"})),
                    Resource=[Join('', [GetAtt(bucket, "Arn"), Ref(ct_s3_key_prefix), "/AWSLogs/", acct_id, "/*"]) for acct_id in account_list])])))
    splunk_sqs_s3_user = t.add_resource(iam.User(
        'splunkS3SQSUser',
        Path='/',
        UserName='******'))
    splunk_user_policy = t.add_resource(_generate_splunk_policy(users=[Ref(splunk_sqs_s3_user)]))
    t.add_output(Output(
        'BucketName',
        Description="Name of the bucket for CloudTrail log delivery",
        Value=Ref(bucket)))

    # Log destination setup
    cwl_to_kinesis_role = t.add_resource(iam.Role(
        'CWLtoKinesisRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[AssumeRole],
                Principal=Principal("Service", Join('', ["logs.", Region, ".amazonaws.com"])))])))
    cwl_to_kinesis_policy_link = t.add_resource(iam.PolicyType(
        'CWLtoKinesisPolicy',
        PolicyName='CWLtoKinesisPolicy',
        Roles=[Ref(cwl_to_kinesis_role)],
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Resource=[GetAtt(log_stream, 'Arn')],
                    Action=[akinesis.PutRecord]),
                Statement(
                    Effect=Allow,
                    Resource=[GetAtt(cwl_to_kinesis_role, 'Arn')],
                    Action=[IAMPassRole])])))
    log_destination = t.add_resource(cwl.Destination(
        'CWLtoKinesisDestination',
        DependsOn=[cwl_to_kinesis_policy_link.name],
        DestinationName='CWLtoKinesisDestination',
        DestinationPolicy=_generate_log_destination_policy_test('CWLtoKinesisDestination', account_list),
        RoleArn=GetAtt(cwl_to_kinesis_role, 'Arn'),
        TargetArn=GetAtt(log_stream, 'Arn')))
    t.add_output(Output(
        'childAccountLogDeliveryDestinationArn',
        Value=GetAtt(log_destination, 'Arn'),
        Description='Log Destination to specify when deploying the source cloudformation template in other accounts.'))
    if output_keys:
        splunk_user_creds = t.add_resource(iam.AccessKey(
            'splunkAccountUserCreds',
            UserName=Ref(splunk_sqs_s3_user)))
        t.add_output(Output(
            'splunkUserAccessKey',
            Description='AWS Access Key for the user created for splunk to use when accessing logs',
            Value=Ref(splunk_user_creds)))
        t.add_output(Output(
            'splunkUserSecretKey',
            Description='AWS Secret Access Key ID for the user created for splunk to use when accessing logs',
            Value=GetAtt(splunk_user_creds, 'SecretAccessKey')))
    t.add_output(Output(
        'splunkCWLRegion',
        Description="The AWS region that contains the data. In aws_cloudwatch_logs_tasks.conf, enter the region ID.",
        Value=Region))
    t.add_output(Output(
        "DeploymentAccount",
        Value=AccountId,
        Description="Convenience Output for referencing AccountID of the log aggregation account"))
    t.add_metadata({"AWS::CloudFormation::Interface": {"ParameterGroups": parameter_groups}})
    if dry_run:
        print(t.to_json())
    else:
        save_path = file_location if file_location else os.path.join(log_aggregation_cf, 'log_targets.json')
        with open(save_path, 'w') as f:
            f.write(t.to_json())
def attach(self):
    """Attach an IAM role, two inline policies, an EC2 instance profile,
    and a log group to the CloudFormation template; return the template.

    :returns: the mutated troposphere Template (self.template).
    """
    # Role assumable by EC2 and OpsWorks; S3 full access via managed policy.
    self.template.add_resource(
        iam.Role(
            'RoleResource',
            AssumeRolePolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["ec2.amazonaws.com",
                                    "opsworks.amazonaws.com"]
                    },
                    "Action": ["sts:AssumeRole"]
                }]
            },
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/AmazonS3FullAccess",
            ],
            Path="/"))
    # Inline policy for the given role defined in the Roles attribute.
    # Name comes from the LogPolicyName template parameter.
    self.template.add_resource(
        iam.PolicyType(
            'LogPolicyResource',
            PolicyName=Ref(self.template.parameters['LogPolicyName']),
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Resource": ["*"],
                    "Action": [
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:PutLogEvents",
                        "logs:DescribeLogStreams"
                    ]
                }]
            },
            Roles=[Ref(self.template.resources['RoleResource'])]))
    # Inline policy for the given role defined in the Roles attribute.
    self.template.add_resource(
        iam.PolicyType(
            'DefaultPolicyResource',
            PolicyName='DefaultPolicyName',
            PolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Resource": ["*"],
                    "Action": ["cloudformation:*"]
                }]
            },
            Roles=[Ref(self.template.resources['RoleResource'])]))
    self.template.add_resource(
        iam.InstanceProfile(
            'InstanceProfileResource',
            Path="/",
            Roles=[Ref(self.template.resources['RoleResource'])]))
    self.template.add_resource(
        LogGroup('LogGroupResource',
                 RetentionInDays=Ref(
                     self.template.parameters['LogRetentionDays']),
                 DeletionPolicy='Delete'))
    self.template.add_output(
        Output("LogGroupName",
               Description="LogGroupName (Physical ID)",
               Value=Ref(self.template.resources['LogGroupResource'])))
    return self.template
# Inline policy for the CodeBuild service role: CloudWatch Logs write,
# CodeCommit pull, S3 artifact get/put, and ECR access scoped to the
# stack's repository (token acquisition must be "*").
CodeBuildServiceRolePolicy = t.add_resource(iam.PolicyType(
    "CodeBuildServiceRolePolicy",
    PolicyName="CodeBuildServiceRolePolicy",
    PolicyDocument={"Version": "2012-10-17",
                    "Statement": [
                        {
                            "Sid": "CloudWatchLogsPolicy",
                            "Effect": "Allow",
                            "Action": [
                                "logs:CreateLogGroup",
                                "logs:CreateLogStream",
                                "logs:PutLogEvents"
                            ],
                            "Resource": [
                                "*"
                            ]
                        },
                        {
                            "Sid": "CodeCommitPolicy",
                            "Effect": "Allow",
                            "Action": [
                                "codecommit:GitPull"
                            ],
                            "Resource": [
                                "*"
                            ]
                        },
                        {
                            "Sid": "S3GetObjectPolicy",
                            "Effect": "Allow",
                            "Action": [
                                "s3:GetObject",
                                "s3:GetObjectVersion"
                            ],
                            "Resource": [
                                "*"
                            ]
                        },
                        {
                            "Sid": "S3PutObjectPolicy",
                            "Effect": "Allow",
                            "Action": [
                                "s3:PutObject"
                            ],
                            "Resource": [
                                "*"
                            ]
                        },
                        {'Action': ['ecr:GetAuthorizationToken'],
                         'Resource': ['*'],
                         'Effect': 'Allow'},
                        # arn:aws:ecr:<region>:<account>:repository/<Repository>
                        {'Action': ['ecr:*'],
                         'Resource': [
                             Join("", ["arn:aws:ecr:",
                                       Ref(AWS_REGION),
                                       ":",
                                       Ref(AWS_ACCOUNT_ID),
                                       ":repository/",
                                       Ref(Repository)]
                                  ),
                         ],
                         'Effect': 'Allow'},
                    ]},
    Roles=[Ref(CodeBuildServiceRole)],
))
InternetGateway = t.add_resource(ec2.InternetGateway("InternetGateway", ))
# Public subnet CIDR comes from the SubnetConfig mapping.
PublicSubnet = t.add_resource(
    ec2.Subnet(
        "PublicSubnet",
        VpcId=Ref("VPC"),
        CidrBlock=FindInMap("SubnetConfig", "Public", "CIDR"),
    ))
# Inline policy letting the Ambari role describe CloudFormation stacks.
CFNRolePolicies = t.add_resource(
    iam.PolicyType(
        "CFNRolePolicies",
        PolicyName="CFNaccess",
        PolicyDocument={
            "Statement": [{
                "Action": "cloudformation:Describe*",
                "Resource": "*",
                "Effect": "Allow"
            }]
        },
        Roles=[Ref("AmbariAccessRole")],
    ))
AmbariInstanceProfile = t.add_resource(
    iam.InstanceProfile(
        "AmbariInstanceProfile",
        Path="/",
        Roles=[Ref("AmbariAccessRole")],
    ))
NodeAccessRole = t.add_resource(
    iam.Role(
iam.PolicyType(
    "ECRAccessPolicy",
    PolicyName="shibboleth-ecr",
    # Raw JSON policy document: ECR token acquisition (must be "*") plus
    # full ECR access scoped to the two stack repositories.
    PolicyDocument={
        'Version': '2012-10-17',
        'Statement': [
            {
                'Action': ['ecr:GetAuthorizationToken'],
                'Resource': ['*'],
                'Effect': 'Allow'
            },
            {
                # arn:aws:ecr:<region>:<account>:repository/<Repository>
                'Action': ['ecr:*'],
                'Resource': [
                    Join("", [
                        "arn:aws:ecr:",
                        Ref(AWS_REGION), ":",
                        Ref(AWS_ACCOUNT_ID),
                        ":repository/",
                        Ref(Repository)
                    ]),
                ],
                'Effect': 'Allow'
            },
            {
                # Same grant for the redirect-service repository.
                'Action': ['ecr:*'],
                'Resource': [
                    Join("", [
                        "arn:aws:ecr:",
                        Ref(AWS_REGION), ":",
                        Ref(AWS_ACCOUNT_ID),
                        ":repository/",
                        Ref(RedirectRepository)
                    ]),
                ],
                'Effect': 'Allow'
            },
        ]
    },
    # Attached to both the instance role and the ECS task role.
    Roles=[Ref(InstanceRole), Ref(TaskRole)],
))