def generate(template):
    """Add a deployment-robot IAM user and access key to *template*.

    Creates the ``DeploymentRobot`` user (member of the ``Robots`` group),
    attaches an active access key to it, and exports both the access-key ID
    and the secret access key as stack outputs.
    """
    robot = template.add_resource(
        iam.User(
            'DeploymentRobot',
            UserName="******",
            Groups=["Robots"],
        ))

    # NOTE(review): the logical ID below is spelled 'AcessKey' (missing 'c').
    # Left untouched on purpose — renaming a logical ID would make
    # CloudFormation replace the resource in already-deployed stacks.
    robot_key = template.add_resource(
        iam.AccessKey(
            'DeploymentRobotAcessKey',
            DependsOn=[robot],
            Serial=0,
            Status='Active',
            UserName=Ref(robot),
        ))

    for output in (
        Output('DeploymentRobotAccessKey', Value=Ref(robot_key)),
        Output('DeploymentRobotSecretAccessKey',
               Value=GetAtt(robot_key, "SecretAccessKey")),
    ):
        template.add_output(output)
def create_template(self):
    """Create the functional-test IAM user for stacker.

    Builds an inline policy with exactly the permissions stacker needs
    (S3 bucket/object access, CloudFormation changeset and stack
    operations, minus anything touching this stack itself), attaches it
    to a user, and exports the user's credentials as outputs.
    """
    template = self.template

    stacker_bucket_arn = Sub("arn:aws:s3:::${StackerBucket}*")
    stack_scope = Sub(
        "arn:aws:cloudformation:${AWS::Region}:${AWS::AccountId}:"
        "stack/${StackerNamespace}-*")
    any_changeset = "*"

    # This represents the precise IAM permissions that stacker itself
    # needs.
    statements = [
        # Bucket-level S3 operations on the stacker bucket.
        Statement(Effect="Allow",
                  Resource=[stacker_bucket_arn],
                  Action=[awacs.s3.ListBucket,
                          awacs.s3.GetBucketLocation,
                          awacs.s3.CreateBucket]),
        # Object-level S3 operations on the stacker bucket.
        Statement(Effect="Allow",
                  Resource=[stacker_bucket_arn],
                  Action=[awacs.s3.GetObject,
                          awacs.s3.GetObjectAcl,
                          awacs.s3.PutObject,
                          awacs.s3.PutObjectAcl]),
        # Changeset operations cannot be scoped more narrowly.
        Statement(Effect="Allow",
                  Resource=[any_changeset],
                  Action=[awacs.cloudformation.DescribeChangeSet,
                          awacs.cloudformation.ExecuteChangeSet,
                          awacs.cloudformation.DeleteChangeSet]),
        # Explicitly deny everything against the stack this template
        # itself creates.
        Statement(Effect="Deny",
                  Resource=[Ref("AWS::StackId")],
                  Action=[awacs.cloudformation.Action("*")]),
        # Stack lifecycle operations, scoped to the stacker namespace.
        Statement(Effect="Allow",
                  Resource=[stack_scope],
                  Action=[awacs.cloudformation.GetTemplate,
                          awacs.cloudformation.CreateChangeSet,
                          awacs.cloudformation.DeleteChangeSet,
                          awacs.cloudformation.DeleteStack,
                          awacs.cloudformation.CreateStack,
                          awacs.cloudformation.UpdateStack,
                          awacs.cloudformation.DescribeStacks]),
    ]
    stacker_policy = iam.Policy(
        PolicyName="Stacker",
        PolicyDocument=Policy(Statement=statements))

    test_user = template.add_resource(
        iam.User("FunctionalTestUser", Policies=[stacker_policy]))
    test_key = template.add_resource(
        iam.AccessKey("FunctionalTestKey", Serial=1,
                      UserName=Ref(test_user)))

    for output in (
        Output("User", Value=Ref(test_user)),
        Output("AccessKeyId", Value=Ref(test_key)),
        Output("SecretAccessKey",
               Value=GetAtt("FunctionalTestKey", "SecretAccessKey")),
    ):
        template.add_output(output)
def _build_template(self, template):
    """Render this IAM-user blueprint into *template*.

    Adds the user resource itself, then optionally (driven by instance
    flags) an access key with a serial parameter, a console login
    profile with a default password parameter, bound policies, and
    managed policy ARNs. Always emits a `<name>IAMUser` output.
    """
    name = self.name
    iam_user = template.add_resource(
        iam.User(name, Path="/", Policies=[], ManagedPolicyArns=[]))

    # Only override the generated user name when an explicit login was
    # configured.
    if self._login is not None:
        iam_user.UserName = self._login

    if self._generate_key:
        # Bumping the serial parameter rotates the access key.
        serial_param = template.add_parameter(
            Parameter('Input{}IAMUserKeySerial'.format(name),
                      Type='String',
                      Default='1',
                      Description='Serial for User:{} key'.format(name)))
        access_key = template.add_resource(
            iam.AccessKey('{}IAMAccessKey'.format(name),
                          Serial=Ref(serial_param),
                          Status='Active',
                          UserName=Ref(iam_user)))
        template.add_output([
            Output('{}IAMSecretKey'.format(name),
                   Value=GetAtt(access_key.title, 'SecretAccessKey'),
                   Description='IAM SecretKey for {}'.format(name)),
            Output('{}IAMAccessKey'.format(name),
                   Value=Ref(access_key),
                   Description='IAM AccessKey for {}'.format(name)),
        ])

    if self._allow_console:
        passwd_param = template.add_parameter(
            Parameter('Input{}DefaultConsolePasswd'.format(name),
                      Type='String',
                      Default='D3fau1t9a55w0r6_c4ang3m3',
                      Description='Default console passwd for {}'.format(
                          name)))
        # Force a password change on first console login.
        iam_user.LoginProfile = iam.LoginProfile(
            Password=Ref(passwd_param),
            PasswordResetRequired=True)

    for policy in self.policies:
        policy._bind_role(template, iam_user)
    iam_user.ManagedPolicyArns.extend(self.managed_policy_arns)

    template.add_output([
        Output('{}IAMUser'.format(name),
               Value=Ref(iam_user),
               Description='{} User Output'.format(name)),
    ])
def generate(account_list=None, region_list=None, file_location=None, output_keys=False, dry_run=False):
    """CloudFormation template generator for use in creating the resources
    required to capture logs in a centrally managed account per UCSD
    standards.

    Builds, in order: a Kinesis log stream plus a Firehose delivery chain
    into S3; the central log-retention bucket with SQS notification and
    Glacier lifecycle rules; the Splunk ingest user; and a CloudWatch Logs
    destination that child accounts can point at.

    :param account_list: account IDs allowed to deliver logs. NOTE: this is
        iterated unconditionally below, so it must be a non-None iterable.
    :param region_list: accepted regions (defaults to the four US regions).
    :param file_location: where to write the rendered JSON (defaults to
        ``<log_aggregation_cf>/log_targets.json``).
    :param output_keys: when True, also emit the Splunk user's access key
        and secret as stack outputs.
    :param dry_run: when True, print the JSON instead of writing a file.
    """
    # Fix: use isinstance() rather than comparing type() for the
    # tuple-to-list normalization (also accepts tuple subclasses).
    if isinstance(account_list, tuple):
        account_list = list(account_list)
    parameter_groups = []
    region_list = region_list if region_list else ['us-west-1', 'us-west-2', 'us-east-1', 'us-east-2']

    t = Template()
    t.add_version("2010-09-09")
    t.add_description("UCSD Log Target AWS CloudFormation Template - this CFn template configures a given account to receive logs from other accounts so as to aggregate and then optionally forward those logs on to the UCSD Splunk installation.")

    # Create Kinesis and IAM Roles
    log_stream_shard_count = t.add_parameter(Parameter(
        "LogStreamShardCount",
        Description="Number of shards to create within the AWS Kinesis stream created to handle CloudWatch Logs.",
        Type="Number",
        MinValue=1,
        MaxValue=64,
        Default=1))
    log_stream_retention_period = t.add_parameter(Parameter(
        "LogStreamRetentionPeriod",
        Description="Number of hours to retain logs in the Kinesis stream.",
        Type="Number",
        MinValue=24,
        MaxValue=120,
        Default=24))
    parameter_groups.append({'Label': {'default': 'Log Stream Inputs'},
                             'Parameters': [log_stream_shard_count.name,
                                            log_stream_retention_period.name]})

    log_stream = t.add_resource(k.Stream(
        "LogStream",
        RetentionPeriodHours=Ref(log_stream_retention_period),
        ShardCount=Ref(log_stream_shard_count)))

    firehose_bucket = t.add_resource(s3.Bucket('LogS3DeliveryBucket'))

    # Role Firehose assumes to deliver the stream into S3; the ExternalId
    # condition pins it to this account.
    firehose_delivery_role = t.add_resource(iam.Role(
        'LogS3DeliveryRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[AssumeRole],
                Principal=Principal('Service', 'firehose.amazonaws.com'),
                Condition=Condition(StringEquals('sts:ExternalId', AccountId)))])))

    log_s3_delivery_policy = t.add_resource(iam.PolicyType(
        'LogS3DeliveryPolicy',
        Roles=[Ref(firehose_delivery_role)],
        PolicyName='LogS3DeliveryPolicy',
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[as3.AbortMultipartUpload,
                            as3.GetBucketLocation,
                            as3.GetObject,
                            as3.ListBucket,
                            as3.ListBucketMultipartUploads,
                            as3.PutObject],
                    Resource=[
                        Join('', ['arn:aws:s3:::', Ref(firehose_bucket)]),
                        Join('', ['arn:aws:s3:::', Ref(firehose_bucket), '*'])]),
                Statement(
                    Effect=Allow,
                    Action=[akinesis.Action('Get*'),
                            akinesis.DescribeStream,
                            akinesis.ListStreams],
                    Resource=[GetAtt(log_stream, 'Arn')])])))

    # Firehose: Kinesis stream -> S3 (resource kept; return value unused).
    t.add_resource(fh.DeliveryStream(
        'LogToS3DeliveryStream',
        DependsOn=[log_s3_delivery_policy.name],
        DeliveryStreamName='LogToS3DeliveryStream',
        DeliveryStreamType='KinesisStreamAsSource',
        KinesisStreamSourceConfiguration=fh.KinesisStreamSourceConfiguration(
            KinesisStreamARN=GetAtt(log_stream, 'Arn'),
            RoleARN=GetAtt(firehose_delivery_role, 'Arn')),
        S3DestinationConfiguration=fh.S3DestinationConfiguration(
            BucketARN=GetAtt(firehose_bucket, 'Arn'),
            BufferingHints=fh.BufferingHints(
                IntervalInSeconds=300,
                SizeInMBs=50),
            CompressionFormat='UNCOMPRESSED',
            Prefix='firehose/',
            RoleARN=GetAtt(firehose_delivery_role, 'Arn'))))

    t.add_output(Output('SplunkKinesisLogStream',
                        Value=GetAtt(log_stream, 'Arn'),
                        Description='ARN of the kinesis stream for log aggregation.'))

    # Generate Bucket with Lifecycle Policies
    ct_s3_key_prefix = t.add_parameter(Parameter(
        'CloudTrailKeyPrefix',
        Type='String',
        Default='',
        Description='Key name prefix for logs being sent to S3'))
    bucket_name = t.add_parameter(Parameter(
        "BucketName",
        Description="Name to assign to the central logging retention bucket",
        Type="String",
        AllowedPattern="([a-z]|[0-9])+",
        MinLength=2,
        MaxLength=64))
    glacier_migration_days = t.add_parameter(Parameter(
        "LogMoveToGlacierInDays",
        Description="Number of days until logs are expired from S3 and transitioned to Glacier",
        Type="Number",
        Default=365))
    glacier_deletion_days = t.add_parameter(Parameter(
        "LogDeleteFromGlacierInDays",
        Description="Number of days until logs are expired from Glacier and deleted",
        Type="Number",
        Default=365*7))
    parameter_groups.append({'Label': {'default': 'S3 Log Destination Parameters'},
                             'Parameters': [bucket_name.name,
                                            ct_s3_key_prefix.name,
                                            glacier_migration_days.name,
                                            glacier_deletion_days.name]})

    dead_letter_queue = t.add_resource(sqs.Queue('deadLetterQueue'))
    queue = t.add_resource(sqs.Queue(
        's3DeliveryQueue',
        MessageRetentionPeriod=14*24*60*60,  # 14 d * 24 h * 60 m * 60 s
        # 5 m * 60 s per Splunk docs here:
        # http://docs.splunk.com/Documentation/AddOns/released/AWS/ConfigureAWS#Configure_SQS
        VisibilityTimeout=5*60,
        RedrivePolicy=sqs.RedrivePolicy(
            deadLetterTargetArn=GetAtt(dead_letter_queue, 'Arn'),
            maxReceiveCount=10)))
    t.add_output(Output('SplunkS3Queue',
                        Value=GetAtt(queue, 'Arn'),
                        Description='Queue for Splunk SQS S3 ingest'))
    t.add_output(Output('SplunkS3DeadLetterQueue',
                        Value=GetAtt(dead_letter_queue, 'Arn'),
                        Description="Dead letter queue for Splunk SQS S3 ingest"))

    # Let the log bucket (and only it) publish object-created events.
    t.add_resource(sqs.QueuePolicy(
        's3DeliveryQueuePolicy',
        PolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Principal=Principal("AWS", "*"),
                Action=[asqs.SendMessage],
                Resource=[GetAtt(queue, 'Arn')],
                Condition=Condition(ArnLike("aws:SourceArn",
                                            Join('', ["arn:aws:s3:*:*:", Ref(bucket_name)]))))]),
        Queues=[Ref(queue)]))

    bucket = t.add_resource(s3.Bucket(
        "LogDeliveryBucket",
        DependsOn=[log_stream.name, queue.name],
        BucketName=Ref(bucket_name),
        AccessControl="LogDeliveryWrite",
        NotificationConfiguration=s3.NotificationConfiguration(
            QueueConfigurations=[s3.QueueConfigurations(
                Event="s3:ObjectCreated:*",
                Queue=GetAtt(queue, 'Arn'))]),
        LifecycleConfiguration=s3.LifecycleConfiguration(Rules=[
            s3.LifecycleRule(
                Id="S3ToGlacierTransition",
                Status="Enabled",
                ExpirationInDays=Ref(glacier_deletion_days),
                Transition=s3.LifecycleRuleTransition(
                    StorageClass="Glacier",
                    TransitionInDays=Ref(glacier_migration_days)))])))

    # CloudTrail delivery permissions, one object-path per source account.
    # NOTE(review): the object ARN concatenates the bucket ARN directly with
    # the key prefix (no '/' separator) — presumably callers pass a prefix
    # with a leading '/'; confirm before changing.
    t.add_resource(s3.BucketPolicy(
        "LogDeliveryBucketPolicy",
        Bucket=Ref(bucket),
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect="Allow",
                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                    Action=[GetBucketAcl],
                    Resource=[GetAtt(bucket, 'Arn')]),
                Statement(
                    Effect="Allow",
                    Principal=Principal("Service", "cloudtrail.amazonaws.com"),
                    Action=[PutObject],
                    Condition=Condition(StringEquals({"s3:x-amz-acl": "bucket-owner-full-control"})),
                    Resource=[Join('', [GetAtt(bucket, "Arn"),
                                        Ref(ct_s3_key_prefix),
                                        "/AWSLogs/",
                                        acct_id,
                                        "/*"]) for acct_id in account_list])])))

    splunk_sqs_s3_user = t.add_resource(iam.User(
        'splunkS3SQSUser',
        Path='/',
        UserName='******'))
    t.add_resource(_generate_splunk_policy(users=[Ref(splunk_sqs_s3_user)]))
    t.add_output(Output('BucketName',
                        Description="Name of the bucket for CloudTrail log delivery",
                        Value=Ref(bucket)))

    # Log destination setup
    cwl_to_kinesis_role = t.add_resource(iam.Role(
        'CWLtoKinesisRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[Statement(
                Effect=Allow,
                Action=[AssumeRole],
                Principal=Principal("Service",
                                    Join('', ["logs.", Region, ".amazonaws.com"])))])))
    cwl_to_kinesis_policy_link = t.add_resource(iam.PolicyType(
        'CWLtoKinesisPolicy',
        PolicyName='CWLtoKinesisPolicy',
        Roles=[Ref(cwl_to_kinesis_role)],
        PolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Resource=[GetAtt(log_stream, 'Arn')],
                    Action=[akinesis.PutRecord]),
                Statement(
                    Effect=Allow,
                    Resource=[GetAtt(cwl_to_kinesis_role, 'Arn')],
                    Action=[IAMPassRole])])))
    log_destination = t.add_resource(cwl.Destination(
        'CWLtoKinesisDestination',
        DependsOn=[cwl_to_kinesis_policy_link.name],
        DestinationName='CWLtoKinesisDestination',
        DestinationPolicy=_generate_log_destination_policy_test('CWLtoKinesisDestination', account_list),
        RoleArn=GetAtt(cwl_to_kinesis_role, 'Arn'),
        TargetArn=GetAtt(log_stream, 'Arn')))
    t.add_output(Output('childAccountLogDeliveryDestinationArn',
                        Value=GetAtt(log_destination, 'Arn'),
                        Description='Log Destination to specify when deploying the source cloudformation template in other accounts.'))

    if output_keys:
        splunk_user_creds = t.add_resource(iam.AccessKey(
            'splunkAccountUserCreds',
            UserName=Ref(splunk_sqs_s3_user)))
        t.add_output(Output('splunkUserAccessKey',
                            Description='AWS Access Key for the user created for splunk to use when accessing logs',
                            Value=Ref(splunk_user_creds)))
        t.add_output(Output('splunkUserSecretKey',
                            Description='AWS Secret Access Key ID for the user created for splunk to use when accessing logs',
                            Value=GetAtt(splunk_user_creds, 'SecretAccessKey')))

    t.add_output(Output('splunkCWLRegion',
                        Description="The AWS region that contains the data. In aws_cloudwatch_logs_tasks.conf, enter the region ID.",
                        Value=Region))
    t.add_output(Output("DeploymentAccount",
                        Value=AccountId,
                        Description="Convenience Output for referencing AccountID of the log aggregation account"))
    t.add_metadata({"AWS::CloudFormation::Interface": {"ParameterGroups": parameter_groups}})

    if dry_run:
        print(t.to_json())
    else:
        save_path = file_location if file_location else os.path.join(log_aggregation_cf, 'log_targets.json')
        with open(save_path, 'w') as f:
            f.write(t.to_json())
def create_template():
    """Build the stack that provisions the CI release user.

    Returns a Template containing an IAM user (with an inline policy
    granting exactly the CloudFormation/S3/logs/IAM/Lambda/events
    permissions serverless deploys need) plus an access key, and
    outputs for the user and both key halves.
    """
    def _allow(actions, resource):
        # All statements share the same shape; keys kept in the original
        # Action/Effect/Resource order.
        return {'Action': actions, 'Effect': 'Allow', 'Resource': resource}

    statements = []

    # Stack lifecycle operations on the production service stack.
    statements.append(_allow(
        [
            'cloudformation:DescribeStackResources',
            'cloudformation:DescribeStackResource',
            'cloudformation:DescribeStacks',
            'cloudformation:DescribeStackEvents',
            'cloudformation:CreateStack',
            'cloudformation:DeleteStack',
            'cloudformation:UpdateStack'
        ],
        Join(':', [
            'arn', 'aws', 'cloudformation',
            Ref('AWS::Region'),
            Ref('AWS::AccountId'),
            Join('/', ['stack', SERVICE_PROD, '*'])
        ])))

    # Bucket-level access to the deployment bucket and the service bucket.
    statements.append(_allow(
        [
            's3:CreateBucket', 's3:ListBucket', 's3:DeleteBucket',
            's3:PutBucketNotification'
        ],
        [
            Join(':::', ['arn:aws:s3',
                         ('%s-serverlessdeploymentbucket-*' % SERVICE_PROD)]),
            Join(':::', ['arn:aws:s3', SERVICE_PROD])
        ]))

    # Object-level access within those buckets.
    statements.append(_allow(
        ['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
        [
            Join(':::', ['arn:aws:s3',
                         ('%s-serverlessdeploymentbucket-*/*' % SERVICE_PROD)]),
            Join(':::', ['arn:aws:s3', ('%s/*' % SERVICE_PROD)])
        ]))

    # CORS / ACL management on the service bucket.
    statements.append(_allow(
        ['s3:PutBucketCORS', 's3:GetBucketCORS', 's3:PutBucketAcl'],
        Join(':::', ['arn:aws:s3', SERVICE_PROD])))

    # Read access to the Lambda's log group.
    statements.append(_allow(
        ['logs:DescribeLogStreams', 'logs:FilterLogEvents'],
        Join(':', [
            'arn', 'aws', 'logs',
            Ref('AWS::Region'),
            Ref('AWS::AccountId'),
            'log-group',
            ('/aws/lambda/%s' % SERVICE_PROD)
        ])))

    # Management of the Lambda execution role.
    statements.append(_allow(
        [
            'iam:CreateRole', 'iam:UpdateRole', 'iam:DeleteRole',
            'iam:PutRolePolicy', 'iam:DeleteRolePolicy',
            'iam:ListRolePolicies', 'iam:ListRoles', 'iam:PassRole',
            'iam:GetRole'
        ],
        Join(':', [
            'arn', 'aws', 'iam', '',
            Ref('AWS::AccountId'),
            ('role/%s-IamRoleLambdaExecution-*' % SERVICE_PROD)
        ])))

    # Currently, AWS Lambda doesn't support permissions for this particular
    # action at the resource-level. Therefore, the policy specifies a
    # wildcard character (*) as the Resource value.
    # http://docs.aws.amazon.com/lambda/latest/dg/access-control-identity-based.html
    statements.append(_allow(
        [
            'lambda:GetFunction', 'lambda:CreateFunction',
            'lambda:AddPermission', 'lambda:RemovePermission',
            'lambda:DeleteFunction', 'lambda:InvokeFunction',
            'lambda:GetFunctionConfiguration',
            'lambda:UpdateFunctionConfiguration',
            'lambda:UpdateFunctionCode'
        ],
        '*'))

    # Scheduled-event rule management for the service.
    statements.append(_allow(
        [
            'events:PutRule', 'events:PutTargets', 'events:RemoveTargets',
            'events:DescribeRule', 'events:DeleteRule'
        ],
        Join(':', [
            'arn', 'aws', 'events',
            Ref('AWS::Region'),
            Ref('AWS::AccountId'),
            ('rule/%s-*' % SERVICE_PROD)
        ])))

    ci_user = iam.User(
        title='ciUser',
        UserName=CI_USERNAME,
        Policies=[
            iam.Policy(PolicyDocument={'Statement': statements},
                       PolicyName=('%s-cipolicy' % SERVICE))
        ])
    ci_key = iam.AccessKey(title='ciKey', UserName=Ref(ci_user))

    template = Template(
        Description=
        'A stack to set up the requirements needed for deploying Why82? Lambda code'
    )
    template.add_resource(ci_user)
    template.add_resource(ci_key)
    template.add_output([
        Output('CiUser',
               Description='The user that CI will use to do releases',
               Value=Ref(ci_user)),
        Output('CiAccessKey',
               Description='The CI user\'s access key',
               Value=Ref(ci_key)),
        Output('CiSecretKey',
               Description='The CI user\'s secret key',
               Value=GetAtt(ci_key, 'SecretAccessKey'))
    ])
    return template
def create_template(self):
    """Create the functional-test IAM principals for stacker.

    Builds a "Stacker" policy (S3, CloudFormation and SNS permissions
    scoped to the stacker bucket/namespace), attaches it to both a role
    (assumable by anyone in this account) and a user, gives the user an
    additional policy allowing it to assume that role, and exports the
    user credentials and role ARN as stack outputs.
    """
    t = self.template
    # ARN scopes used by the policy statements below.
    bucket_arn = Sub("arn:aws:s3:::${StackerBucket}*")
    objects_arn = Sub("arn:aws:s3:::${StackerBucket}*/*")
    cloudformation_scope = Sub(
        "arn:aws:cloudformation:*:${AWS::AccountId}:"
        "stack/${StackerNamespace}-*")
    sns_scope = Sub("arn:aws:sns:*:${AWS::AccountId}:"
                    "${StackerNamespace}-*")
    changeset_scope = "*"

    # This represents the precise IAM permissions that stacker itself
    # needs.
    stacker_policy = iam.Policy(
        PolicyName="Stacker",
        PolicyDocument=Policy(Statement=[
            # Bucket discovery.
            Statement(Effect="Allow",
                      Resource=["*"],
                      Action=[awacs.s3.ListAllMyBuckets]),
            # Bucket-level operations on the stacker bucket.
            Statement(Effect="Allow",
                      Resource=[bucket_arn],
                      Action=[
                          awacs.s3.ListBucket,
                          awacs.s3.GetBucketLocation,
                          awacs.s3.CreateBucket,
                          awacs.s3.DeleteBucket,
                      ]),
            # Object read/write on the stacker bucket.
            Statement(Effect="Allow",
                      Resource=[bucket_arn],
                      Action=[
                          awacs.s3.GetObject,
                          awacs.s3.GetObjectAcl,
                          awacs.s3.PutObject,
                          awacs.s3.PutObjectAcl,
                      ]),
            # Object deletion uses the objects ARN ("*/*").
            Statement(Effect="Allow",
                      Resource=[objects_arn],
                      Action=[
                          awacs.s3.DeleteObject,
                      ]),
            # Changeset operations cannot be scoped more narrowly.
            Statement(Effect="Allow",
                      Resource=[changeset_scope],
                      Action=[
                          awacs.cloudformation.DescribeChangeSet,
                          awacs.cloudformation.ExecuteChangeSet,
                          awacs.cloudformation.DeleteChangeSet,
                      ]),
            # Explicitly deny any CloudFormation action against the
            # stack this template itself creates.
            Statement(Effect="Deny",
                      Resource=[Ref("AWS::StackId")],
                      Action=[awacs.cloudformation.Action("*")]),
            # Stack lifecycle operations, scoped to the stacker
            # namespace.
            Statement(
                Effect="Allow",
                Resource=[cloudformation_scope],
                Action=[
                    awacs.cloudformation.GetTemplate,
                    awacs.cloudformation.CreateChangeSet,
                    awacs.cloudformation.DeleteChangeSet,
                    awacs.cloudformation.DeleteStack,
                    awacs.cloudformation.CreateStack,
                    awacs.cloudformation.UpdateStack,
                    awacs.cloudformation.SetStackPolicy,
                    awacs.cloudformation.DescribeStacks,
                    awacs.cloudformation.DescribeStackEvents
                ]),
            # SNS topics within the stacker namespace.
            Statement(Effect="Allow",
                      Resource=[sns_scope],
                      Action=[
                          awacs.sns.CreateTopic,
                          awacs.sns.DeleteTopic,
                          awacs.sns.GetTopicAttributes
                      ])
        ]))

    # Any principal in this account may assume the test role.
    principal = AWSPrincipal(Ref("AWS::AccountId"))
    role = t.add_resource(
        iam.Role("FunctionalTestRole",
                 AssumeRolePolicyDocument=Policy(Statement=[
                     Statement(Effect="Allow",
                               Action=[awacs.sts.AssumeRole],
                               Principal=principal)
                 ]),
                 Policies=[stacker_policy]))

    # Lets the functional-test user assume the role above.
    assumerole_policy = iam.Policy(
        PolicyName="AssumeRole",
        PolicyDocument=Policy(Statement=[
            Statement(Effect="Allow",
                      Resource=[GetAtt(role, "Arn")],
                      Action=[awacs.sts.AssumeRole])
        ]))
    user = t.add_resource(
        iam.User("FunctionalTestUser",
                 Policies=[stacker_policy, assumerole_policy]))
    key = t.add_resource(
        iam.AccessKey("FunctionalTestKey", Serial=1, UserName=Ref(user)))

    # Credentials and role ARN for the test harness.
    t.add_output(Output("User", Value=Ref(user)))
    t.add_output(Output("AccessKeyId", Value=Ref(key)))
    t.add_output(
        Output("SecretAccessKey",
               Value=GetAtt("FunctionalTestKey", "SecretAccessKey")))
    t.add_output(Output("FunctionalTestRole", Value=GetAtt(role, "Arn")))