def create_a_lambda(self, flight, **extra):
    """Publish a config-rule test policy lambda.

    Returns a (manager, publish_result) pair and registers cleanup of
    the published function.
    """
    session_factory = self.replay_flight_data(flight, zdata=True)
    # Base mode config, overridable via **extra.
    mode = dict(
        {"type": "config-rule",
         "role": "arn:aws:iam::644160558196:role/custodian-mu"},
        **extra)
    policy = Policy(
        {"resource": "s3",
         "name": "hello-world",
         "actions": ["no-op"],
         "mode": mode},
        Config.empty(),
    )
    policy_lambda = PolicyLambda(policy)
    manager = LambdaManager(session_factory)

    def cleanup():
        # Remove the function; when recording, give AWS time to settle.
        manager.remove(policy_lambda)
        if self.recording:
            time.sleep(60)

    self.addCleanup(cleanup)
    return manager, manager.publish(policy_lambda)
def provision(config, session_factory):
    """Build and publish the mailer lambda described by *config*."""
    schedule = config.get('lambda_schedule', 'rate(5 minutes)')
    periodic_event = CloudWatchEventSource(
        {'type': 'periodic', 'schedule': schedule},
        session_factory, prefix="")
    func_config = {
        'name': config.get('lambda_name', 'cloud-custodian-mailer'),
        'description': config.get(
            'lambda_description', 'Cloud Custodian Mailer'),
        'tags': config.get('lambda_tags', {}),
        'handler': 'periodic.dispatch',
        'runtime': config['runtime'],
        'memory_size': config['memory'],
        'timeout': config['timeout'],
        'role': config['role'],
        'subnets': config['subnets'],
        'security_groups': config['security_groups'],
        'dead_letter_config': config.get('dead_letter_config', {}),
        'events': [periodic_event],
    }
    archive = get_archive(config)
    func = LambdaFunction(func_config, archive)
    LambdaManager(session_factory).publish(func)
def main():
    """CLI entry point: provision the error-notify log-subscriber lambda."""
    options = setup_parser().parse_args()
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('botocore').setLevel(logging.ERROR)

    # One of --group / --prefix is required to select log groups.
    if not (options.group or options.prefix):
        print("Error: Either group or prefix must be specified")
        sys.exit(1)

    session_factory = SessionFactory(
        options.region, options.profile, options.assume)
    groups = get_groups(session_factory, options)
    func = logsub.get_function(
        session_factory,
        "cloud-custodian-error-notify",
        role=options.role,
        sns_topic=options.topic,
        subject=options.subject,
        log_groups=groups,
        pattern=options.pattern)
    manager = LambdaManager(session_factory)
    try:
        manager.publish(func)
    except Exception:
        # Drop into a post-mortem debugger for interactive diagnosis.
        import traceback
        import pdb
        import sys
        traceback.print_exc()
        pdb.post_mortem(sys.exc_info()[-1])
def test_publishes_a_lambda(self):
    """Publishing a freshly-built function reports the archive code size."""
    factory = self.replay_flight_data("test_publishes_a_lambda")
    manager = LambdaManager(factory)
    func = self.make_func()
    self.addCleanup(manager.remove, func)
    published = manager.publish(func)
    self.assertEqual(published["CodeSize"], 169)
def test_cwe_asg_instance(self):
    """asg-instance-state mode provisions a lambda wired to a CWE rule."""
    factory = self.replay_flight_data('test_cwe_asg', zdata=True)
    policy = Policy(
        {'resource': 'asg',
         'name': 'asg-spin-detector',
         'mode': {'type': 'asg-instance-state',
                  'events': ['launch-failure']}},
        Config.empty())
    func = PolicyLambda(policy)
    manager = LambdaManager(factory)
    published = manager.publish(func, 'Dev', role=self.role)
    self.assert_items(
        published,
        {'FunctionName': 'maid-asg-spin-detector',
         'Handler': 'maid_policy.run',
         'MemorySize': 512,
         'Runtime': 'python2.7',
         'Timeout': 60})
    # The event rule should be enabled and match launch failures.
    rules = factory().client('events').list_rules(
        NamePrefix="maid-asg-spin-detector")
    self.assert_items(
        rules['Rules'][0],
        {"State": "ENABLED", "Name": "maid-asg-spin-detector"})
    self.assertEqual(
        json.loads(rules['Rules'][0]['EventPattern']),
        {"source": ["aws.autoscaling"],
         "detail-type": ["EC2 Instance Launch Unsuccessful"]})
    manager.remove(func)
def test_sns_subscriber(self):
    """End to end: SNS topic -> mu-provisioned lambda -> lambda log output."""
    # Shrink the IAM propagation wait so replayed runs are fast.
    self.patch(SNSSubscription, 'iam_delay', 0.01)
    session_factory = self.replay_flight_data('test_sns_subscriber')
    session = session_factory()
    client = session.client('sns')

    # create an sns topic
    tname = "custodian-test-sns-sub"
    topic_arn = client.create_topic(Name=tname)['TopicArn']
    self.addCleanup(client.delete_topic, TopicArn=topic_arn)

    # provision a lambda via mu
    params = dict(
        session_factory=session_factory,
        name='c7n-hello-world',
        role='arn:aws:iam::644160558196:role/custodian-mu',
        events=[SNSSubscription(session_factory, [topic_arn])])
    func = helloworld.get_function(**params)
    manager = LambdaManager(session_factory)
    manager.publish(func)
    self.addCleanup(manager.remove, func)

    # now publish to the topic and look for lambda log output
    client.publish(TopicArn=topic_arn, Message='Greetings, program!')
    #time.sleep(15) -- turn this back on when recording flight data
    log_events = manager.logs(func, '1970-1-1', '9170-1-1')
    # Only keep log lines that carry the delivered SNS record payload.
    messages = [e['message'] for e in log_events
                if e['message'].startswith('{"Records')]
    self.addCleanup(
        session.client('logs').delete_log_group,
        logGroupName='/aws/lambda/c7n-hello-world')
    self.assertEqual(
        json.loads(messages[0])['Records'][0]['Sns']['Message'],
        'Greetings, program!')
def test_cwe_update_config_and_code(self):
    """Republishing a changed policy updates both function code and config."""
    # Originally this was testing the no update case.. but
    # That is tricky to record, any updates to the code end up
    # causing issues due to checksum mismatches which imply updating
    # the function code / which invalidate the recorded data and
    # the focus of the test.
    session_factory = self.replay_flight_data(
        'test_cwe_update', zdata=True)
    p = Policy({
        'resource': 's3',
        'name': 's3-bucket-policy',
        'mode': {
            'type': 'cloudtrail',
            'events': ["CreateBucket"],
        },
        'filters': [
            {'type': 'missing-policy-statement',
             'statement_ids': ['RequireEncryptedPutObject']}],
        'actions': ['no-op']
    }, Config.empty())
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    result = mgr.publish(pl, 'Dev', role=self.role)
    self.addCleanup(mgr.remove, pl)

    # Second revision: more memory and an extra cloudtrail event.
    p = Policy({
        'resource': 's3',
        'name': 's3-bucket-policy',
        'mode': {
            'type': 'cloudtrail',
            'memory': 256,
            'events': [
                "CreateBucket",
                {'event': 'PutBucketPolicy',
                 'ids': 'requestParameters.bucketName',
                 'source': 's3.amazonaws.com'}]
        },
        'filters': [
            {'type': 'missing-policy-statement',
             'statement_ids': ['RequireEncryptedPutObject']}],
        'actions': ['no-op']
    }, Config.empty())
    output = self.capture_logging('custodian.lambda', level=logging.DEBUG)
    result2 = mgr.publish(PolicyLambda(p), 'Dev', role=self.role)
    lines = output.getvalue().strip().split('\n')
    # Both a code update and a config update should have been logged.
    self.assertTrue(
        'Updating function custodian-s3-bucket-policy code' in lines)
    self.assertTrue(
        'Updating function: custodian-s3-bucket-policy config' in lines)
    self.assertEqual(result['FunctionName'], result2['FunctionName'])
    # drive by coverage
    functions = [i for i in mgr.list_functions()
                 if i['FunctionName'] == 'custodian-s3-bucket-policy']
    self.assertTrue(len(functions), 1)
    start = 0
    end = time.time() * 1000
    self.assertEqual(list(mgr.logs(pl, start, end)), [])
def test_cwe_schedule(self):
    """periodic mode provisions a lambda with a CWE schedule rule."""
    factory = self.replay_flight_data('test_cwe_schedule', zdata=True)
    policy = Policy(
        {'resource': 'ec2',
         'name': 'periodic-ec2-checker',
         'mode': {'type': 'periodic', 'schedule': 'rate(1 day)'}},
        Config.empty())
    func = PolicyLambda(policy)
    manager = LambdaManager(factory)
    published = manager.publish(func, 'Dev', role=self.role)
    self.assert_items(
        published,
        {'FunctionName': 'maid-periodic-ec2-checker',
         'Handler': 'maid_policy.run',
         'MemorySize': 512,
         'Runtime': 'python2.7',
         'Timeout': 60})
    # The schedule rule should be enabled with the requested rate.
    rules = factory().client('events').list_rules(
        NamePrefix="maid-periodic-ec2-checker")
    self.assert_items(
        rules['Rules'][0],
        {"State": "ENABLED",
         "ScheduleExpression": "rate(1 day)",
         "Name": "maid-periodic-ec2-checker"})
    manager.remove(func)
def process(self, buckets):
    """Attach the s3crypt encrypt lambda to each bucket, grouped by region.

    Publishes the function once per distinct bucket region, then wires
    each bucket to its region's function in parallel.  Returns the
    truthy per-bucket results.
    """
    from c7n.mu import LambdaManager
    from c7n.ufuncs.s3crypt import get_function
    func = get_function(
        None, self.data.get('role', self.manager.config.assume_role))

    # Publish function to all of our buckets regions
    region_funcs = {}
    regions = set([b.get('LocationConstraint', 'us-east-1')
                   for b in buckets])
    for r in regions:
        lambda_mgr = LambdaManager(
            functools.partial(self.manager.session_factory, region=r))
        region_funcs[r] = lambda_mgr.publish(func)

    with self.executor_factory(max_workers=3) as w:
        results = []
        futures = []
        for b in buckets:
            futures.append(
                w.submit(
                    self.process_bucket,
                    region_funcs[b.get('LocationConstraint', 'us-east-1')],
                    b))
        for f in as_completed(futures):
            if f.exception():
                log.exception(
                    "Error attaching lambda-encrypt %s" % (f.exception()))
                # Bug fix: skip failed futures -- calling f.result() here
                # would re-raise the exception we just logged and abort
                # collection of the remaining results.
                continue
            results.append(f.result())
        return filter(None, results)
def test_cwl_subscriber(self):
    """A cloudwatch-logs subscription filter targets the published lambda."""
    # Shrink the IAM propagation wait so replayed runs are fast.
    self.patch(CloudWatchLogSubscription, 'iam_delay', 0.01)
    session_factory = self.replay_flight_data('test_cwl_subscriber')
    session = session_factory()
    client = session.client('logs')

    # Create a log group to subscribe to.
    lname = "custodian-test-log-sub"
    self.addCleanup(client.delete_log_group, logGroupName=lname)
    client.create_log_group(logGroupName=lname)
    linfo = client.describe_log_groups(
        logGroupNamePrefix=lname)['logGroups'][0]

    # Publish the subscriber function against that group.
    params = dict(
        session_factory=session_factory,
        name="c7n-log-sub",
        role=self.role,
        sns_topic="arn:",
        log_groups=[linfo])
    func = logsub.get_function(**params)
    manager = LambdaManager(session_factory)
    finfo = manager.publish(func)
    self.addCleanup(manager.remove, func)

    # The group's single subscription filter should target the function.
    results = client.describe_subscription_filters(logGroupName=lname)
    self.assertEqual(len(results['subscriptionFilters']), 1)
    self.assertEqual(results['subscriptionFilters'][0]['destinationArn'],
                     finfo['FunctionArn'])
def get_metrics(self, start, end, period):
    """Merge lambda execution metrics into the base policy metrics."""
    from c7n.mu import LambdaManager, PolicyLambda
    manager = LambdaManager(self.policy.session_factory)
    lambda_values = manager.metrics(
        [PolicyLambda(self.policy)], start, end, period)[0]
    base_values = super(LambdaMode, self).get_metrics(start, end, period)
    lambda_values.update(base_values)
    return lambda_values
def test_can_switch_runtimes(self):
    """Republishing with a new runtime updates the function in place."""
    factory = self.replay_flight_data("test_can_switch_runtimes")
    func = self.make_func()
    manager = LambdaManager(factory)
    self.addCleanup(manager.remove, func)
    self.assertEqual(manager.publish(func)["Runtime"], "python2.7")
    # Flip the declared runtime and publish again.
    func.func_data["runtime"] = "python3.6"
    self.assertEqual(manager.publish(func)["Runtime"], "python3.6")
def test_can_switch_runtimes(self):
    """Republishing with a new runtime updates the function in place."""
    factory = self.replay_flight_data('test_can_switch_runtimes')
    func = self.make_func()
    manager = LambdaManager(factory)
    self.addCleanup(manager.remove, func)
    self.assertEqual(manager.publish(func)['Runtime'], 'python2.7')
    # Flip the declared runtime and publish again.
    func.func_data['runtime'] = 'python3.6'
    self.assertEqual(manager.publish(func)['Runtime'], 'python3.6')
def test_config_rule_provision(self):
    """config-rule mode publishes a custodian-prefixed lambda."""
    factory = self.replay_flight_data('test_config_rule')
    policy = Policy(
        {'resource': 'security-group',
         'name': 'sg-modified',
         'mode': {'type': 'config-rule'}},
        Config.empty())
    func = PolicyLambda(policy)
    manager = LambdaManager(factory)
    published = manager.publish(func, 'Dev', role=self.role)
    self.assertEqual(published['FunctionName'], 'custodian-sg-modified')
    self.addCleanup(manager.remove, func)
def process(self, buckets):
    """Attach the s3crypt encrypt lambda to each bucket, grouped by region.

    Publishes the function once per distinct bucket region, then wires
    each bucket to its region's function in parallel.  Returns the
    truthy per-bucket results.
    """
    from c7n.mu import LambdaManager
    from c7n.ufuncs.s3crypt import get_function
    session = local_session(self.manager.session_factory)
    account_id = get_account_id(session)
    func = get_function(
        None, self.data.get('role', self.manager.config.assume_role),
        account_id=account_id)

    regions = set([
        b.get('Location', {
            'LocationConstraint': 'us-east-1'})['LocationConstraint']
        for b in buckets])

    # session managers by region
    region_sessions = {}
    for r in regions:
        region_sessions[r] = functools.partial(
            self.manager.session_factory, region=r)

    # Publish function to all of our buckets regions
    region_funcs = {}
    for r in regions:
        lambda_mgr = LambdaManager(region_sessions[r])
        lambda_mgr.publish(func)
        region_funcs[r] = func

    with self.executor_factory(max_workers=3) as w:
        results = []
        futures = []
        for b in buckets:
            region = b.get('Location', {
                'LocationConstraint': 'us-east-1'}).get(
                    'LocationConstraint')
            futures.append(
                w.submit(
                    self.process_bucket,
                    region_funcs[region], b, account_id,
                    region_sessions[region]))
        for f in as_completed(futures):
            if f.exception():
                log.exception(
                    "Error attaching lambda-encrypt %s" % (f.exception()))
                # Bug fix: skip failed futures -- calling f.result() here
                # would re-raise the exception we just logged and abort
                # collection of the remaining results.
                continue
            results.append(f.result())
        return filter(None, results)
def test_config_rule_provision(self):
    """config-rule mode publishes a custodian-prefixed lambda."""
    factory = self.replay_flight_data("test_config_rule")
    policy = Policy(
        {"resource": "security-group",
         "name": "sg-modified",
         "mode": {"type": "config-rule"}},
        Config.empty(),
    )
    func = PolicyLambda(policy)
    manager = LambdaManager(factory)
    published = manager.publish(func, "Dev", role=ROLE)
    self.assertEqual(published["FunctionName"], "custodian-sg-modified")
    self.addCleanup(manager.remove, func)
def create_a_lambda(self, flight, **extra):
    """Publish a config-rule test policy lambda.

    Returns a (manager, publish_result) pair and registers cleanup of
    the published function.
    """
    factory = self.replay_flight_data(flight, zdata=True)
    # Base mode config, overridable via **extra.
    mode = dict(
        {'type': 'config-rule',
         'role': 'arn:aws:iam::644160558196:role/custodian-mu'},
        **extra)
    policy = Policy(
        {'resource': 's3',
         'name': 'hello-world',
         'actions': ['no-op'],
         'mode': mode},
        Config.empty())
    func = PolicyLambda(policy)
    manager = LambdaManager(factory)
    self.addCleanup(manager.remove, func)
    return manager, manager.publish(func)
def test_cwe_trail(self):
    """cloudtrail mode renders the expected event pattern and lambda config."""
    session_factory = self.replay_flight_data("test_cwe_trail", zdata=True)
    p = Policy(
        {
            "resource": "s3",
            "name": "s3-bucket-policy",
            "mode": {"type": "cloudtrail", "events": ["CreateBucket"]},
            "filters": [
                {
                    "type": "missing-policy-statement",
                    "statement_ids": ["RequireEncryptedPutObject"],
                }
            ],
            "actions": ["no-op"],
        },
        Config.empty(),
    )
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    self.addCleanup(mgr.remove, pl)
    result = mgr.publish(pl, "Dev", role=ROLE)

    # The policy should expose exactly one cloudtrail event source.
    events = pl.get_events(session_factory)
    self.assertEqual(len(events), 1)
    event = events.pop()
    self.assertEqual(
        json.loads(event.render_event_pattern()),
        {
            u"detail": {
                u"eventName": [u"CreateBucket"],
                u"eventSource": [u"s3.amazonaws.com"],
            },
            u"detail-type": ["AWS API Call via CloudTrail"],
        },
    )
    self.assert_items(
        result,
        {
            "Description": "cloud-custodian lambda policy",
            "FunctionName": "custodian-s3-bucket-policy",
            "Handler": "custodian_policy.run",
            "MemorySize": 512,
            "Runtime": "python2.7",
            "Timeout": 60,
        },
    )
def test_lambda_cross_account(self):
    """A lambda granted wildcard invoke is caught by the cross-account filter."""
    self.patch(CrossAccountAccessFilter, "executor_factory", MainThreadExecutor)
    session_factory = self.replay_flight_data("test_cross_account_lambda")
    client = session_factory().client("lambda")
    name = "c7n-cross-check"
    tmp_dir = tempfile.mkdtemp()
    self.addCleanup(os.rmdir, tmp_dir)

    # Build and publish a minimal function to test against.
    archive = PythonPackageArchive()
    archive.add_contents("handler.py", LAMBDA_SRC)
    archive.close()
    func = LambdaFunction(
        {
            "runtime": "python2.7",
            "name": name,
            "description": "",
            "handler": "handler.handler",
            "memory_size": 128,
            "timeout": 5,
            "role": self.role,
        },
        archive,
    )
    manager = LambdaManager(session_factory)
    manager.publish(func)
    self.addCleanup(manager.remove, func)

    # Grant invoke to everyone -- the policy should flag this function.
    client.add_permission(
        FunctionName=name,
        StatementId="oops",
        Principal="*",
        Action="lambda:InvokeFunction",
    )
    p = self.load_policy(
        {
            "name": "lambda-cross",
            "resource": "lambda",
            "filters": ["cross-account"],
        },
        session_factory=session_factory,
    )
    resources = p.run()
    self.assertEqual(len(resources), 1)
    self.assertEqual(resources[0]["FunctionName"], name)
def provision(self):
    """Provision policy as a lambda function."""
    # Lambda dep imported lazily to avoid a hard runtime dependency.
    from c7n.mu import PolicyLambda, LambdaManager

    def unassumed_factory(assume=False):
        # For cli usage by normal users, don't assume the role locally --
        # the role is still used by the lambda itself.
        return self.session_factory(assume)

    with self.ctx:
        self.log.info(
            "Provisioning policy lambda %s", self.name)
        try:
            manager = LambdaManager(self.session_factory)
        except ClientError:
            manager = LambdaManager(unassumed_factory)
        return manager.publish(
            PolicyLambda(self), 'current', role=self.options.assume_role)
def get_metrics(self, start, end, period):
    """Return cloudwatch datapoints for this policy over [start, end].

    Includes lambda execution metrics when the policy runs as a lambda,
    custodian's own metrics (ResourceCount, and for non-lambda policies
    ResourceTime/ActionTime), plus any custom metrics declared by the
    policy's filters and actions.
    """
    # Avoiding runtime lambda dep, premature optimization?
    from c7n.mu import PolicyLambda, LambdaManager
    values = {}

    # Pickup lambda specific metrics (errors, invocations, durations)
    if self.is_lambda:
        manager = LambdaManager(self.session_factory)
        values = manager.metrics(
            [PolicyLambda(self)], start, end, period)[0]
        metrics = ['ResourceCount']
    else:
        metrics = ['ResourceCount', 'ResourceTime', 'ActionTime']

    default_dimensions = {
        'Policy': self.name, 'ResType': self.resource_type,
        'Scope': 'Policy'}

    # Support action, and filter custom metrics
    for el in itertools.chain(
            self.resource_manager.actions, self.resource_manager.filters):
        if el.metrics:
            metrics.extend(el.metrics)

    session = utils.local_session(self.session_factory)
    client = session.client('cloudwatch')

    for m in metrics:
        if isinstance(m, basestring):
            dimensions = default_dimensions
        else:
            # Custom metrics may carry extra dimensions as a
            # (name, dimensions) pair overriding the defaults.
            m, m_dimensions = m
            dimensions = dict(default_dimensions)
            dimensions.update(m_dimensions)
        results = client.get_metric_statistics(
            Namespace=DEFAULT_NAMESPACE,
            Dimensions=[
                {'Name': k, 'Value': v} for k, v in dimensions.items()],
            Statistics=['Sum', 'Average'],
            StartTime=start,
            EndTime=end,
            Period=period,
            MetricName=m)
        values[m] = results['Datapoints']
    return values
def test_sqs_subscriber(self):
    """End to end: SQS queue -> mu-provisioned lambda -> lambda log output."""
    session_factory = self.replay_flight_data('test_mu_sqs_subscriber')
    func_name = 'c7n-hello-sqs'
    queue_name = "my-dev-test-3"

    # Setup Queues
    session = session_factory()
    client = session.client('sqs')
    queue_url = client.create_queue(QueueName=queue_name).get('QueueUrl')
    queue_arn = client.get_queue_attributes(
        QueueUrl=queue_url,
        AttributeNames=['QueueArn'])['Attributes']['QueueArn']
    self.addCleanup(client.delete_queue, QueueUrl=queue_url)

    # Setup Function
    params = dict(
        session_factory=session_factory,
        name=func_name,
        role="arn:aws:iam::644160558196:role/custodian-mu",
        events=[SQSSubscription(session_factory, [queue_arn])])
    func = helloworld.get_function(**params)
    manager = LambdaManager(session_factory)
    manager.publish(func)
    self.addCleanup(manager.remove, func)

    # Send and Receive Check
    client.send_message(
        QueueUrl=queue_url, MessageBody=json.dumps({'jurassic': 'block'}))
    # Live recordings need to wait for delivery and log propagation.
    if self.recording:
        time.sleep(60)
    log_events = list(manager.logs(func, "1970-1-1 UTC", "9170-1-1"))
    # Only keep log lines that carry the delivered SQS record payload.
    messages = [
        e["message"] for e in log_events
        if e["message"].startswith('{"Records')
    ]
    self.addCleanup(
        session.client("logs").delete_log_group,
        logGroupName="/aws/lambda/%s" % func_name)
    self.assertIn(
        'jurassic', json.loads(messages[0])["Records"][0]["body"])
def test_mu_metrics(self):
    """Metrics for a never-invoked policy lambda come back empty."""
    session_factory = self.replay_flight_data('test_mu_metrics')
    # Note: removed a stray 'resources' key that duplicated 'resource'
    # and was never read by Policy.
    p = Policy({
        'name': 's3-bucket-policy',
        'resource': 's3',
        'mode': {
            'type': 'cloudtrail',
            'events': ['CreateBucket'],
        },
        'actions': ['no-op']}, Config.empty())
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    end = datetime.utcnow()
    start = end - timedelta(1)
    results = mgr.metrics([pl], start, end, 3600)
    self.assertEqual(
        results,
        [{'Durations': [],
          'Errors': [],
          'Throttles': [],
          'Invocations': []}])
def test_cwe_instance(self):
    """ec2-instance-state mode provisions a lambda wired to a CWE rule."""
    session_factory = self.replay_flight_data("test_cwe_instance", zdata=True)
    p = Policy(
        {
            "resource": "s3",
            "name": "ec2-encrypted-vol",
            "mode": {"type": "ec2-instance-state", "events": ["pending"]},
        },
        Config.empty(),
    )
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    self.addCleanup(mgr.remove, pl)
    result = mgr.publish(pl, "Dev", role=ROLE)
    self.assert_items(
        result,
        {
            "Description": "cloud-custodian lambda policy",
            "FunctionName": "custodian-ec2-encrypted-vol",
            "Handler": "custodian_policy.run",
            "MemorySize": 512,
            "Runtime": "python2.7",
            "Timeout": 60,
        },
    )

    # The event rule should be enabled and match pending state changes.
    events = session_factory().client("events")
    result = events.list_rules(NamePrefix="custodian-ec2-encrypted-vol")
    self.assert_items(
        result["Rules"][0],
        {"State": "ENABLED", "Name": "custodian-ec2-encrypted-vol"},
    )
    self.assertEqual(
        json.loads(result["Rules"][0]["EventPattern"]),
        {
            "source": ["aws.ec2"],
            "detail": {"state": ["pending"]},
            "detail-type": ["EC2 Instance State-change Notification"],
        },
    )
def test_mu_metrics(self):
    """Metrics for a never-invoked policy lambda come back empty."""
    session_factory = self.replay_flight_data("test_mu_metrics")
    # Note: removed a stray "resources" key that duplicated "resource"
    # and was never read by Policy.
    p = Policy(
        {
            "name": "s3-bucket-policy",
            "resource": "s3",
            "mode": {"type": "cloudtrail", "events": ["CreateBucket"]},
            "actions": ["no-op"],
        },
        Config.empty(),
    )
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    end = datetime.utcnow()
    start = end - timedelta(1)
    results = mgr.metrics([pl], start, end, 3600)
    self.assertEqual(
        results,
        [{"Durations": [], "Errors": [], "Throttles": [], "Invocations": []}],
    )
def test_sns_subscriber_and_ipaddress(self):
    """End to end: SNS topic -> mu-provisioned lambda -> lambda log output."""
    # Shrink the IAM propagation wait so replayed runs are fast.
    self.patch(SNSSubscription, "iam_delay", 0.01)
    session_factory = self.replay_flight_data("test_sns_subscriber_and_ipaddress")
    session = session_factory()
    client = session.client("sns")

    # create an sns topic
    tname = "custodian-test-sns-sub"
    topic_arn = client.create_topic(Name=tname)["TopicArn"]
    self.addCleanup(client.delete_topic, TopicArn=topic_arn)

    # provision a lambda via mu
    params = dict(
        session_factory=session_factory,
        name="c7n-hello-world",
        role="arn:aws:iam::644160558196:role/custodian-mu",
        events=[SNSSubscription(session_factory, [topic_arn])],
    )
    func = helloworld.get_function(**params)
    manager = LambdaManager(session_factory)
    manager.publish(func)
    self.addCleanup(manager.remove, func)

    # now publish to the topic and look for lambda log output
    client.publish(TopicArn=topic_arn, Message="Greetings, program!")
    # Live recordings need to wait for delivery and log propagation.
    if self.recording:
        time.sleep(30)
    log_events = manager.logs(func, "1970-1-1 UTC", "9170-1-1")
    # Only keep log lines that carry the delivered SNS record payload.
    messages = [
        e["message"] for e in log_events if e["message"].startswith('{"Records')
    ]
    self.addCleanup(
        session.client("logs").delete_log_group,
        logGroupName="/aws/lambda/c7n-hello-world",
    )
    self.assertEqual(
        json.loads(messages[0])["Records"][0]["Sns"]["Message"],
        "Greetings, program!",
    )
def test_lambda_cross_account(self):
    """A lambda granted wildcard invoke is caught by the cross-account filter."""
    self.patch(
        CrossAccountAccessFilter, 'executor_factory', MainThreadExecutor)
    session_factory = self.replay_flight_data('test_cross_account_lambda')
    client = session_factory().client('lambda')
    name = 'c7n-cross-check'
    tmp_dir = tempfile.mkdtemp()
    self.addCleanup(os.rmdir, tmp_dir)

    # Build and publish a minimal function to test against.
    archive = PythonPackageArchive(tmp_dir, tmp_dir)
    archive.create()
    archive.add_contents('handler.py', LAMBDA_SRC)
    archive.close()
    func = LambdaFunction({
        'runtime': 'python2.7',
        'name': name, 'description': '',
        'handler': 'handler.handler',
        'memory_size': 128,
        'timeout': 5,
        'role': self.role}, archive)
    manager = LambdaManager(session_factory)
    # The publish result was bound to an unused local; dropped it.
    manager.publish(func)
    self.addCleanup(manager.remove, func)

    # Grant invoke to everyone -- the policy should flag this function.
    client.add_permission(
        FunctionName=name,
        StatementId='oops', Principal='*',
        Action='lambda:InvokeFunction')

    p = self.load_policy(
        {'name': 'lambda-cross',
         'resource': 'lambda',
         'filters': ['cross-account']},
        session_factory=session_factory)
    resources = p.run()
    self.assertEqual(len(resources), 1)
    self.assertEqual(resources[0]['FunctionName'], name)
def provision(self):
    """Provision the policy as a lambda, expanding runtime variables first."""
    # Lambda deps imported lazily to avoid a hard runtime dependency.
    from c7n.mu import PolicyLambda, LambdaManager

    def unassumed_factory(assume=False):
        # For cli usage by normal users, don't assume the role locally --
        # the role is still used by the lambda itself.
        return self.policy.session_factory(assume)

    with self.policy.ctx:
        self.policy.log.info(
            "Provisioning policy lambda %s", self.policy.name)
        self.policy.data = self.expand_variables({
            'account_id': self.policy.options.account_id,
            'region': self.policy.options.region,
        })
        try:
            manager = LambdaManager(self.policy.session_factory)
        except ClientError:
            manager = LambdaManager(unassumed_factory)
        return manager.publish(
            PolicyLambda(self.policy), 'current',
            role=self.policy.options.assume_role)
def test_cwe_asg_instance(self):
    """asg-instance-state mode provisions a lambda wired to a CWE rule."""
    session_factory = self.replay_flight_data("test_cwe_asg", zdata=True)
    p = Policy(
        {
            "resource": "asg",
            "name": "asg-spin-detector",
            "mode": {"type": "asg-instance-state", "events": ["launch-failure"]},
        },
        Config.empty(),
    )
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    self.addCleanup(mgr.remove, pl)
    result = mgr.publish(pl, "Dev", role=ROLE)
    self.assert_items(
        result,
        {
            "FunctionName": "custodian-asg-spin-detector",
            "Handler": "custodian_policy.run",
            "MemorySize": 512,
            "Runtime": "python2.7",
            "Timeout": 60,
        },
    )

    # The event rule should be enabled and match launch failures.
    events = session_factory().client("events")
    result = events.list_rules(NamePrefix="custodian-asg-spin-detector")
    self.assert_items(
        result["Rules"][0],
        {"State": "ENABLED", "Name": "custodian-asg-spin-detector"},
    )
    self.assertEqual(
        json.loads(result["Rules"][0]["EventPattern"]),
        {
            "source": ["aws.autoscaling"],
            "detail-type": ["EC2 Instance Launch Unsuccessful"],
        },
    )
def test_publish_a_lambda_with_layer_and_concurrency(self):
    """Layers and reserved concurrency apply on publish; republishing
    without concurrency removes it while leaving layers untouched."""
    factory = self.replay_flight_data('test_lambda_layer_concurrent_publish')
    mgr = LambdaManager(factory)
    layers = ['arn:aws:lambda:us-east-1:644160558196:layer:CustodianLayer:2']
    func = self.make_func(
        concurrency=5,
        layers=layers)
    self.addCleanup(mgr.remove, func)

    result = mgr.publish(func)
    self.assertEqual(result['Layers'][0]['Arn'], layers[0])
    state = mgr.get(func.name)
    self.assertEqual(state['Concurrency']['ReservedConcurrentExecutions'], 5)

    # Republish without concurrency: layers unchanged, concurrency removed.
    func = self.make_func(layers=layers)
    output = self.capture_logging("custodian.serverless", level=logging.DEBUG)
    result = mgr.publish(func)
    self.assertEqual(result['Layers'][0]['Arn'], layers[0])
    lines = output.getvalue().strip().split("\n")
    self.assertFalse('Updating function: test-foo-bar config Layers' in lines)
    self.assertTrue('Removing function: test-foo-bar concurrency' in lines)
def provision(config, session_factory):
    """Build and publish the mailer lambda with its fixed name/schedule."""
    periodic_event = CloudWatchEventSource(
        {'type': 'periodic', 'schedule': 'rate(5 minutes)'},
        session_factory, prefix="")
    func_config = {
        'name': 'cloud-custodian-mailer',
        'description': 'Cloud Custodian Mailer',
        'handler': 'periodic.dispatch',
        'runtime': 'python2.7',
        'memory_size': config['memory'],
        'timeout': config['timeout'],
        'role': config['role'],
        'subnets': config['subnets'],
        'security_groups': config['security_groups'],
        'events': [periodic_event],
    }
    archive = get_archive(config)
    func = LambdaFunction(func_config, archive)
    LambdaManager(session_factory).publish(func)
def test_cwe_trail(self):
    """cloudtrail mode renders the expected event pattern and lambda config."""
    session_factory = self.replay_flight_data('test_cwe_trail', zdata=True)
    p = Policy({
        'resource': 's3',
        'name': 's3-bucket-policy',
        'mode': {
            'type': 'cloudtrail',
            'events': ["CreateBucket"],
        },
        'filters': [
            {'type': 'missing-policy-statement',
             'statement_ids': ['RequireEncryptedPutObject']}],
        'actions': ['no-op']
    }, Config.empty())
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    self.addCleanup(mgr.remove, pl)
    result = mgr.publish(pl, 'Dev', role=ROLE)

    # The policy should expose exactly one cloudtrail event source.
    events = pl.get_events(session_factory)
    self.assertEqual(len(events), 1)
    event = events.pop()
    self.assertEqual(
        json.loads(event.render_event_pattern()),
        {u'detail': {u'eventName': [u'CreateBucket'],
                     u'eventSource': [u's3.amazonaws.com']},
         u'detail-type': ['AWS API Call via CloudTrail']})
    self.assert_items(
        result,
        {'Description': 'cloud-custodian lambda policy',
         'FunctionName': 'custodian-s3-bucket-policy',
         'Handler': 'custodian_policy.run',
         'MemorySize': 512,
         'Runtime': 'python2.7',
         'Timeout': 60})
def test_cwe_asg_instance(self):
    """asg-instance-state mode provisions a lambda wired to a CWE rule."""
    factory = self.replay_flight_data('test_cwe_asg', zdata=True)
    policy = Policy(
        {'resource': 'asg',
         'name': 'asg-spin-detector',
         'mode': {'type': 'asg-instance-state',
                  'events': ['launch-failure']}},
        Config.empty())
    func = PolicyLambda(policy)
    manager = LambdaManager(factory)
    published = manager.publish(func, 'Dev', role=self.role)
    expected_config = {
        'FunctionName': 'maid-asg-spin-detector',
        'Handler': 'maid_policy.run',
        'MemorySize': 512,
        'Runtime': 'python2.7',
        'Timeout': 60}
    self.assert_items(published, expected_config)
    # The event rule should be enabled and match launch failures.
    rules = factory().client('events').list_rules(
        NamePrefix="maid-asg-spin-detector")
    self.assert_items(
        rules['Rules'][0],
        {"State": "ENABLED", "Name": "maid-asg-spin-detector"})
    self.assertEqual(
        json.loads(rules['Rules'][0]['EventPattern']),
        {"source": ["aws.autoscaling"],
         "detail-type": ["EC2 Instance Launch Unsuccessful"]})
    manager.remove(func)
def test_cwe_instance(self):
    """ec2-instance-state mode provisions a lambda wired to a CWE rule."""
    session_factory = self.replay_flight_data(
        'test_cwe_instance', zdata=True)
    p = Policy({
        'resource': 's3',
        'name': 'ec2-encrypted-vol',
        'mode': {
            'type': 'ec2-instance-state',
            'events': ['pending']}
    }, Config.empty())
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    self.addCleanup(mgr.remove, pl)
    result = mgr.publish(pl, 'Dev', role=ROLE)
    self.assert_items(
        result,
        {'Description': 'cloud-custodian lambda policy',
         'FunctionName': 'custodian-ec2-encrypted-vol',
         'Handler': 'custodian_policy.run',
         'MemorySize': 512,
         'Runtime': 'python2.7',
         'Timeout': 60})

    # The event rule should be enabled and match pending state changes.
    events = session_factory().client('events')
    result = events.list_rules(NamePrefix="custodian-ec2-encrypted-vol")
    self.assert_items(
        result['Rules'][0],
        {"State": "ENABLED", "Name": "custodian-ec2-encrypted-vol"})
    self.assertEqual(
        json.loads(result['Rules'][0]['EventPattern']),
        {"source": ["aws.ec2"],
         "detail": {
             "state": ["pending"]},
         "detail-type": ["EC2 Instance State-change Notification"]})
def test_attach_encrypt(self):
    """attach-encrypt wires the s3crypt lambda into bucket notifications,
    and a subsequently uploaded object comes back server-side encrypted."""
    self.patch(s3, 'S3_AUGMENT_TABLE',
               [('get_bucket_location', 'Location', None, None)])
    self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
    session_factory = self.replay_flight_data('test_s3_attach_encrypt')
    bname = "custodian-attach-encrypt-test"
    role = "arn:aws:iam::644160558196:role/custodian-mu"
    self.maxDiff = None
    session = session_factory(region='us-west-2')
    client = session.client('s3')
    client.create_bucket(
        Bucket=bname,
        CreateBucketConfiguration={
            'LocationConstraint': 'us-west-2'})
    self.addCleanup(destroyBucket, client, bname)

    p = self.load_policy({
        'name': 'attach-encrypt',
        'resource': 's3',
        'filters': [{'Name': bname}],
        'actions': [{
            'type': 'attach-encrypt',
            'role': role}]
    }, session_factory=session_factory)
    # Remove the published encrypt function after the test.
    self.addCleanup(
        LambdaManager(
            functools.partial(session_factory, region='us-west-2')).remove,
        s3crypt.get_function(None, role))

    resources = p.run()
    self.assertEqual(len(resources), 1)
    #time.sleep(10)
    # The bucket should now notify the encrypt function on object create.
    notifications = client.get_bucket_notification_configuration(
        Bucket=bname)
    notifications.pop('ResponseMetadata')
    self.assertEqual(
        notifications,
        {'LambdaFunctionConfigurations': [{
            'Events': ['s3:ObjectCreated:*'],
            'Id': 'c7n-s3-encrypt',
            'LambdaFunctionArn':
            'arn:aws:lambda:us-west-2:644160558196:function:c7n-s3-encrypt'}]})
    client.put_object(
        Bucket=bname, Key='hello-world.txt',
        Body='hello world', ContentType='text/plain')
    #time.sleep(30)
    info = client.head_object(Bucket=bname, Key='hello-world.txt')
    self.assertTrue('ServerSideEncryption' in info)
def deploy_one(region_name, account, policy, sentry_dsn):
    """Publish the sentry log-forwarding lambda for one policy and region."""
    from c7n.mu import LambdaManager

    def session_factory():
        return boto3.Session(region_name=region_name)

    group = '/aws/lambda/custodian-{}'.format(policy['name'])
    group_arn = 'arn:aws:logs:{}:{}:log-group:{}:*'.format(
        region_name, account['account_id'], group)
    function = get_function(
        session_factory=session_factory,
        name='cloud-custodian-sentry',
        handler='handler.process_log_event',
        role=account['role'],
        log_groups=[{'logGroupName': group, 'arn': group_arn}],
        project=None,
        account_name=account['name'],
        account_id=account['account_id'],
        sentry_dsn=sentry_dsn,
    )
    log.info("Deploying lambda for {} in {}".format(group, region_name))
    LambdaManager(session_factory).publish(function)
def test_attach_encrypt(self):
    """attach-encrypt wires the s3crypt lambda into bucket notifications,
    and a subsequently uploaded object comes back server-side encrypted."""
    self.patch(s3, 'S3_AUGMENT_TABLE', [])
    session_factory = self.replay_flight_data('test_s3_attach_encrypt')
    bname = "custodian-attach-encrypt-test"
    role = 'arn:aws:iam::619193117841:role/lambda_s3_exec_role'
    self.maxDiff = None
    session = session_factory()
    client = session.client('s3')
    client.create_bucket(Bucket=bname)
    self.addCleanup(destroyBucket, client, bname)

    p = self.load_policy({
        'name': 'attach-encrypt',
        'resource': 's3',
        'filters': [{'Name': bname}],
        'actions': [{
            'type': 'attach-encrypt',
            'role': role}]
    }, session_factory=session_factory)
    # Remove the published encrypt function after the test.
    self.addCleanup(
        LambdaManager(session_factory).remove,
        s3crypt.get_function(None, role))

    resources = p.run()
    # The bucket should now notify the encrypt function on object create.
    notifications = client.get_bucket_notification_configuration(
        Bucket=bname)
    notifications.pop('ResponseMetadata')
    self.assertEqual(
        notifications,
        {'LambdaFunctionConfigurations': [{
            'Events': ['s3:ObjectCreated:*'],
            'Id': 'custodian-s3-encrypt',
            'LambdaFunctionArn':
            'arn:aws:lambda:us-east-1:619193117841:function:custodian-s3-encrypt'}]})
    client.put_object(
        Bucket=bname, Key='hello-world.txt',
        Body='hello world', ContentType='text/plain')
    info = client.head_object(Bucket=bname, Key='hello-world.txt')
    self.assertTrue('ServerSideEncryption' in info)
def test_update(self):
    """A changed tag value is reported as an addition with no removals."""
    added, removed = LambdaManager.diff_tags({'Foo': 'Bar'}, {'Foo': 'Baz'})
    assert added == {'Foo': 'Baz'}
    assert removed == []
def test_addition(self):
    """A brand-new tag shows up in the additions mapping."""
    added, removed = LambdaManager.diff_tags({}, {'Foo': 'Bar'})
    assert added == {'Foo': 'Bar'}
    assert removed == []
def test_removal(self):
    """A tag absent from the new set is scheduled for removal."""
    added, removed = LambdaManager.diff_tags({'Foo': 'Bar'}, {})
    assert added == {}
    assert removed == ['Foo']
def test_empty(self):
    """Identical empty tag sets produce no additions or removals."""
    added, removed = LambdaManager.diff_tags({}, {})
    assert added == {}
    assert removed == []
def test_cwe_update_config_and_code(self):
    """Republishing a changed policy updates both function code and config."""
    # Originally this was testing the no update case.. but
    # That is tricky to record, any updates to the code end up
    # causing issues due to checksum mismatches which imply updating
    # the function code / which invalidate the recorded data and
    # the focus of the test.
    session_factory = self.replay_flight_data("test_cwe_update", zdata=True)
    p = self.load_policy({
        "resource": "s3",
        "name": "s3-bucket-policy",
        "mode": {
            "type": "cloudtrail",
            "events": ["CreateBucket"],
            'runtime': 'python2.7'},
        "filters": [
            {"type": "missing-policy-statement",
             "statement_ids": ["RequireEncryptedPutObject"]},
        ],
        "actions": ["no-op"],
    })
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    result = mgr.publish(pl, "Dev", role=ROLE)
    self.addCleanup(mgr.remove, pl)

    # Second revision: more memory and an extra cloudtrail event.
    p = self.load_policy(
        {
            "resource": "s3",
            "name": "s3-bucket-policy",
            "mode": {
                "type": "cloudtrail",
                "memory": 256,
                'runtime': 'python2.7',
                "events": [
                    "CreateBucket",
                    {
                        "event": "PutBucketPolicy",
                        "ids": "requestParameters.bucketName",
                        "source": "s3.amazonaws.com",
                    },
                ],
            },
            "filters": [{
                "type": "missing-policy-statement",
                "statement_ids": ["RequireEncryptedPutObject"],
            }],
            "actions": ["no-op"],
        },
    )
    output = self.capture_logging("custodian.serverless", level=logging.DEBUG)
    result2 = mgr.publish(PolicyLambda(p), "Dev", role=ROLE)
    lines = output.getvalue().strip().split("\n")
    # Both a code update and a config update should have been logged.
    self.assertTrue(
        "Updating function custodian-s3-bucket-policy code" in lines)
    self.assertTrue(
        "Updating function: custodian-s3-bucket-policy config MemorySize" in lines)
    self.assertEqual(result["FunctionName"], result2["FunctionName"])
    # drive by coverage
    functions = [
        i for i in mgr.list_functions()
        if i["FunctionName"] == "custodian-s3-bucket-policy"
    ]
    self.assertTrue(len(functions), 1)
    start = 0
    end = time.time() * 1000
    self.assertEqual(list(mgr.logs(pl, start, end)), [])
def test_update(self):
    """A changed tag value is reported as an addition with no removals."""
    added, removed = LambdaManager.diff_tags({"Foo": "Bar"}, {"Foo": "Baz"})
    assert added == {"Foo": "Baz"}
    assert removed == []
def test_addition(self):
    """A brand-new tag shows up in the additions mapping."""
    added, removed = LambdaManager.diff_tags({}, {"Foo": "Bar"})
    assert added == {"Foo": "Bar"}
    assert removed == []
def test_removal(self):
    """A tag absent from the new set is scheduled for removal."""
    added, removed = LambdaManager.diff_tags({"Foo": "Bar"}, {})
    assert added == {}
    assert removed == ["Foo"]
def test_cwe_update_config_and_code(self):
    """Republishing a changed policy updates both function code and config."""
    # Originally this was testing the no update case.. but
    # That is tricky to record, any updates to the code end up
    # causing issues due to checksum mismatches which imply updating
    # the function code / which invalidate the recorded data and
    # the focus of the test.
    session_factory = self.replay_flight_data('test_cwe_update', zdata=True)
    p = Policy(
        {
            'resource': 's3',
            'name': 's3-bucket-policy',
            'mode': {
                'type': 'cloudtrail',
                'events': ["CreateBucket"],
            },
            'filters': [{
                'type': 'missing-policy-statement',
                'statement_ids': ['RequireEncryptedPutObject']
            }],
            'actions': ['no-op']
        },
        Config.empty())
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    result = mgr.publish(pl, 'Dev', role=ROLE)
    self.addCleanup(mgr.remove, pl)

    # Second revision: more memory and an extra cloudtrail event.
    p = Policy(
        {
            'resource': 's3',
            'name': 's3-bucket-policy',
            'mode': {
                'type': 'cloudtrail',
                'memory': 256,
                'events': [
                    "CreateBucket",
                    {
                        'event': 'PutBucketPolicy',
                        'ids': 'requestParameters.bucketName',
                        'source': 's3.amazonaws.com'
                    }
                ]
            },
            'filters': [{
                'type': 'missing-policy-statement',
                'statement_ids': ['RequireEncryptedPutObject']
            }],
            'actions': ['no-op']
        },
        Config.empty())
    output = self.capture_logging('custodian.lambda', level=logging.DEBUG)
    result2 = mgr.publish(PolicyLambda(p), 'Dev', role=ROLE)
    lines = output.getvalue().strip().split('\n')
    # Both a code update and a config update should have been logged.
    self.assertTrue(
        'Updating function custodian-s3-bucket-policy code' in lines)
    self.assertTrue(
        'Updating function: custodian-s3-bucket-policy config' in lines)
    self.assertEqual(result['FunctionName'], result2['FunctionName'])
    # drive by coverage
    functions = [
        i for i in mgr.list_functions()
        if i['FunctionName'] == 'custodian-s3-bucket-policy'
    ]
    self.assertTrue(len(functions), 1)
    start = 0
    end = time.time() * 1000
    self.assertEqual(list(mgr.logs(pl, start, end)), [])