def test_policy_name_user_agent(self):
    session = SessionFactory("us-east-1")
    session.policy_name = "test-policy-name-ua"
    client = session().client('s3')
    self.assertTrue(
        client._client_config.user_agent.startswith(
            "CloudCustodian(test-policy-name-ua)/%s" % version))
def test_local_session_agent_update(self):
    factory = SessionFactory('us-east-1')
    factory.policy_name = "check-ebs"
    client = local_session(factory).client('ec2')
    self.assertTrue(
        'check-ebs' in client._client_config.user_agent)
    factory.policy_name = "check-ec2"
    factory.update(local_session(factory))
    client = local_session(factory).client('ec2')
    self.assertTrue(
        'check-ec2' in client._client_config.user_agent)
def get_session(account, session_name, region):
    if account.get('role'):
        roles = account['role']
        if isinstance(roles, six.string_types):
            roles = [roles]
        s = None
        for r in roles:
            try:
                # Chain the assume-role calls: each assumed session becomes
                # the credential source for the next role in the list.
                s = assumed_session(
                    r, session_name, region=region,
                    external_id=account.get('external_id'), session=s)
            except ClientError as e:
                log.error(
                    "unable to obtain credentials for account:%s role:%s error:%s",
                    account['name'], r, e)
                raise
        return s
    elif account.get('profile'):
        return SessionFactory(region, account['profile'])()
    else:
        raise ValueError(
            "No profile or role assume specified for account %s" % account)
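# A minimal usage sketch for get_session above. The account mapping keys
# (name / role / external_id) match the function's own lookups; the role
# ARNs and external id are hypothetical placeholders. With a list of
# roles, each hop is assumed using the previous assumed session as its
# credential source.
account = {
    'name': 'sandbox',
    'role': ['arn:aws:iam::111111111111:role/Bastion',
             'arn:aws:iam::222222222222:role/Audit'],
    'external_id': 'example-external-id'}
session = get_session(account, 'custodian-scan', 'us-east-1')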
def main():
    parser = setup_parser()
    options = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('botocore').setLevel(logging.ERROR)

    if not options.group and not options.prefix:
        print("Error: Either group or prefix must be specified")
        sys.exit(1)

    session_factory = SessionFactory(
        options.region, options.profile, options.assume)

    groups = get_groups(session_factory, options)
    func = get_function(session_factory, options, groups)
    manager = LambdaManager(session_factory)

    try:
        manager.publish(func)
    except Exception:
        import traceback
        import pdb
        import sys
        traceback.print_exc()
        pdb.post_mortem(sys.exc_info()[-1])
def get_session(role, session_name, profile):
    region = os.environ.get('AWS_DEFAULT_REGION', 'eu-west-1')
    if role:
        return assumed_session(role, session_name, region=region)
    else:
        return SessionFactory(region, profile)()
def test_session_factory(self):
    factory = SessionFactory('us-east-1')
    session = factory()
    self.assertTrue(
        session._session.user_agent().startswith(
            'CloudCustodian/%s' % version))
def get_session_factory(self, options):
    return SessionFactory(
        options.region,
        options.profile,
        options.assume_role,
        options.external_id)
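# A usage sketch for get_session_factory (hypothetical values): options
# only needs region/profile/assume_role/external_id attributes, so an
# argparse.Namespace stands in, and `cli` stands in for whatever object
# defines the method above.
from argparse import Namespace

opts = Namespace(
    region='us-east-1', profile=None,
    assume_role='arn:aws:iam::111111111111:role/CustodianRun',
    external_id=None)
factory = cli.get_session_factory(opts)
client = factory().client('ec2')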
def region_gc(options, region, policy_config, policies):
    session_factory = SessionFactory(
        region=region,
        assume_role=policy_config.assume_role,
        profile=policy_config.profile,
        external_id=policy_config.external_id)

    manager = mu.LambdaManager(session_factory)
    funcs = list(manager.list_functions(options.prefix))
    client = session_factory().client('lambda')

    remove = []
    current_policies = [p.name for p in policies]
    pattern = re.compile(options.policy_regex)
    for f in funcs:
        if not pattern.match(f['FunctionName']):
            continue
        match = False
        for pn in current_policies:
            if f['FunctionName'].endswith(pn):
                match = True
        if options.present:
            if match:
                remove.append(f)
        elif not match:
            remove.append(f)

    for n in remove:
        events = []
        try:
            result = client.get_policy(FunctionName=n['FunctionName'])
        except ClientError as e:
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                log.warning(
                    "Region:%s Lambda Function or Access Policy Statement missing: %s",
                    region, n['FunctionName'])
            else:
                log.warning(
                    "Region:%s Unexpected error: %s for function %s",
                    region, e, n['FunctionName'])
            # Continue on with the next function instead of raising an exception
            continue

        if 'Policy' in result:
            p = json.loads(result['Policy'])
            for s in p['Statement']:
                principal = s.get('Principal')
                if not isinstance(principal, dict):
                    log.info("Skipping function %s" % n['FunctionName'])
                    continue
                if principal == {'Service': 'events.amazonaws.com'}:
                    events.append(
                        mu.CloudWatchEventSource({}, session_factory))
                elif principal == {'Service': 'config.amazonaws.com'}:
                    events.append(
                        mu.ConfigRule({}, session_factory))

        f = mu.LambdaFunction({
            'name': n['FunctionName'],
            'role': n['Role'],
            'handler': n['Handler'],
            'timeout': n['Timeout'],
            'memory_size': n['MemorySize'],
            'description': n['Description'],
            'runtime': n['Runtime'],
            'events': events}, None)

        log.info("Region:%s Removing %s", region, n['FunctionName'])
        if options.dryrun:
            log.info("Dryrun skipping removal")
            continue
        manager.remove(f)
        log.info("Region:%s Removed %s", region, n['FunctionName'])
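# A toy sketch of the selection rule above in its default branch (i.e.
# without options.present): a deployed function is stale when no current
# policy name is a suffix of its FunctionName. Names are hypothetical.
current_policies = ['check-ebs']
deployed = ['custodian-check-ebs', 'custodian-old-policy']
stale = [fn for fn in deployed
         if not any(fn.endswith(pn) for pn in current_policies)]
assert stale == ['custodian-old-policy']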
def get_session(role, session_name, profile, region):
    if role:
        return assumed_session(role, session_name, region=region)
    else:
        return SessionFactory(region, profile)()
def process_bucket(bucket_name, prefix, output=None, uid_filter=None,
                   event_filter=None, service_filter=None,
                   not_service_filter=None, data_dir=None):
    # Relies on a module-level `options` namespace for credentials config.
    session_factory = SessionFactory(
        options.region, options.profile, options.assume_role)
    s3 = session_factory().client(
        's3', config=Config(signature_version='s3v4'))
    paginator = s3.get_paginator('list_objects')

    # PyPy has some memory leaks.... :-(
    pool = Pool(maxtasksperchild=10)

    t = time.time()
    object_count = object_size = 0

    log.info("Processing:%d cloud-trail %s" % (cpu_count(), prefix))

    record_processor = partial(
        process_records,
        uid_filter=uid_filter,
        event_filter=event_filter,
        service_filter=service_filter,
        not_service_filter=not_service_filter,
        data_dir=data_dir)
    object_processor = partial(
        process_trail_set,
        map_records=record_processor,
        reduce_results=reduce_records,
        trail_bucket=bucket_name)

    db = TrailDB(output)
    bsize = math.ceil(1000 / float(cpu_count()))
    for page in paginator.paginate(Bucket=bucket_name, Prefix=prefix):
        objects = page.get('Contents', ())
        object_count += len(objects)
        object_size += sum([o['Size'] for o in objects])

        pt = time.time()
        if pool:
            results = pool.map(object_processor, chunks(objects, bsize))
        else:
            results = map(object_processor, chunks(objects, bsize))

        st = time.time()
        log.info("Loaded page time:%0.2fs", st - pt)

        for r in results:
            for fpath in r:
                with open(fpath) as fh:
                    db.insert(load(fh.read()))
                os.remove(fpath)

        db.flush()
        # `l` carries the previous iteration's timestamp for per-page timing.
        l = t  # NOQA
        t = time.time()
        log.info("Stored page time:%0.2fs", t - st)
        log.info("Processed page time:%0.2f size:%s count:%s" % (
            t - l, object_size, object_count))

    if objects:
        log.info('Last Page Key: %s', objects[-1]['Key'])
def resources_gc_prefix(options, policy_collection):
    """Garbage collect old custodian policies based on prefix.

    We attempt to introspect to find the event sources for a policy
    but without the old configuration this is implicit.
    """
    session_factory = SessionFactory(
        options.region, options.profile, options.assume_role)
    manager = mu.LambdaManager(session_factory)
    funcs = list(manager.list_functions(options.prefix))
    client = session_factory().client('lambda')

    remove = []
    current_policies = [p.name for p in policy_collection]
    for f in funcs:
        pn = f['FunctionName'].split('-', 1)[1]
        if pn not in current_policies:
            remove.append(f)

    for n in remove:
        events = []
        try:
            result = client.get_policy(FunctionName=n['FunctionName'])
        except ClientError as e:
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                log.warning(
                    "Lambda Function or Access Policy Statement missing: %s",
                    n['FunctionName'])
            else:
                log.warning(
                    "Unexpected error: %s for function %s",
                    e, n['FunctionName'])
            # Continue on with the next function instead of raising an exception
            continue

        if 'Policy' in result:
            p = json.loads(result['Policy'])
            for s in p['Statement']:
                principal = s.get('Principal')
                if not isinstance(principal, dict):
                    log.info("Skipping function %s", n['FunctionName'])
                    continue
                if principal == {'Service': 'events.amazonaws.com'}:
                    events.append(
                        mu.CloudWatchEventSource({}, session_factory))
                elif principal == {'Service': 'config.amazonaws.com'}:
                    events.append(
                        mu.ConfigRule({}, session_factory))

        f = mu.LambdaFunction({
            'name': n['FunctionName'],
            'role': n['Role'],
            'handler': n['Handler'],
            'timeout': n['Timeout'],
            'memory_size': n['MemorySize'],
            'description': n['Description'],
            'runtime': n['Runtime'],
            'events': events}, None)

        log.info("Removing %s", n['FunctionName'])
        if options.dryrun:
            log.info("Dryrun skipping removal")
            continue
        manager.remove(f)
        log.info("Removed %s", n['FunctionName'])
def run(organization, hook_context, github_url, github_token,
        verbose, metrics=False, since=None, assume=None, region=None):
    """Scan an organization's repos for status hooks."""
    logging.basicConfig(level=logging.DEBUG)

    since = dateparser.parse(
        since, settings={
            'RETURN_AS_TIMEZONE_AWARE': True, 'TO_TIMEZONE': 'UTC'})

    headers = {"Authorization": "token {}".format(github_token)}
    response = requests.post(
        github_url, headers=headers,
        json={'query': query, 'variables': {'organization': organization}})

    result = response.json()
    if response.status_code != 200 or 'errors' in result:
        raise Exception(
            "Query failed with status code {}: {}".format(
                response.status_code, response.content))

    now = datetime.utcnow().replace(tzinfo=tzutc())
    stats = Counter()
    repo_metrics = RepoMetrics(
        Bag(session_factory=SessionFactory(region, assume_role=assume)),
        {'namespace': DEFAULT_NAMESPACE})

    for r in result['data']['organization']['repositories']['nodes']:
        commits = jmespath.search(
            'pullRequests.edges[].node[].commits[].nodes[].commit[]', r)
        if not commits:
            continue
        log.debug("processing repo: %s prs: %d", r['name'], len(commits))
        repo_metrics.dims = {
            'Hook': hook_context,
            'Repo': '{}/{}'.format(organization, r['name'])}

        # Each commit represents a separate pr.
        for c in commits:
            process_commit(c, r, repo_metrics, stats, since, now)

    repo_metrics.dims = None

    if stats['missing']:
        repo_metrics.put_metric(
            'RepoHookPending', stats['missing'], 'Count',
            Hook=hook_context)
        repo_metrics.put_metric(
            'RepoHookLatency', stats['missing_time'], 'Seconds',
            Hook=hook_context)

    if not metrics:
        print(dumps(repo_metrics.buf, indent=2))
        return
    repo_metrics.BUF_SIZE = 20
    repo_metrics.flush()