def disable(config, tags, accounts, master, debug,
            suspend, disable_detector, delete_detector, dissociate, region):
    """Suspend guard duty in the given accounts.

    Exactly one of ``suspend``, ``disable_detector``, or ``dissociate``
    must be truthy; ``delete_detector`` may additionally be set to remove
    the member detectors entirely.
    """
    accounts_config, master_info, executor = guardian_init(
        config, debug, master, accounts, tags)

    # Exactly one disable strategy must be selected.
    # Fix: the original message was built from two adjacent string
    # literals with no separating space ("...dissociatecan be specified.").
    if sum(map(int, (suspend, disable_detector, dissociate))) != 1:
        raise ValueError(
            "One and only one of suspend, disable-detector, dissociate "
            "can be specified.")

    master_session = get_session(
        master_info['role'], 'c7n-guardian',
        master_info.get('profile'), region)
    master_client = master_session.client('guardduty')
    detector_id = get_or_create_detector_id(master_client)

    if suspend:
        # Suspension is driven entirely from the master account; member
        # accounts are not contacted.
        unprocessed = master_client.stop_monitoring_members(
            DetectorId=detector_id,
            AccountIds=[a['account_id'] for a in accounts_config['accounts']]
        ).get('UnprocessedAccounts', ())
        if unprocessed:
            log.warning(
                "Following accounts where unprocessed\n %s",
                format_event(unprocessed))
        log.info("Stopped monitoring %d accounts in master",
                 len(accounts_config['accounts']))
        return

    if dissociate:
        master_client.disassociate_members(
            DetectorId=detector_id,
            AccountIds=[a['account_id'] for a in accounts_config['accounts']])

    # Seems like there's a couple of ways to disable an account
    # delete the detector (member), disable the detector (master or member),
    # or disassociate members, or from member disassociate from master.
    for a in accounts_config['accounts']:
        member_session = get_session(
            a['role'], 'c7n-guardian', a.get('profile'), region)
        member_client = member_session.client('guardduty')
        m_detector_id = get_or_create_detector_id(member_client)
        if disable_detector:
            member_client.update_detector(
                DetectorId=m_detector_id, Enable=False)
            log.info("Disabled detector in account:%s", a['name'])
        if dissociate:
            try:
                log.info("Disassociated member account:%s", a['name'])
                result = member_client.disassociate_from_master_account(
                    DetectorId=m_detector_id)
                log.info("Result %s", format_event(result))
            except ClientError as e:
                # Already dissociated accounts raise InvalidInputException;
                # skip them rather than failing the whole run.
                if e.response['Error']['Code'] == 'InvalidInputException':
                    continue
        if delete_detector:
            member_client.delete_detector(DetectorId=m_detector_id)
            log.info("Deleted detector in account:%s", a['name'])
def dispatch_event(event, context):
    """Lambda entry point: run the bundled policies against *event*.

    Returns True when policies were loaded and pushed, False when the
    bundled config has no policies, or None when the event is a failed
    API operation that is skipped.
    """
    error = event.get('detail', {}).get('errorCode')
    if error:
        # CloudTrail events for failed API calls carry an errorCode;
        # there is nothing to act on.
        log.debug("Skipping failed operation: %s" % error)
        return
    # NOTE(review): 'debug' is set unconditionally, so the branch below
    # always logs every event — confirm this is intentional.
    event['debug'] = True
    if event['debug']:
        log.info("Processing event\n %s", format_event(event))
    # policies file should always be valid in lambda so do loading naively
    with open('config.json') as f:
        policy_config = json.load(f)
    if not policy_config or not policy_config.get('policies'):
        return False
    # TODO. This enshrines an assumption of a single policy per lambda.
    options_overrides = policy_config['policies'][0].get('mode', {}).get(
        'execution-options', {})
    options = Config.empty(**options_overrides)
    load_resources()
    policies = PolicyCollection.from_data(policy_config, options)
    if policies:
        for p in policies:
            p.push(event, context)
    return True
def dispatch_event(event, context):
    """Lambda entry point: run the bundled policies against *event*.

    Behavior is tuned by module flags: C7N_SKIP_EVTERR (ignore failed API
    operations), C7N_DEBUG_EVENT (log each event), C7N_CATCH_ERR (continue
    past per-policy execution errors instead of re-raising).
    """
    error = event.get('detail', {}).get('errorCode')
    if error and C7N_SKIP_EVTERR:
        log.debug("Skipping failed operation: %s" % error)
        return
    if C7N_DEBUG_EVENT:
        event['debug'] = True
        log.info("Processing event\n %s", format_event(event))
    # Policies file should always be valid in lambda so do loading naively
    # policy_config is cached at module scope across warm invocations.
    global policy_config
    if policy_config is None:
        with open('config.json') as f:
            policy_config = json.load(f)
    if not policy_config or not policy_config.get('policies'):
        return False
    options = init_config(policy_config)
    policies = PolicyCollection.from_data(policy_config, options)
    if policies:
        for p in policies:
            try:
                # validation provides for an initialization point for
                # some filters/actions.
                p.validate()
                p.push(event, context)
            except Exception:
                log.exception("error during policy execution")
                if C7N_CATCH_ERR:
                    continue
                raise
    return True
def dispatch_event(event, context):
    """Lambda entry point: run the bundled policies against *event*.

    Resolves the execution account id (from an assume_role override or
    STS, cached at module scope), prepares a per-execution output
    directory, then pushes the event through every bundled policy.
    """
    global account_id
    error = event.get('detail', {}).get('errorCode')
    if error:
        log.debug("Skipping failed operation: %s" % error)
        return
    # NOTE(review): 'debug' is set unconditionally, so the branch below
    # always logs every event — confirm this is intentional.
    event['debug'] = True
    if event['debug']:
        log.info("Processing event\n %s", format_event(event))
    # Policies file should always be valid in lambda so do loading naively
    with open('config.json') as f:
        policy_config = json.load(f)
    if not policy_config or not policy_config.get('policies'):
        return False
    # Initialize output directory, we've seen occassional perm issues with
    # lambda on temp directory and changing unix execution users, so
    # use a per execution temp space.
    output_dir = os.environ.get('C7N_OUTPUT_DIR', '/tmp/' + str(uuid.uuid4()))
    if not os.path.exists(output_dir):
        try:
            os.mkdir(output_dir)
        except OSError as error:
            # Best-effort: log and continue with the configured path.
            log.warning("Unable to make output directory: {}".format(error))
    # TODO. This enshrines an assumption of a single policy per lambda.
    options_overrides = policy_config['policies'][0].get('mode', {}).get(
        'execution-options', {})
    # if using assume role in lambda ensure that the correct
    # execution account is captured in options.
    if 'assume_role' in options_overrides:
        # Account id is the fifth field of the role ARN.
        account_id = options_overrides['assume_role'].split(':')[4]
    elif account_id is None:
        session = boto3.Session()
        account_id = get_account_id_from_sts(session)
    # Historical compatibility with manually set execution options
    # previously this was a boolean, its now a string value with the
    # boolean flag triggering a string value of 'aws'
    if 'metrics_enabled' in options_overrides and isinstance(
            options_overrides['metrics_enabled'], bool):
        options_overrides['metrics_enabled'] = 'aws'
    options_overrides['account_id'] = account_id
    if 'output_dir' not in options_overrides:
        options_overrides['output_dir'] = output_dir
    options = Config.empty(**options_overrides)
    policies = PolicyCollection.from_data(policy_config, options)
    if policies:
        for p in policies:
            p.push(event, context)
    return True
def dispatch_event(event, context):
    """Lambda entry point: run the bundled policies against *event*.

    Returns True when policies were pushed, False when the bundled config
    has no policies, or None when a failed API operation is skipped.
    """
    error = event.get('detail', {}).get('errorCode')
    if error:
        log.debug("Skipping failed operation: %s" % error)
        return
    # NOTE(review): 'debug' is set unconditionally, so the branch below
    # always logs every event — confirm this is intentional.
    event['debug'] = True
    if event['debug']:
        log.info("Processing event\n %s", format_event(event))
    # policies file should always be valid in lambda so do loading naively
    with open('config.json') as f:
        policy_config = json.load(f)
    if not policy_config or not policy_config.get('policies'):
        return False
    # TODO. This enshrines an assumption of a single policy per lambda.
    options_overrides = policy_config[
        'policies'][0].get('mode', {}).get('execution-options', {})
    options = Config.empty(**options_overrides)
    load_resources()
    policies = PolicyCollection.from_data(policy_config, options)
    if policies:
        for p in policies:
            p.push(event, context)
    return True
def dispatch_event(event, context):
    """Lambda entry point: run the bundled policies against *event*.

    Config, options, and resource loading are cached at module scope so
    warm invocations skip the cold-start work. Behavior is tuned by
    module flags: C7N_SKIP_EVTERR, C7N_DEBUG_EVENT, C7N_CATCH_ERR.
    """
    error = event.get('detail', {}).get('errorCode')
    if error and C7N_SKIP_EVTERR:
        log.debug("Skipping failed operation: %s" % error)
        return

    # one time initialization for cold starts.
    global policy_config, policy_data
    if policy_config is None:
        with open('config.json') as f:
            policy_data = json.load(f)
        policy_config = init_config(policy_data)
        # Only load the resource types the bundled policies reference.
        load_resources(StructureParser().get_resource_types(policy_data))

    if C7N_DEBUG_EVENT:
        event['debug'] = True
        log.info("Processing event\n %s", format_event(event))

    if not policy_data or not policy_data.get('policies'):
        return False

    policies = PolicyCollection.from_data(policy_data, policy_config)
    for p in policies:
        try:
            # validation provides for an initialization point for
            # some filters/actions.
            p.validate()
            p.push(event, context)
        except Exception:
            log.exception("error during policy execution")
            if C7N_CATCH_ERR:
                continue
            raise
    return True
def test_format_event(self):
    """format_event output should parse to the same JSON as the input dict."""
    event = {
        'message': 'This is a test',
        'timestamp': 1234567891011,
    }
    event_json = ('{\n "timestamp": 1234567891011, \n'
                  ' "message": "This is a test"\n}')
    # Compare parsed JSON rather than raw strings: key order and exact
    # whitespace of the serialized form are not part of the contract and
    # made the string comparison brittle across interpreter versions.
    self.assertEqual(
        json.loads(utils.format_event(event)), json.loads(event_json))
def dispatch_event(event, context):
    """Lambda entry point: load policies from config.json and push *event*.

    Failed API operations (events carrying an errorCode) are logged and
    skipped.
    """
    log.info("Processing event\n %s", format_event(event))
    error = event.get('detail', {}).get('errorCode')
    if error:
        log.debug("Skipping failed operation: %s" % error)
        return
    # NOTE(review): 'debug' is set on every processed event — presumably
    # consumed downstream by the policies; confirm.
    event['debug'] = True
    policies = load(Config.empty(), 'config.json', format='json')
    for p in policies:
        p.push(event, context)
def test_format_event(self):
    """format_event output parses to the same JSON as the expected literal."""
    payload = {
        'message': 'This is a test',
        'timestamp': 1234567891011,
    }
    expected = (
        '{\n "timestamp": 1234567891011, \n'
        ' "message": "This is a test"\n}'
    )
    actual = json.loads(utils.format_event(payload))
    self.assertEqual(actual, json.loads(expected))
def dispatch_event(event, context):
    """Lambda entry point: run the bundled policies against *event*.

    The execution account id is resolved via STS once and cached at
    module scope; a per-execution output directory is prepared before
    policies run.
    """
    global account_id
    if account_id is None:
        session = boto3.Session()
        account_id = get_account_id_from_sts(session)
    error = event.get('detail', {}).get('errorCode')
    if error:
        log.debug("Skipping failed operation: %s" % error)
        return
    # NOTE(review): 'debug' is set unconditionally, so the branch below
    # always logs every event — confirm this is intentional.
    event['debug'] = True
    if event['debug']:
        log.info("Processing event\n %s", format_event(event))
    # policies file should always be valid in lambda so do loading naively
    with open('config.json') as f:
        policy_config = json.load(f)
    if not policy_config or not policy_config.get('policies'):
        return False
    # Initialize output directory, we've seen occassional perm issues with
    # lambda on temp directory and changing unix execution users, so
    # use a per execution temp space.
    output_dir = os.environ.get(
        'C7N_OUTPUT_DIR', '/tmp/' + str(uuid.uuid4()))
    if not os.path.exists(output_dir):
        try:
            os.mkdir(output_dir)
        except OSError as error:
            # Best-effort: log and continue with the configured path.
            log.warning("Unable to make output directory: {}".format(error))
    # TODO. This enshrines an assumption of a single policy per lambda.
    options_overrides = policy_config[
        'policies'][0].get('mode', {}).get('execution-options', {})
    options_overrides['account_id'] = account_id
    if 'output_dir' not in options_overrides:
        options_overrides['output_dir'] = output_dir
    options = Config.empty(**options_overrides)
    load_resources()
    policies = PolicyCollection.from_data(policy_config, options)
    if policies:
        for p in policies:
            p.push(event, context)
    return True
def dispatch_event(event, context):
    """Lambda entry point: run the bundled policies against *event*.

    The execution account id is resolved via STS once and cached at
    module scope; a per-execution output directory is prepared before
    policies run.
    """
    global account_id
    if account_id is None:
        session = boto3.Session()
        account_id = get_account_id_from_sts(session)
    error = event.get('detail', {}).get('errorCode')
    if error:
        log.debug("Skipping failed operation: %s" % error)
        return
    # NOTE(review): 'debug' is set unconditionally, so the branch below
    # always logs every event — confirm this is intentional.
    event['debug'] = True
    if event['debug']:
        log.info("Processing event\n %s", format_event(event))
    # policies file should always be valid in lambda so do loading naively
    with open('config.json') as f:
        policy_config = json.load(f)
    if not policy_config or not policy_config.get('policies'):
        return False
    # Initialize output directory, we've seen occassional perm issues with
    # lambda on temp directory and changing unix execution users, so
    # use a per execution temp space.
    output_dir = os.environ.get('C7N_OUTPUT_DIR', '/tmp/' + str(uuid.uuid4()))
    if not os.path.exists(output_dir):
        try:
            os.mkdir(output_dir)
        except OSError as error:
            # Best-effort: log and continue with the configured path.
            log.warning("Unable to make output directory: {}".format(error))
    # TODO. This enshrines an assumption of a single policy per lambda.
    options_overrides = policy_config['policies'][0].get('mode', {}).get(
        'execution-options', {})
    options_overrides['account_id'] = account_id
    if 'output_dir' not in options_overrides:
        options_overrides['output_dir'] = output_dir
    options = Config.empty(**options_overrides)
    load_resources()
    policies = PolicyCollection.from_data(policy_config, options)
    if policies:
        for p in policies:
            p.push(event, context)
    return True
def enable_region(master_info, accounts_config, executor, message, region):
    """Enroll the configured accounts as guard duty members in *region*.

    Drives the full member lifecycle from the master account: re-adds
    resigned members, restarts monitoring on suspended ones, creates and
    invites new members, then accepts invitations from each member
    account concurrently. Returns the list of newly processed member
    records, or the already-active account ids when nothing is to do.
    """
    master_session = get_session(
        master_info.get('role'), 'c7n-guardian',
        master_info.get('profile'), region=region)
    master_client = master_session.client('guardduty')
    detector_id = get_or_create_detector_id(master_client)

    # Temporary (need a separate gd config file).. support master trusted
    # ip set mgmt.
    if 'trusted_ip_uri' in master_info:
        ip_set, ip_set_change = get_or_create_ip_set(
            master_client, detector_id, master_info.get('trusted_ip_uri'))
        if ip_set_change:
            log.info(
                "Region:%s master %s ipset %s for guard duty",
                region, ip_set_change, ip_set)

    # OnlyAssociated="FALSE" includes invited/resigned/removed members.
    results = master_client.get_paginator(
        'list_members').paginate(DetectorId=detector_id,
                                 OnlyAssociated="FALSE")
    extant_members = results.build_full_result().get('Members', ())
    extant_ids = {m['AccountId'] for m in extant_members}
    # Find active members
    active_ids = {m['AccountId'] for m in extant_members
                  if m['RelationshipStatus'] == 'Enabled'}
    # Find invited members
    invited_ids = {m['AccountId'] for m in extant_members
                   if m['RelationshipStatus'] == 'Invited'}

    # Find extant members currently have guardduty disabled(removed)
    resigned_ids = {m['AccountId'] for m in extant_members
                    if m['RelationshipStatus'] == 'Resigned'}
    resigned_ids = {a['account_id'] for a in accounts_config['accounts']
                    if a['account_id'] in resigned_ids}
    if resigned_ids:
        # Resigned members must be deleted before they can be re-created.
        master_client.delete_members(
            DetectorId=detector_id, AccountIds=list(resigned_ids))
        log.info(
            "Region:%s %d resigned members removed to re-enable.",
            region, len(resigned_ids))
        extant_ids = extant_ids.difference(resigned_ids)

    # Find extant members not currently enabled
    suspended_ids = {m['AccountId'] for m in extant_members
                     if m['RelationshipStatus'] == 'Disabled'}
    # Filter by accounts under consideration per config and cli flags
    suspended_ids = {a['account_id'] for a in accounts_config['accounts']
                     if a['account_id'] in suspended_ids}
    if suspended_ids:
        unprocessed = master_client.start_monitoring_members(
            DetectorId=detector_id,
            AccountIds=list(suspended_ids)).get('UnprocessedAccounts')
        if unprocessed:
            log.warning(
                "Region: %s Unprocessed accounts on re-start monitoring %s",
                region, format_event(unprocessed))
        log.info("Region: %s Restarted monitoring on %d accounts",
                 region, len(suspended_ids))

    members = [{'AccountId': account['account_id'],
                'Email': account['email']}
               for account in accounts_config['accounts']
               if account['account_id'] not in extant_ids]

    if not members:
        if not suspended_ids and not invited_ids:
            log.info("Region:%s All accounts already enabled", region)
        return list(active_ids)

    # GuardDuty service limit on members per master.
    if (len(members) + len(extant_ids)) > 1000:
        raise ValueError(
            ("Region:%s Guard Duty only supports "
             "1000 member accounts per master account") % (region))

    log.info(
        "Region:%s Enrolling %d accounts in guard duty",
        region, len(members))

    # create_members accepts at most 25 accounts per call.
    unprocessed = []
    for account_set in chunks(members, 25):
        unprocessed.extend(master_client.create_members(
            DetectorId=detector_id,
            AccountDetails=account_set).get('UnprocessedAccounts', []))
    if unprocessed:
        log.warning(
            "Region:%s accounts where unprocessed - member create\n %s",
            region, format_event(unprocessed))

    log.info("Region:%s Inviting %d member accounts", region, len(members))
    unprocessed = []
    # Skip accounts that already hold an unanswered invitation.
    for account_set in chunks(
            [m for m in members if not m['AccountId'] in invited_ids], 25):
        params = {'AccountIds': [m['AccountId'] for m in account_set],
                  'DetectorId': detector_id}
        if message:
            params['Message'] = message
        unprocessed.extend(master_client.invite_members(
            **params).get('UnprocessedAccounts', []))
    if unprocessed:
        log.warning(
            "Region:%s accounts where unprocessed invite-members\n %s",
            region, format_event(unprocessed))

    # Recompute against active ids: invitations must be accepted by every
    # account that is not already enabled.
    members = [{'AccountId': account['account_id'],
                'Email': account['email']}
               for account in accounts_config['accounts']
               if account['account_id'] not in active_ids]

    log.info("Region:%s Accepting %d invitations in members",
             region, len(members))

    # Added in https://github.com/capitalone/cloud-custodian/pull/2445
    # Not entirely sure this is needed, i haven't needed it across any
    # of our usage, but the respecting the user report as accurate. -kapil
    time.sleep(5)

    with executor(max_workers=WORKER_COUNT) as w:
        futures = {}
        for a in accounts_config['accounts']:
            if a == master_info:
                continue
            if a['account_id'] in active_ids:
                continue
            futures[w.submit(
                enable_account, a, master_info['account_id'], region)] = a

        for f in as_completed(futures):
            a = futures[f]
            if f.exception():
                log.error("Region:%s Error processing account:%s error:%s",
                          region, a['name'], f.exception())
                continue
            if f.result():
                log.info('Region:%s Enabled guard duty on account:%s',
                         region, a['name'])
    return members
def debug(event, context):
    """Dump interpreter details, the environment, and the event to stdout."""
    for value in (sys.executable, sys.version, sys.path):
        print(value)
    pprint.pprint(os.environ)
    print(format_event(event))
def save(self, record):
    """Persist *record* to the backing table, logging it first.

    The debug log is best-effort: records that cannot be serialized by
    format_event are still written to the table.
    """
    try:
        serialized = format_event(record)
    except TypeError:
        pass
    else:
        log.info("Serializing record %s", serialized)
    self.table.put_item(Item=record)
def test_format_event(self):
    """format_event output parses to the same JSON as the expected literal."""
    payload = {"message": "This is a test", "timestamp": 1234567891011}
    expected = (
        '{\n "timestamp": 1234567891011, \n'
        ' "message": "This is a test"\n}'
    )
    actual = json.loads(utils.format_event(payload))
    self.assertEqual(actual, json.loads(expected))
def debug(event, context):
    """Dump interpreter details, the environment, and the event to stdout."""
    # Fix: converted Python 2 print statements to print() calls — the
    # old form is a SyntaxError under Python 3 and inconsistent with the
    # rest of the codebase.
    print(sys.executable)
    print(sys.version)
    print(sys.path)
    pprint.pprint(os.environ)
    print(format_event(event))
def enable(config, master, tags, accounts, debug, message, region):
    """Enable guard duty on a set of accounts.

    From the master account: restarts monitoring on suspended members,
    creates and invites new members, then concurrently accepts the
    invitation from each member account.
    """
    accounts_config, master_info, executor = guardian_init(
        config, debug, master, accounts, tags)
    master_session = get_session(
        master_info.get('role'), 'c7n-guardian',
        master_info.get('profile'), region=region)
    master_client = master_session.client('guardduty')
    detector_id = get_or_create_detector_id(master_client)
    extant_members = master_client.list_members(DetectorId=detector_id).get(
        'Members', ())
    extant_ids = {m['AccountId'] for m in extant_members}

    # Find extant members not currently enabled
    suspended_ids = {m['AccountId'] for m in extant_members
                     if m['RelationshipStatus'] == 'Disabled'}
    # Filter by accounts under consideration per config and cli flags
    suspended_ids = {a['account_id'] for a in accounts_config['accounts']
                     if a['account_id'] in suspended_ids}
    if suspended_ids:
        unprocessed = master_client.start_monitoring_members(
            DetectorId=detector_id,
            AccountIds=list(suspended_ids)).get('UnprocessedAccounts')
        if unprocessed:
            log.warning(
                "Unprocessed accounts on re-start monitoring %s" % (
                    format_event(unprocessed)))
        log.info("Restarted monitoring on %d accounts" % (len(suspended_ids)))

    members = [{'AccountId': account['account_id'],
                'Email': account['email']}
               for account in accounts_config['accounts']
               if account['account_id'] not in extant_ids]

    if not members:
        if not suspended_ids:
            log.info("All accounts already enabled")
        return

    # GuardDuty service limit on members per master.
    if (len(members) + len(extant_ids)) > 1000:
        raise ValueError(
            "Guard Duty only supports 1000 member accounts per master account")

    log.info("Enrolling %d accounts in guard duty" % len(members))
    log.info("Creating member accounts:%d region:%s", len(members), region)
    # create_members accepts at most 25 accounts per call.
    unprocessed = []
    for account_set in chunks(members, 25):
        unprocessed.extend(master_client.create_members(
            DetectorId=detector_id,
            AccountDetails=account_set).get('UnprocessedAccounts', []))
    if unprocessed:
        log.warning("Following accounts where unprocessed\n %s"
                    % format_event(unprocessed))

    log.info("Inviting member accounts")
    params = {'AccountIds': [m['AccountId'] for m in members],
              'DetectorId': detector_id}
    if message:
        params['Message'] = message
    unprocessed = master_client.invite_members(
        **params).get('UnprocessedAccounts')
    if unprocessed:
        log.warning("Following accounts where unprocessed\n %s"
                    % format_event(unprocessed))

    log.info("Accepting invitations")
    with executor(max_workers=WORKER_COUNT) as w:
        futures = {}
        for a in accounts_config['accounts']:
            if a == master_info:
                continue
            if a['account_id'] in extant_ids:
                continue
            futures[w.submit(
                enable_account, a, master_info['account_id'], region)] = a
        for f in as_completed(futures):
            a = futures[f]
            if f.exception():
                log.error("Error processing account:%s error:%s",
                          a['name'], f.exception())
                continue
            if f.result():
                log.info('Enabled guard duty on account:%s' % a['name'])
def dispatch_event(event, context):
    """Lambda entry point: run the bundled policies against *event*.

    Resolves the execution account id (from an assume_role override or
    STS, cached at module scope), prepares a per-execution output
    directory, then pushes the event through every bundled policy.
    """
    global account_id
    error = event.get('detail', {}).get('errorCode')
    if error:
        log.debug("Skipping failed operation: %s" % error)
        return
    # NOTE(review): 'debug' is set unconditionally, so the branch below
    # always logs every event — confirm this is intentional.
    event['debug'] = True
    if event['debug']:
        log.info("Processing event\n %s", format_event(event))
    # Policies file should always be valid in lambda so do loading naively
    with open('config.json') as f:
        policy_config = json.load(f)
    if not policy_config or not policy_config.get('policies'):
        return False
    # Initialize output directory, we've seen occassional perm issues with
    # lambda on temp directory and changing unix execution users, so
    # use a per execution temp space.
    output_dir = os.environ.get(
        'C7N_OUTPUT_DIR', '/tmp/' + str(uuid.uuid4()))
    if not os.path.exists(output_dir):
        try:
            os.mkdir(output_dir)
        except OSError as error:
            # Best-effort: log and continue with the configured path.
            log.warning("Unable to make output directory: {}".format(error))
    # TODO. This enshrines an assumption of a single policy per lambda.
    options_overrides = policy_config[
        'policies'][0].get('mode', {}).get('execution-options', {})
    # if using assume role in lambda ensure that the correct
    # execution account is captured in options.
    if 'assume_role' in options_overrides:
        # Account id is the fifth field of the role ARN.
        account_id = options_overrides['assume_role'].split(':')[4]
    elif account_id is None:
        session = boto3.Session()
        account_id = get_account_id_from_sts(session)
    # Historical compatibility with manually set execution options
    # previously this was a boolean, its now a string value with the
    # boolean flag triggering a string value of 'aws'
    if 'metrics_enabled' in options_overrides and isinstance(
            options_overrides['metrics_enabled'], bool):
        options_overrides['metrics_enabled'] = 'aws'
    options_overrides['account_id'] = account_id
    if 'output_dir' not in options_overrides:
        options_overrides['output_dir'] = output_dir
    options = Config.empty(**options_overrides)
    policies = PolicyCollection.from_data(policy_config, options)
    if policies:
        for p in policies:
            p.push(event, context)
    return True
def enable(config, master, tags, accounts, debug, message, region):
    """Enable guard duty on a set of accounts.

    From the master account: restarts monitoring on suspended members,
    creates and invites new members, then concurrently accepts the
    invitation from each member account.
    """
    accounts_config, master_info, executor = guardian_init(
        config, debug, master, accounts, tags)
    master_session = get_session(
        master_info.get('role'), 'c7n-guardian',
        master_info.get('profile'), region=region)
    master_client = master_session.client('guardduty')
    detector_id = get_or_create_detector_id(master_client)
    extant_members = master_client.list_members(
        DetectorId=detector_id).get('Members', ())
    extant_ids = {m['AccountId'] for m in extant_members}

    # Find extant members not currently enabled
    suspended_ids = {m['AccountId'] for m in extant_members
                     if m['RelationshipStatus'] == 'Disabled'}
    # Filter by accounts under consideration per config and cli flags
    suspended_ids = {a['account_id'] for a in accounts_config['accounts']
                     if a['account_id'] in suspended_ids}
    if suspended_ids:
        unprocessed = master_client.start_monitoring_members(
            DetectorId=detector_id,
            AccountIds=list(suspended_ids)).get('UnprocessedAccounts')
        if unprocessed:
            log.warning(
                "Unprocessed accounts on re-start monitoring %s" % (
                    format_event(unprocessed)))
        log.info("Restarted monitoring on %d accounts" % (len(suspended_ids)))

    members = [{'AccountId': account['account_id'],
                'Email': account['email']}
               for account in accounts_config['accounts']
               if account['account_id'] not in extant_ids]

    if not members:
        if not suspended_ids:
            log.info("All accounts already enabled")
        return

    # GuardDuty service limit on members per master.
    if (len(members) + len(extant_ids)) > 1000:
        raise ValueError(
            "Guard Duty only supports 1000 member accounts per master account")

    log.info("Enrolling %d accounts in guard duty" % len(members))
    log.info("Creating member accounts:%d region:%s", len(members), region)
    # create_members accepts at most 25 accounts per call.
    unprocessed = []
    for account_set in chunks(members, 25):
        unprocessed.extend(master_client.create_members(
            DetectorId=detector_id,
            AccountDetails=account_set).get('UnprocessedAccounts', []))
    if unprocessed:
        log.warning("Following accounts where unprocessed\n %s" %
                    format_event(unprocessed))

    log.info("Inviting member accounts")
    params = {'AccountIds': [m['AccountId'] for m in members],
              'DetectorId': detector_id}
    if message:
        params['Message'] = message
    unprocessed = master_client.invite_members(
        **params).get('UnprocessedAccounts')
    if unprocessed:
        log.warning("Following accounts where unprocessed\n %s" %
                    format_event(unprocessed))

    log.info("Accepting invitations")
    with executor(max_workers=WORKER_COUNT) as w:
        futures = {}
        for a in accounts_config['accounts']:
            if a == master_info:
                continue
            if a['account_id'] in extant_ids:
                continue
            futures[w.submit(
                enable_account, a, master_info['account_id'], region)] = a
        for f in as_completed(futures):
            a = futures[f]
            if f.exception():
                log.error("Error processing account:%s error:%s",
                          a['name'], f.exception())
                continue
            if f.result():
                log.info('Enabled guard duty on account:%s' % a['name'])
def disable(config, tags, accounts, master, debug,
            suspend, disable_detector, delete_detector, dissociate, region):
    """suspend guard duty in the given accounts.

    Exactly one of ``suspend``, ``disable_detector``, or ``dissociate``
    must be truthy; ``delete_detector`` may additionally be set to remove
    the member detectors entirely.
    """
    accounts_config, master_info, executor = guardian_init(
        config, debug, master, accounts, tags)
    # Exactly one disable strategy must be selected.
    # NOTE(review): adjacent string literals concatenate with no space,
    # so this message renders as "...dissociatecan be specified." and is
    # also missing the word "one" — worth fixing.
    if sum(map(int, (suspend, disable_detector, dissociate))) != 1:
        raise ValueError((
            "One and only of suspend, disable-detector, dissociate"
            "can be specified."))

    master_session = get_session(
        master_info['role'], 'c7n-guardian',
        master_info.get('profile'), region)
    master_client = master_session.client('guardduty')
    detector_id = get_or_create_detector_id(master_client)

    if suspend:
        # Suspension is driven entirely from the master account.
        unprocessed = master_client.stop_monitoring_members(
            DetectorId=detector_id,
            AccountIds=[a['account_id'] for a in accounts_config['accounts']]
        ).get('UnprocessedAccounts', ())
        if unprocessed:
            log.warning(
                "Following accounts where unprocessed\n %s",
                format_event(unprocessed))
        log.info("Stopped monitoring %d accounts in master",
                 len(accounts_config['accounts']))
        return

    if dissociate:
        master_client.disassociate_members(
            DetectorId=detector_id,
            AccountIds=[a['account_id'] for a in accounts_config['accounts']])

    # Seems like there's a couple of ways to disable an account
    # delete the detector (member), disable the detector (master or member),
    # or disassociate members, or from member disassociate from master.
    for a in accounts_config['accounts']:
        member_session = get_session(
            a['role'], 'c7n-guardian', a.get('profile'), region)
        member_client = member_session.client('guardduty')
        m_detector_id = get_or_create_detector_id(member_client)
        if disable_detector:
            member_client.update_detector(
                DetectorId=m_detector_id, Enable=False)
            log.info("Disabled detector in account:%s", a['name'])
        if dissociate:
            try:
                log.info("Disassociated member account:%s", a['name'])
                result = member_client.disassociate_from_master_account(
                    DetectorId=m_detector_id)
                log.info("Result %s", format_event(result))
            except ClientError as e:
                # Already dissociated accounts raise InvalidInputException;
                # skip them rather than failing the whole run.
                if e.response['Error']['Code'] == 'InvalidInputException':
                    continue
        if delete_detector:
            member_client.delete_detector(DetectorId=m_detector_id)
            log.info("Deleted detector in account:%s", a['name'])
def enable_region(master_info, accounts_config, executor, message, region):
    """Enroll the configured accounts as guard duty members in *region*.

    From the master account: restarts monitoring on suspended members,
    creates and invites new members, then concurrently accepts the
    invitation from each member account. Returns the list of newly
    processed member records, or the already-active account ids when
    there is nothing to do.
    """
    master_session = get_session(
        master_info.get('role'), 'c7n-guardian',
        master_info.get('profile'), region=region)
    master_client = master_session.client('guardduty')
    detector_id = get_or_create_detector_id(master_client)

    # OnlyAssociated="FALSE" includes invited/resigned/removed members.
    results = master_client.get_paginator(
        'list_members').paginate(DetectorId=detector_id,
                                 OnlyAssociated="FALSE")
    extant_members = results.build_full_result().get('Members', ())
    extant_ids = {m['AccountId'] for m in extant_members}
    # Find active members
    active_ids = {m['AccountId'] for m in extant_members
                  if m['RelationshipStatus'] == 'Enabled'}
    # Find invited members
    invited_ids = {m['AccountId'] for m in extant_members
                   if m['RelationshipStatus'] == 'Invited'}
    # Find extant members not currently enabled
    suspended_ids = {m['AccountId'] for m in extant_members
                     if m['RelationshipStatus'] == 'Disabled'}
    # Filter by accounts under consideration per config and cli flags
    suspended_ids = {a['account_id'] for a in accounts_config['accounts']
                     if a['account_id'] in suspended_ids}
    if suspended_ids:
        unprocessed = master_client.start_monitoring_members(
            DetectorId=detector_id,
            AccountIds=list(suspended_ids)).get('UnprocessedAccounts')
        if unprocessed:
            log.warning(
                "Region: %s Unprocessed accounts on re-start monitoring %s",
                region, format_event(unprocessed))
        log.info("Region: %s Restarted monitoring on %d accounts",
                 region, len(suspended_ids))

    members = [{'AccountId': account['account_id'],
                'Email': account['email']}
               for account in accounts_config['accounts']
               if account['account_id'] not in extant_ids]

    if not members:
        if not suspended_ids and not invited_ids:
            log.info("Region:%s All accounts already enabled", region)
        return list(active_ids)

    # GuardDuty service limit on members per master.
    if (len(members) + len(extant_ids)) > 1000:
        raise ValueError(
            ("Region:%s Guard Duty only supports "
             "1000 member accounts per master account") % (region))

    log.info(
        "Region:%s Enrolling %d accounts in guard duty",
        region, len(members))

    # create_members accepts at most 25 accounts per call.
    unprocessed = []
    for account_set in chunks(members, 25):
        unprocessed.extend(master_client.create_members(
            DetectorId=detector_id,
            AccountDetails=account_set).get('UnprocessedAccounts', []))
    if unprocessed:
        log.warning(
            "Region:%s accounts where unprocessed - member create\n %s",
            region, format_event(unprocessed))

    log.info("Region:%s Inviting %d member accounts", region, len(members))
    unprocessed = []
    # Skip accounts that already hold an unanswered invitation.
    for account_set in chunks(
            [m for m in members if not m['AccountId'] in invited_ids], 25):
        params = {'AccountIds': [m['AccountId'] for m in account_set],
                  'DetectorId': detector_id}
        if message:
            params['Message'] = message
        unprocessed.extend(master_client.invite_members(
            **params).get('UnprocessedAccounts', []))
    if unprocessed:
        log.warning(
            "Region:%s accounts where unprocessed invite-members\n %s",
            region, format_event(unprocessed))

    # Recompute against active ids: invitations must be accepted by every
    # account that is not already enabled.
    members = [{'AccountId': account['account_id'],
                'Email': account['email']}
               for account in accounts_config['accounts']
               if account['account_id'] not in active_ids]

    log.info("Region:%s Accepting %d invitations in members",
             region, len(members))

    with executor(max_workers=WORKER_COUNT) as w:
        futures = {}
        for a in accounts_config['accounts']:
            if a == master_info:
                continue
            if a['account_id'] in active_ids:
                continue
            futures[w.submit(
                enable_account, a, master_info['account_id'], region)] = a

        for f in as_completed(futures):
            a = futures[f]
            if f.exception():
                log.error("Region:%s Error processing account:%s error:%s",
                          region, a['name'], f.exception())
                continue
            if f.result():
                log.info('Region:%s Enabled guard duty on account:%s',
                         region, a['name'])
    return members