def setup_scheduler():
    """Sets up the APScheduler.

    For each active, non-third-party account:
      * schedules an interval job per watcher interval that runs the
        change reporter, staggered by REPORTER_START_DELAY seconds
      * schedules a weekday 10:00 cron job that audits changes
    Also schedules one global 03:00 cron job that purges old stored
    exceptions. Any failure is logged and persisted via store_exception.
    """
    log = logging.getLogger('apscheduler')

    try:
        accounts = Account.query.filter(Account.third_party == False).filter(Account.active == True).all()  # noqa
        accounts = [account.name for account in accounts]
        for account in accounts:
            app.logger.debug("Scheduler adding account {}".format(account))
            rep = Reporter(account=account)
            # Stagger job start so everything doesn't fire the moment the app boots:
            delay = app.config.get('REPORTER_START_DELAY', 10)

            for period in rep.get_intervals(account):
                scheduler.add_interval_job(
                    run_change_reporter,
                    minutes=period,
                    start_date=datetime.now()+timedelta(seconds=delay),
                    args=[[account], period]
                )

            # Collect every auditor across all monitors for this account:
            auditors = []
            for monitor in all_monitors(account):
                auditors.extend(monitor.auditors)
            scheduler.add_cron_job(_audit_changes, hour=10, day_of_week="mon-fri", args=[account, auditors, True])

        # Clear out old exceptions:
        scheduler.add_cron_job(_clear_old_exceptions, hour=3, minute=0)

    except Exception as e:
        app.logger.warn("Scheduler Exception: {}".format(e))
        app.logger.warn(traceback.format_exc())
        store_exception("scheduler", None, e)
def task_audit(self, account_name, technology_name):
    """Celery task: audit changes for one account/technology pair.

    Bails out (with an error log) if the account no longer exists. On any
    other failure the exception is reported to Sentry (if configured),
    persisted via store_exception, and the task is retried.
    """
    setup()
    app.logger.info("[ ] Executing Celery task to audit changes for Account: {} Technology: {}".format(account_name, technology_name))

    # Verify that the account exists (was it deleted? was it renamed?):
    if not Account.query.filter(Account.name == account_name).first():
        app.logger.error("[X] Account has been removed or renamed: {}. Please restart the scheduler to fix.".format(
            account_name
        ))
        return

    try:
        audit_changes([account_name], [technology_name], True)
        app.logger.info("[+] Completed Celery task for account: {}, technology: {}".format(account_name, technology_name))
    except Exception as e:
        if sentry:
            sentry.captureException()
        app.logger.error("[X] Task Audit Scheduler Exception ({}/{}): {}".format(account_name, technology_name, e))
        app.logger.error(traceback.format_exc())
        store_exception("scheduler-exception-on-audit", None, e)
        self.retry(exc=e)
def test_child_deletion_cascade_check(self):
    """
    If the exception object is deleted, then the parent object (items, account, tech.) should NOT be deleted.
    :return:
    """
    # Raise-and-catch so the exception carries a real traceback:
    try:
        raise ValueError("This is a test")
    except ValueError as e:
        test_exception = e

    location = ("iamrole", "testing", "us-west-2", "testrole")
    store_exception("tests", location, test_exception)

    # The stored exception is linked to all three parent objects:
    assert len(self.item.exceptions) == 1
    assert len(self.account.exceptions) == 1
    assert len(self.technology.exceptions) == 1

    db.session.delete(self.item.exceptions[0])
    db.session.commit()

    # The exception row is gone...
    exc = ExceptionLogs.query.all()
    assert len(exc) == 0

    # ...but none of the parents were cascaded away:
    assert len(Item.query.filter(Item.name == "testrole").all()) == 1
    assert len(Technology.query.filter(Technology.name == "iamrole").all()) == 1
    assert len(Account.query.filter(Account.name == "testing").all()) == 1

    # And the parents no longer reference the deleted exception:
    assert len(self.item.exceptions) == 0
    assert len(self.account.exceptions) == 0
    assert len(self.technology.exceptions) == 0
def decorated_function(*args, **kwargs):
    """Invoke the wrapped function ``f``; on any exception, record a
    BotoConnectionIssue into the caller-supplied ``exception_map`` keyed by
    the most specific (index, account, region, name) location available,
    and persist the original exception via ``store_exception``.
    """
    try:
        return f(*args, **kwargs)
    except Exception as e:
        index = kwargs.get('index')
        account = kwargs.get('account_name')
        # Allow the recording region to be overridden for universal tech like IAM
        region = kwargs.get('exception_record_region') or kwargs.get(
            'region')
        name = kwargs.get('name')
        # NOTE(review): if the caller omitted exception_map this is None and
        # the assignment below raises TypeError, masking the original
        # exception — confirm all callers supply it.
        exception_map = kwargs.get('exception_map')
        exc = BotoConnectionIssue(str(e), index, account, name)
        # Build the most specific location tuple we can:
        if name:
            location = (index, account, region, name)
        elif region:
            location = (index, account, region)
        elif account:
            location = (index, account)
        else:
            location = (index, )
        exception_map[location] = exc

        # Store the exception (the original one passed in, not exc):
        store_exception(source=source, location=location, exception=e)
def setup_scheduler():
    """Sets up the APScheduler.

    Registers, for every active non-third-party account, interval jobs
    running the change reporter (offset by REPORTER_START_DELAY seconds)
    and a weekday 10:00 audit cron job; plus a single 03:00 cron job that
    clears aged-out exception records. Errors are logged and persisted.
    """
    log = logging.getLogger('apscheduler')

    try:
        accounts = Account.query.filter(Account.third_party == False).filter(
            Account.active == True).all()  # noqa
        accounts = [account.name for account in accounts]
        for account in accounts:
            app.logger.debug("Scheduler adding account {}".format(account))
            rep = Reporter(account=account)
            # Delay the first run so jobs don't all fire at process start:
            delay = app.config.get('REPORTER_START_DELAY', 10)

            for period in rep.get_intervals(account):
                scheduler.add_interval_job(run_change_reporter,
                                           minutes=period,
                                           start_date=datetime.now() + timedelta(seconds=delay),
                                           args=[[account], period])

            # Gather all auditors for this account's monitors:
            auditors = []
            for monitor in all_monitors(account):
                auditors.extend(monitor.auditors)
            scheduler.add_cron_job(_audit_changes, hour=10, day_of_week="mon-fri", args=[account, auditors, True])

        # Clear out old exceptions:
        scheduler.add_cron_job(_clear_old_exceptions, hour=3, minute=0)

    except Exception as e:
        app.logger.warn("Scheduler Exception: {}".format(e))
        app.logger.warn(traceback.format_exc())
        store_exception("scheduler", None, e)
def setup_scheduler():
    """Sets up the APScheduler.

    Schedules, per active non-third-party account, interval jobs that run
    the change reporter and (when the account has auditors) a weekday 10:00
    audit cron job; plus one global 03:00 job clearing old exceptions.
    Errors are logged and persisted via store_exception.
    """
    log = logging.getLogger('apscheduler')

    try:
        accounts = Account.query.filter(Account.third_party == False).filter(Account.active == True).all()  # noqa
        accounts = [account.name for account in accounts]
        for account in accounts:
            # FIX: was a Python-2-only bare `print` statement; route through
            # the app logger like every other function in this module so the
            # message honors the configured handlers/levels.
            app.logger.debug("Scheduler adding account {}".format(account))
            rep = Reporter(accounts=[account])
            for period in rep.get_intervals(account):
                scheduler.add_interval_job(
                    run_change_reporter,
                    minutes=period,
                    start_date=datetime.now()+timedelta(seconds=2),
                    args=[account, period]
                )
            # Only watcher/auditor pairs that actually have an auditor:
            auditors = [a for (_, a) in rep.get_watchauditors(account) if a]
            if auditors:
                scheduler.add_cron_job(_audit_changes, hour=10, day_of_week="mon-fri", args=[account, auditors, True])

        # Clear out old exceptions:
        scheduler.add_cron_job(_clear_old_exceptions, hour=3, minute=0)

    except Exception as e:
        app.logger.warn("Scheduler Exception: {}".format(e))
        app.logger.warn(traceback.format_exc())
        store_exception("scheduler", None, e)
def task_account_tech(self, account_name, technology_name):
    """Celery task: run the reporter (watch -> audit -> alert) for one
    account/technology pair, timing the run.

    Bails out with an error log if the account no longer exists. Failures
    are sent to Sentry (if configured), persisted, and the task is retried.
    """
    setup()
    app.logger.info("[ ] Executing Celery task for account: {}, technology: {}".format(account_name, technology_name))
    time1 = time.time()

    # Verify that the account exists (was it deleted? was it renamed?):
    if not Account.query.filter(Account.name == account_name).first():
        app.logger.error("[X] Account has been removed or renamed: {}. Please restart the scheduler to fix.".format(
            account_name
        ))
        return

    try:
        reporter_logic(account_name, technology_name)

        time2 = time.time()
        app.logger.info('[@] Run Account for Technology (%s/%s) took %0.1f s' % (account_name, technology_name, (time2 - time1)))
        app.logger.info(
            "[+] Completed Celery task for account: {}, technology: {}".format(account_name, technology_name))
    except Exception as e:
        if sentry:
            sentry.captureException()
        app.logger.error("[X] Task Account Scheduler Exception ({}/{}): {}".format(account_name, technology_name, e))
        app.logger.error(traceback.format_exc())
        store_exception("scheduler-exception-on-watch", None, e)
        # self.retry() raises celery's Retry; re-raise keeps that explicit.
        raise self.retry(exc=e)
def _audit_specific_changes(monitor, audit_items, send_report, debug=True):
    """
    Runs the auditor on specific items that are passed in.
    :param monitor: monitor whose auditors should examine the items
    :param audit_items: the specific items to audit
    :param send_report: if True, email each auditor's report (and sync to
        Jira when jirasync is configured)
    :param debug: unused here; kept for interface compatibility
    :return:
    """
    try:
        for au in monitor.auditors:
            au.items = audit_items
            au.audit_objects()
            au.save_issues()
            if send_report:
                report = au.create_report()
                au.email_report(report)

                if jirasync:
                    app.logger.info('[-->] Syncing {} issues on {} with Jira'.format(au.index, monitor.watcher.accounts[0]))
                    jirasync.sync_issues(monitor.watcher.accounts, au.index)
    except (OperationalError, InvalidRequestError, StatementError) as e:
        # DB-level failure: drop the (possibly poisoned) session and record it.
        app.logger.exception("[X] Database error processing accounts %s, cleaning up session.", monitor.watcher.accounts[0])
        db.session.remove()
        store_exception("scheduler-audit-changes", None, e)
def decorated_function(*args, **kwargs):
    """Invoke ``f``; on exception, record a BotoConnectionIssue into the
    caller's ``exception_map`` and persist the original exception.

    Depending on ``pop_exception_fields``, the bookkeeping kwargs are either
    consumed (popped) or left for the wrapped function to see.
    """
    # prevent these from being passed to the wrapped function:
    m = kwargs.pop if pop_exception_fields else kwargs.get
    exception_values = {
        'index': m('index'),
        'account': m('account_name', None),
        'exception_record_region': m('exception_record_region', None),
        'name': m('name', None),
        # NOTE(review): no default — if the caller omitted exception_map this
        # is None and the assignment below raises TypeError; confirm callers
        # always supply it (a later variant defaults this to {}).
        'exception_map': m('exception_map')
    }
    try:
        return f(*args, **kwargs)
    except Exception as e:
        index = exception_values['index']
        account = exception_values['account']
        # Allow the recording region to be overridden for universal tech like IAM
        region = exception_values['exception_record_region'] or kwargs.get('region')
        name = exception_values['name']
        exception_map = exception_values['exception_map']
        exc = BotoConnectionIssue(str(e), index, account, name)
        # Most specific location tuple available:
        if name:
            location = (index, account, region, name)
        elif region:
            location = (index, account, region)
        elif account:
            location = (index, account)
        else:
            location = (index, )
        exception_map[location] = exc

        # Store the exception (the original one passed in, not exc):
        store_exception(source=source, location=location, exception=e)
def decorated_function(*args, **kwargs):
    """Call ``f`` and, should it raise, capture the failure.

    The failure is reported to Sentry (when configured), recorded in the
    caller-supplied ``exception_map`` under the most specific
    (index, account, region, name) key available, and persisted through
    ``store_exception``.
    """
    # Either consume the bookkeeping kwargs or merely read them, depending
    # on whether the wrapped function should also receive them:
    fetch = kwargs.pop if pop_exception_fields else kwargs.get
    bookkeeping = {
        'index': fetch('index'),
        'account': fetch('account_name', None),
        'exception_record_region': fetch('exception_record_region', None),
        'name': fetch('name', None),
        'exception_map': fetch('exception_map', {})
    }
    try:
        return f(*args, **kwargs)
    except Exception as e:
        if sentry:
            sentry.captureException()

        tech_index = bookkeeping['index']
        account_name = bookkeeping['account']
        # Universal technologies (e.g. IAM) may override the recorded region:
        record_region = bookkeeping['exception_record_region'] or kwargs.get('region')
        item_name = bookkeeping['name']
        exc_map = bookkeeping['exception_map']

        issue = BotoConnectionIssue(str(e), tech_index, account_name, item_name)

        # Key by the most specific location we can describe:
        if item_name:
            key = (tech_index, account_name, record_region, item_name)
        elif record_region:
            key = (tech_index, account_name, record_region)
        elif account_name:
            key = (tech_index, account_name)
        else:
            key = (tech_index, )
        exc_map[key] = issue

        # Persist the original exception (not the wrapper issue object):
        store_exception(source=source, location=key, exception=e)
def test_store_exception_with_new_techid(self):
    """Storing an exception for an unknown technology must create the
    Technology row and link the exception to it, with a ~10-day TTL."""
    # Raise-and-catch so a real traceback is attached:
    try:
        raise ValueError("This is a test")
    except ValueError as e:
        test_exception = e

    # 'newtech' does not yet exist in the Technology table:
    location = ['newtech']

    # Expected TTL is ten days out; capture month/day for comparison:
    ttl_month = (datetime.datetime.utcnow() + datetime.timedelta(days=10)).month
    ttl_day = (datetime.datetime.utcnow() + datetime.timedelta(days=10)).day
    current_month = datetime.datetime.utcnow().month
    current_day = datetime.datetime.utcnow().day

    store_exception("tests", location, test_exception)

    # Fetch the exception and validate it:
    exc_logs = ExceptionLogs.query.all()
    assert len(exc_logs) == 1

    exc_log = exc_logs[0]
    assert exc_log.type == type(test_exception).__name__
    assert exc_log.message == str(test_exception)
    assert exc_log.stacktrace == traceback.format_exc()
    assert exc_log.occurred.day == current_day
    assert exc_log.occurred.month == current_month
    assert exc_log.ttl.month == ttl_month
    assert exc_log.ttl.day == ttl_day

    # A new Technology row was created and linked:
    tech = Technology.query.filter(Technology.name == "newtech").first()
    assert tech
    assert exc_log.tech_id == tech.id
def manual_run_change_reporter(accounts):
    """Manual change reporting from the command line.

    Runs the full reporter logic for every watcher-backed monitor of each
    given account, timing each account. Database errors tear down the
    session, are persisted, and are re-raised to the caller.
    """
    app.logger.info("[ ] Executing manual change reporter task...")

    try:
        for account in accounts:
            time1 = time.time()

            rep = Reporter(account=account)
            for monitor in rep.all_monitors:
                # Only monitors that actually have a watcher can report:
                if monitor.watcher:
                    app.logger.info("[ ] Running change finder for "
                                    "account: {} technology: {}".format(
                                        account, monitor.watcher.index))
                    reporter_logic(account, monitor.watcher.index)

            time2 = time.time()
            app.logger.info('[@] Run Account %s took %0.1f s' % (account, (time2 - time1)))

        app.logger.info("[+] Completed manual change reporting.")
    except (OperationalError, InvalidRequestError, StatementError) as e:
        app.logger.exception(
            "[X] Database error processing cleaning up session.")
        db.session.remove()
        store_exception("scheduler-run-change-reporter", None, e)
        raise e
def task_account_tech(self, account_name, technology_name):
    """Celery task: run the reporter logic for one account/technology
    pair, timing the run.

    Failures are reported to Sentry (if configured), persisted via
    store_exception, and the task is retried.
    """
    setup()
    app.logger.info(
        "[ ] Executing Celery task for account: {}, technology: {}".format(
            account_name, technology_name))
    time1 = time.time()
    try:
        reporter_logic(account_name, technology_name)

        time2 = time.time()
        app.logger.info('[@] Run Account for Technology (%s/%s) took %0.1f s' % (account_name, technology_name, (time2 - time1)))
        app.logger.info(
            "[+] Completed Celery task for account: {}, technology: {}".format(
                account_name, technology_name))
    except Exception as e:
        if sentry:
            sentry.captureException()
        app.logger.error(
            "[X] Task Account Scheduler Exception ({}/{}): {}".format(
                account_name, technology_name, e))
        app.logger.error(traceback.format_exc())
        store_exception("scheduler-exception-on-watch", None, e)
        # self.retry() raises celery's Retry; re-raising makes that explicit.
        raise self.retry(exc=e)
def _audit_specific_changes(monitor, audit_items, send_report, debug=True):
    """
    Runs the auditor on specific items that are passed in.
    :param monitor: monitor whose auditors should examine the items
    :param audit_items: the specific items to audit
    :param send_report: if True, email each auditor's report (and sync the
        issues to Jira when jirasync is configured)
    :param debug: unused here; kept for interface compatibility
    :return:
    """
    try:
        for au in monitor.auditors:
            au.items = audit_items
            au.audit_objects()
            au.save_issues()
            if send_report:
                report = au.create_report()
                au.email_report(report)

                if jirasync:
                    app.logger.info(
                        '[-->] Syncing {} issues on {} with Jira'.format(
                            au.index, monitor.watcher.accounts[0]))
                    jirasync.sync_issues(monitor.watcher.accounts, au.index)
    except (OperationalError, InvalidRequestError, StatementError) as e:
        # DB failure: discard the poisoned session and persist the error.
        app.logger.exception(
            "[X] Database error processing accounts %s, cleaning up session.",
            monitor.watcher.accounts[0])
        db.session.remove()
        store_exception("scheduler-audit-changes", None, e)
def amazon_accounts():
    """ Pre-populates standard AWS owned accounts.

    Reads data/aws_accounts.json (shipped next to this module) and
    inserts/updates one inactive, third-party Account row per entry.
    Failures are logged and persisted via store_exception.
    """
    import os
    import json
    from security_monkey.datastore import Account

    data_file = os.path.join(os.path.dirname(__file__), "data", "aws_accounts.json")
    # FIX: use a context manager so the file handle is closed
    # deterministically (the previous bare open() leaked it until GC).
    with open(data_file, 'r') as f:
        data = json.load(f)

    app.logger.info('Adding / updating Amazon owned accounts')
    try:
        for group, info in data.items():
            for aws_account in info['accounts']:
                acct_name = "{group} ({region})".format(group=group, region=aws_account['region'])
                account = Account.query.filter(Account.number == aws_account['account_id']).first()
                if not account:
                    app.logger.debug(' Adding account {0}'.format(acct_name))
                    account = Account()
                else:
                    app.logger.debug(' Updating account {0}'.format(acct_name))

                account.number = aws_account['account_id']
                account.active = False
                account.third_party = True
                account.name = acct_name
                account.notes = info['url']
                db.session.add(account)

        db.session.commit()
        app.logger.info('Finished adding Amazon owned accounts')
    except Exception as e:
        app.logger.exception("An error occured while adding accounts")
        store_exception("manager-amazon-accounts", None, e)
def run_change_reporter(account_names, interval=None):
    """Run the change Reporter over each named account.

    Database-level errors are logged, persisted via store_exception, and
    the SQLAlchemy session is discarded so later work starts clean.
    """
    try:
        for name in account_names:
            Reporter(account=name, debug=True).run(name, interval)
    except (OperationalError, InvalidRequestError, StatementError) as e:
        app.logger.exception("Database error processing accounts %s, cleaning up session.", account_names)
        db.session.remove()
        store_exception("scheduler-run-change-reporter", None, e)
def run_change_reporter(account_names, interval=None):
    """Run the change Reporter for every named account.

    Each Reporter is told about the full account list for alerting
    purposes. Database errors are logged, the session is torn down, and
    the error is persisted via store_exception.
    """
    try:
        for name in account_names:
            account_reporter = Reporter(account=name,
                                        alert_accounts=account_names,
                                        debug=True)
            account_reporter.run(name, interval)
    except (OperationalError, InvalidRequestError, StatementError) as e:
        app.logger.exception("Database error processing accounts %s, cleaning up session.", account_names)
        db.session.remove()
        store_exception("scheduler-run-change-reporter", None, e)
def run(self, account, interval=None):
    """Starts the process of watchers -> auditors -> alerters.

    :param account: account name to process
    :param interval: only monitors registered for this interval are run
    """
    app.logger.info("Starting work on account {}.".format(account))
    time1 = time.time()

    mons = self.get_monitors_to_run(account, interval)

    # Track which watchers found created/changed items so auditing can be
    # limited to them:
    watchers_with_changes = set()
    for monitor in mons:
        app.logger.info(
            "Running slurp {} for {} ({} minutes interval)".format(
                monitor.watcher.i_am_singular, account, interval))
        # Batch logic needs to be handled differently:
        if monitor.batch_support:
            # Imported here (not at module top) — presumably to avoid a
            # circular import with the scheduler module; TODO confirm.
            from security_monkey.scheduler import batch_logic
            batch_logic(monitor, monitor.watcher, account, False)
        else:
            (items, exception_map) = monitor.watcher.slurp()
            monitor.watcher.find_changes(items, exception_map)

            if (len(monitor.watcher.created_items) > 0) or (len(
                    monitor.watcher.changed_items) > 0):
                watchers_with_changes.add(monitor.watcher.index)

            monitor.watcher.save()

    db_account = get_account_by_name(account)

    for monitor in self.all_monitors:
        # Skip over batched items, since they are done:
        if monitor.batch_support:
            continue

        for auditor in monitor.auditors:
            if auditor.applies_to_account(db_account):
                items_to_audit = self.get_items_to_audit(
                    monitor.watcher, auditor, watchers_with_changes)

                app.logger.info("Running audit {} for {}".format(
                    monitor.watcher.index, account))
                try:
                    auditor.items = items_to_audit
                    auditor.audit_objects()
                    auditor.save_issues()
                except Exception as e:
                    # One failing auditor must not abort the others:
                    store_exception('reporter-run-auditor', (auditor.index, account), e)
                    continue

    time2 = time.time()
    app.logger.info('Run Account %s took %0.1f s' % (account, (time2 - time1)))

    self.account_alerter.report()

    db.session.close()
def slurp_exception(self, location=None, exception=None, exception_map=None, source="watcher"):
    """
    Logs any exceptions that happen in slurp and adds them to the
    exception_map using their location as the key.

    The location is a tuple in the form:
    (technology, account, region, item_name)
    that describes the object where the exception occurred. Location can
    also exclude an item_name if the exception is region wide.

    :param location: tuple describing where the exception occurred
    :param exception: the exception instance to record
    :param exception_map: dict the exception is recorded into; a fresh dict
        is created when omitted. (FIX: was a mutable ``{}`` default, which
        Python evaluates once, so every call sharing the default would have
        accumulated entries in one shared dict.)
    :param source: label stored alongside the exception in the database
    """
    if exception_map is None:
        exception_map = {}
    if location in exception_map:
        app.logger.debug("Exception map already has location {}. This should not happen.".format(location))

    exception_map[location] = exception
    app.logger.debug("Adding {} to the exceptions list. Exception was: {}".format(location, str(exception)))

    # Store it to the database:
    store_exception(source, location, exception)
def amazon_accounts():
    """ Pre-populates standard AWS owned accounts.

    Ensures the 'AWS' AccountType row exists, then inserts/updates one
    inactive, third-party Account per entry in data/aws_accounts.json
    (shipped next to this module). Failures are logged and persisted.
    """
    import os
    import json
    from security_monkey.datastore import Account, AccountType

    data_file = os.path.join(os.path.dirname(__file__), "data", "aws_accounts.json")
    # FIX: use a context manager so the file handle is closed
    # deterministically (the previous bare open() leaked it until GC).
    with open(data_file, 'r') as f:
        data = json.load(f)

    app.logger.info('Adding / updating Amazon owned accounts')
    try:
        # Create the 'AWS' account type on first run:
        account_type_result = AccountType.query.filter(
            AccountType.name == 'AWS').first()
        if not account_type_result:
            account_type_result = AccountType(name='AWS')
            db.session.add(account_type_result)
            db.session.commit()
            db.session.refresh(account_type_result)

        for group, info in data.items():
            for aws_account in info['accounts']:
                acct_name = "{group} ({region})".format(
                    group=group, region=aws_account['region'])
                account = Account.query.filter(
                    Account.number == aws_account['account_id']).first()
                if not account:
                    app.logger.debug(
                        ' Adding account {0}'.format(acct_name))
                    account = Account()
                else:
                    app.logger.debug(
                        ' Updating account {0}'.format(acct_name))

                account.number = aws_account['account_id']
                account.identifier = aws_account['account_id']
                account.account_type_id = account_type_result.id
                account.active = False
                account.third_party = True
                account.name = acct_name
                account.notes = info['url']
                db.session.add(account)

        db.session.commit()
        app.logger.info('Finished adding Amazon owned accounts')
    except Exception as e:
        app.logger.exception("An error occured while adding accounts")
        store_exception("manager-amazon-accounts", None, e)
def run(self, account, interval=None):
    """Starts the process of watchers -> auditors -> alerters.

    :param account: account name to process
    :param interval: only monitors registered for this interval are run
    """
    app.logger.info("Starting work on account {}.".format(account))
    time1 = time.time()

    mons = self.get_monitors_to_run(account, interval)

    # Watchers that found created/changed items (used to scope auditing):
    watchers_with_changes = set()
    for monitor in mons:
        app.logger.info("Running slurp {} for {} ({} minutes interval)".format(monitor.watcher.i_am_singular, account, interval))
        # Batch logic needs to be handled differently:
        if monitor.batch_support:
            # Imported here (not at module top) — presumably to avoid a
            # circular import with the scheduler module; TODO confirm.
            from security_monkey.scheduler import batch_logic
            batch_logic(monitor, monitor.watcher, account, False)
        else:
            (items, exception_map) = monitor.watcher.slurp()
            monitor.watcher.find_changes(items, exception_map)

            if (len(monitor.watcher.created_items) > 0) or (len(monitor.watcher.changed_items) > 0):
                watchers_with_changes.add(monitor.watcher.index)

            monitor.watcher.save()

    db_account = get_account_by_name(account)

    for monitor in self.all_monitors:
        # Skip over batched items, since they are done:
        if monitor.batch_support:
            continue

        for auditor in monitor.auditors:
            if auditor.applies_to_account(db_account):
                items_to_audit = self.get_items_to_audit(monitor.watcher, auditor, watchers_with_changes)

                app.logger.info("Running audit {} for {}".format(
                    monitor.watcher.index, account))
                try:
                    auditor.items = items_to_audit
                    auditor.audit_objects()
                    auditor.save_issues()
                except Exception as e:
                    # One failing auditor must not abort the rest:
                    store_exception('reporter-run-auditor', (auditor.index, account), e)
                    continue

    time2 = time.time()
    app.logger.info('Run Account %s took %0.1f s' % (account, (time2-time1)))

    self.account_alerter.report()

    db.session.close()
def test_exception_length(self):
    """An over-long exception message must be truncated to 512 characters
    when stored."""
    # 1024 characters — twice the stored column limit:
    some_string = "".join(random.choice(string.ascii_uppercase) for _ in range(1024))

    try:
        raise ValueError(some_string)
    except ValueError as e:
        test_exception = e

    location = ("iamrole", "testing", "us-west-2", "testrole")
    store_exception("tests", location, test_exception)

    exc_log = ExceptionLogs.query.order_by(ExceptionLogs.id.desc()).first()

    # Only the first 512 characters survive:
    assert len(exc_log.message) == 512
    assert exc_log.message[:512] == some_string[:512]
def _audit_changes(accounts, auditors, send_report, debug=True):
    """Run each auditor over all of its objects and persist the issues.

    When ``send_report`` is set, every auditor also emails its report and,
    if Jira sync is configured, pushes the issues to Jira. Database errors
    tear down the session and are persisted via store_exception.
    """
    try:
        for auditor in auditors:
            auditor.audit_all_objects()
            auditor.save_issues()
            if not send_report:
                continue
            auditor.email_report(auditor.create_report())
            if jirasync:
                app.logger.info('Syncing {} issues on {} with Jira'.format(auditor.index, accounts))
                jirasync.sync_issues(accounts, auditor.index)
    except (OperationalError, InvalidRequestError, StatementError) as e:
        app.logger.exception("Database error processing accounts %s, cleaning up session.", accounts)
        db.session.remove()
        store_exception("scheduler-audit-changes", None, e)
def test_store_exception(self):
    """store_exception must persist correctly for every location arity:
    (tech,), (tech, account), (tech, account, region), and the full
    (tech, account, region, item) tuple."""
    try:
        raise ValueError("This is a test")
    except ValueError as e:
        test_exception = e

    # Expected ExceptionLogs attribute per location position:
    attrs = [
        ("technology", "iamrole"),
        ("account", "testing"),
        ("region", "us-west-2"),
        ("item", "testrole")
    ]

    location = ("iamrole", "testing", "us-west-2", "testrole")

    # Default TTL is ten days out:
    ttl_month = (datetime.datetime.utcnow() + datetime.timedelta(days=10)).month
    ttl_day = (datetime.datetime.utcnow() + datetime.timedelta(days=10)).day
    current_month = datetime.datetime.utcnow().month
    current_day = datetime.datetime.utcnow().day

    # Test all cases...
    for i in range(1, 5):
        store_exception("tests", tuple(location[:i]), test_exception)

        # Fetch the exception and validate it:
        exc_log = ExceptionLogs.query.order_by(ExceptionLogs.id.desc()).first()

        assert exc_log.type == type(test_exception).__name__
        assert exc_log.message == str(test_exception)
        assert exc_log.stacktrace == traceback.format_exc()
        assert exc_log.occurred.day == current_day
        assert exc_log.occurred.month == current_month
        assert exc_log.ttl.month == ttl_month
        assert exc_log.ttl.day == ttl_day

        # Each stored prefix must populate the matching relations/columns:
        for x in range(0, i):
            attr = getattr(exc_log, attrs[x][0])
            if isinstance(attr, unicode):
                # Plain string column (e.g. region):
                assert attr == attrs[x][1]
            else:
                # Relation to a parent row (technology/account/item):
                assert attr.name == attrs[x][1]

    # Cross-check parent back-references across all four stored rows:
    assert len(self.account.exceptions) == 3
    assert len(self.technology.exceptions) == 4
    assert len(self.item.exceptions) == 1
def test_store_exception(self):
    """store_exception must persist correctly for every location arity,
    from (tech,) up to the full (tech, account, region, item) tuple."""
    try:
        raise ValueError("This is a test")
    except ValueError as e:
        test_exception = e

    # Expected ExceptionLogs attribute per location position:
    attrs = [
        ("technology", "iamrole"),
        ("account", "testing"),
        ("region", "us-west-2"),
        ("item", "testrole")
    ]

    location = ("iamrole", "testing", "us-west-2", "testrole")

    # Default TTL is ten days out:
    ttl_month = (datetime.datetime.utcnow() + datetime.timedelta(days=10)).month
    ttl_day = (datetime.datetime.utcnow() + datetime.timedelta(days=10)).day
    current_month = datetime.datetime.utcnow().month
    current_day = datetime.datetime.utcnow().day

    # Test all cases...
    for i in range(1, 5):
        store_exception("tests", tuple(location[:i]), test_exception)

        # Fetch the exception and validate it:
        exc_log = ExceptionLogs.query.order_by(ExceptionLogs.id.desc()).first()

        assert exc_log.type == type(test_exception).__name__
        assert exc_log.message == str(test_exception)
        assert exc_log.stacktrace == traceback.format_exc()
        assert exc_log.occurred.day == current_day
        assert exc_log.occurred.month == current_month
        assert exc_log.ttl.month == ttl_month
        assert exc_log.ttl.day == ttl_day

        # Each stored prefix must populate the matching relations/columns:
        for x in range(0, i):
            attr = getattr(exc_log, attrs[x][0])
            if isinstance(attr, text_type):
                # Plain string column (e.g. region):
                assert attr == attrs[x][1]
            else:
                # Relation to a parent row (technology/account/item):
                assert attr.name == attrs[x][1]

    # Cross-check parent back-references across all four stored rows:
    assert len(self.account.exceptions) == 3
    assert len(self.technology.exceptions) == 4
    assert len(self.item.exceptions) == 1
def reporter_logic(account_name, technology_name):
    """Logic for the run change reporter.

    Sequence: purge orphaned items, then watch + audit via find_changes,
    then alert. Database errors tear down the session, are persisted, and
    are re-raised to the caller.
    """
    try:
        # Before doing anything... Look for orphaned items for this given technology. If they exist, then delete them:
        fix_orphaned_deletions(account_name, technology_name)

        # Watch and Audit:
        monitors = find_changes(account_name, technology_name)

        # Alert:
        app.logger.info("[ ] Sending alerts (if applicable) for account: {}, technology: {}".format(account_name, technology_name))
        Alerter(monitors, account=account_name).report()
    except (OperationalError, InvalidRequestError, StatementError) as e:
        app.logger.exception("[X] Database error processing account %s - technology %s cleaning up session.",
                             account_name, technology_name)
        db.session.remove()
        store_exception("scheduler-task-account-tech", None, e)
        raise e
def manual_run_change_finder(accounts, technologies):
    """Manual change finder.

    Runs find_changes for every (account, technology) combination, timing
    each account. Database errors tear down the session, are persisted,
    and are re-raised.
    """
    app.logger.info("[ ] Executing manual find changes task...")
    try:
        for account_name in accounts:
            started = time.time()
            for technology in technologies:
                find_changes(account_name, technology)
            elapsed = time.time() - started
            app.logger.info('[@] Run Account %s took %0.1f s' % (account_name, elapsed))
        app.logger.info("[+] Completed manual change finder.")
    except (OperationalError, InvalidRequestError, StatementError) as e:
        app.logger.exception("[X] Database error processing cleaning up session.")
        db.session.remove()
        store_exception("scheduler-run-change-reporter", None, e)
        raise e
def test_exception_clearing(self):
    """clear_old_exceptions() must delete expired entries and keep live
    ones: five expired rows are stored plus one with the default (future)
    TTL; only the live one should survive."""
    location = ("iamrole", "testing", "us-west-2", "testrole")

    # Store five exceptions whose TTL is already one day in the past:
    for i in range(0, 5):
        try:
            raise ValueError("This is test: {}".format(i))
        except ValueError as e:
            test_exception = e
        store_exception("tests", location, test_exception,
                        ttl=(datetime.datetime.now() - datetime.timedelta(days=1)))

    # ...and one with the default (future) TTL:
    store_exception("tests", location, test_exception)

    clear_old_exceptions()

    # Get all the exceptions:
    exc_list = ExceptionLogs.query.all()

    # Only the unexpired record remains:
    assert len(exc_list) == 1
def test_doesnt_delete_parent_cascade(self):
    """
    If the exception is deleted, the parent (tech., item, account) should not be deleted.
    :return:
    """
    # Raise-and-catch so the exception carries a real traceback:
    try:
        raise ValueError("This is a test")
    except ValueError as e:
        test_exception = e

    location = ("iamrole", "testing", "us-west-2", "testrole")
    store_exception("tests", location, test_exception)

    exc = ExceptionLogs.query.all()
    db.session.delete(exc[0])
    db.session.commit()

    # Deleting the exception row must not cascade to any parent:
    assert len(Item.query.filter(Item.name == "testrole").all()) == 1
    assert len(Technology.query.filter(Technology.name == "iamrole").all()) == 1
    assert len(Account.query.filter(Account.name == "testing").all()) == 1
def amazon_accounts():
    """ Pre-populates standard AWS owned accounts.

    Ensures the 'AWS' AccountType row exists, then inserts/updates one
    inactive, third-party Account (matched by identifier) per entry in
    data/aws_accounts.json. Failures are logged and persisted.
    """
    import os
    import json
    from security_monkey.datastore import Account, AccountType

    # FIX: resolve the data file relative to this module instead of the
    # process CWD (a bare "data/..." path broke when the app was launched
    # from any other directory — the sibling versions of this function
    # already use the __file__-relative form), and close the handle
    # deterministically with a context manager.
    data_file = os.path.join(os.path.dirname(__file__), "data", "aws_accounts.json")
    with open(data_file, 'r') as f:
        data = json.load(f)

    app.logger.info('Adding / updating Amazon owned accounts')
    try:
        # Create the 'AWS' account type on first run:
        account_type_result = AccountType.query.filter(AccountType.name == 'AWS').first()
        if not account_type_result:
            account_type_result = AccountType(name='AWS')
            db.session.add(account_type_result)
            db.session.commit()
            db.session.refresh(account_type_result)

        for group, info in data.items():
            for aws_account in info['accounts']:
                acct_name = "{group} ({region})".format(group=group, region=aws_account['region'])
                account = Account.query.filter(Account.identifier == aws_account['account_id']).first()
                if not account:
                    app.logger.debug(' Adding account {0}'.format(acct_name))
                    account = Account()
                else:
                    app.logger.debug(' Updating account {0}'.format(acct_name))

                account.identifier = aws_account['account_id']
                account.account_type_id = account_type_result.id
                account.active = False
                account.third_party = True
                account.name = acct_name
                account.notes = info['url']
                db.session.add(account)

        db.session.commit()
        app.logger.info('Finished adding Amazon owned accounts')
    except Exception as e:
        app.logger.exception("An error occured while adding accounts")
        store_exception("manager-amazon-accounts", None, e)
def task_audit(self, account_name, technology_name):
    """Celery task: audit changes for one account/technology pair.

    Failures are reported to Sentry (if configured), persisted via
    store_exception, and the task is retried.
    """
    setup()
    app.logger.info(
        "[ ] Executing Celery task to audit changes for Account: {} Technology: {}"
        .format(account_name, technology_name))
    try:
        audit_changes([account_name], [technology_name], True)
        app.logger.info(
            "[+] Completed Celery task for account: {}, technology: {}".format(
                account_name, technology_name))
    except Exception as e:
        if sentry:
            sentry.captureException()
        app.logger.error(
            "[X] Task Audit Scheduler Exception ({}/{}): {}".format(
                account_name, technology_name, e))
        app.logger.error(traceback.format_exc())
        store_exception("scheduler-exception-on-audit", None, e)
        self.retry(exc=e)
def test_safe_child_deletion_cascade(self):
    """
    If the parent is deleted (item, account, tech.), the exception should be deleted, BUT
    the other parents should remain.
    :return:
    """
    # Raise-and-catch so the exception carries a real traceback:
    try:
        raise ValueError("This is a test")
    except ValueError as e:
        test_exception = e

    location = ("iamrole", "testing", "us-west-2", "testrole")
    store_exception("tests", location, test_exception)

    # Delete one parent (the item):
    db.session.delete(self.item)
    db.session.commit()

    # The exception cascades away with the deleted parent...
    exc = ExceptionLogs.query.all()
    assert len(exc) == 0
    assert len(Item.query.filter(Item.name == "testrole").all()) == 0

    # ...but the other parents are untouched:
    assert len(Technology.query.filter(Technology.name == "iamrole").all()) == 1
    assert len(Account.query.filter(Account.name == "testing").all()) == 1
def manual_run_change_reporter(accounts):
    """Manual change reporting from the command line.

    Runs the reporter logic for every watcher-backed monitor of each given
    account, timing each account. Database errors tear down the session,
    are persisted, and are re-raised.
    """
    app.logger.info("[ ] Executing manual change reporter task...")

    try:
        for account in accounts:
            time1 = time.time()

            rep = Reporter(account=account)
            for monitor in rep.all_monitors:
                # Only monitors that have a watcher can find changes:
                if monitor.watcher:
                    app.logger.info("[ ] Running change finder for "
                                    "account: {} technology: {}".format(account, monitor.watcher.index))
                    reporter_logic(account, monitor.watcher.index)

            time2 = time.time()
            app.logger.info('[@] Run Account %s took %0.1f s' % (account, (time2 - time1)))

        app.logger.info("[+] Completed manual change reporting.")
    except (OperationalError, InvalidRequestError, StatementError) as e:
        app.logger.exception("[X] Database error processing cleaning up session.")
        db.session.remove()
        store_exception("scheduler-run-change-reporter", None, e)
        raise e
def decorated_function(*args, **kwargs):
    """Invoke the wrapped function ``f``; on any exception, record a
    BotoConnectionIssue into ``exception_map`` keyed by the most specific
    (index, account, region, name) location available, and persist the
    original exception via ``store_exception``.
    """
    try:
        return f(*args, **kwargs)
    except Exception as e:
        index = kwargs.get('index')
        account = kwargs.get('account_name')
        # Allow the recording region to be overridden for universal tech like IAM
        region = kwargs.get('exception_record_region') or kwargs.get('region')
        name = kwargs.get('name')
        # FIX: default to a fresh dict when the caller did not supply an
        # exception_map — previously this was None and the assignment below
        # raised TypeError, masking the original exception. (Matches the
        # later variant of this wrapper, which defaults the map to {}.)
        exception_map = kwargs.get('exception_map')
        if exception_map is None:
            exception_map = {}
        exc = BotoConnectionIssue(str(e), index, account, name)
        # Most specific location tuple available:
        if name:
            location = (index, account, region, name)
        elif region:
            location = (index, account, region)
        elif account:
            location = (index, account)
        else:
            location = (index, )
        exception_map[location] = exc

        # Store the exception (the original one passed in, not exc):
        store_exception(source=source, location=location, exception=e)
def setup_the_tasks(sender, **kwargs):
    """Celery signal handler: purge queued tasks, then register an immediate
    and a periodic watcher task for every active, non-third-party account.

    :param sender: the Celery app instance (supplies ``add_periodic_task``).
    :param kwargs: unused signal arguments.
    """
    setup()

    # Purge out all current tasks waiting to execute:
    purge_it()

    # Add all the tasks:
    try:
        # TODO: Investigate options to have the scheduler skip different types of accounts
        accounts = Account.query.filter(Account.third_party == False).filter(
            Account.active == True).all()  # noqa
        for account in accounts:
            app.logger.info(
                "[ ] Scheduling tasks for {type} account: {name}".format(
                    type=account.type.name, name=account.name))
            rep = Reporter(account=account.name)

            for monitor in rep.all_monitors:
                if monitor.watcher:
                    app.logger.debug(
                        "[{}] Scheduling for technology: {}".format(
                            account.type.name, monitor.watcher.index))

                    # The watcher interval is in minutes; Celery's
                    # add_periodic_task wants seconds.
                    interval_minutes = monitor.watcher.get_interval()
                    interval = interval_minutes * 60

                    # Start the task immediately:
                    task_account_tech.apply_async(
                        (account.name, monitor.watcher.index))
                    app.logger.debug("[-->] Scheduled immediate task")

                    # Schedule it based on the schedule:
                    sender.add_periodic_task(
                        interval,
                        task_account_tech.s(account.name, monitor.watcher.index))
                    # BUG FIX: log the minute value -- previously this printed
                    # the seconds value while claiming it was minutes.
                    app.logger.debug(
                        "[+] Scheduled task to occur every {} minutes".format(
                            interval_minutes))

                    # TODO: Due to a bug with Celery (https://github.com/celery/celery/issues/4041) we temporarily
                    # disabled this to avoid many duplicate events from getting added.
                    # Also schedule a manual audit changer just in case it doesn't properly
                    # audit (only for non-batched):
                    # if not monitor.batch_support:
                    #     sender.add_periodic_task(
                    #         crontab(hour=10, day_of_week="mon-fri"), task_audit.s(account.name, monitor.watcher.index))
                    #     app.logger.debug("[+] Scheduled task for tech: {} for audit".format(monitor.watcher.index))
                    #
                    # app.logger.debug("[{}] Completed scheduling for technology: {}".format(account.name,
                    #                                                                        monitor.watcher.index))

            app.logger.debug(
                "[+] Completed scheduling tasks for account: {}".format(
                    account.name))

        # Schedule the task for clearing out old exceptions:
        app.logger.info("Scheduling task to clear out old exceptions.")

        # TODO: Investigate if this creates many duplicate tasks RE: Celery bug mentioned above
        sender.add_periodic_task(crontab(hour=3, minute=0), clear_expired_exceptions.s())

    except Exception as e:
        if sentry:
            sentry.captureException()
        app.logger.error("[X] Scheduler Exception: {}".format(e))
        app.logger.error(traceback.format_exc())
        store_exception("scheduler", None, e)
def exception_listener(event):
    """APScheduler listener: persist any uncaught job exception to the database."""
    uncaught = event.exception
    store_exception("scheduler-change-reporter-uncaught", None, uncaught)
def setup_the_tasks(sender, **kwargs):
    """Celery signal handler: purge queued tasks, then register the watcher
    tasks for every active, non-third-party account, honoring the
    only-watch / ignore sets from the Celery config file.

    :param sender: the Celery app instance (supplies ``add_periodic_task``).
    :param kwargs: unused signal arguments.
    """
    setup()

    # Purge out all current tasks waiting to execute:
    purge_it()

    # Get the celery configuration (Get the raw module since Celery doesn't document a good way to do this
    # see https://github.com/celery/celery/issues/4633):
    celery_config = get_celery_config_file()

    # Add all the tasks:
    try:
        accounts = Account.query.filter(Account.third_party == False).filter(Account.active == True).all()  # noqa
        for account in accounts:
            rep = Reporter(account=account.name)

            # Is this a dedicated watcher stack, or is this stack ignoring anything?
            only_watch = get_sm_celery_config_value(celery_config, "security_monkey_only_watch", set)

            # If only_watch is set, then ignoring is ignored.
            if only_watch:
                ignoring = set()
            else:
                # Check if we are ignoring any watchers:
                ignoring = get_sm_celery_config_value(celery_config, "security_monkey_watcher_ignore", set) or set()

            for monitor in rep.all_monitors:
                # Is this watcher enabled?
                if monitor.watcher.is_active() and monitor.watcher.index not in ignoring:
                    # Did we specify specific watchers to run?
                    if only_watch and monitor.watcher.index not in only_watch:
                        continue

                    app.logger.info("[ ] Scheduling tasks for {type} account: {name}".format(type=account.type.name,
                                                                                             name=account.name))

                    # Interval is in minutes; a falsy interval means the
                    # technology is monitored externally and must be skipped.
                    interval = monitor.watcher.get_interval()
                    if not interval:
                        app.logger.debug("[/] Skipping watcher for technology: {} because it is set for external "
                                         "monitoring.".format(monitor.watcher.index))
                        continue

                    app.logger.debug("[{}] Scheduling for technology: {}".format(account.type.name,
                                                                                 monitor.watcher.index))

                    # Start the task immediately:
                    task_account_tech.apply_async((account.name, monitor.watcher.index))
                    app.logger.debug("[-->] Scheduled immediate task")

                    # Celery's add_periodic_task takes seconds (or a crontab):
                    schedule = interval * 60
                    schedule_at_full_hour = get_sm_celery_config_value(celery_config, "schedule_at_full_hour", bool) or False
                    if schedule_at_full_hour:
                        # Snap well-known intervals to clock boundaries instead
                        # of "N seconds after worker start":
                        if interval == 15:  # 15 minute
                            schedule = crontab(minute="0,15,30,45")
                        elif interval == 60:  # Hourly
                            schedule = crontab(minute="0")
                        elif interval == 720:  # 12 hour
                            schedule = crontab(minute="0", hour="0,12")
                        elif interval == 1440:  # Daily
                            schedule = crontab(minute="0", hour="0")
                        elif interval == 10080:  # Weekly
                            schedule = crontab(minute="0", hour="0", day_of_week="0")

                    # Schedule it based on the schedule:
                    sender.add_periodic_task(schedule, task_account_tech.s(account.name, monitor.watcher.index))
                    app.logger.debug("[+] Scheduled task to occur every {} minutes".format(interval))

                    # TODO: Due to a bug with Celery (https://github.com/celery/celery/issues/4041) we temporarily
                    # disabled this to avoid many duplicate events from getting added.
                    # Also schedule a manual audit changer just in case it doesn't properly
                    # audit (only for non-batched):
                    # if not monitor.batch_support:
                    #     sender.add_periodic_task(
                    #         crontab(hour=10, day_of_week="mon-fri"), task_audit.s(account.name, monitor.watcher.index))
                    #     app.logger.debug("[+] Scheduled task for tech: {} for audit".format(monitor.watcher.index))
                    #
                    # app.logger.debug("[{}] Completed scheduling for technology: {}".format(account.name,
                    #                                                                        monitor.watcher.index))

            app.logger.debug("[+] Completed scheduling tasks for account: {}".format(account.name))

        # Schedule the task for clearing out old exceptions:
        app.logger.info("Scheduling task to clear out old exceptions.")

        # Run every 24 hours (and clear it now):
        clear_expired_exceptions.apply_async()
        sender.add_periodic_task(86400, clear_expired_exceptions.s())

    except Exception as e:
        if sentry:
            sentry.captureException()
        app.logger.error("[X] Scheduler Exception: {}".format(e))
        app.logger.error(traceback.format_exc())
        store_exception("scheduler", None, e)
def slurp(self):
    """
    :returns: item_list - list of cloud_trail items.
    :returns: exception_map - A dict where the keys are a tuple containing the location of the exception
        and the value is the actual exception
    """
    self.prep_for_slurp()
    item_list = []
    exception_map = {}
    from security_monkey.common.sts_connect import connect
    for account in self.accounts:
        for region in regions():
            app.logger.debug(
                "Checking {}/{}/{}".format(self.index, account, region.name))
            try:
                cloud_trail = connect(
                    account, 'boto3.cloudtrail.client', region=region)
                app.logger.debug("Cloud Trail is: {}".format(cloud_trail))
                response = self.wrap_aws_rate_limited_call(
                    cloud_trail.describe_trails
                )
                trails = response.get('trailList', [])
            except Exception as e:
                app.logger.debug("Exception found: {}".format(e))
                # Known-flaky regions (TROUBLE_REGIONS) are skipped silently;
                # only record an exception for the others.
                if region.name not in TROUBLE_REGIONS:
                    exc = BotoConnectionIssue(
                        str(e), self.index, account, region.name)
                    self.slurp_exception(
                        (self.index, account, region.name), exc, exception_map)
                continue

            app.logger.debug("Found {} {}.".format(
                len(trails), self.i_am_plural))
            for trail in trails:
                name = trail.get('Name')
                # Some trails are returned for every region, however, HomeRegion
                # always refers to the region in which the trail was
                # created.
                home_region = trail.get('HomeRegion')

                # Best-effort status lookup: on failure, trail_enabled stays ""
                # and the exception is stored to the DB.
                trail_enabled = ""
                try:
                    get_trail_status = self.wrap_aws_rate_limited_call(cloud_trail.get_trail_status,
                                                                       Name=trail['TrailARN'])
                    trail_enabled = get_trail_status["IsLogging"]
                except Exception as e:
                    app.logger.debug("Issues getting the status of cloudtrail")
                    # Store it to the database:
                    location = (self.index, account, region.name, name)
                    store_exception("cloudtrail", location, e)

                if self.check_ignore_list(name):
                    continue

                item_config = {
                    'trail': name,
                    'trail_status': trail_enabled,
                    's3_bucket_name': trail['S3BucketName'],
                    's3_key_prefix': trail.get('S3KeyPrefix'),
                    'sns_topic_name': trail.get('SnsTopicName'),
                    'include_global_service_events': trail.get('IncludeGlobalServiceEvents', False),
                    'is_multi_region_trail': trail.get('IsMultiRegionTrail', False),
                    'home_region': home_region,
                    'trail_arn': trail.get('TrailARN'),
                    'log_file_validation_enabled': trail.get('LogFileValidationEnabled', False),
                    'cloudwatch_logs_log_group_arn': trail.get('CloudWatchLogsLogGroupArn'),
                    'cloudwatch_logs_role_arn': trail.get('CloudWatchLogsRoleArn'),
                    'kms_key_id': trail.get('KmsKeyId'),
                }

                # Utilizing home_region here ensures a single, unique entry
                # for each CloudTrail resource
                item = CloudTrailItem(
                    region=home_region,
                    account=account,
                    name=name,
                    arn=trail.get('TrailARN'),
                    config=item_config)
                item_list.append(item)
    return item_list, exception_map
def setup_the_tasks(sender, **kwargs):
    """Celery signal handler: purge queued tasks, then register the watcher
    tasks for every active, non-third-party account, honoring the
    only-watch / ignore sets from the Celery config file.

    :param sender: the Celery app instance (supplies ``add_periodic_task``).
    :param kwargs: unused signal arguments.
    """
    setup()

    # Purge out all current tasks waiting to execute:
    purge_it()

    # Get the celery configuration (Get the raw module since Celery doesn't document a good way to do this
    # see https://github.com/celery/celery/issues/4633):
    celery_config = get_celery_config_file()

    # Add all the tasks:
    try:
        accounts = Account.query.filter(Account.third_party == False).filter(Account.active == True).all()  # noqa
        for account in accounts:
            rep = Reporter(account=account.name)

            # Is this a dedicated watcher stack, or is this stack ignoring anything?
            only_watch = get_sm_celery_config_value(celery_config, "security_monkey_only_watch", set)

            # If only_watch is set, then ignoring is ignored.
            if only_watch:
                ignoring = set()
            else:
                # Check if we are ignoring any watchers:
                ignoring = get_sm_celery_config_value(celery_config, "security_monkey_watcher_ignore", set) or set()

            for monitor in rep.all_monitors:
                # Is this watcher enabled?
                if monitor.watcher.is_active() and monitor.watcher.index not in ignoring:
                    # Did we specify specific watchers to run?
                    if only_watch and monitor.watcher.index not in only_watch:
                        continue

                    app.logger.info("[ ] Scheduling tasks for {type} account: {name}".format(type=account.type.name,
                                                                                             name=account.name))

                    # BUG FIX: keep the interval in minutes and run the
                    # external-monitoring guard BEFORE multiplying -- the old
                    # code computed `get_interval() * 60` first, which raises
                    # TypeError when get_interval() returns None.
                    interval = monitor.watcher.get_interval()
                    if not interval:
                        app.logger.debug("[/] Skipping watcher for technology: {} because it is set for external "
                                         "monitoring.".format(monitor.watcher.index))
                        continue

                    app.logger.debug("[{}] Scheduling for technology: {}".format(account.type.name,
                                                                                 monitor.watcher.index))

                    # Start the task immediately:
                    task_account_tech.apply_async((account.name, monitor.watcher.index))
                    app.logger.debug("[-->] Scheduled immediate task")

                    # Schedule it based on the schedule (Celery wants seconds):
                    sender.add_periodic_task(interval * 60, task_account_tech.s(account.name, monitor.watcher.index))
                    # BUG FIX: previously this logged the seconds value while
                    # claiming it was minutes.
                    app.logger.debug("[+] Scheduled task to occur every {} minutes".format(interval))

                    # TODO: Due to a bug with Celery (https://github.com/celery/celery/issues/4041) we temporarily
                    # disabled this to avoid many duplicate events from getting added.
                    # Also schedule a manual audit changer just in case it doesn't properly
                    # audit (only for non-batched):
                    # if not monitor.batch_support:
                    #     sender.add_periodic_task(
                    #         crontab(hour=10, day_of_week="mon-fri"), task_audit.s(account.name, monitor.watcher.index))
                    #     app.logger.debug("[+] Scheduled task for tech: {} for audit".format(monitor.watcher.index))
                    #
                    # app.logger.debug("[{}] Completed scheduling for technology: {}".format(account.name,
                    #                                                                        monitor.watcher.index))

            app.logger.debug("[+] Completed scheduling tasks for account: {}".format(account.name))

        # Schedule the task for clearing out old exceptions:
        app.logger.info("Scheduling task to clear out old exceptions.")

        # Run every 24 hours (and clear it now):
        clear_expired_exceptions.apply_async()
        sender.add_periodic_task(86400, clear_expired_exceptions.s())

    except Exception as e:
        if sentry:
            sentry.captureException()
        app.logger.error("[X] Scheduler Exception: {}".format(e))
        app.logger.error(traceback.format_exc())
        store_exception("scheduler", None, e)
def slurp(self):
    """Scan every region of every configured account for CloudTrail trails.

    :returns: item_list - list of cloud_trail items.
    :returns: exception_map - A dict where the keys are a tuple containing the location of the exception
        and the value is the actual exception
    """
    self.prep_for_slurp()

    results = []
    failures = {}

    from security_monkey.common.sts_connect import connect

    for account in self.accounts:
        for region in regions():
            app.logger.debug("Checking {}/{}/{}".format(self.index, account, region.name))

            try:
                ct_client = connect(account, 'boto3.cloudtrail.client', region=region)
                app.logger.debug("Cloud Trail is: {}".format(ct_client))
                trails = self.wrap_aws_rate_limited_call(ct_client.describe_trails).get('trailList', [])
            except Exception as e:
                app.logger.debug("Exception found: {}".format(e))
                # Known-flaky regions are skipped silently; everything else is recorded.
                if region.name not in TROUBLE_REGIONS:
                    issue = BotoConnectionIssue(str(e), self.index, account, region.name)
                    self.slurp_exception((self.index, account, region.name), issue, failures)
                continue

            app.logger.debug("Found {} {}.".format(len(trails), self.i_am_plural))

            for trail in trails:
                trail_name = trail.get('Name')
                # HomeRegion always refers to the region a trail was created in,
                # even though the trail may be returned by every region.
                trail_home = trail.get('HomeRegion')

                # Best-effort status lookup; a failure leaves the status as "".
                is_logging = ""
                try:
                    status = self.wrap_aws_rate_limited_call(ct_client.get_trail_status,
                                                             Name=trail['TrailARN'])
                    is_logging = status["IsLogging"]
                except Exception as e:
                    app.logger.debug("Issues getting the status of cloudtrail")
                    # Store it to the database:
                    store_exception("cloudtrail", (self.index, account, region.name, trail_name), e)

                if self.check_ignore_list(trail_name):
                    continue

                config = {
                    'trail': trail_name,
                    'trail_status': is_logging,
                    's3_bucket_name': trail['S3BucketName'],
                    's3_key_prefix': trail.get('S3KeyPrefix'),
                    'sns_topic_name': trail.get('SnsTopicName'),
                    'include_global_service_events': trail.get('IncludeGlobalServiceEvents', False),
                    'is_multi_region_trail': trail.get('IsMultiRegionTrail', False),
                    'home_region': trail_home,
                    'trail_arn': trail.get('TrailARN'),
                    'log_file_validation_enabled': trail.get('LogFileValidationEnabled', False),
                    'cloudwatch_logs_log_group_arn': trail.get('CloudWatchLogsLogGroupArn'),
                    'cloudwatch_logs_role_arn': trail.get('CloudWatchLogsRoleArn'),
                    'kms_key_id': trail.get('KmsKeyId'),
                }

                # Keying on the home region yields one unique entry per CloudTrail resource.
                results.append(CloudTrailItem(region=trail_home,
                                              account=account,
                                              name=trail_name,
                                              arn=trail.get('TrailARN'),
                                              config=config,
                                              source_watcher=self))

    return results, failures