def find_changes(account_name, monitor_name, debug=True):
    """
    Run the watcher for a single account/technology, persist what it finds,
    and then re-audit so downstream dependencies are accounted for.
    """
    # Orphaned items for this technology are deleted before anything else runs.
    fix_orphaned_deletions(account_name, monitor_name)

    monitors = get_monitors(account_name, [monitor_name], debug)
    for monitor in monitors:
        watcher = monitor.watcher
        app.logger.info("[-->] Looking for changes in account: {}, technology: {}".format(account_name, watcher.index))
        if not monitor.batch_support:
            # Just fetch normally...
            items, exception_map = watcher.slurp()
            watcher.find_changes(current=items, exception_map=exception_map)
            watcher.save()
        else:
            batch_logic(monitor, watcher, account_name, debug)

    # Batched monitors have already been monitored, and they will be skipped over.
    audit_changes([account_name], [monitor_name], False, debug)
    db.session.close()

    return monitors
def find_changes(account_name, monitor_name, debug=True):
    """
    Run the watcher and store the result, then re-audit every type so that
    downstream dependencies are picked up.
    """
    # Clean out orphaned items for this technology first, if any exist.
    fix_orphaned_deletions(account_name, monitor_name)

    monitors = get_monitors(account_name, [monitor_name], debug)
    for mon in monitors:
        cw = mon.watcher
        app.logger.info(
            "[-->] Looking for changes in account: {}, technology: {}".format(
                account_name, cw.index))

        if mon.batch_support:
            batch_logic(mon, cw, account_name, debug)
            continue

        # Just fetch normally...
        current_items, exc_map = cw.slurp()
        cw.find_changes(current=current_items, exception_map=exc_map)
        cw.save()

    # Batched monitors have already been monitored, and they will be skipped over.
    audit_changes([account_name], [monitor_name], False, debug)
    db.session.close()

    return monitors
def find_changes(accounts, monitor_names, debug=True):
    """
    Slurp every selected watcher, record the detected changes, and then
    audit the results for the given accounts/technologies.
    """
    for monitor in get_monitors(accounts, monitor_names, debug):
        watcher = monitor.watcher
        current_items, exc_map = watcher.slurp()
        watcher.find_changes(current=current_items, exception_map=exc_map)
        watcher.save()

    audit_changes(accounts, monitor_names, False, debug)
    db.session.close()
def find_changes(accounts, monitor_names, debug=True):
    """
    Run the watcher and store the result, re-auditing all types to account
    for downstream dependencies.
    """
    for account in accounts:
        for monitor in get_monitors(account, monitor_names, debug):
            watcher = monitor.watcher
            items, exc_map = watcher.slurp()
            watcher.find_changes(current=items, exception_map=exc_map)
            watcher.save()

    audit_changes(accounts, monitor_names, False, debug)
    db.session.close()
def find_changes(accounts, monitor_names, debug=True):
    """
    Run the watchers for every selected account and store their results,
    then re-audit all types to account for downstream dependencies.
    """
    for account in accounts:
        for monitor in get_monitors(account, monitor_names, debug):
            watcher = monitor.watcher
            if not monitor.batch_support:
                # Just fetch normally...
                items, exc_map = watcher.slurp()
                watcher.find_changes(current=items, exception_map=exc_map)
                watcher.save()
            else:
                batch_logic(monitor, watcher, account, debug)

    # Batched monitors have already been monitored, and they will be skipped over.
    audit_changes(accounts, monitor_names, False, debug)
    db.session.close()
def backup_config_to_json(account_names, monitor_names, output_folder):
    """
    Dump the monitored items for each selected account/technology pair into
    JSON files under ``output_folder``.
    """
    for account in account_names:
        for monitor in get_monitors(account, monitor_names):
            _backup_items_in_account(account, monitor.watcher, output_folder)
def audit_changes(accounts, monitor_names, send_report, debug=True):
    """
    Run every auditor attached to the selected monitors, optionally sending
    a report of the findings.
    """
    for monitor in get_monitors(accounts, monitor_names, debug):
        _audit_changes(monitor.auditors, send_report, debug)