def __init__(
        self,
        repo_name,
        repo_args,
        conduit_manager,
        url_watcher_wrapper,
        sys_admin_emails,
        mail_sender):
    """Initialise the repo controller.

    :repo_name: name of the repository to manage
    :repo_args: parsed configuration arguments for the repository
    :conduit_manager: supplies the conduit and review cache for the repo
    :url_watcher_wrapper: wrapper around the url watcher
    :sys_admin_emails: addresses to notify about unhandled exceptions
    :mail_sender: used to send mail to users of the repository

    """
    self._is_disabled = False

    # wrap the plain git repo with a ref cache before handing it to abdt
    self._refcache_repo = phlgitx_refcache.Repo(
        phlsys_git.Repo(repo_args.repo_path))
    self._abd_repo = abdt_git.Repo(
        self._refcache_repo,
        "origin",
        repo_args.repo_desc)

    self._name = repo_name
    self._args = repo_args
    self._conduit_manager = conduit_manager

    conduit_cache = conduit_manager.get_conduit_and_cache_for_args(
        repo_args)
    self._arcyd_conduit, self._review_cache = conduit_cache

    # NOTE(review): the original assigned self._mail_sender twice; the
    # redundant second assignment has been removed.
    self._mail_sender = mail_sender
    self._url_watcher_wrapper = url_watcher_wrapper

    self._on_exception = abdt_exhandlers.make_exception_delay_handler(
        sys_admin_emails, repo_name)
def __init__(
        self,
        repo_name,
        repo_args,
        conduit_manager,
        url_watcher_wrapper,
        sys_admin_emails,
        mail_sender):
    """Initialise the repo controller.

    :repo_name: name of the repository to manage
    :repo_args: parsed configuration arguments for the repository
    :conduit_manager: supplies the conduit and review cache for the repo
    :url_watcher_wrapper: wrapper around the url watcher
    :sys_admin_emails: addresses to notify about unhandled exceptions
    :mail_sender: used to send mail to users of the repository

    """
    # retry a failing repo at increasing intervals before giving up
    self._active_state = _RepoActiveRetryState(
        retry_timestr_list=["10 seconds", "10 minutes", "1 hours"])

    # layer the ref cache and differ cache over the plain git repo
    sys_repo = phlsys_git.Repo(repo_args.repo_path)
    self._refcache_repo = phlgitx_refcache.Repo(sys_repo)
    self._differ_cache = abdt_differresultcache.Cache(self._refcache_repo)
    self._abd_repo = abdt_git.Repo(
        self._refcache_repo,
        self._differ_cache,
        "origin",
        repo_args.repo_desc)

    self._name = repo_name
    self._args = repo_args
    self._conduit_manager = conduit_manager

    conduit_cache = conduit_manager.get_conduit_and_cache_for_args(
        repo_args)
    self._arcyd_conduit, self._review_cache = conduit_cache

    # NOTE(review): the original assigned self._mail_sender twice; the
    # redundant second assignment has been removed.
    self._mail_sender = mail_sender
    self._url_watcher_wrapper = url_watcher_wrapper

    self._on_exception = abdt_exhandlers.make_exception_delay_handler(
        sys_admin_emails, repo_name)
def _process_operations(is_no_loop, operations, sys_admin_emails, reporter):
    """Run the supplied operations, either once or looping forever.

    :is_no_loop: if True, run each operation once and return / exit
    :operations: iterable of schedulable operations to process
    :sys_admin_emails: addresses to notify about unhandled exceptions
    :reporter: used when constructing the exception delay handler
    :returns: None

    """
    on_exception_delay = abdt_exhandlers.make_exception_delay_handler(
        sys_admin_emails, reporter, None)

    if is_no_loop:

        def process_once():
            return phlsys_scheduleunreliables.process_once(list(operations))

        new_ops = _try_handle_reset_file(process_once, on_exception_delay)

        # an operation that failed permanently is dropped from the returned
        # set, so any difference means at least one operation failed
        if new_ops != set(operations):
            # NOTE(review): parenthesised so the line also parses under
            # python 3; behaviour is identical in python 2 for one argument
            print('ERROR: some operations failed')
            sys.exit(1)
    else:

        def loopForever():
            phlsys_scheduleunreliables.process_loop_forever(list(operations))

        while True:
            # keep looping even if a reset is requested via the reset file
            _try_handle_reset_file(loopForever, on_exception_delay)
def _append_operations_for_repos(
        operations,
        reporter,
        conduits,
        url_watcher,
        urlwatcher_cache_path,
        sys_admin_emails,
        repos):
    """Append one delayed-retry update operation per repo to 'operations'.

    :operations: list to append the new operations to
    :reporter: used when constructing exception delay handlers
    :conduits: shared mapping of conduits for the repos
    :url_watcher: watcher shared between the repo operations
    :urlwatcher_cache_path: path to the url watcher's cache file
    :sys_admin_emails: addresses to notify about unhandled exceptions
    :repos: iterable of (repo_name, repo_args) pairs
    :returns: None

    """
    to_delta = phlsys_strtotime.duration_string_to_time_delta
    retry_delays = [to_delta(text) for text in ["10 minutes", "1 hours"]]

    for repo_name, repo_args in repos:
        abd_repo = abdt_git.Repo(
            phlsys_git.Repo(repo_args.repo_path),
            "origin",
            repo_args.repo_desc)

        # bind the current loop values with functools.partial; a plain
        # closure would observe only the final values of these variables
        update_repo = functools.partial(
            _process_single_repo,
            abd_repo,
            repo_name,
            repo_args,
            reporter,
            conduits,
            url_watcher,
            urlwatcher_cache_path)

        notify_delay = abdt_exhandlers.make_exception_delay_handler(
            sys_admin_emails, reporter, repo_name)

        operations.append(
            phlsys_scheduleunreliables.DelayedRetryNotifyOperation(
                update_repo,
                list(retry_delays),  # copy, so each operation owns its list
                notify_delay))
def __init__(self, repo_name, repo_args, conduit_manager,
             url_watcher_wrapper, sys_admin_emails, mail_sender):
    """Initialise the repo controller.

    :repo_name: name of the repository to manage
    :repo_args: parsed configuration arguments for the repository
    :conduit_manager: supplies the conduit and review cache for the repo
    :url_watcher_wrapper: wrapper around the url watcher
    :sys_admin_emails: addresses to notify about unhandled exceptions
    :mail_sender: used to send mail to users of the repository

    """
    # retry a failing repo at increasing intervals before giving up
    self._active_state = _RepoActiveRetryState(
        retry_timestr_list=["10 seconds", "10 minutes", "1 hours"])

    # layer the ref cache and differ cache over the plain git repo
    sys_repo = phlsys_git.Repo(repo_args.repo_path)
    self._refcache_repo = phlgitx_refcache.Repo(sys_repo)
    self._differ_cache = abdt_differresultcache.Cache(self._refcache_repo)
    self._abd_repo = abdt_git.Repo(
        self._refcache_repo,
        self._differ_cache,
        "origin",
        repo_args.repo_desc)

    self._name = repo_name
    self._args = repo_args
    self._conduit_manager = conduit_manager

    conduit_cache = conduit_manager.get_conduit_and_cache_for_args(
        repo_args)
    self._arcyd_conduit, self._review_cache = conduit_cache

    # NOTE(review): the original assigned self._mail_sender twice; the
    # redundant second assignment has been removed.
    self._mail_sender = mail_sender
    self._url_watcher_wrapper = url_watcher_wrapper

    self._on_exception = abdt_exhandlers.make_exception_delay_handler(
        sys_admin_emails, repo_name)
def on_pause():
    """Notify the administrators that processing is paused.

    Builds a fresh delay handler and invokes it with the
    "until_file_removed" marker.

    """
    notify = abdt_exhandlers.make_exception_delay_handler(
        sys_admin_emails, reporter, None)
    notify("until_file_removed")
def _process(args, reporter):
    """Parse the repo configs and run their update operations.

    Builds one delayed-retry operation per configured repo plus the
    file-check, sleep and cache-refresh housekeeping operations, then
    processes them once (args.no_loop) or forever.

    :args: parsed command-line arguments; provides repo_configs,
           kill/reset/pause file paths, sleep_secs and no_loop
    :reporter: used to report status and passed to exception handlers
    :returns: None (may call sys.exit(1) in no_loop mode)

    """
    retry_delays = _get_retry_delays()

    # re-parse each repo's config file into its own argparse namespace
    repos = []
    for repo in args.repo_configs:
        parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
        abdi_repoargs.setup_parser(parser)
        repo_name = repo[0]  # oddly this comes to us as a list
        repo_name = repo_name[1:]  # strip off the '@' prefix
        repo_args = (repo_name, parser.parse_args(repo))
        repos.append(repo_args)

    out = phlsys_statusline.StatusLine()

    # TODO: test write access to repos here

    operations = []
    conduits = {}
    url_watcher = phlurl_watcher.Watcher()

    urlwatcher_cache_path = os.path.abspath('.arcyd.urlwatcher.cache')

    # load the url watcher cache (if any)
    if os.path.isfile(urlwatcher_cache_path):
        with open(urlwatcher_cache_path) as f:
            url_watcher.load(f)

    for repo, repo_args in repos:
        # create a function to update this particular repo.
        #
        # use partial to ensure we capture the value of the variables,
        # note that a closure would use the latest value of the variables
        # rather than the value at declaration time.
        process_func = functools.partial(
            process_single_repo,
            repo,
            repo_args,
            out,
            reporter,
            conduits,
            url_watcher,
            urlwatcher_cache_path)

        # NOTE(review): other call sites pass sys_admin_emails as the first
        # argument here; 'args' is passed instead - confirm the intended
        # signature of make_exception_delay_handler.
        on_exception_delay = abdt_exhandlers.make_exception_delay_handler(
            args, reporter, repo)

        operation = phlsys_scheduleunreliables.DelayedRetryNotifyOperation(
            process_func,
            list(retry_delays),  # make a copy to be sure
            on_exception_delay)

        operations.append(operation)

    def on_pause():
        # report that we are paused until the pause file is removed
        on_exception_delay = abdt_exhandlers.make_exception_delay_handler(
            args, reporter, None)
        on_exception_delay("until_file_removed")

    operations.append(
        FileCheckOperation(
            args.kill_file,
            args.reset_file,
            args.pause_file,
            on_pause))

    operations.append(
        DelayedRetrySleepOperation(
            out, args.sleep_secs, reporter))

    operations.append(
        RefreshCachesOperation(
            conduits, url_watcher, reporter))

    # NOTE(review): 'on_exception_delay' below is the handler left over from
    # the last iteration of the repo loop; if 'repos' is empty it is
    # undefined here and this raises NameError - verify this is intended.
    if args.no_loop:
        def process_once():
            return phlsys_scheduleunreliables.process_once(list(operations))

        new_ops = tryHandleSpecialFiles(process_once, on_exception_delay)
        # a difference means at least one operation removed itself, i.e.
        # it failed permanently
        if new_ops != set(operations):
            print 'ERROR: some operations failed'
            sys.exit(1)
    else:
        def loopForever():
            phlsys_scheduleunreliables.process_loop_forever(list(operations))

        while True:
            tryHandleSpecialFiles(loopForever, on_exception_delay)