def process(args):
    # exit gracefully if this process is killed
    phlsys_signal.set_exit_on_sigterm()

    fs = abdt_fs.make_default_accessor()

    pid = fs.get_pid_or_none()
    if pid is not None and phlsys_pid.is_running(pid):
        raise Exception("already running")

    if not args.foreground:
        phlsys_daemonize.do(
            stdout_path=fs.layout.stdout,
            stderr_path=fs.layout.stderr)

    # important that we do this *after* daemonizing
    pid = phlsys_pid.get()
    fs.set_pid(pid)

    parser = argparse.ArgumentParser()
    params = []

    for line in open(fs.layout.root_config):
        params.append(line.strip())

    if args.no_loop:
        params.append('--no-loop')

    abdi_processrepos.setupParser(parser)
    args = parser.parse_args(params)

    abdi_processrepos.process(args, fs.repo_config_path_list())
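# A minimal sketch of the "exit gracefully if this process is killed"
# behaviour assumed of phlsys_signal.set_exit_on_sigterm() above:
# converting SIGTERM into SystemExit so that 'finally' blocks and
# context managers still run. Illustrative only, not the real helper.
import signal
import sys


def set_exit_on_sigterm_sketch():

    def handle_sigterm(signum, frame):
        # raising SystemExit unwinds the stack, so cleanup code runs
        sys.exit(128 + signum)

    signal.signal(signal.SIGTERM, handle_sigterm)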
def start_arcyd(daemonize=True, loop=True, restart=False, stop_message=''):
    # exit gracefully if this process is killed
    phlsys_signal.set_exit_on_sigterm()

    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            if restart:
                stop_arcyd_pid(pid, fs.layout.killfile, stop_message)
            else:
                raise Exception("already running")

        if daemonize:
            phlsys_daemonize.do(
                stdout_path=fs.layout.stdout,
                stderr_path=fs.layout.stderr)

        # important that we do this *after* daemonizing
        pid = phlsys_pid.get()
        fs.set_pid(pid)

        parser = argparse.ArgumentParser()
        params = []

        for line in open(fs.layout.root_config):
            params.append(line.strip())

        if not loop:
            params.append('--no-loop')

        repo_configs = abdi_repoargs.parse_config_file_list(
            fs.repo_config_path_list())

    abdi_processrepos.setupParser(parser)
    args = parser.parse_args(params)

    def logger_config():
        _setup_logger(fs, daemonize)

    with phlsys_multiprocessing.logging_context(logger_config):
        _LOGGER.debug("start with args: {}".format(args))

        while True:
            _LOGGER.info("arcyd started")
            try:
                exit_code = abdi_processrepos.process(args, repo_configs)
                _LOGGER.debug("arcyd process loop exit_code: %s" % exit_code)
                if exit_code == abdi_processexitcodes.ExitCodes.ec_exit:
                    break
            finally:
                _LOGGER.info("arcyd stopped")

            _LOGGER.debug("reloading arcyd configuration")
            try:
                with fs.lockfile_context():
                    repo_configs = abdi_repoargs.parse_config_file_list(
                        fs.repo_config_path_list())
            except phlsys_fs.LockfileExistsError:
                _LOGGER.error("couldn't acquire lockfile, reload failed")
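# A minimal sketch of the lockfile behaviour assumed of
# fs.lockfile_context() above, where the reload step treats
# phlsys_fs.LockfileExistsError as "someone else holds the lock".
# This sketch uses atomic exclusive creation; all names here are
# illustrative, not the abdt_fs implementation.
import contextlib
import os


class LockfileExistsSketchError(Exception):
    pass


@contextlib.contextmanager
def lockfile_context_sketch(path):
    try:
        # O_CREAT | O_EXCL is atomic: it fails if the file exists
        fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    except FileExistsError:
        raise LockfileExistsSketchError(path)
    try:
        os.close(fd)
        yield
    finally:
        os.remove(path)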
def process(args):

    fs = abdt_fs.make_default_accessor()

    pid = fs.get_pid_or_none()
    if pid is not None and phlsys_pid.is_running(pid):
        raise Exception("already running")

    pid = phlsys_pid.get()
    fs.set_pid(pid)

    repo_configs = _list_repo_configs_in_workingdir()

    # XXX: hack this horribly by delegating everything to the 'process-repos'
    #      command
    parser = argparse.ArgumentParser()
    params = []

    for line in open('config'):
        params.append(line.strip())

    if args.no_loop:
        params.append('--no-loop')

    params.append('--repo-configs')
    for repo in repo_configs:
        params.append('@' + repo)

    abdcmd_processrepos.setupParser(parser)
    args = parser.parse_args(params)

    abdcmd_processrepos.process(args)
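# The "'@' + repo" arguments above match argparse's standard
# args-from-file feature: a parser created with
# fromfile_prefix_chars='@' expands '@<path>' into the arguments
# listed in that file, one per line. A self-contained sketch of that
# mechanism (the option name is illustrative):
import argparse
import tempfile

sketch_parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
sketch_parser.add_argument('--instance-uri')

with tempfile.NamedTemporaryFile('w', delete=False) as repo_file:
    repo_file.write('--instance-uri\nhttps://example.test\n')

sketch_args = sketch_parser.parse_args(['@' + repo_file.name])
assert sketch_args.instance_uri == 'https://example.test'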
def start_arcyd(daemonize=True, loop=True, restart=False):
    # exit gracefully if this process is killed
    phlsys_signal.set_exit_on_sigterm()

    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            if restart:
                stop_arcyd_pid(pid)
            else:
                raise Exception("already running")

        if daemonize:
            phlsys_daemonize.do(
                stdout_path=fs.layout.stdout,
                stderr_path=fs.layout.stderr)

        # important that we do this *after* daemonizing
        pid = phlsys_pid.get()
        fs.set_pid(pid)

        parser = argparse.ArgumentParser()
        params = []

        for line in open(fs.layout.root_config):
            params.append(line.strip())

        if not loop:
            params.append('--no-loop')

        repo_configs = abdi_repoargs.parse_config_file_list(
            fs.repo_config_path_list())

    abdi_processrepos.setupParser(parser)
    args = parser.parse_args(params)

    def logger_config():
        _setup_logger(fs)

    with phlsys_multiprocessing.logging_context(logger_config):
        _LOGGER.debug("start with args: {}".format(args))
        _LOGGER.info("arcyd started")
        try:
            abdi_processrepos.process(args, repo_configs)
        finally:
            _LOGGER.info("arcyd stopped")
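# A minimal sketch of the daemonization assumed of phlsys_daemonize.do
# above: the classic UNIX double-fork recipe with stdout/stderr
# redirected to log files. It also shows why the pid is recorded
# *after* daemonizing - each fork changes the pid. Illustrative only,
# the real helper may differ in detail.
import os
import sys


def daemonize_sketch(stdout_path, stderr_path):
    if os.fork():
        os._exit(0)  # first parent returns to the shell
    os.setsid()  # start a new session, detach from the terminal
    if os.fork():
        os._exit(0)  # second fork: never re-acquire a terminal
    sys.stdout.flush()
    sys.stderr.flush()
    with open(stdout_path, 'a') as out, open(stderr_path, 'a') as err:
        # dup2 keeps the redirection alive after the files are closed
        os.dup2(out.fileno(), sys.stdout.fileno())
        os.dup2(err.fileno(), sys.stderr.fileno())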
def process(args):
    # exit gracefully if this process is killed
    phlsys_signal.set_exit_on_sigterm()

    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            raise Exception("already running")

        if not args.foreground:
            phlsys_daemonize.do(
                stdout_path=fs.layout.stdout,
                stderr_path=fs.layout.stderr)

        # important that we do this *after* daemonizing
        pid = phlsys_pid.get()
        fs.set_pid(pid)

        parser = argparse.ArgumentParser()
        params = []

        for line in open(fs.layout.root_config):
            params.append(line.strip())

        if args.no_loop:
            params.append('--no-loop')

        repo_configs = abdi_repoargs.parse_config_file_list(
            fs.repo_config_path_list())

    abdi_processrepos.setupParser(parser)
    args = parser.parse_args(params)

    # setup to log everything to fs.layout.log_info, with a timestamp
    logging.Formatter.converter = time.gmtime
    logging.basicConfig(
        format='%(asctime)s UTC: %(levelname)s: %(message)s',
        level=logging.INFO,
        filename=fs.layout.log_info)

    _LOGGER.info("arcyd started")
    try:
        abdi_processrepos.process(args, repo_configs)
    finally:
        _LOGGER.info("arcyd stopped")
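# A minimal sketch of the liveness test assumed of
# phlsys_pid.is_running above: sending signal 0 performs existence and
# permission checks without actually delivering a signal. Illustrative,
# not the real implementation.
import errno
import os


def is_running_sketch(pid):
    try:
        os.kill(pid, 0)
    except OSError as e:
        # EPERM: the process exists but is owned by another user
        return e.errno == errno.EPERM
    return True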