Example #1
def process(args):
    # exit gracefully if this process is killed
    phlsys_signal.set_exit_on_sigterm()

    fs = abdt_fs.make_default_accessor()

    pid = fs.get_pid_or_none()
    if pid is not None and phlsys_pid.is_running(pid):
        raise Exception("already running")

    if not args.foreground:
        phlsys_daemonize.do(
            stdout_path=fs.layout.stdout,
            stderr_path=fs.layout.stderr)

    # important that we do this *after* daemonizing
    pid = phlsys_pid.get()
    fs.set_pid(pid)

    parser = argparse.ArgumentParser()
    params = []

    with open(fs.layout.root_config) as config_file:
        for line in config_file:
            params.append(line.strip())

    if args.no_loop:
        params.append('--no-loop')

    abdi_processrepos.setupParser(parser)
    args = parser.parse_args(params)
    abdi_processrepos.process(args, fs.repo_config_path_list())
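
All of these variants begin by converting SIGTERM into a graceful shutdown, so that finally blocks and context managers still run when the daemon is killed. Below is a minimal sketch of what a helper like phlsys_signal.set_exit_on_sigterm presumably does, using only the standard-library signal module; the real arcyd implementation may differ.

import signal

def set_exit_on_sigterm():
    # Raising SystemExit from the handler unwinds the stack normally,
    # so cleanup code runs instead of the process dying mid-write.
    def handler(signum, frame):
        raise SystemExit(1)

    signal.signal(signal.SIGTERM, handler)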
Example #2
def start_arcyd(daemonize=True, loop=True, restart=False, stop_message=''):
    # exit gracefully if this process is killed
    phlsys_signal.set_exit_on_sigterm()

    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            if restart:
                stop_arcyd_pid(pid, fs.layout.killfile, stop_message)
            else:
                raise Exception("already running")

        if daemonize:
            phlsys_daemonize.do(
                stdout_path=fs.layout.stdout,
                stderr_path=fs.layout.stderr)

        # important that we do this *after* daemonizing
        pid = phlsys_pid.get()
        fs.set_pid(pid)

        parser = argparse.ArgumentParser()
        params = []

        with open(fs.layout.root_config) as config_file:
            for line in config_file:
                params.append(line.strip())

        if not loop:
            params.append('--no-loop')

        repo_configs = abdi_repoargs.parse_config_file_list(
            fs.repo_config_path_list())

        abdi_processrepos.setupParser(parser)
        args = parser.parse_args(params)

    def logger_config():
        _setup_logger(fs, daemonize)

    with phlsys_multiprocessing.logging_context(logger_config):
        _LOGGER.debug("start with args: {}".format(args))
        while True:
            _LOGGER.info("arcyd started")
            try:
                exit_code = abdi_processrepos.process(args, repo_configs)
                _LOGGER.debug("arcyd process loop exit_code: %s" % exit_code)
                if exit_code == abdi_processexitcodes.ExitCodes.ec_exit:
                    break
            finally:
                _LOGGER.info("arcyd stopped")

            _LOGGER.debug("reloading arcyd configuration")
            try:
                with fs.lockfile_context():
                    repo_configs = abdi_repoargs.parse_config_file_list(
                        fs.repo_config_path_list())
            except phlsys_fs.LockfileExistsError:
                _LOGGER.error("couldn't acquire lockfile, reload failed")
Example #3
def start_arcyd(daemonize=True, loop=True, restart=False):
    # exit gracefully if this process is killed
    phlsys_signal.set_exit_on_sigterm()

    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            if restart:
                stop_arcyd_pid(pid)
            else:
                raise Exception("already running")

        if daemonize:
            phlsys_daemonize.do(
                stdout_path=fs.layout.stdout,
                stderr_path=fs.layout.stderr)

        # important that we do this *after* daemonizing
        pid = phlsys_pid.get()
        fs.set_pid(pid)

        parser = argparse.ArgumentParser()
        params = []

        with open(fs.layout.root_config) as config_file:
            for line in config_file:
                params.append(line.strip())

        if not loop:
            params.append('--no-loop')

        repo_configs = abdi_repoargs.parse_config_file_list(
            fs.repo_config_path_list())

        abdi_processrepos.setupParser(parser)
        args = parser.parse_args(params)

    def logger_config():
        _setup_logger(fs)

    with phlsys_multiprocessing.logging_context(logger_config):
        _LOGGER.debug("start with args: {}".format(args))
        _LOGGER.info("arcyd started")
        try:
            abdi_processrepos.process(args, repo_configs)
        finally:
            _LOGGER.info("arcyd stopped")
Example #4
def process(args):
    # exit gracefully if this process is killed
    phlsys_signal.set_exit_on_sigterm()

    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            raise Exception("already running")

        if not args.foreground:
            phlsys_daemonize.do(
                stdout_path=fs.layout.stdout,
                stderr_path=fs.layout.stderr)

        # important that we do this *after* daemonizing
        pid = phlsys_pid.get()
        fs.set_pid(pid)

        parser = argparse.ArgumentParser()
        params = []

        with open(fs.layout.root_config) as config_file:
            for line in config_file:
                params.append(line.strip())

        if args.no_loop:
            params.append('--no-loop')

        repo_configs = abdi_repoargs.parse_config_file_list(
            fs.repo_config_path_list())

        abdi_processrepos.setupParser(parser)
        args = parser.parse_args(params)

    # setup to log everything to fs.layout.log_info, with a timestamp
    logging.Formatter.converter = time.gmtime
    logging.basicConfig(
        format='%(asctime)s UTC: %(levelname)s: %(message)s',
        level=logging.INFO,
        filename=fs.layout.log_info)

    _LOGGER.info("arcyd started")
    try:
        abdi_processrepos.process(args, repo_configs)
    finally:
        _LOGGER.info("arcyd stopped")