def process(args):
    fs = abdt_fs.make_default_accessor()

    pid = fs.get_pid_or_none()
    if pid is not None and phlsys_pid.is_running(pid):
        raise Exception("already running")

    pid = phlsys_pid.get()
    fs.set_pid(pid)

    repo_configs = _list_repo_configs_in_workingdir()

    # XXX: hack this horribly by delegating everything to the 'process-repos'
    #      command
    parser = argparse.ArgumentParser()
    params = []

    for line in open('config'):
        params.append(line.strip())

    if args.no_loop:
        params.append('--no-loop')

    params.append('--repo-configs')
    for repo in repo_configs:
        params.append('@' + repo)

    abdcmd_processrepos.setupParser(parser)
    args = parser.parse_args(params)
    abdcmd_processrepos.process(args)
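
Note: _list_repo_configs_in_workingdir() is called above but not listed in these examples. The sketch below is only illustrative; it assumes one config file per repository under a 'var/repo' directory in the working directory, while the authoritative layout is owned by abdt_fs and may differ.

import os


def _list_repo_configs_in_workingdir():
    # Illustrative stand-in: collect the per-repository config file paths
    # from an assumed 'var/repo' directory in the working directory.
    config_dir = os.path.join('var', 'repo')
    if not os.path.isdir(config_dir):
        return []
    return sorted(
        os.path.join(config_dir, name)
        for name in os.listdir(config_dir))
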
Example 2
def process(args):

    _ = args  # NOQA
    fs = abdt_fs.make_default_accessor()

    repo_config_path_list = fs.repo_config_path_list()
    repo_name_config_list = abdi_repoargs.parse_config_file_list(
        repo_config_path_list)

    url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
        fs.layout.urlwatcher_cache_path)

    url_watcher_wrapper.watcher.refresh()

    for repo_name, repo_config in repo_name_config_list:
        print(repo_name + '..', end=' ')
        snoop_url = abdi_repoargs.get_repo_snoop_url(repo_config)

        abd_repo = abdt_git.Repo(
            phlsys_git.Repo(repo_config.repo_path),
            "origin",
            repo_config.repo_desc)

        did_fetch = abdi_processrepoargs.fetch_if_needed(
            url_watcher_wrapper.watcher,
            snoop_url,
            abd_repo,
            repo_config.repo_desc)

        if did_fetch:
            print('fetched')
        else:
            print('skipped')

        url_watcher_wrapper.save()
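
Note: fetch_if_needed() is used here and in later examples but not listed. The sketch below only captures the decision logic: fetch when there is no snoop URL to consult, or when the snoop URL reports a change. Plain callables (has_url_changed, do_fetch) stand in for the url watcher query and the actual git fetch; their real interfaces are not shown in these examples.

def fetch_if_needed(has_url_changed, snoop_url, do_fetch):
    # has_url_changed: callable taking a URL and returning True if it has
    #                  changed since the last check (url watcher stand-in).
    # snoop_url:       optional URL that mirrors the repository's state.
    # do_fetch:        callable that performs the actual fetch.
    # Returns True if a fetch was performed, False if it was skipped.
    if snoop_url is not None and not has_url_changed(snoop_url):
        return False
    do_fetch()
    return True
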
Example 3
def start_arcyd(daemonize=True, loop=True, restart=False, stop_message=''):
    # exit gracefully if this process is killed
    phlsys_signal.set_exit_on_sigterm()

    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            if restart:
                stop_arcyd_pid(pid, fs.layout.killfile, stop_message)
            else:
                raise Exception("already running")

        if daemonize:
            phlsys_daemonize.do(
                stdout_path=fs.layout.stdout,
                stderr_path=fs.layout.stderr)

        # important that we do this *after* daemonizing
        pid = phlsys_pid.get()
        fs.set_pid(pid)

        parser = argparse.ArgumentParser()
        params = []

        for line in open(fs.layout.root_config):
            params.append(line.strip())

        if not loop:
            params.append('--no-loop')

        repo_configs = abdi_repoargs.parse_config_file_list(
            fs.repo_config_path_list())

        abdi_processrepos.setupParser(parser)
        args = parser.parse_args(params)

    def logger_config():
        _setup_logger(fs, daemonize)

    with phlsys_multiprocessing.logging_context(logger_config):
        _LOGGER.debug("start with args: {}".format(args))
        while True:
            _LOGGER.info("arcyd started")
            try:
                exit_code = abdi_processrepos.process(args, repo_configs)
                _LOGGER.debug("arcyd process loop exit_code: %s" % exit_code)
                if exit_code == abdi_processexitcodes.ExitCodes.ec_exit:
                    break
            finally:
                _LOGGER.info("arcyd stopped")

            _LOGGER.debug("reloading arcyd configuration")
            try:
                with fs.lockfile_context():
                    repo_configs = abdi_repoargs.parse_config_file_list(
                        fs.repo_config_path_list())
            except phlsys_fs.LockfileExistsError:
                _LOGGER.error("couldn't acquire lockfile, reload failed")
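
Note: abdi_processexitcodes.ExitCodes is referenced above (and in the 'do' examples further down) but not listed. A minimal sketch follows; the two names match the usage in these examples, while the concrete values are assumptions.

class ExitCodes(object):
    # Sentinel results for the processing loop: ec_exit asks arcyd to stop,
    # ec_reload asks it to re-read its configuration and run again.
    # (Values are illustrative only.)
    ec_exit = 0
    ec_reload = 1
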
Example 4
def process(args):
    # exit gracefully if this process is killed
    phlsys_signal.set_exit_on_sigterm()

    fs = abdt_fs.make_default_accessor()

    pid = fs.get_pid_or_none()
    if pid is not None and phlsys_pid.is_running(pid):
        raise Exception("already running")

    if not args.foreground:
        phlsys_daemonize.do(
            stdout_path=fs.layout.stdout,
            stderr_path=fs.layout.stderr)

    # important that we do this *after* daemonizing
    pid = phlsys_pid.get()
    fs.set_pid(pid)

    parser = argparse.ArgumentParser()
    params = []

    for line in open(fs.layout.root_config):
        params.append(line.strip())

    if args.no_loop:
        params.append('--no-loop')

    abdi_processrepos.setupParser(parser)
    args = parser.parse_args(params)
    abdi_processrepos.process(args, fs.repo_config_path_list())
Example 5
def process(args):

    fs = abdt_fs.make_default_accessor()

    exit_code = 0

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            raise Exception("cannot fsck whilst arcyd is running.")

        repo_config_path_list = _determine_repo_config_path_list(
            fs, args.repos)

        if not _check_repo_config_path_list(repo_config_path_list):
            exit_code = 1

        repo_name_config_list = abdi_repoargs.parse_config_file_list(
            repo_config_path_list)

        if not _check_repo_name_config_list(args, repo_name_config_list):
            exit_code = 1

    if exit_code != 0 and not args.fix:
        print("use '--fix' to attempt to fix the issues")

    return exit_code
Example 6
def process(args):
    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is None or not phlsys_pid.is_running(pid):
            raise Exception("Arcyd is not running")

        if args.force:
            phlsys_pid.request_terminate(pid)
        else:
            killfile = 'var/command/killfile'
            phlsys_fs.write_text_file(killfile, '')

            if os.path.isfile(killfile):
                time.sleep(1)
                while os.path.isfile(killfile):
                    print('waiting for arcyd to remove killfile ..')
                    time.sleep(1)

        # wait for Arcyd to not be running
        if phlsys_pid.is_running(pid):
            time.sleep(1)
            while phlsys_pid.is_running(pid):
                print('waiting for arcyd to exit')
                time.sleep(1)
def process(args):

    fs = abdt_fs.make_default_accessor()

    # generate the config file
    config = ""

    if args.repo_url_format:
        config = '\n'.join([
            config,
            _CONFIG_REPO_URL_FORMAT.format(
                repo_url_format=args.repo_url_format)])

    if args.repo_snoop_url_format:
        config = '\n'.join([
            config,
            _CONFIG_REPO_SNOOP_URL_FORMAT.format(
                repo_snoop_url_format=args.repo_snoop_url_format)])

    if args.branch_url_format:
        config = '\n'.join([
            config,
            _CONFIG_BRANCH_URL_FORMAT.format(
                branch_url_format=args.branch_url_format)])

    if args.admin_emails:
        config = '\n'.join([
            config,
            _CONFIG_ADMIN_EMAILS_FORMAT.format(
                admin_emails='\n'.join(args.admin_emails))])

    config = config.strip()

    # write out the config
    fs.create_repohost_config(args.name, config)
Example 8
def process(args):

    fs = abdt_fs.make_default_accessor()

    # make sure we can connect with those parameters
    uri, user, cert, _ = phlsys_makeconduit.get_uri_user_cert_explanation(
        args.instance_uri, args.arcyd_user, args.arcyd_cert)
    conduit = phlsys_conduit.Conduit(uri,
                                     user,
                                     cert,
                                     https_proxy=args.https_proxy)
    conduit.ping()

    content = _CONFIG.format(instance_uri=uri,
                             arcyd_user=user,
                             arcyd_cert=cert,
                             review_url_format=args.review_url_format)

    if args.https_proxy:
        content = '\n'.join([
            content,
            _CONFIG_HTTPS_PROXY.format(https_proxy=args.https_proxy)
        ])

    if args.admin_emails:
        content = '\n'.join([
            content,
            _CONFIG_ADMIN_EMAILS_FORMAT.format(
                admin_emails='\n'.join(args.admin_emails))
        ])

    with fs.lockfile_context():
        fs.create_phabricator_config(args.name, content)
def process(args):

    fs = abdt_fs.make_default_accessor()

    exit_code = 0

    for repo in fs.repo_config_path_list():
        parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
        abdi_repoargs.setup_parser(parser)

        with open(repo) as f:
            repo_params = parser.parse_args(
                line.strip() for line in f)

        if not os.path.isdir(repo_params.repo_path):
            print "'{}' is missing repo '{}'".format(
                repo, repo_params.repo_path)
            if args.fix:
                repo_url = abdi_repoargs.get_repo_url(repo_params)
                print "cloning '{}' ..".format(repo_url)
                abdi_repo.setup_repo(repo_url, repo_params.repo_path)
            else:
                exit_code = 1

    if exit_code != 0 and not args.fix:
        print "use '--fix' to attempt to fix the issues"

    return exit_code
def process(args):

    fs = abdt_fs.make_default_accessor()

    # make sure we can connect with those parameters
    uri, user, cert, _ = phlsys_makeconduit.get_uri_user_cert_explanation(
        args.instance_uri, args.arcyd_user, args.arcyd_cert)
    conduit = phlsys_conduit.Conduit(
        uri, user, cert, https_proxy=args.https_proxy)
    conduit.ping()

    content = _CONFIG.format(
        instance_uri=uri,
        arcyd_user=user,
        arcyd_cert=cert,
        review_url_format=args.review_url_format)

    if args.https_proxy:
        content = '\n'.join([
            content,
            _CONFIG_HTTPS_PROXY.format(
                https_proxy=args.https_proxy)])

    if args.admin_emails:
        content = '\n'.join([
            content,
            _CONFIG_ADMIN_EMAILS_FORMAT.format(
                admin_emails='\n'.join(args.admin_emails))])

    with fs.lockfile_context():
        fs.create_phabricator_config(args.name, content)
def stop_arcyd():
    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is None or not phlsys_pid.is_running(pid):
            raise Exception("Arcyd is not running")
        stop_arcyd_pid(pid)
Example 12
def process(args):
    fs = abdt_fs.make_default_accessor()

    for repo_args in _iter_repo_args(fs):
        if args.only_formatted_repo_urls:
            print(abdi_repoargs.get_repo_url(repo_args))
        else:
            print(repo_args)
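
Note: _iter_repo_args(fs) is not listed. The sketch below assumes it yields one parsed argument namespace per repository config file, reusing the fromfile-style argparse pattern shown in the fsck-like examples above; abdi_repoargs is the project module assumed to be importable here.

import argparse


def _iter_repo_args(fs):
    # For each repository config file, parse its lines as repository
    # arguments and yield the resulting namespace. Illustrative only; the
    # real helper may validate or transform the arguments further.
    for repo_config_path in fs.repo_config_path_list():
        parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
        abdi_repoargs.setup_parser(parser)
        with open(repo_config_path) as f:
            yield parser.parse_args(line.strip() for line in f)
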
Example 13
def process(args):

    _ = args  # NOQA
    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            raise Exception("cannot fetch whilst arcyd is running.")

        repo_config_path_list = fs.repo_config_path_list()
        repo_name_config_list = abdi_repoargs.parse_config_file_list(
            repo_config_path_list)

        url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
            fs.layout.urlwatcher_cache_path)

        # Let the user know what's happening before potentially blocking for a
        # while.
        print('Refreshing repository snoop status ..', end=' ')
        # Make sure that the output is actually visible by flushing stdout
        # XXX: Will use 'flush' parameter to 'print()' in Python 3.3
        sys.stdout.flush()
        print("done")

        url_watcher_wrapper.watcher.refresh()

        for repo_name, repo_config in repo_name_config_list:
            print(repo_name + ' ..', end=' ')

            # Make sure that the output is actually visible by flushing stdout
            # XXX: Will use 'flush' parameter to 'print()' in Python 3.3
            sys.stdout.flush()

            snoop_url = abdi_repoargs.get_repo_snoop_url(repo_config)

            sys_repo = phlsys_git.Repo(repo_config.repo_path)
            refcache_repo = phlgitx_refcache.Repo(sys_repo)
            differ_cache = abdt_differresultcache.Cache(refcache_repo)
            abd_repo = abdt_git.Repo(
                refcache_repo,
                differ_cache,
                "origin",
                repo_config.repo_desc)

            did_fetch = abdi_processrepoarglist.fetch_if_needed(
                url_watcher_wrapper.watcher,
                snoop_url,
                abd_repo,
                repo_config.repo_desc)

            if did_fetch:
                print('fetched')
            else:
                print('skipped')

            url_watcher_wrapper.save()
Example 14
def reload_arcyd():
    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is None or not phlsys_pid.is_running(pid):
            raise Exception("Arcyd is not running")

        phlsys_fs.write_text_file('var/command/reload', '')
Example 15
def stop_arcyd(message=''):
    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is None or not phlsys_pid.is_running(pid):
            raise Exception("Arcyd is not running")

        stop_arcyd_pid(pid, fs.layout.killfile, message)
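
Note: stop_arcyd_pid() appears with slightly different signatures in these examples but is never listed. The sketch below follows the killfile mechanism visible in the earlier stop example: write the killfile (optionally with a reason), wait for the daemon to consume it, then wait for the process to exit. phlsys_fs and phlsys_pid are the project modules assumed to be importable; the real helper may differ.

import os
import time


def stop_arcyd_pid(pid, killfile, message=''):
    # Request a graceful stop by writing the killfile, then poll until the
    # daemon removes it and the process is no longer running.
    phlsys_fs.write_text_file(killfile, message)

    while os.path.isfile(killfile):
        print('waiting for arcyd to remove killfile ..')
        time.sleep(1)

    while phlsys_pid.is_running(pid):
        print('waiting for arcyd to exit')
        time.sleep(1)
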
def process(args):

    _ = args  # NOQA
    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            raise Exception("cannot fetch whilst arcyd is running.")

        repo_config_path_list = fs.repo_config_path_list()
        repo_name_config_list = abdi_repoargs.parse_config_file_list(
            repo_config_path_list)

        url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
            fs.layout.urlwatcher_cache_path)

        # Let the user know what's happening before potentially blocking for a
        # while.
        print('Refreshing repository snoop status ..', end=' ')
        # Make sure that the output is actually visible by flushing stdout
        # XXX: Will use 'flush' parameter to 'print()' in Python 3.3
        sys.stdout.flush()
        print("done")

        url_watcher_wrapper.watcher.refresh()

        for repo_name, repo_config in repo_name_config_list:
            print(repo_name + ' ..', end=' ')

            # Make sure that the output is actually visible by flushing stdout
            # XXX: Will use 'flush' parameter to 'print()' in Python 3.3
            sys.stdout.flush()

            snoop_url = abdi_repoargs.get_repo_snoop_url(repo_config)

            sys_repo = phlsys_git.Repo(repo_config.repo_path)
            refcache_repo = phlgitx_refcache.Repo(sys_repo)
            differ_cache = abdt_differresultcache.Cache(refcache_repo)
            abd_repo = abdt_git.Repo(refcache_repo, differ_cache, "origin",
                                     repo_config.repo_desc)

            did_fetch = abdi_processrepoarglist.fetch_if_needed(
                url_watcher_wrapper.watcher, snoop_url, abd_repo,
                repo_config.repo_desc)

            if did_fetch:
                print('fetched')
            else:
                print('skipped')

            url_watcher_wrapper.save()
def do(
        repo_config_path_list,
        sys_admin_emails,
        kill_file,
        reset_file,
        pause_file,
        sleep_secs,
        is_no_loop,
        reporter):

    repo_configs = abdi_repoargs.parse_config_file_list(repo_config_path_list)

    # TODO: test write access to repos here

    operations = []
    conduits = {}

    fs_accessor = abdt_fs.make_default_accessor()
    url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
        fs_accessor.layout.urlwatcher_cache_path)

    # refresh cache after loading and before any repos are processed, otherwise
    # we may not pull when we need to on the first run around the loop.
    # TODO: wrap in usual retry handlers so that we can start up in unstable
    #       environments
    url_watcher_wrapper.watcher.refresh()

    _append_operations_for_repos(
        operations,
        reporter,
        conduits,
        url_watcher_wrapper,
        sys_admin_emails,
        repo_configs)

    _append_interrupt_operations(
        operations,
        sys_admin_emails,
        kill_file,
        reset_file,
        pause_file,
        sleep_secs,
        reporter)

    operations.append(
        abdi_operation.RefreshCaches(
            conduits, url_watcher_wrapper.watcher, reporter))

    _process_operations(
        is_no_loop, operations, sys_admin_emails, reporter)
Example 18
def process(args):
    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():

        repo_config_path_list = fs.repo_config_path_list()
        repo_name_config_list = abdi_repoargs.parse_config_file_list(
            repo_config_path_list)

        for _, repo_config in repo_name_config_list:
            if args.only_formatted_repo_urls:
                print(abdi_repoargs.get_repo_url(repo_config))
            else:
                print(repo_config)
Example 19
def process(args):

    fs = abdt_fs.make_default_accessor()

    pid = fs.get_pid_or_none()
    if pid is not None and phlsys_pid.is_running(pid):
        raise Exception("cannot remove repo whilst arcyd is running.")

    repo_name = args.name

    os.remove(fs.layout.repo_config(repo_name))
    os.remove(fs.layout.repo_try(repo_name))
    os.remove(fs.layout.repo_ok(repo_name))
    shutil.rmtree(fs.layout.repo(repo_name))
def start_arcyd(daemonize=True, loop=True, restart=False):
    # exit gracefully if this process is killed
    phlsys_signal.set_exit_on_sigterm()

    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            if restart:
                stop_arcyd_pid(pid)
            else:
                raise Exception("already running")

        if daemonize:
            phlsys_daemonize.do(
                stdout_path=fs.layout.stdout,
                stderr_path=fs.layout.stderr)

        # important that we do this *after* daemonizing
        pid = phlsys_pid.get()
        fs.set_pid(pid)

        parser = argparse.ArgumentParser()
        params = []

        for line in open(fs.layout.root_config):
            params.append(line.strip())

        if not loop:
            params.append('--no-loop')

        repo_configs = abdi_repoargs.parse_config_file_list(
            fs.repo_config_path_list())

        abdi_processrepos.setupParser(parser)
        args = parser.parse_args(params)

    def logger_config():
        _setup_logger(fs)

    with phlsys_multiprocessing.logging_context(logger_config):
        _LOGGER.debug("start with args: {}".format(args))
        _LOGGER.info("arcyd started")
        try:
            abdi_processrepos.process(args, repo_configs)
        finally:
            _LOGGER.info("arcyd stopped")
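
Note: _setup_logger(fs) is called above but not listed. The sketch below mirrors the inline logging setup in the next example (UTC timestamps, written to fs.layout.log_info); the real helper likely also handles the daemonize flag seen in the earlier start_arcyd variant.

import logging
import time


def _setup_logger(fs):
    # Log with a UTC timestamp to the info log file, matching the inline
    # configuration shown in the following example.
    logging.Formatter.converter = time.gmtime
    logging.basicConfig(
        format='%(asctime)s UTC: %(levelname)s: %(message)s',
        level=logging.INFO,
        filename=fs.layout.log_info)
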
Example 21
def process(args):
    # exit gracefully if this process is killed
    phlsys_signal.set_exit_on_sigterm()

    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            raise Exception("already running")

        if not args.foreground:
            phlsys_daemonize.do(
                stdout_path=fs.layout.stdout,
                stderr_path=fs.layout.stderr)

        # important that we do this *after* daemonizing
        pid = phlsys_pid.get()
        fs.set_pid(pid)

        parser = argparse.ArgumentParser()
        params = []

        for line in open(fs.layout.root_config):
            params.append(line.strip())

        if args.no_loop:
            params.append('--no-loop')

        repo_configs = abdi_repoargs.parse_config_file_list(
            fs.repo_config_path_list())

        abdi_processrepos.setupParser(parser)
        args = parser.parse_args(params)

    # setup to log everything to fs.layout.log_info, with a timestamp
    logging.Formatter.converter = time.gmtime
    logging.basicConfig(
        format='%(asctime)s UTC: %(levelname)s: %(message)s',
        level=logging.INFO,
        filename=fs.layout.log_info)

    _LOGGER.info("arcyd started")
    try:
        abdi_processrepos.process(args, repo_configs)
    finally:
        _LOGGER.info("arcyd stopped")
Example 22
def process(args):

    fs = abdt_fs.make_default_accessor()

    # generate the config file
    config = ""

    if args.repo_url_format:
        config = '\n'.join([
            config,
            _CONFIG_REPO_URL_FORMAT.format(
                repo_url_format=args.repo_url_format)
        ])

    if args.repo_push_url_format:
        config = '\n'.join([
            config,
            _CONFIG_REPO_PUSH_URL_FORMAT.format(
                repo_push_url_format=args.repo_push_url_format)
        ])

    if args.repo_snoop_url_format:
        config = '\n'.join([
            config,
            _CONFIG_REPO_SNOOP_URL_FORMAT.format(
                repo_snoop_url_format=args.repo_snoop_url_format)
        ])

    if args.branch_url_format:
        config = '\n'.join([
            config,
            _CONFIG_BRANCH_URL_FORMAT.format(
                branch_url_format=args.branch_url_format)
        ])

    if args.admin_emails:
        config = '\n'.join([
            config,
            _CONFIG_ADMIN_EMAILS_FORMAT.format(
                admin_emails='\n'.join(args.admin_emails))
        ])

    config = config.strip()

    # write out the config
    with fs.lockfile_context():
        fs.create_repohost_config(args.name, config)
Example 23
def process(args):

    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            raise Exception("cannot remove repo whilst arcyd is running.")

        repo_name = args.name
        if args.lookup_url:
            repo_name = _determine_name_from_url(fs, repo_name)

        _remove_file_ignore_fail(fs.layout.repo_try(repo_name))
        _remove_file_ignore_fail(fs.layout.repo_ok(repo_name))
        _remove_dir_ignore_fail(fs.layout.repo(repo_name))
        fs.remove_repo_config(repo_name)
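
Note: _remove_file_ignore_fail() and _remove_dir_ignore_fail() are used above but not listed. The sketches below assume they simply tolerate missing paths so that removing a partially-added repository is idempotent.

import os
import shutil


def _remove_file_ignore_fail(path):
    # Remove a file if it exists; a missing file is not an error.
    if os.path.isfile(path):
        os.remove(path)


def _remove_dir_ignore_fail(path):
    # Remove a directory tree if it exists; a missing tree is not an error.
    if os.path.isdir(path):
        shutil.rmtree(path)
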
Example 24
def process(args):

    fs = abdt_fs.make_default_accessor()

    with fs.lockfile_context():
        pid = fs.get_pid_or_none()
        if pid is not None and phlsys_pid.is_running(pid):
            raise Exception("cannot remove repo whilst arcyd is running.")

        repo_name = args.name
        if args.lookup_url:
            repo_name = _determine_name_from_url(fs, repo_name)

        _remove_file_ignore_fail(fs.layout.repo_try(repo_name))
        _remove_file_ignore_fail(fs.layout.repo_ok(repo_name))
        _remove_dir_ignore_fail(fs.layout.repo(repo_name))
        fs.remove_repo_config(repo_name)
Example 25
def process(args):

    fs = abdt_fs.make_default_accessor()

    exit_code = 0

    with fs.lockfile_context():
        for repo in fs.repo_config_path_list():
            parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
            abdi_repoargs.setup_parser(parser)

            with open(repo) as f:
                repo_params = parser.parse_args(
                    line.strip() for line in f)

            if not os.path.isdir(repo_params.repo_path):
                print "'{}' is missing repo '{}'".format(
                    repo, repo_params.repo_path)
                if args.fix:
                    repo_url = abdi_repoargs.get_repo_url(repo_params)
                    print "cloning '{}' ..".format(repo_url)
                    abdi_repo.setup_repo(repo_url, repo_params.repo_path)
                else:
                    exit_code = 1
            else:
                is_ignoring = phlgitx_ignoreident.is_repo_definitely_ignoring
                if not is_ignoring(repo_params.repo_path):
                    print "'{}' is not ignoring ident attributes".format(
                        repo_params.repo_path)
                    if args.fix:
                        print "setting {} to ignore ident ..".format(
                            repo_params.repo_path)

                        phlgitx_ignoreident.ensure_repo_ignoring(
                            repo_params.repo_path)
                    else:
                        exit_code = 1

    if exit_code != 0 and not args.fix:
        print "use '--fix' to attempt to fix the issues"

    return exit_code
def process(args):

    fs = abdt_fs.make_default_accessor()

    # make sure we can connect with those parameters
    conduit = phlsys_conduit.Conduit(
        args.instance_uri,
        args.arcyd_user,
        args.arcyd_cert,
        https_proxy=args.https_proxy)
    conduit.ping()

    content = _CONFIG.format(
        instance_uri=args.instance_uri,
        arcyd_user=args.arcyd_user,
        arcyd_cert=args.arcyd_cert)

    if args.https_proxy:
        content = '\n'.join([
            content,
            _CONFIG_HTTPS_PROXY.format(
                https_proxy=args.https_proxy)])

    fs.create_phabricator_config(args.name, content)
Example 27
def do(repo_configs, sys_admin_emails, sleep_secs, is_no_loop,
       external_report_command, mail_sender, max_workers, overrun_secs):

    conduit_manager = _ConduitManager()

    fs_accessor = abdt_fs.make_default_accessor()
    url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
        fs_accessor.layout.urlwatcher_cache_path)

    # decide max workers based on number of CPUs if no value is specified
    if max_workers == 0:
        max_workers = determine_max_workers_default()

    repo_list = []
    for name, config in repo_configs:
        repo_list.append(
            _ArcydManagedRepository(name, config, conduit_manager,
                                    url_watcher_wrapper, sys_admin_emails,
                                    mail_sender))

    # If we always overrun half our workers then the loop is sustainable; if we
    # overrun more than that then we'll be lagging too far behind. In the event
    # that we only have one worker then we can't overrun any.
    max_overrun_workers = max_workers // 2

    pool = phlmp_cyclingpool.CyclingPool(repo_list, max_workers,
                                         max_overrun_workers)

    cycle_timer = phlsys_timer.Timer()
    cycle_timer.start()
    exit_code = None
    while exit_code is None:

        # This timer needs to be separate from the cycle timer. The cycle timer
        # must be reset every time it is reported. The sleep timer makes sure
        # that each run of the loop takes a minimum amount of time.
        sleep_timer = phlsys_timer.Timer()
        sleep_timer.start()

        # refresh git snoops
        with abdt_logging.remote_io_read_event_context('refresh-git-snoop',
                                                       ''):
            abdt_tryloop.critical_tryloop(url_watcher_wrapper.watcher.refresh,
                                          abdt_errident.GIT_SNOOP, '')

        with abdt_logging.remote_io_read_event_context('refresh-conduit', ''):
            conduit_manager.refresh_conduits()

        with abdt_logging.misc_operation_event_context(
                'process-repos',
                '{} workers, {} repos'.format(max_workers, len(repo_list))):
            if max_workers > 1:
                for i, res in pool.cycle_results(overrun_secs=overrun_secs):
                    repo = repo_list[i]
                    repo.merge_from_worker(res)
            else:
                for r in repo_list:
                    r()

        # important to do this before stopping arcyd and as soon as possible
        # after doing fetches
        url_watcher_wrapper.save()

        # report cycle stats
        report = {
            "cycle_time_secs": cycle_timer.restart(),
            "overrun_jobs": pool.num_active_jobs,
        }
        _LOGGER.debug("cycle-stats: {}".format(report))
        if external_report_command:
            report_json = json.dumps(report)
            full_path = os.path.abspath(external_report_command)
            with abdt_logging.misc_operation_event_context(
                    'external-report-command', external_report_command):
                try:
                    phlsys_subprocess.run(full_path, stdin=report_json)
                except phlsys_subprocess.CalledProcessError as e:
                    _LOGGER.error(
                        "External command: {} failed with exception: "
                        "{}.".format(external_report_command,
                                     type(e).__name__))
                    _LOGGER.error(
                        "VERBOSE MESSAGE: CycleReportJson:{}".format(e))

        if is_no_loop:
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
        elif os.path.isfile(fs_accessor.layout.killfile):
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
            if phlsys_fs.read_text_file(fs_accessor.layout.killfile):
                _LOGGER.info("Killfile observed, reason given: {}".format(
                    phlsys_fs.read_text_file(fs_accessor.layout.killfile)))
            else:
                _LOGGER.info("Killfile observed, arcyd will stop")
            os.remove(fs_accessor.layout.killfile)
        elif os.path.isfile(fs_accessor.layout.reloadfile):
            _LOGGER.info("Reloadfile observed, arcyd will reload")
            exit_code = abdi_processexitcodes.ExitCodes.ec_reload
            os.remove(fs_accessor.layout.reloadfile)

        # sleep to pad out the cycle
        secs_to_sleep = float(sleep_secs) - float(sleep_timer.duration)
        if secs_to_sleep > 0 and exit_code is None:
            with abdt_logging.misc_operation_event_context(
                    'sleep', secs_to_sleep):
                time.sleep(secs_to_sleep)

    # finish any jobs that overran
    for i, res in pool.finish_results():
        repo = repo_list[i]
        repo.merge_from_worker(res)

    # important to do this before stopping arcyd and as soon as
    # possible after doing fetches
    url_watcher_wrapper.save()

    return exit_code
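
Note: determine_max_workers_default() is called above but not listed. The sketch below is based on the cpu_count fallback visible in the final 'do' example in this section; the fallback value of 1 is an assumption.

import multiprocessing


def determine_max_workers_default():
    # Default to one worker per CPU; if the platform cannot report a CPU
    # count, fall back to a single worker (assumed fallback).
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        return 1
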
Example 28
def process(args):

    fs = abdt_fs.make_default_accessor()

    repo_name = args.name
    if repo_name is None:
        repo_name = _repo_name_for_params(args.phabricator_name,
                                          args.repohost_name, args.repo_url)

    repo_desc = args.repo_desc
    if repo_desc is None:
        repo_desc = _repo_desc_for_params(args.phabricator_name,
                                          args.repohost_name, args.repo_url)

    try_touch_path = fs.layout.repo_try(repo_name)
    ok_touch_path = fs.layout.repo_ok(repo_name)
    repo_path = fs.layout.repo(repo_name)

    # make sure the repo doesn't exist already
    if os.path.exists(repo_path):
        raise Exception('{} already exists'.format(repo_path))

    # make sure the phabricator config exists
    phab_config_path = fs.get_phabricator_config_rel_path(
        args.phabricator_name)

    # make sure the repohost config exists
    repohost_config_path = fs.get_repohost_config_rel_path(args.repohost_name)

    # generate the config file
    config = _CONFIG.format(phabricator_config=phab_config_path,
                            repohost_config=repohost_config_path,
                            repo_desc=repo_desc,
                            repo_url=args.repo_url,
                            repo_path=repo_path,
                            try_touch_path=try_touch_path,
                            ok_touch_path=ok_touch_path)

    if args.admin_emails:
        config = '\n'.join([
            config,
            _CONFIG_ADMIN_EMAILS_FORMAT.format(
                admin_emails='\n'.join(args.admin_emails))
        ])

    # parse the arguments again, as a real repo
    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
    abdi_repoargs.setup_parser(parser)
    repo_args = config.splitlines()
    repo_params = parser.parse_args(repo_args)

    abdi_repoargs.validate_args(repo_params)

    # make sure we can use the snoop URL
    repo_snoop_url = abdi_repoargs.get_repo_snoop_url(repo_params)
    if repo_snoop_url:
        phlurl_request.get(repo_snoop_url)

    # determine the repo url from the parsed params
    repo_url = abdi_repoargs.get_repo_url(repo_params)

    # determine the repo push url from the parsed params
    repo_push_url = abdi_repoargs.get_repo_push_url(repo_params)

    with fs.lockfile_context():
        with abdi_repo.setup_repo_context(repo_url, repo_path, repo_push_url):
            fs.create_repo_config(repo_name, config)
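
Note: _repo_name_for_params() and _repo_desc_for_params() are not listed. The sketches below only illustrate the idea of deriving a stable name and a readable description from the Phabricator instance, repohost and repository URL; the actual naming scheme may differ.

import re


def _repo_name_for_params(phabricator_name, repohost_name, repo_url):
    # Derive a filesystem-friendly identifier from the parameters.
    # (Illustrative scheme only.)
    url_snippet = re.sub(r'\W+', '_', repo_url).strip('_')
    return '_'.join([phabricator_name, repohost_name, url_snippet])


def _repo_desc_for_params(phabricator_name, repohost_name, repo_url):
    # A short human-readable description of the managed repository.
    return '{} ({}/{})'.format(repo_url, phabricator_name, repohost_name)
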
def process(args):

    fs = abdt_fs.make_default_accessor()

    try_touch_path = fs.layout.repo_try(args.name)
    ok_touch_path = fs.layout.repo_ok(args.name)
    repo_path = fs.layout.repo(args.name)

    # make sure the repo doesn't exist already
    if os.path.exists(repo_path):
        raise Exception('{} already exists'.format(repo_path))

    # make sure the phabricator config exists
    phab_config_path = fs.get_phabricator_config_rel_path(
        args.phabricator_name)

    # make sure we can use the snoop URL
    if args.repo_snoop_url:
        phlurl_request.get(args.repo_snoop_url)

    # generate the config file
    config = _CONFIG.format(
        phabricator_config=phab_config_path,
        repo_desc=args.repo_desc,
        repo_path=repo_path,
        try_touch_path=try_touch_path,
        ok_touch_path=ok_touch_path,
        arcyd_email=args.arcyd_email,
        admin_email=args.admin_email)

    if args.repo_snoop_url:
        config = '\n'.join([
            config,
            _CONFIG_SNOOP_URL.format(
                repo_snoop_url=args.repo_snoop_url)])

    if args.review_url_format:
        config = '\n'.join([
            config,
            _CONFIG_REVIEW_URL.format(
                review_url_format=args.review_url_format)])

    if args.branch_url_format:
        config = '\n'.join([
            config,
            _CONFIG_BRANCH_URL.format(
                branch_url_format=args.branch_url_format)])

    # if there's any failure after cloning then we should remove the repo
    phlsys_subprocess.run(
        'git', 'clone', args.repo_url, repo_path)
    try:
        repo = phlsys_git.Repo(repo_path)

        # test pushing to master
        repo.call('checkout', 'origin/master')
        phlgit_commit.allow_empty(repo, 'test commit for pushing')
        repo.call('push', 'origin', '--dry-run', 'HEAD:refs/heads/master')
        repo.call('checkout', '-')

        # test push to special refs
        repo.call(
            'push', 'origin', '--dry-run', 'HEAD:refs/arcyd/test')
        repo.call(
            'push', 'origin', '--dry-run', 'HEAD:refs/heads/dev/arcyd/test')

        # fetch the 'landed' and 'abandoned' refs if they exist
        ref_list = set(repo.call('ls-remote').split()[1::2])
        special_refs = [
            (abdt_git.ARCYD_ABANDONED_REF, abdt_git.ARCYD_ABANDONED_BRANCH_FQ),
            (abdt_git.ARCYD_LANDED_REF, abdt_git.ARCYD_LANDED_BRANCH_FQ),
        ]
        for ref in special_refs:
            if ref[0] in ref_list:
                repo.call('fetch', 'origin', '{}:{}'.format(ref[0], ref[1]))

        # success, write out the config
        fs.create_repo_config(args.name, config)
    except Exception:
        # clean up the git repo
        shutil.rmtree(repo_path)
        raise
Example 30
def process(args):

    fs = abdt_fs.make_default_accessor()

    repo_name = args.name
    if repo_name is None:
        repo_name = _repo_name_for_params(
            args.phabricator_name, args.repohost_name, args.repo_url)

    repo_desc = args.repo_desc
    if repo_desc is None:
        repo_desc = _repo_desc_for_params(
            args.phabricator_name, args.repohost_name, args.repo_url)

    try_touch_path = fs.layout.repo_try(repo_name)
    ok_touch_path = fs.layout.repo_ok(repo_name)
    repo_path = fs.layout.repo(repo_name)

    # make sure the repo doesn't exist already
    if os.path.exists(repo_path):
        raise Exception('{} already exists'.format(repo_path))

    # make sure the phabricator config exists
    phab_config_path = fs.get_phabricator_config_rel_path(
        args.phabricator_name)

    # make sure the repohost config exists
    repohost_config_path = fs.get_repohost_config_rel_path(
        args.repohost_name)

    # generate the config file
    config = _CONFIG.format(
        phabricator_config=phab_config_path,
        repohost_config=repohost_config_path,
        repo_desc=repo_desc,
        repo_url=args.repo_url,
        repo_path=repo_path,
        try_touch_path=try_touch_path,
        ok_touch_path=ok_touch_path)

    if args.admin_emails:
        config = '\n'.join([
            config,
            _CONFIG_ADMIN_EMAILS_FORMAT.format(
                admin_emails='\n'.join(args.admin_emails))])

    # parse the arguments again, as a real repo
    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
    abdi_repoargs.setup_parser(parser)
    repo_args = config.splitlines()
    repo_params = parser.parse_args(repo_args)

    abdi_repoargs.validate_args(repo_params)

    # make sure we can use the snoop URL
    repo_snoop_url = abdi_repoargs.get_repo_snoop_url(repo_params)
    if repo_snoop_url:
        phlurl_request.get(repo_snoop_url)

    # determine the repo url from the parsed params
    repo_url = abdi_repoargs.get_repo_url(repo_params)

    with fs.lockfile_context():
        with abdi_repo.setup_repo_context(repo_url, repo_path):
            fs.create_repo_config(repo_name, config)
def do(
        repo_configs,
        sys_admin_emails,
        sleep_secs,
        is_no_loop,
        external_report_command,
        mail_sender,
        max_workers,
        overrun_secs):

    conduit_manager = _ConduitManager()

    fs_accessor = abdt_fs.make_default_accessor()
    url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
        fs_accessor.layout.urlwatcher_cache_path)

    # decide max workers based on number of CPUs if no value is specified
    if max_workers == 0:
        max_workers = determine_max_workers_default()

    repo_list = []
    for name, config in repo_configs:
        repo_list.append(
            _ArcydManagedRepository(
                name,
                config,
                conduit_manager,
                url_watcher_wrapper,
                sys_admin_emails,
                mail_sender))

    # If we always overrun half our workers then the loop is sustainable; if we
    # overrun more than that then we'll be lagging too far behind. In the event
    # that we only have one worker then we can't overrun any.
    max_overrun_workers = max_workers // 2

    pool = phlmp_cyclingpool.CyclingPool(
        repo_list, max_workers, max_overrun_workers)

    cycle_timer = phlsys_timer.Timer()
    cycle_timer.start()
    exit_code = None
    while exit_code is None:

        # This timer needs to be separate from the cycle timer. The cycle timer
        # must be reset every time it is reported. The sleep timer makes sure
        # that each run of the loop takes a minimum amount of time.
        sleep_timer = phlsys_timer.Timer()
        sleep_timer.start()

        # refresh git snoops
        with abdt_logging.remote_io_read_event_context(
                'refresh-git-snoop', ''):
            abdt_tryloop.critical_tryloop(
                url_watcher_wrapper.watcher.refresh,
                abdt_errident.GIT_SNOOP,
                '')

        with abdt_logging.remote_io_read_event_context('refresh-conduit', ''):
            conduit_manager.refresh_conduits()

        with abdt_logging.misc_operation_event_context(
                'process-repos',
                '{} workers, {} repos'.format(max_workers, len(repo_list))):
            if max_workers > 1:
                for i, res in pool.cycle_results(overrun_secs=overrun_secs):
                    repo = repo_list[i]
                    repo.merge_from_worker(res)
            else:
                for r in repo_list:
                    r()

        # important to do this before stopping arcyd and as soon as possible
        # after doing fetches
        url_watcher_wrapper.save()

        # report cycle stats
        report = {
            "cycle_time_secs": cycle_timer.restart(),
            "overrun_jobs": pool.num_active_jobs,
        }
        _LOGGER.debug("cycle-stats: {}".format(report))
        if external_report_command:
            report_json = json.dumps(report)
            full_path = os.path.abspath(external_report_command)
            with abdt_logging.misc_operation_event_context(
                    'external-report-command', external_report_command):
                try:
                    phlsys_subprocess.run(full_path, stdin=report_json)
                except phlsys_subprocess.CalledProcessError as e:
                    _LOGGER.error(
                        "External command: {} failed with exception: "
                        "{}.".format(
                            external_report_command, type(e).__name__))
                    _LOGGER.error("VERBOSE MESSAGE: CycleReportJson:{}".format(
                        e))

        if is_no_loop:
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
        elif os.path.isfile(fs_accessor.layout.killfile):
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
            if phlsys_fs.read_text_file(fs_accessor.layout.killfile):
                _LOGGER.info("Killfile observed, reason given: {}".format(
                    phlsys_fs.read_text_file(fs_accessor.layout.killfile)))
            else:
                _LOGGER.info("Killfile observed, arcyd will stop")
            os.remove(fs_accessor.layout.killfile)
        elif os.path.isfile(fs_accessor.layout.reloadfile):
            _LOGGER.info("Reloadfile observed, arcyd will reload")
            exit_code = abdi_processexitcodes.ExitCodes.ec_reload
            os.remove(fs_accessor.layout.reloadfile)

        # sleep to pad out the cycle
        secs_to_sleep = float(sleep_secs) - float(sleep_timer.duration)
        if secs_to_sleep > 0 and exit_code is None:
            with abdt_logging.misc_operation_event_context(
                    'sleep', secs_to_sleep):
                time.sleep(secs_to_sleep)

    # finish any jobs that overran
    for i, res in pool.finish_results():
        repo = repo_list[i]
        repo.merge_from_worker(res)

    # important to do this before stopping arcyd and as soon as
    # possible after doing fetches
    url_watcher_wrapper.save()

    return exit_code
def do(
        repo_configs,
        sys_admin_emails,
        kill_file,
        sleep_secs,
        is_no_loop,
        external_report_command,
        mail_sender,
        max_workers,
        overrun_secs):

    conduit_manager = _ConduitManager()

    fs_accessor = abdt_fs.make_default_accessor()
    url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
        fs_accessor.layout.urlwatcher_cache_path)

    finished = False

    # decide max workers based on number of CPUs if no value is specified
    if max_workers is None:
        try:
            # use the same default as multiprocessing.Pool
            max_workers = multiprocessing.cpu_count()
            _LOGGER.debug(
                "max_workers unspecified, defaulted to cpu_count: {}".format(
                    max_workers))
        except NotImplementedError:
            _LOGGER.warning(
                "multiprocessing.cpu_count() not supported, disabling "
                "multiprocessing. Specify max workers explicitly to enable.")
            max_workers = 0

    repo_list = []
    for name, config in repo_configs:
        repo_list.append(
            _ArcydManagedRepository(
                name,
                config,
                conduit_manager,
                url_watcher_wrapper,
                sys_admin_emails,
                mail_sender))

    # If we always overrun half our workers then the loop is sustainable; if we
    # overrun more than that then we'll be lagging too far behind. In the event
    # that we only have one worker then we can't overrun any.
    max_overrun_workers = max_workers // 2

    pool = phlmp_cyclingpool.CyclingPool(
        repo_list, max_workers, max_overrun_workers)

    cycle_timer = phlsys_timer.Timer()
    cycle_timer.start()
    while not finished:

        # This timer needs to be separate from the cycle timer. The cycle timer
        # must be reset every time it is reported. The sleep timer makes sure
        # that each run of the loop takes a minimum amount of time.
        sleep_timer = phlsys_timer.Timer()
        sleep_timer.start()

        # refresh git snoops
        abdt_tryloop.critical_tryloop(
            url_watcher_wrapper.watcher.refresh,
            abdt_errident.GIT_SNOOP,
            '')

        conduit_manager.refresh_conduits()

        if max_workers:
            for i, res in pool.cycle_results(overrun_secs=overrun_secs):
                repo = repo_list[i]
                repo.merge_from_worker(res)
        else:
            for r in repo_list:
                r()

        # important to do this before stopping arcyd and as soon as possible
        # after doing fetches
        url_watcher_wrapper.save()

        # report cycle stats
        if external_report_command:
            report = {
                "cycle_time_secs": cycle_timer.restart(),
                "overrun_jobs": pool.num_active_jobs,
            }
            report_json = json.dumps(report)
            full_path = os.path.abspath(external_report_command)
            try:
                phlsys_subprocess.run(full_path, stdin=report_json)
            except phlsys_subprocess.CalledProcessError as e:
                _LOGGER.error("CycleReportJson: {}".format(e))

        # look for killfile
        if os.path.isfile(kill_file):

            # finish any jobs that overran
            for i, res in pool.finish_results():
                repo = repo_list[i]
                repo.merge_from_worker(res)

            # important to do this before stopping arcyd and as soon as
            # possible after doing fetches
            url_watcher_wrapper.save()

            os.remove(kill_file)
            finished = True
            break

        if is_no_loop:
            finished = True
            break

        # sleep to pad out the cycle
        secs_to_sleep = float(sleep_secs) - float(sleep_timer.duration)
        if secs_to_sleep > 0:
            time.sleep(secs_to_sleep)