def do(self):
    self._reporter.start_cache_refresh()

    with self._reporter.tag_timer_context('refresh conduit cache'):
        for conduit in self._conduits.values():
            abdt_tryloop.critical_tryloop(
                conduit.refresh_cache_on_cycle,
                abdt_errident.CONDUIT_REFRESH,
                conduit.describe())

    with self._reporter.tag_timer_context('refresh git watcher'):
        abdt_tryloop.critical_tryloop(
            self._url_watcher.refresh, abdt_errident.GIT_SNOOP, '')

    self._reporter.finish_cache_refresh()
    return True
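
Both refresh operations above go through abdt_tryloop.critical_tryloop, which, judging from its call sites, takes a callable, an error identifier, and a human-readable description. As a rough mental model only, not arcyd's actual implementation, a "critical" retry helper of that shape might look like:

import time

def critical_tryloop_sketch(operation, identifier, description, delay_secs=10):
    # Hypothetical stand-in: keep retrying the operation until it
    # succeeds, reporting each failure under identifier/description and
    # pausing between attempts. 'Critical' here means never giving up.
    while True:
        try:
            return operation()
        except Exception as e:
            print('retrying {} ({}): {}'.format(identifier, description, e))
            time.sleep(delay_secs)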
Example #3
def do(repo_configs, sys_admin_emails, sleep_secs, is_no_loop,
       external_report_command, mail_sender, max_workers, overrun_secs):

    conduit_manager = _ConduitManager()

    fs_accessor = abdt_fs.make_default_accessor()
    url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
        fs_accessor.layout.urlwatcher_cache_path)

    # decide max workers based on number of CPUs if no value is specified
    if max_workers == 0:
        max_workers = determine_max_workers_default()

    repo_list = []
    for name, config in repo_configs:
        repo_list.append(
            _ArcydManagedRepository(name, config, conduit_manager,
                                    url_watcher_wrapper, sys_admin_emails,
                                    mail_sender))

    # If we always overrun half our workers then the loop is sustainable; if
    # we overrun more than that, we'll lag too far behind. In the event that
    # we only have one worker, we can't overrun any.
    max_overrun_workers = max_workers // 2
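    # For example: with max_workers == 8, up to 4 jobs may overrun into
    # the next cycle; with max_workers == 1, 1 // 2 == 0, so none may.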

    pool = phlmp_cyclingpool.CyclingPool(repo_list, max_workers,
                                         max_overrun_workers)

    cycle_timer = phlsys_timer.Timer()
    cycle_timer.start()
    exit_code = None
    while exit_code is None:

        # This timer needs to be separate from the cycle timer. The cycle timer
        # must be reset every time it is reported. The sleep timer makes sure
        # that each run of the loop takes a minimum amount of time.
        sleep_timer = phlsys_timer.Timer()
        sleep_timer.start()

        # refresh git snoops
        with abdt_logging.remote_io_read_event_context('refresh-git-snoop',
                                                       ''):
            abdt_tryloop.critical_tryloop(url_watcher_wrapper.watcher.refresh,
                                          abdt_errident.GIT_SNOOP, '')

        with abdt_logging.remote_io_read_event_context('refresh-conduit', ''):
            conduit_manager.refresh_conduits()

        with abdt_logging.misc_operation_event_context(
                'process-repos',
                '{} workers, {} repos'.format(max_workers, len(repo_list))):
            if max_workers > 1:
                for i, res in pool.cycle_results(overrun_secs=overrun_secs):
                    repo = repo_list[i]
                    repo.merge_from_worker(res)
            else:
                for r in repo_list:
                    r()

        # important to do this before stopping arcyd and as soon as possible
        # after doing fetches
        url_watcher_wrapper.save()

        # report cycle stats
        report = {
            "cycle_time_secs": cycle_timer.restart(),
            "overrun_jobs": pool.num_active_jobs,
        }
        _LOGGER.debug("cycle-stats: {}".format(report))
        if external_report_command:
            report_json = json.dumps(report)
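            # e.g. report_json might be
            # '{"cycle_time_secs": 12.3, "overrun_jobs": 1}' (illustrative
            # values); it is piped to the external command's stdin below.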
            full_path = os.path.abspath(external_report_command)
            with abdt_logging.misc_operation_event_context(
                    'external-report-command', external_report_command):
                try:
                    phlsys_subprocess.run(full_path, stdin=report_json)
                except phlsys_subprocess.CalledProcessError as e:
                    _LOGGER.error(
                        "External command: {} failed with exception: "
                        "{}.".format(external_report_command,
                                     type(e).__name__))
                    _LOGGER.error(
                        "VERBOSE MESSAGE: CycleReportJson:{}".format(e))

        if is_no_loop:
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
        elif os.path.isfile(fs_accessor.layout.killfile):
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
            if phlsys_fs.read_text_file(fs_accessor.layout.killfile):
                _LOGGER.info("Killfile observed, reason given: {}".format(
                    phlsys_fs.read_text_file(fs_accessor.layout.killfile)))
            else:
                _LOGGER.info("Killfile observed, arcyd will stop")
            os.remove(fs_accessor.layout.killfile)
        elif os.path.isfile(fs_accessor.layout.reloadfile):
            _LOGGER.info("Reloadfile observed, arcyd will reload")
            exit_code = abdi_processexitcodes.ExitCodes.ec_reload
            os.remove(fs_accessor.layout.reloadfile)

        # sleep to pad out the cycle
        secs_to_sleep = float(sleep_secs) - float(sleep_timer.duration)
        if secs_to_sleep > 0 and exit_code is None:
            with abdt_logging.misc_operation_event_context(
                    'sleep', secs_to_sleep):
                time.sleep(secs_to_sleep)

    # finish any jobs that overran
    for i, res in pool.finish_results():
        repo = repo_list[i]
        repo.merge_from_worker(res)

    # important to do this before stopping arcyd and as soon as
    # possible after doing fetches
    url_watcher_wrapper.save()

    return exit_code
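
The loop above exits when a killfile or reloadfile appears under the accessor's layout, so stopping a running arcyd amounts to creating the killfile. A minimal illustration, assuming only what the code above shows (the helper name is made up):

def request_arcyd_stop(killfile_path, reason=''):
    # Hypothetical helper: write an optional reason into the killfile;
    # the loop logs the reason, removes the file, and exits cleanly.
    with open(killfile_path, 'w') as f:
        f.write(reason)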
Example #4
def refresh_conduits(self):
    # each value in the dict is a (conduit, cache) pair
    for conduit, cache in self._conduits_caches.values():
        abdt_tryloop.critical_tryloop(cache.refresh_active_reviews,
                                      abdt_errident.CONDUIT_REFRESH,
                                      conduit.describe())
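
The unpacking in the loop above implies that each value in self._conduits_caches is a (conduit, cache) pair. A minimal sketch of a manager with that shape, illustrative only and not the project's actual _ConduitManager:

class ConduitManagerSketch(object):
    # Illustrative shape only: values are (conduit, cache) pairs, which
    # is why refresh_conduits() unpacks two names per value.
    def __init__(self):
        self._conduits_caches = {}

    def add(self, key, conduit, cache):
        self._conduits_caches[key] = (conduit, cache)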
def do(
        repo_configs,
        sys_admin_emails,
        kill_file,
        sleep_secs,
        is_no_loop,
        external_report_command,
        mail_sender,
        max_workers,
        overrun_secs):

    conduit_manager = _ConduitManager()

    fs_accessor = abdt_fs.make_default_accessor()
    url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
        fs_accessor.layout.urlwatcher_cache_path)

    finished = False

    # decide max workers based on number of CPUs if no value is specified
    if max_workers is None:
        try:
            # use the same default as multiprocessing.Pool
            max_workers = multiprocessing.cpu_count()
            _LOGGER.debug(
                "max_workers unspecified, defaulted to cpu_count: {}".format(
                    max_workers))
        except NotImplementedError:
            _LOGGER.warning(
                "multiprocessing.cpu_count() not supported, disabling "
                "multiprocessing. Specify max workers explicitly to enable.")
            max_workers = 0

    repo_list = []
    for name, config in repo_configs:
        repo_list.append(
            _ArcydManagedRepository(
                name,
                config,
                conduit_manager,
                url_watcher_wrapper,
                sys_admin_emails,
                mail_sender))

    # If we always overrun half our workers then the loop is sustainable; if
    # we overrun more than that, we'll lag too far behind. In the event that
    # we only have one worker, we can't overrun any.
    max_overrun_workers = max_workers // 2

    pool = phlmp_cyclingpool.CyclingPool(
        repo_list, max_workers, max_overrun_workers)

    cycle_timer = phlsys_timer.Timer()
    cycle_timer.start()
    while not finished:

        # This timer needs to be separate from the cycle timer. The cycle timer
        # must be reset every time it is reported. The sleep timer makes sure
        # that each run of the loop takes a minimum amount of time.
        sleep_timer = phlsys_timer.Timer()
        sleep_timer.start()

        # refresh git snoops
        abdt_tryloop.critical_tryloop(
            url_watcher_wrapper.watcher.refresh,
            abdt_errident.GIT_SNOOP,
            '')

        conduit_manager.refresh_conduits()

        if max_workers:
            for i, res in pool.cycle_results(overrun_secs=overrun_secs):
                repo = repo_list[i]
                repo.merge_from_worker(res)
        else:
            for r in repo_list:
                r()

        # important to do this before stopping arcyd and as soon as possible
        # after doing fetches
        url_watcher_wrapper.save()

        # report cycle stats
        if external_report_command:
            report = {
                "cycle_time_secs": cycle_timer.restart(),
                "overrun_jobs": pool.num_active_jobs,
            }
            report_json = json.dumps(report)
            full_path = os.path.abspath(external_report_command)
            try:
                phlsys_subprocess.run(full_path, stdin=report_json)
            except phlsys_subprocess.CalledProcessError as e:
                _LOGGER.error("CycleReportJson: {}".format(e))

        # look for killfile
        if os.path.isfile(kill_file):

            # finish any jobs that overran
            for i, res in pool.finish_results():
                repo = repo_list[i]
                repo.merge_from_worker(res)

            # important to do this before stopping arcyd and as soon as
            # possible after doing fetches
            url_watcher_wrapper.save()

            os.remove(kill_file)
            finished = True
            break

        if is_no_loop:
            finished = True
            break

        # sleep to pad out the cycle
        secs_to_sleep = float(sleep_secs) - float(sleep_timer.duration)
        if secs_to_sleep > 0:
            time.sleep(secs_to_sleep)
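
Both versions of the loop use the same padding pattern: time the work with a sleep timer, then sleep for whatever remains of sleep_secs so that each cycle has a minimum duration. Distilled into a standalone sketch with illustrative names:

import time

def padded_cycle(body, min_secs):
    # Run body(), then sleep out the remainder so the whole cycle takes
    # at least min_secs; if the body overran, skip the sleep entirely.
    start = time.time()
    body()
    remaining = min_secs - (time.time() - start)
    if remaining > 0:
        time.sleep(remaining)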