 def enable_count_cycles_script(self):
     assert not self._has_enabled_count_cycles
     config_path = os.path.join(self._root_dir, 'configfile')
     config_text = phlsys_fs.read_text_file(config_path)
     config_text += '\n--external-report-command\ncount_cycles.sh'
     phlsys_fs.write_text_file(config_path, config_text)
     self._has_enabled_count_cycles = True
    def _read_log(self, name):
        log_path = '{}/var/log/{}'.format(self._root_dir, name)

        if os.path.isfile(log_path):
            return phlsys_fs.read_text_file(log_path)
        else:
            return ""
Example #3
 def set_overrun_secs(self, overrun_secs):
     assert not self._has_set_overrun_secs
     config_path = os.path.join(self._root_dir, 'configfile')
     config_text = phlsys_fs.read_text_file(config_path)
     config_text += '\n--overrun-secs\n{}'.format(overrun_secs)
     phlsys_fs.write_text_file(config_path, config_text)
     self._has_set_overrun_secs = True
Example #4
 def enable_count_cycles_script(self):
     assert not self._has_enabled_count_cycles
     config_path = os.path.join(self._root_dir, 'configfile')
     config_text = phlsys_fs.read_text_file(config_path)
     config_text += '\n--external-report-command\ncount_cycles.sh'
     phlsys_fs.write_text_file(config_path, config_text)
     self._has_enabled_count_cycles = True
 def set_overrun_secs(self, overrun_secs):
     assert not self._has_set_overrun_secs
     config_path = os.path.join(self._root_dir, 'configfile')
     config_text = phlsys_fs.read_text_file(config_path)
     config_text += '\n--overrun-secs\n{}'.format(overrun_secs)
     phlsys_fs.write_text_file(config_path, config_text)
     self._has_set_overrun_secs = True
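
Both helpers above append argparse-style option lines to the fixture's
configfile, one token per line. A minimal sketch, assuming overrun_secs=5 and
ignoring whatever the file already contained, of the text they append:

# Illustration only: the tail appended to 'configfile' after calling
# enable_count_cycles_script() and set_overrun_secs(5).
appended_tail = (
    '\n--external-report-command\ncount_cycles.sh'
    '\n--overrun-secs\n5'
)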
def ensure_repo_ignoring(repo_path):
    if is_repo_definitely_ignoring(repo_path):
        # nothing to do
        return

    repo_attributes_path = os.path.join(repo_path, _REPO_ATTRIBUTES_PATH)

    # check that any existing file is compatible with the new contents we will
    # write, i.e. it is a subset of the new content
    if os.path.exists(repo_attributes_path):
        contents = phlsys_fs.read_text_file(repo_attributes_path)
        lines = contents.splitlines()
        for l in lines:
            stripped = l.strip()
            if stripped and stripped not in _REPO_ATTRIBUTES_TUPLE:
                # we won't try to do any sort of merging, just escalate
                raise Exception(
                    "cannot merge attributes in existing file: {}".format(
                        repo_attributes_path))

    # at this point any existing file contains only lines from our attribute
    # set, so we can merge correctly by overwriting it with our superset of
    # attributes
    phlsys_fs.write_text_file(
        repo_attributes_path,
        _REPO_ATTRIBUTES_CONTENT)
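
The snippets in this section rely on module-level constants that are not
shown: _REPO_ATTRIBUTES_PATH, _REPO_ATTRIBUTES_TUPLE and
_REPO_ATTRIBUTES_CONTENT. A minimal sketch of plausible definitions, with
purely illustrative attribute lines rather than the project's actual values:

import os

# Hypothetical definitions for illustration only; the real module supplies
# its own override path and attribute lines.
_REPO_ATTRIBUTES_PATH = os.path.join('.git', 'info', 'attributes')  # assumed
_REPO_ATTRIBUTES_TUPLE = (
    '* -text',   # assumed attribute line
    '* -ident',  # assumed attribute line
)
_REPO_ATTRIBUTES_CONTENT = '\n'.join(_REPO_ATTRIBUTES_TUPLE) + '\n'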
Example #7
    def _read_log(self, name):
        log_path = '{}/var/log/{}'.format(self._root_dir, name)

        if os.path.isfile(log_path):
            return phlsys_fs.read_text_file(
                log_path)
        else:
            return ""
def is_repo_definitely_ignoring(repo_path):
    repo_attributes_path = os.path.join(repo_path, _REPO_ATTRIBUTES_PATH)
    if not os.path.exists(repo_attributes_path):
        return False
    else:
        # check the existing file
        content = phlsys_fs.read_text_file(repo_attributes_path)
        return content == _REPO_ATTRIBUTES_CONTENT
    def debug_log(self):
        debug_path = '{}/var/log/debug'.format(self._root_dir)

        if os.path.isfile(debug_path):
            return phlsys_fs.read_text_file(
                debug_path)
        else:
            return ""
def is_repo_definitely_ignoring(repo_path):
    repo_attributes_path = os.path.join(repo_path, _REPO_ATTRIBUTES_PATH)
    if not os.path.exists(repo_attributes_path):
        return False
    else:
        # check the existing file
        content = phlsys_fs.read_text_file(repo_attributes_path)
        return content == _REPO_ATTRIBUTES_CONTENT
 def git_fetch_counter(self, *args, **kwargs):
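     # presumably a debug breadcrumb: record the working dir for inspection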
     phlsys_fs.write_text_file("/tmp/1", self.working_dir)
     fetch_counter_path = os.path.join(self.working_dir, ".git", "fetch_counter")
     if args and args[0] == "fetch":
         if not os.path.exists(fetch_counter_path):
             phlsys_fs.write_text_file(fetch_counter_path, "1")
         else:
             old_count = phlsys_fs.read_text_file(fetch_counter_path)
             new_count = str(int(old_count) + 1)
             phlsys_fs.write_text_file(fetch_counter_path, new_count)
     return old_call(self, *args, **kwargs)
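
The wrapper above delegates to an old_call captured from whatever object it
patches. A hedged sketch of how such a counter could be installed; the patch
target here is an assumption, not necessarily the project's test harness:

# Illustrative monkey-patch: keep the original __call__ and substitute the
# counting wrapper, so every repo('fetch', ...) invocation is recorded.
old_call = phlsys_git.Repo.__call__        # assumed patch target
phlsys_git.Repo.__call__ = git_fetch_counter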
 def test_B_exercise_overrun_secs(self):
     with setup_arcyd() as arcyd:
         # [ B] overrun secs are not set by default
         self.assertFalse(arcyd._has_set_overrun_secs)
         arcyd.set_overrun_secs(5)
         # [ B] overrun secs are set by set_overrun_secs
         self.assertTrue(arcyd._has_set_overrun_secs)
         config = phlsys_fs.read_text_file(
             os.path.join(arcyd._root_dir, 'configfile')).split()
         overrun_config_index = config.index('--overrun-secs') + 1
         # [ B] set_overrun_secs writes correct value to config
         self.assertEqual(5, int(config[overrun_config_index]))
 def test_B_exercise_overrun_secs(self):
     with setup_arcyd() as arcyd:
         # [ B] overrun secs are not set by default
         self.assertFalse(arcyd._has_set_overrun_secs)
         arcyd.set_overrun_secs(5)
         # [ B] overrun secs are set by set_overrun_secs
         self.assertTrue(arcyd._has_set_overrun_secs)
         config = phlsys_fs.read_text_file(os.path.join(
             arcyd._root_dir, 'configfile')).split()
         overrun_config_index = config.index('--overrun-secs') + 1
         # [ B] set_overrun_secs writes correct value to config
         self.assertEqual(5, int(config[overrun_config_index]))
 def git_fetch_counter(self, *args, **kwargs):
     phlsys_fs.write_text_file("/tmp/1", self.working_dir)
     fetch_counter_path = os.path.join(self.working_dir, '.git',
                                       'fetch_counter')
     if args and args[0] == 'fetch':
         if not os.path.exists(fetch_counter_path):
             phlsys_fs.write_text_file(fetch_counter_path, '1')
         else:
             old_count = phlsys_fs.read_text_file(fetch_counter_path)
             new_count = str(int(old_count) + 1)
             phlsys_fs.write_text_file(fetch_counter_path, new_count)
     return old_call(self, *args, **kwargs)
Example #15
def _test_push_during_overrun(fixture):
    arcyd = fixture.arcyds[0]
    repo = fixture.repos[0]
    phab_str = 'localphab'
    repohost_prefix = 'repohost'
    repo_prefix = 'repo'

    for i, r in enumerate(fixture.repos):
        repo_url_format = r.central_path
        arcyd(
            'add-repohost',
            '--name', 'repohost-{}'.format(i),
            '--repo-url-format', repo_url_format,
            '--repo-snoop-url-format', r.snoop_url)
        arcyd(
            'add-repo',
            phab_str,
            '{}-{}'.format(repohost_prefix, i),
            '{}-{}'.format(repo_prefix, i))

    branch1_name = '_test_push_during_overrun'
    branch2_name = '_test_push_during_overrun2'

    arcyd.enable_count_cycles_script()
    arcyd.set_overrun_secs(1)
    repo.hold_dev_arcyd_refs()
    repo.alice.push_new_review_branch(branch1_name)
    with arcyd.daemon_context():
        arcyd.wait_one_or_more_cycles()
        repo.alice.push_new_review_branch(branch2_name)
        arcyd.wait_one_or_more_cycles()
        repo.release_dev_arcyd_refs()
        arcyd.wait_one_or_more_cycles()
        arcyd.wait_one_or_more_cycles()

    repo.alice.fetch()
    reviews = repo.alice.list_reviews()
    assert len(reviews) == 2

    fetch_counter_path = os.path.join(
        arcyd._root_dir,
        'var',
        'repo',
        '{}_{}-0_{}-0'.format(phab_str, repohost_prefix, repo_prefix),
        '.git',
        'fetch_counter')
    fetch_count = int(phlsys_fs.read_text_file(fetch_counter_path))
    assert fetch_count == 4
def ensure_repo_ignoring(repo_path):
    """Make sure the .gitattributes override is set up.

    Note that this function will perform a clean checkout of all files
    in the working copy from the index, so any non-staged changes will
    be lost.

    :repo_path: repository to set up

    """
    if is_repo_definitely_ignoring(repo_path):
        # nothing to do
        return

    repo = phlsys_git.Repo(repo_path)
    repo_attributes_path = os.path.join(repo_path, _REPO_ATTRIBUTES_PATH)

    # Files in our working copy might have been 'smudged' by some
    # filters. After the repo-wide attributes override is written, those
    # smudged files might be reported as 'modified' because the
    # appropriate clean filter is no longer applied.
    #
    # To fix that side effect we need to rebuild the working copy
    # after the attributes are modified.

    # check that any existing file is compatible with the new contents we will
    # write, i.e. it is a subset of the new content
    if os.path.exists(repo_attributes_path):
        contents = phlsys_fs.read_text_file(repo_attributes_path)
        lines = contents.splitlines()
        for l in lines:
            stripped = l.strip()
            if stripped and stripped not in _REPO_ATTRIBUTES_TUPLE:
                # we won't try to do any sort of merging, just escalate
                raise Exception(
                    "cannot merge attributes in existing file: {}".format(
                        repo_attributes_path))

    # at this point any existing file contains only lines from our attribute
    # set, so we can merge correctly by overwriting it with our superset of
    # attributes
    phlsys_fs.write_text_file(
        repo_attributes_path,
        _REPO_ATTRIBUTES_CONTENT)

    # overwrite working copy with files from index
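    # (checkout-index -a -f -q -u: check out all files, force overwriting of
    # existing files, stay quiet, and refresh stat information in the index)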
    repo("checkout-index", "-afqu")
def ensure_repo_ignoring(repo_path):
    """Make sure the .gitattributes override is set up.

    Note that this function will perform a clean checkout of all files
    in the working copy from the index, so any non-staged changes will
    be lost.

    :repo_path: repository to set up

    """
    if is_repo_definitely_ignoring(repo_path):
        # nothing to do
        return

    repo = phlsys_git.Repo(repo_path)
    repo_attributes_path = os.path.join(repo_path, _REPO_ATTRIBUTES_PATH)

    # Files in our working copy might have been 'smudged' by some
    # filters. After the repo-wide attributes override is written, those
    # smudged files might be reported as 'modified' because the
    # appropriate clean filter is no longer applied.
    #
    # To fix that side effect we need to rebuild the working copy
    # after the attributes are modified.

    # check that any existing file is compatible with the new contents we will
    # write, i.e. it is a subset of the new content
    if os.path.exists(repo_attributes_path):
        contents = phlsys_fs.read_text_file(repo_attributes_path)
        lines = contents.splitlines()
        for l in lines:
            stripped = l.strip()
            if stripped and stripped not in _REPO_ATTRIBUTES_TUPLE:
                # we won't try to do any sort of merging, just escalate
                raise Exception(
                    "cannot merge attributes in existing file: {}".format(
                        repo_attributes_path))

    # at this point any existing file contains only lines from our attribute
    # set, so we can merge correctly by overwriting it with our superset of
    # attributes
    phlsys_fs.write_text_file(repo_attributes_path, _REPO_ATTRIBUTES_CONTENT)

    # overwrite working copy with files from index
    repo("checkout-index", "-afqu")
def ensure_repo_ignoring(repo_path):
    if is_repo_definitely_ignoring(repo_path):
        # nothing to do
        return

    repo_attributes_path = os.path.join(repo_path, _REPO_ATTRIBUTES_PATH)

    # check that any existing file is compatible with the new contents we will
    # write, i.e. it is a subset of the new content
    if os.path.exists(repo_attributes_path):
        contents = phlsys_fs.read_text_file(repo_attributes_path)
        lines = contents.splitlines()
        for l in lines:
            stripped = l.strip()
            if stripped and stripped not in _REPO_ATTRIBUTES_TUPLE:
                # we won't try to do any sort of merging, just escalate
                raise Exception(
                    "cannot merge attributes in existing file: {}".format(
                        repo_attributes_path))

    # at this point any existing file contains only lines from our attribute
    # set, so we can merge correctly by overwriting it with our superset of
    # attributes
    phlsys_fs.write_text_file(repo_attributes_path, _REPO_ATTRIBUTES_CONTENT)
def ensure_repo_ignoring(repo_path):
    if is_repo_definitely_ignoring(repo_path):
        # nothing to do
        return

    repo_attributes_path = os.path.join(repo_path, _REPO_ATTRIBUTES_PATH)
    if not os.path.exists(repo_attributes_path):
        # create the file with required content
        phlsys_fs.write_text_file(
            repo_attributes_path,
            _REPO_ATTRIBUTES_CONTENT)
    else:
        contents = phlsys_fs.read_text_file(repo_attributes_path)
        if contents in _REPO_ATTRIBUTES_TUPLE:
            # the file is exactly one of the existing attributes, we can merge
            # correctly by overwriting it with our superset of attributes
            phlsys_fs.write_text_file(
                repo_attributes_path,
                _REPO_ATTRIBUTES_CONTENT)
        else:
            # we won't try to do any sort of merging, just escalate
            raise Exception(
                "cannot ensure ignore attributes in existing file: {}".format(
                    repo_attributes_path))
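
A short usage sketch for the helpers above; the repository path is an
assumption for illustration:

# Illustration only: apply the attributes override to an arbitrary working
# copy, then confirm the fast-path check succeeds.
repo_path = '/tmp/example-repo'   # assumed path
ensure_repo_ignoring(repo_path)
assert is_repo_definitely_ignoring(repo_path)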
def do(
        repo_configs,
        sys_admin_emails,
        sleep_secs,
        is_no_loop,
        external_report_command,
        mail_sender,
        max_workers,
        overrun_secs):

    conduit_manager = _ConduitManager()

    fs_accessor = abdt_fs.make_default_accessor()
    url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
        fs_accessor.layout.urlwatcher_cache_path)

    # decide max workers based on number of CPUs if no value is specified
    if max_workers == 0:
        max_workers = determine_max_workers_default()

    repo_list = []
    for name, config in repo_configs:
        repo_list.append(
            _ArcydManagedRepository(
                name,
                config,
                conduit_manager,
                url_watcher_wrapper,
                sys_admin_emails,
                mail_sender))

    # If we always overrun half our workers then the loop is sustainable; if we
    # overrun more than that then we'll lag too far behind. In the event that
    # we only have one worker, we can't overrun any.
    max_overrun_workers = max_workers // 2
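    # e.g. max_workers=8 allows up to 4 overrunning jobs; max_workers=1 gives
    # max_overrun_workers=0, so a lone worker never overruns.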

    pool = phlmp_cyclingpool.CyclingPool(
        repo_list, max_workers, max_overrun_workers)

    cycle_timer = phlsys_timer.Timer()
    cycle_timer.start()
    exit_code = None
    while exit_code is None:

        # This timer needs to be separate from the cycle timer. The cycle timer
        # must be reset every time it is reported. The sleep timer makes sure
        # that each run of the loop takes a minimum amount of time.
        sleep_timer = phlsys_timer.Timer()
        sleep_timer.start()

        # refresh git snoops
        with abdt_logging.remote_io_read_event_context(
                'refresh-git-snoop', ''):
            abdt_tryloop.critical_tryloop(
                url_watcher_wrapper.watcher.refresh,
                abdt_errident.GIT_SNOOP,
                '')

        with abdt_logging.remote_io_read_event_context('refresh-conduit', ''):
            conduit_manager.refresh_conduits()

        with abdt_logging.misc_operation_event_context(
                'process-repos',
                '{} workers, {} repos'.format(max_workers, len(repo_list))):
            if max_workers > 1:
                for i, res in pool.cycle_results(overrun_secs=overrun_secs):
                    repo = repo_list[i]
                    repo.merge_from_worker(res)
            else:
                for r in repo_list:
                    r()

        # important to do this before stopping arcyd and as soon as possible
        # after doing fetches
        url_watcher_wrapper.save()

        # report cycle stats
        report = {
            "cycle_time_secs": cycle_timer.restart(),
            "overrun_jobs": pool.num_active_jobs,
        }
        _LOGGER.debug("cycle-stats: {}".format(report))
        if external_report_command:
            report_json = json.dumps(report)
            full_path = os.path.abspath(external_report_command)
            with abdt_logging.misc_operation_event_context(
                    'external-report-command', external_report_command):
                try:
                    phlsys_subprocess.run(full_path, stdin=report_json)
                except phlsys_subprocess.CalledProcessError as e:
                    _LOGGER.error(
                        "External command: {} failed with exception: "
                        "{}.".format(
                            external_report_command, type(e).__name__))
                    _LOGGER.error("VERBOSE MESSAGE: CycleReportJson:{}".format(
                        e))

        if is_no_loop:
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
        elif os.path.isfile(fs_accessor.layout.killfile):
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
            if phlsys_fs.read_text_file(fs_accessor.layout.killfile):
                _LOGGER.info("Killfile observed, reason given: {}".format(
                    phlsys_fs.read_text_file(fs_accessor.layout.killfile)))
            else:
                _LOGGER.info("Killfile observed, arcyd will stop")
            os.remove(fs_accessor.layout.killfile)
        elif os.path.isfile(fs_accessor.layout.reloadfile):
            _LOGGER.info("Reloadfile observed, arcyd will reload")
            exit_code = abdi_processexitcodes.ExitCodes.ec_reload
            os.remove(fs_accessor.layout.reloadfile)

        # sleep to pad out the cycle
        secs_to_sleep = float(sleep_secs) - float(sleep_timer.duration)
        if secs_to_sleep > 0 and exit_code is None:
            with abdt_logging.misc_operation_event_context(
                    'sleep', secs_to_sleep):
                time.sleep(secs_to_sleep)

    # finish any jobs that overran
    for i, res in pool.finish_results():
        repo = repo_list[i]
        repo.merge_from_worker(res)

    # important to do this before stopping arcyd and as soon as
    # possible after doing fetches
    url_watcher_wrapper.save()

    return exit_code
 def count_cycles(self):
     assert self._has_enabled_count_cycles
     counter_path = os.path.join(self._root_dir, 'cycle_counter')
     if not os.path.exists(counter_path):
         return None
     return int(phlsys_fs.read_text_file(counter_path).strip())
 def debug_log(self):
     return phlsys_fs.read_text_file(
         '{}/var/log/debug'.format(self._root_dir))
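
The external report command receives the cycle-stats JSON on stdin once per
cycle, and count_cycles() above reads an integer back from 'cycle_counter'. A
hypothetical Python stand-in for a script like count_cycles.sh, assuming it
only needs to bump that counter in its working directory:

# Illustration only: not the project's count_cycles.sh, just a sketch of the
# stdin contract used by do() and the counter file read by count_cycles().
import json
import sys

json.load(sys.stdin)  # e.g. {"cycle_time_secs": ..., "overrun_jobs": ...}
try:
    with open('cycle_counter') as f:
        count = int(f.read().strip())
except (IOError, ValueError):
    count = 0
with open('cycle_counter', 'w') as f:
    f.write(str(count + 1))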
Example #23
def do(repo_configs, sys_admin_emails, sleep_secs, is_no_loop,
       external_report_command, mail_sender, max_workers, overrun_secs):

    conduit_manager = _ConduitManager()

    fs_accessor = abdt_fs.make_default_accessor()
    url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
        fs_accessor.layout.urlwatcher_cache_path)

    # decide max workers based on number of CPUs if no value is specified
    if max_workers == 0:
        max_workers = determine_max_workers_default()

    repo_list = []
    for name, config in repo_configs:
        repo_list.append(
            _ArcydManagedRepository(name, config, conduit_manager,
                                    url_watcher_wrapper, sys_admin_emails,
                                    mail_sender))

    # If we always overrun half our workers then the loop is sustainable; if we
    # overrun more than that then we'll lag too far behind. In the event that
    # we only have one worker, we can't overrun any.
    max_overrun_workers = max_workers // 2

    pool = phlmp_cyclingpool.CyclingPool(repo_list, max_workers,
                                         max_overrun_workers)

    cycle_timer = phlsys_timer.Timer()
    cycle_timer.start()
    exit_code = None
    while exit_code is None:

        # This timer needs to be separate from the cycle timer. The cycle timer
        # must be reset every time it is reported. The sleep timer makes sure
        # that each run of the loop takes a minimum amount of time.
        sleep_timer = phlsys_timer.Timer()
        sleep_timer.start()

        # refresh git snoops
        with abdt_logging.remote_io_read_event_context('refresh-git-snoop',
                                                       ''):
            abdt_tryloop.critical_tryloop(url_watcher_wrapper.watcher.refresh,
                                          abdt_errident.GIT_SNOOP, '')

        with abdt_logging.remote_io_read_event_context('refresh-conduit', ''):
            conduit_manager.refresh_conduits()

        with abdt_logging.misc_operation_event_context(
                'process-repos',
                '{} workers, {} repos'.format(max_workers, len(repo_list))):
            if max_workers > 1:
                for i, res in pool.cycle_results(overrun_secs=overrun_secs):
                    repo = repo_list[i]
                    repo.merge_from_worker(res)
            else:
                for r in repo_list:
                    r()

        # important to do this before stopping arcyd and as soon as possible
        # after doing fetches
        url_watcher_wrapper.save()

        # report cycle stats
        report = {
            "cycle_time_secs": cycle_timer.restart(),
            "overrun_jobs": pool.num_active_jobs,
        }
        _LOGGER.debug("cycle-stats: {}".format(report))
        if external_report_command:
            report_json = json.dumps(report)
            full_path = os.path.abspath(external_report_command)
            with abdt_logging.misc_operation_event_context(
                    'external-report-command', external_report_command):
                try:
                    phlsys_subprocess.run(full_path, stdin=report_json)
                except phlsys_subprocess.CalledProcessError as e:
                    _LOGGER.error(
                        "External command: {} failed with exception: "
                        "{}.".format(external_report_command,
                                     type(e).__name__))
                    _LOGGER.error(
                        "VERBOSE MESSAGE: CycleReportJson:{}".format(e))

        if is_no_loop:
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
        elif os.path.isfile(fs_accessor.layout.killfile):
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
            if phlsys_fs.read_text_file(fs_accessor.layout.killfile):
                _LOGGER.info("Killfile observed, reason given: {}".format(
                    phlsys_fs.read_text_file(fs_accessor.layout.killfile)))
            else:
                _LOGGER.info("Killfile observed, arcyd will stop")
            os.remove(fs_accessor.layout.killfile)
        elif os.path.isfile(fs_accessor.layout.reloadfile):
            _LOGGER.info("Reloadfile observed, arcyd will reload")
            exit_code = abdi_processexitcodes.ExitCodes.ec_reload
            os.remove(fs_accessor.layout.reloadfile)

        # sleep to pad out the cycle
        secs_to_sleep = float(sleep_secs) - float(sleep_timer.duration)
        if secs_to_sleep > 0 and exit_code is None:
            with abdt_logging.misc_operation_event_context(
                    'sleep', secs_to_sleep):
                time.sleep(secs_to_sleep)

    # finish any jobs that overran
    for i, res in pool.finish_results():
        repo = repo_list[i]
        repo.merge_from_worker(res)

    # important to do this before stopping arcyd and as soon as
    # possible after doing fetches
    url_watcher_wrapper.save()

    return exit_code
Example #24
 def count_cycles(self):
     assert self._has_enabled_count_cycles
     counter_path = os.path.join(self._root_dir, 'cycle_counter')
     if not os.path.exists(counter_path):
         return None
     return int(phlsys_fs.read_text_file(counter_path).strip())