Example #1
    def do(self):

        report = {
            "cycle_time_secs": self._timer.restart(),
            "count_user_action": self._count_user_action.sample(
                self._reporter.count_user_action),
            "count_repo": self._count_repo_start.sample(
                self._reporter.count_repo_start),
            "count_repo_fetch": self._count_repo_fetch.sample(
                self._reporter.count_repo_fetch),
        }

        report_json = json.dumps(report)

        # skip actually reporting the first cycle so that we don't get
        # incomplete results - we may not be the last operation to be processed
        #
        if self._is_first_cycle:
            self._is_first_cycle = False
        else:
            try:
                phlsys_subprocess.run(self._report_command, stdin=report_json)
            except phlsys_subprocess.CalledProcessError as e:
                _LOGGER.error("CycleReportJson: {}".format(e))
                return False

        return True
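The do() method above serializes the cycle report to JSON and pipes it to the configured report command over stdin. As a rough illustration of the receiving side, here is a hypothetical stdin consumer (the field name cycle_time_secs comes from the report built above; the script itself and the log file name are assumptions, not part of Arcyd):

# report_receiver.py - hypothetical consumer for the JSON cycle report.
# Reads one JSON document from stdin and appends the cycle time to a log.
import json
import sys


def main():
    report = json.load(sys.stdin)  # e.g. {"cycle_time_secs": 1.23, ...}
    with open("cycle_times.log", "a") as f:
        f.write("{}\n".format(report.get("cycle_time_secs")))


if __name__ == "__main__":
    main()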
Example #2
def initialise_here():
    """Return a new default Accessor after initialising the current directory.

    :returns: a new Accessor, mounted at the current directory

    """
    layout = Layout()

    phlsys_subprocess.run('git', 'init')
    repo = phlsys_git.Repo('.')

    # create filesystem hierarchy
    phlsys_fs.write_text_file(layout.arcydroot, 'this dir is an arcydroot')
    phlsys_fs.write_text_file('README', _README)
    phlsys_fs.write_text_file('var/README', _VAR_README)
    phlsys_fs.write_text_file('var/repo/README', _VAR_REPO_README)
    phlsys_fs.write_text_file('var/log/README', _VAR_LOG_README)
    phlsys_fs.write_text_file('var/status/README', _VAR_STATUS_README)
    phlsys_fs.write_text_file('var/command/README', _VAR_COMMAND_README)
    phlsys_fs.write_text_file('var/run/README', _VAR_RUN_README)

    repo.call('add', '.')
    phlsys_fs.write_text_file('.gitignore', 'var\n')
    repo.call('add', '.')
    phlgit_commit.index(repo, 'Initialised new Arcyd instance')

    return Accessor(Layout(), '.')
Example #3
def setup_repo_context(repo_url, repo_path):
    """Setup a repository, if an exception is raised then remove the repo.

    :repo_url: string url of the repo to clone
    :repo_path: string path to clone the repo to
    :returns: None

    """
    # if there's any failure after cloning then we should remove the repo
    phlsys_subprocess.run(
        'git', 'clone', repo_url, repo_path)
    try:
        repo = phlsys_git.Repo(repo_path)

        # test pushing to master
        repo('checkout', 'origin/master')
        phlgit_commit.allow_empty(repo, 'test commit for pushing')
        repo('push', 'origin', '--dry-run', 'HEAD:refs/heads/master')
        repo('checkout', '-')

        # test push to special refs
        repo(
            'push', 'origin', '--dry-run', 'HEAD:refs/arcyd/test')
        repo(
            'push', 'origin', '--dry-run', 'HEAD:refs/heads/dev/arcyd/test')

        # fetch the 'landed' and 'abandoned' refs if they exist
        abdt_git.checkout_master_fetch_special_refs(repo, 'origin')

        # success, allow the caller to do work
        yield
    except Exception:
        # clean up the git repo on any exception
        shutil.rmtree(repo_path)
        raise
Example #4
def initialise_here():
    """Return a new default Accessor after initialising the current directory.

    :returns: a new Accessor, mounted at the current directory

    """
    layout = Layout()

    phlsys_subprocess.run('git', 'init')
    repo = phlsys_git.Repo('.')

    # create filesystem hierarchy
    phlsys_fs.write_text_file(layout.arcydroot, 'this dir is an arcydroot')
    phlsys_fs.write_text_file('README', _README)
    phlsys_fs.write_text_file('config/README', _CONFIG_README)
    phlsys_fs.write_text_file(
        'config/phabricator/README', _CONFIG_PHABRICATOR_README)
    phlsys_fs.write_text_file(
        'config/repository/README', _CONFIG_REPOSITORY_README)
    phlsys_fs.write_text_file('var/README', _VAR_README)
    phlsys_fs.write_text_file('var/repo/README', _VAR_REPO_README)
    phlsys_fs.write_text_file('var/log/README', _VAR_LOG_README)
    phlsys_fs.write_text_file('var/status/README', _VAR_STATUS_README)
    phlsys_fs.write_text_file('var/command/README', _VAR_COMMAND_README)
    phlsys_fs.write_text_file('var/run/README', _VAR_RUN_README)

    repo('add', '.')
    phlsys_fs.write_text_file('.gitignore', 'var\n')
    repo('add', '.')
    phlgit_commit.index(repo, 'Initialised new Arcyd instance')

    return Accessor(Layout(), '.')
Example #5
 def log_system_error(self, identifier, detail):
     self._add_log_item(self._log_system_error, identifier, detail)
     if self._external_system_error_logger:
         phlsys_subprocess.run(
             self._external_system_error_logger,
             identifier,
             detail)
Example #6
    def do(self):
        report = {"cycle_time_secs": self._timer.restart()}
        report_json = json.dumps(report)

        try:
            phlsys_subprocess.run(self._report_command, stdin=report_json)
        except phlsys_subprocess.CalledProcessError as e:
            _LOGGER.error("CycleReportJson: {}".format(e))
            return False

        return True
Example #7
def on_system_error(identifier, detail):
    if _EXTERNAL_SYSTEM_ERROR_LOGGER:

        #  It's easily possible for 'detail' to exceed the length of
        #  command-line parameters allowed when calling out to a registered
        #  external system error logger.
        #
        #  Limit the amount of detail that will be sent to an arbitrary
        #  small number to prevent errors when reporting errors.
        #
        detail = detail[:160]

        phlsys_subprocess.run(_EXTERNAL_SYSTEM_ERROR_LOGGER, identifier,
                              detail)
Example #8
def no_index(left_path, right_path, working_dir=None):
    """Return a string diff between the two paths.

    :left_path: the string path of the left file to diff
    :right_path: the string path of the right file to diff
    :working_dir: the directory to perform the diff relative to
    :returns: the string diff result

    """
    diff = None
    try:
        result = phlsys_subprocess.run(
            'git',
            'diff',
            '--no-index',
            left_path,
            right_path,
            workingDir=working_dir)
        diff = result.stdout
    except phlsys_subprocess.CalledProcessError as e:
        # we expect diff --no-index to return exit codes:
        #   0 if there's no difference between the files
        #   1 if there is a difference
        #
        if e.exitcode != 1:
            raise
        diff = e.stdout

    return diff
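The exit-code handling above can be reproduced with nothing but the standard library. A minimal sketch (Python 3, independent of phlsys_subprocess) showing why an exit code of 1 has to be tolerated when the files differ:

# 'git diff --no-index' exits 0 for identical files and 1 for differing
# files, so a non-zero status is not necessarily an error here.
import subprocess
import tempfile

with tempfile.NamedTemporaryFile('w', delete=False) as left, \
        tempfile.NamedTemporaryFile('w', delete=False) as right:
    left.write('hello\n')
    right.write('world\n')

result = subprocess.run(
    ['git', 'diff', '--no-index', left.name, right.name],
    capture_output=True, text=True)
assert result.returncode == 1  # the files differ
print(result.stdout)  # the unified diff text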
Example #9
def no_index(left_path, right_path, working_dir=None):
    """Return a string diff between the two paths.

    :left_path: the string path of the left file to diff
    :right_path: the string path of the right file to diff
    :working_dir: the directory to perform the diff relative to
    :returns: the string diff result

    """
    diff = None
    try:
        result = phlsys_subprocess.run('git',
                                       'diff',
                                       '--no-index',
                                       left_path,
                                       right_path,
                                       workingDir=working_dir)
        diff = result.stdout
    except phlsys_subprocess.CalledProcessError as e:
        # we expect diff --no-index to return exit codes:
        #   0 if there's no difference between the files
        #   1 if there is a difference
        #
        if e.exitcode != 1:
            raise
        diff = e.stdout

    return diff
Example #10
 def __call__(self, *args, **kwargs):
     stdin = kwargs.pop("stdin", None)
     assert not kwargs
     result = phlsys_subprocess.run(
         self._command_path, *args,
         stdin=stdin, workingDir=self._working_dir_path)
     return result.stdout
Example #11
def on_system_error(identifier, detail):
    if _EXTERNAL_SYSTEM_ERROR_LOGGER:

        #  It's easily possible for 'detail' to exceed the length of
        #  command-line parameters allowed when calling out to a registered
        #  external system error logger.
        #
        #  Limit the amount of detail that will be sent to an arbitrary
        #  small number to prevent errors when reporting errors.
        #
        detail = detail[:160]

        phlsys_subprocess.run(
            _EXTERNAL_SYSTEM_ERROR_LOGGER,
            identifier,
            detail)
Example #12
 def call(self, *args, **kwargs):
     stdin = kwargs.pop("stdin", None)
     assert(not kwargs)
     result = phlsys_subprocess.run(
         'git', *args,
         stdin=stdin, workingDir=self._workingDir)
     return result.stdout
Example #13
 def __call__(self, *args, **kwargs):
     stdin = kwargs.pop("stdin", None)
     assert (not kwargs)
     result = phlsys_subprocess.run(self._command_path,
                                    *args,
                                    stdin=stdin,
                                    workingDir=self._working_dir_path)
     return result.stdout
Example #14
    def log_system_error(self, identifier, detail):
        self._add_log_item(self._log_system_error, identifier, detail)
        if self._external_system_error_logger:

            #  It's easily possible for 'detail' to exceed the length of
            #  command-line parameters allowed when calling out to a registered
            #  external system error logger.
            #
            #  Limit the amount of detail that will be sent to an arbitrary
            #  small number to prevent errors when reporting errors.
            #
            detail = detail[:160]

            phlsys_subprocess.run(
                self._external_system_error_logger,
                identifier,
                detail)
Example #15
 def test_run_list(self):
     "Passing valid list on stdin sorted in reverse order"
     args = ("sort", "-r")
     kwargs = {"stdin": "1\n2\n3"}
     result = phlsys_subprocess.run(*args, **kwargs)
     expect = phlsys_subprocess.RunResult(
         stdout=kwargs['stdin'][::-1] + "\n", stderr='')
     self.assertEqual(result, expect)
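Taken together, the tests and callers on this page suggest the shape of the interface that phlsys_subprocess exposes. The stand-in below is written against the standard library purely for illustration; it assumes run(*args, stdin=None, workingDir=None) returns a RunResult(stdout, stderr) and raises a CalledProcessError carrying exitcode and stdout, and it is not the real module:

# Illustrative stand-in for the run()/RunResult interface used above.
import collections
import subprocess

RunResult = collections.namedtuple('RunResult', ['stdout', 'stderr'])


class CalledProcessError(Exception):

    def __init__(self, exitcode, stdout, stderr):
        super(CalledProcessError, self).__init__(stderr)
        self.exitcode = exitcode
        self.stdout = stdout
        self.stderr = stderr


def run(*args, **kwargs):
    stdin = kwargs.pop("stdin", None)
    working_dir = kwargs.pop("workingDir", None)
    assert not kwargs
    p = subprocess.Popen(
        args,
        cwd=working_dir,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True)
    out, err = p.communicate(stdin)
    if p.returncode != 0:
        raise CalledProcessError(p.returncode, out, err)
    return RunResult(stdout=out, stderr=err)


# e.g. run('sort', '-r', stdin='1\n2\n3').stdout == '3\n2\n1\n'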
Example #16
 def __call__(self, *args, **kwargs):
     stdin = kwargs.pop("stdin", None)
     assert (not kwargs)
     result = phlsys_subprocess.run('git',
                                    *args,
                                    stdin=stdin,
                                    workingDir=self._workingDir)
     return result.stdout
Example #17
def setup_repo_context(repo_url, repo_path, repo_push_url=None):
    """Setup a repository, if an exception is raised then remove the repo.

    :repo_url: string url of the repo to clone
    :repo_path: string path to clone the repo to
    :repo_push_url: string url to push to, or None
    :returns: None

    """
    # if there's any failure after cloning then we should remove the repo
    if repo_push_url is not None:
        phlsys_subprocess.run(
            'git', 'clone', repo_url, repo_path,
            '--config', 'remote.origin.pushurl=' + repo_push_url)
    else:
        phlsys_subprocess.run(
            'git', 'clone', repo_url, repo_path)

    try:
        repo = phlsys_git.Repo(repo_path)

        # make sure we have no problems with 'ident' strings, we won't build
        # from arcyd so it shouldn't be externally visible that we don't expand
        # them.
        phlgitx_ignoreattributes.ensure_repo_ignoring(repo_path)

        # test pushing to master
        repo('checkout', 'origin/master')
        phlgit_commit.allow_empty(repo, 'test commit for pushing')
        repo('push', 'origin', '--dry-run', 'HEAD:refs/heads/master')
        repo('checkout', '-')

        try_push_special_refs(repo)

        # fetch the 'landed' and 'abandoned' refs if they exist
        abdt_git.checkout_master_fetch_special_refs(repo, 'origin')

        ensure_reserve_branch(repo)

        # success, allow the caller to do work
        yield
    except Exception:
        # clean up the git repo on any exception
        shutil.rmtree(repo_path)
        raise
Example #18
 def test_run_list(self):
     """Passing valid list on stdin sorted in reverse order."""
     args = ("sort", "-r")
     kwargs = {"stdin": "1\n2\n3"}
     result = phlsys_subprocess.run(*args, **kwargs)
     expect = phlsys_subprocess.RunResult(stdout=kwargs['stdin'][::-1] +
                                          "\n",
                                          stderr='')
     self.assertEqual(result, expect)
Example #19
def setup_repo_context(repo_url, repo_path, repo_push_url=None):
    """Setup a repository, if an exception is raised then remove the repo.

    :repo_url: string url of the repo to clone
    :repo_path: string path to clone the repo to
    :repo_push_url: string url to push to, or None
    :returns: None

    """
    # if there's any failure after cloning then we should remove the repo
    if repo_push_url is not None:
        phlsys_subprocess.run('git', 'clone', repo_url, repo_path, '--config',
                              'remote.origin.pushurl=' + repo_push_url)
    else:
        phlsys_subprocess.run('git', 'clone', repo_url, repo_path)

    try:
        repo = phlsys_git.Repo(repo_path)

        # make sure we have no problems with 'ident' strings, we won't build
        # from arcyd so it shouldn't be externally visible that we don't expand
        # them.
        phlgitx_ignoreattributes.ensure_repo_ignoring(repo_path)

        # test pushing to master
        repo('checkout', 'origin/master')
        phlgit_commit.allow_empty(repo, 'test commit for pushing')
        repo('push', 'origin', '--dry-run', 'HEAD:refs/heads/master')
        repo('checkout', '-')

        try_push_special_refs(repo)

        # fetch the 'landed' and 'abandoned' refs if they exist
        abdt_git.checkout_master_fetch_special_refs(repo, 'origin')

        ensure_reserve_branch(repo)

        # success, allow the caller to do work
        yield
    except Exception:
        # clean up the git repo on any exception
        shutil.rmtree(repo_path)
        raise
Example #20
def setup_repo_context(repo_url, repo_path):
    """Setup a repository, if an exception is raised then remove the repo.

    :repo_url: string url of the repo to clone
    :repo_path: string path to clone the repo to
    :returns: None

    """
    # if there's any failure after cloning then we should remove the repo
    phlsys_subprocess.run(
        'git', 'clone', repo_url, repo_path)
    try:
        repo = phlsys_git.Repo(repo_path)

        # test pushing to master
        repo('checkout', 'origin/master')
        phlgit_commit.allow_empty(repo, 'test commit for pushing')
        repo('push', 'origin', '--dry-run', 'HEAD:refs/heads/master')
        repo('checkout', '-')

        # test push to special refs
        repo(
            'push', 'origin', '--dry-run', 'HEAD:refs/arcyd/test')
        repo(
            'push', 'origin', '--dry-run', 'HEAD:refs/heads/dev/arcyd/test')

        # fetch the 'landed' and 'abandoned' refs if they exist
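        # (ls-remote prints '<sha> <refname>' pairs, one per line; splitting
        #  on whitespace and taking [1::2] keeps just the refnames)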
        ref_list = set(repo('ls-remote').split()[1::2])
        special_refs = [
            (abdt_git.ARCYD_ABANDONED_REF, abdt_git.ARCYD_ABANDONED_BRANCH_FQ),
            (abdt_git.ARCYD_LANDED_REF, abdt_git.ARCYD_LANDED_BRANCH_FQ),
        ]
        for ref in special_refs:
            if ref[0] in ref_list:
                repo('fetch', 'origin', '{}:{}'.format(ref[0], ref[1]))

        # success, allow the caller to do work
        yield
    except Exception:
        # clean up the git repo on any exception
        shutil.rmtree(repo_path)
        raise
Example #21
def run(dir_path):
    """Return errors from running cppcheck in supplied 'dir_path'.

    :dir_path: string of the path to the directory to run in
    :returns: list of Result

    """
    with phlsys_fs.chdir_context(dir_path):
        # XXX: handle the "couldn't find files" exception
        return parse_output(phlsys_subprocess.run(
            'cppcheck', '-q', '.', '--xml', '--xml-version=2').stderr.strip())
Example #22
def run(dir_path):
    """Return errors from running cppcheck in supplied 'dir_path'.

    :dir_path: string of the path to the directory to run in
    :returns: list of Result

    """
    with phlsys_fs.chdir_context(dir_path):
        # XXX: handle the "couldn't find files" exception
        return parse_output(
            phlsys_subprocess.run('cppcheck', '-q', '.', '--xml',
                                  '--xml-version=2').stderr.strip())
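parse_output is not shown on this page. For orientation only, here is a hypothetical sketch of parsing the '--xml --xml-version=2' stderr output; the <error> element and its id/severity/msg attributes follow typical cppcheck output, while the Result shape here is an assumption rather than the project's real type:

# Hypothetical parser for cppcheck XML (version 2) output.
import collections
import xml.etree.ElementTree as ET

Result = collections.namedtuple('Result', ['severity', 'identifier', 'message'])


def parse_output_sketch(xml_text):
    root = ET.fromstring(xml_text)
    return [
        Result(
            severity=error.get('severity'),
            identifier=error.get('id'),
            message=error.get('msg'))
        for error in root.iter('error')
    ]


_EXAMPLE = """<?xml version="1.0"?>
<results version="2">
  <errors>
    <error id="nullPointer" severity="error" msg="Null pointer dereference"/>
  </errors>
</results>"""

print(parse_output_sketch(_EXAMPLE))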
Example #23
def do(repo_configs, sys_admin_emails, sleep_secs, is_no_loop,
       external_report_command, mail_sender, max_workers, overrun_secs):

    conduit_manager = _ConduitManager()

    fs_accessor = abdt_fs.make_default_accessor()
    url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
        fs_accessor.layout.urlwatcher_cache_path)

    # decide max workers based on number of CPUs if no value is specified
    if max_workers == 0:
        max_workers = determine_max_workers_default()

    repo_list = []
    for name, config in repo_configs:
        repo_list.append(
            _ArcydManagedRepository(name, config, conduit_manager,
                                    url_watcher_wrapper, sys_admin_emails,
                                    mail_sender))

    # if we always overrun half our workers then the loop is sustainable, if we
    # overrun more than that then we'll be lagging too far behind. In the event
    # that we only have one worker then we can't overrun any.
    max_overrun_workers = max_workers // 2

    pool = phlmp_cyclingpool.CyclingPool(repo_list, max_workers,
                                         max_overrun_workers)

    cycle_timer = phlsys_timer.Timer()
    cycle_timer.start()
    exit_code = None
    while exit_code is None:

        # This timer needs to be separate from the cycle timer. The cycle timer
        # must be reset every time it is reported. The sleep timer makes sure
        # that each run of the loop takes a minimum amount of time.
        sleep_timer = phlsys_timer.Timer()
        sleep_timer.start()

        # refresh git snoops
        with abdt_logging.remote_io_read_event_context('refresh-git-snoop',
                                                       ''):
            abdt_tryloop.critical_tryloop(url_watcher_wrapper.watcher.refresh,
                                          abdt_errident.GIT_SNOOP, '')

        with abdt_logging.remote_io_read_event_context('refresh-conduit', ''):
            conduit_manager.refresh_conduits()

        with abdt_logging.misc_operation_event_context(
                'process-repos',
                '{} workers, {} repos'.format(max_workers, len(repo_list))):
            if max_workers > 1:
                for i, res in pool.cycle_results(overrun_secs=overrun_secs):
                    repo = repo_list[i]
                    repo.merge_from_worker(res)
            else:
                for r in repo_list:
                    r()

        # important to do this before stopping arcyd and as soon as possible
        # after doing fetches
        url_watcher_wrapper.save()

        # report cycle stats
        report = {
            "cycle_time_secs": cycle_timer.restart(),
            "overrun_jobs": pool.num_active_jobs,
        }
        _LOGGER.debug("cycle-stats: {}".format(report))
        if external_report_command:
            report_json = json.dumps(report)
            full_path = os.path.abspath(external_report_command)
            with abdt_logging.misc_operation_event_context(
                    'external-report-command', external_report_command):
                try:
                    phlsys_subprocess.run(full_path, stdin=report_json)
                except phlsys_subprocess.CalledProcessError as e:
                    _LOGGER.error(
                        "External command: {} failed with exception: "
                        "{}.".format(external_report_command,
                                     type(e).__name__))
                    _LOGGER.error(
                        "VERBOSE MESSAGE: CycleReportJson:{}".format(e))

        if is_no_loop:
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
        elif os.path.isfile(fs_accessor.layout.killfile):
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
            if phlsys_fs.read_text_file(fs_accessor.layout.killfile):
                _LOGGER.info("Killfile observed, reason given: {}".format(
                    phlsys_fs.read_text_file(fs_accessor.layout.killfile)))
            else:
                _LOGGER.info("Killfile observed, arcyd will stop")
            os.remove(fs_accessor.layout.killfile)
        elif os.path.isfile(fs_accessor.layout.reloadfile):
            _LOGGER.info("Reloadfile observed, arcyd will reload")
            exit_code = abdi_processexitcodes.ExitCodes.ec_reload
            os.remove(fs_accessor.layout.reloadfile)

        # sleep to pad out the cycle
        secs_to_sleep = float(sleep_secs) - float(sleep_timer.duration)
        if secs_to_sleep > 0 and exit_code is None:
            with abdt_logging.misc_operation_event_context(
                    'sleep', secs_to_sleep):
                time.sleep(secs_to_sleep)

    # finish any jobs that overran
    for i, res in pool.finish_results():
        repo = repo_list[i]
        repo.merge_from_worker(res)

    # important to do this before stopping arcyd and as soon as
    # possible after doing fetches
    url_watcher_wrapper.save()

    return exit_code
Example #24
 def setUp(self):
     # TODO: make this more portable with shutil etc.
     phlsys_subprocess.run_commands("mkdir " + self.path)
     phlsys_subprocess.run("git", "init", workingDir=self.path)
     self.clone = phlsys_git.GitClone(self.path)
Example #25
def process(args):

    fs = abdt_fs.make_default_accessor()

    try_touch_path = fs.layout.repo_try(args.name)
    ok_touch_path = fs.layout.repo_ok(args.name)
    repo_path = fs.layout.repo(args.name)

    # make sure the repo doesn't exist already
    if os.path.exists(repo_path):
        raise Exception('{} already exists'.format(repo_path))

    # make sure the phabricator config exists
    phab_config_path = fs.get_phabricator_config_rel_path(
        args.phabricator_name)

    # make sure we can use the snoop URL
    if args.repo_snoop_url:
        phlurl_request.get(args.repo_snoop_url)

    # generate the config file
    config = _CONFIG.format(
        phabricator_config=phab_config_path,
        repo_desc=args.repo_desc,
        repo_path=repo_path,
        try_touch_path=try_touch_path,
        ok_touch_path=ok_touch_path,
        arcyd_email=args.arcyd_email,
        admin_email=args.admin_email)

    if args.repo_snoop_url:
        config = '\n'.join([
            config,
            _CONFIG_SNOOP_URL.format(
                repo_snoop_url=args.repo_snoop_url)])

    if args.review_url_format:
        config = '\n'.join([
            config,
            _CONFIG_REVIEW_URL.format(
                review_url_format=args.review_url_format)])

    if args.branch_url_format:
        config = '\n'.join([
            config,
            _CONFIG_BRANCH_URL.format(
                branch_url_format=args.branch_url_format)])

    # if there's any failure after cloning then we should remove the repo
    phlsys_subprocess.run(
        'git', 'clone', args.repo_url, repo_path)
    try:
        repo = phlsys_git.Repo(repo_path)

        # test pushing to master
        repo.call('checkout', 'origin/master')
        phlgit_commit.allow_empty(repo, 'test commit for pushing')
        repo.call('push', 'origin', '--dry-run', 'HEAD:refs/heads/master')
        repo.call('checkout', '-')

        # test push to special refs
        repo.call(
            'push', 'origin', '--dry-run', 'HEAD:refs/arcyd/test')
        repo.call(
            'push', 'origin', '--dry-run', 'HEAD:refs/heads/dev/arcyd/test')

        # fetch the 'landed' and 'abandoned' refs if they exist
        ref_list = set(repo.call('ls-remote').split()[1::2])
        special_refs = [
            (abdt_git.ARCYD_ABANDONED_REF, abdt_git.ARCYD_ABANDONED_BRANCH_FQ),
            (abdt_git.ARCYD_LANDED_REF, abdt_git.ARCYD_LANDED_BRANCH_FQ),
        ]
        for ref in special_refs:
            if ref[0] in ref_list:
                repo.call('fetch', 'origin', '{}:{}'.format(ref[0], ref[1]))

        # success, write out the config
        fs.create_repo_config(args.name, config)
    except Exception:
        # clean up the git repo
        shutil.rmtree(repo_path)
        raise
Example #26
 def test_run_valid_simple_cmd(self):
     "Valid simple cmd - run returns equal RunResult instance"
     args = ("echo", "hello stdout")
     result = phlsys_subprocess.run(*args)
     expect = phlsys_subprocess.RunResult(stdout=args[1] + "\n", stderr='')
     self.assertEqual(result, expect)
Example #27
def run_once(args, out):
    sender = phlmail_sender.MailSender(
        phlsys_sendmail.Sendmail(), args.arcyd_email)
    mailer = abdmail_mailer.Mailer(
        sender,
        [args.admin_email],
        args.repo_desc,
        args.instance_uri)  # TODO: this should be a URI for users not conduit

    # prepare delays in the event of trouble when fetching or connecting
    # TODO: perhaps this policy should be decided higher-up
    delays = [
        datetime.timedelta(seconds=1),
        datetime.timedelta(seconds=1),
        datetime.timedelta(seconds=10),
        datetime.timedelta(seconds=10),
        datetime.timedelta(seconds=100),
        datetime.timedelta(seconds=100),
        datetime.timedelta(seconds=1000),
    ]

    # log.error if we get an exception when fetching
    def on_exception(e, delay):
        logging.error(str(e) + "\nwill wait " + str(delay))

    if args.try_touch_path:
        try:
            # TODO: don't rely on the touch command
            phlsys_subprocess.run("touch", args.try_touch_path)
        except Exception:
            pass  # XXX: we don't care atm, later log this

    with phlsys_fs.chdir_context(args.repo_path):
        out.display("fetch (" + args.repo_desc + "): ")
        phlsys_tryloop.try_loop_delay(
            lambda: phlsys_subprocess.run_commands("git fetch -p"),
            delays,
            onException=on_exception)

    # XXX: until conduit refreshes the connection, we'll suffer from
    #      timeouts; reduce the probability of this by using a new
    #      conduit each time.

    # create an array so that the 'connect' closure binds to the 'conduit'
    # variable as we'd expect, otherwise it'll just modify a local variable
    # and this 'conduit' will remain 'None'
    # XXX: we can do better in python 3.x
    conduit = [None]

    def connect():
        #nonlocal conduit # XXX: we'll rebind in python 3.x, instead of array
        conduit[0] = phlsys_conduit.Conduit(
            args.instance_uri,
            args.arcyd_user,
            args.arcyd_cert,
            https_proxy=args.https_proxy)

    phlsys_tryloop.try_loop_delay(connect, delays, onException=on_exception)

    out.display("process (" + args.repo_desc + "): ")
    abdi_processrepo.processUpdatedRepo(
        conduit[0], args.repo_path, "origin", mailer)

    if args.ok_touch_path:
        try:
            # TODO: don't rely on the touch command
            phlsys_subprocess.run("touch", args.ok_touch_path)
        except Exception:
            pass  # XXX: we don't care atm, later log this
Example #28
 def send(self, stdin):
     result = phlsys_subprocess.run(
         self._binary, *self._params, stdin=stdin)
     return result.stdout
Example #29
 def test_run_valid_simple_cmd(self):
     "Valid simple cmd - run returns equal RunResult instance"
     args = ("echo", "hello stdout")
     result = phlsys_subprocess.run(*args)
     expect = phlsys_subprocess.RunResult(stdout=args[1] + "\n", stderr='')
     self.assertEqual(result, expect)
Example #30
def do(
        repo_configs,
        sys_admin_emails,
        kill_file,
        sleep_secs,
        is_no_loop,
        external_report_command,
        mail_sender,
        max_workers,
        overrun_secs):

    conduit_manager = _ConduitManager()

    fs_accessor = abdt_fs.make_default_accessor()
    url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
        fs_accessor.layout.urlwatcher_cache_path)

    finished = False

    # decide max workers based on number of CPUs if no value is specified
    if max_workers is None:
        try:
            # use the same default as multiprocessing.Pool
            max_workers = multiprocessing.cpu_count()
            _LOGGER.debug(
                "max_workers unspecified, defaulted to cpu_count: {}".format(
                    max_workers))
        except NotImplementedError:
            _LOGGER.warning(
                "multiprocessing.cpu_count() not supported, disabling "
                "multiprocessing. Specify max workers explicitly to enable.")
            max_workers = 0

    repo_list = []
    for name, config in repo_configs:
        repo_list.append(
            _ArcydManagedRepository(
                name,
                config,
                conduit_manager,
                url_watcher_wrapper,
                sys_admin_emails,
                mail_sender))

    # if we always overrun half our workers then the loop is sustainable, if we
    # overrun more than that then we'll be lagging too far behind. In the event
    # that we only have one worker then we can't overrun any.
    max_overrun_workers = max_workers // 2

    pool = phlmp_cyclingpool.CyclingPool(
        repo_list, max_workers, max_overrun_workers)

    cycle_timer = phlsys_timer.Timer()
    cycle_timer.start()
    while not finished:

        # This timer needs to be separate from the cycle timer. The cycle timer
        # must be reset every time it is reported. The sleep timer makes sure
        # that each run of the loop takes a minimum amount of time.
        sleep_timer = phlsys_timer.Timer()
        sleep_timer.start()

        # refresh git snoops
        abdt_tryloop.critical_tryloop(
            url_watcher_wrapper.watcher.refresh,
            abdt_errident.GIT_SNOOP,
            '')

        conduit_manager.refresh_conduits()

        if max_workers:
            for i, res in pool.cycle_results(overrun_secs=overrun_secs):
                repo = repo_list[i]
                repo.merge_from_worker(res)
        else:
            for r in repo_list:
                r()

        # important to do this before stopping arcyd and as soon as possible
        # after doing fetches
        url_watcher_wrapper.save()

        # report cycle stats
        if external_report_command:
            report = {
                "cycle_time_secs": cycle_timer.restart(),
                "overrun_jobs": pool.num_active_jobs,
            }
            report_json = json.dumps(report)
            full_path = os.path.abspath(external_report_command)
            try:
                phlsys_subprocess.run(full_path, stdin=report_json)
            except phlsys_subprocess.CalledProcessError as e:
                _LOGGER.error("CycleReportJson: {}".format(e))

        # look for killfile
        if os.path.isfile(kill_file):

            # finish any jobs that overran
            for i, res in pool.finish_results():
                repo = repo_list[i]
                repo.merge_from_worker(res)

            # important to do this before stopping arcyd and as soon as
            # possible after doing fetches
            url_watcher_wrapper.save()

            os.remove(kill_file)
            finished = True
            break

        if is_no_loop:
            finished = True
            break

        # sleep to pad out the cycle
        secs_to_sleep = float(sleep_secs) - float(sleep_timer.duration)
        if secs_to_sleep > 0:
            time.sleep(secs_to_sleep)
Example #31
def do(
        repo_configs,
        sys_admin_emails,
        sleep_secs,
        is_no_loop,
        external_report_command,
        mail_sender,
        max_workers,
        overrun_secs):

    conduit_manager = _ConduitManager()

    fs_accessor = abdt_fs.make_default_accessor()
    url_watcher_wrapper = phlurl_watcher.FileCacheWatcherWrapper(
        fs_accessor.layout.urlwatcher_cache_path)

    # decide max workers based on number of CPUs if no value is specified
    if max_workers == 0:
        max_workers = determine_max_workers_default()

    repo_list = []
    for name, config in repo_configs:
        repo_list.append(
            _ArcydManagedRepository(
                name,
                config,
                conduit_manager,
                url_watcher_wrapper,
                sys_admin_emails,
                mail_sender))

    # if we always overrun half our workers then the loop is sustainable, if we
    # overrun more than that then we'll be lagging too far behind. In the event
    # that we only have one worker then we can't overrun any.
    max_overrun_workers = max_workers // 2

    pool = phlmp_cyclingpool.CyclingPool(
        repo_list, max_workers, max_overrun_workers)

    cycle_timer = phlsys_timer.Timer()
    cycle_timer.start()
    exit_code = None
    while exit_code is None:

        # This timer needs to be separate from the cycle timer. The cycle timer
        # must be reset every time it is reported. The sleep timer makes sure
        # that each run of the loop takes a minimum amount of time.
        sleep_timer = phlsys_timer.Timer()
        sleep_timer.start()

        # refresh git snoops
        with abdt_logging.remote_io_read_event_context(
                'refresh-git-snoop', ''):
            abdt_tryloop.critical_tryloop(
                url_watcher_wrapper.watcher.refresh,
                abdt_errident.GIT_SNOOP,
                '')

        with abdt_logging.remote_io_read_event_context('refresh-conduit', ''):
            conduit_manager.refresh_conduits()

        with abdt_logging.misc_operation_event_context(
                'process-repos',
                '{} workers, {} repos'.format(max_workers, len(repo_list))):
            if max_workers > 1:
                for i, res in pool.cycle_results(overrun_secs=overrun_secs):
                    repo = repo_list[i]
                    repo.merge_from_worker(res)
            else:
                for r in repo_list:
                    r()

        # important to do this before stopping arcyd and as soon as possible
        # after doing fetches
        url_watcher_wrapper.save()

        # report cycle stats
        report = {
            "cycle_time_secs": cycle_timer.restart(),
            "overrun_jobs": pool.num_active_jobs,
        }
        _LOGGER.debug("cycle-stats: {}".format(report))
        if external_report_command:
            report_json = json.dumps(report)
            full_path = os.path.abspath(external_report_command)
            with abdt_logging.misc_operation_event_context(
                    'external-report-command', external_report_command):
                try:
                    phlsys_subprocess.run(full_path, stdin=report_json)
                except phlsys_subprocess.CalledProcessError as e:
                    _LOGGER.error(
                        "External command: {} failed with exception: "
                        "{}.".format(
                            external_report_command, type(e).__name__))
                    _LOGGER.error("VERBOSE MESSAGE: CycleReportJson:{}".format(
                        e))

        if is_no_loop:
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
        elif os.path.isfile(fs_accessor.layout.killfile):
            exit_code = abdi_processexitcodes.ExitCodes.ec_exit
            if phlsys_fs.read_text_file(fs_accessor.layout.killfile):
                _LOGGER.info("Killfile observed, reason given: {}".format(
                    phlsys_fs.read_text_file(fs_accessor.layout.killfile)))
            else:
                _LOGGER.info("Killfile observed, arcyd will stop")
            os.remove(fs_accessor.layout.killfile)
        elif os.path.isfile(fs_accessor.layout.reloadfile):
            _LOGGER.info("Reloadfile observed, arcyd will reload")
            exit_code = abdi_processexitcodes.ExitCodes.ec_reload
            os.remove(fs_accessor.layout.reloadfile)

        # sleep to pad out the cycle
        secs_to_sleep = float(sleep_secs) - float(sleep_timer.duration)
        if secs_to_sleep > 0 and exit_code is None:
            with abdt_logging.misc_operation_event_context(
                    'sleep', secs_to_sleep):
                time.sleep(secs_to_sleep)

    # finish any jobs that overran
    for i, res in pool.finish_results():
        repo = repo_list[i]
        repo.merge_from_worker(res)

    # important to do this before stopping arcyd and as soon as
    # possible after doing fetches
    url_watcher_wrapper.save()

    return exit_code
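determine_max_workers_default is not listed on this page; below is a plausible sketch, based only on the inline cpu_count() fallback in the earlier variant of do() above, and labelled as an assumption rather than the real implementation:

# Hypothetical determine_max_workers_default, mirroring the inline
# multiprocessing.cpu_count() fallback shown in the earlier do() variant.
import logging
import multiprocessing

_LOGGER = logging.getLogger(__name__)


def determine_max_workers_default():
    try:
        # use the same default as multiprocessing.Pool
        max_workers = multiprocessing.cpu_count()
        _LOGGER.debug(
            "max_workers unspecified, defaulted to cpu_count: {}".format(
                max_workers))
    except NotImplementedError:
        _LOGGER.warning(
            "multiprocessing.cpu_count() not supported, disabling "
            "multiprocessing. Specify max workers explicitly to enable.")
        max_workers = 1
    return max_workers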
Example #32
 def setUp(self):
     # TODO: make this more portable with shutil etc.
     phlsys_subprocess.run_commands("mkdir " + self.path)
     phlsys_subprocess.run("git", "init", workingDir=self.path)
     self.repo = phlsys_git.Repo(self.path)
Example #33
 def _gitCommitAll(self, subject, testPlan, reviewer):
     reviewers = [reviewer] if reviewer else None
     message = abdt_commitmessage.make(subject, None, testPlan, reviewers)
     phlsys_subprocess.run("git", "commit", "-a", "-F", "-", stdin=message)
Example #34
 def send(self, stdin):
     result = phlsys_subprocess.run(self._binary,
                                    *self._params,
                                    stdin=stdin)
     return result.stdout