Example #1
    def _set_up_runner(self):
        h = self.host
        args = self.args

        self.stats = Stats(args.status_format, h.time, args.jobs)
        self.printer = Printer(self.print_, args.overwrite,
                               args.terminal_width)

        if self.args.top_level_dirs and self.args.top_level_dir:
            self.print_(
                'Cannot specify both --top-level-dir and --top-level-dirs',
                stream=h.stderr)
            return 1

        self.top_level_dirs = args.top_level_dirs
        if not self.top_level_dirs and args.top_level_dir:
            self.top_level_dirs = [args.top_level_dir]

        if not self.top_level_dirs:
            for test in [t for t in args.tests if h.exists(t)]:
                if h.isdir(test):
                    top_dir = test
                else:
                    top_dir = h.dirname(test)
                while h.exists(top_dir, '__init__.py'):
                    top_dir = h.dirname(top_dir)
                top_dir = h.realpath(top_dir)
                if top_dir not in self.top_level_dirs:
                    self.top_level_dirs.append(top_dir)
        if not self.top_level_dirs:
            top_dir = h.getcwd()
            while h.exists(top_dir, '__init__.py'):
                top_dir = h.dirname(top_dir)
            top_dir = h.realpath(top_dir)
            self.top_level_dirs.append(top_dir)

        if not self.top_level_dir and self.top_level_dirs:
            self.top_level_dir = self.top_level_dirs[0]

        for path in self.top_level_dirs:
            h.add_to_path(path)

        for path in args.path:
            h.add_to_path(path)

        if args.coverage:  # pragma: no cover
            try:
                import coverage
            except ImportError:
                self.print_('Error: coverage is not installed.')
                return 1

            source = self.args.coverage_source
            if not source:
                source = self.top_level_dirs + self.args.path
            self.coverage_source = source
            self.cov = coverage.coverage(source=self.coverage_source,
                                         data_suffix=True)
            self.cov.erase()
        return 0
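
The top-level-dir inference above boils down to walking upward while the directory is still a Python package. A standalone sketch of that walk, using os directly instead of the Host abstraction (the helper name is hypothetical and not part of these examples):

import os

def find_top_level_dir(start):
    # Hypothetical helper mirroring the walk above: climb out of the package
    # while __init__.py is present, then canonicalize the result.
    d = start if os.path.isdir(start) else os.path.dirname(start)
    while os.path.exists(os.path.join(d, '__init__.py')):
        d = os.path.dirname(d)
    return os.path.realpath(d)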
Example #2
    def _set_up_runner(self):
        h = self.host
        args = self.args

        self.stats = Stats(args.status_format, h.time, args.jobs)
        self.printer = Printer(
            self.print_, args.overwrite, args.terminal_width)

        if self.args.top_level_dirs and self.args.top_level_dir:
            self.print_(
                'Cannot specify both --top-level-dir and --top-level-dirs',
                stream=h.stderr)
            return 1

        self.top_level_dirs = args.top_level_dirs
        if not self.top_level_dirs and args.top_level_dir:
            self.top_level_dirs = [args.top_level_dir]

        if not self.top_level_dirs:
            for test in [t for t in args.tests if h.exists(t)]:
                if h.isdir(test):
                    top_dir = test
                else:
                    top_dir = h.dirname(test)
                while h.exists(top_dir, '__init__.py'):
                    top_dir = h.dirname(top_dir)
                top_dir = h.realpath(top_dir)
                if top_dir not in self.top_level_dirs:
                    self.top_level_dirs.append(top_dir)
        if not self.top_level_dirs:
            top_dir = h.getcwd()
            while h.exists(top_dir, '__init__.py'):
                top_dir = h.dirname(top_dir)
            top_dir = h.realpath(top_dir)
            self.top_level_dirs.append(top_dir)

        if not self.top_level_dir and self.top_level_dirs:
            self.top_level_dir = self.top_level_dirs[0]

        for path in self.top_level_dirs:
            h.add_to_path(path)

        for path in args.path:
            h.add_to_path(path)

        if args.coverage:  # pragma: no cover
            try:
                import coverage
            except ImportError:
                return 1

            source = self.args.coverage_source
            if not source:
                source = self.top_level_dirs + self.args.path
            self.coverage_source = source
            self.cov = coverage.coverage(source=self.coverage_source,
                                         data_suffix=True)
            self.cov.erase()
        return 0
Example #3
    def test_last_line_flushed_when_not_overwriting(self):
        pr = Printer(self.print_, False, 80)
        pr.update('foo\nbar')
        pr.update('baz')
        pr.flush()
        self.assertEqual(self.out,
                         ['foo\nbar',
                          '\n',
                          'baz',
                          '\n'])
Example #4
    def test_overwrite(self):
        pr = Printer(self.print_, True, 80)
        pr.update('hello world')
        pr.update('goodbye world')
        pr.flush()
        self.assertEqual(self.out,
                         ['hello world',
                          '\r           \r',
                          'goodbye world',
                          '\n'])
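
These two tests pin down the Printer contract: in non-overwrite mode the previous line is finished with a newline before the next update, while in overwrite mode it is blanked in place with a carriage return and padding. A minimal sketch that reproduces exactly these call sequences (an illustration, not the library's actual Printer; it assumes the print_ callback accepts an end keyword, and it ignores terminal width):

class SketchPrinter(object):
    def __init__(self, print_, should_overwrite, cols):
        self.print_ = print_
        self.should_overwrite = should_overwrite
        self.cols = cols
        self.last_line = ''

    def update(self, msg):
        if self.last_line:
            if self.should_overwrite:
                # Blank the previous line in place: '\r', padding, '\r'.
                self.print_('\r' + ' ' * len(self.last_line) + '\r', end='')
            else:
                # Finish the previous line and continue below it.
                self.print_('\n', end='')
        self.print_(msg, end='')
        self.last_line = msg.splitlines()[-1] if msg else ''

    def flush(self):
        if self.last_line:
            self.print_('\n', end='')
            self.last_line = ''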
Example #5
    def _set_up_runner(self):
        h = self.host
        args = self.args

        self.stats = Stats(args.status_format, h.time, args.jobs)
        self.printer = Printer(self.print_, args.overwrite,
                               args.terminal_width)

        self.top_level_dir = args.top_level_dir
        if not self.top_level_dir:
            if args.tests and h.isdir(args.tests[0]):
                # TODO: figure out what to do if multiple files are
                # specified and they don't all have the same correct
                # top level dir.
                d = h.realpath(h.dirname(args.tests[0]))
                if h.exists(d, '__init__.py'):
                    top_dir = d
                else:
                    top_dir = args.tests[0]
            else:
                top_dir = h.getcwd()
            while h.exists(top_dir, '__init__.py'):
                top_dir = h.dirname(top_dir)
            self.top_level_dir = h.realpath(top_dir)

        h.add_to_path(self.top_level_dir)

        for path in args.path:
            h.add_to_path(path)

        if args.coverage:  # pragma: no cover
            try:
                import coverage
            except ImportError:
                h.print_("Error: coverage is not installed")
                return 1
            source = self.args.coverage_source
            if not source:
                source = [self.top_level_dir] + self.args.path
            self.coverage_source = source
            self.cov = coverage.coverage(source=self.coverage_source,
                                         data_suffix=True)
            self.cov.erase()
        return 0
Example #6
    def _set_up_runner(self):
        h = self.host
        args = self.args

        self.stats = Stats(args.status_format, h.time, args.jobs)
        self.printer = Printer(
            self.print_, args.overwrite, args.terminal_width)

        self.top_level_dir = args.top_level_dir
        if not self.top_level_dir:
            if args.tests and h.isdir(args.tests[0]):
                # TODO: figure out what to do if multiple files are
                # specified and they don't all have the same correct
                # top level dir.
                d = h.realpath(h.dirname(args.tests[0]))
                if h.exists(d, '__init__.py'):
                    top_dir = d
                else:
                    top_dir = args.tests[0]
            else:
                top_dir = h.getcwd()
            while h.exists(top_dir, '__init__.py'):
                top_dir = h.dirname(top_dir)
            self.top_level_dir = h.realpath(top_dir)

        h.add_to_path(self.top_level_dir)

        for path in args.path:
            h.add_to_path(path)

        if args.coverage:  # pragma: no cover
            try:
                import coverage
            except ImportError:
                h.print_("Error: coverage is not installed")
                return 1
            source = self.args.coverage_source
            if not source:
                source = [self.top_level_dir] + self.args.path
            self.coverage_source = source
            self.cov = coverage.coverage(source=self.coverage_source,
                                         data_suffix=True)
            self.cov.erase()
        return 0
Example #7
class Runner(object):
    def __init__(self, host=None):
        self.args = None
        self.classifier = None
        self.cov = None
        self.context = None
        self.coverage_source = None
        self.host = host or Host()
        self.loader = unittest.loader.TestLoader()
        self.printer = None
        self.setup_fn = None
        self.stats = None
        self.teardown_fn = None
        self.top_level_dir = None
        self.top_level_dirs = []
        self.win_multiprocessing = WinMultiprocessing.spawn
        self.final_responses = []
        self.has_expectations = False
        self.expectations = None

        # initialize self.args to the defaults.
        parser = ArgumentParser(self.host)
        self.parse_args(parser, [])

    def main(self, argv=None, **defaults):
        parser = ArgumentParser(self.host)
        self.parse_args(parser, argv, **defaults)
        if parser.exit_status is not None:
            return parser.exit_status

        try:
            ret, _, _ = self.run()
            return ret
        except KeyboardInterrupt:
            self.print_("interrupted, exiting", stream=self.host.stderr)
            return 130

    def parse_args(self, parser, argv, **defaults):
        for attrname in defaults:
            if not hasattr(self.args, attrname):
                parser.error("Unknown default argument name '%s'" % attrname,
                             bailout=False)
                return
        parser.set_defaults(**defaults)
        self.args = parser.parse_args(args=argv)
        if parser.exit_status is not None:
            return

    def print_(self, msg='', end='\n', stream=None):
        self.host.print_(msg, end, stream=stream)

    def run(self, test_set=None):

        ret = 0
        h = self.host

        if self.args.version:
            self.print_(VERSION)
            return ret, None, None

        should_spawn = self._check_win_multiprocessing()
        if should_spawn:
            return self._spawn(test_set)

        ret = self._set_up_runner()
        if ret:
            return ret, None, None

        find_start = h.time()
        if self.cov:  # pragma: no cover
            self.cov.erase()
            self.cov.start()

        full_results = None
        result_set = ResultSet()

        if not test_set:
            ret, test_set = self.find_tests(self.args)
        find_end = h.time()

        if not ret:
            self.stats.total = (len(test_set.parallel_tests) +
                                len(test_set.isolated_tests) +
                                len(test_set.tests_to_skip)) * self.args.repeat
            all_tests = [
                ti.name for ti in _sort_inputs(test_set.parallel_tests +
                                               test_set.isolated_tests +
                                               test_set.tests_to_skip)
            ]
            if self.args.test_name_prefix:
                self.args.metadata.append('test_name_prefix=' +
                                          self.args.test_name_prefix)
            if self.args.list_only:
                self.print_('\n'.join(all_tests))
            else:
                for _ in range(self.args.repeat):
                    current_ret, full_results = self._run_tests(
                        result_set, test_set.copy(), all_tests)
                    ret = ret or current_ret

        if self.cov:  # pragma: no cover
            self.cov.stop()
            self.cov.save()
        test_end = h.time()

        trace = self._trace_from_results(result_set)
        if full_results:
            self._summarize(full_results)
            self._write(self.args.write_full_results_to, full_results)
            upload_ret = self._upload(full_results)
            if not ret:
                ret = upload_ret
            reporting_end = h.time()
            self._add_trace_event(trace, 'run', find_start, reporting_end)
            self._add_trace_event(trace, 'discovery', find_start, find_end)
            self._add_trace_event(trace, 'testing', find_end, test_end)
            self._add_trace_event(trace, 'reporting', test_end, reporting_end)
            self._write(self.args.write_trace_to, trace)
            self.report_coverage()
        else:
            upload_ret = 0

        return ret, full_results, trace

    def _check_win_multiprocessing(self):
        wmp = self.win_multiprocessing

        ignore, importable, spawn = WinMultiprocessing.values

        if wmp not in WinMultiprocessing.values:
            raise ValueError('illegal value %s for win_multiprocessing' % wmp)

        h = self.host
        if wmp == ignore and h.platform == 'win32':  # pragma: win32
            raise ValueError('Cannot use WinMultiprocessing.ignore for '
                             'win_multiprocessing when actually running '
                             'on Windows.')

        if wmp == ignore or self.args.jobs == 1:
            return False

        if wmp == importable:
            if self._main_is_importable():
                return False
            raise ValueError('The __main__ module (%s) '  # pragma: no cover
                             'may not be importable' %
                             sys.modules['__main__'].__file__)

        assert wmp == spawn
        return True

    def _main_is_importable(self):  # pragma: untested
        path = sys.modules['__main__'].__file__
        if not path:
            return False
        if path.endswith('.pyc'):
            path = path[:-1]
        if not path.endswith('.py'):
            return False
        if path.endswith('__main__.py'):
            # main modules are not directly importable.
            return False

        path = self.host.realpath(path)
        for d in sys.path:
            if path.startswith(self.host.realpath(d)):
                return True
        return False  # pragma: no cover

    def _spawn(self, test_set):
        # TODO: Handle picklable hooks, rather than requiring them to be None.
        assert self.classifier is None
        assert self.context is None
        assert self.setup_fn is None
        assert self.teardown_fn is None
        assert test_set is None
        h = self.host

        if self.args.write_trace_to:  # pragma: untested
            should_delete_trace = False
        else:
            should_delete_trace = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_trace_to = fp.name

        if self.args.write_full_results_to:  # pragma: untested
            should_delete_results = False
        else:
            should_delete_results = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_full_results_to = fp.name

        argv = ArgumentParser(h).argv_from_args(self.args)
        ret = h.call_inline([h.python_interpreter, path_to_file] + argv)

        trace = self._read_and_delete(self.args.write_trace_to,
                                      should_delete_trace)
        full_results = self._read_and_delete(self.args.write_full_results_to,
                                             should_delete_results)
        return ret, full_results, trace

    def _set_up_runner(self):
        h = self.host
        args = self.args

        self.stats = Stats(args.status_format, h.time, args.jobs)
        self.printer = Printer(self.print_, args.overwrite,
                               args.terminal_width)

        if self.args.top_level_dirs and self.args.top_level_dir:
            self.print_(
                'Cannot specify both --top-level-dir and --top-level-dirs',
                stream=h.stderr)
            return 1

        self.top_level_dirs = args.top_level_dirs
        if not self.top_level_dirs and args.top_level_dir:
            self.top_level_dirs = [args.top_level_dir]

        if not self.top_level_dirs:
            for test in [t for t in args.tests if h.exists(t)]:
                if h.isdir(test):
                    top_dir = test
                else:
                    top_dir = h.dirname(test)
                while h.exists(top_dir, '__init__.py'):
                    top_dir = h.dirname(top_dir)
                top_dir = h.realpath(top_dir)
                if top_dir not in self.top_level_dirs:
                    self.top_level_dirs.append(top_dir)
        if not self.top_level_dirs:
            top_dir = h.getcwd()
            while h.exists(top_dir, '__init__.py'):
                top_dir = h.dirname(top_dir)
            top_dir = h.realpath(top_dir)
            self.top_level_dirs.append(top_dir)

        if not self.top_level_dir and self.top_level_dirs:
            self.top_level_dir = self.top_level_dirs[0]

        for path in self.top_level_dirs:
            h.add_to_path(path)

        for path in args.path:
            h.add_to_path(path)

        if args.coverage:  # pragma: no cover
            try:
                import coverage
            except ImportError:
                self.print_('Error: coverage is not installed.')
                return 1

            source = self.args.coverage_source
            if not source:
                source = self.top_level_dirs + self.args.path
            self.coverage_source = source
            self.cov = coverage.coverage(source=self.coverage_source,
                                         data_suffix=True)
            self.cov.erase()

        if args.expectations_files:
            ret = self.parse_expectations()
            if ret:
                return ret
        elif args.tags:
            self.print_('Error: tags require expectations files.')
            return 1
        return 0

    def parse_expectations(self):
        args = self.args

        if len(args.expectations_files) != 1:
            # TODO(crbug.com/835690): Fix this.
            self.print_(
                'Only a single expectation file is currently supported',
                stream=self.host.stderr)
            return 1
        contents = self.host.read_text_file(args.expectations_files[0])

        expectations = TestExpectations(set(args.tags))
        err, msg = expectations.parse_tagged_list(contents)
        if err:
            self.print_(msg, stream=self.host.stderr)
            return err

        self.has_expectations = True
        self.expectations = expectations

    def find_tests(self, args):
        test_set = TestSet(self.args.test_name_prefix)

        orig_skip = unittest.skip
        orig_skip_if = unittest.skipIf
        if args.all:
            unittest.skip = lambda reason: lambda x: x
            unittest.skipIf = lambda condition, reason: lambda x: x

        try:
            names = self._name_list_from_args(args)
            classifier = self.classifier or self.default_classifier

            for name in names:
                try:
                    self._add_tests_to_set(test_set, args.suffixes,
                                           self.top_level_dirs, classifier,
                                           name)
                except (AttributeError, ImportError, SyntaxError) as e:
                    ex_str = traceback.format_exc()
                    self.print_('Failed to load "%s" in find_tests: %s' %
                                (name, e))
                    self.print_('  %s' % '\n  '.join(ex_str.splitlines()))
                    self.print_(ex_str)
                    return 1, None
                except _AddTestsError as e:
                    self.print_(str(e))
                    return 1, None

            # TODO: Add support for discovering setupProcess/teardownProcess?

            shard_index = args.shard_index
            total_shards = args.total_shards
            assert total_shards >= 1
            assert shard_index >= 0 and shard_index < total_shards, (
                'shard_index (%d) must be >= 0 and < total_shards (%d)' %
                (shard_index, total_shards))
            test_set.parallel_tests = _sort_inputs(
                test_set.parallel_tests)[shard_index::total_shards]
            test_set.isolated_tests = _sort_inputs(
                test_set.isolated_tests)[shard_index::total_shards]
            test_set.tests_to_skip = _sort_inputs(
                test_set.tests_to_skip)[shard_index::total_shards]
            return 0, test_set
        finally:
            unittest.skip = orig_skip
            unittest.skipIf = orig_skip_if

    def _name_list_from_args(self, args):
        if args.tests:
            names = args.tests
        elif args.file_list:
            if args.file_list == '-':
                s = self.host.stdin.read()
            else:
                s = self.host.read_text_file(args.file_list)
            names = [line.strip() for line in s.splitlines()]
        else:
            names = self.top_level_dirs
        return names

    def _add_tests_to_set(self, test_set, suffixes, top_level_dirs, classifier,
                          name):
        h = self.host
        loader = self.loader
        add_tests = _test_adder(test_set, classifier)

        found = set()
        for d in top_level_dirs:
            if h.isfile(name):
                rpath = h.relpath(name, d)
                if rpath.startswith('..'):
                    continue
                if rpath.endswith('.py'):
                    rpath = rpath[:-3]
                module = rpath.replace(h.sep, '.')
                if module not in found:
                    found.add(module)
                    add_tests(loader.loadTestsFromName(module))
            elif h.isdir(name):
                rpath = h.relpath(name, d)
                if rpath.startswith('..'):
                    continue
                for suffix in suffixes:
                    if name not in found:
                        found.add(name + '/' + suffix)
                        add_tests(loader.discover(name, suffix, d))
            else:
                possible_dir = name.replace('.', h.sep)
                if h.isdir(d, possible_dir):
                    for suffix in suffixes:
                        path = h.join(d, possible_dir)
                        if path not in found:
                            found.add(path + '/' + suffix)
                            suite = loader.discover(path, suffix, d)
                            add_tests(suite)
                elif name not in found:
                    found.add(name)
                    add_tests(
                        loader.loadTestsFromName(self.args.test_name_prefix +
                                                 name))

        # pylint: disable=no-member
        if hasattr(loader, 'errors') and loader.errors:  # pragma: python3
            # In Python3's version of unittest, loader failures get converted
            # into failed test cases, rather than raising exceptions. However,
            # the errors also get recorded so you can err out immediately.
            raise ImportError(loader.errors)

    def _run_tests(self, result_set, test_set, all_tests):
        h = self.host
        self.last_runs_retry_on_failure_tests = set()

        def get_tests_to_retry(results):
            # If the --retry-only-retry-on-failure-tests command line argument
            # is passed, return the set of test failures from the last run
            # that carried the RetryOnFailure expectation, and reset
            # self.last_runs_retry_on_failure_tests to an empty set for the
            # next run. Otherwise, return all regressions from the last run.
            if self.args.retry_only_retry_on_failure_tests:
                ret = self.last_runs_retry_on_failure_tests.copy()
                self.last_runs_retry_on_failure_tests = set()
                return ret
            else:
                return json_results.regressions(results)

        self._run_one_set(self.stats, result_set, test_set)

        tests_to_retry = sorted(get_tests_to_retry(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and tests_to_retry:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(tests_to_retry)
            test_set = TestSet(self.args.test_name_prefix)
            for name in tests_to_retry:
                test_set.add_test_to_run_isolated(
                    list(
                        self.loader.loadTestsFromName(
                            self.args.test_name_prefix + name))[0])
            tests_to_retry = test_set
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            tests_to_retry = get_tests_to_retry(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.args.metadata,
                                                      int(h.time()), all_tests,
                                                      result_set)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)

    def _run_one_set(self, stats, result_set, test_set):
        self._skip_tests(stats, result_set, test_set.tests_to_skip)
        self._run_list(stats, result_set, test_set.parallel_tests,
                       self.args.jobs)
        self._run_list(stats, result_set, test_set.isolated_tests, 1)

    def _skip_tests(self, stats, result_set, tests_to_skip):
        for test_input in tests_to_skip:
            last = self.host.time()
            stats.started += 1
            self._print_test_started(stats, test_input)
            now = self.host.time()
            result = Result(test_input.name,
                            actual=ResultType.Skip,
                            started=last,
                            took=(now - last),
                            worker=0,
                            expected=[ResultType.Skip],
                            out=test_input.msg)
            result_set.add(result)
            stats.finished += 1
            self._print_test_finished(stats, result)

    def _run_list(self, stats, result_set, test_inputs, jobs):
        h = self.host
        running_jobs = set()

        jobs = min(len(test_inputs), jobs)
        if not jobs:
            return

        child = _Child(self)
        pool = make_pool(h, jobs, _run_one_test, child, _setup_process,
                         _teardown_process)
        try:
            while test_inputs or running_jobs:
                while test_inputs and (len(running_jobs) < jobs):
                    test_input = test_inputs.pop(0)
                    stats.started += 1
                    pool.send(test_input)
                    running_jobs.add(test_input.name)
                    self._print_test_started(stats, test_input)

                result, should_retry_on_failure = pool.get()
                if (self.args.retry_only_retry_on_failure_tests
                        and result.actual == ResultType.Failure
                        and should_retry_on_failure):
                    self.last_runs_retry_on_failure_tests.add(result.name)

                running_jobs.remove(result.name)
                result_set.add(result)
                stats.finished += 1
                self._print_test_finished(stats, result)
            pool.close()
        finally:
            self.final_responses.extend(pool.join())

    def _print_test_started(self, stats, test_input):
        if self.args.quiet:
            # Print nothing when --quiet was passed.
            return

        # If -vvv was passed, print when the test is queued to be run.
        # We don't actually know when the test is picked up to run, because
        # that is handled by the child process (where we can't easily
        # print things). Otherwise, only print when the test is started
        # if we know we can overwrite the line, so that we do not
        # get multiple lines of output as noise (in -vvv, we actually want
        # the noise).
        test_start_msg = stats.format() + test_input.name
        if self.args.verbose > 2:
            self.update(test_start_msg + ' queued', elide=False)
        if self.args.overwrite:
            self.update(test_start_msg, elide=(not self.args.verbose))

    def _print_test_finished(self, stats, result):
        stats.add_time()

        assert result.actual in [
            ResultType.Failure, ResultType.Skip, ResultType.Pass
        ]
        if result.actual == ResultType.Failure:
            result_str = ' failed'
        elif result.actual == ResultType.Skip:
            result_str = ' was skipped'
        elif result.actual == ResultType.Pass:
            result_str = ' passed'

        if result.unexpected:
            result_str += ' unexpectedly'
        elif result.actual == ResultType.Failure:
            result_str += ' as expected'

        if self.args.timing:
            timing_str = ' %.4fs' % result.took
        else:
            timing_str = ''
        suffix = '%s%s' % (result_str, timing_str)
        out = result.out
        err = result.err
        if result.code:
            if out or err:
                suffix += ':\n'
            self.update(stats.format() + result.name + suffix, elide=False)
            for l in out.splitlines():
                self.print_('  %s' % l)
            for l in err.splitlines():
                self.print_('  %s' % l)
        elif not self.args.quiet:
            if self.args.verbose > 1 and (out or err):
                suffix += ':\n'
            self.update(stats.format() + result.name + suffix,
                        elide=(not self.args.verbose))
            if self.args.verbose > 1:
                for l in out.splitlines():
                    self.print_('  %s' % l)
                for l in err.splitlines():
                    self.print_('  %s' % l)
            if self.args.verbose:
                self.flush()

    def update(self, msg, elide):
        self.printer.update(msg, elide)

    def flush(self):
        self.printer.flush()

    def _summarize(self, full_results):
        num_passes = json_results.num_passes(full_results)
        num_failures = json_results.num_failures(full_results)
        num_skips = json_results.num_skips(full_results)

        if self.args.quiet and num_failures == 0:
            return

        if self.args.timing:
            timing_clause = ' in %.1fs' % (self.host.time() -
                                           self.stats.started_time)
        else:
            timing_clause = ''
        self.update(
            '%d test%s passed%s, %d skipped, %d failure%s.' %
            (num_passes, '' if num_passes == 1 else 's', timing_clause,
             num_skips, num_failures, '' if num_failures == 1 else 's'),
            elide=False)
        self.print_()

    def _read_and_delete(self, path, delete):
        h = self.host
        obj = None
        if h.exists(path):
            contents = h.read_text_file(path)
            if contents:
                obj = json.loads(contents)
            if delete:
                h.remove(path)
        return obj

    def _write(self, path, obj):
        if path:
            self.host.write_text_file(path, json.dumps(obj, indent=2) + '\n')

    def _upload(self, full_results):
        h = self.host
        if not self.args.test_results_server:
            return 0

        url, content_type, data = json_results.make_upload_request(
            self.args.test_results_server, self.args.builder_name,
            self.args.master_name, self.args.test_type, full_results)

        try:
            h.fetch(url, data, {'Content-Type': content_type})
            return 0
        except Exception as e:
            h.print_('Uploading the JSON results raised "%s"' % str(e))
            return 1

    def report_coverage(self):
        if self.args.coverage:  # pragma: no cover
            self.host.print_()
            import coverage
            cov = coverage.coverage(data_suffix=True)
            cov.combine()
            cov.report(show_missing=self.args.coverage_show_missing,
                       omit=self.args.coverage_omit)
            if self.args.coverage_annotate:
                cov.annotate(omit=self.args.coverage_omit)

    def _add_trace_event(self, trace, name, start, end):
        event = {
            'name': name,
            'ts': int((start - self.stats.started_time) * 1000000),
            'dur': int((end - start) * 1000000),
            'ph': 'X',
            'pid': self.host.getpid(),
            'tid': 0,
        }
        trace['traceEvents'].append(event)

    def _trace_from_results(self, result_set):
        trace = OrderedDict()
        trace['traceEvents'] = []
        trace['otherData'] = {}
        for m in self.args.metadata:
            k, v = m.split('=')
            trace['otherData'][k] = v

        for result in result_set.results:
            started = int((result.started - self.stats.started_time) * 1000000)
            took = int(result.took * 1000000)
            event = OrderedDict()
            event['name'] = result.name
            event['dur'] = took
            event['ts'] = started
            event['ph'] = 'X'  # "Complete" events
            event['pid'] = result.pid
            event['tid'] = result.worker

            args = OrderedDict()
            args['expected'] = sorted(str(r) for r in result.expected)
            args['actual'] = str(result.actual)
            args['out'] = result.out
            args['err'] = result.err
            args['code'] = result.code
            args['unexpected'] = result.unexpected
            args['flaky'] = result.flaky
            event['args'] = args

            trace['traceEvents'].append(event)
        return trace

    def expectations_for(self, test_case):
        if self.has_expectations:
            return self.expectations.expectations_for(
                test_case.id()[len(self.args.test_name_prefix):])
        else:
            return (set([ResultType.Pass]), False)

    def default_classifier(self, test_set, test):
        if self.matches_filter(test):
            if not self.args.all and self.should_skip(test):
                test_set.add_test_to_skip(test, 'skipped by request')
            elif self.should_isolate(test):
                test_set.add_test_to_run_isolated(test)
            else:
                test_set.add_test_to_run_in_parallel(test)

    def matches_filter(self, test_case):
        _validate_test_starts_with_prefix(self.args.test_name_prefix,
                                          test_case.id())
        test_name = test_case.id()[len(self.args.test_name_prefix):]
        return (not self.args.test_filter or any(
            fnmatch.fnmatch(test_name, glob)
            for glob in self.args.test_filter.split('::')))

    def should_isolate(self, test_case):
        _validate_test_starts_with_prefix(self.args.test_name_prefix,
                                          test_case.id())
        test_name = test_case.id()[len(self.args.test_name_prefix):]
        return any(
            fnmatch.fnmatch(test_name, glob) for glob in self.args.isolate)

    def should_skip(self, test_case):
        _validate_test_starts_with_prefix(self.args.test_name_prefix,
                                          test_case.id())
        test_name = test_case.id()[len(self.args.test_name_prefix):]
        if self.has_expectations:
            expected_results, _ = self.expectations.expectations_for(test_name)
        else:
            expected_results = set([ResultType.Pass])
        return (ResultType.Skip in expected_results or any(
            fnmatch.fnmatch(test_name, glob) for glob in self.args.skip))
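
End to end, the Runner above is driven through main(): parse the arguments, run the tests, and return an exit status (130 on Ctrl-C). A minimal invocation sketch, assuming the class shown here is importable as typ.runner.Runner (the import path itself does not appear in these examples):

import sys

from typ.runner import Runner  # assumed import path

if __name__ == '__main__':
    # main() parses argv, discovers and runs the tests, and returns an
    # exit status suitable for sys.exit().
    sys.exit(Runner().main(sys.argv[1:]))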
Example #8
class Runner(object):
    def __init__(self, host=None):
        self.args = None
        self.classifier = None
        self.cov = None
        self.context = None
        self.coverage_source = None
        self.host = host or Host()
        self.loader = unittest.loader.TestLoader()
        self.printer = None
        self.setup_fn = None
        self.stats = None
        self.teardown_fn = None
        self.top_level_dir = None
        self.win_multiprocessing = WinMultiprocessing.spawn
        self.final_responses = []

        # initialize self.args to the defaults.
        parser = ArgumentParser(self.host)
        self.parse_args(parser, [])

    def main(self, argv=None, **defaults):
        parser = ArgumentParser(self.host)
        self.parse_args(parser, argv, **defaults)
        if parser.exit_status is not None:
            return parser.exit_status

        try:
            ret, _, _ = self.run()
            return ret
        except KeyboardInterrupt:
            self.print_("interrupted, exiting", stream=self.host.stderr)
            return 130

    def parse_args(self, parser, argv, **defaults):
        for attrname in defaults:
            if not hasattr(self.args, attrname):
                parser.error("Unknown default argument name '%s'" % attrname,
                             bailout=False)
                return
        parser.set_defaults(**defaults)
        self.args = parser.parse_args(args=argv)
        if parser.exit_status is not None:
            return

    def print_(self, msg='', end='\n', stream=None):
        self.host.print_(msg, end, stream=stream)

    def run(self, test_set=None):

        ret = 0
        h = self.host

        if self.args.version:
            self.print_(VERSION)
            return ret, None, None

        should_spawn = self._check_win_multiprocessing()
        if should_spawn:
            return self._spawn(test_set)

        ret = self._set_up_runner()
        if ret:  # pragma: no cover
            return ret, None, None

        find_start = h.time()
        if self.cov:  # pragma: no cover
            self.cov.erase()
            self.cov.start()

        full_results = None
        result_set = ResultSet()

        if not test_set:
            ret, test_set = self.find_tests(self.args)
        find_end = h.time()

        if not ret:
            ret, full_results = self._run_tests(result_set, test_set)

        if self.cov:  # pragma: no cover
            self.cov.stop()
            self.cov.save()
        test_end = h.time()

        trace = self._trace_from_results(result_set)
        if full_results:
            self._summarize(full_results)
            self._write(self.args.write_full_results_to, full_results)
            upload_ret = self._upload(full_results)
            if not ret:
                ret = upload_ret
            reporting_end = h.time()
            self._add_trace_event(trace, 'run', find_start, reporting_end)
            self._add_trace_event(trace, 'discovery', find_start, find_end)
            self._add_trace_event(trace, 'testing', find_end, test_end)
            self._add_trace_event(trace, 'reporting', test_end, reporting_end)
            self._write(self.args.write_trace_to, trace)
            self.report_coverage()
        else:
            upload_ret = 0

        return ret, full_results, trace

    def _check_win_multiprocessing(self):
        wmp = self.win_multiprocessing

        ignore, importable, spawn = WinMultiprocessing.values

        if wmp not in WinMultiprocessing.values:
            raise ValueError('illegal value %s for win_multiprocessing' % wmp)

        h = self.host
        if wmp == ignore and h.platform == 'win32':  # pragma: win32
            raise ValueError('Cannot use WinMultiprocessing.ignore for '
                             'win_multiprocessing when actually running '
                             'on Windows.')

        if wmp == ignore or self.args.jobs == 1:
            return False

        if wmp == importable:
            if self._main_is_importable():
                return False
            raise ValueError('The __main__ module (%s) '  # pragma: no cover
                             'may not be importable' %
                             sys.modules['__main__'].__file__)

        assert wmp == spawn
        return True

    def _main_is_importable(self):  # pragma: untested
        path = sys.modules['__main__'].__file__
        if not path:
            return False
        if path.endswith('.pyc'):
            path = path[:-1]
        if not path.endswith('.py'):
            return False
        if path.endswith('__main__.py'):
            # main modules are not directly importable.
            return False

        path = self.host.realpath(path)
        for d in sys.path:
            if path.startswith(self.host.realpath(d)):
                return True
        return False  # pragma: no cover

    def _spawn(self, test_set):
        # TODO: Handle picklable hooks, rather than requiring them to be None.
        assert self.classifier is None
        assert self.context is None
        assert self.setup_fn is None
        assert self.teardown_fn is None
        assert test_set is None
        h = self.host

        if self.args.write_trace_to:  # pragma: untested
            should_delete_trace = False
        else:
            should_delete_trace = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_trace_to = fp.name

        if self.args.write_full_results_to:  # pragma: untested
            should_delete_results = False
        else:
            should_delete_results = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_full_results_to = fp.name

        argv = ArgumentParser(h).argv_from_args(self.args)
        ret = h.call_inline([h.python_interpreter, path_to_file] + argv)

        trace = self._read_and_delete(self.args.write_trace_to,
                                      should_delete_trace)
        full_results = self._read_and_delete(self.args.write_full_results_to,
                                             should_delete_results)
        return ret, full_results, trace

    def _set_up_runner(self):
        h = self.host
        args = self.args

        self.stats = Stats(args.status_format, h.time, args.jobs)
        self.printer = Printer(self.print_, args.overwrite,
                               args.terminal_width)

        self.top_level_dir = args.top_level_dir
        if not self.top_level_dir:
            if args.tests and h.isdir(args.tests[0]):
                # TODO: figure out what to do if multiple files are
                # specified and they don't all have the same correct
                # top level dir.
                d = h.realpath(h.dirname(args.tests[0]))
                if h.exists(d, '__init__.py'):
                    top_dir = d
                else:
                    top_dir = args.tests[0]
            else:
                top_dir = h.getcwd()
            while h.exists(top_dir, '__init__.py'):
                top_dir = h.dirname(top_dir)
            self.top_level_dir = h.realpath(top_dir)

        h.add_to_path(self.top_level_dir)

        for path in args.path:
            h.add_to_path(path)

        if args.coverage:  # pragma: no cover
            try:
                import coverage
            except ImportError:
                h.print_("Error: coverage is not installed")
                return 1
            source = self.args.coverage_source
            if not source:
                source = [self.top_level_dir] + self.args.path
            self.coverage_source = source
            self.cov = coverage.coverage(source=self.coverage_source,
                                         data_suffix=True)
            self.cov.erase()
        return 0

    def find_tests(self, args):
        test_set = TestSet()

        orig_skip = unittest.skip
        orig_skip_if = unittest.skipIf
        if args.all:
            unittest.skip = lambda reason: lambda x: x
            unittest.skipIf = lambda condition, reason: lambda x: x

        try:
            names = self._name_list_from_args(args)
            classifier = self.classifier or _default_classifier(args)

            for name in names:
                try:
                    self._add_tests_to_set(test_set, args.suffixes,
                                           self.top_level_dir, classifier,
                                           name)
                except (AttributeError, ImportError, SyntaxError) as e:
                    self.print_('Failed to load "%s": %s' % (name, e))
                    return 1, None
                except _AddTestsError as e:
                    self.print_(str(e))
                    return 1, None

            # TODO: Add support for discovering setupProcess/teardownProcess?

            test_set.parallel_tests = _sort_inputs(test_set.parallel_tests)
            test_set.isolated_tests = _sort_inputs(test_set.isolated_tests)
            test_set.tests_to_skip = _sort_inputs(test_set.tests_to_skip)
            return 0, test_set
        finally:
            unittest.skip = orig_skip
            unittest.skipIf = orig_skip_if

    def _name_list_from_args(self, args):
        if args.tests:
            names = args.tests
        elif args.file_list:
            if args.file_list == '-':
                s = self.host.stdin.read()
            else:
                s = self.host.read_text_file(args.file_list)
            names = [line.strip() for line in s.splitlines()]
        else:
            names = [self.top_level_dir]
        return names

    def _add_tests_to_set(self, test_set, suffixes, top_level_dir, classifier,
                          name):
        h = self.host
        loader = self.loader
        add_tests = _test_adder(test_set, classifier)

        if h.isfile(name):
            rpath = h.relpath(name, top_level_dir)
            if rpath.endswith('.py'):
                rpath = rpath[:-3]
            module = rpath.replace(h.sep, '.')
            add_tests(loader.loadTestsFromName(module))
        elif h.isdir(name):
            for suffix in suffixes:
                add_tests(loader.discover(name, suffix, top_level_dir))
        else:
            possible_dir = name.replace('.', h.sep)
            if h.isdir(top_level_dir, possible_dir):
                for suffix in suffixes:
                    path = h.join(top_level_dir, possible_dir)
                    suite = loader.discover(path, suffix, top_level_dir)
                    add_tests(suite)
            else:
                add_tests(loader.loadTestsFromName(name))

    def _run_tests(self, result_set, test_set):
        h = self.host
        if not test_set.parallel_tests and not test_set.isolated_tests:
            self.print_('No tests to run.')
            return 1, None

        all_tests = [
            ti.name for ti in _sort_inputs(test_set.parallel_tests +
                                           test_set.isolated_tests +
                                           test_set.tests_to_skip)
        ]

        if self.args.list_only:
            self.print_('\n'.join(all_tests))
            return 0, None

        self._run_one_set(self.stats, result_set, test_set)

        failed_tests = sorted(json_results.failed_test_names(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and failed_tests:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(failed_tests)
            tests_to_retry = TestSet(isolated_tests=list(failed_tests))
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            failed_tests = json_results.failed_test_names(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.args.metadata,
                                                      int(h.time()), all_tests,
                                                      result_set)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)

    def _run_one_set(self, stats, result_set, test_set):
        stats.total = (len(test_set.parallel_tests) +
                       len(test_set.isolated_tests) +
                       len(test_set.tests_to_skip))
        self._skip_tests(stats, result_set, test_set.tests_to_skip)
        self._run_list(stats, result_set, test_set.parallel_tests,
                       self.args.jobs)
        self._run_list(stats, result_set, test_set.isolated_tests, 1)

    def _skip_tests(self, stats, result_set, tests_to_skip):
        for test_input in tests_to_skip:
            last = self.host.time()
            stats.started += 1
            self._print_test_started(stats, test_input)
            now = self.host.time()
            result = Result(test_input.name,
                            actual=ResultType.Skip,
                            started=last,
                            took=(now - last),
                            worker=0,
                            expected=[ResultType.Skip],
                            out=test_input.msg)
            result_set.add(result)
            stats.finished += 1
            self._print_test_finished(stats, result)

    def _run_list(self, stats, result_set, test_inputs, jobs):
        h = self.host
        running_jobs = set()

        jobs = min(len(test_inputs), jobs)
        if not jobs:
            return

        child = _Child(self)
        pool = make_pool(h, jobs, _run_one_test, child, _setup_process,
                         _teardown_process)
        try:
            while test_inputs or running_jobs:
                while test_inputs and (len(running_jobs) < self.args.jobs):
                    test_input = test_inputs.pop(0)
                    stats.started += 1
                    pool.send(test_input)
                    running_jobs.add(test_input.name)
                    self._print_test_started(stats, test_input)

                result = pool.get()
                running_jobs.remove(result.name)
                result_set.add(result)
                stats.finished += 1
                self._print_test_finished(stats, result)
            pool.close()
        finally:
            self.final_responses.extend(pool.join())

    def _print_test_started(self, stats, test_input):
        if self.args.quiet:
            # Print nothing when --quiet was passed.
            return

        # If -vvv was passed, print when the test is queued to be run.
        # We don't actually know when the test is picked up to run, because
        # that is handled by the child process (where we can't easily
        # print things). Otherwise, only print when the test is started
        # if we know we can overwrite the line, so that we do not
        # get multiple lines of output as noise (in -vvv, we actually want
        # the noise).
        test_start_msg = stats.format() + test_input.name
        if self.args.verbose > 2:
            self.update(test_start_msg + ' queued', elide=False)
        if self.args.overwrite:
            self.update(test_start_msg, elide=(not self.args.verbose))

    def _print_test_finished(self, stats, result):
        stats.add_time()

        assert result.actual in [
            ResultType.Failure, ResultType.Skip, ResultType.Pass
        ]
        if result.actual == ResultType.Failure:
            result_str = ' failed'
        elif result.actual == ResultType.Skip:
            result_str = ' was skipped'
        elif result.actual == ResultType.Pass:
            result_str = ' passed'

        if result.unexpected:
            result_str += ' unexpectedly'
        if self.args.timing:
            timing_str = ' %.4fs' % result.took
        else:
            timing_str = ''
        suffix = '%s%s' % (result_str, timing_str)
        out = result.out
        err = result.err
        if result.code:
            if out or err:
                suffix += ':\n'
            self.update(stats.format() + result.name + suffix, elide=False)
            for l in out.splitlines():
                self.print_('  %s' % l)
            for l in err.splitlines():
                self.print_('  %s' % l)
        elif not self.args.quiet:
            if self.args.verbose > 1 and (out or err):
                suffix += ':\n'
            self.update(stats.format() + result.name + suffix,
                        elide=(not self.args.verbose))
            if self.args.verbose > 1:
                for l in out.splitlines():
                    self.print_('  %s' % l)
                for l in err.splitlines():
                    self.print_('  %s' % l)
            if self.args.verbose:
                self.flush()

    def update(self, msg, elide):
        self.printer.update(msg, elide)

    def flush(self):
        self.printer.flush()

    def _summarize(self, full_results):
        num_tests = self.stats.finished
        num_failures = json_results.num_failures(full_results)

        if self.args.quiet and num_failures == 0:
            return

        if self.args.timing:
            timing_clause = ' in %.1fs' % (self.host.time() -
                                           self.stats.started_time)
        else:
            timing_clause = ''
        self.update('%d test%s run%s, %d failure%s.' %
                    (num_tests, '' if num_tests == 1 else 's', timing_clause,
                     num_failures, '' if num_failures == 1 else 's'),
                    elide=False)
        self.print_()

    def _read_and_delete(self, path, delete):
        h = self.host
        obj = None
        if h.exists(path):
            contents = h.read_text_file(path)
            if contents:
                obj = json.loads(contents)
            if delete:
                h.remove(path)
        return obj

    def _write(self, path, obj):
        if path:
            self.host.write_text_file(path, json.dumps(obj, indent=2) + '\n')

    def _upload(self, full_results):
        h = self.host
        if not self.args.test_results_server:
            return 0

        url, content_type, data = json_results.make_upload_request(
            self.args.test_results_server, self.args.builder_name,
            self.args.master_name, self.args.test_type, full_results)

        try:
            h.fetch(url, data, {'Content-Type': content_type})
            return 0
        except Exception as e:
            h.print_('Uploading the JSON results raised "%s"' % str(e))
            return 1

    def report_coverage(self):
        if self.args.coverage:  # pragma: no cover
            self.host.print_()
            import coverage
            cov = coverage.coverage(data_suffix=True)
            cov.combine()
            cov.report(show_missing=self.args.coverage_show_missing,
                       omit=self.args.coverage_omit)
            if self.args.coverage_annotate:
                cov.annotate(omit=self.args.coverage_omit)

    def _add_trace_event(self, trace, name, start, end):
        event = {
            'name': name,
            'ts': int((start - self.stats.started_time) * 1000000),
            'dur': int((end - start) * 1000000),
            'ph': 'X',
            'pid': self.host.getpid(),
            'tid': 0,
        }
        trace['traceEvents'].append(event)

    def _trace_from_results(self, result_set):
        trace = OrderedDict()
        trace['traceEvents'] = []
        trace['otherData'] = {}
        for m in self.args.metadata:
            k, v = m.split('=', 1)
            trace['otherData'][k] = v

        for result in result_set.results:
            started = int((result.started - self.stats.started_time) * 1000000)
            took = int(result.took * 1000000)
            event = OrderedDict()
            event['name'] = result.name
            event['dur'] = took
            event['ts'] = started
            event['ph'] = 'X'  # "Complete" events
            event['pid'] = result.pid
            event['tid'] = result.worker

            args = OrderedDict()
            args['expected'] = sorted(str(r) for r in result.expected)
            args['actual'] = str(result.actual)
            args['out'] = result.out
            args['err'] = result.err
            args['code'] = result.code
            args['unexpected'] = result.unexpected
            args['flaky'] = result.flaky
            event['args'] = args

            trace['traceEvents'].append(event)
        return trace
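The dict built by _trace_from_results() above is in the Chrome trace-event format: each test becomes one "complete" ('ph': 'X') event whose ts and dur are in microseconds relative to the start of the run, so the JSON written out for args.write_trace_to can be opened in a trace viewer. A sketch of the shape for a single passing test follows; every value below is invented for illustration, none comes from a real run.

# Illustrative only: the shape of the dict returned by _trace_from_results()
# for a single passing test. All values are made up.
example_trace = {
    'traceEvents': [{
        'name': 'package.module.TestCase.test_method',
        'dur': 12000,     # microseconds the test took
        'ts': 3500,       # microseconds after the run started
        'ph': 'X',        # "Complete" event
        'pid': 1234,      # worker process id
        'tid': 1,         # worker number
        'args': {
            'expected': ['Pass'], 'actual': 'Pass',
            'out': '', 'err': '', 'code': 0,
            'unexpected': False, 'flaky': False,
        },
    }],
    'otherData': {'foo': 'bar'},   # filled from args.metadata key=value pairs
}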
Example No. 9
0
class Runner(object):

    def __init__(self, host=None):
        self.args = None
        self.classifier = None
        self.cov = None
        self.context = None
        self.coverage_source = None
        self.host = host or Host()
        self.loader = unittest.loader.TestLoader()
        self.printer = None
        self.setup_fn = None
        self.stats = None
        self.teardown_fn = None
        self.top_level_dir = None
        self.win_multiprocessing = WinMultiprocessing.spawn

        # initialize self.args to the defaults.
        parser = ArgumentParser(self.host)
        self.parse_args(parser, [])

    def main(self, argv=None, **defaults):
        parser = ArgumentParser(self.host)
        self.parse_args(parser, argv, **defaults)
        if parser.exit_status is not None:
            return parser.exit_status

        try:
            ret, _, _ = self.run()
            return ret
        except KeyboardInterrupt:
            self.print_("interrupted, exiting", stream=self.host.stderr)
            return 130

    def parse_args(self, parser, argv, **defaults):
        for attrname in defaults:
            if not hasattr(self.args, attrname):
                parser.error("Unknown default argument name '%s'" % attrname,
                             bailout=False)
                return
        parser.set_defaults(**defaults)
        self.args = parser.parse_args(args=argv)
        if parser.exit_status is not None:
            return

    def print_(self, msg='', end='\n', stream=None):
        self.host.print_(msg, end, stream=stream)

    def run(self, test_set=None):

        ret = 0
        h = self.host

        if self.args.version:
            self.print_(VERSION)
            return ret, None, None

        should_spawn = self._check_win_multiprocessing()
        if should_spawn:
            return self._spawn(test_set)

        ret = self._set_up_runner()
        if ret:  # pragma: no cover
            return ret, None, None

        find_start = h.time()
        if self.cov:  # pragma: no cover
            self.cov.erase()
            self.cov.start()

        full_results = None
        result_set = ResultSet()

        if not test_set:
            ret, test_set = self.find_tests(self.args)
        find_end = h.time()

        if not ret:
            ret, full_results = self._run_tests(result_set, test_set)

        if self.cov:  # pragma: no cover
            self.cov.stop()
            self.cov.save()
        test_end = h.time()

        trace = self._trace_from_results(result_set)
        if full_results:
            self._summarize(full_results)
            self._write(self.args.write_full_results_to, full_results)
            upload_ret = self._upload(full_results)
            if not ret:
                ret = upload_ret
            reporting_end = h.time()
            self._add_trace_event(trace, 'run', find_start, reporting_end)
            self._add_trace_event(trace, 'discovery', find_start, find_end)
            self._add_trace_event(trace, 'testing', find_end, test_end)
            self._add_trace_event(trace, 'reporting', test_end, reporting_end)
            self._write(self.args.write_trace_to, trace)
            self.report_coverage()
        else:
            upload_ret = 0

        return ret, full_results, trace

    def _check_win_multiprocessing(self):
        wmp = self.win_multiprocessing

        ignore, importable, spawn = WinMultiprocessing.values

        if wmp not in WinMultiprocessing.values:
            raise ValueError('illegal value %s for win_multiprocessing' %
                             wmp)

        h = self.host
        if wmp == ignore and h.platform == 'win32':  # pragma: win32
            raise ValueError('Cannot use WinMultiprocessing.ignore for '
                             'win_multiprocessing when actually running '
                             'on Windows.')

        if wmp == ignore or self.args.jobs == 1:
            return False

        if wmp == importable:
            if self._main_is_importable():
                return False
            raise ValueError('The __main__ module (%s) '  # pragma: no cover
                             'may not be importable' %
                             sys.modules['__main__'].__file__)

        assert wmp == spawn
        return True

    def _main_is_importable(self):  # pragma: untested
        path = sys.modules['__main__'].__file__
        if not path:
            return False
        if path.endswith('.pyc'):
            path = path[:-1]
        if not path.endswith('.py'):
            return False
        if path.endswith('__main__.py'):
            # main modules are not directly importable.
            return False

        path = self.host.realpath(path)
        for d in sys.path:
            if path.startswith(self.host.realpath(d)):
                return True
        return False  # pragma: no cover

    def _spawn(self, test_set):
        # TODO: Handle picklable hooks, rather than requiring them to be None.
        assert self.classifier is None
        assert self.context is None
        assert self.setup_fn is None
        assert self.teardown_fn is None
        assert test_set is None
        h = self.host

        if self.args.write_trace_to:  # pragma: untested
            should_delete_trace = False
        else:
            should_delete_trace = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_trace_to = fp.name

        if self.args.write_full_results_to:  # pragma: untested
            should_delete_results = False
        else:
            should_delete_results = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_full_results_to = fp.name

        argv = ArgumentParser(h).argv_from_args(self.args)
        ret = h.call_inline([h.python_interpreter, path_to_file] + argv)

        trace = self._read_and_delete(self.args.write_trace_to,
                                      should_delete_trace)
        full_results = self._read_and_delete(self.args.write_full_results_to,
                                             should_delete_results)
        return ret, full_results, trace

    def _set_up_runner(self):
        h = self.host
        args = self.args

        self.stats = Stats(args.status_format, h.time, args.jobs)
        self.printer = Printer(
            self.print_, args.overwrite, args.terminal_width)

        self.top_level_dir = args.top_level_dir
        if not self.top_level_dir:
            if args.tests and h.isdir(args.tests[0]):
                # TODO: figure out what to do if multiple files are
                # specified and they don't all have the same correct
                # top level dir.
                if h.exists(h.dirname(args.tests[0]), '__init__.py'):
                    top_dir = h.dirname(args.tests[0])
                else:
                    top_dir = args.tests[0]
            else:
                top_dir = h.getcwd()
            while h.exists(top_dir, '__init__.py'):
                top_dir = h.dirname(top_dir)
            self.top_level_dir = h.realpath(top_dir)

        h.add_to_path(self.top_level_dir)

        for path in args.path:
            h.add_to_path(path)

        if args.coverage:  # pragma: no cover
            try:
                import coverage
            except ImportError:
                h.print_("Error: coverage is not installed")
                return 1
            source = self.args.coverage_source
            if not source:
                source = [self.top_level_dir] + self.args.path
            self.coverage_source = source
            self.cov = coverage.coverage(source=self.coverage_source,
                                         data_suffix=True)
            self.cov.erase()
        return 0

    def find_tests(self, args):
        test_set = TestSet()

        orig_skip = unittest.skip
        orig_skip_if = unittest.skipIf
        if args.all:
            unittest.skip = lambda reason: lambda x: x
            unittest.skipIf = lambda condition, reason: lambda x: x

        try:
            names = self._name_list_from_args(args)
            classifier = self.classifier or _default_classifier(args)

            for name in names:
                try:
                    self._add_tests_to_set(test_set, args.suffixes,
                                           self.top_level_dir, classifier,
                                           name)
                except (AttributeError, ImportError, SyntaxError) as e:
                    self.print_('Failed to load "%s": %s' % (name, e))
                    return 1, None
                except _AddTestsError as e:
                    self.print_(str(e))
                    return 1, None

            # TODO: Add support for discovering setupProcess/teardownProcess?

            test_set.parallel_tests = _sort_inputs(test_set.parallel_tests)
            test_set.isolated_tests = _sort_inputs(test_set.isolated_tests)
            test_set.tests_to_skip = _sort_inputs(test_set.tests_to_skip)
            return 0, test_set
        finally:
            unittest.skip = orig_skip
            unittest.skipIf = orig_skip_if

    def _name_list_from_args(self, args):
        if args.tests:
            names = args.tests
        elif args.file_list:
            if args.file_list == '-':
                s = self.host.stdin.read()
            else:
                s = self.host.read_text_file(args.file_list)
            names = [line.strip() for line in s.splitlines()]
        else:
            names = [self.top_level_dir]
        return names

    def _add_tests_to_set(self, test_set, suffixes, top_level_dir, classifier,
                          name):
        h = self.host
        loader = self.loader
        add_tests = _test_adder(test_set, classifier)

        if h.isfile(name):
            rpath = h.relpath(name, top_level_dir)
            if rpath.endswith('.py'):
                rpath = rpath[:-3]
            module = rpath.replace(h.sep, '.')
            add_tests(loader.loadTestsFromName(module))
        elif h.isdir(name):
            for suffix in suffixes:
                add_tests(loader.discover(name, suffix, top_level_dir))
        else:
            possible_dir = name.replace('.', h.sep)
            if h.isdir(top_level_dir, possible_dir):
                for suffix in suffixes:
                    path = h.join(top_level_dir, possible_dir)
                    suite = loader.discover(path, suffix, top_level_dir)
                    add_tests(suite)
            else:
                add_tests(loader.loadTestsFromName(name))

    def _run_tests(self, result_set, test_set):
        h = self.host
        if not test_set.parallel_tests and not test_set.isolated_tests:
            self.print_('No tests to run.')
            return 1, None

        all_tests = [ti.name for ti in
                     _sort_inputs(test_set.parallel_tests +
                                  test_set.isolated_tests +
                                  test_set.tests_to_skip)]

        if self.args.list_only:
            self.print_('\n'.join(all_tests))
            return 0, None

        self._run_one_set(self.stats, result_set, test_set)

        failed_tests = sorted(json_results.failed_test_names(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and failed_tests:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(failed_tests)
            tests_to_retry = TestSet(isolated_tests=list(failed_tests))
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            failed_tests = json_results.failed_test_names(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.args.metadata,
                                                      int(h.time()),
                                                      all_tests, result_set)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)

    def _run_one_set(self, stats, result_set, test_set):
        stats.total = (len(test_set.parallel_tests) +
                       len(test_set.isolated_tests) +
                       len(test_set.tests_to_skip))
        self._skip_tests(stats, result_set, test_set.tests_to_skip)
        self._run_list(stats, result_set, test_set.parallel_tests,
                       self.args.jobs)
        self._run_list(stats, result_set, test_set.isolated_tests, 1)

    def _skip_tests(self, stats, result_set, tests_to_skip):
        for test_input in tests_to_skip:
            last = self.host.time()
            stats.started += 1
            self._print_test_started(stats, test_input)
            now = self.host.time()
            result = Result(test_input.name, actual=ResultType.Skip,
                            started=last, took=(now - last), worker=0,
                            expected=[ResultType.Skip],
                            out=test_input.msg)
            result_set.add(result)
            stats.finished += 1
            self._print_test_finished(stats, result)

    def _run_list(self, stats, result_set, test_inputs, jobs):
        h = self.host
        running_jobs = set()

        jobs = min(len(test_inputs), jobs)
        if not jobs:
            return

        child = _Child(self)
        pool = make_pool(h, jobs, _run_one_test, child,
                         _setup_process, _teardown_process)
        try:
            while test_inputs or running_jobs:
                while test_inputs and (len(running_jobs) < self.args.jobs):
                    test_input = test_inputs.pop(0)
                    stats.started += 1
                    pool.send(test_input)
                    running_jobs.add(test_input.name)
                    self._print_test_started(stats, test_input)

                result = pool.get()
                running_jobs.remove(result.name)
                result_set.add(result)
                stats.finished += 1
                self._print_test_finished(stats, result)
            pool.close()
        finally:
            pool.join()

    def _print_test_started(self, stats, test_input):
        if not self.args.quiet and self.args.overwrite:
            self.update(stats.format() + test_input.name,
                        elide=(not self.args.verbose))

    def _print_test_finished(self, stats, result):
        stats.add_time()

        assert result.actual in [ResultType.Failure, ResultType.Skip,
                                 ResultType.Pass]
        if result.actual == ResultType.Failure:
            result_str = ' failed'
        elif result.actual == ResultType.Skip:
            result_str = ' was skipped'
        elif result.actual == ResultType.Pass:
            result_str = ' passed'

        if result.unexpected:
            result_str += ' unexpectedly'
        if self.args.timing:
            timing_str = ' %.4fs' % result.took
        else:
            timing_str = ''
        suffix = '%s%s' % (result_str, timing_str)
        out = result.out
        err = result.err
        if result.code:
            if out or err:
                suffix += ':\n'
            self.update(stats.format() + result.name + suffix, elide=False)
            for l in out.splitlines():
                self.print_('  %s' % l)
            for l in err.splitlines():
                self.print_('  %s' % l)
        elif not self.args.quiet:
            if self.args.verbose > 1 and (out or err):
                suffix += ':\n'
            self.update(stats.format() + result.name + suffix,
                        elide=(not self.args.verbose))
            if self.args.verbose > 1:
                for l in out.splitlines():
                    self.print_('  %s' % l)
                for l in err.splitlines():
                    self.print_('  %s' % l)
            if self.args.verbose:
                self.flush()

    def update(self, msg, elide):
        self.printer.update(msg, elide)

    def flush(self):
        self.printer.flush()

    def _summarize(self, full_results):
        num_tests = self.stats.finished
        num_failures = json_results.num_failures(full_results)

        if self.args.quiet and num_failures == 0:
            return

        if self.args.timing:
            timing_clause = ' in %.1fs' % (self.host.time() -
                                           self.stats.started_time)
        else:
            timing_clause = ''
        self.update('%d test%s run%s, %d failure%s.' %
                    (num_tests,
                     '' if num_tests == 1 else 's',
                     timing_clause,
                     num_failures,
                     '' if num_failures == 1 else 's'), elide=False)
        self.print_()

    def _read_and_delete(self, path, delete):
        h = self.host
        obj = None
        if h.exists(path):
            contents = h.read_text_file(path)
            if contents:
                obj = json.loads(contents)
            if delete:
                h.remove(path)
        return obj

    def _write(self, path, obj):
        if path:
            self.host.write_text_file(path, json.dumps(obj, indent=2) + '\n')

    def _upload(self, full_results):
        h = self.host
        if not self.args.test_results_server:
            return 0

        url, content_type, data = json_results.make_upload_request(
            self.args.test_results_server, self.args.builder_name,
            self.args.master_name, self.args.test_type,
            full_results)

        try:
            h.fetch(url, data, {'Content-Type': content_type})
            return 0
        except Exception as e:
            h.print_('Uploading the JSON results raised "%s"' % str(e))
            return 1

    def report_coverage(self):
        if self.args.coverage:  # pragma: no cover
            self.host.print_()
            import coverage
            cov = coverage.coverage(data_suffix=True)
            cov.combine()
            cov.report(show_missing=self.args.coverage_show_missing,
                       omit=self.args.coverage_omit)

    def _add_trace_event(self, trace, name, start, end):
        event = {
            'name': name,
            'ts': int((start - self.stats.started_time) * 1000000),
            'dur': int((end - start) * 1000000),
            'ph': 'X',
            'pid': self.host.getpid(),
            'tid': 0,
        }
        trace['traceEvents'].append(event)

    def _trace_from_results(self, result_set):
        trace = OrderedDict()
        trace['traceEvents'] = []
        trace['otherData'] = {}
        for m in self.args.metadata:
            k, v = m.split('=', 1)
            trace['otherData'][k] = v

        for result in result_set.results:
            started = int((result.started - self.stats.started_time) * 1000000)
            took = int(result.took * 1000000)
            event = OrderedDict()
            event['name'] = result.name
            event['dur'] = took
            event['ts'] = started
            event['ph'] = 'X'  # "Complete" events
            event['pid'] = result.pid
            event['tid'] = result.worker

            args = OrderedDict()
            args['expected'] = sorted(str(r) for r in result.expected)
            args['actual'] = str(result.actual)
            args['out'] = result.out
            args['err'] = result.err
            args['code'] = result.code
            args['unexpected'] = result.unexpected
            args['flaky'] = result.flaky
            event['args'] = args

            trace['traceEvents'].append(event)
        return trace
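Any of these Runner variants can also be driven in-process instead of through main(); the sketch below is hedged, since the examples never show this usage directly: it only relies on __init__ having parsed default arguments and on run() returning the three-tuple seen above.

# Hedged usage sketch: __init__ already parsed default args, so run() can be
# called directly. It returns (exit_code, full_results, trace), where
# full_results is the same JSON-able dict that _summarize() and _write() use.
runner = Runner()
exit_code, full_results, trace = runner.run()
if full_results is not None:
    print('unexpected failures:', json_results.num_failures(full_results))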
Example No. 10
0
class Runner(object):

    def __init__(self, host=None):
        self.args = None
        self.classifier = None
        self.cov = None
        self.context = None
        self.coverage_source = None
        self.host = host or Host()
        self.loader = unittest.loader.TestLoader()
        self.printer = None
        self.setup_fn = None
        self.stats = None
        self.teardown_fn = None
        self.top_level_dir = None
        self.top_level_dirs = []
        self.win_multiprocessing = WinMultiprocessing.spawn
        self.final_responses = []
        self.has_expectations = False
        self.expectations = None
        self.metadata = {}
        self.path_delimiter = json_results.DEFAULT_TEST_SEPARATOR

        # initialize self.args to the defaults.
        parser = ArgumentParser(self.host)
        self.parse_args(parser, [])

    def main(self, argv=None, **defaults):
        parser = ArgumentParser(self.host)
        self.parse_args(parser, argv, **defaults)
        if parser.exit_status is not None:
            return parser.exit_status

        try:
            ret, _, _ = self.run()
            return ret
        except KeyboardInterrupt:
            self.print_("interrupted, exiting", stream=self.host.stderr)
            return 130

    def parse_args(self, parser, argv, **defaults):
        for attrname in defaults:
            if not hasattr(self.args, attrname):
                parser.error("Unknown default argument name '%s'" % attrname,
                             bailout=False)
                return
        parser.set_defaults(**defaults)
        self.args = parser.parse_args(args=argv)
        if parser.exit_status is not None:
            return

    def print_(self, msg='', end='\n', stream=None):
        self.host.print_(msg, end, stream=stream)

    def run(self, test_set=None):

        ret = 0
        h = self.host

        if self.args.version:
            self.print_(VERSION)
            return ret, None, None

        should_spawn = self._check_win_multiprocessing()
        if should_spawn:
            return self._spawn(test_set)

        ret = self._set_up_runner()
        if ret:
            return ret, None, None

        find_start = h.time()
        if self.cov:  # pragma: no cover
            self.cov.erase()
            self.cov.start()

        full_results = None
        result_set = ResultSet()

        if not test_set:
            ret, test_set = self.find_tests(self.args)
        find_end = h.time()

        if not ret:
            self.stats.total = (len(test_set.parallel_tests) +
                                len(test_set.isolated_tests) +
                                len(test_set.tests_to_skip)) * self.args.repeat
            all_tests = [ti.name for ti in
                         _sort_inputs(test_set.parallel_tests +
                                      test_set.isolated_tests +
                                      test_set.tests_to_skip)]
            self.metadata = {tup[0]: tup[1]
                             for tup in
                             [md.split('=', 1) for md in self.args.metadata]}
            if self.args.test_name_prefix:
                self.metadata['test_name_prefix'] = self.args.test_name_prefix
            if self.args.tags:
                self.metadata['tags'] = self.args.tags
            if self.args.expectations_files:
                self.metadata['expectations_files'] = [
                    os.path.basename(p) for p in self.args.expectations_files]
            if self.args.list_only:
                self.print_('\n'.join(all_tests))
            else:
                for _ in range(self.args.repeat):
                    current_ret, full_results = self._run_tests(
                        result_set, test_set.copy(), all_tests)
                    ret = ret or current_ret

        if self.cov:  # pragma: no cover
            self.cov.stop()
            self.cov.save()
        test_end = h.time()

        trace = self._trace_from_results(result_set)
        if full_results:
            self._summarize(full_results)
            self._write(self.args.write_full_results_to, full_results)
            upload_ret = self._upload(full_results)
            if not ret:
                ret = upload_ret
            reporting_end = h.time()
            self._add_trace_event(trace, 'run', find_start, reporting_end)
            self._add_trace_event(trace, 'discovery', find_start, find_end)
            self._add_trace_event(trace, 'testing', find_end, test_end)
            self._add_trace_event(trace, 'reporting', test_end, reporting_end)
            self._write(self.args.write_trace_to, trace)
            self.report_coverage()
        else:
            upload_ret = 0

        return ret, full_results, trace

    def _check_win_multiprocessing(self):
        wmp = self.win_multiprocessing

        ignore, importable, spawn = WinMultiprocessing.values

        if wmp not in WinMultiprocessing.values:
            raise ValueError('illegal value %s for win_multiprocessing' %
                             wmp)

        h = self.host
        if wmp == ignore and h.platform == 'win32':  # pragma: win32
            raise ValueError('Cannot use WinMultiprocessing.ignore for '
                             'win_multiprocessing when actually running '
                             'on Windows.')

        if wmp == ignore or self.args.jobs == 1:
            return False

        if wmp == importable:
            if self._main_is_importable():
                return False
            raise ValueError('The __main__ module (%s) '  # pragma: no cover
                             'may not be importable' %
                             sys.modules['__main__'].__file__)

        assert wmp == spawn
        return True

    def _main_is_importable(self):  # pragma: untested
        path = sys.modules['__main__'].__file__
        if not path:
            return False
        if path.endswith('.pyc'):
            path = path[:-1]
        if not path.endswith('.py'):
            return False
        if path.endswith('__main__.py'):
            # main modules are not directly importable.
            return False

        path = self.host.realpath(path)
        for d in sys.path:
            if path.startswith(self.host.realpath(d)):
                return True
        return False  # pragma: no cover

    def _spawn(self, test_set):
        # TODO: Handle picklable hooks, rather than requiring them to be None.
        assert self.classifier is None
        assert self.context is None
        assert self.setup_fn is None
        assert self.teardown_fn is None
        assert test_set is None
        h = self.host

        if self.args.write_trace_to:  # pragma: untested
            should_delete_trace = False
        else:
            should_delete_trace = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_trace_to = fp.name

        if self.args.write_full_results_to:  # pragma: untested
            should_delete_results = False
        else:
            should_delete_results = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_full_results_to = fp.name

        argv = ArgumentParser(h).argv_from_args(self.args)
        ret = h.call_inline([h.python_interpreter, path_to_file] + argv)

        trace = self._read_and_delete(self.args.write_trace_to,
                                      should_delete_trace)
        full_results = self._read_and_delete(self.args.write_full_results_to,
                                             should_delete_results)
        return ret, full_results, trace

    def _set_up_runner(self):
        h = self.host
        args = self.args

        self.stats = Stats(args.status_format, h.time, args.jobs)
        self.printer = Printer(
            self.print_, args.overwrite, args.terminal_width)

        if self.args.top_level_dirs and self.args.top_level_dir:
            self.print_(
                'Cannot specify both --top-level-dir and --top-level-dirs',
                stream=h.stderr)
            return 1

        self.top_level_dirs = args.top_level_dirs
        if not self.top_level_dirs and args.top_level_dir:
            self.top_level_dirs = [args.top_level_dir]

        if not self.top_level_dirs:
            for test in [t for t in args.tests if h.exists(t)]:
                if h.isdir(test):
                    top_dir = test
                else:
                    top_dir = h.dirname(test)
                while h.exists(top_dir, '__init__.py'):
                    top_dir = h.dirname(top_dir)
                top_dir = h.realpath(top_dir)
                if not top_dir in self.top_level_dirs:
                    self.top_level_dirs.append(top_dir)
        if not self.top_level_dirs:
            top_dir = h.getcwd()
            while h.exists(top_dir, '__init__.py'):
                top_dir = h.dirname(top_dir)
            top_dir = h.realpath(top_dir)
            self.top_level_dirs.append(top_dir)

        if not self.top_level_dir and self.top_level_dirs:
            self.top_level_dir = self.top_level_dirs[0]

        for path in self.top_level_dirs:
            h.add_to_path(path)

        for path in args.path:
            h.add_to_path(path)

        if args.coverage:  # pragma: no cover
            try:
                import coverage
            except ImportError:
                self.print_('Error: coverage is not installed.')
                return 1

            source = self.args.coverage_source
            if not source:
                source = self.top_level_dirs + self.args.path
            self.coverage_source = source
            self.cov = coverage.coverage(source=self.coverage_source,
                                         data_suffix=True)
            self.cov.erase()

        if args.expectations_files:
            ret = self.parse_expectations()
            if ret:
                return ret
        elif args.tags:
            self.print_('Error: tags require expectations files.')
            return 1
        return 0

    def parse_expectations(self):
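        # Returns a non-zero status on error and falls through (returning
        # None) on success, which is why _set_up_runner() only checks
        # "if ret:" after calling this.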
        args = self.args

        if len(args.expectations_files) != 1:
            # TODO(crbug.com/835690): Fix this.
            self.print_(
                'Only a single expectation file is currently supported',
                stream=self.host.stderr)
            return 1
        contents = self.host.read_text_file(args.expectations_files[0])

        expectations = TestExpectations(set(args.tags))
        err, msg = expectations.parse_tagged_list(contents)
        if err:
            self.print_(msg, stream=self.host.stderr)
            return err

        self.has_expectations = True
        self.expectations = expectations

    def find_tests(self, args):
        test_set = TestSet(self.args.test_name_prefix)

        orig_skip = unittest.skip
        orig_skip_if = unittest.skipIf
        if args.all:
            unittest.skip = lambda reason: lambda x: x
            unittest.skipIf = lambda condition, reason: lambda x: x

        try:
            names = self._name_list_from_args(args)
            classifier = self.classifier or self.default_classifier

            for name in names:
                try:
                    self._add_tests_to_set(test_set, args.suffixes,
                                           self.top_level_dirs, classifier,
                                           name)
                except (AttributeError, ImportError, SyntaxError) as e:
                    ex_str = traceback.format_exc()
                    self.print_('Failed to load "%s" in find_tests: %s' %
                                (name, e))
                    self.print_('  %s' %
                                '\n  '.join(ex_str.splitlines()))
                    self.print_(ex_str)
                    return 1, None
                except _AddTestsError as e:
                    self.print_(str(e))
                    return 1, None

            # TODO: Add support for discovering setupProcess/teardownProcess?

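            # Shard by slicing the sorted lists: shard k takes every
            # total_shards-th entry starting at index k, so tests are dealt
            # out round-robin across shards.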
            shard_index = args.shard_index
            total_shards = args.total_shards
            assert total_shards >= 1
            assert shard_index >= 0 and shard_index < total_shards, (
                'shard_index (%d) must be >= 0 and < total_shards (%d)' %
                (shard_index, total_shards))
            test_set.parallel_tests = _sort_inputs(
                test_set.parallel_tests)[shard_index::total_shards]
            test_set.isolated_tests = _sort_inputs(
                test_set.isolated_tests)[shard_index::total_shards]
            test_set.tests_to_skip = _sort_inputs(
                test_set.tests_to_skip)[shard_index::total_shards]
            return 0, test_set
        finally:
            unittest.skip = orig_skip
            unittest.skipIf = orig_skip_if

    def _name_list_from_args(self, args):
        if args.tests:
            names = args.tests
        elif args.file_list:
            if args.file_list == '-':
                s = self.host.stdin.read()
            else:
                s = self.host.read_text_file(args.file_list)
            names = [line.strip() for line in s.splitlines()]
        else:
            names = self.top_level_dirs
        return names

    def _add_tests_to_set(self, test_set, suffixes, top_level_dirs, classifier,
                          name):
        h = self.host
        loader = self.loader
        add_tests = _test_adder(test_set, classifier)

        found = set()
        for d in top_level_dirs:
            if h.isfile(name):
                rpath = h.relpath(name, d)
                if rpath.startswith('..'):
                    continue
                if rpath.endswith('.py'):
                    rpath = rpath[:-3]
                module = rpath.replace(h.sep, '.')
                if module not in found:
                    found.add(module)
                    add_tests(loader.loadTestsFromName(module))
            elif h.isdir(name):
                rpath = h.relpath(name, d)
                if rpath.startswith('..'):
                    continue
                for suffix in suffixes:
                    if not name in found:
                        found.add(name + '/' + suffix)
                        add_tests(loader.discover(name, suffix, d))
            else:
                possible_dir = name.replace('.', h.sep)
                if h.isdir(d, possible_dir):
                    for suffix in suffixes:
                        path = h.join(d, possible_dir)
                        if not path in found:
                            found.add(path + '/' + suffix)
                            suite = loader.discover(path, suffix, d)
                            add_tests(suite)
                elif not name in found:
                    found.add(name)
                    add_tests(loader.loadTestsFromName(
                        self.args.test_name_prefix + name))

        # pylint: disable=no-member
        if hasattr(loader, 'errors') and loader.errors:  # pragma: python3
            # In Python3's version of unittest, loader failures get converted
            # into failed test cases, rather than raising exceptions. However,
            # the errors also get recorded so you can err out immediately.
            raise ImportError(loader.errors)

    def _run_tests(self, result_set, test_set, all_tests):
        h = self.host
        self.last_runs_retry_on_failure_tests = set()

        def get_tests_to_retry(results):
            # If the --retry-only-retry-on-failure-tests command line argument
            # is passed, then a set of test failures with the RetryOnFailure
            # expectation from the last run of tests will be returned. The
            # self.last_runs_retry_on_failure_tests will be set to an empty set
            # for the next run of tests. Otherwise all regressions from the
            # last run will be returned.
            if self.args.retry_only_retry_on_failure_tests:
                ret = self.last_runs_retry_on_failure_tests.copy()
                self.last_runs_retry_on_failure_tests = set()
                return ret
            else:
                return json_results.regressions(results)

        self._run_one_set(self.stats, result_set, test_set)

        tests_to_retry = sorted(get_tests_to_retry(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and tests_to_retry:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(tests_to_retry)
            test_set = TestSet(self.args.test_name_prefix)
            test_set.isolated_tests = [
                TestInput(name) for name in tests_to_retry]
            tests_to_retry = test_set
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            tests_to_retry = get_tests_to_retry(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.metadata,
                                                      int(h.time()),
                                                      all_tests, result_set,
                                                      self.path_delimiter)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)

    def _run_one_set(self, stats, result_set, test_set):
        self._skip_tests(stats, result_set, test_set.tests_to_skip)
        self._run_list(stats, result_set,
                       test_set.parallel_tests, self.args.jobs)
        self._run_list(stats, result_set,
                       test_set.isolated_tests, 1)

    def _skip_tests(self, stats, result_set, tests_to_skip):
        for test_input in tests_to_skip:
            last = self.host.time()
            stats.started += 1
            self._print_test_started(stats, test_input)
            now = self.host.time()
            result = Result(test_input.name, actual=ResultType.Skip,
                            started=last, took=(now - last), worker=0,
                            expected=[ResultType.Skip],
                            out=test_input.msg)
            result_set.add(result)
            stats.finished += 1
            self._print_test_finished(stats, result)

    def _run_list(self, stats, result_set, test_inputs, jobs):
        h = self.host
        running_jobs = set()

        jobs = min(len(test_inputs), jobs)
        if not jobs:
            return

        child = _Child(self)
        pool = make_pool(h, jobs, _run_one_test, child,
                         _setup_process, _teardown_process)
        try:
            while test_inputs or running_jobs:
                while test_inputs and (len(running_jobs) < jobs):
                    test_input = test_inputs.pop(0)
                    stats.started += 1
                    pool.send(test_input)
                    running_jobs.add(test_input.name)
                    self._print_test_started(stats, test_input)

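                # pool.get() waits for the next finished test; it hands back
                # the Result plus a flag saying whether the test carries a
                # RetryOnFailure expectation, which feeds
                # last_runs_retry_on_failure_tests for the retry pass above.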
                result, should_retry_on_failure = pool.get()
                if (self.args.retry_only_retry_on_failure_tests and
                    result.actual == ResultType.Failure and
                    should_retry_on_failure):
                    self.last_runs_retry_on_failure_tests.add(result.name)

                running_jobs.remove(result.name)
                result_set.add(result)
                stats.finished += 1
                self._print_test_finished(stats, result)
            pool.close()
        finally:
            self.final_responses.extend(pool.join())

    def _print_test_started(self, stats, test_input):
        if self.args.quiet:
            # Print nothing when --quiet was passed.
            return

        # If -vvv was passed, print when the test is queued to be run.
        # We don't actually know when the test is picked up to run, because
        # that is handled by the child process (where we can't easily
        # print things). Otherwise, only print when the test is started
        # if we know we can overwrite the line, so that we do not
        # get multiple lines of output as noise (in -vvv, we actually want
        # the noise).
        test_start_msg = stats.format() + test_input.name
        if self.args.verbose > 2:
            self.update(test_start_msg + ' queued', elide=False)
        if self.args.overwrite:
            self.update(test_start_msg, elide=(not self.args.verbose))

    def _print_test_finished(self, stats, result):
        stats.add_time()

        assert result.actual in [ResultType.Failure, ResultType.Skip,
                                 ResultType.Pass]
        if result.actual == ResultType.Failure:
            result_str = ' failed'
        elif result.actual == ResultType.Skip:
            result_str = ' was skipped'
        elif result.actual == ResultType.Pass:
            result_str = ' passed'

        if result.unexpected:
            result_str += ' unexpectedly'
        elif result.actual == ResultType.Failure:
            result_str += ' as expected'

        if self.args.timing:
            timing_str = ' %.4fs' % result.took
        else:
            timing_str = ''
        suffix = '%s%s' % (result_str, timing_str)
        out = result.out
        err = result.err
        if result.code:
            if out or err:
                suffix += ':\n'
            self.update(stats.format() + result.name + suffix, elide=False)
            for l in out.splitlines():
                self.print_('  %s' % l)
            for l in err.splitlines():
                self.print_('  %s' % l)
        elif not self.args.quiet:
            if self.args.verbose > 1 and (out or err):
                suffix += ':\n'
            self.update(stats.format() + result.name + suffix,
                        elide=(not self.args.verbose))
            if self.args.verbose > 1:
                for l in out.splitlines():
                    self.print_('  %s' % l)
                for l in err.splitlines():
                    self.print_('  %s' % l)
            if self.args.verbose:
                self.flush()

    def update(self, msg, elide):
        self.printer.update(msg, elide)

    def flush(self):
        self.printer.flush()

    def _summarize(self, full_results):
        num_passes = json_results.num_passes(full_results)
        num_failures = json_results.num_failures(full_results)
        num_skips = json_results.num_skips(full_results)

        if self.args.quiet and num_failures == 0:
            return

        if self.args.timing:
            timing_clause = ' in %.1fs' % (self.host.time() -
                                           self.stats.started_time)
        else:
            timing_clause = ''
        self.update('%d test%s passed%s, %d skipped, %d failure%s.' %
                    (num_passes,
                     '' if num_passes == 1 else 's',
                     timing_clause,
                     num_skips,
                     num_failures,
                     '' if num_failures == 1 else 's'), elide=False)
        self.print_()

    def _read_and_delete(self, path, delete):
        h = self.host
        obj = None
        if h.exists(path):
            contents = h.read_text_file(path)
            if contents:
                obj = json.loads(contents)
            if delete:
                h.remove(path)
        return obj

    def _write(self, path, obj):
        if path:
            self.host.write_text_file(path, json.dumps(obj, indent=2) + '\n')

    def _upload(self, full_results):
        h = self.host
        if not self.args.test_results_server:
            return 0

        url, content_type, data = json_results.make_upload_request(
            self.args.test_results_server, self.args.builder_name,
            self.args.master_name, self.args.test_type,
            full_results)

        try:
            h.fetch(url, data, {'Content-Type': content_type})
            return 0
        except Exception as e:
            h.print_('Uploading the JSON results raised "%s"' % str(e))
            return 1

    def report_coverage(self):
        if self.args.coverage:  # pragma: no cover
            self.host.print_()
            import coverage
            cov = coverage.coverage(data_suffix=True)
            cov.combine()
            cov.report(show_missing=self.args.coverage_show_missing,
                       omit=self.args.coverage_omit)
            if self.args.coverage_annotate:
                cov.annotate(omit=self.args.coverage_omit)

    def _add_trace_event(self, trace, name, start, end):
        event = {
            'name': name,
            'ts': int((start - self.stats.started_time) * 1000000),
            'dur': int((end - start) * 1000000),
            'ph': 'X',
            'pid': self.host.getpid(),
            'tid': 0,
        }
        trace['traceEvents'].append(event)

    def _trace_from_results(self, result_set):
        trace = OrderedDict()
        trace['traceEvents'] = []
        trace['otherData'] = {}

        if self.metadata:
            trace['otherData'] = self.metadata

        for result in result_set.results:
            started = int((result.started - self.stats.started_time) * 1000000)
            took = int(result.took * 1000000)
            event = OrderedDict()
            event['name'] = result.name
            event['dur'] = took
            event['ts'] = started
            event['ph'] = 'X'  # "Complete" events
            event['pid'] = result.pid
            event['tid'] = result.worker

            args = OrderedDict()
            args['expected'] = sorted(str(r) for r in result.expected)
            args['actual'] = str(result.actual)
            args['out'] = result.out
            args['err'] = result.err
            args['code'] = result.code
            args['unexpected'] = result.unexpected
            args['flaky'] = result.flaky
            event['args'] = args

            trace['traceEvents'].append(event)
        return trace

    def expectations_for(self, test_case):
        if self.has_expectations:
            return self.expectations.expectations_for(
                test_case.id()[len(self.args.test_name_prefix):])
        else:
            return (set([ResultType.Pass]), False)

    def default_classifier(self, test_set, test):
        if self.matches_filter(test):
            if self.should_skip(test):
                test_set.add_test_to_skip(test, 'skipped by request')
            elif self.should_isolate(test):
                test_set.add_test_to_run_isolated(test)
            else:
                test_set.add_test_to_run_in_parallel(test)

    def matches_filter(self, test_case):
        _validate_test_starts_with_prefix(
            self.args.test_name_prefix, test_case.id())
        test_name = test_case.id()[len(self.args.test_name_prefix):]
        return (not self.args.test_filter or any(
            fnmatch.fnmatch(test_name, glob)
            for glob in self.args.test_filter.split('::')))

    def should_isolate(self, test_case):
        _validate_test_starts_with_prefix(
            self.args.test_name_prefix, test_case.id())
        test_name = test_case.id()[len(self.args.test_name_prefix):]
        return any(fnmatch.fnmatch(test_name, glob)
                   for glob in self.args.isolate)

    def should_skip(self, test_case):
        _validate_test_starts_with_prefix(
            self.args.test_name_prefix, test_case.id())
        if self.args.all:
            return False
        test_name = test_case.id()[len(self.args.test_name_prefix):]
        if self.has_expectations:
            expected_results, _ = self.expectations.expectations_for(test_name)
        else:
            expected_results = set([ResultType.Pass])
        return (
            ResultType.Skip in expected_results or
            any(fnmatch.fnmatch(test_name, glob) for glob in self.args.skip))
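The filter, isolate, and skip checks above all follow the same pattern: strip args.test_name_prefix from test_case.id(), then match the remainder against shell-style globs with fnmatch (matches_filter additionally splits args.test_filter on '::' so several globs can be supplied at once). A small, self-contained illustration of that matching, using invented names:

# Illustration of the '::'-separated glob matching used by matches_filter();
# the filter string and test name here are invented for the example.
import fnmatch

test_filter = 'foo_test.FooTest.*::*_smoke'
name = 'foo_test.FooTest.test_add'
matches = any(fnmatch.fnmatch(name, glob)
              for glob in test_filter.split('::'))
# matches is True: the first glob covers every test in FooTest.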
Example No. 11
0
    def test_elide(self):
        pr = Printer(self.print_, False, 8)
        pr.update('hello world')
        pr.flush()
        self.assertEqual(self.out, ['h...d', '\n'])
Example No. 12
0
    def test_basic(self):
        pr = Printer(self.print_, False, 80)
        pr.update('foo')
        pr.flush()
        self.assertEqual(self.out, ['foo', '\n'])
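Both Printer tests assume a self.print_ helper on the test case that records output into self.out. A minimal fixture that would reproduce the asserted shapes is sketched below, under the assumption that update() prints its message with end='' and flush() emits only the line ending; the class name and that assumption are mine, not the examples'.

# Hypothetical fixture for the two Printer tests above. It records each
# print_ call as msg + end, which yields ['foo', '\n'] for update('foo')
# followed by flush() under the stated assumption. Printer itself is assumed
# to be importable from the module under test.
import unittest

class PrinterTestBase(unittest.TestCase):
    def setUp(self):
        self.out = []

    def print_(self, msg, end='\n'):
        self.out.append(msg + end)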
Example No. 13
0
class Runner(object):
    def __init__(self, host=None):
        self.args = None
        self.classifier = None
        self.cov = None
        self.context = None
        self.coverage_source = None
        self.host = host or Host()
        self.loader = unittest.loader.TestLoader()
        self.printer = None
        self.setup_fn = None
        self.stats = None
        self.teardown_fn = None
        self.top_level_dir = None
        self.win_multiprocessing = WinMultiprocessing.spawn
        self.final_responses = []

        # initialize self.args to the defaults.
        parser = ArgumentParser(self.host)
        self.parse_args(parser, [])

    def main(self, argv=None, **defaults):
        parser = ArgumentParser(self.host)
        self.parse_args(parser, argv, **defaults)
        if parser.exit_status is not None:
            return parser.exit_status

        try:
            ret, _, _ = self.run()
            return ret
        except KeyboardInterrupt:
            self.print_("interrupted, exiting", stream=self.host.stderr)
            return 130

    def parse_args(self, parser, argv, **defaults):
        for attrname in defaults:
            if not hasattr(self.args, attrname):
                parser.error("Unknown default argument name '%s'" % attrname, bailout=False)
                return
        parser.set_defaults(**defaults)
        self.args = parser.parse_args(args=argv)
        if parser.exit_status is not None:
            return

    def print_(self, msg="", end="\n", stream=None):
        self.host.print_(msg, end, stream=stream)

    def run(self, test_set=None):

        ret = 0
        h = self.host

        if self.args.version:
            self.print_(VERSION)
            return ret, None, None

        should_spawn = self._check_win_multiprocessing()
        if should_spawn:
            return self._spawn(test_set)

        ret = self._set_up_runner()
        if ret:  # pragma: no cover
            return ret, None, None

        find_start = h.time()
        if self.cov:  # pragma: no cover
            self.cov.erase()
            self.cov.start()

        full_results = None
        result_set = ResultSet()

        if not test_set:
            ret, test_set = self.find_tests(self.args)
        find_end = h.time()

        if not ret:
            ret, full_results = self._run_tests(result_set, test_set)

        if self.cov:  # pragma: no cover
            self.cov.stop()
            self.cov.save()
        test_end = h.time()

        trace = self._trace_from_results(result_set)
        if full_results:
            self._summarize(full_results)
            self._write(self.args.write_full_results_to, full_results)
            upload_ret = self._upload(full_results)
            if not ret:
                ret = upload_ret
            reporting_end = h.time()
            self._add_trace_event(trace, "run", find_start, reporting_end)
            self._add_trace_event(trace, "discovery", find_start, find_end)
            self._add_trace_event(trace, "testing", find_end, test_end)
            self._add_trace_event(trace, "reporting", test_end, reporting_end)
            self._write(self.args.write_trace_to, trace)
            self.report_coverage()
        else:
            upload_ret = 0

        return ret, full_results, trace

    def _check_win_multiprocessing(self):
        wmp = self.win_multiprocessing

        ignore, importable, spawn = WinMultiprocessing.values

        if wmp not in WinMultiprocessing.values:
            raise ValueError("illegal value %s for win_multiprocessing" % wmp)

        h = self.host
        if wmp == ignore and h.platform == "win32":  # pragma: win32
            raise ValueError(
                "Cannot use WinMultiprocessing.ignore for " "win_multiprocessing when actually running " "on Windows."
            )

        if wmp == ignore or self.args.jobs == 1:
            return False

        if wmp == importable:
            if self._main_is_importable():
                return False
            raise ValueError(
                "The __main__ module (%s) "  # pragma: no cover
                "may not be importable" % sys.modules["__main__"].__file__
            )

        assert wmp == spawn
        return True

    def _main_is_importable(self):  # pragma: untested
        path = sys.modules["__main__"].__file__
        if not path:
            return False
        if path.endswith(".pyc"):
            path = path[:-1]
        if not path.endswith(".py"):
            return False
        if path.endswith("__main__.py"):
            # main modules are not directly importable.
            return False

        path = self.host.realpath(path)
        for d in sys.path:
            if path.startswith(self.host.realpath(d)):
                return True
        return False  # pragma: no cover

    def _spawn(self, test_set):
        # TODO: Handle picklable hooks, rather than requiring them to be None.
        assert self.classifier is None
        assert self.context is None
        assert self.setup_fn is None
        assert self.teardown_fn is None
        assert test_set is None
        h = self.host

        if self.args.write_trace_to:  # pragma: untested
            should_delete_trace = False
        else:
            should_delete_trace = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_trace_to = fp.name

        if self.args.write_full_results_to:  # pragma: untested
            should_delete_results = False
        else:
            should_delete_results = True
            fp = h.mktempfile(delete=False)
            fp.close()
            self.args.write_full_results_to = fp.name

        argv = ArgumentParser(h).argv_from_args(self.args)
        ret = h.call_inline([h.python_interpreter, path_to_file] + argv)

        trace = self._read_and_delete(self.args.write_trace_to, should_delete_trace)
        full_results = self._read_and_delete(self.args.write_full_results_to, should_delete_results)
        return ret, full_results, trace

    def _set_up_runner(self):
        h = self.host
        args = self.args

        self.stats = Stats(args.status_format, h.time, args.jobs)
        self.printer = Printer(self.print_, args.overwrite, args.terminal_width)

        self.top_level_dir = args.top_level_dir
        if not self.top_level_dir:
            if args.tests and h.isdir(args.tests[0]):
                # TODO: figure out what to do if multiple files are
                # specified and they don't all have the same correct
                # top level dir.
                d = h.realpath(h.dirname(args.tests[0]))
                if h.exists(d, "__init__.py"):
                    top_dir = d
                else:
                    top_dir = args.tests[0]
            else:
                top_dir = h.getcwd()
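            # Walk up out of any enclosing package so that the top-level
            # directory ends up being the parent of the outermost package
            # containing the tests.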
            while h.exists(top_dir, "__init__.py"):
                top_dir = h.dirname(top_dir)
            self.top_level_dir = h.realpath(top_dir)

        h.add_to_path(self.top_level_dir)

        for path in args.path:
            h.add_to_path(path)

        if args.coverage:  # pragma: no cover
            try:
                import coverage
            except ImportError:
                h.print_("Error: coverage is not installed")
                return 1
            source = self.args.coverage_source
            if not source:
                source = [self.top_level_dir] + self.args.path
            self.coverage_source = source
            self.cov = coverage.coverage(source=self.coverage_source, data_suffix=True)
            self.cov.erase()
        return 0

    def find_tests(self, args):
        test_set = TestSet()

        orig_skip = unittest.skip
        orig_skip_if = unittest.skipIf
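        # With --all, temporarily replace the skip decorators with no-ops so
        # that otherwise-skipped tests are collected and run; the originals
        # are restored in the finally block below.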
        if args.all:
            unittest.skip = lambda reason: lambda x: x
            unittest.skipIf = lambda condition, reason: lambda x: x

        try:
            names = self._name_list_from_args(args)
            classifier = self.classifier or _default_classifier(args)

            for name in names:
                try:
                    self._add_tests_to_set(test_set, args.suffixes, self.top_level_dir, classifier, name)
                except (AttributeError, ImportError, SyntaxError) as e:
                    ex_str = traceback.format_exc()
                    self.print_('Failed to load "%s" in find_tests: %s' % (name, e))
                    self.print_("  %s" % "\n  ".join(ex_str.splitlines()))
                    self.print_(ex_str)
                    return 1, None
                except _AddTestsError as e:
                    self.print_(str(e))
                    return 1, None

            # TODO: Add support for discovering setupProcess/teardownProcess?

            shard_index = args.shard_index
            total_shards = args.total_shards
            assert total_shards >= 1
            assert (
                shard_index >= 0 and shard_index < total_shards
            ), "shard_index (%d) must be >= 0 and < total_shards (%d)" % (shard_index, total_shards)
            test_set.parallel_tests = _sort_inputs(test_set.parallel_tests)[shard_index::total_shards]
            test_set.isolated_tests = _sort_inputs(test_set.isolated_tests)[shard_index::total_shards]
            test_set.tests_to_skip = _sort_inputs(test_set.tests_to_skip)[shard_index::total_shards]
            return 0, test_set
        finally:
            unittest.skip = orig_skip
            unittest.skipIf = orig_skip_if

    def _name_list_from_args(self, args):
        if args.tests:
            names = args.tests
        elif args.file_list:
            if args.file_list == "-":
                s = self.host.stdin.read()
            else:
                s = self.host.read_text_file(args.file_list)
            names = [line.strip() for line in s.splitlines()]
        else:
            names = [self.top_level_dir]
        return names

    def _add_tests_to_set(self, test_set, suffixes, top_level_dir, classifier, name):
        h = self.host
        loader = self.loader
        add_tests = _test_adder(test_set, classifier)

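        # A file path becomes a dotted module name relative to top_level_dir;
        # a directory is handed to discover() for each suffix; anything else
        # is either discovered as a package directory (if the dotted name maps
        # to one under top_level_dir) or loaded as a dotted test name.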
        if h.isfile(name):
            rpath = h.relpath(name, top_level_dir)
            if rpath.endswith(".py"):
                rpath = rpath[:-3]
            module = rpath.replace(h.sep, ".")
            add_tests(loader.loadTestsFromName(module))
        elif h.isdir(name):
            for suffix in suffixes:
                add_tests(loader.discover(name, suffix, top_level_dir))
        else:
            possible_dir = name.replace(".", h.sep)
            if h.isdir(top_level_dir, possible_dir):
                for suffix in suffixes:
                    path = h.join(top_level_dir, possible_dir)
                    suite = loader.discover(path, suffix, top_level_dir)
                    add_tests(suite)
            else:
                add_tests(loader.loadTestsFromName(name))

        # pylint: disable=no-member
        if hasattr(loader, "errors") and loader.errors:  # pragma: python3
            # In Python3's version of unittest, loader failures get converted
            # into failed test cases, rather than raising exceptions. However,
            # the errors also get recorded so you can err out immediately.
            raise ImportError(loader.errors)

    def _run_tests(self, result_set, test_set):
        h = self.host
        if not test_set.parallel_tests and not test_set.isolated_tests:
            self.print_("No tests to run.")
            return 1, None

        all_tests = [
            ti.name for ti in _sort_inputs(test_set.parallel_tests + test_set.isolated_tests + test_set.tests_to_skip)
        ]

        if self.args.list_only:
            self.print_("\n".join(all_tests))
            return 0, None

        self._run_one_set(self.stats, result_set, test_set)

        failed_tests = sorted(json_results.failed_test_names(result_set))
        retry_limit = self.args.retry_limit

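        # Re-run failures serially as isolated tests, up to --retry-limit
        # times. On the first retry pass, line overwriting is disabled and
        # verbosity is capped so the retry output stays readable.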
        while retry_limit and failed_tests:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_("")
            self.print_(
                "Retrying failed tests (attempt #%d of %d)..."
                % (self.args.retry_limit - retry_limit + 1, self.args.retry_limit)
            )
            self.print_("")

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(failed_tests)
            tests_to_retry = TestSet(isolated_tests=list(failed_tests))
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            failed_tests = json_results.failed_test_names(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_("")

        full_results = json_results.make_full_results(self.args.metadata, int(h.time()), all_tests, result_set)

        return (json_results.exit_code_from_full_results(full_results), full_results)

    def _run_one_set(self, stats, result_set, test_set):
        stats.total = len(test_set.parallel_tests) + len(test_set.isolated_tests) + len(test_set.tests_to_skip)
        self._skip_tests(stats, result_set, test_set.tests_to_skip)
        self._run_list(stats, result_set, test_set.parallel_tests, self.args.jobs)
        self._run_list(stats, result_set, test_set.isolated_tests, 1)

    def _skip_tests(self, stats, result_set, tests_to_skip):
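        # Skipped tests never reach a worker; record a synthetic Result marked
        # as an expected skip so they still show up in the stats and in the
        # JSON results.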
        for test_input in tests_to_skip:
            last = self.host.time()
            stats.started += 1
            self._print_test_started(stats, test_input)
            now = self.host.time()
            result = Result(
                test_input.name,
                actual=ResultType.Skip,
                started=last,
                took=(now - last),
                worker=0,
                expected=[ResultType.Skip],
                out=test_input.msg,
            )
            result_set.add(result)
            stats.finished += 1
            self._print_test_finished(stats, result)

    def _run_list(self, stats, result_set, test_inputs, jobs):
        h = self.host
        running_jobs = set()

        jobs = min(len(test_inputs), jobs)
        if not jobs:
            return

        child = _Child(self)
        pool = make_pool(h, jobs, _run_one_test, child, _setup_process, _teardown_process)
        try:
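            # Producer/consumer loop: keep sending tests until the number of
            # in-flight jobs reaches the limit, then block on one finished
            # result at a time until everything has drained.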
            while test_inputs or running_jobs:
                while test_inputs and (len(running_jobs) < self.args.jobs):
                    test_input = test_inputs.pop(0)
                    stats.started += 1
                    pool.send(test_input)
                    running_jobs.add(test_input.name)
                    self._print_test_started(stats, test_input)

                result = pool.get()
                running_jobs.remove(result.name)
                result_set.add(result)
                stats.finished += 1
                self._print_test_finished(stats, result)
            pool.close()
        finally:
            self.final_responses.extend(pool.join())

    def _print_test_started(self, stats, test_input):
        if self.args.quiet:
            # Print nothing when --quiet was passed.
            return

        # If -vvv was passed, print when the test is queued to be run.
        # We don't actually know when the test is picked up to run, because
        # that is handled by the child process (where we can't easily print
        # things). Otherwise, only print when the test is started if we know
        # we can overwrite the line, so that we don't get multiple lines of
        # output as noise (with -vvv, we actually want the noise).
        test_start_msg = stats.format() + test_input.name
        if self.args.verbose > 2:
            self.update(test_start_msg + " queued", elide=False)
        if self.args.overwrite:
            self.update(test_start_msg, elide=(not self.args.verbose))

    def _print_test_finished(self, stats, result):
        stats.add_time()

        assert result.actual in [ResultType.Failure, ResultType.Skip, ResultType.Pass]
        if result.actual == ResultType.Failure:
            result_str = " failed"
        elif result.actual == ResultType.Skip:
            result_str = " was skipped"
        elif result.actual == ResultType.Pass:
            result_str = " passed"

        if result.unexpected:
            result_str += " unexpectedly"
        if self.args.timing:
            timing_str = " %.4fs" % result.took
        else:
            timing_str = ""
        suffix = "%s%s" % (result_str, timing_str)
        out = result.out
        err = result.err
        if result.code:
            if out or err:
                suffix += ":\n"
            self.update(stats.format() + result.name + suffix, elide=False)
            for l in out.splitlines():
                self.print_("  %s" % l)
            for l in err.splitlines():
                self.print_("  %s" % l)
        elif not self.args.quiet:
            if self.args.verbose > 1 and (out or err):
                suffix += ":\n"
            self.update(stats.format() + result.name + suffix, elide=(not self.args.verbose))
            if self.args.verbose > 1:
                for l in out.splitlines():
                    self.print_("  %s" % l)
                for l in err.splitlines():
                    self.print_("  %s" % l)
            if self.args.verbose:
                self.flush()

    def update(self, msg, elide):
        self.printer.update(msg, elide)

    def flush(self):
        self.printer.flush()

    def _summarize(self, full_results):
        num_tests = self.stats.finished
        num_failures = json_results.num_failures(full_results)

        if self.args.quiet and num_failures == 0:
            return

        if self.args.timing:
            timing_clause = " in %.1fs" % (self.host.time() - self.stats.started_time)
        else:
            timing_clause = ""
        self.update(
            "%d test%s run%s, %d failure%s."
            % (num_tests, "" if num_tests == 1 else "s", timing_clause, num_failures, "" if num_failures == 1 else "s"),
            elide=False,
        )
        self.print_()

    def _read_and_delete(self, path, delete):
        h = self.host
        obj = None
        if h.exists(path):
            contents = h.read_text_file(path)
            if contents:
                obj = json.loads(contents)
            if delete:
                h.remove(path)
        return obj

    def _write(self, path, obj):
        if path:
            self.host.write_text_file(path, json.dumps(obj, indent=2) + "\n")

    def _upload(self, full_results):
        h = self.host
        if not self.args.test_results_server:
            return 0

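        # Build the upload request from the full results and send it; any
        # exception is reported and turned into a nonzero return value.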
        url, content_type, data = json_results.make_upload_request(
            self.args.test_results_server,
            self.args.builder_name,
            self.args.master_name,
            self.args.test_type,
            full_results,
        )

        try:
            h.fetch(url, data, {"Content-Type": content_type})
            return 0
        except Exception as e:
            h.print_('Uploading the JSON results raised "%s"' % str(e))
            return 1

    def report_coverage(self):
        if self.args.coverage:  # pragma: no cover
            self.host.print_()
            import coverage

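            # Each process wrote its own coverage data file (data_suffix=True),
            # so combine() merges them all before the report is produced.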
            cov = coverage.coverage(data_suffix=True)
            cov.combine()
            cov.report(show_missing=self.args.coverage_show_missing, omit=self.args.coverage_omit)
            if self.args.coverage_annotate:
                cov.annotate(omit=self.args.coverage_omit)

    def _add_trace_event(self, trace, name, start, end):
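        # Emit a Chrome Trace Event Format "complete" event ('ph': 'X'):
        # timestamps and durations are in microseconds relative to the start
        # of the run, so the output can be loaded in a trace viewer such as
        # chrome://tracing.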
        event = {
            "name": name,
            "ts": int((start - self.stats.started_time) * 1000000),
            "dur": int((end - start) * 1000000),
            "ph": "X",
            "pid": self.host.getpid(),
            "tid": 0,
        }
        trace["traceEvents"].append(event)

    def _trace_from_results(self, result_set):
        trace = OrderedDict()
        trace["traceEvents"] = []
        trace["otherData"] = {}
        for m in self.args.metadata:
            k, v = m.split("=")
            trace["otherData"][k] = v

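        # One complete ('X') event per test result; the worker id is used as
        # the tid so each worker gets its own row in the trace viewer, and the
        # expected/actual results and captured output ride along in 'args'.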
        for result in result_set.results:
            started = int((result.started - self.stats.started_time) * 1000000)
            took = int(result.took * 1000000)
            event = OrderedDict()
            event["name"] = result.name
            event["dur"] = took
            event["ts"] = started
            event["ph"] = "X"  # "Complete" events
            event["pid"] = result.pid
            event["tid"] = result.worker

            args = OrderedDict()
            args["expected"] = sorted(str(r) for r in result.expected)
            args["actual"] = str(result.actual)
            args["out"] = result.out
            args["err"] = result.err
            args["code"] = result.code
            args["unexpected"] = result.unexpected
            args["flaky"] = result.flaky
            event["args"] = args

            trace["traceEvents"].append(event)
        return trace