Example #1
    def test_current_rate(self):
        times = [0.0, 0.1, 0.2]
        s = Stats("[%c]", lambda: times.pop(0), 1)
        self.assertEqual(s.format(), "[-]")
        s.add_time()
        s.add_time()
        self.assertEqual(s.format(), "[ 10.0]")
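
The test above pins down the '%c' (current rate) escape: with fewer than two recorded timestamps it prints '-', otherwise it prints completions per second over a sliding window of at most size + 1 timestamps. Below is a minimal sketch of a Stats class that would satisfy just this test; the real Stats these examples exercise supports many more escapes and may differ in detail.

class Stats(object):
    """Minimal sketch: only the '%c' escape exercised above."""

    def __init__(self, status_format, time_fn, size):
        self.fmt = status_format
        self.time_fn = time_fn
        self.size = size
        self.times = []
        # The test's fake clock feeds one timestamp to the constructor.
        self.started_time = time_fn()

    def add_time(self):
        # Keep a bounded window so the rate reflects recent progress;
        # with size=1 the window holds two timestamps, one rate sample.
        if len(self.times) > self.size:
            self.times.pop(0)
        self.times.append(self.time_fn())

    def format(self):
        out = self.fmt
        if '%c' in out:
            if len(self.times) > 1 and self.times[-1] > self.times[0]:
                rate = ((len(self.times) - 1) /
                        (self.times[-1] - self.times[0]))
                out = out.replace('%c', '%5.1f' % rate)
            else:
                out = out.replace('%c', '-')
        return out
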
Example #2
    def _run_tests(self, result_set, test_set, all_tests):
        h = self.host
        self.last_runs_retry_on_failure_tests = set()

        def get_tests_to_retry(results):
            # If the --retry-only-retry-on-failure-tests command line
            # argument is passed, only the failures from tests carrying the
            # RetryOnFailure expectation in the last run are returned, and
            # self.last_runs_retry_on_failure_tests is reset to an empty set
            # for the next run. Otherwise, all regressions from the last run
            # are returned.
            if self.args.retry_only_retry_on_failure_tests:
                ret = self.last_runs_retry_on_failure_tests.copy()
                self.last_runs_retry_on_failure_tests = set()
                return ret
            else:
                return json_results.regressions(results)

        self._run_one_set(self.stats, result_set, test_set)

        tests_to_retry = sorted(get_tests_to_retry(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and tests_to_retry:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(tests_to_retry)
            test_set = TestSet(self.args.test_name_prefix)
            for name in tests_to_retry:
                test_set.add_test_to_run_isolated(
                    list(
                        self.loader.loadTestsFromName(
                            self.args.test_name_prefix + name))[0])
            tests_to_retry = test_set
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            tests_to_retry = get_tests_to_retry(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.args.metadata,
                                                      int(h.time()), all_tests,
                                                      result_set)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)
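
This and the later _run_tests variants in this listing share the same retry skeleton: run the full set once, collect the names worth retrying, then re-run only those tests (isolated, with a single job) until either the retry budget or the failure list is exhausted, appending each pass's results to the first pass's result set. Stripped of the runner-specific plumbing, the control flow is roughly the sketch below; the names are illustrative, not the runner's API.

def run_with_retries(run_once, get_failures, retry_limit):
    # run_once(tests) executes one pass and returns its results;
    # get_failures(results) returns the test names worth retrying.
    all_results = []
    results = run_once(None)  # None means "the full initial test set"
    all_results.append(results)
    to_retry = sorted(get_failures(results))

    attempts_left = retry_limit
    while attempts_left and to_retry:
        attempt = retry_limit - attempts_left + 1
        print('Retrying failed tests (attempt #%d of %d)...'
              % (attempt, retry_limit))
        results = run_once(to_retry)
        all_results.append(results)
        to_retry = get_failures(results)
        attempts_left -= 1
    return all_results
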
Example #3
    def test_elapsed_time(self):
        times = [0.0, 0.4]
        s = Stats("[%e]", lambda: times.pop(0), 32)
        self.assertEqual(s.format(), "[0.400]")

        s = Stats("[%e]", lambda: 0, 32)
        self.assertEqual(s.format(), "[0.000]")
Example #4
    def _run_tests(self, result_set, test_set, all_tests):
        h = self.host
        self.last_runs_retry_on_failure_tests = set()

        def get_tests_to_retry(results):
            # If the --retry-only-retry-on-failure-tests command line
            # argument is passed, only the failures from tests carrying the
            # RetryOnFailure expectation in the last run are returned, and
            # self.last_runs_retry_on_failure_tests is reset to an empty set
            # for the next run. Otherwise, all regressions from the last run
            # are returned.
            if self.args.retry_only_retry_on_failure_tests:
                ret = self.last_runs_retry_on_failure_tests.copy()
                self.last_runs_retry_on_failure_tests = set()
                return ret
            else:
                return json_results.regressions(results)

        self._run_one_set(self.stats, result_set, test_set)

        tests_to_retry = sorted(get_tests_to_retry(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and tests_to_retry:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(tests_to_retry)
            test_set = TestSet(self.args.test_name_prefix)
            test_set.isolated_tests = [
                TestInput(name) for name in tests_to_retry]
            tests_to_retry = test_set
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            tests_to_retry = get_tests_to_retry(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.metadata,
                                                      int(h.time()),
                                                      all_tests, result_set,
                                                      self.path_delimiter)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)
Example #5
    def test_edges(self):
        s = Stats('[%s/%f/%t/%r/%p]', lambda: 0, 32)
        self.assertEqual(s.format(), '[0/0/0/0/-]')
        s.started = 3
        s.total = 5
        s.finished = 1
        self.assertEqual(s.format(), '[3/1/5/2/ 60.0]')

        s.started = 5
        s.finished = 5
        self.assertEqual(s.format(), '[5/5/5/0/100.0]')
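
test_edges fixes the meaning of the counter escapes: '%s' is started, '%f' finished, '%t' total, '%r' remaining (total - started), and '%p' the percentage of tests started, formatted '%5.1f', or '-' while the total is zero. A sketch of just that formatting, enough to satisfy the assertions above:

def format_counters(fmt, started, finished, total):
    # Counter escapes as pinned down by test_edges (a sketch; the
    # real format() folds these in with the timing escapes).
    percent = '%5.1f' % (started * 100.0 / total) if total else '-'
    return (fmt
            .replace('%s', str(started))
            .replace('%f', str(finished))
            .replace('%t', str(total))
            .replace('%r', str(total - started))
            .replace('%p', percent))
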
Example #6
    def _run_tests(self, result_set, test_set):
        h = self.host
        if not test_set.parallel_tests and not test_set.isolated_tests:
            self.print_('No tests to run.')
            return 1, None

        all_tests = [
            ti.name for ti in _sort_inputs(test_set.parallel_tests +
                                           test_set.isolated_tests +
                                           test_set.tests_to_skip)
        ]

        if self.args.list_only:
            self.print_('\n'.join(all_tests))
            return 0, None

        self._run_one_set(self.stats, result_set, test_set)

        failed_tests = sorted(json_results.failed_test_names(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and failed_tests:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(failed_tests)
            tests_to_retry = TestSet(isolated_tests=list(failed_tests))
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            failed_tests = json_results.failed_test_names(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.args.metadata,
                                                      int(h.time()), all_tests,
                                                      result_set)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)
Example #7
    def _run_tests(self, result_set, test_set):
        h = self.host
        if not test_set.parallel_tests and not test_set.isolated_tests:
            self.print_('No tests to run.')
            return 1, None

        all_tests = [ti.name for ti in
                     _sort_inputs(test_set.parallel_tests +
                                  test_set.isolated_tests +
                                  test_set.tests_to_skip)]

        if self.args.list_only:
            self.print_('\n'.join(all_tests))
            return 0, None

        self._run_one_set(self.stats, result_set, test_set)

        failed_tests = sorted(json_results.failed_test_names(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and failed_tests:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(failed_tests)
            tests_to_retry = TestSet(isolated_tests=list(failed_tests))
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            failed_tests = json_results.failed_test_names(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.args.metadata,
                                                      int(h.time()),
                                                      all_tests, result_set)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)
Example #8
    def test_edges(self):
        s = Stats("[%s/%f/%t/%r/%p]", lambda: 0, 32)
        self.assertEqual(s.format(), "[0/0/0/0/-]")
        s.started = 3
        s.total = 5
        s.finished = 1
        self.assertEqual(s.format(), "[3/1/5/2/ 60.0]")

        s.started = 5
        s.finished = 5
        self.assertEqual(s.format(), "[5/5/5/0/100.0]")
Example #9
    def _set_up_runner(self):
        h = self.host
        args = self.args

        self.stats = Stats(args.status_format, h.time, args.jobs)
        self.printer = Printer(self.print_, args.overwrite,
                               args.terminal_width)

        if self.args.top_level_dirs and self.args.top_level_dir:
            self.print_(
                'Cannot specify both --top-level-dir and --top-level-dirs',
                stream=h.stderr)
            return 1

        self.top_level_dirs = args.top_level_dirs
        if not self.top_level_dirs and args.top_level_dir:
            self.top_level_dirs = [args.top_level_dir]

        if not self.top_level_dirs:
            for test in [t for t in args.tests if h.exists(t)]:
                if h.isdir(test):
                    top_dir = test
                else:
                    top_dir = h.dirname(test)
                while h.exists(top_dir, '__init__.py'):
                    top_dir = h.dirname(top_dir)
                top_dir = h.realpath(top_dir)
                if top_dir not in self.top_level_dirs:
                    self.top_level_dirs.append(top_dir)
        if not self.top_level_dirs:
            top_dir = h.getcwd()
            while h.exists(top_dir, '__init__.py'):
                top_dir = h.dirname(top_dir)
            top_dir = h.realpath(top_dir)
            self.top_level_dirs.append(top_dir)

        if not self.top_level_dir and self.top_level_dirs:
            self.top_level_dir = self.top_level_dirs[0]

        for path in self.top_level_dirs:
            h.add_to_path(path)

        for path in args.path:
            h.add_to_path(path)

        if args.coverage:  # pragma: no cover
            try:
                import coverage
            except ImportError:
                self.print_('Error: coverage is not installed.')
                return 1

            source = self.args.coverage_source
            if not source:
                source = self.top_level_dirs + self.args.path
            self.coverage_source = source
            self.cov = coverage.coverage(source=self.coverage_source,
                                         data_suffix=True)
            self.cov.erase()
        return 0
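
The directory walks in the middle of _set_up_runner all apply one rule: starting from a test file or directory, climb upward while the current directory still contains an __init__.py, so the chosen top-level dir is the first directory above the package root, which then goes on the import path. The same rule as a standalone helper, sketched with os.path (the host object h in the code above wraps the filesystem so tests can fake it):

import os


def find_top_level_dir(start):
    # Walk upward past package directories: the first ancestor
    # without an __init__.py is the import root.
    top_dir = start if os.path.isdir(start) else os.path.dirname(start)
    while os.path.exists(os.path.join(top_dir, '__init__.py')):
        top_dir = os.path.dirname(top_dir)
    return os.path.realpath(top_dir)
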
Example #10
    def test_overall_rate(self):
        times = [0, 0, 5]
        s = Stats('[%o]', lambda: times.pop(0), 32)
        self.assertEqual(s.format(), '[-]')
        s.started = 3
        s.finished = 1
        s.total = 5
        self.assertEqual(s.format(), '[  0.2]')
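
From this test, '%o' is the overall completion rate, finished / (now - started_time), shown as '-' until the clock has advanced and at least one test has finished. With the fake clock above, the constructor consumes 0 (the start time), the first format() consumes 0 (no time has elapsed, hence '-'), and the second consumes 5, giving 1 finished / 5 seconds = '  0.2'. As a sketch:

def overall_rate(finished, started_time, now):
    # '%o' as pinned down by test_overall_rate (a sketch).
    if now > started_time and finished > 0:
        return '%5.1f' % (finished * 1.0 / (now - started_time))
    return '-'
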
Example #11
    def test_current_rate(self):
        times = [0.0, 0.1, 0.2]
        s = Stats('[%c]', lambda: times.pop(0), 1)
        self.assertEqual(s.format(), '[-]')
        s.add_time()
        s.add_time()
        self.assertEqual(s.format(), '[ 10.0]')
Example #12
    def test_elapsed_time(self):
        times = [0.0, 0.4]
        s = Stats('[%e]', lambda: times.pop(0), 32)
        self.assertEqual(s.format(), '[0.400]')

        s = Stats('[%e]', lambda: 0, 32)
        self.assertEqual(s.format(), '[0.000]')
Example #13
    def _run_tests(self, result_set, test_set, all_tests):
        h = self.host

        self._run_one_set(self.stats, result_set, test_set)

        failed_tests = sorted(json_results.failed_test_names(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and failed_tests:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(failed_tests)
            tests_to_retry = TestSet(isolated_tests=list(failed_tests))
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            failed_tests = json_results.failed_test_names(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.args.metadata,
                                                      int(h.time()), all_tests,
                                                      result_set)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)
Example #14
    def test_overall_rate(self):
        times = [0, 0, 5]
        s = Stats("[%o]", lambda: times.pop(0), 32)
        self.assertEqual(s.format(), "[-]")
        s.started = 3
        s.finished = 1
        s.total = 5
        self.assertEqual(s.format(), "[  0.2]")
Example #15
    def _set_up_runner(self):
        h = self.host
        args = self.args

        self.stats = Stats(args.status_format, h.time, args.jobs)
        self.printer = Printer(self.print_, args.overwrite,
                               args.terminal_width)

        self.top_level_dir = args.top_level_dir
        if not self.top_level_dir:
            if args.tests and h.isdir(args.tests[0]):
                # TODO: figure out what to do if multiple files are
                # specified and they don't all have the same correct
                # top level dir.
                d = h.realpath(h.dirname(args.tests[0]))
                if h.exists(d, '__init__.py'):
                    top_dir = d
                else:
                    top_dir = args.tests[0]
            else:
                top_dir = h.getcwd()
            while h.exists(top_dir, '__init__.py'):
                top_dir = h.dirname(top_dir)
            self.top_level_dir = h.realpath(top_dir)

        h.add_to_path(self.top_level_dir)

        for path in args.path:
            h.add_to_path(path)

        if args.coverage:  # pragma: no cover
            try:
                import coverage
            except ImportError:
                h.print_("Error: coverage is not installed")
                return 1
            source = self.args.coverage_source
            if not source:
                source = [self.top_level_dir] + self.args.path
            self.coverage_source = source
            self.cov = coverage.coverage(source=self.coverage_source,
                                         data_suffix=True)
            self.cov.erase()
        return 0
Example #16
    def _run_tests(self, result_set, test_set, all_tests):
        h = self.host
        self.last_runs_retry_on_failure_tests = set()

        def get_tests_to_retry(results):
            # If the --retry-only-retry-on-failure-tests command line
            # argument is passed, only the failures from tests carrying the
            # RetryOnFailure expectation in the last run are returned, and
            # self.last_runs_retry_on_failure_tests is reset to an empty set
            # for the next run. Otherwise, all regressions from the last run
            # are returned.
            if self.args.retry_only_retry_on_failure_tests:
                ret = self.last_runs_retry_on_failure_tests.copy()
                self.last_runs_retry_on_failure_tests = set()
                return ret
            else:
                return json_results.regressions(results)

        if test_set.parallel_tests:
            jobs = min(len(test_set.parallel_tests), self.args.jobs)
        else:
            jobs = 1

        child = _Child(self)
        pool = make_pool(h, jobs, _run_one_test, child,
                         _setup_process, _teardown_process)

        self._run_one_set(self.stats, result_set, test_set, jobs, pool)

        tests_to_retry = sorted(get_tests_to_retry(result_set))
        retry_limit = self.args.retry_limit
        try:
            # Start at 1 since we already did iteration 0 above.
            for iteration in range(1, self.args.retry_limit + 1):
                if not tests_to_retry:
                    break
                if retry_limit == self.args.retry_limit:
                    self.flush()
                    self.args.overwrite = False
                    self.printer.should_overwrite = False
                    self.args.verbose = min(self.args.verbose, 1)

                self.print_('')
                self.print_('Retrying failed tests (attempt #%d of %d)...' %
                            (iteration, self.args.retry_limit))
                self.print_('')

                stats = Stats(self.args.status_format, h.time, 1)
                stats.total = len(tests_to_retry)
                test_set = TestSet(self.args.test_name_prefix)
                test_set.isolated_tests = [
                    TestInput(name, iteration=iteration)
                    for name in tests_to_retry]
                tests_to_retry = test_set
                retry_set = ResultSet()
                self._run_one_set(stats, retry_set, tests_to_retry, 1, pool)
                result_set.results.extend(retry_set.results)
                tests_to_retry = get_tests_to_retry(retry_set)
                retry_limit -= 1
            pool.close()
        finally:
            self.final_responses.extend(pool.join())

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.metadata,
                                                      int(h.time()),
                                                      all_tests, result_set,
                                                      self.path_delimiter)
        retcode = (json_results.exit_code_from_full_results(full_results)
                   | result_sink.result_sink_retcode_from_result_set(result_set))

        return (retcode, full_results)
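
Example #16 differs structurally from the earlier variants: the worker pool is created once, shared by the initial pass (jobs workers) and every retry pass (one worker), and the try/finally guarantees pool.join() collects the per-worker teardown responses even if a pass raises. The same create-once, clean-up-in-finally discipline, sketched with the standard library rather than the runner's make_pool (multiprocessing's join() only waits and must follow close(), so both live in the finally here):

import multiprocessing


def square(n):
    return n * n


def run_passes(batches):
    # One pool for every pass, mirroring the shape of the code above.
    pool = multiprocessing.Pool(processes=4)
    results = []
    try:
        for batch in batches:
            # The first pass and each retry reuse this same pool.
            results.extend(pool.map(square, batch))
    finally:
        pool.close()
        pool.join()
    return results


if __name__ == '__main__':
    print(run_passes([[1, 2, 3], [4, 5]]))
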
Example #17
    def test_escaped_percent(self):
        s = Stats('%%', lambda: 0, 32)
        self.assertEqual(s.format(), '%')
Example #18
    def test_remaining(self):
        s = Stats("%u", lambda: 0, 32)
        s.total = 2
        self.assertEqual(s.format(), "2")
Example #19
    def test_unrecognized_escape(self):
        s = Stats("%x", lambda: 0, 32)
        self.assertEqual(s.format(), "%x")
Example #20
    def test_escaped_percent(self):
        s = Stats("%%", lambda: 0, 32)
        self.assertEqual(s.format(), "%")
Example #21
    def test_basic(self):
        s = Stats('foo', lambda: 0, 32)
        self.assertEqual(s.format(), 'foo')
Example #22
    def test_unrecognized_escape(self):
        s = Stats('%x', lambda: 0, 32)
        self.assertEqual(s.format(), '%x')
Example #23
    def test_remaining(self):
        s = Stats('%u', lambda: 0, 32)
        s.total = 2
        self.assertEqual(s.format(), '2')
Example #24
    def test_basic(self):
        s = Stats("foo", lambda: 0, 32)
        self.assertEqual(s.format(), "foo")