Example #1
    def test_artifacts_merged(self):
        test_names = ['foo_test.FooTest.foobar']
        result_set = json_results.ResultSet()

        ar = FakeArtifacts()
        ar.artifacts = {'artifact_name': ['a/b/c.txt']}
        result_set.add(json_results.Result(
                'foo_test.FooTest.foobar', json_results.ResultType.Failure,
                0, 0.2, 0, artifacts=ar.artifacts))

        ar2 = FakeArtifacts()
        ar2.artifacts = {'artifact_name': ['d/e/f.txt']}
        result_set.add(json_results.Result(
                'foo_test.FooTest.foobar', json_results.ResultType.Failure,
                0, 0.2, 0, artifacts=ar2.artifacts))

        full_results = json_results.make_full_results(
                {'foo': 'bar'}, 0, test_names, result_set)

        tests = full_results['tests']['foo_test']['FooTest']
        self.assertIn('artifacts', tests['foobar'])
        # Order is not guaranteed for the list of paths, so don't use
        # assertEqual if there are multiple entries
        artifacts = tests['foobar']['artifacts']
        self.assertIn('artifact_name', artifacts)
        self.assertEqual(len(artifacts['artifact_name']), 2)
        self.assertIn('a/b/c.txt', artifacts['artifact_name'])
        self.assertIn('d/e/f.txt', artifacts['artifact_name'])
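
The FakeArtifacts helper used above (and in example #9) is not defined on this
page. A minimal sketch of what the tests assume, given that only its
.artifacts dict is ever read, might look like the following; the name and
shape are inferred from usage, not taken from the real test helper:

    class FakeArtifacts(object):
        # Stand-in for an artifact-collecting object: maps an artifact name
        # to a list of file paths, e.g. {'artifact_name': ['a/b/c.txt']}.
        def __init__(self):
            self.artifacts = {}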
Example #2
    def _run_tests(self, result_set, test_set, all_tests):
        h = self.host
        self.last_runs_retry_on_failure_tests = set()

        def get_tests_to_retry(results):
            # If the --retry-only-retry-on-failure-tests command line argument
            # is passed, then the set of test failures with the RetryOnFailure
            # expectation from the last run of tests is returned, and
            # self.last_runs_retry_on_failure_tests is reset to an empty set
            # for the next run of tests. Otherwise, all regressions from the
            # last run are returned.
            if self.args.retry_only_retry_on_failure_tests:
                ret = self.last_runs_retry_on_failure_tests.copy()
                self.last_runs_retry_on_failure_tests = set()
                return ret
            else:
                return json_results.regressions(results)

        self._run_one_set(self.stats, result_set, test_set)

        tests_to_retry = sorted(get_tests_to_retry(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and tests_to_retry:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(tests_to_retry)
            test_set = TestSet(self.args.test_name_prefix)
            for name in tests_to_retry:
                test = list(self.loader.loadTestsFromName(
                    self.args.test_name_prefix + name))[0]
                test_set.add_test_to_run_isolated(test)
            tests_to_retry = test_set
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            tests_to_retry = get_tests_to_retry(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.args.metadata,
                                                      int(h.time()), all_tests,
                                                      result_set)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)
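
For reference, a hedged sketch of what json_results.regressions(results) is
expected to return here, inferred from the 'is_regression' flag in the
expected JSON of the test_basic examples below; this is an assumption, not
typ's actual implementation, and the attribute names are guessed from the
Result constructor's positional arguments:

    def regressions_sketch(result_set):
        # Treat an unexpected failing result as a regression; the real
        # implementation may also count timeouts and crashes.
        return {r.name for r in result_set.results
                if r.unexpected and r.actual == json_results.ResultType.Failure}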
Example #3
    def _run_tests(self, result_set, test_set, all_tests):
        h = self.host
        self.last_runs_retry_on_failure_tests = set()

        def get_tests_to_retry(results):
            # If the --retry-only-retry-on-failure-tests command line argument
            # is passed, then the set of test failures with the RetryOnFailure
            # expectation from the last run of tests is returned, and
            # self.last_runs_retry_on_failure_tests is reset to an empty set
            # for the next run of tests. Otherwise, all regressions from the
            # last run are returned.
            if self.args.retry_only_retry_on_failure_tests:
                ret = self.last_runs_retry_on_failure_tests.copy()
                self.last_runs_retry_on_failure_tests = set()
                return ret
            else:
                return json_results.regressions(results)

        self._run_one_set(self.stats, result_set, test_set)

        tests_to_retry = sorted(get_tests_to_retry(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and tests_to_retry:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(tests_to_retry)
            test_set = TestSet(self.args.test_name_prefix)
            test_set.isolated_tests = [
                TestInput(name) for name in tests_to_retry]
            tests_to_retry = test_set
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            tests_to_retry = get_tests_to_retry(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.metadata,
                                                      int(h.time()),
                                                      all_tests, result_set,
                                                      self.path_delimiter)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)
Example #4
    def test_basic(self):
        test_names = ['foo_test.FooTest.test_fail',
                      'foo_test.FooTest.test_pass',
                      'foo_test.FooTest.test_skip']

        result_set = json_results.ResultSet()
        result_set.add(
            json_results.Result('foo_test.FooTest.test_fail',
                                json_results.ResultType.Failure, 0, 0.1, 0,
                                unexpected=True))
        result_set.add(json_results.Result('foo_test.FooTest.test_pass',
                                           json_results.ResultType.Pass,
                                           0, 0.2, 0))
        result_set.add(json_results.Result('foo_test.FooTest.test_skip',
                                           json_results.ResultType.Skip,
                                           0, 0.3, 0,
                                           expected=[json_results.ResultType.Skip],
                                           unexpected=False))

        full_results = json_results.make_full_results(
            {'foo': 'bar'}, 0, test_names, result_set)
        expected_full_results = {
            'foo': 'bar',
            'metadata': {
                'foo': 'bar'},
            'interrupted': False,
            'num_failures_by_type': {
                'FAIL': 1,
                'PASS': 1,
                'SKIP': 1},
            'num_regressions': 1,
            'path_delimiter': '.',
            'seconds_since_epoch': 0,
            'tests': {
                'foo_test': {
                    'FooTest': {
                        'test_fail': {
                            'expected': 'PASS',
                            'actual': 'FAIL',
                            'times': [0.1],
                            'is_unexpected': True,
                            'is_regression': True,
                        },
                        'test_pass': {
                            'expected': 'PASS',
                            'actual': 'PASS',
                            'times': [0.2],
                        },
                        'test_skip': {
                            'expected': 'SKIP',
                            'actual': 'SKIP',
                            'times': [0.3],
                        }}}},
            'version': 3}
        self.assertEqual(full_results, expected_full_results)
Example #5
    def test_basic(self):
        test_names = ['foo_test.FooTest.test_fail',
                      'foo_test.FooTest.test_pass',
                      'foo_test.FooTest.test_skip']

        result_set = json_results.ResultSet()
        result_set.add(
            json_results.Result('foo_test.FooTest.test_fail',
                                json_results.ResultType.Failure, 0, 0.1, 0,
                                unexpected=True))
        result_set.add(json_results.Result('foo_test.FooTest.test_pass',
                                           json_results.ResultType.Pass,
                                           0, 0.2, 0))
        result_set.add(json_results.Result('foo_test.FooTest.test_skip',
                                           json_results.ResultType.Skip,
                                           0, 0.3, 0,
                                           expected=[json_results.ResultType.Skip],
                                           unexpected=False))

        full_results = json_results.make_full_results(
            {'foo': 'bar'}, 0, test_names, result_set)
        expected_full_results = {
            'foo': 'bar',
            'metadata': {
                'foo': 'bar'},
            'interrupted': False,
            'num_failures_by_type': {
                'FAIL': 1,
                'PASS': 1,
                'SKIP': 1},
            'num_regressions': 1,
            'path_delimiter': '.',
            'seconds_since_epoch': 0,
            'tests': {
                'foo_test': {
                    'FooTest': {
                        'test_fail': {
                            'expected': 'PASS',
                            'actual': 'FAIL',
                            'times': [0.1],
                            'is_unexpected': True,
                            'is_regression': True,
                        },
                        'test_pass': {
                            'expected': 'PASS',
                            'actual': 'PASS',
                            'times': [0.2],
                        },
                        'test_skip': {
                            'expected': 'SKIP',
                            'actual': 'SKIP',
                            'times': [0.3],
                        }}}},
            'version': 3}
        self.assertEqual(full_results, expected_full_results)
Example #6
    def test_unknown_result(self):
        test_names = ['foo_test.FooTest.test_unknown_result']

        result_set = json_results.ResultSet()
        result_set.add(
            json_results.Result('foo_test.FooTest.test_unknown_result',
                                'UNKNOWN', 0, 0.2, 0))
        with self.assertRaises(ValueError) as ctx:
            json_results.make_full_results({'foo': 'bar'}, 0, test_names,
                                           result_set)
        self.assertIn('UNKNOWN', str(ctx.exception))
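
The ValueError above comes from make_full_results rejecting a result type it
does not recognize. A hedged reconstruction of the ResultType constants these
examples rely on, with string values taken from the expected JSON output in
the test_basic examples (the real json_results module may define additional
types):

    class ResultType(object):
        Pass = 'PASS'
        Failure = 'FAIL'
        Skip = 'SKIP'
        Timeout = 'TIMEOUT'
        Crash = 'CRASH'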
Example #7
File: runner.py  Project: DrovioHQ/Qt
    def _run_tests(self, result_set, test_set):
        h = self.host
        if not test_set.parallel_tests and not test_set.isolated_tests:
            self.print_('No tests to run.')
            return 1, None

        all_tests = [
            ti.name for ti in _sort_inputs(test_set.parallel_tests +
                                           test_set.isolated_tests +
                                           test_set.tests_to_skip)
        ]

        if self.args.list_only:
            self.print_('\n'.join(all_tests))
            return 0, None

        self._run_one_set(self.stats, result_set, test_set)

        failed_tests = sorted(json_results.failed_test_names(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and failed_tests:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(failed_tests)
            tests_to_retry = TestSet(isolated_tests=list(failed_tests))
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            failed_tests = json_results.failed_test_names(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.args.metadata,
                                                      int(h.time()), all_tests,
                                                      result_set)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)
Example #8
    def _run_tests(self, result_set, test_set):
        h = self.host
        if not test_set.parallel_tests and not test_set.isolated_tests:
            self.print_('No tests to run.')
            return 1, None

        all_tests = [ti.name for ti in
                     _sort_inputs(test_set.parallel_tests +
                                  test_set.isolated_tests +
                                  test_set.tests_to_skip)]

        if self.args.list_only:
            self.print_('\n'.join(all_tests))
            return 0, None

        self._run_one_set(self.stats, result_set, test_set)

        failed_tests = sorted(json_results.failed_test_names(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and failed_tests:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(failed_tests)
            tests_to_retry = TestSet(isolated_tests=list(failed_tests))
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            failed_tests = json_results.failed_test_names(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.args.metadata,
                                                      int(h.time()),
                                                      all_tests, result_set)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)
Example #9
    def test_artifacts_and_types_added(self):
        ar = FakeArtifacts()
        ar.artifacts = {'artifact_name': ['a/b/c.txt']}

        test_names = ['foo_test.FooTest.foobar']

        result_set = json_results.ResultSet()
        result_set.add(json_results.Result(
                'foo_test.FooTest.foobar', json_results.ResultType.Pass,
                0, 0.2, 0, artifacts=ar.artifacts))

        full_results = json_results.make_full_results(
                {'foo': 'bar'}, 0, test_names, result_set)

        tests = full_results['tests']['foo_test']['FooTest']
        self.assertIn('artifacts', tests['foobar'])
        self.assertEqual(tests['foobar']['artifacts'],
                         {'artifact_name': ['a/b/c.txt']})
Example #10
    def _run_tests(self, result_set, test_set, all_tests):
        h = self.host

        self._run_one_set(self.stats, result_set, test_set)

        failed_tests = sorted(json_results.failed_test_names(result_set))
        retry_limit = self.args.retry_limit

        while retry_limit and failed_tests:
            if retry_limit == self.args.retry_limit:
                self.flush()
                self.args.overwrite = False
                self.printer.should_overwrite = False
                self.args.verbose = min(self.args.verbose, 1)

            self.print_('')
            self.print_('Retrying failed tests (attempt #%d of %d)...' %
                        (self.args.retry_limit - retry_limit + 1,
                         self.args.retry_limit))
            self.print_('')

            stats = Stats(self.args.status_format, h.time, 1)
            stats.total = len(failed_tests)
            tests_to_retry = TestSet(isolated_tests=list(failed_tests))
            retry_set = ResultSet()
            self._run_one_set(stats, retry_set, tests_to_retry)
            result_set.results.extend(retry_set.results)
            failed_tests = json_results.failed_test_names(retry_set)
            retry_limit -= 1

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.args.metadata,
                                                      int(h.time()), all_tests,
                                                      result_set)

        return (json_results.exit_code_from_full_results(full_results),
                full_results)
Example #11
    def test_basic_upload(self):
        results = json_results.ResultSet()
        full_results = json_results.make_full_results({}, 0, [], results)
        url, content_type, data = json_results.make_upload_request(
            'localhost', 'fake_builder_name', 'fake_master', 'fake_test_type',
            full_results)

        self.assertEqual(
            content_type, 'multipart/form-data; '
            'boundary=-J-S-O-N-R-E-S-U-L-T-S---B-O-U-N-D-A-R-Y-')

        self.assertEqual(url, 'https://localhost/testfile/upload')
        self.assertMultiLineEqual(
            data,
            ('---J-S-O-N-R-E-S-U-L-T-S---B-O-U-N-D-A-R-Y-\r\n'
             'Content-Disposition: form-data; name="builder"\r\n'
             '\r\n'
             'fake_builder_name\r\n'
             '---J-S-O-N-R-E-S-U-L-T-S---B-O-U-N-D-A-R-Y-\r\n'
             'Content-Disposition: form-data; name="master"\r\n'
             '\r\n'
             'fake_master\r\n'
             '---J-S-O-N-R-E-S-U-L-T-S---B-O-U-N-D-A-R-Y-\r\n'
             'Content-Disposition: form-data; name="testtype"\r\n'
             '\r\n'
             'fake_test_type\r\n'
             '---J-S-O-N-R-E-S-U-L-T-S---B-O-U-N-D-A-R-Y-\r\n'
             'Content-Disposition: form-data; name="file"; '
             'filename="full_results.json"\r\n'
             'Content-Type: application/json\r\n'
             '\r\n'
             '{"version": 3, "interrupted": false, "path_delimiter": ".", '
             '"seconds_since_epoch": 0, '
             '"num_failures_by_type": {"FAIL": 0, "TIMEOUT": 0, "CRASH": 0,'
             ' "PASS": 0, "SKIP": 0}, '
             '"num_regressions": 0, '
             '"tests": {}}\r\n'
             '---J-S-O-N-R-E-S-U-L-T-S---B-O-U-N-D-A-R-Y---\r\n'))
Example #12
    def test_basic_upload(self):
        results = json_results.ResultSet()
        full_results = json_results.make_full_results({}, 0, [], results)
        url, content_type, data = json_results.make_upload_request(
            'localhost', 'fake_builder_name', 'fake_master', 'fake_test_type',
            full_results)

        self.assertEqual(
            content_type,
            'multipart/form-data; '
            'boundary=-J-S-O-N-R-E-S-U-L-T-S---B-O-U-N-D-A-R-Y-')

        self.assertEqual(url, 'https://localhost/testfile/upload')
        self.assertMultiLineEqual(
            data,
            ('---J-S-O-N-R-E-S-U-L-T-S---B-O-U-N-D-A-R-Y-\r\n'
             'Content-Disposition: form-data; name="builder"\r\n'
             '\r\n'
             'fake_builder_name\r\n'
             '---J-S-O-N-R-E-S-U-L-T-S---B-O-U-N-D-A-R-Y-\r\n'
             'Content-Disposition: form-data; name="master"\r\n'
             '\r\n'
             'fake_master\r\n'
             '---J-S-O-N-R-E-S-U-L-T-S---B-O-U-N-D-A-R-Y-\r\n'
             'Content-Disposition: form-data; name="testtype"\r\n'
             '\r\n'
             'fake_test_type\r\n'
             '---J-S-O-N-R-E-S-U-L-T-S---B-O-U-N-D-A-R-Y-\r\n'
             'Content-Disposition: form-data; name="file"; '
             'filename="full_results.json"\r\n'
             'Content-Type: application/json\r\n'
             '\r\n'
             '{"version": 3, "interrupted": false, "path_delimiter": ".", '
             '"seconds_since_epoch": 0, '
             '"num_failures_by_type": {"FAIL": 0, "PASS": 0, "SKIP": 0}, '
             '"num_regressions": 0, '
             '"tests": {}}\r\n'
             '---J-S-O-N-R-E-S-U-L-T-S---B-O-U-N-D-A-R-Y---\r\n'))
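
make_upload_request only builds the request; neither example above performs
the upload. A hypothetical usage sketch for posting the three returned pieces
with the standard library (assuming the endpoint at `url` is reachable):

    import urllib.request

    def upload_full_results(url, content_type, data):
        # `data` is the multipart body string built by make_upload_request;
        # it must be sent as bytes, with the matching Content-Type header.
        request = urllib.request.Request(
            url, data=data.encode('utf-8'),
            headers={'Content-Type': content_type})
        with urllib.request.urlopen(request) as response:
            return response.status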
Example #13
    def _run_tests(self, result_set, test_set, all_tests):
        h = self.host
        self.last_runs_retry_on_failure_tests = set()

        def get_tests_to_retry(results):
            # If the --retry-only-retry-on-failure-tests command line argument
            # is passed, then the set of test failures with the RetryOnFailure
            # expectation from the last run of tests is returned, and
            # self.last_runs_retry_on_failure_tests is reset to an empty set
            # for the next run of tests. Otherwise, all regressions from the
            # last run are returned.
            if self.args.retry_only_retry_on_failure_tests:
                ret = self.last_runs_retry_on_failure_tests.copy()
                self.last_runs_retry_on_failure_tests = set()
                return ret
            else:
                return json_results.regressions(results)

        if test_set.parallel_tests:
            jobs = min(len(test_set.parallel_tests), self.args.jobs)
        else:
            jobs = 1

        child = _Child(self)
        pool = make_pool(h, jobs, _run_one_test, child,
                         _setup_process, _teardown_process)

        self._run_one_set(self.stats, result_set, test_set, jobs, pool)

        tests_to_retry = sorted(get_tests_to_retry(result_set))
        retry_limit = self.args.retry_limit
        try:
            # Start at 1 since we already did iteration 0 above.
            for iteration in range(1, self.args.retry_limit + 1):
                if not tests_to_retry:
                    break
                if retry_limit == self.args.retry_limit:
                    self.flush()
                    self.args.overwrite = False
                    self.printer.should_overwrite = False
                    self.args.verbose = min(self.args.verbose, 1)

                self.print_('')
                self.print_('Retrying failed tests (attempt #%d of %d)...' %
                            (iteration, self.args.retry_limit))
                self.print_('')

                stats = Stats(self.args.status_format, h.time, 1)
                stats.total = len(tests_to_retry)
                test_set = TestSet(self.args.test_name_prefix)
                test_set.isolated_tests = [
                    TestInput(name, iteration=iteration)
                    for name in tests_to_retry]
                tests_to_retry = test_set
                retry_set = ResultSet()
                self._run_one_set(stats, retry_set, tests_to_retry, 1, pool)
                result_set.results.extend(retry_set.results)
                tests_to_retry = get_tests_to_retry(retry_set)
                retry_limit -= 1
            pool.close()
        finally:
            self.final_responses.extend(pool.join())

        if retry_limit != self.args.retry_limit:
            self.print_('')

        full_results = json_results.make_full_results(self.metadata,
                                                      int(h.time()),
                                                      all_tests, result_set,
                                                      self.path_delimiter)
        retcode = (json_results.exit_code_from_full_results(full_results)
                   | result_sink.result_sink_retcode_from_result_set(result_set))

        return (retcode, full_results)