Example #1
    def test_interrupt_if_at_failure_limits(self):
        port = Mock()  # FIXME: This should be a tighter mock.
        port.TEST_PATH_SEPARATOR = '/'
        port._filesystem = MockFileSystem()
        manager = Manager(port=port, options=MockOptions(), printer=Mock())

        manager._options = MockOptions(exit_after_n_failures=None, exit_after_n_crashes_or_timeouts=None)
        result_summary = ResultSummary(expectations=Mock(), test_files=[])
        result_summary.unexpected_failures = 100
        result_summary.unexpected_crashes = 50
        result_summary.unexpected_timeouts = 50
        # No exception when the exit_after* options are None.
        manager._interrupt_if_at_failure_limits(result_summary)

        # No exception when we haven't hit the limit yet.
        manager._options.exit_after_n_failures = 101
        manager._options.exit_after_n_crashes_or_timeouts = 101
        manager._interrupt_if_at_failure_limits(result_summary)

        # Interrupt if we've exceeded either limit:
        manager._options.exit_after_n_crashes_or_timeouts = 10
        self.assertRaises(TestRunInterruptedException, manager._interrupt_if_at_failure_limits, result_summary)

        manager._options.exit_after_n_crashes_or_timeouts = None
        manager._options.exit_after_n_failures = 10
        self.assertRaises(TestRunInterruptedException, manager._interrupt_if_at_failure_limits, result_summary)
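
The contract these assertions pin down can be summarized in a short sketch. This is a minimal reading of the test, not the actual webkitpy implementation: each limit is checked only when it is set, and crashes and timeouts count against a single shared limit.

    # Minimal sketch inferred from the assertions above (hypothetical body,
    # not the real Manager._interrupt_if_at_failure_limits).
    def _interrupt_if_at_failure_limits(self, result_summary):
        options = self._options
        if (options.exit_after_n_failures is not None and
                result_summary.unexpected_failures >= options.exit_after_n_failures):
            raise TestRunInterruptedException(
                "Exiting early after %d failures." %
                result_summary.unexpected_failures)
        crashes_and_timeouts = (result_summary.unexpected_crashes +
                                result_summary.unexpected_timeouts)
        if (options.exit_after_n_crashes_or_timeouts is not None and
                crashes_and_timeouts >= options.exit_after_n_crashes_or_timeouts):
            raise TestRunInterruptedException(
                "Exiting early after %d crashes and/or timeouts." %
                crashes_and_timeouts)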
Example #2
    def test_interrupt_if_at_failure_limits(self):
        port = Mock()  # FIXME: This should be a tighter mock.
        port.TEST_PATH_SEPARATOR = '/'
        port._filesystem = MockFileSystem()
        manager = Manager(port=port, options=MockOptions(), printer=Mock())

        manager._options = MockOptions(exit_after_n_failures=None,
                                       exit_after_n_crashes_or_timeouts=None)
        result_summary = ResultSummary(expectations=Mock(), test_files=[])
        result_summary.unexpected_failures = 100
        result_summary.unexpected_crashes = 50
        result_summary.unexpected_timeouts = 50
        # No exception when the exit_after* options are None.
        manager._interrupt_if_at_failure_limits(result_summary)

        # No exception when we haven't hit the limit yet.
        manager._options.exit_after_n_failures = 101
        manager._options.exit_after_n_crashes_or_timeouts = 101
        manager._interrupt_if_at_failure_limits(result_summary)

        # Interrupt if we've exceeded either limit:
        manager._options.exit_after_n_crashes_or_timeouts = 10
        self.assertRaises(TestRunInterruptedException,
                          manager._interrupt_if_at_failure_limits,
                          result_summary)

        manager._options.exit_after_n_crashes_or_timeouts = None
        manager._options.exit_after_n_failures = 10
        self.assertRaises(TestRunInterruptedException,
                          manager._interrupt_if_at_failure_limits,
                          result_summary)
Example #3
    def test_shard_tests(self):
        # Test that _shard_tests in test_runner.TestRunner really
        # puts the http tests first in the queue.
        port = Mock()
        port._filesystem = filesystem_mock.MockFileSystem()
        runner = TestRunnerWrapper(port=port, options=Mock(),
            printer=Mock())

        test_list = [
          "LayoutTests/websocket/tests/unicode.htm",
          "LayoutTests/animations/keyframes.html",
          "LayoutTests/http/tests/security/view-source-no-refresh.html",
          "LayoutTests/websocket/tests/websocket-protocol-ignored.html",
          "LayoutTests/fast/css/display-none-inline-style-change-crash.html",
          "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html",
          "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html",
          "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html",
          "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html",
        ]

        expected_tests_to_http_lock = set([
          'LayoutTests/websocket/tests/unicode.htm',
          'LayoutTests/http/tests/security/view-source-no-refresh.html',
          'LayoutTests/websocket/tests/websocket-protocol-ignored.html',
          'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html',
        ])

        # FIXME: Ideally the HTTP tests don't have to all be in one shard.
        single_thread_results = runner._shard_tests(test_list, False)
        multi_thread_results = runner._shard_tests(test_list, True)

        self.assertEqual("tests_to_http_lock", single_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(single_thread_results[0][1]))
        self.assertEqual("tests_to_http_lock", multi_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(multi_thread_results[0][1]))
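
From the assertions, _shard_tests returns a list of (shard_name, test_list) pairs whose first entry is the "tests_to_http_lock" shard gathering the http and websocket tests. A rough sketch of that partitioning (hypothetical helper, not the real TestRunner code; how the remaining tests are grouped is not constrained by this test):

    def _shard_tests_sketch(test_list):
        def needs_http_lock(test):
            # http and websocket tests need the shared test web server,
            # so they are serialized behind one lock.
            return '/http/tests/' in test or '/websocket/tests/' in test
        http_tests = [t for t in test_list if needs_http_lock(t)]
        other_tests = [t for t in test_list if not needs_http_lock(t)]
        # The http-locked shard always comes first in the queue; the shape
        # of the remaining shards is an assumption here.
        return [('tests_to_http_lock', http_tests),
                ('everything_else', other_tests)]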
Example #4
    def _test_json_generation(self, passed_tests_list, failed_tests_list):
        tests_set = set(passed_tests_list) | set(failed_tests_list)

        DISABLED_tests = set([t for t in tests_set if t.startswith("DISABLED_")])
        FLAKY_tests = set([t for t in tests_set if t.startswith("FLAKY_")])
        FAILS_tests = set([t for t in tests_set if t.startswith("FAILS_")])
        PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)

        failed_tests = set(failed_tests_list) - DISABLED_tests
        failed_count_map = dict([(t, 1) for t in failed_tests])

        test_timings = {}
        i = 0
        for test in tests_set:
            test_timings[test] = float(self._num_runs * 100 + i)
            i += 1

        test_results_map = dict()
        for test in tests_set:
            test_results_map[test] = json_results_generator.TestResult(
                test, failed=(test in failed_tests), elapsed_time=test_timings[test]
            )

        host = MockHost()
        port = Mock()
        port._filesystem = host.filesystem
        generator = json_results_generator.JSONResultsGeneratorBase(
            port,
            self.builder_name,
            self.build_name,
            self.build_number,
            "",
            None,  # don't fetch past json results archive
            test_results_map,
        )

        # Test incremental json results
        incremental_json = generator.get_json()
        self._verify_json_results(
            tests_set,
            test_timings,
            failed_count_map,
            len(PASS_tests),
            len(DISABLED_tests),
            len(FLAKY_tests),
            len(DISABLED_tests | failed_tests),
            incremental_json,
            1,
        )

        # We don't verify the results here, but at least we make sure the code runs without errors.
        generator.generate_json_output()
        generator.generate_times_ms_file()
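
_test_json_generation is a helper rather than a test; a plausible driver (hypothetical test names, exercising the gtest-style DISABLED_/FLAKY_/FAILS_ prefixes the helper partitions on) would look like:

    def test_json_generation(self):
        self._test_json_generation([], [])
        self._test_json_generation(['A1', 'B1'], [])
        self._test_json_generation([], ['FAILS_A2', 'FAILS_B2'])
        self._test_json_generation(['DISABLED_A3'], ['FLAKY_B3'])
        self._test_json_generation(['A4', 'DISABLED_B4'], ['FAILS_C4'])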
Example #5
    def _test_json_generation(self, passed_tests_list, failed_tests_list):
        tests_set = set(passed_tests_list) | set(failed_tests_list)

        DISABLED_tests = set([t for t in tests_set
                             if t.startswith('DISABLED_')])
        FLAKY_tests = set([t for t in tests_set
                           if t.startswith('FLAKY_')])
        FAILS_tests = set([t for t in tests_set
                           if t.startswith('FAILS_')])
        PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)

        failed_tests = set(failed_tests_list) - DISABLED_tests
        failed_count_map = dict([(t, 1) for t in failed_tests])

        test_timings = {}
        i = 0
        for test in tests_set:
            test_timings[test] = float(self._num_runs * 100 + i)
            i += 1

        test_results_map = dict()
        for test in tests_set:
            test_results_map[test] = json_results_generator.TestResult(test,
                failed=(test in failed_tests),
                elapsed_time=test_timings[test])

        host = MockHost()
        port = Mock()
        port._filesystem = host.filesystem
        generator = json_results_generator.JSONResultsGeneratorBase(port,
            self.builder_name, self.build_name, self.build_number,
            '',
            None,   # don't fetch past json results archive
            test_results_map)

        # Test incremental json results
        incremental_json = generator.get_json()
        self._verify_json_results(
            tests_set,
            test_timings,
            failed_count_map,
            len(PASS_tests),
            len(DISABLED_tests),
            len(FLAKY_tests),
            len(DISABLED_tests | failed_tests),
            incremental_json,
            1)

        # We don't verify the results here, but at least we make sure the code runs without errors.
        generator.generate_json_output()
        generator.generate_times_ms_file()
Example #6
    def _test_json_generation(self, passed_tests_list, failed_tests_list):
        tests_set = set(passed_tests_list) | set(failed_tests_list)

        DISABLED_tests = set([t for t in tests_set
                             if t.startswith('DISABLED_')])
        FLAKY_tests = set([t for t in tests_set
                           if t.startswith('FLAKY_')])
        FAILS_tests = set([t for t in tests_set
                           if t.startswith('FAILS_')])
        PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)

        failed_tests = set(failed_tests_list) - DISABLED_tests
        failed_count_map = dict([(t, 1) for t in failed_tests])

        test_timings = {}
        i = 0
        for test in tests_set:
            test_timings[test] = float(self._num_runs * 100 + i)
            i += 1

        test_results_map = dict()
        for test in tests_set:
            test_results_map[test] = json_results_generator.TestResult(test,
                failed=(test in failed_tests),
                elapsed_time=test_timings[test])

        port = Mock()
        port._filesystem = filesystem_mock.MockFileSystem()
        generator = json_results_generator.JSONResultsGeneratorBase(port,
            self.builder_name, self.build_name, self.build_number,
            '',
            None,   # don't fetch past json results archive
            test_results_map)

        # Test incremental json results
        incremental_json = generator.get_json()
        self._verify_json_results(
            tests_set,
            test_timings,
            failed_count_map,
            len(PASS_tests),
            len(DISABLED_tests),
            len(FLAKY_tests),
            len(DISABLED_tests | failed_tests),
            incremental_json,
            1)
Example #7
    def test_results_html(self):
        mock_port = Mock()
        mock_port._filesystem = filesystem_mock.MockFileSystem()
        mock_port.relative_test_filename = lambda name: name
        mock_port.filename_to_uri = lambda name: name

        runner = test_runner.TestRunner(port=mock_port, options=Mock(),
            printer=Mock())
        expected_html = u"""<html>
  <head>
    <title>Layout Test Results (time)</title>
  </head>
  <body>
    <h2>Title (time)</h2>
        <p><a href='test_path'>test_path</a><br />
</p>
</body></html>
"""
        html = runner._results_html(["test_path"], {}, "Title", override_time="time")
        self.assertEqual(html, expected_html)
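
For this input the expected markup fully determines the builder; a minimal sketch that reproduces it (assumed shape, not the real TestRunner._results_html):

    def _results_html_sketch(tests, title, time_string):
        # Header and title block, matching the expected_html literal above.
        page = (u"<html>\n  <head>\n"
                u"    <title>Layout Test Results (%s)</title>\n"
                u"  </head>\n  <body>\n"
                u"    <h2>%s (%s)</h2>\n" % (time_string, title, time_string))
        for test in tests:
            # Each test is linked by its (already URI-shaped) path.
            page += u"        <p><a href='%s'>%s</a><br />\n</p>\n" % (test, test)
        return page + u"</body></html>\n"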
Example #8
    def test_results_html(self):
        mock_port = Mock()
        mock_port._filesystem = filesystem_mock.MockFileSystem()
        mock_port.relative_test_filename = lambda name: name
        mock_port.filename_to_uri = lambda name: name

        runner = test_runner.TestRunner(port=mock_port,
                                        options=Mock(),
                                        printer=Mock())
        expected_html = u"""<html>
  <head>
    <title>Layout Test Results (time)</title>
  </head>
  <body>
    <h2>Title (time)</h2>
        <p><a href='test_path'>test_path</a><br />
</p>
</body></html>
"""
        html = runner._results_html(["test_path"], {},
                                    "Title",
                                    override_time="time")
        self.assertEqual(html, expected_html)
Example #9
    def test_shard_tests(self):
        # Test that _shard_tests in test_runner.TestRunner really
        # puts the http tests first in the queue.
        port = Mock()
        port._filesystem = filesystem_mock.MockFileSystem()
        runner = TestRunnerWrapper(port=port, options=Mock(), printer=Mock())

        test_list = [
            "LayoutTests/websocket/tests/unicode.htm",
            "LayoutTests/animations/keyframes.html",
            "LayoutTests/http/tests/security/view-source-no-refresh.html",
            "LayoutTests/websocket/tests/websocket-protocol-ignored.html",
            "LayoutTests/fast/css/display-none-inline-style-change-crash.html",
            "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html",
            "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html",
            "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html",
            "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html",
        ]

        expected_tests_to_http_lock = set([
            'LayoutTests/websocket/tests/unicode.htm',
            'LayoutTests/http/tests/security/view-source-no-refresh.html',
            'LayoutTests/websocket/tests/websocket-protocol-ignored.html',
            'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html',
        ])

        # FIXME: Ideally the HTTP tests don't have to all be in one shard.
        single_thread_results = runner._shard_tests(test_list, False)
        multi_thread_results = runner._shard_tests(test_list, True)

        self.assertEqual("tests_to_http_lock", single_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock,
                         set(single_thread_results[0][1]))
        self.assertEqual("tests_to_http_lock", multi_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock,
                         set(multi_thread_results[0][1]))
Example #10
    def _test_json_generation(self, passed_tests_list, failed_tests_list):
        tests_set = set(passed_tests_list) | set(failed_tests_list)

        DISABLED_tests = set([t for t in tests_set
                             if t.startswith('DISABLED_')])
        FLAKY_tests = set([t for t in tests_set
                           if t.startswith('FLAKY_')])
        FAILS_tests = set([t for t in tests_set
                           if t.startswith('FAILS_')])
        PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)

        failed_tests = set(failed_tests_list) - DISABLED_tests
        failed_count_map = dict([(t, 1) for t in failed_tests])

        test_timings = {}
        i = 0
        for test in tests_set:
            test_timings[test] = float(self._num_runs * 100 + i)
            i += 1

        test_results_map = dict()
        for test in tests_set:
            test_results_map[test] = json_results_generator.TestResult(test,
                failed=(test in failed_tests),
                elapsed_time=test_timings[test])

        port = Mock()
        port._filesystem = filesystem_mock.MockFileSystem()
        generator = json_results_generator.JSONResultsGeneratorBase(port,
            self.builder_name, self.build_name, self.build_number,
            '',
            None,   # don't fetch past json results archive
            test_results_map)

        # Test incremental json results
        incremental_json = generator.get_json(incremental=True)
        self._verify_json_results(
            tests_set,
            test_timings,
            failed_count_map,
            len(PASS_tests),
            len(DISABLED_tests),
            len(FLAKY_tests),
            len(DISABLED_tests | failed_tests),
            incremental_json,
            1)

        # Test aggregated json results
        generator.set_archived_results(self._json)
        json = generator.get_json(incremental=False)
        self._json = json
        self._num_runs += 1
        self._tests_set |= tests_set
        self._test_timings.update(test_timings)
        self._PASS_count += len(PASS_tests)
        self._DISABLED_count += len(DISABLED_tests)
        self._FLAKY_count += len(FLAKY_tests)
        self._fixable_count += len(DISABLED_tests | failed_tests)

        get = self._failed_count_map.get
        for test in failed_count_map:
            self._failed_count_map[test] = get(test, 0) + 1

        self._verify_json_results(
            self._tests_set,
            self._test_timings,
            self._failed_count_map,
            self._PASS_count,
            self._DISABLED_count,
            self._FLAKY_count,
            self._fixable_count,
            self._json,
            self._num_runs)
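
The failed-count merge above is a plain per-key counter update; the same idiom in isolation:

    # Standalone illustration of the per-test failure-count merge performed
    # above (plain dict arithmetic, no webkitpy dependencies).
    aggregated = {'FAILS_A': 2}
    new_failures = {'FAILS_A': 1, 'FAILS_B': 1}
    for test in new_failures:
        aggregated[test] = aggregated.get(test, 0) + 1
    assert aggregated == {'FAILS_A': 3, 'FAILS_B': 1}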