Example #1
    def test_failing_tests_message(self):
        # Needed to define port_name, used in AbstractEarlyWarningSystem.__init__
        class TestEWS(AbstractEarlyWarningSystem):
            port_name = "win"  # Needs to be a port which port/factory understands.

        ews = TestEWS()
        ews.bind_to_tool(MockTool())
        ews._options = MockOptions(port=None, confirm=False)
        OutputCapture().assert_outputs(
            self,
            ews.begin_work_queue,
            expected_logs=self._default_begin_work_queue_logs(ews.name))
        task = Mock()
        task.results_from_patch_test_run = lambda a: LayoutTestResults(
            [
                test_results.TestResult(
                    "foo.html", failures=[test_failures.FailureTextMismatch()]),
                test_results.TestResult(
                    "bar.html", failures=[test_failures.FailureTextMismatch()])
            ],
            did_exceed_test_failure_limit=False)
        task.results_from_test_run_without_patch = lambda a: LayoutTestResults(
            [], did_exceed_test_failure_limit=False)
        patch = ews._tool.bugs.fetch_attachment(10000)
        self.assertMultiLineEqual(ews._failing_tests_message(task, patch),
                                  "New failing tests:\nbar.html\nfoo.html")
 def test_results(self):
     # Doesn't make sense to ask for the test_results until the tests have run at least once.
     assert self._test_run_counter >= 0
     failures_for_run = self._test_failure_plan[self._test_run_counter]
     results = LayoutTestResults(map(self._mock_test_result, failures_for_run))
     # This makes the results trustable by ExpectedFailures.
     results.set_failure_limit_count(10)
     return results
 def layout_test_results(self):
     # Doesn't make sense to ask for the layout_test_results until the tests have run at least once.
     assert self._test_run_counter >= 0
     failures_for_run = self._test_failure_plan[self._test_run_counter]
     results = LayoutTestResults(map(self._mock_test_result, failures_for_run))
     # This makes the results trustable by ExpectedFailures.
     results.set_failure_limit_count(10)
     return results
 def test_was_interrupted(self):
     self.assertTrue(
         LayoutTestResults.results_from_string(
             'ADD_RESULTS({"tests":{},"interrupted":true});').
         run_was_interrupted())
     self.assertFalse(
         LayoutTestResults.results_from_string(
             'ADD_RESULTS({"tests":{},"interrupted":false});').
         run_was_interrupted())
 def test_was_interrupted(self):
     self.assertTrue(
         LayoutTestResults.results_from_string('ADD_RESULTS({"tests":{},"interrupted":true});').run_was_interrupted()
     )
     self.assertFalse(
         LayoutTestResults.results_from_string(
             'ADD_RESULTS({"tests":{},"interrupted":false});'
         ).run_was_interrupted()
     )
Example #6
    def test_flaky_test_failure(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            None,
            None,
            ScriptError("MOCK tests failure"),
        ])
        # CommitQueueTask will only report flaky tests if we successfully parsed
        # results.html and returned a LayoutTestResults object, so we fake one.
        commit_queue.test_results = lambda: LayoutTestResults([])
        expected_stderr = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
report_flaky_tests: patch='10000' flaky_tests='[]' archive='mock-archive-10000.zip'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
        self._run_through_task(commit_queue, expected_stderr)
        def builder_data():
            old_builder_data()
            # Have prototype-chocolate fail only on "MOCK Leopard".
            self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
    "tests": {
        "fast": {
            "dom": {
                "prototype-taco.html": {
                    "expected": "PASS",
                    "actual": "PASS TEXT",
                    "is_unexpected": true
                },
                "prototype-chocolate.html": {
                    "expected": "FAIL",
                    "actual": "PASS"
                },
                "prototype-strawberry.html": {
                    "expected": "PASS",
                    "actual": "IMAGE PASS",
                    "is_unexpected": true
                }
            }
        }
    }
});""")
            return self.command._builder_data
Example #8
    def test_failed_archive(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            None,
            None,
            ScriptError("MOCK tests failure"),
        ])
        commit_queue.test_results = lambda: LayoutTestResults([])
        # It's possible for the delegate to fail to archive layout tests; don't
        # try to report flaky tests when that happens.
        commit_queue.archive_last_test_results = lambda patch: None
        expected_stderr = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
        self._run_through_task(commit_queue, expected_stderr)
 def _create_layout_test_results(self):
     results_path = self._host.filesystem.join(self._results_directory,
                                               "full_results.json")
     results_html = self._read_file_contents(results_path)
     if not results_html:
         return None
     return LayoutTestResults.results_from_string(results_html)
Example #10
    def _should_defer_patch_or_throw(self, failures_with_patch,
                                     results_archive_for_failures_with_patch,
                                     script_error, failure_id):
        self._build_and_test_without_patch()
        clean_tree_results = self._delegate.test_results()

        if clean_tree_results.did_exceed_test_failure_limit():
            # We cannot know whether the failures we saw in the test runs with the patch are expected.
            return True

        failures_introduced_by_patch = (
            frozenset(failures_with_patch) -
            frozenset(clean_tree_results.failing_test_results()))
        if failures_introduced_by_patch:
            self.failure_status_id = failure_id
            # report_failure will either throw or return False.
            return not self.report_failure(
                results_archive_for_failures_with_patch,
                LayoutTestResults(failures_introduced_by_patch,
                                  did_exceed_test_failure_limit=False),
                script_error)

        # In this case, we know that all of the failures that we saw with the patch were
        # also present without the patch, so we don't need to defer.
        return False
    def test_parse_layout_test_results(self):
        failures = [test_failures.FailureMissingResult(), test_failures.FailureMissingImageHash(), test_failures.FailureMissingImage()]
        testname = 'fast/repaint/no-caret-repaint-in-non-content-editable-element.html'
        expected_results = [test_results.TestResult(testname, failures)]

        results = LayoutTestResults._parse_results_html(self._example_results_html)
        self.assertEqual(expected_results, results)
Example #12
 def results_from_patch_test_run(self, patch):
     return LayoutTestResults(
         [test_results.TestResult(
             "mock_test_name.html",
             failures=[test_failures.FailureTextMismatch()])],
         did_exceed_test_failure_limit=False)
 def test_failing_tests_message(self):
     ews = TestEWS()
     results = lambda a: LayoutTestResults(
         [test_results.TestResult("foo.html", failures=[test_failures.FailureTextMismatch()]),
          test_results.TestResult("bar.html", failures=[test_failures.FailureTextMismatch()])],
         did_exceed_test_failure_limit=False)
     message = "New failing tests:\nfoo.html\nbar.html"
     self._test_message(ews, results, message)
Example #14
 def test_get_failing_results_dict_some_failing_results(self):
     self.host.buildbot.set_results(
         Build('mac', 123),
         LayoutTestResults({
             'tests': {
                 'fake': {
                     'test.html': {
                         'failing-test.html': {
                             'expected': 'PASS',
                             'actual': 'IMAGE',
                             'is_unexpected': True,
                         },
                     },
                 },
             },
         }))
     line_adder = W3CExpectationsLineAdder(self.host)
     self.assertEqual(
         line_adder.get_failing_results_dict(Build('mac', 123)), {
             'fake/test.html/failing-test.html': {
                 'Mac': {
                     'actual': 'IMAGE',
                     'expected': 'PASS',
                     'bug': 'crbug.com/626703',
                 },
             },
         })
    def test_rebaseline_test_passes_on_all_builders(self):
        self._setup_mock_build_data()

        self.tool.buildbot.set_results(
            Build('MOCK Win7'),
            LayoutTestResults({
                "tests": {
                    "userscripts": {
                        "first-test.html": {
                            "expected": "NEEDSREBASELINE",
                            "actual": "PASS"
                        }
                    }
                }
            }))

        self._write(self.mac_expectations_path,
                    "Bug(x) userscripts/first-test.html [ Failure ]\n")
        self._write("userscripts/first-test.html", "Dummy test contents")
        self.command.rebaseline(self.options(), {
            "userscripts/first-test.html": {
                Build("MOCK Win7"): ["txt", "png"]
            }
        })

        self.assertEqual(self.tool.executive.calls, [])
 def test_results(self):
     # Doesn't make sense to ask for the test_results until the tests have run at least once.
     assert self._test_run_counter >= 0
     failures_for_run = self._test_failure_plan[self._test_run_counter]
     assert isinstance(failures_for_run, list)
     results = LayoutTestResults(
         test_results=map(self._mock_test_result, failures_for_run),
         did_exceed_test_failure_limit=(len(failures_for_run) >= 10))
     return results
Example #17
 def test_latest_layout_test_results(self):
     self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults(
         [
             self._mock_test_result(testname)
             for testname in ["test1", "test2"]
         ])
     self.builder.accumulated_results_url = lambda: "http://dummy_url.org"
     self.assertTrue(self.builder.latest_layout_test_results())
Example #18
    def fetch_layout_test_results(self, results_url):
        # FIXME: This should cache that the result was a 404 and stop hitting the network.
        results_file = NetworkTransaction(convert_404_to_None=True).run(lambda: self._fetch_file_from_results(results_url, "full_results.json"))
        if not results_file:
            results_file = NetworkTransaction(convert_404_to_None=True).run(lambda: self._fetch_file_from_results(results_url, "results.html"))

        # results_from_string accepts either ORWT html or NRWT json.
        return LayoutTestResults.results_from_string(results_file)
 def test_missing_unit_test_results_path(self):
     host = MockHost()
     reader = LayoutTestResultsReader(host, "/mock-results", "/var/logs")
     reader._create_layout_test_results = lambda: LayoutTestResults([])
     reader._create_unit_test_results = lambda: None
     # layout_test_results shouldn't raise even if the unit tests xml file is missing.
     self.assertIsNotNone(reader.results())
     self.assertEqual(reader.results().failing_tests(), [])
Example #21
 def test_missing_unit_test_results_path(self):
     tool = MockTool()
     tool.port().unit_tests_results_path = lambda: None
     reader = LayoutTestResultsReader(tool, "/var/logs")
     reader._create_layout_test_results = lambda: LayoutTestResults([])
     # layout_test_results shouldn't raise even if the unit tests xml file is missing.
     self.assertNotEquals(reader.results(), None)
     self.assertEquals(reader.results().failing_tests(), [])
Example #22
 def fetch_layout_test_results(self, results_url):
     """Returns a LayoutTestResults object for results fetched from a given URL."""
     results_file = NetworkTransaction(convert_404_to_None=True).run(
         lambda: self._fetch_file(results_url, "failing_results.json"))
     revision = NetworkTransaction(convert_404_to_None=True).run(
         lambda: self._fetch_file(results_url, "LAST_CHANGE"))
     if not revision:
         results_file = None
     return LayoutTestResults.results_from_string(results_file, revision)
Example #23
 def test_layout_test_results(self):
     reader = LayoutTestResultsReader(MockHost(), "/mock-results", "/var/logs")
     reader._read_file_contents = lambda path: None
     self.assertIsNone(reader.results())
     reader._read_file_contents = lambda path: ""
     self.assertIsNone(reader.results())
     reader._create_layout_test_results = lambda: LayoutTestResults(test_results=[], did_exceed_test_failure_limit=False)
     results = reader.results()
     self.assertIsNotNone(results)
Example #24
 def test_actual_results(self):
     results = LayoutTestResults.results_from_string(
         self.example_full_results_json)
     self.assertEqual(
         results.actual_results("fast/dom/prototype-banana.html"), "PASS")
     self.assertEqual(
         results.actual_results("fast/dom/prototype-taco.html"),
         "PASS TEXT")
     self.assertEqual(results.actual_results("nonexistant.html"), "")
 def test_unexpected_mismatch_results(self):
     results = LayoutTestResults.results_from_string(self.example_full_results_json)
     self.assertEqual(
         [r.test_name() for r in results.unexpected_mismatch_results()],
         [
             'fast/dom/prototype-inheritance.html',
             'fast/dom/prototype-taco.html',
             'svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html'
         ])
 def test_layout_test_results(self):
     reader = LayoutTestResultsReader(MockHost(), "/mock-results", "/var/logs")
     reader._read_file_contents = lambda path: None
     self.assertIsNone(reader.results())
     reader._read_file_contents = lambda path: ""
     self.assertIsNone(reader.results())
     reader._create_layout_test_results = lambda: LayoutTestResults([])
     results = reader.results()
     self.assertIsNotNone(results)
     self.assertEqual(results.failure_limit_count(), 30)  # This value matches RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT
Example #28
 def test_unexpected_mismatch_results(self):
     results = LayoutTestResults.results_from_string(
         self.example_full_results_json)
     self.assertEqual([
         r.test_name() for r in results.unexpected_mismatch_results()
     ], [
         'fast/dom/prototype-inheritance.html',
         'fast/dom/prototype-taco.html',
         'svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html'
     ])
Example #29
 def fetch_layout_test_results(self, results_url):
     # FIXME: This should cache that the result was a 404 and stop hitting the network.
     results_file = NetworkTransaction(convert_404_to_None=True).run(
         lambda: self._fetch_file_from_results(results_url,
                                               "failing_results.json"))
     revision = NetworkTransaction(convert_404_to_None=True).run(
         lambda: self._fetch_file_from_results(results_url, "LAST_CHANGE"))
     if not revision:
         results_file = None
     return LayoutTestResults.results_from_string(results_file, revision)
 def _mock_fetch_build(build_number):
     build = Build(
         builder=self.builder,
         build_number=build_number,
         revision=build_number + 1000,
         is_green=build_number < 4
     )
     results = [self._mock_test_result(testname) for testname in failure(build_number)]
     build._layout_test_results = LayoutTestResults(results)
     return build
Example #31
    def test_parse_layout_test_results(self):
        failures = [
            test_failures.FailureMissingResult(),
            test_failures.FailureMissingImageHash(),
            test_failures.FailureMissingImage()
        ]
        testname = 'fast/repaint/no-caret-repaint-in-non-content-editable-element.html'
        expected_results = [test_results.TestResult(testname, failures)]

        results = LayoutTestResults._parse_results_html(
            self._example_results_html)
        self.assertEqual(expected_results, results)
Example #32
 def test_didnt_run_as_expected_results(self):
     results = LayoutTestResults.results_from_string(self.example_full_results_json)
     self.assertEqual(
         [r.test_name() for r in results.didnt_run_as_expected_results()],
         [
             'fast/dom/prototype-banana.html',
             'fast/dom/prototype-crashy.html',
             'fast/dom/prototype-inheritance.html',
             'fast/dom/prototype-newtest.html',
             'fast/dom/prototype-strawberry.html',
             'fast/dom/prototype-taco.html',
             'svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html',
         ])
    def test_execute_stuck_on_alternate_rebaseline_branch(self):
        def blame(_):
            return """
6469e754a1 path/to/TestExpectations                   (<*****@*****.**> 2013-04-28 04:52:41 +0000   13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
        self.tool.scm().blame = blame

        test_port = self.tool.port_factory.get('test')

        self.tool.buildbot.set_results(Build('MOCK Win7'), LayoutTestResults({
            "tests": {
                "fast": {
                    "dom": {
                        "prototype-taco.html": {
                            "expected": "FAIL",
                            "actual": "PASS",
                            "is_unexpected": True
                        }
                    }
                }
            }
        }))

        self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")

        self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")

        self.tool.executive = MockLineRemovingExecutive()

        self.tool.builders = BuilderList({
            "MOCK Win7": {"port_name": "test-win-win7", "specifiers": ["Win7", "Release"]},
        })
        old_branch_name = self.tool.scm().current_branch_or_ref
        try:
            self.command.tree_status = lambda: 'open'
            self.tool.scm().current_branch_or_ref = lambda: 'auto-rebaseline-alt-temporary-branch'
            self._execute_with_mock_options()
            self.assertEqual(self.tool.executive.calls, [
                ['git', 'cl', 'upload', '-f'],
                ['git', 'pull'],
                ['git', 'cl', 'land', '-f', '-v'],
                ['git', 'config', 'branch.auto-rebaseline-temporary-branch.rietveldissue'],
            ])

            self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Mac Win10 ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
        finally:
            self.tool.scm().current_branch_or_ref = old_branch_name
 def _tests_to_update(self, attachment, bot_type=None):
     _log.info("Processing attachment " + str(attachment.id()))
     zip_file = self.unzip(attachment.contents())
     results = LayoutTestResults.results_from_string(
         zip_file.read("full_results.json"))
     results_to_update = [
         result.test_name for result in results.failing_test_results()
         if result.type == test_expectations.TEXT
     ]
     return {
         result: zip_file.read(
             TestResultWriter.actual_filename(result, self.filesystem))
         for result in results_to_update
     }
    def test_execute_test_passes_everywhere(self):
        def blame(_):
            return """
6469e754a1 path/to/TestExpectations                   (<*****@*****.**> 2013-04-28 04:52:41 +0000   13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
        self.tool.scm().blame = blame

        test_port = self.tool.port_factory.get('test')

        for builder in ['MOCK Mac10.10', 'MOCK Mac10.11']:
            self.tool.buildbot.set_results(Build(builder), LayoutTestResults({
                "tests": {
                    "fast": {
                        "dom": {
                            "prototype-taco.html": {
                                "expected": "FAIL",
                                "actual": "PASS",
                                "is_unexpected": True
                            }
                        }
                    }
                }
            }))

        self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")

        self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")

        self.tool.executive = MockLineRemovingExecutive()

        self.tool.builders = BuilderList({
            "MOCK Mac10.10": {"port_name": "test-mac-mac10.10", "specifiers": ["Mac10.10", "Release"]},
            "MOCK Mac10.11": {"port_name": "test-mac-mac10.11", "specifiers": ["Mac10.11", "Release"]},
        })

        self.command.tree_status = lambda: 'open'
        self._execute_with_mock_options()
        self.assertEqual(self.tool.executive.calls, [
            ['git', 'cl', 'upload', '-f'],
            ['git', 'pull'],
            ['git', 'cl', 'land', '-f', '-v'],
            ['git', 'config', 'branch.auto-rebaseline-temporary-branch.rietveldissue'],
        ])

        # The mac ports should both be removed since they're the only ones in builders._exact_matches.
        self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
    def test_rebaseline_test_passes_everywhere(self):
        test_port = self.tool.port_factory.get('test')

        for builder in ['MOCK Mac10.10', 'MOCK Mac10.11']:
            self.tool.buildbot.set_results(
                Build(builder),
                LayoutTestResults({
                    "tests": {
                        "fast": {
                            "dom": {
                                "prototype-taco.html": {
                                    "expected": "FAIL",
                                    "actual": "PASS",
                                    "is_unexpected": True
                                }
                            }
                        }
                    }
                }))

        self.tool.filesystem.write_text_file(
            test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ Rebaseline ]
""")

        self._write_test_file(test_port, 'fast/dom/prototype-taco.html',
                              "Dummy test contents")

        self.tool.executive = MockLineRemovingExecutive()

        self.tool.builders = BuilderList({
            "MOCK Mac10.10": {
                "port_name": "test-mac-mac10.10",
                "specifiers": ["Mac10.10", "Release"]
            },
            "MOCK Mac10.11": {
                "port_name": "test-mac-mac10.11",
                "specifiers": ["Mac10.11", "Release"]
            },
        })

        self.command.execute(self.options(), [], self.tool)
        self.assertEqual(self.tool.executive.calls, [])

        # The mac ports should both be removed since they're the only ones in the builder list.
        self.assertEqual(
            self.tool.filesystem.read_text_file(
                test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ Rebaseline ]
""")
Example #37
    def _setup_mock_builder_data(self):
        data = LayoutTestResults.results_from_string("""ADD_RESULTS({
    "tests": {
        "userscripts": {
            "first-test.html": {
                "expected": "PASS",
                "actual": "IMAGE+TEXT"
            },
            "second-test.html": {
                "expected": "FAIL",
                "actual": "IMAGE+TEXT"
            }
        }
    }
});""")
        for builder in ['MOCK builder', 'MOCK builder (Debug)', 'WebKit Mac10.7']:
            self.command._builder_data[builder] = data
Example #38
 def fetch_layout_test_results(self, results_url):
     # FIXME: This should cache that the result was a 404 and stop hitting the network.
     results_file = NetworkTransaction(convert_404_to_None=True).run(
         lambda: self._fetch_file_from_results(results_url, "failing_results.json"))
     return LayoutTestResults.results_from_string(results_file)
Example #39
 def test_tests_matching_failure_types(self):
     results = LayoutTestResults.results_from_string(ORWTResultsHTMLParserTest._example_results_html_with_failing_tests)
     failing_tests = results.tests_matching_failure_types([test_failures.FailureTextMismatch])
     self.assertEqual(len(results.failing_tests()), 2)
 def test_actual_results(self):
     results = LayoutTestResults.results_from_string(self.example_full_results_json)
     self.assertEqual(results.result_for_test("fast/dom/prototype-banana.html").actual_results(), "PASS")
     self.assertEqual(results.result_for_test("fast/dom/prototype-taco.html").actual_results(), "PASS TEXT")
     self.assertFalse(results.result_for_test("nonexistant.html"))
Example #41
 def test_set_failure_limit_count(self):
     results = LayoutTestResults([])
     self.assertEquals(results.failure_limit_count(), None)
     results.set_failure_limit_count(10)
     self.assertEquals(results.failure_limit_count(), 10)
 def test_parse_layout_test_results(self):
     results = LayoutTestResults._parse_results_html(self._example_results_html)
     self.assertEqual(self._expected_layout_test_results, results)
 def test_chromium_revision(self):
     self.assertEqual(LayoutTestResults.results_from_string(self.example_full_results_json).chromium_revision(), 1234)
 def _create_layout_test_results(self):
     results_path = self._tool.port().layout_tests_results_path()
     results_html = self._read_file_contents(results_path)
     if not results_html:
         return None
     return LayoutTestResults.results_from_string(results_html)
 def _create_layout_test_results(self):
     results_path = self._host.filesystem.join(self._results_directory, "full_results.json")
     results_html = self._read_file_contents(results_path)
     if not results_html:
         return None
     return LayoutTestResults.results_from_string(results_html)
Example #46
 def layout_test_results(self):
     if not self._layout_test_results:
         # FIXME: This should cache that the result was a 404 and stop hitting the network.
         self._layout_test_results = LayoutTestResults.results_from_string(self._fetch_results_html())
     return self._layout_test_results
Example #47
 def fetch_results(self, build):
     return self._canned_results.get(
         build,
         LayoutTestResults.results_from_string(LayoutTestResultsTest.example_full_results_json))
 def test_missing_baseline(self):
     results = LayoutTestResults.results_from_string(self.example_full_results_json)
     missing_results = results.missing_results()
     self.assertEqual(len(missing_results), 1)
     self.assertEqual(missing_results[0].test_name(), "fast/dom/prototype-newtest.html")
Example #49
    def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(result):
            test = result.test_name()
            actual = result.actual_results().split(" ")
            expected = result.expected_results().split(" ")

            if result.did_run_as_expected():
                # Don't print anything for tests that ran as expected.
                return

            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
                else:
                    add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
            elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']:
                add_to_dict_of_lists(regressions, actual[0], test)
            elif len(actual) > 1 and bool(set(actual[1:]) & set(expected)):
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, actual[0], test)

        test_results = LayoutTestResults(summarized_results)
        test_results.for_each_test(add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._print("")
        if len(passes):
            for key, tests in passes.iteritems():
                self._print("%s: (%d)" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._print("  %s" % test)
                self._print("")
            self._print("")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result_type = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Unexpected flakiness: %s (%d)" % (descriptions[result_type], len(tests)))
                tests.sort()

                for test in tests:
                    result = test_results.result_for_test(test)
                    actual = result.actual_results().split(" ")
                    expected = result.expected_results().split(" ")
                    # FIXME: clean this up once the old syntax is gone
                    new_expectations_list = [TestExpectationLine.inverted_expectation_tokens[exp]
                                             for exp in list(set(actual) | set(expected))]
                    self._print("  %s [ %s ]" % (test, " ".join(new_expectations_list)))
                self._print("")
            self._print("")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result_type = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Regressions: Unexpected %s (%d)" % (descriptions[result_type], len(tests)))
                tests.sort()
                for test in tests:
                    result = test_results.result_for_test(test)
                    actual = result.actual_results().split(" ")
                    expected = result.expected_results().split(" ")
                    new_expectations_list = [TestExpectationLine.inverted_expectation_tokens[exp] for exp in actual]
                    self._print("  %s [ %s ]" % (test, " ".join(new_expectations_list)))
                self._print("")

        if len(summarized_results['tests']) and self.debug_logging:
            self._print("%s" % ("-" * 78))
 def test_results_from_string(self):
     self.assertEqual(LayoutTestResults.results_from_string(None), None)
     self.assertEqual(LayoutTestResults.results_from_string(""), None)
     results = LayoutTestResults.results_from_string(self._example_results_html)
     self.assertEqual(len(results.failing_tests()), 0)
Example #51
 def latest_layout_test_results(self):
     return LayoutTestResults.results_from_string(layouttestresults_unittest.LayoutTestResultsTest.example_full_results_json)
Example #52
 def fetch_layout_test_results(self, _):
     return LayoutTestResults.results_from_string(LayoutTestResultsTest.example_full_results_json)
Example #53
 def test_results_from_string(self):
     self.assertEqual(LayoutTestResults.results_from_string(None), None)
     self.assertEqual(LayoutTestResults.results_from_string(""), None)
 def test_results_from_string(self):
     self.assertIsNone(LayoutTestResults.results_from_string(None))
     self.assertIsNone(LayoutTestResults.results_from_string(""))
Example #55
        def builder_data():
            self.command._builder_data['MOCK SnowLeopard'] = self.command._builder_data['MOCK Leopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
    "tests": {
        "userscripts": {
            "another-test.html": {
                "expected": "PASS",
                "actual": "PASS TEXT"
            },
            "images.svg": {
                "expected": "FAIL",
                "actual": "IMAGE+TEXT"
            }
        }
    }
});""")
            return self.command._builder_data
Example #56
        def builder_data():
            self.command._builder_data['MOCK Leopard'] = self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
    "tests": {
        "fast": {
            "dom": {
                "prototype-taco.html": {
                    "expected": "PASS",
                    "actual": "PASS TEXT",
                    "is_unexpected": true
                }
            }
        }
    }
});""")
            return self.command._builder_data