Example #1
    def test_failing_tests_message(self):
        # Needed to define port_name, used in AbstractEarlyWarningSystem.__init__
        class TestEWS(AbstractEarlyWarningSystem):
            port_name = "win"  # Needs to be a port which port/factory understands.

        ews = TestEWS()
        ews.bind_to_tool(MockTool())
        ews._options = MockOptions(port=None, confirm=False)
        OutputCapture().assert_outputs(
            self,
            ews.begin_work_queue,
            expected_logs=self._default_begin_work_queue_logs(ews.name))
        task = Mock()
        task.results_from_patch_test_run = lambda a: LayoutTestResults(
            [
                test_results.TestResult("foo.html",
                                        failures=
                                        [test_failures.FailureTextMismatch()]),
                test_results.TestResult(
                    "bar.html", failures=[test_failures.FailureTextMismatch()])
            ],
            did_exceed_test_failure_limit=False)
        task.results_from_test_run_without_patch = lambda a: LayoutTestResults(
            [], did_exceed_test_failure_limit=False)
        patch = ews._tool.bugs.fetch_attachment(10000)
        self.assertMultiLineEqual(ews._failing_tests_message(task, patch),
                                  "New failing tests:\nbar.html\nfoo.html")
Example #2
 def results_from_patch_test_run(self, patch):
     return LayoutTestResults([
         test_results.TestResult(
             "mock_test_name.html",
             failures=[test_failures.FailureTextMismatch()])
     ],
                              did_exceed_test_failure_limit=False)
Example #3
 def test_get_failing_results_dict_some_failing_results(self):
     self.host.buildbot.set_results(
         Build('mac', 123),
         LayoutTestResults({
             'tests': {
                 'fake': {
                     'test.html': {
                         'failing-test.html': {
                             'expected': 'PASS',
                             'actual': 'IMAGE',
                             'is_unexpected': True,
                         },
                     },
                 },
             },
         }))
     line_adder = W3CExpectationsLineAdder(self.host)
     self.assertEqual(
         line_adder.get_failing_results_dict(Build('mac', 123)), {
             'fake/test.html/failing-test.html': {
                 'Mac': {
                     'actual': 'IMAGE',
                     'expected': 'PASS',
                     'bug': 'crbug.com/626703',
                 },
             },
         })
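
This example passes LayoutTestResults the Chromium-style nested "tests" trie and expects get_failing_results_dict to key its output by the flattened path "fake/test.html/failing-test.html". The following is a small, self-contained sketch of that flattening, illustrating the data shape only (a simplified stand-in, not the real W3CExpectationsLineAdder implementation):

# Simplified stand-in: walk the nested 'tests' trie and key each leaf
# (a dict carrying an 'actual' result) by its slash-joined path.
def flatten_tests(trie, prefix=""):
    flat = {}
    for name, node in trie.items():
        path = prefix + "/" + name if prefix else name
        if "actual" in node:      # leaf: an individual test result
            flat[path] = node
        else:                     # interior node: recurse
            flat.update(flatten_tests(node, path))
    return flat

trie = {"fake": {"test.html": {"failing-test.html": {
    "expected": "PASS", "actual": "IMAGE", "is_unexpected": True}}}}
print(flatten_tests(trie))
# {'fake/test.html/failing-test.html': {'expected': 'PASS', 'actual': 'IMAGE', 'is_unexpected': True}}
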
Example #4
 def test_failing_tests_message(self):
     ews = TestEWS()
     results = lambda a: LayoutTestResults([test_results.TestResult("foo.html", failures=[test_failures.FailureTextMismatch()]),
                                             test_results.TestResult("bar.html", failures=[test_failures.FailureTextMismatch()])],
                                             did_exceed_test_failure_limit=False)
     message = "New failing tests:\nfoo.html\nbar.html"
     self._test_message(ews, results, message)
Example #5
    def test_failed_archive(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            None,
            None,
            ScriptError("MOCK tests failure"),
        ])
        commit_queue.test_results = lambda: LayoutTestResults([])
        # It's possible for the delegate to fail to archive layout tests; don't
        # try to report flaky tests when that happens.
        commit_queue.archive_last_test_results = lambda patch: None
        expected_stderr = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
        self._run_through_task(commit_queue, expected_stderr)
Example #6
    def test_flaky_test_failure(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            None,
            None,
            ScriptError("MOCK tests failure"),
        ])
        # CommitQueueTask will only report flaky tests if we successfully parsed
        # results.html and returned a LayoutTestResults object, so we fake one.
        commit_queue.test_results = lambda: LayoutTestResults([])
        expected_stderr = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
report_flaky_tests: patch='10000' flaky_tests='[]' archive='mock-archive-10000.zip'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
        self._run_through_task(commit_queue, expected_stderr)
Example #7
    def _should_defer_patch_or_throw(self, failures_with_patch,
                                     results_archive_for_failures_with_patch,
                                     script_error, failure_id):
        self._build_and_test_without_patch()
        clean_tree_results = self._delegate.test_results()

        if clean_tree_results.did_exceed_test_failure_limit():
            # We cannot know whether the failures we saw in the test runs with the patch are expected.
            return True

        failures_introduced_by_patch = frozenset(
            failures_with_patch) - frozenset(
                clean_tree_results.failing_test_results())
        if failures_introduced_by_patch:
            self.failure_status_id = failure_id
            # report_failure will either throw or return False.
            return not self.report_failure(
                results_archive_for_failures_with_patch,
                LayoutTestResults(failures_introduced_by_patch,
                                  did_exceed_test_failure_limit=False),
                script_error)

        # In this case, we know that all of the failures that we saw with the patch were
        # also present without the patch, so we don't need to defer.
        return False
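
The deferral decision above is plain set arithmetic: the failures seen with the patch, minus the failures that also show up on a clean tree, are the ones blamed on the patch. A tiny worked sketch of that step with hypothetical test names (the real code compares TestResult objects, not strings):

# Set-difference step from _should_defer_patch_or_throw, in isolation.
failures_with_patch = frozenset(["foo.html", "bar.html"])
clean_tree_failures = frozenset(["bar.html"])   # also flaky without the patch

failures_introduced_by_patch = failures_with_patch - clean_tree_failures
print(sorted(failures_introduced_by_patch))     # ['foo.html']
# If the difference is empty, every failure was pre-existing and the task
# neither defers nor reports a failure.
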
Example #8
    def test_rebaseline_test_passes_on_all_builders(self):
        self._setup_mock_build_data()

        self.tool.buildbot.set_results(
            Build('MOCK Win7'),
            LayoutTestResults({
                "tests": {
                    "userscripts": {
                        "first-test.html": {
                            "expected": "NEEDSREBASELINE",
                            "actual": "PASS"
                        }
                    }
                }
            }))

        self._write(self.mac_expectations_path,
                    "Bug(x) userscripts/first-test.html [ Failure ]\n")
        self._write("userscripts/first-test.html", "Dummy test contents")
        self.command.rebaseline(self.options(), {
            "userscripts/first-test.html": {
                Build("MOCK Win7"): ["txt", "png"]
            }
        })

        self.assertEqual(self.tool.executive.calls, [])
Example #9
 def test_results(self):
     # Doesn't make sense to ask for the test_results until the tests have run at least once.
     assert(self._test_run_counter >= 0)
     failures_for_run = self._test_failure_plan[self._test_run_counter]
     assert(isinstance(failures_for_run, list))
     results = LayoutTestResults(test_results=map(self._mock_test_result, failures_for_run), did_exceed_test_failure_limit=(len(failures_for_run) >= 10))
     return results
Example #10
 def layout_test_results(self):
     # Doesn't make sense to ask for the layout_test_results until the tests have run at least once.
     assert(self._test_run_counter >= 0)
     failures_for_run = self._test_failure_plan[self._test_run_counter]
     results = LayoutTestResults(map(self._mock_test_result, failures_for_run))
     # This makes the results trustable by ExpectedFailures.
     results.set_failure_limit_count(10)
     return results
Example #11
 def test_missing_unit_test_results_path(self):
     host = MockHost()
     reader = LayoutTestResultsReader(host, "/mock-results", "/var/logs")
     reader._create_layout_test_results = lambda: LayoutTestResults([])
     reader._create_unit_test_results = lambda: None
     # layout_test_results shouldn't raise even if the unit-test results XML file is missing.
     self.assertIsNotNone(reader.results())
     self.assertEqual(reader.results().failing_tests(), [])
Example #12
 def test_latest_layout_test_results(self):
     self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults(
         [
             self._mock_test_result(testname)
             for testname in ["test1", "test2"]
         ])
     self.builder.accumulated_results_url = lambda: "http://dummy_url.org"
     self.assertTrue(self.builder.latest_layout_test_results())
Example #13
 def test_missing_unit_test_results_path(self):
     tool = MockTool()
     tool.port().unit_tests_results_path = lambda: None
     reader = LayoutTestResultsReader(tool, "/var/logs")
     reader._create_layout_test_results = lambda: LayoutTestResults([])
     # layout_test_results shouldn't raise even if the unit-test results XML file is missing.
     self.assertNotEquals(reader.results(), None)
     self.assertEquals(reader.results().failing_tests(), [])
Example #14
 def test_layout_test_results(self):
     reader = LayoutTestResultsReader(MockHost(), "/mock-results", "/var/logs")
     reader._read_file_contents = lambda path: None
     self.assertIsNone(reader.results())
     reader._read_file_contents = lambda path: ""
     self.assertIsNone(reader.results())
     reader._create_layout_test_results = lambda: LayoutTestResults(test_results=[], did_exceed_test_failure_limit=False)
     results = reader.results()
     self.assertIsNotNone(results)
Example #15
 def test_layout_test_results(self):
     reader = LayoutTestResultsReader(MockHost(), "/mock-results", "/var/logs")
     reader._read_file_contents = lambda path: None
     self.assertIsNone(reader.results())
     reader._read_file_contents = lambda path: ""
     self.assertIsNone(reader.results())
     reader._create_layout_test_results = lambda: LayoutTestResults([])
     results = reader.results()
     self.assertIsNotNone(results)
     self.assertEqual(results.failure_limit_count(), 30)  # This value matches RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT
Example #16
 def _mock_fetch_build(build_number):
     build = Build(
         builder=self.builder,
         build_number=build_number,
         revision=build_number + 1000,
         is_green=build_number < 4
     )
     results = [self._mock_test_result(testname) for testname in failure(build_number)]
     build._layout_test_results = LayoutTestResults(results)
     return build
Example #17
    def test_execute_stuck_on_alternate_rebaseline_branch(self):
        def blame(_):
            return """
6469e754a1 path/to/TestExpectations                   (<*****@*****.**> 2013-04-28 04:52:41 +0000   13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
        self.tool.scm().blame = blame

        test_port = self.tool.port_factory.get('test')

        self.tool.buildbot.set_results(Build('MOCK Win7'), LayoutTestResults({
            "tests": {
                "fast": {
                    "dom": {
                        "prototype-taco.html": {
                            "expected": "FAIL",
                            "actual": "PASS",
                            "is_unexpected": True
                        }
                    }
                }
            }
        }))

        self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")

        self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")

        self.tool.executive = MockLineRemovingExecutive()

        self.tool.builders = BuilderList({
            "MOCK Win7": {"port_name": "test-win-win7", "specifiers": ["Win7", "Release"]},
        })
        old_branch_name = self.tool.scm().current_branch_or_ref
        try:
            self.command.tree_status = lambda: 'open'
            self.tool.scm().current_branch_or_ref = lambda: 'auto-rebaseline-alt-temporary-branch'
            self._execute_with_mock_options()
            self.assertEqual(self.tool.executive.calls, [
                ['git', 'cl', 'upload', '-f'],
                ['git', 'pull'],
                ['git', 'cl', 'land', '-f', '-v'],
                ['git', 'config', 'branch.auto-rebaseline-temporary-branch.rietveldissue'],
            ])

            self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Mac Win10 ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
        finally:
            self.tool.scm().current_branch_or_ref = old_branch_name
Example #18
    def test_execute_test_passes_everywhere(self):
        def blame(_):
            return """
6469e754a1 path/to/TestExpectations                   (<*****@*****.**> 2013-04-28 04:52:41 +0000   13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
        self.tool.scm().blame = blame

        test_port = self.tool.port_factory.get('test')

        for builder in ['MOCK Mac10.10', 'MOCK Mac10.11']:
            self.tool.buildbot.set_results(Build(builder), LayoutTestResults({
                "tests": {
                    "fast": {
                        "dom": {
                            "prototype-taco.html": {
                                "expected": "FAIL",
                                "actual": "PASS",
                                "is_unexpected": True
                            }
                        }
                    }
                }
            }))

        self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")

        self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")

        self.tool.executive = MockLineRemovingExecutive()

        self.tool.builders = BuilderList({
            "MOCK Mac10.10": {"port_name": "test-mac-mac10.10", "specifiers": ["Mac10.10", "Release"]},
            "MOCK Mac10.11": {"port_name": "test-mac-mac10.11", "specifiers": ["Mac10.11", "Release"]},
        })

        self.command.tree_status = lambda: 'open'
        self._execute_with_mock_options()
        self.assertEqual(self.tool.executive.calls, [
            ['git', 'cl', 'upload', '-f'],
            ['git', 'pull'],
            ['git', 'cl', 'land', '-f', '-v'],
            ['git', 'config', 'branch.auto-rebaseline-temporary-branch.rietveldissue'],
        ])

        # The mac ports should both be removed since they're the only ones in builders._exact_matches.
        self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
Example #19
    def test_rebaseline_test_passes_everywhere(self):
        test_port = self.tool.port_factory.get('test')

        for builder in ['MOCK Mac10.10', 'MOCK Mac10.11']:
            self.tool.buildbot.set_results(
                Build(builder),
                LayoutTestResults({
                    "tests": {
                        "fast": {
                            "dom": {
                                "prototype-taco.html": {
                                    "expected": "FAIL",
                                    "actual": "PASS",
                                    "is_unexpected": True
                                }
                            }
                        }
                    }
                }))

        self.tool.filesystem.write_text_file(
            test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ Rebaseline ]
""")

        self._write_test_file(test_port, 'fast/dom/prototype-taco.html',
                              "Dummy test contents")

        self.tool.executive = MockLineRemovingExecutive()

        self.tool.builders = BuilderList({
            "MOCK Mac10.10": {
                "port_name": "test-mac-mac10.10",
                "specifiers": ["Mac10.10", "Release"]
            },
            "MOCK Mac10.11": {
                "port_name": "test-mac-mac10.11",
                "specifiers": ["Mac10.11", "Release"]
            },
        })

        self.command.execute(self.options(), [], self.tool)
        self.assertEqual(self.tool.executive.calls, [])

        # The mac ports should both be removed since they're the only ones in the builder list.
        self.assertEqual(
            self.tool.filesystem.read_text_file(
                test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ Rebaseline ]
""")
Example #20
 def test_get_failing_results_dict_only_passing_results(self):
     self.host.buildbot.set_results(Build('mac', 123), LayoutTestResults({
         'tests': {
             'fake': {
                 'test.html': {
                     'passing-test.html': {
                         'expected': 'PASS',
                         'actual': 'PASS',
                     },
                 },
             },
         },
     }))
     line_adder = W3CExpectationsLineAdder(self.host)
     self.assertEqual(line_adder.get_failing_results_dict(Build('mac', 123)), {})
Example #21
    def test_execute_git_cl_hangs(self):
        def blame(_):
            return """
6469e754a1 path/to/TestExpectations                   (<*****@*****.**> 2013-04-28 04:52:41 +0000   13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
        self.tool.scm().blame = blame

        test_port = self.tool.port_factory.get('test')

        # Have prototype-taco.html fail (text mismatch) only on "MOCK Mac10.11".
        self.tool.buildbot.set_results(Build('MOCK Mac10.11'), LayoutTestResults({
            "tests": {
                "fast": {
                    "dom": {
                        "prototype-taco.html": {
                            "expected": "PASS",
                            "actual": "PASS TEXT",
                            "is_unexpected": True
                        }
                    }
                }
            }
        }))

        self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")

        self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")

        self.tool.builders = BuilderList({
            "MOCK Mac10.11": {"port_name": "test-mac-mac10.11", "specifiers": ["Mac10.11", "Release"]},
        })

        self.command.SECONDS_BEFORE_GIVING_UP = 0
        self.command.tree_status = lambda: 'open'
        self.tool.executive = MockExecutive()
        self.tool.executive.calls = []
        self._execute_with_mock_options()

        self.assertEqual(self.tool.executive.calls, [
            [['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt',
              '--builder', 'MOCK Mac10.11', '--test', 'fast/dom/prototype-taco.html']],
            [['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt',
              '--builder', 'MOCK Mac10.11', '--test', 'fast/dom/prototype-taco.html']],
            [['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt', 'fast/dom/prototype-taco.html']],
            ['git', 'cl', 'upload', '-f'],
        ])
Example #22
        def _mock_fetch_build(build_number):
            build = Build(builder=self.builder,
                          build_number=build_number,
                          revision=build_number + 1000,
                          is_green=build_number < 4)
            results = [
                self._mock_test_result(testname)
                for testname in failure(build_number)
            ]
            layout_test_results = LayoutTestResults(
                test_results=results, did_exceed_test_failure_limit=False)

            def mock_layout_test_results():
                return layout_test_results

            build.layout_test_results = mock_layout_test_results
            return build
Example #23
    def _basic_execute_test(self, expected_executive_calls, auth_refresh_token_json=None, commit_author=None, dry_run=False):
        def blame(_):
            return """
6469e754a1 path/to/TestExpectations                   (<*****@*****.**> 2013-04-28 04:52:41 +0000   13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
        self.tool.scm().blame = blame

        test_port = self.tool.port_factory.get('test')

        for builder in ['MOCK Mac10.10', 'MOCK Mac10.11']:
            self.tool.buildbot.set_results(Build(builder), LayoutTestResults({
                "tests": {
                    "fast": {
                        "dom": {
                            "prototype-taco.html": {
                                "expected": "FAIL",
                                "actual": "PASS",
                                "is_unexpected": True
                            }
                        }
                    }
                }
            }))

        self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")

        self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")

        self.tool.executive = MockLineRemovingExecutive()

        self.tool.builders = BuilderList({
            "MOCK Mac10.10": {"port_name": "test-mac-mac10.10", "specifiers": ["Mac10.10", "Release"]},
            "MOCK Mac10.11": {"port_name": "test-mac-mac10.11", "specifiers": ["Mac10.11", "Release"]},
        })

        self.command.tree_status = lambda: 'open'
        self._execute_with_mock_options(auth_refresh_token_json=auth_refresh_token_json,
                                        commit_author=commit_author, dry_run=dry_run)
        self.assertEqual(self.tool.executive.calls, expected_executive_calls)

        # The mac ports should both be removed since they're the only ones in builders._exact_matches.
        self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
Example #24
 def _setup_mock_build_data(self):
     for builder in ['MOCK Win7', 'MOCK Win7 (dbg)', 'MOCK Mac10.11']:
         self.tool.buildbot.set_results(
             Build(builder),
             LayoutTestResults({
                 "tests": {
                     "userscripts": {
                         "first-test.html": {
                             "expected": "PASS",
                             "actual": "IMAGE+TEXT"
                         },
                         "second-test.html": {
                             "expected": "FAIL",
                             "actual": "IMAGE+TEXT"
                         }
                     }
                 }
             }))
Example #25
    def _gather_baselines(self, results_json):
        # The rebaseline server and its associated JavaScript expect the tests
        # subtree to be key-value pairs instead of hierarchical.
        # FIXME: make the rebaseline server use the hierarchical tree.
        new_tests_subtree = {}

        def gather_baselines_for_test(result):
            if result.did_pass_or_run_as_expected():
                return
            result_dict = result.result_dict()
            result_dict['state'] = STATE_NEEDS_REBASELINE
            result_dict['baselines'] = get_test_baselines(
                result.test_name(), self._test_config)
            new_tests_subtree[result.test_name()] = result_dict

        LayoutTestResults(results_json).for_each_test(
            gather_baselines_for_test)
        results_json['tests'] = new_tests_subtree
Example #26
 def test_latest_layout_test_results(self):
     self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults(
         None)
     self.builder.accumulated_results_url = lambda: "http://dummy_url.org"
     self.assertTrue(self.builder.latest_layout_test_results())
Example #27
    def test_execute(self):
        def blame(_):
            return """
6469e754a1 path/to/TestExpectations                   (<*****@*****.**> 2013-06-14 20:18:46 +0000   11) # Test NeedsRebaseline being in a comment doesn't bork parsing.
6469e754a1 path/to/TestExpectations                   (<*****@*****.**> 2013-06-14 20:18:46 +0000   11) crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Failure ]
6469e754a1 path/to/TestExpectations                   (<*****@*****.**> 2013-04-28 04:52:41 +0000   13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
6469e754a1 path/to/TestExpectations                   (<*****@*****.**> 2013-06-14 20:18:46 +0000   11) crbug.com/24182 [ Mac10.11 ] fast/dom/prototype-strawberry.html [ NeedsRebaseline ]
6469e754a1 path/to/TestExpectations                   (<*****@*****.**> 2013-04-28 04:52:41 +0000   12) crbug.com/24182 fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
624caaaaaa path/to/TestExpectations                   (<*****@*****.**>        2013-04-28 04:52:41 +0000   12) crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
0000000000 path/to/TestExpectations                   (<*****@*****.**>        2013-04-28 04:52:41 +0000   12) crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
"""

        self.tool.scm().blame = blame

        test_port = self.tool.port_factory.get('test')

        # prototype-chocolate fails on "MOCK Mac10.10" and passes
        # (unexpectedly) on "MOCK Mac10.11".
        self.tool.buildbot.set_results(
            Build('MOCK Mac10.11'),
            LayoutTestResults({
                "tests": {
                    "fast": {
                        "dom": {
                            "prototype-taco.html": {
                                "expected": "PASS",
                                "actual": "PASS TEXT",
                                "is_unexpected": True
                            },
                            "prototype-chocolate.html": {
                                "expected": "FAIL",
                                "actual": "PASS"
                            },
                            "prototype-strawberry.html": {
                                "expected": "PASS",
                                "actual": "IMAGE PASS",
                                "is_unexpected": True
                            }
                        }
                    }
                }
            }))
        self.tool.buildbot.set_results(
            Build('MOCK Mac10.10'),
            LayoutTestResults({
                "tests": {
                    "fast": {
                        "dom": {
                            "prototype-taco.html": {
                                "expected": "PASS",
                                "actual": "PASS",
                            },
                            "prototype-chocolate.html": {
                                "expected": "FAIL",
                                "actual": "FAIL"
                            },
                            "prototype-strawberry.html": {
                                "expected": "PASS",
                                "actual": "PASS",
                            }
                        }
                    }
                }
            }))

        self.tool.filesystem.write_text_file(
            test_port.path_to_generic_test_expectations_file(), """
crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Rebaseline ]
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
crbug.com/24182 [ Mac10.11 ] fast/dom/prototype-strawberry.html [ NeedsRebaseline ]
crbug.com/24182 fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
""")

        self._write_test_file(test_port, 'fast/dom/prototype-taco.html',
                              "Dummy test contents")
        self._write_test_file(test_port, 'fast/dom/prototype-strawberry.html',
                              "Dummy test contents")
        self._write_test_file(test_port, 'fast/dom/prototype-chocolate.html',
                              "Dummy test contents")

        self.tool.executive = MockLineRemovingExecutive()

        self.tool.builders = BuilderList({
            "MOCK Mac10.10": {
                "port_name": "test-mac-mac10.10",
                "specifiers": ["Mac10.10", "Release"]
            },
            "MOCK Mac10.11": {
                "port_name": "test-mac-mac10.11",
                "specifiers": ["Mac10.11", "Release"]
            },
        })

        self.command.tree_status = lambda: 'closed'
        self._execute_with_mock_options()
        self.assertEqual(self.tool.executive.calls, [])

        self.command.tree_status = lambda: 'open'
        self.tool.executive.calls = []
        self._execute_with_mock_options()

        self.assertEqual(self.tool.executive.calls, [
            [
                [
                    'python', 'echo', 'copy-existing-baselines-internal',
                    '--suffixes', 'png', '--builder', 'MOCK Mac10.11',
                    '--test', 'fast/dom/prototype-strawberry.html'
                ],
                [
                    'python', 'echo', 'copy-existing-baselines-internal',
                    '--suffixes', 'txt', '--builder', 'MOCK Mac10.11',
                    '--test', 'fast/dom/prototype-taco.html'
                ],
            ],
            [
                [
                    'python', 'echo', 'rebaseline-test-internal', '--suffixes',
                    'png', '--builder', 'MOCK Mac10.11', '--test',
                    'fast/dom/prototype-strawberry.html'
                ],
                [
                    'python', 'echo', 'rebaseline-test-internal', '--suffixes',
                    'txt', '--builder', 'MOCK Mac10.11', '--test',
                    'fast/dom/prototype-taco.html'
                ],
            ],
            [
                [
                    'python', 'echo', 'optimize-baselines', '--suffixes',
                    'png', 'fast/dom/prototype-strawberry.html'
                ],
                [
                    'python', 'echo', 'optimize-baselines', '--suffixes',
                    'txt', 'fast/dom/prototype-taco.html'
                ],
            ],
            ['git', 'cl', 'upload', '-f'],
            ['git', 'pull'],
            ['git', 'cl', 'land', '-f', '-v'],
            [
                'git', 'config',
                'branch.auto-rebaseline-temporary-branch.rietveldissue'
            ],
        ])

        # The mac ports should both be removed since they're the only ones in builders._exact_matches.
        self.assertEqual(
            self.tool.filesystem.read_text_file(
                test_port.path_to_generic_test_expectations_file()), """
crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Rebaseline ]
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
crbug.com/24182 [ Linux Win ] fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
""")
Example #28
    def print_unexpected_results(self,
                                 summarized_results,
                                 enabled_pixel_tests_in_retry=False):
        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(result):
            test = result.test_name()
            actual = result.actual_results().split(" ")
            expected = result.expected_results().split(" ")

            if result.did_run_as_expected():
                # Don't print anything for tests that ran as expected.
                return

            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to crash, but passed', test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to timeout, but passed',
                                         test)
                else:
                    add_to_dict_of_lists(passes,
                                         'Expected to fail, but passed', test)
            elif enabled_pixel_tests_in_retry and actual == [
                    'TEXT', 'IMAGE+TEXT'
            ]:
                add_to_dict_of_lists(regressions, actual[0], test)
            elif len(actual) > 1 and bool(set(actual[1:]) & set(expected)):
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, actual[0], test)

        test_results = LayoutTestResults(summarized_results)
        test_results.for_each_test(add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._print("")
        if len(passes):
            for key, tests in passes.iteritems():
                self._print("%s: (%d)" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._print("  %s" % test)
                self._print("")
            self._print("")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result_type = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Unexpected flakiness: %s (%d)" %
                            (descriptions[result_type], len(tests)))
                tests.sort()

                for test in tests:
                    result = test_results.result_for_test(test)
                    actual = result.actual_results().split(" ")
                    expected = result.expected_results().split(" ")
                    # FIXME: clean this up once the old syntax is gone
                    new_expectations_list = [
                        TestExpectationParser._inverted_expectation_tokens[exp]
                        for exp in list(set(actual) | set(expected))
                    ]
                    self._print("  %s [ %s ]" %
                                (test, " ".join(new_expectations_list)))
                self._print("")
            self._print("")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result_type = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Regressions: Unexpected %s (%d)" %
                            (descriptions[result_type], len(tests)))
                tests.sort()
                for test in tests:
                    result = test_results.result_for_test(test)
                    actual = result.actual_results().split(" ")
                    expected = result.expected_results().split(" ")
                    new_expectations_list = [
                        TestExpectationParser._inverted_expectation_tokens[exp]
                        for exp in actual
                    ]
                    self._print("  %s [ %s ]" %
                                (test, " ".join(new_expectations_list)))
                self._print("")

        if len(summarized_results['tests']) and self.debug_logging:
            self._print("%s" % ("-" * 78))
Example #29
 def test_set_failure_limit_count(self):
     results = LayoutTestResults([])
     self.assertEquals(results.failure_limit_count(), None)
     results.set_failure_limit_count(10)
     self.assertEquals(results.failure_limit_count(), 10)
Example #30
 def layout_test_results(self):
     if self._double_flaky_test_counter % 2:
         return LayoutTestResults(
             [self._mock_test_result('foo.html')])
     return LayoutTestResults([self._mock_test_result('bar.html')])