Example #1
 def get_manager_with_tests(test_names):
     port = Mock()  # FIXME: Use a tighter mock.
     port.TEST_PATH_SEPARATOR = '/'
     manager = Manager(port, options=MockOptions(http=True), printer=Mock())
     manager._test_files = set(test_names)
     manager._test_files_list = test_names
     return manager
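
The FIXME above asks for a tighter mock. A minimal sketch of one way to get that with the standard library's unittest.mock (the snippets on this page use webkitpy's bundled Mock; FakePort is a hypothetical stand-in for the real Port):

import unittest.mock

class FakePort(object):
    """Hypothetical stand-in; the real webkitpy Port has a much larger API."""
    TEST_PATH_SEPARATOR = '/'

# spec_set= limits the mock to FakePort's attribute surface, so reading or
# assigning a misspelled attribute (e.g. TEST_PATH_SEPERATOR) raises
# AttributeError instead of silently creating a child Mock.
port = unittest.mock.Mock(spec_set=FakePort)
port.TEST_PATH_SEPARATOR = '/'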
Example #2
    def test_interrupt_if_at_failure_limits(self):
        port = Mock()  # FIXME: This should be a tighter mock.
        port.TEST_PATH_SEPARATOR = '/'
        port._filesystem = MockFileSystem()
        manager = Manager(port=port, options=MockOptions(), printer=Mock())

        manager._options = MockOptions(exit_after_n_failures=None, exit_after_n_crashes_or_timeouts=None)
        result_summary = ResultSummary(expectations=Mock(), test_files=[])
        result_summary.unexpected_failures = 100
        result_summary.unexpected_crashes = 50
        result_summary.unexpected_timeouts = 50
        # No exception when the exit_after* options are None.
        manager._interrupt_if_at_failure_limits(result_summary)

        # No exception when we haven't hit the limit yet.
        manager._options.exit_after_n_failures = 101
        manager._options.exit_after_n_crashes_or_timeouts = 101
        manager._interrupt_if_at_failure_limits(result_summary)

        # Interrupt if we've exceeded either limit:
        manager._options.exit_after_n_crashes_or_timeouts = 10
        self.assertRaises(TestRunInterruptedException, manager._interrupt_if_at_failure_limits, result_summary)

        manager._options.exit_after_n_crashes_or_timeouts = None
        manager._options.exit_after_n_failures = 10
        self.assertRaises(TestRunInterruptedException, manager._interrupt_if_at_failure_limits, result_summary)
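
The callable form of assertRaises used above returns None, so the raised exception cannot be inspected that way; the context-manager form (unittest 2.7+) exposes it. A self-contained sketch, with a stand-in exception class:

import unittest

class TestRunInterruptedException(Exception):
    """Stand-in for webkitpy's TestRunInterruptedException."""

class InterruptLimitsExample(unittest.TestCase):
    def test_capture_exception(self):
        # The 'with' form records the raised exception on the context
        # manager, so the test can assert on its message afterwards.
        with self.assertRaises(TestRunInterruptedException) as cm:
            raise TestRunInterruptedException("Exceeded failure limit.")
        self.assertIn("failure limit", str(cm.exception))

if __name__ == '__main__':
    unittest.main()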
Example #3
    def assert_queue_outputs(self, queue, args=None, work_item=None, expected_stdout=None, expected_stderr=None, expected_exceptions=None, options=None, tool=None):
        if not tool:
            tool = MockTool()
            # This is a hack so callers don't have to set up a custom MockFileSystem just to test the commit-queue:
            # the cq tries to read the layout test results and will hit a KeyError in MockFileSystem if we don't do this.
            tool.filesystem.write_text_file('/mock/results.html', "")
        if not expected_stdout:
            expected_stdout = {}
        if not expected_stderr:
            expected_stderr = {}
        if not args:
            args = []
        if not options:
            options = Mock()
            options.port = None
        if not work_item:
            work_item = self.mock_work_item
        tool.user.prompt = lambda message: "yes"

        queue.execute(options, args, tool, engine=MockQueueEngine)

        self.assert_outputs(queue.queue_log_path, "queue_log_path", [], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.work_item_log_path, "work_item_log_path", [work_item], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.begin_work_queue, "begin_work_queue", [], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.should_continue_work_queue, "should_continue_work_queue", [], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.next_work_item, "next_work_item", [], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.should_proceed_with_work_item, "should_proceed_with_work_item", [work_item], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.process_work_item, "process_work_item", [work_item], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.handle_unexpected_error, "handle_unexpected_error", [work_item, "Mock error message"], expected_stdout, expected_stderr, expected_exceptions)
        # Should we have a different function for testing StepSequenceErrorHandlers?
        if isinstance(queue, StepSequenceErrorHandler):
            self.assert_outputs(queue.handle_script_error, "handle_script_error", [tool, {"patch": self.mock_work_item}, ScriptError(message="ScriptError error message", script_args="MockErrorCommand")], expected_stdout, expected_stderr, expected_exceptions)
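
The tool.user.prompt stub above answers "yes" via a bare lambda. A hedged alternative using unittest.mock's return_value, which additionally records the call so a test can assert on it:

import unittest.mock

prompt = unittest.mock.Mock(return_value="yes")   # always answers "yes"
assert prompt("OK to continue?") == "yes"
prompt.assert_called_once_with("OK to continue?")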
Example #4
 def __init__(self):
     Mock.__init__(self)
     # FIXME: We should probably use real checkout-root detection logic here.
     # os.getcwd() can't work here because other parts of the code assume that "checkout_root"
     # will actually be the root.  Since getcwd() is wrong, use a globally fake root for now.
     self.checkout_root = self.fake_checkout_root
     self.added_paths = set()
 def test_modified_changelogs(self):
     scm = Mock()
     scm.checkout_root = "/foo/bar"
     scm.changed_files = lambda git_commit: ["file1", "ChangeLog", "relative/path/ChangeLog"]
     checkout = Checkout(scm)
     expected_changelogs = ["/foo/bar/ChangeLog", "/foo/bar/relative/path/ChangeLog"]
     self.assertEqual(checkout.modified_changelogs(git_commit=None), expected_changelogs)
    def assert_queue_outputs(self, queue, args=None, work_item=None, expected_stdout=None, expected_stderr=None, expected_exceptions=None, options=None, tool=None):
        if not tool:
            tool = MockTool()
        if not expected_stdout:
            expected_stdout = {}
        if not expected_stderr:
            expected_stderr = {}
        if not args:
            args = []
        if not options:
            options = Mock()
            options.port = None
        if not work_item:
            work_item = self.mock_work_item
        tool.user.prompt = lambda message: "yes"

        queue.execute(options, args, tool, engine=MockQueueEngine)

        self.assert_outputs(queue.queue_log_path, "queue_log_path", [], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.work_item_log_path, "work_item_log_path", [work_item], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.begin_work_queue, "begin_work_queue", [], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.should_continue_work_queue, "should_continue_work_queue", [], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.next_work_item, "next_work_item", [], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.should_proceed_with_work_item, "should_proceed_with_work_item", [work_item], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.process_work_item, "process_work_item", [work_item], expected_stdout, expected_stderr, expected_exceptions)
        self.assert_outputs(queue.handle_unexpected_error, "handle_unexpected_error", [work_item, "Mock error message"], expected_stdout, expected_stderr, expected_exceptions)
        # Should we have a different function for testing StepSequenceErrorHandlers?
        if isinstance(queue, StepSequenceErrorHandler):
            self.assert_outputs(queue.handle_script_error, "handle_script_error", [tool, {"patch": self.mock_work_item}, ScriptError(message="ScriptError error message", script_args="MockErrorCommand")], expected_stdout, expected_stderr, expected_exceptions)
Example #7
 def test_commit_message_for_current_diff(self):
     tool = MockTool()
     mock_commit_message_for_this_commit = Mock()
     mock_commit_message_for_this_commit.message = lambda: "Mock message"
     tool._checkout.commit_message_for_this_commit = lambda: mock_commit_message_for_this_commit
     expected_stdout = "Mock message\n"
     self.assert_execute_outputs(CommitMessageForCurrentDiff(), [], expected_stdout=expected_stdout, tool=tool)
 def _test_ews(self, ews):
     ews.bind_to_tool(MockTool())
     ews.host = MockHost()
     options = Mock()
     options.port = None
     options.run_tests = ews.run_tests
     self.assert_queue_outputs(ews, expected_logs=self._default_expected_logs(ews), options=options)
    def test_shard_tests(self):
        # Test that _shard_tests in test_runner.TestRunner really
        # puts the HTTP tests first in the queue.
        port = Mock()
        port._filesystem = filesystem_mock.MockFileSystem()
        runner = TestRunnerWrapper(port=port, options=Mock(),
            printer=Mock())

        test_list = [
          "LayoutTests/websocket/tests/unicode.htm",
          "LayoutTests/animations/keyframes.html",
          "LayoutTests/http/tests/security/view-source-no-refresh.html",
          "LayoutTests/websocket/tests/websocket-protocol-ignored.html",
          "LayoutTests/fast/css/display-none-inline-style-change-crash.html",
          "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html",
          "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html",
          "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html",
          "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html",
        ]

        expected_tests_to_http_lock = set([
          'LayoutTests/websocket/tests/unicode.htm',
          'LayoutTests/http/tests/security/view-source-no-refresh.html',
          'LayoutTests/websocket/tests/websocket-protocol-ignored.html',
          'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html',
        ])

        # FIXME: Ideally the HTTP tests don't have to all be in one shard.
        single_thread_results = runner._shard_tests(test_list, False)
        multi_thread_results = runner._shard_tests(test_list, True)

        self.assertEqual("tests_to_http_lock", single_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(single_thread_results[0][1]))
        self.assertEqual("tests_to_http_lock", multi_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(multi_thread_results[0][1]))
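
A toy sketch of the sharding contract this test pins down (a hypothetical reconstruction, not webkitpy's _shard_tests): tests that need the HTTP lock, i.e. the http/ and websocket/ paths, go into one leading shard named "tests_to_http_lock".

def shard_tests_toy(test_list):
    # The real implementation also splits the remaining tests into
    # further shards; this only models the HTTP-lock grouping.
    needs_http_lock = [t for t in test_list
                       if '/http/' in t or '/websocket/' in t]
    rest = [t for t in test_list if t not in needs_http_lock]
    return [("tests_to_http_lock", needs_http_lock), ("everything_else", rest)]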
Example #10
 def test_mark_bug_fixed(self):
     tool = MockTool()
     tool._scm.last_svn_commit_log = lambda: "r9876 |"
     options = Mock()
     options.bug_id = 42
     options.comment = "MOCK comment"
     expected_stderr = "Bug: <http://example.com/42> Bug with two r+'d and cq+'d patches, one of which has an invalid commit-queue setter.\nRevision: 9876\nMOCK: user.open_url: http://example.com/42\nAdding comment to Bug 42.\nMOCK bug comment: bug_id=42, cc=None\n--- Begin comment ---\nMOCK comment\n\nCommitted r9876: <http://trac.webkit.org/changeset/9876>\n--- End comment ---\n\n"
     self.assert_execute_outputs(MarkBugFixed(), [], expected_stderr=expected_stderr, tool=tool, options=options)
 def test_filename_for_upload(self):
     bugzilla = Bugzilla()
     mock_file = Mock()
     mock_file.name = "foo"
     self.assertEqual(bugzilla._filename_for_upload(mock_file, 1234), 'foo')
     mock_timestamp = lambda: "now"
     filename = bugzilla._filename_for_upload(StringIO.StringIO(), 1234, extension="patch", timestamp=mock_timestamp)
     self.assertEqual(filename, "bug-1234-now.patch")
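
A toy reconstruction of the naming rule the last three lines assert (hypothetical names and defaults, not Bugzilla._filename_for_upload itself): a file exposing .name keeps it; otherwise the name is assembled from the bug id, a timestamp callback, and the extension.

import io

def filename_for_upload_toy(file_object, bug_id, extension="txt", timestamp=None):
    name = getattr(file_object, "name", None)
    if name:
        return name
    stamp = timestamp() if timestamp else ""   # hypothetical fallback
    return "bug-%s-%s.%s" % (bug_id, stamp, extension)

assert filename_for_upload_toy(io.StringIO(), 1234, extension="patch",
                               timestamp=lambda: "now") == "bug-1234-now.patch"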
Example #12
 def test_apply_patch(self):
     checkout = self._make_checkout()
     checkout._executive = MockExecutive(should_log=True)
     checkout._scm.script_path = lambda script: script
     mock_patch = Mock()
     mock_patch.contents = lambda: "foo"
     mock_patch.reviewer = lambda: None
     expected_stderr = "MOCK run_command: ['svn-apply', '--force'], cwd=/mock-checkout\n"
     OutputCapture().assert_outputs(self, checkout.apply_patch, [mock_patch], expected_stderr=expected_stderr)
Example #13
    def test_patches_to_commit_queue(self):
        expected_stdout = "http://example.com/10003&action=edit\n"
        expected_logs = "10000 already has cq=+\n10001 already has cq=+\n10004 committer = \"Eric Seidel\" <*****@*****.**>\n"
        options = Mock()
        options.bugs = False
        self.assert_execute_outputs(PatchesToCommitQueue(), None, expected_stdout, expected_logs=expected_logs, options=options)

        expected_stdout = "http://example.com/50003\n"
        options.bugs = True
        self.assert_execute_outputs(PatchesToCommitQueue(), None, expected_stdout, expected_logs=expected_logs, options=options)
Example #14
 def test_latest_entry_for_changelog_at_revision(self):
     scm = Mock()
     def mock_contents_at_revision(changelog_path, revision):
         self.assertEqual(changelog_path, "foo")
         self.assertEqual(revision, "bar")
         return _changelog1
     scm.contents_at_revision = mock_contents_at_revision
     checkout = Checkout(scm)
     entry = checkout._latest_entry_for_changelog_at_revision("foo", "bar")
     self.assertEqual(entry.contents(), _changelog1entry1)
    def _test_json_generation(self, passed_tests_list, failed_tests_list):
        tests_set = set(passed_tests_list) | set(failed_tests_list)

        DISABLED_tests = set([t for t in tests_set if t.startswith("DISABLED_")])
        FLAKY_tests = set([t for t in tests_set if t.startswith("FLAKY_")])
        FAILS_tests = set([t for t in tests_set if t.startswith("FAILS_")])
        PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)

        failed_tests = set(failed_tests_list) - DISABLED_tests
        failed_count_map = dict([(t, 1) for t in failed_tests])

        test_timings = {}
        i = 0
        for test in tests_set:
            test_timings[test] = float(self._num_runs * 100 + i)
            i += 1

        test_results_map = dict()
        for test in tests_set:
            test_results_map[test] = json_results_generator.TestResult(
                test, failed=(test in failed_tests), elapsed_time=test_timings[test]
            )

        host = MockHost()
        port = Mock()
        port._filesystem = host.filesystem
        generator = json_results_generator.JSONResultsGeneratorBase(
            port,
            self.builder_name,
            self.build_name,
            self.build_number,
            "",
            None,  # don't fetch past json results archive
            test_results_map,
        )

        # Test incremental json results
        incremental_json = generator.get_json()
        self._verify_json_results(
            tests_set,
            test_timings,
            failed_count_map,
            len(PASS_tests),
            len(DISABLED_tests),
            len(FLAKY_tests),
            len(DISABLED_tests | failed_tests),
            incremental_json,
            1,
        )

        # We don't verify the results here, but at least we make sure the code runs without errors.
        generator.generate_json_output()
        generator.generate_times_ms_file()
    def test_changelog_contains_oops(self):
        tool = MockTool()
        tool._checkout.is_path_to_changelog = lambda path: True
        step = ValidateChangeLogs(tool, MockOptions(git_commit=None, non_interactive=True, check_oops=True))
        diff_file = Mock()
        diff_file.filename = "mock/ChangeLog"
        diff_file.lines = [(1, 1, "foo"), (2, 2, "bar OOPS! bar"), (3, 3, "foo")]
        self.assertTrue(OutputCapture().assert_outputs(self, step._changelog_contains_oops, [diff_file], expected_logs=''))

        diff_file.lines = [(1, 1, "foo"), (2, 2, "bar OOPS bar"), (3, 3, "foo")]
        self.assertFalse(OutputCapture().assert_outputs(self, step._changelog_contains_oops, [diff_file], expected_logs=''))
 def _assert_start_line_produces_output(self, start_line, should_fail=False, non_interactive=False):
     tool = MockTool()
     step = ValidateChangeLogs(tool, MockOptions(git_commit=None, non_interactive=non_interactive))
     diff_file = Mock()
     diff_file.filename = "mock/ChangeLog"
     diff_file.lines = [(start_line, start_line, "foo")]
     expected_stdout = expected_stderr = expected_logs = ""
     if should_fail and not non_interactive:
         expected_logs = "The diff to mock/ChangeLog looks wrong. Are you sure your ChangeLog entry is at the top of the file?\nOK to continue?\n"
     result = OutputCapture().assert_outputs(self, step._check_changelog_diff, [diff_file], expected_logs=expected_logs)
     self.assertEqual(not result, should_fail)
Example #18
 def _assert_start_line_produces_output(self, start_line, should_prompt_user=False):
     tool = MockTool()
     tool._checkout.is_path_to_changelog = lambda path: True
     step = ValidateChangeLogs(tool, MockOptions(git_commit=None))
     diff_file = Mock()
     diff_file.filename = "mock/ChangeLog"
     diff_file.lines = [(start_line, start_line, "foo")]
     expected_stdout = expected_stderr = ""
     if should_prompt_user:
         expected_stdout = "OK to continue?\n"
         expected_stderr = "The diff to mock/ChangeLog looks wrong.  Are you sure your ChangeLog entry is at the top of the file?\n"
     OutputCapture().assert_outputs(self, step._check_changelog_diff, [diff_file], expected_stdout=expected_stdout, expected_stderr=expected_stderr)
Example #19
 def test_latest_entry_for_changelog_at_revision(self):
     scm = Mock()
     def mock_contents_at_revision(changelog_path, revision):
         self.assertEqual(changelog_path, "foo")
         self.assertEqual(revision, "bar")
         # contents_at_revision is expected to return a byte string (str
         # in Python 2), so we encode our unicode ChangeLog as UTF-8.
         return _changelog1.encode("utf-8")
     scm.contents_at_revision = mock_contents_at_revision
     checkout = Checkout(scm)
     entry = checkout._latest_entry_for_changelog_at_revision("foo", "bar")
     self.assertEqual(entry.contents(), _changelog1entry1)
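
A minimal illustration of the encode step in mock_contents_at_revision (Python 2 semantics: unicode in, UTF-8 byte string out, losslessly decodable):

text = u"Tor Arne Vestb\u00f8"        # unicode ChangeLog content
data = text.encode("utf-8")           # what contents_at_revision returns
assert data.decode("utf-8") == text   # round-trips without loss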
Example #20
 def _default_options(self):
     options = Mock()
     options.force_clean = False
     options.clean = True
     options.check_builders = True
     options.quiet = False
     options.non_interactive = False
     options.update = True
     options.build = True
     options.test = True
     options.close_bug = True
     return options
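
With the standard library's unittest.mock, the same options object can be built in one call, since Mock() accepts attribute keyword arguments (a sketch; the snippets here use webkitpy's bundled Mock, which may not support this):

import unittest.mock

options = unittest.mock.Mock(
    force_clean=False, clean=True, check_builders=True, quiet=False,
    non_interactive=False, update=True, build=True, test=True,
    close_bug=True)
assert options.clean is True and options.force_clean is False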
Example #21
 def test_commit_info_for_revision(self):
     scm = Mock()
     scm.committer_email_for_revision = lambda revision: "*****@*****.**"
     checkout = Checkout(scm)
     checkout.changelog_entries_for_revision = lambda revision: [ChangeLogEntry(_changelog1entry1)]
     commitinfo = checkout.commit_info_for_revision(4)
     self.assertEqual(commitinfo.bug_id(), 36629)
     self.assertEqual(commitinfo.author_name(), "Eric Seidel")
     self.assertEqual(commitinfo.author_email(), "*****@*****.**")
     self.assertEqual(commitinfo.reviewer_text(), None)
     self.assertEqual(commitinfo.reviewer(), None)
     self.assertEqual(commitinfo.committer_email(), "*****@*****.**")
     self.assertEqual(commitinfo.committer(), None)
Example #22
 def run():
     sheriff = Sheriff(MockTool(), MockSheriffBot())
     builders = [Builder("Foo", None), Builder("Bar", None)]
     commit_info = Mock()
     commit_info.bug_id = lambda: None
     commit_info.revision = lambda: 4321
     # Should do nothing with no bug_id
     sheriff.post_blame_comment_on_bug(commit_info, builders, [])
     sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])
     # Should try to post a comment to the bug, but MockTool.bugs does nothing.
     commit_info.bug_id = lambda: 1234
     sheriff.post_blame_comment_on_bug(commit_info, builders, [])
     sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1"])
     sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])
Example #23
    def test_auto_retry(self):
        queue = CommitQueue()
        options = Mock()
        options.parent_command = "commit-queue"
        tool = AlwaysCommitQueueTool()
        sequence = NeedsUpdateSequence(None)

        expected_stderr = "Commit failed because the checkout is out of date.  Please update and try again.\nMOCK: update_status: commit-queue Tests passed, but commit failed (checkout out of date).  Updating, then landing without building or re-running tests.\n"
        state = {'patch': None}
        OutputCapture().assert_outputs(self, sequence.run_and_handle_errors, [tool, options, state], expected_exception=TryAgain, expected_stderr=expected_stderr)

        self.assertEqual(options.update, True)
        self.assertEqual(options.build, False)
        self.assertEqual(options.test, False)
    def test_failing_tests_message(self):
        # Needed to define port_name, used in AbstractEarlyWarningSystem.__init__
        class TestEWS(AbstractEarlyWarningSystem):
            port_name = "win"  # Needs to be a port which port/factory understands.

        ews = TestEWS()
        ews.bind_to_tool(MockTool())
        ews._options = MockOptions(port=None, confirm=False)
        OutputCapture().assert_outputs(self, ews.begin_work_queue, expected_logs=self._default_begin_work_queue_logs(ews.name))
        task = Mock()
        task.results_from_patch_test_run = lambda a: LayoutTestResults([test_results.TestResult("foo.html", failures=[test_failures.FailureTextMismatch()]),
                                                                          test_results.TestResult("bar.html", failures=[test_failures.FailureTextMismatch()])],
                                                                          did_exceed_test_failure_limit=False)
        patch = ews._tool.bugs.fetch_attachment(10000)
        self.assertMultiLineEqual(ews._failing_tests_message(task, patch), "New failing tests:\nfoo.html\nbar.html")
    def _test_json_generation(self, passed_tests_list, failed_tests_list):
        tests_set = set(passed_tests_list) | set(failed_tests_list)

        DISABLED_tests = set([t for t in tests_set
                             if t.startswith('DISABLED_')])
        FLAKY_tests = set([t for t in tests_set
                           if t.startswith('FLAKY_')])
        FAILS_tests = set([t for t in tests_set
                           if t.startswith('FAILS_')])
        PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)

        failed_tests = set(failed_tests_list) - DISABLED_tests
        failed_count_map = dict([(t, 1) for t in failed_tests])

        test_timings = {}
        i = 0
        for test in tests_set:
            test_timings[test] = float(self._num_runs * 100 + i)
            i += 1

        test_results_map = dict()
        for test in tests_set:
            test_results_map[test] = json_results_generator.TestResult(test,
                failed=(test in failed_tests),
                elapsed_time=test_timings[test])

        port = Mock()
        port._filesystem = filesystem_mock.MockFileSystem()
        generator = json_results_generator.JSONResultsGeneratorBase(port,
            self.builder_name, self.build_name, self.build_number,
            '',
            None,   # don't fetch past json results archive
            test_results_map)

        # Test incremental json results
        incremental_json = generator.get_json()
        self._verify_json_results(
            tests_set,
            test_timings,
            failed_count_map,
            len(PASS_tests),
            len(DISABLED_tests),
            len(FLAKY_tests),
            len(DISABLED_tests | failed_tests),
            incremental_json,
            1)
    def test_changelog_entries_for_revision(self):
        scm = Mock()
        scm.changed_files_for_revision = lambda revision: ['foo/ChangeLog', 'bar/ChangeLog']
        checkout = Checkout(scm)

        def mock_latest_entry_for_changelog_at_revision(path, revision):
            if path == "foo/ChangeLog":
                return 'foo'
            raise ScriptError()

        checkout._latest_entry_for_changelog_at_revision = mock_latest_entry_for_changelog_at_revision

        # Even though fetching one of the entries failed, the other should succeed.
        entries = checkout.changelog_entries_for_revision(1)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], 'foo')
    def test_commit_info(self):
        command = AbstractRolloutPrepCommand()
        tool = MockTool()
        command.bind_to_tool(tool)
        output = OutputCapture()

        expected_stderr = "Preparing rollout for bug 42.\n"
        commit_info = output.assert_outputs(self, command._commit_info, [1234], expected_stderr=expected_stderr)
        self.assertTrue(commit_info)

        mock_commit_info = Mock()
        mock_commit_info.bug_id = lambda: None
        tool._checkout.commit_info_for_revision = lambda revision: mock_commit_info
        expected_stderr = "Unable to parse bug number from diff.\n"
        commit_info = output.assert_outputs(self, command._commit_info, [1234], expected_stderr=expected_stderr)
        self.assertEqual(commit_info, mock_commit_info)
Example #28
 def test_local_commits_exist_with_force(self):
     tool = MockTool()
     tool._scm = Mock()
     tool._scm.has_working_directory_changes = lambda: False
     tool._scm.has_local_commits = lambda: True
     step = DiscardLocalChanges(tool, MockOptions(clean=True, force_clean=True))
     step.run({})
     self.assertEqual(tool._scm.discard_local_changes.call_count, 1)
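
The final call_count assertion works because a Mock records every call made on its attributes. A minimal sketch:

import unittest.mock

scm = unittest.mock.Mock()
scm.discard_local_changes()            # what the step under test would call
assert scm.discard_local_changes.call_count == 1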
Example #29
 def test_iteration_count(self):
     queue = TestQueue()
     queue._options = Mock()
     queue._options.iterations = 3
     self.assertTrue(queue.should_continue_work_queue())
     self.assertTrue(queue.should_continue_work_queue())
     self.assertTrue(queue.should_continue_work_queue())
     self.assertFalse(queue.should_continue_work_queue())
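
A toy sketch of the countdown behavior this test pins down (hypothetical, not webkitpy's queue engine): should_continue_work_queue returns True for options.iterations calls, then False.

class CountingQueueToy(object):
    def __init__(self, iterations):
        self._iterations = iterations
        self._iteration_count = 0

    def should_continue_work_queue(self):
        self._iteration_count += 1
        return self._iteration_count <= self._iterations

queue = CountingQueueToy(3)
assert [queue.should_continue_work_queue() for _ in range(4)] == [True, True, True, False]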
Example #30
 def _run_step(self, step, tool=None, options=None, state=None):
     if not tool:
         tool = MockTool()
     if not options:
         options = Mock()
     if not state:
         state = {}
     step(tool, options).run(state)
Example #31
 def get_manager_with_tests(test_names):
     host = MockHost()
     port = host.port_factory.get()
     manager = Manager(port,
                       options=MockOptions(test_list=None, http=True),
                       printer=Mock())
     manager.collect_tests(test_names)
     return manager
 def test_no_clean(self):
     tool = MockTool()
     tool._scm = Mock()
     step = CleanWorkingDirectory(tool, MockOptions(clean=False))
     step.run({})
     self.assertEqual(tool._scm.ensure_no_local_commits.call_count, 0)
     self.assertEqual(tool._scm.ensure_clean_working_directory.call_count,
                      0)
    def test_failing_tests_message(self):
        # Needed to define port_name, used in AbstractEarlyWarningSystem.__init__
        class TestEWS(AbstractEarlyWarningSystem):
            port_name = "win"  # Needs to be a port which port/factory understands.
            _build_style = None

        ews = TestEWS()
        ews.bind_to_tool(MockTool())
        ews.host = MockHost()
        ews._options = MockOptions(port=None, confirm=False)
        OutputCapture().assert_outputs(self, ews.begin_work_queue, expected_logs=self._default_begin_work_queue_logs(ews.name))
        task = Mock()
        task.results_from_patch_test_run = lambda a: LayoutTestResults([test_results.TestResult("foo.html", failures=[test_failures.FailureTextMismatch()]),
                                                                          test_results.TestResult("bar.html", failures=[test_failures.FailureTextMismatch()])],
                                                                          did_exceed_test_failure_limit=False)
        patch = ews._tool.bugs.fetch_attachment(10000)
        self.assertMultiLineEqual(ews._failing_tests_message(task, patch), "New failing tests:\nfoo.html\nbar.html")
Example #34
 def run():
     sheriff = Sheriff(MockTool(), MockSheriffBot())
     builders = [
         Builder("Foo", None),
         Builder("Bar", None),
     ]
     commit_info = Mock()
     commit_info.bug_id = lambda: None
     commit_info.revision = lambda: 4321
     # Should do nothing with no bug_id
     sheriff.post_blame_comment_on_bug(commit_info, builders, [])
     sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])
     # Should try to post a comment to the bug, but MockTool.bugs does nothing.
     commit_info.bug_id = lambda: 1234
     sheriff.post_blame_comment_on_bug(commit_info, builders, [])
     sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1"])
     sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])
Example #35
 def test_write_command_and_read_line(self):
     self.driver._proc = Mock()  # FIXME: This should use a tighter mock.
     self.driver._proc.stdout = StringIO.StringIO("#URL:file:///data/local/tmp/third_party/WebKit/LayoutTests/test.html\noutput\n\n")
     self.assertEqual(self.driver._write_command_and_read_line(), ('#URL:file:///mock-checkout/LayoutTests/test.html\n', False))
     self.assertEqual(self.driver._write_command_and_read_line(), ('output\n', False))
     self.assertEqual(self.driver._write_command_and_read_line(), ('\n', False))
     # Unexpected EOF is treated as a crash.
     self.assertEqual(self.driver._write_command_and_read_line(), ('', True))
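
A self-contained sketch of the EOF convention asserted on the last line: readline() returns an empty string only at EOF, which the driver reports as a crash (a hypothetical reduction of the read side of _write_command_and_read_line):

import io

def read_line_toy(stdout):
    line = stdout.readline()
    return line, line == ''            # (line, crashed)

stdout = io.StringIO(u"output\n\n")
assert read_line_toy(stdout) == (u"output\n", False)
assert read_line_toy(stdout) == (u"\n", False)
assert read_line_toy(stdout) == (u"", True)    # EOF is reported as a crash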
Example #36
    def test_land(self):
        expected_logs = """Building WebKit
Running Python unit tests
Running Perl unit tests
Running JavaScriptCore tests
Running bindings generation tests
Running run-webkit-tests
Committed r49824: <http://trac.webkit.org/changeset/49824>
Updating bug 50000
"""
        mock_tool = MockTool()
        mock_tool.scm().create_patch = Mock(return_value="Patch1\nMockPatch\n")
        mock_tool.checkout().modified_changelogs = Mock(return_value=[])
        self.assert_execute_outputs(Land(), [50000], options=self._default_options(), expected_logs=expected_logs, tool=mock_tool)
        # Make sure we're not making expensive calls too often.
        self.assertEqual(mock_tool.scm().create_patch.call_count, 0)
        self.assertEqual(mock_tool.checkout().modified_changelogs.call_count, 1)
 def test_run_no_local_changes(self):
     tool = MockTool()
     tool._scm = Mock()
     step = CleanWorkingDirectory(tool, MockOptions(clean=True, force_clean=False))
     tool._scm.has_working_directory_changes = lambda: False
     tool._scm.has_local_commits = lambda: False
     step.run({})
     self.assertEqual(tool._scm.discard_working_directory_changes.call_count, 1)
    def _test_json_generation(self, passed_tests_list, failed_tests_list):
        tests_set = set(passed_tests_list) | set(failed_tests_list)

        DISABLED_tests = set(
            [t for t in tests_set if t.startswith('DISABLED_')])
        FLAKY_tests = set([t for t in tests_set if t.startswith('FLAKY_')])
        FAILS_tests = set([t for t in tests_set if t.startswith('FAILS_')])
        PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)

        failed_tests = set(failed_tests_list) - DISABLED_tests
        failed_count_map = dict([(t, 1) for t in failed_tests])

        test_timings = {}
        i = 0
        for test in tests_set:
            test_timings[test] = float(self._num_runs * 100 + i)
            i += 1

        test_results_map = dict()
        for test in tests_set:
            test_results_map[test] = json_results_generator.TestResult(
                test,
                failed=(test in failed_tests),
                elapsed_time=test_timings[test])

        host = MockHost()
        port = Mock()
        port._filesystem = host.filesystem
        generator = json_results_generator.JSONResultsGenerator(
            port, self.builder_name, self.build_name, self.build_number, '',
            test_results_map)

        # Test incremental json results
        incremental_json = generator.get_json()
        self._verify_json_results(tests_set, test_timings, failed_count_map,
                                  len(PASS_tests), len(DISABLED_tests),
                                  len(FLAKY_tests),
                                  len(DISABLED_tests | failed_tests),
                                  incremental_json, 1)

        # We don't verify the results here, but at least we make sure the code runs without errors.
        generator.generate_json_output()
        generator.generate_times_ms_file()
 def test_has_valid_reviewer(self):
     step = ValidateReviewer(MockTool(), Mock())
     self._test_review_text(step, "Reviewed by Eric Seidel.", True)
     self._test_review_text(step, "Reviewed by Eric Seidel", True) # Not picky about the '.'
     self._test_review_text(step, "Reviewed by Eric.", False)
     self._test_review_text(step, "Reviewed by Eric C Seidel.", False)
     self._test_review_text(step, "Rubber-stamped by Eric.", True)
     self._test_review_text(step, "Rubber stamped by Eric.", True)
     self._test_review_text(step, "Unreviewed build fix.", True)
Example #40
    def test_changelog_entries_for_revision(self):
        scm = Mock()
        scm.changed_files_for_revision = lambda revision: [
            'foo/ChangeLog', 'bar/ChangeLog'
        ]
        checkout = Checkout(scm)

        def mock_latest_entry_for_changelog_at_revision(path, revision):
            if path == "foo/ChangeLog":
                return 'foo'
            raise ScriptError()

        checkout._latest_entry_for_changelog_at_revision = mock_latest_entry_for_changelog_at_revision

        # Even though fetching one of the entries failed, the other should succeed.
        entries = checkout.changelog_entries_for_revision(1)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], 'foo')
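
A toy sketch of the error-tolerant loop this test exercises (hypothetical names, not webkitpy's implementation): an entry whose fetch raises ScriptError is skipped instead of aborting the whole revision.

class ScriptError(Exception):
    """Stand-in for webkitpy's ScriptError."""

def entries_for_revision_toy(paths, fetch_entry):
    entries = []
    for path in paths:
        try:
            entries.append(fetch_entry(path))
        except ScriptError:
            continue                   # skip ChangeLogs that fail to parse
    return entries

def fetch(path):
    if path == "foo/ChangeLog":
        return "foo"
    raise ScriptError()

assert entries_for_revision_toy(["foo/ChangeLog", "bar/ChangeLog"], fetch) == ["foo"]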
Example #41
 def get_manager():
     host = MockHost()
     port = host.port_factory.get()
     port.TEST_PATH_SEPARATOR = '/'
     port.web_platform_test_server_doc_root = get_wpt_doc_root
     manager = Manager(port,
                       options=MockOptions(http=True),
                       printer=Mock())
     return manager
Example #42
 def get_manager():
     host = MockHost()
     port = MockCustomDevicePort(host)
     manager = Manager(port,
                       options=MockOptions(
                           test_list=['fast/test-starship/lasers.html'],
                           http=True),
                       printer=Mock())
     return manager
    def test_should_squash_error(self):
        """should_squash can throw an error. That error should not be eaten by CheckStyle."""
        def should_squash(squash):
            raise ScriptError(message="Dummy error")

        tool = MockTool()
        tool._scm.should_squash = should_squash
        step = CheckStyle(tool, Mock())
        self.assertRaises(ScriptError, step.run, [])
 def test_run_working_directory_changes_no_force(self):
     tool = MockTool()
     tool._scm = Mock()
     step = CleanWorkingDirectory(
         tool, MockOptions(clean=True, force_clean=False))
     tool._scm.has_working_directory_changes = lambda: True
     self.assertRaises(ScriptError, step.run, {})
     self.assertEqual(
         tool._scm.discard_working_directory_changes.call_count, 0)
 def test_empty_state(self):
     capture = OutputCapture()
     step = CloseBugForLandDiff(MockTool(), Mock())
     expected_stderr = "Committed r49824: <http://trac.webkit.org/changeset/49824>\nNo bug id provided.\n"
     capture.assert_outputs(self,
                            step.run, [{
                                "commit_text": "Mock commit text"
                            }],
                            expected_stderr=expected_stderr)
Example #46
    def test_auto_retry(self):
        queue = CommitQueue()
        options = Mock()
        options.parent_command = "commit-queue"
        tool = AlwaysCommitQueueTool()
        sequence = NeedsUpdateSequence(None)

        expected_stderr = "Commit failed because the checkout is out of date.  Please update and try again.\nMOCK: update_status: commit-queue Tests passed, but commit failed (checkout out of date).  Updating, then landing without building or re-running tests.\n"
        state = {'patch': None}
        OutputCapture().assert_outputs(self,
                                       sequence.run_and_handle_errors,
                                       [tool, options, state],
                                       expected_exception=TryAgain,
                                       expected_stderr=expected_stderr)

        self.assertEqual(options.update, True)
        self.assertEqual(options.build, False)
        self.assertEqual(options.test, False)
 def get_manager():
     host = MockHost()
     port = host.port_factory.get('test-mac-leopard')
     manager = Manager(port,
                       options=MockOptions(test_list=None,
                                           http=True,
                                           max_locked_shards=1),
                       printer=Mock())
     return manager
    def test_commit_info_for_revision(self):
        scm = Mock()
        scm.committer_email_for_revision = lambda revision: "*****@*****.**"
        checkout = Checkout(scm)
        checkout.changelog_entries_for_revision = lambda revision: [
            ChangeLogEntry(_changelog1entry1)
        ]
        commitinfo = checkout.commit_info_for_revision(4)
        self.assertEqual(commitinfo.bug_id(), 36629)
        self.assertEqual(commitinfo.author_name(), u"Tor Arne Vestb\u00f8")
        self.assertEqual(commitinfo.author_email(), "*****@*****.**")
        self.assertEqual(commitinfo.reviewer_text(), None)
        self.assertEqual(commitinfo.reviewer(), None)
        self.assertEqual(commitinfo.committer_email(), "*****@*****.**")
        self.assertEqual(commitinfo.committer(), None)

        checkout.changelog_entries_for_revision = lambda revision: []
        self.assertEqual(checkout.commit_info_for_revision(1), None)
Example #49
    def test_land(self):
        expected_logs = """Building WebKit
Committed r49824: <https://commits.webkit.org/r49824>
Adding comment and closing bug 50000
"""
        with self.mock_svn_remote():
            mock_tool = MockTool()
            mock_tool.scm().create_patch = Mock(
                return_value="Patch1\nMockPatch\n")
            mock_tool.checkout().modified_changelogs = Mock(return_value=[])
            self.assert_execute_outputs(Land(), [50000],
                                        options=self._default_options(),
                                        expected_logs=expected_logs,
                                        tool=mock_tool)
            # Make sure we're not making expensive calls too often.
            self.assertEqual(mock_tool.scm().create_patch.call_count, 0)
            self.assertEqual(
                mock_tool.checkout().modified_changelogs.call_count, 1)
Example #50
    def test_patches_to_commit_queue(self):
        expected_stdout = "http://example.com/10003&action=edit\n"
        expected_logs = "10000 already has cq=+\n10001 already has cq=+\n10004 committer = \"Eric Seidel\" <*****@*****.**>\n"
        options = Mock()
        options.bugs = False
        self.assert_execute_outputs(PatchesToCommitQueue(),
                                    None,
                                    expected_stdout,
                                    expected_logs=expected_logs,
                                    options=options)

        expected_stdout = "http://example.com/50003\n"
        options.bugs = True
        self.assert_execute_outputs(PatchesToCommitQueue(),
                                    None,
                                    expected_stdout,
                                    expected_logs=expected_logs,
                                    options=options)
    def _assert_run_webkit_patch(self, run_args):
        queue = TestQueue()
        tool = MockTool()
        tool.executive = Mock()
        queue.bind_to_tool(tool)

        queue.run_webkit_patch(run_args)
        expected_run_args = ["echo", "--status-host=example.com"] + run_args
        tool.executive.run_and_throw_if_fail.assert_called_with(expected_run_args)
Example #52
    def test_upload_results_archive_for_patch(self):
        queue = PatchProcessingQueue()
        queue.name = "mock-queue"
        tool = MockTool()
        queue.bind_to_tool(tool)
        queue._options = Mock()
        queue._options.port = None
        patch = queue._tool.bugs.fetch_attachment(10001)
        expected_logs = """MOCK add_attachment_to_bug: bug_id=50000, description=Archive of layout-test-results from bot for mac-snowleopard filename=layout-test-results.zip mimetype=None
-- Begin comment --
The attached test failures were seen while running run-webkit-tests on the mock-queue.
Port: mac-snowleopard  Platform: MockPlatform 1.0
-- End comment --
"""
        OutputCapture().assert_outputs(self,
                                       queue._upload_results_archive_for_patch,
                                       [patch, Mock()],
                                       expected_logs=expected_logs)
Example #53
    def _assert_run_webkit_patch(self, run_args, port=None):
        queue = TestQueue()
        tool = MockTool()
        tool.status_server.bot_id = "gort"
        tool.executive = Mock()
        queue.bind_to_tool(tool)
        queue._options = Mock()
        queue._options.port = port

        queue.run_webkit_patch(run_args)
        expected_run_args = [
            "echo", "--status-host=example.com", "--bot-id=gort"
        ]
        if port:
            expected_run_args.append("--port=%s" % port)
        expected_run_args.extend(run_args)
        tool.executive.run_command.assert_called_with(expected_run_args,
                                                      cwd='/mock-checkout')
Example #54
    def test_results_html(self):
        mock_port = Mock()
        mock_port.relative_test_filename = lambda name: name
        mock_port.filename_to_uri = lambda name: name

        runner = run_webkit_tests.TestRunner(port=mock_port, options=Mock(), printer=Mock())
        expected_html = u"""<html>
  <head>
    <title>Layout Test Results (time)</title>
  </head>
  <body>
    <h2>Title (time)</h2>
        <p><a href='test_path'>test_path</a><br />
</p>
</body></html>
"""
        html = runner._results_html(["test_path"], {}, "Title", override_time="time")
        self.assertEqual(html, expected_html)
 def test_error_working_changes_exist_without_force(self):
     tool = MockTool()
     tool._scm = Mock()
     tool._scm.has_working_directory_changes = lambda: True
     tool._scm.has_local_commits = lambda: False
     step = DiscardLocalChanges(tool,
                                MockOptions(clean=True, force_clean=False))
     self.assertRaises(ScriptError, step.run, {})
     self.assertEqual(tool._scm.discard_local_changes.call_count, 0)
Example #56
    def test_runtests_leopard_commit_queue_hack(self):
        mock_options = Mock()
        mock_options.non_interactive = True
        step = RunTests(MockTool(log_executive=True), mock_options)
        # FIXME: We shouldn't use a real port-object here, but there is too much to mock at the moment.
        mock_port = WebKitPort()
        mock_port.name = lambda: "Mac"
        mock_port.is_leopard = lambda: True
        step.port = lambda: mock_port
        expected_stderr = """Running Python unit tests
MOCK run_and_throw_if_fail: ['WebKitTools/Scripts/test-webkitpy']
Running Perl unit tests
MOCK run_and_throw_if_fail: ['WebKitTools/Scripts/test-webkitperl']
Running JavaScriptCore tests
MOCK run_and_throw_if_fail: ['WebKitTools/Scripts/run-javascriptcore-tests']
Running run-webkit-tests
MOCK run_and_throw_if_fail: ['WebKitTools/Scripts/run-webkit-tests', '--no-launch-safari', '--exit-after-n-failures=1', '--ignore-tests', 'compositing/iframes', '--quiet']
"""
        OutputCapture().assert_outputs(self, step.run, [{}], expected_stderr=expected_stderr)
Example #57
    def test_commit_info(self):
        command = AbstractRevertPrepCommand()
        tool = MockTool()
        command.bind_to_tool(tool)

        with OutputCapture(level=logging.INFO) as captured:
            commit_info = command._commit_info(1234)
        self.assertEqual(captured.root.log.getvalue(),
                         'Preparing revert for bug 50000.\n')
        self.assertTrue(commit_info)

        mock_commit_info = Mock()
        mock_commit_info.bug_id = lambda: None
        tool._checkout.commit_info_for_revision = lambda revision: mock_commit_info
        with OutputCapture(level=logging.INFO) as captured:
            commit_info = command._commit_info(1234)
        self.assertEqual(captured.root.log.getvalue(),
                         'Unable to parse bug number from diff.\n')
        self.assertEqual(commit_info, mock_commit_info)
Example #58
 def _assert_start_line_produces_output(self,
                                        start_line,
                                        should_fail=False,
                                        non_interactive=False):
     tool = MockTool()
     step = ValidateChangeLogs(
         tool, MockOptions(git_commit=None,
                           non_interactive=non_interactive))
     diff_file = Mock()
     diff_file.filename = "mock/ChangeLog"
     diff_file.lines = [(start_line, start_line, "foo")]
     expected_stdout = expected_stderr = expected_logs = ""
     if should_fail and not non_interactive:
         expected_logs = "The diff to mock/ChangeLog looks wrong. Are you sure your ChangeLog entry is at the top of the file?\nOK to continue?\n"
     result = OutputCapture().assert_outputs(self,
                                             step._check_changelog_diff,
                                             [diff_file],
                                             expected_logs=expected_logs)
     self.assertEqual(not result, should_fail)
 def test_run(self):
     tool = MockTool()
     tool._scm = Mock()
     tool._scm.checkout_root = '/mock-checkout'
     step = CleanWorkingDirectory(
         tool, MockOptions(clean=True, force_clean=False))
     step.run({})
     self.assertEqual(tool._scm.ensure_no_local_commits.call_count, 1)
     self.assertEqual(tool._scm.ensure_clean_working_directory.call_count,
                      1)
    def test_suggested_reviewers(self):
        def mock_changelog_entries_for_revision(revision):
            if revision % 2 == 0:
                return [ChangeLogEntry(_changelog1entry1)]
            return [ChangeLogEntry(_changelog1entry2)]

        def mock_revisions_changing_file(path, limit=5):
            if path.endswith("ChangeLog"):
                return [3]
            return [4, 8]

        scm = Mock()
        scm.checkout_root = "/foo/bar"
        scm.changed_files = lambda git_commit: ["file1", "file2", "relative/path/ChangeLog"]
        scm.revisions_changing_file = mock_revisions_changing_file
        checkout = Checkout(scm)
        checkout.changelog_entries_for_revision = mock_changelog_entries_for_revision
        reviewers = checkout.suggested_reviewers(git_commit=None)
        reviewer_names = [reviewer.full_name for reviewer in reviewers]
        self.assertEqual(reviewer_names, [u'Tor Arne Vestb\xf8'])