Example #1
    def test_run_with_driver_accumulates_memory_results(self):
        test, port = self._setup_test()
        counter = [0]

        def mock_run_single(driver, path, timeout):
            counter[0] += 1
            return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=counter[0], measurements={'Malloc': 10, 'JSHeap': 5})

        test.run_single = mock_run_single
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            driver = port.create_driver(worker_number=1, no_timeout=True)
            metrics = test._run_with_driver(driver, None)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, '')

        self.assertEqual(len(metrics), 3)
        self.assertEqual(metrics[0].metric(), 'Time')
        self.assertEqual(metrics[0].to_dict(), {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
            'values': [float(i * 1000) for i in range(2, 21)]})
        self.assertEqual(metrics[1].metric(), 'Malloc')
        self.assertEqual(metrics[1].to_dict(), {'max': 10, 'avg': 10.0, 'median': 10, 'min': 10, 'stdev': 0.0, 'unit': 'bytes',
            'values': [float(10)] * 19})
        self.assertEqual(metrics[2].metric(), 'JSHeap')
        self.assertEqual(metrics[2].to_dict(), {'max': 5, 'avg': 5.0, 'median': 5, 'min': 5, 'stdev': 0.0, 'unit': 'bytes',
            'values': [float(5)] * 19})
    def test_start_cmd(self):
        # Fails on win - see https://bugs.webkit.org/show_bug.cgi?id=84726
        if sys.platform in ("cygwin", "win32"):
            return

        def fake_pid(_):
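            # Stands in for server._wait_for_action below: pretend httpd started
            # by writing the pid file the server code waits for.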
            host.filesystem.write_text_file("/tmp/WebKit/httpd.pid", "42")
            return True

        host = MockHost()
        host.executive = MockExecutive(should_log=True)
        test_port = test.TestPort(host)
        host.filesystem.write_text_file(test_port._path_to_apache_config_file(), "")

        server = LayoutTestApacheHttpd(test_port, "/mock/output_dir", number_of_servers=4)
        server._check_that_all_ports_are_available = lambda: True
        server._is_server_running_on_all_ports = lambda: True
        server._wait_for_action = fake_pid
        oc = OutputCapture()
        try:
            oc.capture_output()
            server.start()
            server.stop()
        finally:
            _, _, logs = oc.restore_output()
        self.assertIn("StartServers 4", logs)
        self.assertIn("MinSpareServers 4", logs)
        self.assertIn("MaxSpareServers 4", logs)
        self.assertTrue(host.filesystem.exists("/mock/output_dir/httpd.conf"))
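
For comparison, the platform guard at the top of test_start_cmd can also be written with unittest's skip decorators, so the run is reported as skipped instead of silently passing. A minimal sketch, not part of the original suite, assuming a Python 2.7+ unittest:

import sys
import unittest


class StartCmdSkipSketch(unittest.TestCase):
    # Hypothetical illustration: the decorator replaces the early
    # 'if sys.platform in ("cygwin", "win32"): return' guard used above.
    @unittest.skipIf(sys.platform in ("cygwin", "win32"),
                     "Fails on win - see https://bugs.webkit.org/show_bug.cgi?id=84726")
    def test_start_cmd(self):
        pass  # body as in the example above
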
Example #3
    def test_rebaseline_test_internal_with_move_overwritten_baselines_to(self):
        self.tool.executive = MockExecutive2()

        # FIXME: it's confusing that this is the test port, and not the regular lion port. Really all of the tests should be using the test ports.
        port = self.tool.port_factory.get('test-mac-snowleopard')
        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-mac-snowleopard/failures/expected/image-expected.txt'), 'original snowleopard result')

        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            builders._exact_matches = {
                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
                "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
            }

            options = MockOptions(optimize=True, builder="MOCK SnowLeopard", suffixes="txt",
                move_overwritten_baselines_to=["test-mac-leopard"], verbose=True, test="failures/expected/image.html",
                results_directory=None)

            oc.capture_output()
            self.command.execute(options, [], self.tool)
        finally:
            out, _, _ = oc.restore_output()
            builders._exact_matches = old_exact_matches

        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result')
        self.assertMultiLineEqual(out, '{"add": []}\n')
Example #4
    def test_no_tests_found(self):
        tester = Tester()
        errors = StringIO.StringIO()

        # Here we need to remove any existing log handlers so that they
        # don't log the messages from webkitpy.test while we're testing it.
        root_logger = logging.getLogger()
        root_handlers = root_logger.handlers
        root_logger.handlers = []

        tester.printer.stream = errors
        tester.finder.find_names = lambda args, run_all: []
        oc = OutputCapture()
        orig_argv = sys.argv[:]
        try:
            sys.argv = sys.argv[0:1]
            oc.capture_output()
            self.assertFalse(tester.run())
        finally:
            _, _, logs = oc.restore_output()
            root_logger.handlers = root_handlers
            sys.argv = orig_argv

        self.assertIn('No tests to run', errors.getvalue())
        self.assertIn('No tests to run', logs)
 def test_empty_state(self):
     capture = OutputCapture()
     options = MockOptions()
     options.reviewer = 'MOCK reviewer'
     options.git_commit = 'MOCK git commit'
     step = UpdateChangeLogsWithReviewer(MockTool(), options)
     capture.assert_outputs(self, step.run, [{}])
Example #6
    def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=True, expected_exit_code=0):
        filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
        filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')

        uploaded = [False]

        def mock_upload_json(hostname, json_path):
            self.assertEqual(hostname, 'some.host')
            self.assertEqual(json_path, '/mock-checkout/output.json')
            uploaded[0] = True
            return upload_succeeds

        runner._upload_json = mock_upload_json
        runner._timestamp = 123456789
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(runner.run(), expected_exit_code)
        finally:
            stdout, stderr, logs = output_capture.restore_output()

        if not expected_exit_code:
            self.assertEqual(logs, '\n'.join([
                'Running 2 tests',
                'Running Bindings/event-target-wrapper.html (1 of 2)',
                'RESULT Bindings: event-target-wrapper= 1489.05 ms',
                'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
                '',
                'Running inspector/pass.html (2 of 2)',
                'RESULT group_name: test_name= 42 ms',
                '',
                '']))

        return uploaded[0]
Example #7
    def test_analyze_test_reftest_match_and_mismatch(self):
        test_html = """<head>
<link rel="match" href="green-box-ref.xht" />
<link rel="match" href="blue-box-ref.xht" />
<link rel="mismatch" href="orange-box-notref.xht" />
</head>
"""
        oc = OutputCapture()
        oc.capture_output()

        try:
            test_path = '/some/madeup/path/'
            parser = TestParser(options, test_path + 'somefile.html')
            test_info = parser.analyze_test(test_contents=test_html)
        finally:
            _, _, logs = oc.restore_output()

        self.assertNotEqual(test_info, None, 'did not find a test')
        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
        self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
        self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
        self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
        self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')

        self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')
Example #8
    def test_run_single_fails_when_output_has_error(self):
        output_capture = OutputCapture()
        output_capture.capture_output()

        loaded_pages = []

        def run_test(test_input, stop_when_done):
            loaded_pages.append(test_input)
            self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
            return DriverOutput('actual text', 'actual image', 'actual checksum',
                audio=None, crash=False, timeout=False, error='some error')

        test, port = self._setup_test(run_test)
        test._archive_path = '/path/some-dir.wpr'
        test._url = 'http://some-test/'

        try:
            driver = port.create_driver(worker_number=1, no_timeout=True)
            self.assertEqual(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100), None)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

        self.assertEqual(len(loaded_pages), 2)
        self.assertEqual(loaded_pages[0].test_name, test.force_gc_test)
        self.assertEqual(loaded_pages[1].test_name, 'http://some-test/')
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, 'error: some-test.replay\nsome error\n')
Example #9
 def test_run_test_set_for_parser_tests(self):
     runner, port = self.create_runner()
     tests = self._tests_for_runner(runner, ["Bindings/event-target-wrapper.html", "Parser/some-parser.html"])
     output = OutputCapture()
     output.capture_output()
     try:
         unexpected_result_count = runner._run_tests_set(tests, port)
     finally:
         stdout, stderr, log = output.restore_output()
     self.assertEqual(unexpected_result_count, 0)
     self.assertEqual(
         log,
         "\n".join(
             [
                 "Running Bindings/event-target-wrapper.html (1 of 2)",
                 "RESULT Bindings: event-target-wrapper= 1489.05 ms",
                 "median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms",
                 "",
                 "Running Parser/some-parser.html (2 of 2)",
                 "RESULT Parser: some-parser= 1100.0 ms",
                 "median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms",
                 "",
                 "",
             ]
         ),
     )
Example #10
    def test_run_with_memory_output(self):
        port = MockPort()
        test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
        memory_results = {'Malloc': 10, 'JSHeap': 5}
        self.maxDiff = None
        driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test, memory_results)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            metrics = test._run_with_driver(driver, None)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, '')

        self.assertEqual(len(metrics), 3)
        self.assertEqual(metrics[0].metric(), 'Time')
        self.assertEqual(metrics[0].to_dict(), {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
            'values': [float(i * 1000) for i in range(2, 21)]})
        self.assertEqual(metrics[1].metric(), 'Malloc')
        self.assertEqual(metrics[1].to_dict(), {'max': 10, 'avg': 10.0, 'median': 10, 'min': 10, 'stdev': 0.0, 'unit': 'bytes',
            'values': [float(10)] * 19})
        self.assertEqual(metrics[2].metric(), 'JSHeap')
        self.assertEqual(metrics[2].to_dict(), {'max': 5, 'avg': 5.0, 'median': 5, 'min': 5, 'stdev': 0.0, 'unit': 'bytes',
            'values': [float(5)] * 19})
Example #11
    def test_run_single(self):
        output_capture = OutputCapture()
        output_capture.capture_output()

        loaded_pages = []

        def run_test(test_input, stop_when_done):
            if test_input.test_name == test.force_gc_test:
                loaded_pages.append(test_input)
                return
            if test_input.test_name != "about:blank":
                self.assertEqual(test_input.test_name, 'http://some-test/')
            loaded_pages.append(test_input)
            self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
            return DriverOutput('actual text', 'actual image', 'actual checksum',
                audio=None, crash=False, timeout=False, error=False)

        test, port = self._setup_test(run_test)
        test._archive_path = '/path/some-dir/some-test.wpr'
        test._url = 'http://some-test/'

        try:
            driver = port.create_driver(worker_number=1, no_timeout=True)
            self.assertTrue(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100))
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

        self.assertEqual(len(loaded_pages), 2)
        self.assertEqual(loaded_pages[0].test_name, test.force_gc_test)
        self.assertEqual(loaded_pages[1].test_name, 'http://some-test/')
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, '')
        self.assertEqual(port.host.filesystem.read_binary_file('/path/some-dir/some-test-actual.png'), 'actual image')
Example #12
    def test_parse_output_with_failing_line(self):
        output = DriverOutput("""
Running 20 times
Ignoring warm-up run (1115)

some-unrecognizable-line

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms
""", image=None, image_hash=None, audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
            test._filter_output(output)
            self.assertEqual(test.parse_output(output), None)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, 'ERROR: some-unrecognizable-line\n')
Example #13
    def test_parse_output_with_subtests(self):
        output = DriverOutput("""
Running 20 times
some test: [1, 2, 3, 4, 5]
other test = else: [6, 7, 8, 9, 10]
Ignoring warm-up run (1115)

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms
""", image=None, image_hash=None, audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
            self._assert_results_are_correct(test, output)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, '')
Example #14
class PrintBaselinesTest(unittest.TestCase):
    def setUp(self):
        self.oc = None
        self.tool = MockTool()
        self.test_port = self.tool.port_factory.get('test-win-xp')
        self.tool.port_factory.get = lambda port_name=None: self.test_port
        self.tool.port_factory.all_port_names = lambda: TestPort.ALL_BASELINE_VARIANTS

    def tearDown(self):
        if self.oc:
            self.restore_output()

    def capture_output(self):
        self.oc = OutputCapture()
        self.oc.capture_output()

    def restore_output(self):
        stdout, stderr, logs = self.oc.restore_output()
        self.oc = None
        return (stdout, stderr, logs)

    def test_basic(self):
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform=None), ['passes/text.html'], self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout,
                          ('// For test-win-xp\n'
                           'passes/text-expected.png\n'
                           'passes/text-expected.txt\n'))

    def test_multiple(self):
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform='test-win-*'), ['passes/text.html'], self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout,
                          ('// For test-win-vista\n'
                           'passes/text-expected.png\n'
                           'passes/text-expected.txt\n'
                           '\n'
                           '// For test-win-win7\n'
                           'passes/text-expected.png\n'
                           'passes/text-expected.txt\n'
                           '\n'
                           '// For test-win-xp\n'
                           'passes/text-expected.png\n'
                           'passes/text-expected.txt\n'))

    def test_csv(self):
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(MockOptions(all=False, platform='*xp', csv=True, include_virtual_tests=False), ['passes/text.html'], self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout,
                          ('test-win-xp,passes/text.html,None,png,passes/text-expected.png,None\n'
                           'test-win-xp,passes/text.html,None,txt,passes/text-expected.txt,None\n'))
Example #15
 def test_parse_output(self):
     output = DriverOutput('\n'.join([
         'Running 20 times',
         'Ignoring warm-up run (1115)',
         '',
         'Time:',
         'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
         'avg 1100 ms',
         'median 1101 ms',
         'stdev 11 ms',
         'min 1080 ms',
         'max 1120 ms']), image=None, image_hash=None, audio=None)
     output_capture = OutputCapture()
     output_capture.capture_output()
     try:
         test = PerfTest(None, 'some-test', '/path/some-dir/some-test')
         self.assertEqual(test.parse_output(output),
             {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
                 'values': [i for i in range(1, 20)]}})
     finally:
         actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
     self.assertEqual(actual_stdout, '')
     self.assertEqual(actual_stderr, '')
     self.assertEqual(actual_logs, 'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')
Example #16
    def test_convert_for_webkit_nothing_to_convert(self):
        """ Tests convert_for_webkit() using a basic test that has nothing to convert """

        test_html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR"
href="mailto:EMAIL OR http://CONTACT_PAGE"/>
<link rel="help" href="RELEVANT_SPEC_SECTION"/>
<meta name="assert" content="TEST ASSERTION"/>
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
        converter = W3CTestConverter()

        oc = OutputCapture()
        oc.capture_output()
        try:
            converted = converter.convert_html('/nothing/to/convert', test_html, DUMMY_FILENAME)
        finally:
            oc.restore_output()

        self.verify_no_conversion_happened(converted)
Example #17
    def test_rebaseline_test_internal_with_move_overwritten_baselines_to(self):
        old_exact_matches = builders._exact_matches
        try:
            builders._exact_matches = {
                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
                "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
            }

            command = RebaselineTest()
            tool = MockTool()
            tool.executive = MockExecutive(should_log=True)
            command.bind_to_tool(tool)

            port = tool.port_factory.get('test-mac-snowleopard')
            tool.filesystem.write_text_file(tool.filesystem.join(port.baseline_version_dir(), 'failures', 'expected', 'image-expected.txt'), '')

            options = MockOptions(optimize=True, builder="MOCK SnowLeopard", suffixes="txt",
                move_overwritten_baselines_to=["test-mac-leopard"], verbose=True, test="failures/expected/image.html")

            oc = OutputCapture()
            oc.capture_output()
            try:
                command.execute(options, [], tool)
            finally:
                _, _, logs = oc.restore_output()

            self.assertTrue("Copying baseline from /test.checkout/LayoutTests/platform/test-mac-snowleopard/failures/expected/image-expected.txt to /test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image-expected.txt.\n" in logs)

        finally:
            builders._exact_matches = old_exact_matches
Example #18
    def test_analyze_test_reftest_multiple_matches(self):
        test_html = """<head>
<link rel="match" href="green-box-ref.xht" />
<link rel="match" href="blue-box-ref.xht" />
<link rel="match" href="orange-box-ref.xht" />
</head>
"""
        oc = OutputCapture()
        oc.capture_output()
        try:
            test_path = "/some/madeup/path/"
            parser = TestParser(options, test_path + "somefile.html")
            test_info = parser.analyze_test(test_contents=test_html)
        finally:
            _, _, logs = oc.restore_output()

        self.assertNotEqual(test_info, None, "did not find a test")
        self.assertTrue("test" in test_info.keys(), "did not find a test file")
        self.assertTrue("reference" in test_info.keys(), "did not find a reference file")
        self.assertTrue(test_info["reference"].startswith(test_path), "reference path is not correct")
        self.assertFalse("refsupport" in test_info.keys(), "there should be no refsupport files for this test")
        self.assertFalse("jstest" in test_info.keys(), "test should not have been analyzed as a jstest")

        self.assertEqual(
            logs, "Multiple references are not supported. Importing the first ref defined in somefile.html\n"
        )
Example #19
    def test_convert_for_webkit_harness_and_properties(self):
        """ Tests convert_for_webkit() using a basic JS test that uses testharness.js and testharness.css and has 4 prefixed properties: 3 in a style block + 1 inline style """

        test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">

#block1 { @test0@: propvalue; }
#block2 { @test1@: propvalue; }
#block3 { @test2@: propvalue; }

</style>
</head>
<body>
<div id="elem1" style="@test3@: propvalue;"></div>
</body>
</html>
"""
        converter = W3CTestConverter()
        fake_dir_path = self.fake_dir_path(converter, 'harnessandprops')

        oc = OutputCapture()
        oc.capture_output()
        try:
            test_content = self.generate_test_content(converter.prefixed_properties, 2, test_html)
            converted = converter.convert_html(fake_dir_path, test_content[1], DUMMY_FILENAME)
        finally:
            oc.restore_output()

        self.verify_conversion_happened(converted)
        self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
        self.verify_prefixed_properties(converted, test_content[0])
    def test_convert_attributes_if_needed(self):
        """ Tests convert_attributes_if_needed() using a reference file that has some relative src paths """

        test_html = """<html>
<head>
<script src="../../some-script.js"></script>
<style src="../../../some-style.css"></style>
</head>
<body>
<img src="../../../../some-image.jpg">
</body>
</html>
"""
        test_reference_support_info = {'reference_relpath': '../', 'files': ['../../some-script.js', '../../../some-style.css', '../../../../some-image.jpg'], 'elements': ['script', 'style', 'img']}
        converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, test_reference_support_info)

        oc = OutputCapture()
        oc.capture_output()
        try:
            converter.feed(test_html)
            converter.close()
            converted = converter.output()
        finally:
            oc.restore_output()

        self.verify_conversion_happened(converted)
        self.verify_reference_relative_paths(converted, test_reference_support_info)
    def test_run_with_driver_accumulates_memory_results(self):
        test, port = self._setup_test()
        counter = [0]

        def mock_run_single(driver, path, timeout):
            counter[0] += 1
            return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=counter[0], measurements={'Malloc': 10, 'JSHeap': 5})

        test.run_single = mock_run_single
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            driver = port.create_driver(worker_number=1, no_timeout=True)
            self.assertTrue(test._run_with_driver(driver, None))
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, '')

        metrics = test._metrics
        self.assertEqual(sorted(metrics.keys()), ['JSHeap', 'Malloc', 'Time'])
        self.assertEqual(metrics['Time'].flattened_iteration_values(), [float(i * 1000) for i in range(2, 7)])
        self.assertEqual(metrics['Malloc'].flattened_iteration_values(), [float(10)] * 5)
        self.assertEqual(metrics['JSHeap'].flattened_iteration_values(), [float(5)] * 5)
Example #22
    def test_rebaseline_test_internal_with_port_that_lacks_buildbot(self):
        self.tool.executive = MockExecutive2()

        port = self.tool.port_factory.get('test-win-win7')
        self._write(
            port.host.filesystem.join(
                port.layout_tests_dir(),
                'platform/test-win-win10/failures/expected/image-expected.txt'),
            'original win10 result')

        oc = OutputCapture()
        try:
            options = optparse.Values({
                'optimize': True,
                'builder': "MOCK Win10",
                'suffixes': "txt",
                'verbose': True,
                'test': "failures/expected/image.html",
                'results_directory': None,
                'build_number': None
            })
            oc.capture_output()
            self.command.execute(options, [], self.tool)
        finally:
            out, _, _ = oc.restore_output()

        self.assertMultiLineEqual(
            self._read(self.tool.filesystem.join(
                port.layout_tests_dir(),
                'platform/test-win-win10/failures/expected/image-expected.txt')),
            'MOCK Web result, convert 404 to None=True')
        self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(
            port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')))
        self.assertMultiLineEqual(
            out, '{"remove-lines": [{"test": "failures/expected/image.html", "builder": "MOCK Win10"}]}\n')
Example #23
    def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0):
        filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
        filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')

        uploaded = [False]

        def mock_upload_json(hostname, json_path):
            self.assertEqual(hostname, 'some.host')
            self.assertEqual(json_path, '/mock-checkout/output.json')
            uploaded[0] = upload_succeeds
            return upload_succeeds

        runner._upload_json = mock_upload_json
        runner._timestamp = 123456789
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(runner.run(), expected_exit_code)
        finally:
            stdout, stderr, logs = output_capture.restore_output()

        if not expected_exit_code:
            expected_logs = 'Running 2 tests\n' + EventTargetWrapperTestData.output + InspectorPassTestData.output
            if results_shown:
                expected_logs += 'MOCK: user.open_url: file://...\n'
            self.assertEqual(self._normalize_output(logs), expected_logs)

        self.assertEqual(uploaded[0], upload_succeeds)

        return logs
Example #24
 def test_parse_output_with_failing_line(self):
     output = DriverOutput('\n'.join([
         'Running 20 times',
         'Ignoring warm-up run (1115)',
         '',
         'some-unrecognizable-line',
         '',
         'Time:',
         'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
         'avg 1100 ms',
         'median 1101 ms',
         'stdev 11 ms',
         'min 1080 ms',
         'max 1120 ms']), image=None, image_hash=None, audio=None)
     output_capture = OutputCapture()
     output_capture.capture_output()
     try:
         test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
         test._filter_output(output)
         self.assertEqual(test.parse_output(output), None)
     finally:
         actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
     self.assertEqual(actual_stdout, '')
     self.assertEqual(actual_stderr, '')
     self.assertEqual(actual_logs, 'ERROR: some-unrecognizable-line\n')
Example #25
    def test_run_memory_test(self):
        runner, port = self.create_runner_and_setup_results_template()
        runner._timestamp = 123456789
        port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner.run()
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n')
        results = self._load_output_json(runner)[0]['results']
        values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]

        # Stdev for test doesn't match on some bots
        self.assertEqual(sorted(results['Parser/memory-test'].keys()), sorted(MemoryTestData.results.keys()))
        for key in MemoryTestData.results:
            if key == 'stdev':
                self.assertAlmostEqual(results['Parser/memory-test'][key], MemoryTestData.results[key], places=4)
            else:
                self.assertEqual(results['Parser/memory-test'][key], MemoryTestData.results[key])
        self.assertEqual(results['Parser/memory-test:JSHeap'], MemoryTestData.js_heap_results)
        self.assertEqual(results['Parser/memory-test:Malloc'], MemoryTestData.malloc_results)
Example #26
    def test_rebaseline(self):
        self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
        self.command._tests_to_update = lambda builder: ['mock/path/to/test.html']

        self._zero_out_test_expectations()

        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            builders._exact_matches = {
                "MOCK builder": {
                    "port_name": "test-mac-leopard",
                    "specifiers": set(["mock-specifier"])
                },
            }
            oc.capture_output()
            self.command.execute(
                MockOptions(
                    optimize=False,
                    builders=None,
                    suffixes="txt,png",
                    verbose=True,
                    move_overwritten_baselines=False), [], self.tool)
        finally:
            oc.restore_output()
            builders._exact_matches = old_exact_matches

        calls = filter(lambda x: x[0] != 'perl', self.tool.executive.calls)
        self.assertEqual(calls, [[[
            'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png',
            '--builder', 'MOCK builder', '--test', 'mock/path/to/test.html',
            '--verbose'
        ]]])
Example #27
 def test_parse_output_with_subtests(self):
     output = DriverOutput('\n'.join([
         'Running 20 times',
         'some test: [1, 2, 3, 4, 5]',
         'other test = else: [6, 7, 8, 9, 10]',
         '',
         'Time:',
         'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
         'avg 1100 ms',
         'median 1101 ms',
         'stdev 11 ms',
         'min 1080 ms',
         'max 1120 ms']), image=None, image_hash=None, audio=None)
     output_capture = OutputCapture()
     output_capture.capture_output()
     try:
         test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
         test._filter_output(output)
         self.assertEqual(test.parse_output(output),
             {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
                 'values': [i for i in range(1, 20)]}})
     finally:
         actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
     self.assertEqual(actual_stdout, '')
     self.assertEqual(actual_stderr, '')
     self.assertEqual(actual_logs, '')
Example #28
    def _test_check_test_expectations(self, filename):
        capture = OutputCapture()
        options = MockOptions()
        options.git_commit = ""
        options.non_interactive = True

        tool = MockTool()
        tool.user = None  # Will cause any access of tool.user to raise an exception.
        step = Commit(tool, options)
        state = {
            "changed_files": [filename + "XXX"],
        }

        tool.executive = MockExecutive(should_log=True, should_throw_when_run=False)
        expected_logs = "Committed r49824: <http://trac.webkit.org/changeset/49824>\n"
        capture.assert_outputs(self, step.run, [state], expected_logs=expected_logs)

        state = {
            "changed_files": ["platform/chromium/" + filename],
        }
        expected_logs = """MOCK run_and_throw_if_fail: ['mock-check-webkit-style', '--diff-files', 'platform/chromium/%s'], cwd=/mock-checkout
Committed r49824: <http://trac.webkit.org/changeset/49824>
""" % filename
        capture.assert_outputs(self, step.run, [state], expected_logs=expected_logs)

        tool.executive = MockExecutive(should_log=True, should_throw_when_run=set(["platform/chromium/" + filename]))
        self.assertRaises(ScriptError, capture.assert_outputs, self, step.run, [state])
Example #29
    def test_run_memory_test(self):
        runner, port = self.create_runner_and_setup_results_template()
        runner._timestamp = 123456789
        port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner.run()
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self.normalizeFinishedTime(log), '\n'.join([
            'Running 1 tests',
            'Running Parser/memory-test.html (1 of 1)',
            'RESULT Parser: memory-test= 1100.0 ms',
            'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
            'RESULT Parser: memory-test: JSHeap= 832000.0 bytes',
            'median= 829000.0 bytes, stdev= 15000.0 bytes, min= 811000.0 bytes, max= 848000.0 bytes',
            'RESULT Parser: memory-test: Malloc= 532000.0 bytes',
            'median= 529000.0 bytes, stdev= 13000.0 bytes, min= 511000.0 bytes, max= 548000.0 bytes',
            'Finished: 0.1 s',
            '', '']))
        results = runner.load_output_json()[0]['results']
        values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
        self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms', 'values': values})
        self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes', 'values': values})
        self.assertEqual(results['Parser/memory-test:Malloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes', 'values': values})
    def test_convert_for_webkit_properties_only(self):
        """ Tests convert_for_webkit() using a test that has 2 prefixed properties: 1 in a style block + 1 inline style """

        test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">

#block1 { @test0@: @propvalue0@; }

</style>
</head>
<body>
<div id="elem1" style="@test1@: @propvalue1@;"></div>
</body>
</html>
"""
        fake_dir_path = self.fake_dir_path('harnessandprops')
        converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
        test_content = self.generate_test_content_properties_and_values(converter.prefixed_properties, converter.prefixed_property_values, 1, test_html)

        oc = OutputCapture()
        oc.capture_output()
        try:
            converter.feed(test_content[2])
            converter.close()
            converted = converter.output()
        finally:
            oc.restore_output()

        self.verify_conversion_happened(converted)
        self.verify_test_harness_paths(converter, converted[2], fake_dir_path, 1, 1)
        self.verify_prefixed_properties(converted, test_content[0])
        self.verify_prefixed_property_values(converted, test_content[1])
Example #31
 def capture_output(self):
     self.oc = OutputCapture()
     self.oc.capture_output()
Example #32
 def test_default_configuration__unknown(self):
     # Ignore the warning about an unknown configuration value.
     oc = OutputCapture()
     oc.capture_output()
     self.assert_configuration('Unknown', 'Unknown')
     oc.restore_output()
Example #33
 def test_rollout_updates_working_copy(self):
     rollout = Rollout()
     tool = MockTool()
     tool.executive = MockExecutive(should_log=True)
     expected_logs = "MOCK run_and_throw_if_fail: ['mock-update-webkit'], cwd=/mock-checkout\n"
     OutputCapture().assert_outputs(self, rollout._update_working_copy, [tool], expected_logs=expected_logs)
Example #34
 def test_section_typo(self):
     watch_list = ('{"DEFINTIONS": {}}')
     OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                    expected_logs='Unknown section "DEFINTIONS" in watch list.'
                                    + '\n\nPerhaps it should be DEFINITIONS.\n')
Example #35
 def test_bad_section(self):
     watch_list = ('{"FOO": {}}')
     OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                    expected_logs='Unknown section "FOO" in watch list.\n')
Example #36
 def test_add_cc_to_bug(self):
     bugzilla = Bugzilla()
     bugzilla.browser = MockBrowser()
     bugzilla.authenticate = lambda: None
     expected_logs = "Adding ['*****@*****.**'] to the CC list for bug 42\n"
     OutputCapture().assert_outputs(self, bugzilla.add_cc_to_bug, [42, ["*****@*****.**"]], expected_logs=expected_logs)
Example #37
 def test_show_results_html_file(self):
     port = MacPort(filesystem=MockFileSystem(), user=MockUser(), executive=MockExecutive())
     # Delay setting a should_log executive to avoid logging from MacPort.__init__.
     port._executive = MockExecutive(should_log=True)
     expected_stderr = "MOCK run_command: ['Tools/Scripts/run-safari', '--release', '-NSOpen', 'test.html'], cwd=/mock-checkout\n"
     OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_stderr=expected_stderr)
Example #38
 def test_show_results_html_file(self):
     port = self.make_port()
     port._executive = MockExecutive(should_log=True)
     expected_stderr = "MOCK run_command: ['Tools/Scripts/run-launcher', '--release', '--qt', 'file://test.html'], cwd=/mock-checkout\n"
     OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_stderr=expected_stderr)
Example #39
class PrintBaselinesTest(unittest.TestCase):
    def setUp(self):
        self.oc = None
        self.tool = MockTool()
        self.test_port = self.tool.port_factory.get('test-win-xp')
        self.tool.port_factory.get = lambda port_name=None: self.test_port
        self.tool.port_factory.all_port_names = lambda: TestPort.ALL_BASELINE_VARIANTS

    def tearDown(self):
        if self.oc:
            self.restore_output()

    def capture_output(self):
        self.oc = OutputCapture()
        self.oc.capture_output()

    def restore_output(self):
        stdout, stderr, logs = self.oc.restore_output()
        self.oc = None
        return (stdout, stderr, logs)

    def test_basic(self):
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(
            MockOptions(all=False,
                        include_virtual_tests=False,
                        csv=False,
                        platform=None), ['passes/text.html'], self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout, ('// For test-win-xp\n'
                                           'passes/text-expected.png\n'
                                           'passes/text-expected.txt\n'))

    def test_multiple(self):
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(
            MockOptions(all=False,
                        include_virtual_tests=False,
                        csv=False,
                        platform='test-win-*'), ['passes/text.html'],
            self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout, ('// For test-win-vista\n'
                                           'passes/text-expected.png\n'
                                           'passes/text-expected.txt\n'
                                           '\n'
                                           '// For test-win-win7\n'
                                           'passes/text-expected.png\n'
                                           'passes/text-expected.txt\n'
                                           '\n'
                                           '// For test-win-xp\n'
                                           'passes/text-expected.png\n'
                                           'passes/text-expected.txt\n'))

    def test_csv(self):
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(
            MockOptions(all=False,
                        platform='*xp',
                        csv=True,
                        include_virtual_tests=False), ['passes/text.html'],
            self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout, (
            'test-win-xp,passes/text.html,None,png,passes/text-expected.png,None\n'
            'test-win-xp,passes/text.html,None,txt,passes/text-expected.txt,None\n'
        ))
Example #40
 def assertDriverStartSuccessful(self, driver, expected_logs, expected_display, pixel_tests=False):
     OutputCapture().assert_outputs(self, driver.start, [pixel_tests, []], expected_logs=expected_logs)
     self.assertTrue(driver._server_process.started)
     self.assertEqual(driver._server_process.env['DISPLAY'], expected_display)
     self.assertEqual(driver._server_process.env['GDK_BACKEND'], 'x11')
Example #41
 def test_show_results_html_file(self):
     port = self.make_port()
     # Delay setting a should_log executive to avoid logging from MacPort.__init__.
     port._executive = MockExecutive(should_log=True)
     expected_logs = "MOCK popen: ['Tools/Scripts/run-safari', '--release', '--no-saved-state', '-NSOpen', 'test.html'], cwd=/mock-checkout\n"
     OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_logs=expected_logs)
Example #42
 def begin_work_queue(self):
     output_capture = OutputCapture()
     output_capture.capture_output()
     CommitQueue.begin_work_queue(self)
     output_capture.restore_output()
 def setUp(self):
     self.output = OutputCapture()
Example #44
    def test_warn_if_application_is_xcode(self):
        output = OutputCapture()
        user = User()
        output.assert_outputs(self, user._warn_if_application_is_xcode,
                              ["TextMate"])
        output.assert_outputs(self, user._warn_if_application_is_xcode,
                              ["/Applications/TextMate.app"])
        output.assert_outputs(self, user._warn_if_application_is_xcode,
                              ["XCode"])  # case sensitive matching

        xcode_warning = "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\".\n"
        output.assert_outputs(self,
                              user._warn_if_application_is_xcode, ["Xcode"],
                              expected_stdout=xcode_warning)
        output.assert_outputs(self,
                              user._warn_if_application_is_xcode,
                              ["/Developer/Applications/Xcode.app"],
                              expected_stdout=xcode_warning)
Example #45
 def test_help(self):
     expected_stderr = "MOCK: irc.post: mock_nick: Available commands: create-bug, help, hi, last-green-revision, restart, roll-chromium-deps, rollout, whois\n"
     OutputCapture().assert_outputs(self,
                                    run,
                                    args=["help"],
                                    expected_stderr=expected_stderr)
Example #46
    def test_upload_json(self):
        runner, port = self.create_runner()
        port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'

        class MockFileUploader:
            called = []
            upload_single_text_file_throws = False
            upload_single_text_file_return_value = None

            @classmethod
            def reset(cls):
                cls.called = []
                cls.upload_single_text_file_throws = False
                cls.upload_single_text_file_return_value = None

            def __init__(mock, url, timeout):
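                # Note: the first parameter is deliberately named 'mock' rather
                # than 'self', so that 'self' keeps referring to the enclosing
                # TestCase and its assert* helpers.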
                self.assertEqual(url, 'https://some.host/some/path')
                self.assertTrue(isinstance(timeout, int) and timeout)
                mock.called.append('FileUploader')

            def upload_single_text_file(mock, filesystem, content_type,
                                        filename):
                self.assertEqual(filesystem, port.host.filesystem)
                self.assertEqual(content_type, 'application/json')
                self.assertEqual(filename, 'some.json')
                mock.called.append('upload_single_text_file')
                if mock.upload_single_text_file_throws:
                    raise Exception
                return mock.upload_single_text_file_return_value

        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO(
            'OK')
        self.assertTrue(
            runner._upload_json('some.host', 'some.json', '/some/path',
                                MockFileUploader))
        self.assertEqual(MockFileUploader.called,
                         ['FileUploader', 'upload_single_text_file'])

        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO(
            'Some error')
        output = OutputCapture()
        output.capture_output()
        self.assertFalse(
            runner._upload_json('some.host', 'some.json', '/some/path',
                                MockFileUploader))
        _, _, logs = output.restore_output()
        self.assertEqual(
            logs,
            'Uploaded JSON to https://some.host/some/path but got a bad response:\nSome error\n'
        )

        # Throwing an exception in upload_single_text_file shouldn't blow up _upload_json
        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_throws = True
        self.assertFalse(
            runner._upload_json('some.host', 'some.json', '/some/path',
                                MockFileUploader))
        self.assertEqual(MockFileUploader.called,
                         ['FileUploader', 'upload_single_text_file'])

        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO(
            '{"status": "OK"}')
        self.assertTrue(
            runner._upload_json('some.host', 'some.json', '/some/path',
                                MockFileUploader))
        self.assertEqual(MockFileUploader.called,
                         ['FileUploader', 'upload_single_text_file'])

        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO(
            '{"status": "SomethingHasFailed", "failureStored": false}')
        output = OutputCapture()
        output.capture_output()
        self.assertFalse(
            runner._upload_json('some.host', 'some.json', '/some/path',
                                MockFileUploader))
        _, _, logs = output.restore_output()
        serialized_json = json.dumps(
            {
                'status': 'SomethingHasFailed',
                'failureStored': False
            }, indent=4)
        self.assertEqual(
            logs,
            'Uploaded JSON to https://some.host/some/path but got an error:\n%s\n'
            % serialized_json)
Example #47
 def test_rollout_bananas(self):
     expected_stderr = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
     OutputCapture().assert_outputs(self,
                                    run,
                                    args=["rollout bananas"],
                                    expected_stderr=expected_stderr)
Example #48
 def test_empty_state(self):
     capture = OutputCapture()
     step = UpdateChangeLogsWithReviewer(MockTool(), Mock())
     capture.assert_outputs(self, step.run, [{}])
Example #49
 def test_roll_chromium_deps(self):
     expected_stderr = "MOCK: irc.post: mock_nick: Rolling Chromium DEPS to r21654\nMOCK: irc.post: mock_nick: Created DEPS roll: http://example.com/36936\n"
     OutputCapture().assert_outputs(self,
                                    run,
                                    args=["roll-chromium-deps 21654"],
                                    expected_stderr=expected_stderr)
Example #50
 def test_multi_rollout_no_reason(self):
     expected_stderr = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
     OutputCapture().assert_outputs(self,
                                    run,
                                    args=["rollout 21654 21655 r21656"],
                                    expected_stderr=expected_stderr)
Example #51
    def test_parse_and_print_leaks_detail_empty(self):
        files = {}
        files['/tmp/Logs/layout-test-results/drt-28532-ebc9a6c63be411e399d4d43d7e01ba08-leaks.xml'] = ""
        leakdetector_valgrind = LeakDetectorValgrind(MockExecutive2(), MockFileSystem(files), '/tmp/layout-test-results/')

        OutputCapture().assert_outputs(self, leakdetector_valgrind.parse_and_print_leaks_detail, [files], expected_logs=make_mock_valgrind_results_empty())
Example #52
 def test_roll_chromium_deps_to_lkgr(self):
     expected_stderr = "MOCK: irc.post: mock_nick: Rolling Chromium DEPS to last-known good revision\nMOCK: irc.post: mock_nick: Created DEPS roll: http://example.com/36936\n"
     OutputCapture().assert_outputs(self,
                                    run,
                                    args=["roll-chromium-deps"],
                                    expected_stderr=expected_stderr)
Example #53
    def test_rebaseline_expectations(self):
        command = RebaselineExpectations()
        tool = MockTool()
        command.bind_to_tool(tool)

        for port_name in tool.port_factory.all_port_names():
            port = tool.port_factory.get(port_name)
            for path in port.expectations_files():
                tool.filesystem.write_text_file(path, '')

        # Don't enable logging until after we create the mock expectation files as some Port.__init__'s run subcommands.
        tool.executive = MockExecutive(should_log=True)

        def run_in_parallel(commands):
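            # Echo the commands instead of executing them so they can be
            # compared against expected_stdout below.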
            print commands
            return ""

        tool.executive.run_in_parallel = run_in_parallel

        expected_logs = """Retrieving results for chromium-linux-x86 from Webkit Linux 32.
    userscripts/another-test.html (txt)
    userscripts/images.svg (png)
Retrieving results for chromium-linux-x86_64 from Webkit Linux.
    userscripts/another-test.html (txt)
    userscripts/images.svg (png)
Retrieving results for chromium-mac-lion from Webkit Mac10.7.
    userscripts/another-test.html (txt)
    userscripts/images.svg (png)
Retrieving results for chromium-mac-snowleopard from Webkit Mac10.6.
    userscripts/another-test.html (txt)
    userscripts/images.svg (png)
Retrieving results for chromium-win-win7 from Webkit Win7.
    userscripts/another-test.html (txt)
    userscripts/images.svg (png)
Retrieving results for chromium-win-xp from Webkit Win.
    userscripts/another-test.html (txt)
    userscripts/images.svg (png)
Retrieving results for efl from EFL Linux 64-bit Release.
    userscripts/another-test.html (txt)
    userscripts/images.svg (png)
Retrieving results for gtk from GTK Linux 64-bit Release.
    userscripts/another-test.html (txt)
    userscripts/images.svg (png)
Retrieving results for mac-lion from Apple Lion Release WK1 (Tests).
    userscripts/another-test.html (txt)
    userscripts/images.svg (png)
Retrieving results for qt-linux from Qt Linux Release.
    userscripts/another-test.html (txt)
    userscripts/images.svg (png)
Retrieving results for win-7sp0 from Apple Win 7 Release (Tests).
    userscripts/another-test.html (txt)
    userscripts/images.svg (png)
Using the chromium port without having the downstream skia_test_expectations.txt file checked out. Expectations related things might be wonky.
Using the chromium port without having the downstream skia_test_expectations.txt file checked out. Expectations related things might be wonky.
Using the chromium port without having the downstream skia_test_expectations.txt file checked out. Expectations related things might be wonky.
Using the chromium port without having the downstream skia_test_expectations.txt file checked out. Expectations related things might be wonky.
Using the chromium port without having the downstream skia_test_expectations.txt file checked out. Expectations related things might be wonky.
Using the chromium port without having the downstream skia_test_expectations.txt file checked out. Expectations related things might be wonky.
"""

        expected_stdout = """[(['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Webkit Linux 32', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Webkit Linux', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Webkit Mac10.6', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Webkit Mac10.7', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Webkit Win7', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Apple Win 7 Release (Tests)', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'EFL Linux 64-bit Release', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Webkit Win', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'GTK Linux 64-bit Release', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Qt Linux Release', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Apple Lion Release WK1 (Tests)', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Webkit Linux 32', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Webkit Linux', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Webkit Mac10.6', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Webkit Mac10.7', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Webkit Win7', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Apple Win 7 Release (Tests)', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'EFL Linux 64-bit Release', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Webkit Win', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'GTK Linux 64-bit Release', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Qt Linux Release', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Apple Lion Release WK1 (Tests)', '--test', 'userscripts/images.svg'], '/mock-checkout')]
"""

        expected_stderr = """MOCK run_command: ['qmake', '-v'], cwd=None
MOCK run_command: ['qmake', '-v'], cwd=None
MOCK run_command: ['qmake', '-v'], cwd=None
MOCK run_command: ['qmake', '-v'], cwd=None
MOCK run_command: ['qmake', '-v'], cwd=None
MOCK run_command: ['qmake', '-v'], cwd=None
"""

        command._tests_to_rebaseline = lambda port: {'userscripts/another-test.html': set(['txt']), 'userscripts/images.svg': set(['png'])}
        OutputCapture().assert_outputs(self, command.execute, [MockOptions(optimize=False), [], tool], expected_logs=expected_logs, expected_stdout=expected_stdout, expected_stderr=expected_stderr)

        expected_stderr_with_optimize = """MOCK run_command: ['qmake', '-v'], cwd=None
MOCK run_command: ['qmake', '-v'], cwd=None
MOCK run_command: ['qmake', '-v'], cwd=None
MOCK run_command: ['echo', 'optimize-baselines', '--suffixes', 'txt', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['qmake', '-v'], cwd=None
MOCK run_command: ['echo', 'optimize-baselines', '--suffixes', 'png', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['qmake', '-v'], cwd=None
MOCK run_command: ['qmake', '-v'], cwd=None
MOCK run_command: ['qmake', '-v'], cwd=None
MOCK run_command: ['qmake', '-v'], cwd=None
"""

        command._tests_to_rebaseline = lambda port: {'userscripts/another-test.html': set(['txt']), 'userscripts/images.svg': set(['png'])}
        OutputCapture().assert_outputs(self, command.execute, [MockOptions(optimize=True), [], tool], expected_logs=expected_logs, expected_stdout=expected_stdout, expected_stderr=expected_stderr_with_optimize)
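
A note on the pattern used throughout these examples: OutputCapture redirects stdout, stderr, and logging while the command under test runs, and assert_outputs compares what was emitted against the expected strings. The following is a minimal, self-contained sketch of that capture-and-compare idea using only the standard library (Python 3 assumed); it is an illustrative stand-in, not webkitpy's OutputCapture implementation, and it only covers stdout and stderr, not the expected_logs channel used above.

import contextlib
import io


class SimpleOutputCapture(object):
    """Illustrative stand-in for the capture-and-assert pattern shown above."""

    def capture_output(self):
        self._stdout, self._stderr = io.StringIO(), io.StringIO()
        self._redirects = [contextlib.redirect_stdout(self._stdout),
                           contextlib.redirect_stderr(self._stderr)]
        for redirect in self._redirects:
            redirect.__enter__()

    def restore_output(self):
        for redirect in reversed(self._redirects):
            redirect.__exit__(None, None, None)
        return self._stdout.getvalue(), self._stderr.getvalue()

    def assert_outputs(self, test_case, method, args=None, expected_stdout='', expected_stderr=''):
        # Run the method with output captured, then compare against expectations.
        self.capture_output()
        try:
            return_value = method(*(args or []))
        finally:
            stdout, stderr = self.restore_output()
        test_case.assertEqual(stdout, expected_stdout)
        test_case.assertEqual(stderr, expected_stderr)
        return return_value
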
Example No. 54
    def test_convert_prefixed_properties(self):
        """ Tests convert_prefixed_properties() file that has 20 properties requiring the -webkit- prefix:
        10 in one style block + 5 in another style
        block + 5 inline styles, including one with multiple prefixed properties.
        The properties in the test content are in all sorts of wack formatting.
        """

        test_html = """<html>
<style type="text/css"><![CDATA[

.block1 {
    width: 300px;
    height: 300px
}

.block2 {
    @test0@: propvalue;
}

.block3{@test1@: propvalue;}

.block4 { @test2@:propvalue; }

.block5{ @test3@ :propvalue; }

#block6 {    @test4@   :   propvalue;  }

#block7
{
    @test5@: propvalue;
}

#block8 { @test6@: propvalue; }

#block9:pseudo
{

    @test7@: propvalue;
    @test8@:  propvalue propvalue propvalue;;
}

]]></style>
</head>
<body>
    <div id="elem1" style="@test9@: propvalue;"></div>
    <div id="elem2" style="propname: propvalue; @test10@ : propvalue; propname:propvalue;"></div>
    <div id="elem2" style="@test11@: propvalue; @test12@ : propvalue; @test13@   :propvalue;"></div>
    <div id="elem3" style="@test14@:propvalue"></div>
</body>
<style type="text/css"><![CDATA[

.block10{ @test15@: propvalue; }
.block11{ @test16@: propvalue; }
.block12{ @test17@: propvalue; }
#block13:pseudo
{
    @test18@: propvalue;
    @test19@: propvalue;
}

]]></style>
</html>
"""
        converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME)
        test_content = self.generate_test_content(
            converter.prefixed_properties, 20, test_html)

        oc = OutputCapture()
        oc.capture_output()
        try:
            converter.feed(test_content[1])
            converter.close()
            converted = converter.output()
        finally:
            oc.restore_output()

        self.verify_conversion_happened(converted)
        self.verify_prefixed_properties(converted, test_content[0])
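
The docstring above describes test content built around @testN@ placeholders. generate_test_content() is provided by the test harness and is not shown here; a rough sketch of the substitution it presumably performs (replacing each placeholder with a property name that needs the -webkit- prefix) could look like the following, where the property list is a made-up example.

import re


def substitute_placeholders(template, prefixed_properties):
    """Hypothetical helper: swap each @testN@ token for the Nth property
    that requires a -webkit- prefix, and report which properties were used."""
    used = []

    def _replace(match):
        prop = prefixed_properties[int(match.group(1)) % len(prefixed_properties)]
        used.append(prop)
        return prop

    converted = re.sub(r'@test(\d+)@', _replace, template)
    return used, converted


# Example usage (made-up property names):
# used, html = substitute_placeholders(test_html, ['transform', 'transition', 'animation-name'])
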
Example No. 55
    def test_analyze_one_changelog(self):
        host = MockHost()
        host.filesystem.files[
            'mock-checkout/foo/ChangeLog'] = u"""2011-11-17  Mark Rowe  <*****@*****.**>

    <http://webkit.org/b/72646> Disable deprecation warnings around code where we cannot easily
    switch away from the deprecated APIs.

    Reviewed by Sam Weinig.

    * platform/mac/WebCoreNSStringExtras.mm:
    * platform/network/cf/SocketStreamHandleCFNet.cpp:
    (WebCore::SocketStreamHandle::reportErrorToClient):

2011-11-19  Kevin Ollivier  <*****@*****.**>

    [wx] C++ bindings build fix for move of array classes to WTF.

    * bindings/scripts/CodeGeneratorCPP.pm:
    (GetCPPTypeGetter):
    (GetNamespaceForClass):
    (GenerateHeader):
    (GenerateImplementation):

2011-10-27  Philippe Normand  <*****@*****.**> and Zan Dobersek  <*****@*****.**>

        [GStreamer] WebAudio AudioFileReader implementation
        https://bugs.webkit.org/show_bug.cgi?id=69834

        Reviewed by Martin Robinson.

        Basic FileReader implementation, supporting one or 2 audio
        channels. An empty AudioDestination is also provided, its complete
        implementation is handled in bug 69835.

        * GNUmakefile.am:
        * GNUmakefile.list.am:
        * platform/audio/gstreamer/AudioDestinationGStreamer.cpp: Added.
        (WebCore::AudioDestination::create):
        (WebCore::AudioDestination::hardwareSampleRate):
        (WebCore::AudioDestinationGStreamer::AudioDestinationGStreamer):
        (WebCore::AudioDestinationGStreamer::~AudioDestinationGStreamer):
        (WebCore::AudioDestinationGStreamer::start):
        (WebCore::AudioDestinationGStreamer::stop):
        * platform/audio/gstreamer/AudioDestinationGStreamer.h: Added.
        (WebCore::AudioDestinationGStreamer::isPlaying):
        (WebCore::AudioDestinationGStreamer::sampleRate):
        (WebCore::AudioDestinationGStreamer::sourceProvider):
        * platform/audio/gstreamer/AudioFileReaderGStreamer.cpp: Added.
        (WebCore::getGStreamerAudioCaps):
        (WebCore::getFloatFromByteReader):
        (WebCore::copyGstreamerBuffersToAudioChannel):
        (WebCore::onAppsinkNewBufferCallback):
        (WebCore::messageCallback):
        (WebCore::onGStreamerDeinterleavePadAddedCallback):
        (WebCore::onGStreamerDeinterleaveReadyCallback):
        (WebCore::onGStreamerDecodebinPadAddedCallback):
        (WebCore::AudioFileReader::AudioFileReader):
        (WebCore::AudioFileReader::~AudioFileReader):
        (WebCore::AudioFileReader::handleBuffer):
        (WebCore::AudioFileReader::handleMessage):
        (WebCore::AudioFileReader::handleNewDeinterleavePad):
        (WebCore::AudioFileReader::deinterleavePadsConfigured):
        (WebCore::AudioFileReader::plugDeinterleave):
        (WebCore::AudioFileReader::createBus):
        (WebCore::createBusFromAudioFile):
        (WebCore::createBusFromInMemoryAudioFile):
        * platform/audio/gtk/AudioBusGtk.cpp: Added.
        (WebCore::AudioBus::loadPlatformResource):
"""

        capture = OutputCapture()
        capture.capture_output()

        analyzer = ChangeLogAnalyzer(host, ['mock-checkout/foo/ChangeLog'])
        try:
            analyzer.analyze()
        finally:
            capture.restore_output()

        self.assertEqual(
            analyzer.summary(), {
                'reviewed': 2,
                'unreviewed': 1,
                'contributors': 6,
                'contributors_with_reviews': 2,
                'contributors_without_reviews': 4
            })

        self.assertEqual(
            set(analyzer.contributors_statistics().keys()),
            set([
                'Sam Weinig', u'Mark Rowe', u'Kevin Ollivier',
                'Martin Robinson', u'Philippe Normand', u'Zan Dobersek'
            ]))

        self.assertEqual(
            analyzer.contributors_statistics()['Sam Weinig'], {
                'reviews': {
                    'files': {
                        u'foo/platform/mac/WebCoreNSStringExtras.mm': 1,
                        u'foo/platform/network/cf/SocketStreamHandleCFNet.cpp':
                        1
                    },
                    'total': 1,
                    'areas': {
                        'Network': 1
                    }
                },
                'patches': {
                    'files': {},
                    'areas': {},
                    'unreviewed': 0,
                    'reviewed': 0
                }
            })
        self.assertEqual(
            analyzer.contributors_statistics()[u'Mark Rowe'], {
                'reviews': {
                    'files': {},
                    'total': 0,
                    'areas': {}
                },
                'patches': {
                    'files': {
                        u'foo/platform/mac/WebCoreNSStringExtras.mm': 1,
                        u'foo/platform/network/cf/SocketStreamHandleCFNet.cpp':
                        1
                    },
                    'areas': {
                        'Network': 1
                    },
                    'unreviewed': 0,
                    'reviewed': 1
                }
            })
        self.assertEqual(
            analyzer.contributors_statistics()[u'Kevin Ollivier'], {
                'reviews': {
                    'files': {},
                    'total': 0,
                    'areas': {}
                },
                'patches': {
                    'files': {
                        u'foo/bindings/scripts/CodeGeneratorCPP.pm': 1
                    },
                    'areas': {
                        'Bindings': 1
                    },
                    'unreviewed': 1,
                    'reviewed': 0
                }
            })

        files_for_audio_patch = {
            u'foo/GNUmakefile.am': 1,
            u'foo/GNUmakefile.list.am': 1,
            'foo/platform/audio/gstreamer/AudioDestinationGStreamer.cpp': 1,
            'foo/platform/audio/gstreamer/AudioDestinationGStreamer.h': 1,
            'foo/platform/audio/gstreamer/AudioFileReaderGStreamer.cpp': 1,
            'foo/platform/audio/gtk/AudioBusGtk.cpp': 1
        }
        author_expectation_for_audio_patch = {
            'reviews': {
                'files': {},
                'total': 0,
                'areas': {}
            },
            'patches': {
                'files': files_for_audio_patch,
                'areas': {
                    'The WebKitGTK+ Port': 1
                },
                'unreviewed': 0,
                'reviewed': 1
            }
        }
        self.assertEqual(
            analyzer.contributors_statistics()[u'Martin Robinson'], {
                'reviews': {
                    'files': files_for_audio_patch,
                    'total': 1,
                    'areas': {
                        'The WebKitGTK+ Port': 1
                    }
                },
                'patches': {
                    'files': {},
                    'areas': {},
                    'unreviewed': 0,
                    'reviewed': 0
                }
            })
        self.assertEqual(
            analyzer.contributors_statistics()[u'Philippe Normand'],
            author_expectation_for_audio_patch)
        self.assertEqual(analyzer.contributors_statistics()[u'Zan Dobersek'],
                         author_expectation_for_audio_patch)

        areas_statistics = analyzer.areas_statistics()
        areas_with_patches = [
            area for area in areas_statistics
            if areas_statistics[area]['reviewed']
            or areas_statistics[area]['unreviewed']
        ]
        self.assertEqual(set(areas_with_patches),
                         set(['Bindings', 'Network', 'The WebKitGTK+ Port']))
        self.assertEqual(
            areas_statistics['Bindings'], {
                'unreviewed': 1,
                'reviewed': 0,
                'contributors': {
                    u'Kevin Ollivier': {
                        'reviews': 0,
                        'unreviewed': 1,
                        'reviewed': 0
                    }
                }
            })
        self.assertEqual(
            areas_statistics['Network'], {
                'unreviewed': 0,
                'reviewed': 1,
                'contributors': {
                    'Sam Weinig': {
                        'reviews': 1,
                        'unreviewed': 0,
                        'reviewed': 0
                    },
                    u'Mark Rowe': {
                        'reviews': 0,
                        'unreviewed': 0,
                        'reviewed': 1
                    }
                }
            })
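
The summary and per-contributor statistics asserted above come from parsing the mock ChangeLog: each entry starts with a date/author header line, the reviewer comes from the "Reviewed by ..." line, and touched files come from the "* path:" lines. A rough, standard-library-only sketch of that kind of parsing is shown below; it is not the real ChangeLogAnalyzer, which also maps files to areas and handles multiple authors per entry.

import re

DATE_LINE = re.compile(r'^\d{4}-\d{2}-\d{2}\s+(.+?)\s+<')
REVIEWED_BY = re.compile(r'Reviewed by (.+?)\.')
TOUCHED_FILE = re.compile(r'^\s*\* ([^\s:]+)')


def summarize_changelog(text):
    """Sketch: split a ChangeLog into entries and record the first author,
    the reviewer (if any), and the files each entry touches."""
    entries = []
    current = None
    for line in text.splitlines():
        header = DATE_LINE.match(line)
        if header:
            # A date/author line starts a new entry.
            current = {'author': header.group(1), 'reviewer': None, 'files': []}
            entries.append(current)
            continue
        if current is None:
            continue
        reviewed = REVIEWED_BY.search(line)
        if reviewed:
            current['reviewer'] = reviewed.group(1)
        touched = TOUCHED_FILE.match(line)
        if touched:
            current['files'].append(touched.group(1))
    return entries

Run over the mock ChangeLog above, this sketch yields three entries, two with a reviewer and one without, which lines up with the 'reviewed': 2 / 'unreviewed': 1 summary the test asserts.
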
Example No. 56
    def test_tests_to_update(self):
        command = Rebaseline()
        command.bind_to_tool(MockTool())
        build = Mock()
        OutputCapture().assert_outputs(self, command._tests_to_update, [build])
Example No. 57
    def _assert_tool_main_outputs(self, tool, main_args, expected_stdout, expected_stderr="", expected_exit_code=0):
        exit_code = OutputCapture().assert_outputs(self, tool.main, [main_args], expected_stdout=expected_stdout, expected_stderr=expected_stderr)
        self.assertEqual(exit_code, expected_exit_code)
Example No. 58
    def test_rebaseline_test(self):
        command = RebaselineTest()
        command.bind_to_tool(MockTool())
        expected_logs = "Retrieving http://example.com/f/builders/Webkit Linux/results/layout-test-results/userscripts/another-test-actual.txt.\n"
        OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Linux", "userscripts/another-test.html", None, "txt"], expected_logs=expected_logs)
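
The expected log line shows where the actual result file is fetched from. A hypothetical helper illustrating how that URL could be composed from the builder name, test name, and suffix (the real logic lives inside RebaselineTest and may differ) is sketched below.

def results_url(base_url, builder_name, test_name, suffix):
    """Hypothetical: build the '-actual' result URL seen in the expected log."""
    test_without_extension = test_name.rsplit('.', 1)[0]
    return '%s/builders/%s/results/layout-test-results/%s-actual.%s' % (
        base_url, builder_name, test_without_extension, suffix)


# results_url('http://example.com/f', 'Webkit Linux', 'userscripts/another-test.html', 'txt')
# -> 'http://example.com/f/builders/Webkit Linux/results/layout-test-results/userscripts/another-test-actual.txt'
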
Example No. 59
    def test_lgr(self):
        expected_stderr = "MOCK: irc.post: mock_nick: http://trac.webkit.org/changeset/9479\n"
        OutputCapture().assert_outputs(self, run, args=["last-green-revision"], expected_stderr=expected_stderr)
Example No. 60
    def test_rebaseline_expectations(self):
        command = RebaselineExpectations()
        tool = MockTool()
        tool.executive = MockExecutive(should_log=True)
        command.bind_to_tool(tool)
        expected_stdout = """Retrieving results for chromium-cg-mac-leopard from Webkit Mac10.5 (CG).
    userscripts/another-test.html
    userscripts/images.svg
Retrieving results for chromium-cg-mac-snowleopard from Webkit Mac10.6 (CG).
    userscripts/another-test.html
    userscripts/images.svg
Retrieving results for chromium-gpu-cg-mac-leopard from Webkit Mac10.5 (CG) - GPU.
Retrieving results for chromium-gpu-cg-mac-snowleopard from Webkit Mac10.6 (CG) - GPU.
Retrieving results for chromium-gpu-win-win7 from Webkit Win7 - GPU.
Retrieving results for chromium-gpu-win-xp from Webkit Win - GPU.
Retrieving results for chromium-linux-x86 from Webkit Linux 32.
    userscripts/another-test.html
    userscripts/images.svg
Retrieving results for chromium-linux-x86_64 from Webkit Linux.
    userscripts/another-test.html
    userscripts/images.svg
Retrieving results for chromium-mac-leopard from Webkit Mac10.5.
    userscripts/another-test.html
    userscripts/images.svg
Retrieving results for chromium-mac-snowleopard from Webkit Mac10.6.
    userscripts/another-test.html
    userscripts/images.svg
Retrieving results for chromium-win-vista from Webkit Vista.
    userscripts/another-test.html
    userscripts/images.svg
Retrieving results for chromium-win-win7 from Webkit Win7.
    userscripts/another-test.html
    userscripts/images.svg
Retrieving results for chromium-win-xp from Webkit Win.
    userscripts/another-test.html
    userscripts/images.svg
Optimizing baselines for userscripts/another-test.html.
Optimizing baselines for userscripts/images.svg.
"""
        expected_stderr = """MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.5 (CG)', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.5 (CG)', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.6 (CG)', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.6 (CG)', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Linux 32', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Linux 32', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Linux', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Linux', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.5', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.5', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.6', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.6', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Vista', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Vista', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Win7', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Win7', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Win', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Win', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'optimize-baselines', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'optimize-baselines', 'userscripts/images.svg'], cwd=/mock-checkout
"""
        # GPU ports have nothing to rebaseline; every other port rebaselines both tests.
        command._tests_to_rebaseline = lambda port: [] if '-gpu-' in port.name() else ['userscripts/another-test.html', 'userscripts/images.svg']
        OutputCapture().assert_outputs(self,
                                       command.execute,
                                       [MockTool(), None, None],
                                       expected_stdout=expected_stdout,
                                       expected_stderr=expected_stderr)
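
The expected stdout above pairs each port with its builder ("Retrieving results for <port> from <builder>.") and lists the tests to rebaseline under it, with GPU ports getting none. A rough sketch of the loop that would produce output in that shape is shown below; the real command's loop lives in RebaselineExpectations and may differ, and port_to_builder here is a hypothetical mapping supplied by the caller.

def print_retrieval_plan(port_to_builder, tests_to_rebaseline):
    """Sketch: one 'Retrieving results for ...' line per port, followed by
    the tests that will be rebaselined for that port (none for GPU ports)."""
    for port_name in sorted(port_to_builder):
        print('Retrieving results for %s from %s.' % (port_name, port_to_builder[port_name]))
        for test_name in tests_to_rebaseline(port_name):
            print('    %s' % test_name)


# Example usage with the stubbed filter from the test above:
# tests = lambda name: [] if '-gpu-' in name else ['userscripts/another-test.html', 'userscripts/images.svg']
# print_retrieval_plan({'chromium-linux-x86': 'Webkit Linux 32', 'chromium-gpu-win-xp': 'Webkit Win - GPU'}, tests)
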