Example #1
    def test_rebaseline_test_internal_with_move_overwritten_baselines_to(self):
        self.tool.executive = MockExecutive2()

        # FIXME: it's confusing that this is the test port, and not the regular lion port. Really all of the tests should be using the test ports.
        port = self.tool.port_factory.get('test-mac-snowleopard')
        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-mac-snowleopard/failures/expected/image-expected.txt'), 'original snowleopard result')

        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            builders._exact_matches = {
                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
                "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
            }

            options = MockOptions(optimize=True, builder="MOCK SnowLeopard", suffixes="txt",
                move_overwritten_baselines_to=["test-mac-leopard"], verbose=True, test="failures/expected/image.html",
                results_directory=None)

            oc.capture_output()
            self.command.execute(options, [], self.tool)
        finally:
            out, _, _ = oc.restore_output()
            builders._exact_matches = old_exact_matches

        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result')
        self.assertMultiLineEqual(out, '{"add": []}\n')
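Examples 1, 5, 10, and 43 all hand-roll the same dance: stash builders._exact_matches, install a fake builder table, run the command, and restore the original in a finally block. That pattern folds naturally into a small context manager. A minimal sketch, assuming only that builders._exact_matches is an ordinary module attribute (the name overridden_exact_matches is ours, not a webkitpy API):

import contextlib

@contextlib.contextmanager
def overridden_exact_matches(builders, new_matches):
    # Swap in the fake builder table and guarantee the real one comes back,
    # even if the body of the with-statement raises.
    saved = builders._exact_matches
    builders._exact_matches = new_matches
    try:
        yield
    finally:
        builders._exact_matches = saved

With this helper, the try/finally above collapses to a single with-block wrapped around the capture and execute calls.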
Example #2
    def test_upload_json(self):
        runner, port = self.create_runner()
        port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'

        class MockFileUploader:
            called = []
            upload_single_text_file_throws = False
            upload_single_text_file_return_value = None

            @classmethod
            def reset(cls):
                cls.called = []
                cls.upload_single_text_file_throws = False
                cls.upload_single_text_file_return_value = None

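            # These methods name their receiver 'mock' instead of 'self' on
            # purpose, so that 'self' keeps referring to the enclosing TestCase
            # and its assert* helpers inside the mock's methods.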
            def __init__(mock, url, timeout):
                self.assertEqual(url, 'https://some.host/some/path')
                self.assertTrue(isinstance(timeout, int) and timeout)
                mock.called.append('FileUploader')

            def upload_single_text_file(mock, filesystem, content_type, filename):
                self.assertEqual(filesystem, port.host.filesystem)
                self.assertEqual(content_type, 'application/json')
                self.assertEqual(filename, 'some.json')
                mock.called.append('upload_single_text_file')
                if mock.upload_single_text_file_throws:
                    raise Exception
                return mock.upload_single_text_file_return_value

        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('OK')
        self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])

        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('Some error')
        output = OutputCapture()
        output.capture_output()
        self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        _, _, logs = output.restore_output()
        self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got a bad response:\nSome error\n')

        # An exception raised by upload_single_text_file shouldn't blow up _upload_json
        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_throws = True
        self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])

        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "OK"}')
        self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])

        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "SomethingHasFailed", "failureStored": false}')
        output = OutputCapture()
        output.capture_output()
        self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        _, _, logs = output.restore_output()
        serialized_json = json.dumps({'status': 'SomethingHasFailed', 'failureStored': False}, indent=4)
        self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got an error:\n%s\n' % serialized_json)
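Taken together, the four scenarios above pin down _upload_json's contract: a literal 'OK' body or a JSON body whose "status" is "OK" counts as success; any other body is logged and fails; and an exception from the uploader fails without propagating. A freestanding sketch consistent with those assertions (inferred from the test, not the actual webkitpy implementation; the 120-second timeout is an arbitrary truthy int, since the test only checks isinstance(timeout, int)):

import json
import logging

_log = logging.getLogger(__name__)

def upload_json(filesystem, hostname, json_path, host_path, file_uploader_class):
    url = 'https://%s%s' % (hostname, host_path)
    try:
        uploader = file_uploader_class(url, 120)
        response = uploader.upload_single_text_file(filesystem, 'application/json', json_path)
    except Exception:
        # An uploader failure must not blow up the caller; just report failure.
        return False
    response_text = response.read()
    if response_text == 'OK':
        return True
    try:
        parsed = json.loads(response_text)
    except ValueError:
        _log.error('Uploaded JSON to %s but got a bad response:\n%s' % (url, response_text))
        return False
    if parsed.get('status') == 'OK':
        return True
    _log.error('Uploaded JSON to %s but got an error:\n%s' % (url, json.dumps(parsed, indent=4)))
    return False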
Example #3
    def test_parse_output_with_subtests(self):
        output = DriverOutput('\n'.join([
            'Running 20 times',
            'some test: [1, 2, 3, 4, 5]',
            'other test = else: [6, 7, 8, 9, 10]',
            '',
            'Time:',
            'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
            'avg 1100 ms',
            'median 1101 ms',
            'stdev 11 ms',
            'min 1080 ms',
            'max 1120 ms']), image=None, image_hash=None, audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
            test._filter_output(output)
            self.assertEqual(test.parse_output(output),
                {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
                    'values': [i for i in range(1, 20)]}})
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, '')
Example #4
    def test_start_cmd(self):
        # Fails on win - see https://bugs.webkit.org/show_bug.cgi?id=84726
        if sys.platform in ("cygwin", "win32"):
            return

        def fake_pid(_):
            host.filesystem.write_text_file("/tmp/WebKit/httpd.pid", "42")
            return True

        host = MockHost()
        host.executive = MockExecutive(should_log=True)
        test_port = test.TestPort(host)
        host.filesystem.write_text_file(test_port._path_to_apache_config_file(), "")

        server = LayoutTestApacheHttpd(test_port, "/mock/output_dir", number_of_servers=4)
        server._check_that_all_ports_are_available = lambda: True
        server._is_server_running_on_all_ports = lambda: True
        server._wait_for_action = fake_pid
        oc = OutputCapture()
        try:
            oc.capture_output()
            server.start()
            server.stop()
        finally:
            _, _, logs = oc.restore_output()
        self.assertIn("StartServers 4", logs)
        self.assertIn("MinSpareServers 4", logs)
        self.assertIn("MaxSpareServers 4", logs)
        self.assertTrue(host.filesystem.exists("/mock/output_dir/httpd.conf"))
Example #5
    def test_rebaseline(self):
        self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
        self.command._tests_to_update = lambda builder: ['mock/path/to/test.html']

        self._zero_out_test_expectations()

        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            builders._exact_matches = {
                "MOCK builder": {
                    "port_name": "test-mac-leopard",
                    "specifiers": set(["mock-specifier"])
                },
            }
            oc.capture_output()
            self.command.execute(
                MockOptions(
                    optimize=False,
                    builders=None,
                    suffixes="txt,png",
                    verbose=True,
                    move_overwritten_baselines=False), [], self.tool)
        finally:
            oc.restore_output()
            builders._exact_matches = old_exact_matches

        calls = filter(lambda x: x[0] != 'perl', self.tool.executive.calls)
        self.assertEqual(calls, [[[
            'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png',
            '--builder', 'MOCK builder', '--test', 'mock/path/to/test.html',
            '--verbose'
        ]]])
Example #6
    def test_parse_output_with_failing_line(self):
        output = DriverOutput("""
Running 20 times
Ignoring warm-up run (1115)

some-unrecognizable-line

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms
""", image=None, image_hash=None, audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
            test._filter_output(output)
            self.assertEqual(test.parse_output(output), None)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, 'ERROR: some-unrecognizable-line\n')
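This test (and its twin in Example 30) fixes the parser's error contract: known header lines are skipped, result lines are consumed, and any other non-blank line logs 'ERROR: <line>' and aborts parsing by returning None. A self-contained sketch of that loop, using hypothetical regexes for the recognized line shapes (the real PerfTest patterns are more involved):

import logging
import re

_log = logging.getLogger(__name__)

# Hypothetical patterns: one for result lines such as 'avg 1100 ms',
# one for the header lines the parser is allowed to skip.
_RESULT_LINE = re.compile(r'^(?P<key>[a-z]+) (?P<values>[\d., ]+) ?(?P<unit>[a-z]+)$')
_SKIPPED_LINE = re.compile(r'^(Running \d+ times|Ignoring warm-up run.*|Time:)$')

def parse_perf_output(text):
    """Return {key: raw values string}, or None if any line is unrecognized."""
    results = {}
    for line in text.splitlines():
        if not line or _SKIPPED_LINE.match(line):
            continue
        match = _RESULT_LINE.match(line)
        if not match:
            _log.error('ERROR: %s' % line)
            return None
        results[match.group('key')] = match.group('values').strip()
    return results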
Example #7
    def test_convert_for_webkit_harness_and_properties(self):
        """ Tests convert_for_webkit() using a basic JS test that uses testharness.js and testharness.css and has 4 prefixed properties: 3 in a style block + 1 inline style """

        test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">

#block1 { @test0@: propvalue; }
#block2 { @test1@: propvalue; }
#block3 { @test2@: propvalue; }

</style>
</head>
<body>
<div id="elem1" style="@test3@: propvalue;"></div>
</body>
</html>
"""
        converter = W3CTestConverter()
        fake_dir_path = self.fake_dir_path(converter, 'harnessandprops')

        oc = OutputCapture()
        oc.capture_output()
        try:
            test_content = self.generate_test_content(converter.prefixed_properties, 2, test_html)
            converted = converter.convert_html(fake_dir_path, test_content[1], DUMMY_FILENAME)
        finally:
            oc.restore_output()

        self.verify_conversion_happened(converted)
        self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
        self.verify_prefixed_properties(converted, test_content[0])
Example #8
class OutputCaptureTest(unittest.TestCase):
    def setUp(self):
        self.output = OutputCapture()

    def log_all_levels(self):
        _log.info('INFO')
        _log.warning('WARN')
        _log.error('ERROR')
        _log.critical('CRITICAL')

    def assertLogged(self, expected_logs):
        actual_stdout, actual_stderr, actual_logs = self.output.restore_output()
        self.assertEqual('', actual_stdout)
        self.assertEqual('', actual_stderr)
        self.assertEqual(expected_logs, actual_logs)

    def test_initial_log_level(self):
        self.output.capture_output()
        self.log_all_levels()
        self.assertLogged('INFO\nWARN\nERROR\nCRITICAL\n')

    def test_set_log_level(self):
        self.output.set_log_level(logging.ERROR)
        self.output.capture_output()
        self.log_all_levels()
        self.output.set_log_level(logging.WARN)
        self.log_all_levels()
        self.assertLogged('ERROR\nCRITICAL\nWARN\nERROR\nCRITICAL\n')
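OutputCaptureTest is the closest thing in these examples to a specification of the helper itself: capture_output() must divert stdout, stderr, and logging; restore_output() must undo the diversion and hand back the three captured strings; and set_log_level() must take effect even mid-capture. A minimal compatible class, written from those assertions alone (not webkitpy's actual implementation):

import logging
import sys
from io import StringIO  # StringIO.StringIO under Python 2

class SimpleOutputCapture(object):
    def __init__(self):
        self._log_level = logging.INFO

    def set_log_level(self, level):
        self._log_level = level
        if hasattr(self, '_handler'):
            # Takes effect mid-capture, as test_set_log_level expects.
            self._handler.setLevel(level)

    def capture_output(self):
        self._saved_stdout, self._saved_stderr = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = StringIO(), StringIO()
        self._log_buffer = StringIO()
        self._handler = logging.StreamHandler(self._log_buffer)
        self._handler.setLevel(self._log_level)
        self._handler.setFormatter(logging.Formatter('%(message)s'))
        self._logger = logging.getLogger()
        self._saved_logger_level = self._logger.level
        self._logger.setLevel(logging.DEBUG)
        self._logger.addHandler(self._handler)

    def restore_output(self):
        stdout, stderr = sys.stdout.getvalue(), sys.stderr.getvalue()
        sys.stdout, sys.stderr = self._saved_stdout, self._saved_stderr
        self._logger.removeHandler(self._handler)
        self._logger.setLevel(self._saved_logger_level)
        logs = self._log_buffer.getvalue()
        del self._handler
        return stdout, stderr, logs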
Example #9
    def test_parse_output_with_subtests(self):
        output = DriverOutput("""
Running 20 times
some test: [1, 2, 3, 4, 5]
other test = else: [6, 7, 8, 9, 10]
Ignoring warm-up run (1115)

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms
""", image=None, image_hash=None, audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
            self._assert_results_are_correct(test, output)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, '')
Example #10
    def test_rebaseline_test_internal_with_move_overwritten_baselines_to(self):
        old_exact_matches = builders._exact_matches
        try:
            builders._exact_matches = {
                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
                "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
            }

            command = RebaselineTest()
            tool = MockTool()
            tool.executive = MockExecutive(should_log=True)
            command.bind_to_tool(tool)

            port = tool.port_factory.get('test-mac-snowleopard')
            tool.filesystem.write_text_file(tool.filesystem.join(port.baseline_version_dir(), 'failures', 'expected', 'image-expected.txt'), '')

            options = MockOptions(optimize=True, builder="MOCK SnowLeopard", suffixes="txt",
                move_overwritten_baselines_to=["test-mac-leopard"], verbose=True, test="failures/expected/image.html")

            oc = OutputCapture()
            oc.capture_output()
            try:
                command.execute(options, [], tool)
            finally:
                _, _, logs = oc.restore_output()

            self.assertTrue("Copying baseline from /test.checkout/LayoutTests/platform/test-mac-snowleopard/failures/expected/image-expected.txt to /test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image-expected.txt.\n" in logs)

        finally:
            builders._exact_matches = old_exact_matches
Example #11
    def test_run_memory_test(self):
        runner, port = self.create_runner_and_setup_results_template()
        runner._timestamp = 123456789
        port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner.run()
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n')
        results = self._load_output_json(runner)[0]['results']

        # Stdev for test doesn't match on some bots
        self.assertEqual(sorted(results['Parser/memory-test'].keys()), sorted(MemoryTestData.results.keys()))
        for key in MemoryTestData.results:
            if key == 'stdev':
                self.assertAlmostEqual(results['Parser/memory-test'][key], MemoryTestData.results[key], places=4)
            else:
                self.assertEqual(results['Parser/memory-test'][key], MemoryTestData.results[key])
        self.assertEqual(results['Parser/memory-test:JSHeap'], MemoryTestData.js_heap_results)
        self.assertEqual(results['Parser/memory-test:Malloc'], MemoryTestData.malloc_results)
Example #12
    def test_analyze_test_reftest_match_and_mismatch(self):
        test_html = """<head>
<link rel="match" href="green-box-ref.xht" />
<link rel="match" href="blue-box-ref.xht" />
<link rel="mismatch" href="orange-box-notref.xht" />
</head>
"""
        oc = OutputCapture()
        oc.capture_output()

        try:
            test_path = '/some/madeup/path/'
            parser = TestParser(options, test_path + 'somefile.html')
            test_info = parser.analyze_test(test_contents=test_html)
        finally:
            _, _, logs = oc.restore_output()

        self.assertNotEqual(test_info, None, 'did not find a test')
        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
        self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
        self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
        self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
        self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')

        self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')
Example #13
    def test_check_build(self):
        port = self.make_port()
        port._check_file_exists = lambda path, desc: True
        if port._dump_reader:
            port._dump_reader.check_is_functional = lambda: True
        port._options.build = True
        port._check_driver_build_up_to_date = lambda config: True
        port.check_httpd = lambda: True
        oc = OutputCapture()
        try:
            oc.capture_output()
            self.assertEqual(port.check_build(needs_http=True, printer=FakePrinter()),
                             test_run_results.OK_EXIT_STATUS)
        finally:
            out, err, logs = oc.restore_output()
            self.assertIn('pretty patches', logs)         # We should get a warning about PrettyPatch being missing,
            self.assertNotIn('build requirements', logs)  # but not the driver itself.

        port._check_file_exists = lambda path, desc: False
        port._check_driver_build_up_to_date = lambda config: False
        try:
            oc.capture_output()
            self.assertEqual(port.check_build(needs_http=True, printer=FakePrinter()),
                             test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
        finally:
            out, err, logs = oc.restore_output()
            self.assertIn('pretty patches', logs)        # And here we should get warnings about both.
            self.assertIn('build requirements', logs)
Example #14
    def test_run_with_driver_accumulates_memory_results(self):
        test, port = self._setup_test()
        counter = [0]

        def mock_run_single(driver, path, timeout):
            counter[0] += 1
            return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=counter[0], measurements={'Malloc': 10, 'JSHeap': 5})

        test.run_single = mock_run_single
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            driver = port.create_driver(worker_number=1, no_timeout=True)
            self.assertTrue(test._run_with_driver(driver, None))
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, '')

        metrics = test._metrics
        self.assertEqual(sorted(metrics.keys()), ['JSHeap', 'Malloc', 'Time'])
        self.assertEqual(metrics['Time'].flattened_iteration_values(), [float(i * 1000) for i in range(2, 7)])
        self.assertEqual(metrics['Malloc'].flattened_iteration_values(), [float(10)] * 5)
        self.assertEqual(metrics['JSHeap'].flattened_iteration_values(), [float(5)] * 5)
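One idiom worth calling out in Example 14: counter = [0] with counter[0] += 1 is the Python 2 way to give a nested function mutable state, since a closure cannot rebind a name in its enclosing scope. Python 3's nonlocal says the same thing directly; a sketch:

def make_mock_run_single():
    count = 0

    def mock_run_single(driver, path, timeout):
        # 'nonlocal' (Python 3 only) replaces the one-element-list trick above.
        nonlocal count
        count += 1
        return count

    return mock_run_single

Each call to the returned mock then yields 1, 2, 3, ... exactly as the counter[0] version does.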
Example #15
class PrintBaselinesTest(unittest.TestCase):
    def setUp(self):
        self.oc = None
        self.tool = MockTool()
        self.test_port = self.tool.port_factory.get('test-win-xp')
        self.tool.port_factory.get = lambda port_name=None: self.test_port
        self.tool.port_factory.all_port_names = lambda: TestPort.ALL_BASELINE_VARIANTS

    def tearDown(self):
        if self.oc:
            self.restore_output()

    def capture_output(self):
        self.oc = OutputCapture()
        self.oc.capture_output()

    def restore_output(self):
        stdout, stderr, logs = self.oc.restore_output()
        self.oc = None
        return (stdout, stderr, logs)

    def test_basic(self):
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform=None), ['passes/text.html'], self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout,
                          ('// For test-win-xp\n'
                           'passes/text-expected.png\n'
                           'passes/text-expected.txt\n'))

    def test_multiple(self):
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform='test-win-*'), ['passes/text.html'], self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout,
                          ('// For test-win-vista\n'
                           'passes/text-expected.png\n'
                           'passes/text-expected.txt\n'
                           '\n'
                           '// For test-win-win7\n'
                           'passes/text-expected.png\n'
                           'passes/text-expected.txt\n'
                           '\n'
                           '// For test-win-xp\n'
                           'passes/text-expected.png\n'
                           'passes/text-expected.txt\n'))

    def test_csv(self):
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(MockOptions(all=False, platform='*xp', csv=True, include_virtual_tests=False), ['passes/text.html'], self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout,
                          ('test-win-xp,passes/text.html,None,png,passes/text-expected.png,None\n'
                           'test-win-xp,passes/text.html,None,txt,passes/text-expected.txt,None\n'))
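PrintBaselinesTest threads self.oc through setUp, tearDown, capture_output, and restore_output solely to guarantee that a capture is never left active. A context manager gives the same guarantee with no instance bookkeeping; a sketch (captured_output is a hypothetical helper, parameterized on the OutputCapture class so it stays self-contained):

import contextlib

@contextlib.contextmanager
def captured_output(output_capture_class):
    oc = output_capture_class()
    captured = {}
    oc.capture_output()
    try:
        yield captured
    finally:
        # restore_output() always runs, so no tearDown cleanup is needed;
        # the results are published through the dict we yielded.
        captured['stdout'], captured['stderr'], captured['logs'] = oc.restore_output()

A test body then reads: with captured_output(OutputCapture) as captured: command.execute(...), followed by assertions on captured['stdout'].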
Example #16
    def test_run_single(self):
        output_capture = OutputCapture()
        output_capture.capture_output()

        loaded_pages = []

        def run_test(test_input, stop_when_done):
            if test_input.test_name == test.force_gc_test:
                loaded_pages.append(test_input)
                return
            if test_input.test_name != "about:blank":
                self.assertEqual(test_input.test_name, 'http://some-test/')
            loaded_pages.append(test_input)
            self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
            return DriverOutput('actual text', 'actual image', 'actual checksum',
                audio=None, crash=False, timeout=False, error=False)

        test, port = self._setup_test(run_test)
        test._archive_path = '/path/some-dir/some-test.wpr'
        test._url = 'http://some-test/'

        try:
            driver = port.create_driver(worker_number=1, no_timeout=True)
            self.assertTrue(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100))
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

        self.assertEqual(len(loaded_pages), 2)
        self.assertEqual(loaded_pages[0].test_name, test.force_gc_test)
        self.assertEqual(loaded_pages[1].test_name, 'http://some-test/')
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, '')
        self.assertEqual(port.host.filesystem.read_binary_file('/path/some-dir/some-test-actual.png'), 'actual image')
Example #17
    def test_convert_for_webkit_properties_only(self):
        """ Tests convert_for_webkit() using a test that has 2 prefixed properties: 1 in a style block + 1 inline style """

        test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">

#block1 { @test0@: @propvalue0@; }

</style>
</head>
<body>
<div id="elem1" style="@test1@: @propvalue1@;"></div>
</body>
</html>
"""
        fake_dir_path = self.fake_dir_path('harnessandprops')
        converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
        test_content = self.generate_test_content_properties_and_values(converter.prefixed_properties, converter.prefixed_property_values, 1, test_html)

        oc = OutputCapture()
        oc.capture_output()
        try:
            converter.feed(test_content[2])
            converter.close()
            converted = converter.output()
        finally:
            oc.restore_output()

        self.verify_conversion_happened(converted)
        self.verify_test_harness_paths(converter, converted[2], fake_dir_path, 1, 1)
        self.verify_prefixed_properties(converted, test_content[0])
        self.verify_prefixed_property_values(converted, test_content[1])
Example #18
    def test_run_test_set_for_parser_tests(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ["Bindings/event-target-wrapper.html", "Parser/some-parser.html"])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(
            log,
            "\n".join(
                [
                    "Running Bindings/event-target-wrapper.html (1 of 2)",
                    "RESULT Bindings: event-target-wrapper= 1489.05 ms",
                    "median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms",
                    "",
                    "Running Parser/some-parser.html (2 of 2)",
                    "RESULT Parser: some-parser= 1100.0 ms",
                    "median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms",
                    "",
                    "",
                ]
            ),
        )
Example #19
    def test_run_single_fails_when_output_has_error(self):
        output_capture = OutputCapture()
        output_capture.capture_output()

        loaded_pages = []

        def run_test(test_input, stop_when_done):
            loaded_pages.append(test_input)
            self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
            return DriverOutput('actual text', 'actual image', 'actual checksum',
                audio=None, crash=False, timeout=False, error='some error')

        test, port = self._setup_test(run_test)
        test._archive_path = '/path/some-dir.wpr'
        test._url = 'http://some-test/'

        try:
            driver = port.create_driver(worker_number=1, no_timeout=True)
            self.assertEqual(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100), None)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

        self.assertEqual(len(loaded_pages), 2)
        self.assertEqual(loaded_pages[0].test_name, test.force_gc_test)
        self.assertEqual(loaded_pages[1].test_name, 'http://some-test/')
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, 'error: some-test.replay\nsome error\n')
Example #20
    def test_run_memory_test(self):
        runner, port = self.create_runner_and_setup_results_template()
        runner._timestamp = 123456789
        port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner.run()
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self.normalizeFinishedTime(log), '\n'.join([
            'Running 1 tests',
            'Running Parser/memory-test.html (1 of 1)',
            'RESULT Parser: memory-test= 1100.0 ms',
            'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
            'RESULT Parser: memory-test: JSHeap= 832000.0 bytes',
            'median= 829000.0 bytes, stdev= 15000.0 bytes, min= 811000.0 bytes, max= 848000.0 bytes',
            'RESULT Parser: memory-test: Malloc= 532000.0 bytes',
            'median= 529000.0 bytes, stdev= 13000.0 bytes, min= 511000.0 bytes, max= 548000.0 bytes',
            'Finished: 0.1 s',
            '', '']))
        results = runner.load_output_json()[0]['results']
        values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
        self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms', 'values': values})
        self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes', 'values': values})
        self.assertEqual(results['Parser/memory-test:Malloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes', 'values': values})
Example #21
    def test_paths(self):
        self.fs.chdir('/foo/bar')
        self.check_names(['baz_unittest.py'], ['bar.baz_unittest'])
        self.check_names(['./baz_unittest.py'], ['bar.baz_unittest'])
        self.check_names(['/foo/bar/baz_unittest.py'], ['bar.baz_unittest'])
        self.check_names(['.'], ['bar.baz_unittest'])
        self.check_names(['../../foo2/bar2'], ['bar2.baz2_integrationtest'])

        self.fs.chdir('/')
        self.check_names(['bar'], ['bar.baz_unittest'])
        self.check_names(['/foo/bar/'], ['bar.baz_unittest'])

        # This works 'by accident' since it maps onto a package.
        self.check_names(['bar/'], ['bar.baz_unittest'])

        # This should log an error, since it's outside the trees.
        oc = OutputCapture()
        oc.set_log_level(logging.ERROR)
        oc.capture_output()
        try:
            self.check_names(['/tmp/another_unittest.py'], [])
        finally:
            _, _, logs = oc.restore_output()
            self.assertIn('another_unittest.py', logs)

        # Paths that don't exist are errors.
        oc.capture_output()
        try:
            self.check_names(['/foo/bar/notexist_unittest.py'], [])
        finally:
            _, _, logs = oc.restore_output()
            self.assertIn('notexist_unittest.py', logs)

        # Names that don't exist are caught later, at load time.
        self.check_names(['bar.notexist_unittest'], ['bar.notexist_unittest'])
Example #22
    def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=True, expected_exit_code=0):
        filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
        filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')

        uploaded = [False]

        def mock_upload_json(hostname, json_path):
            self.assertEqual(hostname, 'some.host')
            self.assertEqual(json_path, '/mock-checkout/output.json')
            uploaded[0] = True
            return upload_succeeds

        runner._upload_json = mock_upload_json
        runner._timestamp = 123456789
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(runner.run(), expected_exit_code)
        finally:
            stdout, stderr, logs = output_capture.restore_output()

        if not expected_exit_code:
            self.assertEqual(logs, '\n'.join([
                'Running 2 tests',
                'Running Bindings/event-target-wrapper.html (1 of 2)',
                'RESULT Bindings: event-target-wrapper= 1489.05 ms',
                'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
                '',
                'Running inspector/pass.html (2 of 2)',
                'RESULT group_name: test_name= 42 ms',
                '',
                '']))

        return uploaded[0]
Example #23
    def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0):
        filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
        filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')

        uploaded = [False]

        def mock_upload_json(hostname, json_path):
            self.assertEqual(hostname, 'some.host')
            self.assertEqual(json_path, '/mock-checkout/output.json')
            uploaded[0] = upload_succeeds
            return upload_succeeds

        runner._upload_json = mock_upload_json
        runner._timestamp = 123456789
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(runner.run(), expected_exit_code)
        finally:
            stdout, stderr, logs = output_capture.restore_output()

        if not expected_exit_code:
            expected_logs = 'Running 2 tests\n' + EventTargetWrapperTestData.output + InspectorPassTestData.output
            if results_shown:
                expected_logs += 'MOCK: user.open_url: file://...\n'
            self.assertEqual(self._normalize_output(logs), expected_logs)

        self.assertEqual(uploaded[0], upload_succeeds)

        return logs
Example #24
    def test_run_with_memory_output(self):
        port = MockPort()
        test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
        memory_results = {'Malloc': 10, 'JSHeap': 5}
        self.maxDiff = None
        driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test, memory_results)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            metrics = test._run_with_driver(driver, None)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, '')

        self.assertEqual(len(metrics), 3)
        self.assertEqual(metrics[0].metric(), 'Time')
        self.assertEqual(metrics[0].to_dict(), {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
            'values': [float(i * 1000) for i in range(2, 21)]})
        self.assertEqual(metrics[1].metric(), 'Malloc')
        self.assertEqual(metrics[1].to_dict(), {'max': 10, 'avg': 10.0, 'median': 10, 'min': 10, 'stdev': 0.0, 'unit': 'bytes',
            'values': [float(10)] * 19})
        self.assertEqual(metrics[2].metric(), 'JSHeap')
        self.assertEqual(metrics[2].to_dict(), {'max': 5, 'avg': 5.0, 'median': 5, 'min': 5, 'stdev': 0.0, 'unit': 'bytes',
            'values': [float(5)] * 19})
Example #25
    def test_rebaseline_test_internal_with_port_that_lacks_buildbot(self):
        self.tool.executive = MockExecutive2()

        port = self.tool.port_factory.get('test-win-win7')
        self._write(
            port.host.filesystem.join(
                port.layout_tests_dir(),
                'platform/test-win-win10/failures/expected/image-expected.txt'),
            'original win10 result')

        oc = OutputCapture()
        try:
            options = optparse.Values({
                'optimize': True,
                'builder': "MOCK Win10",
                'suffixes': "txt",
                'verbose': True,
                'test': "failures/expected/image.html",
                'results_directory': None,
                'build_number': None
            })
            oc.capture_output()
            self.command.execute(options, [], self.tool)
        finally:
            out, _, _ = oc.restore_output()

        self.assertMultiLineEqual(
            self._read(self.tool.filesystem.join(
                port.layout_tests_dir(),
                'platform/test-win-win10/failures/expected/image-expected.txt')),
            'MOCK Web result, convert 404 to None=True')
        self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(
            port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')))
        self.assertMultiLineEqual(
            out, '{"remove-lines": [{"test": "failures/expected/image.html", "builder": "MOCK Win10"}]}\n')
Example #26
    def test_parse_output(self):
        output = DriverOutput('\n'.join([
            'Running 20 times',
            'Ignoring warm-up run (1115)',
            '',
            'Time:',
            'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
            'avg 1100 ms',
            'median 1101 ms',
            'stdev 11 ms',
            'min 1080 ms',
            'max 1120 ms']), image=None, image_hash=None, audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(None, 'some-test', '/path/some-dir/some-test')
            self.assertEqual(test.parse_output(output),
                {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
                    'values': [i for i in range(1, 20)]}})
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, 'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')
Example #27
    def test_convert_attributes_if_needed(self):
        """ Tests convert_attributes_if_needed() using a reference file that has some relative src paths """

        test_html = """<html>
<head>
<script src="../../some-script.js"></script>
<style src="../../../some-style.css"></style>
</head>
<body>
<img src="../../../../some-image.jpg">
</body>
</html>
"""
        test_reference_support_info = {'reference_relpath': '../', 'files': ['../../some-script.js', '../../../some-style.css', '../../../../some-image.jpg'], 'elements': ['script', 'style', 'img']}
        converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, test_reference_support_info)

        oc = OutputCapture()
        oc.capture_output()
        try:
            converter.feed(test_html)
            converter.close()
            converted = converter.output()
        finally:
            oc.restore_output()

        self.verify_conversion_happened(converted)
        self.verify_reference_relative_paths(converted, test_reference_support_info)
Example #28
    def test_convert_for_webkit_nothing_to_convert(self):
        """ Tests convert_for_webkit() using a basic test that has nothing to convert """

        test_html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR"
href="mailto:EMAIL OR http://CONTACT_PAGE"/>
<link rel="help" href="RELEVANT_SPEC_SECTION"/>
<meta name="assert" content="TEST ASSERTION"/>
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
        converter = W3CTestConverter()

        oc = OutputCapture()
        oc.capture_output()
        try:
            converted = converter.convert_html('/nothing/to/convert', test_html, DUMMY_FILENAME)
        finally:
            oc.restore_output()

        self.verify_no_conversion_happened(converted)
Example #29
    def test_analyze_test_reftest_multiple_matches(self):
        test_html = """<head>
<link rel="match" href="green-box-ref.xht" />
<link rel="match" href="blue-box-ref.xht" />
<link rel="match" href="orange-box-ref.xht" />
</head>
"""
        oc = OutputCapture()
        oc.capture_output()
        try:
            test_path = "/some/madeup/path/"
            parser = TestParser(options, test_path + "somefile.html")
            test_info = parser.analyze_test(test_contents=test_html)
        finally:
            _, _, logs = oc.restore_output()

        self.assertNotEqual(test_info, None, "did not find a test")
        self.assertTrue("test" in test_info.keys(), "did not find a test file")
        self.assertTrue("reference" in test_info.keys(), "did not find a reference file")
        self.assertTrue(test_info["reference"].startswith(test_path), "reference path is not correct")
        self.assertFalse("refsupport" in test_info.keys(), "there should be no refsupport files for this test")
        self.assertFalse("jstest" in test_info.keys(), "test should not have been analyzed as a jstest")

        self.assertEqual(
            logs, "Multiple references are not supported. Importing the first ref defined in somefile.html\n"
        )
Example #30
    def test_parse_output_with_failing_line(self):
        output = DriverOutput('\n'.join([
            'Running 20 times',
            'Ignoring warm-up run (1115)',
            '',
            'some-unrecognizable-line',
            '',
            'Time:',
            'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
            'avg 1100 ms',
            'median 1101 ms',
            'stdev 11 ms',
            'min 1080 ms',
            'max 1120 ms']), image=None, image_hash=None, audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
            test._filter_output(output)
            self.assertEqual(test.parse_output(output), None)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, 'ERROR: some-unrecognizable-line\n')
Example #31
    def test_parse_warnings_are_logged_if_not_in_lint_mode(self):
        oc = OutputCapture()
        try:
            oc.capture_output()
            self.parse_exp('-- this should be a syntax error',
                           is_lint_mode=False)
        finally:
            _, _, logs = oc.restore_output()
            self.assertNotEquals(logs, '')
Example #32
    def assert_commit_queue_flag(commit_flag, expected, username=None):
        bugzilla.username = username
        capture = OutputCapture()
        capture.capture_output()
        try:
            self.assertEqual(bugzilla._commit_queue_flag(commit_flag),
                             expected)
        finally:
            capture.restore_output()
Example #33
class PrintBaselinesTest(unittest.TestCase):
    def setUp(self):
        self.oc = None
        self.tool = MockTool()
        self.test_port = self.tool.port_factory.get('test-win-xp')
        self.tool.port_factory.get = lambda port_name=None: self.test_port
        self.tool.port_factory.all_port_names = lambda: TestPort.ALL_BASELINE_VARIANTS

    def tearDown(self):
        if self.oc:
            self.restore_output()

    def capture_output(self):
        self.oc = OutputCapture()
        self.oc.capture_output()

    def restore_output(self):
        stdout, stderr, logs = self.oc.restore_output()
        self.oc = None
        return (stdout, stderr, logs)

    def test_basic(self):
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform=None), ['passes/text.html'], self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout,
                          ('// For test-win-xp\n'
                           'passes/text-expected.png\n'
                           'passes/text-expected.txt\n'))

    def test_multiple(self):
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform='test-win-*'), ['passes/text.html'], self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout,
                          ('// For test-win-win7\n'
                           'passes/text-expected.png\n'
                           'passes/text-expected.txt\n'
                           '\n'
                           '// For test-win-xp\n'
                           'passes/text-expected.png\n'
                           'passes/text-expected.txt\n'))

    def test_csv(self):
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(MockOptions(all=False, platform='*xp', csv=True, include_virtual_tests=False), ['passes/text.html'], self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout,
                          ('test-win-xp,passes/text.html,None,png,passes/text-expected.png,None\n'
                           'test-win-xp,passes/text.html,None,txt,passes/text-expected.txt,None\n'))
Example #34
    def test_rebaseline_expectations_noop(self):
        self._zero_out_test_expectations()

        oc = OutputCapture()
        try:
            oc.capture_output()
            self.command.execute(self.options, [], self.tool)
        finally:
            _, _, logs = oc.restore_output()
            self.assertEqual(self.tool.filesystem.written_files, {})
            self.assertEqual(logs, 'Did not find any tests marked Rebaseline.\n')
Example #35
    def test_convert_style_multiple_url(self):
        """ Tests convert_attributes_if_needed() using a reference file that has several relative URL paths in the style """

        test_html = """<html>
<head>
 <style type="text/css">
        .redSquare {
            position: absolute;
            left:50px;
            width: 100px;
            height: 100px;
            background-image:url(../support/yyy.png);
        }
        .greenSquare {
            position: absolute;
            left:50px;
            width: 100px;
            height: 100px;
            background-image:url(../support/yy.png);
        }
        .yellowSquare {
            position: absolute;
            left:50px;
            width: 100px;
            height: 100px;
            background-image:url(../../another/directory/x.png);
        }
        .container {
            position: absolute;
        }
    </style>
</head>
<body>
</body>
</html>
"""
        test_reference_support_info = {'reference_relpath': '../', 'files': ['../support/yyy.png', '../support/yy.png', '../../another/directory/x.png']}
        converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, test_reference_support_info)

        oc = OutputCapture()
        oc.capture_output()
        try:
            converter.feed(test_html)
            converter.close()
            converted = converter.output()
        finally:
            oc.restore_output()

        self.verify_conversion_happened(converted)

        for path in test_reference_support_info['files']:
            expected_path = re.sub(test_reference_support_info['reference_relpath'], '', path, 1)
            expected_url = 'background-image:url(' + expected_path + ');'
            self.assertTrue(expected_url in converted[2], 'relative path ' + path + ' was not converted correctly')
Example #36
    def test_run_test_set_for_parser_tests(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self._normalize_output(log), EventTargetWrapperTestData.output + SomeParserTestData.output)
Example #37
    def test_run_with_memory_output(self):
        port = MockPort()
        test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
        memory_results = {'Malloc': 10, 'JSHeap': 5}
        self.maxDiff = None
        driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test, memory_results)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(
                test.run(driver, None), {
                    'some-test': {
                        'max': 20000,
                        'avg': 11000.0,
                        'median': 11000,
                        'stdev': 5627.314338711378,
                        'min': 2000,
                        'unit': 'ms',
                        'values': [i * 1000 for i in range(2, 21)]
                    },
                    'some-test:Malloc': {
                        'max': 10,
                        'avg': 10.0,
                        'median': 10,
                        'min': 10,
                        'stdev': 0.0,
                        'unit': 'bytes',
                        'values': [10] * 19
                    },
                    'some-test:JSHeap': {
                        'max': 5,
                        'avg': 5.0,
                        'median': 5,
                        'min': 5,
                        'stdev': 0.0,
                        'unit': 'bytes',
                        'values': [5] * 19
                    }
                })
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(
            actual_logs,
            'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 5627.31433871 ms, min= 2000 ms, max= 20000 ms\n'
            'RESULT some-test: Malloc= 10.0 bytes\nmedian= 10 bytes, stdev= 0.0 bytes, min= 10 bytes, max= 10 bytes\n'
            'RESULT some-test: JSHeap= 5.0 bytes\nmedian= 5 bytes, stdev= 0.0 bytes, min= 5 bytes, max= 5 bytes\n')
Example #38
    def test_latest_try_jobs_http_error(self):
        def raise_error(_):
            raise urllib2.URLError('Some request error message')
        self.web.get_binary = raise_error
        oc = OutputCapture()
        try:
            oc.capture_output()
            self.assertEqual(latest_try_jobs(11112222, ('bar-builder',), self.web), [])
        finally:
            _, _, logs = oc.restore_output()
        self.assertEqual(logs, 'Request failed to URL: https://codereview.chromium.org/api/11112222\n')
Example #39
    def test_security_output_parse_entry_not_found(self):
        credentials = Credentials("foo.example.com")
        if not credentials._is_mac_os_x():
            return  # This test does not run on a non-Mac.

        # Note, we ignore the captured output because it is already covered
        # by the test case CredentialsTest._assert_security_call (below).
        outputCapture = OutputCapture()
        outputCapture.capture_output()
        self.assertEqual(credentials._run_security_tool(), None)
        outputCapture.restore_output()
Example #40
    def test_pretty_patch_os_error(self):
        port = self.make_port(executive=executive_mock.MockExecutive2(exception=OSError))
        oc = OutputCapture()
        oc.capture_output()
        self.assertEqual(port.pretty_patch_text("patch.txt"),
                         port._pretty_patch_error_html)

        # This tests repeated calls to make sure we cache the result.
        self.assertEqual(port.pretty_patch_text("patch.txt"),
                         port._pretty_patch_error_html)
        oc.restore_output()
Example #41
    def test_show_results_html_file(self):
        port = self.make_port()
        port._executive = MockExecutive(should_log=True)
        capture = OutputCapture()
        capture.capture_output()
        port.show_results_html_file('test.html')
        _, _, logs = capture.restore_output()
        # We can't know for sure what path will be produced by cygpath, but we can assert about
        # everything else.
        self.assertTrue(logs.startswith("MOCK run_command: ['Tools/Scripts/run-safari', '--release', '"))
        self.assertTrue(logs.endswith("test.html'], cwd=/mock-checkout\n"))
Example #42
    def test_skipped_entry_dont_exist(self):
        port = MockHost().port_factory.get('qt')
        expectations_dict = OrderedDict()
        expectations_dict['expectations'] = ''
        port.expectations_dict = lambda: expectations_dict
        port.skipped_layout_tests = lambda tests: set(['foo/bar/baz.html'])
        capture = OutputCapture()
        capture.capture_output()
        exp = TestExpectations(port)
        _, _, logs = capture.restore_output()
        self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs)
Example #43
    def test_rebaseline_test_internal_with_port_that_lacks_buildbot(self):
        self.tool.executive = MockExecutive2()

        # FIXME: it's confusing that this is the test port, and not the regular win port. Really all of the tests should be using the test ports.
        port = self.tool.port_factory.get('test-win-vista')
        self._write(
            port._filesystem.join(
                port.layout_tests_dir(),
                'platform/test-win-vista/failures/expected/image-expected.txt'
            ), 'original vista result')

        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            builders._exact_matches = {
                "MOCK XP": {
                    "port_name": "test-win-xp"
                },
                "MOCK Vista": {
                    "port_name": "test-win-vista"
                },
            }

            options = MockOptions(optimize=True,
                                  builder="MOCK Vista",
                                  suffixes="txt",
                                  move_overwritten_baselines_to=None,
                                  verbose=True,
                                  test="failures/expected/image.html",
                                  results_directory=None)

            oc.capture_output()
            self.command.execute(options, [], self.tool)
        finally:
            out, _, _ = oc.restore_output()
            builders._exact_matches = old_exact_matches

        self.assertMultiLineEqual(
            self._read(
                self.tool.filesystem.join(
                    port.layout_tests_dir(),
                    'platform/test-win-vista/failures/expected/image-expected.txt'
                )), 'MOCK Web result, convert 404 to None=True')
        self.assertFalse(
            self.tool.filesystem.exists(
                self.tool.filesystem.join(
                    port.layout_tests_dir(),
                    'platform/test-win-xp/failures/expected/image-expected.txt'
                )))
        self.assertMultiLineEqual(
            out,
            '{"add": [], "remove-lines": [{"test": "failures/expected/image.html", "builder": "MOCK Vista"}]}\n'
        )
Example #44
    def test_security_output_parse_entry_not_found(self):
        # FIXME: This test won't work if the user has a credential for foo.example.com!
        credentials = Credentials("foo.example.com")
        if not credentials._is_mac_os_x():
            return  # This test does not run on a non-Mac.

        # Note, we ignore the captured output because it is already covered
        # by the test case CredentialsTest._assert_security_call (below).
        outputCapture = OutputCapture()
        outputCapture.capture_output()
        self.assertIsNone(credentials._run_security_tool("find-internet-password"))
        outputCapture.restore_output()
Example #45
    def test_run_with_bad_output(self):
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PageLoadingPerfTest(None, 'some-test', '/path/some-dir/some-test')
            driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 'some error', 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
            self.assertEqual(test.run(driver, None), None)
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, 'error: some-test\nsome error\n')
Example #46
    def test_import_dir_with_no_tests(self):
        host = MockHost()
        host.executive = MockExecutive2(exception=ScriptError("abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"))
        host.filesystem = MockFileSystem(files=FAKE_FILES)

        importer = TestImporter(host, FAKE_SOURCE_DIR, self._parse_options(['-n', '-d', 'w3c', '-t', FAKE_TEST_PATH]))
        oc = OutputCapture()
        oc.capture_output()
        try:
            importer.do_import()
        finally:
            oc.restore_output()
Example #47
    def test_run_test_set(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, len(tests) - 1)
        self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)
Example #48
    def test_parse_output_with_subtests(self):
        output = DriverOutput("""
Description: this is a test description.
some test:Time -> [1, 2, 3, 4, 5] ms
some other test = else:Time -> [6, 7, 8, 9, 10] ms
some other test = else:Malloc -> [11, 12, 13, 14, 15] bytes
Array Construction, []:Time -> [11, 12, 13, 14, 15] ms
Concat String:Time -> [15163, 15304, 15386, 15608, 15622] ms
jQuery - addClass:Time -> [2785, 2815, 2826, 2841, 2861] ms
Dojo - div:only-child:Time -> [7825, 7910, 7950, 7958, 7970] ms
Dojo - div:nth-child(2n+1):Time -> [3620, 3623, 3633, 3641, 3658] ms
Dojo - div > div:Time -> [10158, 10172, 10180, 10183, 10231] ms
Dojo - div ~ div:Time -> [6673, 6675, 6714, 6848, 6902] ms

:Time -> [1080, 1120, 1095, 1101, 1104] ms
""", image=None, image_hash=None, audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
            test.run_single = lambda driver, path, time_out_ms: output
            self.assertTrue(test.run(10))
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

        subtests = test._metrics
        self.assertEqual(map(lambda test: test['name'], subtests), ['some test', 'some other test = else',
            'Array Construction, []', 'Concat String', 'jQuery - addClass', 'Dojo - div:only-child',
            'Dojo - div:nth-child(2n+1)', 'Dojo - div > div', 'Dojo - div ~ div', None])

        some_test_metrics = subtests[0]['metrics']
        self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
        self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'some test'])
        self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1, 2, 3, 4, 5] * 4)

        some_other_test_metrics = subtests[1]['metrics']
        self.assertEqual(map(lambda metric: metric.name(), some_other_test_metrics), ['Time', 'Malloc'])
        self.assertEqual(some_other_test_metrics[0].path(), ['some-dir', 'some-test', 'some other test = else'])
        self.assertEqual(some_other_test_metrics[0].flattened_iteration_values(), [6, 7, 8, 9, 10] * 4)
        self.assertEqual(some_other_test_metrics[1].path(), ['some-dir', 'some-test', 'some other test = else'])
        self.assertEqual(some_other_test_metrics[1].flattened_iteration_values(), [11, 12, 13, 14, 15] * 4)

        main_metrics = subtests[len(subtests) - 1]['metrics']
        self.assertEqual(map(lambda metric: metric.name(), main_metrics), ['Time'])
        self.assertEqual(main_metrics[0].path(), ['some-dir', 'some-test'])
        self.assertEqual(main_metrics[0].flattened_iteration_values(), [1080, 1120, 1095, 1101, 1104] * 4)

        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, """DESCRIPTION: this is a test description.
RESULT some-dir: some-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
""")
Example #49
    def test_import_dir_with_no_tests(self):
        host = MockHost()
        host.executive = MockExecutive2(exception=ScriptError("abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"))
        host.filesystem = MockFileSystem(files=FAKE_FILES)

        importer = TestImporter(host, FAKE_SOURCE_DIR, optparse.Values({"overwrite": False, 'destination': 'w3c', 'test_paths': [FAKE_TEST_PATH]}))
        oc = OutputCapture()
        oc.capture_output()
        try:
            importer.do_import()
        finally:
            oc.restore_output()
Example #50
    def test_helper_starts(self):
        host = MockSystemHost(MockExecutive())
        port = self.make_port(host)
        oc = OutputCapture()
        oc.capture_output()
        host.executive._proc = MockProcess('ready\n')
        port.start_helper()
        port.stop_helper()
        oc.restore_output()

        # make sure trying to stop the helper twice is safe.
        port.stop_helper()
Example #51
    def test_run_memory_test(self):
        runner, port = self.create_runner_and_setup_results_template()
        runner._timestamp = 123456789
        port.host.filesystem.write_text_file(
            runner._base_path + '/Parser/memory-test.html', 'some content')

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner.run()
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(
            log, '\n'.join([
                'Running 1 tests', 'Running Parser/memory-test.html (1 of 1)',
                'RESULT Parser: memory-test= 1100.0 ms',
                'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
                'RESULT Parser: memory-test: JSHeap= 832000.0 bytes',
                'median= 829000.0 bytes, stdev= 15000.0 bytes, min= 811000.0 bytes, max= 848000.0 bytes',
                'RESULT Parser: memory-test: Malloc= 532000.0 bytes',
                'median= 529000.0 bytes, stdev= 13000.0 bytes, min= 511000.0 bytes, max= 548000.0 bytes',
                '', ''
            ]))
        results = runner.load_output_json()[0]['results']
        self.assertEqual(
            results['Parser/memory-test'], {
                'min': 1080.0,
                'max': 1120.0,
                'median': 1101.0,
                'stdev': 11.0,
                'avg': 1100.0,
                'unit': 'ms'
            })
        self.assertEqual(
            results['Parser/memory-test:JSHeap'], {
                'min': 811000.0,
                'max': 848000.0,
                'median': 829000.0,
                'stdev': 15000.0,
                'avg': 832000.0,
                'unit': 'bytes'
            })
        self.assertEqual(
            results['Parser/memory-test:Malloc'], {
                'min': 511000.0,
                'max': 548000.0,
                'median': 529000.0,
                'stdev': 13000.0,
                'avg': 532000.0,
                'unit': 'bytes'
            })
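A small illustration of the result-key convention the assertions above rely on (inferred from the expected JSON, not taken from the runner's code): the primary Time metric is stored under the bare test name, while every other metric gets a ':<MetricName>' suffix.

def result_key(test_name, metric_name):
    # 'Parser/memory-test' for Time; 'Parser/memory-test:JSHeap', etc. otherwise.
    if metric_name == 'Time':
        return test_name
    return '%s:%s' % (test_name, metric_name)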
Example #52
    def test_run(self):
        test = PageLoadingPerfTest(None, 'some-test', '/path/some-dir/some-test')
        driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(test.run(driver, None),
                {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': math.sqrt(570 * 1000 * 1000), 'min': 2000, 'unit': 'ms'}})
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, 'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 23874.6727726 ms, min= 2000 ms, max= 20000 ms\n')
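A back-of-envelope check of the numbers expected above, assuming the first of the 20 mock samples is consumed as a warm-up run and excluded from the statistics (consistent with min being 2000 ms rather than 1000 ms). Note the logged stdev is the square root of the summed squared deviations, not a sample standard deviation; this hypothetical check is not part of PageLoadingPerfTest:

import math

samples_ms = [i * 1000 for i in range(2, 21)]          # 2000..20000 ms
mean = sum(samples_ms) / float(len(samples_ms))        # (2 + ... + 20) / 19 = 11000.0
sum_sq_dev = sum((s - mean) ** 2 for s in samples_ms)  # 570 * 10 ** 6
assert mean == 11000.0
assert round(math.sqrt(sum_sq_dev), 7) == 23874.6727726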
Example #53
    def test_help_argument(self):
        oc = OutputCapture()
        oc.capture_output()
        tool = WebKitPatch('path')
        try:
            tool.main(['tool', '--help'])
        except SystemExit:
            pass  # optparse calls sys.exit after showing help.
        finally:
            out, err, logs = oc.restore_output()
        self.assertTrue(out.startswith('Usage: '))
        self.assertEqual('', err)
        self.assertEqual('', logs)
Example #54
    def test_import_dir_with_no_tests_and_no_hg(self):
        host = MockHost()
        host.executive = MockExecutive2(exception=OSError())
        host.filesystem = MockFileSystem(files=FAKE_FILES)

        importer = TestImporter(host, FAKE_SOURCE_DIR, optparse.Values({"overwrite": False, 'destination': 'w3c', 'test_paths': [FAKE_TEST_PATH]}))

        oc = OutputCapture()
        oc.capture_output()
        try:
            importer.do_import()
        finally:
            oc.restore_output()
Example #55
    def _assert_failed_on_line(self, output_text, expected_log):
        output = DriverOutput(output_text, image=None, image_hash=None, audio=None)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
            test.run_single = lambda driver, path, time_out_ms: output
            self.assertFalse(test._run_with_driver(None, None))
        finally:
            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
        self.assertEqual(actual_stdout, '')
        self.assertEqual(actual_stderr, '')
        self.assertEqual(actual_logs, expected_log)
Example #56
    def test_import_dir_with_no_tests_and_no_hg(self):
        host = MockHost()
        host.executive = MockExecutive2(exception=OSError())
        host.filesystem = MockFileSystem(files=FAKE_FILES)

        importer = TestImporter(host, FAKE_SOURCE_DIR, self._parse_options(['-n', '-d', 'w3c', '-t', FAKE_TEST_PATH]))

        oc = OutputCapture()
        oc.capture_output()
        try:
            importer.do_import()
        finally:
            oc.restore_output()
Example #57
    def test_helper_fails_to_stop(self):
        host = MockSystemHost(MockExecutive())
        host.executive._proc = MockProcess()

        def bad_waiter():
            raise IOError('failed to wait')
        host.executive._proc.wait = bad_waiter

        port = self.make_port(host)
        oc = OutputCapture()
        oc.capture_output()
        port.start_helper()
        port.stop_helper()
        oc.restore_output()
Example #58
    def test_rebaseline_test_internal_with_copying_overwritten_baseline_first(
            self):
        self.tool.executive = MockExecutive2()

        # FIXME: it's confusing that this is the test- port, and not the regular lion port. Really all of the tests should be using the test ports.
        port = self.tool.port_factory.get('test-mac-snowleopard')
        self._write(
            port._filesystem.join(
                port.layout_tests_dir(),
                'platform/test-mac-snowleopard/failures/expected/image-expected.txt'
            ), 'original snowleopard result')

        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            builders._exact_matches = {
                "MOCK Leopard": {
                    "port_name": "test-mac-leopard",
                    "specifiers": set(["mock-specifier"])
                },
                "MOCK SnowLeopard": {
                    "port_name": "test-mac-snowleopard",
                    "specifiers": set(["mock-specifier"])
                },
            }

            options = MockOptions(optimize=True,
                                  builder="MOCK SnowLeopard",
                                  suffixes="txt",
                                  move_overwritten_baselines_to=None,
                                  verbose=True,
                                  test="failures/expected/image.html",
                                  results_directory=None)

            oc.capture_output()
            self.command.execute(options, [], self.tool)
        finally:
            out, _, _ = oc.restore_output()
            builders._exact_matches = old_exact_matches

        self.assertMultiLineEqual(
            self._read(
                self.tool.filesystem.join(
                    port.layout_tests_dir(),
                    'platform/test-mac-leopard/failures/expected/image-expected.txt'
                )), 'original snowleopard result')
        self.assertMultiLineEqual(
            out,
            '{"add": [], "remove-lines": [{"test": "failures/expected/image.html", "builder": "MOCK SnowLeopard"}]}\n'
        )
Example #59
    def test_optimize_all_suffixes_by_default(self):
        test_port = self.tool.port_factory.get('test')
        self._write_test_file(test_port, 'another/test.html',
                              "Dummy test contents")
        self._write_test_file(
            test_port, 'platform/test-mac-mac10.10/another/test-expected.txt',
            "result A")
        self._write_test_file(
            test_port, 'platform/test-mac-mac10.10/another/test-expected.png',
            "result A png")
        self._write_test_file(test_port, 'another/test-expected.txt',
                              "result A")
        self._write_test_file(test_port, 'another/test-expected.png',
                              "result A png")

        oc = OutputCapture()
        oc.capture_output()
        try:
            self.command.execute(
                optparse.Values({
                    'suffixes': 'txt,wav,png',
                    'no_modify_scm': True,
                    'platform': 'test-mac-mac10.10'
                }), ['another/test.html'], self.tool)
        finally:
            out, _, _ = oc.restore_output()

        self.assertEqual(
            out, '{"add": [], "remove-lines": [], '
            '"delete": ["/test.checkout/LayoutTests/platform/test-mac-mac10.10/another/test-expected.txt", '
            '"/test.checkout/LayoutTests/platform/test-mac-mac10.10/another/test-expected.png"]}\n'
        )
        self.assertFalse(
            self.tool.filesystem.exists(
                self.tool.filesystem.join(
                    test_port.layout_tests_dir(),
                    'platform/mac/another/test-expected.txt')))
        self.assertFalse(
            self.tool.filesystem.exists(
                self.tool.filesystem.join(
                    test_port.layout_tests_dir(),
                    'platform/mac/another/test-expected.png')))
        self.assertTrue(
            self.tool.filesystem.exists(
                self.tool.filesystem.join(test_port.layout_tests_dir(),
                                          'another/test-expected.txt')))
        self.assertTrue(
            self.tool.filesystem.exists(
                self.tool.filesystem.join(test_port.layout_tests_dir(),
                                          'another/test-expected.png')))
Example #60
    def test_generate_jsons(self):
        filesystem = MockFileSystem()
        test_json = {'array.json': [1, 2, 3, {'key': 'value'}], 'dictionary.json': {'somekey': 'somevalue', 'array': [4, 5]}}

        capture = OutputCapture()
        capture.capture_output()

        AnalyzeChangeLog._generate_jsons(filesystem, test_json, 'bar')
        self.assertEqual(set(filesystem.files.keys()), set(['bar/array.json', 'bar/dictionary.json']))

        capture.restore_output()

        self.assertEqual(json.loads(filesystem.files['bar/array.json']), test_json['array.json'])
        self.assertEqual(json.loads(filesystem.files['bar/dictionary.json']), test_json['dictionary.json'])
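The test pins down _generate_jsons' observable behavior: one file per key, JSON-serialized under the target directory. A hypothetical re-implementation consistent with these assertions, assuming a webkitpy FileSystem-like object with join() and write_text_file():

import json

def generate_jsons(filesystem, json_map, output_dir):
    # Serialize each entry of the map to '<output_dir>/<filename>'.
    for filename, content in json_map.items():
        filesystem.write_text_file(filesystem.join(output_dir, filename), json.dumps(content))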