def test_optimize_all_suffixes_by_default(self):
    """Optimizing with a suffix list covering all baselines should dedupe
    platform baselines into the generic (non-platform) location."""
    test_port = self.tool.port_factory.get('test')
    # Platform baselines are identical to the generic ones, so the optimizer
    # should remove the redundant platform copies.
    self._write_test_file(test_port, 'another/test.html', "Dummy test contents")
    self._write_test_file(test_port, 'platform/test-mac-mac10.10/another/test-expected.txt', "result A")
    self._write_test_file(test_port, 'platform/test-mac-mac10.10/another/test-expected.png', "result A png")
    self._write_test_file(test_port, 'another/test-expected.txt', "result A")
    self._write_test_file(test_port, 'another/test-expected.png', "result A png")
    try:
        oc = OutputCapture()
        oc.capture_output()
        self.command.execute(
            optparse.Values({'suffixes': 'txt,wav,png', 'no_modify_scm': True, 'platform': 'test-mac-mac10.10'}),
            ['another/test.html'],
            self.tool)
    finally:
        oc.restore_output()
    # The platform copies should be gone ...
    self.assertFalse(
        self.tool.filesystem.exists(self.tool.filesystem.join(
            test_port.layout_tests_dir(), 'platform/mac/another/test-expected.txt')))
    self.assertFalse(
        self.tool.filesystem.exists(self.tool.filesystem.join(
            test_port.layout_tests_dir(), 'platform/mac/another/test-expected.png')))
    # ... but the generic baselines should remain.
    self.assertTrue(
        self.tool.filesystem.exists(self.tool.filesystem.join(
            test_port.layout_tests_dir(), 'another/test-expected.txt')))
    self.assertTrue(
        self.tool.filesystem.exists(self.tool.filesystem.join(
            test_port.layout_tests_dir(), 'another/test-expected.png')))
def test_hides_all_instructions_for_manual_testers(self):
    """Elements with the 'instructions' class should be hidden (display:none)
    by the converter, regardless of tag or other classes; any pre-existing
    style attribute is overwritten. Plain text mentioning 'instructions' is
    left alone."""
    test_html = """<body> <h1 class="instructions">Hello manual tester!</h1> <p class="instructions some_other_class">This is how you run this test.</p> <p style="willbeoverwritten" class="instructions">...</p> <doesntmatterwhichtagitis class="some_other_class instructions">...</p> <p>Legit content may contain the instructions string</p> </body> """
    expected_test_html = """<body> <h1 class="instructions" style="display:none">Hello manual tester!</h1> <p class="instructions some_other_class" style="display:none">This is how you run this test.</p> <p class="instructions" style="display:none">...</p> <doesntmatterwhichtagitis class="some_other_class instructions" style="display:none">...</p> <p>Legit content may contain the instructions string</p> </body> """
    converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME)
    oc = OutputCapture()
    oc.capture_output()
    try:
        converter.feed(test_html)
        converter.close()
        converted = converter.output()
    finally:
        oc.restore_output()
    # converted[1] holds the rewritten HTML text.
    self.assertEqual(converted[1], expected_test_html)
def test_convert_for_webkit_harness_and_properties(self):
    """ Tests convert_for_webkit() using a basic JS test that uses testharness.js and testharness.css and has 4 prefixed properties: 3 in a style block + 1 inline style """
    # @testN@ placeholders are substituted with real prefixed property names
    # by generate_test_content() below.
    test_html = """<html> <head> <link href="/resources/testharness.css" rel="stylesheet" type="text/css"> <script src="/resources/testharness.js"></script> <style type="text/css"> #block1 { @test0@: propvalue; } #block2 { @test1@: propvalue; } #block3 { @test2@: propvalue; } </style> </head> <body> <div id="elem1" style="@test3@: propvalue;"></div> </body> </html> """
    converter = W3CTestConverter()
    fake_dir_path = self.fake_dir_path(converter, 'harnessandprops')
    oc = OutputCapture()
    oc.capture_output()
    try:
        test_content = self.generate_test_content(converter.prefixed_properties, 2, test_html)
        converted = converter.convert_html(fake_dir_path, test_content[1], DUMMY_FILENAME)
    finally:
        oc.restore_output()
    self.verify_conversion_happened(converted)
    # Expect 1 script path and 1 stylesheet path rewritten for the harness.
    self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
    self.verify_prefixed_properties(converted, test_content[0])
def test_upload_json(self):
    """Exercises runner._upload_json() against a scripted FileUploader mock:
    success, plain-text error, raised exception, JSON OK status, and JSON
    failure status."""
    runner, port = self.create_runner()
    port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'

    class MockFileUploader:
        # Class-level state so _upload_json's internally-constructed instance
        # can be inspected/configured from the test.
        called = []
        upload_single_text_file_throws = False
        upload_single_text_file_return_value = None

        @classmethod
        def reset(cls):
            cls.called = []
            cls.upload_single_text_file_throws = False
            cls.upload_single_text_file_return_value = None

        # 'mock' is used instead of 'self' so the enclosing test's 'self'
        # (the TestCase) stays available for assertions.
        def __init__(mock, url, timeout):
            self.assertEqual(url, 'https://some.host/some/path')
            self.assertTrue(isinstance(timeout, int) and timeout)
            mock.called.append('FileUploader')

        def upload_single_text_file(mock, filesystem, content_type, filename):
            self.assertEqual(filesystem, port.host.filesystem)
            self.assertEqual(content_type, 'application/json')
            self.assertEqual(filename, 'some.json')
            mock.called.append('upload_single_text_file')
            if mock.upload_single_text_file_throws:
                raise Exception
            return mock.upload_single_text_file_return_value

    # Plain-text 'OK' response: upload succeeds.
    MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('OK')
    self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
    self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])

    # Non-'OK' plain-text response: upload fails and the response is logged.
    MockFileUploader.reset()
    MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('Some error')
    output = OutputCapture()
    output.capture_output()
    self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
    _, _, logs = output.restore_output()
    self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got a bad response:\nSome error\n')

    # Throwing an exception in upload_single_text_file shouldn't blow up _upload_json
    MockFileUploader.reset()
    MockFileUploader.upload_single_text_file_throws = True
    self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
    self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])

    # JSON response with status OK: upload succeeds.
    MockFileUploader.reset()
    MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "OK"}')
    self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
    self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])

    # JSON response with a failure status: upload fails and the pretty-printed
    # JSON is logged.
    MockFileUploader.reset()
    MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "SomethingHasFailed", "failureStored": false}')
    output = OutputCapture()
    output.capture_output()
    self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
    _, _, logs = output.restore_output()
    serialized_json = json.dumps({'status': 'SomethingHasFailed', 'failureStored': False}, indent=4)
    self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got an error:\n%s\n' % serialized_json)
def test_rebaseline(self):
    """Checks the path from command.execute() down to command._rebaseline().

    It doesn't test that _rebaseline() actually does anything (that is
    tested in TestRebaselineJson).
    """
    self.test_list = {}

    # Stub that records the test list _rebaseline was asked to process.
    def rebaseline_stub(options, test_list):
        self.test_list = test_list

    self.command._builders_to_pull_from = lambda: [MockBuilder("MOCK builder")]
    self.command._tests_to_update = lambda builder: ["mock/path/to/test.html"]
    self.command._rebaseline = rebaseline_stub
    self._zero_out_test_expectations()
    # Swap in a controlled builder table; restored in the finally block.
    old_exact_matches = builders._exact_matches
    oc = OutputCapture()
    try:
        builders._exact_matches = {
            "MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])}
        }
        oc.capture_output()
        self.command.execute(
            MockOptions(optimize=True, builders=None, suffixes=["txt"], verbose=True), [], self.tool
        )
    finally:
        oc.restore_output()
        builders._exact_matches = old_exact_matches
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(self.test_list, {"mock/path/to/test.html": {"MOCK builder": ["txt"]}})
def test_convert_test_harness_paths(self):
    """ Tests convert_testharness_paths() with a test that uses multiple testharness files """
    test_html = """<head> <link href="/resources/testharness.css" rel="stylesheet" type="text/css"> <script src="/resources/testharness.js"></script> <script src="/resources/testharnessreport.js"></script> <script src="/resources/WebIDLParser.js"></script> <script src="/resources/idlharness.js"></script> </head> """
    fake_dir_path = self.fake_dir_path('testharnesspaths')
    converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
    oc = OutputCapture()
    oc.capture_output()
    try:
        converter.feed(test_html)
        converter.close()
        converted = converter.output()
    finally:
        oc.restore_output()
    self.verify_conversion_happened(converted)
    # Expect 4 script paths and 1 stylesheet path to have been rewritten.
    self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 4, 1)
def test_convert_for_webkit_properties_only(self):
    """ Tests convert_for_webkit() using a test that has 2 prefixed properties: 1 in a style block + 1 inline style """
    # @testN@ / @propvalueN@ placeholders are filled in by
    # generate_test_content_properties_and_values() below.
    test_html = """<html> <head> <link href="/resources/testharness.css" rel="stylesheet" type="text/css"> <script src="/resources/testharness.js"></script> <style type="text/css"> #block1 { @test0@: @propvalue0@; } </style> </head> <body> <div id="elem1" style="@test1@: @propvalue1@;"></div> </body> </html> """
    fake_dir_path = self.fake_dir_path('harnessandprops')
    converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
    test_content = self.generate_test_content_properties_and_values(converter.prefixed_properties, converter.prefixed_property_values, 1, test_html)
    oc = OutputCapture()
    oc.capture_output()
    try:
        converter.feed(test_content[2])
        converter.close()
        converted = converter.output()
    finally:
        oc.restore_output()
    self.verify_conversion_happened(converted)
    # converted[2] holds the converted HTML here (properties, values, html).
    self.verify_test_harness_paths(converter, converted[2], fake_dir_path, 1, 1)
    self.verify_prefixed_properties(converted, test_content[0])
    self.verify_prefixed_property_values(converted, test_content[1])
def test_convert_for_webkit_nothing_to_convert(self):
    """ Tests convert_for_webkit() using a basic test that has nothing to convert """
    test_html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>CSS Test: DESCRIPTION OF TEST</title> <link rel="author" title="NAME_OF_AUTHOR" href="mailto:EMAIL OR http://CONTACT_PAGE"/> <link rel="help" href="RELEVANT_SPEC_SECTION"/> <meta name="assert" content="TEST ASSERTION"/> <style type="text/css"><![CDATA[ CSS FOR TEST ]]></style> </head> <body> CONTENT OF TEST </body> </html> """
    converter = W3CTestConverter()
    oc = OutputCapture()
    oc.capture_output()
    try:
        converted = converter.convert_html('/nothing/to/convert', test_html, DUMMY_FILENAME)
    finally:
        oc.restore_output()
    # No harness paths and no prefixed properties: output should be untouched.
    self.verify_no_conversion_happened(converted)
def test_convert_attributes_if_needed(self):
    """ Tests convert_attributes_if_needed() using a reference file that has some relative src paths """
    test_html = """<html> <head> <script src="../../some-script.js"></script> <style src="../../../some-style.css"></style> </head> <body> <img src="../../../../some-image.jpg"> </body> </html> """
    # Describes the support files the reference pulls in and where they live
    # relative to the reference file.
    test_reference_support_info = {'reference_relpath': '../', 'files': ['../../some-script.js', '../../../some-style.css', '../../../../some-image.jpg'], 'elements': ['script', 'style', 'img']}
    converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, test_reference_support_info)
    oc = OutputCapture()
    oc.capture_output()
    try:
        converter.feed(test_html)
        converter.close()
        converted = converter.output()
    finally:
        oc.restore_output()
    self.verify_conversion_happened(converted)
    self.verify_reference_relative_paths(converted, test_reference_support_info)
def test_convert_vendor_prefix_js_paths(self):
    """The converter should rewrite /common/vendor-prefix.js references into a
    relative path under LayoutTests/resources."""
    test_html = """<head> <script src="/common/vendor-prefix.js"> </head> """
    fake_dir_path = self.fake_dir_path('adapterjspaths')
    converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME)
    oc = OutputCapture()
    oc.capture_output()
    try:
        converter.feed(test_html)
        converter.close()
        converted = converter.output()
    finally:
        oc.restore_output()
    new_html = BeautifulSoup(converted[1])
    # Verify the original paths are gone, and the new paths are present.
    # Use a raw string and escape the '.' so it matches literally instead of
    # any character; assertEquals is a deprecated alias of assertEqual.
    orig_path_pattern = re.compile(r'"/common/vendor-prefix\.js')
    self.assertEqual(len(new_html.findAll(src=orig_path_pattern)), 0,
                     'vendor-prefix.js path was not converted')
    resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")
    new_relpath = os.path.relpath(resources_dir, fake_dir_path)
    relpath_pattern = re.compile(new_relpath)
    self.assertEqual(len(new_html.findAll(src=relpath_pattern)), 1,
                     'vendor-prefix.js relative path not correct')
def test_rebaseline(self):
    """Runs the rebaseline command end-to-end against a mocked builder table
    and verifies the internal rebaseline command line that gets spawned."""
    self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
    self.command._tests_to_update = lambda builder: ['mock/path/to/test.html']
    self._zero_out_test_expectations()
    # Swap in a controlled builder table; restored in the finally block.
    old_exact_matches = builders._exact_matches
    oc = OutputCapture()
    try:
        builders._exact_matches = {
            "MOCK builder": {
                "port_name": "test-mac-leopard",
                "specifiers": set(["mock-specifier"])
            },
        }
        oc.capture_output()
        self.command.execute(
            MockOptions(
                optimize=False,
                builders=None,
                suffixes="txt,png",
                verbose=True,
                move_overwritten_baselines=False), [], self.tool)
    finally:
        oc.restore_output()
        builders._exact_matches = old_exact_matches
    # Ignore incidental perl invocations; only the echoed internal command
    # matters for this test.
    calls = filter(lambda x: x[0] != 'perl', self.tool.executive.calls)
    self.assertEqual(calls, [[[
        'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png',
        '--builder', 'MOCK builder', '--test', 'mock/path/to/test.html',
        '--verbose'
    ]]])
def test_paths(self):
    """Checks that filesystem paths (relative, absolute, directories) map to
    the expected unittest module names, and that bad paths are logged."""
    self.fs.chdir('/foo/bar')
    self.check_names(['baz_unittest.py'], ['bar.baz_unittest'])
    self.check_names(['./baz_unittest.py'], ['bar.baz_unittest'])
    self.check_names(['/foo/bar/baz_unittest.py'], ['bar.baz_unittest'])
    self.check_names(['.'], ['bar.baz_unittest'])
    self.check_names(['../../foo2/bar2'], ['bar2.baz2_integrationtest'])
    self.fs.chdir('/')
    self.check_names(['bar'], ['bar.baz_unittest'])
    self.check_names(['/foo/bar/'], ['bar.baz_unittest'])

    # This works 'by accident' since it maps onto a package.
    self.check_names(['bar/'], ['bar.baz_unittest'])

    # This should log an error, since it's outside the trees.
    oc = OutputCapture()
    oc.set_log_level(logging.ERROR)
    oc.capture_output()
    try:
        self.check_names(['/tmp/another_unittest.py'], [])
    finally:
        _, _, logs = oc.restore_output()
    self.assertIn('another_unittest.py', logs)

    # Paths that don't exist are errors.
    oc.capture_output()
    try:
        self.check_names(['/foo/bar/notexist_unittest.py'], [])
    finally:
        _, _, logs = oc.restore_output()
    self.assertIn('notexist_unittest.py', logs)

    # Names that don't exist are caught later, at load time.
    self.check_names(['bar.notexist_unittest'], ['bar.notexist_unittest'])
def test_check_build(self):
    """check_build() should warn about missing PrettyPatch but still succeed,
    and should fail with errors when the driver build is missing/stale."""
    port = self.make_port()
    # Pretend every required file exists and the build is up to date.
    port._check_file_exists = lambda path, desc: True
    if port._dump_reader:
        port._dump_reader.check_is_functional = lambda: True
    port._options.build = True
    port._check_driver_build_up_to_date = lambda config: True
    port.check_httpd = lambda: True
    oc = OutputCapture()
    try:
        oc.capture_output()
        self.assertEqual(port.check_build(needs_http=True, printer=FakePrinter()),
                         test_run_results.OK_EXIT_STATUS)
    finally:
        out, err, logs = oc.restore_output()
    self.assertIn('pretty patches', logs)   # We should get a warning about PrettyPatch being missing,
    self.assertNotIn('build requirements', logs)  # but not the driver itself.

    # Now pretend files are missing and the build is stale.
    port._check_file_exists = lambda path, desc: False
    port._check_driver_build_up_to_date = lambda config: False
    try:
        oc.capture_output()
        self.assertEqual(port.check_build(needs_http=True, printer=FakePrinter()),
                         test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
    finally:
        out, err, logs = oc.restore_output()
    self.assertIn('pretty patches', logs)   # And, here we should get warnings about both.
    self.assertIn('build requirements', logs)
def assert_commit_queue_flag(mark_for_landing, mark_for_commit_queue, expected, username=None):
    """Helper: asserts bugzilla._commit_queue_flag() returns `expected` for the
    given landing/commit-queue flags, with output suppressed.

    NOTE(review): relies on `bugzilla` and `self` from the enclosing test
    scope — presumably defined in the surrounding test method.
    """
    bugzilla.username = username
    capture = OutputCapture()
    capture.capture_output()
    try:
        self.assertEqual(bugzilla._commit_queue_flag(mark_for_landing=mark_for_landing,
                                                     mark_for_commit_queue=mark_for_commit_queue),
                         expected)
    finally:
        capture.restore_output()
def test_helper_fails_to_start(self):
    """A helper that never becomes ready must not break start/stop."""
    mock_host = MockSystemHost(MockExecutive())
    port = self.make_port(mock_host)
    capture = OutputCapture()
    capture.capture_output()
    # No mock process is installed, so the helper cannot start; both calls
    # should still return without raising.
    port.start_helper()
    port.stop_helper()
    capture.restore_output()
def assert_commit_queue_flag(commit_flag, expected, username=None):
    """Helper: asserts bugzilla._commit_queue_flag(commit_flag) returns
    `expected`, with output suppressed.

    NOTE(review): relies on `bugzilla` and `self` from the enclosing test
    scope — presumably defined in the surrounding test method.
    """
    bugzilla.username = username
    capture = OutputCapture()
    capture.capture_output()
    try:
        self.assertEqual(bugzilla._commit_queue_flag(commit_flag), expected)
    finally:
        capture.restore_output()
def test_pretty_patch_os_error(self):
    """When the PrettyPatch tool raises OSError, the error HTML is returned —
    and cached, so a second call yields the same result."""
    port = self.make_port(executive=executive_mock.MockExecutive2(exception=OSError))
    capture = OutputCapture()
    capture.capture_output()
    # Two identical calls: the second verifies that the failure is cached.
    for _ in range(2):
        self.assertEqual(port.pretty_patch_text("patch.txt"), port._pretty_patch_error_html)
    capture.restore_output()
def test_security_output_parse_entry_not_found(self):
    """_run_security_tool() should return None when the keychain has no entry
    for the host. Mac-only."""
    credentials = Credentials("foo.example.com")
    if not credentials._is_mac_os_x():
        return  # This test does not run on a non-Mac.
    # Note, we ignore the captured output because it is already covered
    # by the test case CredentialsTest._assert_security_call (below).
    outputCapture = OutputCapture()
    outputCapture.capture_output()
    # assertIsNone is the idiomatic form of assertEqual(x, None) and matches
    # the sibling version of this test.
    self.assertIsNone(credentials._run_security_tool())
    outputCapture.restore_output()
def test_import_dir_with_no_tests(self):
    """do_import() should tolerate a source dir with no tests even when the
    'hg' invocation fails with a ScriptError."""
    host = MockHost()
    host.executive = MockExecutive2(exception=ScriptError("abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"))
    host.filesystem = MockFileSystem(files=FAKE_FILES)
    importer = TestImporter(host, FAKE_SOURCE_DIR, FAKE_REPO_DIR, optparse.Values({"overwrite": False}))
    oc = OutputCapture()
    oc.capture_output()
    try:
        # Passes as long as do_import() does not raise.
        importer.do_import()
    finally:
        oc.restore_output()
def test_import_dir_with_no_tests(self):
    """do_import() should tolerate a source dir with no tests even when the
    'hg' invocation fails with a ScriptError (options-parsing variant)."""
    host = MockHost()
    host.executive = MockExecutive2(exception=ScriptError("abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"))
    host.filesystem = MockFileSystem(files=FAKE_FILES)
    importer = TestImporter(host, FAKE_SOURCE_DIR, self._parse_options(['-n', '-d', 'w3c', '-t', FAKE_TEST_PATH]))
    oc = OutputCapture()
    oc.capture_output()
    try:
        # Passes as long as do_import() does not raise.
        importer.do_import()
    finally:
        oc.restore_output()
def test_security_output_parse_entry_not_found(self):
    """_run_security_tool('find-internet-password') should return None when no
    keychain entry exists. Mac-only."""
    # FIXME: This test won't work if the user has a credential for foo.example.com!
    credentials = Credentials("foo.example.com")
    if not credentials._is_mac_os_x():
        return  # This test does not run on a non-Mac.
    # Note, we ignore the captured output because it is already covered
    # by the test case CredentialsTest._assert_security_call (below).
    outputCapture = OutputCapture()
    outputCapture.capture_output()
    self.assertIsNone(credentials._run_security_tool("find-internet-password"))
    outputCapture.restore_output()
def test_helper_starts(self):
    """A helper process that reports 'ready' should start and stop cleanly;
    stopping twice must be safe."""
    host = MockSystemHost(MockExecutive())
    port = self.make_port(host)
    oc = OutputCapture()
    oc.capture_output()
    # The mock process prints 'ready' so start_helper() sees a live helper.
    host.executive._proc = MockProcess('ready\n')
    port.start_helper()
    port.stop_helper()
    oc.restore_output()

    # make sure trying to stop the helper twice is safe.
    port.stop_helper()
def test_import_dir_with_no_tests_and_no_hg(self):
    """do_import() should tolerate a missing 'hg' binary (OSError) on a source
    dir with no tests."""
    host = MockHost()
    host.executive = MockExecutive2(exception=OSError())
    host.filesystem = MockFileSystem(files=FAKE_FILES)
    importer = TestImporter(host, FAKE_SOURCE_DIR, self._parse_options(['-n', '-d', 'w3c', '-t', FAKE_TEST_PATH]))
    oc = OutputCapture()
    oc.capture_output()
    try:
        # Passes as long as do_import() does not raise.
        importer.do_import()
    finally:
        oc.restore_output()
def test_import_dir_with_no_tests_and_no_hg(self):
    """do_import() should tolerate a missing 'hg' binary (OSError) on a source
    dir with no tests (optparse.Values variant)."""
    host = MockHost()
    host.executive = MockExecutive2(exception=OSError())
    host.filesystem = MockFileSystem(files=FAKE_FILES)
    importer = TestImporter(host, FAKE_SOURCE_DIR, FAKE_REPO_DIR, optparse.Values({"overwrite": False}))
    oc = OutputCapture()
    oc.capture_output()
    try:
        # Passes as long as do_import() does not raise.
        importer.do_import()
    finally:
        oc.restore_output()
def test_helper_fails_to_stop(self):
    """stop_helper() must not propagate an IOError raised while waiting for
    the helper process to exit."""
    host = MockSystemHost(MockExecutive())
    host.executive._proc = MockProcess()

    # Simulate the wait() call failing when stopping the helper.
    def bad_waiter():
        raise IOError('failed to wait')
    host.executive._proc.wait = bad_waiter

    port = self.make_port(host)
    oc = OutputCapture()
    oc.capture_output()
    port.start_helper()
    port.stop_helper()
    oc.restore_output()
def test_generate_jsons(self):
    """_generate_jsons() should write each entry of the dict as a JSON file
    under the given directory, round-trippable via json.loads."""
    filesystem = MockFileSystem()
    test_json = {'array.json': [1, 2, 3, {'key': 'value'}], 'dictionary.json': {'somekey': 'somevalue', 'array': [4, 5]}}
    capture = OutputCapture()
    capture.capture_output()
    AnalyzeChangeLog._generate_jsons(filesystem, test_json, 'bar')
    self.assertEqual(set(filesystem.files.keys()), set(['bar/array.json', 'bar/dictionary.json']))
    capture.restore_output()

    # The written files should deserialize back to the original values.
    self.assertEqual(json.loads(filesystem.files['bar/array.json']), test_json['array.json'])
    self.assertEqual(json.loads(filesystem.files['bar/dictionary.json']), test_json['dictionary.json'])
def test_rebaseline_test_internal_with_move_overwritten_baselines_to(self): self.tool.executive = MockExecutive2() # FIXME: it's confusing that this is the test- port, and not the regular lion port. Really all of the tests should be using the test ports. port = self.tool.port_factory.get('test-mac-snowleopard') self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-mac-snowleopard/failures/expected/image-expected.txt'), 'original snowleopard result') old_exact_matches = builders._exact_matches oc = OutputCapture() try: builders._exact_matches = { "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])}, "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])}, } options = MockOptions(optimize=True, builder="MOCK SnowLeopard", suffixes="txt", move_overwritten_baselines_to=["test-mac-leopard"], verbose=True, test="failures/expected/image.html", results_directory=None) oc.capture_output() self.command.execute(options, [], self.tool) finally: out, _, _ = oc.restore_output() builders._exact_matches = old_exact_matches self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result') self.assertMultiLineEqual(out, '{"add": []}\n')
def test_analyze_test_reftest_match_and_mismatch(self):
    """A reftest with multiple match/mismatch links should be analyzed as a
    reftest using only the first reference, with a warning logged."""
    test_html = """<head> <link rel="match" href="green-box-ref.xht" /> <link rel="match" href="blue-box-ref.xht" /> <link rel="mismatch" href="orange-box-notref.xht" /> </head> """
    oc = OutputCapture()
    oc.capture_output()
    try:
        test_path = '/some/madeup/path/'
        parser = TestParser(options, test_path + 'somefile.html')
        test_info = parser.analyze_test(test_contents=test_html)
    finally:
        _, _, logs = oc.restore_output()
    self.assertNotEqual(test_info, None, 'did not find a test')
    self.assertTrue('test' in test_info.keys(), 'did not find a test file')
    self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
    self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
    self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
    self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
    # Only the first reference is imported; the rest trigger this warning.
    self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')
def _test_run_with_json_output(self, runner, filesystem, upload_suceeds=True, expected_exit_code=0):
    """Shared driver: runs the perf runner with JSON output enabled and a
    stubbed upload, checks exit code and log output, and returns whether the
    upload stub was invoked.

    NOTE(review): 'upload_suceeds' is a typo for 'upload_succeeds', but it is
    part of the caller-visible signature, so it is left unchanged here.
    """
    filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
    filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')

    # Mutable cell so the nested stub can record that it was called.
    uploaded = [False]

    def mock_upload_json(hostname, json_path):
        self.assertEqual(hostname, 'some.host')
        self.assertEqual(json_path, '/mock-checkout/output.json')
        uploaded[0] = True
        return upload_suceeds

    runner._upload_json = mock_upload_json
    runner._timestamp = 123456789
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        self.assertEqual(runner.run(), expected_exit_code)
    finally:
        stdout, stderr, logs = output_capture.restore_output()

    if not expected_exit_code:
        # On success, the full per-test log output is fixed by the mock data.
        self.assertEqual(logs, '\n'.join([
            'Running 2 tests',
            'Running Bindings/event-target-wrapper.html (1 of 2)',
            'RESULT Bindings: event-target-wrapper= 1489.05 ms',
            'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
            '',
            'Running inspector/pass.html (2 of 2)',
            'RESULT group_name: test_name= 42 ms',
            '',
            '']))

    return uploaded[0]
def test_start_cmd(self):
    """Starting the Apache layout-test server should generate an httpd.conf
    with server-count directives matching number_of_servers."""
    # Fails on win - see https://bugs.webkit.org/show_bug.cgi?id=84726
    if sys.platform in ("cygwin", "win32"):
        return

    # Stand-in for waiting on the pid file: write it and report success.
    def fake_pid(_):
        host.filesystem.write_text_file("/tmp/WebKit/httpd.pid", "42")
        return True

    host = MockHost()
    host.executive = MockExecutive(should_log=True)
    test_port = test.TestPort(host)
    host.filesystem.write_text_file(test_port._path_to_apache_config_file(), "")
    server = LayoutTestApacheHttpd(test_port, "/mock/output_dir", number_of_servers=4)
    # Bypass the real port/process checks so start() can proceed.
    server._check_that_all_ports_are_available = lambda: True
    server._is_server_running_on_all_ports = lambda: True
    server._wait_for_action = fake_pid
    oc = OutputCapture()
    try:
        oc.capture_output()
        server.start()
        server.stop()
    finally:
        _, _, logs = oc.restore_output()
    self.assertIn("StartServers 4", logs)
    self.assertIn("MinSpareServers 4", logs)
    self.assertIn("MaxSpareServers 4", logs)
    self.assertTrue(host.filesystem.exists("/mock/output_dir/httpd.conf"))
def test_import_dir_with_no_tests_and_no_hg(self):
    """do_import() should tolerate a missing 'hg' binary (OSError) on a source
    dir with no tests (repository-files variant)."""
    FAKE_FILES.update(FAKE_REPOSITORY)
    host = MockHost()
    host.executive = MockExecutive2(exception=OSError())
    host.filesystem = MockFileSystem(files=FAKE_FILES)
    importer = TestImporter(
        host, FAKE_TEST_PATH,
        self._parse_options(['-n', '-d', 'w3c', '-s', FAKE_SOURCE_DIR]))
    oc = OutputCapture()
    oc.capture_output()
    try:
        # Passes as long as do_import() does not raise.
        importer.do_import()
    finally:
        oc.restore_output()
def test_check_httpd_success(self):
    """check_httpd() succeeds quietly when the httpd binary runs cleanly."""
    port = self.make_port(executive=MockExecutive2())
    port._path_to_apache = lambda: '/usr/sbin/httpd'
    oc = OutputCapture()
    oc.capture_output()
    self.assertTrue(port.check_httpd())
    _, _, log_output = oc.restore_output()
    # A healthy httpd produces no log output.
    self.assertEqual('', log_output)
def test_httpd_returns_error_code(self):
    """check_httpd() fails and logs a message when httpd exits non-zero."""
    port = self.make_port(executive=MockExecutive2(exit_code=1))
    port._path_to_apache = lambda: '/usr/sbin/httpd'
    oc = OutputCapture()
    oc.capture_output()
    self.assertFalse(port.check_httpd())
    _, _, log_output = oc.restore_output()
    self.assertEqual('httpd seems broken. Cannot run http tests.\n', log_output)
def test_parse_warnings_are_logged_if_not_in_lint_mode(self):
    """A syntax error in the expectations should be logged (not raised) when
    not running in lint mode."""
    oc = OutputCapture()
    try:
        oc.capture_output()
        self.parse_exp('-- this should be a syntax error', is_lint_mode=False)
    finally:
        _, _, logs = oc.restore_output()
    # assertNotEquals is a deprecated alias of assertNotEqual.
    self.assertNotEqual(logs, '')
def test_latest_try_jobs_non_json_response(self):
    """A non-JSON server response yields an empty job list and a log line."""
    capture = OutputCapture()
    try:
        capture.capture_output()
        self.assertEqual(latest_try_jobs(11113333, ('bar-builder',), self.web), [])
    finally:
        _, _, log_output = capture.restore_output()
    self.assertEqual(log_output, 'Invalid JSON: my non-JSON contents\n')
def test_run_memory_test(self):
    """Runs a memory perf test and checks both the logged summary lines and
    the aggregated statistics stored in the output JSON."""
    runner, port = self.create_runner_and_setup_results_template()
    runner._timestamp = 123456789
    port.host.filesystem.write_text_file(
        runner._base_path + '/Parser/memory-test.html', 'some content')
    output = OutputCapture()
    output.capture_output()
    try:
        unexpected_result_count = runner.run()
    finally:
        stdout, stderr, log = output.restore_output()
    self.assertEqual(unexpected_result_count, 0)
    # The finish time varies, so it is normalized before comparing.
    self.assertEqual(
        self.normalizeFinishedTime(log), '\n'.join([
            'Running 1 tests',
            'Running Parser/memory-test.html (1 of 1)',
            'RESULT Parser: memory-test= 1100.0 ms',
            'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
            'RESULT Parser: memory-test: JSHeap= 832000.0 bytes',
            'median= 829000.0 bytes, stdev= 15000.0 bytes, min= 811000.0 bytes, max= 848000.0 bytes',
            'RESULT Parser: memory-test: Malloc= 532000.0 bytes',
            'median= 529000.0 bytes, stdev= 13000.0 bytes, min= 511000.0 bytes, max= 548000.0 bytes',
            'Finished: 0.1 s',
            '',
            ''
        ]))
    results = runner.load_output_json()[0]['results']
    # The mock driver reports these raw sample values for every metric.
    values = [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19
    ]
    self.assertEqual(
        results['Parser/memory-test'], {
            'min': 1080.0,
            'max': 1120.0,
            'median': 1101.0,
            'stdev': 11.0,
            'avg': 1100.0,
            'unit': 'ms',
            'values': values
        })
    self.assertEqual(
        results['Parser/memory-test:JSHeap'], {
            'min': 811000.0,
            'max': 848000.0,
            'median': 829000.0,
            'stdev': 15000.0,
            'avg': 832000.0,
            'unit': 'bytes',
            'values': values
        })
    self.assertEqual(
        results['Parser/memory-test:Malloc'], {
            'min': 511000.0,
            'max': 548000.0,
            'median': 529000.0,
            'stdev': 13000.0,
            'avg': 532000.0,
            'unit': 'bytes',
            'values': values
        })
def test_optimize_all_suffixes_by_default(self):
    """Optimizing with a suffix list covering all baselines should dedupe
    platform baselines into the generic (non-platform) location."""
    test_port = self.tool.port_factory.get('test')
    self._write_test_file(test_port, 'another/test.html', "Dummy test contents")
    # Platform baselines are identical to the generic ones, so the optimizer
    # should remove the redundant platform copies.
    self._write_test_file(
        test_port, 'platform/test-mac-mac10.10/another/test-expected.txt',
        "result A")
    self._write_test_file(
        test_port, 'platform/test-mac-mac10.10/another/test-expected.png',
        "result A png")
    self._write_test_file(test_port, 'another/test-expected.txt', "result A")
    self._write_test_file(test_port, 'another/test-expected.png', "result A png")
    try:
        oc = OutputCapture()
        oc.capture_output()
        self.command.execute(
            optparse.Values({
                'suffixes': 'txt,wav,png',
                'no_modify_scm': True,
                'platform': 'test-mac-mac10.10'
            }), ['another/test.html'], self.tool)
    finally:
        oc.restore_output()
    # The platform copies should be gone ...
    self.assertFalse(
        self.tool.filesystem.exists(
            self.tool.filesystem.join(
                test_port.layout_tests_dir(),
                'platform/mac/another/test-expected.txt')))
    self.assertFalse(
        self.tool.filesystem.exists(
            self.tool.filesystem.join(
                test_port.layout_tests_dir(),
                'platform/mac/another/test-expected.png')))
    # ... but the generic baselines should remain.
    self.assertTrue(
        self.tool.filesystem.exists(
            self.tool.filesystem.join(test_port.layout_tests_dir(),
                                      'another/test-expected.txt')))
    self.assertTrue(
        self.tool.filesystem.exists(
            self.tool.filesystem.join(test_port.layout_tests_dir(),
                                      'another/test-expected.png')))
def test_help_command(self):
    """'help' should print usage to stdout with nothing on stderr or logs."""
    capture = OutputCapture()
    capture.capture_output()
    tool = WebKitPatch('path')
    tool.main(['tool', 'help'])
    stdout, stderr, log_output = capture.restore_output()
    self.assertTrue(stdout.startswith('Usage: '))
    self.assertEqual('', stderr)
    self.assertEqual('', log_output)
def test_import_dir_with_no_tests_and_no_hg(self):
    """do_import() should tolerate a missing 'hg' binary (OSError) on a source
    dir with no tests; uses a real Host with a temp destination dir."""
    # FIXME: Use MockHosts instead.
    host = Host()
    host.executive = MockExecutive2(exception=OSError())
    importer = TestImporter(host, DUMMY_SOURCE_DIR, DUMMY_REPO_DIR, optparse.Values({"overwrite": False}))
    importer.source_directory = importer.path_from_webkit_root(
        "Tools", "Scripts", "webkitpy", "w3c")
    importer.destination_directory = tempfile.mkdtemp(prefix='csswg')
    oc = OutputCapture()
    oc.capture_output()
    try:
        # Passes as long as do_import() does not raise.
        importer.do_import()
    finally:
        oc.restore_output()
    # Clean up the real temp directory created above.
    shutil.rmtree(importer.destination_directory, ignore_errors=True)
def test_import_dir_with_no_tests_and_no_hg(self):
    """do_import() should tolerate a missing 'hg' binary (OSError) on a source
    dir with no tests (destination-option variant)."""
    host = MockHost()
    host.executive = MockExecutive2(exception=OSError())
    host.filesystem = MockFileSystem(files=FAKE_FILES)
    importer = TestImporter(
        host, FAKE_SOURCE_DIR, FAKE_REPO_DIR,
        optparse.Values({
            "overwrite": False,
            'destination': 'w3c'
        }))
    oc = OutputCapture()
    oc.capture_output()
    try:
        # Passes as long as do_import() does not raise.
        importer.do_import()
    finally:
        oc.restore_output()
def test_import_dir_with_no_tests(self):
    """do_import() should tolerate an hg ScriptError on a source dir with no
    tests (repository-files variant)."""
    FAKE_FILES.update(FAKE_REPOSITORY)
    host = MockHost()
    host.executive = MockExecutive2(exception=ScriptError(
        "abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"
    ))
    host.filesystem = MockFileSystem(files=FAKE_FILES)
    importer = TestImporter(
        host, FAKE_TEST_PATH,
        self._parse_options(['-n', '-d', 'w3c', '-s', FAKE_SOURCE_DIR]))
    oc = OutputCapture()
    oc.capture_output()
    try:
        # Passes as long as do_import() does not raise.
        importer.do_import()
    finally:
        oc.restore_output()
class PrintBaselinesTest(unittest.TestCase):
    """Tests for the print-baselines command, covering default, multi-platform,
    and CSV output formats."""

    def setUp(self):
        # Force every port lookup to return the test-win-xp port so output is
        # deterministic.
        self.oc = None
        self.tool = MockTool()
        self.test_port = self.tool.port_factory.get('test-win-xp')
        self.tool.port_factory.get = lambda port_name=None: self.test_port
        self.tool.port_factory.all_port_names = lambda: TestPort.ALL_BASELINE_VARIANTS

    def tearDown(self):
        # Restore stdout/stderr if a test failed between capture and restore.
        if self.oc:
            self.restore_output()

    def capture_output(self):
        """Begins capturing stdout/stderr/logs for the current test."""
        self.oc = OutputCapture()
        self.oc.capture_output()

    def restore_output(self):
        """Stops capturing and returns (stdout, stderr, logs)."""
        stdout, stderr, logs = self.oc.restore_output()
        self.oc = None
        return (stdout, stderr, logs)

    def test_basic(self):
        # Default output: baselines for the single (stubbed) port.
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform=None), ['passes/text.html'], self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout,
                                  ('// For test-win-xp\n'
                                   'passes/text-expected.png\n'
                                   'passes/text-expected.txt\n'))

    def test_multiple(self):
        # A platform glob should emit one section per matching port.
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform='test-win-*'), ['passes/text.html'], self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout,
                                  ('// For test-win-win7\n'
                                   'passes/text-expected.png\n'
                                   'passes/text-expected.txt\n'
                                   '\n'
                                   '// For test-win-xp\n'
                                   'passes/text-expected.png\n'
                                   'passes/text-expected.txt\n'))

    def test_csv(self):
        # CSV mode: one comma-separated row per baseline.
        command = PrintBaselines()
        command.bind_to_tool(self.tool)
        self.capture_output()
        command.execute(MockOptions(all=False, platform='*xp', csv=True, include_virtual_tests=False), ['passes/text.html'], self.tool)
        stdout, _, _ = self.restore_output()
        self.assertMultiLineEqual(stdout,
                                  ('test-win-xp,passes/text.html,None,png,passes/text-expected.png,None\n'
                                   'test-win-xp,passes/text.html,None,txt,passes/text-expected.txt,None\n'))
def test_import_dir_with_no_tests(self):
    """Importing a directory with no tests should complete cleanly even
    when the executive raises (no repository found)."""
    host = MockHost()
    host.executive = MockExecutive2(exception=ScriptError(
        "abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"
    ))
    host.filesystem = MockFileSystem(files=FAKE_FILES)
    options = optparse.Values({"overwrite": False, 'destination': 'w3c'})
    importer = TestImporter(host, FAKE_SOURCE_DIR, FAKE_REPO_DIR, options)

    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        importer.do_import()
    finally:
        output_capture.restore_output()
def test_rebaseline(self):
    """Exercise the path from command.execute() to command._rebaseline().

    _rebaseline() itself is stubbed out here (its behavior is covered by
    TestRebaselineJson); we only check that execute() computes the
    expected builder-to-suffixes mapping and passes it through.
    """
    self.test_list = {}

    def rebaseline_stub(options, test_list):
        # Record what execute() would have rebaselined.
        self.test_list = test_list

    self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
    self.command._tests_to_update = lambda builder: ['mock/path/to/test.html']
    self.command._rebaseline = rebaseline_stub
    self._zero_out_test_expectations()

    old_exact_matches = builders._exact_matches
    oc = OutputCapture()
    try:
        builders._exact_matches = {
            "MOCK builder": {
                "port_name": "test-mac-leopard",
                "specifiers": set(["mock-specifier"])
            },
        }
        oc.capture_output()
        self.command.execute(
            MockOptions(optimize=True, builders=None, suffixes=["txt"], verbose=True),
            [], self.tool)
    finally:
        # Restore captured output and the patched builder table even on failure.
        oc.restore_output()
        builders._exact_matches = old_exact_matches

    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(
        self.test_list,
        {'mock/path/to/test.html': {'MOCK builder': ['txt']}})
def test_run_test_set_for_parser_tests(self):
    """Running a Bindings test and a Parser test should produce no
    unexpected results and the expected combined log output."""
    runner, port = self.create_runner()
    test_names = ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html']
    tests = self._tests_for_runner(runner, test_names)

    capture = OutputCapture()
    capture.capture_output()
    try:
        unexpected_result_count = runner._run_tests_set(tests)
    finally:
        stdout, stderr, log = capture.restore_output()

    self.assertEqual(unexpected_result_count, 0)
    expected_log = EventTargetWrapperTestData.output + SomeParserTestData.output
    self.assertEqual(self._normalize_output(log), expected_log)
def test_rebaseline_expectations_noop(self):
    """With no tests marked Rebaseline, execute() writes nothing and
    logs a single explanatory message."""
    self._zero_out_test_expectations()
    capture = OutputCapture()
    try:
        capture.capture_output()
        self.command.execute(self.options, [], self.tool)
    finally:
        _, _, logs = capture.restore_output()
    self.assertEqual(self.tool.filesystem.written_files, {})
    self.assertEqual(logs, 'Did not find any tests marked Rebaseline.\n')
def test_skipped_entry_dont_exist(self):
    """A Skipped-list entry naming a nonexistent test logs a warning."""
    port = MockHost().port_factory.get('qt')
    expectations_dict = OrderedDict()
    expectations_dict['expectations'] = ''
    port.expectations_dict = lambda: expectations_dict
    port.skipped_layout_tests = lambda tests: set(['foo/bar/baz.html'])
    capture = OutputCapture()
    capture.capture_output()
    try:
        # Constructing TestExpectations triggers the warning we assert on;
        # the instance itself is not needed.
        TestExpectations(port)
    finally:
        # Restore stdout/stderr even if construction raises, so a failure
        # here cannot swallow output from subsequent tests.
        _, _, logs = capture.restore_output()
    self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs)
def test_latest_try_jobs_http_error(self):
    """When the fetch raises a URLError, latest_try_jobs() returns an
    empty list and logs the failing URL."""

    def raise_error(_):
        raise urllib2.URLError('Some request error message')

    self.web.get_binary = raise_error
    capture = OutputCapture()
    try:
        capture.capture_output()
        self.assertEqual(latest_try_jobs(11112222, ('bar-builder',), self.web), [])
    finally:
        _, _, logs = capture.restore_output()
    self.assertEqual(logs, 'Request failed to URL: https://codereview.chromium.org/api/11112222\n')
def test_show_results_html_file(self):
    """show_results_html_file() should invoke run-safari with the
    results file appended to the command line."""
    port = self.make_port()
    port._executive = MockExecutive(should_log=True)
    capture = OutputCapture()
    capture.capture_output()
    port.show_results_html_file('test.html')
    _, _, logs = capture.restore_output()
    # cygpath makes the middle of the command line unpredictable, so
    # only the prefix and suffix are asserted.
    expected_prefix = "MOCK run_command: ['Tools/Scripts/run-safari', '--release', '"
    expected_suffix = "test.html'], cwd=/mock-checkout\n"
    self.assertTrue(logs.startswith(expected_prefix))
    self.assertTrue(logs.endswith(expected_suffix))
def test_import_dir_with_no_tests(self):
    """Importing an empty directory must not raise, and the real temp
    directory it uses must always be cleaned up."""
    # FIXME: Use MockHosts instead.
    host = Host()
    host.executive = MockExecutive2(exception=ScriptError(
        "abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"
    ))
    importer = TestImporter(host, '/w3c', '/blink', optparse.Values({"overwrite": False}))
    importer.source_directory = importer.path_from_webkit_root(
        "Tools", "Scripts", "webkitpy", "w3c")
    importer.destination_directory = tempfile.mkdtemp(prefix='csswg')
    oc = OutputCapture()
    oc.capture_output()
    try:
        importer.do_import()
    finally:
        oc.restore_output()
        # Remove the real temp dir even when do_import() raises;
        # previously the rmtree ran only on the success path and the
        # directory leaked on failure.
        shutil.rmtree(importer.destination_directory, ignore_errors=True)
def test_run_with_memory_output(self):
    """run() should report Malloc/JSHeap metrics alongside the timing
    metric when the mock driver supplies memory results."""
    port = MockPort()
    test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
    memory_results = {'Malloc': 10, 'JSHeap': 5}
    self.maxDiff = None
    # Driver yields times 1..20; only 19 values appear in the expected
    # results below, so the first sample is apparently dropped.
    # NOTE(review): that warm-up interpretation is inferred from the
    # 19-element 'values' lists — confirm against PageLoadingPerfTest.run().
    driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test, memory_results)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        self.assertEqual(
            test.run(driver, None),
            {
                'some-test': {
                    'max': 20000,
                    'avg': 11000.0,
                    'median': 11000,
                    'stdev': 5627.314338711378,
                    'min': 2000,
                    'unit': 'ms',
                    'values': [i * 1000 for i in range(2, 21)]
                },
                'some-test:Malloc': {
                    'max': 10,
                    'avg': 10.0,
                    'median': 10,
                    'min': 10,
                    'stdev': 0.0,
                    'unit': 'bytes',
                    'values': [10] * 19
                },
                'some-test:JSHeap': {
                    'max': 5,
                    'avg': 5.0,
                    'median': 5,
                    'min': 5,
                    'stdev': 0.0,
                    'unit': 'bytes',
                    'values': [5] * 19
                }
            })
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    # Results are reported via the logging channel, not stdout/stderr.
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(
        actual_logs,
        'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 5627.31433871 ms, min= 2000 ms, max= 20000 ms\n' +
        'RESULT some-test: Malloc= 10.0 bytes\nmedian= 10 bytes, stdev= 0.0 bytes, min= 10 bytes, max= 10 bytes\n' +
        'RESULT some-test: JSHeap= 5.0 bytes\nmedian= 5 bytes, stdev= 0.0 bytes, min= 5 bytes, max= 5 bytes\n'
    )
def _create_and_run_perfalizer(self, commands_to_fail=None):
    """Run a PerfalizerTask against mock objects and return its log lines.

    commands_to_fail: iterable of command names (webkit-patch subcommands,
        or the sentinels 'run-perf-tests' / 'results-page') whose execution
        should be simulated as failing. Defaults to no failures.
    """
    # A mutable default argument ([]) is shared across calls; use None
    # and normalize instead.
    if commands_to_fail is None:
        commands_to_fail = []
    tool = MockTool()
    patch = tool.bugs.fetch_attachment(10000)
    logs = []

    def logger(message):
        logs.append(message)

    def run_webkit_patch(args):
        if args[0] in commands_to_fail:
            raise ScriptError

    def run_perf_test(build_path, description):
        self.assertTrue(description == 'without 10000' or description == 'with 10000')
        if 'run-perf-tests' in commands_to_fail:
            return -1
        if 'results-page' not in commands_to_fail:
            tool.filesystem.write_text_file(
                tool.filesystem.join(build_path, 'PerformanceTestResults.html'),
                'results page')
        return 0

    perfalizer = PerfalizerTask(tool, patch, logger)
    perfalizer._port = TestPort(tool)
    perfalizer.run_webkit_patch = run_webkit_patch
    perfalizer._run_perf_test = run_perf_test

    capture = OutputCapture()
    capture.capture_output()
    # run() should succeed exactly when nothing is set up to fail.
    if commands_to_fail:
        self.assertFalse(perfalizer.run())
    else:
        self.assertTrue(perfalizer.run())
    capture.restore_output()

    return logs
def test_rebaseline(self):
    """execute() should shell out to rebaseline-test-internal once, with
    the requested suffixes, builder, test and flags."""
    self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
    self.command._tests_to_update = lambda builder: ['mock/path/to/test.html']
    self._zero_out_test_expectations()

    saved_exact_matches = builders._exact_matches
    capture = OutputCapture()
    try:
        builders._exact_matches = {
            "MOCK builder": {
                "port_name": "test-mac-leopard",
                "specifiers": set(["mock-specifier"])
            },
        }
        capture.capture_output()
        options = MockOptions(optimize=False,
                              builders=None,
                              suffixes="txt,png",
                              verbose=True,
                              move_overwritten_baselines=False,
                              update_expectations=True)
        self.command.execute(options, [], self.tool)
    finally:
        capture.restore_output()
        builders._exact_matches = saved_exact_matches

    # Drop incidental tool invocations (perl/xcrun/ulimit) before comparing.
    interesting_calls = [call for call in self.tool.executive.calls
                         if call[0] not in ('perl', '/usr/bin/xcrun', '/usr/bin/ulimit')]
    self.assertEqual(interesting_calls, [[[
        'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png',
        '--builder', 'MOCK builder', '--test', 'mock/path/to/test.html',
        '--update-expectations', 'True', '--verbose'
    ]]])
def test_run_with_bad_output(self):
    """A non-numeric sample from the driver should abort the run (None)
    and log the offending value."""
    capture = OutputCapture()
    capture.capture_output()
    try:
        test = PageLoadingPerfTest(None, 'some-test', '/path/some-dir/some-test')
        samples = [1, 2, 3, 4, 5, 6, 7, 'some error', 9, 10,
                   11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
        driver = TestPageLoadingPerfTest.MockDriver(samples)
        self.assertEqual(test.run(driver, None), None)
    finally:
        stdout, stderr, logs = capture.restore_output()
    self.assertEqual(stdout, '')
    self.assertEqual(stderr, '')
    self.assertEqual(logs, 'error: some-test\nsome error\n')
def test_parse_output_with_subtests(self):
    """Parsing driver output with per-subtest metric lines should record
    one metric entry per named subtest plus a final unnamed (main) entry
    for the bare ':Time ->' line, and log only the main result."""
    output = DriverOutput("""
Description: this is a test description.
some test:Time -> [1, 2, 3, 4, 5] ms
some other test = else:Time -> [6, 7, 8, 9, 10] ms
some other test = else:Malloc -> [11, 12, 13, 14, 15] bytes
Array Construction, []:Time -> [11, 12, 13, 14, 15] ms
Concat String:Time -> [15163, 15304, 15386, 15608, 15622] ms
jQuery - addClass:Time -> [2785, 2815, 2826, 2841, 2861] ms
Dojo - div:only-child:Time -> [7825, 7910, 7950, 7958, 7970] ms
Dojo - div:nth-child(2n+1):Time -> [3620, 3623, 3633, 3641, 3658] ms
Dojo - div > div:Time -> [10158, 10172, 10180, 10183, 10231] ms
Dojo - div ~ div:Time -> [6673, 6675, 6714, 6848, 6902] ms
:Time -> [1080, 1120, 1095, 1101, 1104] ms
""", image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
        # Stub the driver call so every iteration replays the canned output.
        test.run_single = lambda driver, path, time_out_ms: output
        self.assertTrue(test.run(10))
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    subtests = test._metrics
    # Subtests appear in first-seen order; the unnamed main entry is last.
    self.assertEqual(map(lambda test: test['name'], subtests),
                     ['some test', 'some other test = else', 'Array Construction, []',
                      'Concat String', 'jQuery - addClass', 'Dojo - div:only-child',
                      'Dojo - div:nth-child(2n+1)', 'Dojo - div > div',
                      'Dojo - div ~ div', None])
    some_test_metrics = subtests[0]['metrics']
    self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
    self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'some test'])
    # Values repeat 4 times — presumably once per iteration of the run;
    # TODO(review): confirm the iteration count against PerfTest.run().
    self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1, 2, 3, 4, 5] * 4)
    some_other_test_metrics = subtests[1]['metrics']
    # A subtest may carry several metrics (here Time and Malloc).
    self.assertEqual(map(lambda metric: metric.name(), some_other_test_metrics), ['Time', 'Malloc'])
    self.assertEqual(some_other_test_metrics[0].path(), ['some-dir', 'some-test', 'some other test = else'])
    self.assertEqual(some_other_test_metrics[0].flattened_iteration_values(), [6, 7, 8, 9, 10] * 4)
    self.assertEqual(some_other_test_metrics[1].path(), ['some-dir', 'some-test', 'some other test = else'])
    self.assertEqual(some_other_test_metrics[1].flattened_iteration_values(), [11, 12, 13, 14, 15] * 4)
    main_metrics = subtests[len(subtests) - 1]['metrics']
    self.assertEqual(map(lambda metric: metric.name(), main_metrics), ['Time'])
    self.assertEqual(main_metrics[0].path(), ['some-dir', 'some-test'])
    self.assertEqual(main_metrics[0].flattened_iteration_values(), [1080, 1120, 1095, 1101, 1104] * 4)
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    # Only the main (unnamed) metric is logged as a RESULT line.
    self.assertEqual(actual_logs, """DESCRIPTION: this is a test description.
RESULT some-dir: some-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
""")
def test_run_test_set(self):
    """Running a mixed set of passing/failing/crashing inspector tests
    should yield one unexpected result per non-passing test."""
    runner, port = self.create_runner()
    test_names = ['inspector/pass.html', 'inspector/silent.html',
                  'inspector/failed.html', 'inspector/tonguey.html',
                  'inspector/timeout.html', 'inspector/crash.html']
    tests = self._tests_for_runner(runner, test_names)

    capture = OutputCapture()
    capture.capture_output()
    try:
        unexpected_result_count = runner._run_tests_set(tests)
    finally:
        stdout, stderr, log = capture.restore_output()

    # Everything except inspector/pass.html is unexpected.
    self.assertEqual(unexpected_result_count, len(tests) - 1)
    self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)
def test_rebaseline_test_internal_with_port_that_lacks_buildbot(self): self.tool.executive = MockExecutive2() # FIXME: it's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports. port = self.tool.port_factory.get('test-win-vista') self._write( port._filesystem.join( port.layout_tests_dir(), 'platform/test-win-vista/failures/expected/image-expected.txt' ), 'original vista result') old_exact_matches = builders._exact_matches oc = OutputCapture() try: builders._exact_matches = { "MOCK XP": { "port_name": "test-win-xp" }, "MOCK Vista": { "port_name": "test-win-vista" }, } options = MockOptions(optimize=True, builder="MOCK Vista", suffixes="txt", move_overwritten_baselines_to=None, verbose=True, test="failures/expected/image.html", results_directory=None) oc.capture_output() self.command.execute(options, [], self.tool) finally: out, _, _ = oc.restore_output() builders._exact_matches = old_exact_matches self.assertMultiLineEqual( self._read( self.tool.filesystem.join( port.layout_tests_dir(), 'platform/test-win-vista/failures/expected/image-expected.txt' )), 'MOCK Web result, convert 404 to None=True') self.assertFalse( self.tool.filesystem.exists( self.tool.filesystem.join( port.layout_tests_dir(), 'platform/test-win-xp/failures/expected/image-expected.txt' ))) self.assertMultiLineEqual( out, '{"add": [], "remove-lines": [{"test": "failures/expected/image.html", "builder": "MOCK Vista"}]}\n' )
def test_convert_attributes_if_needed(self):
    """convert_attributes_if_needed() should rewrite relative src/href
    paths using the reference support info."""
    test_html = """<html>
<head>
<link href="../support/base-style.css">
<video src="resources/video.mkv"></video>
<script src="../../some-script.js"></script>
<style src="../../../some-style.css"></style>
</head>
<body>
<img src="../../../../some-image.jpg">
</body>
</html>
"""
    reference_support_info = {
        'reference_relpath': '../',
        'files': ['../../some-script.js',
                  '../../../some-style.css',
                  '../../../../some-image.jpg',
                  '../support/base-style.css',
                  'resources/video.mkv'],
        'elements': ['script', 'style', 'img', 'link', 'video']
    }
    converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, reference_support_info)

    capture = OutputCapture()
    capture.capture_output()
    try:
        converter.feed(test_html)
        converter.close()
        converted = converter.output()
    finally:
        capture.restore_output()

    self.verify_conversion_happened(converted)
    self.verify_reference_relative_paths(converted, reference_support_info)
def test_convert_for_webkit_harness_and_properties(self):
    """convert_for_webkit() on a test that references testharness.js and
    testharness.css and contains 4 prefixed properties (3 in a style
    block, 1 inline) should rewrite both harness paths and properties."""
    test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">
#block1 { @test0@: @propvalue0@; }
#block2 { @test1@: @propvalue1@; }
#block3 { @test2@: @propvalue2@; }
</style>
</head>
<body>
<div id="elem1" style="@test3@: @propvalue3@;"></div>
</body>
</html>
"""
    fake_dir_path = self.fake_dir_path('harnessandprops')
    converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)

    capture = OutputCapture()
    capture.capture_output()
    try:
        # generated is (properties, property_values, filled-in html).
        generated = self.generate_test_content_properties_and_values(
            converter.prefixed_properties, converter.prefixed_property_values, 2, test_html)
        converter.feed(generated[2])
        converter.close()
        converted = converter.output()
    finally:
        capture.restore_output()

    self.verify_conversion_happened(converted)
    self.verify_test_harness_paths(converter, converted[2], fake_dir_path, 1, 1)
    self.verify_prefixed_properties(converted, generated[0])
    self.verify_prefixed_property_values(converted, generated[1])
def _assert_failed_on_line(self, output_text, expected_log):
    """Drive PerfTest._run_with_driver() with canned driver output and
    assert that it fails while logging exactly expected_log."""
    driver_output = DriverOutput(output_text, image=None, image_hash=None, audio=None)
    capture = OutputCapture()
    capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        test.run_single = lambda driver, path, time_out_ms: driver_output
        self.assertFalse(test._run_with_driver(None, None))
    finally:
        stdout, stderr, logs = capture.restore_output()
    self.assertEqual(stdout, '')
    self.assertEqual(stderr, '')
    self.assertEqual(logs, expected_log)