def assert_architecture(self, port_name=None, file_output=None, expected_architecture=None):
    """Build a ChromiumLinuxPort and verify its architecture and baseline paths.

    file_output, when given, is fed to a MockExecutive2 so the port's
    architecture probe sees that string as the driver-inspection output.
    """
    fs = filesystem_mock.MockFileSystem()
    # Pretend that only the DumpRenderTree binary exists on disk.
    fs.exists = lambda path: 'DumpRenderTree' in path
    mock_executive = executive_mock.MockExecutive2(file_output) if file_output else None
    port = chromium_linux.ChromiumLinuxPort(port_name=port_name,
                                            executive=mock_executive,
                                            filesystem=fs)
    self.assertEquals(port.architecture(), expected_architecture)
    if expected_architecture == 'x86':
        self.assertTrue(port.baseline_path().endswith('chromium-linux-x86'))
        search_path = port.baseline_search_path()
        self.assertTrue(search_path[0].endswith('chromium-linux-x86'))
        self.assertTrue(search_path[1].endswith('chromium-linux'))
    else:
        self.assertTrue(port.baseline_path().endswith('chromium-linux'))
        self.assertTrue(port.baseline_search_path()[0].endswith('chromium-linux'))
def test_interrupt_if_at_failure_limits(self):
    """_interrupt_if_at_failure_limits raises only once a limit is exceeded.

    Fix: the original assigned the result of the callable form of
    assertRaises to a variable; that form returns None, so the assignment
    was dead and misleading. The assignment is removed.
    """
    port = Mock()
    port.TEST_PATH_SEPARATOR = '/'
    port._filesystem = filesystem_mock.MockFileSystem()
    manager = Manager(port=port, options=MockOptions(), printer=Mock())
    manager._options = MockOptions(exit_after_n_failures=None,
                                   exit_after_n_crashes_or_timeouts=None)

    result_summary = ResultSummary(expectations=Mock(), test_files=[])
    result_summary.unexpected_failures = 100
    result_summary.unexpected_crashes = 50
    result_summary.unexpected_timeouts = 50

    # No exception when the exit_after* options are None.
    manager._interrupt_if_at_failure_limits(result_summary)

    # No exception when we haven't hit the limit yet.
    manager._options.exit_after_n_failures = 101
    manager._options.exit_after_n_crashes_or_timeouts = 101
    manager._interrupt_if_at_failure_limits(result_summary)

    # Interrupt if we've exceeded either limit:
    manager._options.exit_after_n_crashes_or_timeouts = 10
    self.assertRaises(TestRunInterruptedException,
                      manager._interrupt_if_at_failure_limits, result_summary)

    manager._options.exit_after_n_crashes_or_timeouts = None
    manager._options.exit_after_n_failures = 10
    self.assertRaises(TestRunInterruptedException,
                      manager._interrupt_if_at_failure_limits, result_summary)
def _verify_expectations_overrides(self, port_name):
    """Check that a google-chrome port layers chrome overrides after chromium ones."""
    # FIXME: make this more robust when we have the Tree() abstraction.
    # We should be able to test for the files existing or not, and
    # be able to control the contents better.
    chromium_port = factory.get("chromium-cg-mac")
    chromium_base = chromium_port.path_from_chromium_base()
    fs = filesystem_mock.MockFileSystem()
    port = google_chrome.GetGoogleChromePort(port_name=port_name,
                                             options=None, filesystem=fs)

    expected_chromium_overrides = '// chromium overrides\n'
    expected_chrome_overrides = '// chrome overrides\n'
    chromium_path = fs.join(chromium_base, 'webkit', 'tools', 'layout_tests',
                            'test_expectations.txt')
    chrome_path = fs.join(chromium_base, 'webkit', 'tools', 'layout_tests',
                          'test_expectations_chrome.txt')

    # With no chrome-specific file, only the chromium overrides show up.
    fs.files[chromium_path] = expected_chromium_overrides
    fs.files[chrome_path] = None
    self.assertEqual(expected_chromium_overrides,
                     port.test_expectations_overrides())

    # With both files present, the chrome overrides are appended.
    fs.files[chrome_path] = expected_chrome_overrides
    self.assertEqual(port.test_expectations_overrides(),
                     expected_chromium_overrides + expected_chrome_overrides)
def get_test_config(test_files=None, result_files=None):
    """Build a TestConfig whose mock filesystem contains the given files.

    Args:
        test_files: paths relative to the LayoutTests directory (optional).
        result_files: paths relative to the results directory (optional).

    Fix: the original used mutable default arguments ([]), which Python
    creates once and shares across every call; any caller mutating them
    would leak state between tests. Defaults are now None.
    """
    test_files = test_files or []
    result_files = result_files or []

    # We could grab this from port.layout_tests_dir(), but instantiating
    # a fully mocked port is a pain.
    layout_tests_directory = "/mock-checkout/LayoutTests"
    results_directory = '/WebKitBuild/Debug/layout-test-results'
    mock_filesystem = filesystem_mock.MockFileSystem()
    # 'relpath' instead of the original 'file', which shadowed the builtin.
    for relpath in test_files:
        file_path = mock_filesystem.join(layout_tests_directory, relpath)
        mock_filesystem.files[file_path] = ''
    for relpath in result_files:
        file_path = mock_filesystem.join(results_directory, relpath)
        mock_filesystem.files[file_path] = ''

    class TestMacPort(WebKitPort):
        port_name = "mac"

        def __init__(self):
            # FIXME: This should use MockExecutive and MockUser as well.
            WebKitPort.__init__(self, filesystem=mock_filesystem)

    return TestConfig(TestMacPort(), layout_tests_directory,
                      results_directory,
                      ('mac', 'mac-leopard', 'win', 'linux'),
                      mock_filesystem, MockSCM())
def get_shards(self, num_workers, fully_parallel, test_list=None):
    """Shard the given tests (falling back to self.test_list) with a ManagerWrapper."""
    tests = test_list or self.test_list
    test_port = layout_tests.port.get(port_name='test')
    test_port._filesystem = filesystem_mock.MockFileSystem()
    self.manager = ManagerWrapper(port=test_port, options=Mock(), printer=Mock())
    return self.manager._shard_tests(tests, num_workers, fully_parallel)
def unit_test_filesystem(files=None):
    """Return the FileSystem object used by the unit tests."""
    test_list = unit_test_list()
    files = files or {}

    def record(store, test, suffix, contents):
        # Files live at LAYOUT_TEST_DIR/<dir of test.name>/<test.base><suffix>.
        dir_part = test.name[:test.name.rfind('/')]
        store[LAYOUT_TEST_DIR + '/' + dir_part + '/' + test.base + suffix] = contents

    # Add each test and the expected output, if any.
    for test in test_list.tests.values():
        record(files, test, '.html', '')
        if test.is_reftest:
            continue
        if test.actual_audio:
            record(files, test, '-expected.wav', test.expected_audio)
            continue
        record(files, test, '-expected.txt', test.expected_text)
        record(files, test, '-expected.png', test.expected_image)

    # Add the test_expectations file.
    files[LAYOUT_TEST_DIR + '/platform/test/test_expectations.txt'] = """
WONTFIX : failures/expected/checksum.html = IMAGE
WONTFIX : failures/expected/crash.html = CRASH
WONTFIX : failures/expected/image.html = IMAGE
WONTFIX : failures/expected/audio.html = AUDIO
WONTFIX : failures/expected/image_checksum.html = IMAGE
WONTFIX : failures/expected/mismatch.html = IMAGE
WONTFIX : failures/expected/missing_check.html = MISSING PASS
WONTFIX : failures/expected/missing_image.html = MISSING PASS
WONTFIX : failures/expected/missing_audio.html = MISSING PASS
WONTFIX : failures/expected/missing_text.html = MISSING PASS
WONTFIX : failures/expected/newlines_leading.html = TEXT
WONTFIX : failures/expected/newlines_trailing.html = TEXT
WONTFIX : failures/expected/newlines_with_excess_CR.html = TEXT
WONTFIX : failures/expected/reftest.html = IMAGE
WONTFIX : failures/expected/text.html = TEXT
WONTFIX : failures/expected/timeout.html = TIMEOUT
WONTFIX SKIP : failures/expected/hang.html = TIMEOUT
WONTFIX SKIP : failures/expected/keyboard.html = CRASH
WONTFIX SKIP : failures/expected/exception.html = CRASH
"""

    # FIXME: This test was only being ignored because of missing a leading '/'.
    # Fixing the typo causes several tests to assert, so disabling the test entirely.
    # Add in a file should be ignored by test_files.find().
    #files[LAYOUT_TEST_DIR + '/userscripts/resources/iframe.html'] = 'iframe'

    # Make sure at least the checkout_root exists as a directory.
    fs = filesystem_mock.MockFileSystem(files, dirs=set(['/mock-checkout']))
    fs._tests = test_list
    return fs
def unit_test_filesystem(files=None):
    """Return the FileSystem object used by the unit tests."""
    test_list = unit_test_list()
    files = files or {}

    def record(store, test, suffix, contents):
        # Files live at LAYOUT_TEST_DIR/<dir of test.name>/<test.base><suffix>.
        dir_part = test.name[:test.name.rfind('/')]
        store[LAYOUT_TEST_DIR + '/' + dir_part + '/' + test.base + suffix] = contents

    # Add each test and the expected output, if any.
    for test in test_list.tests.values():
        record(files, test, '.html', '')
        if test.is_reftest:
            continue
        if test.actual_audio:
            record(files, test, '-expected.wav', test.expected_audio)
            continue
        record(files, test, '-expected.txt', test.expected_text)
        record(files, test, '-expected.checksum', test.expected_checksum)
        record(files, test, '-expected.png', test.expected_image)

    # Add the test_expectations file.
    files[LAYOUT_TEST_DIR + '/platform/test/test_expectations.txt'] = """
WONTFIX : failures/expected/checksum.html = IMAGE
WONTFIX : failures/expected/crash.html = CRASH
// This one actually passes because the checksums will match.
WONTFIX : failures/expected/image.html = PASS
WONTFIX : failures/expected/audio.html = AUDIO
WONTFIX : failures/expected/image_checksum.html = IMAGE
WONTFIX : failures/expected/mismatch.html = IMAGE
WONTFIX : failures/expected/missing_check.html = MISSING PASS
WONTFIX : failures/expected/missing_image.html = MISSING PASS
WONTFIX : failures/expected/missing_audio.html = MISSING PASS
WONTFIX : failures/expected/missing_text.html = MISSING PASS
WONTFIX : failures/expected/newlines_leading.html = TEXT
WONTFIX : failures/expected/newlines_trailing.html = TEXT
WONTFIX : failures/expected/newlines_with_excess_CR.html = TEXT
WONTFIX : failures/expected/reftest.html = IMAGE
WONTFIX : failures/expected/text.html = TEXT
WONTFIX : failures/expected/timeout.html = TIMEOUT
WONTFIX SKIP : failures/expected/hang.html = TIMEOUT
WONTFIX SKIP : failures/expected/keyboard.html = CRASH
WONTFIX SKIP : failures/expected/exception.html = CRASH
"""

    # Add in a file should be ignored by test_files.find().
    # NOTE(review): there is no '/' between LAYOUT_TEST_DIR and the path, so
    # this key likely never matches a real test path. A sibling copy of this
    # helper notes that adding the '/' breaks other tests, so the typo is
    # deliberately left as-is — confirm intent before changing.
    files[LAYOUT_TEST_DIR + 'userscripts/resources/iframe.html'] = 'iframe'

    fs = filesystem_mock.MockFileSystem(files)
    fs._tests = test_list
    return fs
def test_determine_architecture_fails(self):
    """architecture() falls back to 'x86' and propagates probe exceptions."""
    # Test that we default to 'x86' if the driver doesn't exist.
    fs = filesystem_mock.MockFileSystem()
    self.assertEquals(
        chromium_linux.ChromiumLinuxPort(filesystem=fs).architecture(), 'x86')

    # Test that we default to 'x86' on an unknown architecture.
    fs = filesystem_mock.MockFileSystem()
    fs.exists = lambda path: True
    port = chromium_linux.ChromiumLinuxPort(
        filesystem=fs, executive=executive_mock.MockExecutive2('win32'))
    self.assertEquals(port.architecture(), 'x86')

    # Test that we raise errors if something weird happens.
    fs = filesystem_mock.MockFileSystem()
    fs.exists = lambda path: True
    failing_executive = executive_mock.MockExecutive2(exception=AssertionError)
    self.assertRaises(AssertionError, chromium_linux.ChromiumLinuxPort,
                      filesystem=fs, executive=failing_executive)
def make_config(self, output='', files=None, exit_code=0, exception=None,
                run_command_fn=None):
    """Build a config.Config backed by a MockExecutive2 and MockFileSystem.

    Fix: the original declared `files={}` — a mutable default argument that
    is created once and shared across calls, so one test's filesystem
    contents could leak into another's. The default is now None.
    """
    files = files if files is not None else {}
    e = executive_mock.MockExecutive2(output=output, exit_code=exit_code,
                                      exception=exception,
                                      run_command_fn=run_command_fn)
    fs = filesystem_mock.MockFileSystem(files)
    return config.Config(e, fs)
def main(argv=None):
    """Print the default build configuration.

    Invoking with exactly '--mock <configuration>' substitutes a mocked
    executive/filesystem pair; otherwise the real ones are used.
    """
    argv = argv or sys.argv
    if len(argv) == 3 and argv[1] == '--mock':
        exe = executive_mock.MockExecutive2(output='foo\nfoo/%s' % argv[2])
        fs = filesystem_mock.MockFileSystem({'foo/Configuration': argv[2]})
    else:
        exe = executive.Executive()
        fs = filesystem.FileSystem()
    print(config.Config(exe, fs).default_configuration())
def test_dirs_under(self):
    """dirs_under() lists directories depth-first and honors a filter callback."""
    fake_files = {
        '/tests/test1.txt': '',
        '/tests/test3/test2/test.txt': 'test',
        '/tests/test2/test.txt': 'test',
    }
    fs = filesystem_mock.MockFileSystem(files=fake_files)
    self.assertEqual(fs.dirs_under('/tests'),
                     ['/tests', '/tests/test2', '/tests/test3',
                      '/tests/test3/test2'])

    def skip_test2_dirs(fs, dirpath):
        # Exclude any directory literally named 'test2' (and its subtree).
        return fs.basename(dirpath) != 'test2'

    self.assertEqual(fs.dirs_under('/tests', skip_test2_dirs),
                     ['/tests', '/tests/test3'])
def _test_json_generation(self, passed_tests_list, failed_tests_list):
    """Generate incremental JSON results for the given tests and verify them.

    Test names prefixed DISABLED_/FLAKY_/FAILS_ are bucketed accordingly;
    everything else counts as PASS.

    Fix: the original built the identical `failed_count_map` dict twice
    (before the timing loop and again after constructing the generator);
    the duplicate computation is removed. The manual index counter is
    replaced with enumerate().
    """
    tests_set = set(passed_tests_list) | set(failed_tests_list)
    DISABLED_tests = set(
        [t for t in tests_set if t.startswith('DISABLED_')])
    FLAKY_tests = set([t for t in tests_set if t.startswith('FLAKY_')])
    FAILS_tests = set([t for t in tests_set if t.startswith('FAILS_')])
    PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)
    failed_tests = set(failed_tests_list) - DISABLED_tests
    failed_count_map = dict([(t, 1) for t in failed_tests])

    # Fabricate deterministic, distinct elapsed times.
    test_timings = {}
    for i, test in enumerate(tests_set):
        test_timings[test] = float(self._num_runs * 100 + i)

    test_results_map = dict()
    for test in tests_set:
        test_results_map[test] = json_results_generator.TestResult(
            test, failed=(test in failed_tests),
            elapsed_time=test_timings[test])

    port = Mock()
    port._filesystem = filesystem_mock.MockFileSystem()
    generator = json_results_generator.JSONResultsGeneratorBase(
        port, self.builder_name, self.build_name, self.build_number,
        '',
        None,  # don't fetch past json results archive
        test_results_map)

    # Test incremental json results
    incremental_json = generator.get_json()
    self._verify_json_results(tests_set, test_timings, failed_count_map,
                              len(PASS_tests), len(DISABLED_tests),
                              len(FLAKY_tests),
                              len(DISABLED_tests | failed_tests),
                              incremental_json, 1)

    # We don't verify the results here, but at least we make sure the
    # code runs without errors.
    generator.generate_json_output()
    generator.generate_full_results_file()
def test_test_expectations(self):
    """Expectations merge the expectations file, the Skipped file, and the
    feature/platform skip entries."""
    # Check that we read both the expectations file and anything in a
    # Skipped file, and that we include the feature and platform checks.
    mock_fs = filesystem_mock.MockFileSystem({
        '/tmp/test_expectations.txt':
            'BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = FAIL\n',
        '/tmp/Skipped': 'fast/html/keygen.html',
    })
    port = TestWebKitPort(expectations_file='/tmp/test_expectations.txt',
                          skips_file='/tmp/Skipped', filesystem=mock_fs)
    self.assertEqual(port.test_expectations(),
"""BUG_TESTEXPECTATIONS SKIP : fast/html/article-element.html = FAIL
BUG_SKIPPED SKIP : fast/html/keygen.html = FAIL
BUG_SKIPPED SKIP : media = FAIL
BUG_SKIPPED SKIP : accessibility = FAIL""")
def test_overrides_and_builder_names(self):
    """Overrides are returned for DEPS-style builders but not plain ones."""
    port = self.make_port()
    if not port:
        return

    fs = filesystem_mock.MockFileSystem()
    port._filesystem = fs
    port.path_from_chromium_base = lambda *comps: '/' + '/'.join(comps)
    overrides_path = port.path_from_chromium_base(
        'webkit', 'tools', 'layout_tests', 'test_expectations.txt')
    OVERRIDES = 'foo'
    fs.files[overrides_path] = OVERRIDES

    # Both of these builder names should surface the overrides file.
    for builder_name in ('DUMMY_BUILDER_NAME', 'builder (deps)'):
        port._options.builder_name = builder_name
        self.assertEquals(port.test_expectations_overrides(), OVERRIDES)

    # A plain 'builder' gets no overrides.
    port._options.builder_name = 'builder'
    self.assertEquals(port.test_expectations_overrides(), None)
def test_results_html(self):
    """_results_html() renders a minimal one-test results page."""
    mock_port = Mock()
    mock_port._filesystem = filesystem_mock.MockFileSystem()
    mock_port.relative_test_filename = lambda name: name
    mock_port.filename_to_uri = lambda name: name

    runner = test_runner.TestRunner(port=mock_port, options=Mock(),
                                    printer=Mock())
    # NOTE(review): the exact intra-string indentation was reconstructed from
    # a collapsed source line — confirm against the renderer's output.
    expected_html = u"""<html>
  <head>
    <title>Layout Test Results (time)</title>
  </head>
  <body>
    <h2>Title (time)</h2>
    <p><a href='test_path'>test_path</a><br />
</p>
</body></html>
"""
    actual_html = runner._results_html(["test_path"], {}, "Title",
                                       override_time="time")
    self.assertEqual(actual_html, expected_html)
def get_test_config(test_files=None, result_files=None):
    """Build a rebaselineserver.TestConfig backed by a mock filesystem.

    Args:
        test_files: paths relative to the LayoutTests directory (optional).
        result_files: paths relative to the results directory (optional).

    Fix: the original used mutable default arguments ([]), which are
    created once and shared across every call; defaults are now None.
    The loop variable no longer shadows the builtin 'file'.
    """
    test_files = test_files or []
    result_files = result_files or []

    layout_tests_directory = base.Port().layout_tests_dir()
    results_directory = '/WebKitBuild/Debug/layout-test-results'
    mock_filesystem = filesystem_mock.MockFileSystem()
    for relpath in test_files:
        file_path = mock_filesystem.join(layout_tests_directory, relpath)
        mock_filesystem.files[file_path] = ''
    for relpath in result_files:
        file_path = mock_filesystem.join(results_directory, relpath)
        mock_filesystem.files[file_path] = ''

    class TestMacPort(WebKitPort):
        def __init__(self):
            WebKitPort.__init__(self, filesystem=mock_filesystem)
            self._name = 'mac'

    return rebaselineserver.TestConfig(TestMacPort(), layout_tests_directory,
                                       results_directory,
                                       ('mac', 'mac-leopard', 'win', 'linux'),
                                       mock_filesystem, MockSCM())
def test_shard_tests(self):
    """_shard_tests() must queue the HTTP/websocket tests first."""
    # Test that _shard_tests in test_runner.TestRunner really
    # put the http tests first in the queue.
    port = Mock()
    port._filesystem = filesystem_mock.MockFileSystem()
    runner = TestRunnerWrapper(port=port, options=Mock(), printer=Mock())

    test_list = [
        "LayoutTests/websocket/tests/unicode.htm",
        "LayoutTests/animations/keyframes.html",
        "LayoutTests/http/tests/security/view-source-no-refresh.html",
        "LayoutTests/websocket/tests/websocket-protocol-ignored.html",
        "LayoutTests/fast/css/display-none-inline-style-change-crash.html",
        "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html",
        "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html",
        "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html",
        "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html",
    ]
    expected_tests_to_http_lock = set([
        'LayoutTests/websocket/tests/unicode.htm',
        'LayoutTests/http/tests/security/view-source-no-refresh.html',
        'LayoutTests/websocket/tests/websocket-protocol-ignored.html',
        'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html',
    ])

    # FIXME: Ideally the HTTP tests don't have to all be in one shard.
    # Both the single-threaded and multi-threaded sharding must lead with
    # the locked-HTTP shard.
    for use_multiple_threads in (False, True):
        shards = runner._shard_tests(test_list, use_multiple_threads)
        shard_name, shard_tests = shards[0]
        self.assertEqual("tests_to_http_lock", shard_name)
        self.assertEqual(expected_tests_to_http_lock, set(shard_tests))
def __init__(self, options):
    """Construct a ChromiumMacPort backed by an in-memory mock filesystem."""
    mock_fs = filesystem_mock.MockFileSystem()
    chromium_mac.ChromiumMacPort.__init__(self, options=options,
                                          filesystem=mock_fs)
def __init__(self, options):
    """Construct a ChromiumWinPort backed by an in-memory mock filesystem."""
    mock_fs = filesystem_mock.MockFileSystem()
    chromium_win.ChromiumWinPort.__init__(self, options=options,
                                          filesystem=mock_fs)
def __init__(self, options):
    """Construct a ChromiumLinuxPort backed by an in-memory mock filesystem."""
    mock_fs = filesystem_mock.MockFileSystem()
    chromium_linux.ChromiumLinuxPort.__init__(self, options=options,
                                              filesystem=mock_fs)
def test_watch_list_not_found(self):
    """load() raises when the mock filesystem has no watchlist file."""
    empty_fs = filesystem_mock.MockFileSystem()
    loader = WatchListLoader(empty_fs)
    self.assertRaisesRegexp(Exception,
                            r'Watch list file \(.*/watchlist\) not found\.',
                            loader.load)
def __init__(self, options):
    """Construct a ChromiumWinPort backed by an in-memory mock filesystem."""
    # FIXME: This should use MockExecutive and MockUser as well.
    mock_fs = filesystem_mock.MockFileSystem()
    chromium_win.ChromiumWinPort.__init__(self, options=options,
                                          filesystem=mock_fs)
def setUp(self):
    """Give each test a fresh mock filesystem with the generic test tree."""
    self.fs = filesystem_mock.MockFileSystem()
    self.setup_generic_test_dir()
def mock_host_for_stub_repository():
    """Return a MockFileSystem preloaded with the FAKE_FILES fixture."""
    return filesystem_mock.MockFileSystem(files=FAKE_FILES)