def test_host_port_and_filesystem(options, expectations):
    """Return a (host port, filesystem) pair with *expectations* installed.

    The expectations text is written to the port's test_expectations file
    inside a fresh unit-test filesystem.
    """
    fs = unit_test_filesystem()
    host_port_obj = port.get("test", options, filesystem=fs,
                             user=mocktool.MockUser())
    expectations_path = host_port_obj.path_to_test_expectations_file()
    fs.write_text_file(expectations_path, expectations)
    return (host_port_obj, fs)
def make_generator(self, files, tests):
    """Build an HtmlGenerator wired to a test port backed by *files*."""
    opts = mocktool.MockOptions(configuration=None, html_directory="/tmp")
    test_port = port.get("test", opts, filesystem=unit_test_filesystem(files))
    html_generator = rebaseline_chromium_webkit_tests.HtmlGenerator(
        test_port,
        target_port=None,
        options=opts,
        platforms=["test-mac-leopard"],
        rebaselining_tests=tests)
    return html_generator, test_port
def assertTest(self, test_name, pixel_tests, expected_checksum=None,
               drt_output=None, filesystem=None):
    """Run mock DRT on *test_name* and check its stdout and stderr."""
    filesystem = filesystem or test.unit_test_filesystem()
    test_port = factory.get('test', filesystem=filesystem)
    drt_input, drt_output = self.make_input_output(
        test_port, test_name, pixel_tests, expected_checksum, drt_output)
    args = ['--platform', 'test'] + self.extra_args(pixel_tests)
    stdin = newstringio.StringIO(drt_input)
    stdout = newstringio.StringIO()
    stderr = newstringio.StringIO()
    options, args = mock_drt.parse_options(args)
    drt = self.make_drt(options, args, filesystem, stdin, stdout, stderr)
    self.assertEqual(drt.run(), 0)
    # We use the StringIO.buflist here instead of getvalue() because
    # the StringIO might be a mix of unicode/ascii and 8-bit strings.
    self.assertEqual(stdout.buflist, drt_output)
    self.assertEqual(stderr.getvalue(), '')
def test_missing_results(self):
    """Missing expected results are regenerated in place.

    When an expectation file is missing, the new baseline is written to
    the expected generic (or platform-specific) location.
    """
    fs = unit_test_filesystem()
    res, out, err, _ = logging_run(
        [
            "--no-show-results",
            "failures/unexpected/missing_text.html",
            "failures/unexpected/missing_image.html",
            "failures/unexpected/missing_audio.html",
            "failures/unexpected/missing_render_tree_dump.html",
        ],
        tests_included=True,
        filesystem=fs,
        new_results=True,
    )
    # Wrap in list() so remove() also works on Python 3, where keys()
    # returns a view object without a remove() method.
    file_list = list(fs.written_files.keys())
    file_list.remove("/tmp/layout-test-results/tests_run0.txt")
    self.assertEquals(res, 0)
    self.assertFalse(out.empty())
    self.assertEqual(len(file_list), 6)
    self.assertBaselines(file_list, "/failures/unexpected/missing_text", [".txt"], err)
    self.assertBaselines(file_list, "/platform/test-mac-leopard/failures/unexpected/missing_image", [".png"], err)
    # NOTE(review): missing_audio.html contributes to the six written files
    # counted above but has no assertBaselines check here — confirm intended.
    self.assertBaselines(
        file_list, "/platform/test-mac-leopard/failures/unexpected/missing_render_tree_dump", [".txt"], err
    )
def test_single_file(self):
    """Running a single test records it in tests_run0.txt."""
    # FIXME: We should consider replacing more of the get_tests_run()-style
    # tests with tests that read the tests_run* files, like this one.
    fs = unit_test_filesystem()
    passing_run(['passes/text.html'], tests_included=True, filesystem=fs)
    recorded = fs.read_text_file('/tmp/layout-test-results/tests_run0.txt')
    self.assertEquals(recorded, 'passes/text.html\n')
def test_host_port_and_filesystem(options, expectations):
    """Create a test host port plus a filesystem seeded with *expectations*."""
    fs = unit_test_filesystem()
    host_port_obj = port.get('test', options, filesystem=fs,
                             user=mocktool.MockUser())
    path = host_port_obj.path_to_test_expectations_file()
    fs.write_text_file(path, expectations)
    return (host_port_obj, fs)
def test_stderr_is_saved(self):
    """stderr emitted by a test is captured into an -stderr.txt file."""
    fs = unit_test_filesystem()
    self.assertTrue(passing_run(filesystem=fs))
    saved = fs.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt')
    self.assertEquals(saved, 'stuff going to stderr')
def test_results_directory_relative(self):
    """A relative --results-directory resolves against the current directory."""
    # We run a configuration that should fail, to generate output, then
    # look for what the output results url was.
    fs = unit_test_filesystem()
    fs.maybe_make_directory("/tmp/cwd")
    fs.chdir("/tmp/cwd")
    _, _, _, user = logging_run(["--results-directory=foo"],
                                tests_included=True, filesystem=fs)
    self.assertEqual(user.opened_urls, ["/tmp/cwd/foo/results.html"])
def test_test_list_with_prefix(self):
    """Entries in a --test-list file may carry the LayoutTests/ prefix."""
    fs = unit_test_filesystem()
    list_path = "/tmp/foo.txt"
    fs.write_text_file(list_path, "LayoutTests/passes/text.html")
    tests_run = get_tests_run(["--test-list=%s" % list_path],
                              tests_included=True,
                              flatten_batches=True,
                              filesystem=fs)
    self.assertEquals(["passes/text.html"], tests_run)
def test_exit_after_n_failures_upload(self):
    """Incremental results JSON is still written when bailing after N failures."""
    fs = unit_test_filesystem()
    logging_run(
        ["failures/unexpected/text-image-checksum.html",
         "passes/text.html",
         "--exit-after-n-failures", "1"],
        tests_included=True,
        record_results=True,
        filesystem=fs)
    self.assertTrue("/tmp/layout-test-results/incremental_results.json" in fs.files)
def test_crash_with_stderr(self):
    """A crashing test with stderr output is flagged has_stderr in the JSON."""
    fs = unit_test_filesystem()
    logging_run(['failures/unexpected/crash-with-stderr.html'],
                tests_included=True, record_results=True, filesystem=fs)
    json_string = fs.read_text_file('/tmp/layout-test-results/full_results.json')
    self.assertTrue(json_string.find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)
def test_no_image_failure_with_image_diff(self):
    """A checksum mismatch with matching image pixels is not a regression."""
    fs = unit_test_filesystem()
    logging_run(['failures/unexpected/checksum-with-matching-image.html'],
                tests_included=True, record_results=True, filesystem=fs)
    json_string = fs.read_text_file('/tmp/layout-test-results/full_results.json')
    self.assertTrue(json_string.find('"num_regressions":0') != -1)
def test_results_directory_absolute(self):
    """An absolute --results-directory is used verbatim for the results URL."""
    # We run a configuration that should fail, to generate output, then
    # look for what the output results url was.
    fs = unit_test_filesystem()
    with fs.mkdtemp() as tmpdir:
        _, _, _, user = logging_run(['--results-directory=' + str(tmpdir)],
                                    tests_included=True, filesystem=fs)
        self.assertEqual(user.opened_urls, [fs.join(tmpdir, 'results.html')])
def test_test_list_with_prefix(self):
    """The LayoutTests/ prefix in a test-list file is stripped."""
    fs = unit_test_filesystem()
    list_path = '/tmp/foo.txt'
    fs.write_text_file(list_path, 'LayoutTests/passes/text.html')
    tests_run = get_tests_run(['--test-list=%s' % list_path],
                              tests_included=True,
                              flatten_batches=True,
                              filesystem=fs)
    self.assertEquals(['passes/text.html'], tests_run)
def test_results_directory_relative(self):
    """Relative results directory resolves against the working directory."""
    # We run a configuration that should fail, to generate output, then
    # look for what the output results url was.
    fs = unit_test_filesystem()
    fs.maybe_make_directory('/tmp/cwd')
    fs.chdir('/tmp/cwd')
    _, _, _, user = logging_run(['--results-directory=foo'],
                                tests_included=True, filesystem=fs)
    self.assertEqual(user.opened_urls, ['/tmp/cwd/foo/results.html'])
def test_single_file(self):
    """The tests_run0.txt file lists exactly the single test that ran."""
    # FIXME: We should consider replacing more of the get_tests_run()-style
    # tests with tests that read the tests_run* files, like this one.
    fs = unit_test_filesystem()
    passing_run(['passes/text.html'], tests_included=True, filesystem=fs)
    self.assertEquals(
        fs.read_text_file('/tmp/layout-test-results/tests_run0.txt'),
        'passes/text.html\n')
def test_test_list(self):
    """--test-list runs the listed tests; a missing list file is an error."""
    fs = unit_test_filesystem()
    list_path = '/tmp/foo.txt'
    fs.write_text_file(list_path, 'passes/text.html')
    tests_run = get_tests_run(['--test-list=%s' % list_path],
                              tests_included=True,
                              flatten_batches=True,
                              filesystem=fs)
    self.assertEquals(['passes/text.html'], tests_run)
    # Remove the list file; the run should now fail with an error message.
    fs.remove(list_path)
    res, out, err, user = logging_run(['--test-list=%s' % list_path],
                                      tests_included=True, filesystem=fs)
    self.assertEqual(res, -1)
    self.assertFalse(err.empty())
def test_main(self):
    """mock_drt.main with empty input exits 0 and writes nothing anywhere."""
    filesystem = test.unit_test_filesystem()
    stdin = newstringio.StringIO()
    stdout = newstringio.StringIO()
    stderr = newstringio.StringIO()
    rc = mock_drt.main(['--platform', 'test'] + self.extra_args(False),
                       filesystem, stdin, stdout, stderr)
    self.assertEqual(rc, 0)
    self.assertEqual(stdout.getvalue(), '')
    self.assertEqual(stderr.getvalue(), '')
    self.assertEqual(filesystem.written_files, {})
def test_end_to_end(self):
    """Full run: exit code, image-diff percentage, and results page opening."""
    fs = unit_test_filesystem()
    res, out, err, user = logging_run(record_results=True,
                                      tests_included=True, filesystem=fs)
    self.assertEquals(res, unexpected_tests_count)
    results = self.parse_full_results(
        fs.files["/tmp/layout-test-results/full_results.json"])
    # Check to ensure we're passing back image diff %age correctly.
    self.assertEquals(
        results["tests"]["failures"]["expected"]["image.html"]["image_diff_percent"],
        1)
    # Check that we attempted to display the results page in a browser.
    self.assertTrue(user.opened_urls)
def test_pixeltest__fails(self):
    """A wrong expected checksum reports the real one and writes the PNG."""
    filesystem = test.unit_test_filesystem()
    expected_output = [
        '#URL:file:///test.checkout/LayoutTests/failures/expected/checksum.html\n',
        '#MD5:checksum-checksum\n',
        'checksum-txt',
        '\n',
        '#EOF\n',
    ]
    self.assertTest('failures/expected/checksum.html',
                    pixel_tests=True,
                    expected_checksum='wrong-checksum',
                    drt_output=expected_output,
                    filesystem=filesystem)
    self.assertEquals(filesystem.written_files,
                      {'/tmp/png_result0.png': 'checksum\x8a-png'})
def test_no_image_failure_with_image_diff(self):
    """Matching image pixels mean no regression despite a checksum mismatch."""
    fs = unit_test_filesystem()
    logging_run(['failures/unexpected/checksum-with-matching-image.html'],
                tests_included=True, record_results=True, filesystem=fs)
    results_json = fs.read_text_file('/tmp/layout-test-results/full_results.json')
    self.assertTrue(results_json.find('"num_regressions":0') != -1)
def passing_run(extra_args=None, port_obj=None, record_results=False,
                tests_included=False, filesystem=None):
    """Run the webkit tests; return True iff everything passed silently."""
    options, parsed_args = parse_args(extra_args, record_results, tests_included)
    filesystem = filesystem or unit_test_filesystem()
    if not port_obj:
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform,
                                         options=options,
                                         filesystem=filesystem)
    bb_stream = array_stream.ArrayStream()
    log_stream = array_stream.ArrayStream()
    res = run_webkit_tests.run(port_obj, options, parsed_args,
                               buildbot_output=bb_stream,
                               regular_output=log_stream)
    # Success means a zero exit code and no output on either stream.
    return res == 0 and log_stream.empty() and bb_stream.empty()
def test_results_directory_absolute(self):
    """Absolute results directory is used as-is for the opened results URL."""
    # We run a configuration that should fail, to generate output, then
    # look for what the output results url was.
    fs = unit_test_filesystem()
    with fs.mkdtemp() as tmpdir:
        _, _, _, user = logging_run(
            ['--results-directory=' + str(tmpdir)],
            tests_included=True, filesystem=fs)
        self.assertEqual(user.opened_urls, [fs.join(tmpdir, 'results.html')])
def test_end_to_end(self):
    """End-to-end run: failure count, image diff %, and opened results page."""
    fs = unit_test_filesystem()
    res, out, err, user = logging_run(record_results=True,
                                      tests_included=True, filesystem=fs)
    # Six tests should fail, so the return code should be 6.
    self.assertEquals(res, 6)
    results = self.parse_full_results(
        fs.files['/tmp/layout-test-results/full_results.json'])
    # Check to ensure we're passing back image diff %age correctly.
    self.assertEquals(
        results['tests']['failures']['expected']['image.html']['image_diff_percent'],
        1)
    # Check that we attempted to display the results page in a browser.
    self.assertTrue(user.opened_urls)
def test_exit_after_n_failures_upload(self):
    """Bailing early after N failures still uploads incremental results."""
    fs = unit_test_filesystem()
    logging_run(
        ['failures/unexpected/text-image-checksum.html',
         'passes/text.html',
         '--exit-after-n-failures', '1'],
        tests_included=True,
        record_results=True,
        filesystem=fs)
    self.assertTrue('/tmp/layout-test-results/incremental_results.json' in fs.files)
def test_crash_with_stderr(self):
    """The full results JSON records has_stderr for a crashing test."""
    fs = unit_test_filesystem()
    logging_run(['failures/unexpected/crash-with-stderr.html'],
                tests_included=True, record_results=True, filesystem=fs)
    results_json = fs.read_text_file('/tmp/layout-test-results/full_results.json')
    self.assertTrue(
        results_json.find(
            '{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}'
        ) != -1)
def test_crash_log(self):
    """A crash picks up the Mac crash report, or stderr output elsewhere."""
    mock_crash_report = 'mock-crash-report'
    fs = unit_test_filesystem()
    fs.write_text_file(
        '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash',
        mock_crash_report)
    logging_run(['failures/unexpected/crash-with-stderr.html'],
                tests_included=True, record_results=True, filesystem=fs)
    # Currently CrashLog uploading only works on Darwin; on other platforms
    # the saved log falls back to the mocked stderr output.
    if sys.platform == "darwin":
        expected_crash_log = mock_crash_report
    else:
        expected_crash_log = "mock-std-error-output"
    self.assertEquals(
        fs.read_text_file(
            '/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'),
        expected_crash_log)
def test_reset_results(self):
    """--reset-results updates expectations in place.

    If the expectation is missing, the new baseline goes to the expected
    generic location.
    """
    fs = unit_test_filesystem()
    res, out, err, _ = logging_run(
        ['--pixel-tests', '--reset-results',
         'passes/image.html',
         'failures/expected/missing_image.html'],
        tests_included=True, filesystem=fs, new_results=True)
    # Wrap in list() so remove() also works on Python 3, where keys()
    # returns a view object without a remove() method.
    file_list = list(fs.written_files.keys())
    file_list.remove('/tmp/layout-test-results/tests_run0.txt')
    self.assertEquals(res, 0)
    self.assertTrue(out.empty())
    self.assertEqual(len(file_list), 4)
    self.assertBaselines(file_list, "/passes/image", [".txt", ".png"], err)
    self.assertBaselines(file_list, "/failures/expected/missing_image", [".txt", ".png"], err)
def logging_run(extra_args=None, port_obj=None, record_results=False,
                tests_included=False, filesystem=None, new_results=False):
    """Run the webkit tests, capturing output.

    Returns a (result code, buildbot output, regular output, user) tuple.
    """
    options, parsed_args = parse_args(extra_args=extra_args,
                                      record_results=record_results,
                                      tests_included=tests_included,
                                      print_nothing=False,
                                      new_results=new_results)
    host = MockHost()
    filesystem = filesystem or unit_test_filesystem()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform,
                                         options=options,
                                         filesystem=filesystem)
    res, buildbot_output, regular_output = run_and_capture(
        port_obj, options, parsed_args)
    return (res, buildbot_output, regular_output, host.user)
def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
    """Reftest naming conventions apply only to tests in the reftest list.

    A test not listed there is treated as an ordinary test, so it shows up
    as MISSING rather than being matched against a reference file.
    """
    fs = unit_test_filesystem()
    res, out, err, _ = logging_run(
        ["--no-show-results", "reftests/foo/"],
        tests_included=True, filesystem=fs, record_results=True)
    # Wrap in list() so remove() also works on Python 3, where keys()
    # returns a view object without a remove() method.
    file_list = list(fs.written_files.keys())
    file_list.remove("/tmp/layout-test-results/tests_run0.txt")
    json_string = fs.read_text_file("/tmp/layout-test-results/full_results.json")
    self.assertTrue(
        json_string.find(
            '"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}'
        ) != -1)
    self.assertTrue(json_string.find('"num_regressions":4') != -1)
    self.assertTrue(json_string.find('"num_flaky":0') != -1)
    self.assertTrue(json_string.find('"num_missing":1') != -1)
def test_missing_and_unexpected_results_with_custom_exit_code(self):
    """A port may derive its own exit code from the summarized results."""
    fs = unit_test_filesystem()

    class CustomExitCodePort(TestPort):
        # Exit code counts regressions plus missing results.
        def exit_code_from_summarized_results(self, unexpected_results):
            return unexpected_results['num_regressions'] + unexpected_results['num_missing']

    options, parsed_args = run_webkit_tests.parse_args(
        ['--pixel-tests', '--no-new-test-results'])
    test_port = CustomExitCodePort(options=options, user=mocktool.MockUser())
    res, out, err, _ = logging_run(
        ['--no-show-results',
         'failures/expected/missing_image.html',
         'failures/unexpected/missing_text.html',
         'failures/unexpected/text-image-checksum.html'],
        tests_included=True, filesystem=fs, record_results=True,
        port_obj=test_port)
    # One regression + one missing result under the custom policy.
    self.assertEquals(res, 2)
def test_missing_and_unexpected_results(self):
    """Missing and unexpected results are both summarized in the JSON.

    Expectations are updated in place; a missing expectation is reported
    via is_missing_text rather than counted as a flake or regression.
    """
    fs = unit_test_filesystem()
    res, out, err, _ = logging_run(
        ['--no-show-results',
         'failures/expected/missing_image.html',
         'failures/unexpected/missing_text.html',
         'failures/unexpected/text-image-checksum.html'],
        tests_included=True, filesystem=fs, record_results=True)
    # Wrap in list() so remove() also works on Python 3, where keys()
    # returns a view object without a remove() method.
    file_list = list(fs.written_files.keys())
    file_list.remove('/tmp/layout-test-results/tests_run0.txt')
    self.assertEquals(res, 1)
    expected_token = '"unexpected":{"text-image-checksum.html":{"expected":"PASS","actual":"TEXT"},"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
    json_string = fs.read_text_file('/tmp/layout-test-results/full_results.json')
    self.assertTrue(json_string.find(expected_token) != -1)
    self.assertTrue(json_string.find('"num_regression":1') == -1)
    self.assertTrue(json_string.find('"num_flaky":1') == -1)
    self.assertTrue(json_string.find('"num_missing":1') != -1)
def test_end_to_end(self):
    """Whole-run smoke test: exit code, diff percentage, results page."""
    fs = unit_test_filesystem()
    res, out, err, user = logging_run(record_results=True,
                                      tests_included=True, filesystem=fs)
    # Six tests should fail, so the return code should be 6.
    self.assertEquals(res, 6)
    full_results = self.parse_full_results(
        fs.files['/tmp/layout-test-results/full_results.json'])
    # Check to ensure we're passing back image diff %age correctly.
    image_entry = full_results['tests']['failures']['expected']['image.html']
    self.assertEquals(image_entry['image_diff_percent'], 1)
    # Check that we attempted to display the results page in a browser.
    self.assertTrue(user.opened_urls)
def test_new_baseline(self):
    """--new-baseline writes expectations into the platform directory.

    If the expectation is missing, a new expectation is created in the
    platform dir (fixes the original "is mssing" comment typo).
    """
    fs = unit_test_filesystem()
    res, out, err, _ = logging_run(
        ['--pixel-tests', '--new-baseline',
         'passes/image.html',
         'failures/expected/missing_image.html'],
        tests_included=True, filesystem=fs, new_results=True)
    # Wrap in list() so remove() also works on Python 3, where keys()
    # returns a view object without a remove() method.
    file_list = list(fs.written_files.keys())
    file_list.remove('/tmp/layout-test-results/tests_run0.txt')
    self.assertEquals(res, 0)
    self.assertTrue(out.empty())
    self.assertEqual(len(file_list), 4)
    self.assertBaselines(file_list, "/platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
    self.assertBaselines(file_list, "/platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)
def test_reset_results(self):
    """--reset-results rewrites expectations at the generic location.

    Tests that we update expectations in place; if the expectation is
    missing, the expected generic location is updated.
    """
    fs = unit_test_filesystem()
    res, out, err, _ = logging_run(
        ['--pixel-tests', '--reset-results',
         'passes/image.html',
         'failures/expected/missing_image.html'],
        tests_included=True, filesystem=fs, new_results=True)
    # Wrap in list() so remove() also works on Python 3, where keys()
    # returns a view object without a remove() method.
    file_list = list(fs.written_files.keys())
    file_list.remove('/tmp/layout-test-results/tests_run0.txt')
    self.assertEquals(res, 0)
    self.assertTrue(out.empty())
    self.assertEqual(len(file_list), 4)
    self.assertBaselines(file_list, "/passes/image", [".txt", ".png"], err)
    self.assertBaselines(file_list, "/failures/expected/missing_image", [".txt", ".png"], err)
def test_reftest_with_two_notrefs(self):
    """Reftests with multiple references report the correct failing ref_file.

    Passing reftests are omitted from the results; failing ones record
    which reference file mismatched and whether it was a mismatch reftest.
    """
    fs = unit_test_filesystem()
    res, out, err, _ = logging_run(
        ["--no-show-results", "reftests/foo/"],
        tests_included=True, filesystem=fs, record_results=True)
    # Wrap in list() so remove() also works on Python 3, where keys()
    # returns a view object without a remove() method.
    file_list = list(fs.written_files.keys())
    file_list.remove("/tmp/layout-test-results/tests_run0.txt")
    json_string = fs.read_text_file("/tmp/layout-test-results/full_results.json")
    json = self.parse_full_results(json_string)
    # Passing reftests should not appear in the results at all.
    self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
    self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
    self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
    self.assertEqual(
        json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
        {
            "expected": "PASS",
            "ref_file": "reftests/foo/second-mismatching-ref.html",
            "actual": "IMAGE",
            "is_reftest": True,
        })
    self.assertEqual(
        json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
        {
            "expected": "PASS",
            "ref_file": "reftests/foo/matching-ref.html",
            "actual": "IMAGE",
            "is_mismatch_reftest": True,
        })
    self.assertEqual(
        json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
        {
            "expected": "PASS",
            "ref_file": "reftests/foo/matching-ref.html",
            "actual": "IMAGE",
            "is_mismatch_reftest": True,
        })
def test_crash_log(self):
    """The saved crash log is the Mac crash report, or stderr off Darwin."""
    mock_crash_report = 'mock-crash-report'
    fs = unit_test_filesystem()
    fs.write_text_file(
        '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash',
        mock_crash_report)
    logging_run(['failures/unexpected/crash-with-stderr.html'],
                tests_included=True, record_results=True, filesystem=fs)
    # Currently CrashLog uploading only works on Darwin; elsewhere the
    # saved log is the mocked stderr output instead.
    if sys.platform == "darwin":
        expected_crash_log = mock_crash_report
    else:
        expected_crash_log = "mock-std-error-output"
    saved_log = fs.read_text_file(
        '/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt')
    self.assertEquals(saved_log, expected_crash_log)
def test_new_baseline(self):
    """--new-baseline creates platform-directory expectations.

    Tests that we update the platform expectations; if the expectation is
    missing, a new one is created in the platform dir (fixes the original
    "is mssing" comment typo).
    """
    fs = unit_test_filesystem()
    res, out, err, _ = logging_run(
        ['--pixel-tests', '--new-baseline',
         'passes/image.html',
         'failures/expected/missing_image.html'],
        tests_included=True, filesystem=fs, new_results=True)
    # Wrap in list() so remove() also works on Python 3, where keys()
    # returns a view object without a remove() method.
    file_list = list(fs.written_files.keys())
    file_list.remove('/tmp/layout-test-results/tests_run0.txt')
    self.assertEquals(res, 0)
    self.assertTrue(out.empty())
    self.assertEqual(len(file_list), 4)
    self.assertBaselines(file_list, "/platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
    self.assertBaselines(
        file_list, "/platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)
def make_generator(self, files, tests):
    """Return (HtmlGenerator, host port) for the given files and tests."""
    opts = mocktool.MockOptions(configuration=None, html_directory='/tmp')
    host_port = port.get('test', opts,
                         filesystem=unit_test_filesystem(files))
    generator = rebaseline_chromium_webkit_tests.HtmlGenerator(
        host_port,
        target_port=None,
        options=opts,
        platforms=['test-mac-leopard'],
        rebaselining_tests=tests)
    return generator, host_port
def test_stderr_is_saved(self):
    """A test's stderr output is persisted under the results directory."""
    fs = unit_test_filesystem()
    self.assertTrue(passing_run(filesystem=fs))
    contents = fs.read_text_file(
        "/tmp/layout-test-results/passes/error-stderr.txt")
    self.assertEquals(contents, "stuff going to stderr")
def test_retries_directory(self):
    """Retried tests record their run list under the retries/ directory."""
    fs = unit_test_filesystem()
    logging_run(tests_included=True, filesystem=fs)
    self.assertTrue("/tmp/layout-test-results/retries/tests_run0.txt" in fs.files)