def test_file_for_test(self):
    # Test that we can look up a test's filename for various cases like
    # variants and multi-globals.
    manifest_json = '''
{
    "items": {
        "manual": {},
        "reftest": {},
        "testharness": {
            "test.any.js": [
                "d23fbb8c66def47e31ad01aa7a311064ba8fddbd",
                ["test.any.html", {}],
                ["test.any.worker.html", {}]
            ]
        }
    }
}
'''
    host = MockHost()
    host.filesystem.write_text_file(
        WEB_TEST_DIR + '/external/wpt/MANIFEST.json', manifest_json)
    manifest = WPTManifest(host,
                           WEB_TEST_DIR + '/external/wpt/MANIFEST.json')
    self.assertEqual(
        manifest.all_url_items(), {
            u'test.any.html': [u'test.any.html', {}],
            u'test.any.worker.html': [u'test.any.worker.html', {}]
        })
    # Ensure that we can get back to `test.any.js` from both of the tests.
    self.assertEqual(manifest.file_path_for_test_url('test.any.html'),
                     'test.any.js')
    self.assertEqual(
        manifest.file_path_for_test_url('test.any.worker.html'),
        'test.any.js')

def test_file_for_test(self):
    # Test that we can look up a test's filename for various cases like
    # variants and multi-globals.
    manifest_json = '''
{
    "items": {
        "manual": {},
        "reftest": {},
        "testharness": {
            "test.any.js": [
                ["/test.any.html", {}],
                ["/test.any.worker.html", {}]
            ]
        }
    }
}
'''
    manifest = WPTManifest(manifest_json)
    self.assertEqual(
        manifest.all_url_items(), {
            u'/test.any.html': [u'/test.any.html', {}],
            u'/test.any.worker.html': [u'/test.any.worker.html', {}]
        })
    # Ensure that we can get back to `test.any.js` from both of the tests.
    self.assertEqual(manifest.file_path_for_test_url('/test.any.html'),
                     'test.any.js')
    self.assertEqual(
        manifest.file_path_for_test_url('/test.any.worker.html'),
        'test.any.js')

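# Both versions of test_file_for_test above exercise the multi-global
# (.any.js) expansion: one source file yields several generated test URLs.
# The standalone sketch below is hypothetical (it is not the real
# WPTManifest) and only illustrates the reverse URL -> file mapping that
# file_path_for_test_url performs against the manifest's "testharness" items.
import json

_SKETCH_MANIFEST = '''
{
    "items": {
        "testharness": {
            "test.any.js": [
                ["/test.any.html", {}],
                ["/test.any.worker.html", {}]
            ]
        }
    }
}
'''

def _url_to_file_sketch(manifest_text):
    """Map every generated test URL back to the file it came from."""
    items = json.loads(manifest_text)['items']['testharness']
    mapping = {}
    for file_path, entries in items.items():
        for entry in entries:
            if isinstance(entry, str):
                continue  # newer manifest formats lead with a SHA string
            url = entry[0]
            if url is not None:
                mapping[url] = file_path
    return mapping

assert _url_to_file_sketch(_SKETCH_MANIFEST)['/test.any.html'] == 'test.any.js'
assert _url_to_file_sketch(_SKETCH_MANIFEST)['/test.any.worker.html'] == 'test.any.js'
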
def test_ensure_manifest_raises_exception(self):
    host = MockHost()
    host.executive = MockExecutive(should_throw=True)
    port = TestPort(host)
    with self.assertRaises(ScriptError):
        WPTManifest.ensure_manifest(port)

def test_ensure_manifest_updates_manifest_if_it_exists(self):
    host = MockHost()
    port = TestPort(host)
    manifest_path = WEB_TEST_DIR + '/external/wpt/MANIFEST.json'
    host.filesystem.write_text_file(manifest_path,
                                    '{"manifest": "NOT base"}')
    self.assertTrue(host.filesystem.exists(manifest_path))
    WPTManifest.ensure_manifest(port)
    self.assertTrue(host.filesystem.exists(manifest_path))
    self.assertEqual(host.filesystem.written_files,
                     {manifest_path: '{"manifest": "base"}'})
    self.assertEqual(host.executive.calls, [[
        'python',
        '/mock-checkout/third_party/blink/tools/blinkpy/third_party/wpt/wpt/wpt',
        'manifest',
        '--no-download',
        '--tests-root',
        WEB_TEST_DIR + '/external/wpt',
    ]])

def _delete_orphaned_baselines(self):
    _log.info('Deleting any orphaned baselines.')

    is_baseline_filter = lambda fs, dirname, basename: \
        is_testharness_baseline(basename)

    baselines = self.fs.files_under(
        self.dest_path, file_filter=is_baseline_filter)

    # Note about possible refactoring:
    #  - the manifest path could be factored out to a common location, and
    #  - the logic for reading the manifest could be factored out from here
    #    and the Port class.
    manifest_path = self.finder.path_from_web_tests(
        'external', 'wpt', 'MANIFEST.json')
    manifest = WPTManifest(self.fs.read_text_file(manifest_path))
    wpt_urls = manifest.all_urls()

    # Currently baselines for tests with query strings are merged,
    # so that the tests foo.html?r=1 and foo.html?r=2 both have the same
    # baseline, foo-expected.txt.
    # TODO(qyearsley): Remove this when this behavior is fixed.
    wpt_urls = [url.split('?')[0] for url in wpt_urls]

    wpt_dir = self.finder.path_from_web_tests('external', 'wpt')
    for full_path in baselines:
        rel_path = self.fs.relpath(full_path, wpt_dir)
        if not self._has_corresponding_test(rel_path, wpt_urls):
            self.fs.remove(full_path)

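# _has_corresponding_test above maps a baseline filename back to the test
# URLs that could have produced it. Below is a minimal sketch of that
# relationship, assuming the conventional '<test-stem>-expected.txt' naming;
# the real method in the importer may handle more cases.
import posixpath

def _has_corresponding_test_sketch(baseline_rel_path, wpt_urls):
    suffix = '-expected.txt'
    if not baseline_rel_path.endswith(suffix):
        return False
    stem = baseline_rel_path[:-len(suffix)]
    # Query strings were already stripped from wpt_urls by the caller, so
    # foo.html?r=1 and foo.html?r=2 both resolve to foo.html here.
    return any(
        posixpath.splitext(url.lstrip('/'))[0] == stem for url in wpt_urls)

# _has_corresponding_test_sketch('dir/foo-expected.txt', ['dir/foo.html'])
# -> True; an orphaned 'dir/gone-expected.txt' -> False, so it gets removed.
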
def test_ensure_manifest_takes_optional_dest(self):
    host = MockHost()
    WPTManifest.ensure_manifest(host, 'wpt_internal')
    self.assertEqual(host.executive.calls, [[
        'python',
        '/mock-checkout/third_party/blink/tools/blinkpy/third_party/wpt/wpt/wpt',
        'manifest',
        '--work',
        '--no-download',
        '--tests-root',
        MOCK_WEB_TESTS + 'wpt_internal',
    ]])

def test_ensure_manifest_takes_optional_dest(self):
    host = MockHost()
    port = TestPort(host)
    WPTManifest.ensure_manifest(port, 'wpt_internal')
    self.assertEqual(host.executive.calls, [[
        'python3',
        '/mock-checkout/third_party/blink/tools/blinkpy/third_party/wpt/wpt/wpt',
        'manifest',
        '-v',
        '--no-download',
        '--tests-root',
        WEB_TEST_DIR + '/wpt_internal',
    ]])

def _generate_manifest(self):
    """Generates MANIFEST.json for imported tests.

    Runs the (newly-updated) manifest command if it's found, and then
    stages the generated MANIFEST.json in the git index, ready to commit.
    """
    _log.info('Generating MANIFEST.json')
    WPTManifest.generate_manifest(self.host, self.dest_path)
    manifest_path = self.fs.join(self.dest_path, 'MANIFEST.json')
    assert self.fs.exists(manifest_path)
    manifest_base_path = self.fs.normpath(
        self.fs.join(self.dest_path, '..', BASE_MANIFEST_NAME))
    self.copyfile(manifest_path, manifest_base_path)
    self.chromium_git.add_list([manifest_base_path])

def update_expectations(self):
    """Downloads new text baselines and adds test expectations lines.

    Returns:
        A pair: A set of tests that are rebaselined, and a dictionary
        mapping tests that couldn't be rebaselined to lists of expectation
        lines written to TestExpectations.
    """
    issue_number = self.get_issue_number()
    if issue_number == 'None':
        raise ScriptError('No issue on current branch.')

    build_to_status = self.get_latest_try_jobs()
    _log.debug('Latest try jobs: %r', build_to_status)

    if not build_to_status:
        raise ScriptError('No try job information was collected.')

    # The manifest may be used below to check which tests are reference
    # tests.
    WPTManifest.ensure_manifest(self.host)

    # Here we build up a dict of failing test results for all platforms.
    test_expectations = {}
    for build, job_status in build_to_status.iteritems():
        if job_status.result == 'SUCCESS':
            self.ports_with_all_pass.add(self.port_name(build))
        port_results = self.get_failing_results_dict(build)
        test_expectations = self.merge_dicts(test_expectations,
                                             port_results)

    # And then we merge results for different platforms that had the same
    # results.
    for test_name, platform_result in test_expectations.iteritems():
        # platform_result is a dict mapping platforms to results.
        test_expectations[test_name] = self.merge_same_valued_keys(
            platform_result)

    # At this point, test_expectations looks like: {
    #     'test-with-failing-result': {
    #         ('port-name1', 'port-name2'): SimpleTestResult,
    #         'port-name3': SimpleTestResult
    #     }
    # }

    rebaselined_tests, test_expectations = self.download_text_baselines(
        test_expectations)
    test_expectation_lines = self.create_line_dict(test_expectations)
    self.write_to_test_expectations(test_expectation_lines)
    return rebaselined_tests, test_expectation_lines

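# The dict shape documented inside update_expectations comes from
# merge_same_valued_keys, which collapses platforms whose results compare
# equal into a single tuple key. A hypothetical standalone sketch (the real
# method on this class may differ in detail):
def _merge_same_valued_keys_sketch(dictionary):
    merged = {}
    for key, value in dictionary.items():
        for existing_key, existing_value in list(merged.items()):
            if existing_value == value:
                # Fold this key into the group that shares its value.
                del merged[existing_key]
                group = (existing_key if isinstance(existing_key, tuple)
                         else (existing_key,))
                merged[group + (key,)] = value
                break
        else:
            merged[key] = value
    return merged

# {'port-name1': r, 'port-name2': r, 'port-name3': s} becomes
# {('port-name1', 'port-name2'): r, 'port-name3': s}.
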
def test_does_not_throw_when_missing_some_test_types(self):
    manifest_json = '''
{
    "items": {
        "testharness": {
            "test.any.js": [
                ["/test.any.html", {}]
            ]
        }
    }
}
'''
    manifest = WPTManifest(manifest_json)
    self.assertTrue(manifest.is_test_file('test.any.js'))
    self.assertEqual(manifest.all_url_items(),
                     {u'/test.any.html': [u'/test.any.html', {}]})
    self.assertEqual(manifest.extract_reference_list('/foo/bar.html'), [])

def test_ensure_manifest_copies_new_manifest(self):
    host = MockHost()
    manifest_path = '/mock-checkout/third_party/WebKit/LayoutTests/external/wpt/MANIFEST.json'
    self.assertFalse(host.filesystem.exists(manifest_path))
    WPTManifest.ensure_manifest(host)
    self.assertTrue(host.filesystem.exists(manifest_path))

    webkit_base = '/mock-checkout/third_party/WebKit'
    self.assertEqual(host.executive.calls, [[
        'python',
        '/mock-checkout/third_party/blink/tools/blinkpy/third_party/wpt/wpt/wpt',
        'manifest',
        '--work',
        '--tests-root',
        webkit_base + '/LayoutTests/external/wpt',
    ]])

def main(argv, stderr, host=None):
    parser = optparse.OptionParser(option_list=platform_options(
        use_globs=True))
    parser.add_option('--json', help='Path to JSON output file')
    parser.add_option(
        '--verbose',
        action='store_true',
        default=False,
        help='log extra details that may be helpful when debugging')
    options, _ = parser.parse_args(argv)

    if not host:
        if options.platform and 'test' in options.platform:
            # It's a bit lame to import mocks into real code, but this
            # allows the user to run tests against the test platform
            # interactively, which is useful for debugging test failures.
            from blinkpy.common.host_mock import MockHost
            host = MockHost()
        else:
            host = Host()

    if options.verbose:
        configure_logging(logging_level=logging.DEBUG, stream=stderr)
        # Print full stdout/stderr when a command fails.
        host.executive.error_output_limit = None
    else:
        # PRESUBMIT.py relies on our output, so don't include timestamps.
        configure_logging(logging_level=logging.INFO,
                          stream=stderr,
                          include_time=False)

    try:
        # Need to generate MANIFEST.json since some expectations correspond
        # to WPT tests that aren't files and only exist in the manifest.
        _log.debug('Generating MANIFEST.json for web-platform-tests ...')
        WPTManifest.ensure_manifest(host)
        exit_status = run_checks(host, options)
    except KeyboardInterrupt:
        exit_status = exit_codes.INTERRUPTED_EXIT_STATUS
    except Exception as error:  # pylint: disable=broad-except
        print >> stderr, '\n%s raised: %s' % (error.__class__.__name__,
                                              error)
        traceback.print_exc(file=stderr)
        exit_status = exit_codes.EXCEPTIONAL_EXIT_STATUS

    return exit_status

def test_ensure_manifest_copies_new_manifest(self):
    host = MockHost()
    manifest_path = MOCK_WEB_TESTS + 'external/wpt/MANIFEST.json'
    self.assertFalse(host.filesystem.exists(manifest_path))
    WPTManifest.ensure_manifest(host)
    self.assertTrue(host.filesystem.exists(manifest_path))
    self.assertEqual(host.filesystem.written_files,
                     {manifest_path: '{"manifest": "base"}'})
    self.assertEqual(host.executive.calls, [[
        'python',
        '/mock-checkout/third_party/blink/tools/blinkpy/third_party/wpt/wpt/wpt',
        'manifest',
        '--work',
        '--tests-root',
        MOCK_WEB_TESTS + 'external/wpt',
    ]])

def test_all_url_items_skips_jsshell_tests(self):
    manifest_json = '''
{
    "items": {
        "manual": {},
        "reftest": {},
        "testharness": {
            "test.any.js": [
                ["/test.any.html", {}],
                ["/test.any.js", {"jsshell": true}]
            ]
        }
    }
}
'''
    manifest = WPTManifest(manifest_json)
    self.assertEqual(manifest.all_url_items(),
                     {u'/test.any.html': [u'/test.any.html', {}]})

def test_all_url_items_skips_jsshell_tests(self):
    manifest_json = '''
{
    "items": {
        "manual": {},
        "reftest": {},
        "testharness": {
            "test.any.js": [
                "d23fbb8c66def47e31ad01aa7a311064ba8fddbd",
                ["test.any.html", {}],
                [null, {"jsshell": true}]
            ]
        }
    }
}
'''
    manifest = WPTManifest(manifest_json)
    self.assertEqual(manifest.all_url_items(),
                     {u'test.any.html': [u'test.any.html', {}]})

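# In the newer manifest format above, a null URL means "the test URL is the
# file path itself", and {"jsshell": true} marks a variant meant for a
# standalone JS engine rather than a browser, which is presumably why
# all_url_items drops it. A hypothetical sketch of that filter:
def _urls_excluding_jsshell_sketch(file_path, entries):
    urls = {}
    for entry in entries:
        if isinstance(entry, str):
            continue  # the leading SHA string in newer manifest formats
        url, extras = entry
        if extras.get('jsshell'):
            continue  # jsshell variants do not run in the browser harness
        if url is None:
            url = file_path  # null means the URL equals the file path
        urls[url] = [url, extras]
    return urls

# _urls_excluding_jsshell_sketch(
#     'test.any.js',
#     ['d23fbb8c66def47e31ad01aa7a311064ba8fddbd',
#      ['test.any.html', {}], [None, {'jsshell': True}]])
# -> {'test.any.html': ['test.any.html', {}]}
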
def test_ensure_manifest_copies_new_manifest(self):
    host = MockHost()
    port = TestPort(host)
    manifest_path = WEB_TEST_DIR + '/external/wpt/MANIFEST.json'
    self.assertFalse(host.filesystem.exists(manifest_path))
    WPTManifest.ensure_manifest(port)
    self.assertTrue(host.filesystem.exists(manifest_path))
    self.assertEqual(host.filesystem.written_files,
                     {manifest_path: '{"manifest": "base"}'})
    self.assertEqual(host.executive.calls, [[
        'python3',
        '/mock-checkout/third_party/wpt_tools/wpt/wpt',
        'manifest',
        '-v',
        '--no-download',
        '--tests-root',
        WEB_TEST_DIR + '/external/wpt',
    ]])

def test_file_path_to_url_paths(self):
    manifest_json = '''
{
    "items": {
        "manual": {},
        "reftest": {},
        "testharness": {
            "test.any.js": [
                ["/test.any.html", {}],
                ["/test.any.js", {"jsshell": true}]
            ]
        }
    }
}
'''
    manifest = WPTManifest(manifest_json)
    # Leading slashes should be stripped, and jsshell tests shouldn't be
    # included.
    self.assertEqual(manifest.file_path_to_url_paths('test.any.js'),
                     [u'test.any.html'])

def test_crash_tests(self):
    # Test that the manifest recognizes crash tests and that is_crash_test
    # correctly identifies only crash tests in the manifest.
    manifest_json = '''
{
    "items": {
        "manual": {},
        "reftest": {},
        "testharness": {
            "test.html": [
                "d23fbb8c66def47e31ad01aa7a311064ba8fddbd",
                [null, {}]
            ]
        },
        "crashtest": {
            "test-crash.html": [
                "d23fbb8c66def47e31ad01aa7a311064ba8fddbd",
                [null, {}]
            ]
        }
    }
}
'''
    manifest = WPTManifest(manifest_json)
    self.assertEqual(
        manifest.all_url_items(), {
            u'test.html': [u'test.html', {}],
            u'test-crash.html': [u'test-crash.html', {}]
        })
    self.assertTrue(manifest.is_crash_test(u'test-crash.html'))
    self.assertFalse(manifest.is_crash_test(u'test.html'))
    self.assertFalse(manifest.is_crash_test(u'different-test-crash.html'))

def test_does_not_throw_when_missing_some_test_types(self):
    manifest_json = '''
{
    "items": {
        "testharness": {
            "test.any.js": [
                "8d4b9a583f484741f4cd4e4940833a890c612656",
                ["test.any.html", {}]
            ]
        }
    }
}
'''
    host = MockHost()
    host.filesystem.write_text_file(
        WEB_TEST_DIR + '/external/wpt/MANIFEST.json', manifest_json)
    manifest = WPTManifest(host,
                           WEB_TEST_DIR + '/external/wpt/MANIFEST.json')
    self.assertTrue(manifest.is_test_file('test.any.js'))
    self.assertEqual(manifest.all_url_items(),
                     {u'test.any.html': [u'test.any.html', {}]})
    self.assertEqual(manifest.extract_reference_list('/foo/bar.html'), [])

def test_ensure_manifest_updates_manifest_if_it_exists(self):
    host = MockHost()
    manifest_path = '/mock-checkout/third_party/WebKit/LayoutTests/external/wpt/MANIFEST.json'
    host.filesystem.write_text_file(manifest_path,
                                    '{"manifest": "NOT base"}')
    self.assertTrue(host.filesystem.exists(manifest_path))
    WPTManifest.ensure_manifest(host)
    self.assertTrue(host.filesystem.exists(manifest_path))
    self.assertEqual(host.filesystem.written_files,
                     {manifest_path: '{"manifest": "base"}'})

    webkit_base = '/mock-checkout/third_party/WebKit'
    self.assertEqual(host.executive.calls, [[
        'python',
        '/mock-checkout/third_party/blink/tools/blinkpy/third_party/wpt/wpt/wpt',
        'manifest',
        '--work',
        '--tests-root',
        webkit_base + '/LayoutTests/external/wpt',
    ]])

def test_all_url_items_skips_jsshell_tests(self):
    manifest_json = '''
{
    "items": {
        "manual": {},
        "reftest": {},
        "testharness": {
            "test.any.js": [
                "d23fbb8c66def47e31ad01aa7a311064ba8fddbd",
                ["test.any.html", {}],
                [null, {"jsshell": true}]
            ]
        }
    }
}
'''
    host = MockHost()
    host.filesystem.write_text_file(
        WEB_TEST_DIR + '/external/wpt/MANIFEST.json', manifest_json)
    manifest = WPTManifest(host,
                           WEB_TEST_DIR + '/external/wpt/MANIFEST.json')
    self.assertEqual(manifest.all_url_items(),
                     {u'test.any.html': [u'test.any.html', {}]})

def test_crash_tests(self):
    # Test that the manifest recognizes crash tests and that is_crash_test
    # correctly identifies only crash tests in the manifest.
    manifest_json = '''
{
    "items": {
        "manual": {},
        "reftest": {},
        "testharness": {
            "test.html": [
                "d23fbb8c66def47e31ad01aa7a311064ba8fddbd",
                [null, {}]
            ]
        },
        "crashtest": {
            "test-crash.html": [
                "d23fbb8c66def47e31ad01aa7a311064ba8fddbd",
                [null, {}]
            ]
        }
    }
}
'''
    host = MockHost()
    host.filesystem.write_text_file(
        WEB_TEST_DIR + '/external/wpt/MANIFEST.json', manifest_json)
    manifest = WPTManifest(host,
                           WEB_TEST_DIR + '/external/wpt/MANIFEST.json')
    self.assertEqual(
        manifest.all_url_items(), {
            u'test.html': [u'test.html', {}],
            u'test-crash.html': [u'test-crash.html', {}]
        })
    self.assertTrue(manifest.is_crash_test(u'test-crash.html'))
    self.assertFalse(manifest.is_crash_test(u'test.html'))
    self.assertFalse(manifest.is_crash_test(u'different-test-crash.html'))

def test_extract_fuzzy_metadata(self):
    manifest_json = '''
{
    "items": {
        "reftest": {
            "not_fuzzy.html": [
                "d23fbb8c66def47e31ad01aa7a311064ba8fddbd",
                [
                    null,
                    [["not_fuzzy-ref.html", "=="]],
                    {}
                ]
            ],
            "fuzzy.html": [
                "d23fbb8c66def47e31ad01aa7a311064ba8fddbd",
                [
                    null,
                    [["fuzzy-ref.html", "=="]],
                    {"fuzzy": [[null, [[2, 2], [40, 40]]]]}
                ]
            ]
        },
        "testharness": {
            "not_a_reftest.html": [
                "d23fbb8c66def47e31ad01aa7a311064ba8fddbd",
                [null, {}]
            ]
        }
    }
}
'''
    host = MockHost()
    host.filesystem.write_text_file(
        WEB_TEST_DIR + '/external/wpt/MANIFEST.json', manifest_json)
    manifest = WPTManifest(host,
                           WEB_TEST_DIR + '/external/wpt/MANIFEST.json')
    self.assertEqual(
        manifest.extract_fuzzy_metadata('fuzzy.html'),
        [[2, 2], [40, 40]],
    )
    self.assertEqual(manifest.extract_fuzzy_metadata('not_fuzzy.html'),
                     (None, None))
    self.assertEqual(manifest.extract_fuzzy_metadata('not_a_reftest.html'),
                     (None, None))

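# The [[2, 2], [40, 40]] value asserted above encodes WPT's fuzzy reftest
# ranges: an allowed range for the maximum per-channel color difference and
# an allowed range for the number of differing pixels. The helper below is
# hypothetical and only sketches how a comparator might apply those ranges
# (treated as inclusive on both ends here):
def _fuzzy_match_allowed_sketch(fuzzy_metadata, max_channel_diff,
                                differing_pixels):
    (diff_lo, diff_hi), (pixels_lo, pixels_hi) = fuzzy_metadata
    return (diff_lo <= max_channel_diff <= diff_hi
            and pixels_lo <= differing_pixels <= pixels_hi)

# An observed max difference of 2 over 40 pixels is tolerated; a max
# difference of 3 is not.
assert _fuzzy_match_allowed_sketch([[2, 2], [40, 40]], 2, 40)
assert not _fuzzy_match_allowed_sketch([[2, 2], [40, 40]], 3, 40)
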
def execute(self, options, args, tool):
    self._tool = tool
    self.git_cl = self.git_cl or GitCL(tool)

    if args and options.test_name_file:
        _log.error('Aborted: Cannot combine --test-name-file and '
                   'positional parameters.')
        return 1

    # The WPT manifest is required when iterating through tests in a
    # TestBaselineSet if there are any tests in web-platform-tests.
    # TODO(crbug.com/698294): Consider calling ensure_manifest in BlinkTool.
    WPTManifest.ensure_manifest(tool)

    if not self.check_ok_to_run():
        return 1

    if options.builders:
        try_builders = set()
        for builder_names in options.builders:
            try_builders.update(builder_names.split(','))
        self._selected_try_bots = frozenset(try_builders)

    jobs = self.git_cl.latest_try_jobs(self.selected_try_bots,
                                       patchset=options.patchset)
    self._log_jobs(jobs)
    builders_with_no_jobs = self.selected_try_bots - {
        b.builder_name
        for b in jobs
    }

    if not options.trigger_jobs and not jobs:
        _log.info('Aborted: no try jobs and --no-trigger-jobs passed.')
        return 1

    if options.trigger_jobs and builders_with_no_jobs:
        self.trigger_try_jobs(builders_with_no_jobs)
        return 1

    jobs_to_results = self._fetch_results(jobs)
    builders_with_results = {b.builder_name for b in jobs_to_results}
    builders_without_results = set(
        self.selected_try_bots) - builders_with_results
    if builders_without_results:
        _log.info('There are some builders with no results:')
        self._log_builder_list(builders_without_results)

    if options.fill_missing is None and builders_without_results:
        should_continue = self._tool.user.confirm(
            'Would you like to continue?',
            default=self._tool.user.DEFAULT_NO)
        if not should_continue:
            _log.info('Aborting.')
            return 1
        options.fill_missing = self._tool.user.confirm(
            'Would you like to try to fill in missing results with\n'
            'available results?\n'
            'Note: This will generally yield correct results\n'
            'as long as the results are not platform-specific.',
            default=self._tool.user.DEFAULT_NO)

    if options.test_name_file:
        test_baseline_set = self._make_test_baseline_set_from_file(
            options.test_name_file, jobs_to_results)
    elif args:
        test_baseline_set = self._make_test_baseline_set_for_tests(
            args, jobs_to_results)
    else:
        test_baseline_set = self._make_test_baseline_set(
            jobs_to_results, options.only_changed_tests)

    if options.fill_missing:
        self.fill_in_missing_results(test_baseline_set)

    _log.debug('Rebaselining: %s', test_baseline_set)

    if not options.dry_run:
        self.rebaseline(options, test_baseline_set)
    return 0

def run(self, args):
    """Runs the tests and returns a RunDetails object with the results."""
    start_time = time.time()
    self._printer.write_update('Collecting tests ...')
    running_all_tests = False

    if not args or any('external' in path for path in args):
        self._printer.write_update(
            'Generating MANIFEST.json for web-platform-tests ...')
        WPTManifest.ensure_manifest(self._port.host)
        self._printer.write_update('Completed generating manifest.')

    self._printer.write_update('Collecting tests ...')
    try:
        paths, all_test_names, running_all_tests = self._collect_tests(
            args)
    except IOError:
        # This is raised if --test-list doesn't exist.
        return test_run_results.RunDetails(
            exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

    test_names, tests_in_other_chunks = self._finder.split_into_chunks(
        all_test_names)

    if self._options.order == 'natural':
        test_names.sort(key=self._port.test_key)
    elif self._options.order == 'random':
        test_names.sort()
        random.Random(self._options.seed).shuffle(test_names)

    self._printer.write_update('Parsing expectations ...')
    self._expectations = test_expectations.TestExpectations(
        self._port, test_names)

    tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
    self._expectations.remove_tests_from_expectations(
        tests_in_other_chunks)
    self._printer.print_found(
        len(all_test_names), len(test_names), len(tests_to_run),
        self._options.repeat_each, self._options.iterations)

    # Check to make sure we're not skipping every test.
    if not tests_to_run:
        msg = 'No tests to run.'
        if self._options.zero_tests_executed_ok:
            _log.info(msg)
            # Keep executing to produce valid (but empty) results.
        else:
            _log.critical(msg)
            code = exit_codes.NO_TESTS_EXIT_STATUS
            return test_run_results.RunDetails(exit_code=code)

    exit_code = self._set_up_run(tests_to_run)
    if exit_code:
        return test_run_results.RunDetails(exit_code=exit_code)

    if self._options.num_retries is None:
        # Don't retry failures if an explicit list of tests was passed in.
        should_retry_failures = len(paths) < len(test_names)
        # Retry failures 3 times by default.
        if should_retry_failures:
            self._options.num_retries = 3
    else:
        should_retry_failures = self._options.num_retries > 0

    try:
        self._start_servers(tests_to_run)
        if self._options.watch:
            run_results = self._run_test_loop(tests_to_run, tests_to_skip)
        else:
            run_results = self._run_test_once(tests_to_run, tests_to_skip,
                                              should_retry_failures)
        initial_results, all_retry_results, enabled_pixel_tests_in_retry = (
            run_results)
    finally:
        self._stop_servers()
        self._clean_up_run()

    # Some crash logs can take a long time to be written out so look
    # for new logs after the test run finishes.
    self._printer.write_update('Looking for new crash logs ...')
    self._look_for_new_crash_logs(initial_results, start_time)
    for retry_attempt_results in all_retry_results:
        self._look_for_new_crash_logs(retry_attempt_results, start_time)

    self._printer.write_update('Summarizing results ...')
    summarized_full_results = test_run_results.summarize_results(
        self._port, self._expectations, initial_results,
        all_retry_results, enabled_pixel_tests_in_retry)
    summarized_failing_results = test_run_results.summarize_results(
        self._port, self._expectations, initial_results,
        all_retry_results, enabled_pixel_tests_in_retry,
        only_include_failing=True)

    exit_code = summarized_failing_results['num_regressions']
    if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
        _log.warning('num regressions (%d) exceeds max exit status (%d)',
                     exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
        exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS

    if not self._options.dry_run:
        self._write_json_files(summarized_full_results,
                               summarized_failing_results,
                               initial_results, running_all_tests)
        self._upload_json_files()
        self._copy_results_html_file(self._results_directory,
                                     'results.html')
        self._copy_results_html_file(self._results_directory,
                                     'legacy-results.html')
        if initial_results.keyboard_interrupted:
            exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
        else:
            if initial_results.interrupted:
                exit_code = exit_codes.EARLY_EXIT_STATUS
            if self._options.show_results and (
                    exit_code or initial_results.total_failures):
                self._port.show_results_html_file(
                    self._filesystem.join(self._results_directory,
                                          'results.html'))
            self._printer.print_results(time.time() - start_time,
                                        initial_results)

    return test_run_results.RunDetails(
        exit_code, summarized_full_results, summarized_failing_results,
        initial_results, all_retry_results, enabled_pixel_tests_in_retry)