def _insert_failure_summaries(self, results_for_builder):
    """Insert fixable/wontfix summary counts and entries into the builder's raw results list.

    Args:
        results_for_builder: the mutable JSON-results structure for this builder;
            each _insert_item_into_raw_list call appends one keyed entry to it.
    """
    run_results = self._run_results

    # Tests that failed or are expected to be skipped, restricted to the NOW timeline.
    fixable_now = (
        (set(run_results.failures_by_name.keys()) | run_results.tests_by_expectation[test_expectations.SKIP])
        & run_results.tests_by_timeline[test_expectations.NOW])
    self._insert_item_into_raw_list(results_for_builder, len(fixable_now), self.FIXABLE_COUNT)

    self._insert_item_into_raw_list(
        results_for_builder,
        self._get_failure_summary_entry(test_expectations.NOW),
        self.FIXABLE)

    # Total count of NOW-timeline tests across every expectation model we hold.
    num_fixable = sum(
        len(exp.model().get_tests_with_timeline(test_expectations.NOW))
        for exp in itervalues(self._expectations))
    self._insert_item_into_raw_list(results_for_builder, num_fixable, self.ALL_FIXABLE_COUNT)

    self._insert_item_into_raw_list(
        results_for_builder,
        self._get_failure_summary_entry(test_expectations.WONTFIX),
        self.WONTFIX)
def _results_to_upload_json_trie(self, expectations, results):
    """Convert a run's results into the nested-dict (trie) payload used for upload.

    Tests whose actual type is SKIP are omitted. An expectation set of exactly
    {PASS} is uploaded as None (i.e. "nothing unusual expected"); any expectation
    or result type missing from the mapping falls back to Upload.Expectations.FAIL.

    Args:
        expectations: expectations object used to look up each test's filtered expectations.
        results: run results; iterated via results.results_by_name.

    Returns:
        dict: trie keyed by test-path components, leaves created by Upload.create_test_result.
    """
    failure_to_text = {
        test_expectations.PASS: Upload.Expectations.PASS,
        test_expectations.CRASH: Upload.Expectations.CRASH,
        test_expectations.TIMEOUT: Upload.Expectations.TIMEOUT,
        test_expectations.IMAGE: Upload.Expectations.IMAGE,
        test_expectations.TEXT: Upload.Expectations.TEXT,
        test_expectations.AUDIO: Upload.Expectations.AUDIO,
        test_expectations.MISSING: Upload.Expectations.WARNING,
        test_expectations.IMAGE_PLUS_TEXT: ' '.join([Upload.Expectations.IMAGE, Upload.Expectations.TEXT]),
    }

    trie = {}
    for result in itervalues(results.results_by_name):
        # Skipped tests carry no useful result payload.
        if result.type == test_expectations.SKIP:
            continue

        expected_set = expectations.filtered_expectations_for_test(
            result.test_name,
            self._options.pixel_tests or bool(result.reftest_type),
            self._options.world_leaks,
        )
        if expected_set == {test_expectations.PASS}:
            expected = None
        else:
            expected = ' '.join(failure_to_text.get(e, Upload.Expectations.FAIL) for e in expected_set)

        if result.type:
            actual = failure_to_text.get(result.type, Upload.Expectations.FAIL)
        else:
            actual = None

        json_results_generator.add_path_to_trie(
            result.test_name,
            Upload.create_test_result(
                expected=expected,
                actual=actual,
                time=int(result.test_run_time * 1000),  # seconds -> milliseconds
            ), trie)
    return trie
def run(self, args):
    """Run the requested layout tests for every supported device type.

    Collects tests per device, runs each device's subset (uploading per-device
    results when --report-urls is set), merges the per-device results, and
    returns the aggregate RunDetails. Mutates self._options.child_processes
    and self._runner as side effects.
    """
    num_failed_uploads = 0

    device_type_list = self._port.supported_device_types()
    try:
        tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
    except IOError:
        # This is raised if --test-list doesn't exist
        return test_run_results.RunDetails(exit_code=-1)

    aggregate_tests_to_run = set()  # type: Set[Test]
    for v in tests_to_run_by_device.values():
        aggregate_tests_to_run.update(v)

    # Index skipped tests by path so explicit command-line requests can find them.
    skipped_tests_by_path = defaultdict(set)
    for test in aggregate_tests_to_skip:
        skipped_tests_by_path[test.test_path].add(test)

    # If a test is marked skipped, but was explicitly requested, run it anyways
    if self._options.skipped != 'always':
        for arg in args:
            if arg in skipped_tests_by_path:
                tests = skipped_tests_by_path[arg]
                # Re-requested tests are assigned to the first device type.
                tests_to_run_by_device[device_type_list[0]].extend(tests)
                aggregate_tests_to_run |= tests
                aggregate_tests_to_skip -= tests
                del skipped_tests_by_path[arg]

    aggregate_tests = aggregate_tests_to_run | aggregate_tests_to_skip

    self._printer.print_found(len(aggregate_tests), len(aggregate_tests_to_run), self._options.repeat_each, self._options.iterations)
    start_time = time.time()

    # Check to see if all tests we are running are skipped.
    if aggregate_tests == aggregate_tests_to_skip:
        # XXX: this is currently identical to the follow if, which likely isn't intended
        # NOTE(review): the two guards differ only when tests appear in both the
        # run and skip sets; confirm whether the exit codes (0 vs -1) are intended.
        _log.error("All tests skipped.")
        return test_run_results.RunDetails(exit_code=0, skipped_all_tests=True)

    # Check to make sure we have no tests to run that are not skipped.
    if not aggregate_tests_to_run:
        _log.critical('No tests to run.')
        return test_run_results.RunDetails(exit_code=-1)

    self._printer.write_update("Checking build ...")
    if not self._port.check_build():
        _log.error("Build check failed")
        return test_run_results.RunDetails(exit_code=-1)

    if self._options.clobber_old_results:
        self._clobber_old_results()

    # Create the output directory if it doesn't already exist.
    self._port.host.filesystem.maybe_make_directory(self._results_directory)

    # Decide which auxiliary servers the whole run needs before constructing the runner.
    needs_http = any(test.needs_http_server for tests in itervalues(tests_to_run_by_device) for test in tests)
    needs_web_platform_test_server = any(test.needs_wpt_server for tests in itervalues(tests_to_run_by_device) for test in tests)
    needs_websockets = any(test.needs_websocket_server for tests in itervalues(tests_to_run_by_device) for test in tests)
    self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory,
        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

    initial_results = None
    retry_results = None
    enabled_pixel_tests_in_retry = False

    max_child_processes_for_run = 1
    # Remember the user's original value; the loop below overwrites the option per device.
    child_processes_option_value = self._options.child_processes
    uploads = []

    for device_type in device_type_list:
        self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

        _log.info('')
        if not self._options.child_processes:
            # max_child_processes == 0 means this device type is unavailable on this host.
            _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
            _log.info('')
            continue

        max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

        self._printer.print_baseline_search_path(device_type=device_type)

        _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
        _log.info('')

        start_time_for_device = time.time()
        if not tests_to_run_by_device[device_type]:
            continue

        test_inputs = [self._test_input_for_file(test, device_type=device_type) for test in tests_to_run_by_device[device_type]]

        if not self._set_up_run(test_inputs, device_type=device_type):
            return test_run_results.RunDetails(exit_code=-1)

        configuration = self._port.configuration_for_upload(self._port.target_host(0))
        if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
            configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
        temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(test_inputs, device_type=device_type)

        # Fold the globally-skipped tests into this device's initial results as expected skips.
        skipped_results = TestRunResults(self._expectations[device_type], len(aggregate_tests_to_skip))
        for skipped_test in set(aggregate_tests_to_skip):
            skipped_result = test_results.TestResult(skipped_test.test_path)
            skipped_result.type = test_expectations.SKIP
            skipped_results.add(skipped_result, expected=True)
        temp_initial_results = temp_initial_results.merge(skipped_results)

        if self._options.report_urls:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing upload data ...')

            upload = Upload(
                suite=self._options.suite or 'layout-tests',
                configuration=configuration,
                details=Upload.create_details(options=self._options),
                commits=self._port.commits_for_upload(),
                timestamp=start_time,
                run_stats=Upload.create_run_stats(
                    start_time=start_time_for_device,
                    end_time=time.time(),
                    tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                ),
                results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
            )
            for hostname in self._options.report_urls:
                self._printer.write_update('Uploading to {} ...'.format(hostname))
                if not upload.upload(hostname, log_line_func=self._printer.writeln):
                    num_failed_uploads += 1
                else:
                    uploads.append(upload)
            self._printer.writeln('Uploads completed!')

        # Merge this device's results into the aggregate (first device seeds the aggregate).
        initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
        retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
        enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

        # Stop running remaining device types after an interrupt.
        if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
            break

    # Used for final logging, max_child_processes_for_run is most relevant here.
    self._options.child_processes = max_child_processes_for_run

    self._runner.stop_servers()

    end_time = time.time()
    result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

    if self._options.report_urls and uploads:
        self._printer.writeln('\n')
        self._printer.write_update('Preparing to upload test archive ...')

        # Zip the whole results directory and attach it to every successful upload.
        with self._filesystem.mkdtemp() as temp:
            archive = self._filesystem.join(temp, 'test-archive')
            shutil.make_archive(archive, 'zip', self._results_directory)

            for upload in uploads:
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                    if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                        num_failed_uploads += 1

    if num_failed_uploads:
        result.exit_code = -1

    return result
def run(self, args):
    """Run the tests for every supported device type and return a RunDetails object.

    Older name-based variant: collects test names and expectations per device,
    runs each device's subset, optionally uploads per-device results and a final
    archive, and merges everything into one RunDetails. Mutates
    self._options.child_processes and self._runner as side effects.
    """
    num_failed_uploads = 0

    total_tests = set()
    aggregate_test_names = set()
    aggregate_tests = set()
    tests_to_run_by_device = {}

    device_type_list = self._port.supported_device_types()
    for device_type in device_type_list:
        # NOTE(review): the string below is a misplaced docstring executed as a
        # no-op statement each iteration; left in place to avoid behavior-adjacent edits.
        """Run the tests and return a RunDetails object with the results."""
        for_device_type = u'for {} '.format(device_type) if device_type else ''
        self._printer.write_update(u'Collecting tests {}...'.format(for_device_type))
        try:
            paths, test_names = self._collect_tests(args, device_type=device_type)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update(u'Parsing expectations {}...'.format(for_device_type))
        self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
        self._expectations[device_type].parse_all_expectations()

        aggregate_test_names.update(test_names)
        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)

        total_tests.update(tests_to_run)
        total_tests.update(tests_to_skip)

        # Each test runs on at most one device type: only tests not already
        # claimed by an earlier device type are assigned here.
        tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests]
        aggregate_tests.update(tests_to_run)

    # If a test is marked skipped, but was explicitly requested, run it anyways
    if self._options.skipped != 'always':
        for arg in args:
            if arg in total_tests and arg not in aggregate_tests:
                # Explicitly requested tests are assigned to the first device type.
                tests_to_run_by_device[device_type_list[0]].append(arg)
                aggregate_tests.add(arg)

    tests_to_skip = total_tests - aggregate_tests

    self._printer.print_found(len(aggregate_test_names), len(aggregate_tests), self._options.repeat_each, self._options.iterations)
    start_time = time.time()

    # Check to make sure we're not skipping every test.
    if not sum([len(tests) for tests in itervalues(tests_to_run_by_device)]):
        _log.critical('No tests to run.')
        return test_run_results.RunDetails(exit_code=-1)

    # Decide which auxiliary servers the whole run needs before constructing the runner.
    needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for tests in itervalues(tests_to_run_by_device) for test in tests)
    needs_web_platform_test_server = any(self._needs_web_platform_test(test) for tests in itervalues(tests_to_run_by_device) for test in tests)
    needs_websockets = any(self._is_websocket_test(test) for tests in itervalues(tests_to_run_by_device) for test in tests)
    self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow,
        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

    self._printer.write_update("Checking build ...")
    if not self._port.check_build():
        _log.error("Build check failed")
        return test_run_results.RunDetails(exit_code=-1)

    if self._options.clobber_old_results:
        self._clobber_old_results()

    # Create the output directory if it doesn't already exist.
    self._port.host.filesystem.maybe_make_directory(self._results_directory)

    initial_results = None
    retry_results = None
    enabled_pixel_tests_in_retry = False

    max_child_processes_for_run = 1
    # Remember the user's original value; the loop below overwrites the option per device.
    child_processes_option_value = self._options.child_processes
    uploads = []

    for device_type in device_type_list:
        # Rebind the runner's slow-test predicate for this device type. The lambda
        # captures the loop variable late, but it is replaced each iteration and
        # only used within it, so the binding is correct.
        self._runner._test_is_slow = lambda test_file: self._test_is_slow(test_file, device_type=device_type)
        self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

        _log.info('')
        if not self._options.child_processes:
            # max_child_processes == 0 means this device type is unavailable on this host.
            _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
            _log.info('')
            continue

        max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

        self._printer.print_baseline_search_path(device_type=device_type)

        _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
        _log.info('')

        start_time_for_device = time.time()
        if not tests_to_run_by_device[device_type]:
            continue
        if not self._set_up_run(tests_to_run_by_device[device_type], device_type=device_type):
            return test_run_results.RunDetails(exit_code=-1)

        configuration = self._port.configuration_for_upload(self._port.target_host(0))
        if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
            configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
        temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(tests_to_run_by_device[device_type], tests_to_skip, device_type=device_type)

        if self._options.report_urls:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing upload data ...')

            upload = Upload(
                suite='layout-tests',
                configuration=configuration,
                details=Upload.create_details(options=self._options),
                commits=self._port.commits_for_upload(),
                timestamp=start_time,
                run_stats=Upload.create_run_stats(
                    start_time=start_time_for_device,
                    end_time=time.time(),
                    tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                ),
                results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
            )
            for hostname in self._options.report_urls:
                self._printer.write_update('Uploading to {} ...'.format(hostname))
                if not upload.upload(hostname, log_line_func=self._printer.writeln):
                    num_failed_uploads += 1
                else:
                    uploads.append(upload)
            self._printer.writeln('Uploads completed!')

        # Merge this device's results into the aggregate (first device seeds the aggregate).
        initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
        retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
        enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

        # Stop running remaining device types after an interrupt.
        if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
            break

    # Used for final logging, max_child_processes_for_run is most relevant here.
    self._options.child_processes = max_child_processes_for_run

    self._runner.stop_servers()

    end_time = time.time()
    result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

    if self._options.report_urls and uploads:
        self._printer.writeln('\n')
        self._printer.write_update('Preparing to upload test archive ...')

        # Zip the whole results directory and attach it to every successful upload.
        with self._filesystem.mkdtemp() as temp:
            archive = self._filesystem.join(temp, 'test-archive')
            shutil.make_archive(archive, 'zip', self._results_directory)

            for upload in uploads:
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                    if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                        num_failed_uploads += 1

    if num_failed_uploads:
        result.exit_code = -1

    return result