def __init__(self, port, options, printer): """Initialize test runner data structures. Args: port: An object implementing platform-specific functionality. options: An options argument which contains command line options. printer: A Printer object to record updates to. """ self._port = port self._filesystem = port.host.filesystem self._options = options self._printer = printer self._expectations = None self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR self.PERF_SUBDIR = 'perf' self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests' self.ARCHIVED_RESULTS_LIMIT = 25 self._http_server_started = False self._wptserve_started = False self._websockets_server_started = False self._results_directory = self._port.results_directory() self._finder = LayoutTestFinder(self._port, self._options) self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def __init__(self, port, options, printer): """Initialize test runner data structures. Args: port: an object implementing port-specific options: a dictionary of command line options printer: a Printer object to record updates to. """ self._port = port self._filesystem = port.host.filesystem self._options = options self._printer = printer self._expectations = None self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root( ) self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests' # disable wss server. need to install pyOpenSSL on buildbots. # self._websocket_secure_server = websocket_server.PyWebSocket( # options.results_directory, use_tls=True, port=9323) self._results_directory = self._port.results_directory() self._finder = LayoutTestFinder(self._port, self._options) self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def __init__(self, port, options, printer): """Initialize test runner data structures. Args: port: an object implementing port-specific options: a dictionary of command line options printer: a Printer object to record updates to. """ self._port = port self._filesystem = port.host.filesystem self._options = options self._printer = printer self._expectations = None self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR self.PERF_SUBDIR = 'perf' self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests' self._http_server_started = False self._websockets_server_started = False self._results_directory = self._port.results_directory() self._finder = LayoutTestFinder(self._port, self._options) self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def __init__(self, port, options, printer): """Initialize test runner data structures. Args: port: an object implementing port-specific options: a dictionary of command line options printer: a Printer object to record updates to. """ self._port = port self._filesystem = port.host.filesystem self._options = options self._printer = printer self._expectations = None self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root( ) self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests' self._results_directory = self._port.results_directory() self._finder = LayoutTestFinder(self._port, self._options) self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow) test_options_json_path = self._port.path_from_webkit_base( self.LAYOUT_TESTS_DIRECTORY, "tests-options.json") self._tests_options = json.loads( self._filesystem.read_text_file(test_options_json_path) ) if self._filesystem.exists(test_options_json_path) else {}
def _runner(self, port=None):
    # FIXME: we shouldn't have to use run_webkit_tests.py to get the options we need.
    options = run_webkit_tests.parse_args(['--platform', 'test-mac-snowleopard'])[0]
    options.child_processes = '1'
    host = MockHost()
    port = port or host.port_factory.get(options.platform, options=options)
    return LayoutTestRunner(options, port, FakePrinter(), port.results_directory())
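# A hedged usage sketch for the _runner fixture above: a test case that mixes
# in the helper can build the runner against a MockHost and inspect it. The
# class name, test name, and assertion are illustrative, not from the source.
import unittest

class LayoutTestRunnerTest(unittest.TestCase):
    # Assumes the _runner helper above is defined on (or mixed into) this class.
    def test_runner_can_be_constructed(self):
        runner = self._runner()
        self.assertIsNotNone(runner)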
def __init__(self, port, options, printer): """Initializes test runner data structures. Args: port: An object implementing platform-specific functionality. options: An options argument which contains command line options. printer: A Printer object to record updates to. """ self._port = port self._filesystem = port.host.filesystem self._options = options self._printer = printer self._expectations = None self._http_server_started = False self._wptserve_started = False self._websockets_server_started = False self._results_directory = self._port.results_directory() self._finder = LayoutTestFinder(self._port, self._options) self._path_finder = PathFinder(port.host.filesystem) self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def run(self, args):
    """Run the tests and return a RunDetails object with the results."""
    self._printer.write_update("Collecting tests ...")
    try:
        paths, test_names = self._collect_tests(args)
    except IOError:
        # This is raised if --test-list doesn't exist
        return test_run_results.RunDetails(exit_code=-1)

    self._printer.write_update("Parsing expectations ...")
    self._expectations = test_expectations.TestExpectations(
        self._port, test_names, force_expectations_pass=self._options.force)
    self._expectations.parse_all_expectations()

    tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
    self._printer.print_found(len(test_names), len(tests_to_run),
                              self._options.repeat_each, self._options.iterations)
    start_time = time.time()

    # Check to make sure we're not skipping every test.
    if not tests_to_run:
        _log.critical('No tests to run.')
        return test_run_results.RunDetails(exit_code=-1)

    default_device_tests = []

    # Look for tests with custom device requirements.
    custom_device_tests = defaultdict(list)
    for test_file in tests_to_run:
        custom_device = self._custom_device_for_test(test_file)
        if custom_device:
            custom_device_tests[custom_device].append(test_file)
        else:
            default_device_tests.append(test_file)

    if custom_device_tests:
        for device_class in custom_device_tests:
            _log.debug('{} tests use device {}'.format(len(custom_device_tests[device_class]), device_class))

    initial_results = None
    retry_results = None
    enabled_pixel_tests_in_retry = False

    needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for test in tests_to_run)
    needs_web_platform_test_server = any(self._needs_web_platform_test(test) for test in tests_to_run)
    needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)
    self._runner = LayoutTestRunner(self._options, self._port, self._printer,
                                    self._results_directory, self._test_is_slow,
                                    needs_http=needs_http,
                                    needs_web_platform_test_server=needs_web_platform_test_server,
                                    needs_websockets=needs_websockets)

    self._printer.write_update("Checking build ...")
    if not self._port.check_build():
        _log.error("Build check failed")
        return test_run_results.RunDetails(exit_code=-1)

    if self._options.clobber_old_results:
        self._clobber_old_results()

    # Create the output directory if it doesn't already exist.
    self._port.host.filesystem.maybe_make_directory(self._results_directory)

    if default_device_tests:
        _log.info('')
        _log.info("Running %s", pluralize(len(tests_to_run), "test"))
        _log.info('')
        if not self._set_up_run(tests_to_run):
            return test_run_results.RunDetails(exit_code=-1)

        initial_results, retry_results, enabled_pixel_tests_in_retry = self._run_test_subset(default_device_tests, tests_to_skip)

    # Only use a single worker for custom device classes.
    self._options.child_processes = 1

    for device_class in custom_device_tests:
        device_tests = custom_device_tests[device_class]
        if device_tests:
            _log.info('')
            _log.info('Running %s for %s', pluralize(len(device_tests), "test"), device_class)
            _log.info('')
            if not self._set_up_run(device_tests, device_class):
                return test_run_results.RunDetails(exit_code=-1)

            device_initial_results, device_retry_results, device_enabled_pixel_tests_in_retry = self._run_test_subset(device_tests, tests_to_skip)

            initial_results = initial_results.merge(device_initial_results) if initial_results else device_initial_results
            retry_results = retry_results.merge(device_retry_results) if retry_results else device_retry_results
            enabled_pixel_tests_in_retry |= device_enabled_pixel_tests_in_retry

    self._runner.stop_servers()

    end_time = time.time()
    return self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)
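# A standalone illustration of the custom-device bucketing used in run()
# above: tests are grouped by the device they require, with None meaning the
# default device. The helper name and sample paths here are hypothetical.
from collections import defaultdict

def bucket_by_device(tests, device_for_test):
    default_tests, by_device = [], defaultdict(list)
    for test in tests:
        device = device_for_test(test)
        if device:
            by_device[device].append(test)
        else:
            default_tests.append(test)
    return default_tests, by_device

default_tests, custom_tests = bucket_by_device(
    ['fast/dom/a.html', 'ipad/b.html'],
    lambda test: 'iPad' if test.startswith('ipad/') else None)
assert default_tests == ['fast/dom/a.html']
assert custom_tests == {'iPad': ['ipad/b.html']}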
def run(self, args):
    num_failed_uploads = 0

    device_type_list = self._port.supported_device_types()
    try:
        tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
    except IOError:
        # This is raised if --test-list doesn't exist
        return test_run_results.RunDetails(exit_code=-1)

    aggregate_tests_to_run = set()  # type: Set[Test]
    for v in tests_to_run_by_device.values():
        aggregate_tests_to_run.update(v)

    skipped_tests_by_path = defaultdict(set)
    for test in aggregate_tests_to_skip:
        skipped_tests_by_path[test.test_path].add(test)

    # If a test is marked skipped, but was explicitly requested, run it anyway.
    if self._options.skipped != 'always':
        for arg in args:
            if arg in skipped_tests_by_path:
                tests = skipped_tests_by_path[arg]
                tests_to_run_by_device[device_type_list[0]].extend(tests)
                aggregate_tests_to_run |= tests
                aggregate_tests_to_skip -= tests
                del skipped_tests_by_path[arg]

    aggregate_tests = aggregate_tests_to_run | aggregate_tests_to_skip

    self._printer.print_found(len(aggregate_tests), len(aggregate_tests_to_run),
                              self._options.repeat_each, self._options.iterations)
    start_time = time.time()

    # Check to see if all tests we are running are skipped.
    if aggregate_tests == aggregate_tests_to_skip:
        # XXX: this check is currently identical to the following one, which likely isn't intended.
        _log.error("All tests skipped.")
        return test_run_results.RunDetails(exit_code=0, skipped_all_tests=True)

    # Check to make sure we have no tests to run that are not skipped.
    if not aggregate_tests_to_run:
        _log.critical('No tests to run.')
        return test_run_results.RunDetails(exit_code=-1)

    self._printer.write_update("Checking build ...")
    if not self._port.check_build():
        _log.error("Build check failed")
        return test_run_results.RunDetails(exit_code=-1)

    if self._options.clobber_old_results:
        self._clobber_old_results()

    # Create the output directory if it doesn't already exist.
    self._port.host.filesystem.maybe_make_directory(self._results_directory)

    needs_http = any(test.needs_http_server for tests in itervalues(tests_to_run_by_device) for test in tests)
    needs_web_platform_test_server = any(test.needs_wpt_server for tests in itervalues(tests_to_run_by_device) for test in tests)
    needs_websockets = any(test.needs_websocket_server for tests in itervalues(tests_to_run_by_device) for test in tests)
    self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory,
                                    needs_http=needs_http,
                                    needs_web_platform_test_server=needs_web_platform_test_server,
                                    needs_websockets=needs_websockets)

    initial_results = None
    retry_results = None
    enabled_pixel_tests_in_retry = False

    max_child_processes_for_run = 1
    child_processes_option_value = self._options.child_processes
    uploads = []

    for device_type in device_type_list:
        self._options.child_processes = min(
            self._port.max_child_processes(device_type=device_type),
            int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

        _log.info('')
        if not self._options.child_processes:
            _log.info('Skipping {} because {} is not available'.format(
                pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
            _log.info('')
            continue

        max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

        self._printer.print_baseline_search_path(device_type=device_type)

        _log.info(u'Running {}{}'.format(
            pluralize(len(tests_to_run_by_device[device_type]), 'test'),
            u' for {}'.format(device_type) if device_type else ''))
        _log.info('')

        start_time_for_device = time.time()
        if not tests_to_run_by_device[device_type]:
            continue

        test_inputs = [self._test_input_for_file(test, device_type=device_type)
                       for test in tests_to_run_by_device[device_type]]

        if not self._set_up_run(test_inputs, device_type=device_type):
            return test_run_results.RunDetails(exit_code=-1)

        configuration = self._port.configuration_for_upload(self._port.target_host(0))
        if not configuration.get('flavor', None):
            # The --result-report-flavor argument should override wk1/wk2.
            configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'

        temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(test_inputs, device_type=device_type)

        skipped_results = TestRunResults(self._expectations[device_type], len(aggregate_tests_to_skip))
        for skipped_test in set(aggregate_tests_to_skip):
            skipped_result = test_results.TestResult(skipped_test.test_path)
            skipped_result.type = test_expectations.SKIP
            skipped_results.add(skipped_result, expected=True)
        temp_initial_results = temp_initial_results.merge(skipped_results)

        if self._options.report_urls:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing upload data ...')

            upload = Upload(
                suite=self._options.suite or 'layout-tests',
                configuration=configuration,
                details=Upload.create_details(options=self._options),
                commits=self._port.commits_for_upload(),
                timestamp=start_time,
                run_stats=Upload.create_run_stats(
                    start_time=start_time_for_device,
                    end_time=time.time(),
                    tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                ),
                results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
            )
            for hostname in self._options.report_urls:
                self._printer.write_update('Uploading to {} ...'.format(hostname))
                if not upload.upload(hostname, log_line_func=self._printer.writeln):
                    num_failed_uploads += 1
                else:
                    uploads.append(upload)
            self._printer.writeln('Uploads completed!')

        initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
        retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
        enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

        if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
            break

    # Used for final logging; max_child_processes_for_run is most relevant here.
    self._options.child_processes = max_child_processes_for_run

    self._runner.stop_servers()

    end_time = time.time()
    result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

    if self._options.report_urls and uploads:
        self._printer.writeln('\n')
        self._printer.write_update('Preparing to upload test archive ...')

        with self._filesystem.mkdtemp() as temp:
            archive = self._filesystem.join(temp, 'test-archive')
            shutil.make_archive(archive, 'zip', self._results_directory)

            for upload in uploads:
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                    if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                        num_failed_uploads += 1

    if num_failed_uploads:
        result.exit_code = -1
    return result
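# A self-contained sketch of the archive step above: zip the results
# directory with shutil.make_archive before uploading. The helper name and
# paths are hypothetical; the real code goes through the port's filesystem
# wrapper rather than the stdlib directly.
import os
import shutil
import tempfile

def make_results_archive(results_directory):
    temp = tempfile.mkdtemp()
    archive_base = os.path.join(temp, 'test-archive')
    # make_archive appends the '.zip' suffix and returns the archive's path.
    return shutil.make_archive(archive_base, 'zip', results_directory)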
def run(self, args):
    """Run the tests and return a RunDetails object with the results."""
    num_failed_uploads = 0

    total_tests = set()
    aggregate_test_names = set()
    aggregate_tests = set()
    tests_to_run_by_device = {}

    device_type_list = self._port.supported_device_types()
    for device_type in device_type_list:
        for_device_type = u'for {} '.format(device_type) if device_type else ''
        self._printer.write_update(u'Collecting tests {}...'.format(for_device_type))
        try:
            paths, test_names = self._collect_tests(args, device_type=device_type)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update(u'Parsing expectations {}...'.format(for_device_type))
        self._expectations[device_type] = test_expectations.TestExpectations(
            self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
        self._expectations[device_type].parse_all_expectations()

        aggregate_test_names.update(test_names)
        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)

        total_tests.update(tests_to_run)
        total_tests.update(tests_to_skip)

        tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests]
        aggregate_tests.update(tests_to_run)

    # If a test is marked skipped, but was explicitly requested, run it anyway.
    if self._options.skipped != 'always':
        for arg in args:
            if arg in total_tests and arg not in aggregate_tests:
                tests_to_run_by_device[device_type_list[0]].append(arg)
                aggregate_tests.add(arg)

    tests_to_skip = total_tests - aggregate_tests

    self._printer.print_found(len(aggregate_test_names), len(aggregate_tests),
                              self._options.repeat_each, self._options.iterations)
    start_time = time.time()

    # Check to make sure we're not skipping every test.
    if not sum([len(tests) for tests in itervalues(tests_to_run_by_device)]):
        _log.critical('No tests to run.')
        return test_run_results.RunDetails(exit_code=-1)

    needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test))
                     for tests in itervalues(tests_to_run_by_device) for test in tests)
    needs_web_platform_test_server = any(self._needs_web_platform_test(test)
                                         for tests in itervalues(tests_to_run_by_device) for test in tests)
    needs_websockets = any(self._is_websocket_test(test)
                           for tests in itervalues(tests_to_run_by_device) for test in tests)
    self._runner = LayoutTestRunner(self._options, self._port, self._printer,
                                    self._results_directory, self._test_is_slow,
                                    needs_http=needs_http,
                                    needs_web_platform_test_server=needs_web_platform_test_server,
                                    needs_websockets=needs_websockets)

    self._printer.write_update("Checking build ...")
    if not self._port.check_build():
        _log.error("Build check failed")
        return test_run_results.RunDetails(exit_code=-1)

    if self._options.clobber_old_results:
        self._clobber_old_results()

    # Create the output directory if it doesn't already exist.
    self._port.host.filesystem.maybe_make_directory(self._results_directory)

    initial_results = None
    retry_results = None
    enabled_pixel_tests_in_retry = False

    max_child_processes_for_run = 1
    child_processes_option_value = self._options.child_processes
    uploads = []

    for device_type in device_type_list:
        self._runner._test_is_slow = lambda test_file: self._test_is_slow(test_file, device_type=device_type)
        self._options.child_processes = min(
            self._port.max_child_processes(device_type=device_type),
            int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

        _log.info('')
        if not self._options.child_processes:
            _log.info('Skipping {} because {} is not available'.format(
                pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
            _log.info('')
            continue

        max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

        self._printer.print_baseline_search_path(device_type=device_type)

        _log.info(u'Running {}{}'.format(
            pluralize(len(tests_to_run_by_device[device_type]), 'test'),
            u' for {}'.format(device_type) if device_type else ''))
        _log.info('')

        start_time_for_device = time.time()
        if not tests_to_run_by_device[device_type]:
            continue
        if not self._set_up_run(tests_to_run_by_device[device_type], device_type=device_type):
            return test_run_results.RunDetails(exit_code=-1)

        configuration = self._port.configuration_for_upload(self._port.target_host(0))
        if not configuration.get('flavor', None):
            # The --result-report-flavor argument should override wk1/wk2.
            configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
        temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(
            tests_to_run_by_device[device_type], tests_to_skip, device_type=device_type)

        if self._options.report_urls:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing upload data ...')

            upload = Upload(
                suite='layout-tests',
                configuration=configuration,
                details=Upload.create_details(options=self._options),
                commits=self._port.commits_for_upload(),
                timestamp=start_time,
                run_stats=Upload.create_run_stats(
                    start_time=start_time_for_device,
                    end_time=time.time(),
                    tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                ),
                results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
            )
            for hostname in self._options.report_urls:
                self._printer.write_update('Uploading to {} ...'.format(hostname))
                if not upload.upload(hostname, log_line_func=self._printer.writeln):
                    num_failed_uploads += 1
                else:
                    uploads.append(upload)
            self._printer.writeln('Uploads completed!')

        initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
        retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
        enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

        if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
            break

    # Used for final logging; max_child_processes_for_run is most relevant here.
    self._options.child_processes = max_child_processes_for_run

    self._runner.stop_servers()

    end_time = time.time()
    result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

    if self._options.report_urls and uploads:
        self._printer.writeln('\n')
        self._printer.write_update('Preparing to upload test archive ...')

        with self._filesystem.mkdtemp() as temp:
            archive = self._filesystem.join(temp, 'test-archive')
            shutil.make_archive(archive, 'zip', self._results_directory)

            for upload in uploads:
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                    if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                        num_failed_uploads += 1

    if num_failed_uploads:
        result.exit_code = -1
    return result
def run(self, args):
    """Run the tests and return a RunDetails object with the results."""
    total_tests = set()
    aggregate_test_names = set()
    aggregate_tests = set()
    tests_to_run_by_device = {}

    device_type_list = self._port.supported_device_types()
    for device_type in device_type_list:
        for_device_type = 'for {} '.format(device_type) if device_type else ''
        self._printer.write_update('Collecting tests {}...'.format(for_device_type))
        try:
            paths, test_names = self._collect_tests(args, device_type=device_type)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update('Parsing expectations {}...'.format(for_device_type))
        self._expectations[device_type] = test_expectations.TestExpectations(
            self._port, test_names,
            force_expectations_pass=self._options.force, device_type=device_type)
        self._expectations[device_type].parse_all_expectations()

        aggregate_test_names.update(test_names)
        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)

        total_tests.update(tests_to_run)
        total_tests.update(tests_to_skip)

        tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests]
        aggregate_tests.update(tests_to_run)

    tests_to_skip = total_tests - aggregate_tests

    self._printer.print_found(len(aggregate_test_names), len(aggregate_tests),
                              self._options.repeat_each, self._options.iterations)
    start_time = time.time()

    # Check to make sure we're not skipping every test.
    if not sum([len(tests) for tests in tests_to_run_by_device.itervalues()]):
        _log.critical('No tests to run.')
        return test_run_results.RunDetails(exit_code=-1)

    needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test))
                     for tests in tests_to_run_by_device.itervalues() for test in tests)
    needs_web_platform_test_server = any(self._needs_web_platform_test(test)
                                         for tests in tests_to_run_by_device.itervalues() for test in tests)
    needs_websockets = any(self._is_websocket_test(test)
                           for tests in tests_to_run_by_device.itervalues() for test in tests)
    self._runner = LayoutTestRunner(self._options, self._port, self._printer,
                                    self._results_directory, self._test_is_slow,
                                    needs_http=needs_http,
                                    needs_web_platform_test_server=needs_web_platform_test_server,
                                    needs_websockets=needs_websockets)

    self._printer.write_update("Checking build ...")
    if not self._port.check_build():
        _log.error("Build check failed")
        return test_run_results.RunDetails(exit_code=-1)

    if self._options.clobber_old_results:
        self._clobber_old_results()

    # Create the output directory if it doesn't already exist.
    self._port.host.filesystem.maybe_make_directory(self._results_directory)

    initial_results = None
    retry_results = None
    enabled_pixel_tests_in_retry = False

    max_child_processes_for_run = 1
    child_processes_option_value = self._options.child_processes

    for device_type in device_type_list:
        self._runner._test_is_slow = lambda test_file: self._test_is_slow(test_file, device_type=device_type)
        self._options.child_processes = min(
            self._port.max_child_processes(device_type=device_type),
            int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

        _log.info('')
        if not self._options.child_processes:
            _log.info('Skipping {} because {} is not available'.format(
                pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
            _log.info('')
            continue

        max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

        self._printer.print_baseline_search_path(device_type=device_type)

        _log.info('Running {}{}'.format(
            pluralize(len(tests_to_run_by_device[device_type]), 'test'),
            ' for {}'.format(str(device_type)) if device_type else ''))
        _log.info('')

        if not tests_to_run_by_device[device_type]:
            continue
        if not self._set_up_run(tests_to_run_by_device[device_type], device_type=device_type):
            return test_run_results.RunDetails(exit_code=-1)

        temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(
            tests_to_run_by_device[device_type], tests_to_skip, device_type=device_type)
        initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
        retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
        enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

    # Used for final logging; max_child_processes_for_run is most relevant here.
    self._options.child_processes = max_child_processes_for_run

    self._runner.stop_servers()

    end_time = time.time()
    return self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)
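# The worker-count clamp used in the device loops above, isolated for
# clarity: honor the --child-processes value when given, fall back to the
# port's default, and never exceed the per-device maximum. Names are
# illustrative, not from the source.
def clamp_child_processes(option_value, default_for_device, max_for_device):
    return min(max_for_device, int(option_value or default_for_device))

assert clamp_child_processes(None, 4, 8) == 4   # no flag given: use the default
assert clamp_child_processes('16', 4, 8) == 8   # flag exceeds the device max: clamp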
def run(self, args):
    """Run the tests and return a RunDetails object with the results."""
    self._printer.write_update("Collecting tests ...")
    try:
        paths, test_names = self._collect_tests(args)
    except IOError:
        # This is raised if --test-list doesn't exist
        return test_run_results.RunDetails(exit_code=-1)

    self._printer.write_update("Parsing expectations ...")
    self._expectations = test_expectations.TestExpectations(
        self._port, test_names, force_expectations_pass=self._options.force)
    self._expectations.parse_all_expectations()

    tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
    self._printer.print_found(len(test_names), len(tests_to_run),
                              self._options.repeat_each, self._options.iterations)
    start_time = time.time()

    # Check to make sure we're not skipping every test.
    if not tests_to_run:
        _log.critical('No tests to run.')
        return test_run_results.RunDetails(exit_code=-1)

    # Look for tests with custom device requirements.
    test_device_mapping = defaultdict(list)
    for test_file in tests_to_run:
        test_device_mapping[self._custom_device_for_test(test_file) or self._port.DEFAULT_DEVICE_TYPE].append(test_file)

    # Order device types from most specific to least specific, in the hope that some of the more
    # specific device types will also satisfy the less specific ones.
    device_type_order = []
    types_with_family = []
    remaining_types = []
    for device_type in test_device_mapping.iterkeys():
        if device_type and device_type.hardware_family and device_type.hardware_type:
            device_type_order.append(device_type)
        elif device_type and device_type.hardware_family:
            types_with_family.append(device_type)
        else:
            remaining_types.append(device_type)
    device_type_order.extend(types_with_family + remaining_types)

    needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for test in tests_to_run)
    needs_web_platform_test_server = any(self._needs_web_platform_test(test) for test in tests_to_run)
    needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)
    self._runner = LayoutTestRunner(self._options, self._port, self._printer,
                                    self._results_directory, self._test_is_slow,
                                    needs_http=needs_http,
                                    needs_web_platform_test_server=needs_web_platform_test_server,
                                    needs_websockets=needs_websockets)

    self._printer.write_update("Checking build ...")
    if not self._port.check_build():
        _log.error("Build check failed")
        return test_run_results.RunDetails(exit_code=-1)

    if self._options.clobber_old_results:
        self._clobber_old_results()

    # Create the output directory if it doesn't already exist.
    self._port.host.filesystem.maybe_make_directory(self._results_directory)

    initial_results = None
    retry_results = None
    enabled_pixel_tests_in_retry = False

    child_processes_option_value = self._options.child_processes

    while device_type_order:
        device_type = device_type_order[0]
        tests = test_device_mapping[device_type]
        del device_type_order[0]

        self._options.child_processes = min(
            self._port.max_child_processes(device_type=device_type),
            int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

        _log.info('')
        if not self._options.child_processes:
            _log.info('Skipping {} because {} is not available'.format(
                pluralize(len(test_device_mapping[device_type]), 'test'), str(device_type)))
            _log.info('')
            continue

        # This loop looks for any less-specific device types which match the current device type.
        index = 0
        while index < len(device_type_order):
            if device_type_order[index] == device_type:
                tests.extend(test_device_mapping[device_type_order[index]])
                # Remove device types from device_type_order once tests associated with that type have been claimed.
                del device_type_order[index]
            else:
                index += 1

        _log.info('Running {}{}'.format(
            pluralize(len(tests), 'test'),
            ' for {}'.format(str(device_type)) if device_type else ''))
        _log.info('')

        if not self._set_up_run(tests, device_type):
            return test_run_results.RunDetails(exit_code=-1)

        temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(tests, tests_to_skip)
        initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
        retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
        enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

    self._runner.stop_servers()

    end_time = time.time()
    return self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)
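# A runnable sketch of the most-specific-first ordering above, with plain
# (hardware_family, hardware_type) tuples standing in for real device-type
# objects; the sample values are hypothetical.
def order_most_specific_first(device_types):
    full, family_only, rest = [], [], []
    for device_type in device_types:
        family, hardware = device_type if device_type else (None, None)
        if family and hardware:
            full.append(device_type)
        elif family:
            family_only.append(device_type)
        else:
            rest.append(device_type)
    return full + family_only + rest

order = order_most_specific_first([(None, None), ('iPad', None), ('iPad', 'iPad Pro')])
assert order == [('iPad', 'iPad Pro'), ('iPad', None), (None, None)]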