def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying):
    """Shard and run test_inputs across up to num_workers worker processes.

    Args:
        expectations: the TestExpectations-style object used to classify results.
        test_inputs: the tests to actually execute.
        tests_to_skip: tests recorded as expected SKIP results without running.
        num_workers: requested worker count (clamped to the number of shards).
        needs_http / needs_websockets: whether the run needs those servers.
        retrying: True when this is a retry pass (suppresses the expected-results banner).

    Returns the ResultSummary early on --dry-run; otherwise falls through after
    the worker pool finishes (or is interrupted).
    """
    self._expectations = expectations
    self._test_inputs = test_inputs
    self._needs_http = needs_http
    self._needs_websockets = needs_websockets
    self._retrying = retrying

    # The summary is sized for every test we were handed, including the
    # ones we skip outright below.
    result_summary = ResultSummary(self._expectations, len(test_inputs) + len(tests_to_skip))
    self._current_result_summary = result_summary
    self._remaining_locked_shards = []
    self._has_http_lock = False
    self._printer.num_tests = len(test_inputs)
    self._printer.num_completed = 0

    if not retrying:
        self._printer.print_expected(result_summary, self._expectations.get_tests_with_result_type)

    # Record each skipped test once as an expected SKIP result.
    for test_name in set(tests_to_skip):
        result = test_results.TestResult(test_name)
        result.type = test_expectations.SKIP
        result_summary.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

    self._printer.write_update('Sharding tests ...')
    locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, int(self._options.child_processes), self._options.fully_parallel)

    # FIXME: We don't have a good way to coordinate the workers so that
    # they don't try to run the shards that need a lock if we don't actually
    # have the lock. The easiest solution at the moment is to grab the
    # lock at the beginning of the run, and then run all of the locked
    # shards first. This minimizes the time spent holding the lock, but
    # means that we won't be running tests while we're waiting for the lock.
    # If this becomes a problem in practice we'll need to change this.
    all_shards = locked_shards + unlocked_shards
    self._remaining_locked_shards = locked_shards
    if self._port.requires_http_server() or (locked_shards and self._options.http):
        self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))

    # No point spawning more workers than there are shards to hand out.
    num_workers = min(num_workers, len(all_shards))
    self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

    if self._options.dry_run:
        return result_summary

    self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))

    try:
        with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
            pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
    # 'as' form replaces the Python-2-only comma form; works on 2.6+ and 3.x.
    except TestRunInterruptedException as e:
        _log.warning(e.reason)
        result_summary.interrupted = True
def _prepare_lists(self, paths, test_names):
    """Split test_names into skipped and runnable tests and seed a summary.

    Skipped tests (including those assigned to other chunks) are added to
    the returned ResultSummary as expected SKIP results.

    Returns a (summary, tests_to_run) tuple.
    """
    skipped = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
    runnable = [name for name in test_names if name not in skipped]

    # Create a sorted list of test files so the subset chunk,
    # if used, contains alphabetically consecutive tests.
    order = self._options.order
    if order == 'natural':
        runnable.sort(key=self._port.test_key)
    elif order == 'random':
        random.shuffle(runnable)

    runnable, other_chunk_tests = self._finder.split_into_chunks(runnable)
    self._expectations.add_skipped_tests(other_chunk_tests)
    skipped.update(other_chunk_tests)

    # Each runnable test executes repeat_each * iterations times; skipped
    # tests count once apiece.
    total_runs = len(runnable) * self._options.repeat_each * self._options.iterations + len(skipped)
    summary = ResultSummary(self._expectations, total_runs)
    for name in set(skipped):
        skip_result = test_results.TestResult(name)
        skip_result.type = test_expectations.SKIP
        summary.add(skip_result, expected=True, test_is_slow=self._test_is_slow(name))
    return summary, runnable