def RunTests(tests, runner_factory, devices, shard=True,
             test_timeout=DEFAULT_TIMEOUT, setup_timeout=DEFAULT_TIMEOUT,
             num_retries=2, max_per_run=256):
  """Run all tests on attached devices, retrying tests that don't pass.

  Args:
    tests: List of tests to run.
    runner_factory: Callable that takes a device and index and returns a
        TestRunner object.
    devices: List of attached devices.
    shard: True if we should shard, False if we should replicate tests.
      - Sharding tests will distribute tests across all test runners through a
        shared test collection.
      - Replicating tests will copy all tests to each test runner through a
        unique test collection for each test runner.
    test_timeout: Watchdog timeout in seconds for running tests.
    setup_timeout: Watchdog timeout in seconds for creating and cleaning up
        test runners.
    num_retries: Number of retries for a test.
    max_per_run: Maximum number of tests to run in any group.

  Returns:
    A tuple of (base_test_result.TestRunResults object, exit code).
  """
  if not tests:
    logging.critical('No tests to run.')
    return (base_test_result.TestRunResults(), constants.ERROR_EXIT_CODE)

  tests_expanded = ApplyMaxPerRun(tests, max_per_run)
  if shard:
    # Generate a shared TestCollection object for all test runners, so they
    # draw from a common pool of tests.
    shared_test_collection = test_collection.TestCollection(
        [_Test(t) for t in tests_expanded])
    test_collection_factory = lambda: shared_test_collection
    tag_results_with_device = False
    log_string = 'sharded across devices'
  else:
    # Generate a unique TestCollection object for each test runner, but use
    # the same set of tests.
    test_collection_factory = lambda: test_collection.TestCollection(
        [_Test(t) for t in tests_expanded])
    tag_results_with_device = True
    log_string = 'replicated on each device'

  logging.info('Will run %d tests (%s): %s',
               len(tests_expanded), log_string, str(tests_expanded))
  runners = _CreateRunners(runner_factory, devices, setup_timeout)
  try:
    return _RunAllTests(runners, test_collection_factory,
                        num_retries, test_timeout, tag_results_with_device)
  finally:
    # Always tear runners down, but never let teardown failures mask the
    # test-run result (or a pending exception) propagating from the try.
    try:
      _TearDownRunners(runners, setup_timeout)
    except device_errors.DeviceUnreachableError as e:
      logging.warning('Device unresponsive during TearDown: [%s]', e)
    except Exception as e:  # pylint: disable=broad-except
      # Fix: use logging's lazy %-style arguments instead of eager '%'
      # formatting, matching the other logging calls in this function.
      logging.error('Unexpected exception caught during TearDown: %s', e)
def RunTests(self):
  """Runs the tests, retrying failed tests up to self._env.max_tries times.

  Returns:
    A list of base_test_result.TestRunResults objects, one per try.
  """
  tests = self._GetTests()

  @handle_shard_failures
  def run_tests_on_device(dev, tests, results):
    # |tests| is either a plain list (replicated mode) or a shared
    # test_collection.TestCollection all devices drain from (sharded mode).
    for test in tests:
      result = None
      try:
        result = self._RunTest(dev, test)
        if isinstance(result, base_test_result.BaseTestResult):
          results.AddResult(result)
        elif isinstance(result, list):
          results.AddResults(result)
        else:
          raise Exception('Unexpected result type: %s' %
                          type(result).__name__)
      except:
        # On any failure, return the test to the shared pool so another
        # device can pick it up, then re-raise for the shard handler.
        if isinstance(tests, test_collection.TestCollection):
          tests.add(test)
        raise
      finally:
        # Always mark the dequeued test handled so the collection can drain.
        if isinstance(tests, test_collection.TestCollection):
          tests.test_completed()
    logging.info('Finished running tests on this device.')

  tries = 0
  results = []
  while tries < self._env.max_tries and tests:
    logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries)
    logging.info('Will run %d tests on %d devices: %s',
                 len(tests), len(self._env.devices),
                 ', '.join(str(d) for d in self._env.devices))
    for t in tests:
      logging.debug(' %s', t)

    try_results = base_test_result.TestRunResults()
    if self._ShouldShard():
      # Sharded: one shared collection, devices pull work in parallel.
      tc = test_collection.TestCollection(self._CreateShards(tests))
      self._env.parallel_devices.pMap(
          run_tests_on_device, tc, try_results).pGet(None)
    else:
      # Replicated: every device runs the full test list.
      self._env.parallel_devices.pMap(
          run_tests_on_device, tests, try_results).pGet(None)

    results.append(try_results)
    tries += 1
    # Only the tests that still need a result go into the next try.
    tests = self._GetTestsToRetry(tests, try_results)

    logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries)
    if tests:
      logging.info('%d failed tests remain.', len(tests))
    else:
      logging.info('All tests completed.')

  return results
def _RunTests(mock_runner, tests):
  """Pushes |tests| through the dispatcher queue with |mock_runner|.

  Wraps each test, drains the queue via _RunTestsFromQueue (no watchdog
  deadline, 2 retries), and merges the per-run results into a single
  base_test_result.TestRunResults object, which is returned.
  """
  raw_results = []
  queue = test_collection.TestCollection(
      [test_dispatcher._Test(t) for t in tests])
  test_dispatcher._RunTestsFromQueue(
      mock_runner, queue, raw_results, watchdog_timer.WatchdogTimer(None), 2)
  merged = base_test_result.TestRunResults()
  for partial in raw_results:
    merged.AddTestRunResults(partial)
  return merged
def RunTests(self):
  """Runs the tests with retries, NOTRUN pre-population and SIGTERM handling.

  Returns:
    A list of base_test_result.TestRunResults objects, one per try.
  """
  tests = self._GetTests()
  # Set when SIGTERM is received so in-flight device threads bail out.
  exit_now = threading.Event()

  @local_device_environment.handle_shard_failures
  def run_tests_on_device(dev, tests, results):
    for test in tests:
      if exit_now.isSet():
        thread.exit()
      result = None
      rerun = None
      try:
        # Re-run the test if the system (not the test itself) crashed;
        # |rerun| carries the test to requeue, if any.
        result, rerun = crash_handler.RetryOnSystemCrash(
            lambda d, t=test: self._RunTest(d, t),
            device=dev)
        if isinstance(result, base_test_result.BaseTestResult):
          results.AddResult(result)
        elif isinstance(result, list):
          results.AddResults(result)
        else:
          raise Exception('Unexpected result type: %s' %
                          type(result).__name__)
      except Exception as e:  # pylint: disable=broad-except
        if isinstance(tests, test_collection.TestCollection):
          rerun = test
        if (isinstance(e, device_errors.DeviceUnreachableError)
            or not isinstance(e, base_error.BaseError)):
          # If we get a device error but believe the device is still
          # reachable, attempt to continue using it. Otherwise, raise
          # the exception and terminate this run_tests_on_device call.
          raise
      finally:
        # Requeue the test (if flagged) and mark the dequeued slot done so
        # the shared collection can drain.
        if isinstance(tests, test_collection.TestCollection):
          if rerun:
            tests.add(rerun)
          tests.test_completed()
    logging.info('Finished running tests on this device.')

  def stop_tests(_signum, _frame):
    # SIGTERM handler: flag the threads, then abort the run loop.
    logging.critical('Received SIGTERM. Stopping test execution.')
    exit_now.set()
    raise TestsTerminated()

  try:
    with signal_handler.SignalHandler(signal.SIGTERM, stop_tests):
      tries = 0
      results = []
      while tries < self._env.max_tries and tests:
        logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries)
        logging.info('Will run %d tests on %d devices: %s',
                     len(tests), len(self._env.devices),
                     ', '.join(str(d) for d in self._env.devices))
        for t in tests:
          logging.debug(' %s', t)

        try_results = base_test_result.TestRunResults()
        test_names = (self._GetUniqueTestName(t) for t in tests)
        # Pre-populate every concrete test name as NOTRUN so tests that
        # never get a chance to run are still reported; wildcard filter
        # entries ('*' suffix) are skipped.
        try_results.AddResults(
            base_test_result.BaseTestResult(
                t, base_test_result.ResultType.NOTRUN)
            for t in test_names if not t.endswith('*'))

        try:
          if self._ShouldShard():
            tc = test_collection.TestCollection(self._CreateShards(tests))
            self._env.parallel_devices.pMap(
                run_tests_on_device, tc, try_results).pGet(None)
          else:
            self._env.parallel_devices.pMap(
                run_tests_on_device, tests, try_results).pGet(None)
        except TestsTerminated:
          # SIGTERM: convert anything still unresolved into TIMEOUT with a
          # log explaining the termination.
          for unknown_result in try_results.GetUnknown():
            try_results.AddResult(
                base_test_result.BaseTestResult(
                    unknown_result.GetName(),
                    base_test_result.ResultType.TIMEOUT,
                    log=_SIGTERM_TEST_LOG))
          raise
        finally:
          # Record this try's results even when terminating early.
          results.append(try_results)

        tries += 1
        tests = self._GetTestsToRetry(tests, try_results)

        logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries)
        if tests:
          logging.info('%d failed tests remain.', len(tests))
        else:
          logging.info('All tests completed.')
  except TestsTerminated:
    pass

  return results
def RunTests(self):
  """Runs the tests with retries, merging results across tries.

  Returns:
    A base_test_result.TestRunResults object with PASS/SKIP results from
    the try in which they occurred, the latest failing result for tests
    that never passed, and UNKNOWN for tests with no result at all.
  """
  tests = self._GetTests()

  @handle_shard_failures
  def run_tests_on_device(dev, tests, results):
    # |tests| is either a plain list (replicated mode) or a shared
    # test_collection.TestCollection (sharded mode).
    for test in tests:
      result = None
      try:
        result = self._RunTest(dev, test)
        if isinstance(result, base_test_result.BaseTestResult):
          results.AddResult(result)
        elif isinstance(result, list):
          results.AddResults(result)
        else:
          raise Exception('Unexpected result type: %s' %
                          type(result).__name__)
      except:
        # Return the failed test to the shared pool, then re-raise for the
        # shard-failure handler.
        if isinstance(tests, test_collection.TestCollection):
          tests.add(test)
        raise
      finally:
        if isinstance(tests, test_collection.TestCollection):
          tests.test_completed()
    logging.info('Finished running tests on this device.')

  tries = 0
  results = base_test_result.TestRunResults()
  # Latest failing result per test name; only consulted for tests that
  # never produced a passing result.
  all_fail_results = {}
  while tries < self._env.max_tries and tests:
    logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries)
    logging.info('Will run %d tests on %d devices: %s',
                 len(tests), len(self._env.devices),
                 ', '.join(str(d) for d in self._env.devices))
    for t in tests:
      logging.debug(' %s', t)

    try_results = base_test_result.TestRunResults()
    if self._ShouldShard():
      tc = test_collection.TestCollection(self._CreateShards(tests))
      self._env.parallel_devices.pMap(
          run_tests_on_device, tc, try_results).pGet(None)
    else:
      self._env.parallel_devices.pMap(
          run_tests_on_device, tests, try_results).pGet(None)

    for result in try_results.GetAll():
      if result.GetType() in (base_test_result.ResultType.PASS,
                              base_test_result.ResultType.SKIP):
        results.AddResult(result)
      else:
        all_fail_results[result.GetName()] = result

    results_names = set(r.GetName() for r in results.GetAll())

    def has_test_result(name):
      # When specifying a test filter, names can contain trailing wildcards.
      # See local_device_gtest_run._ExtractTestsFromFilter()
      if name.endswith('*'):
        return any(fnmatch.fnmatch(n, name) for n in results_names)
      return name in results_names

    # Retry only tests that have no passing/skipped result yet.
    tests = [t for t in tests if not has_test_result(self._GetTestName(t))]
    tries += 1
    logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries)
    if tests:
      logging.info('%d failed tests remain.', len(tests))
    else:
      logging.info('All tests completed.')

  # Split the never-passed tests into those with a recorded failure and
  # those with no result at all (UNKNOWN).
  all_unknown_test_names = set(self._GetTestName(t) for t in tests)
  all_failed_test_names = set(all_fail_results.iterkeys())

  unknown_tests = all_unknown_test_names.difference(all_failed_test_names)
  failed_tests = all_failed_test_names.intersection(all_unknown_test_names)

  if unknown_tests:
    results.AddResults(
        base_test_result.BaseTestResult(
            u, base_test_result.ResultType.UNKNOWN)
        for u in unknown_tests)
  if failed_tests:
    results.AddResults(all_fail_results[f] for f in failed_tests)
  return results
def RunTests(self, results):
  """Runs the tests with retries, device recovery and SIGTERM handling.

  Args:
    results: A list that one base_test_result.TestRunResults object per try
        is appended to as soon as that try's test names are known.
  """
  tests = self._GetTests()
  # Set when SIGTERM is received so in-flight device threads bail out.
  exit_now = threading.Event()

  @local_device_environment.handle_shard_failures
  def run_tests_on_device(dev, tests, results):
    # Count back-to-back non-timeout device errors; reset on any success.
    consecutive_device_errors = 0
    for test in tests:
      if exit_now.isSet():
        thread.exit()
      result = None
      rerun = None
      try:
        # Re-run the test if the system (not the test itself) crashed;
        # |rerun| carries the test to requeue, if any.
        result, rerun = crash_handler.RetryOnSystemCrash(
            lambda d, t=test: self._RunTest(d, t),
            device=dev)
        consecutive_device_errors = 0
        if isinstance(result, base_test_result.BaseTestResult):
          results.AddResult(result)
        elif isinstance(result, list):
          results.AddResults(result)
        else:
          raise Exception('Unexpected result type: %s' %
                          type(result).__name__)
      except device_errors.CommandTimeoutError:
        # Test timeouts don't count as device errors for the purpose
        # of bad device detection.
        consecutive_device_errors = 0
        # A grouped test (list) records one TIMEOUT per member.
        if isinstance(test, list):
          results.AddResults(
              base_test_result.BaseTestResult(
                  self._GetUniqueTestName(t),
                  base_test_result.ResultType.TIMEOUT)
              for t in test)
        else:
          results.AddResult(
              base_test_result.BaseTestResult(
                  self._GetUniqueTestName(test),
                  base_test_result.ResultType.TIMEOUT))
      except Exception as e:  # pylint: disable=broad-except
        if isinstance(tests, test_collection.TestCollection):
          rerun = test
        if (isinstance(e, device_errors.DeviceUnreachableError)
            or not isinstance(e, base_error.BaseError)):
          # If we get a device error but believe the device is still
          # reachable, attempt to continue using it. Otherwise, raise
          # the exception and terminate this run_tests_on_device call.
          raise
        consecutive_device_errors += 1
        if consecutive_device_errors >= 3:
          # We believe the device is still reachable and may still be usable,
          # but if it fails repeatedly, we shouldn't attempt to keep using
          # it.
          logging.error('Repeated failures on device %s. Abandoning.',
                        str(dev))
          raise
        logging.exception(
            'Attempting to continue using device %s despite failure (%d/3).',
            str(dev), consecutive_device_errors)
      finally:
        # Requeue the test (if flagged) and mark the dequeued slot done so
        # the shared collection can drain.
        if isinstance(tests, test_collection.TestCollection):
          if rerun:
            tests.add(rerun)
          tests.test_completed()
    logging.info('Finished running tests on this device.')

  def stop_tests(_signum, _frame):
    # SIGTERM handler: flag the threads, then abort the run loop.
    logging.critical('Received SIGTERM. Stopping test execution.')
    exit_now.set()
    raise TestsTerminated()

  try:
    with signal_handler.AddSignalHandler(signal.SIGTERM, stop_tests):
      tries = 0
      while tries < self._env.max_tries and tests:
        logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries)
        if tries > 0 and self._env.recover_devices:
          # Optional device recovery between tries: always on L MR1
          # (known issue), otherwise only before the final attempt.
          if any(d.build_version_sdk == version_codes.LOLLIPOP_MR1
                 for d in self._env.devices):
            logging.info(
                'Attempting to recover devices due to known issue on L MR1. '
                'See crbug.com/787056 for details.')
            self._env.parallel_devices.pMap(
                device_recovery.RecoverDevice, None)
          elif tries + 1 == self._env.max_tries:
            logging.info(
                'Attempting to recover devices prior to last test attempt.')
            self._env.parallel_devices.pMap(
                device_recovery.RecoverDevice, None)
        logging.info('Will run %d tests on %d devices: %s',
                     len(tests), len(self._env.devices),
                     ', '.join(str(d) for d in self._env.devices))
        for t in tests:
          logging.debug(' %s', t)

        try_results = base_test_result.TestRunResults()
        test_names = (self._GetUniqueTestName(t) for t in tests)
        # Pre-populate every concrete test name as NOTRUN; wildcard filter
        # entries ('*' suffix) are skipped.
        try_results.AddResults(
            base_test_result.BaseTestResult(
                t, base_test_result.ResultType.NOTRUN)
            for t in test_names if not t.endswith('*'))

        # As soon as we know the names of the tests, we populate |results|.
        # The tests in try_results will have their results updated by
        # try_results.AddResult() as they are run.
        results.append(try_results)

        try:
          if self._ShouldShard():
            tc = test_collection.TestCollection(self._CreateShards(tests))
            self._env.parallel_devices.pMap(
                run_tests_on_device, tc, try_results).pGet(None)
          else:
            self._env.parallel_devices.pMap(
                run_tests_on_device, tests, try_results).pGet(None)
        except TestsTerminated:
          # SIGTERM: convert anything still unresolved into TIMEOUT with a
          # log explaining the termination.
          for unknown_result in try_results.GetUnknown():
            try_results.AddResult(
                base_test_result.BaseTestResult(
                    unknown_result.GetName(),
                    base_test_result.ResultType.TIMEOUT,
                    log=_SIGTERM_TEST_LOG))
          raise

        tries += 1
        tests = self._GetTestsToRetry(tests, try_results)

        logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries)
        if tests:
          logging.info('%d failed tests remain.', len(tests))
        else:
          logging.info('All tests completed.')
  except TestsTerminated:
    pass
def RunTests(self):
  """Runs the tests with retries, tolerating per-shard device failures.

  Returns:
    A base_test_result.TestRunResults object with PASS/SKIP results from
    the try in which they occurred, the latest failing result for tests
    that never passed, and UNKNOWN for tests with no result at all.
  """
  tests = self._GetTests()

  def run_tests_on_device(dev, tests, results):
    # |tests| is either a plain list (replicated mode) or a shared
    # test_collection.TestCollection (sharded mode).
    for test in tests:
      try:
        result = self._RunTest(dev, test)
        if isinstance(result, base_test_result.BaseTestResult):
          results.AddResult(result)
        elif isinstance(result, list):
          results.AddResults(result)
        else:
          raise Exception('Unexpected result type: %s' %
                          type(result).__name__)
      except:
        # Return the failed test to the shared pool, then re-raise.
        if isinstance(tests, test_collection.TestCollection):
          tests.add(test)
        raise
      finally:
        if isinstance(tests, test_collection.TestCollection):
          tests.test_completed()
    logging.info('Finished running tests on this device.')

  tries = 0
  results = base_test_result.TestRunResults()
  # Latest failing result per test name.
  all_fail_results = {}
  while tries < self._env.max_tries and tests:
    logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries)
    logging.info('Will run %d tests on %d devices: %s',
                 len(tests), len(self._env.devices),
                 ', '.join(str(d) for d in self._env.devices))
    for t in tests:
      logging.debug(' %s', t)

    try:
      try_results = base_test_result.TestRunResults()
      if self._ShouldShard():
        tc = test_collection.TestCollection(self._CreateShards(tests))
        self._env.parallel_devices.pMap(
            run_tests_on_device, tc, try_results).pGet(None)
      else:
        self._env.parallel_devices.pMap(
            run_tests_on_device, tests, try_results).pGet(None)
    # Device-level failures terminate the shard but not the whole run;
    # the affected tests simply stay unresolved and are retried.
    except device_errors.CommandFailedError:
      logging.exception('Shard terminated: command failed')
    except device_errors.CommandTimeoutError:
      logging.exception('Shard terminated: command timed out')
    except device_errors.DeviceUnreachableError:
      logging.exception('Shard terminated: device became unreachable')

    for result in try_results.GetAll():
      if result.GetType() in (base_test_result.ResultType.PASS,
                              base_test_result.ResultType.SKIP):
        results.AddResult(result)
      else:
        all_fail_results[result.GetName()] = result

    results_names = set(r.GetName() for r in results.GetAll())
    # Retry only tests with no passing/skipped result yet.
    tests = [t for t in tests if self._GetTestName(t) not in results_names]
    tries += 1
    logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries)
    if tests:
      logging.info('%d failed tests remain.', len(tests))
    else:
      logging.info('All tests completed.')

  # Split the never-passed tests into those with a recorded failure and
  # those with no result at all (UNKNOWN).
  all_unknown_test_names = set(self._GetTestName(t) for t in tests)
  all_failed_test_names = set(all_fail_results.iterkeys())

  unknown_tests = all_unknown_test_names.difference(all_failed_test_names)
  failed_tests = all_failed_test_names.intersection(all_unknown_test_names)

  if unknown_tests:
    results.AddResults(
        base_test_result.BaseTestResult(
            u, base_test_result.ResultType.UNKNOWN)
        for u in unknown_tests)
  if failed_tests:
    results.AddResults(all_fail_results[f] for f in failed_tests)
  return results
def RunTests(self):
  """Runs the tests with retries, UNKNOWN pre-population and SIGTERM handling.

  Returns:
    A list of base_test_result.TestRunResults objects, one per try.
  """
  tests = self._GetTests()
  # Set when SIGTERM is received so in-flight device threads bail out.
  exit_now = threading.Event()

  @local_device_environment.handle_shard_failures
  def run_tests_on_device(dev, tests, results):
    for test in tests:
      if exit_now.isSet():
        thread.exit()
      result = None
      try:
        result = self._RunTest(dev, test)
        if isinstance(result, base_test_result.BaseTestResult):
          results.AddResult(result)
        elif isinstance(result, list):
          results.AddResults(result)
        else:
          raise Exception('Unexpected result type: %s' %
                          type(result).__name__)
      except:
        # Return the failed test to the shared pool, then re-raise for the
        # shard-failure handler.
        if isinstance(tests, test_collection.TestCollection):
          tests.add(test)
        raise
      finally:
        if isinstance(tests, test_collection.TestCollection):
          tests.test_completed()
    logging.info('Finished running tests on this device.')

  # Raised by the SIGTERM handler to unwind the run loop.
  class TestsTerminated(Exception):
    pass

  def stop_tests(_signum, _frame):
    logging.critical('Received SIGTERM. Stopping test execution.')
    exit_now.set()
    raise TestsTerminated()

  try:
    with signal_handler.SignalHandler(signal.SIGTERM, stop_tests):
      tries = 0
      results = []
      while tries < self._env.max_tries and tests:
        logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries)
        logging.info('Will run %d tests on %d devices: %s',
                     len(tests), len(self._env.devices),
                     ', '.join(str(d) for d in self._env.devices))
        for t in tests:
          logging.debug(' %s', t)

        try_results = base_test_result.TestRunResults()
        test_names = (self._GetUniqueTestName(t) for t in tests)
        # Pre-populate every concrete test name as UNKNOWN so tests that
        # never get a chance to run are still reported; wildcard filter
        # entries ('*' suffix) are skipped.
        try_results.AddResults(
            base_test_result.BaseTestResult(
                t, base_test_result.ResultType.UNKNOWN)
            for t in test_names if not t.endswith('*'))

        try:
          if self._ShouldShard():
            tc = test_collection.TestCollection(self._CreateShards(tests))
            self._env.parallel_devices.pMap(
                run_tests_on_device, tc, try_results).pGet(None)
          else:
            self._env.parallel_devices.pMap(
                run_tests_on_device, tests, try_results).pGet(None)
        except TestsTerminated:
          # SIGTERM: convert anything still unknown into TIMEOUT with a
          # log explaining the termination.
          for unknown_result in try_results.GetUnknown():
            try_results.AddResult(
                base_test_result.BaseTestResult(
                    unknown_result.GetName(),
                    base_test_result.ResultType.TIMEOUT,
                    log=_SIGTERM_TEST_LOG))
          raise
        finally:
          # Record this try's results even when terminating early.
          results.append(try_results)

        tries += 1
        tests = self._GetTestsToRetry(tests, try_results)

        logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries)
        if tests:
          logging.info('%d failed tests remain.', len(tests))
        else:
          logging.info('All tests completed.')
  except TestsTerminated:
    pass

  return results
def RunTests(self):
  """Runs all tests, retrying failures up to self._env.max_tries times.

  Returns:
    A base_test_result.TestRunResults object with PASS/SKIP results from
    the try in which they occurred, the latest failing result for tests
    that never passed, and UNKNOWN for tests with no result at all.
  """
  tests = self._GetTests()

  def run_tests_on_device(dev, tests):
    # Runs every test drawn from |tests| on |dev|; returns that device's
    # results. |tests| is a plain list (replicated mode) or a shared
    # test_collection.TestCollection (sharded mode).
    r = base_test_result.TestRunResults()
    for test in tests:
      result = self._RunTest(dev, test)
      if isinstance(result, base_test_result.BaseTestResult):
        r.AddResult(result)
      elif isinstance(result, list):
        r.AddResults(result)
      else:
        raise Exception('Unexpected result type: %s' % type(result).__name__)
      if isinstance(tests, test_collection.TestCollection):
        tests.test_completed()
    return r

  tries = 0
  results = base_test_result.TestRunResults()
  # Latest failing result per test name; consulted only for tests that
  # never produced a passing result.
  all_fail_results = {}
  while tries < self._env.max_tries and tests:
    logging.debug('try %d, will run %d tests:', tries, len(tests))
    for t in tests:
      logging.debug(' %s', t)

    if self._ShouldShard():
      tc = test_collection.TestCollection(self._CreateShards(tests))
      try_results = self._env.parallel_devices.pMap(
          run_tests_on_device, tc).pGet(None)
    else:
      try_results = self._env.parallel_devices.pMap(
          run_tests_on_device, tests).pGet(None)

    for try_result in try_results:
      for result in try_result.GetAll():
        if result.GetType() in (base_test_result.ResultType.PASS,
                                base_test_result.ResultType.SKIP):
          results.AddResult(result)
        else:
          all_fail_results[result.GetName()] = result

    results_names = set(r.GetName() for r in results.GetAll())
    # Retry only tests with no passing/skipped result yet.
    tests = [t for t in tests if self._GetTestName(t) not in results_names]
    tries += 1

  # Fix: iterate with the variable that is actually read. The original
  # wrote 'for f in tests' while reading a leaked 't', producing a set of
  # one repeated name.
  all_unknown_test_names = set(self._GetTestName(t) for t in tests)
  all_failed_test_names = set(all_fail_results.iterkeys())

  unknown_tests = all_unknown_test_names.difference(all_failed_test_names)
  failed_tests = all_failed_test_names.intersection(all_unknown_test_names)

  if unknown_tests:
    # Fix: emit one UNKNOWN per unknown test *name*. The original iterated
    # the remaining test objects ('for t in tests'), double-counting tests
    # that already had a failure result and passing raw test objects as
    # result names.
    results.AddResults(
        base_test_result.BaseTestResult(
            u, base_test_result.ResultType.UNKNOWN)
        for u in unknown_tests)
  if failed_tests:
    results.AddResults(all_fail_results[f] for f in failed_tests)
  return results
def setUp(self):
  """Builds a fixed test list and a factory yielding one shared collection."""
  self.tests = list('abcdefg')
  wrapped = [test_dispatcher._Test(name) for name in self.tests]
  shared_collection = test_collection.TestCollection(wrapped)
  # Every call to the factory hands back the same shared collection.
  self.test_collection_factory = lambda: shared_collection