def RunShardedTests(self): """Runs tests in parallel using a pool of workers. Returns: A list of test results aggregated from all test runs. """ logging.warning('*' * 80) logging.warning('Sharding in ' + str(len(self.attached_devices)) + ' devices.') logging.warning('Note that the output is not synchronized.') logging.warning('Look for the "Final result" banner in the end.') logging.warning('*' * 80) all_passed = [] test_results = TestResults() tests_to_run = self.tests for retry in xrange(self.retries): logging.warning('Try %d of %d', retry + 1, self.retries) self._SetupSharding(self.tests) test_runners = self._MakeTestRunners(self.attached_devices) logging.warning('Starting...') pool = multiprocessing.Pool(len(self.attached_devices), SetTestsContainer, [PythonTestSharder.tests_container]) # List of TestResults objects from each test execution. try: results_lists = pool.map(_DefaultRunnable, test_runners) except Exception: logging.exception('Unable to run tests. Something with the ' 'PythonTestRunners has gone wrong.') raise FatalTestException('PythonTestRunners were unable to run tests.') test_results = TestResults.FromTestResults(results_lists) # Accumulate passing results. all_passed += test_results.ok # If we have failed tests, map them to tests to retry. failed_tests = test_results.GetAllBroken() tests_to_run = self._GetTestsToRetry(self.tests, failed_tests) # Bail out early if we have no more tests. This can happen if all tests # pass before we're out of retries, for example. if not tests_to_run: break final_results = TestResults() # all_passed has accumulated all passing test results. # test_results will have the results from the most recent run, which could # include a variety of failure modes (unknown, crashed, failed, etc). final_results = test_results final_results.ok = all_passed return final_results
def _ProcessResults(self, result, start_ms, duration_ms):
  """Translates a Java test result into a Python result for this test.

  The TestRunner class that we use under the covers will return a test result
  for that specific Java test. However, to make reporting clearer, we have
  this method to abstract that detail and instead report that as a failure of
  this particular test case while still including the Java stack trace.

  Args:
    result: TestResults with a single Java test result
    start_ms: the time the test started
    duration_ms: the length of the test

  Returns:
    A TestResults object containing a result for this Python test.
  """
  test_results = TestResults()

  # If our test is in broken, then it crashed/failed.
  broken = result.GetAllBroken()
  if broken:
    # Since we have run only one test, take the first and only item.
    single_result = broken[0]

    log = single_result.log
    if not log:
      log = 'No logging information.'

    python_result = SingleTestResult(self.qualified_name, start_ms,
                                     duration_ms, log)

    # Figure out where the test belonged. There's probably a cleaner way of
    # doing this.
    if single_result in result.crashed:
      test_results.crashed = [python_result]
    elif single_result in result.failed:
      test_results.failed = [python_result]
    elif single_result in result.unknown:
      test_results.unknown = [python_result]
  else:
    python_result = SingleTestResult(self.qualified_name, start_ms,
                                     duration_ms)
    test_results.ok = [python_result]

  return test_results
def DispatchInstrumentationTests(options):
  """Dispatches the Java and Python instrumentation tests, sharding if possible.

  Uses the logging module to print the combined final results and summary of
  the Java and Python tests. If the java_only option is set, only the Java
  tests run. If the python_only option is set, only the Python tests run. If
  neither is set, run both Java and Python tests.

  Args:
    options: command-line options for running the Java and Python tests.

  Returns:
    An integer representing the number of broken tests.
  """
  if not options.keep_test_server_ports:
    # Reset the test port allocation. It's important to do it before starting
    # to dispatch any tests.
    if not ports.ResetTestServerPortAllocation():
      raise Exception('Failed to reset test server port.')

  start_date = int(time.time() * 1000)
  java_results = TestResults()
  python_results = TestResults()

  if options.run_java_tests:
    java_results = run_java_tests.DispatchJavaTests(
        options,
        [apk_info.ApkInfo(options.test_apk_path, options.test_apk_jar_path)])
  if options.run_python_tests:
    python_results = run_python_tests.DispatchPythonTests(options)

  all_results = TestResults.FromTestResults([java_results, python_results])
  all_results.LogFull(
      test_type='Instrumentation',
      test_package=options.test_apk,
      annotation=options.annotation,
      build_type=options.build_type,
      flakiness_server=options.flakiness_dashboard_server)

  return len(all_results.GetAllBroken())
def DispatchPythonTests(options):
  """Dispatches the Python tests. If there are multiple devices, use sharding.

  Args:
    options: command line options.

  Returns:
    A list of test results.
  """
  attached_devices = android_commands.GetAttachedDevices()
  if not attached_devices:
    raise Exception('You have no devices attached or visible!')
  if options.device:
    attached_devices = [options.device]

  test_collection = TestInfoCollection()
  all_tests = _GetAllTests(options.python_test_root, options.official_build)
  test_collection.AddTests(all_tests)
  test_names = [t.qualified_name for t in all_tests]
  logging.debug('All available tests: ' + str(test_names))

  available_tests = test_collection.GetAvailableTests(
      options.annotation, options.test_filter)

  if not available_tests:
    logging.warning('No Python tests to run with current args.')
    return TestResults()

  available_tests *= options.number_of_runs
  test_names = [t.qualified_name for t in available_tests]
  logging.debug('Final list of tests to run: ' + str(test_names))

  # Copy files to each device before running any tests.
  for device_id in attached_devices:
    logging.debug('Pushing files to device %s', device_id)
    apks = [apk_info.ApkInfo(options.test_apk_path, options.test_apk_jar_path)]
    test_files_copier = test_runner.TestRunner(options, device_id, None, False,
                                               0, apks, [])
    test_files_copier.CopyTestFilesOnce()

  # Actually run the tests.
  if len(attached_devices) > 1 and options.wait_for_debugger:
    logging.warning('Debugger can not be sharded, '
                    'using first available device')
    attached_devices = attached_devices[:1]
  logging.debug('Running Python tests')
  sharder = PythonTestSharder(attached_devices, available_tests, options)
  test_results = sharder.RunShardedTests()

  return test_results
def _ShardedTestRunnable(test):
  """Standalone function needed by multiprocessing.Pool."""
  log_format = '[' + test.device + '] # %(asctime)-15s: %(message)s'
  if logging.getLogger().handlers:
    logging.getLogger().handlers[0].setFormatter(logging.Formatter(log_format))
  else:
    logging.basicConfig(format=log_format)
  # Catch SystemExit here, since a Python bug can otherwise exit the current
  # worker process.
  try:
    return test.Run()
  except SystemExit:
    return TestResults()
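# A minimal, self-contained sketch of the Pool initializer pattern the sharders
# in this file rely on: multiprocessing.Pool's second and third arguments are
# an initializer callable and its arguments, run once in every worker process.
# That is how SetTestsContainer seeds each worker with the shared tests
# container before runnables are mapped onto it. The names _ExampleInit,
# _ExampleWork and _RunPoolExample are illustrative only and not part of pylib.
import multiprocessing

_example_container = None


def _ExampleInit(container):
  # Runs once per worker process; stash the shared container in a module-level
  # global so mapped callables can reach it.
  global _example_container
  _example_container = container


def _ExampleWork(index):
  # Every worker sees the container seeded by _ExampleInit.
  return 'worker %d sees %d shared items' % (index, len(_example_container))


def _RunPoolExample():
  manager = multiprocessing.Manager()
  shared = manager.list(['testA', 'testB', 'testC'])
  pool = multiprocessing.Pool(2, _ExampleInit, [shared])
  return pool.map(_ExampleWork, range(2))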
def RunShardedTests(self): """Runs tests in parallel using a pool of workers. Returns: A list of test results aggregated from all test runs. """ logging.warning('*' * 80) logging.warning('Sharding in ' + str(len(self.attached_devices)) + ' devices.') logging.warning('Note that the output is not synchronized.') logging.warning('Look for the "Final result" banner in the end.') logging.warning('*' * 80) all_passed = [] test_results = TestResults() tests_to_run = self.tests for retry in xrange(self.retries): logging.warning('Try %d of %d', retry + 1, self.retries) self._SetupSharding(self.tests) test_runners = self._MakeTestRunners(self.attached_devices) logging.warning('Starting...') pool = multiprocessing.Pool(len(self.attached_devices), SetTestsContainer, [PythonTestSharder.tests_container]) # List of TestResults objects from each test execution. try: results_lists = pool.map(_DefaultRunnable, test_runners) except Exception: logging.exception('Unable to run tests. Something with the ' 'PythonTestRunners has gone wrong.') raise FatalTestException( 'PythonTestRunners were unable to run tests.') test_results = TestResults.FromTestResults(results_lists) # Accumulate passing results. all_passed += test_results.ok # If we have failed tests, map them to tests to retry. failed_tests = test_results.GetAllBroken() tests_to_run = self._GetTestsToRetry(self.tests, failed_tests) # Bail out early if we have no more tests. This can happen if all tests # pass before we're out of retries, for example. if not tests_to_run: break final_results = TestResults() # all_passed has accumulated all passing test results. # test_results will have the results from the most recent run, which could # include a variety of failure modes (unknown, crashed, failed, etc). final_results = test_results final_results.ok = all_passed return final_results
def RunTests(self): """Runs tests from the shared pool of tests, aggregating results. Returns: A list of test results for all of the tests which this runner executed. """ tests = PythonTestSharder.tests_container results = [] for t in tests: res = CallPythonTest(t, self.options) results.append(res) return TestResults.FromTestResults(results)
def _WatchTestOutput(self, p):
  """Watches the test output.

  Args:
    p: the process generating output as created by pexpect.spawn.
  """
  ok_tests = []
  failed_tests = []
  crashed_tests = []
  timed_out_tests = []

  # Test case statuses.
  re_run = re.compile('\[ RUN \] ?(.*)\r\n')
  re_fail = re.compile('\[ FAILED \] ?(.*)\r\n')
  re_ok = re.compile('\[ OK \] ?(.*?) .*\r\n')

  # Test run statuses.
  re_passed = re.compile('\[ PASSED \] ?(.*)\r\n')
  re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n')
  # Signal handlers are installed before starting tests
  # to output the CRASHED marker when a crash happens.
  re_crash = re.compile('\[ CRASHED \](.*)\r\n')

  try:
    while True:
      full_test_name = None
      found = p.expect([re_run, re_passed, re_runner_fail],
                       timeout=self.timeout)
      if found == 1:  # re_passed
        break
      elif found == 2:  # re_runner_fail
        break
      else:  # re_run
        full_test_name = p.match.group(1).replace('\r', '')
        found = p.expect([re_ok, re_fail, re_crash], timeout=self.timeout)
        if found == 0:  # re_ok
          if full_test_name == p.match.group(1).replace('\r', ''):
            ok_tests += [BaseTestResult(full_test_name, p.before)]
        elif found == 2:  # re_crash
          crashed_tests += [BaseTestResult(full_test_name, p.before)]
          break
        else:  # re_fail
          failed_tests += [BaseTestResult(full_test_name, p.before)]
  except pexpect.EOF:
    logging.error('Test terminated - EOF')
    # We're here because either the device went offline, or the test harness
    # crashed without outputting the CRASHED marker (crbug.com/175538).
    if not self.adb.IsOnline():
      raise errors.DeviceUnresponsiveError('Device %s went offline.' %
                                           self.device)
    elif full_test_name:
      crashed_tests += [BaseTestResult(full_test_name, p.before)]
  except pexpect.TIMEOUT:
    logging.error('Test terminated after %d second timeout.', self.timeout)
    if full_test_name:
      timed_out_tests += [BaseTestResult(full_test_name, p.before)]
  finally:
    p.close()

  ret_code = self._GetGTestReturnCode()
  if ret_code:
    logging.critical(
        'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
        ret_code, p.before, p.after)

  # Create TestResults and return.
  return TestResults.FromRun(ok=ok_tests, failed=failed_tests,
                             crashed=crashed_tests,
                             timed_out=timed_out_tests)
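# A hedged sketch of the expect() loop used above, runnable on its own against
# fabricated gtest-style output rather than a real test binary. It assumes the
# same string-mode (Python 2 era) pexpect as the surrounding code: expect()
# takes a list of patterns and returns the index of the one that matched, the
# match object is available as p.match, and p.before holds the preceding
# output. _ParseFakeGTestOutput is illustrative only and not part of pylib.
import re

import pexpect


def _ParseFakeGTestOutput():
  p = pexpect.spawn('/bin/sh',
                    ['-c',
                     'echo "[ RUN ] Foo.Bar"; echo "[ OK ] Foo.Bar (1 ms)"'])
  re_run = re.compile(r'\[ RUN \] ?(.*)\r\n')
  re_ok = re.compile(r'\[ OK \] ?(.*?) .*\r\n')
  # expect() returns the index of the first pattern that matches the output.
  found = p.expect([re_run, re_ok], timeout=5)
  test_name = p.match.group(1).replace('\r', '') if found == 0 else None
  # The second expect() consumes the status line for the test that just ran.
  p.expect([re_run, re_ok], timeout=5)
  p.close()
  return test_name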
def __init__(self, options, device, tests_iter, coverage, shard_index, apks,
             ports_to_forward):
  """Create a new TestRunner.

  Args:
    options: An options object with the following required attributes:
      - build_type: 'Release' or 'Debug'.
      - install_apk: If set, re-installs the apk before running.
      - save_perf_json: Whether or not to save the JSON file from UI perf
        tests.
      - screenshot_failures: Take a screenshot for a test failure.
      - tool: Name of the Valgrind tool.
      - wait_for_debugger: Blocks until the debugger is connected.
      - disable_assertions: Whether to disable Java assertions on the device.
    device: Attached android device.
    tests_iter: A list of tests to be run.
    coverage: If set, collects coverage information.
    shard_index: shard # for this TestRunner, used to create unique port
      numbers.
    apks: A list of ApkInfo objects to be installed. The first element should
      be the tests apk; the rest can be the apks used by the test. The default
      is ChromeTest.apk.
    ports_to_forward: A list of port numbers for which to set up forwarders.
      Can be optionally requested by a test case.

  Raises:
    FatalTestException: if coverage metadata is not available.
  """
  BaseTestRunner.__init__(
      self, device, options.tool, shard_index, options.build_type)

  if not apks:
    apks = [apk_info.ApkInfo(options.test_apk_path,
                             options.test_apk_jar_path)]

  self.build_type = options.build_type
  self.install_apk = options.install_apk
  self.test_data = options.test_data
  self.save_perf_json = options.save_perf_json
  self.screenshot_failures = options.screenshot_failures
  self.wait_for_debugger = options.wait_for_debugger
  self.disable_assertions = options.disable_assertions

  self.tests_iter = tests_iter
  self.coverage = coverage
  self.apks = apks
  self.test_apk = apks[0]
  self.instrumentation_class_path = self.test_apk.GetPackageName()
  self.ports_to_forward = ports_to_forward

  self.test_results = TestResults()
  self.forwarder = None

  if self.coverage:
    if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
      os.remove(TestRunner._COVERAGE_MERGED_FILENAME)
    if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH):
      raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                               ' : Coverage meta info [' +
                               TestRunner._COVERAGE_META_INFO_PATH +
                               '] does not exist.')
    if (not TestRunner._COVERAGE_WEB_ROOT_DIR or
        not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)):
      raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                               ' : Path specified in $EMMA_WEB_ROOTDIR [' +
                               TestRunner._COVERAGE_WEB_ROOT_DIR +
                               '] does not exist.')
def DispatchJavaTests(options, apks):
  """Dispatches Java tests onto connected device(s).

  If possible, this method will attempt to shard the tests across all connected
  devices. Otherwise, dispatch and run tests on one device.

  Args:
    options: Command line options.
    apks: list of APKs to use.

  Returns:
    A TestResults object holding the results of the Java tests.

  Raises:
    FatalTestException: when there are no attached devices.
  """
  test_apk = apks[0]
  # The default annotation for tests which do not have any size annotation.
  default_size_annotation = 'SmallTest'

  def _GetTestsMissingAnnotation(test_apk):
    test_size_annotations = frozenset(['Smoke', 'SmallTest', 'MediumTest',
                                       'LargeTest', 'EnormousTest', 'FlakyTest',
                                       'DisabledTest', 'Manual', 'PerfTest'])
    tests_missing_annotations = []
    for test_method in test_apk.GetTestMethods():
      annotations = frozenset(test_apk.GetTestAnnotations(test_method))
      if (annotations.isdisjoint(test_size_annotations) and
          not apk_info.ApkInfo.IsPythonDrivenTest(test_method)):
        tests_missing_annotations.append(test_method)
    return sorted(tests_missing_annotations)

  if options.annotation:
    available_tests = test_apk.GetAnnotatedTests(options.annotation)
    if options.annotation.count(default_size_annotation) > 0:
      tests_missing_annotations = _GetTestsMissingAnnotation(test_apk)
      if tests_missing_annotations:
        logging.warning('The following tests do not contain any annotation. '
                        'Assuming "%s":\n%s',
                        default_size_annotation,
                        '\n'.join(tests_missing_annotations))
        available_tests += tests_missing_annotations
  else:
    available_tests = [m for m in test_apk.GetTestMethods()
                       if not apk_info.ApkInfo.IsPythonDrivenTest(m)]
  coverage = os.environ.get('EMMA_INSTRUMENT') == 'true'

  tests = []
  if options.test_filter:
    # |available_tests| are in adb instrument format: package.path.class#test.
    filter_without_hash = options.test_filter.replace('#', '.')
    tests = [t for t in available_tests
             if filter_without_hash in t.replace('#', '.')]
  else:
    tests = available_tests

  if not tests:
    logging.warning('No Java tests to run with current args.')
    return TestResults()

  tests *= options.number_of_runs

  attached_devices = android_commands.GetAttachedDevices()
  test_results = TestResults()

  if not attached_devices:
    raise FatalTestException('You have no devices attached or visible!')
  if options.device:
    attached_devices = [options.device]

  logging.info('Will run: %s', str(tests))

  if len(attached_devices) > 1 and (coverage or options.wait_for_debugger):
    logging.warning('Coverage / debugger can not be sharded, '
                    'using first available device')
    attached_devices = attached_devices[:1]

  sharder = TestSharder(attached_devices, options, tests, apks)
  test_results = sharder.RunShardedTests()

  return test_results
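# The size-annotation check in _GetTestsMissingAnnotation above hinges on
# frozenset.isdisjoint: a test method is only flagged when none of its
# annotations overlap the recognized size annotations. A tiny illustration;
# the annotation set below is abbreviated and the helper is not part of pylib.
_EXAMPLE_SIZE_ANNOTATIONS = frozenset(['Smoke', 'SmallTest', 'MediumTest',
                                       'LargeTest'])


def _IsMissingSizeAnnotation(annotations):
  # True when the test declares none of the recognized size annotations,
  # e.g. _IsMissingSizeAnnotation(['Feature']) is True while
  # _IsMissingSizeAnnotation(['SmallTest', 'Feature']) is False.
  return frozenset(annotations).isdisjoint(_EXAMPLE_SIZE_ANNOTATIONS)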
class TestRunner(BaseTestRunner):
  """Single test suite attached to a single device.

  Args:
    device: Device to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    gtest_filter: A gtest_filter flag.
    test_arguments: Additional arguments to pass to the test binary.
    timeout: Timeout for each test.
    cleanup_test_files: Whether or not to cleanup test files on device.
    tool_name: Name of the Valgrind tool.
    shard_index: index number of the shard on which the test suite will run.
    build_type: 'Release' or 'Debug'.
    in_webkit_checkout: Whether the suite is being run from a WebKit checkout.
  """

  def __init__(self, device, test_suite, gtest_filter, test_arguments, timeout,
               cleanup_test_files, tool_name, shard_index, build_type,
               in_webkit_checkout):
    BaseTestRunner.__init__(self, device, tool_name, shard_index, build_type)
    self._running_on_emulator = self.device.startswith('emulator')
    self._gtest_filter = gtest_filter
    self._test_arguments = test_arguments
    self.test_results = TestResults()
    self.in_webkit_checkout = in_webkit_checkout

    logging.warning('Test suite: ' + test_suite)
    if os.path.splitext(test_suite)[1] == '.apk':
      self.test_package = TestPackageApk(self.adb, device,
                                         test_suite, timeout,
                                         cleanup_test_files, self.tool)
    else:
      # Put a copy into the android out/target directory, to allow stack trace
      # generation.
      symbols_dir = os.path.join(constants.CHROME_DIR, 'out', build_type,
                                 'lib.target')
      self.test_package = TestPackageExecutable(self.adb, device,
                                                test_suite, timeout,
                                                cleanup_test_files, self.tool,
                                                symbols_dir)

  def _TestSuiteRequiresMockTestServer(self):
    """Returns True if the test suite requires the mock test server."""
    tests_require_net_test_server = ['unit_tests', 'net_unittests',
                                     'content_unittests']
    return (self.test_package.test_suite_basename in
            tests_require_net_test_server)

  def GetDisabledTests(self):
    """Returns a list of disabled tests.

    Returns:
      A list of disabled tests obtained from the 'filter' subdirectory.
    """
    gtest_filter_base_path = os.path.join(
        CURFILE_PATH, 'filter', self.test_package.test_suite_basename)
    disabled_tests = run_tests_helper.GetExpectations(
        gtest_filter_base_path + '_disabled')
    if self._running_on_emulator:
      # Append emulator's filter file.
      disabled_tests.extend(run_tests_helper.GetExpectations(
          gtest_filter_base_path + '_emulator_additional_disabled'))
    return disabled_tests

  def LaunchHelperToolsForTestSuite(self):
    """Launches helper tools for the test suite.

    Sometimes one test may need to run some helper tools first in order to
    successfully complete the test.
    """
    if self._TestSuiteRequiresMockTestServer():
      self.LaunchChromeTestServerSpawner()

  def StripAndCopyFiles(self):
    """Strips and copies the required data files for the test suite."""
    self.test_package.StripAndCopyExecutable()
    self.test_package.PushDataAndPakFiles()
    self.tool.CopyFiles()
    test_data = _GetDataFilesForTestSuite(
        self.test_package.test_suite_basename)
    if test_data:
      # Make sure SD card is ready.
      self.adb.WaitForSdCardReady(20)
      for data in test_data:
        self.CopyTestData([data], self.adb.GetExternalStorage())
    if self.test_package.test_suite_basename == 'webkit_unit_tests':
      self.PushWebKitUnitTestsData()

  def PushWebKitUnitTestsData(self):
    """Pushes the webkit_unit_tests data files to the device.

    The path of this directory is different when the suite is being run as
    part of a WebKit check-out.
    """
    webkit_src = os.path.join(constants.CHROME_DIR, 'third_party', 'WebKit')
    if self.in_webkit_checkout:
      webkit_src = os.path.join(constants.CHROME_DIR, '..', '..', '..')

    self.adb.PushIfNeeded(
        os.path.join(webkit_src, 'Source/WebKit/chromium/tests/data'),
        os.path.join(self.adb.GetExternalStorage(),
                     'third_party/WebKit/Source/WebKit/chromium/tests/data'))

  def RunTests(self):
    """Runs tests on a single device.

    Returns:
      A TestResults object.
    """
    if self._gtest_filter:
      try:
        self.test_package.CreateTestRunnerScript(self._gtest_filter,
                                                 self._test_arguments)
        self.test_results = self.test_package.RunTestsAndListResults()
      except errors.DeviceUnresponsiveError as e:
        # Make sure this device is not attached.
        logging.warning(e)
        if android_commands.IsDeviceAttached(self.device):
          raise e
        self.test_results.device_exception = e
      # Calculate unknown test results.
      finally:
        # TODO(frankf): Do not break TestResults encapsulation.
        all_tests = set(self._gtest_filter.split(':'))
        all_tests_ran = set([t.name for t in self.test_results.GetAll()])
        unknown_tests = all_tests - all_tests_ran
        self.test_results.unknown = [BaseTestResult(t, '')
                                     for t in unknown_tests]
    return self.test_results

  def SetUp(self):
    """Sets up the necessary test environment for the test suite."""
    super(TestRunner, self).SetUp()
    self.adb.ClearApplicationState(constants.CHROME_PACKAGE)
    self.StripAndCopyFiles()
    self.LaunchHelperToolsForTestSuite()
    self.tool.SetupEnvironment()

  def TearDown(self):
    """Cleans up the test environment for the test suite."""
    self.tool.CleanUpEnvironment()
    if self.test_package.cleanup_test_files:
      self.adb.RemovePushedFiles()
    super(TestRunner, self).TearDown()
def RunShardedTests(self): """Runs the tests in all connected devices. Returns: A TestResults object. """ logging.warning('*' * 80) logging.warning('Sharding in ' + str(len(self.attached_devices)) + ' devices.') logging.warning('Note that the output is not synchronized.') logging.warning('Look for the "Final result" banner in the end.') logging.warning('*' * 80) final_results = TestResults() self._KillHostForwarder() for retry in xrange(self.retries): logging.warning('Try %d of %d', retry + 1, self.retries) logging.warning('Attempting to run %d tests: %s' % (len(self.tests), self.tests)) self.SetupSharding(self.tests) test_runners = [] # Try to create N shards, and retrying on failure. try: for index, device in enumerate(self.attached_devices): logging.warning('*' * 80) logging.warning('Creating shard %d for %s', index, device) logging.warning('*' * 80) test_runner = self.CreateShardedTestRunner(device, index) test_runners += [test_runner] except android_commands.errors.DeviceUnresponsiveError as e: logging.critical('****Failed to create a shard: [%s]', e) self.attached_devices.remove(device) continue logging.warning('Starting...') pool = multiprocessing.Pool(len(self.attached_devices), SetTestsContainer, [BaseTestSharder.tests_container]) # map can't handle KeyboardInterrupt exception. It's a python bug. # So use map_async instead. async_results = pool.map_async(_ShardedTestRunnable, test_runners) try: results_lists = async_results.get(999999) except android_commands.errors.DeviceUnresponsiveError as e: logging.critical('****Failed to run test: [%s]', e) self.attached_devices = android_commands.GetAttachedDevices() continue test_results = TestResults.FromTestResults(results_lists) # Re-check the attached devices for some devices may # become offline retry_devices = set(android_commands.GetAttachedDevices()) # Remove devices that had exceptions. retry_devices -= TestResults.DeviceExceptions(results_lists) # Retry on devices that didn't have any exception. self.attached_devices = list(retry_devices) # TODO(frankf): Do not break TestResults encapsulation. if (retry == self.retries - 1 or len(self.attached_devices) == 0): all_passed = final_results.ok + test_results.ok final_results = test_results final_results.ok = all_passed break else: final_results.ok += test_results.ok self.tests = [] for t in test_results.GetAllBroken(): self.tests += [t.name] if not self.tests: break else: # We ran out retries, possibly out of healthy devices. # There's no recovery at this point. raise Exception('Unrecoverable error while retrying test runs.') self._KillHostForwarder() return final_results
def CallPythonTest(test, options):
  """Invokes a test function and translates Python exceptions into test results.

  This method invokes SetUp()/TearDown() on the test. It is intended to be
  resilient to exceptions in SetUp(), the test itself, and TearDown(). Any
  Python exception means the test is marked as failed, and the test result will
  contain information about the exception.

  If SetUp() raises an exception, the test is not run.

  If TearDown() raises an exception, the test is treated as a failure. However,
  if the test itself raised an exception beforehand, that stack trace will take
  precedence whether or not TearDown() also raised an exception.

  shard_index is not applicable in single-device scenarios, when test execution
  is serial rather than parallel. Tests can use this to bring up servers with
  unique port numbers, for example. See also python_test_sharder.

  Args:
    test: an object which is ostensibly a subclass of PythonTestBase.
    options: Options to use for setting up tests.

  Returns:
    A TestResults object which contains any results produced by the test or, in
    the case of a Python exception, the Python exception info.
  """
  start_date_ms = int(time.time()) * 1000
  failed = False

  try:
    test.SetUp(options)
  except Exception:
    failed = True
    logging.exception('Caught exception while trying to run SetUp() for '
                      'test: ' + test.qualified_name)
    # Tests whose SetUp() method has failed are likely to fail, or at least
    # yield invalid results.
    exc_info = sys.exc_info()
    return TestResults.FromPythonException(test.qualified_name, start_date_ms,
                                           exc_info)

  try:
    result = test.Run()
  except Exception:
    # Setting this lets TearDown() avoid stomping on our stack trace from Run()
    # should TearDown() also raise an exception.
    failed = True
    logging.exception('Caught exception while trying to run test: ' +
                      test.qualified_name)
    exc_info = sys.exc_info()
    result = TestResults.FromPythonException(test.qualified_name,
                                             start_date_ms, exc_info)

  try:
    test.TearDown()
  except Exception:
    logging.exception('Caught exception while trying to run TearDown() for '
                      'test: ' + test.qualified_name)
    if not failed:
      # Don't stomp the error during the test if TearDown blows up. This is a
      # trade-off: if the test fails, this will mask any problem with TearDown
      # until the test is fixed.
      exc_info = sys.exc_info()
      result = TestResults.FromPythonException(test.qualified_name,
                                               start_date_ms, exc_info)

  return result
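# A hedged usage sketch for CallPythonTest: any object exposing SetUp(options),
# Run(), TearDown() and a qualified_name attribute can be driven through it.
# The class below is illustrative only; real tests derive from PythonTestBase.
class _ExampleTest(object):
  qualified_name = 'example.ExampleTest.testNothing'

  def SetUp(self, options):
    pass

  def Run(self):
    # A real test returns a TestResults object; any exception raised here is
    # translated into a failed result by CallPythonTest.
    return TestResults()

  def TearDown(self):
    pass

# Typical call site (options comes from the test dispatcher):
#   result = CallPythonTest(_ExampleTest(), options)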