def run(self, test_names=None):
    """Run a parameterized test.

    For each parameter initialized for the test, runs test cases within
    this test class against that parameter.

    Args:
        test_names: A list of string that are test case names requested in
                    cmd line.

    Returns:
        The test results object of this class.
    """
    logging.info("==========> %s <==========", self.test_module_name)
    # Get the original tests.
    tests = self.getTests(test_names)
    # Run the set of original tests against each parameter.
    for param in self.params:
        self.cur_param = param
        # BUG FIX: build a fresh (name, func) list for this parameter
        # instead of mutating `tests` in place. In-place mutation made the
        # param tags accumulate across iterations, producing names like
        # "test[a][b]" on the second parameter instead of "test[b]".
        param_tests = [
            (str(test_name + self.getParamTag(param)), test_func)
            for test_name, test_func in tests
        ]
        if not self.run_as_vts_self_test:
            # Requested records reflect only the current parameter's run,
            # matching the original overwrite-per-iteration behavior.
            self.results.requested = [
                records.TestResultRecord(test_name, self.test_module_name)
                for test_name, _ in param_tests
            ]
        self.runTests(param_tests)
    return self.results
def ParseJsonResults(self, result_path):
    '''Parse mobly test json result.

    Reads the json summary produced by a mobly run and adds one
    TestResultRecord per entry under 'Results' to self.results.

    Args:
        result_path: string, result json file path.
    '''
    # BUG FIX: the original opened `path`, an undefined name, which raised
    # NameError on every call; the parameter is `result_path`.
    with open(result_path, 'r') as f:
        mobly_summary = json.load(f)
    mobly_results = mobly_summary['Results']
    for result in mobly_results:
        logging.debug('Adding result for %s' %
                      result[records.TestResultEnums.RECORD_NAME])
        record = records.TestResultRecord(
            result[records.TestResultEnums.RECORD_NAME])
        record.test_class = result[records.TestResultEnums.RECORD_CLASS]
        record.begin_time = result[
            records.TestResultEnums.RECORD_BEGIN_TIME]
        record.end_time = result[records.TestResultEnums.RECORD_END_TIME]
        record.result = result[records.TestResultEnums.RECORD_RESULT]
        record.uid = result[records.TestResultEnums.RECORD_UID]
        record.extras = result[records.TestResultEnums.RECORD_EXTRAS]
        record.details = result[records.TestResultEnums.RECORD_DETAILS]
        record.extra_errors = result[
            records.TestResultEnums.RECORD_EXTRA_ERRORS]
        self.results.addRecord(record)
def runGeneratedTests(self,
                      test_func,
                      settings,
                      args=None,
                      kwargs=None,
                      tag="",
                      name_func=None):
    """Runs generated test cases.

    Generated test cases are not written down as functions, but as a list
    of parameter sets. This way we reduce code repetition and improve test
    case scalability.

    Args:
        test_func: The common logic shared by all these generated test
                   cases. This function should take at least one argument,
                   which is a parameter set.
        settings: A list of strings representing parameter sets. These are
                  usually json strings that get loaded in the test_func.
        args: Iterable of additional position args to be passed to
              test_func.
        kwargs: Dict of additional keyword args to be passed to test_func
        tag: Name of this group of generated test cases. Ignored if
             name_func is provided and operates properly.
        name_func: A function that takes a test setting and generates a
                   proper test name. The test name should be shorter than
                   utils.MAX_FILENAME_LEN. Names over the limit will be
                   truncated.

    Returns:
        A list of settings that did not pass.
    """
    args = args or ()
    kwargs = kwargs or {}
    failed_settings = []
    for s in settings:
        test_name = "{} {}".format(tag, s)
        if name_func:
            try:
                test_name = name_func(s, *args, **kwargs)
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Catch Exception instead.
            except Exception:
                logging.exception(("Failed to get test name from "
                                   "test_func. Fall back to default %s"),
                                  test_name)
        # BUG FIX: truncate before creating the requested record so the
        # recorded name matches the name the test actually runs under.
        # The original truncated after appending, leaving the requested
        # record with the untruncated name.
        if len(test_name) > utils.MAX_FILENAME_LEN:
            test_name = test_name[:utils.MAX_FILENAME_LEN]
        tr_record = records.TestResultRecord(test_name, self.TAG)
        self.results.requested.append(tr_record)
        previous_success_cnt = len(self.results.passed)
        self.execOneTest(test_name, test_func, (s, ) + args, **kwargs)
        # A passing run increments `passed` by exactly one; anything else
        # (fail, skip, error) marks this setting as failed.
        if len(self.results.passed) - previous_success_cnt != 1:
            failed_settings.append(s)
    return failed_settings
def ListTestCases(self):
    '''List test cases.

    Captures the stdout produced by mobly's test-name printer and keeps
    every line that is not a section header and carries the expected
    suffix, registering each as a requested test record.

    Returns:
        List of string, test names.
    '''
    classes = mobly_test_runner._find_test_class()
    with capture_printout.CaptureStdout() as output:
        mobly_test_runner._print_test_names(classes)
    test_names = []
    for entry in output:
        # Skip header lines and lines without the test-name suffix.
        if (entry.startswith(LIST_TEST_OUTPUT_START)
                or not entry.endswith(LIST_TEST_OUTPUT_END)):
            continue
        test_names.append(entry)
        self.results.requested.append(
            records.TestResultRecord(entry, self.test_module_name))
    return test_names
def InternalResultReportMultiThread(self, test_name, function, args,
                                    **kwargs):
    """Report a test result to runner thread safely.

    Run the given function to generate result for the runner. The function
    given should produce the same result visible to the runner but may not
    run any actual tests.

    Args:
        test_name: string, name of a test case
        function: the function to generate a test case result for runner
        args: any arguments for the function
        **kwargs: any additional keyword arguments for runner
    """
    # The `with` statement guarantees the lock is released even if
    # execOneTest raises, replacing the manual acquire/try/finally dance.
    # The original `except Exception as e: raise e` was a no-op re-raise
    # (and rewrote the traceback); exceptions now propagate unchanged.
    with self._report_thread_lock:
        tr_record = records.TestResultRecord(test_name, self.TAG)
        self.results.requested.append(tr_record)
        self.execOneTest(test_name, function, args, **kwargs)
def HandleRecord(self, doc):
    '''Handle record result document type.

    Builds a TestResultRecord from the fields of a result document and
    adds it to this runner's results.

    Args:
        doc: dict, result document item
    '''
    enums = records.TestResultEnums
    logging.debug('Adding result for %s' % doc.get(enums.RECORD_NAME))
    record = records.TestResultRecord(doc.get(enums.RECORD_NAME))
    # Copy each result field from the document onto the record; missing
    # keys become None via dict.get, same as the original assignments.
    field_keys = (
        ('test_class', enums.RECORD_CLASS),
        ('begin_time', enums.RECORD_BEGIN_TIME),
        ('end_time', enums.RECORD_END_TIME),
        ('result', enums.RECORD_RESULT),
        ('uid', enums.RECORD_UID),
        ('extras', enums.RECORD_EXTRAS),
        ('details', enums.RECORD_DETAILS),
        ('extra_errors', enums.RECORD_EXTRA_ERRORS),
    )
    for attr, key in field_keys:
        setattr(record, attr, doc.get(key))
    # 'Stacktrace' in yaml result is ignored. 'Stacktrace' is a more
    # detailed version of record.details when exception is emitted.
    self.results.addRecord(record)
def execOneTest(self, test_name, test_func, args, **kwargs):
    """Executes one test case and update test results.

    Executes one test case, create a records.TestResultRecord object with
    the execution information, and add the record to the test class's test
    results.

    Control flow: _testEntry -> filter -> _setUp -> test_func -> _tearDown,
    with each framework signal (fail/skip/abort/pass/silent) mapped onto
    the record and the matching on* procedure hook. The record is added to
    results in the finally block unless the test was silenced.

    Args:
        test_name: Name of the test.
        test_func: The test function.
        args: A tuple of params.
        kwargs: Extra kwargs.
    """
    is_silenced = False
    tr_record = records.TestResultRecord(test_name, self.test_module_name)
    tr_record.testBegin()
    logging.info("%s %s", TEST_CASE_TOKEN, test_name)
    verdict = None
    # `finished` stays False only if execution was cut short without any
    # handler running (e.g. external timeout); the finally block then
    # treats the test as timed out.
    finished = False
    try:
        ret = self._testEntry(tr_record)
        asserts.assertTrue(ret is not False,
                           "Setup test entry for %s failed." % test_name)
        self.filterOneTest(test_name)
        if self.collect_tests_only:
            asserts.explicitPass("Collect tests only.")
        try:
            ret = self._setUp(test_name)
            asserts.assertTrue(ret is not False,
                               "Setup for %s failed." % test_name)
            if args or kwargs:
                verdict = test_func(*args, **kwargs)
            else:
                verdict = test_func()
            finished = True
        finally:
            # Teardown runs whether the test body succeeded or raised.
            self._tearDown(test_name)
    except (signals.TestFailure, acts_signals.TestFailure,
            AssertionError) as e:
        tr_record.testFail(e)
        self._exec_procedure_func(self._onFail)
        finished = True
    except (signals.TestSkip, acts_signals.TestSkip) as e:
        # Test skipped.
        tr_record.testSkip(e)
        self._exec_procedure_func(self._onSkip)
        finished = True
    except (signals.TestAbortClass, acts_signals.TestAbortClass) as e:
        # Abort signals, pass along so the class-level runner stops.
        tr_record.testFail(e)
        finished = True
        # NOTE(review): Python 2 three-argument raise syntax (preserves
        # the original traceback); this statement is a SyntaxError on
        # Python 3 — this module is Python 2 only.
        raise signals.TestAbortClass, e, sys.exc_info()[2]
    except (signals.TestAbortAll, acts_signals.TestAbortAll) as e:
        # Abort signals, pass along.
        tr_record.testFail(e)
        finished = True
        raise signals.TestAbortAll, e, sys.exc_info()[2]
    except (signals.TestPass, acts_signals.TestPass) as e:
        # Explicit test pass.
        tr_record.testPass(e)
        self._exec_procedure_func(self._onPass)
        finished = True
    except (signals.TestSilent, acts_signals.TestSilent) as e:
        # Suppress test reporting: drop the record entirely and skip the
        # addRecord call in the finally block.
        is_silenced = True
        self._exec_procedure_func(self._onSilent)
        self.results.removeRecord(tr_record)
        finished = True
    except Exception as e:
        # Unexpected exception during the test: record as error and run
        # both the exception and failure hooks.
        logging.exception(e)
        tr_record.testError(e)
        self._exec_procedure_func(self._onException)
        self._exec_procedure_func(self._onFail)
        finished = True
    else:
        # Keep supporting return False for now.
        # TODO(angli): Deprecate return False support.
        if verdict or (verdict is None):
            # Test passed (returning nothing counts as a pass).
            tr_record.testPass()
            self._exec_procedure_func(self._onPass)
            return
        # Test failed because it didn't return True.
        # This should be removed eventually.
        tr_record.testFail()
        self._exec_procedure_func(self._onFail)
        finished = True
    finally:
        if not finished:
            # No handler ran: the test was interrupted (e.g. timed out).
            for device in self.android_devices:
                # Only tear down device shells that were actually set up.
                if device.shell is not None:
                    device.shell.DisableShell()
            logging.error('Test timed out.')
            tr_record.testError()
            self._exec_procedure_func(self._onException)
            self._exec_procedure_func(self._onFail)
        if not is_silenced:
            self.results.addRecord(tr_record)
        self._testExit()
def execOneTest(self, test_name, test_func, args, **kwargs):
    """Executes one test case and update test results.

    Executes one test case, create a records.TestResultRecord object with
    the execution information, and add the record to the test class's test
    results.

    Control flow: _testEntry -> filter -> _setUp -> test_func -> _tearDown,
    with each framework signal (fail/skip/abort/pass/silent) mapped onto
    the record and the matching on* procedure hook. The record is added to
    results in the finally block unless the test was silenced.

    Args:
        test_name: Name of the test.
        test_func: The test function.
        args: A tuple of params.
        kwargs: Extra kwargs.
    """
    is_silenced = False
    tr_record = records.TestResultRecord(test_name, self.TAG)
    tr_record.testBegin()
    logging.info("%s %s", TEST_CASE_TOKEN, test_name)
    verdict = None
    try:
        ret = self._testEntry(test_name)
        asserts.assertTrue(ret is not False,
                           "Setup test entry for %s failed." % test_name)
        self.filterOneTest(test_name)
        try:
            ret = self._setUp(test_name)
            asserts.assertTrue(ret is not False,
                               "Setup for %s failed." % test_name)
            if args or kwargs:
                verdict = test_func(*args, **kwargs)
            else:
                verdict = test_func()
        finally:
            # Teardown runs whether the test body succeeded or raised.
            self._tearDown(test_name)
    except (signals.TestFailure, AssertionError) as e:
        tr_record.testFail(e)
        self._exec_procedure_func(self._onFail, tr_record)
    except signals.TestSkip as e:
        # Test skipped.
        tr_record.testSkip(e)
        self._exec_procedure_func(self._onSkip, tr_record)
    except (signals.TestAbortClass, signals.TestAbortAll) as e:
        # Abort signals, pass along so the class-level runner stops.
        tr_record.testFail(e)
        raise e
    except signals.TestPass as e:
        # Explicit test pass.
        tr_record.testPass(e)
        self._exec_procedure_func(self._onPass, tr_record)
    except signals.TestSilent as e:
        # Suppress test reporting: skip addRecord in the finally block and
        # drop the entry from the requested list.
        is_silenced = True
        self._exec_procedure_func(self._onSilent, tr_record)
        # NOTE(review): this removes the name string from `requested`, but
        # other code in this file populates `requested` with
        # TestResultRecord objects — if that is the element type here,
        # this raises ValueError. Confirm what `requested` holds.
        self.results.requested.remove(test_name)
    except Exception as e:
        # Unexpected exception during the test: record as error and run
        # both the exception and failure hooks.
        logging.exception(e)
        tr_record.testError(e)
        self._exec_procedure_func(self._onException, tr_record)
        self._exec_procedure_func(self._onFail, tr_record)
    else:
        # Keep supporting return False for now.
        # TODO(angli): Deprecate return False support.
        if verdict or (verdict is None):
            # Test passed (returning nothing counts as a pass).
            tr_record.testPass()
            self._exec_procedure_func(self._onPass, tr_record)
            return
        # Test failed because it didn't return True.
        # This should be removed eventually.
        tr_record.testFail()
        self._exec_procedure_func(self._onFail, tr_record)
    finally:
        if not is_silenced:
            self.results.addRecord(tr_record)
        self._testExit(test_name)
def run(self, test_names=None):
    """Runs test cases within a test class by the order they appear in the
    execution list.

    One of these test cases lists will be executed, shown here in priority
    order:
    1. The test_names list, which is passed from cmd line. Invalid names
       are guarded by cmd line arg parsing.
    2. The self.tests list defined in test class. Invalid names are
       ignored.
    3. All function that matches test case naming convention in the test
       class.

    Args:
        test_names: A list of string that are test case names requested in
                    cmd line.

    Returns:
        The test results object of this class.
    """
    logging.info("==========> %s <==========", self.TAG)
    # Devise the actual test cases to run in the test class.
    if not test_names:
        if self.tests:
            # Specified by run list in class.
            test_names = list(self.tests)
        else:
            # No test case specified by user, execute all in the test class
            test_names = self._get_all_test_names()
    # Only names following the test naming convention are recorded as
    # requested; generator-trigger entries are excluded.
    self.results.requested = [
        records.TestResultRecord(test_name, self.TAG)
        for test_name in test_names if test_name.startswith(STR_TEST)
    ]
    tests = self._get_test_funcs(test_names)
    # Setup for the class. A False return or any exception from
    # _setUpClass fails the whole class, tears it down, and returns early.
    try:
        if self._setUpClass() is False:
            raise signals.TestFailure("Failed to setup %s." % self.TAG)
    except Exception as e:
        logging.exception("Failed to setup %s.", self.TAG)
        self.results.failClass(self.TAG, e)
        self._exec_func(self._tearDownClass)
        return self.results
    # Run tests in order.
    try:
        for test_name, test_func in tests:
            if test_name.startswith(STR_GENERATE):
                # Generator entries are trigger functions that register
                # the actual generated test cases; call them directly.
                logging.info(
                    "Executing generated test trigger function '%s'",
                    test_name)
                test_func()
                logging.info("Finished '%s'", test_name)
            else:
                self.execOneTest(test_name, test_func, None)
        if self._skip_all_testcases and not self.results.executed:
            self.results.skipClass(
                self.TAG,
                "All test cases skipped; unable to find any test case.")
        return self.results
    except signals.TestAbortClass:
        # Abort this class only; results collected so far are returned.
        logging.info("Received TestAbortClass signal")
        return self.results
    except signals.TestAbortAll as e:
        logging.info("Received TestAbortAll signal")
        # Piggy-back test results on this exception object so we don't lose
        # results from this test class.
        setattr(e, "results", self.results)
        raise e
    except Exception as e:
        # Exception happened during test.
        logging.exception(e)
        raise e
    finally:
        # Class teardown and summary run on every exit path, including
        # aborts and unexpected exceptions.
        self._exec_func(self._tearDownClass)
        if self.web.enabled:
            name, timestamp = self.web.GetTestModuleKeys()
            self.results.setTestModuleKeys(name, timestamp)
        logging.info("Summary for test class %s: %s", self.TAG,
                     self.results.summary())