def test_phase_test_same_result(self):
    """A phase diagnoser may share a result name with a compatible test diagnoser."""

    @htf.diagnose(dupe_a_phase_diag)
    def phase_under_test():
        pass

    # Should complete without raising DuplicateResultError.
    diagnoses_lib.check_for_duplicate_results([phase_under_test],
                                              [dupe_a2_test_diag])
def test_phase_test_dupe(self):
    """A phase diagnoser clashing with a test diagnoser raises DuplicateResultError."""

    @htf.diagnose(dupe_a_phase_diag)
    def phase_under_test():
        pass

    with self.assertRaises(diagnoses_lib.DuplicateResultError):
        diagnoses_lib.check_for_duplicate_results([phase_under_test],
                                                  [dupe_b_test_diag])
def test_phase_phase_same_diagnoser(self):
    """Two phases reusing the exact same diagnoser is not a duplicate."""

    @htf.diagnose(dupe_a_phase_diag)
    def first_phase():
        pass

    @htf.diagnose(dupe_a_phase_diag)
    def second_phase():
        pass

    # Should complete without raising DuplicateResultError.
    diagnoses_lib.check_for_duplicate_results(
        iter([first_phase, second_phase]), [])
def test_phase_phase_dupe(self):
    """Two distinct phase diagnosers clashing raises DuplicateResultError."""

    @htf.diagnose(dupe_a_phase_diag)
    def first_phase():
        pass

    @htf.diagnose(dupe_b_phase_diag)
    def second_phase():
        pass

    with self.assertRaises(diagnoses_lib.DuplicateResultError):
        diagnoses_lib.check_for_duplicate_results(
            iter([first_phase, second_phase]), [])
def _handle_phase(self, phase_desc):
    """Handle execution of a single test phase.

    Builds a minimal fake TestState around the single phase, pre-populates it
    with the harness's user-defined state and diagnoses, then runs the phase
    once through a real PhaseExecutor so all possible outcomes are surfaced.

    Args:
      phase_desc: The phase descriptor to execute in isolation.

    Returns:
      Tuple of (the phase's test_record entry, failure message string or None).
    """
    # Validate this one phase against an empty test-diagnoser list before
    # running it.
    diagnoses_lib.check_for_duplicate_results(iter([phase_desc]), [])
    logs.configure_logging()
    # Set up only the plugs this phase declares.
    self._initialize_plugs(phase_plug.cls for phase_plug in phase_desc.plugs)

    # Cobble together a fake TestState to pass to the test phase.
    test_options = test_descriptor.TestOptions()
    # Patch PlugManager construction so TestState picks up the harness's
    # pre-built plug manager instead of creating a fresh one.
    with mock.patch(
        'openhtf.plugs.PlugManager', new=lambda _, __: self.plug_manager):
        test_state_ = test_state.TestState(
            openhtf.TestDescriptor(
                phase_collections.PhaseSequence((phase_desc,)),
                phase_desc.code_info, {}), 'Unittest:StubTest:UID',
            test_options)
        test_state_.mark_test_started()

    # Seed state and diagnoses the phase expects to already be present.
    test_state_.user_defined_state.update(self.phase_user_defined_state)
    for diag in self.phase_diagnoses:
        # Must go through both the manager and the record so lookups and the
        # serialized record stay consistent.
        test_state_.diagnoses_manager._add_diagnosis(diag)  # pylint: disable=protected-access
        test_state_.test_record.add_diagnosis(diag)

    # Save the test_state to the last_test_case attribute to give it access to
    # the underlying state.
    self.test_case.last_test_state = test_state_

    # Actually execute the phase, saving the result in our return value.
    executor = phase_executor.PhaseExecutor(test_state_)
    # Log an exception stack when a Phase errors out.
    with mock.patch.object(
        phase_executor.PhaseExecutorThread,
        '_log_exception',
        side_effect=logging.exception):
        # Use _execute_phase_once because we want to expose all possible
        # outcomes.
        phase_result, _ = executor._execute_phase_once(
            phase_desc,
            is_last_repeat=False,
            run_with_profiling=False,
            subtest_rec=None)

    # Surface the traceback of a raised exception so the caller can report it.
    if phase_result.raised_exception:
        failure_message = phase_result.phase_result.get_traceback_string()
    else:
        failure_message = None
    return test_state_.test_record.phases[-1], failure_message
def test_test_test_same_diagnoser(self):
    """Listing the same test diagnoser twice is not a duplicate."""
    diagnosers = [dupe_a_test_diag, dupe_a_test_diag]
    # Should complete without raising DuplicateResultError.
    diagnoses_lib.check_for_duplicate_results([], diagnosers)
def test_test_test_dupe(self):
    """Two distinct test diagnosers clashing raises DuplicateResultError."""
    diagnosers = [dupe_a_test_diag, dupe_b_test_diag]
    with self.assertRaises(diagnoses_lib.DuplicateResultError):
        diagnoses_lib.check_for_duplicate_results([], diagnosers)
def execute(self,
            test_start: Optional[phase_descriptor.PhaseT] = None,
            profile_filename: Optional[Text] = None) -> bool:
    """Starts the framework and executes the given test.

    Args:
      test_start: Either a trigger phase for starting the test, or a function
        that returns a DUT ID. If neither is provided, defaults to not setting
        the DUT ID.
      profile_filename: Name of file to put profiling stats into. This also
        enables profiling data collection.

    Returns:
      Boolean indicating whether the test failed (False) or passed (True).

    Raises:
      InvalidTestStateError: if this test is already being executed.
    """
    # Fail fast on configuration problems before any executor is created.
    diagnoses_lib.check_for_duplicate_results(
        self._test_desc.phase_sequence.all_phases(),
        self._test_options.diagnosers)
    phase_collections.check_for_duplicate_subtest_names(
        self._test_desc.phase_sequence)
    # Lock this section so we don't .stop() the executor between instantiating
    # it and .Start()'ing it, doing so does weird things to the executor state.
    with self._lock:
        # Sanity check to make sure someone isn't doing something weird like
        # trying to Execute() the same test twice in two separate threads.  We
        # hold the lock between here and Start()'ing the executor to guarantee
        # that only one thread is successfully executing the test.
        if self._executor:
            raise InvalidTestStateError('Test already running', self._executor)

        # Snapshot some things we care about and store them.
        self._test_desc.metadata['test_name'] = self._test_options.name
        self._test_desc.metadata['config'] = conf._asdict()
        self.last_run_time_millis = util.time_millis()

        if isinstance(test_start, types.LambdaType):
            # A bare callable was given: wrap it in a trigger phase that sets
            # the DUT ID from its return value.
            @phase_descriptor.PhaseOptions()
            def trigger_phase(test):
                test.test_record.dut_id = typing.cast(types.LambdaType,
                                                      test_start)()

            trigger = trigger_phase
        else:
            trigger = test_start

        # BUGFIX: trigger is None when test_start was not provided; the
        # previous unguarded attribute assignment raised AttributeError
        # whenever capture_source was enabled without a trigger.
        if trigger is not None and conf.capture_source:
            trigger.code_info = htf_test_record.CodeInfo.for_function(
                trigger.func)

        self._executor = test_executor.TestExecutor(
            self._test_desc,
            self.make_uid(),
            trigger,
            self._test_options,
            run_with_profiling=profile_filename is not None)
        _LOG.info('Executing test: %s', self.descriptor.code_info.name)
        self.TEST_INSTANCES[self.uid] = self
        self._executor.start()

    try:
        self._executor.wait()
    except KeyboardInterrupt:
        # The SIGINT handler only raises the KeyboardInterrupt once, so only
        # retry that once.
        self._executor.wait()
        raise
    finally:
        try:
            final_state = self._executor.finalize()
            _LOG.debug('Test completed for %s, outputting now.',
                       final_state.test_record.metadata['test_name'])
            test_executor.combine_profile_stats(
                self._executor.phase_profile_stats, profile_filename)
            for output_cb in self._test_options.output_callbacks:
                try:
                    output_cb(final_state.test_record)
                except Exception:  # pylint: disable=broad-except
                    # Best-effort: one failing output callback must not stop
                    # the rest, so log and continue.
                    stacktrace = traceback.format_exc()
                    _LOG.error(
                        'Output callback %s raised:\n%s\nContinuing anyway...',
                        output_cb, stacktrace)
            # Make sure the final outcome of the test is printed last and in a
            # noticeable color so it doesn't get scrolled off the screen or
            # missed.
            if final_state.test_record.outcome == htf_test_record.Outcome.ERROR:
                for detail in final_state.test_record.outcome_details:
                    console_output.error_print(detail.description)
            else:
                colors = collections.defaultdict(
                    lambda: colorama.Style.BRIGHT)
                colors[htf_test_record.Outcome.PASS] = ''.join(
                    (colorama.Style.BRIGHT, colorama.Fore.GREEN))  # pytype: disable=wrong-arg-types
                colors[htf_test_record.Outcome.FAIL] = ''.join(
                    (colorama.Style.BRIGHT, colorama.Fore.RED))  # pytype: disable=wrong-arg-types
                msg_template = 'test: {name} outcome: {color}{outcome}{rst}'
                console_output.banner_print(
                    msg_template.format(
                        name=final_state.test_record.metadata['test_name'],
                        color=colors[final_state.test_record.outcome],
                        outcome=final_state.test_record.outcome.name,
                        rst=colorama.Style.RESET_ALL))
        finally:
            # Always unregister and tear down the executor, even if output
            # handling failed.
            del self.TEST_INSTANCES[self.uid]
            self._executor.close()
            self._executor = None
    return final_state.test_record.outcome == htf_test_record.Outcome.PASS