def execute(self, test_start=None): """Starts the framework and executes the given test. Args: test_start: Either a trigger phase for starting the test, or a function that returns a DUT ID. If neither is provided, defaults to not setting the DUT ID. """ # Lock this section so we don't .stop() the executor between instantiating # it and .Start()'ing it, doing so does weird things to the executor state. with self._lock: # Sanity check to make sure someone isn't doing something weird like # trying to Execute() the same test twice in two separate threads. We # hold the lock between here and Start()'ing the executor to guarantee # that only one thread is successfully executing the test. if self._executor: raise InvalidTestStateError('Test already running', self._executor) # Snapshot some things we care about and store them. self._test_desc.metadata['test_name'] = self._test_options.name self._test_desc.metadata['config'] = conf._asdict() self.last_run_time_millis = util.time_millis() if isinstance(test_start, LambdaType): @TestPhase() def trigger_phase(test): test.test_record.dut_id = test_start() trigger = trigger_phase else: trigger = test_start if conf.capture_source: trigger.code_info = test_record.CodeInfo.for_function(trigger.func) self._executor = core.TestExecutor( self._test_desc, trigger, self._test_options.teardown_function) _LOG.info('Executing test: %s', self.descriptor.code_info.name) self._executor.start() try: self._executor.wait() finally: try: final_state = self._executor.finalize() _LOG.debug('Test completed for %s, saving to history and outputting.', final_state.test_record.metadata['test_name']) for output_cb in (self._test_options.output_callbacks + [functools.partial(history.append_record, self.uid)]): try: output_cb(final_state.test_record) except Exception: # pylint: disable=broad-except _LOG.exception( 'Output callback %s raised; continuing anyway', output_cb) finally: self._executor = None return final_state.test_record.outcome == 
test_record.Outcome.PASS
def test_as_dict(self):
  """conf._asdict() must contain at least the expected key/value pairs."""
  subset = {
      'flag_key': 'flag_value',
      'other_flag': 'other_value',
      'true_value': True,
      'num_value': 100,
      'none_default': None,
      'string_default': 'default',
  }
  actual = conf._asdict()
  # Dict item views support set comparison: <= means "is a subset of".
  self.assertLessEqual(six.viewitems(subset), six.viewitems(actual))
def test_as_dict(self):
  """conf._asdict() should reflect flags, defaults, and loaded overrides."""
  conf.load(station_id='station_id')
  # Use assertEqual: assertEquals is a deprecated alias removed in
  # Python 3.12.
  self.assertEqual(
      {
          'flag_key': 'flag_value',
          'enable_station_discovery': True,
          'station_api_port': 8888,
          'allow_unset_measurements': False,
          'capture_source': False,
          'station_discovery_string': 'OPENHTF_DISCOVERY',
          'station_api_bind_address': '0.0.0.0',
          'station_id': 'station_id',
          'other_flag': 'other_value',
          'string_default': 'default',
          'none_default': None,
          'teardown_timeout_s': 3,
          'max_history_size_mb': 256,
      }, conf._asdict())
def test_as_dict(self):
  """Verify the exact contents of conf._asdict() after loading station_id."""
  conf.load(station_id='station_id')
  expected = {
      'flag_key': 'flag_value',
      'true_value': True,
      'num_value': 100,
      'cancel_timeout_s': 2,
      'example_plug_increment_size': 1,
      'allow_unset_measurements': False,
      'capture_source': False,
      'station_id': 'station_id',
      'other_flag': 'other_value',
      'plug_teardown_timeout_s': 0,
      'string_default': 'default',
      'none_default': None,
      'teardown_timeout_s': 30,
  }
  self.assertEqual(expected, conf._asdict())
def test_as_dict(self):
  """The configuration dict should match the known flags and defaults."""
  conf.load(station_id='station_id')
  want = {
      'flag_key': 'flag_value',
      'true_value': True,
      'num_value': 100,
      'cancel_timeout_s': 2,
      'example_plug_increment_size': 1,
      'allow_unset_measurements': False,
      'capture_source': False,
      'station_id': 'station_id',
      'other_flag': 'other_value',
      'plug_teardown_timeout_s': 0,
      'string_default': 'default',
      'none_default': None,
      'teardown_timeout_s': 30,
  }
  got = conf._asdict()
  self.assertEqual(want, got)
def test_as_dict(self):
  """conf._asdict() should reflect flags, defaults, and loaded overrides."""
  conf.load(station_id='station_id')
  # Use assertEqual: assertEquals is a deprecated alias removed in
  # Python 3.12.
  self.assertEqual(
      {
          'flag_key': 'flag_value',
          'enable_station_discovery': True,
          'station_api_port': 8888,
          'allow_unset_measurements': False,
          'capture_source': False,
          'station_discovery_string': 'OPENHTF_DISCOVERY',
          'station_api_bind_address': '0.0.0.0',
          'station_id': 'station_id',
          'other_flag': 'other_value',
          'string_default': 'default',
          'none_default': None,
          'teardown_timeout_s': 30,
          'max_history_size_mb': 256,
      }, conf._asdict())
def execute(self, test_start=None):
  """Starts the framework and executes the given test.

  Args:
    test_start: Either a trigger phase for starting the test, or a function
        that returns a DUT ID. If neither is provided, defaults to not
        setting the DUT ID.

  Returns:
    Boolean indicating whether the test failed (False) or passed (True).

  Raises:
    InvalidTestStateError: if this test is already being executed.
  """
  # Lock this section so we don't .stop() the executor between instantiating
  # it and .Start()'ing it, doing so does weird things to the executor state.
  with self._lock:
    # Sanity check to make sure someone isn't doing something weird like
    # trying to Execute() the same test twice in two separate threads.  We
    # hold the lock between here and Start()'ing the executor to guarantee
    # that only one thread is successfully executing the test.
    if self._executor:
      raise InvalidTestStateError('Test already running', self._executor)

    # Snapshot some things we care about and store them.
    self._test_desc.metadata['test_name'] = self._test_options.name
    self._test_desc.metadata['config'] = conf._asdict()
    self.last_run_time_millis = util.time_millis()

    if isinstance(test_start, LambdaType):
      # A plain callable was given: wrap it in a trigger phase that sets the
      # DUT ID from its return value.
      @phase_descriptor.PhaseOptions()
      def trigger_phase(test):
        test.test_record.dut_id = test_start()
      trigger = trigger_phase
    else:
      trigger = test_start
    if conf.capture_source:
      trigger.code_info = test_record.CodeInfo.for_function(trigger.func)
    self._executor = test_executor.TestExecutor(
        self._test_desc, self.make_uid(), trigger,
        self._test_options.default_dut_id,
        self._test_options.teardown_function,
        self._test_options.failure_exceptions)
    _LOG.info('Executing test: %s', self.descriptor.code_info.name)
    self.TEST_INSTANCES[self.uid] = self
    self._executor.start()

  try:
    self._executor.wait()
  finally:
    try:
      final_state = self._executor.finalize()
      _LOG.debug('Test completed for %s, outputting now.',
                 final_state.test_record.metadata['test_name'])
      for output_cb in self._test_options.output_callbacks:
        try:
          output_cb(final_state.test_record)
        except Exception:  # pylint: disable=broad-except
          # Fix: use _LOG.exception so the callback's traceback is logged
          # (matches the other variants of this method in the project).
          _LOG.exception(
              'Output callback %s raised; continuing anyway', output_cb)
      # Make sure the final outcome of the test is printed last and in a
      # noticeable color so it doesn't get scrolled off the screen or missed.
      if final_state.test_record.outcome == test_record.Outcome.ERROR:
        for detail in final_state.test_record.outcome_details:
          console_output.error_print(detail.description)
      else:
        # Fix: the default must be the actual colorama attribute, not the
        # string literal 'colorama.Style.BRIGHT', which would be printed
        # verbatim into the banner instead of emitting an ANSI escape code.
        colors = collections.defaultdict(lambda: colorama.Style.BRIGHT)
        colors[test_record.Outcome.PASS] = ''.join(
            (colorama.Style.BRIGHT, colorama.Fore.GREEN))
        colors[test_record.Outcome.FAIL] = ''.join(
            (colorama.Style.BRIGHT, colorama.Fore.RED))
        msg_template = 'test: {name} outcome: {color}{outcome}{rst}'
        console_output.banner_print(
            msg_template.format(
                name=final_state.test_record.metadata['test_name'],
                color=colors[final_state.test_record.outcome],
                outcome=final_state.test_record.outcome.name,
                rst=colorama.Style.RESET_ALL))
    finally:
      del self.TEST_INSTANCES[self.uid]
      self._executor = None
  return final_state.test_record.outcome == test_record.Outcome.PASS
def execute(self, test_start=None):
  """Starts the framework and executes the given test.

  Args:
    test_start: Either a trigger phase for starting the test, or a function
        that returns a DUT ID. If neither is provided, defaults to not
        setting the DUT ID.

  Returns:
    True if the test record outcome was PASS, False otherwise.

  Raises:
    InvalidTestStateError: if this test is already being executed.
  """
  # Lock this section so we don't .stop() the executor between instantiating
  # it and .Start()'ing it, doing so does weird things to the executor state.
  with self._lock:
    # Sanity check to make sure someone isn't doing something weird like
    # trying to Execute() the same test twice in two separate threads.  We
    # hold the lock between here and Start()'ing the executor to guarantee
    # that only one thread is successfully executing the test.
    if self._executor:
      raise InvalidTestStateError('Test already running', self._executor)

    # Snapshot some things we care about and store them.
    self._test_desc.metadata['test_name'] = self._test_options.name
    self._test_desc.metadata['config'] = conf._asdict()
    self.last_run_time_millis = util.time_millis()

    if isinstance(test_start, LambdaType):
      # A plain callable was given: wrap it in a trigger phase that sets the
      # DUT ID from its return value.
      @TestPhase()
      def trigger_phase(test):
        test.test_record.dut_id = test_start()
      trigger = trigger_phase
    else:
      # Assume an already-constructed trigger phase (or None).
      trigger = test_start
    if conf.capture_source:
      trigger.code_info = test_record.CodeInfo.for_function(
          trigger.func)
    self._executor = core.TestExecutor(
        self._test_desc, self.make_uid(), trigger,
        self._test_options.teardown_function)
    _LOG.info('Executing test: %s', self.descriptor.code_info.name)
    # Register this instance so it is discoverable while running.
    self.TEST_INSTANCES[self.uid] = self
    self._executor.start()

  # Wait outside the lock so stop() can be called concurrently.
  try:
    self._executor.wait()
  finally:
    try:
      final_state = self._executor.finalize()
      _LOG.debug(
          'Test completed for %s, saving to history and outputting.',
          final_state.test_record.metadata['test_name'])
      # Run user output callbacks plus the implicit history-append callback;
      # a failing callback is logged but never aborts the remaining ones.
      for output_cb in (
          self._test_options.output_callbacks +
          [functools.partial(history.append_record, self.uid)]):
        try:
          output_cb(final_state.test_record)
        except Exception:  # pylint: disable=broad-except
          _LOG.exception(
              'Output callback %s raised; continuing anyway', output_cb)
    finally:
      # Deregister and clear the executor so execute() can be called again.
      del self.TEST_INSTANCES[self.uid]
      self._executor = None
  return final_state.test_record.outcome == test_record.Outcome.PASS
def get(self):
  """Respond with the current configuration and the parsed CLI flags."""
  payload = {
      'conf': conf._asdict(),
      'flags': vars(self._cli_args),
  }
  self.write(payload)
def execute(self, test_start=None):
  """Starts the framework and executes the given test.

  Args:
    test_start: Either a trigger phase for starting the test, or a function
        that returns a DUT ID. If neither is provided, defaults to not
        setting the DUT ID.

  Returns:
    Boolean indicating whether the test failed (False) or passed (True).

  Raises:
    InvalidTestStateError: if this test is already being executed.
  """
  # Lock this section so we don't .stop() the executor between instantiating
  # it and .Start()'ing it, doing so does weird things to the executor state.
  with self._lock:
    # Sanity check to make sure someone isn't doing something weird like
    # trying to Execute() the same test twice in two separate threads.  We
    # hold the lock between here and Start()'ing the executor to guarantee
    # that only one thread is successfully executing the test.
    if self._executor:
      raise InvalidTestStateError('Test already running', self._executor)

    # Snapshot some things we care about and store them.
    self._test_desc.metadata['test_name'] = self._test_options.name
    self._test_desc.metadata['config'] = conf._asdict()
    self.last_run_time_millis = util.time_millis()

    if isinstance(test_start, LambdaType):
      # A plain callable was given: wrap it in a trigger phase that sets the
      # DUT ID from its return value.
      @phase_descriptor.PhaseOptions()
      def trigger_phase(test):
        test.test_record.dut_id = test_start()
      trigger = trigger_phase
    else:
      # Assume an already-constructed trigger phase (or None).
      trigger = test_start
    if conf.capture_source:
      trigger.code_info = test_record.CodeInfo.for_function(trigger.func)
    test_desc = self._get_running_test_descriptor()
    self._executor = test_executor.TestExecutor(
        test_desc, self.make_uid(), trigger, self._test_options)
    _LOG.info('Executing test: %s', self.descriptor.code_info.name)
    # Register this instance so it is discoverable while running.
    self.TEST_INSTANCES[self.uid] = self
    self._executor.start()

  # Wait outside the lock so stop() can be called concurrently.
  try:
    self._executor.wait()
  except KeyboardInterrupt:
    # The SIGINT handler only raises the KeyboardInterrupt once, so only retry
    # that once.
    self._executor.wait()
    raise
  finally:
    try:
      final_state = self._executor.finalize()
      _LOG.debug('Test completed for %s, outputting now.',
                 final_state.test_record.metadata['test_name'])
      # A failing output callback is logged but never aborts the others.
      for output_cb in self._test_options.output_callbacks:
        try:
          output_cb(final_state.test_record)
        except Exception:  # pylint: disable=broad-except
          _LOG.exception(
              'Output callback %s raised; continuing anyway', output_cb)
      # Make sure the final outcome of the test is printed last and in a
      # noticeable color so it doesn't get scrolled off the screen or missed.
      if final_state.test_record.outcome == test_record.Outcome.ERROR:
        for detail in final_state.test_record.outcome_details:
          console_output.error_print(detail.description)
      else:
        colors = collections.defaultdict(lambda: colorama.Style.BRIGHT)
        colors[test_record.Outcome.PASS] = ''.join((colorama.Style.BRIGHT,
                                                    colorama.Fore.GREEN))
        colors[test_record.Outcome.FAIL] = ''.join((colorama.Style.BRIGHT,
                                                    colorama.Fore.RED))
        msg_template = 'test: {name} outcome: {color}{outcome}{rst}'
        console_output.banner_print(msg_template.format(
            name=final_state.test_record.metadata['test_name'],
            color=colors[final_state.test_record.outcome],
            outcome=final_state.test_record.outcome.name,
            rst=colorama.Style.RESET_ALL))
    finally:
      # Deregister and clear the executor so execute() can be called again.
      del self.TEST_INSTANCES[self.uid]
      self._executor = None
  return final_state.test_record.outcome == test_record.Outcome.PASS
def execute(self,
            test_start: Optional[phase_descriptor.PhaseT] = None,
            profile_filename: Optional[Text] = None) -> bool:
  """Starts the framework and executes the given test.

  Args:
    test_start: Either a trigger phase for starting the test, or a function
        that returns a DUT ID. If neither is provided, defaults to not
        setting the DUT ID.
    profile_filename: Name of file to put profiling stats into. This also
        enables profiling data collection.

  Returns:
    Boolean indicating whether the test failed (False) or passed (True).

  Raises:
    InvalidTestStateError: if this test is already being executed.
  """
  # Fail fast (before taking the lock) on invalid phase configurations.
  phase_descriptor.check_for_duplicate_results(
      self._test_desc.phase_sequence.all_phases(),
      self._test_options.diagnosers)
  phase_collections.check_for_duplicate_subtest_names(
      self._test_desc.phase_sequence)
  # Lock this section so we don't .stop() the executor between instantiating
  # it and .Start()'ing it, doing so does weird things to the executor state.
  with self._lock:
    # Sanity check to make sure someone isn't doing something weird like
    # trying to Execute() the same test twice in two separate threads.  We
    # hold the lock between here and Start()'ing the executor to guarantee
    # that only one thread is successfully executing the test.
    if self._executor:
      raise InvalidTestStateError('Test already running', self._executor)

    # Snapshot some things we care about and store them.
    self._test_desc.metadata['test_name'] = self._test_options.name
    self._test_desc.metadata['config'] = conf._asdict()
    self.last_run_time_millis = util.time_millis()

    if isinstance(test_start, types.LambdaType):
      # A plain callable was given: wrap it in a trigger phase that sets the
      # DUT ID from its return value.
      @phase_descriptor.PhaseOptions()
      def trigger_phase(test):
        test.test_record.dut_id = typing.cast(types.LambdaType, test_start)()
      trigger = trigger_phase
    else:
      # Assume an already-constructed trigger phase (or None).
      trigger = test_start
    if conf.capture_source:
      trigger.code_info = htf_test_record.CodeInfo.for_function(trigger.func)
    self._executor = test_executor.TestExecutor(
        self._test_desc,
        self.make_uid(),
        trigger,
        self._test_options,
        run_with_profiling=profile_filename is not None)
    _LOG.info('Executing test: %s', self.descriptor.code_info.name)
    # Register this instance so it is discoverable while running.
    self.TEST_INSTANCES[self.uid] = self
    self._executor.start()

  # Wait outside the lock so stop() can be called concurrently.
  try:
    self._executor.wait()
  except KeyboardInterrupt:
    # The SIGINT handler only raises the KeyboardInterrupt once, so only
    # retry that once.
    self._executor.wait()
    raise
  finally:
    try:
      final_state = self._executor.finalize()
      _LOG.debug('Test completed for %s, outputting now.',
                 final_state.test_record.metadata['test_name'])
      test_executor.combine_profile_stats(self._executor.phase_profile_stats,
                                          profile_filename)
      # A failing output callback is logged but never aborts the others.
      for output_cb in self._test_options.output_callbacks:
        try:
          output_cb(final_state.test_record)
        except Exception:  # pylint: disable=broad-except
          stacktrace = traceback.format_exc()
          _LOG.error('Output callback %s raised:\n%s\nContinuing anyway...',
                     output_cb, stacktrace)
      # Make sure the final outcome of the test is printed last and in a
      # noticeable color so it doesn't get scrolled off the screen or missed.
      if final_state.test_record.outcome == htf_test_record.Outcome.ERROR:
        for detail in final_state.test_record.outcome_details:
          console_output.error_print(detail.description)
      else:
        colors = collections.defaultdict(lambda: colorama.Style.BRIGHT)
        colors[htf_test_record.Outcome.PASS] = ''.join(
            (colorama.Style.BRIGHT, colorama.Fore.GREEN))  # pytype: disable=wrong-arg-types
        colors[htf_test_record.Outcome.FAIL] = ''.join(
            (colorama.Style.BRIGHT, colorama.Fore.RED))  # pytype: disable=wrong-arg-types
        msg_template = (
            'test: {name} outcome: {color}{outcome}{marginal}{rst}')
        console_output.banner_print(
            msg_template.format(
                name=final_state.test_record.metadata['test_name'],
                # Marginal passes are highlighted in yellow rather than the
                # outcome's usual color.
                color=(colorama.Fore.YELLOW
                       if final_state.test_record.marginal else
                       colors[final_state.test_record.outcome]),
                outcome=final_state.test_record.outcome.name,
                marginal=(' (MARGINAL)'
                          if final_state.test_record.marginal else ''),
                rst=colorama.Style.RESET_ALL))
    finally:
      # Deregister, release executor resources, and clear the executor so
      # execute() can be called again.
      del self.TEST_INSTANCES[self.uid]
      self._executor.close()
      self._executor = None
  return final_state.test_record.outcome == htf_test_record.Outcome.PASS