def main():
  """Run a two-phase test that stops as soon as a phase fails."""
  suite = htf.Test(add_numbers_fails, hello_world)
  suite.add_output_callbacks(console_summary.ConsoleSummary())
  # Option 1: enable early stopping through test.configure.
  suite.configure(stop_on_first_failure=True)
  # Option 2: disable option 1 and enable the line below to get the
  # same result.
  # CONF.load(stop_on_first_failure=True)
  suite.execute(test_start=user_input.prompt_for_test_start())
def test_mfg_event_from_test_record(self, user_mock):
  """Run the example test, convert its record, and spot-check the event."""
  user_mock.prompt.return_value = 'SomeWidget'
  record = yield self._test
  mfg_event = mfg_event_converter.mfg_event_from_test_record(record)
  callback = console_summary.ConsoleSummary()
  callback(record)

  # The overall run must have passed.
  self.assertEqual(test_runs_pb2.PASS, mfg_event.test_status)

  # Every expected phase must appear, in order.
  expected_phase_names = [
      'trigger_phase', 'hello_world', 'dimensions', 'attachments',
      'hello_world', 'attachments'
  ]
  self.assertEqual(expected_phase_names,
                   [phase.name for phase in mfg_event.phases])

  # Spot check duplicate measurements (widget_size); only the first
  # measurement carrying each deduplicated name is inspected.
  for measurement_name in ['widget_size_0', 'widget_size_1']:
    found = next(
        (m for m in mfg_event.measurement if m.name == measurement_name),
        None)
    if found is None:
      raise AssertionError('No measurement named %s' % measurement_name)
    self.assertEqual(3.0, found.numeric_value)

  # Spot check a duplicated attachment (example_attachment.txt).
  for attachment_name in [
      'example_attachment_0.txt', 'example_attachment_1.txt'
  ]:
    found = next(
        (a for a in mfg_event.attachment if a.name == attachment_name),
        None)
    if found is None:
      raise AssertionError('No attachment named %s' % attachment_name)
    self.assertEqual(
        b'This is a text file attachment.\n',
        found.value_binary,
    )
def main():
  """Build the example test with a teardown phase group and run it."""
  phase_sequence = htf.PhaseGroup.with_teardown(teardown)(
      hello_world,
      set_measurements,
      dimensions,
      attachments,
      skip_phase,
      measures_with_args.with_args(minimum=1, maximum=4),
      measures_with_marginal_args.with_args(
          marginal_minimum=4, marginal_maximum=6),
      analysis,
  )
  # Some metadata fields, these in particular are used by mfg-inspector,
  # but you can include any metadata fields.
  test = htf.Test(
      phase_sequence,
      test_name='MyTest',
      test_description='OpenHTF Example Test',
      test_version='1.0.0')

  # Emit the finished record in three formats: pickle, JSON, and a
  # console summary.
  test.add_output_callbacks(
      callbacks.OutputToFile(
          './{dut_id}.{metadata[test_name]}.{start_time_millis}.pickle'))
  test.add_output_callbacks(
      json_factory.OutputToJSON(
          './{dut_id}.{metadata[test_name]}.{start_time_millis}.json',
          indent=4))
  test.add_output_callbacks(console_summary.ConsoleSummary())

  # Example of how to output to testrun protobuf format and save to disk then
  # upload. Replace json_file with your JSON-formatted private key downloaded
  # from Google Developers Console when you created the Service Account you
  # intend to use, or name it 'my_private_key.json'.
  # inspector = (mfg_inspector.MfgInspector
  #              .from_json(json.load(json_file))
  #              .set_converter(test_runs_converter.test_run_from_test_record))
  # test.add_output_callbacks(
  #     inspector.save_to_disk('./{dut_id}.{start_time_millis}.pb'),
  #     inspector.upload())

  test.execute(test_start=user_input.prompt_for_test_start())
def main():
  """Assemble the checkpoints demo test and run it with a fixed DUT ID."""
  # We instantiate our OpenHTF test with the phases we want to run as args.
  phases = (
      measurements_example.hello_phase,
      measurements_example.again_phase,
      failing_phase,
      measurements_example.lots_of_measurements,
      checkpoints.checkpoint(),
      long_running_phase,
  )
  test = htf.Test(*phases)

  # In order to view the result of the test, we have to output it somewhere;
  # the console summary is the easiest way to do this.
  test.add_output_callbacks(console_summary.ConsoleSummary())

  # The complete record, including every measurement from
  # measurements_example.lots_of_measurements, is also written as JSON.
  test.add_output_callbacks(
      json_factory.OutputToJSON('./checkpoints.json', indent=2))

  # Unlike hello_world.py, where we prompt for a DUT ID, here we just
  # use an arbitrary one.
  test.execute(test_start=lambda: 'MyDutId')
# checkpoint phase is run right before this phase. for i in range(10): test.logger.info('Still running....') time.sleep(10) test.logger.info('Done with long_running_phase') if __name__ == '__main__': # We instantiate our OpenHTF test with the phases we want to run as args. test = htf.Test( measurements_example.hello_phase, measurements_example.again_phase, failing_phase, measurements_example.lots_of_measurements, checkpoints.checkpoint(), long_running_phase, ) # In order to view the result of the test, we have to output it somewhere, # outputting to console is an easy way to do this. test.add_output_callbacks(console_summary.ConsoleSummary()) # The complete summary is viable in json, including the measurements # included in measurements_example.lots_of_measurements. test.add_output_callbacks( json_factory.OutputToJSON('./checkpoints.json', indent=2)) # Unlike hello_world.py, where we prompt for a DUT ID, here we'll just # use an arbitrary one. test.execute(test_start=lambda: 'MyDutId')
def test_outcome_colors(self):
  """Every test-record outcome must map to an entry in the color table."""
  summary = console_summary.ConsoleSummary()
  for outcome in htf.test_record.Outcome:
    self.assertIn(outcome, summary.color_table)