def test_platform_property(self):
    """Test that platform_property picks the property value preserving
    the following priority relationship:
    targets.json > yotta blob > default
    """
    with patch("mbed_greentea.mbed_target_info._get_platform_property_from_targets") as _targets,\
         patch("mbed_greentea.mbed_target_info._get_platform_property_from_info_mapping") as _info_mapping,\
         patch("mbed_greentea.mbed_target_info._get_platform_property_from_default") as _default:
        # 1
        _targets.return_value = "targets"
        _info_mapping.return_value = None
        _default.return_value = "default"
        self.assertEqual(
            mbed_target_info.get_platform_property("K64F", "copy_method"),
            "targets")
        # 2
        _info_mapping.return_value = "yotta"
        self.assertEqual(
            mbed_target_info.get_platform_property("K64F", "copy_method"),
            "targets")
        # 3
        _targets.return_value = None
        self.assertEqual(
            mbed_target_info.get_platform_property("K64F", "copy_method"),
            "yotta")
        # 4
        _info_mapping.return_value = None
        self.assertEqual(
            mbed_target_info.get_platform_property("K64F", "copy_method"),
            "default")
        # 5
        _default.return_value = None
        self.assertEqual(
            mbed_target_info.get_platform_property("K64F", "copy_method"),
            None)
        # 6
        _targets.return_value = "targets"
        self.assertEqual(
            mbed_target_info.get_platform_property("K64F", "copy_method"),
            "targets")
        # 7
        _info_mapping.return_value = "yotta"
        self.assertEqual(
            mbed_target_info.get_platform_property("K64F", "copy_method"),
            "targets")
        # 8
        _targets.return_value = None
        self.assertEqual(
            mbed_target_info.get_platform_property("K64F", "copy_method"),
            "yotta")
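
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module under test): the test above
# exercises a simple "first non-None wins" fallback chain, where targets.json
# takes precedence over the yotta blob, which takes precedence over the
# built-in default. A hypothetical helper with that shape could look like the
# function below; the lookup callables are assumed to return None when they
# have no value for the given platform/property pair.
def _example_platform_property_lookup(platform, property_name, lookups):
    """Return the first non-None value produced by 'lookups', in order."""
    for lookup in lookups:
        value = lookup(platform, property_name)
        if value is not None:
            return value
    return None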
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is main CLI function with all command line parameters
    @details This function also implements CLI workflow depending on the CLI parameters provided
    @return Exit code reported back to the environment (0 on success)
    """

    def filter_ready_devices(mbeds_list):
        """! Filters list of MUTs to check if all MUTs are correctly detected with mbed-ls module.
        @details This function logs a lot to help users figure out root cause of their problems
        @param mbeds_list List of MUTs to verify
        @return Tuple of (MUTs detected correctly, MUTs not detected fully)
        """
        ready_mbed_devices = []      # Devices which can be used (are fully detected)
        not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

        gt_logger.gt_log("detected %d device%s" % (len(mbeds_list), 's' if len(mbeds_list) != 1 else ''))
        for mut in mbeds_list:
            if not all(mut.values()):
                gt_logger.gt_log_err("mbed-ls was unable to enumerate correctly all properties of the device!")
                gt_logger.gt_log_tab("check with 'mbedls -j' command if all properties of your device are enumerated properly")

                for prop in mut:
                    if not mut[prop]:
                        # Adding MUT to NOT DETECTED FULLY list
                        if mut not in not_ready_mbed_devices:
                            not_ready_mbed_devices.append(mut)

                        gt_logger.gt_log_err("mbed-ls property '%s' is '%s'" % (prop, str(mut[prop])))
                        if prop == 'serial_port':
                            gt_logger.gt_log_tab("check if your serial port driver is correctly installed!")
                        if prop == 'mount_point':
                            gt_logger.gt_log_tab('check if your OS can detect and mount mbed device mount point!')
            else:
                # Adding MUT to DETECTED CORRECTLY list
                ready_mbed_devices.append(mut)
        return (ready_mbed_devices, not_ready_mbed_devices)

    def get_parallel_value(value):
        """! Get correct value for parallel switch (--parallel)
        @param value Value passed from --parallel
        @return Validated number of parallel test executions (always >= 1)
        """
        try:
            parallel_test_exec = int(value)
            if parallel_test_exec < 1:
                parallel_test_exec = 1
        except ValueError:
            gt_logger.gt_log_err("argument of mode --parallel is not an int, disabled parallel mode")
            parallel_test_exec = 1
        return parallel_test_exec

    if not MBED_LMTOOLS:
        gt_logger.gt_log_err("error: mbed-ls proprietary module not installed")
        return (-1)

    if not MBED_HOST_TESTS:
        gt_logger.gt_log_err("error: mbed-host-tests proprietary module not installed")
        return (-1)

    # This is how you magically control colours in this piece of art software
    gt_logger.colorful(not opts.plain)

    # Prints version and exits
    if opts.version:
        print_version()
        return (0)

    # Load test specification or print warnings / info messages and exit CLI mode
    test_spec, ret = get_test_spec(opts)
    if not test_spec:
        return ret

    # Verbose flag
    verbose = opts.verbose_test_result_only

    # We will load hooks from JSON file to support extra behaviour during test execution
    greentea_hooks = GreenteaHooks(opts.hooks_json) if opts.hooks_json else None

    # Capture alternative test console inputs, used e.g. in 'yotta test command'
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(None,
                                         None,
                                         None,
                                         None,
                                         None,
                                         hooks=greentea_hooks,
                                         digest_source=opts.digest_source,
                                         enum_host_tests_path=enum_host_tests_path,
                                         verbose=verbose)

        # Some error in htrun, abort test execution
        if isinstance(host_test_result, int):
            # int(host_test_result) > 0 - Call to mbedhtrun failed
            # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
            return host_test_result

        # If execution was successful 'run_host_test' returns a tuple with results
        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
        status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
        return (status)

    ### Query with mbedls for available mbed-enabled devices
    gt_logger.gt_log("detecting connected mbed-enabled devices...")

    # Detect devices connected to system
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds_ext()

    if opts.global_resource_mgr:
        # Mocking available platform requested by --grm switch
        grm_values = parse_global_resource_mgr(opts.global_resource_mgr)
        if grm_values:
            gt_logger.gt_log_warn("entering global resource manager mbed-ls dummy mode!")
            grm_platform_name, grm_module_name, grm_ip_name, grm_port_name = grm_values
            mbeds_list = []
            mbeds_list.append(mbeds.get_dummy_platform(grm_platform_name))
            opts.global_resource_mgr = ':'.join(grm_values[1:])
            gt_logger.gt_log_tab("adding dummy platform '%s'" % grm_platform_name)
        else:
            gt_logger.gt_log("global resource manager switch '--grm %s' is in wrong format!" % opts.global_resource_mgr)
            return (-1)

    ready_mbed_devices = []      # Devices which can be used (are fully detected)
    not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

    if mbeds_list:
        ready_mbed_devices, not_ready_mbed_devices = filter_ready_devices(mbeds_list)
        if ready_mbed_devices:
            # devices in form of a pretty formatted table
            for line in log_mbed_devices_in_table(ready_mbed_devices).splitlines():
                gt_logger.gt_log_tab(line.strip(), print_text=verbose)
    else:
        gt_logger.gt_log_err("no compatible devices detected")
        return (RET_NO_DEVICES)

    ### We can filter in only specific target ids
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_logger.gt_log("filtering out target ids not on below list (specified with --use-tids switch)")
        accepted_target_ids = opts.use_target_ids.split(',')
        for tid in accepted_target_ids:
            gt_logger.gt_log_tab("accepting target id '%s'" % gt_logger.gt_bright(tid))

    test_exec_retcode = 0        # Decrement this value each time test case result is not 'OK'
    test_platforms_match = 0     # Count how many tests were actually run with current settings
    target_platforms_match = 0   # Count how many platforms were actually tested with current settings
    test_report = {}             # Test report used to export to JUnit, HTML etc...

    muts_to_test = []            # MUTs to actually be tested
    test_queue = Queue()         # Contains information about test_bin and image_path for each test case
    test_result_queue = Queue()  # Used to store results of each thread
    execute_threads = []         # List of threads to run test cases

    ### Check if argument of --parallel mode is an integer and greater or equal to 1
    parallel_test_exec = get_parallel_value(opts.parallel_test_exec)

    # Values used to generate random seed for test execution order shuffle
    SHUFFLE_SEED_ROUND = 10  # Value used to round float random seed
    shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND)

    # Set shuffle seed if it is provided with command line option
    if opts.shuffle_test_seed:
        shuffle_random_seed = round(float(opts.shuffle_test_seed), SHUFFLE_SEED_ROUND)

    ### Testing procedures, for each target, for each target's compatible platform
    # In case we are using test spec (switch --test-spec) command line option -t <list_of_targets>
    # is used to enumerate builds from test spec we are supplying
    filter_test_builds = opts.list_of_targets.split(',') if opts.list_of_targets else None

    for test_build in test_spec.get_test_builds(filter_test_builds):
        platform_name = test_build.get_platform()
        gt_logger.gt_log("processing target '%s' toolchain '%s' compatible platforms... (note: switch set to --parallel %d)" %
                         (gt_logger.gt_bright(platform_name),
                          gt_logger.gt_bright(test_build.get_toolchain()),
                          int(opts.parallel_test_exec)))

        baudrate = test_build.get_baudrate()

        ### Select MUTs to test from list of available MUTs to start testing
        mut = None
        number_of_parallel_instances = 1
        for mbed_dev in ready_mbed_devices:
            if accepted_target_ids and mbed_dev['target_id'] not in accepted_target_ids:
                continue

            if mbed_dev['platform_name'] == platform_name:
                # We will force configuration specific baudrate by adding baudrate to serial port
                # Only add baudrate decoration for serial port if it's not already there
                # Format used by mbedhtrun: 'serial_port' = '<serial_port_name>:<baudrate>'
                if not mbed_dev['serial_port'].endswith(str(baudrate)):
                    mbed_dev['serial_port'] = "%s:%d" % (mbed_dev['serial_port'], baudrate)
                mut = mbed_dev
                muts_to_test.append(mbed_dev)
                if number_of_parallel_instances < parallel_test_exec:
                    number_of_parallel_instances += 1
                else:
                    break

        # devices in form of a pretty formatted table
        for line in log_mbed_devices_in_table(muts_to_test).splitlines():
            gt_logger.gt_log_tab(line.strip(), print_text=verbose)

        # Configuration print mode:
        if opts.verbose_test_configuration_only:
            continue

        ### If we have at least one available device we can proceed
        if mut:
            target_platforms_match += 1

            build = test_build.get_name()
            build_path = test_build.get_path()

            # Demo mode: --run implementation (already added --run to mbedhtrun)
            # We want to pass file name to mbedhtrun (--run NAME => -f NAME) and run only one binary
            if opts.run_app:
                gt_logger.gt_log("running '%s' for '%s'-'%s'" %
                                 (gt_logger.gt_bright(opts.run_app),
                                  gt_logger.gt_bright(platform_name),
                                  gt_logger.gt_bright(test_build.get_toolchain())))
                disk = mut['mount_point']
                port = mut['serial_port']
                micro = mut['platform_name']
                program_cycle_s = get_platform_property(micro, "program_cycle_s")
                copy_method = opts.copy_method if opts.copy_method else 'shell'
                enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

                test_platforms_match += 1
                host_test_result = run_host_test(opts.run_app,
                                                 disk,
                                                 port,
                                                 build_path,
                                                 mut['target_id'],
                                                 micro=micro,
                                                 copy_method=copy_method,
                                                 program_cycle_s=program_cycle_s,
                                                 digest_source=opts.digest_source,
                                                 json_test_cfg=opts.json_test_configuration,
                                                 run_app=opts.run_app,
                                                 enum_host_tests_path=enum_host_tests_path,
                                                 verbose=True)

                # Some error in htrun, abort test execution
                if isinstance(host_test_result, int):
                    # int(host_test_result) > 0 - Call to mbedhtrun failed
                    # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
                    return host_test_result

                # If execution was successful 'run_host_test' returns a tuple with results
                single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
                status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
                if single_test_result != TEST_RESULT_OK:
                    test_exec_retcode += 1

            test_list = test_build.get_tests()

            filtered_ctest_test_list = create_filtered_test_list(test_list,
                                                                 opts.test_by_names,
                                                                 opts.skip_test,
                                                                 test_spec=test_spec)

            gt_logger.gt_log("running %d test%s for platform '%s' and toolchain '%s'" %
                             (len(filtered_ctest_test_list),
                              "s" if len(filtered_ctest_test_list) != 1 else "",
                              gt_logger.gt_bright(platform_name),
                              gt_logger.gt_bright(test_build.get_toolchain())))

            # Test execution order can be shuffled (also with provided random seed)
            # for test execution reproduction.
            filtered_ctest_test_list_keys = filtered_ctest_test_list.keys()
            if opts.shuffle_test_order:
                # We want to shuffle test names randomly
                random.shuffle(filtered_ctest_test_list_keys, lambda: shuffle_random_seed)

            for test_name in filtered_ctest_test_list_keys:
                image_path = filtered_ctest_test_list[test_name].get_binary(binary_type=TestBinary.BIN_TYPE_BOOTABLE).get_path()
                if image_path is None:
                    gt_logger.gt_log_err("Failed to find test binary for test %s flash method %s" % (test_name, 'usb'))
                else:
                    test = {"test_bin": test_name, "image_path": image_path}
                    test_queue.put(test)

            number_of_threads = 0
            for mut in muts_to_test:
                # Experimental, parallel test execution
                if number_of_threads < parallel_test_exec:
                    args = (test_result_queue, test_queue, opts, mut, build, build_path, greentea_hooks)
                    t = Thread(target=run_test_thread, args=args)
                    execute_threads.append(t)
                    number_of_threads += 1

        gt_logger.gt_log_tab("use %s instance%s of execution threads for testing" %
                             (len(execute_threads),
                              's' if len(execute_threads) != 1 else str()),
                             print_text=verbose)
        for t in execute_threads:
            t.daemon = True
            t.start()

        # Merge partial test reports from different threads to final test report
        for t in execute_threads:
            try:
                t.join()  # blocking
                test_return_data = test_result_queue.get(False)
            except Exception as e:
                # No test report generated
                gt_logger.gt_log_err("could not generate test report: " + str(e))
                test_exec_retcode += -1000
                return test_exec_retcode

            test_platforms_match += test_return_data['test_platforms_match']
            test_exec_retcode += test_return_data['test_exec_retcode']
            partial_test_report = test_return_data['test_report']
            # todo: find better solution, maybe use extend
            for report_key in partial_test_report.keys():
                if report_key not in test_report:
                    test_report[report_key] = {}
                    test_report.update(partial_test_report)
                else:
                    test_report[report_key].update(partial_test_report[report_key])

        execute_threads = []

    if opts.verbose_test_configuration_only:
        print
        print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
        return (0)

    gt_logger.gt_log("all tests finished!")

    # We will execute post test hooks on tests
    for build_name in test_report:
        test_name_list = []  # All test case names for particular yotta target
        for test_name in test_report[build_name]:
            test = test_report[build_name][test_name]
            # Test was successful
            if test['single_test_result'] in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
                test_name_list.append(test_name)
                # Call hook executed for each test, just after all tests are finished
                if greentea_hooks:
                    # We can execute this test hook just after all tests are finished ('hook_post_test_end')
                    format = {
                        "test_name": test_name,
                        "test_bin_name": test['test_bin_name'],
                        "image_path": test['image_path'],
                        "build_path": test['build_path'],
                        "build_path_abs": test['build_path_abs'],
                    }
                    greentea_hooks.run_hook_ext('hook_post_test_end', format)

        if greentea_hooks:
            build = test_spec.get_test_build(build_name)
            assert build is not None, "Failed to find build info for build %s" % build_name

            # Call hook executed for each yotta target, just after all tests are finished
            build_path = build.get_path()
            build_path_abs = os.path.abspath(build_path)
            # We can execute this test hook just after all tests are finished ('hook_post_all_test_end')
            format = {
                "build_path": build_path,
                "build_path_abs": build_path_abs,
                "test_name_list": test_name_list,
            }
            greentea_hooks.run_hook_ext('hook_post_all_test_end', format)

    # This tool is designed to work in CI
    # We want to return success codes based on tool actions,
    # only if tests were executed and all passed we want to
    # return 0 (success)
    if not opts.only_build_tests:
        # Prints shuffle seed
        gt_logger.gt_log("shuffle seed: %.*f" % (SHUFFLE_SEED_ROUND, shuffle_random_seed))

        def dump_report_to_text_file(filename, content):
            """! Closure for report dumps to text files
            @param filename Name of destination file
            @param content Text content of the file to write
            @return True if write was successful, else return False
            """
            try:
                with open(filename, 'w') as f:
                    f.write(content)
            except IOError as e:
                gt_logger.gt_log_err("can't export to '%s', reason:" % filename)
                gt_logger.gt_log_err(str(e))
                return False
            return True

        # Reports to JUNIT file
        if opts.report_junit_file_name:
            gt_logger.gt_log("exporting to JUNIT file '%s'..." % gt_logger.gt_bright(opts.report_junit_file_name))
            # This test specification will be used by JUnit exporter to populate TestSuite.properties (useful meta-data for Viewer)
            test_suite_properties = {}
            for target_name in test_report:
                test_build_properties = get_test_build_properties(test_spec, target_name)
                if test_build_properties:
                    test_suite_properties[target_name] = test_build_properties
            junit_report = exporter_testcase_junit(test_report, test_suite_properties=test_suite_properties)
            dump_report_to_text_file(opts.report_junit_file_name, junit_report)

        # Reports to text file
        if opts.report_text_file_name:
            gt_logger.gt_log("exporting to TEXT '%s'..." % gt_logger.gt_bright(opts.report_text_file_name))
            # Useful text reporter for those who do not like to copy-paste the results table from console to files
            text_report, text_results = exporter_text(test_report)
            text_testcase_report, text_testcase_results = exporter_testcase_text(test_report)
            text_final_report = '\n'.join([text_report, text_results, text_testcase_report, text_testcase_results])
            dump_report_to_text_file(opts.report_text_file_name, text_final_report)

        # Reports to JSON file
        if opts.report_json_file_name:
            # We will not print summary and json report together
            gt_logger.gt_log("exporting to JSON '%s'..." % gt_logger.gt_bright(opts.report_json_file_name))
            json_report = exporter_json(test_report)
            dump_report_to_text_file(opts.report_json_file_name, json_report)

        # Reports to HTML file
        if opts.report_html_file_name:
            gt_logger.gt_log("exporting to HTML file '%s'..." % gt_logger.gt_bright(opts.report_html_file_name))
            # Generate a HTML page displaying all of the results
            html_report = exporter_html(test_report)
            dump_report_to_text_file(opts.report_html_file_name, html_report)

        # Final summary
        if test_report:
            # Test suite report
            gt_logger.gt_log("test suite report:")
            text_report, text_results = exporter_text(test_report)
            print text_report
            gt_logger.gt_log("test suite results: " + text_results)
            # Test case detailed report
            gt_logger.gt_log("test case report:")
            text_testcase_report, text_testcase_results = exporter_testcase_text(test_report)
            print text_testcase_report
            gt_logger.gt_log("test case results: " + text_testcase_results)

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_logger.gt_log_warn("no platform/target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_logger.gt_log_warn("no matching platforms were found!")
            test_exec_retcode += -100

    return (test_exec_retcode)
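
# ---------------------------------------------------------------------------
# Illustrative sketch: main_cli() above decorates the detected serial port
# with the build's baudrate using the '<serial_port_name>:<baudrate>' format
# expected by mbedhtrun, and only when that suffix is not already present.
# The helper below is a stand-alone restatement of that rule, not a function
# from this module.
def _example_decorate_serial_port(serial_port, baudrate):
    """Append ':<baudrate>' to the port name unless it is already there."""
    if serial_port.endswith(str(baudrate)):
        return serial_port
    return "%s:%d" % (serial_port, baudrate)

# E.g. _example_decorate_serial_port("COM4", 9600) -> "COM4:9600", while an
# already decorated "COM4:9600" is returned unchanged.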
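
# ---------------------------------------------------------------------------
# Illustrative sketch: each worker thread pushes a partial report dict onto
# test_result_queue and main_cli() merges those partials into one report
# keyed by build name. A minimal, stand-alone restatement of that merge step
# is shown below (hypothetical name; the real loop also accumulates return
# codes and platform-match counters).
def _example_merge_partial_report(test_report, partial_test_report):
    """Merge per-thread {build: {suite: data}} results into the full report."""
    for report_key, suites in partial_test_report.items():
        test_report.setdefault(report_key, {}).update(suites)
    return test_report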
def run_test_thread(test_result_queue, test_queue, opts, mut, build, build_path, greentea_hooks):
    test_exec_retcode = 0
    test_platforms_match = 0
    test_report = {}

    while not test_queue.empty():
        try:
            test = test_queue.get(False)
        except Exception as e:
            gt_logger.gt_log_err(str(e))
            break

        test_result = 'SKIPPED'

        disk = mut['mount_point']
        port = mut['serial_port']
        micro = mut['platform_name']
        program_cycle_s = get_platform_property(micro, "program_cycle_s")
        forced_reset_timeout = get_platform_property(micro, "forced_reset_timeout")
        copy_method = opts.copy_method if opts.copy_method else 'shell'
        verbose = opts.verbose_test_result_only
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

        test_platforms_match += 1
        host_test_result = run_host_test(test['image_path'],
                                         disk,
                                         port,
                                         build_path,
                                         mut['target_id'],
                                         micro=micro,
                                         copy_method=copy_method,
                                         program_cycle_s=program_cycle_s,
                                         forced_reset_timeout=forced_reset_timeout,
                                         digest_source=opts.digest_source,
                                         json_test_cfg=opts.json_test_configuration,
                                         enum_host_tests_path=enum_host_tests_path,
                                         global_resource_mgr=opts.global_resource_mgr,
                                         verbose=verbose)

        # Some error in htrun, abort test execution
        if isinstance(host_test_result, int):
            # int(host_test_result) > 0 - Call to mbedhtrun failed
            # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
            gt_logger.gt_log_err("run_test_thread.run_host_test() failed, aborting...")
            break

        # If execution was successful 'run_host_test' returns a tuple with results
        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
        test_result = single_test_result

        build_path_abs = os.path.abspath(build_path)

        if single_test_result != TEST_RESULT_OK:
            test_exec_retcode += 1

        if single_test_result in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
            if greentea_hooks:
                # Test was successful
                # We can execute test hook just after test is finished ('hook_test_end')
                format = {
                    "test_name": test['test_bin'],
                    "test_bin_name": os.path.basename(test['image_path']),
                    "image_path": test['image_path'],
                    "build_path": build_path,
                    "build_path_abs": build_path_abs,
                    "build_name": build,
                }
                greentea_hooks.run_hook_ext('hook_test_end', format)

        # Update report for optional reporting feature
        test_suite_name = test['test_bin'].lower()
        if build not in test_report:
            test_report[build] = {}

        if test_suite_name not in test_report[build]:
            test_report[build][test_suite_name] = {}

        if not test_cases_summary and not result_test_cases:
            gt_logger.gt_log_warn("test case summary event not found")
            gt_logger.gt_log_tab("no test case report present, assuming test suite to be a single test case!")

            # We will map test suite result to test case to
            # output valid test case in report

            # Generate "artificial" test case name from test suite name
            # E.g. mbed-drivers-test-dev_null -> dev_null
            test_case_name = test_suite_name
            test_str_idx = test_suite_name.find("-test-")
            if test_str_idx != -1:
                test_case_name = test_case_name[test_str_idx + 6:]

            gt_logger.gt_log_tab("test suite: %s" % test_suite_name)
            gt_logger.gt_log_tab("test case: %s" % test_case_name)

            # Test case result: OK, FAIL or ERROR
            tc_result_text = {
                "OK": "OK",
                "FAIL": "FAIL",
            }.get(single_test_result, 'ERROR')

            # Test case integer success code OK, FAIL and ERROR: (0, >0, <0)
            tc_result = {
                "OK": 0,
                "FAIL": 1024,
                "ERROR": -1024,
            }.get(tc_result_text, -2048)

            # Test case passes and failures: (1 pass, 0 failures) or (0 passes, 1 failure)
            tc_passed, tc_failed = {
                0: (1, 0),
            }.get(tc_result, (0, 1))

            # Test case report built for the whole binary
            # Add test case made from test suite result to test case report
            result_test_cases = {
                test_case_name: {
                    'duration': single_testduration,
                    'time_start': 0.0,
                    'time_end': 0.0,
                    'utest_log': single_test_output.splitlines(),
                    'result_text': tc_result_text,
                    'passed': tc_passed,
                    'failed': tc_failed,
                    'result': tc_result,
                }
            }

            # Test summary built for the whole binary (as a test case)
            test_cases_summary = (tc_passed, tc_failed,)

        gt_logger.gt_log("test on hardware with target id: %s" % (mut['target_id']))
        gt_logger.gt_log("test suite '%s' %s %s in %.2f sec" %
                         (test['test_bin'],
                          '.' * (80 - len(test['test_bin'])),
                          test_result,
                          single_testduration))

        # Test report built for the whole binary
        test_report[build][test_suite_name]['single_test_result'] = single_test_result
        test_report[build][test_suite_name]['single_test_output'] = single_test_output
        test_report[build][test_suite_name]['elapsed_time'] = single_testduration
        test_report[build][test_suite_name]['platform_name'] = micro
        test_report[build][test_suite_name]['copy_method'] = copy_method
        test_report[build][test_suite_name]['testcase_result'] = result_test_cases
        test_report[build][test_suite_name]['build_path'] = build_path
        test_report[build][test_suite_name]['build_path_abs'] = build_path_abs
        test_report[build][test_suite_name]['image_path'] = test['image_path']
        test_report[build][test_suite_name]['test_bin_name'] = os.path.basename(test['image_path'])

        passes_cnt, failures_cnt = 0, 0
        for tc_name in sorted(result_test_cases.keys()):
            gt_logger.gt_log_tab("test case: '%s' %s %s in %.2f sec" %
                                 (tc_name,
                                  '.' * (80 - len(tc_name)),
                                  result_test_cases[tc_name].get('result_text', '_'),
                                  result_test_cases[tc_name].get('duration', 0.0)))
            if result_test_cases[tc_name].get('result_text', '_') == 'OK':
                passes_cnt += 1
            else:
                failures_cnt += 1

        if test_cases_summary:
            passes, failures = test_cases_summary
            gt_logger.gt_log("test case summary: %d pass%s, %d failur%s" %
                             (passes,
                              '' if passes == 1 else 'es',
                              failures,
                              'e' if failures == 1 else 'es'))
            if passes != passes_cnt or failures != failures_cnt:
                gt_logger.gt_log_err("utest test case summary mismatch: utest reported passes and failures miscount!")
                gt_logger.gt_log_tab("reported by utest: passes = %d, failures = %d" % (passes, failures))
                gt_logger.gt_log_tab("test case result count: passes = %d, failures = %d" % (passes_cnt, failures_cnt))

        if single_test_result != 'OK' and not verbose and opts.report_fails:
            # In some cases we want to print console to see why test failed
            # even if we are not in verbose mode
            gt_logger.gt_log_tab("test failed, reporting console output (specified with --report-fails option)")
            print
            print single_test_output

    #greentea_release_target_id(mut['target_id'], gt_instance_uuid)
    test_result_queue.put({'test_platforms_match': test_platforms_match,
                           'test_exec_retcode': test_exec_retcode,
                           'test_report': test_report})
    return
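
# ---------------------------------------------------------------------------
# Illustrative sketch: when a binary emits no per-test-case events,
# run_test_thread() above falls back to a single "artificial" test case whose
# name is derived from the test suite name by stripping everything up to and
# including the '-test-' marker. The helper below restates that rule for
# clarity; it is not part of the module.
def _example_artificial_test_case_name(test_suite_name):
    """E.g. 'mbed-drivers-test-dev_null' -> 'dev_null'; names without the
    '-test-' marker are returned unchanged."""
    marker = "-test-"
    idx = test_suite_name.find(marker)
    if idx != -1:
        return test_suite_name[idx + len(marker):]
    return test_suite_name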