def test_load_ctest_testsuite(self):
    root_path = os.path.dirname(os.path.realpath(__file__))
    empty_path = os.path.join(root_path, "resources", "empty")
    full_path = os.path.join(root_path, "resources", "not-empty")

    # Empty LINK_TARGET
    empty_link_target = empty_path
    empty_suite = cmake_handlers.load_ctest_testsuite(empty_link_target)
    self.assertEqual(empty_suite, {})

    # Non-empty LINK_TARGET
    link_target = full_path
    test_suite = cmake_handlers.load_ctest_testsuite(link_target)
    self.assertIsNotNone(test_suite)
    self.assertIn('mbed-client-test-mbedclient-smokeTest', test_suite)
    self.assertIn('mbed-client-test-helloworld-mbedclient', test_suite)
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is the main CLI function handling all command line parameters
    @details This function also implements the CLI workflow depending on the CLI parameters given
    @return Exit code passed back to the calling environment
    """
    if not MBED_LMTOOLS:
        gt_logger.gt_log_err("error: mbed-ls proprietary module not installed")
        return (-1)

    if not MBED_HOST_TESTS:
        gt_logger.gt_log_err("error: mbed-host-tests proprietary module not installed")
        return (-1)

    # This is how you magically control colours in this piece of art software
    gt_logger.colorful(not opts.plain)

    # List available test binaries (names, no extension)
    if opts.list_binaries:
        list_binaries_for_targets()
        return (0)

    # Prints version and exits
    if opts.version:
        print_version()
        return (0)

    # We will load hooks from JSON file to support extra behaviour during test execution
    greentea_hooks = GreenteaHooks(opts.hooks_json) if opts.hooks_json else None

    # Capture alternative test console inputs, used e.g. in 'yotta test' command
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(None, None, None, None, None,
                                         hooks=greentea_hooks,
                                         digest_source=opts.digest_source,
                                         enum_host_tests_path=enum_host_tests_path,
                                         verbose=opts.verbose_test_result_only)
        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
        status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
        return (status)

    ### Read yotta module basic information
    yotta_module = YottaModule()
    yotta_module.init()  # Read actual yotta module data

    # Check if NO greentea-client is in module.json of repo to test, if so abort
    if not yotta_module.check_greentea_client():
        gt_logger.gt_log("""
*****************************************************************************************
* We've noticed that NO 'greentea-client' module is specified in                       *
* dependency/testDependency section of this module's 'module.json' file.               *
*                                                                                       *
* This version of Greentea requires 'greentea-client' module.                          *
* Please downgrade to Greentea before v0.2.0:                                           *
*                                                                                       *
* $ pip install "mbed-greentea<0.2.0" --upgrade                                        *
*                                                                                       *
* or port your tests to new Async model: https://github.com/ARMmbed/greentea/pull/78   *
*****************************************************************************************
""")
        return (0)

    ### Selecting yotta targets to process
    yt_targets = []  # List of yotta targets specified by the user, processed during this run

    if opts.list_of_targets:
        yt_targets = opts.list_of_targets.split(',')
    else:
        # Trying to use locally set yotta target
        gt_logger.gt_log("checking for yotta target in current directory")
        gt_logger.gt_log_tab("reason: no --target switch set")
        current_target = get_mbed_target_from_current_dir()
        if current_target:
            gt_logger.gt_log("assuming default target as '%s'" % gt_logger.gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            yt_targets = [current_target]
        else:
            gt_logger.gt_log_tab("yotta target in current directory is not set")
            gt_logger.gt_log_err("yotta target is not specified. Use '%s' or '%s' command to set target" % (
                gt_logger.gt_bright('mbedgt -t <yotta_target>'),
                gt_logger.gt_bright('yotta target <yotta_target>')
            ))
            return (-1)

    ### Query with mbedls for available mbed-enabled devices
    gt_logger.gt_log("detecting connected mbed-enabled devices...")

    # Detect devices connected to system
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds_ext()
    ready_mbed_devices = []  # Devices which can be used (are fully detected)

    if mbeds_list:
        gt_logger.gt_log("detected %d device%s" % (len(mbeds_list), 's' if len(mbeds_list) != 1 else ''))
        for mut in mbeds_list:
            if not all(mut.values()):
                gt_logger.gt_log_err("can't detect all properties of the device!")
                for prop in mut:
                    if not mut[prop]:
                        gt_logger.gt_log_tab("property '%s' is '%s'" % (prop, str(mut[prop])))
            else:
                ready_mbed_devices.append(mut)
                gt_logger.gt_log_tab("detected '%s' -> '%s', console at '%s', mounted at '%s', target id '%s'" % (
                    gt_logger.gt_bright(mut['platform_name']),
                    gt_logger.gt_bright(mut['platform_name_unique']),
                    gt_logger.gt_bright(mut['serial_port']),
                    gt_logger.gt_bright(mut['mount_point']),
                    gt_logger.gt_bright(mut['target_id'])
                ))
    else:
        gt_logger.gt_log_err("no devices detected")
        return (RET_NO_DEVICES)

    ### Use yotta to search mapping between platform names and available platforms
    # Convert 'platform:target,...' mapping to data structure
    map_platform_to_yt_target = {}
    if opts.map_platform_to_yt_target:
        gt_logger.gt_log("user defined platform -> target supported mapping definition (specified with --map-target switch)")
        p_to_t_mappings = opts.map_platform_to_yt_target.split(',')
        for mapping in p_to_t_mappings:
            if len(mapping.split(':')) == 2:
                platform, yt_target = mapping.split(':')
                if platform not in map_platform_to_yt_target:
                    map_platform_to_yt_target[platform] = []
                map_platform_to_yt_target[platform].append(yt_target)
                gt_logger.gt_log_tab("mapped platform '%s' to be compatible with '%s'" % (
                    gt_logger.gt_bright(platform),
                    gt_logger.gt_bright(yt_target)
                ))
            else:
                gt_logger.gt_log_tab("unknown format '%s', use 'platform:target' format" % mapping)

    # Check if mbed classic target name can be translated to yotta target name
    mut_info_map = {}  # platform_name : mut_info_dict, extract yt_targets with e.g. [k["yotta_target"] for k in d['K64F']["yotta_targets"]]

    for mut in ready_mbed_devices:
        platfrom_name = mut['platform_name']
        if platfrom_name not in mut_info_map:
            mut_info = get_mbed_clasic_target_info(platfrom_name,
                                                   map_platform_to_yt_target,
                                                   use_yotta_registry=opts.yotta_search_for_mbed_target)
            if mut_info:
                mut_info_map[platfrom_name] = mut_info

    ### List of unique ready platform names
    unique_mbed_devices = list(set(mut_info_map.keys()))

    ### Identify which targets have to be built because their platforms are present
    yt_target_platform_map = {}  # yt_target_to_test : platforms to test on

    for yt_target in yt_targets:
        for platform_name in unique_mbed_devices:
            if yt_target in [k["yotta_target"] for k in mut_info_map[platform_name]["yotta_targets"]]:
                if yt_target not in yt_target_platform_map:
                    yt_target_platform_map[yt_target] = []
                if platform_name not in yt_target_platform_map[yt_target]:
                    yt_target_platform_map[yt_target].append(platform_name)

    ### We can filter in only specific target ids
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_logger.gt_log("filtering out target ids not on below list (specified with --use-tids switch)")
        accepted_target_ids = opts.use_target_ids.split(',')
        for tid in accepted_target_ids:
            gt_logger.gt_log_tab("accepting target id '%s'" % gt_logger.gt_bright(tid))

    test_exec_retcode = 0       # Adjusted each time a test case result is not 'OK'
    test_platforms_match = 0    # Count how many tests were actually run with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings
    test_report = {}            # Test report used to export to JUnit, HTML etc...

    muts_to_test = []            # MUTs to actually be tested
    test_queue = Queue()         # Contains information about test_bin and image_path for each test case
    test_result_queue = Queue()  # Used to store results of each thread
    execute_threads = []         # List of threads to run test cases

    ### Check if argument of --parallel mode is an integer greater than or equal to 1
    try:
        parallel_test_exec = int(opts.parallel_test_exec)
        if parallel_test_exec < 1:
            parallel_test_exec = 1
    except ValueError:
        gt_logger.gt_log_err("argument of mode --parallel is not an int, disabling parallel mode")
        parallel_test_exec = 1

    # Values used to generate random seed for test execution order shuffle
    SHUFFLE_SEED_ROUND = 10  # Value used to round float random seed
    shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND)

    # Set shuffle seed if it is provided with command line option
    if opts.shuffle_test_seed:
        shuffle_random_seed = round(float(opts.shuffle_test_seed), SHUFFLE_SEED_ROUND)

    ### Testing procedures, for each target, for each target's compatible platform
    for yotta_target_name in yt_target_platform_map:
        gt_logger.gt_log("processing '%s' yotta target compatible platforms..." % gt_logger.gt_bright(yotta_target_name))
        for platform_name in yt_target_platform_map[yotta_target_name]:
            gt_logger.gt_log("processing '%s' platform..." % gt_logger.gt_bright(platform_name))

            ### Select MUTs to test from list of available MUTs to start testing
            mut = None
            number_of_parallel_instances = 1
            for mbed_dev in ready_mbed_devices:
                if accepted_target_ids and mbed_dev['target_id'] not in accepted_target_ids:
                    continue
                if mbed_dev['platform_name'] == platform_name:
                    mut = mbed_dev
                    muts_to_test.append(mbed_dev)
                    gt_logger.gt_log("using platform '%s' for test:" % gt_logger.gt_bright(platform_name))
                    for k in mbed_dev:
                        gt_logger.gt_log_tab("%s = '%s'" % (k, mbed_dev[k]))
                    if number_of_parallel_instances < parallel_test_exec:
                        number_of_parallel_instances += 1
                    else:
                        break

            # Configuration print mode:
            if opts.verbose_test_configuration_only:
                continue

            if mut:
                target_platforms_match += 1
                # Demo mode: --run implementation (already added --run to mbedhtrun)
                # We want to pass file name to mbedhtrun (--run NAME => -f NAME) and run only one binary
                if opts.run_app:
                    gt_logger.gt_log("running '%s' for '%s'" % (gt_logger.gt_bright(opts.run_app), gt_logger.gt_bright(yotta_target_name)))
                    disk = mut['mount_point']
                    port = mut['serial_port']
                    micro = mut['platform_name']
                    program_cycle_s = mut_info_map[platform_name]['properties']['program_cycle_s']
                    copy_method = opts.copy_method if opts.copy_method else 'shell'
                    enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

                    yotta_config = YottaConfig()
                    yotta_config.init(yotta_target_name)
                    yotta_config_baudrate = yotta_config.get_baudrate()

                    # We will force configuration specific baudrate
                    if port:
                        port = "%s:%d" % (port, yotta_config_baudrate)

                    test_platforms_match += 1
                    host_test_result = run_host_test(opts.run_app,
                                                     disk,
                                                     port,
                                                     yotta_target_name,
                                                     mut['target_id'],
                                                     micro=micro,
                                                     copy_method=copy_method,
                                                     program_cycle_s=program_cycle_s,
                                                     digest_source=opts.digest_source,
                                                     json_test_cfg=opts.json_test_configuration,
                                                     run_app=opts.run_app,
                                                     enum_host_tests_path=enum_host_tests_path,
                                                     verbose=True)

                    single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
                    status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
                    if single_test_result != TEST_RESULT_OK:
                        test_exec_retcode += 1

                    continue

                # Regression test mode:
                # Building sources for given target and performing normal testing
                yotta_result, yotta_ret = True, 0  # Skip build and assume 'yotta build' was successful

                if opts.skip_yotta_build:
                    gt_logger.gt_log("skipping calling yotta (specified with --skip-build option)")
                else:
                    yotta_result, yotta_ret = build_with_yotta(yotta_target_name,
                                                               verbose=opts.verbose,
                                                               build_to_release=opts.build_to_release,
                                                               build_to_debug=opts.build_to_debug)

                # We need to stop executing if yotta build fails
                if not yotta_result:
                    gt_logger.gt_log_err("yotta returned %d" % yotta_ret)
                    return (RET_YOTTA_BUILD_FAIL)

                if opts.only_build_tests:
                    continue

                # Build phase will be followed by test execution for each target
                if yotta_result and not opts.only_build_tests:
                    binary_type = mut_info_map[platform_name]['properties']['binary_type']
                    ctest_test_list = load_ctest_testsuite(os.path.join('.', 'build', yotta_target_name),
                                                           binary_type=binary_type)
                    # TODO: handle case where there are no tests to execute
                    filtered_ctest_test_list = create_filtered_test_list(ctest_test_list, opts.test_by_names, opts.skip_test)

                    gt_logger.gt_log("running %d test%s for target '%s' and platform '%s'" % (
                        len(filtered_ctest_test_list),
                        "s" if len(filtered_ctest_test_list) != 1 else "",
                        gt_logger.gt_bright(yotta_target_name),
                        gt_logger.gt_bright(platform_name)
                    ))

                    # Test execution order can be shuffled (also with provided random seed)
                    # for test execution reproduction.
                    filtered_ctest_test_list_keys = filtered_ctest_test_list.keys()
                    if opts.shuffle_test_order:
                        # We want to shuffle test names randomly
                        random.shuffle(filtered_ctest_test_list_keys, lambda: shuffle_random_seed)

                    for test_bin in filtered_ctest_test_list_keys:
                        image_path = filtered_ctest_test_list[test_bin]
                        test = {"test_bin": test_bin, "image_path": image_path}
                        test_queue.put(test)
                    #for test_bin, image_path in filtered_ctest_test_list.iteritems():
                    #    test = {"test_bin": test_bin, "image_path": image_path}
                    #    test_queue.put(test)

                    number_of_threads = 0
                    for mut in muts_to_test:
                        #################################################################
                        # Experimental, parallel test execution
                        #################################################################
                        if number_of_threads < parallel_test_exec:
                            args = (test_result_queue, test_queue, opts, mut, mut_info, yotta_target_name, greentea_hooks)
                            t = Thread(target=run_test_thread, args=args)
                            execute_threads.append(t)
                            number_of_threads += 1

    gt_logger.gt_log_tab("use %s instance%s for testing" % (len(execute_threads), 's' if len(execute_threads) != 1 else ''))
    for t in execute_threads:
        t.daemon = True
        t.start()

    # Merge partial test reports from different threads into the final test report
    for t in execute_threads:
        try:
            t.join()  # blocking
            test_return_data = test_result_queue.get(False)
        except Exception as e:
            # No test report generated
            gt_logger.gt_log_err("could not generate test report: " + str(e))
            test_exec_retcode += -1000
            return test_exec_retcode

        test_platforms_match += test_return_data['test_platforms_match']
        test_exec_retcode += test_return_data['test_exec_retcode']
        partial_test_report = test_return_data['test_report']
        # TODO: find better solution, maybe use extend
        for report_key in partial_test_report.keys():
            if report_key not in test_report:
                test_report[report_key] = {}
                test_report.update(partial_test_report)
            else:
                test_report[report_key].update(partial_test_report[report_key])

    if opts.verbose_test_configuration_only:
        print
        print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
        return (0)

    gt_logger.gt_log("all tests finished!")

    # We will execute post test hooks on tests
    for yotta_target in test_report:
        test_name_list = []  # All test case names for particular yotta target
        for test_name in test_report[yotta_target]:
            test = test_report[yotta_target][test_name]
            # Test was successful
            if test['single_test_result'] in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
                test_name_list.append(test_name)
                # Call hook executed for each test, just after all tests are finished
                if greentea_hooks:
                    # We can execute this test hook just after all tests are finished ('hook_post_test_end')
                    format = {
                        "test_name": test_name,
                        "test_bin_name": test['test_bin_name'],
                        "image_path": test['image_path'],
                        "build_path": test['build_path'],
                        "build_path_abs": test['build_path_abs'],
                        "yotta_target_name": yotta_target,
                    }
                    greentea_hooks.run_hook_ext('hook_post_test_end', format)
        if greentea_hooks:
            # Call hook executed for each yotta target, just after all tests are finished
            build_path = os.path.join("./build", yotta_target)
            build_path_abs = os.path.abspath(build_path)
            # We can execute this test hook just after all tests are finished ('hook_post_all_test_end')
            format = {
                "build_path": build_path,
                "build_path_abs": build_path_abs,
                "test_name_list": test_name_list,
                "yotta_target_name": yotta_target,
            }
            greentea_hooks.run_hook_ext('hook_post_all_test_end', format)

    # This tool is designed to work in CI.
    # We want to return success codes based on tool actions;
    # only if tests were executed and all passed do we
    # return 0 (success).
    if not opts.only_build_tests:
        # Prints shuffle seed
        gt_logger.gt_log("shuffle seed: %.*f" % (SHUFFLE_SEED_ROUND, shuffle_random_seed))

        # Reports (to file)
        if opts.report_junit_file_name:
            gt_logger.gt_log("exporting to JUnit file '%s'..." % gt_logger.gt_bright(opts.report_junit_file_name))
            junit_report = exporter_testcase_junit(test_report, test_suite_properties=yotta_module.get_data())
            with open(opts.report_junit_file_name, 'w') as f:
                f.write(junit_report)
        if opts.report_text_file_name:
            gt_logger.gt_log("exporting to text '%s'..." % gt_logger.gt_bright(opts.report_text_file_name))
            text_report, text_results = exporter_text(test_report)
            text_testcase_report, text_testcase_results = exporter_testcase_text(test_report)
            with open(opts.report_text_file_name, 'w') as f:
                f.write('\n'.join([text_report, text_results, text_testcase_report, text_testcase_results]))

        # Reports (to console)
        if opts.report_json:
            # We will not print summary and json report together
            gt_logger.gt_log("json test report:")
            print exporter_json(test_report)
        else:
            # Final summary
            if test_report:
                # Test suite report
                gt_logger.gt_log("test suite report:")
                text_report, text_results = exporter_text(test_report)
                print text_report
                gt_logger.gt_log("test suite results: " + text_results)
                # Test case detailed report
                gt_logger.gt_log("test case report:")
                text_testcase_report, text_testcase_results = exporter_testcase_text(test_report, test_suite_properties=yotta_module.get_data())
                print text_testcase_report
                gt_logger.gt_log("test case results: " + text_testcase_results)

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_logger.gt_log_warn("no platform/target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_logger.gt_log_warn("no target matching platforms were found!")
            test_exec_retcode += -100

    return (test_exec_retcode)
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is the main CLI function handling all command line parameters
    @details This function also implements the CLI workflow depending on the CLI parameters given
    @return Exit code passed back to the calling environment
    """
    if not MBED_LMTOOLS:
        gt_log_err("error: mbed-ls proprietary module not installed")
        return (-1)

    if not MBED_HOST_TESTS:
        gt_log_err("error: mbed-host-tests proprietary module not installed")
        return (-1)

    # List available test binaries (names, no extension)
    if opts.list_binaries:
        list_binaries_for_targets()
        return (0)

    # Prints version and exits
    if opts.version:
        print_version()
        return (0)

    # Capture alternative test console inputs, used e.g. in 'yotta test' command
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(image_path=None,
                                         disk=None,
                                         port=None,
                                         digest_source=opts.digest_source,
                                         enum_host_tests_path=enum_host_tests_path,
                                         verbose=opts.verbose_test_result_only)
        single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
        status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
        return (status)

    ### Selecting yotta targets to process
    yt_targets = []  # List of yotta targets specified by the user, processed during this run

    if opts.list_of_targets:
        yt_targets = opts.list_of_targets.split(',')
    else:
        # Trying to use locally set yotta target
        gt_log("checking for yotta target in current directory")
        gt_log_tab("reason: no --target switch set")
        current_target = get_mbed_target_from_current_dir()
        if current_target:
            gt_log("assuming default target as '%s'" % gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            yt_targets = [current_target]
        else:
            gt_log_tab("yotta target in current directory is not set")
            gt_log_err("yotta target is not specified. Use '%s' or '%s' command to set target" % (
                gt_bright('mbedgt -t <yotta_target>'),
                gt_bright('yotta target <yotta_target>')
            ))
            return (-1)

    #print "yt_targets:", yt_targets

    ### Query with mbedls for available mbed-enabled devices
    gt_log("detecting connected mbed-enabled devices...")

    # Detect devices connected to system
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds_ext()
    ready_mbed_devices = []  # Devices which can be used (are fully detected)

    if mbeds_list:
        gt_log("detected %d device%s" % (len(mbeds_list), 's' if len(mbeds_list) != 1 else ''))
        for mut in mbeds_list:
            if not all(mut.values()):
                gt_log_err("can't detect all properties of the device!")
            else:
                ready_mbed_devices.append(mut)
                gt_log_tab("detected '%s' -> '%s', console at '%s', mounted at '%s', target id '%s'" % (
                    gt_bright(mut['platform_name']),
                    gt_bright(mut['platform_name_unique']),
                    gt_bright(mut['serial_port']),
                    gt_bright(mut['mount_point']),
                    gt_bright(mut['target_id'])
                ))
    else:
        gt_log("no devices detected")
        return (RET_NO_DEVICES)

    ### Use yotta to search mapping between platform names and available platforms
    # Convert 'platform:target,...' mapping to data structure
    map_platform_to_yt_target = {}
    if opts.map_platform_to_yt_target:
        gt_log("user defined platform -> target supported mapping definition (specified with --map-target switch)")
        p_to_t_mappings = opts.map_platform_to_yt_target.split(',')
        for mapping in p_to_t_mappings:
            if len(mapping.split(':')) == 2:
                platform, yt_target = mapping.split(':')
                if platform not in map_platform_to_yt_target:
                    map_platform_to_yt_target[platform] = []
                map_platform_to_yt_target[platform].append(yt_target)
                gt_log_tab("mapped platform '%s' to be compatible with '%s'" % (
                    gt_bright(platform),
                    gt_bright(yt_target)
                ))
            else:
                gt_log_tab("unknown format '%s', use 'platform:target' format" % mapping)

    # Check if mbed classic target name can be translated to yotta target name
    mut_info_map = {}  # platform_name : mut_info_dict, extract yt_targets with e.g. [k["yotta_target"] for k in d['K64F']["yotta_targets"]]

    for mut in ready_mbed_devices:
        platfrom_name = mut['platform_name']
        if platfrom_name not in mut_info_map:
            mut_info = get_mbed_clasic_target_info(platfrom_name,
                                                   map_platform_to_yt_target,
                                                   use_yotta_registry=opts.yotta_search_for_mbed_target)
            if mut_info:
                mut_info_map[platfrom_name] = mut_info

    #print "mut_info_map:", json.dumps(mut_info_map, indent=2)

    ### List of unique ready platform names
    unique_mbed_devices = list(set(mut_info_map.keys()))
    #print "unique_mbed_devices", json.dumps(unique_mbed_devices, indent=2)

    ### Identify which targets have to be built because their platforms are present
    yt_target_platform_map = {}  # yt_target_to_test : platforms to test on

    for yt_target in yt_targets:
        for platform_name in unique_mbed_devices:
            if yt_target in [k["yotta_target"] for k in mut_info_map[platform_name]["yotta_targets"]]:
                if yt_target not in yt_target_platform_map:
                    yt_target_platform_map[yt_target] = []
                if platform_name not in yt_target_platform_map[yt_target]:
                    yt_target_platform_map[yt_target].append(platform_name)

    #print "yt_target_platform_map", json.dumps(yt_target_platform_map, indent=2)

    ### We can filter in only specific target ids
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_log("filtering out target ids not on below list (specified with --use-tids switch)")
        accepted_target_ids = opts.use_target_ids.split(',')
        for tid in accepted_target_ids:
            gt_log_tab("accepting target id '%s'" % gt_bright(tid))

    test_exec_retcode = 0       # Adjusted each time a test case result is not 'OK'
    test_platforms_match = 0    # Count how many tests were actually run with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings
    test_report = {}            # Test report used to export to JUnit, HTML etc...

    muts_to_test = []            # MUTs to actually be tested
    test_queue = Queue()         # Contains information about test_bin and image_path for each test case
    test_result_queue = Queue()  # Used to store results of each thread
    execute_threads = []         # List of threads to run test cases

    ### Check if argument of --parallel mode is an integer greater than or equal to 1
    try:
        parallel_test_exec = int(opts.parallel_test_exec)
        if parallel_test_exec < 1:
            parallel_test_exec = 1
    except ValueError:
        gt_log_err("argument of mode --parallel is not an int, disabling parallel mode")
        parallel_test_exec = 1

    ### Testing procedures, for each target, for each target's compatible platform
    for yotta_target_name in yt_target_platform_map:
        gt_log("processing '%s' yotta target compatible platforms..." % gt_bright(yotta_target_name))
        for platform_name in yt_target_platform_map[yotta_target_name]:
            gt_log("processing '%s' platform..." % gt_bright(platform_name))

            ### Select MUTs to test from list of available MUTs to start testing
            mut = None
            number_of_parallel_instances = 1
            for mbed_dev in ready_mbed_devices:
                if accepted_target_ids and mbed_dev['target_id'] not in accepted_target_ids:
                    continue
                if mbed_dev['platform_name'] == platform_name:
                    mut = mbed_dev
                    muts_to_test.append(mbed_dev)
                    gt_log("using platform '%s' for test:" % gt_bright(platform_name))
                    for k in mbed_dev:
                        gt_log_tab("%s = '%s'" % (k, mbed_dev[k]))
                    if number_of_parallel_instances < parallel_test_exec:
                        number_of_parallel_instances += 1
                    else:
                        break

            # Configuration print mode:
            if opts.verbose_test_configuration_only:
                continue

            if mut:
                target_platforms_match += 1
                # Demo mode: --run implementation (already added --run to mbedhtrun)
                # We want to pass file name to mbedhtrun (--run NAME => -f NAME) and run only one binary
                if opts.run_app:
                    gt_log("running '%s' for '%s'" % (gt_bright(opts.run_app), gt_bright(yotta_target_name)))
                    disk = mut['mount_point']
                    port = mut['serial_port']
                    micro = mut['platform_name']
                    program_cycle_s = mut_info_map[platform_name]['properties']['program_cycle_s']
                    copy_method = opts.copy_method if opts.copy_method else 'shell'
                    enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

                    yotta_config = YottaConfig()
                    yotta_config.init(yotta_target_name)
                    yotta_config_baudrate = yotta_config.get_baudrate()

                    # We will force configuration specific baudrate
                    if port:
                        port = "%s:%d" % (port, yotta_config_baudrate)

                    test_platforms_match += 1
                    host_test_result = run_host_test(opts.run_app,
                                                     disk,
                                                     port,
                                                     micro=micro,
                                                     copy_method=copy_method,
                                                     program_cycle_s=program_cycle_s,
                                                     digest_source=opts.digest_source,
                                                     json_test_cfg=opts.json_test_configuration,
                                                     run_app=opts.run_app,
                                                     enum_host_tests_path=enum_host_tests_path,
                                                     verbose=True)

                    single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
                    status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
                    if single_test_result != TEST_RESULT_OK:
                        test_exec_retcode += 1

                    continue

                # Regression test mode:
                # Building sources for given target and performing normal testing
                yotta_result, yotta_ret = True, 0  # Skip build and assume 'yotta build' was successful

                if opts.skip_yotta_build:
                    gt_log("skipping calling yotta (specified with --skip-build option)")
                else:
                    yotta_result, yotta_ret = build_with_yotta(yotta_target_name,
                                                               verbose=opts.verbose,
                                                               build_to_release=opts.build_to_release,
                                                               build_to_debug=opts.build_to_debug)

                # We need to stop executing if yotta build fails
                if not yotta_result:
                    gt_log_err("yotta returned %d" % yotta_ret)
                    return (RET_YOTTA_BUILD_FAIL)

                if opts.only_build_tests:
                    continue

                # Build phase will be followed by test execution for each target
                if yotta_result and not opts.only_build_tests:
                    binary_type = mut_info_map[platform_name]['properties']['binary_type']
                    ctest_test_list = load_ctest_testsuite(os.path.join('.', 'build', yotta_target_name),
                                                           binary_type=binary_type)
                    #print json.dumps(ctest_test_list, indent=2)

                    # TODO: handle case where there are no tests to execute
                    filtered_ctest_test_list = ctest_test_list
                    test_list = None
                    if opts.test_by_names:
                        filtered_ctest_test_list = {}  # Subset of 'ctest_test_list'
                        test_list = opts.test_by_names.split(',')
                        gt_log("test case filter (specified with -n option)")

                        invalid_test_names = False
                        for test_name in test_list:
                            if test_name not in ctest_test_list:
                                gt_log_tab("test name '%s' not found in CTestTestFile.cmake (specified with -n option)" % gt_bright(test_name))
                                invalid_test_names = True
                            else:
                                gt_log_tab("test filtered in '%s'" % gt_bright(test_name))
                                filtered_ctest_test_list[test_name] = ctest_test_list[test_name]

                        if invalid_test_names:
                            gt_log("invalid test case names (specified with -n option)")
                            gt_log_tab("note: test case names are case sensitive")
                            gt_log_tab("note: see list of available test cases below")
                            list_binaries_for_targets(verbose_footer=False)

                    gt_log("running %d test%s for target '%s' and platform '%s'" % (
                        len(filtered_ctest_test_list),
                        "s" if len(filtered_ctest_test_list) != 1 else "",
                        gt_bright(yotta_target_name),
                        gt_bright(platform_name)
                    ))

                    for test_bin, image_path in filtered_ctest_test_list.iteritems():
                        test = {"test_bin": test_bin, "image_path": image_path}
                        test_queue.put(test)

                    number_of_threads = 0
                    for mut in muts_to_test:
                        #################################################################
                        # Experimental, parallel test execution
                        #################################################################
                        if number_of_threads < parallel_test_exec:
                            args = (test_result_queue, test_queue, opts, mut, mut_info, yotta_target_name)
                            t = Thread(target=run_test_thread, args=args)
                            execute_threads.append(t)
                            number_of_threads += 1

    gt_log_tab("use %s instance%s for testing" % (len(execute_threads), 's' if len(execute_threads) != 1 else ''))
    for t in execute_threads:
        t.daemon = True
        t.start()

    while test_result_queue.qsize() != len(execute_threads):
        sleep(1)

    # Merge partial test reports from different threads into the final test report
    for t in execute_threads:
        t.join()
        test_return_data = test_result_queue.get(False)
        test_platforms_match += test_return_data['test_platforms_match']
        test_exec_retcode += test_return_data['test_exec_retcode']
        partial_test_report = test_return_data['test_report']
        # TODO: find better solution, maybe use extend
        for report_key in partial_test_report.keys():
            if report_key not in test_report:
                test_report[report_key] = {}
                test_report.update(partial_test_report)
            else:
                test_report[report_key].update(partial_test_report[report_key])

    if opts.verbose_test_configuration_only:
        print
        print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
        return (0)

    # This tool is designed to work in CI.
    # We want to return success codes based on tool actions;
    # only if tests were executed and all passed do we
    # return 0 (success).
    if not opts.only_build_tests:
        # Reports (to file)
        if opts.report_junit_file_name:
            junit_report = exporter_junit(test_report)
            with open(opts.report_junit_file_name, 'w') as f:
                f.write(junit_report)
        if opts.report_text_file_name:
            gt_log("exporting to text '%s'..." % gt_bright(opts.report_text_file_name))
            text_report, text_results = exporter_text(test_report)
            with open(opts.report_text_file_name, 'w') as f:
                f.write(text_report)

        # Reports (to console)
        if opts.report_json:
            # We will not print summary and json report together
            gt_log("json test report:")
            print exporter_json(test_report)
        else:
            # Final summary
            if test_report:
                gt_log("test report:")
                text_report, text_results = exporter_text(test_report)
                print text_report
                print
                print "Result: " + text_results

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_log("no platform/target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_log("no target matching platforms were found!")
            test_exec_retcode += -100

    return (test_exec_retcode)
def get_test_spec_from_yt_module(opts):
    """ Gives test specification created from yotta module environment.
    :return TestSpec:
    """
    ### Read yotta module basic information
    yotta_module = YottaModule()
    yotta_module.init()  # Read actual yotta module data

    # Check if NO greentea-client is in module.json of repo to test, if so abort
    if not yotta_module.check_greentea_client():
        error = """
*****************************************************************************************
* We've noticed that NO 'greentea-client' module is specified in                       *
* dependency/testDependency section of this module's 'module.json' file.               *
*                                                                                       *
* This version of Greentea requires 'greentea-client' module.                          *
* Please downgrade to Greentea before v0.2.0:                                           *
*                                                                                       *
* $ pip install "mbed-greentea<0.2.0" --upgrade                                        *
*                                                                                       *
* or port your tests to new Async model: https://github.com/ARMmbed/greentea/pull/78   *
*****************************************************************************************
"""
        raise YottaError(error)

    test_spec = TestSpec()

    ### Selecting yotta targets to process
    yt_targets = []  # List of yotta targets specified by the user, processed during this run

    if opts.list_of_targets:
        yt_targets = opts.list_of_targets.split(',')
    else:
        # Trying to use locally set yotta target
        gt_logger.gt_log("checking for yotta target in current directory")
        gt_logger.gt_log_tab("reason: no --target switch set")
        current_target = get_mbed_target_from_current_dir()
        if current_target:
            gt_logger.gt_log("assuming default target as '%s'" % gt_logger.gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            yt_targets = [current_target]
        else:
            gt_logger.gt_log_tab("yotta target in current directory is not set")
            gt_logger.gt_log_err("yotta target is not specified. Use '%s' or '%s' command to set target" % (
                gt_logger.gt_bright('mbedgt -t <yotta_target>'),
                gt_logger.gt_bright('yotta target <yotta_target>')
            ))
            raise YottaError("Yotta target not set in current directory!")

    ### Use yotta to search mapping between platform names and available platforms
    # Convert 'target:platform,...' mapping to data structure
    yt_target_to_map_platform = {}
    if opts.map_platform_to_yt_target:
        gt_logger.gt_log("user defined platform -> target supported mapping definition (specified with --map-target switch)")
        for mapping in opts.map_platform_to_yt_target.split(','):
            if len(mapping.split(':')) == 2:
                yt_target, platform = mapping.split(':')
                yt_target_to_map_platform[yt_target] = platform
                gt_logger.gt_log_tab("mapped yotta target '%s' to be compatible with platform '%s'" % (
                    gt_logger.gt_bright(yt_target),
                    gt_logger.gt_bright(platform)
                ))
            else:
                gt_logger.gt_log_tab("unknown format '%s', use 'target:platform' format" % mapping)

    for yt_target in yt_targets:
        if yt_target in yt_target_to_map_platform:
            platform = yt_target_to_map_platform[yt_target]
        else:
            # Get it from the local yotta target
            platform = get_platform_name_from_yotta_target(yt_target)

        # Toolchain doesn't matter as Greentea does not have to do any selection for it, unlike platform
        toolchain = yt_target
        yotta_config = YottaConfig()
        yotta_config.init(yt_target)
        baud_rate = yotta_config.get_baudrate()
        base_path = os.path.join('.', 'build', yt_target)
        tb = TestBuild(yt_target, platform, toolchain, baud_rate, base_path)
        test_spec.add_test_builds(yt_target, tb)

        # Find tests
        ctest_test_list = load_ctest_testsuite(base_path,
                                               binary_type=get_binary_type_for_platform(platform))
        for name, path in ctest_test_list.iteritems():
            t = Test(name)
            t.add_binary(path, TestBinary.BIN_TYPE_BOOTABLE)
            tb.add_test(name, t)

    return test_spec
def test_load_ctest_testsuite_missing_link_target(self):
    null_link_target = None
    null_suite = cmake_handlers.load_ctest_testsuite(null_link_target)
    self.assertEqual(null_suite, {})
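# Hypothetical companion test (a sketch, not part of the original suite). It assumes
# load_ctest_testsuite() also falls back to an empty dict when the LINK_TARGET
# directory does not exist, mirroring the None and empty-directory cases tested above.
def test_load_ctest_testsuite_nonexistent_link_target(self):
    root_path = os.path.dirname(os.path.realpath(__file__))
    missing_path = os.path.join(root_path, "resources", "does-not-exist")  # assumed absent
    missing_suite = cmake_handlers.load_ctest_testsuite(missing_path)
    self.assertEqual(missing_suite, {})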
def main_singletest_cli(opts, args, gt_instance_uuid=None):
    """! This is the main CLI function with all command line parameters
    @details This function also implements the CLI workflow, depending on the CLI parameters provided
    @return Exit code for the environment (0 means success)
    """
    # List available test binaries (names, no extension)
    if opts.list_binaries:
        list_binaries_for_targets()
        return 0

    # Prints version and exits
    if opts.version:
        print_version()
        return 0

    # Capture alternative test console inputs, used e.g. in 'yotta test command'
    if opts.digest_source:
        host_test_result = run_host_test(image_path=None,
                                         disk=None,
                                         port=None,
                                         digest_source=opts.digest_source,
                                         verbose=opts.verbose_test_result_only)
        single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
        status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
        return status

    # mbed-enabled devices auto-detection procedures
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds()
    platform_list = mbeds.list_platforms_ext()

    # Option -t <opts.list_of_targets> supersedes yotta target set in current directory
    if opts.list_of_targets is None:
        if opts.verbose:
            gt_log("yotta target not set from command line (specified with -t option)")
        # Trying to use locally set yotta target
        current_target = get_mbed_target_from_current_dir()
        if current_target:
            gt_log("yotta target in current directory is set to '%s'" % gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            opts.list_of_targets = current_target.split(",")[0]
        else:
            gt_log("yotta target in current directory is not set")
            gt_log_err("yotta target is not specified. Use '%s' or '%s' command to set target" %
                       (gt_bright("mbedgt -t <target>"),
                        gt_bright("yotta target <target>")))
            return -1

    gt_log("detecting connected mbed-enabled devices... %s" % ("no devices detected" if not len(mbeds_list) else ""))
    if mbeds_list:
        gt_log("detected %d device%s" % (len(mbeds_list), "s" if len(mbeds_list) != 1 else ""))
    else:
        gt_log("no devices detected")

    list_of_targets = opts.list_of_targets.split(",") if opts.list_of_targets is not None else None

    test_report = {}  # Test report used to export to JUnit, HTML etc...

    if opts.list_of_targets is None:
        gt_log("assuming default target as '%s'" % gt_bright(current_target))
        gt_log_tab("reason: no --target switch set")
        list_of_targets = [current_target]

    test_exec_retcode = 0       # Incremented each time a test case result is not 'OK'
    test_platforms_match = 0    # Count how many tests were actually run with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings

    for mut in mbeds_list:
        platform_text = gt_bright(mut["platform_name"])
        serial_text = gt_bright(mut["serial_port"])
        mount_text = gt_bright(mut["mount_point"])
        platform_target_id = gt_bright(mut["target_id"])    # We can use it to do simple resource lock

        if not all([platform_text, serial_text, mount_text]):
            gt_log_err("can't detect all properties of the device!")
            gt_log_tab("detected '%s', console at '%s', mounted at '%s'" % (platform_text, serial_text, mount_text))
            continue
        gt_log_tab("detected '%s', console at '%s', mounted at '%s'" % (platform_text, serial_text, mount_text))

        # Check if mbed classic target name can be translated to yotta target name
        gt_log("scan available targets for '%s' platform..." % gt_bright(mut["platform_name"]))
        mut_info = get_mbed_clasic_target_info(mut["platform_name"])
        if mut_info is not None:
            for yotta_target in mut_info["yotta_targets"]:
                yotta_target_name = yotta_target["yotta_target"]

                if yotta_target_name in list_of_targets:
                    target_platforms_match += 1

                # Configuration print mode:
                if opts.verbose_test_configuration_only:
                    continue

                # Demo mode: --run implementation (already added --run to mbedhtrun)
                # We want to pass the file name to mbedhtrun (--run NAME => -f NAME) and run only one binary
                if opts.run_app and yotta_target_name in list_of_targets:
                    gt_log("running '%s' for '%s'" % (gt_bright(opts.run_app), gt_bright(yotta_target_name)))
                    disk = mut["mount_point"]
                    port = mut["serial_port"]
                    micro = mut["platform_name"]
                    program_cycle_s = mut_info["properties"]["program_cycle_s"]
                    copy_method = opts.copy_method if opts.copy_method else "shell"
                    verbose = opts.verbose_test_result_only

                    test_platforms_match += 1
                    host_test_result = run_host_test(opts.run_app,
                                                     disk,
                                                     port,
                                                     micro=micro,
                                                     copy_method=copy_method,
                                                     program_cycle_s=program_cycle_s,
                                                     digest_source=opts.digest_source,
                                                     json_test_cfg=opts.json_test_configuration,
                                                     run_app=opts.run_app,
                                                     verbose=True)

                    single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
                    status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
                    if single_test_result != TEST_RESULT_OK:
                        test_exec_retcode += 1
                    continue

                # Regression test mode:
                # Building sources for given target and perform normal testing
                if yotta_target_name in list_of_targets:
                    gt_log("using '%s' target, prepare to build" % gt_bright(yotta_target_name))
                    cmd = ["yotta"]  # "yotta %s --target=%s,* build" % (yotta_verbose, yotta_target_name)
                    if opts.verbose is not None:
                        cmd.append("-v")
                    cmd.append("--target=%s,*" % yotta_target_name)
                    cmd.append("build")
                    if opts.build_to_release:
                        cmd.append("-r")
                    elif opts.build_to_debug:
                        cmd.append("-d")

                    if not opts.skip_yotta_build:
                        gt_log("building your sources and tests with yotta...")
                        gt_log_tab("calling yotta: %s" % " ".join(cmd))
                        yotta_result, yotta_ret = run_cli_command(cmd, shell=False, verbose=opts.verbose)
                        if yotta_result:
                            gt_log("yotta build for target '%s' was successful" % gt_bright(yotta_target_name))
                        else:
                            gt_log_err("yotta build failed!")
                    else:
                        gt_log("skipping calling yotta (specified with --skip-build option)")
                        yotta_result, yotta_ret = True, 0   # Skip build and assume 'yotta build' was successful

                    # Build phase will be followed by test execution for each target
                    if yotta_result and not opts.only_build_tests:
                        binary_type = mut_info["properties"]["binary_type"]
                        ctest_test_list = load_ctest_testsuite(os.path.join(".", "build", yotta_target_name),
                                                               binary_type=binary_type)
                        test_list = None
                        if opts.test_by_names:
                            test_list = opts.test_by_names.split(",")
                            gt_log("test case filter: %s (specified with -n option)" %
                                   ", ".join(["'%s'" % gt_bright(t) for t in test_list]))
                            invalid_test_names = False
                            for test_n in test_list:
                                if test_n not in ctest_test_list:
                                    gt_log_tab("test name '%s' not found in CTestTestFile.cmake (specified with -n option)" % gt_bright(test_n))
                                    invalid_test_names = True
                            if invalid_test_names:
                                gt_log("invalid test case names (specified with -n option)")
                                gt_log_tab("note: test case names are case sensitive")
                                gt_log_tab("note: see list of available test cases below")
                                list_binaries_for_targets(verbose_footer=False)

                        gt_log("running tests for target '%s'" % gt_bright(yotta_target_name))
                        for test_bin, image_path in ctest_test_list.iteritems():
                            test_result = "SKIPPED"
                            # Skip tests not mentioned in the -n option
                            if opts.test_by_names:
                                if test_bin not in test_list:
                                    continue

                            if get_mbed_supported_test(test_bin):
                                disk = mut["mount_point"]
                                port = mut["serial_port"]
                                micro = mut["platform_name"]
                                program_cycle_s = mut_info["properties"]["program_cycle_s"]
                                copy_method = opts.copy_method if opts.copy_method else "shell"
                                verbose = opts.verbose_test_result_only

                                test_platforms_match += 1
                                gt_log_tab("running host test...")
                                host_test_result = run_host_test(image_path,
                                                                 disk,
                                                                 port,
                                                                 micro=micro,
                                                                 copy_method=copy_method,
                                                                 program_cycle_s=program_cycle_s,
                                                                 digest_source=opts.digest_source,
                                                                 json_test_cfg=opts.json_test_configuration,
                                                                 verbose=verbose)

                                single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
                                test_result = single_test_result
                                if single_test_result != TEST_RESULT_OK:
                                    test_exec_retcode += 1

                                # Update report for optional reporting feature
                                test_name = test_bin.lower()
                                if yotta_target_name not in test_report:
                                    test_report[yotta_target_name] = {}
                                if test_name not in test_report[yotta_target_name]:
                                    test_report[yotta_target_name][test_name] = {}

                                test_report[yotta_target_name][test_name]["single_test_result"] = single_test_result
                                test_report[yotta_target_name][test_name]["single_test_output"] = single_test_output
                                test_report[yotta_target_name][test_name]["elapsed_time"] = single_testduration
                                test_report[yotta_target_name][test_name]["platform_name"] = micro
                                test_report[yotta_target_name][test_name]["copy_method"] = copy_method

                                if single_test_result != "OK" and not verbose and opts.report_fails:
                                    # In some cases we want to print the console output to see why a test failed,
                                    # even if we are not in verbose mode
                                    gt_log_tab("test failed, reporting console output (specified with --report-fails option)")
                                    print
                                    print single_test_output

                                gt_log_tab("test '%s' %s %s in %.2f sec" %
                                           (test_bin, "." * (80 - len(test_bin)), test_result, single_testduration))

                    # We need to stop executing if the yotta build fails
                    if not yotta_result:
                        gt_log_err("yotta returned %d" % yotta_ret)
                        test_exec_retcode = -1
                        return test_exec_retcode
        else:
            gt_log_err("mbed classic target name '%s' is not in target database" % gt_bright(mut["platform_name"]))

    if opts.verbose_test_configuration_only:
        print
        print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
        return 0

    # This tool is designed to work in CI.
    # We want to return success codes based on tool actions;
    # only if tests were executed and all passed do we return 0 (success).
    if not opts.only_build_tests:
        # Reports (to file)
        if opts.report_junit_file_name:
            junit_report = exporter_junit(test_report)
            with open(opts.report_junit_file_name, "w") as f:
                f.write(junit_report)
        if opts.report_text_file_name:
            gt_log("exporting to text '%s'..." % gt_bright(opts.report_text_file_name))
            text_report, text_results = exporter_text(test_report)
            with open(opts.report_text_file_name, "w") as f:
                f.write(text_report)

        # Reports (to console)
        if opts.report_json:
            # We will not print summary and json report together
            gt_log("json test report:")
            print exporter_json(test_report)
        else:
            # Final summary
            gt_log("test report:")
            text_report, text_results = exporter_text(test_report)
            print text_report
            print
            print "Result: " + text_results

        # This flag guards 'build only' runs, where we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_log("no target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_log("no target matching platforms were found!")
            test_exec_retcode += -100

    return test_exec_retcode
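# --- Illustrative sketch (not part of Greentea itself) ---
# main_singletest_cli() composes its CI exit code from several signals: it starts at 0,
# adds +1 for every test case that does not finish with 'OK', then adds -10 when no
# matching tests were executed and -100 when no matching platforms were found (a failed
# yotta build short-circuits earlier with -1). The helper name compose_exit_code is
# hypothetical and only mirrors that arithmetic for a regular (non build-only) run.
def compose_exit_code(failed_tests, tests_ran, platforms_matched):
    """Mimic the retcode arithmetic of main_singletest_cli() for a regular test run."""
    retcode = failed_tests          # +1 per test case that did not finish with 'OK'
    if not tests_ran:
        retcode += -10              # no target matching tests were found
    if not platforms_matched:
        retcode += -100             # no target matching platforms were found
    return retcode

# Example: compose_exit_code(failed_tests=0, tests_ran=False, platforms_matched=False)
# yields -110, while a clean run compose_exit_code(0, True, True) yields 0.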