def test_test_by_names_invalid(self):
    self.test_by_names = 'test3,testXY'
    filtered_ctest_test_list = mbed_greentea_cli.create_filtered_test_list(
        self.ctest_test_list, self.test_by_names, self.skip_test)
    filtered_test_list = {'test3': '\\build\\test3.bin'}
    self.assertEqual(filtered_test_list, filtered_ctest_test_list)
def test_skip_test(self):
    self.skip_test = 'test1,test2'
    filtered_ctest_test_list = mbed_greentea_cli.create_filtered_test_list(
        self.ctest_test_list, self.test_by_names, self.skip_test)
    filtered_test_list = {'test3': '\\build\\test3.bin', 'test4': '\\build\\test4.bin'}
    self.assertEqual(filtered_test_list, filtered_ctest_test_list)
def test_list_is_Empty_test_by_names(self):
    self.ctest_test_list = {}
    self.test_by_names = 'test4'
    filtered_ctest_test_list = mbed_greentea_cli.create_filtered_test_list(
        self.ctest_test_list, self.test_by_names, self.skip_test)
    filtered_test_list = {}
    self.assertEqual(filtered_test_list, filtered_ctest_test_list)
def test_list_is_None_skip_test(self):
    self.ctest_test_list = None
    self.skip_test = 'test3'
    filtered_ctest_test_list = mbed_greentea_cli.create_filtered_test_list(
        self.ctest_test_list, self.test_by_names, self.skip_test)
    filtered_test_list = {}
    self.assertEqual(filtered_test_list, filtered_ctest_test_list)
def test_prefix_filter_no_star(self):
    self.test_by_names = 'mbed-drivers-test-ticker_2,mbed-drivers-test-rtc,mbed-drivers-test-ticker'
    filtered_ctest_test_list = mbed_greentea_cli.create_filtered_test_list(
        self.ctest_test_list_mbed_drivers, self.test_by_names, self.skip_test)
    expected = ['mbed-drivers-test-ticker',
                'mbed-drivers-test-ticker_2',
                'mbed-drivers-test-rtc']
    self.assertEqual(len(expected), len(filtered_ctest_test_list))
    self.assertEqual(set(filtered_ctest_test_list.keys()), set(expected))
def test_create_filtered_test_list(self):
    test_spec = TestSpec()
    test_spec.parse(test_spec_def)
    test_build = test_spec.get_test_builds()[0]

    test_list = mbed_greentea_cli.create_filtered_test_list(
        test_build.get_tests(), 'mbed-drivers-test-generic_*', None, test_spec=test_spec)
    self.assertEqual(set(test_list.keys()), set(['mbed-drivers-test-generic_tests']))

    test_list = mbed_greentea_cli.create_filtered_test_list(
        test_build.get_tests(), '*_strings', None, test_spec=test_spec)
    self.assertEqual(set(test_list.keys()), set(['mbed-drivers-test-c_strings']))

    test_list = mbed_greentea_cli.create_filtered_test_list(
        test_build.get_tests(), 'mbed*s', None, test_spec=test_spec)
    expected = set(['mbed-drivers-test-c_strings', 'mbed-drivers-test-generic_tests'])
    self.assertEqual(set(test_list.keys()), expected)

    test_list = mbed_greentea_cli.create_filtered_test_list(
        test_build.get_tests(), '*-drivers-*', None, test_spec=test_spec)
    expected = set(['mbed-drivers-test-c_strings', 'mbed-drivers-test-generic_tests'])
    self.assertEqual(set(test_list.keys()), expected)

    # Should be case insensitive
    test_list = mbed_greentea_cli.create_filtered_test_list(
        test_build.get_tests(), '*-DRIVERS-*', None, test_spec=test_spec)
    expected = set(['mbed-drivers-test-c_strings', 'mbed-drivers-test-generic_tests'])
    self.assertEqual(set(test_list.keys()), expected)
def test_prefix_filter_merge_n_multi_star(self):
    self.test_by_names = 'tests-mbedmicro-mbed*,tests-mbedmicro-rtos*'
    filtered_ctest_test_list = mbed_greentea_cli.create_filtered_test_list(
        self.ctest_test_list_mbed_drivers_ext, self.test_by_names, self.skip_test)
    expected = ['tests-mbedmicro-mbed-attributes',
                'tests-mbedmicro-mbed-call_before_main',
                'tests-mbedmicro-mbed-cpp',
                'tests-mbedmicro-mbed-div',
                'tests-mbedmicro-mbed-heap_and_stack',
                'tests-mbedmicro-rtos-mbed-basic',
                'tests-mbedmicro-rtos-mbed-isr',
                'tests-mbedmicro-rtos-mbed-mail',
                'tests-mbedmicro-rtos-mbed-mutex',
                'tests-mbedmicro-rtos-mbed-queue',
                'tests-mbedmicro-rtos-mbed-semaphore',
                'tests-mbedmicro-rtos-mbed-signals',
                'tests-mbedmicro-rtos-mbed-threads',
                'tests-mbedmicro-rtos-mbed-timer']
    self.assertEqual(len(expected), len(filtered_ctest_test_list))
    self.assertEqual(set(filtered_ctest_test_list.keys()), set(expected))
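# A minimal, illustrative sketch (not part of greentea itself) of the selection
# behaviour the tests above expect from create_filtered_test_list(): the filter
# string is a comma-separated list of names, '*' acts as a glob, and matching is
# case-insensitive. The helper name '_example_wildcard_filter' is hypothetical
# and exists only to document the expected semantics.
import fnmatch

def _example_wildcard_filter(test_list, test_by_names):
    """Keep tests whose name matches any comma-separated, case-insensitive glob."""
    selected = {}
    for pattern in test_by_names.lower().split(','):
        for name, binary in (test_list or {}).items():
            if fnmatch.fnmatchcase(name.lower(), pattern):
                selected[name] = binary
    return selected

# e.g. _example_wildcard_filter({'mbed-drivers-test-c_strings': 'x.bin'}, '*-DRIVERS-*')
# -> {'mbed-drivers-test-c_strings': 'x.bin'}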
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is the main CLI function with all command line parameters
    @details This function also implements the CLI workflow, depending on the command line parameters provided
    @return Process exit code reported back to the environment
    """

    def filter_ready_devices(mbeds_list):
        """! Filters list of MUTs to check if all MUTs are correctly detected with the mbed-ls module
        @details This function logs a lot to help users figure out the root cause of their problems
        @param mbeds_list List of MUTs to verify
        @return Tuple of (MUTs detected correctly, MUTs not detected fully)
        """
        ready_mbed_devices = []      # Devices which can be used (are fully detected)
        not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

        required_mut_props = ['target_id', 'platform_name', 'serial_port', 'mount_point']

        gt_logger.gt_log("detected %d device%s" % (len(mbeds_list), 's' if len(mbeds_list) != 1 else ''))
        for mut in mbeds_list:
            for prop in required_mut_props:
                if not mut[prop]:
                    # Adding MUT to NOT DETECTED FULLY list
                    if mut not in not_ready_mbed_devices:
                        not_ready_mbed_devices.append(mut)
                        gt_logger.gt_log_err("mbed-ls was unable to enumerate correctly all properties of the device!")
                        gt_logger.gt_log_tab("check with 'mbedls -j' command if all properties of your device are enumerated properly")

                    gt_logger.gt_log_err("mbed-ls property '%s' is '%s'" % (prop, str(mut[prop])))
                    if prop == 'serial_port':
                        gt_logger.gt_log_tab("check if your serial port driver is correctly installed!")
                    if prop == 'mount_point':
                        gt_logger.gt_log_tab('check if your OS can detect and mount mbed device mount point!')
            else:
                # Adding MUT to DETECTED CORRECTLY list
                ready_mbed_devices.append(mut)
        return (ready_mbed_devices, not_ready_mbed_devices)

    def get_parallel_value(value):
        """! Get correct value for parallel switch (--parallel)
        @param value Value passed from --parallel
        @return Refactored version of parallel number
        """
        try:
            parallel_test_exec = int(value)
            if parallel_test_exec < 1:
                parallel_test_exec = 1
        except ValueError:
            gt_logger.gt_log_err("argument of mode --parallel is not an int, disabled parallel mode")
            parallel_test_exec = 1
        return parallel_test_exec

    # This is how you magically control colours in this piece of art software
    gt_logger.colorful(not opts.plain)

    # Prints version and exits
    if opts.version:
        print_version()
        return (0)

    # Load test specification or print warnings / info messages and exit CLI mode
    test_spec, ret = get_test_spec(opts)
    if not test_spec:
        return ret

    # Verbose flag
    verbose = opts.verbose_test_result_only

    # We will load hooks from JSON file to support extra behaviour during test execution
    greentea_hooks = GreenteaHooks(opts.hooks_json) if opts.hooks_json else None

    # Capture alternative test console inputs, used e.g. in 'yotta test command'
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(None,
                                         None,
                                         None,
                                         None,
                                         None,
                                         digest_source=opts.digest_source,
                                         enum_host_tests_path=enum_host_tests_path,
                                         verbose=verbose)

        # Some error in htrun, abort test execution
        if isinstance(host_test_result, int):
            # int(host_test_result) > 0 - Call to mbedhtrun failed
            # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
            return host_test_result

        # If execution was successful 'run_host_test' returns a tuple with results
        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary, memory_metrics = host_test_result
        status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
        return (status)

    ### Query with mbedls for available mbed-enabled devices
    gt_logger.gt_log("detecting connected mbed-enabled devices...")

    ### Check if argument of --parallel mode is an integer greater than or equal to 1
    parallel_test_exec = get_parallel_value(opts.parallel_test_exec)

    # Detect devices connected to system
    mbeds = mbed_os_tools.detect.create()
    mbeds_list = mbeds.list_mbeds(unique_names=True, read_details_txt=True)

    if opts.global_resource_mgr:
        # Mocking available platform requested by --grm switch
        grm_values = parse_global_resource_mgr(opts.global_resource_mgr)
        if grm_values:
            gt_logger.gt_log_warn("entering global resource manager mbed-ls dummy mode!")
            grm_platform_name, grm_module_name, grm_ip_name, grm_port_name = grm_values
            mbeds_list = []
            for _ in range(parallel_test_exec):
                mbeds_list.append(mbeds.get_dummy_platform(grm_platform_name))
            opts.global_resource_mgr = ':'.join(grm_values[1:])
            gt_logger.gt_log_tab("adding dummy platform '%s'" % grm_platform_name)
        else:
            gt_logger.gt_log("global resource manager switch '--grm %s' is in the wrong format!" % opts.global_resource_mgr)
            return (-1)

    if opts.fast_model_connection:
        # Mocking available platform requested by --fm switch
        fm_values = parse_fast_model_connection(opts.fast_model_connection)
        if fm_values:
            gt_logger.gt_log_warn("entering fastmodel connection, mbed-ls dummy simulator mode!")
            fm_platform_name, fm_config_name = fm_values
            mbeds_list = []
            for _ in range(parallel_test_exec):
                mbeds_list.append(mbeds.get_dummy_platform(fm_platform_name))
            opts.fast_model_connection = fm_config_name
            gt_logger.gt_log_tab("adding dummy fastmodel platform '%s'" % fm_platform_name)
        else:
            gt_logger.gt_log("fast model connection switch '--fm %s' is in the wrong format!" % opts.fast_model_connection)
            return (-1)

    ready_mbed_devices = []      # Devices which can be used (are fully detected)
    not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

    if mbeds_list:
        ready_mbed_devices, not_ready_mbed_devices = filter_ready_devices(mbeds_list)
        if ready_mbed_devices:
            # Devices in form of a pretty formatted table
            for line in log_mbed_devices_in_table(ready_mbed_devices).splitlines():
                gt_logger.gt_log_tab(line.strip(), print_text=verbose)
    else:
        gt_logger.gt_log_err("no compatible devices detected")
        return (RET_NO_DEVICES)

    ### We can filter in only specific target ids
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_logger.gt_log("filtering out target ids not on the list below (specified with --use-tids switch)")
        accepted_target_ids = opts.use_target_ids.split(',')
        for tid in accepted_target_ids:
            gt_logger.gt_log_tab("accepting target id '%s'" % gt_logger.gt_bright(tid))

    test_exec_retcode = 0       # Incremented each time a test case result is not 'OK'
    test_platforms_match = 0    # Count how many tests were actually run with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings

    test_report = {}             # Test report used to export to JUnit, HTML etc...
    test_queue = Queue()         # Contains information about test_bin and image_path for each test case
    test_result_queue = Queue()  # Used to store results of each thread
    execute_threads = []         # List of threads to run test cases

    # Values used to generate random seed for test execution order shuffle
    SHUFFLE_SEED_ROUND = 10  # Value used to round float random seed
    shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND)

    # Set shuffle seed if it is provided with command line option
    if opts.shuffle_test_seed:
        shuffle_random_seed = round(float(opts.shuffle_test_seed), SHUFFLE_SEED_ROUND)

    ### Testing procedures, for each target, for each target's compatible platform
    # In case we are using a test spec (switch --test-spec), command line option -t <list_of_targets>
    # is used to enumerate builds from the test spec we are supplying
    filter_test_builds = opts.list_of_targets.split(',') if opts.list_of_targets else None
    for test_build in test_spec.get_test_builds(filter_test_builds):
        platform_name = test_build.get_platform()
        gt_logger.gt_log("processing target '%s' toolchain '%s' compatible platforms... (note: switch set to --parallel %d)" %
                         (gt_logger.gt_bright(platform_name),
                          gt_logger.gt_bright(test_build.get_toolchain()),
                          int(opts.parallel_test_exec)))

        baudrate = test_build.get_baudrate()

        ### Select MUTs to test from list of available MUTs to start testing
        mut = None
        number_of_parallel_instances = 1
        muts_to_test = []  # MUTs to actually be tested
        for mbed_dev in ready_mbed_devices:
            if accepted_target_ids and mbed_dev['target_id'] not in accepted_target_ids:
                continue

            # Check that we have a valid serial port detected.
            sp = mbed_dev['serial_port']
            if not sp:
                gt_logger.gt_log_err("Serial port for target %s not detected correctly\n" % mbed_dev['target_id'])
                continue

            if mbed_dev['platform_name'] == platform_name:
                # We will force configuration specific baudrate by adding baudrate to serial port
                # Only add baudrate decoration for serial port if it's not already there
                # Format used by mbedhtrun: 'serial_port' = '<serial_port_name>:<baudrate>'
                if not sp.endswith(str(baudrate)):
                    mbed_dev['serial_port'] = "%s:%d" % (mbed_dev['serial_port'], baudrate)

                mut = mbed_dev
                if mbed_dev not in muts_to_test:
                    # We will only add unique devices to list of devices "for testing" in this test run
                    muts_to_test.append(mbed_dev)
                    if number_of_parallel_instances < parallel_test_exec:
                        number_of_parallel_instances += 1
                    else:
                        break

        # Devices in form of a pretty formatted table
        for line in log_mbed_devices_in_table(muts_to_test).splitlines():
            gt_logger.gt_log_tab(line.strip(), print_text=verbose)

        # Configuration print mode:
        if opts.verbose_test_configuration_only:
            continue

        ### If we have at least one available device we can proceed
        if mut:
            target_platforms_match += 1

            build = test_build.get_name()
            build_path = test_build.get_path()

            # Demo mode: --run implementation (already added --run to mbedhtrun)
            # We want to pass the file name to mbedhtrun (--run NAME => -f NAME) and run only one binary
            if opts.run_app:
                gt_logger.gt_log("running '%s' for '%s'-'%s'" %
                                 (gt_logger.gt_bright(opts.run_app),
                                  gt_logger.gt_bright(platform_name),
                                  gt_logger.gt_bright(test_build.get_toolchain())))
                disk = mut['mount_point']
                port = mut['serial_port']
                micro = mut['platform_name']
                program_cycle_s = get_platform_property(micro, "program_cycle_s")
                copy_method = opts.copy_method if opts.copy_method else 'shell'
                enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

                test_platforms_match += 1
                host_test_result = run_host_test(opts.run_app,
                                                 disk,
                                                 port,
                                                 build_path,
                                                 mut['target_id'],
                                                 micro=micro,
                                                 copy_method=copy_method,
                                                 program_cycle_s=program_cycle_s,
                                                 digest_source=opts.digest_source,
                                                 json_test_cfg=opts.json_test_configuration,
                                                 run_app=opts.run_app,
                                                 enum_host_tests_path=enum_host_tests_path,
                                                 verbose=True)

                # Some error in htrun, abort test execution
                if isinstance(host_test_result, int):
                    # int(host_test_result) > 0 - Call to mbedhtrun failed
                    # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
                    return host_test_result

                # If execution was successful 'run_host_test' returns a tuple with results
                single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary, memory_metrics = host_test_result
                status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
                if single_test_result != TEST_RESULT_OK:
                    test_exec_retcode += 1

            test_list = test_build.get_tests()
            filtered_ctest_test_list = create_filtered_test_list(test_list,
                                                                 opts.test_by_names,
                                                                 opts.skip_test,
                                                                 test_spec=test_spec)

            gt_logger.gt_log("running %d test%s for platform '%s' and toolchain '%s'" %
                             (len(filtered_ctest_test_list),
                              "s" if len(filtered_ctest_test_list) != 1 else "",
                              gt_logger.gt_bright(platform_name),
                              gt_logger.gt_bright(test_build.get_toolchain())))

            # Test execution order can be shuffled (also with provided random seed)
            # for test execution reproduction.
            filtered_ctest_test_list_keys = list(filtered_ctest_test_list.keys())
            if opts.shuffle_test_order:
                # We want to shuffle test names randomly
                random.shuffle(filtered_ctest_test_list_keys, lambda: shuffle_random_seed)

            for test_name in filtered_ctest_test_list_keys:
                image_path = filtered_ctest_test_list[test_name].get_binary(binary_type=TestBinary.BIN_TYPE_BOOTABLE).get_path()
                if image_path is None:
                    gt_logger.gt_log_err("Failed to find test binary for test %s flash method %s" % (test_name, 'usb'))
                else:
                    test = {"test_bin": test_name, "image_path": image_path}
                    test_queue.put(test)

            number_of_threads = 0
            for mut in muts_to_test:
                # Experimental, parallel test execution
                if number_of_threads < parallel_test_exec:
                    args = (test_result_queue, test_queue, opts, mut, build, build_path, greentea_hooks)
                    t = Thread(target=run_test_thread, args=args)
                    execute_threads.append(t)
                    number_of_threads += 1

        gt_logger.gt_log_tab("use %s instance%s of execution threads for testing" %
                             (len(execute_threads),
                              's' if len(execute_threads) != 1 else str()),
                             print_text=verbose)
        for t in execute_threads:
            t.daemon = True
            t.start()

        # Merge partial test reports from different threads to final test report
        for t in execute_threads:
            try:
                # We can't block forever here since that prevents KeyboardInterrupts
                # from being propagated correctly. Therefore, we just join with a
                # timeout of 0.1 seconds until the thread isn't alive anymore.
                # A time of 0.1 seconds is a fairly arbitrary choice. It needs
                # to balance CPU utilization and responsiveness to keyboard interrupts.
                # Checking 10 times a second seems to be stable and responsive.
                while t.is_alive():
                    t.join(0.1)

                test_return_data = test_result_queue.get(False)
            except Exception as e:
                # No test report generated
                gt_logger.gt_log_err("could not generate test report: " + str(e))
                test_exec_retcode += -1000
                return test_exec_retcode

            test_platforms_match += test_return_data['test_platforms_match']
            test_exec_retcode += test_return_data['test_exec_retcode']
            partial_test_report = test_return_data['test_report']
            # todo: find better solution, maybe use extend
            for report_key in partial_test_report.keys():
                if report_key not in test_report:
                    test_report[report_key] = {}
                    test_report.update(partial_test_report)
                else:
                    test_report[report_key].update(partial_test_report[report_key])

        execute_threads = []

    if opts.verbose_test_configuration_only:
        print()
        print("Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target")
        return (0)

    gt_logger.gt_log("all tests finished!")

    # We will execute post test hooks on tests
    for build_name in test_report:
        test_name_list = []  # All test case names for particular yotta target
        for test_name in test_report[build_name]:
            test = test_report[build_name][test_name]
            # Test was successful
            if test['single_test_result'] in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
                test_name_list.append(test_name)
                # Call hook executed for each test, just after all tests are finished
                if greentea_hooks:
                    # We can execute this test hook just after all tests are finished ('hook_post_test_end')
                    format = {
                        "test_name": test_name,
                        "test_bin_name": test['test_bin_name'],
                        "image_path": test['image_path'],
                        "build_path": test['build_path'],
                        "build_path_abs": test['build_path_abs'],
                    }
                    greentea_hooks.run_hook_ext('hook_post_test_end', format)
        if greentea_hooks:
            build = test_spec.get_test_build(build_name)
            assert build is not None, "Failed to find build info for build %s" % build_name

            # Call hook executed for each yotta target, just after all tests are finished
            build_path = build.get_path()
            build_path_abs = os.path.abspath(build_path)
            # We can execute this test hook just after all tests are finished ('hook_post_all_test_end')
            format = {
                "build_path": build_path,
                "build_path_abs": build_path_abs,
                "test_name_list": test_name_list,
            }
            greentea_hooks.run_hook_ext('hook_post_all_test_end', format)

    # This tool is designed to work in CI
    # We want to return success codes based on tool actions,
    # only if tests were executed and all passed do we want to
    # return 0 (success)
    if not opts.only_build_tests:
        # Prints shuffle seed
        gt_logger.gt_log("shuffle seed: %.*f" % (SHUFFLE_SEED_ROUND, shuffle_random_seed))

        def dump_report_to_text_file(filename, content):
            """! Closure for report dumps to text files
            @param filename Name of destination file
            @param content Text content of the file to write
            @return True if write was successful, else return False
            """
            try:
                with open(filename, 'w') as f:
                    f.write(content)
            except IOError as e:
                gt_logger.gt_log_err("can't export to '%s', reason:" % filename)
                gt_logger.gt_log_err(str(e))
                return False
            return True

        # Reports to JUNIT file
        if opts.report_junit_file_name:
            gt_logger.gt_log("exporting to JUNIT file '%s'..." % gt_logger.gt_bright(opts.report_junit_file_name))
            # This test specification will be used by the JUnit exporter to populate TestSuite.properties (useful meta-data for Viewer)
            test_suite_properties = {}
            for target_name in test_report:
                test_build_properties = get_test_build_properties(test_spec, target_name)
                if test_build_properties:
                    test_suite_properties[target_name] = test_build_properties
            junit_report = exporter_testcase_junit(test_report, test_suite_properties=test_suite_properties)
            dump_report_to_text_file(opts.report_junit_file_name, junit_report)

        # Reports to text file
        if opts.report_text_file_name:
            gt_logger.gt_log("exporting to TEXT '%s'..." % gt_logger.gt_bright(opts.report_text_file_name))
            # Useful text reporter for those who do not like to copy-paste results tables from the console
            text_report, text_results = exporter_text(test_report)
            text_testcase_report, text_testcase_results = exporter_testcase_text(test_report)
            text_final_report = '\n'.join([text_report, text_results, text_testcase_report, text_testcase_results])
            dump_report_to_text_file(opts.report_text_file_name, text_final_report)

        # Reports to JSON file
        if opts.report_json_file_name:
            # We will not print summary and json report together
            gt_logger.gt_log("exporting to JSON '%s'..." % gt_logger.gt_bright(opts.report_json_file_name))
            json_report = exporter_json(test_report)
            dump_report_to_text_file(opts.report_json_file_name, json_report)

        # Reports to HTML file
        if opts.report_html_file_name:
            gt_logger.gt_log("exporting to HTML file '%s'..." % gt_logger.gt_bright(opts.report_html_file_name))
            # Generate an HTML page displaying all of the results
            html_report = exporter_html(test_report)
            dump_report_to_text_file(opts.report_html_file_name, html_report)

        # Memory metrics to CSV file
        if opts.report_memory_metrics_csv_file_name:
            gt_logger.gt_log("exporting memory metrics to CSV file '%s'..." % gt_logger.gt_bright(opts.report_memory_metrics_csv_file_name))
            # Generate a CSV file containing all memory metrics
            memory_metrics_csv_report = exporter_memory_metrics_csv(test_report)
            dump_report_to_text_file(opts.report_memory_metrics_csv_file_name, memory_metrics_csv_report)

        # Final summary
        if test_report:
            # Test suite report
            gt_logger.gt_log("test suite report:")
            text_report, text_results = exporter_text(test_report)
            print(text_report)
            gt_logger.gt_log("test suite results: " + text_results)
            # Test case detailed report
            gt_logger.gt_log("test case report:")
            text_testcase_report, text_testcase_results = exporter_testcase_text(test_report)
            print(text_testcase_report)
            gt_logger.gt_log("test case results: " + text_testcase_results)

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_logger.gt_log_warn("no platform/target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_logger.gt_log_warn("no matching platforms were found!")
            test_exec_retcode += -100

    return (test_exec_retcode)
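# Reader's note (summary of the exit-code handling implemented above, not new
# behaviour): main_cli() returns 0 only when tests ran and passed. Per-thread
# failure counts accumulate into test_exec_retcode, a failed report merge adds
# -1000, no matching tests adds -10, no matching platforms adds -100, a
# malformed --grm/--fm switch returns -1, and RET_NO_DEVICES is returned when
# no compatible devices are detected.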