def run_htrun(cmd, verbose):
    # detect overflow when running tests
    htrun_output = str()
    # run_command will return None if process can't be opened (Issue #134)
    p = run_command(cmd)
    if not p:
        # int value > 0 notifies caller that starting of host test process failed
        return RUN_HOST_TEST_POPEN_ERROR

    htrun_failure_line = re.compile(r'\[RXD\] (:\d+::FAIL: .*)')

    for line in iter(p.stdout.readline, b''):
        htrun_output += line
        # When dumping output to file both \r and \n will be a new line
        # To avoid this "extra new-line" we only use \n at the end
        test_error = htrun_failure_line.search(line)
        if test_error:
            gt_logger.gt_log_err(test_error.group(1))
        if verbose:
            sys.stdout.write(line.rstrip() + '\n')
            sys.stdout.flush()

    # Check if process was terminated by signal
    returncode = p.wait()
    return returncode, htrun_output
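# Illustrative sketch (added; not part of the original module): callers such as
# main_cli() distinguish the two possible run_htrun()/run_host_test() outcomes by
# type -- an int means the host test process could not even be started, a tuple
# means it ran. The helper below is a made-up example of that pattern.
def _example_handle_htrun_result(result):
    if isinstance(result, int):
        # e.g. RUN_HOST_TEST_POPEN_ERROR returned by run_htrun()
        return "htrun could not be started, error code %d" % result
    returncode, output = result
    return "htrun finished with code %d, captured %d bytes of output" % (returncode, len(output))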
def get_mbed_targets_from_yotta(mbed_classic_name):
    """! Uses the 'yotta search' command to fetch matching mbed device target names
    @return List of possible targets, or an empty list if no match was found
    @details Example:
             $ yt search -k mbed-target:k64f target
             frdm-k64f-gcc 0.0.16: Official mbed build target for the mbed frdm-k64f development board.
             frdm-k64f-armcc 0.0.10: Official mbed build target for the mbed frdm-k64f development board, using the armcc toolchain.

             Note: Function prints on console
    """
    result = []
    cmd = ['yotta', '--plain', 'search', '-k', 'mbed-target:%s' % mbed_classic_name.lower().strip(), 'target']
    gt_logger.gt_log("yotta search for mbed-target '%s'" % gt_logger.gt_bright(mbed_classic_name.lower().strip()))
    gt_logger.gt_log_tab("calling yotta: %s" % " ".join(cmd))
    _stdout, _stderr, _ret = run_cli_process(cmd)
    if not _ret:
        for line in _stdout.splitlines():
            yotta_target_name = parse_yotta_search_cmd_output(line)
            if yotta_target_name and yotta_target_name not in result:
                result.append(yotta_target_name)
                gt_logger.gt_log_tab("found target '%s'" % gt_logger.gt_bright(yotta_target_name))
    else:
        gt_logger.gt_log_err("calling yotta search failed!")
    return result
def filter_ready_devices(mbeds_list):
    """! Filters list of MUTs to check if all MUTs are correctly detected with mbed-ls module.
    @details This function logs a lot to help users figure out root cause of their problems
    @param mbeds_list List of MUTs to verify
    @return Tuple of (MUTS detected correctly, MUTs not detected fully)
    """
    ready_mbed_devices = []      # Devices which can be used (are fully detected)
    not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

    gt_logger.gt_log("detected %d device%s" % (len(mbeds_list), "s" if len(mbeds_list) != 1 else ""))
    for mut in mbeds_list:
        if not all(mut.values()):
            gt_logger.gt_log_err("mbed-ls was unable to enumerate correctly all properties of the device!")
            gt_logger.gt_log_tab("check with 'mbedls -j' command if all properties of your device are enumerated properly")
            for prop in mut:
                if not mut[prop]:
                    # Adding MUT to NOT DETECTED FULLY list
                    if mut not in not_ready_mbed_devices:
                        not_ready_mbed_devices.append(mut)
                    gt_logger.gt_log_err("mbed-ls property '%s' is '%s'" % (prop, str(mut[prop])))
                    if prop == "serial_port":
                        gt_logger.gt_log_tab("check if your serial port driver is correctly installed!")
                    if prop == "mount_point":
                        gt_logger.gt_log_tab("check if your OS can detect and mount mbed device mount point!")
        else:
            # Adding MUT to DETECTED CORRECTLY list
            ready_mbed_devices.append(mut)
    return (ready_mbed_devices, not_ready_mbed_devices)
def build_with_yotta(yotta_target_name, verbose=False, build_to_release=False, build_to_debug=False):
    cmd = ["yotta"]  # "yotta %s --target=%s,* build"
    if verbose:
        cmd.append("-v")
    cmd.append("--target=%s,*" % yotta_target_name)
    cmd.append("build")
    if build_to_release:
        cmd.append("-r")
    elif build_to_debug:
        cmd.append("-d")

    gt_logger.gt_log("building your sources and tests with yotta...")
    gt_logger.gt_log_tab("calling yotta: %s" % (" ".join(cmd)))

    yotta_result, yotta_ret = run_cli_command(cmd, shell=False, verbose=verbose)
    if yotta_result:
        gt_logger.gt_log("yotta build for target '%s' was successful" % gt_logger.gt_bright(yotta_target_name))
    else:
        gt_logger.gt_log_err("yotta build failed!")
    return yotta_result, yotta_ret
def merge_multiple_test_specifications_from_file_list(test_spec_file_name_list):
    """! For each file in test_spec_file_name_list merge all test specifications into one
    @param test_spec_file_name_list List of paths to different test specifications
    @return TestSpec object with all test specification data inside
    """

    def copy_builds_between_test_specs(source, destination):
        """! Copies build key-value pairs between two test_spec dicts
        @param source Source dictionary
        @param destination Dictionary to which the 'builds' key-values will be applied
        @return Dictionary with merged source
        """
        result = destination.copy()
        if 'builds' in source and 'builds' in destination:
            for k in source['builds']:
                result['builds'][k] = source['builds'][k]
        return result

    merged_test_spec = {}
    for test_spec_file in test_spec_file_name_list:
        gt_logger.gt_log_tab("using '%s'" % test_spec_file)
        try:
            with open(test_spec_file, 'r') as f:
                test_spec_data = json.load(f)
                merged_test_spec = copy_builds_between_test_specs(merged_test_spec, test_spec_data)
        except Exception as e:
            gt_logger.gt_log_err("Unexpected error while processing '%s' test specification file" % test_spec_file)
            gt_logger.gt_log_tab(str(e))
            merged_test_spec = {}

    test_spec = TestSpec()
    test_spec.parse(merged_test_spec)
    return test_spec
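# Illustrative sketch (added; not part of the original module): the shape of the
# 'builds' section that merge_multiple_test_specifications_from_file_list() merges.
# Build names and fields below are made up for demonstration only.
def _example_merged_builds():
    spec_a = {"builds": {"K64F-ARM": {"platform_name": "K64F", "toolchain": "ARM"}}}
    spec_b = {"builds": {"K64F-GCC": {"platform_name": "K64F", "toolchain": "GCC_ARM"}}}
    merged = {"builds": {}}
    for spec in (spec_a, spec_b):
        # with no build-name collisions the result is simply the union of 'builds' entries
        merged["builds"].update(spec["builds"])
    return merged  # contains both the "K64F-ARM" and "K64F-GCC" builds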
def get_mbed_targets_from_yotta_local_module(mbed_classic_name, yotta_targets_path='./yotta_targets'):
    """! Parses local yotta targets to fetch matching mbed device target names
    @return List of possible targets, or an empty list if no match was found
    """
    result = []

    if not os.path.exists(yotta_targets_path):
        return result

    # All local directories with yotta targets
    target_dirs = [target_dir_name
                   for target_dir_name in os.listdir(yotta_targets_path)
                   if os.path.isdir(os.path.join(yotta_targets_path, target_dir_name))]

    gt_logger.gt_log("local yotta target search in '%s' for compatible mbed-target '%s'" %
                     (gt_logger.gt_bright(yotta_targets_path),
                      gt_logger.gt_bright(mbed_classic_name.lower().strip())))

    for target_dir in target_dirs:
        path = os.path.join(yotta_targets_path, target_dir, 'target.json')
        try:
            with open(path, 'r') as data_file:
                target_json_data = json.load(data_file)
                yotta_target_name = parse_mbed_target_from_target_json(mbed_classic_name, target_json_data)
                if yotta_target_name:
                    target_dir_name = os.path.join(yotta_targets_path, target_dir)
                    gt_logger.gt_log_tab("inside '%s' found compatible target '%s'" %
                                         (gt_logger.gt_bright(target_dir_name),
                                          gt_logger.gt_bright(yotta_target_name)))
                    result.append(yotta_target_name)
        except IOError as e:
            gt_logger.gt_log_err(str(e))
    return result
def run_command(cmd):
    """! Runs command and prints proc stdout on screen
    @param cmd List with command line to execute e.g. ['ls', '-l']
    @return Value returned by subprocess.Popen, if failed return None
    """
    try:
        p = Popen(cmd, stdout=PIPE, stderr=STDOUT)
    except OSError as e:
        gt_logger.gt_log_err("run_host_test.run_command(%s) failed!" % str(cmd))
        gt_logger.gt_log_tab(str(e))
        return None
    return p
def get_parallel_value(value):
    """! Get correct value for parallel switch (--parallel)
    @param value Value passed from --parallel
    @return Refactored version of parallel number
    """
    try:
        parallel_test_exec = int(value)
        if parallel_test_exec < 1:
            parallel_test_exec = 1
    except ValueError:
        gt_logger.gt_log_err("argument of mode --parallel is not an int, disabled parallel mode")
        parallel_test_exec = 1
    return parallel_test_exec
def dump_report_to_text_file(filename, content):
    """! Closure for report dumps to text files
    @param filename Name of destination file
    @param content Text content of the file to write
    @return True if write was successful, else return False
    """
    try:
        with open(filename, "w") as f:
            f.write(content)
    except IOError as e:
        gt_logger.gt_log_err("can't export to '%s', reason:" % filename)
        gt_logger.gt_log_err(str(e))
        return False
    return True
def run_cli_process(self, cmd):
    """! Runs command as a process and return stdout, stderr and ret code
    @param cmd Command to execute
    @return Tuple of (stdout, stderr, returncode)
    """
    _stdout, _stderr, ret = None, None, -1
    try:
        p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        _stdout, _stderr = p.communicate()
        ret = p.returncode
    except OSError as e:
        gt_logger.gt_log_err(str(e))
        ret = -1
    return _stdout, _stderr, ret
def get_coverage_data(build_path, output):
    # Example GCOV output
    # [1456840876.73][CONN][RXD] {{__coverage_start;c:\Work\core-util/source/PoolAllocator.cpp.gcda;6164636772393034c2733f32...a33e...b9}}
    gt_logger.gt_log("checking for GCOV data...")
    re_gcov = re.compile(r"^\[(\d+\.\d+)\][^\{]+\{\{(__coverage_start);([^;]+);([^}]+)\}\}$")
    for line in output.splitlines():
        m = re_gcov.search(line)
        if m:
            _, _, gcov_path, gcov_payload = m.groups()
            try:
                bin_gcov_payload = coverage_pack_hex_payload(gcov_payload)
                coverage_dump_file(build_path, gcov_path, bin_gcov_payload)
                # Log only after the payload was successfully decoded and stored
                gt_logger.gt_log_tab("storing %d bytes in '%s'" % (len(bin_gcov_payload), gcov_path))
            except Exception as e:
                gt_logger.gt_log_err("error while handling GCOV data: " + str(e))
def run(self, format=None):
    """! Runs hook after command is formatted with in-place {tags}
    @param format Format dictionary used to replace hook {tags} with real values, notation used is e.g.: {build_name}
    """
    gt_logger.gt_log("hook '%s' execution" % self.name)
    cmd = self.format_before_run(self.cmd, format)
    gt_logger.gt_log_tab("hook command: %s" % cmd)
    (_stdout, _stderr, ret) = self.run_cli_process(cmd)
    if _stdout:
        print _stdout
    if ret:
        gt_logger.gt_log_err("hook exited with error: %d, dumping stderr..." % ret)
        print _stderr
    return ret
def get_platform_name_from_yotta_target(target):
    """ Parses target string and gives platform name and toolchain
    :param target:
    :return:
    """
    target_json_path = os.path.join('yotta_targets', target, 'target.json')
    if not os.path.exists(target_json_path):
        gt_logger.gt_log_err('Target json does not exist [%s].\n' % target_json_path +
                             'mbed TAS Executor {greentea} must be run inside a pre built yotta module!')
        return None

    with open(target_json_path, 'r') as f:
        data = f.read()

    try:
        target_json = json.loads(data)
    except (TypeError, ValueError) as e:
        gt_logger.gt_log_err('Failed to load json data from target.json! error [%s]\n' % str(e) +
                             'Can not determine required mbed platform name!')
        return None

    if 'keywords' not in target_json:
        gt_logger.gt_log_err("No 'keywords' in target.json! Can not determine required mbed platform name!")
        return None

    platform_name = None
    for keyword in target_json['keywords']:
        m = re.search('mbed-target:(.*)', keyword)
        if m is not None:
            platform_name = m.group(1).upper()

    if platform_name is None:
        gt_logger.gt_log_err('No keyword with format "mbed-target:<platform name>" found in target.json!\n' +
                             'Can not determine required mbed platform name!')
        return None

    return platform_name
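# Illustrative sketch (added; not part of the original module): how the
# "mbed-target:<platform>" keyword in target.json maps to a platform name.
# The target.json content below is a made-up sample, not a real yotta target.
def _example_platform_name_from_keywords():
    import re
    sample_target_json = {
        "name": "frdm-k64f-gcc",
        "keywords": ["mbed-target:k64f", "gcc"],
    }
    platform_name = None
    for keyword in sample_target_json["keywords"]:
        m = re.search("mbed-target:(.*)", keyword)
        if m is not None:
            platform_name = m.group(1).upper()
    return platform_name  # -> "K64F"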
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is main CLI function with all command line parameters
    @details This function also implements CLI workflow depending on CLI parameters provided
    @return This function doesn't return, it exits to environment with proper success code
    """

    def filter_ready_devices(mbeds_list):
        """! Filters list of MUTs to check if all MUTs are correctly detected with mbed-ls module.
        @details This function logs a lot to help users figure out root cause of their problems
        @param mbeds_list List of MUTs to verify
        @return Tuple of (MUTS detected correctly, MUTs not detected fully)
        """
        ready_mbed_devices = []      # Devices which can be used (are fully detected)
        not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

        gt_logger.gt_log("detected %d device%s" % (len(mbeds_list), "s" if len(mbeds_list) != 1 else ""))
        for mut in mbeds_list:
            if not all(mut.values()):
                gt_logger.gt_log_err("mbed-ls was unable to enumerate correctly all properties of the device!")
                gt_logger.gt_log_tab("check with 'mbedls -j' command if all properties of your device are enumerated properly")
                for prop in mut:
                    if not mut[prop]:
                        # Adding MUT to NOT DETECTED FULLY list
                        if mut not in not_ready_mbed_devices:
                            not_ready_mbed_devices.append(mut)
                        gt_logger.gt_log_err("mbed-ls property '%s' is '%s'" % (prop, str(mut[prop])))
                        if prop == "serial_port":
                            gt_logger.gt_log_tab("check if your serial port driver is correctly installed!")
                        if prop == "mount_point":
                            gt_logger.gt_log_tab("check if your OS can detect and mount mbed device mount point!")
            else:
                # Adding MUT to DETECTED CORRECTLY list
                ready_mbed_devices.append(mut)
        return (ready_mbed_devices, not_ready_mbed_devices)

    def get_parallel_value(value):
        """! Get correct value for parallel switch (--parallel)
        @param value Value passed from --parallel
        @return Refactored version of parallel number
        """
        try:
            parallel_test_exec = int(value)
            if parallel_test_exec < 1:
                parallel_test_exec = 1
        except ValueError:
            gt_logger.gt_log_err("argument of mode --parallel is not an int, disabled parallel mode")
            parallel_test_exec = 1
        return parallel_test_exec

    if not MBED_LMTOOLS:
        gt_logger.gt_log_err("error: mbed-ls proprietary module not installed")
        return -1

    if not MBED_HOST_TESTS:
        gt_logger.gt_log_err("error: mbed-host-tests proprietary module not installed")
        return -1

    # This is how you magically control colours in this piece of art software
    gt_logger.colorful(not opts.plain)

    # Prints version and exits
    if opts.version:
        print_version()
        return 0

    # Load test specification or print warnings / info messages and exit CLI mode
    test_spec, ret = get_test_spec(opts)
    if not test_spec:
        return ret

    # Verbose flag
    verbose = opts.verbose_test_result_only

    # We will load hooks from JSON file to support extra behaviour during test execution
    greentea_hooks = GreenteaHooks(opts.hooks_json) if opts.hooks_json else None

    # Capture alternative test console inputs, used e.g. in 'yotta test command'
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(None,
                                         None,
                                         None,
                                         None,
                                         None,
                                         hooks=greentea_hooks,
                                         digest_source=opts.digest_source,
                                         enum_host_tests_path=enum_host_tests_path,
                                         verbose=verbose)

        # Some error in htrun, abort test execution
        if isinstance(host_test_result, int):
            # int(host_test_result) > 0 - Call to mbedhtrun failed
            # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
            return host_test_result

        # If execution was successful 'run_host_test' returns tuple with results
        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
        status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
        return status

    ### Query with mbedls for available mbed-enabled devices
    gt_logger.gt_log("detecting connected mbed-enabled devices...")

    # Detect devices connected to system
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds_ext()

    if opts.global_resource_mgr:
        # Mocking available platform requested by --grm switch
        grm_values = parse_global_resource_mgr(opts.global_resource_mgr)
        if grm_values:
            gt_logger.gt_log_warn("entering global resource manager mbed-ls dummy mode!")
            grm_platform_name, grm_module_name, grm_ip_name, grm_port_name = grm_values
            mbeds_list = []
            mbeds_list.append(mbeds.get_dummy_platform(grm_platform_name))
            opts.global_resource_mgr = ":".join(grm_values[1:])
            gt_logger.gt_log_tab("adding dummy platform '%s'" % grm_platform_name)
        else:
            gt_logger.gt_log("global resource manager switch '--grm %s' in wrong format!" % opts.global_resource_mgr)
            return -1

    ready_mbed_devices = []      # Devices which can be used (are fully detected)
    not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

    if mbeds_list:
        ready_mbed_devices, not_ready_mbed_devices = filter_ready_devices(mbeds_list)
        if ready_mbed_devices:
            # devices in form of a pretty formatted table
            for line in log_mbed_devices_in_table(ready_mbed_devices).splitlines():
                gt_logger.gt_log_tab(line.strip(), print_text=verbose)
    else:
        gt_logger.gt_log_err("no compatible devices detected")
        return RET_NO_DEVICES

    ### We can filter in only specific target ids
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_logger.gt_log("filtering out target ids not on below list (specified with --use-tids switch)")
        accepted_target_ids = opts.use_target_ids.split(",")
        for tid in accepted_target_ids:
            gt_logger.gt_log_tab("accepting target id '%s'" % gt_logger.gt_bright(tid))

    test_exec_retcode = 0        # Decrement this value each time test case result is not 'OK'
    test_platforms_match = 0     # Count how many tests were actually ran with current settings
    target_platforms_match = 0   # Count how many platforms were actually tested with current settings
    test_report = {}             # Test report used to export to Junit, HTML etc...
    muts_to_test = []            # MUTs to actually be tested
    test_queue = Queue()         # contains information about test_bin and image_path for each test case
    test_result_queue = Queue()  # used to store results of each thread
    execute_threads = []         # list of threads to run test cases

    ### check if argument of --parallel mode is an integer and greater or equal 1
    parallel_test_exec = get_parallel_value(opts.parallel_test_exec)

    # Values used to generate random seed for test execution order shuffle
    SHUFFLE_SEED_ROUND = 10  # Value used to round float random seed
    shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND)

    # Set shuffle seed if it is provided with command line option
    if opts.shuffle_test_seed:
        shuffle_random_seed = round(float(opts.shuffle_test_seed), SHUFFLE_SEED_ROUND)

    ### Testing procedures, for each target, for each target's compatible platform
    # In case we are using test spec (switch --test-spec) command line option -t <list_of_targets>
    # is used to enumerate builds from test spec we are supplying
    filter_test_builds = opts.list_of_targets.split(",") if opts.list_of_targets else None

    for test_build in test_spec.get_test_builds(filter_test_builds):
        platform_name = test_build.get_platform()
        gt_logger.gt_log("processing target '%s' toolchain '%s' compatible platforms... (note: switch set to --parallel %d)" %
                         (gt_logger.gt_bright(platform_name),
                          gt_logger.gt_bright(test_build.get_toolchain()),
                          int(opts.parallel_test_exec)))

        baudrate = test_build.get_baudrate()

        ### Select MUTS to test from list of available MUTS to start testing
        mut = None
        number_of_parallel_instances = 1
        for mbed_dev in ready_mbed_devices:
            if accepted_target_ids and mbed_dev["target_id"] not in accepted_target_ids:
                continue

            if mbed_dev["platform_name"] == platform_name:
                # We will force configuration specific baudrate by adding baudrate to serial port
                # Only add baudrate decoration for serial port if it's not already there
                # Format used by mbedhtrun: 'serial_port' = '<serial_port_name>:<baudrate>'
                if not mbed_dev["serial_port"].endswith(str(baudrate)):
                    mbed_dev["serial_port"] = "%s:%d" % (mbed_dev["serial_port"], baudrate)

                mut = mbed_dev
                muts_to_test.append(mbed_dev)
                if number_of_parallel_instances < parallel_test_exec:
                    number_of_parallel_instances += 1
                else:
                    break

        # devices in form of a pretty formatted table
        for line in log_mbed_devices_in_table(muts_to_test).splitlines():
            gt_logger.gt_log_tab(line.strip(), print_text=verbose)

        # Configuration print mode:
        if opts.verbose_test_configuration_only:
            continue

        ### If we have at least one available device we can proceed
        if mut:
            target_platforms_match += 1

            build = test_build.get_name()
            build_path = test_build.get_path()

            # Demo mode: --run implementation (already added --run to mbedhtrun)
            # We want to pass file name to mbedhtrun (--run NAME => -f NAME) and run only one binary
            if opts.run_app:
                gt_logger.gt_log("running '%s' for '%s'-'%s'" %
                                 (gt_logger.gt_bright(opts.run_app),
                                  gt_logger.gt_bright(platform_name),
                                  gt_logger.gt_bright(test_build.get_toolchain())))
                disk = mut["mount_point"]
                port = mut["serial_port"]
                micro = mut["platform_name"]
                program_cycle_s = get_platform_property(micro, "program_cycle_s")
                copy_method = opts.copy_method if opts.copy_method else "shell"
                enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

                test_platforms_match += 1
                host_test_result = run_host_test(opts.run_app,
                                                 disk,
                                                 port,
                                                 build_path,
                                                 mut["target_id"],
                                                 micro=micro,
                                                 copy_method=copy_method,
                                                 program_cycle_s=program_cycle_s,
                                                 digest_source=opts.digest_source,
                                                 json_test_cfg=opts.json_test_configuration,
                                                 run_app=opts.run_app,
                                                 enum_host_tests_path=enum_host_tests_path,
                                                 verbose=True)

                # Some error in htrun, abort test execution
                if isinstance(host_test_result, int):
                    # int(host_test_result) > 0 - Call to mbedhtrun failed
                    # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
                    return host_test_result

                # If execution was successful 'run_host_test' returns tuple with results
                single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result

                status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
                if single_test_result != TEST_RESULT_OK:
                    test_exec_retcode += 1

            test_list = test_build.get_tests()
            filtered_ctest_test_list = create_filtered_test_list(test_list, opts.test_by_names, opts.skip_test, test_spec=test_spec)

            gt_logger.gt_log("running %d test%s for platform '%s' and toolchain '%s'" %
                             (len(filtered_ctest_test_list),
                              "s" if len(filtered_ctest_test_list) != 1 else "",
                              gt_logger.gt_bright(platform_name),
                              gt_logger.gt_bright(test_build.get_toolchain())))

            # Test execution order can be shuffled (also with provided random seed)
            # for test execution reproduction.
            filtered_ctest_test_list_keys = filtered_ctest_test_list.keys()
            if opts.shuffle_test_order:
                # We want to shuffle test names randomly
                random.shuffle(filtered_ctest_test_list_keys, lambda: shuffle_random_seed)

            for test_name in filtered_ctest_test_list_keys:
                image_path = filtered_ctest_test_list[test_name].get_binary(binary_type=TestBinary.BIN_TYPE_BOOTABLE).get_path()
                if image_path is None:
                    gt_logger.gt_log_err("Failed to find test binary for test %s flash method %s" % (test_name, "usb"))
                else:
                    test = {"test_bin": test_name, "image_path": image_path}
                    test_queue.put(test)

            number_of_threads = 0
            for mut in muts_to_test:
                # Experimental, parallel test execution
                if number_of_threads < parallel_test_exec:
                    args = (test_result_queue, test_queue, opts, mut, build, build_path, greentea_hooks)
                    t = Thread(target=run_test_thread, args=args)
                    execute_threads.append(t)
                    number_of_threads += 1

        gt_logger.gt_log_tab("use %s instance%s of execution threads for testing" %
                             (len(execute_threads), "s" if len(execute_threads) != 1 else str()),
                             print_text=verbose)
        for t in execute_threads:
            t.daemon = True
            t.start()

        # merge partial test reports from different threads to final test report
        for t in execute_threads:
            try:
                t.join()  # blocking
                test_return_data = test_result_queue.get(False)
            except Exception as e:
                # No test report generated
                gt_logger.gt_log_err("could not generate test report" + str(e))
                test_exec_retcode += -1000
                return test_exec_retcode

            test_platforms_match += test_return_data["test_platforms_match"]
            test_exec_retcode += test_return_data["test_exec_retcode"]
            partial_test_report = test_return_data["test_report"]
            # todo: find better solution, maybe use extend
            for report_key in partial_test_report.keys():
                if report_key not in test_report:
                    test_report[report_key] = {}
                    test_report.update(partial_test_report)
                else:
                    test_report[report_key].update(partial_test_report[report_key])

        execute_threads = []

    if opts.verbose_test_configuration_only:
        print
        print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
        return 0

    gt_logger.gt_log("all tests finished!")

    # We will execute post test hooks on tests
    for build_name in test_report:
        test_name_list = []  # All test case names for particular yotta target
        for test_name in test_report[build_name]:
            test = test_report[build_name][test_name]
            # Test was successful
            if test["single_test_result"] in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
                test_name_list.append(test_name)
                # Call hook executed for each test, just after all tests are finished
                if greentea_hooks:
                    # We can execute this test hook just after all tests are finished ('hook_post_test_end')
                    format = {
                        "test_name": test_name,
                        "test_bin_name": test["test_bin_name"],
                        "image_path": test["image_path"],
                        "build_path": test["build_path"],
                        "build_path_abs": test["build_path_abs"],
                    }
                    greentea_hooks.run_hook_ext("hook_post_test_end", format)
        if greentea_hooks:
            build = test_spec.get_test_build(build_name)
            assert build is not None, "Failed to find build info for build %s" % build_name
            # Call hook executed for each yotta target, just after all tests are finished
            build_path = build.get_path()
            build_path_abs = os.path.abspath(build_path)
            # We can execute this test hook just after all tests are finished ('hook_post_test_end')
            format = {
                "build_path": build_path,
                "build_path_abs": build_path_abs,
                "test_name_list": test_name_list,
            }
            greentea_hooks.run_hook_ext("hook_post_all_test_end", format)

    # This tool is designed to work in CI
    # We want to return success codes based on tool actions,
    # only if tests were executed and all passed we want to
    # return 0 (success)
    if not opts.only_build_tests:
        # Prints shuffle seed
        gt_logger.gt_log("shuffle seed: %.*f" % (SHUFFLE_SEED_ROUND, shuffle_random_seed))

        def dump_report_to_text_file(filename, content):
            """! Closure for report dumps to text files
            @param filename Name of destination file
            @param content Text content of the file to write
            @return True if write was successful, else return False
            """
            try:
                with open(filename, "w") as f:
                    f.write(content)
            except IOError as e:
                gt_logger.gt_log_err("can't export to '%s', reason:" % filename)
                gt_logger.gt_log_err(str(e))
                return False
            return True

        # Reports to JUNIT file
        if opts.report_junit_file_name:
            gt_logger.gt_log("exporting to JUNIT file '%s'..." % gt_logger.gt_bright(opts.report_junit_file_name))
            # This test specification will be used by JUnit exporter to populate TestSuite.properties (useful meta-data for Viewer)
            test_suite_properties = {}
            for target_name in test_report:
                test_build_properties = get_test_build_properties(test_spec, target_name)
                if test_build_properties:
                    test_suite_properties[target_name] = test_build_properties
            junit_report = exporter_testcase_junit(test_report, test_suite_properties=test_suite_properties)
            dump_report_to_text_file(opts.report_junit_file_name, junit_report)

        # Reports to text file
        if opts.report_text_file_name:
            gt_logger.gt_log("exporting to TEXT '%s'..." % gt_logger.gt_bright(opts.report_text_file_name))
            # Useful text reporter for those who do not like to copy paste to files table with results
            text_report, text_results = exporter_text(test_report)
            text_testcase_report, text_testcase_results = exporter_testcase_text(test_report)
            text_final_report = "\n".join([text_report, text_results, text_testcase_report, text_testcase_results])
            dump_report_to_text_file(opts.report_text_file_name, text_final_report)

        # Reports to JSON file
        if opts.report_json_file_name:
            # We will not print summary and json report together
            gt_logger.gt_log("exporting to JSON '%s'..." % gt_logger.gt_bright(opts.report_json_file_name))
            json_report = exporter_json(test_report)
            dump_report_to_text_file(opts.report_json_file_name, json_report)

        # Reports to HTML file
        if opts.report_html_file_name:
            gt_logger.gt_log("exporting to HTML file '%s'..." % gt_logger.gt_bright(opts.report_html_file_name))
            # Generate a HTML page displaying all of the results
            html_report = exporter_html(test_report)
            dump_report_to_text_file(opts.report_html_file_name, html_report)

        # Final summary
        if test_report:
            # Test suite report
            gt_logger.gt_log("test suite report:")
            text_report, text_results = exporter_text(test_report)
            print text_report
            gt_logger.gt_log("test suite results: " + text_results)
            # test case detailed report
            gt_logger.gt_log("test case report:")
            text_testcase_report, text_testcase_results = exporter_testcase_text(test_report)
            print text_testcase_report
            gt_logger.gt_log("test case results: " + text_testcase_results)

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_logger.gt_log_warn("no platform/target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_logger.gt_log_warn("no matching platforms were found!")
            test_exec_retcode += -100

    return test_exec_retcode
def get_test_spec_from_yt_module(opts):
    """ Gives test specification created from yotta module environment.
    :return TestSpec:
    """
    ### Read yotta module basic information
    yotta_module = YottaModule()
    yotta_module.init()  # Read actual yotta module data

    # Check if NO greentea-client is in module.json of repo to test, if so abort
    if not yotta_module.check_greentea_client():
        error = """
        *****************************************************************************************
        * We've noticed that NO 'greentea-client' module is specified in                        *
        * dependency/testDependency section of this module's 'module.json' file.                *
        *                                                                                       *
        * This version of Greentea requires 'greentea-client' module.                           *
        * Please downgrade to Greentea before v0.2.0:                                           *
        *                                                                                       *
        * $ pip install "mbed-greentea<0.2.0" --upgrade                                         *
        *                                                                                       *
        * or port your tests to new Async model: https://github.com/ARMmbed/greentea/pull/78    *
        *****************************************************************************************
        """
        raise YottaError(error)

    test_spec = TestSpec()

    ### Selecting yotta targets to process
    yt_targets = []  # List of yotta targets specified by user used to process during this run
    if opts.list_of_targets:
        yt_targets = opts.list_of_targets.split(',')
    else:
        # Trying to use locally set yotta target
        gt_logger.gt_log("checking for yotta target in current directory")
        gt_logger.gt_log_tab("reason: no --target switch set")
        current_target = get_mbed_target_from_current_dir()
        if current_target:
            gt_logger.gt_log("assuming default target as '%s'" % gt_logger.gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            yt_targets = [current_target]
        else:
            gt_logger.gt_log_tab("yotta target in current directory is not set")
            gt_logger.gt_log_err("yotta target is not specified. Use '%s' or '%s' command to set target" %
                                 (gt_logger.gt_bright('mbedgt -t <yotta_target>'),
                                  gt_logger.gt_bright('yotta target <yotta_target>')))
            raise YottaError("Yotta target not set in current directory!")

    ### Use yotta to search mapping between platform names and available platforms
    # Convert platform:target, ... mapping to data structure
    yt_target_to_map_platform = {}
    if opts.map_platform_to_yt_target:
        gt_logger.gt_log("user defined platform -> target supported mapping definition (specified with --map-target switch)")
        for mapping in opts.map_platform_to_yt_target.split(','):
            if len(mapping.split(':')) == 2:
                yt_target, platform = mapping.split(':')
                yt_target_to_map_platform[yt_target] = platform
                gt_logger.gt_log_tab("mapped yotta target '%s' to be compatible with platform '%s'" %
                                     (gt_logger.gt_bright(yt_target),
                                      gt_logger.gt_bright(platform)))
            else:
                gt_logger.gt_log_tab("unknown format '%s', use 'target:platform' format" % mapping)

    for yt_target in yt_targets:
        if yt_target in yt_target_to_map_platform:
            platform = yt_target_to_map_platform[yt_target]
        else:
            # get it from local Yotta target
            platform = get_platform_name_from_yotta_target(yt_target)

        # Toolchain doesn't matter as Greentea does not have to do any selection for it unlike platform
        toolchain = yt_target
        yotta_config = YottaConfig()
        yotta_config.init(yt_target)
        baud_rate = yotta_config.get_baudrate()
        base_path = os.path.join('.', 'build', yt_target)
        tb = TestBuild(yt_target, platform, toolchain, baud_rate, base_path)
        test_spec.add_test_builds(yt_target, tb)

        # Find tests
        ctest_test_list = load_ctest_testsuite(base_path, binary_type=get_binary_type_for_platform(platform))
        for name, path in ctest_test_list.items():
            t = Test(name)
            t.add_binary(path, TestBinary.BIN_TYPE_BOOTABLE)
            tb.add_test(name, t)

    return test_spec
def get_test_spec(opts):
    """! Closure encapsulating how we get test specification and load it from file or from yotta module
    @return Returns tuple of (test specification, ret code). Test specification == None if test spec load was not successful
    """
    test_spec = None

    # Check if test_spec.json file exists, if so we will pick it up as default file and load it
    test_spec_file_name = opts.test_spec
    test_spec_file_name_list = []

    # Note: test_spec.json will have higher priority than module.json file
    #       so if we are inside directory with module.json and test_spec.json we will use test spec file
    #       instead of using yotta's module.json file

    def get_all_test_specs_from_build_dir(path_to_scan):
        """! Searches for all test_spec.json files
        @param path_to_scan Directory path used to recursively search for test_spec.json
        @result List of locations of test_spec.json
        """
        return [os.path.join(dp, f)
                for dp, dn, filenames in os.walk(path_to_scan)
                for f in filenames
                if f == 'test_spec.json']

    def merge_multiple_test_specifications_from_file_list(test_spec_file_name_list):
        """! For each file in test_spec_file_name_list merge all test specifications into one
        @param test_spec_file_name_list List of paths to different test specifications
        @return TestSpec object with all test specification data inside
        """

        def copy_builds_between_test_specs(source, destination):
            """! Copies build key-value pairs between two test_spec dicts
            @param source Source dictionary
            @param destination Dictionary to which the 'builds' key-values will be applied
            @return Dictionary with merged source
            """
            result = destination.copy()
            if 'builds' in source and 'builds' in destination:
                for k in source['builds']:
                    result['builds'][k] = source['builds'][k]
            return result

        merged_test_spec = {}
        for test_spec_file in test_spec_file_name_list:
            gt_logger.gt_log_tab("using '%s'" % test_spec_file)
            try:
                with open(test_spec_file, 'r') as f:
                    test_spec_data = json.load(f)
                    merged_test_spec = copy_builds_between_test_specs(merged_test_spec, test_spec_data)
            except Exception as e:
                gt_logger.gt_log_err("Unexpected error while processing '%s' test specification file" % test_spec_file)
                gt_logger.gt_log_tab(str(e))
                merged_test_spec = {}

        test_spec = TestSpec()
        test_spec.parse(merged_test_spec)
        return test_spec

    # Test specification look-up
    if opts.test_spec:
        # Loading test specification from command line specified file
        gt_logger.gt_log("test specification file '%s' (specified with --test-spec option)" % opts.test_spec)
    elif os.path.exists('test_spec.json'):
        # Test specification file exists in current directory
        gt_logger.gt_log("using 'test_spec.json' from current directory!")
        test_spec_file_name = 'test_spec.json'
    elif os.path.exists('.build'):
        # Checking .build directory for test specifications
        test_spec_file_name_list = get_all_test_specs_from_build_dir('.build')
    elif os.path.exists(os.path.join('mbed-os', '.build')):
        # Checking mbed-os/.build directory for test specifications
        test_spec_file_name_list = get_all_test_specs_from_build_dir(os.path.join('mbed-os', '.build'))

    # Actual load and processing of test specification from sources
    if test_spec_file_name:
        # Test specification from command line (--test-spec) or default test_spec.json will be used
        gt_logger.gt_log("using '%s' from current directory!" % test_spec_file_name)
        test_spec = TestSpec(test_spec_file_name)
        if opts.list_binaries:
            list_binaries_for_builds(test_spec)
            return None, 0
    elif test_spec_file_name_list:
        # Merge multiple test specs into one and keep calm
        gt_logger.gt_log("using multiple test specifications from current directory!")
        test_spec = merge_multiple_test_specifications_from_file_list(test_spec_file_name_list)
        if opts.list_binaries:
            list_binaries_for_builds(test_spec)
            return None, 0
    elif os.path.exists('module.json'):
        # If inside yotta module load module data and generate test spec
        gt_logger.gt_log("using 'module.json' from current directory!")
        if opts.list_binaries:
            # List available test binaries (names, no extension)
            list_binaries_for_targets()
            return None, 0
        else:
            test_spec = get_test_spec_from_yt_module(opts)
    else:
        gt_logger.gt_log_err("greentea should be run inside a Yotta module or --test-spec switch should be used")
        return None, -1
    return test_spec, 0
import sys  # used below for the sys.modules checks; missing from this import fragment

from mbed_greentea.mbed_target_info import get_mbed_target_from_current_dir
from mbed_greentea.mbed_greentea_log import gt_logger
from mbed_greentea.mbed_greentea_dlm import GREENTEA_KETTLE_PATH
from mbed_greentea.mbed_greentea_dlm import greentea_get_app_sem
from mbed_greentea.mbed_greentea_dlm import greentea_update_kettle
from mbed_greentea.mbed_greentea_dlm import greentea_clean_kettle
from mbed_greentea.mbed_yotta_api import build_with_yotta
from mbed_greentea.mbed_greentea_hooks import GreenteaHooks
from mbed_greentea.mbed_yotta_module_parse import YottaConfig
from mbed_greentea.mbed_yotta_module_parse import YottaModule

try:
    import mbed_lstools
    import mbed_host_tests
except ImportError as e:
    gt_logger.gt_log_err("Not all required Python modules were imported!")
    gt_logger.gt_log_err(str(e))
    gt_logger.gt_log("Check if:")
    gt_logger.gt_log_tab("1. You've correctly installed dependency module using setup tools or pip:")
    gt_logger.gt_log_tab("* python setup.py install", tab_count=2)
    gt_logger.gt_log_tab("* pip install <module-name>", tab_count=2)
    gt_logger.gt_log_tab("2. There are no errors preventing import in dependency modules")
    gt_logger.gt_log_tab("See: https://github.com/ARMmbed/greentea#installing-greentea")
    exit(-2342)

MBED_LMTOOLS = 'mbed_lstools' in sys.modules
MBED_HOST_TESTS = 'mbed_host_tests' in sys.modules
def run_test_thread(test_result_queue, test_queue, opts, mut, mut_info, yotta_target_name):
    test_exec_retcode = 0
    test_platforms_match = 0
    test_report = {}

    yotta_config_baudrate = None  # Default serial port baudrate forced by configuration
    yotta_config = YottaConfig()
    yotta_config.init(yotta_target_name)
    yotta_config_baudrate = yotta_config.get_baudrate()

    while not test_queue.empty():
        try:
            test = test_queue.get(False)
        except Exception as e:
            gt_logger.gt_log_err(str(e))
            break

        test_result = 'SKIPPED'
        disk = mut['mount_point']
        port = mut['serial_port']
        micro = mut['platform_name']
        program_cycle_s = mut_info['properties']['program_cycle_s']
        copy_method = opts.copy_method if opts.copy_method else 'shell'
        verbose = opts.verbose_test_result_only
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

        # We will force configuration specific baudrate
        if port:
            port = "%s:%d" % (port, yotta_config_baudrate)

        test_platforms_match += 1
        host_test_result = run_host_test(test['image_path'],
                                         disk,
                                         port,
                                         micro=micro,
                                         copy_method=copy_method,
                                         program_cycle_s=program_cycle_s,
                                         digest_source=opts.digest_source,
                                         json_test_cfg=opts.json_test_configuration,
                                         enum_host_tests_path=enum_host_tests_path,
                                         verbose=verbose)

        single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
        test_result = single_test_result
        if single_test_result != TEST_RESULT_OK:
            test_exec_retcode += 1

        # Update report for optional reporting feature
        test_name = test['test_bin'].lower()
        if yotta_target_name not in test_report:
            test_report[yotta_target_name] = {}
        if test_name not in test_report[yotta_target_name]:
            test_report[yotta_target_name][test_name] = {}

        test_report[yotta_target_name][test_name]['single_test_result'] = single_test_result
        test_report[yotta_target_name][test_name]['single_test_output'] = single_test_output
        test_report[yotta_target_name][test_name]['elapsed_time'] = single_testduration
        test_report[yotta_target_name][test_name]['platform_name'] = micro
        test_report[yotta_target_name][test_name]['copy_method'] = copy_method

        gt_logger.gt_log("test on hardware with target id: %s \n\ttest '%s' %s %s in %.2f sec" %
                         (mut['target_id'],
                          test['test_bin'],
                          '.' * (80 - len(test['test_bin'])),
                          test_result,
                          single_testduration))

        if single_test_result != 'OK' and not verbose and opts.report_fails:
            # In some cases we want to print console to see why test failed
            # even if we are not in verbose mode
            gt_logger.gt_log_tab("test failed, reporting console output (specified with --report-fails option)")
            print
            print single_test_output

    #greentea_release_target_id(mut['target_id'], gt_instance_uuid)
    test_result_queue.put({'test_platforms_match': test_platforms_match,
                           'test_exec_retcode': test_exec_retcode,
                           'test_report': test_report})
    return
def main_cli(opts, args, gt_instance_uuid=None): """! This is main CLI function with all command line parameters @details This function also implements CLI workflow depending on CLI parameters inputed @return This function doesn't return, it exits to environment with proper success code """ def filter_ready_devices(mbeds_list): """! Filters list of MUTs to check if all MUTs are correctly detected with mbed-ls module. @details This function logs a lot to help users figure out root cause of their problems @param mbeds_list List of MUTs to verify @return Tuple of (MUTS detected correctly, MUTs not detected fully) """ ready_mbed_devices = [ ] # Devices which can be used (are fully detected) not_ready_mbed_devices = [ ] # Devices which can't be used (are not fully detected) gt_logger.gt_log( "detected %d device%s" % (len(mbeds_list), 's' if len(mbeds_list) != 1 else '')) for mut in mbeds_list: if not all(mut.values()): gt_logger.gt_log_err( "mbed-ls was unable to enumerate correctly all properties of the device!" ) gt_logger.gt_log_tab( "check with 'mbedls -j' command if all properties of your device are enumerated properly" ) for prop in mut: if not mut[prop]: # Adding MUT to NOT DETECTED FULLY list if mut not in not_ready_mbed_devices: not_ready_mbed_devices.append(mut) gt_logger.gt_log_err("mbed-ls property '%s' is '%s'" % (prop, str(mut[prop]))) if prop == 'serial_port': gt_logger.gt_log_tab( "check if your serial port driver is correctly installed!" ) if prop == 'mount_point': gt_logger.gt_log_tab( 'check if your OS can detect and mount mbed device mount point!' ) else: # Adding MUT to DETECTED CORRECTLY list ready_mbed_devices.append(mut) return (ready_mbed_devices, not_ready_mbed_devices) def get_parallel_value(value): """! Get correct value for parallel switch (--parallel) @param value Value passed from --parallel @return Refactored version of parallel number """ try: parallel_test_exec = int(value) if parallel_test_exec < 1: parallel_test_exec = 1 except ValueError: gt_logger.gt_log_err( "argument of mode --parallel is not a int, disabled parallel mode" ) parallel_test_exec = 1 return parallel_test_exec if not MBED_LMTOOLS: gt_logger.gt_log_err("error: mbed-ls proprietary module not installed") return (-1) if not MBED_HOST_TESTS: gt_logger.gt_log_err( "error: mbed-host-tests proprietary module not installed") return (-1) # This is how you magically control colours in this piece of art software gt_logger.colorful(not opts.plain) # Prints version and exits if opts.version: print_version() return (0) # Load test specification or print warnings / info messages and exit CLI mode test_spec, ret = get_test_spec(opts) if not test_spec: return ret # Verbose flag verbose = opts.verbose_test_result_only # We will load hooks from JSON file to support extra behaviour during test execution greentea_hooks = GreenteaHooks( opts.hooks_json) if opts.hooks_json else None # Capture alternative test console inputs, used e.g. 
in 'yotta test command' if opts.digest_source: enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests) host_test_result = run_host_test( None, None, None, None, None, hooks=greentea_hooks, digest_source=opts.digest_source, enum_host_tests_path=enum_host_tests_path, verbose=verbose) # Some error in htrun, abort test execution if isinstance(host_test_result, int): # int(host_test_result) > 0 - Call to mbedhtrun failed # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun return host_test_result # If execution was successful 'run_host_test' return tuple with results single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result status = TEST_RESULTS.index( single_test_result) if single_test_result in TEST_RESULTS else -1 return (status) ### Query with mbedls for available mbed-enabled devices gt_logger.gt_log("detecting connected mbed-enabled devices...") # Detect devices connected to system mbeds = mbed_lstools.create() mbeds_list = mbeds.list_mbeds_ext() if opts.global_resource_mgr: # Mocking available platform requested by --grm switch grm_values = parse_global_resource_mgr(opts.global_resource_mgr) if grm_values: gt_logger.gt_log_warn( "entering global resource manager mbed-ls dummy mode!") grm_platform_name, grm_module_name, grm_ip_name, grm_port_name = grm_values mbeds_list = [] mbeds_list.append(mbeds.get_dummy_platform(grm_platform_name)) opts.global_resource_mgr = ':'.join(grm_values[1:]) gt_logger.gt_log_tab("adding dummy platform '%s'" % grm_platform_name) else: gt_logger.gt_log( "global resource manager switch '--grm %s' in wrong format!" % opts.global_resource_mgr) return (-1) ready_mbed_devices = [] # Devices which can be used (are fully detected) not_ready_mbed_devices = [ ] # Devices which can't be used (are not fully detected) if mbeds_list: ready_mbed_devices, not_ready_mbed_devices = filter_ready_devices( mbeds_list) if ready_mbed_devices: # devices in form of a pretty formatted table for line in log_mbed_devices_in_table( ready_mbed_devices).splitlines(): gt_logger.gt_log_tab(line.strip(), print_text=verbose) else: gt_logger.gt_log_err("no compatible devices detected") return (RET_NO_DEVICES) ### We can filter in only specific target ids accepted_target_ids = None if opts.use_target_ids: gt_logger.gt_log( "filtering out target ids not on below list (specified with --use-tids switch)" ) accepted_target_ids = opts.use_target_ids.split(',') for tid in accepted_target_ids: gt_logger.gt_log_tab("accepting target id '%s'" % gt_logger.gt_bright(tid)) test_exec_retcode = 0 # Decrement this value each time test case result is not 'OK' test_platforms_match = 0 # Count how many tests were actually ran with current settings target_platforms_match = 0 # Count how many platforms were actually tested with current settings test_report = {} # Test report used to export to Junit, HTML etc... 
muts_to_test = [] # MUTs to actually be tested test_queue = Queue( ) # contains information about test_bin and image_path for each test case test_result_queue = Queue() # used to store results of each thread execute_threads = [] # list of threads to run test cases ### check if argument of --parallel mode is a integer and greater or equal 1 parallel_test_exec = get_parallel_value(opts.parallel_test_exec) # Values used to generate random seed for test execution order shuffle SHUFFLE_SEED_ROUND = 10 # Value used to round float random seed shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND) # Set shuffle seed if it is provided with command line option if opts.shuffle_test_seed: shuffle_random_seed = round(float(opts.shuffle_test_seed), SHUFFLE_SEED_ROUND) ### Testing procedures, for each target, for each target's compatible platform # In case we are using test spec (switch --test-spec) command line option -t <list_of_targets> # is used to enumerate builds from test spec we are supplying filter_test_builds = opts.list_of_targets.split( ',') if opts.list_of_targets else None for test_build in test_spec.get_test_builds(filter_test_builds): platform_name = test_build.get_platform() gt_logger.gt_log( "processing target '%s' toolchain '%s' compatible platforms... (note: switch set to --parallel %d)" % (gt_logger.gt_bright(platform_name), gt_logger.gt_bright( test_build.get_toolchain()), int(opts.parallel_test_exec))) baudrate = test_build.get_baudrate() ### Select MUTS to test from list of available MUTS to start testing mut = None number_of_parallel_instances = 1 for mbed_dev in ready_mbed_devices: if accepted_target_ids and mbed_dev[ 'target_id'] not in accepted_target_ids: continue if mbed_dev['platform_name'] == platform_name: # We will force configuration specific baudrate by adding baudrate to serial port # Only add baudrate decoration for serial port if it's not already there # Format used by mbedhtrun: 'serial_port' = '<serial_port_name>:<baudrate>' if not mbed_dev['serial_port'].endswith(str(baudrate)): mbed_dev['serial_port'] = "%s:%d" % ( mbed_dev['serial_port'], baudrate) mut = mbed_dev muts_to_test.append(mbed_dev) if number_of_parallel_instances < parallel_test_exec: number_of_parallel_instances += 1 else: break # devices in form of a pretty formatted table for line in log_mbed_devices_in_table(muts_to_test).splitlines(): gt_logger.gt_log_tab(line.strip(), print_text=verbose) # Configuration print mode: if opts.verbose_test_configuration_only: continue ### If we have at least one available device we can proceed if mut: target_platforms_match += 1 build = test_build.get_name() build_path = test_build.get_path() # Demo mode: --run implementation (already added --run to mbedhtrun) # We want to pass file name to mbedhtrun (--run NAME => -f NAME_ and run only one binary if opts.run_app: gt_logger.gt_log( "running '%s' for '%s'-'%s'" % (gt_logger.gt_bright( opts.run_app), gt_logger.gt_bright(platform_name), gt_logger.gt_bright(test_build.get_toolchain()))) disk = mut['mount_point'] port = mut['serial_port'] micro = mut['platform_name'] program_cycle_s = get_platform_property( micro, "program_cycle_s") copy_method = opts.copy_method if opts.copy_method else 'shell' enum_host_tests_path = get_local_host_tests_dir( opts.enum_host_tests) test_platforms_match += 1 host_test_result = run_host_test( opts.run_app, disk, port, build_path, mut['target_id'], micro=micro, copy_method=copy_method, program_cycle_s=program_cycle_s, digest_source=opts.digest_source, 
json_test_cfg=opts.json_test_configuration, run_app=opts.run_app, enum_host_tests_path=enum_host_tests_path, verbose=True) # Some error in htrun, abort test execution if isinstance(host_test_result, int): # int(host_test_result) > 0 - Call to mbedhtrun failed # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun return host_test_result # If execution was successful 'run_host_test' return tuple with results single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result status = TEST_RESULTS.index( single_test_result ) if single_test_result in TEST_RESULTS else -1 if single_test_result != TEST_RESULT_OK: test_exec_retcode += 1 test_list = test_build.get_tests() filtered_ctest_test_list = create_filtered_test_list( test_list, opts.test_by_names, opts.skip_test, test_spec=test_spec) gt_logger.gt_log( "running %d test%s for platform '%s' and toolchain '%s'" % (len(filtered_ctest_test_list), "s" if len(filtered_ctest_test_list) != 1 else "", gt_logger.gt_bright(platform_name), gt_logger.gt_bright(test_build.get_toolchain()))) # Test execution order can be shuffled (also with provided random seed) # for test execution reproduction. filtered_ctest_test_list_keys = filtered_ctest_test_list.keys() if opts.shuffle_test_order: # We want to shuffle test names randomly random.shuffle(filtered_ctest_test_list_keys, lambda: shuffle_random_seed) for test_name in filtered_ctest_test_list_keys: image_path = filtered_ctest_test_list[test_name].get_binary( binary_type=TestBinary.BIN_TYPE_BOOTABLE).get_path() if image_path is None: gt_logger.gt_log_err( "Failed to find test binary for test %s flash method %s" % (test_name, 'usb')) else: test = {"test_bin": test_name, "image_path": image_path} test_queue.put(test) number_of_threads = 0 for mut in muts_to_test: # Experimental, parallel test execution if number_of_threads < parallel_test_exec: args = (test_result_queue, test_queue, opts, mut, build, build_path, greentea_hooks) t = Thread(target=run_test_thread, args=args) execute_threads.append(t) number_of_threads += 1 gt_logger.gt_log_tab( "use %s instance%s of execution threads for testing" % (len(execute_threads), 's' if len(execute_threads) != 1 else str()), print_text=verbose) for t in execute_threads: t.daemon = True t.start() # merge partial test reports from different threads to final test report for t in execute_threads: try: t.join() #blocking test_return_data = test_result_queue.get(False) except Exception as e: # No test report generated gt_logger.gt_log_err("could not generate test report" + str(e)) test_exec_retcode += -1000 return test_exec_retcode test_platforms_match += test_return_data['test_platforms_match'] test_exec_retcode += test_return_data['test_exec_retcode'] partial_test_report = test_return_data['test_report'] # todo: find better solution, maybe use extend for report_key in partial_test_report.keys(): if report_key not in test_report: test_report[report_key] = {} test_report.update(partial_test_report) else: test_report[report_key].update( partial_test_report[report_key]) execute_threads = [] if opts.verbose_test_configuration_only: print print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target" return (0) gt_logger.gt_log("all tests finished!") # We will execute post test hooks on tests for build_name in test_report: test_name_list = [] # All test case names for particular yotta target for test_name in test_report[build_name]: test = 
test_report[build_name][test_name] # Test was successful if test['single_test_result'] in [ TEST_RESULT_OK, TEST_RESULT_FAIL ]: test_name_list.append(test_name) # Call hook executed for each test, just after all tests are finished if greentea_hooks: # We can execute this test hook just after all tests are finished ('hook_post_test_end') format = { "test_name": test_name, "test_bin_name": test['test_bin_name'], "image_path": test['image_path'], "build_path": test['build_path'], "build_path_abs": test['build_path_abs'], } greentea_hooks.run_hook_ext('hook_post_test_end', format) if greentea_hooks: build = test_spec.get_test_build(build_name) assert build is not None, "Failed to find build info for build %s" % build_name # Call hook executed for each yotta target, just after all tests are finished build_path = build.get_path() build_path_abs = os.path.abspath(build_path) # We can execute this test hook just after all tests are finished ('hook_post_test_end') format = { "build_path": build_path, "build_path_abs": build_path_abs, "test_name_list": test_name_list, } greentea_hooks.run_hook_ext('hook_post_all_test_end', format) # This tool is designed to work in CI # We want to return success codes based on tool actions, # only if tests were executed and all passed we want to # return 0 (success) if not opts.only_build_tests: # Prints shuffle seed gt_logger.gt_log("shuffle seed: %.*f" % (SHUFFLE_SEED_ROUND, shuffle_random_seed)) def dump_report_to_text_file(filename, content): """! Closure for report dumps to text files @param filename Name of destination file @param content Text content of the file to write @return True if write was successful, else return False """ try: with open(filename, 'w') as f: f.write(content) except IOError as e: gt_logger.gt_log_err("can't export to '%s', reason:" % filename) gt_logger.gt_log_err(str(e)) return False return True # Reports to JUNIT file if opts.report_junit_file_name: gt_logger.gt_log("exporting to JUNIT file '%s'..." % gt_logger.gt_bright(opts.report_junit_file_name)) # This test specification will be used by JUnit exporter to populate TestSuite.properties (useful meta-data for Viewer) test_suite_properties = {} for target_name in test_report: test_build_properties = get_test_build_properties( test_spec, target_name) if test_build_properties: test_suite_properties[target_name] = test_build_properties junit_report = exporter_testcase_junit( test_report, test_suite_properties=test_suite_properties) dump_report_to_text_file(opts.report_junit_file_name, junit_report) # Reports to text file if opts.report_text_file_name: gt_logger.gt_log("exporting to TEXT '%s'..." % gt_logger.gt_bright(opts.report_text_file_name)) # Useful text reporter for those who do not like to copy-paste the results table from the console to files text_report, text_results = exporter_text(test_report) text_testcase_report, text_testcase_results = exporter_testcase_text( test_report) text_final_report = '\n'.join([ text_report, text_results, text_testcase_report, text_testcase_results ]) dump_report_to_text_file(opts.report_text_file_name, text_final_report) # Reports to JSON file if opts.report_json_file_name: # We will not print summary and json report together gt_logger.gt_log("exporting to JSON '%s'..." % gt_logger.gt_bright(opts.report_json_file_name)) json_report = exporter_json(test_report) dump_report_to_text_file(opts.report_json_file_name, json_report) # Reports to HTML file if opts.report_html_file_name: gt_logger.gt_log("exporting to HTML file '%s'..."
% gt_logger.gt_bright(opts.report_html_file_name)) # Generate a HTML page displaying all of the results html_report = exporter_html(test_report) dump_report_to_text_file(opts.report_html_file_name, html_report) # Final summary if test_report: # Test suite report gt_logger.gt_log("test suite report:") text_report, text_results = exporter_text(test_report) print text_report gt_logger.gt_log("test suite results: " + text_results) # test case detailed report gt_logger.gt_log("test case report:") text_testcase_report, text_testcase_results = exporter_testcase_text( test_report) print text_testcase_report gt_logger.gt_log("test case results: " + text_testcase_results) # This flag guards 'build only' so we expect only yotta errors if test_platforms_match == 0: # No tests were executed gt_logger.gt_log_warn( "no platform/target matching tests were found!") test_exec_retcode += -10 if target_platforms_match == 0: # No platforms were tested gt_logger.gt_log_warn("no matching platforms were found!") test_exec_retcode += -100 return (test_exec_retcode)
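# Illustrative sketch (not part of greentea): how a CI wrapper might read the return
# value produced by the function above. The convention in this module is: 0 means
# everything that executed passed (or nothing had to run), positive values count
# failing test suites or signal device enumeration problems (e.g. RET_NO_DEVICES),
# and negative values accumulate tool-level errors (no matching platforms/tests,
# report generation failures, interrupts). The helper name is hypothetical.
def interpret_greentea_retcode(retcode):
    """Map a main_cli() return code to a short, human-readable verdict (sketch)."""
    if retcode == 0:
        return "success: all executed tests passed"
    if retcode > 0:
        return "failure: %d failing test suite(s) or a device enumeration error" % retcode
    return "tool error: no matching platforms/tests, report error or interrupt"

# Example (hypothetical usage): print(interpret_greentea_retcode(main_cli(opts, args)))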
from mbed_greentea.mbed_target_info import get_mbed_target_from_current_dir from mbed_greentea.mbed_greentea_log import gt_logger from mbed_greentea.mbed_greentea_dlm import GREENTEA_KETTLE_PATH from mbed_greentea.mbed_greentea_dlm import greentea_get_app_sem from mbed_greentea.mbed_greentea_dlm import greentea_update_kettle from mbed_greentea.mbed_greentea_dlm import greentea_clean_kettle from mbed_greentea.mbed_yotta_api import build_with_yotta from mbed_greentea.mbed_greentea_hooks import GreenteaHooks from mbed_greentea.mbed_yotta_module_parse import YottaConfig from mbed_greentea.mbed_yotta_module_parse import YottaModule try: import mbed_lstools import mbed_host_tests except ImportError as e: gt_logger.gt_log_err("Not all required Python modules were imported!") gt_logger.gt_log_err(str(e)) gt_logger.gt_log("Check if:") gt_logger.gt_log_tab("1. You've correctly installed the dependency modules using setuptools or pip:") gt_logger.gt_log_tab("* python setup.py install", tab_count=2) gt_logger.gt_log_tab("* pip install <module-name>", tab_count=2) gt_logger.gt_log_tab("2. There are no errors preventing import in dependency modules") gt_logger.gt_log_tab("See: https://github.com/ARMmbed/greentea#installing-greentea") exit(-2342) MBED_LMTOOLS = 'mbed_lstools' in sys.modules MBED_HOST_TESTS = 'mbed_host_tests' in sys.modules RET_NO_DEVICES = 1001 RET_YOTTA_BUILD_FAIL = -1 LOCAL_HOST_TESTS_DIR = './test/host_tests' # Used by mbedhtrun -e <dir>
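# Minimal, self-contained sketch of the optional-dependency pattern used above:
# import inside try/except, record availability via sys.modules, and degrade
# gracefully later instead of failing at import time. 'some_optional_module' is a
# placeholder name, not a real greentea dependency.
import sys

try:
    import some_optional_module  # placeholder, expected to be missing
except ImportError:
    pass

HAS_OPTIONAL_MODULE = 'some_optional_module' in sys.modules

def feature_needing_optional_module():
    """Return 0 when the optional module is usable, -1 otherwise (sketch)."""
    if not HAS_OPTIONAL_MODULE:
        print("error: optional module not installed")
        return -1
    return 0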
def get_test_spec_from_yt_module(opts): """ Gives test specification created from yotta module environment. :return TestSpec: """ ### Read yotta module basic information yotta_module = YottaModule() yotta_module.init() # Read actual yotta module data # Check if NO greentea-client is in module.json of repo to test, if so abort if not yotta_module.check_greentea_client(): error = """ ***************************************************************************************** * We've noticed that NO 'greentea-client' module is specified in * * dependency/testDependency section of this module's 'module.json' file. * * * * This version of Greentea requires 'greentea-client' module. * * Please downgrade to Greentea before v0.2.0: * * * * $ pip install "mbed-greentea<0.2.0" --upgrade * * * * or port your tests to new Async model: https://github.com/ARMmbed/greentea/pull/78 * ***************************************************************************************** """ raise YottaError(error) test_spec = TestSpec() ### Selecting yotta targets to process yt_targets = [] # List of yotta targets specified by user used to process during this run if opts.list_of_targets: yt_targets = opts.list_of_targets.split(',') else: # Trying to use locally set yotta target gt_logger.gt_log("checking for yotta target in current directory") gt_logger.gt_log_tab("reason: no --target switch set") current_target = get_mbed_target_from_current_dir() if current_target: gt_logger.gt_log("assuming default target as '%s'"% gt_logger.gt_bright(current_target)) # Assuming first target printed by 'yotta search' will be used yt_targets = [current_target] else: gt_logger.gt_log_tab("yotta target in current directory is not set") gt_logger.gt_log_err("yotta target is not specified. Use '%s' or '%s' command to set target"% ( gt_logger.gt_bright('mbedgt -t <yotta_target>'), gt_logger.gt_bright('yotta target <yotta_target>') )) raise YottaError("Yotta target not set in current directory!") ### Use yotta to search mapping between platform names and available platforms # Convert platform:target, ... 
mapping to data structure yt_target_to_map_platform = {} if opts.map_platform_to_yt_target: gt_logger.gt_log("user defined platform -> target supported mapping definition (specified with --map-target switch)") for mapping in opts.map_platform_to_yt_target.split(','): if len(mapping.split(':')) == 2: yt_target, platform = mapping.split(':') yt_target_to_map_platform[yt_target] = platform gt_logger.gt_log_tab("mapped yotta target '%s' to be compatible with platform '%s'"% ( gt_logger.gt_bright(yt_target), gt_logger.gt_bright(platform) )) else: gt_logger.gt_log_tab("unknown format '%s', use 'target:platform' format"% mapping) for yt_target in yt_targets: if yt_target in yt_target_to_map_platform: platform = yt_target_to_map_platform[yt_target] else: # get it from local Yotta target platform = get_platform_name_from_yotta_target(yt_target) # Toolchain doesn't matter as Greentea does not have to do any selection for it unlike platform toolchain = yt_target yotta_config = YottaConfig() yotta_config.init(yt_target) baud_rate = yotta_config.get_baudrate() base_path = os.path.join('.', 'build', yt_target) tb = TestBuild(yt_target, platform, toolchain, baud_rate, base_path) test_spec.add_test_builds(yt_target, tb) # Find tests ctest_test_list = load_ctest_testsuite(base_path, binary_type=get_binary_type_for_platform(platform)) for name, path in ctest_test_list.iteritems(): t = Test(name) t.add_binary(path, TestBinary.BIN_TYPE_BOOTABLE) tb.add_test(name, t) return test_spec
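# Sketch: how the TestSpec built above is consumed by the test loop elsewhere in
# this module. It only uses accessors that already appear in this file
# (get_test_builds, get_name, get_platform, get_toolchain, get_baudrate, get_tests,
# get_binary); the optional names filter of get_test_builds() is assumed to default
# to "no filtering", and error handling is omitted for brevity.
def dump_test_spec(test_spec):
    """Print a summary of builds and bootable test binaries in a TestSpec (sketch)."""
    for test_build in test_spec.get_test_builds():
        print("build '%s': platform '%s', toolchain '%s', baudrate %s" % (
            test_build.get_name(),
            test_build.get_platform(),
            test_build.get_toolchain(),
            test_build.get_baudrate()))
        for name, test in test_build.get_tests().items():
            binary_path = test.get_binary(binary_type=TestBinary.BIN_TYPE_BOOTABLE).get_path()
            print("  test '%s' -> %s" % (name, binary_path))

# Example (hypothetical usage): dump_test_spec(get_test_spec_from_yt_module(opts))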
def run_test_thread(test_result_queue, test_queue, opts, mut, mut_info, yotta_target_name, greentea_hooks): test_exec_retcode = 0 test_platforms_match = 0 test_report = {} yotta_config_baudrate = None # Default serial port baudrate forced by configuration yotta_config = YottaConfig() yotta_config.init(yotta_target_name) yotta_config_baudrate = yotta_config.get_baudrate() while not test_queue.empty(): try: test = test_queue.get(False) except Exception as e: gt_logger.gt_log_err(str(e)) break test_result = 'SKIPPED' disk = mut['mount_point'] port = mut['serial_port'] micro = mut['platform_name'] program_cycle_s = mut_info['properties']['program_cycle_s'] copy_method = opts.copy_method if opts.copy_method else 'shell' verbose = opts.verbose_test_result_only enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests) # We will force configuration specific baudrate if port: port = "%s:%d" % (port, yotta_config_baudrate) test_platforms_match += 1 host_test_result = run_host_test( test['image_path'], disk, port, yotta_target_name, mut['target_id'], micro=micro, copy_method=copy_method, program_cycle_s=program_cycle_s, digest_source=opts.digest_source, json_test_cfg=opts.json_test_configuration, enum_host_tests_path=enum_host_tests_path, verbose=verbose) # Some error in htrun, abort test execution if host_test_result < 0: break single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result test_result = single_test_result build_path = os.path.join("./build", yotta_target_name) build_path_abs = os.path.abspath(build_path) if single_test_result != TEST_RESULT_OK: test_exec_retcode += 1 if single_test_result in [TEST_RESULT_OK, TEST_RESULT_FAIL]: if greentea_hooks: # Test was successful # We can execute test hook just after test is finished ('hook_test_end') format = { "test_name": test['test_bin'], "test_bin_name": os.path.basename(test['image_path']), "image_path": test['image_path'], "build_path": build_path, "build_path_abs": build_path_abs, "yotta_target_name": yotta_target_name, } greentea_hooks.run_hook_ext('hook_test_end', format) # Update report for optional reporting feature test_suite_name = test['test_bin'].lower() if yotta_target_name not in test_report: test_report[yotta_target_name] = {} if test_suite_name not in test_report[yotta_target_name]: test_report[yotta_target_name][test_suite_name] = {} if not test_cases_summary and not result_test_cases: gt_logger.gt_log_warn("test case summary event not found") gt_logger.gt_log_tab( "no test case report present, assuming test suite to be a single test case!" 
) # We will map test suite result to test case to # output valid test case in report # Generate "artificial" test case name from test suite name# # E.g: # mbed-drivers-test-dev_null -> dev_null test_case_name = test_suite_name test_str_idx = test_suite_name.find("-test-") if test_str_idx != -1: test_case_name = test_case_name[test_str_idx + 6:] gt_logger.gt_log_tab("test suite: %s" % test_suite_name) gt_logger.gt_log_tab("test case: %s" % test_case_name) # Test case result: OK, FAIL or ERROR tc_result_text = { "OK": "OK", "FAIL": "FAIL", }.get(single_test_result, 'ERROR') # Test case integer success code OK, FAIL and ERROR: (0, >0, <0) tc_result = { "OK": 0, "FAIL": 1024, "ERROR": -1024, }.get(tc_result_text, '-2048') # Test case passes and failures: (1 pass, 0 failures) or (0 passes, 1 failure) tc_passed, tc_failed = { 0: (1, 0), }.get(tc_result, (0, 1)) # Test case report build for whole binary # Add test case made from test suite result to test case report result_test_cases = { test_case_name: { 'duration': single_testduration, 'time_start': 0.0, 'time_end': 0.0, 'utest_log': single_test_output.splitlines(), 'result_text': tc_result_text, 'passed': tc_passed, 'failed': tc_failed, 'result': tc_result, } } # Test summary build for whole binary (as a test case) test_cases_summary = ( tc_passed, tc_failed, ) gt_logger.gt_log("test on hardware with target id: %s" % (mut['target_id'])) gt_logger.gt_log( "test suite '%s' %s %s in %.2f sec" % (test['test_bin'], '.' * (80 - len(test['test_bin'])), test_result, single_testduration)) # Test report build for whole binary test_report[yotta_target_name][test_suite_name][ 'single_test_result'] = single_test_result test_report[yotta_target_name][test_suite_name][ 'single_test_output'] = single_test_output test_report[yotta_target_name][test_suite_name][ 'elapsed_time'] = single_testduration test_report[yotta_target_name][test_suite_name][ 'platform_name'] = micro test_report[yotta_target_name][test_suite_name][ 'copy_method'] = copy_method test_report[yotta_target_name][test_suite_name][ 'testcase_result'] = result_test_cases test_report[yotta_target_name][test_suite_name][ 'build_path'] = build_path test_report[yotta_target_name][test_suite_name][ 'build_path_abs'] = build_path_abs test_report[yotta_target_name][test_suite_name]['image_path'] = test[ 'image_path'] test_report[yotta_target_name][test_suite_name][ 'test_bin_name'] = os.path.basename(test['image_path']) passes_cnt, failures_cnt = 0, 0 for tc_name in sorted(result_test_cases.keys()): gt_logger.gt_log_tab( "test case: '%s' %s %s in %.2f sec" % (tc_name, '.' * (80 - len(tc_name)), result_test_cases[tc_name].get('result_text', '_'), result_test_cases[tc_name].get('duration', 0.0))) if result_test_cases[tc_name].get('result_text', '_') == 'OK': passes_cnt += 1 else: failures_cnt += 1 if test_cases_summary: passes, failures = test_cases_summary gt_logger.gt_log("test case summary: %d pass%s, %d failur%s" % (passes, '' if passes == 1 else 'es', failures, 'e' if failures == 1 else 'es')) if passes != passes_cnt or failures != failures_cnt: gt_logger.gt_log_err( "test case summary mismatch: reported passes vs failures miscount!" 
) gt_logger.gt_log_tab( "(%d, %d) vs (%d, %d)" % (passes, failures, passes_cnt, failures_cnt)) if single_test_result != 'OK' and not verbose and opts.report_fails: # In some cases we want to print console to see why test failed # even if we are not in verbose mode gt_logger.gt_log_tab( "test failed, reporting console output (specified with --report-fails option)" ) print print single_test_output #greentea_release_target_id(mut['target_id'], gt_instance_uuid) test_result_queue.put({ 'test_platforms_match': test_platforms_match, 'test_exec_retcode': test_exec_retcode, 'test_report': test_report }) return
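# Sketch: the "artificial" single test case record built above when a binary does
# not emit per-test-case events, extracted into a pure helper. It mirrors the
# mapping used in run_test_thread(): suite 'mbed-drivers-test-dev_null' becomes
# case 'dev_null', and the suite verdict becomes the only test case result.
def synthesize_single_test_case(test_suite_name, single_test_result, duration, output):
    test_case_name = test_suite_name
    idx = test_suite_name.find("-test-")
    if idx != -1:
        test_case_name = test_case_name[idx + len("-test-"):]
    tc_result_text = {"OK": "OK", "FAIL": "FAIL"}.get(single_test_result, "ERROR")
    tc_result = {"OK": 0, "FAIL": 1024, "ERROR": -1024}.get(tc_result_text)
    tc_passed, tc_failed = (1, 0) if tc_result == 0 else (0, 1)
    return {test_case_name: {
        'duration': duration,
        'time_start': 0.0,
        'time_end': 0.0,
        'utest_log': output.splitlines(),
        'result_text': tc_result_text,
        'passed': tc_passed,
        'failed': tc_failed,
        'result': tc_result,
    }}

# Example: synthesize_single_test_case('mbed-drivers-test-dev_null', 'OK', 1.5, 'log')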
def run_test_thread(test_result_queue, test_queue, opts, mut, mut_info, yotta_target_name, greentea_hooks): test_exec_retcode = 0 test_platforms_match = 0 test_report = {} yotta_config_baudrate = None # Default serial port baudrate forced by configuration yotta_config = YottaConfig() yotta_config.init(yotta_target_name) yotta_config_baudrate = yotta_config.get_baudrate() while not test_queue.empty(): try: test = test_queue.get(False) except Exception as e: gt_logger.gt_log_err(str(e)) break test_result = 'SKIPPED' disk = mut['mount_point'] port = mut['serial_port'] micro = mut['platform_name'] program_cycle_s = mut_info['properties']['program_cycle_s'] copy_method = opts.copy_method if opts.copy_method else 'shell' verbose = opts.verbose_test_result_only enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests) # We will force configuration specific baudrate if port: port = "%s:%d"% (port, yotta_config_baudrate) test_platforms_match += 1 host_test_result = run_host_test(test['image_path'], disk, port, yotta_target_name, mut['target_id'], micro=micro, copy_method=copy_method, program_cycle_s=program_cycle_s, digest_source=opts.digest_source, json_test_cfg=opts.json_test_configuration, enum_host_tests_path=enum_host_tests_path, verbose=verbose) # Some error in htrun, abort test execution if host_test_result < 0: break single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result test_result = single_test_result build_path = os.path.join("./build", yotta_target_name) build_path_abs = os.path.abspath(build_path) if single_test_result != TEST_RESULT_OK: test_exec_retcode += 1 if single_test_result in [TEST_RESULT_OK, TEST_RESULT_FAIL]: if greentea_hooks: # Test was successful # We can execute test hook just after test is finished ('hook_test_end') format = { "test_name": test['test_bin'], "test_bin_name": os.path.basename(test['image_path']), "image_path": test['image_path'], "build_path": build_path, "build_path_abs": build_path_abs, "yotta_target_name": yotta_target_name, } greentea_hooks.run_hook_ext('hook_test_end', format) # Update report for optional reporting feature test_suite_name = test['test_bin'].lower() if yotta_target_name not in test_report: test_report[yotta_target_name] = {} if test_suite_name not in test_report[yotta_target_name]: test_report[yotta_target_name][test_suite_name] = {} if not test_cases_summary and not result_test_cases: gt_logger.gt_log_warn("test case summary event not found") gt_logger.gt_log_tab("no test case report present, assuming test suite to be a single test case!") # We will map test suite result to test case to # output valid test case in report # Generate "artificial" test case name from test suite name# # E.g: # mbed-drivers-test-dev_null -> dev_null test_case_name = test_suite_name test_str_idx = test_suite_name.find("-test-") if test_str_idx != -1: test_case_name = test_case_name[test_str_idx + 6:] gt_logger.gt_log_tab("test suite: %s"% test_suite_name) gt_logger.gt_log_tab("test case: %s"% test_case_name) # Test case result: OK, FAIL or ERROR tc_result_text = { "OK": "OK", "FAIL": "FAIL", }.get(single_test_result, 'ERROR') # Test case integer success code OK, FAIL and ERROR: (0, >0, <0) tc_result = { "OK": 0, "FAIL": 1024, "ERROR": -1024, }.get(tc_result_text, '-2048') # Test case passes and failures: (1 pass, 0 failures) or (0 passes, 1 failure) tc_passed, tc_failed = { 0: (1, 0), }.get(tc_result, (0, 1)) # Test case report build for whole binary # Add test case 
made from test suite result to test case report result_test_cases = { test_case_name: { 'duration': single_testduration, 'time_start': 0.0, 'time_end': 0.0, 'utest_log': single_test_output.splitlines(), 'result_text': tc_result_text, 'passed': tc_passed, 'failed': tc_failed, 'result': tc_result, } } # Test summary build for whole binary (as a test case) test_cases_summary = (tc_passed, tc_failed, ) gt_logger.gt_log("test on hardware with target id: %s"% (mut['target_id'])) gt_logger.gt_log("test suite '%s' %s %s in %.2f sec"% (test['test_bin'], '.' * (80 - len(test['test_bin'])), test_result, single_testduration)) # Test report build for whole binary test_report[yotta_target_name][test_suite_name]['single_test_result'] = single_test_result test_report[yotta_target_name][test_suite_name]['single_test_output'] = single_test_output test_report[yotta_target_name][test_suite_name]['elapsed_time'] = single_testduration test_report[yotta_target_name][test_suite_name]['platform_name'] = micro test_report[yotta_target_name][test_suite_name]['copy_method'] = copy_method test_report[yotta_target_name][test_suite_name]['testcase_result'] = result_test_cases test_report[yotta_target_name][test_suite_name]['build_path'] = build_path test_report[yotta_target_name][test_suite_name]['build_path_abs'] = build_path_abs test_report[yotta_target_name][test_suite_name]['image_path'] = test['image_path'] test_report[yotta_target_name][test_suite_name]['test_bin_name'] = os.path.basename(test['image_path']) passes_cnt, failures_cnt = 0, 0 for tc_name in sorted(result_test_cases.keys()): gt_logger.gt_log_tab("test case: '%s' %s %s in %.2f sec"% (tc_name, '.' * (80 - len(tc_name)), result_test_cases[tc_name].get('result_text', '_'), result_test_cases[tc_name].get('duration', 0.0))) if result_test_cases[tc_name].get('result_text', '_') == 'OK': passes_cnt += 1 else: failures_cnt += 1 if test_cases_summary: passes, failures = test_cases_summary gt_logger.gt_log("test case summary: %d pass%s, %d failur%s"% (passes, '' if passes == 1 else 'es', failures, 'e' if failures == 1 else 'es')) if passes != passes_cnt or failures != failures_cnt: gt_logger.gt_log_err("test case summary mismatch: reported passes vs failures miscount!") gt_logger.gt_log_tab("(%d, %d) vs (%d, %d)"% (passes, failures, passes_cnt, failures_cnt)) if single_test_result != 'OK' and not verbose and opts.report_fails: # In some cases we want to print console to see why test failed # even if we are not in verbose mode gt_logger.gt_log_tab("test failed, reporting console output (specified with --report-fails option)") print print single_test_output #greentea_release_target_id(mut['target_id'], gt_instance_uuid) test_result_queue.put({'test_platforms_match': test_platforms_match, 'test_exec_retcode': test_exec_retcode, 'test_report': test_report}) return
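# Sketch of the producer/consumer pattern behind --parallel above, reduced to a
# self-contained example: a work Queue drained by daemon worker threads (bounded by
# the parallel count) and a result Queue merged after join(). The "test run" below
# is a placeholder; greentea calls run_host_test() at that point instead.
from Queue import Queue, Empty  # Python 2; the module is named 'queue' on Python 3
from threading import Thread

def _worker(work_queue, result_queue):
    while True:
        try:
            item = work_queue.get(False)
        except Empty:
            break
        result_queue.put({'item': item, 'result': 'OK'})  # placeholder result

def run_parallel(items, parallel_count=2):
    work_queue, result_queue = Queue(), Queue()
    for item in items:
        work_queue.put(item)
    threads = [Thread(target=_worker, args=(work_queue, result_queue))
               for _ in range(max(1, min(parallel_count, len(items))))]
    for t in threads:
        t.daemon = True
        t.start()
    for t in threads:
        t.join()
    return [result_queue.get(False) for _ in range(result_queue.qsize())]

# Example: run_parallel(['test-a', 'test-b', 'test-c'], parallel_count=2)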
if not os.path.exists(target_json_path): gt_logger.gt_log_err('Target json does not exist [%s].\n' % target_json_path + 'mbed TAS Executor {greentea} must be run inside a pre-built yotta module!') return None with open(target_json_path, 'r') as f: data = f.read() try: target_json = json.loads(data) except (TypeError, ValueError) as e: gt_logger.gt_log_err('Failed to load json data from target.json! error [%s]\n' % str(e) + 'Cannot determine required mbed platform name!') return None if 'keywords' not in target_json: gt_logger.gt_log_err("No 'keywords' in target.json! Cannot determine required mbed platform name!") return None platform_name = None for keyword in target_json['keywords']: m = re.search('mbed-target:(.*)', keyword) if m is not None: platform_name = m.group(1).upper() if platform_name is None: gt_logger.gt_log_err('No keyword with format "mbed-target:<platform name>" found in target.json!\n' + 'Cannot determine required mbed platform name!') return None return platform_name
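# Sketch: the keyword convention parsed above. A yotta target advertises the mbed
# platform it supports through a target.json 'keywords' entry of the form
# "mbed-target:<platform>", e.g. "mbed-target:k64f" maps to platform name 'K64F'.
import re

def platform_name_from_keywords(keywords):
    """Return the upper-cased platform name from a keywords list, or None (sketch)."""
    for keyword in keywords:
        m = re.search('mbed-target:(.*)', keyword)
        if m is not None:
            return m.group(1).upper()
    return None

# Example: platform_name_from_keywords(["mbed-official", "mbed-target:k64f"]) == 'K64F'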
def main(): """ Closure for main_cli() function """ parser = optparse.OptionParser() parser.add_option('-t', '--target', dest='list_of_targets', help='You can specify list of targets you want to build. Use comma to sepatate them') parser.add_option('-n', '--test-by-names', dest='test_by_names', help='Runs only test enumerated it this switch. Use comma to separate test case names.') parser.add_option('-i', '--skip-test', dest='skip_test', help='Skip tests enumerated it this switch. Use comma to separate test case names.') parser.add_option("-O", "--only-build", action="store_true", dest="only_build_tests", default=False, help="Only build repository and tests, skips actual test procedures (flashing etc.)") parser.add_option("-S", "--skip-build", action="store_true", dest="skip_yotta_build", default=False, help="Skip calling 'yotta build' on this module") copy_methods_str = "Plugin support: " + ', '.join(mbed_host_tests.host_tests_plugins.get_plugin_caps('CopyMethod')) parser.add_option("-c", "--copy", dest="copy_method", help="Copy (flash the target) method selector. " + copy_methods_str, metavar="COPY_METHOD") parser.add_option('', '--parallel', dest='parallel_test_exec', default=1, help='Experimental, you execute test runners for connected to your host MUTs in parallel (speeds up test result collection)') parser.add_option("-e", "--enum-host-tests", dest="enum_host_tests", help="Define directory with yotta module local host tests. Default: ./test/host_tests") parser.add_option('', '--config', dest='verbose_test_configuration_only', default=False, action="store_true", help='Displays connected boards and detected targets and exits.') parser.add_option('', '--release', dest='build_to_release', default=False, action="store_true", help='If possible force build in release mode (yotta -r).') parser.add_option('', '--debug', dest='build_to_debug', default=False, action="store_true", help='If possible force build in debug mode (yotta -d).') parser.add_option('-l', '--list', dest='list_binaries', default=False, action="store_true", help='List available binaries') parser.add_option('-m', '--map-target', dest='map_platform_to_yt_target', help='List of custom mapping between platform name and yotta target. Comma separated list of PLATFORM:TARGET tuples') parser.add_option('', '--use-tids', dest='use_target_ids', help='Specify explicitly which devices can be used by Greentea for testing by creating list of allowed Target IDs (use comma separated list)') parser.add_option('-u', '--shuffle', dest='shuffle_test_order', default=False, action="store_true", help='Shuffles test execution order') parser.add_option('', '--shuffle-seed', dest='shuffle_test_seed', default=None, help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)') parser.add_option('', '--lock', dest='lock_by_target', default=False, action="store_true", help='Use simple resource locking mechanism to run multiple application instances') parser.add_option('', '--digest', dest='digest_source', help='Redirect input from where test suite should take console input. 
You can use stdin or file name to get test case console output') parser.add_option('-H', '--hooks', dest='hooks_json', help='Load hooks used drive extra functionality') parser.add_option('', '--test-cfg', dest='json_test_configuration', help='Pass to host test data with host test configuration') parser.add_option('', '--run', dest='run_app', help='Flash, reset and dump serial from selected binary application') parser.add_option('', '--report-junit', dest='report_junit_file_name', help='You can log test suite results in form of JUnit compliant XML report') parser.add_option('', '--report-text', dest='report_text_file_name', help='You can log test suite results to text file') parser.add_option('', '--report-json', dest='report_json', default=False, action="store_true", help='Outputs test results in JSON') parser.add_option('', '--report-fails', dest='report_fails', default=False, action="store_true", help='Prints console outputs for failed tests') parser.add_option('', '--yotta-registry', dest='yotta_search_for_mbed_target', default=False, action="store_true", help='Use on-line yotta registry to search for compatible with connected mbed devices yotta targets. Default: search is done in yotta_targets directory') parser.add_option('-V', '--verbose-test-result', dest='verbose_test_result_only', default=False, action="store_true", help='Prints test serial output') parser.add_option('-v', '--verbose', dest='verbose', default=False, action="store_true", help='Verbose mode (prints some extra information)') parser.add_option('', '--plain', dest='plain', default=False, action="store_true", help='Do not use colours while logging') parser.add_option('', '--version', dest='version', default=False, action="store_true", help='Prints package version and exits') parser.description = """This automated test script is used to test mbed SDK 3.0 on mbed-enabled devices with support from yotta build tool""" parser.epilog = """Example: mbedgt --target frdm-k64f-gcc""" (opts, args) = parser.parse_args() cli_ret = 0 start = time() if opts.lock_by_target: # We are using Greentea proprietary locking mechanism to lock between platforms and targets gt_logger.gt_log("using (experimental) simple locking mechanism") gt_logger.gt_log_tab("kettle: %s"% GREENTEA_KETTLE_PATH) gt_file_sem, gt_file_sem_name, gt_instance_uuid = greentea_get_app_sem() with gt_file_sem: greentea_update_kettle(gt_instance_uuid) try: cli_ret = main_cli(opts, args, gt_instance_uuid) except KeyboardInterrupt: greentea_clean_kettle(gt_instance_uuid) gt_logger.gt_log_err("ctrl+c keyboard interrupt!") return(-2) # Keyboard interrupt except: greentea_clean_kettle(gt_instance_uuid) gt_logger.gt_log_err("unexpected error:") gt_logger.gt_log_tab(sys.exc_info()[0]) raise greentea_clean_kettle(gt_instance_uuid) else: # Standard mode of operation # Other instance must provide mutually exclusive access control to platforms and targets try: cli_ret = main_cli(opts, args) except KeyboardInterrupt: gt_logger.gt_log_err("ctrl+c keyboard interrupt!") return(-2) # Keyboard interrupt except Exception as e: gt_logger.gt_log_err("unexpected error:") gt_logger.gt_log_tab(str(e)) raise if not any([opts.list_binaries, opts.version]): delta = time() - start # Test execution time delta gt_logger.gt_log("completed in %.2f sec"% delta) if cli_ret: gt_logger.gt_log_err("exited with code %d"% cli_ret) return(cli_ret)
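# Sketch: how --shuffle / --shuffle-seed declared above make a shuffled test order
# reproducible. The seed printed in the test summary can be fed back with
# --shuffle-seed to replay the exact same order; random.shuffle() is called with a
# constant 'random' callable, the same trick used later in this module
# (Python 2 shuffle signature).
import random

SHUFFLE_SEED_ROUND = 10  # same rounding as used for the printed seed

def shuffled_test_order(test_names, seed=None):
    if seed is None:
        seed = round(random.random(), SHUFFLE_SEED_ROUND)
    test_names = list(test_names)
    random.shuffle(test_names, lambda: seed)
    return test_names, seed

# Example:
#   order, seed = shuffled_test_order(['test-a', 'test-b', 'test-c'])
#   replayed, _ = shuffled_test_order(['test-a', 'test-b', 'test-c'], seed=seed)
#   assert order == replayed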
def main(): """ Closure for main_cli() function """ parser = optparse.OptionParser() parser.add_option( "-t", "--target", dest="list_of_targets", help="You can specify list of yotta targets you want to build. Use comma to separate them." + "Note: If --test-spec switch is defined this list becomes optional list of builds you want to filter in your test:" + "Comma separated list of builds from test specification. Applicable if --test-spec switch is specified", ) parser.add_option( "-n", "--test-by-names", dest="test_by_names", help="Runs only test enumerated it this switch. Use comma to separate test case names.", ) parser.add_option( "-i", "--skip-test", dest="skip_test", help="Skip tests enumerated it this switch. Use comma to separate test case names.", ) parser.add_option( "-O", "--only-build", action="store_true", dest="only_build_tests", default=False, help="Only build repository and tests, skips actual test procedures (flashing etc.)", ) parser.add_option( "-S", "--skip-build", action="store_true", dest="skip_yotta_build", default=True, help="Skip calling 'yotta build' on this module", ) copy_methods_str = "Plugin support: " + ", ".join(mbed_host_tests.host_tests_plugins.get_plugin_caps("CopyMethod")) parser.add_option( "-c", "--copy", dest="copy_method", help="Copy (flash the target) method selector. " + copy_methods_str, metavar="COPY_METHOD", ) parser.add_option( "", "--parallel", dest="parallel_test_exec", default=1, help="Experimental, you execute test runners for connected to your host MUTs in parallel (speeds up test result collection)", ) parser.add_option( "-e", "--enum-host-tests", dest="enum_host_tests", help="Define directory with yotta module local host tests. Default: ./test/host_tests", ) parser.add_option( "", "--config", dest="verbose_test_configuration_only", default=False, action="store_true", help="Displays connected boards and detected targets and exits.", ) parser.add_option( "", "--release", dest="build_to_release", default=False, action="store_true", help="If possible force build in release mode (yotta -r).", ) parser.add_option( "", "--debug", dest="build_to_debug", default=False, action="store_true", help="If possible force build in debug mode (yotta -d).", ) parser.add_option( "-l", "--list", dest="list_binaries", default=False, action="store_true", help="List available binaries" ) parser.add_option( "-g", "--grm", dest="global_resource_mgr", help="Global resource manager service query: platrform name, remote mgr module name, IP address and port, example K64F:module_name:10.2.123.43:3334", ) parser.add_option( "-m", "--map-target", dest="map_platform_to_yt_target", help="List of custom mapping between platform name and yotta target. 
Comma separated list of YOTTA_TARGET:PLATFORM tuples", ) parser.add_option( "", "--use-tids", dest="use_target_ids", help="Specify explicitly which devices can be used by Greentea for testing by creating list of allowed Target IDs (use comma separated list)", ) parser.add_option( "-u", "--shuffle", dest="shuffle_test_order", default=False, action="store_true", help="Shuffles test execution order", ) parser.add_option( "", "--shuffle-seed", dest="shuffle_test_seed", default=None, help="Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)", ) parser.add_option( "", "--lock", dest="lock_by_target", default=False, action="store_true", help="Use simple resource locking mechanism to run multiple application instances", ) parser.add_option( "", "--digest", dest="digest_source", help="Redirect input from where test suite should take console input. You can use stdin or file name to get test case console output", ) parser.add_option("-H", "--hooks", dest="hooks_json", help="Load hooks used drive extra functionality") parser.add_option("", "--test-spec", dest="test_spec", help="Test specification generated by build system.") parser.add_option( "", "--test-cfg", dest="json_test_configuration", help="Pass to host test data with host test configuration" ) parser.add_option("", "--run", dest="run_app", help="Flash, reset and dump serial from selected binary application") parser.add_option( "", "--report-junit", dest="report_junit_file_name", help="You can log test suite results in form of JUnit compliant XML report", ) parser.add_option( "", "--report-text", dest="report_text_file_name", help="You can log test suite results to text file" ) parser.add_option( "", "--report-json", dest="report_json_file_name", help="You can log test suite results to JSON formatted file" ) parser.add_option( "", "--report-html", dest="report_html_file_name", help="You can log test suite results in the form of a HTML page", ) parser.add_option( "", "--report-fails", dest="report_fails", default=False, action="store_true", help="Prints console outputs for failed tests", ) parser.add_option( "", "--yotta-registry", dest="yotta_search_for_mbed_target", default=False, action="store_true", help="Use on-line yotta registry to search for compatible with connected mbed devices yotta targets. 
Default: search is done in yotta_targets directory", ) parser.add_option( "-V", "--verbose-test-result", dest="verbose_test_result_only", default=False, action="store_true", help="Prints test serial output", ) parser.add_option( "-v", "--verbose", dest="verbose", default=False, action="store_true", help="Verbose mode (prints some extra information)", ) parser.add_option( "", "--plain", dest="plain", default=False, action="store_true", help="Do not use colours while logging" ) parser.add_option( "", "--version", dest="version", default=False, action="store_true", help="Prints package version and exits" ) parser.description = """This automated test script is used to test mbed SDK 3.0 on mbed-enabled devices with support from yotta build tool""" parser.epilog = """Example: mbedgt --target frdm-k64f-gcc""" (opts, args) = parser.parse_args() cli_ret = 0 if not opts.version: # This string should not appear when fetching plain version string gt_logger.gt_log(get_hello_string()) start = time() if opts.lock_by_target: # We are using Greentea proprietary locking mechanism to lock between platforms and targets gt_logger.gt_log("using (experimental) simple locking mechanism") gt_logger.gt_log_tab("kettle: %s" % GREENTEA_KETTLE_PATH) gt_file_sem, gt_file_sem_name, gt_instance_uuid = greentea_get_app_sem() with gt_file_sem: greentea_update_kettle(gt_instance_uuid) try: cli_ret = main_cli(opts, args, gt_instance_uuid) except KeyboardInterrupt: greentea_clean_kettle(gt_instance_uuid) gt_logger.gt_log_err("ctrl+c keyboard interrupt!") return -2 # Keyboard interrupt except: greentea_clean_kettle(gt_instance_uuid) gt_logger.gt_log_err("unexpected error:") gt_logger.gt_log_tab(sys.exc_info()[0]) raise greentea_clean_kettle(gt_instance_uuid) else: # Standard mode of operation # Other instance must provide mutually exclusive access control to platforms and targets try: cli_ret = main_cli(opts, args) except KeyboardInterrupt: gt_logger.gt_log_err("ctrl+c keyboard interrupt!") return -2 # Keyboard interrupt except Exception as e: gt_logger.gt_log_err("unexpected error:") gt_logger.gt_log_tab(str(e)) raise if not any([opts.list_binaries, opts.version]): delta = time() - start # Test execution time delta gt_logger.gt_log("completed in %.2f sec" % delta) if cli_ret: gt_logger.gt_log_err("exited with code %d" % cli_ret) return cli_ret
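# Sketch: the (experimental) --lock flow from main() above, condensed into a helper.
# The file-based semaphore from mbed_greentea_dlm serialises concurrent mbedgt
# instances; here the kettle entry is cleaned in a finally block, which is meant to
# be equivalent to the explicit clean-up calls in the error paths above.
# 'do_testing' is a placeholder callable standing in for main_cli().
from mbed_greentea.mbed_greentea_dlm import (greentea_get_app_sem,
                                             greentea_update_kettle,
                                             greentea_clean_kettle)

def run_locked(do_testing):
    gt_file_sem, gt_file_sem_name, gt_instance_uuid = greentea_get_app_sem()
    with gt_file_sem:
        greentea_update_kettle(gt_instance_uuid)
        try:
            return do_testing()
        finally:
            greentea_clean_kettle(gt_instance_uuid)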
def main_cli(opts, args, gt_instance_uuid=None): """! This is main CLI function with all command line parameters @details This function also implements CLI workflow depending on CLI parameters inputed @return This function doesn't return, it exits to environment with proper success code """ if not MBED_LMTOOLS: gt_logger.gt_log_err("error: mbed-ls proprietary module not installed") return (-1) if not MBED_HOST_TESTS: gt_logger.gt_log_err("error: mbed-host-tests proprietary module not installed") return (-1) # This is how you magically control colours in this piece of art software gt_logger.colorful(not opts.plain) # List available test binaries (names, no extension) if opts.list_binaries: list_binaries_for_targets() return (0) # Prints version and exits if opts.version: print_version() return (0) # We will load hooks from JSON file to support extra behaviour during test execution greentea_hooks = GreenteaHooks(opts.hooks_json) if opts.hooks_json else None # Capture alternative test console inputs, used e.g. in 'yotta test command' if opts.digest_source: enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests) host_test_result = run_host_test(None, None, None, None, None, hooks=greentea_hooks, digest_source=opts.digest_source, enum_host_tests_path=enum_host_tests_path, verbose=opts.verbose_test_result_only) single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1 return (status) ### Read yotta module basic information yotta_module = YottaModule() yotta_module.init() # Read actual yotta module data # Check if NO greentea-client is in module.json of repo to test, if so abort if not yotta_module.check_greentea_client(): gt_logger.gt_log(""" ***************************************************************************************** * We've noticed that NO 'greentea-client' module is specified in * * dependency/testDependency section of this module's 'module.json' file. * * * * This version of Greentea requires 'greentea-client' module. * * Please downgrade to Greentea before v0.2.0: * * * * $ pip install "mbed-greentea<0.2.0" --upgrade * * * * or port your tests to new Async model: https://github.com/ARMmbed/greentea/pull/78 * ***************************************************************************************** """) return (0) ### Selecting yotta targets to process yt_targets = [] # List of yotta targets specified by user used to process during this run if opts.list_of_targets: yt_targets = opts.list_of_targets.split(',') else: # Trying to use locally set yotta target gt_logger.gt_log("checking for yotta target in current directory") gt_logger.gt_log_tab("reason: no --target switch set") current_target = get_mbed_target_from_current_dir() if current_target: gt_logger.gt_log("assuming default target as '%s'"% gt_logger.gt_bright(current_target)) # Assuming first target printed by 'yotta search' will be used yt_targets = [current_target] else: gt_logger.gt_log_tab("yotta target in current directory is not set") gt_logger.gt_log_err("yotta target is not specified. 
Use '%s' or '%s' command to set target"% ( gt_logger.gt_bright('mbedgt -t <yotta_target>'), gt_logger.gt_bright('yotta target <yotta_target>') )) return (-1) ### Query with mbedls for available mbed-enabled devices gt_logger.gt_log("detecting connected mbed-enabled devices...") # Detect devices connected to system mbeds = mbed_lstools.create() mbeds_list = mbeds.list_mbeds_ext() ready_mbed_devices = [] # Devices which can be used (are fully detected) if mbeds_list: gt_logger.gt_log("detected %d device%s"% (len(mbeds_list), 's' if len(mbeds_list) != 1 else '')) for mut in mbeds_list: if not all(mut.values()): gt_logger.gt_log_err("can't detect all properties of the device!") for prop in mut: if not mut[prop]: gt_logger.gt_log_tab("property '%s' is '%s'"% (prop, str(mut[prop]))) else: ready_mbed_devices.append(mut) gt_logger.gt_log_tab("detected '%s' -> '%s', console at '%s', mounted at '%s', target id '%s'"% ( gt_logger.gt_bright(mut['platform_name']), gt_logger.gt_bright(mut['platform_name_unique']), gt_logger.gt_bright(mut['serial_port']), gt_logger.gt_bright(mut['mount_point']), gt_logger.gt_bright(mut['target_id']) )) else: gt_logger.gt_log_err("no devices detected") return (RET_NO_DEVICES) ### Use yotta to search mapping between platform names and available platforms # Convert platform:target, ... mapping to data structure map_platform_to_yt_target = {} if opts.map_platform_to_yt_target: gt_logger.gt_log("user defined platform -> target supported mapping definition (specified with --map-target switch)") p_to_t_mappings = opts.map_platform_to_yt_target.split(',') for mapping in p_to_t_mappings: if len(mapping.split(':')) == 2: platform, yt_target = mapping.split(':') if platform not in map_platform_to_yt_target: map_platform_to_yt_target[platform] = [] map_platform_to_yt_target[platform].append(yt_target) gt_logger.gt_log_tab("mapped platform '%s' to be compatible with '%s'"% ( gt_logger.gt_bright(platform), gt_logger.gt_bright(yt_target) )) else: gt_logger.gt_log_tab("unknown format '%s', use 'platform:target' format"% mapping) # Check if mbed classic target name can be translated to yotta target name mut_info_map = {} # platform_name : mut_info_dict, extract yt_targets with e.g. 
[k["yotta_target"] for k in d['K64F']["yotta_targets"]] for mut in ready_mbed_devices: platfrom_name = mut['platform_name'] if platfrom_name not in mut_info_map: mut_info = get_mbed_clasic_target_info(platfrom_name, map_platform_to_yt_target, use_yotta_registry=opts.yotta_search_for_mbed_target) if mut_info: mut_info_map[platfrom_name] = mut_info ### List of unique ready platform names unique_mbed_devices = list(set(mut_info_map.keys())) ### Identify which targets has to be build because platforms are present yt_target_platform_map = {} # yt_target_to_test : platforms to test on for yt_target in yt_targets: for platform_name in unique_mbed_devices: if yt_target in [k["yotta_target"] for k in mut_info_map[platform_name]["yotta_targets"]]: if yt_target not in yt_target_platform_map: yt_target_platform_map[yt_target] = [] if platform_name not in yt_target_platform_map[yt_target]: yt_target_platform_map[yt_target].append(platform_name) ### We can filter in only specific target ids accepted_target_ids = None if opts.use_target_ids: gt_logger.gt_log("filtering out target ids not on below list (specified with --use-tids switch)") accepted_target_ids = opts.use_target_ids.split(',') for tid in accepted_target_ids: gt_logger.gt_log_tab("accepting target id '%s'"% gt_logger.gt_bright(tid)) test_exec_retcode = 0 # Decrement this value each time test case result is not 'OK' test_platforms_match = 0 # Count how many tests were actually ran with current settings target_platforms_match = 0 # Count how many platforms were actually tested with current settings test_report = {} # Test report used to export to Junit, HTML etc... muts_to_test = [] # MUTs to actually be tested test_queue = Queue() # contains information about test_bin and image_path for each test case test_result_queue = Queue() # used to store results of each thread execute_threads = [] # list of threads to run test cases ### check if argument of --parallel mode is a integer and greater or equal 1 try: parallel_test_exec = int(opts.parallel_test_exec) if parallel_test_exec < 1: parallel_test_exec = 1 except ValueError: gt_logger.gt_log_err("argument of mode --parallel is not a int, disable parallel mode") parallel_test_exec = 1 # Values used to generate random seed for test execution order shuffle SHUFFLE_SEED_ROUND = 10 # Value used to round float random seed shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND) # Set shuffle seed if it is provided with command line option if opts.shuffle_test_seed: shuffle_random_seed = round(float(opts.shuffle_test_seed), SHUFFLE_SEED_ROUND) ### Testing procedures, for each target, for each target's compatible platform for yotta_target_name in yt_target_platform_map: gt_logger.gt_log("processing '%s' yotta target compatible platforms..."% gt_logger.gt_bright(yotta_target_name)) for platform_name in yt_target_platform_map[yotta_target_name]: gt_logger.gt_log("processing '%s' platform..."% gt_logger.gt_bright(platform_name)) ### Select MUTS to test from list of available MUTS to start testing mut = None number_of_parallel_instances = 1 for mbed_dev in ready_mbed_devices: if accepted_target_ids and mbed_dev['target_id'] not in accepted_target_ids: continue if mbed_dev['platform_name'] == platform_name: mut = mbed_dev muts_to_test.append(mbed_dev) gt_logger.gt_log("using platform '%s' for test:"% gt_logger.gt_bright(platform_name)) for k in mbed_dev: gt_logger.gt_log_tab("%s = '%s'"% (k, mbed_dev[k])) if number_of_parallel_instances < parallel_test_exec: number_of_parallel_instances += 1 else: break 
# Configuration print mode: if opts.verbose_test_configuration_only: continue if mut: target_platforms_match += 1 # Demo mode: --run implementation (already added --run to mbedhtrun) # We want to pass file name to mbedhtrun (--run NAME => -f NAME_ and run only one binary if opts.run_app: gt_logger.gt_log("running '%s' for '%s'"% (gt_logger.gt_bright(opts.run_app), gt_logger.gt_bright(yotta_target_name))) disk = mut['mount_point'] port = mut['serial_port'] micro = mut['platform_name'] program_cycle_s = mut_info_map[platfrom_name]['properties']['program_cycle_s'] copy_method = opts.copy_method if opts.copy_method else 'shell' enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests) yotta_config = YottaConfig() yotta_config.init(yotta_target_name) yotta_config_baudrate = yotta_config.get_baudrate() # We will force configuration specific baudrate if port: port = "%s:%d"% (port, yotta_config_baudrate) test_platforms_match += 1 host_test_result = run_host_test(opts.run_app, disk, port, yotta_target_name, mut['target_id'], micro=micro, copy_method=copy_method, program_cycle_s=program_cycle_s, digest_source=opts.digest_source, json_test_cfg=opts.json_test_configuration, run_app=opts.run_app, enum_host_tests_path=enum_host_tests_path, verbose=True) single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1 if single_test_result != TEST_RESULT_OK: test_exec_retcode += 1 continue # Regression test mode: # Building sources for given target and perform normal testing yotta_result, yotta_ret = True, 0 # Skip build and assume 'yotta build' was successful if opts.skip_yotta_build: gt_logger.gt_log("skipping calling yotta (specified with --skip-build option)") else: yotta_result, yotta_ret = build_with_yotta(yotta_target_name, verbose=opts.verbose, build_to_release=opts.build_to_release, build_to_debug=opts.build_to_debug) # We need to stop executing if yotta build fails if not yotta_result: gt_logger.gt_log_err("yotta returned %d"% yotta_ret) return (RET_YOTTA_BUILD_FAIL) if opts.only_build_tests: continue # Build phase will be followed by test execution for each target if yotta_result and not opts.only_build_tests: binary_type = mut_info_map[platform_name]['properties']['binary_type'] ctest_test_list = load_ctest_testsuite(os.path.join('.', 'build', yotta_target_name), binary_type=binary_type) #TODO no tests to execute filtered_ctest_test_list = create_filtered_test_list(ctest_test_list, opts.test_by_names, opts.skip_test) gt_logger.gt_log("running %d test%s for target '%s' and platform '%s'"% ( len(filtered_ctest_test_list), "s" if len(filtered_ctest_test_list) != 1 else "", gt_logger.gt_bright(yotta_target_name), gt_logger.gt_bright(platform_name) )) # Test execution order can be shuffled (also with provided random seed) # for test execution reproduction. 
filtered_ctest_test_list_keys = filtered_ctest_test_list.keys() if opts.shuffle_test_order: # We want to shuffle test names randomly random.shuffle(filtered_ctest_test_list_keys, lambda: shuffle_random_seed) for test_bin in filtered_ctest_test_list_keys: image_path = filtered_ctest_test_list[test_bin] test = {"test_bin":test_bin, "image_path":image_path} test_queue.put(test) #for test_bin, image_path in filtered_ctest_test_list.iteritems(): # test = {"test_bin":test_bin, "image_path":image_path} # test_queue.put(test) number_of_threads = 0 for mut in muts_to_test: ################################################################# # Experimental, parallel test execution ################################################################# if number_of_threads < parallel_test_exec: args = (test_result_queue, test_queue, opts, mut, mut_info, yotta_target_name, greentea_hooks) t = Thread(target=run_test_thread, args=args) execute_threads.append(t) number_of_threads += 1 gt_logger.gt_log_tab("use %s instance%s for testing" % (len(execute_threads), 's' if len(execute_threads) != 1 else '')) for t in execute_threads: t.daemon = True t.start() # merge partial test reports from diffrent threads to final test report for t in execute_threads: try: t.join() #blocking test_return_data = test_result_queue.get(False) except Exception as e: # No test report generated gt_logger.gt_log_err("could not generate test report" + str(e)) test_exec_retcode += -1000 return test_exec_retcode test_platforms_match += test_return_data['test_platforms_match'] test_exec_retcode += test_return_data['test_exec_retcode'] partial_test_report = test_return_data['test_report'] # todo: find better solution, maybe use extend for report_key in partial_test_report.keys(): if report_key not in test_report: test_report[report_key] = {} test_report.update(partial_test_report) else: test_report[report_key].update(partial_test_report[report_key]) if opts.verbose_test_configuration_only: print print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target" return (0) gt_logger.gt_log("all tests finished!") # We will execute post test hooks on tests for yotta_target in test_report: test_name_list = [] # All test case names for particular yotta target for test_name in test_report[yotta_target]: test = test_report[yotta_target][test_name] # Test was successful if test['single_test_result'] in [TEST_RESULT_OK, TEST_RESULT_FAIL]: test_name_list.append(test_name) # Call hook executed for each test, just after all tests are finished if greentea_hooks: # We can execute this test hook just after all tests are finished ('hook_post_test_end') format = { "test_name": test_name, "test_bin_name": test['test_bin_name'], "image_path": test['image_path'], "build_path": test['build_path'], "build_path_abs": test['build_path_abs'], "yotta_target_name": yotta_target, } greentea_hooks.run_hook_ext('hook_post_test_end', format) if greentea_hooks: # Call hook executed for each yotta target, just after all tests are finished build_path = os.path.join("./build", yotta_target) build_path_abs = os.path.abspath(build_path) # We can execute this test hook just after all tests are finished ('hook_post_test_end') format = { "build_path": build_path, "build_path_abs": build_path_abs, "test_name_list": test_name_list, "yotta_target_name": yotta_target, } greentea_hooks.run_hook_ext('hook_post_all_test_end', format) # This tool is designed to work in CI # We want to return success codes based on tool actions, # only if testes were executed and 
all passed we want to # return 0 (success) if not opts.only_build_tests: # Prints shuffle seed gt_logger.gt_log("shuffle seed: %.*f"% (SHUFFLE_SEED_ROUND, shuffle_random_seed)) # Reports (to file) if opts.report_junit_file_name: gt_logger.gt_log("exporting to JUnit file '%s'..."% gt_logger.gt_bright(opts.report_junit_file_name)) junit_report = exporter_testcase_junit(test_report, test_suite_properties=yotta_module.get_data()) with open(opts.report_junit_file_name, 'w') as f: f.write(junit_report) if opts.report_text_file_name: gt_logger.gt_log("exporting to text '%s'..."% gt_logger.gt_bright(opts.report_text_file_name)) text_report, text_results = exporter_text(test_report) text_testcase_report, text_testcase_results = exporter_testcase_text(test_report) with open(opts.report_text_file_name, 'w') as f: f.write('\n'.join([text_report, text_results, text_testcase_report, text_testcase_results])) # Reports (to console) if opts.report_json: # We will not print summary and json report together gt_logger.gt_log("json test report:") print exporter_json(test_report) else: # Final summary if test_report: # Test suite report gt_logger.gt_log("test suite report:") text_report, text_results = exporter_text(test_report) print text_report gt_logger.gt_log("test suite results: " + text_results) # test case detailed report gt_logger.gt_log("test case report:") text_testcase_report, text_testcase_results = exporter_testcase_text(test_report, test_suite_properties=yotta_module.get_data()) print text_testcase_report gt_logger.gt_log("test case results: " + text_testcase_results) # This flag guards 'build only' so we expect only yotta errors if test_platforms_match == 0: # No tests were executed gt_logger.gt_log_warn("no platform/target matching tests were found!") test_exec_retcode += -10 if target_platforms_match == 0: # No platforms were tested gt_logger.gt_log_warn("no target matching platforms were found!") test_exec_retcode += -100 return (test_exec_retcode)
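# Sketch: the --map-target parsing performed in main_cli() above, as a standalone
# helper. Input is a comma-separated list of 'platform:yotta_target' pairs; the
# result maps one platform name to the list of yotta targets declared compatible
# with it. Malformed pairs are skipped here (main_cli() logs a warning instead).
def parse_map_target_switch(switch_value):
    mapping = {}
    for pair in switch_value.split(','):
        parts = pair.split(':')
        if len(parts) != 2:
            continue
        platform, yt_target = parts
        mapping.setdefault(platform, []).append(yt_target)
    return mapping

# Example: parse_map_target_switch("K64F:frdm-k64f-gcc,K64F:frdm-k64f-armcc")
#          -> {'K64F': ['frdm-k64f-gcc', 'frdm-k64f-armcc']}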
def run_test_thread(test_result_queue, test_queue, opts, mut, build, build_path, greentea_hooks): test_exec_retcode = 0 test_platforms_match = 0 test_report = {} while not test_queue.empty(): try: test = test_queue.get(False) except Exception as e: gt_logger.gt_log_err(str(e)) break test_result = "SKIPPED" disk = mut["mount_point"] port = mut["serial_port"] micro = mut["platform_name"] program_cycle_s = get_platform_property(micro, "program_cycle_s") forced_reset_timeout = get_platform_property(micro, "forced_reset_timeout") copy_method = opts.copy_method if opts.copy_method else "shell" verbose = opts.verbose_test_result_only enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests) test_platforms_match += 1 host_test_result = run_host_test( test["image_path"], disk, port, build_path, mut["target_id"], micro=micro, copy_method=copy_method, program_cycle_s=program_cycle_s, forced_reset_timeout=forced_reset_timeout, digest_source=opts.digest_source, json_test_cfg=opts.json_test_configuration, enum_host_tests_path=enum_host_tests_path, global_resource_mgr=opts.global_resource_mgr, verbose=verbose, ) # Some error in htrun, abort test execution if isinstance(host_test_result, int): # int(host_test_result) > 0 - Call to mbedhtrun failed # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun gt_logger.gt_log_err("run_test_thread.run_host_test() failed, aborting...") break # If execution was successful 'run_host_test' return tuple with results single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = ( host_test_result ) test_result = single_test_result build_path_abs = os.path.abspath(build_path) if single_test_result != TEST_RESULT_OK: test_exec_retcode += 1 if single_test_result in [TEST_RESULT_OK, TEST_RESULT_FAIL]: if greentea_hooks: # Test was successful # We can execute test hook just after test is finished ('hook_test_end') format = { "test_name": test["test_bin"], "test_bin_name": os.path.basename(test["image_path"]), "image_path": test["image_path"], "build_path": build_path, "build_path_abs": build_path_abs, "build_name": build, } greentea_hooks.run_hook_ext("hook_test_end", format) # Update report for optional reporting feature test_suite_name = test["test_bin"].lower() if build not in test_report: test_report[build] = {} if test_suite_name not in test_report[build]: test_report[build][test_suite_name] = {} if not test_cases_summary and not result_test_cases: gt_logger.gt_log_warn("test case summary event not found") gt_logger.gt_log_tab("no test case report present, assuming test suite to be a single test case!") # We will map test suite result to test case to # output valid test case in report # Generate "artificial" test case name from test suite name# # E.g: # mbed-drivers-test-dev_null -> dev_null test_case_name = test_suite_name test_str_idx = test_suite_name.find("-test-") if test_str_idx != -1: test_case_name = test_case_name[test_str_idx + 6 :] gt_logger.gt_log_tab("test suite: %s" % test_suite_name) gt_logger.gt_log_tab("test case: %s" % test_case_name) # Test case result: OK, FAIL or ERROR tc_result_text = {"OK": "OK", "FAIL": "FAIL"}.get(single_test_result, "ERROR") # Test case integer success code OK, FAIL and ERROR: (0, >0, <0) tc_result = {"OK": 0, "FAIL": 1024, "ERROR": -1024}.get(tc_result_text, "-2048") # Test case passes and failures: (1 pass, 0 failures) or (0 passes, 1 failure) tc_passed, tc_failed = {0: (1, 0)}.get(tc_result, (0, 1)) # Test case report build for whole 
binary # Add test case made from test suite result to test case report result_test_cases = { test_case_name: { "duration": single_testduration, "time_start": 0.0, "time_end": 0.0, "utest_log": single_test_output.splitlines(), "result_text": tc_result_text, "passed": tc_passed, "failed": tc_failed, "result": tc_result, } } # Test summary build for whole binary (as a test case) test_cases_summary = (tc_passed, tc_failed) gt_logger.gt_log("test on hardware with target id: %s" % (mut["target_id"])) gt_logger.gt_log( "test suite '%s' %s %s in %.2f sec" % (test["test_bin"], "." * (80 - len(test["test_bin"])), test_result, single_testduration) ) # Test report build for whole binary test_report[build][test_suite_name]["single_test_result"] = single_test_result test_report[build][test_suite_name]["single_test_output"] = single_test_output test_report[build][test_suite_name]["elapsed_time"] = single_testduration test_report[build][test_suite_name]["platform_name"] = micro test_report[build][test_suite_name]["copy_method"] = copy_method test_report[build][test_suite_name]["testcase_result"] = result_test_cases test_report[build][test_suite_name]["build_path"] = build_path test_report[build][test_suite_name]["build_path_abs"] = build_path_abs test_report[build][test_suite_name]["image_path"] = test["image_path"] test_report[build][test_suite_name]["test_bin_name"] = os.path.basename(test["image_path"]) passes_cnt, failures_cnt = 0, 0 for tc_name in sorted(result_test_cases.keys()): gt_logger.gt_log_tab( "test case: '%s' %s %s in %.2f sec" % ( tc_name, "." * (80 - len(tc_name)), result_test_cases[tc_name].get("result_text", "_"), result_test_cases[tc_name].get("duration", 0.0), ) ) if result_test_cases[tc_name].get("result_text", "_") == "OK": passes_cnt += 1 else: failures_cnt += 1 if test_cases_summary: passes, failures = test_cases_summary gt_logger.gt_log( "test case summary: %d pass%s, %d failur%s" % (passes, "" if passes == 1 else "es", failures, "e" if failures == 1 else "es") ) if passes != passes_cnt or failures != failures_cnt: gt_logger.gt_log_err("utest test case summary mismatch: utest reported passes and failures miscount!") gt_logger.gt_log_tab("reported by utest: passes = %d, failures %d)" % (passes, failures)) gt_logger.gt_log_tab("test case result count: passes = %d, failures %d)" % (passes_cnt, failures_cnt)) if single_test_result != "OK" and not verbose and opts.report_fails: # In some cases we want to print console to see why test failed # even if we are not in verbose mode gt_logger.gt_log_tab("test failed, reporting console output (specified with --report-fails option)") print print single_test_output # greentea_release_target_id(mut['target_id'], gt_instance_uuid) test_result_queue.put( { "test_platforms_match": test_platforms_match, "test_exec_retcode": test_exec_retcode, "test_report": test_report, } ) return
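# ---------------------------------------------------------------------------
# Illustrative sketch (not part of greentea): when a binary emits no per-test-
# case events, run_test_thread() above synthesizes a single test case from the
# whole suite result. The hypothetical helper below isolates that mapping so
# it is easier to follow; the name _suite_result_to_test_case and its return
# shape are assumptions made for this example only.
def _suite_result_to_test_case(test_suite_name, single_test_result):
    """Derive (test_case_name, result_text, result_code, passed, failed)."""
    # 'mbed-drivers-test-dev_null' -> 'dev_null'
    test_case_name = test_suite_name
    idx = test_suite_name.find("-test-")
    if idx != -1:
        test_case_name = test_suite_name[idx + len("-test-"):]
    # Text and integer result codes, mirroring the dictionaries used above
    result_text = {"OK": "OK", "FAIL": "FAIL"}.get(single_test_result, "ERROR")
    result_code = {"OK": 0, "FAIL": 1024, "ERROR": -1024}[result_text]
    passed, failed = (1, 0) if result_code == 0 else (0, 1)
    return test_case_name, result_text, result_code, passed, failed

# Example: _suite_result_to_test_case('mbed-drivers-test-dev_null', 'OK')
# returns ('dev_null', 'OK', 0, 1, 0).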
def main(): """ Command line entry point wrapping main_cli() """ parser = optparse.OptionParser() parser.add_option( '-t', '--target', dest='list_of_targets', help= 'You can specify a list of targets you want to build. Use commas to separate them' ) parser.add_option( '-n', '--test-by-names', dest='test_by_names', help= 'Runs only tests enumerated in this switch. Use commas to separate test case names.' ) parser.add_option( '-i', '--skip-test', dest='skip_test', help= 'Skip tests enumerated in this switch. Use commas to separate test case names.' ) parser.add_option( "-O", "--only-build", action="store_true", dest="only_build_tests", default=False, help= "Only build repository and tests, skip actual test procedures (flashing etc.)" ) parser.add_option("-S", "--skip-build", action="store_true", dest="skip_yotta_build", default=False, help="Skip calling 'yotta build' on this module") copy_methods_str = "Plugin support: " + ', '.join( mbed_host_tests.host_tests_plugins.get_plugin_caps('CopyMethod')) parser.add_option("-c", "--copy", dest="copy_method", help="Copy (flash the target) method selector. " + copy_methods_str, metavar="COPY_METHOD") parser.add_option( '', '--parallel', dest='parallel_test_exec', default=1, help= 'Experimental: execute test runners in parallel for MUTs connected to your host (speeds up test result collection)' ) parser.add_option( "-e", "--enum-host-tests", dest="enum_host_tests", help= "Define directory with yotta module local host tests. Default: ./test/host_tests" ) parser.add_option( '', '--config', dest='verbose_test_configuration_only', default=False, action="store_true", help='Displays connected boards and detected targets and exits.') parser.add_option( '', '--release', dest='build_to_release', default=False, action="store_true", help='If possible force build in release mode (yotta -r).') parser.add_option('', '--debug', dest='build_to_debug', default=False, action="store_true", help='If possible force build in debug mode (yotta -d).') parser.add_option('-l', '--list', dest='list_binaries', default=False, action="store_true", help='List available binaries') parser.add_option( '-m', '--map-target', dest='map_platform_to_yt_target', help= 'List of custom mappings between platform names and yotta targets. Comma-separated list of PLATFORM:TARGET tuples' ) parser.add_option( '', '--use-tids', dest='use_target_ids', help= 'Specify explicitly which devices Greentea can use for testing by providing a comma-separated list of allowed Target IDs' ) parser.add_option('-u', '--shuffle', dest='shuffle_test_order', default=False, action="store_true", help='Shuffles test execution order') parser.add_option( '', '--shuffle-seed', dest='shuffle_test_seed', default=None, help= 'Shuffle seed (if you want to reproduce your shuffle order, please use the seed provided in the test summary)' ) parser.add_option( '', '--lock', dest='lock_by_target', default=False, action="store_true", help= 'Use a simple resource locking mechanism to run multiple application instances' ) parser.add_option( '', '--digest', dest='digest_source', help= 'Redirect input from where test suite should take console input. 
You can use stdin or file name to get test case console output' ) parser.add_option('-H', '--hooks', dest='hooks_json', help='Load hooks used drive extra functionality') parser.add_option( '', '--test-cfg', dest='json_test_configuration', help='Pass to host test data with host test configuration') parser.add_option( '', '--run', dest='run_app', help='Flash, reset and dump serial from selected binary application') parser.add_option( '', '--report-junit', dest='report_junit_file_name', help= 'You can log test suite results in form of JUnit compliant XML report') parser.add_option('', '--report-text', dest='report_text_file_name', help='You can log test suite results to text file') parser.add_option('', '--report-json', dest='report_json', default=False, action="store_true", help='Outputs test results in JSON') parser.add_option('', '--report-fails', dest='report_fails', default=False, action="store_true", help='Prints console outputs for failed tests') parser.add_option( '', '--yotta-registry', dest='yotta_search_for_mbed_target', default=False, action="store_true", help= 'Use on-line yotta registry to search for compatible with connected mbed devices yotta targets. Default: search is done in yotta_targets directory' ) parser.add_option('-V', '--verbose-test-result', dest='verbose_test_result_only', default=False, action="store_true", help='Prints test serial output') parser.add_option('-v', '--verbose', dest='verbose', default=False, action="store_true", help='Verbose mode (prints some extra information)') parser.add_option('', '--plain', dest='plain', default=False, action="store_true", help='Do not use colours while logging') parser.add_option('', '--version', dest='version', default=False, action="store_true", help='Prints package version and exits') parser.description = """This automated test script is used to test mbed SDK 3.0 on mbed-enabled devices with support from yotta build tool""" parser.epilog = """Example: mbedgt --target frdm-k64f-gcc""" (opts, args) = parser.parse_args() cli_ret = 0 start = time() if opts.lock_by_target: # We are using Greentea proprietary locking mechanism to lock between platforms and targets gt_logger.gt_log("using (experimental) simple locking mechanism") gt_logger.gt_log_tab("kettle: %s" % GREENTEA_KETTLE_PATH) gt_file_sem, gt_file_sem_name, gt_instance_uuid = greentea_get_app_sem( ) with gt_file_sem: greentea_update_kettle(gt_instance_uuid) try: cli_ret = main_cli(opts, args, gt_instance_uuid) except KeyboardInterrupt: greentea_clean_kettle(gt_instance_uuid) gt_logger.gt_log_err("ctrl+c keyboard interrupt!") return (-2) # Keyboard interrupt except: greentea_clean_kettle(gt_instance_uuid) gt_logger.gt_log_err("unexpected error:") gt_logger.gt_log_tab(sys.exc_info()[0]) raise greentea_clean_kettle(gt_instance_uuid) else: # Standard mode of operation # Other instance must provide mutually exclusive access control to platforms and targets try: cli_ret = main_cli(opts, args) except KeyboardInterrupt: gt_logger.gt_log_err("ctrl+c keyboard interrupt!") return (-2) # Keyboard interrupt except Exception as e: gt_logger.gt_log_err("unexpected error:") gt_logger.gt_log_tab(str(e)) raise if not any([opts.list_binaries, opts.version]): delta = time() - start # Test execution time delta gt_logger.gt_log("completed in %.2f sec" % delta) if cli_ret: gt_logger.gt_log_err("exited with code %d" % cli_ret) return (cli_ret)
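# ---------------------------------------------------------------------------
# Illustrative sketch (not part of greentea): the --lock branch in main() above
# follows an acquire -> register -> run -> clean-up pattern around main_cli().
# The hypothetical context manager below expresses the same pattern in one
# place; it assumes greentea_get_app_sem(), greentea_update_kettle() and
# greentea_clean_kettle() behave exactly as they are used above.
from contextlib import contextmanager

@contextmanager
def _locked_greentea_session():
    """Register this greentea instance in the kettle for the duration of a run."""
    gt_file_sem, gt_file_sem_name, gt_instance_uuid = greentea_get_app_sem()
    with gt_file_sem:
        greentea_update_kettle(gt_instance_uuid)
        try:
            yield gt_instance_uuid
        finally:
            # Always remove this instance from the kettle, even on errors
            greentea_clean_kettle(gt_instance_uuid)

# Usage sketch:
#   with _locked_greentea_session() as gt_instance_uuid:
#       cli_ret = main_cli(opts, args, gt_instance_uuid)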
def get_test_spec(opts): """! Encapsulates how we get the test specification and load it from a file or from a yotta module @return Returns tuple of (test specification, ret code). Test specification == None if test spec load was not successful """ test_spec = None # Check if test_spec.json file exists, if so we will pick it up as default file and load it test_spec_file_name = opts.test_spec test_spec_file_name_list = [] # Note: test_spec.json will have higher priority than module.json file # so if we are inside directory with module.json and test_spec.json we will use test spec file # instead of using yotta's module.json file def get_all_test_specs_from_build_dir(path_to_scan): """! Searches for all test_spec.json files @param path_to_scan Directory path used to recursively search for test_spec.json @result List of locations of test_spec.json """ return [ os.path.join(dp, f) for dp, dn, filenames in os.walk(path_to_scan) for f in filenames if f == 'test_spec.json' ] def merge_multiple_test_specifications_from_file_list( test_spec_file_name_list): """! For each file in test_spec_file_name_list merge all test specifications into one @param test_spec_file_name_list List of paths to different test specifications @return TestSpec object with all test specification data inside """ def copy_builds_between_test_specs(source, destination): """! Copies build key-value pairs between two test_spec dicts @param source Source dictionary @param destination Dictionary to which 'builds' key-values will be applied @return Dictionary with merged source """ result = destination.copy() if 'builds' in source and 'builds' in destination: for k in source['builds']: result['builds'][k] = source['builds'][k] return result merged_test_spec = {} for test_spec_file in test_spec_file_name_list: gt_logger.gt_log_tab("using '%s'" % test_spec_file) try: with open(test_spec_file, 'r') as f: test_spec_data = json.load(f) merged_test_spec = copy_builds_between_test_specs( merged_test_spec, test_spec_data) except Exception as e: gt_logger.gt_log_err( "Unexpected error while processing '%s' test specification file" % test_spec_file) gt_logger.gt_log_tab(str(e)) merged_test_spec = {} test_spec = TestSpec() test_spec.parse(merged_test_spec) return test_spec # Test specification look-up if opts.test_spec: # Loading test specification from command line specified file gt_logger.gt_log( "test specification file '%s' (specified with --test-spec option)" % opts.test_spec) elif os.path.exists('test_spec.json'): # Test specification file exists in current directory gt_logger.gt_log("using 'test_spec.json' from current directory!") test_spec_file_name = 'test_spec.json' elif 'BUILD' in os.listdir(os.getcwd()): # Checking 'BUILD' directory for test specifications # Using `os.listdir()` since it preserves case test_spec_file_name_list = get_all_test_specs_from_build_dir('BUILD') elif os.path.exists('.build'): # Checking .build directory for test specifications test_spec_file_name_list = get_all_test_specs_from_build_dir('.build') elif os.path.exists('mbed-os') and 'BUILD' in os.listdir('mbed-os'): # Checking mbed-os/BUILD directory for test specifications # Using `os.listdir()` since it preserves case test_spec_file_name_list = get_all_test_specs_from_build_dir( os.path.join('mbed-os', 'BUILD')) elif os.path.exists(os.path.join('mbed-os', '.build')): # Checking mbed-os/.build directory for test specifications test_spec_file_name_list = get_all_test_specs_from_build_dir( os.path.join('mbed-os', '.build')) # Actual load and processing 
of test specification from sources if test_spec_file_name: # Test specification from command line (--test-spec) or default test_spec.json will be used gt_logger.gt_log("using '%s' from current directory!" % test_spec_file_name) test_spec = TestSpec(test_spec_file_name) if opts.list_binaries: list_binaries_for_builds(test_spec) return None, 0 elif test_spec_file_name_list: # Merge multiple test specs into one and keep calm gt_logger.gt_log( "using multiple test specifications from current directory!") test_spec = merge_multiple_test_specifications_from_file_list( test_spec_file_name_list) if opts.list_binaries: list_binaries_for_builds(test_spec) return None, 0 elif os.path.exists('module.json'): # If inside yotta module load module data and generate test spec gt_logger.gt_log("using 'module.json' from current directory!") if opts.list_binaries: # List available test binaries (names, no extension) list_binaries_for_targets() return None, 0 else: test_spec = get_test_spec_from_yt_module(opts) else: gt_logger.gt_log_err( "greentea should be run inside a Yotta module or --test-spec switch should be used" ) return None, -1 return test_spec, 0
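# ---------------------------------------------------------------------------
# Illustrative sketch (not part of greentea): merging several test_spec.json
# files above boils down to folding their 'builds' sections into one dict
# before handing it to TestSpec.parse(). The hypothetical helper below shows
# that fold on in-memory dicts; it is simplified, does not reproduce the exact
# conflict-resolution order of copy_builds_between_test_specs(), and the
# contents of each build entry in the example are placeholders only.
def _merge_builds(test_spec_dicts):
    """Fold the 'builds' sections of several test_spec dicts into one dict."""
    merged = {"builds": {}}
    for spec in test_spec_dicts:
        for build_name, build_data in spec.get("builds", {}).items():
            merged["builds"][build_name] = build_data
    return merged

# Example:
#   spec_a = {"builds": {"K64F-ARM": {"platform": "K64F"}}}
#   spec_b = {"builds": {"K64F-GCC": {"platform": "K64F"}}}
#   _merge_builds([spec_a, spec_b])
#   -> {"builds": {"K64F-ARM": {"platform": "K64F"},
#                  "K64F-GCC": {"platform": "K64F"}}}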
def main_cli(opts, args, gt_instance_uuid=None): """! This is main CLI function with all command line parameters @details This function also implements CLI workflow depending on CLI parameters inputed @return This function doesn't return, it exits to environment with proper success code """ if not MBED_LMTOOLS: gt_logger.gt_log_err("error: mbed-ls proprietary module not installed") return (-1) if not MBED_HOST_TESTS: gt_logger.gt_log_err( "error: mbed-host-tests proprietary module not installed") return (-1) # This is how you magically control colours in this piece of art software gt_logger.colorful(not opts.plain) # List available test binaries (names, no extension) if opts.list_binaries: list_binaries_for_targets() return (0) # Prints version and exits if opts.version: print_version() return (0) # We will load hooks from JSON file to support extra behaviour during test execution greentea_hooks = GreenteaHooks( opts.hooks_json) if opts.hooks_json else None # Capture alternative test console inputs, used e.g. in 'yotta test command' if opts.digest_source: enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests) host_test_result = run_host_test( None, None, None, None, None, hooks=greentea_hooks, digest_source=opts.digest_source, enum_host_tests_path=enum_host_tests_path, verbose=opts.verbose_test_result_only) single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result status = TEST_RESULTS.index( single_test_result) if single_test_result in TEST_RESULTS else -1 return (status) ### Read yotta module basic information yotta_module = YottaModule() yotta_module.init() # Read actual yotta module data # Check if NO greentea-client is in module.json of repo to test, if so abort if not yotta_module.check_greentea_client(): gt_logger.gt_log(""" ***************************************************************************************** * We've noticed that NO 'greentea-client' module is specified in * * dependency/testDependency section of this module's 'module.json' file. * * * * This version of Greentea requires 'greentea-client' module. * * Please downgrade to Greentea before v0.2.0: * * * * $ pip install "mbed-greentea<0.2.0" --upgrade * * * * or port your tests to new Async model: https://github.com/ARMmbed/greentea/pull/78 * ***************************************************************************************** """) return (0) ### Selecting yotta targets to process yt_targets = [ ] # List of yotta targets specified by user used to process during this run if opts.list_of_targets: yt_targets = opts.list_of_targets.split(',') else: # Trying to use locally set yotta target gt_logger.gt_log("checking for yotta target in current directory") gt_logger.gt_log_tab("reason: no --target switch set") current_target = get_mbed_target_from_current_dir() if current_target: gt_logger.gt_log("assuming default target as '%s'" % gt_logger.gt_bright(current_target)) # Assuming first target printed by 'yotta search' will be used yt_targets = [current_target] else: gt_logger.gt_log_tab( "yotta target in current directory is not set") gt_logger.gt_log_err( "yotta target is not specified. 
Use '%s' or '%s' command to set target" % (gt_logger.gt_bright('mbedgt -t <yotta_target>'), gt_logger.gt_bright('yotta target <yotta_target>'))) return (-1) ### Query with mbedls for available mbed-enabled devices gt_logger.gt_log("detecting connected mbed-enabled devices...") # Detect devices connected to system mbeds = mbed_lstools.create() mbeds_list = mbeds.list_mbeds_ext() ready_mbed_devices = [] # Devices which can be used (are fully detected) if mbeds_list: gt_logger.gt_log( "detected %d device%s" % (len(mbeds_list), 's' if len(mbeds_list) != 1 else '')) for mut in mbeds_list: if not all(mut.values()): gt_logger.gt_log_err( "can't detect all properties of the device!") for prop in mut: if not mut[prop]: gt_logger.gt_log_tab("property '%s' is '%s'" % (prop, str(mut[prop]))) else: ready_mbed_devices.append(mut) gt_logger.gt_log_tab( "detected '%s' -> '%s', console at '%s', mounted at '%s', target id '%s'" % (gt_logger.gt_bright(mut['platform_name']), gt_logger.gt_bright(mut['platform_name_unique']), gt_logger.gt_bright(mut['serial_port']), gt_logger.gt_bright(mut['mount_point']), gt_logger.gt_bright(mut['target_id']))) else: gt_logger.gt_log_err("no devices detected") return (RET_NO_DEVICES) ### Use yotta to search mapping between platform names and available platforms # Convert platform:target, ... mapping to data structure map_platform_to_yt_target = {} if opts.map_platform_to_yt_target: gt_logger.gt_log( "user defined platform -> target supported mapping definition (specified with --map-target switch)" ) p_to_t_mappings = opts.map_platform_to_yt_target.split(',') for mapping in p_to_t_mappings: if len(mapping.split(':')) == 2: platform, yt_target = mapping.split(':') if platform not in map_platform_to_yt_target: map_platform_to_yt_target[platform] = [] map_platform_to_yt_target[platform].append(yt_target) gt_logger.gt_log_tab( "mapped platform '%s' to be compatible with '%s'" % (gt_logger.gt_bright(platform), gt_logger.gt_bright(yt_target))) else: gt_logger.gt_log_tab( "unknown format '%s', use 'platform:target' format" % mapping) # Check if mbed classic target name can be translated to yotta target name mut_info_map = { } # platform_name : mut_info_dict, extract yt_targets with e.g. 
[k["yotta_target"] for k in d['K64F']["yotta_targets"]] for mut in ready_mbed_devices: platfrom_name = mut['platform_name'] if platfrom_name not in mut_info_map: mut_info = get_mbed_clasic_target_info( platfrom_name, map_platform_to_yt_target, use_yotta_registry=opts.yotta_search_for_mbed_target) if mut_info: mut_info_map[platfrom_name] = mut_info ### List of unique ready platform names unique_mbed_devices = list(set(mut_info_map.keys())) ### Identify which targets has to be build because platforms are present yt_target_platform_map = {} # yt_target_to_test : platforms to test on for yt_target in yt_targets: for platform_name in unique_mbed_devices: if yt_target in [ k["yotta_target"] for k in mut_info_map[platform_name]["yotta_targets"] ]: if yt_target not in yt_target_platform_map: yt_target_platform_map[yt_target] = [] if platform_name not in yt_target_platform_map[yt_target]: yt_target_platform_map[yt_target].append(platform_name) ### We can filter in only specific target ids accepted_target_ids = None if opts.use_target_ids: gt_logger.gt_log( "filtering out target ids not on below list (specified with --use-tids switch)" ) accepted_target_ids = opts.use_target_ids.split(',') for tid in accepted_target_ids: gt_logger.gt_log_tab("accepting target id '%s'" % gt_logger.gt_bright(tid)) test_exec_retcode = 0 # Decrement this value each time test case result is not 'OK' test_platforms_match = 0 # Count how many tests were actually ran with current settings target_platforms_match = 0 # Count how many platforms were actually tested with current settings test_report = {} # Test report used to export to Junit, HTML etc... muts_to_test = [] # MUTs to actually be tested test_queue = Queue( ) # contains information about test_bin and image_path for each test case test_result_queue = Queue() # used to store results of each thread execute_threads = [] # list of threads to run test cases ### check if argument of --parallel mode is a integer and greater or equal 1 try: parallel_test_exec = int(opts.parallel_test_exec) if parallel_test_exec < 1: parallel_test_exec = 1 except ValueError: gt_logger.gt_log_err( "argument of mode --parallel is not a int, disable parallel mode") parallel_test_exec = 1 # Values used to generate random seed for test execution order shuffle SHUFFLE_SEED_ROUND = 10 # Value used to round float random seed shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND) # Set shuffle seed if it is provided with command line option if opts.shuffle_test_seed: shuffle_random_seed = round(float(opts.shuffle_test_seed), SHUFFLE_SEED_ROUND) ### Testing procedures, for each target, for each target's compatible platform for yotta_target_name in yt_target_platform_map: gt_logger.gt_log( "processing '%s' yotta target compatible platforms..." % gt_logger.gt_bright(yotta_target_name)) for platform_name in yt_target_platform_map[yotta_target_name]: gt_logger.gt_log("processing '%s' platform..." 
% gt_logger.gt_bright(platform_name)) ### Select MUTS to test from list of available MUTS to start testing mut = None number_of_parallel_instances = 1 for mbed_dev in ready_mbed_devices: if accepted_target_ids and mbed_dev[ 'target_id'] not in accepted_target_ids: continue if mbed_dev['platform_name'] == platform_name: mut = mbed_dev muts_to_test.append(mbed_dev) gt_logger.gt_log("using platform '%s' for test:" % gt_logger.gt_bright(platform_name)) for k in mbed_dev: gt_logger.gt_log_tab("%s = '%s'" % (k, mbed_dev[k])) if number_of_parallel_instances < parallel_test_exec: number_of_parallel_instances += 1 else: break # Configuration print mode: if opts.verbose_test_configuration_only: continue if mut: target_platforms_match += 1 # Demo mode: --run implementation (already added --run to mbedhtrun) # We want to pass file name to mbedhtrun (--run NAME => -f NAME_ and run only one binary if opts.run_app: gt_logger.gt_log("running '%s' for '%s'" % (gt_logger.gt_bright(opts.run_app), gt_logger.gt_bright(yotta_target_name))) disk = mut['mount_point'] port = mut['serial_port'] micro = mut['platform_name'] program_cycle_s = mut_info_map[platfrom_name][ 'properties']['program_cycle_s'] copy_method = opts.copy_method if opts.copy_method else 'shell' enum_host_tests_path = get_local_host_tests_dir( opts.enum_host_tests) yotta_config = YottaConfig() yotta_config.init(yotta_target_name) yotta_config_baudrate = yotta_config.get_baudrate() # We will force configuration specific baudrate if port: port = "%s:%d" % (port, yotta_config_baudrate) test_platforms_match += 1 host_test_result = run_host_test( opts.run_app, disk, port, yotta_target_name, mut['target_id'], micro=micro, copy_method=copy_method, program_cycle_s=program_cycle_s, digest_source=opts.digest_source, json_test_cfg=opts.json_test_configuration, run_app=opts.run_app, enum_host_tests_path=enum_host_tests_path, verbose=True) single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result status = TEST_RESULTS.index( single_test_result ) if single_test_result in TEST_RESULTS else -1 if single_test_result != TEST_RESULT_OK: test_exec_retcode += 1 continue # Regression test mode: # Building sources for given target and perform normal testing yotta_result, yotta_ret = True, 0 # Skip build and assume 'yotta build' was successful if opts.skip_yotta_build: gt_logger.gt_log( "skipping calling yotta (specified with --skip-build option)" ) else: yotta_result, yotta_ret = build_with_yotta( yotta_target_name, verbose=opts.verbose, build_to_release=opts.build_to_release, build_to_debug=opts.build_to_debug) # We need to stop executing if yotta build fails if not yotta_result: gt_logger.gt_log_err("yotta returned %d" % yotta_ret) return (RET_YOTTA_BUILD_FAIL) if opts.only_build_tests: continue # Build phase will be followed by test execution for each target if yotta_result and not opts.only_build_tests: binary_type = mut_info_map[platform_name]['properties'][ 'binary_type'] ctest_test_list = load_ctest_testsuite( os.path.join('.', 'build', yotta_target_name), binary_type=binary_type) #TODO no tests to execute filtered_ctest_test_list = create_filtered_test_list( ctest_test_list, opts.test_by_names, opts.skip_test) gt_logger.gt_log( "running %d test%s for target '%s' and platform '%s'" % (len(filtered_ctest_test_list), "s" if len(filtered_ctest_test_list) != 1 else "", gt_logger.gt_bright(yotta_target_name), gt_logger.gt_bright(platform_name))) # Test execution order can be shuffled (also 
with provided random seed) # for test execution reproduction. filtered_ctest_test_list_keys = filtered_ctest_test_list.keys() if opts.shuffle_test_order: # We want to shuffle test names randomly random.shuffle(filtered_ctest_test_list_keys, lambda: shuffle_random_seed) for test_bin in filtered_ctest_test_list_keys: image_path = filtered_ctest_test_list[test_bin] test = {"test_bin": test_bin, "image_path": image_path} test_queue.put(test) #for test_bin, image_path in filtered_ctest_test_list.iteritems(): # test = {"test_bin":test_bin, "image_path":image_path} # test_queue.put(test) number_of_threads = 0 for mut in muts_to_test: ################################################################# # Experimental, parallel test execution ################################################################# if number_of_threads < parallel_test_exec: args = (test_result_queue, test_queue, opts, mut, mut_info, yotta_target_name, greentea_hooks) t = Thread(target=run_test_thread, args=args) execute_threads.append(t) number_of_threads += 1 gt_logger.gt_log_tab( "use %s instance%s for testing" % (len(execute_threads), 's' if len(execute_threads) != 1 else '')) for t in execute_threads: t.daemon = True t.start() # merge partial test reports from diffrent threads to final test report for t in execute_threads: try: t.join() #blocking test_return_data = test_result_queue.get(False) except Exception as e: # No test report generated gt_logger.gt_log_err("could not generate test report" + str(e)) test_exec_retcode += -1000 return test_exec_retcode test_platforms_match += test_return_data['test_platforms_match'] test_exec_retcode += test_return_data['test_exec_retcode'] partial_test_report = test_return_data['test_report'] # todo: find better solution, maybe use extend for report_key in partial_test_report.keys(): if report_key not in test_report: test_report[report_key] = {} test_report.update(partial_test_report) else: test_report[report_key].update(partial_test_report[report_key]) if opts.verbose_test_configuration_only: print print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target" return (0) gt_logger.gt_log("all tests finished!") # We will execute post test hooks on tests for yotta_target in test_report: test_name_list = [] # All test case names for particular yotta target for test_name in test_report[yotta_target]: test = test_report[yotta_target][test_name] # Test was successful if test['single_test_result'] in [ TEST_RESULT_OK, TEST_RESULT_FAIL ]: test_name_list.append(test_name) # Call hook executed for each test, just after all tests are finished if greentea_hooks: # We can execute this test hook just after all tests are finished ('hook_post_test_end') format = { "test_name": test_name, "test_bin_name": test['test_bin_name'], "image_path": test['image_path'], "build_path": test['build_path'], "build_path_abs": test['build_path_abs'], "yotta_target_name": yotta_target, } greentea_hooks.run_hook_ext('hook_post_test_end', format) if greentea_hooks: # Call hook executed for each yotta target, just after all tests are finished build_path = os.path.join("./build", yotta_target) build_path_abs = os.path.abspath(build_path) # We can execute this test hook just after all tests are finished ('hook_post_test_end') format = { "build_path": build_path, "build_path_abs": build_path_abs, "test_name_list": test_name_list, "yotta_target_name": yotta_target, } greentea_hooks.run_hook_ext('hook_post_all_test_end', format) # This tool is designed to work in CI # We want to return 
success codes based on tool actions, # only if testes were executed and all passed we want to # return 0 (success) if not opts.only_build_tests: # Prints shuffle seed gt_logger.gt_log("shuffle seed: %.*f" % (SHUFFLE_SEED_ROUND, shuffle_random_seed)) # Reports (to file) if opts.report_junit_file_name: gt_logger.gt_log("exporting to JUnit file '%s'..." % gt_logger.gt_bright(opts.report_junit_file_name)) junit_report = exporter_testcase_junit( test_report, test_suite_properties=yotta_module.get_data()) with open(opts.report_junit_file_name, 'w') as f: f.write(junit_report) if opts.report_text_file_name: gt_logger.gt_log("exporting to text '%s'..." % gt_logger.gt_bright(opts.report_text_file_name)) text_report, text_results = exporter_text(test_report) text_testcase_report, text_testcase_results = exporter_testcase_text( test_report) with open(opts.report_text_file_name, 'w') as f: f.write('\n'.join([ text_report, text_results, text_testcase_report, text_testcase_results ])) # Reports (to console) if opts.report_json: # We will not print summary and json report together gt_logger.gt_log("json test report:") print exporter_json(test_report) else: # Final summary if test_report: # Test suite report gt_logger.gt_log("test suite report:") text_report, text_results = exporter_text(test_report) print text_report gt_logger.gt_log("test suite results: " + text_results) # test case detailed report gt_logger.gt_log("test case report:") text_testcase_report, text_testcase_results = exporter_testcase_text( test_report, test_suite_properties=yotta_module.get_data()) print text_testcase_report gt_logger.gt_log("test case results: " + text_testcase_results) # This flag guards 'build only' so we expect only yotta errors if test_platforms_match == 0: # No tests were executed gt_logger.gt_log_warn( "no platform/target matching tests were found!") test_exec_retcode += -10 if target_platforms_match == 0: # No platforms were tested gt_logger.gt_log_warn("no target matching platforms were found!") test_exec_retcode += -100 return (test_exec_retcode)
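# ---------------------------------------------------------------------------
# Illustrative sketch (not part of greentea): main_cli() above folds several
# conditions into a single integer exit code that CI can act on: each failed
# test suite adds 1, while "no platform/target matching tests" and "no target
# matching platforms" add the sentinel values -10 and -100 (other error paths,
# e.g. a failed report merge, add further sentinels). The hypothetical helper
# below reproduces only that arithmetic so the contract "0 means everything
# ran and passed" is easier to see.
def _compose_exit_code(failed_suites, test_platforms_match, target_platforms_match):
    """Return 0 only when tests ran on matching platforms and all passed."""
    exit_code = failed_suites              # +1 per failed/errored test suite
    if test_platforms_match == 0:
        exit_code += -10                   # no platform/target matching tests were found
    if target_platforms_match == 0:
        exit_code += -100                  # no target matching platforms were found
    return exit_code

# Examples:
#   _compose_exit_code(0, 5, 1)  -> 0     (all suites passed)
#   _compose_exit_code(2, 5, 1)  -> 2     (two suites failed)
#   _compose_exit_code(0, 0, 0)  -> -110  (nothing matched, nothing ran)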
) return None with open(target_json_path, 'r') as f: data = f.read() try: target_json = json.loads(data) except (TypeError, ValueError), e: gt_logger.gt_log_err( 'Failed to load json data from target.json! error [%s]\n' % str(e) + 'Can not determine required mbed platform name!') return None if 'keywords' not in target_json: gt_logger.gt_log_err( "No 'keywords' in target.json! Can not determine required mbed platform name!" ) return None platform_name = None for keyword in target_json['keywords']: m = re.search('mbed-target:(.*)', keyword) if m is not None: platform_name = m.group(1).upper() if platform_name is None: gt_logger.gt_log_err( 'No keyword with format "mbed-target:<platform name>" found in target.json!\n' + 'Can not determine required mbed platform name!') return None return platform_name
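# ---------------------------------------------------------------------------
# Illustrative sketch (not part of greentea): the fragment above recovers the
# mbed platform name from the 'keywords' list of a yotta target.json. The
# hypothetical helper below performs the same keyword scan on an in-memory
# dict and shows the expected input shape; as above, the last matching
# 'mbed-target:<name>' keyword wins.
import re

def _platform_from_keywords(target_json):
    """Return the uppercased '<name>' from an 'mbed-target:<name>' keyword, or None."""
    platform_name = None
    for keyword in target_json.get('keywords', []):
        m = re.search('mbed-target:(.*)', keyword)
        if m is not None:
            platform_name = m.group(1).upper()
    return platform_name

# Example:
#   _platform_from_keywords({"keywords": ["mbed-official", "mbed-target:k64f"]})
#   -> 'K64F'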