Example no. 1
def get_mbed_targets_from_yotta(mbed_classic_name):
    """! Function is using 'yotta search' command to fetch matching mbed device target's name
    @return Function returns list of possible targets or empty list if value not found
    @details Example:
             $ yt search -k mbed-target:k64f target
             frdm-k64f-gcc 0.0.16: Official mbed build target for the mbed frdm-k64f development board.
             frdm-k64f-armcc 0.0.10: Official mbed build target for the mbed frdm-k64f development board, using the armcc toolchain.

             Note: Function prints on console
    """
    result = []
    cmd = ['yotta', '--plain', 'search', '-k', 'mbed-target:%s'% mbed_classic_name.lower().strip(), 'target']
    gt_logger.gt_log("yotta search for mbed-target '%s'"% gt_logger.gt_bright(mbed_classic_name.lower().strip()))
    gt_logger.gt_log_tab("calling yotta: %s"% " ".join(cmd))
    _stdout, _stderr, _ret = run_cli_process(cmd)
    if not _ret:
        for line in _stdout.splitlines():
            yotta_target_name = parse_yotta_search_cmd_output(line)
            if yotta_target_name:
                if yotta_target_name and yotta_target_name not in result:
                    result.append(yotta_target_name)
                    gt_logger.gt_log_tab("found target '%s'" % gt_logger.gt_bright(yotta_target_name))
    else:
        gt_logger.gt_log_err("calling yotta search failed!")
    return result
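
The helper parse_yotta_search_cmd_output() is not shown in this example. Below is a minimal sketch, assuming it simply extracts the leading package name from 'yotta --plain search' result lines like the ones quoted in the docstring; the real mbed-greentea helper may be implemented differently:

import re

def parse_yotta_search_cmd_output(line):
    """Pull the yotta target name out of one 'yotta --plain search' result line.
    Example input: 'frdm-k64f-gcc 0.0.16: Official mbed build target for ...'
    Returns the target name, or None if the line does not look like a result.
    """
    m = re.match(r"^([\w-]+)\s+\d+\.\d+\.\d+", line.strip())
    return m.group(1) if m else None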
Example no. 2
def get_mbed_targets_from_yotta(mbed_classic_name):
    """! Function is using 'yotta search' command to fetch matching mbed device target's name
    @return Function returns list of possible targets or empty list if value not found
    @details Example:
             $ yt search -k mbed-target:k64f target
             frdm-k64f-gcc 0.0.16: Official mbed build target for the mbed frdm-k64f development board.
             frdm-k64f-armcc 0.0.10: Official mbed build target for the mbed frdm-k64f development board, using the armcc toolchain.

             Note: Function prints on console
    """
    result = []
    cmd = [
        'yotta', '--plain', 'search', '-k',
        'mbed-target:%s' % mbed_classic_name.lower().strip(), 'target'
    ]
    gt_logger.gt_log("yotta search for mbed-target '%s'" %
                     gt_logger.gt_bright(mbed_classic_name.lower().strip()))
    gt_logger.gt_log_tab("calling yotta: %s" % " ".join(cmd))
    _stdout, _stderr, _ret = run_cli_process(cmd)
    if not _ret:
        for line in _stdout.splitlines():
            yotta_target_name = parse_yotta_search_cmd_output(line)
            if yotta_target_name:
                if yotta_target_name and yotta_target_name not in result:
                    result.append(yotta_target_name)
                    gt_logger.gt_log_tab(
                        "found target '%s'" %
                        gt_logger.gt_bright(yotta_target_name))
    else:
        gt_logger.gt_log_err("calling yotta search failed!")
    return result
def get_mbed_targets_from_yotta_local_module(mbed_classic_name, yotta_targets_path='./yotta_targets'):
    """! Function is parsing local yotta targets to fetch matching mbed device target's name
    @return Function returns list of possible targets or empty list if value not found
    """
    result = []

    if not os.path.exists(yotta_targets_path):
        return result

    # All local directories with yotta targets
    target_dirs = [target_dir_name for target_dir_name in os.listdir(yotta_targets_path) if os.path.isdir(os.path.join(yotta_targets_path, target_dir_name))]

    gt_logger.gt_log("local yotta target search in '%s' for compatible mbed-target '%s'"% (gt_logger.gt_bright(yotta_targets_path), gt_logger.gt_bright(mbed_classic_name.lower().strip())))

    for target_dir in target_dirs:
        path = os.path.join(yotta_targets_path, target_dir, 'target.json')
        try:
            with open(path, 'r') as data_file:
                target_json_data = json.load(data_file)
                yotta_target_name = parse_mbed_target_from_target_json(mbed_classic_name, target_json_data)
                if yotta_target_name:
                    target_dir_name = os.path.join(yotta_targets_path, target_dir)
                    gt_logger.gt_log_tab("inside '%s' found compatible target '%s'"% (gt_logger.gt_bright(target_dir_name), gt_logger.gt_bright(yotta_target_name)))
                    result.append(yotta_target_name)
        except IOError as e:
            gt_logger.gt_log_err(str(e))
    return result
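
The helper parse_mbed_target_from_target_json() is not shown here. A minimal sketch follows, assuming a yotta target.json advertises its compatible mbed classic board through a 'mbed-target:<name>' keyword (an assumption about the file format, not a confirmed one):

def parse_mbed_target_from_target_json(mbed_classic_name, target_json_data):
    # Hypothetical matcher: assumes target.json carries a keyword such as
    # 'mbed-target:k64f' naming the compatible mbed classic board.
    wanted = 'mbed-target:%s' % mbed_classic_name.lower().strip()
    if wanted in target_json_data.get('keywords', []):
        return target_json_data.get('name')
    return None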
Example no. 4
def list_binaries_for_targets(build_dir='./build', verbose_footer=False):
    """! Prints tests in target directories, only if tests exist.
    @param build_dir Yotta default build directory where tests will be located
    @param verbose_footer Prints additional "how to use" Greentea footer
    @details Skips empty / no tests for target directories.
    """
    dir = build_dir
    sub_dirs = [os.path.join(dir, o) for o in os.listdir(dir) if os.path.isdir(os.path.join(dir, o))] \
        if os.path.exists(dir) else []

    def count_tests():
        result = 0
        for sub_dir in sub_dirs:
            test_list = load_ctest_testsuite(sub_dir, binary_type='')
            if len(test_list):
                for test in test_list:
                    result += 1
        return result

    if count_tests():
        for sub_dir in sub_dirs:
            target_name = sub_dir.split(os.sep)[-1]
            gt_logger.gt_log("available tests for target '%s', location '%s'"% (target_name, os.path.abspath(os.path.join(build_dir, sub_dir))))
            test_list = load_ctest_testsuite(sub_dir, binary_type='')
            if len(test_list):
                for test in sorted(test_list):
                    gt_logger.gt_log_tab("test '%s'"% test)
    else:
        gt_logger.gt_log_warn("no tests found in current location")

    if verbose_footer:
        print
        print "Example: execute 'mbedgt -t TARGET_NAME -n TEST_NAME' to run test TEST_NAME for target TARGET_NAME"
Example no. 5
    def filter_ready_devices(mbeds_list):
        """! Filters list of MUTs to check if all MUTs are correctly detected with mbed-ls module.
        @details This function logs a lot to help users figure out root cause of their problems
        @param mbeds_list List of MUTs to verify
        @return Tuple of (MUTS detected correctly, MUTs not detected fully)
        """
        ready_mbed_devices = []  # Devices which can be used (are fully detected)
        not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

        gt_logger.gt_log("detected %d device%s" % (len(mbeds_list), "s" if len(mbeds_list) != 1 else ""))
        for mut in mbeds_list:
            if not all(mut.values()):
                gt_logger.gt_log_err("mbed-ls was unable to enumerate correctly all properties of the device!")
                gt_logger.gt_log_tab(
                    "check with 'mbedls -j' command if all properties of your device are enumerated properly"
                )
                for prop in mut:
                    if not mut[prop]:
                        # Adding MUT to NOT DETECTED FULLY list
                        if mut not in not_ready_mbed_devices:
                            not_ready_mbed_devices.append(mut)
                        gt_logger.gt_log_err("mbed-ls property '%s' is '%s'" % (prop, str(mut[prop])))
                        if prop == "serial_port":
                            gt_logger.gt_log_tab("check if your serial port driver is correctly installed!")
                        if prop == "mount_point":
                            gt_logger.gt_log_tab("check if your OS can detect and mount mbed device mount point!")
            else:
                # Adding MUT to DETECTED CORRECTLY list
                ready_mbed_devices.append(mut)
        return (ready_mbed_devices, not_ready_mbed_devices)
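
A brief usage sketch, treating filter_ready_devices() as directly callable and assuming mbed-ls is installed; mbed_lstools.create(), list_mbeds_ext() and the MUT property names are the same ones used in the later examples:

import mbed_lstools

mbeds = mbed_lstools.create()
mbeds_list = mbeds.list_mbeds_ext()
ready, not_ready = filter_ready_devices(mbeds_list)
for mut in ready:
    print("ready: %s mounted at '%s', console at '%s'" % (
        mut['platform_name'], mut['mount_point'], mut['serial_port']))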
Example no. 6
def list_binaries_for_targets(build_dir='./build', verbose_footer=False):
    """! Prints tests in target directories, only if tests exist.
    @details Skips empty / no tests for target directories.
    """
    dir = build_dir
    sub_dirs = [os.path.join(dir, o) for o in os.listdir(dir) if os.path.isdir(os.path.join(dir, o))] \
        if os.path.exists(dir) else []

    def count_tests():
        result = 0
        for sub_dir in sub_dirs:
            test_list = load_ctest_testsuite(sub_dir, binary_type='')
            if len(test_list):
                for test in test_list:
                    result += 1
        return result

    if count_tests():
        gt_logger.gt_log("available tests for built targets, location '%s'" %
                         os.path.abspath(build_dir))
        for sub_dir in sub_dirs:
            test_list = load_ctest_testsuite(sub_dir, binary_type='')
            if len(test_list):
                gt_logger.gt_log_tab("target '%s':" %
                                     sub_dir.split(os.sep)[-1])
                for test in sorted(test_list):
                    gt_logger.gt_log_tab("test '%s'" % test)
    else:
        gt_logger.gt_log_warn("no tests found in current location")

    if verbose_footer:
        print
        print "Example: execute 'mbedgt -t TARGET_NAME -n TEST_NAME' to run test TEST_NAME for target TARGET_NAME"
def build_with_yotta(yotta_target_name,
                     verbose=False,
                     build_to_release=False,
                     build_to_debug=False):
    cmd = ["yotta"]  # "yotta %s --target=%s,* build"
    if verbose:
        cmd.append("-v")
    cmd.append("--target=%s,*" % yotta_target_name)
    cmd.append("build")
    if build_to_release:
        cmd.append("-r")
    elif build_to_debug:
        cmd.append("-d")

    gt_logger.gt_log("building your sources and tests with yotta...")
    gt_logger.gt_log_tab("calling yotta: %s" % (" ".join(cmd)))
    yotta_result, yotta_ret = run_cli_command(cmd,
                                              shell=False,
                                              verbose=verbose)
    if yotta_result:
        gt_logger.gt_log("yotta build for target '%s' was successful" %
                         gt_logger.gt_bright(yotta_target_name))
    else:
        gt_logger.gt_log_err("yotta build failed!")
    return yotta_result, yotta_ret
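
run_cli_command() is not shown in this example. Below is a hypothetical stand-in that matches the (success, return_code) contract used above, built on subprocess; the real mbed-greentea implementation may differ:

import subprocess

def run_cli_command(cmd, shell=False, verbose=False):
    # Sketch: run the command and report whether it exited with code 0.
    try:
        ret = subprocess.call(cmd, shell=shell)
    except OSError as e:
        if verbose:
            print("failed to spawn '%s': %s" % (cmd[0], e))
        return False, -1
    return ret == 0, ret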
Example no. 8
def get_mbed_targets_from_yotta_local_module(mbed_classic_name, yotta_targets_path='./yotta_targets'):
    """! Function is parsing local yotta targets to fetch matching mbed device target's name
    @return Function returns list of possible targets or empty list if value not found
    """
    result = []

    if os.path.exists(yotta_targets_path):
        # All local directories with yotta targets
        target_dirs = [target_dir_name for target_dir_name in os.listdir(yotta_targets_path) if os.path.isdir(os.path.join(yotta_targets_path, target_dir_name))]

        gt_logger.gt_log("local yotta target search in '%s' for compatible mbed-target '%s'"% (gt_logger.gt_bright(yotta_targets_path), gt_logger.gt_bright(mbed_classic_name.lower().strip())))

        for target_dir in target_dirs:
            path = os.path.join(yotta_targets_path, target_dir, 'target.json')
            try:
                with open(path, 'r') as data_file:
                    target_json_data = json.load(data_file)
                    yotta_target_name = parse_mbed_target_from_target_json(mbed_classic_name, target_json_data)
                    if yotta_target_name:
                        target_dir_name = os.path.join(yotta_targets_path, target_dir)
                        gt_logger.gt_log_tab("inside '%s' found compatible target '%s'"% (gt_logger.gt_bright(target_dir_name), gt_logger.gt_bright(yotta_target_name)))
                        result.append(yotta_target_name)
            except IOError as e:
                gt_logger.gt_log_err(str(e))
    return result
Example no. 9
def get_mbed_target_call_yotta_target():
    """! Calls yotta's 'yotta target' command to get information about
    """
    cmd = ['yotta', '--plain', 'target']
    gt_logger.gt_log("checking yotta target in current directory")
    gt_logger.gt_log_tab("calling yotta: %s"% " ".join(cmd))
    _stdout, _stderr, _ret = run_cli_process(cmd)
    return _stdout, _stderr, _ret
def get_mbed_target_call_yotta_target():
    """! Calls yotta's 'yotta target' command to get information about
    """
    cmd = ['yotta', '--plain', 'target']
    gt_logger.gt_log("checking yotta target in current directory")
    gt_logger.gt_log_tab("calling yotta: %s"% " ".join(cmd))
    _stdout, _stderr, _ret = run_cli_process(cmd)
    return _stdout, _stderr, _ret
Example no. 11
def list_binaries_for_builds(test_spec, verbose_footer=False):
    """! Parse test spec and list binaries (BOOTABLE) in lexicographical order
    @param test_spec Test specification object
    @param verbose_footer Prints additional "how to use" Greentea footer
    """
    test_builds = test_spec.get_test_builds()
    for tb in test_builds:
        gt_logger.gt_log("available tests for built '%s', location '%s'"% (tb.get_name(), tb.get_path()))
        for tc in sorted(tb.get_tests().keys()):
            gt_logger.gt_log_tab("test '%s'"% tc)

    if verbose_footer:
        print
        print "Example: execute 'mbedgt -t BUILD_NAME -n TEST_NAME' to run test TEST_NAME for build TARGET_NAME in current test specification"
Example no. 12
def get_coverage_data(build_path, output):
    # Example GCOV output
    # [1456840876.73][CONN][RXD] {{__coverage_start;c:\Work\core-util/source/PoolAllocator.cpp.gcda;6164636772393034c2733f32...a33e...b9}}
    gt_logger.gt_log("checking for GCOV data...")
    re_gcov = re.compile(r"^\[(\d+\.\d+)\][^\{]+\{\{(__coverage_start);([^;]+);([^}]+)\}\}$")
    for line in output.splitlines():
        m = re_gcov.search(line)
        if m:
            _, _, gcov_path, gcov_payload = m.groups()
            try:
                bin_gcov_payload = coverage_pack_hex_payload(gcov_payload)
                coverage_dump_file(build_path, gcov_path, bin_gcov_payload)
                gt_logger.gt_log_tab("storing %d bytes in '%s'"% (len(bin_gcov_payload), gcov_path))
            except Exception as e:
                gt_logger.gt_log_err("error while handling GCOV data: " + str(e))
Example no. 13
def list_binaries_for_builds(test_spec, verbose_footer=False):
    """! Parse test spec and list binaries (BOOTABLE) in lexicographical order
    @param test_spec Test specification object
    @param verbose_footer Prints additional "how to use" Greentea footer
    """
    test_builds = test_spec.get_test_builds()
    for tb in test_builds:
        gt_logger.gt_log("available tests for built '%s', location '%s'" %
                         (tb.get_name(), tb.get_path()))
        for tc in sorted(tb.get_tests().keys()):
            gt_logger.gt_log_tab("test '%s'" % tc)

    if verbose_footer:
        print
        print "Example: execute 'mbedgt -t BUILD_NAME -n TEST_NAME' to run test TEST_NAME for build TARGET_NAME in current test specification"
Example no. 14
def get_coverage_data(build_path, output):
    # Example GCOV output
    # [1456840876.73][CONN][RXD] {{__coverage_start;c:\Work\core-util/source/PoolAllocator.cpp.gcda;6164636772393034c2733f32...a33e...b9}}
    gt_logger.gt_log("checking for GCOV data...")
    re_gcov = re.compile(r"^\[(\d+\.\d+)\][^\{]+\{\{(__coverage_start);([^;]+);([^}]+)\}\}$")
    for line in output.splitlines():
        m = re_gcov.search(line)
        if m:
            _, _, gcov_path, gcov_payload = m.groups()
            try:
                bin_gcov_payload = coverage_pack_hex_payload(gcov_payload)
                coverage_dump_file(build_path, gcov_path, bin_gcov_payload)
                gt_logger.gt_log_tab("storing %d bytes in '%s'"% (len(bin_gcov_payload), gcov_path))
            except Exception as e:
                gt_logger.gt_log_err("error while handling GCOV data: " + str(e))
Example no. 15
    def run(self, format=None):
        """! Runs hook after the command is formatted with in-place {tags}
        @param format Format dictionary used to replace hook {tags} with real values, notation e.g.: {build_name}
        """
        gt_logger.gt_log("hook '%s' execution"% self.name)
        cmd = self.format_before_run(self.cmd, format)
        gt_logger.gt_log_tab("hook command: %s"% cmd)
        (_stdout, _stderr, ret) = self.run_cli_process(cmd)
        if _stdout:
            print _stdout
        if ret:
            gt_logger.gt_log_err("hook exited with error: %d, dumping stderr..."% ret)
            print _stderr
        return ret
Example no. 16
    def run(self, format=None):
        """! Runs hook after the command is formatted with in-place {tags}
        @param format Format dictionary used to replace hook {tags} with real values, notation e.g.: {build_name}
        """
        gt_logger.gt_log("hook '%s' execution" % self.name)
        cmd = self.format_before_run(self.cmd, format)
        gt_logger.gt_log_tab("hook command: %s" % cmd)
        (_stdout, _stderr, ret) = self.run_cli_process(cmd)
        if _stdout:
            print _stdout
        if ret:
            gt_logger.gt_log_err(
                "hook exited with error: %d, dumping stderr..." % ret)
            print _stderr
        return ret
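
A standalone sketch of the {tag} substitution the hook relies on, assuming format_before_run() performs simple str.format() replacement; the hook command and values below are purely illustrative:

def format_before_run(cmd, format=None):
    # Sketch: substitute {tags} such as {build_name} in the hook command string.
    if format:
        return cmd.format(**format)
    return cmd

# Hypothetical hook command containing a {build_name} tag:
hook_cmd = "lcov --capture --directory ./build/{build_name} --output-file {build_name}.info"
print(format_before_run(hook_cmd, {"build_name": "frdm-k64f-gcc"}))
# -> lcov --capture --directory ./build/frdm-k64f-gcc --output-file frdm-k64f-gcc.info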
Example no. 17
def get_mbed_target_from_current_dir():
    """! Function uses yotta target command to check current target
    @return Returns current target or None if target not found (e.g. not yotta package)
    """
    result = None
    cmd = ['yotta', '--plain', 'target']
    gt_logger.gt_log("checking yotta target in current directory")
    gt_logger.gt_log_tab("calling yotta: %s"% " ".join(cmd))
    _stdout, _stderr, _ret = run_cli_process(cmd)
    if not _ret:
        for line in _stdout.splitlines():
            target = parse_yotta_target_cmd_output(line)
            if target:
                result = target
                break
    return result
Example no. 18
    def filter_ready_devices(mbeds_list):
        """! Filters list of MUTs to check if all MUTs are correctly detected with mbed-ls module.
        @details This function logs a lot to help users figure out root cause of their problems
        @param mbeds_list List of MUTs to verify
        @return Tuple of (MUTS detected correctly, MUTs not detected fully)
        """
        ready_mbed_devices = [
        ]  # Devices which can be used (are fully detected)
        not_ready_mbed_devices = [
        ]  # Devices which can't be used (are not fully detected)

        required_mut_props = [
            'target_id', 'platform_name', 'serial_port', 'mount_point'
        ]

        gt_logger.gt_log(
            "detected %d device%s" %
            (len(mbeds_list), 's' if len(mbeds_list) != 1 else ''))
        for mut in mbeds_list:
            for prop in required_mut_props:
                if not mut[prop]:
                    # Adding MUT to NOT DETECTED FULLY list
                    if mut not in not_ready_mbed_devices:
                        not_ready_mbed_devices.append(mut)
                        gt_logger.gt_log_err(
                            "mbed-ls was unable to enumerate correctly all properties of the device!"
                        )
                        gt_logger.gt_log_tab(
                            "check with 'mbedls -j' command if all properties of your device are enumerated properly"
                        )

                    gt_logger.gt_log_err("mbed-ls property '%s' is '%s'" %
                                         (prop, str(mut[prop])))
                    if prop == 'serial_port':
                        gt_logger.gt_log_tab(
                            "check if your serial port driver is correctly installed!"
                        )
                    if prop == 'mount_point':
                        gt_logger.gt_log_tab(
                            'check if your OS can detect and mount mbed device mount point!'
                        )
            # Only MUTs with all required properties detected are ready for use
            if mut not in not_ready_mbed_devices:
                # Adding MUT to DETECTED CORRECTLY list
                ready_mbed_devices.append(mut)
        return (ready_mbed_devices, not_ready_mbed_devices)
Example no. 19
def create_filtered_test_list(ctest_test_list, test_by_names, skip_test):
    filtered_ctest_test_list = ctest_test_list
    test_list = None
    invalid_test_names = []
    if filtered_ctest_test_list is None:
        return {}
    elif test_by_names:
        filtered_ctest_test_list = {}  # Subset of 'ctest_test_list'
        test_list = test_by_names.split(',')
        gt_logger.gt_log("test case filter (specified with -n option)")

        for test_name in test_list:
            if test_name not in ctest_test_list:
                invalid_test_names.append(test_name)
            else:
                gt_logger.gt_log_tab("test filtered in '%s'" %
                                     gt_logger.gt_bright(test_name))
                filtered_ctest_test_list[test_name] = ctest_test_list[
                    test_name]
    elif skip_test:
        test_list = skip_test.split(',')
        gt_logger.gt_log("test case filter (specified with -i option)")

        for test_name in test_list:
            if test_name not in ctest_test_list:
                invalid_test_names.append(test_name)
            else:
                gt_logger.gt_log_tab("test '%s' skipped" %
                                     gt_logger.gt_bright(test_name))
                del filtered_ctest_test_list[test_name]

    if invalid_test_names:
        opt_to_print = '-n' if test_by_names else 'skip-test'
        gt_logger.gt_log_warn(
            "invalid test case names (specified with '%s' option)" %
            opt_to_print)
        for test_name in invalid_test_names:
            gt_logger.gt_log_warn(
                "test name '%s' not found in CTestTestFile.cmake (specified with '%s' option)"
                % (gt_logger.gt_bright(test_name), opt_to_print))
        gt_logger.gt_log_tab("note: test case names are case sensitive")
        gt_logger.gt_log_tab("note: see list of available test cases below")
        list_binaries_for_targets(verbose_footer=False)
    return filtered_ctest_test_list
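
A short usage sketch with a hypothetical test list (note that the -i path above deletes entries from the dictionary passed in):

# Hypothetical CTest-style test list: test name -> binary path
ctest_test_list = {
    'mbed-drivers-test-basic': './build/frdm-k64f-gcc/test/mbed-drivers-test-basic.bin',
    'mbed-drivers-test-ticker': './build/frdm-k64f-gcc/test/mbed-drivers-test-ticker.bin',
}

# -n style filter: keep only the named tests
filtered = create_filtered_test_list(ctest_test_list, 'mbed-drivers-test-basic', None)
# filtered == {'mbed-drivers-test-basic': './build/frdm-k64f-gcc/test/mbed-drivers-test-basic.bin'}

# -i style filter: drop the named tests (this mutates ctest_test_list itself)
remaining = create_filtered_test_list(ctest_test_list, None, 'mbed-drivers-test-ticker')
# remaining == {'mbed-drivers-test-basic': './build/frdm-k64f-gcc/test/mbed-drivers-test-basic.bin'}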
Example no. 20
def build_with_yotta(yotta_target_name, verbose = False, build_to_release = False, build_to_debug = False):
    cmd = ["yotta"] # "yotta %s --target=%s,* build"
    if verbose:
        cmd.append("-v")
    cmd.append("--target=%s,*"% yotta_target_name)
    cmd.append("build")
    if build_to_release:
        cmd.append("-r")
    elif build_to_debug:
        cmd.append("-d")

    gt_logger.gt_log("building your sources and tests with yotta...")
    gt_logger.gt_log_tab("calling yotta: %s"% (" ".join(cmd)))
    yotta_result, yotta_ret = run_cli_command(cmd, shell=False, verbose=verbose)
    if yotta_result:
        gt_logger.gt_log("yotta build for target '%s' was successful"% gt_logger.gt_bright(yotta_target_name))
    else:
        gt_logger.gt_log_err("yotta build failed!")
    return yotta_result, yotta_ret
Example no. 21
def get_yotta_target_from_local_config(yotta_json='.yotta.json'):
    """! Load yotta target from local configuration file
    @param yotta_json File in format of .yotta.json which stores current target names
    @return Yotta target set in current directory, None if no info is available
    @details
    Example structure of .yotta.json file:
    {
      "build": {
        "target": "frdm-k64f-gcc,*",
        "targetSetExplicitly": true
      }
    }
    """
    result = None
    if os.path.exists(yotta_json):
        try:
            gt_logger.gt_log("parsing local file '%s' for target information"% yotta_json)
            with open(yotta_json, 'r') as f:
                result = parse_yotta_json_for_build_name(json.load(f))
        except Exception as e:
            print str(e)
    return result
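
parse_yotta_json_for_build_name() is not shown in these examples. A minimal sketch based on the .yotta.json structure given in the docstring; the real parser may differ:

def parse_yotta_json_for_build_name(yotta_json_data):
    # Sketch: extract 'frdm-k64f-gcc' from the "build" -> "target" entry
    # ("frdm-k64f-gcc,*") shown in the docstring above.
    target = yotta_json_data.get('build', {}).get('target')
    if target:
        return target.split(',')[0]
    return None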
Example no. 22
def list_binaries_for_targets(build_dir='./build', verbose_footer=False):
    """! Prints tests in target directories, only if tests exist.
    @param build_dir Yotta default build directory where tests will be located
    @param verbose_footer Prints additional "how to use" Greentea footer
    @details Skips empty / no tests for target directories.
    """
    dir = build_dir
    sub_dirs = [os.path.join(dir, o) for o in os.listdir(dir) if os.path.isdir(os.path.join(dir, o))] \
        if os.path.exists(dir) else []

    def count_tests():
        result = 0
        for sub_dir in sub_dirs:
            test_list = load_ctest_testsuite(sub_dir, binary_type='')
            if len(test_list):
                for test in test_list:
                    result += 1
        return result

    if count_tests():
        for sub_dir in sub_dirs:
            target_name = sub_dir.split(os.sep)[-1]
            gt_logger.gt_log(
                "available tests for target '%s', location '%s'" %
                (target_name, os.path.abspath(os.path.join(build_dir,
                                                           sub_dir))))
            test_list = load_ctest_testsuite(sub_dir, binary_type='')
            if len(test_list):
                for test in sorted(test_list):
                    gt_logger.gt_log_tab("test '%s'" % test)
    else:
        gt_logger.gt_log_warn("no tests found in current location")

    if verbose_footer:
        print
        print(
            "Example: execute 'mbedgt -t TARGET_NAME -n TEST_NAME' to run test TEST_NAME for target TARGET_NAME"
        )
Example no. 23
def get_yotta_target_from_local_config(yotta_json='.yotta.json'):
    """! Load yotta target from local configuration file
    @param yotta_json File in format of .yotta.json which stores current target names
    @return Yotta target set in current directory, None if no info is available
    @details
    Example structure of .yotta.json file:
    {
      "build": {
        "target": "frdm-k64f-gcc,*",
        "targetSetExplicitly": true
      }
    }
    """
    result = None
    if os.path.exists(yotta_json):
        try:
            gt_logger.gt_log("parsing local file '%s' for target information" %
                             yotta_json)
            with open(yotta_json, 'r') as f:
                result = parse_yotta_json_for_build_name(json.load(f))
        except Exception as e:
            print str(e)
    return result
Example no. 24
def create_filtered_test_list(ctest_test_list, test_by_names, skip_test):
    filtered_ctest_test_list = ctest_test_list
    test_list = None
    invalid_test_names = []
    if filtered_ctest_test_list is None:
        return {}
    elif test_by_names:
        filtered_ctest_test_list = {}   # Subset of 'ctest_test_list'
        test_list = test_by_names.split(',')
        gt_logger.gt_log("test case filter (specified with -n option)")

        for test_name in test_list:
            if test_name not in ctest_test_list:
                invalid_test_names.append(test_name)
            else:
                gt_logger.gt_log_tab("test filtered in '%s'"% gt_logger.gt_bright(test_name))
                filtered_ctest_test_list[test_name] = ctest_test_list[test_name]
    elif skip_test:
        test_list = skip_test.split(',')
        gt_logger.gt_log("test case filter (specified with -i option)")

        for test_name in test_list:
            if test_name not in ctest_test_list:
                invalid_test_names.append(test_name)
            else:
                gt_logger.gt_log_tab("test '%s' skipped"% gt_logger.gt_bright(test_name))
                del filtered_ctest_test_list[test_name]

    if invalid_test_names:
        opt_to_print = '-n' if test_by_names else 'skip-test'
        gt_logger.gt_log_warn("invalid test case names (specified with '%s' option)"% opt_to_print)
        for test_name in invalid_test_names:
            gt_logger.gt_log_warn("test name '%s' not found in CTestTestFile.cmake (specified with '%s' option)"% (gt_logger.gt_bright(test_name),opt_to_print))
        gt_logger.gt_log_tab("note: test case names are case sensitive")
        gt_logger.gt_log_tab("note: see list of available test cases below")
        list_binaries_for_targets(verbose_footer=False)
    return filtered_ctest_test_list
Example no. 25
def get_test_spec(opts):
    """! Closure encapsulating how we get test specification and load it from file of from yotta module
    @return Returns tuple of (test specification, ret code). Test specification == None if test spec load was not successful
    """
    test_spec = None

    # Check if test_spec.json file exist, if so we will pick it up as default file and load it
    test_spec_file_name = opts.test_spec
    test_spec_file_name_list = []

    # Note: test_spec.json will have higher priority than module.json file
    #       so if we are inside directory with module.json and test_spec.json we will use test spec file
    #       instead of using yotta's module.json file

    def get_all_test_specs_from_build_dir(path_to_scan):
        """! Searches for all test_spec.json files
        @param path_to_scan Directory path used to recursively search for test_spec.json
        @result List of locations of test_spec.json
        """
        return [os.path.join(dp, f) for dp, dn, filenames in os.walk(path_to_scan) for f in filenames if f == 'test_spec.json']

    def merge_multiple_test_specifications_from_file_list(test_spec_file_name_list):
        """! For each file in test_spec_file_name_list merge all test specifications into one
        @param test_spec_file_name_list List of paths to different test specifications
        @return TestSpec object with all test specification data inside
        """

        def copy_builds_between_test_specs(source, destination):
            """! Copies build key-value pairs between two test_spec dicts
                @param source Source dictionary
                @param destination Destination dictionary to which 'builds' key-values will be applied
                @return Dictionary with merged source
            """
            result = destination.copy()
            if 'builds' in source and 'builds' in destination:
                for k in source['builds']:
                    result['builds'][k] = source['builds'][k]
            return result

        merged_test_spec = {}
        for test_spec_file in test_spec_file_name_list:
            gt_logger.gt_log_tab("using '%s'"% test_spec_file)
            try:
                with open(test_spec_file, 'r') as f:
                    test_spec_data = json.load(f)
                    merged_test_spec = copy_builds_between_test_specs(merged_test_spec, test_spec_data)
            except Exception as e:
                gt_logger.gt_log_err("Unexpected error while processing '%s' test specification file"% test_spec_file)
                gt_logger.gt_log_tab(str(e))
                merged_test_spec = {}

        test_spec = TestSpec()
        test_spec.parse(merged_test_spec)
        return test_spec

    # Test specification look-up
    if opts.test_spec:
        # Loading test specification from command line specified file
        gt_logger.gt_log("test specification file '%s' (specified with --test-spec option)"% opts.test_spec)
    elif os.path.exists('test_spec.json'):
        # Test specification file exists in current directory
        gt_logger.gt_log("using 'test_spec.json' from current directory!")
        test_spec_file_name = 'test_spec.json'
    elif os.path.exists('.build'):
        # Checking .build directory for test specifications
        test_spec_file_name_list = get_all_test_specs_from_build_dir('.build')
    elif os.path.exists(os.path.join('mbed-os', '.build')):
        # Checking mbed-os/.build directory for test specifications
        test_spec_file_name_list = get_all_test_specs_from_build_dir(os.path.join('mbed-os', '.build'))

    # Actual load and processing of test specification from sources
    if test_spec_file_name:
        # Test specification from command line (--test-spec) or default test_spec.json will be used
        gt_logger.gt_log("using '%s' from current directory!"% test_spec_file_name)
        test_spec = TestSpec(test_spec_file_name)
        if opts.list_binaries:
            list_binaries_for_builds(test_spec)
            return None, 0
    elif test_spec_file_name_list:
        # Merge multiple test specs into one and keep calm
        gt_logger.gt_log("using multiple test specifications from current directory!")
        test_spec = merge_multiple_test_specifications_from_file_list(test_spec_file_name_list)
        if opts.list_binaries:
            list_binaries_for_builds(test_spec)
            return None, 0
    elif os.path.exists('module.json'):
        # If inside yotta module load module data and generate test spec
        gt_logger.gt_log("using 'module.json' from current directory!")
        if opts.list_binaries:
            # List available test binaries (names, no extension)
            list_binaries_for_targets()
            return None, 0
        else:
            test_spec = get_test_spec_from_yt_module(opts)
    else:
        gt_logger.gt_log_err("greentea should be run inside a Yotta module or --test-spec switch should be used")
        return None, -1
    return test_spec, 0
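
An illustrative test specification that TestSpec().parse() could consume; only the top-level 'builds' key is taken from the merging code above, and the nested field names are assumptions:

# Illustrative test_spec data expressed as a Python dict
test_spec_data = {
    "builds": {
        "frdm-k64f-gcc": {
            "platform": "K64F",          # assumed field names below this level
            "toolchain": "gcc",
            "base_path": "./build/frdm-k64f-gcc",
            "baud_rate": 115200,
            "tests": {}
        }
    }
}

test_spec = TestSpec()
test_spec.parse(test_spec_data)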
Example no. 26
def create_filtered_test_list(ctest_test_list,
                              test_by_names,
                              skip_test,
                              test_spec=None):
    """! Filters test case list (filtered with switch -n) and return filtered list.
    @ctest_test_list List iof tests, originally from CTestTestFile.cmake in yotta module. Now comes from test specification
    @test_by_names Command line switch -n <test_by_names>
    @skip_test Command line switch -i <skip_test>
    @param test_spec Test specification object loaded with --test-spec switch
    @return
    """
    def filter_names_by_prefix(test_case_name_list, prefix_name):
        """!
        @param test_case_name_list List of all test cases
        @param prefix_name Prefix of test name we are looking for
        @result Sorted list of test names starting with 'prefix_name'
        """
        result = list()
        for test_name in test_case_name_list:
            if test_name.startswith(prefix_name):
                result.append(test_name)
        return sorted(result)

    filtered_ctest_test_list = ctest_test_list
    test_list = None
    invalid_test_names = []
    if filtered_ctest_test_list is None:
        return {}

    if test_by_names:
        filtered_ctest_test_list = {}  # Subset of 'ctest_test_list'
        test_list = test_by_names.split(',')
        gt_logger.gt_log("test case filter (specified with -n option)")

        for test_name in set(test_list):
            if test_name.endswith('*'):
                # This 'star-suffix' filter allows users to filter tests with fixed prefixes
                # Example: -n 'TESTS-mbed_drivers*' will filter all test cases with names starting with 'TESTS-mbed_drivers'
                for test_name_filtered in filter_names_by_prefix(
                        ctest_test_list.keys(), test_name[:-1]):
                    gt_logger.gt_log_tab(
                        "test filtered in '%s'" %
                        gt_logger.gt_bright(test_name_filtered))
                    filtered_ctest_test_list[
                        test_name_filtered] = ctest_test_list[
                            test_name_filtered]
            elif test_name not in ctest_test_list:
                invalid_test_names.append(test_name)
            else:
                gt_logger.gt_log_tab("test filtered in '%s'" %
                                     gt_logger.gt_bright(test_name))
                filtered_ctest_test_list[test_name] = ctest_test_list[
                    test_name]

    if skip_test:
        test_list = skip_test.split(',')
        gt_logger.gt_log("test case filter (specified with -i option)")

        for test_name in set(test_list):
            if test_name not in ctest_test_list:
                invalid_test_names.append(test_name)
            else:
                gt_logger.gt_log_tab("test filtered out '%s'" %
                                     gt_logger.gt_bright(test_name))
                del filtered_ctest_test_list[test_name]

    if invalid_test_names:
        opt_to_print = '-n' if test_by_names else 'skip-test'
        gt_logger.gt_log_warn(
            "invalid test case names (specified with '%s' option)" %
            opt_to_print)
        for test_name in invalid_test_names:
            if test_spec:
                test_spec_name = test_spec.test_spec_filename
                gt_logger.gt_log_warn(
                    "test name '%s' not found in '%s' (specified with --test-spec option)"
                    % (gt_logger.gt_bright(test_name),
                       gt_logger.gt_bright(test_spec_name)))
            else:
                gt_logger.gt_log_warn(
                    "test name '%s' not found in CTestTestFile.cmake (specified with '%s' option)"
                    % (gt_logger.gt_bright(test_name), opt_to_print))
        gt_logger.gt_log_tab("note: test case names are case sensitive")
        gt_logger.gt_log_tab("note: see list of available test cases below")
        # Print available test suite names (binary names the user can use with -n)
        if test_spec:
            list_binaries_for_builds(test_spec)
        else:
            list_binaries_for_targets()

    return filtered_ctest_test_list
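
A short usage sketch of the 'star-suffix' filter described above, with hypothetical test names:

ctest_test_list = {
    'tests-mbed_drivers-ticker': './build/K64F/tests-mbed_drivers-ticker.bin',
    'tests-mbed_drivers-timeout': './build/K64F/tests-mbed_drivers-timeout.bin',
    'tests-mbedmicro-rtos': './build/K64F/tests-mbedmicro-rtos.bin',
}
filtered = create_filtered_test_list(ctest_test_list, 'tests-mbed_drivers-*', None)
# filtered keeps only the two entries whose names start with 'tests-mbed_drivers-'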
Example no. 27
def get_mbed_target_call_yotta_target():
    cmd = ['yotta', '--plain', 'target']
    gt_logger.gt_log("checking yotta target in current directory")
    gt_logger.gt_log_tab("calling yotta: %s"% " ".join(cmd))
    _stdout, _stderr, _ret = run_cli_process(cmd)
    return _stdout, _stderr, _ret
Example no. 28
def get_test_spec_from_yt_module(opts):
    """
    Gives test specification created from yotta module environment.

    :return TestSpec:
    """
    ### Read yotta module basic information
    yotta_module = YottaModule()
    yotta_module.init() # Read actual yotta module data

    # Check if 'greentea-client' is missing from module.json of the repo under test; if so, abort
    if not yotta_module.check_greentea_client():
        error = """
        *****************************************************************************************
        * We've noticed that NO 'greentea-client' module is specified in                        *
        * dependency/testDependency section of this module's 'module.json' file.                *
        *                                                                                       *
        * This version of Greentea requires 'greentea-client' module.                           *
        * Please downgrade to Greentea before v0.2.0:                                           *
        *                                                                                       *
        * $ pip install "mbed-greentea<0.2.0" --upgrade                                         *
        *                                                                                       *
        * or port your tests to new Async model: https://github.com/ARMmbed/greentea/pull/78    *
        *****************************************************************************************
        """
        raise YottaError(error)

    test_spec = TestSpec()

    ### Selecting yotta targets to process
    yt_targets = [] # List of yotta targets specified by the user to process during this run
    if opts.list_of_targets:
        yt_targets = opts.list_of_targets.split(',')
    else:
        # Trying to use locally set yotta target
        gt_logger.gt_log("checking for yotta target in current directory")
        gt_logger.gt_log_tab("reason: no --target switch set")
        current_target = get_mbed_target_from_current_dir()
        if current_target:
            gt_logger.gt_log("assuming default target as '%s'"% gt_logger.gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            yt_targets = [current_target]
        else:
            gt_logger.gt_log_tab("yotta target in current directory is not set")
            gt_logger.gt_log_err("yotta target is not specified. Use '%s' or '%s' command to set target"%
            (
                gt_logger.gt_bright('mbedgt -t <yotta_target>'),
                gt_logger.gt_bright('yotta target <yotta_target>')
            ))
            raise YottaError("Yotta target not set in current directory!")

    ### Use yotta to search mapping between platform names and available platforms
    # Convert platform:target, ... mapping to data structure
    yt_target_to_map_platform = {}
    if opts.map_platform_to_yt_target:
        gt_logger.gt_log("user defined platform -> target supported mapping definition (specified with --map-target switch)")
        for mapping in opts.map_platform_to_yt_target.split(','):
            if len(mapping.split(':')) == 2:
                yt_target, platform = mapping.split(':')
                yt_target_to_map_platform[yt_target] = platform
                gt_logger.gt_log_tab("mapped yotta target '%s' to be compatible with platform '%s'"% (
                    gt_logger.gt_bright(yt_target),
                    gt_logger.gt_bright(platform)
                ))
            else:
                gt_logger.gt_log_tab("unknown format '%s', use 'target:platform' format"% mapping)

    for yt_target in yt_targets:
        if yt_target in yt_target_to_map_platform:
            platform = yt_target_to_map_platform[yt_target]
        else:
            # get it from local Yotta target
            platform = get_platform_name_from_yotta_target(yt_target)

        # Toolchain doesn't matter as Greentea does not have to do any selection for it unlike platform
        toolchain = yt_target
        yotta_config = YottaConfig()
        yotta_config.init(yt_target)
        baud_rate = yotta_config.get_baudrate()
        base_path = os.path.join('.', 'build', yt_target)
        tb = TestBuild(yt_target, platform, toolchain, baud_rate, base_path)
        test_spec.add_test_builds(yt_target, tb)

        # Find tests
        ctest_test_list = load_ctest_testsuite(base_path,
                                               binary_type=get_binary_type_for_platform(platform))
        for name, path in ctest_test_list.iteritems():
            t = Test(name)
            t.add_binary(path, TestBinary.BIN_TYPE_BOOTABLE)
            tb.add_test(name, t)

    return test_spec
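
For reference, a small illustration of the --map-target mapping parsed above; the target and platform names are hypothetical:

# Hypothetical --map-target value, in the 'target:platform' format expected above:
#   --map-target frdm-k64f-gcc:K64F,frdm-k64f-armcc:K64F
# After the parsing loop, yt_target_to_map_platform would hold:
yt_target_to_map_platform = {'frdm-k64f-gcc': 'K64F', 'frdm-k64f-armcc': 'K64F'}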
Example no. 29
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is main CLI function with all command line parameters
    @details This function also implements the CLI workflow depending on the CLI parameters provided
    @return Returns a status code which the caller propagates to the environment as the exit code
    """

    if not MBED_LMTOOLS:
        gt_logger.gt_log_err("error: mbed-ls proprietary module not installed")
        return (-1)

    if not MBED_HOST_TESTS:
        gt_logger.gt_log_err("error: mbed-host-tests proprietary module not installed")
        return (-1)

    # This is how you magically control colours in this piece of art software
    gt_logger.colorful(not opts.plain)

    # List available test binaries (names, no extension)
    if opts.list_binaries:
        list_binaries_for_targets()
        return (0)

    # Prints version and exits
    if opts.version:
        print_version()
        return (0)

    # We will load hooks from JSON file to support extra behaviour during test execution
    greentea_hooks = GreenteaHooks(opts.hooks_json) if opts.hooks_json else None

    # Capture alternative test console inputs, used e.g. in 'yotta test command'
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(None,
                                         None,
                                         None,
                                         None,
                                         None,
                                         hooks=greentea_hooks,
                                         digest_source=opts.digest_source,
                                         enum_host_tests_path=enum_host_tests_path,
                                         verbose=opts.verbose_test_result_only)

        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
        status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
        return (status)

    ### Read yotta module basic information
    yotta_module = YottaModule()
    yotta_module.init() # Read actual yotta module data

    # Check if 'greentea-client' is missing from module.json of the repo under test; if so, abort
    if not yotta_module.check_greentea_client():
        gt_logger.gt_log("""
        *****************************************************************************************
        * We've noticed that NO 'greentea-client' module is specified in                        *
        * dependency/testDependency section of this module's 'module.json' file.                *
        *                                                                                       *
        * This version of Greentea requires 'greentea-client' module.                           *
        * Please downgrade to Greentea before v0.2.0:                                           *
        *                                                                                       *
        * $ pip install "mbed-greentea<0.2.0" --upgrade                                         *
        *                                                                                       *
        * or port your tests to new Async model: https://github.com/ARMmbed/greentea/pull/78    *
        *****************************************************************************************
        """)
        return (0)

    ### Selecting yotta targets to process
    yt_targets = [] # List of yotta targets specified by the user to process during this run
    if opts.list_of_targets:
        yt_targets = opts.list_of_targets.split(',')
    else:
        # Trying to use locally set yotta target
        gt_logger.gt_log("checking for yotta target in current directory")
        gt_logger.gt_log_tab("reason: no --target switch set")
        current_target = get_mbed_target_from_current_dir()
        if current_target:
            gt_logger.gt_log("assuming default target as '%s'"% gt_logger.gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            yt_targets = [current_target]
        else:
            gt_logger.gt_log_tab("yotta target in current directory is not set")
            gt_logger.gt_log_err("yotta target is not specified. Use '%s' or '%s' command to set target"%
            (
                gt_logger.gt_bright('mbedgt -t <yotta_target>'),
                gt_logger.gt_bright('yotta target <yotta_target>')
            ))
            return (-1)

    ### Query with mbedls for available mbed-enabled devices
    gt_logger.gt_log("detecting connected mbed-enabled devices...")

    # Detect devices connected to system
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds_ext()

    ready_mbed_devices = [] # Devices which can be used (are fully detected)

    if mbeds_list:
        gt_logger.gt_log("detected %d device%s"% (len(mbeds_list), 's' if len(mbeds_list) != 1 else ''))
        for mut in mbeds_list:
            if not all(mut.values()):
                gt_logger.gt_log_err("can't detect all properties of the device!")
                for prop in mut:
                    if not mut[prop]:
                        gt_logger.gt_log_tab("property '%s' is '%s'"% (prop, str(mut[prop])))
            else:
                ready_mbed_devices.append(mut)
                gt_logger.gt_log_tab("detected '%s' -> '%s', console at '%s', mounted at '%s', target id '%s'"% (
                    gt_logger.gt_bright(mut['platform_name']),
                    gt_logger.gt_bright(mut['platform_name_unique']),
                    gt_logger.gt_bright(mut['serial_port']),
                    gt_logger.gt_bright(mut['mount_point']),
                    gt_logger.gt_bright(mut['target_id'])
                ))
    else:
        gt_logger.gt_log_err("no devices detected")
        return (RET_NO_DEVICES)

    ### Use yotta to search mapping between platform names and available platforms
    # Convert platform:target, ... mapping to data structure
    map_platform_to_yt_target = {}
    if opts.map_platform_to_yt_target:
        gt_logger.gt_log("user defined platform -> target supported mapping definition (specified with --map-target switch)")
        p_to_t_mappings = opts.map_platform_to_yt_target.split(',')
        for mapping in p_to_t_mappings:
            if len(mapping.split(':')) == 2:
                platform, yt_target = mapping.split(':')
                if platform not in map_platform_to_yt_target:
                    map_platform_to_yt_target[platform] = []
                map_platform_to_yt_target[platform].append(yt_target)
                gt_logger.gt_log_tab("mapped platform '%s' to be compatible with '%s'"% (
                    gt_logger.gt_bright(platform),
                    gt_logger.gt_bright(yt_target)
                ))
            else:
                gt_logger.gt_log_tab("unknown format '%s', use 'platform:target' format"% mapping)

    # Check if mbed classic target name can be translated to yotta target name

    mut_info_map = {}   # platform_name : mut_info_dict, extract yt_targets with e.g. [k["yotta_target"] for k in d['K64F']["yotta_targets"]]

    for mut in ready_mbed_devices:
        platfrom_name = mut['platform_name']
        if platfrom_name not in mut_info_map:
            mut_info = get_mbed_clasic_target_info(platfrom_name,
                                                   map_platform_to_yt_target,
                                                   use_yotta_registry=opts.yotta_search_for_mbed_target)
            if mut_info:
                mut_info_map[platfrom_name] = mut_info

    ### List of unique ready platform names
    unique_mbed_devices = list(set(mut_info_map.keys()))

    ### Identify which targets have to be built because their platforms are present
    yt_target_platform_map = {}     # yt_target_to_test : platforms to test on

    for yt_target in yt_targets:
        for platform_name in unique_mbed_devices:
            if yt_target in [k["yotta_target"] for k in mut_info_map[platform_name]["yotta_targets"]]:
                if yt_target not in yt_target_platform_map:
                    yt_target_platform_map[yt_target] = []
                if platform_name not in yt_target_platform_map[yt_target]:
                    yt_target_platform_map[yt_target].append(platform_name)

    ### We can filter in only specific target ids
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_logger.gt_log("filtering out target ids not on below list (specified with --use-tids switch)")
        accepted_target_ids = opts.use_target_ids.split(',')
        for tid in accepted_target_ids:
            gt_logger.gt_log_tab("accepting target id '%s'"% gt_logger.gt_bright(tid))

    test_exec_retcode = 0       # Incremented each time a test case result is not 'OK'
    test_platforms_match = 0    # Count how many tests were actually ran with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings

    test_report = {}            # Test report used to export to Junit, HTML etc...
    muts_to_test = []           # MUTs to actually be tested
    test_queue = Queue()        # contains information about test_bin and image_path for each test case
    test_result_queue = Queue() # used to store results of each thread
    execute_threads = []        # list of threads to run test cases

    ### Check if the --parallel argument is an integer greater than or equal to 1
    try:
        parallel_test_exec = int(opts.parallel_test_exec)
        if parallel_test_exec < 1:
            parallel_test_exec = 1
    except ValueError:
        gt_logger.gt_log_err("argument of mode --parallel is not a int, disable parallel mode")
        parallel_test_exec = 1

    # Values used to generate random seed for test execution order shuffle
    SHUFFLE_SEED_ROUND = 10 # Value used to round float random seed
    shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND)

    # Set shuffle seed if it is provided with command line option
    if opts.shuffle_test_seed:
        shuffle_random_seed = round(float(opts.shuffle_test_seed), SHUFFLE_SEED_ROUND)

    ### Testing procedures, for each target, for each target's compatible platform
    for yotta_target_name in yt_target_platform_map:
        gt_logger.gt_log("processing '%s' yotta target compatible platforms..."% gt_logger.gt_bright(yotta_target_name))

        for platform_name in yt_target_platform_map[yotta_target_name]:
            gt_logger.gt_log("processing '%s' platform..."% gt_logger.gt_bright(platform_name))

            ### Select MUTS to test from list of available MUTS to start testing
            mut = None
            number_of_parallel_instances = 1
            for mbed_dev in ready_mbed_devices:
                if accepted_target_ids and mbed_dev['target_id'] not in accepted_target_ids:
                    continue

                if mbed_dev['platform_name'] == platform_name:
                    mut = mbed_dev
                    muts_to_test.append(mbed_dev)
                    gt_logger.gt_log("using platform '%s' for test:"% gt_logger.gt_bright(platform_name))
                    for k in mbed_dev:
                        gt_logger.gt_log_tab("%s = '%s'"% (k, mbed_dev[k]))
                    if number_of_parallel_instances < parallel_test_exec:
                        number_of_parallel_instances += 1
                    else:
                        break

            # Configuration print mode:
            if opts.verbose_test_configuration_only:
                continue

            if mut:
                target_platforms_match += 1

                # Demo mode: --run implementation (already added --run to mbedhtrun)
                # We want to pass the file name to mbedhtrun (--run NAME  =>  -f NAME) and run only one binary
                if opts.run_app:
                    gt_logger.gt_log("running '%s' for '%s'"% (gt_logger.gt_bright(opts.run_app), gt_logger.gt_bright(yotta_target_name)))
                    disk = mut['mount_point']
                    port = mut['serial_port']
                    micro = mut['platform_name']
                    program_cycle_s = mut_info_map[platform_name]['properties']['program_cycle_s']
                    copy_method = opts.copy_method if opts.copy_method else 'shell'
                    enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

                    yotta_config = YottaConfig()
                    yotta_config.init(yotta_target_name)

                    yotta_config_baudrate = yotta_config.get_baudrate()

                    # We will force configuration specific baudrate
                    if port:
                        port = "%s:%d"% (port, yotta_config_baudrate)

                    test_platforms_match += 1
                    host_test_result = run_host_test(opts.run_app,
                                                     disk,
                                                     port,
                                                     yotta_target_name,
                                                     mut['target_id'],
                                                     micro=micro,
                                                     copy_method=copy_method,
                                                     program_cycle_s=program_cycle_s,
                                                     digest_source=opts.digest_source,
                                                     json_test_cfg=opts.json_test_configuration,
                                                     run_app=opts.run_app,
                                                     enum_host_tests_path=enum_host_tests_path,
                                                     verbose=True)

                    single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
                    status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
                    if single_test_result != TEST_RESULT_OK:
                        test_exec_retcode += 1
                    continue

                # Regression test mode:
                # Building sources for given target and perform normal testing

                yotta_result, yotta_ret = True, 0   # Skip build and assume 'yotta build' was successful
                if opts.skip_yotta_build:
                    gt_logger.gt_log("skipping calling yotta (specified with --skip-build option)")
                else:
                    yotta_result, yotta_ret = build_with_yotta(yotta_target_name,
                        verbose=opts.verbose,
                        build_to_release=opts.build_to_release,
                        build_to_debug=opts.build_to_debug)

                # We need to stop executing if yotta build fails
                if not yotta_result:
                    gt_logger.gt_log_err("yotta returned %d"% yotta_ret)
                    return (RET_YOTTA_BUILD_FAIL)

                if opts.only_build_tests:
                    continue

                # Build phase will be followed by test execution for each target
                if yotta_result and not opts.only_build_tests:
                    binary_type = mut_info_map[platform_name]['properties']['binary_type']
                    ctest_test_list = load_ctest_testsuite(os.path.join('.', 'build', yotta_target_name),
                        binary_type=binary_type)
                    #TODO no tests to execute

                filtered_ctest_test_list = create_filtered_test_list(ctest_test_list, opts.test_by_names, opts.skip_test)

                gt_logger.gt_log("running %d test%s for target '%s' and platform '%s'"% (
                    len(filtered_ctest_test_list),
                    "s" if len(filtered_ctest_test_list) != 1 else "",
                    gt_logger.gt_bright(yotta_target_name),
                    gt_logger.gt_bright(platform_name)
                ))

                # Test execution order can be shuffled (also with provided random seed)
                # for test execution reproduction.
                filtered_ctest_test_list_keys = filtered_ctest_test_list.keys()
                if opts.shuffle_test_order:
                    # We want to shuffle test names randomly
                    random.shuffle(filtered_ctest_test_list_keys, lambda: shuffle_random_seed)
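                    # Note (illustrative, hypothetical values): shuffling two equal
                    # lists with the same zero-argument seed function yields the same
                    # order, which is what lets --shuffle-seed reproduce a test run:
                    #   random.shuffle(list_a, lambda: 0.1234)
                    #   random.shuffle(list_b, lambda: 0.1234)   # list_b ends up ordered like list_a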

                for test_bin in filtered_ctest_test_list_keys:
                    image_path = filtered_ctest_test_list[test_bin]
                    test = {"test_bin":test_bin, "image_path":image_path}
                    test_queue.put(test)

                #for test_bin, image_path in filtered_ctest_test_list.iteritems():
                #    test = {"test_bin":test_bin, "image_path":image_path}
                #    test_queue.put(test)

                number_of_threads = 0
                for mut in muts_to_test:
                    #################################################################
                    # Experimental, parallel test execution
                    #################################################################
                    if number_of_threads < parallel_test_exec:
                        args = (test_result_queue, test_queue, opts, mut, mut_info, yotta_target_name, greentea_hooks)
                        t = Thread(target=run_test_thread, args=args)
                        execute_threads.append(t)
                        number_of_threads += 1

    gt_logger.gt_log_tab("use %s instance%s for testing" % (len(execute_threads), 's' if len(execute_threads) != 1 else ''))
    for t in execute_threads:
        t.daemon = True
        t.start()

    # merge partial test reports from different threads into the final test report
    for t in execute_threads:
        try:
            t.join() #blocking
            test_return_data = test_result_queue.get(False)
        except Exception as e:
            # No test report generated
            gt_logger.gt_log_err("could not generate test report" + str(e))
            test_exec_retcode += -1000
            return test_exec_retcode

        test_platforms_match += test_return_data['test_platforms_match']
        test_exec_retcode += test_return_data['test_exec_retcode']
        partial_test_report = test_return_data['test_report']
        # todo: find better solution, maybe use extend
        for report_key in partial_test_report.keys():
            if report_key not in test_report:
                test_report[report_key] = {}
                test_report.update(partial_test_report)
            else:
                test_report[report_key].update(partial_test_report[report_key])

    if opts.verbose_test_configuration_only:
        print
        print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
        return (0)

    gt_logger.gt_log("all tests finished!")

    # We will execute post test hooks on tests
    for yotta_target in test_report:
        test_name_list = []    # All test case names for particular yotta target
        for test_name in test_report[yotta_target]:
            test = test_report[yotta_target][test_name]
            # Test was executed (finished with PASS or FAIL)
            if test['single_test_result'] in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
                test_name_list.append(test_name)
                # Call hook executed for each test, just after all tests are finished
                if greentea_hooks:
                    # We can execute this test hook just after all tests are finished ('hook_post_test_end')
                    format = {
                        "test_name": test_name,
                        "test_bin_name": test['test_bin_name'],
                        "image_path": test['image_path'],
                        "build_path": test['build_path'],
                        "build_path_abs": test['build_path_abs'],
                        "yotta_target_name": yotta_target,
                    }
                    greentea_hooks.run_hook_ext('hook_post_test_end', format)
        if greentea_hooks:
            # Call hook executed for each yotta target, just after all tests are finished
            build_path = os.path.join("./build", yotta_target)
            build_path_abs = os.path.abspath(build_path)
            # We can execute this test hook just after all tests are finished ('hook_post_test_end')
            format = {
                "build_path": build_path,
                "build_path_abs": build_path_abs,
                "test_name_list": test_name_list,
                "yotta_target_name": yotta_target,
            }
            greentea_hooks.run_hook_ext('hook_post_all_test_end', format)

    # This tool is designed to work in CI
    # We want to return success codes based on tool actions,
    # only if tests were executed and all passed do we want to
    # return 0 (success)
    if not opts.only_build_tests:
        # Prints shuffle seed
        gt_logger.gt_log("shuffle seed: %.*f"% (SHUFFLE_SEED_ROUND, shuffle_random_seed))

        # Reports (to file)
        if opts.report_junit_file_name:
            gt_logger.gt_log("exporting to JUnit file '%s'..."% gt_logger.gt_bright(opts.report_junit_file_name))
            junit_report = exporter_testcase_junit(test_report, test_suite_properties=yotta_module.get_data())
            with open(opts.report_junit_file_name, 'w') as f:
                f.write(junit_report)
        if opts.report_text_file_name:
            gt_logger.gt_log("exporting to text '%s'..."% gt_logger.gt_bright(opts.report_text_file_name))

            text_report, text_results = exporter_text(test_report)
            text_testcase_report, text_testcase_results = exporter_testcase_text(test_report)
            with open(opts.report_text_file_name, 'w') as f:
                f.write('\n'.join([text_report, text_results, text_testcase_report, text_testcase_results]))

        # Reports (to console)
        if opts.report_json:
            # We will not print summary and json report together
            gt_logger.gt_log("json test report:")
            print exporter_json(test_report)
        else:
            # Final summary
            if test_report:
                # Test suite report
                gt_logger.gt_log("test suite report:")
                text_report, text_results = exporter_text(test_report)
                print text_report
                gt_logger.gt_log("test suite results: " + text_results)
                # test case detailed report
                gt_logger.gt_log("test case report:")
                text_testcase_report, text_testcase_results = exporter_testcase_text(test_report, test_suite_properties=yotta_module.get_data())
                print text_testcase_report
                gt_logger.gt_log("test case results: " + text_testcase_results)

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_logger.gt_log_warn("no platform/target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_logger.gt_log_warn("no target matching platforms were found!")
            test_exec_retcode += -100

    return (test_exec_retcode)
def get_test_spec_from_yt_module(opts):
    """
    Gives test specification created from yotta module environment.

    :return TestSpec:
    """
    ### Read yotta module basic information
    yotta_module = YottaModule()
    yotta_module.init()  # Read actual yotta module data

    # Check if NO greentea-client is in module.json of repo to test, if so abort
    if not yotta_module.check_greentea_client():
        error = """
        *****************************************************************************************
        * We've noticed that NO 'greentea-client' module is specified in                        *
        * dependency/testDependency section of this module's 'module.json' file.                *
        *                                                                                       *
        * This version of Greentea requires 'greentea-client' module.                           *
        * Please downgrade to Greentea before v0.2.0:                                           *
        *                                                                                       *
        * $ pip install "mbed-greentea<0.2.0" --upgrade                                         *
        *                                                                                       *
        * or port your tests to new Async model: https://github.com/ARMmbed/greentea/pull/78    *
        *****************************************************************************************
        """
        raise YottaError(error)

    test_spec = TestSpec()

    ### Selecting yotta targets to process
    yt_targets = []  # List of yotta targets specified by the user to process during this run
    if opts.list_of_targets:
        yt_targets = opts.list_of_targets.split(',')
    else:
        # Trying to use locally set yotta target
        gt_logger.gt_log("checking for yotta target in current directory")
        gt_logger.gt_log_tab("reason: no --target switch set")
        current_target = get_mbed_target_from_current_dir()
        if current_target:
            gt_logger.gt_log("assuming default target as '%s'" %
                             gt_logger.gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            yt_targets = [current_target]
        else:
            gt_logger.gt_log_tab(
                "yotta target in current directory is not set")
            gt_logger.gt_log_err(
                "yotta target is not specified. Use '%s' or '%s' command to set target"
                % (gt_logger.gt_bright('mbedgt -t <yotta_target>'),
                   gt_logger.gt_bright('yotta target <yotta_target>')))
            raise YottaError("Yotta target not set in current directory!")

    ### Use yotta to search mapping between platform names and available platforms
    # Convert platform:target, ... mapping to data structure
    yt_target_to_map_platform = {}
    if opts.map_platform_to_yt_target:
        gt_logger.gt_log(
            "user defined platform -> target supported mapping definition (specified with --map-target switch)"
        )
        for mapping in opts.map_platform_to_yt_target.split(','):
            if len(mapping.split(':')) == 2:
                yt_target, platform = mapping.split(':')
                yt_target_to_map_platform[yt_target] = platform
                gt_logger.gt_log_tab(
                    "mapped yotta target '%s' to be compatible with platform '%s'"
                    % (gt_logger.gt_bright(yt_target),
                       gt_logger.gt_bright(platform)))
            else:
                gt_logger.gt_log_tab(
                    "unknown format '%s', use 'target:platform' format" %
                    mapping)

    for yt_target in yt_targets:
        if yt_target in yt_target_to_map_platform:
            platform = yt_target_to_map_platform[yt_target]
        else:
            # get it from local Yotta target
            platform = get_platform_name_from_yotta_target(yt_target)

        # Toolchain doesn't matter as Greentea does not have to do any selection for it unlike platform
        toolchain = yt_target
        yotta_config = YottaConfig()
        yotta_config.init(yt_target)
        baud_rate = yotta_config.get_baudrate()
        base_path = os.path.join('.', 'build', yt_target)
        tb = TestBuild(yt_target, platform, toolchain, baud_rate, base_path)
        test_spec.add_test_builds(yt_target, tb)

        # Find tests
        ctest_test_list = load_ctest_testsuite(
            base_path, binary_type=get_binary_type_for_platform(platform))
        for name, path in ctest_test_list.items():
            t = Test(name)
            t.add_binary(path, TestBinary.BIN_TYPE_BOOTABLE)
            tb.add_test(name, t)

    return test_spec
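# Usage sketch (hypothetical 'opts' stand-in; attribute names match the CLI parser
# below, everything else is assumed): build a test specification for one locally
# installed yotta target.
#
#   class _FakeOpts(object):
#       list_of_targets = 'frdm-k64f-gcc'
#       map_platform_to_yt_target = None
#
#   test_spec = get_test_spec_from_yt_module(_FakeOpts())   # may raise YottaError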
Example no. 31
0
def main():
    """ Closure for main_cli() function """
    parser = optparse.OptionParser()

    parser.add_option(
        "-t",
        "--target",
        dest="list_of_targets",
        help="You can specify list of yotta targets you want to build. Use comma to separate them."
        + "Note: If --test-spec switch is defined this list becomes optional list of builds you want to filter in your test:"
        + "Comma separated list of builds from test specification. Applicable if --test-spec switch is specified",
    )

    parser.add_option(
        "-n",
        "--test-by-names",
        dest="test_by_names",
        help="Runs only test enumerated it this switch. Use comma to separate test case names.",
    )

    parser.add_option(
        "-i",
        "--skip-test",
        dest="skip_test",
        help="Skip tests enumerated it this switch. Use comma to separate test case names.",
    )

    parser.add_option(
        "-O",
        "--only-build",
        action="store_true",
        dest="only_build_tests",
        default=False,
        help="Only build repository and tests, skips actual test procedures (flashing etc.)",
    )

    parser.add_option(
        "-S",
        "--skip-build",
        action="store_true",
        dest="skip_yotta_build",
        default=True,
        help="Skip calling 'yotta build' on this module",
    )

    copy_methods_str = "Plugin support: " + ", ".join(mbed_host_tests.host_tests_plugins.get_plugin_caps("CopyMethod"))
    parser.add_option(
        "-c",
        "--copy",
        dest="copy_method",
        help="Copy (flash the target) method selector. " + copy_methods_str,
        metavar="COPY_METHOD",
    )

    parser.add_option(
        "",
        "--parallel",
        dest="parallel_test_exec",
        default=1,
        help="Experimental, you execute test runners for connected to your host MUTs in parallel (speeds up test result collection)",
    )

    parser.add_option(
        "-e",
        "--enum-host-tests",
        dest="enum_host_tests",
        help="Define directory with yotta module local host tests. Default: ./test/host_tests",
    )

    parser.add_option(
        "",
        "--config",
        dest="verbose_test_configuration_only",
        default=False,
        action="store_true",
        help="Displays connected boards and detected targets and exits.",
    )

    parser.add_option(
        "",
        "--release",
        dest="build_to_release",
        default=False,
        action="store_true",
        help="If possible force build in release mode (yotta -r).",
    )

    parser.add_option(
        "",
        "--debug",
        dest="build_to_debug",
        default=False,
        action="store_true",
        help="If possible force build in debug mode (yotta -d).",
    )

    parser.add_option(
        "-l", "--list", dest="list_binaries", default=False, action="store_true", help="List available binaries"
    )

    parser.add_option(
        "-g",
        "--grm",
        dest="global_resource_mgr",
        help="Global resource manager service query: platrform name, remote mgr module name, IP address and port, example K64F:module_name:10.2.123.43:3334",
    )

    parser.add_option(
        "-m",
        "--map-target",
        dest="map_platform_to_yt_target",
        help="List of custom mapping between platform name and yotta target. Comma separated list of YOTTA_TARGET:PLATFORM tuples",
    )

    parser.add_option(
        "",
        "--use-tids",
        dest="use_target_ids",
        help="Specify explicitly which devices can be used by Greentea for testing by creating list of allowed Target IDs (use comma separated list)",
    )

    parser.add_option(
        "-u",
        "--shuffle",
        dest="shuffle_test_order",
        default=False,
        action="store_true",
        help="Shuffles test execution order",
    )

    parser.add_option(
        "",
        "--shuffle-seed",
        dest="shuffle_test_seed",
        default=None,
        help="Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)",
    )

    parser.add_option(
        "",
        "--lock",
        dest="lock_by_target",
        default=False,
        action="store_true",
        help="Use simple resource locking mechanism to run multiple application instances",
    )

    parser.add_option(
        "",
        "--digest",
        dest="digest_source",
        help="Redirect input from where test suite should take console input. You can use stdin or file name to get test case console output",
    )

    parser.add_option("-H", "--hooks", dest="hooks_json", help="Load hooks used drive extra functionality")

    parser.add_option("", "--test-spec", dest="test_spec", help="Test specification generated by build system.")

    parser.add_option(
        "", "--test-cfg", dest="json_test_configuration", help="Pass to host test data with host test configuration"
    )

    parser.add_option("", "--run", dest="run_app", help="Flash, reset and dump serial from selected binary application")

    parser.add_option(
        "",
        "--report-junit",
        dest="report_junit_file_name",
        help="You can log test suite results in form of JUnit compliant XML report",
    )

    parser.add_option(
        "", "--report-text", dest="report_text_file_name", help="You can log test suite results to text file"
    )

    parser.add_option(
        "", "--report-json", dest="report_json_file_name", help="You can log test suite results to JSON formatted file"
    )

    parser.add_option(
        "",
        "--report-html",
        dest="report_html_file_name",
        help="You can log test suite results in the form of a HTML page",
    )

    parser.add_option(
        "",
        "--report-fails",
        dest="report_fails",
        default=False,
        action="store_true",
        help="Prints console outputs for failed tests",
    )

    parser.add_option(
        "",
        "--yotta-registry",
        dest="yotta_search_for_mbed_target",
        default=False,
        action="store_true",
        help="Use on-line yotta registry to search for compatible with connected mbed devices yotta targets. Default: search is done in yotta_targets directory",
    )

    parser.add_option(
        "-V",
        "--verbose-test-result",
        dest="verbose_test_result_only",
        default=False,
        action="store_true",
        help="Prints test serial output",
    )

    parser.add_option(
        "-v",
        "--verbose",
        dest="verbose",
        default=False,
        action="store_true",
        help="Verbose mode (prints some extra information)",
    )

    parser.add_option(
        "", "--plain", dest="plain", default=False, action="store_true", help="Do not use colours while logging"
    )

    parser.add_option(
        "", "--version", dest="version", default=False, action="store_true", help="Prints package version and exits"
    )

    parser.description = """This automated test script is used to test mbed SDK 3.0 on mbed-enabled devices with support from yotta build tool"""
    parser.epilog = """Example: mbedgt --target frdm-k64f-gcc"""

    (opts, args) = parser.parse_args()

    cli_ret = 0

    if not opts.version:
        # This string should not appear when fetching plain version string
        gt_logger.gt_log(get_hello_string())

    start = time()
    if opts.lock_by_target:
        # We are using Greentea proprietary locking mechanism to lock between platforms and targets
        gt_logger.gt_log("using (experimental) simple locking mechanism")
        gt_logger.gt_log_tab("kettle: %s" % GREENTEA_KETTLE_PATH)
        gt_file_sem, gt_file_sem_name, gt_instance_uuid = greentea_get_app_sem()
        with gt_file_sem:
            greentea_update_kettle(gt_instance_uuid)
            try:
                cli_ret = main_cli(opts, args, gt_instance_uuid)
            except KeyboardInterrupt:
                greentea_clean_kettle(gt_instance_uuid)
                gt_logger.gt_log_err("ctrl+c keyboard interrupt!")
                return -2  # Keyboard interrupt
            except:
                greentea_clean_kettle(gt_instance_uuid)
                gt_logger.gt_log_err("unexpected error:")
                gt_logger.gt_log_tab(sys.exc_info()[0])
                raise
            greentea_clean_kettle(gt_instance_uuid)
    else:
        # Standard mode of operation
        # Other instance must provide mutually exclusive access control to platforms and targets
        try:
            cli_ret = main_cli(opts, args)
        except KeyboardInterrupt:
            gt_logger.gt_log_err("ctrl+c keyboard interrupt!")
            return -2  # Keyboard interrupt
        except Exception as e:
            gt_logger.gt_log_err("unexpected error:")
            gt_logger.gt_log_tab(str(e))
            raise

    if not any([opts.list_binaries, opts.version]):
        delta = time() - start  # Test execution time delta
        gt_logger.gt_log("completed in %.2f sec" % delta)

    if cli_ret:
        gt_logger.gt_log_err("exited with code %d" % cli_ret)

    return cli_ret
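# Illustrative invocations this parser is meant to accept (values are made up):
#   mbedgt --target frdm-k64f-gcc              -> opts.list_of_targets == 'frdm-k64f-gcc'
#   mbedgt -t frdm-k64f-gcc -n tests-a,tests-b -> opts.test_by_names == 'tests-a,tests-b'
#   mbedgt --config                            -> opts.verbose_test_configuration_only is True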
Example no. 32
0
def main():
    """ Closure for main_cli() function """
    parser = optparse.OptionParser()

    parser.add_option('-t', '--target',
                    dest='list_of_targets',
                    help='You can specify list of targets you want to build. Use comma to separate them')

    parser.add_option('-n', '--test-by-names',
                    dest='test_by_names',
                    help='Runs only tests enumerated in this switch. Use comma to separate test case names.')

    parser.add_option('-i', '--skip-test',
                    dest='skip_test',
                    help='Skip tests enumerated in this switch. Use comma to separate test case names.')

    parser.add_option("-O", "--only-build",
                    action="store_true",
                    dest="only_build_tests",
                    default=False,
                    help="Only build repository and tests, skips actual test procedures (flashing etc.)")

    parser.add_option("-S", "--skip-build",
                    action="store_true",
                    dest="skip_yotta_build",
                    default=False,
                    help="Skip calling 'yotta build' on this module")

    copy_methods_str = "Plugin support: " + ', '.join(mbed_host_tests.host_tests_plugins.get_plugin_caps('CopyMethod'))
    parser.add_option("-c", "--copy",
                    dest="copy_method",
                    help="Copy (flash the target) method selector. " + copy_methods_str,
                    metavar="COPY_METHOD")

    parser.add_option('', '--parallel',
                    dest='parallel_test_exec',
                    default=1,
                    help='Experimental: execute test runners in parallel for MUTs connected to your host (speeds up test result collection)')

    parser.add_option("-e", "--enum-host-tests",
                    dest="enum_host_tests",
                    help="Define directory with yotta module local host tests. Default: ./test/host_tests")

    parser.add_option('', '--config',
                    dest='verbose_test_configuration_only',
                    default=False,
                    action="store_true",
                    help='Displays connected boards and detected targets and exits.')

    parser.add_option('', '--release',
                    dest='build_to_release',
                    default=False,
                    action="store_true",
                    help='If possible force build in release mode (yotta -r).')

    parser.add_option('', '--debug',
                    dest='build_to_debug',
                    default=False,
                    action="store_true",
                    help='If possible force build in debug mode (yotta -d).')

    parser.add_option('-l', '--list',
                    dest='list_binaries',
                    default=False,
                    action="store_true",
                    help='List available binaries')

    parser.add_option('-m', '--map-target',
                    dest='map_platform_to_yt_target',
                    help='List of custom mapping between platform name and yotta target. Comma separated list of PLATFORM:TARGET tuples')

    parser.add_option('', '--use-tids',
                    dest='use_target_ids',
                    help='Specify explicitly which devices can be used by Greentea for testing by creating list of allowed Target IDs (use comma separated list)')

    parser.add_option('-u', '--shuffle',
                    dest='shuffle_test_order',
                    default=False,
                    action="store_true",
                    help='Shuffles test execution order')

    parser.add_option('', '--shuffle-seed',
                    dest='shuffle_test_seed',
                    default=None,
                    help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')

    parser.add_option('', '--lock',
                    dest='lock_by_target',
                    default=False,
                    action="store_true",
                    help='Use simple resource locking mechanism to run multiple application instances')

    parser.add_option('', '--digest',
                    dest='digest_source',
                    help='Redirect input from where test suite should take console input. You can use stdin or file name to get test case console output')

    parser.add_option('-H', '--hooks',
                    dest='hooks_json',
                    help='Load hooks used to drive extra functionality')

    parser.add_option('', '--test-cfg',
                    dest='json_test_configuration',
                    help='Pass data with host test configuration to the host test')

    parser.add_option('', '--run',
                    dest='run_app',
                    help='Flash, reset and dump serial from selected binary application')

    parser.add_option('', '--report-junit',
                    dest='report_junit_file_name',
                    help='You can log test suite results in form of JUnit compliant XML report')

    parser.add_option('', '--report-text',
                    dest='report_text_file_name',
                    help='You can log test suite results to text file')

    parser.add_option('', '--report-json',
                    dest='report_json',
                    default=False,
                    action="store_true",
                    help='Outputs test results in JSON')

    parser.add_option('', '--report-fails',
                    dest='report_fails',
                    default=False,
                    action="store_true",
                    help='Prints console outputs for failed tests')

    parser.add_option('', '--yotta-registry',
                    dest='yotta_search_for_mbed_target',
                    default=False,
                    action="store_true",
                    help='Use on-line yotta registry to search for yotta targets compatible with connected mbed devices. Default: search is done in yotta_targets directory')

    parser.add_option('-V', '--verbose-test-result',
                    dest='verbose_test_result_only',
                    default=False,
                    action="store_true",
                    help='Prints test serial output')

    parser.add_option('-v', '--verbose',
                    dest='verbose',
                    default=False,
                    action="store_true",
                    help='Verbose mode (prints some extra information)')

    parser.add_option('', '--plain',
                    dest='plain',
                    default=False,
                    action="store_true",
                    help='Do not use colours while logging')

    parser.add_option('', '--version',
                    dest='version',
                    default=False,
                    action="store_true",
                    help='Prints package version and exits')

    parser.description = """This automated test script is used to test mbed SDK 3.0 on mbed-enabled devices with support from yotta build tool"""
    parser.epilog = """Example: mbedgt --target frdm-k64f-gcc"""

    (opts, args) = parser.parse_args()

    cli_ret = 0

    start = time()
    if opts.lock_by_target:
        # We are using Greentea proprietary locking mechanism to lock between platforms and targets
        gt_logger.gt_log("using (experimental) simple locking mechanism")
        gt_logger.gt_log_tab("kettle: %s"% GREENTEA_KETTLE_PATH)
        gt_file_sem, gt_file_sem_name, gt_instance_uuid = greentea_get_app_sem()
        with gt_file_sem:
            greentea_update_kettle(gt_instance_uuid)
            try:
                cli_ret = main_cli(opts, args, gt_instance_uuid)
            except KeyboardInterrupt:
                greentea_clean_kettle(gt_instance_uuid)
                gt_logger.gt_log_err("ctrl+c keyboard interrupt!")
                return(-2)    # Keyboard interrupt
            except:
                greentea_clean_kettle(gt_instance_uuid)
                gt_logger.gt_log_err("unexpected error:")
                gt_logger.gt_log_tab(sys.exc_info()[0])
                raise
            greentea_clean_kettle(gt_instance_uuid)
    else:
        # Standard mode of operation
        # Other instance must provide mutually exclusive access control to platforms and targets
        try:
            cli_ret = main_cli(opts, args)
        except KeyboardInterrupt:
            gt_logger.gt_log_err("ctrl+c keyboard interrupt!")
            return(-2)    # Keyboard interrupt
        except Exception as e:
            gt_logger.gt_log_err("unexpected error:")
            gt_logger.gt_log_tab(str(e))
            raise

    if not any([opts.list_binaries, opts.version]):
        delta = time() - start  # Test execution time delta
        gt_logger.gt_log("completed in %.2f sec"% delta)

    if cli_ret:
        gt_logger.gt_log_err("exited with code %d"% cli_ret)

    return(cli_ret)
Example no. 33
0
def run_host_test(image_path,
                  disk,
                  port,
                  duration=10,
                  micro=None,
                  reset=None,
                  reset_tout=None,
                  verbose=False,
                  copy_method=None,
                  program_cycle_s=None,
                  digest_source=None,
                  json_test_cfg=None,
                  max_failed_properties=5,
                  enum_host_tests_path=None,
                  run_app=None):
    """! This function runs host test supervisor (executes mbedhtrun) and checks output from host test process.
    @return Tuple with test results, test output and test duration times
    @param image_path Path to binary file for flashing
    @param disk Currently mounted mbed-enabled devices disk (mount point)
    @param port Currently mounted mbed-enabled devices serial port (console)
    @param duration Test case timeout
    @param micro Mbed-enabled device name
    @param reset Reset type
    @param reset_tout Reset timeout (sec)
    @param verbose Verbose mode flag
    @param copy_method Copy method type (name)
    @param program_cycle_s Wait after flashing delay (sec)
    @param json_test_cfg Additional test configuration file path passed to host tests in JSON format
    @param max_failed_properties After how many unknown properties we will assume test is not ported
    @param enum_host_tests_path Directory where locally defined host tests may reside
    @param run_app Run application mode flag (we run application and grab serial port data)
    @param digest_source if None mbedhtrun will be executed. If 'stdin',
                           stdin will be used via StdInObserver or file (if
                           file name was given as switch option)
    """

    class StdInObserver(Thread):
        """ Process used to read stdin only as console input from MUT
        """
        def __init__(self):
            Thread.__init__(self)
            self.queue = Queue()
            self.daemon = True
            self.active = True
            self.start()

        def run(self):
            while self.active:
                c = sys.stdin.read(1)
                self.queue.put(c)

        def stop(self):
            self.active = False

    class FileObserver(Thread):
        """ process used to read file content as console input from MUT
        """
        def __init__(self, filename):
            Thread.__init__(self)
            self.filename = filename
            self.queue = Queue()
            self.daemon = True
            self.active = True
            self.start()

        def run(self):
            with open(self.filename) as f:
                while self.active:
                    c = f.read(1)
                    self.queue.put(c)

        def stop(self):
            self.active = False

    class ProcessObserver(Thread):
        """ Default process used to observe stdout of another process as console input from MUT
        """
        def __init__(self, proc):
            Thread.__init__(self)
            self.proc = proc
            self.queue = Queue()
            self.daemon = True
            self.active = True
            self.start()

        def run(self):
            while self.active:
                c = self.proc.stdout.read(1)
                self.queue.put(c)

        def stop(self):
            self.active = False

            # Try stopping mbed-host-test by closing its stdin; ignore errors if it is already closed
            try:
                self.proc.stdin.close()
            except Exception:
                pass

            # Give 5 sec for mbedhtrun to exit
            ret_code = None
            for i in range(5):
                ret_code = self.proc.poll()
                # A None value indicates that the process hasn't terminated yet.
                if ret_code is not None:
                    break
                sleep(1)

            if ret_code is None:            # Kill it
                print 'Terminating mbed-host-test(mbedhtrun) process (PID %s)' % self.proc.pid
                try:
                    self.proc.terminate()
                except Exception as e:
                    print "ProcessObserver.stop(): %s" % str(e)

    def get_char_from_queue(obs):
        """ Get character from queue safe way
        """
        try:
            c = obs.queue.get(block=True, timeout=0.5)
            # signals to queue job is done
            obs.queue.task_done()
        except Empty:
            c = None
        except:
            raise
        return c

    def filter_queue_char(c):
        """ Filters out non ASCII characters from serial port
        """
        if ord(c) not in range(128):
            c = ' '
        return c

    def get_auto_property_value(property_name, line):
        """! Scans auto detection line from MUT and returns scanned parameter 'property_name'
        @details Host test case has to print additional properties for test to be set up
        @return Returns string or None if property search failed
        """
        result = None
        if re.search("HOST: Property '%s'"% property_name, line) is not None:
            property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
            if property is not None and len(property.groups()) == 1:
                result = property.groups()[0]
        return result
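    # Illustrative calls (the MUT lines are made up but follow the format the
    # regular expressions above expect):
    #   get_auto_property_value('timeout', "HOST: Property 'timeout' = '20'")  # -> '20'
    #   get_auto_property_value('timeout', "HOST: Unknown property")           # -> None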

    # Detect from where input should be taken, if no --digest switch is specified
    # normal test execution can be performed

    if verbose:
        gt_logger.gt_log("selecting test case observer...")
        if digest_source:
            gt_logger.gt_log_tab("selected digest source: %s"% digest_source)

    # Select who will digest test case serial port data
    if digest_source == 'stdin':
        # When we want to scan stdin for test results
        obs = StdInObserver()
    elif digest_source is not None:
        # When we want to open file to scan for test results
        obs = FileObserver(digest_source)
    else:
        # Command executing CLI for host test supervisor (in detect-mode)
        cmd = ["mbedhtrun",
                '-d', disk,
                '-p', port,
                '-f', '"%s"'% image_path,
                ]

        # Add extra parameters to host_test
        if program_cycle_s is not None:
            cmd += ["-C", str(program_cycle_s)]
        if copy_method is not None:
            cmd += ["-c", copy_method]
        if micro is not None:
            cmd += ["-m", micro]
        if reset is not None:
            cmd += ["-r", reset]
        if reset_tout is not None:
            cmd += ["-R", str(reset_tout)]
        if json_test_cfg is not None:
            cmd += ["--test-cfg", '"%s"' % str(json_test_cfg)]
        if run_app is not None:
            cmd += ["--run"]    # -f stores binary name!
        if enum_host_tests_path:
            cmd += ["-e", '"%s"'% enum_host_tests_path]

        if verbose:
            gt_logger.gt_log_tab("calling mbedhtrun: %s"% " ".join(cmd))
            gt_logger.gt_log("mbed-host-test-runner: started")
        proc = Popen(cmd, stdin=PIPE, stdout=PIPE)
        obs = ProcessObserver(proc)

    result = None
    update_once_flag = {}   # Stores flags checking if some auto-parameter was already set
    unknown_property_count = 0
    total_duration = 20     # This is for flashing, reset and other serial port operations
    line = ''
    output = []
    start_time = time()
    while (time() - start_time) < (total_duration):
        try:
            c = get_char_from_queue(obs)
        except Exception as e:
            output.append('get_char_from_queue(obs): %s'% str(e))
            break
        if c:
            if verbose:
                sys.stdout.write(c)
            c = filter_queue_char(c)
            output.append(c)
            # Give the mbed under test a way to communicate the end of the test
            if c in ['\n', '\r']:

                # Check for unknown property prints
                # If there are too many we will stop test execution and assume test is not ported
                if "HOST: Unknown property" in line:
                    unknown_property_count += 1
                    if unknown_property_count >= max_failed_properties:
                        output.append('{{error}}')
                        break

                # Checking for auto-detection information from the test about MUT reset moment
                if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
                    # We will update this marker only once to prevent multiple time resets
                    update_once_flag['reset_target'] = True
                    start_time = time()
                    total_duration = duration   # After reset we are going to use the real test case duration

                # Checking for auto-detection information from the test about timeout
                auto_timeout_val = get_auto_property_value('timeout', line)
                if 'timeout' not in update_once_flag and auto_timeout_val is not None:
                    # We will update this marker only once to prevent multiple time resets
                    update_once_flag['timeout'] = True
                    total_duration = int(auto_timeout_val)

                # Detect mbed assert:
                if 'mbed assertation failed: ' in line:
                    output.append('{{mbed_assert}}')
                    break

                # Check for test end. Only a '{{end}}' in the start of line indicates a test end.
                # A sub string '{{end}}' may also appear in an error message.
                if re.search('^\{\{end\}\}', line, re.I):
                    break
                line = ''
            else:
                line += c
    else:
        result = TEST_RESULT_TIMEOUT

    if '{{end}}' not in line:
        output.append('{{end}}')

    c = get_char_from_queue(obs)

    if c:
        if verbose:
            sys.stdout.write(c)
        c = filter_queue_char(c)
        output.append(c)

    # Stop test process
    obs.stop()

    end_time = time()
    testcase_duration = end_time - start_time   # Test case duration from reset to {end}

    if verbose:
        gt_logger.gt_log("mbed-host-test-runner: stopped")
    if not result:
        result = get_test_result(output)
    if verbose:
        gt_logger.gt_log("mbed-host-test-runner: returned '%s'"% result)
    return (result, "".join(output), testcase_duration, duration)
Example no. 34
0
def create_filtered_test_list(ctest_test_list, test_by_names, skip_test, test_spec=None):
    """! Filters test case list (filtered with switch -n) and return filtered list.
    @param ctest_test_list List of tests, originally from CTestTestFile.cmake in yotta module. Now comes from test specification
    @param test_by_names Command line switch -n <test_by_names>
    @param skip_test Command line switch -i <skip_test>
    @param test_spec Test specification object loaded with --test-spec switch
    @return Filtered dictionary mapping test names to binary paths
    """

    def filter_names_by_prefix(test_case_name_list, prefix_name):
        """!
        @param test_case_name_list List of all test cases
        @param prefix_name Prefix of test name we are looking for
        @return Sorted list of test names starting with 'prefix_name'
        """
        result = list()
        for test_name in test_case_name_list:
            if test_name.startswith(prefix_name):
                result.append(test_name)
        return sorted(result)

    filtered_ctest_test_list = ctest_test_list
    test_list = None
    invalid_test_names = []
    if filtered_ctest_test_list is None:
        return {}

    if test_by_names:
        filtered_ctest_test_list = {}  # Subset of 'ctest_test_list'
        test_list = test_by_names.split(",")
        gt_logger.gt_log("test case filter (specified with -n option)")

        for test_name in set(test_list):
            if test_name.endswith("*"):
                # This 'star-suffix' filter allows users to filter tests with fixed prefixes
                # Example: -n 'TESTS-mbed_drivers*' will filter all test cases with names starting with 'TESTS-mbed_drivers'
                for test_name_filtered in filter_names_by_prefix(ctest_test_list.keys(), test_name[:-1]):
                    gt_logger.gt_log_tab("test filtered in '%s'" % gt_logger.gt_bright(test_name_filtered))
                    filtered_ctest_test_list[test_name_filtered] = ctest_test_list[test_name_filtered]
            elif test_name not in ctest_test_list:
                invalid_test_names.append(test_name)
            else:
                gt_logger.gt_log_tab("test filtered in '%s'" % gt_logger.gt_bright(test_name))
                filtered_ctest_test_list[test_name] = ctest_test_list[test_name]

    if skip_test:
        test_list = skip_test.split(",")
        gt_logger.gt_log("test case filter (specified with -i option)")

        for test_name in set(test_list):
            if test_name not in ctest_test_list:
                invalid_test_names.append(test_name)
            else:
                gt_logger.gt_log_tab("test filtered out '%s'" % gt_logger.gt_bright(test_name))
                del filtered_ctest_test_list[test_name]

    if invalid_test_names:
        opt_to_print = "-n" if test_by_names else "skip-test"
        gt_logger.gt_log_warn("invalid test case names (specified with '%s' option)" % opt_to_print)
        for test_name in invalid_test_names:
            if test_spec:
                test_spec_name = test_spec.test_spec_filename
                gt_logger.gt_log_warn(
                    "test name '%s' not found in '%s' (specified with --test-spec option)"
                    % (gt_logger.gt_bright(test_name), gt_logger.gt_bright(test_spec_name))
                )
            else:
                gt_logger.gt_log_warn(
                    "test name '%s' not found in CTestTestFile.cmake (specified with '%s' option)"
                    % (gt_logger.gt_bright(test_name), opt_to_print)
                )
        gt_logger.gt_log_tab("note: test case names are case sensitive")
        gt_logger.gt_log_tab("note: see list of available test cases below")
        # Print available test suite names (binary names user can use with -n)
        if test_spec:
            list_binaries_for_builds(test_spec)
        else:
            list_binaries_for_targets()

    return filtered_ctest_test_list
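# Illustrative behaviour (toy dictionary, binary paths are made up):
#   ctest_test_list = {'tests-mbed_drivers-ticker': 'ticker.bin',
#                      'tests-mbed_drivers-timer':  'timer.bin',
#                      'tests-netsocket-tcp':       'tcp.bin'}
#   create_filtered_test_list(ctest_test_list, 'tests-mbed_drivers*', None)
#       -> new dict with only the two 'tests-mbed_drivers-*' entries (star-suffix filter)
#   create_filtered_test_list(ctest_test_list, None, 'tests-netsocket-tcp')
#       -> same dict object with the skipped test removed (filtered in place when -n is not used)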
Example no. 35
0
def get_test_spec(opts):
    """! Closure encapsulating how we get test specification and load it from file of from yotta module
    @return Returns tuple of (test specification, ret code). Test specification == None if test spec load was not successful
    """
    test_spec = None

    # Check if test_spec.json file exist, if so we will pick it up as default file and load it
    test_spec_file_name = opts.test_spec
    test_spec_file_name_list = []

    # Note: test_spec.json will have higher priority than module.json file
    #       so if we are inside directory with module.json and test_spec.json we will use test spec file
    #       instead of using yotta's module.json file

    def get_all_test_specs_from_build_dir(path_to_scan):
        """! Searches for all test_spec.json files
        @param path_to_scan Directory path used to recursively search for test_spec.json
        @result List of locations of test_spec.json
        """
        return [
            os.path.join(dp, f) for dp, dn, filenames in os.walk(path_to_scan)
            for f in filenames if f == 'test_spec.json'
        ]

    def merge_multiple_test_specifications_from_file_list(
            test_spec_file_name_list):
        """! For each file in test_spec_file_name_list merge all test specifications into one
        @param test_spec_file_name_list List of paths to different test specifications
        @return TestSpec object with all test specification data inside
        """
        def copy_builds_between_test_specs(source, destination):
            """! Copies build key-value pairs between two test_spec dicts
                @param source Source dictionary
                @param destination Dictionary to which the 'builds' key-values will be applied
                @return Dictionary with merged source
            """
            result = destination.copy()
            if 'builds' in source and 'builds' in destination:
                for k in source['builds']:
                    result['builds'][k] = source['builds'][k]
            return result
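        # Illustrative merge (toy dicts): copying {'builds': {'A': 1}} into
        # {'builds': {'B': 2}} yields {'builds': {'B': 2, 'A': 1}}; keys present
        # in both dicts take the value from 'source'.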

        merged_test_spec = {}
        for test_spec_file in test_spec_file_name_list:
            gt_logger.gt_log_tab("using '%s'" % test_spec_file)
            try:
                with open(test_spec_file, 'r') as f:
                    test_spec_data = json.load(f)
                    merged_test_spec = copy_builds_between_test_specs(
                        merged_test_spec, test_spec_data)
            except Exception as e:
                gt_logger.gt_log_err(
                    "Unexpected error while processing '%s' test specification file"
                    % test_spec_file)
                gt_logger.gt_log_tab(str(e))
                merged_test_spec = {}

        test_spec = TestSpec()
        test_spec.parse(merged_test_spec)
        return test_spec

    # Test specification look-up
    if opts.test_spec:
        # Loading test specification from command line specified file
        gt_logger.gt_log(
            "test specification file '%s' (specified with --test-spec option)"
            % opts.test_spec)
    elif os.path.exists('test_spec.json'):
        # Test specification file exists in current directory
        gt_logger.gt_log("using 'test_spec.json' from current directory!")
        test_spec_file_name = 'test_spec.json'
    elif 'BUILD' in os.listdir(os.getcwd()):
        # Checking 'BUILD' directory for test specifications
        # Using `os.listdir()` since it preserves case
        test_spec_file_name_list = get_all_test_specs_from_build_dir('BUILD')
    elif os.path.exists('.build'):
        # Checking .build directory for test specifications
        test_spec_file_name_list = get_all_test_specs_from_build_dir('.build')
    elif os.path.exists('mbed-os') and 'BUILD' in os.listdir('mbed-os'):
        # Checking mbed-os/BUILD directory for test specifications
        # Using `os.listdir()` since it preserves case
        test_spec_file_name_list = get_all_test_specs_from_build_dir(
            os.path.join('mbed-os', 'BUILD'))
    elif os.path.exists(os.path.join('mbed-os', '.build')):
        # Checking mbed-os/.build directory for test specifications
        test_spec_file_name_list = get_all_test_specs_from_build_dir(
            os.path.join('mbed-os', '.build'))

    # Actual load and processing of test specification from sources
    if test_spec_file_name:
        # Test specification from command line (--test-spec) or default test_spec.json will be used
        gt_logger.gt_log("using '%s' from current directory!" %
                         test_spec_file_name)
        test_spec = TestSpec(test_spec_file_name)
        if opts.list_binaries:
            list_binaries_for_builds(test_spec)
            return None, 0
    elif test_spec_file_name_list:
        # Merge multiple test specs into one and keep calm
        gt_logger.gt_log(
            "using multiple test specifications from current directory!")
        test_spec = merge_multiple_test_specifications_from_file_list(
            test_spec_file_name_list)
        if opts.list_binaries:
            list_binaries_for_builds(test_spec)
            return None, 0
    elif os.path.exists('module.json'):
        # If inside yotta module load module data and generate test spec
        gt_logger.gt_log("using 'module.json' from current directory!")
        if opts.list_binaries:
            # List available test binaries (names, no extension)
            list_binaries_for_targets()
            return None, 0
        else:
            test_spec = get_test_spec_from_yt_module(opts)
    else:
        gt_logger.gt_log_err(
            "greentea should be run inside a Yotta module or --test-spec switch should be used"
        )
        return None, -1
    return test_spec, 0
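# Minimal usage sketch (hypothetical caller, mirrors the return contract above):
#   test_spec, ret = get_test_spec(opts)
#   if not test_spec:
#       return ret    # propagate the error / early-exit code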
Example no. 36
0
def get_mbed_target_call_yotta_target():
    cmd = ['yotta', '--plain', 'target']
    gt_logger.gt_log("checking yotta target in current directory")
    gt_logger.gt_log_tab("calling yotta: %s" % " ".join(cmd))
    _stdout, _stderr, _ret = run_cli_process(cmd)
    return _stdout, _stderr, _ret
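# Usage sketch (the exact 'yotta target' output format is assumed, not guaranteed):
#   _stdout, _stderr, _ret = get_mbed_target_call_yotta_target()
#   if not _ret:
#       lines = [l for l in _stdout.splitlines() if l.strip()]
#       if lines:
#           print lines[0]    # e.g. 'frdm-k64f-gcc 2.0.0'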
Example no. 37
0
def run_host_test(image_path,
                  disk,
                  port,
                  build_path,
                  target_id,
                  duration=10,
                  micro=None,
                  reset=None,
                  verbose=False,
                  copy_method=None,
                  program_cycle_s=None,
                  forced_reset_timeout=None,
                  digest_source=None,
                  json_test_cfg=None,
                  max_failed_properties=5,
                  enum_host_tests_path=None,
                  global_resource_mgr=None,
                  fast_model_connection=None,
                  num_sync_packtes=None,
                  polling_timeout=None,
                  retry_count=1,
                  tags=None,
                  run_app=None):
    """! This function runs host test supervisor (executes mbedhtrun) and checks output from host test process.
    @param image_path Path to binary file for flashing
    @param disk Currently mounted mbed-enabled devices disk (mount point)
    @param port Currently mounted mbed-enabled devices serial port (console)
    @param duration Test case timeout
    @param micro Mbed-enabled device name
    @param reset Reset type
    @param forced_reset_timeout Reset timeout (sec)
    @param verbose Verbose mode flag
    @param copy_method Copy method type (name)
    @param program_cycle_s Wait after flashing delay (sec)
    @param json_test_cfg Additional test configuration file path passed to host tests in JSON format
    @param max_failed_properties After how many unknown properties we will assume test is not ported
    @param enum_host_tests_path Directory where locally defined host tests may reside
    @param num_sync_packtes Number of sync packets to send for host <---> device communication
    @param polling_timeout Timeout in sec for readiness of mount point and serial port of local or remote device
    @param tags Filter list of available devices under test to only run on devices with the provided list
           of tags  [tag-filters tag1,tag]
    @param run_app Run application mode flag (we run application and grab serial port data)
    @param digest_source if None mbedhtrun will be executed. If 'stdin',
           stdin will be used via StdInObserver or file (if
           file name was given as switch option)
    @return Tuple with test results, test output, test duration times, test case results, and memory metrics.
            Return int > 0 if running mbedhtrun process failed.
            Return int < 0 if something went wrong during mbedhtrun execution.
    """

    def get_binary_host_tests_dir(binary_path, level=2):
        """! Checks if in binary test group has host_tests directory
        @param binary_path Path to binary in test specification
        @param level How many directories above test host_tests dir exists
        @return Path to host_tests dir in the group the binary belongs to, None if not found
        """
        try:
            binary_path_norm = os.path.normpath(binary_path)
            current_path_norm = os.path.normpath(os.getcwd())
            host_tests_path = binary_path_norm.split(os.sep)[:-level] + ['host_tests']
            build_dir_candidates = ['BUILD', '.build']
            idx = None

            for build_dir_candidate in build_dir_candidates:
                if build_dir_candidate in host_tests_path:
                    idx = host_tests_path.index(build_dir_candidate)
                    break

            if idx is None:
                msg = 'The following directories were not in the path: %s' % (', '.join(build_dir_candidates))
                raise Exception(msg)

            # Cut /<build dir>/tests/TOOLCHAIN/TARGET
            host_tests_path = host_tests_path[:idx] + host_tests_path[idx+4:]
            host_tests_path = os.sep.join(host_tests_path)
        except Exception as e:
            gt_logger.gt_log_warn("there was a problem while looking for host_tests directory")
            gt_logger.gt_log_tab("level %d, path: %s"% (level, binary_path))
            gt_logger.gt_log_tab(str(e))
            return None

        if os.path.isdir(host_tests_path):
            return host_tests_path
        return None

    if not enum_host_tests_path:
        # If no -e was specified we will try to find a host_tests path ourselves
        #
        # * Path to binary starts from the "build" directory and goes 4 levels
        #   deep: ./build/tests/toolchain/target
        # * Binary is inside a test group.
        #   For example: <app>/tests/test_group_name/test_dir/*.cpp
        # * We will search for a directory called host_tests on the level of the test group (level=2)
        #   or on the level of the tests directory (level=3).
        #
        # If a host_tests directory is found above the test code we will pass it to mbedhtrun using
        # switch -e <path_to_host_tests_dir>
        gt_logger.gt_log("checking for 'host_tests' directory above image directory structure", print_text=verbose)
        test_group_ht_path = get_binary_host_tests_dir(image_path, level=2)
        TESTS_dir_ht_path = get_binary_host_tests_dir(image_path, level=3)
        if test_group_ht_path:
            enum_host_tests_path = test_group_ht_path
        elif TESTS_dir_ht_path:
            enum_host_tests_path = TESTS_dir_ht_path

        if enum_host_tests_path:
            gt_logger.gt_log_tab("found 'host_tests' directory in: '%s'"% enum_host_tests_path, print_text=verbose)
        else:
            gt_logger.gt_log_tab("'host_tests' directory not found: two directory levels above image path checked", print_text=verbose)

    gt_logger.gt_log("selecting test case observer...", print_text=verbose)
    if digest_source:
        gt_logger.gt_log_tab("selected digest source: %s"% digest_source, print_text=verbose)

    # Select who will digest test case serial port data
    if digest_source == 'stdin':
        # When we want to scan stdin for test results
        raise NotImplementedError
    elif digest_source is not None:
        # When we want to open file to scan for test results
        raise NotImplementedError

    # Command executing CLI for host test supervisor (in detect-mode)
    cmd = ["mbedhtrun",
            '-m', micro,
            '-p', port,
            '-f', '"%s"'% image_path,
            ]

    if enum_host_tests_path:
        cmd += ["-e", '"%s"'% enum_host_tests_path]

    if global_resource_mgr:
        # Use global resource manager to execute test
        # Example:
        # $ mbedhtrun -p :9600 -f "tests-mbed_drivers-generic_tests.bin" -m K64F --grm raas_client:10.2.203.31:8000
        cmd += ['--grm', global_resource_mgr]
    else:
        # Use local resources to execute tests
        # Add extra parameters to host_test
        if disk:
            cmd += ["-d", disk]
        if copy_method:
            cmd += ["-c", copy_method]
        if target_id:
            cmd += ["-t", target_id]
        if reset:
            cmd += ["-r", reset]
        if run_app:
            cmd += ["--run"]    # -f stores binary name!

    if fast_model_connection:
        # Use simulator resource manager to execute test
        # Example:
        # $ mbedhtrun -f "tests-mbed_drivers-generic_tests.elf" -m FVP_MPS2_M3 --fm DEFAULT
        cmd += ['--fm', fast_model_connection]

    if program_cycle_s:
        cmd += ["-C", str(program_cycle_s)]
    if forced_reset_timeout:
        cmd += ["-R", str(forced_reset_timeout)]
    if json_test_cfg:
        cmd += ["--test-cfg", '"%s"' % str(json_test_cfg)]
    if num_sync_packtes:
        cmd += ["--sync",str(num_sync_packtes)]
    if tags:
        cmd += ["--tag-filters", tags]
    if polling_timeout:
        cmd += ["-P", str(polling_timeout)]

    gt_logger.gt_log_tab("calling mbedhtrun: %s" % " ".join(cmd), print_text=verbose)
    gt_logger.gt_log("mbed-host-test-runner: started")

    for retry in range(1, 1 + retry_count):
        start_time = time()
        returncode, htrun_output = run_htrun(cmd, verbose)
        end_time = time()
        if returncode < 0:
            return returncode
        elif returncode == 0:
            break
        gt_logger.gt_log("retry mbedhtrun {}/{}".format(retry, retry_count))
    else:
        gt_logger.gt_log("{} failed after {} count".format(cmd, retry_count))

    testcase_duration = end_time - start_time   # Test case duration from reset to {end}
    htrun_output = get_printable_string(htrun_output)
    result = get_test_result(htrun_output)
    result_test_cases = get_testcase_result(htrun_output)
    test_cases_summary = get_testcase_summary(htrun_output)
    max_heap, reserved_heap, thread_stack_info = get_memory_metrics(htrun_output)

    thread_stack_summary = []

    if thread_stack_info:
        thread_stack_summary = get_thread_stack_info_summary(thread_stack_info)

    memory_metrics = {
        "max_heap": max_heap,
        "reserved_heap": reserved_heap,
        "thread_stack_info": thread_stack_info,
        "thread_stack_summary": thread_stack_summary
    }
    get_coverage_data(build_path, htrun_output)

    gt_logger.gt_log("mbed-host-test-runner: stopped and returned '%s'"% result, print_text=verbose)
    return (result, htrun_output, testcase_duration, duration, result_test_cases, test_cases_summary, memory_metrics)
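
A minimal sketch of consuming the value returned above; 'host_test_result' is a hypothetical
variable assumed to hold the return value of this run_host_test() variant (errors are returned
as plain integers, success as the 7-element tuple shown above):

if isinstance(host_test_result, int):
    # int > 0: calling mbedhtrun failed; int < 0: error during mbedhtrun execution
    print "mbedhtrun error, code %d" % host_test_result
else:
    (result, htrun_output, testcase_duration, duration,
     result_test_cases, test_cases_summary, memory_metrics) = host_test_result
    print "test result: %s in %.2f sec" % (result, testcase_duration)
    print "max heap usage: %s" % memory_metrics["max_heap"]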
Example no. 38
0
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is main CLI function with all command line parameters
    @details This function also implements CLI workflow depending on CLI parameters inputed
    @return This function doesn't return, it exits to environment with proper success code
    """
    def filter_ready_devices(mbeds_list):
        """! Filters list of MUTs to check if all MUTs are correctly detected with mbed-ls module.
        @details This function logs a lot to help users figure out root cause of their problems
        @param mbeds_list List of MUTs to verify
        @return Tuple of (MUTS detected correctly, MUTs not detected fully)
        """
        ready_mbed_devices = []  # Devices which can be used (are fully detected)
        not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

        gt_logger.gt_log(
            "detected %d device%s" %
            (len(mbeds_list), 's' if len(mbeds_list) != 1 else ''))
        for mut in mbeds_list:
            if not all(mut.values()):
                gt_logger.gt_log_err(
                    "mbed-ls was unable to enumerate correctly all properties of the device!"
                )
                gt_logger.gt_log_tab(
                    "check with 'mbedls -j' command if all properties of your device are enumerated properly"
                )
                for prop in mut:
                    if not mut[prop]:
                        # Adding MUT to NOT DETECTED FULLY list
                        if mut not in not_ready_mbed_devices:
                            not_ready_mbed_devices.append(mut)
                        gt_logger.gt_log_err("mbed-ls property '%s' is '%s'" %
                                             (prop, str(mut[prop])))
                        if prop == 'serial_port':
                            gt_logger.gt_log_tab(
                                "check if your serial port driver is correctly installed!"
                            )
                        if prop == 'mount_point':
                            gt_logger.gt_log_tab(
                                'check if your OS can detect and mount mbed device mount point!'
                            )
            else:
                # Adding MUT to DETECTED CORRECTLY list
                ready_mbed_devices.append(mut)
        return (ready_mbed_devices, not_ready_mbed_devices)

    def get_parallel_value(value):
        """! Get correct value for parallel switch (--parallel)
        @param value Value passed from --parallel
        @return Sanitized parallel execution count (an int, at least 1)
        """
        try:
            parallel_test_exec = int(value)
            if parallel_test_exec < 1:
                parallel_test_exec = 1
        except ValueError:
            gt_logger.gt_log_err(
                "argument of --parallel is not an int, parallel mode disabled"
            )
            parallel_test_exec = 1
        return parallel_test_exec
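    # Illustrative behaviour: get_parallel_value("4") -> 4,
    # get_parallel_value("0") -> 1, get_parallel_value("abc") -> 1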

    if not MBED_LMTOOLS:
        gt_logger.gt_log_err("error: mbed-ls proprietary module not installed")
        return (-1)

    if not MBED_HOST_TESTS:
        gt_logger.gt_log_err(
            "error: mbed-host-tests proprietary module not installed")
        return (-1)

    # This is how you magically control colours in this piece of art software
    gt_logger.colorful(not opts.plain)

    # Prints version and exits
    if opts.version:
        print_version()
        return (0)

    # Load test specification or print warnings / info messages and exit CLI mode
    test_spec, ret = get_test_spec(opts)
    if not test_spec:
        return ret

    # Verbose flag
    verbose = opts.verbose_test_result_only

    # We will load hooks from JSON file to support extra behaviour during test execution
    greentea_hooks = GreenteaHooks(
        opts.hooks_json) if opts.hooks_json else None

    # Capture alternative test console inputs, used e.g. in 'yotta test command'
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(
            None,
            None,
            None,
            None,
            None,
            hooks=greentea_hooks,
            digest_source=opts.digest_source,
            enum_host_tests_path=enum_host_tests_path,
            verbose=verbose)

        # Some error in htrun, abort test execution
        if isinstance(host_test_result, int):
            # int(host_test_result) > 0 - Call to mbedhtrun failed
            # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
            return host_test_result

        # If execution was successful 'run_host_test' returns a tuple with results
        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
        status = TEST_RESULTS.index(
            single_test_result) if single_test_result in TEST_RESULTS else -1
        return (status)

    ### Query with mbedls for available mbed-enabled devices
    gt_logger.gt_log("detecting connected mbed-enabled devices...")

    # Detect devices connected to system
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds_ext()

    if opts.global_resource_mgr:
        # Mocking available platform requested by --grm switch
        grm_values = parse_global_resource_mgr(opts.global_resource_mgr)
        if grm_values:
            gt_logger.gt_log_warn(
                "entering global resource manager mbed-ls dummy mode!")
            grm_platform_name, grm_module_name, grm_ip_name, grm_port_name = grm_values
            mbeds_list = []
            mbeds_list.append(mbeds.get_dummy_platform(grm_platform_name))
            opts.global_resource_mgr = ':'.join(grm_values[1:])
            gt_logger.gt_log_tab("adding dummy platform '%s'" %
                                 grm_platform_name)
        else:
            gt_logger.gt_log(
                "global resource manager switch '--grm %s' in wrong format!" %
                opts.global_resource_mgr)
            return (-1)

    ready_mbed_devices = []  # Devices which can be used (are fully detected)
    not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

    if mbeds_list:
        ready_mbed_devices, not_ready_mbed_devices = filter_ready_devices(
            mbeds_list)
        if ready_mbed_devices:
            # devices in form of a pretty formatted table
            for line in log_mbed_devices_in_table(
                    ready_mbed_devices).splitlines():
                gt_logger.gt_log_tab(line.strip(), print_text=verbose)
    else:
        gt_logger.gt_log_err("no compatible devices detected")
        return (RET_NO_DEVICES)

    ### We can filter in only specific target ids
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_logger.gt_log(
            "filtering out target ids not on below list (specified with --use-tids switch)"
        )
        accepted_target_ids = opts.use_target_ids.split(',')
        for tid in accepted_target_ids:
            gt_logger.gt_log_tab("accepting target id '%s'" %
                                 gt_logger.gt_bright(tid))

    test_exec_retcode = 0  # Increment this value each time a test case result is not 'OK'
    test_platforms_match = 0  # Count how many tests were actually run with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings

    test_report = {}  # Test report used to export to Junit, HTML etc...
    muts_to_test = []  # MUTs to actually be tested
    test_queue = Queue()  # contains information about test_bin and image_path for each test case
    test_result_queue = Queue()  # used to store results of each thread
    execute_threads = []  # list of threads to run test cases

    ### Check if the argument of --parallel is an integer greater than or equal to 1

    parallel_test_exec = get_parallel_value(opts.parallel_test_exec)

    # Values used to generate random seed for test execution order shuffle
    SHUFFLE_SEED_ROUND = 10  # Value used to round float random seed
    shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND)

    # Set shuffle seed if it is provided with command line option
    if opts.shuffle_test_seed:
        shuffle_random_seed = round(float(opts.shuffle_test_seed),
                                    SHUFFLE_SEED_ROUND)

    ### Testing procedures, for each target, for each target's compatible platform
    # In case we are using test spec (switch --test-spec) command line option -t <list_of_targets>
    # is used to enumerate builds from test spec we are supplying
    filter_test_builds = opts.list_of_targets.split(
        ',') if opts.list_of_targets else None
    for test_build in test_spec.get_test_builds(filter_test_builds):
        platform_name = test_build.get_platform()
        gt_logger.gt_log(
            "processing target '%s' toolchain '%s' compatible platforms... (note: switch set to --parallel %d)"
            % (gt_logger.gt_bright(platform_name),
               gt_logger.gt_bright(
                   test_build.get_toolchain()), int(opts.parallel_test_exec)))

        baudrate = test_build.get_baudrate()

        ### Select MUTS to test from list of available MUTS to start testing
        mut = None
        number_of_parallel_instances = 1
        for mbed_dev in ready_mbed_devices:
            if accepted_target_ids and mbed_dev[
                    'target_id'] not in accepted_target_ids:
                continue

            if mbed_dev['platform_name'] == platform_name:
                # We will force configuration specific baudrate by adding baudrate to serial port
                # Only add baudrate decoration for serial port if it's not already there
                # Format used by mbedhtrun: 'serial_port' = '<serial_port_name>:<baudrate>'
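                # e.g. '/dev/ttyACM0' with baudrate 9600 becomes '/dev/ttyACM0:9600' (illustrative values)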
                if not mbed_dev['serial_port'].endswith(str(baudrate)):
                    mbed_dev['serial_port'] = "%s:%d" % (
                        mbed_dev['serial_port'], baudrate)
                mut = mbed_dev
                muts_to_test.append(mbed_dev)
                if number_of_parallel_instances < parallel_test_exec:
                    number_of_parallel_instances += 1
                else:
                    break

        # devices in form of a pretty formatted table
        for line in log_mbed_devices_in_table(muts_to_test).splitlines():
            gt_logger.gt_log_tab(line.strip(), print_text=verbose)

        # Configuration print mode:
        if opts.verbose_test_configuration_only:
            continue

        ### If we have at least one available device we can proceed
        if mut:
            target_platforms_match += 1

            build = test_build.get_name()
            build_path = test_build.get_path()

            # Demo mode: --run implementation (already added --run to mbedhtrun)
            # We want to pass the file name to mbedhtrun (--run NAME  =>  -f NAME) and run only one binary
            if opts.run_app:
                gt_logger.gt_log(
                    "running '%s' for '%s'-'%s'" %
                    (gt_logger.gt_bright(
                        opts.run_app), gt_logger.gt_bright(platform_name),
                     gt_logger.gt_bright(test_build.get_toolchain())))
                disk = mut['mount_point']
                port = mut['serial_port']
                micro = mut['platform_name']
                program_cycle_s = get_platform_property(
                    micro, "program_cycle_s")
                copy_method = opts.copy_method if opts.copy_method else 'shell'
                enum_host_tests_path = get_local_host_tests_dir(
                    opts.enum_host_tests)

                test_platforms_match += 1
                host_test_result = run_host_test(
                    opts.run_app,
                    disk,
                    port,
                    build_path,
                    mut['target_id'],
                    micro=micro,
                    copy_method=copy_method,
                    program_cycle_s=program_cycle_s,
                    digest_source=opts.digest_source,
                    json_test_cfg=opts.json_test_configuration,
                    run_app=opts.run_app,
                    enum_host_tests_path=enum_host_tests_path,
                    verbose=True)

                # Some error in htrun, abort test execution
                if isinstance(host_test_result, int):
                    # int(host_test_result) > 0 - Call to mbedhtrun failed
                    # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
                    return host_test_result

                # If execution was successful 'run_host_test' returns a tuple with results
                single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
                status = TEST_RESULTS.index(
                    single_test_result
                ) if single_test_result in TEST_RESULTS else -1
                if single_test_result != TEST_RESULT_OK:
                    test_exec_retcode += 1

            test_list = test_build.get_tests()

            filtered_ctest_test_list = create_filtered_test_list(
                test_list,
                opts.test_by_names,
                opts.skip_test,
                test_spec=test_spec)

            gt_logger.gt_log(
                "running %d test%s for platform '%s' and toolchain '%s'" %
                (len(filtered_ctest_test_list),
                 "s" if len(filtered_ctest_test_list) != 1 else "",
                 gt_logger.gt_bright(platform_name),
                 gt_logger.gt_bright(test_build.get_toolchain())))

            # Test execution order can be shuffled (also with provided random seed)
            # for test execution reproduction.
            filtered_ctest_test_list_keys = filtered_ctest_test_list.keys()
            if opts.shuffle_test_order:
                # We want to shuffle test names randomly
                random.shuffle(filtered_ctest_test_list_keys,
                               lambda: shuffle_random_seed)

            for test_name in filtered_ctest_test_list_keys:
                image_path = filtered_ctest_test_list[test_name].get_binary(
                    binary_type=TestBinary.BIN_TYPE_BOOTABLE).get_path()
                if image_path is None:
                    gt_logger.gt_log_err(
                        "Failed to find test binary for test %s flash method %s"
                        % (test_name, 'usb'))
                else:
                    test = {"test_bin": test_name, "image_path": image_path}
                    test_queue.put(test)

            number_of_threads = 0
            for mut in muts_to_test:
                # Experimental, parallel test execution
                if number_of_threads < parallel_test_exec:
                    args = (test_result_queue, test_queue, opts, mut, build,
                            build_path, greentea_hooks)
                    t = Thread(target=run_test_thread, args=args)
                    execute_threads.append(t)
                    number_of_threads += 1

        gt_logger.gt_log_tab(
            "use %s instance%s of execution threads for testing" %
            (len(execute_threads),
             's' if len(execute_threads) != 1 else str()),
            print_text=verbose)
        for t in execute_threads:
            t.daemon = True
            t.start()

        # merge partial test reports from different threads to final test report
        for t in execute_threads:
            try:
                t.join()  #blocking
                test_return_data = test_result_queue.get(False)
            except Exception as e:
                # No test report generated
                gt_logger.gt_log_err("could not generate test report" + str(e))
                test_exec_retcode += -1000
                return test_exec_retcode

            test_platforms_match += test_return_data['test_platforms_match']
            test_exec_retcode += test_return_data['test_exec_retcode']
            partial_test_report = test_return_data['test_report']
            # todo: find better solution, maybe use extend
            for report_key in partial_test_report.keys():
                if report_key not in test_report:
                    test_report[report_key] = {}
                    test_report.update(partial_test_report)
                else:
                    test_report[report_key].update(
                        partial_test_report[report_key])

        execute_threads = []

        if opts.verbose_test_configuration_only:
            print
            print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
            return (0)

        gt_logger.gt_log("all tests finished!")

    # We will execute post test hooks on tests
    for build_name in test_report:
        test_name_list = []  # All test case names for particular yotta target
        for test_name in test_report[build_name]:
            test = test_report[build_name][test_name]
            # Test completed with a verdict (OK or FAIL)
            if test['single_test_result'] in [
                    TEST_RESULT_OK, TEST_RESULT_FAIL
            ]:
                test_name_list.append(test_name)
                # Call hook executed for each test, just after all tests are finished
                if greentea_hooks:
                    # We can execute this test hook just after all tests are finished ('hook_post_test_end')
                    format = {
                        "test_name": test_name,
                        "test_bin_name": test['test_bin_name'],
                        "image_path": test['image_path'],
                        "build_path": test['build_path'],
                        "build_path_abs": test['build_path_abs'],
                    }
                    greentea_hooks.run_hook_ext('hook_post_test_end', format)
        if greentea_hooks:
            build = test_spec.get_test_build(build_name)
            assert build is not None, "Failed to find build info for build %s" % build_name

            # Call hook executed for each yotta target, just after all tests are finished
            build_path = build.get_path()
            build_path_abs = os.path.abspath(build_path)
            # We can execute this test hook just after all tests are finished ('hook_post_test_end')
            format = {
                "build_path": build_path,
                "build_path_abs": build_path_abs,
                "test_name_list": test_name_list,
            }
            greentea_hooks.run_hook_ext('hook_post_all_test_end', format)

    # This tool is designed to work in CI
    # We want to return success codes based on tool actions,
    # only if tests were executed and all passed we want to
    # return 0 (success)
    if not opts.only_build_tests:
        # Prints shuffle seed
        gt_logger.gt_log("shuffle seed: %.*f" %
                         (SHUFFLE_SEED_ROUND, shuffle_random_seed))

        def dump_report_to_text_file(filename, content):
            """! Closure for report dumps to text files
            @param filename Name of destination file
            @param content Text content of the file to write
            @return True if write was successful, else return False
            """
            try:
                with open(filename, 'w') as f:
                    f.write(content)
            except IOError as e:
                gt_logger.gt_log_err("can't export to '%s', reason:" %
                                     filename)
                gt_logger.gt_log_err(str(e))
                return False
            return True
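        # Illustrative usage (hypothetical file name):
        #   dump_report_to_text_file("greentea_report.txt", text_report)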

        # Reports to JUNIT file
        if opts.report_junit_file_name:
            gt_logger.gt_log("exporting to JUNIT file '%s'..." %
                             gt_logger.gt_bright(opts.report_junit_file_name))
            # This test specification will be used by JUnit exporter to populate TestSuite.properties (useful meta-data for Viewer)
            test_suite_properties = {}
            for target_name in test_report:
                test_build_properties = get_test_build_properties(
                    test_spec, target_name)
                if test_build_properties:
                    test_suite_properties[target_name] = test_build_properties
            junit_report = exporter_testcase_junit(
                test_report, test_suite_properties=test_suite_properties)
            dump_report_to_text_file(opts.report_junit_file_name, junit_report)

        # Reports to text file
        if opts.report_text_file_name:
            gt_logger.gt_log("exporting to TEXT '%s'..." %
                             gt_logger.gt_bright(opts.report_text_file_name))
            # Useful text reporter for those who do not like to copy-paste tables with results into files
            text_report, text_results = exporter_text(test_report)
            text_testcase_report, text_testcase_results = exporter_testcase_text(
                test_report)
            text_final_report = '\n'.join([
                text_report, text_results, text_testcase_report,
                text_testcase_results
            ])
            dump_report_to_text_file(opts.report_text_file_name,
                                     text_final_report)

        # Reports to JSON file
        if opts.report_json_file_name:
            # We will not print summary and json report together
            gt_logger.gt_log("exporting to JSON '%s'..." %
                             gt_logger.gt_bright(opts.report_json_file_name))
            json_report = exporter_json(test_report)
            dump_report_to_text_file(opts.report_json_file_name, json_report)

        # Reports to HTML file
        if opts.report_html_file_name:
            gt_logger.gt_log("exporting to HTML file '%s'..." %
                             gt_logger.gt_bright(opts.report_html_file_name))
            # Generate an HTML page displaying all of the results
            html_report = exporter_html(test_report)
            dump_report_to_text_file(opts.report_html_file_name, html_report)

        # Final summary
        if test_report:
            # Test suite report
            gt_logger.gt_log("test suite report:")
            text_report, text_results = exporter_text(test_report)
            print text_report
            gt_logger.gt_log("test suite results: " + text_results)
            # test case detailed report
            gt_logger.gt_log("test case report:")
            text_testcase_report, text_testcase_results = exporter_testcase_text(
                test_report)
            print text_testcase_report
            gt_logger.gt_log("test case results: " + text_testcase_results)

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_logger.gt_log_warn(
                "no platform/target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_logger.gt_log_warn("no matching platforms were found!")
            test_exec_retcode += -100

    return (test_exec_retcode)
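
A minimal, illustrative helper (not part of Greentea) showing one way a CI wrapper might give a
coarse reading of the aggregated return code produced above; the exact value depends on which
offsets (+1 per failed test, -10, -100, -1000, RET_NO_DEVICES) were accumulated:

def describe_greentea_retcode(retcode):
    """Hypothetical helper: map main_cli()'s aggregated return code to a rough verdict."""
    if retcode == 0:
        return "success: tests executed and all passed"
    if retcode < 0:
        return "error: environment, reporting or platform/test matching problem"
    return "failure: one or more tests failed (or no devices were detected)"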
Example no. 39
0
def run_host_test(image_path,
                  disk,
                  port,
                  yotta_target,
                  target_id,
                  duration=10,
                  micro=None,
                  reset=None,
                  reset_tout=None,
                  verbose=False,
                  copy_method=None,
                  program_cycle_s=None,
                  digest_source=None,
                  json_test_cfg=None,
                  max_failed_properties=5,
                  enum_host_tests_path=None,
                  run_app=None):
    """! This function runs host test supervisor (executes mbedhtrun) and checks output from host test process.
    @return Tuple with test results, test output, test duration times and test case results
    @param image_path Path to binary file for flashing
    @param disk Currently mounted mbed-enabled devices disk (mount point)
    @param port Currently mounted mbed-enabled devices serial port (console)
    @param duration Test case timeout
    @param micro Mbed-enabled device name
    @param reset Reset type
    @param reset_tout Reset timeout (sec)
    @param verbose Verbose mode flag
    @param copy_method Copy method type (name)
    @param program_cycle_s Wait after flashing delay (sec)
    @param json_test_cfg Additional test configuration file path passed to host tests in JSON format
    @param max_failed_properties After how many unknown properties we will assume test is not ported
    @param enum_host_tests_path Directory where locally defined host tests may reside
    @param run_app Run application mode flag (we run application and grab serial port data)
    @param digest_source if None mbedhtrun will be executed. If 'stdin',
                           stdin will be used via StdInObserver or file (if
                           file name was given as switch option)
    """
    def run_command(cmd):
        """! Runs command and prints proc stdout on screen """
        p = None
        try:
            p = Popen(cmd, stdout=PIPE, stderr=STDOUT)
        except OSError as e:
            print "mbedgt: run_command(%s) failed: %s" % (str(cmd), str(e))
        return p

    if verbose:
        gt_logger.gt_log("selecting test case observer...")
        if digest_source:
            gt_logger.gt_log_tab("selected digest source: %s" % digest_source)

    # Select who will digest test case serial port data
    if digest_source == 'stdin':
        # When we want to scan stdin for test results
        raise NotImplementedError
    elif digest_source is not None:
        # When we want to open file to scan for test results
        raise NotImplementedError

    # Command executing CLI for host test supervisor (in detect-mode)
    cmd = [
        "mbedhtrun",
        '-d',
        disk,
        '-p',
        port,
        '-f',
        '"%s"' % image_path,
    ]

    # Add extra parameters to host_test
    if program_cycle_s is not None:
        cmd += ["-C", str(program_cycle_s)]
    if copy_method is not None:
        cmd += ["-c", copy_method]
    if micro is not None:
        cmd += ["-m", micro]
    if target_id is not None:
        cmd += ["-t", target_id]
    if reset is not None:
        cmd += ["-r", reset]
    if reset_tout is not None:
        cmd += ["-R", str(reset_tout)]
    if json_test_cfg is not None:
        cmd += ["--test-cfg", '"%s"' % str(json_test_cfg)]
    if run_app is not None:
        cmd += ["--run"]  # -f stores binary name!
    if enum_host_tests_path:
        cmd += ["-e", '"%s"' % enum_host_tests_path]

    if verbose:
        gt_logger.gt_log_tab("calling mbedhtrun: %s" % " ".join(cmd))
    gt_logger.gt_log("mbed-host-test-runner: started")

    htrun_output = ''
    start_time = time()

    p = run_command(cmd)
    for line in iter(p.stdout.readline, b''):
        htrun_output += line
        # When dumping output to file both \r and \n will be a new line
        # To avoid this "extra new-line" we only use \n at the end
        if verbose:
            sys.stdout.write(line.rstrip() + '\n')
            sys.stdout.flush()

    # Check if process was terminated by signal
    returncode = p.wait()
    if returncode < 0:
        return returncode

    end_time = time()
    testcase_duration = end_time - start_time  # Test case duration from reset to {end}

    result = get_test_result(htrun_output)
    result_test_cases = get_testcase_result(htrun_output)
    test_cases_summary = get_testcase_summary(htrun_output)
    get_coverage_data(yotta_target, htrun_output)

    if verbose:
        gt_logger.gt_log("mbed-host-test-runner: stopped")
        gt_logger.gt_log("mbed-host-test-runner: returned '%s'" % result)
    return (result, htrun_output, testcase_duration, duration,
            result_test_cases, test_cases_summary)
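
A minimal usage sketch for the run_host_test() variant above; every device and path value below
is made up for illustration:

host_test_result = run_host_test(
    "./build/frdm-k64f-gcc/test/mbed-test-dev_null.bin",  # image_path (hypothetical)
    "/media/MBED",                                        # disk (mount point)
    "/dev/ttyACM0",                                       # port (serial console)
    "frdm-k64f-gcc",                                      # yotta_target
    "0240000031754663",                                   # target_id (hypothetical)
    micro="K64F",
    copy_method="shell",
    program_cycle_s=4,
    verbose=True)

if not isinstance(host_test_result, int):
    (result, output, duration, timeout,
     result_test_cases, test_cases_summary) = host_test_result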
Example no. 40
0
from mbed_greentea.mbed_greentea_dlm import GREENTEA_KETTLE_PATH
from mbed_greentea.mbed_greentea_dlm import greentea_get_app_sem
from mbed_greentea.mbed_greentea_dlm import greentea_update_kettle
from mbed_greentea.mbed_greentea_dlm import greentea_clean_kettle
from mbed_greentea.mbed_yotta_api import build_with_yotta
from mbed_greentea.mbed_greentea_hooks import GreenteaHooks
from mbed_greentea.mbed_yotta_module_parse import YottaConfig
from mbed_greentea.mbed_yotta_module_parse import YottaModule

try:
    import mbed_lstools
    import mbed_host_tests
except ImportError as e:
    gt_logger.gt_log_err("Not all required Python modules were imported!")
    gt_logger.gt_log_err(str(e))
    gt_logger.gt_log("Check if:")
    gt_logger.gt_log_tab("1. You've correctly installed dependency module using setup tools or pip:")
    gt_logger.gt_log_tab("* python setup.py install", tab_count=2)
    gt_logger.gt_log_tab("* pip install <module-name>", tab_count=2)
    gt_logger.gt_log_tab("2. There are no errors preventing import in dependency modules")
    gt_logger.gt_log_tab("See: https://github.com/ARMmbed/greentea#installing-greentea")
    exit(-2342)

MBED_LMTOOLS = 'mbed_lstools' in sys.modules
MBED_HOST_TESTS = 'mbed_host_tests' in sys.modules

RET_NO_DEVICES = 1001
RET_YOTTA_BUILD_FAIL = -1
LOCAL_HOST_TESTS_DIR = './test/host_tests'  # Used by mbedhtrun -e <dir>

def get_local_host_tests_dir(path):
Example no. 41
0
def main():
    """ Closure for main_cli() function """
    parser = optparse.OptionParser()

    parser.add_option(
        '-t',
        '--target',
        dest='list_of_targets',
        help=
        'You can specify a list of targets you want to build. Use commas to separate them'
    )

    parser.add_option(
        '-n',
        '--test-by-names',
        dest='test_by_names',
        help=
        'Runs only tests enumerated in this switch. Use commas to separate test case names.'
    )

    parser.add_option(
        '-i',
        '--skip-test',
        dest='skip_test',
        help=
        'Skip tests enumerated in this switch. Use commas to separate test case names.'
    )

    parser.add_option(
        "-O",
        "--only-build",
        action="store_true",
        dest="only_build_tests",
        default=False,
        help=
        "Only build repository and tests, skips actual test procedures (flashing etc.)"
    )

    parser.add_option("-S",
                      "--skip-build",
                      action="store_true",
                      dest="skip_yotta_build",
                      default=False,
                      help="Skip calling 'yotta build' on this module")

    copy_methods_str = "Plugin support: " + ', '.join(
        mbed_host_tests.host_tests_plugins.get_plugin_caps('CopyMethod'))
    parser.add_option("-c",
                      "--copy",
                      dest="copy_method",
                      help="Copy (flash the target) method selector. " +
                      copy_methods_str,
                      metavar="COPY_METHOD")

    parser.add_option(
        '',
        '--parallel',
        dest='parallel_test_exec',
        default=1,
        help=
        'Experimental: execute test runners in parallel for MUTs connected to your host (speeds up test result collection)'
    )

    parser.add_option(
        "-e",
        "--enum-host-tests",
        dest="enum_host_tests",
        help=
        "Define directory with yotta module local host tests. Default: ./test/host_tests"
    )

    parser.add_option(
        '',
        '--config',
        dest='verbose_test_configuration_only',
        default=False,
        action="store_true",
        help='Displays connected boards and detected targets and exits.')

    parser.add_option(
        '',
        '--release',
        dest='build_to_release',
        default=False,
        action="store_true",
        help='If possible force build in release mode (yotta -r).')

    parser.add_option('',
                      '--debug',
                      dest='build_to_debug',
                      default=False,
                      action="store_true",
                      help='If possible force build in debug mode (yotta -d).')

    parser.add_option('-l',
                      '--list',
                      dest='list_binaries',
                      default=False,
                      action="store_true",
                      help='List available binaries')

    parser.add_option(
        '-m',
        '--map-target',
        dest='map_platform_to_yt_target',
        help=
        'List of custom mapping between platform name and yotta target. Comma separated list of PLATFORM:TARGET tuples'
    )

    parser.add_option(
        '',
        '--use-tids',
        dest='use_target_ids',
        help=
        'Explicitly specify which devices Greentea can use for testing by providing a comma-separated list of allowed Target IDs'
    )

    parser.add_option('-u',
                      '--shuffle',
                      dest='shuffle_test_order',
                      default=False,
                      action="store_true",
                      help='Shuffles test execution order')

    parser.add_option(
        '',
        '--shuffle-seed',
        dest='shuffle_test_seed',
        default=None,
        help=
        'Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)'
    )

    parser.add_option(
        '',
        '--lock',
        dest='lock_by_target',
        default=False,
        action="store_true",
        help=
        'Use simple resource locking mechanism to run multiple application instances'
    )

    parser.add_option(
        '',
        '--digest',
        dest='digest_source',
        help=
        'Redirect where the test suite takes its console input from. Use stdin or a file name as the source of test case console output'
    )

    parser.add_option('-H',
                      '--hooks',
                      dest='hooks_json',
                      help='Load hooks used to drive extra functionality')

    parser.add_option(
        '',
        '--test-cfg',
        dest='json_test_configuration',
        help='Pass a JSON file with host test configuration data to the host test')

    parser.add_option(
        '',
        '--run',
        dest='run_app',
        help='Flash, reset and dump serial from selected binary application')

    parser.add_option(
        '',
        '--report-junit',
        dest='report_junit_file_name',
        help=
        'You can log test suite results in the form of a JUnit compliant XML report')

    parser.add_option('',
                      '--report-text',
                      dest='report_text_file_name',
                      help='You can log test suite results to text file')

    parser.add_option('',
                      '--report-json',
                      dest='report_json',
                      default=False,
                      action="store_true",
                      help='Outputs test results in JSON')

    parser.add_option('',
                      '--report-fails',
                      dest='report_fails',
                      default=False,
                      action="store_true",
                      help='Prints console outputs for failed tests')

    parser.add_option(
        '',
        '--yotta-registry',
        dest='yotta_search_for_mbed_target',
        default=False,
        action="store_true",
        help=
        'Use the online yotta registry to search for yotta targets compatible with the connected mbed devices. Default: search is done in the yotta_targets directory'
    )

    parser.add_option('-V',
                      '--verbose-test-result',
                      dest='verbose_test_result_only',
                      default=False,
                      action="store_true",
                      help='Prints test serial output')

    parser.add_option('-v',
                      '--verbose',
                      dest='verbose',
                      default=False,
                      action="store_true",
                      help='Verbose mode (prints some extra information)')

    parser.add_option('',
                      '--plain',
                      dest='plain',
                      default=False,
                      action="store_true",
                      help='Do not use colours while logging')

    parser.add_option('',
                      '--version',
                      dest='version',
                      default=False,
                      action="store_true",
                      help='Prints package version and exits')

    parser.description = """This automated test script is used to test mbed SDK 3.0 on mbed-enabled devices with support from yotta build tool"""
    parser.epilog = """Example: mbedgt --target frdm-k64f-gcc"""

    (opts, args) = parser.parse_args()

    cli_ret = 0

    start = time()
    if opts.lock_by_target:
        # We are using Greentea proprietary locking mechanism to lock between platforms and targets
        gt_logger.gt_log("using (experimental) simple locking mechanism")
        gt_logger.gt_log_tab("kettle: %s" % GREENTEA_KETTLE_PATH)
        gt_file_sem, gt_file_sem_name, gt_instance_uuid = greentea_get_app_sem(
        )
        with gt_file_sem:
            greentea_update_kettle(gt_instance_uuid)
            try:
                cli_ret = main_cli(opts, args, gt_instance_uuid)
            except KeyboardInterrupt:
                greentea_clean_kettle(gt_instance_uuid)
                gt_logger.gt_log_err("ctrl+c keyboard interrupt!")
                return (-2)  # Keyboard interrupt
            except:
                greentea_clean_kettle(gt_instance_uuid)
                gt_logger.gt_log_err("unexpected error:")
                gt_logger.gt_log_tab(str(sys.exc_info()[0]))
                raise
            greentea_clean_kettle(gt_instance_uuid)
    else:
        # Standard mode of operation
        # Other instance must provide mutually exclusive access control to platforms and targets
        try:
            cli_ret = main_cli(opts, args)
        except KeyboardInterrupt:
            gt_logger.gt_log_err("ctrl+c keyboard interrupt!")
            return (-2)  # Keyboard interrupt
        except Exception as e:
            gt_logger.gt_log_err("unexpected error:")
            gt_logger.gt_log_tab(str(e))
            raise

    if not any([opts.list_binaries, opts.version]):
        delta = time() - start  # Test execution time delta
        gt_logger.gt_log("completed in %.2f sec" % delta)

    if cli_ret:
        gt_logger.gt_log_err("exited with code %d" % cli_ret)

    return (cli_ret)
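
Illustrative command lines exercising the options defined above (the test name is taken from an
example used later in this document; report file names are made up):

#   $ mbedgt --target=frdm-k64f-gcc -V
#   $ mbedgt -t frdm-k64f-gcc -n mbed-drivers-test-dev_null -V
#   $ mbedgt --target=frdm-k64f-gcc --report-junit=report.xml --report-text=report.txt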
Example no. 42
0
def run_test_thread(test_result_queue, test_queue, opts, mut, mut_info, yotta_target_name, greentea_hooks):
    test_exec_retcode = 0
    test_platforms_match = 0
    test_report = {}
    yotta_config_baudrate = None    # Default serial port baudrate forced by configuration

    yotta_config = YottaConfig()
    yotta_config.init(yotta_target_name)

    yotta_config_baudrate = yotta_config.get_baudrate()

    while not test_queue.empty():
        try:
            test = test_queue.get(False)
        except Exception as e:
            gt_logger.gt_log_err(str(e))
            break

        test_result = 'SKIPPED'

        disk = mut['mount_point']
        port = mut['serial_port']
        micro = mut['platform_name']
        program_cycle_s = mut_info['properties']['program_cycle_s']
        copy_method = opts.copy_method if opts.copy_method else 'shell'
        verbose = opts.verbose_test_result_only
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

        # We will force configuration specific baudrate
        if port:
            port = "%s:%d"% (port, yotta_config_baudrate)

        test_platforms_match += 1
        host_test_result = run_host_test(test['image_path'],
                                         disk,
                                         port,
                                         yotta_target_name,
                                         mut['target_id'],
                                         micro=micro,
                                         copy_method=copy_method,
                                         program_cycle_s=program_cycle_s,
                                         digest_source=opts.digest_source,
                                         json_test_cfg=opts.json_test_configuration,
                                         enum_host_tests_path=enum_host_tests_path,
                                         verbose=verbose)

        # Some error in htrun, abort test execution
        if host_test_result < 0:
            break
        
        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
        test_result = single_test_result

        build_path = os.path.join("./build", yotta_target_name)
        build_path_abs = os.path.abspath(build_path)

        if single_test_result != TEST_RESULT_OK:
            test_exec_retcode += 1

        if single_test_result in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
            if greentea_hooks:
                # Test was successful
                # We can execute test hook just after test is finished ('hook_test_end')
                format = {
                    "test_name": test['test_bin'],
                    "test_bin_name": os.path.basename(test['image_path']),
                    "image_path": test['image_path'],
                    "build_path": build_path,
                    "build_path_abs": build_path_abs,
                    "yotta_target_name": yotta_target_name,
                }
                greentea_hooks.run_hook_ext('hook_test_end', format)

        # Update report for optional reporting feature
        test_suite_name = test['test_bin'].lower()
        if yotta_target_name not in test_report:
            test_report[yotta_target_name] = {}

        if test_suite_name not in test_report[yotta_target_name]:
            test_report[yotta_target_name][test_suite_name] = {}

        if not test_cases_summary and not result_test_cases:
            gt_logger.gt_log_warn("test case summary event not found")
            gt_logger.gt_log_tab("no test case report present, assuming test suite to be a single test case!")

            # We will map test suite result to test case to
            # output valid test case in report

            # Generate "artificial" test case name from test suite name#
            # E.g:
            #   mbed-drivers-test-dev_null -> dev_null
            test_case_name = test_suite_name
            test_str_idx = test_suite_name.find("-test-")
            if test_str_idx != -1:
                test_case_name = test_case_name[test_str_idx + 6:]

            gt_logger.gt_log_tab("test suite: %s"% test_suite_name)
            gt_logger.gt_log_tab("test case: %s"% test_case_name)

            # Test case result: OK, FAIL or ERROR
            tc_result_text = {
                "OK": "OK",
                "FAIL": "FAIL",
            }.get(single_test_result, 'ERROR')

            # Test case integer success code OK, FAIL and ERROR: (0, >0, <0)
            tc_result = {
                "OK": 0,
                "FAIL": 1024,
                "ERROR": -1024,
            }.get(tc_result_text, -2048)

            # Test case passes and failures: (1 pass, 0 failures) or (0 passes, 1 failure)
            tc_passed, tc_failed = {
                0: (1, 0),
            }.get(tc_result, (0, 1))

            # Test case report build for whole binary
            # Add test case made from test suite result to test case report
            result_test_cases = {
                test_case_name: {
                        'duration': single_testduration,
                        'time_start': 0.0,
                        'time_end': 0.0,
                        'utest_log': single_test_output.splitlines(),
                        'result_text': tc_result_text,
                        'passed': tc_passed,
                        'failed': tc_failed,
                        'result': tc_result,
                    }
            }

            # Test summary build for whole binary (as a test case)
            test_cases_summary = (tc_passed, tc_failed, )

        gt_logger.gt_log("test on hardware with target id: %s"% (mut['target_id']))
        gt_logger.gt_log("test suite '%s' %s %s in %.2f sec"% (test['test_bin'],
            '.' * (80 - len(test['test_bin'])),
            test_result,
            single_testduration))

        # Test report build for whole binary
        test_report[yotta_target_name][test_suite_name]['single_test_result'] = single_test_result
        test_report[yotta_target_name][test_suite_name]['single_test_output'] = single_test_output
        test_report[yotta_target_name][test_suite_name]['elapsed_time'] = single_testduration
        test_report[yotta_target_name][test_suite_name]['platform_name'] = micro
        test_report[yotta_target_name][test_suite_name]['copy_method'] = copy_method
        test_report[yotta_target_name][test_suite_name]['testcase_result'] = result_test_cases

        test_report[yotta_target_name][test_suite_name]['build_path'] = build_path
        test_report[yotta_target_name][test_suite_name]['build_path_abs'] = build_path_abs
        test_report[yotta_target_name][test_suite_name]['image_path'] = test['image_path']
        test_report[yotta_target_name][test_suite_name]['test_bin_name'] = os.path.basename(test['image_path'])

        passes_cnt, failures_cnt = 0, 0
        for tc_name in sorted(result_test_cases.keys()):
            gt_logger.gt_log_tab("test case: '%s' %s %s in %.2f sec"% (tc_name,
                '.' * (80 - len(tc_name)),
                result_test_cases[tc_name].get('result_text', '_'),
                result_test_cases[tc_name].get('duration', 0.0)))
            if result_test_cases[tc_name].get('result_text', '_') == 'OK':
                passes_cnt += 1
            else:
                failures_cnt += 1

        if test_cases_summary:
            passes, failures = test_cases_summary
            gt_logger.gt_log("test case summary: %d pass%s, %d failur%s"% (passes,
                '' if passes == 1 else 'es',
                failures,
                'e' if failures == 1 else 'es'))
            if passes != passes_cnt or failures != failures_cnt:
                gt_logger.gt_log_err("test case summary mismatch: reported passes vs failures miscount!")
                gt_logger.gt_log_tab("(%d, %d) vs (%d, %d)"% (passes, failures, passes_cnt, failures_cnt))

        if single_test_result != 'OK' and not verbose and opts.report_fails:
            # In some cases we want to print console to see why test failed
            # even if we are not in verbose mode
            gt_logger.gt_log_tab("test failed, reporting console output (specified with --report-fails option)")
            print
            print single_test_output

    #greentea_release_target_id(mut['target_id'], gt_instance_uuid)
    test_result_queue.put({'test_platforms_match': test_platforms_match,
                           'test_exec_retcode': test_exec_retcode,
                           'test_report': test_report})
    return
Example no. 43
0
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is main CLI function with all command line parameters
    @details This function also implements CLI workflow depending on CLI parameters inputed
    @return This function doesn't return, it exits to environment with proper success code
    """

    if not MBED_LMTOOLS:
        gt_logger.gt_log_err("error: mbed-ls proprietary module not installed")
        return (-1)

    if not MBED_HOST_TESTS:
        gt_logger.gt_log_err(
            "error: mbed-host-tests proprietary module not installed")
        return (-1)

    # This is how you magically control colours in this piece of art software
    gt_logger.colorful(not opts.plain)

    # List available test binaries (names, no extension)
    if opts.list_binaries:
        list_binaries_for_targets()
        return (0)

    # Prints version and exits
    if opts.version:
        print_version()
        return (0)

    # We will load hooks from JSON file to support extra behaviour during test execution
    greentea_hooks = GreenteaHooks(
        opts.hooks_json) if opts.hooks_json else None

    # Capture alternative test console inputs, used e.g. in 'yotta test command'
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(
            None,
            None,
            None,
            None,
            None,
            hooks=greentea_hooks,
            digest_source=opts.digest_source,
            enum_host_tests_path=enum_host_tests_path,
            verbose=opts.verbose_test_result_only)

        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
        status = TEST_RESULTS.index(
            single_test_result) if single_test_result in TEST_RESULTS else -1
        return (status)

    ### Read yotta module basic information
    yotta_module = YottaModule()
    yotta_module.init()  # Read actual yotta module data

    # Check if NO greentea-client is in module.json of repo to test, if so abort
    if not yotta_module.check_greentea_client():
        gt_logger.gt_log("""
        *****************************************************************************************
        * We've noticed that NO 'greentea-client' module is specified in                        *
        * dependency/testDependency section of this module's 'module.json' file.                *
        *                                                                                       *
        * This version of Greentea requires 'greentea-client' module.                           *
        * Please downgrade to Greentea before v0.2.0:                                           *
        *                                                                                       *
        * $ pip install "mbed-greentea<0.2.0" --upgrade                                         *
        *                                                                                       *
        * or port your tests to new Async model: https://github.com/ARMmbed/greentea/pull/78    *
        *****************************************************************************************
        """)
        return (0)

    ### Selecting yotta targets to process
    yt_targets = []  # List of yotta targets specified by the user to process during this run
    if opts.list_of_targets:
        yt_targets = opts.list_of_targets.split(',')
    else:
        # Trying to use locally set yotta target
        gt_logger.gt_log("checking for yotta target in current directory")
        gt_logger.gt_log_tab("reason: no --target switch set")
        current_target = get_mbed_target_from_current_dir()
        if current_target:
            gt_logger.gt_log("assuming default target as '%s'" %
                             gt_logger.gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            yt_targets = [current_target]
        else:
            gt_logger.gt_log_tab(
                "yotta target in current directory is not set")
            gt_logger.gt_log_err(
                "yotta target is not specified. Use '%s' or '%s' command to set target"
                % (gt_logger.gt_bright('mbedgt -t <yotta_target>'),
                   gt_logger.gt_bright('yotta target <yotta_target>')))
            return (-1)

    ### Query with mbedls for available mbed-enabled devices
    gt_logger.gt_log("detecting connected mbed-enabled devices...")

    # Detect devices connected to system
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds_ext()

    ready_mbed_devices = []  # Devices which can be used (are fully detected)

    if mbeds_list:
        gt_logger.gt_log(
            "detected %d device%s" %
            (len(mbeds_list), 's' if len(mbeds_list) != 1 else ''))
        for mut in mbeds_list:
            if not all(mut.values()):
                gt_logger.gt_log_err(
                    "can't detect all properties of the device!")
                for prop in mut:
                    if not mut[prop]:
                        gt_logger.gt_log_tab("property '%s' is '%s'" %
                                             (prop, str(mut[prop])))
            else:
                ready_mbed_devices.append(mut)
                gt_logger.gt_log_tab(
                    "detected '%s' -> '%s', console at '%s', mounted at '%s', target id '%s'"
                    % (gt_logger.gt_bright(mut['platform_name']),
                       gt_logger.gt_bright(mut['platform_name_unique']),
                       gt_logger.gt_bright(mut['serial_port']),
                       gt_logger.gt_bright(mut['mount_point']),
                       gt_logger.gt_bright(mut['target_id'])))
    else:
        gt_logger.gt_log_err("no devices detected")
        return (RET_NO_DEVICES)

    ### Use yotta to search mapping between platform names and available platforms
    # Convert platform:target, ... mapping to data structure
    map_platform_to_yt_target = {}
    if opts.map_platform_to_yt_target:
        gt_logger.gt_log(
            "user defined platform -> target supported mapping definition (specified with --map-target switch)"
        )
        p_to_t_mappings = opts.map_platform_to_yt_target.split(',')
        for mapping in p_to_t_mappings:
            if len(mapping.split(':')) == 2:
                platform, yt_target = mapping.split(':')
                if platform not in map_platform_to_yt_target:
                    map_platform_to_yt_target[platform] = []
                map_platform_to_yt_target[platform].append(yt_target)
                gt_logger.gt_log_tab(
                    "mapped platform '%s' to be compatible with '%s'" %
                    (gt_logger.gt_bright(platform),
                     gt_logger.gt_bright(yt_target)))
            else:
                gt_logger.gt_log_tab(
                    "unknown format '%s', use 'platform:target' format" %
                    mapping)

    # Check if mbed classic target name can be translated to yotta target name

    mut_info_map = {
    }  # platform_name : mut_info_dict, extract yt_targets with e.g. [k["yotta_target"] for k in d['K64F']["yotta_targets"]]

    for mut in ready_mbed_devices:
        platform_name = mut['platform_name']
        if platform_name not in mut_info_map:
            mut_info = get_mbed_clasic_target_info(
                platform_name,
                map_platform_to_yt_target,
                use_yotta_registry=opts.yotta_search_for_mbed_target)
            if mut_info:
                mut_info_map[platform_name] = mut_info

    ### List of unique ready platform names
    unique_mbed_devices = list(set(mut_info_map.keys()))

    ### Identify which targets have to be built because matching platforms are present
    yt_target_platform_map = {}  # yt_target_to_test : platforms to test on

    for yt_target in yt_targets:
        for platform_name in unique_mbed_devices:
            if yt_target in [
                    k["yotta_target"]
                    for k in mut_info_map[platform_name]["yotta_targets"]
            ]:
                if yt_target not in yt_target_platform_map:
                    yt_target_platform_map[yt_target] = []
                if platform_name not in yt_target_platform_map[yt_target]:
                    yt_target_platform_map[yt_target].append(platform_name)

    ### We can filter in only specific target ids
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_logger.gt_log(
            "filtering out target ids not on below list (specified with --use-tids switch)"
        )
        accepted_target_ids = opts.use_target_ids.split(',')
        for tid in accepted_target_ids:
            gt_logger.gt_log_tab("accepting target id '%s'" %
                                 gt_logger.gt_bright(tid))

    test_exec_retcode = 0  # Incremented each time a test case result is not 'OK'
    test_platforms_match = 0  # Count how many tests were actually run with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings

    test_report = {}  # Test report used to export to Junit, HTML etc...
    muts_to_test = []  # MUTs to actually be tested
    test_queue = Queue(
    )  # contains information about test_bin and image_path for each test case
    test_result_queue = Queue()  # used to store results of each thread
    execute_threads = []  # list of threads to run test cases

    ### Check if the argument of --parallel mode is an integer greater than or equal to 1
    try:
        parallel_test_exec = int(opts.parallel_test_exec)
        if parallel_test_exec < 1:
            parallel_test_exec = 1
    except ValueError:
        gt_logger.gt_log_err(
            "argument of mode --parallel is not a int, disable parallel mode")
        parallel_test_exec = 1

    # Values used to generate random seed for test execution order shuffle
    SHUFFLE_SEED_ROUND = 10  # Value used to round float random seed
    shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND)

    # Set shuffle seed if it is provided with command line option
    if opts.shuffle_test_seed:
        shuffle_random_seed = round(float(opts.shuffle_test_seed),
                                    SHUFFLE_SEED_ROUND)

    ### Testing procedures, for each target, for each target's compatible platform
    for yotta_target_name in yt_target_platform_map:
        gt_logger.gt_log(
            "processing '%s' yotta target compatible platforms..." %
            gt_logger.gt_bright(yotta_target_name))

        for platform_name in yt_target_platform_map[yotta_target_name]:
            gt_logger.gt_log("processing '%s' platform..." %
                             gt_logger.gt_bright(platform_name))

            ### Select MUTS to test from list of available MUTS to start testing
            mut = None
            number_of_parallel_instances = 1
            for mbed_dev in ready_mbed_devices:
                if accepted_target_ids and mbed_dev[
                        'target_id'] not in accepted_target_ids:
                    continue

                if mbed_dev['platform_name'] == platform_name:
                    mut = mbed_dev
                    muts_to_test.append(mbed_dev)
                    gt_logger.gt_log("using platform '%s' for test:" %
                                     gt_logger.gt_bright(platform_name))
                    for k in mbed_dev:
                        gt_logger.gt_log_tab("%s = '%s'" % (k, mbed_dev[k]))
                    if number_of_parallel_instances < parallel_test_exec:
                        number_of_parallel_instances += 1
                    else:
                        break

            # Configuration print mode:
            if opts.verbose_test_configuration_only:
                continue

            if mut:
                target_platforms_match += 1

                # Demo mode: --run implementation (already added --run to mbedhtrun)
                # We want to pass the file name to mbedhtrun (--run NAME => -f NAME) and run only one binary
                if opts.run_app:
                    gt_logger.gt_log("running '%s' for '%s'" %
                                     (gt_logger.gt_bright(opts.run_app),
                                      gt_logger.gt_bright(yotta_target_name)))
                    disk = mut['mount_point']
                    port = mut['serial_port']
                    micro = mut['platform_name']
                    program_cycle_s = mut_info_map[platform_name][
                        'properties']['program_cycle_s']
                    copy_method = opts.copy_method if opts.copy_method else 'shell'
                    enum_host_tests_path = get_local_host_tests_dir(
                        opts.enum_host_tests)

                    yotta_config = YottaConfig()
                    yotta_config.init(yotta_target_name)

                    yotta_config_baudrate = yotta_config.get_baudrate()

                    # We will force configuration specific baudrate
                    if port:
                        port = "%s:%d" % (port, yotta_config_baudrate)

                    test_platforms_match += 1
                    host_test_result = run_host_test(
                        opts.run_app,
                        disk,
                        port,
                        yotta_target_name,
                        mut['target_id'],
                        micro=micro,
                        copy_method=copy_method,
                        program_cycle_s=program_cycle_s,
                        digest_source=opts.digest_source,
                        json_test_cfg=opts.json_test_configuration,
                        run_app=opts.run_app,
                        enum_host_tests_path=enum_host_tests_path,
                        verbose=True)

                    single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
                    status = TEST_RESULTS.index(
                        single_test_result
                    ) if single_test_result in TEST_RESULTS else -1
                    if single_test_result != TEST_RESULT_OK:
                        test_exec_retcode += 1
                    continue

                # Regression test mode:
                # Building sources for given target and perform normal testing

                yotta_result, yotta_ret = True, 0  # Skip build and assume 'yotta build' was successful
                if opts.skip_yotta_build:
                    gt_logger.gt_log(
                        "skipping calling yotta (specified with --skip-build option)"
                    )
                else:
                    yotta_result, yotta_ret = build_with_yotta(
                        yotta_target_name,
                        verbose=opts.verbose,
                        build_to_release=opts.build_to_release,
                        build_to_debug=opts.build_to_debug)

                # We need to stop executing if yotta build fails
                if not yotta_result:
                    gt_logger.gt_log_err("yotta returned %d" % yotta_ret)
                    return (RET_YOTTA_BUILD_FAIL)

                if opts.only_build_tests:
                    continue

                # Build phase will be followed by test execution for each target
                if yotta_result and not opts.only_build_tests:
                    binary_type = mut_info_map[platform_name]['properties'][
                        'binary_type']
                    ctest_test_list = load_ctest_testsuite(
                        os.path.join('.', 'build', yotta_target_name),
                        binary_type=binary_type)
                    #TODO no tests to execute

                filtered_ctest_test_list = create_filtered_test_list(
                    ctest_test_list, opts.test_by_names, opts.skip_test)

                gt_logger.gt_log(
                    "running %d test%s for target '%s' and platform '%s'" %
                    (len(filtered_ctest_test_list),
                     "s" if len(filtered_ctest_test_list) != 1 else "",
                     gt_logger.gt_bright(yotta_target_name),
                     gt_logger.gt_bright(platform_name)))

                # Test execution order can be shuffled (also with provided random seed)
                # for test execution reproduction.
                filtered_ctest_test_list_keys = filtered_ctest_test_list.keys()
                if opts.shuffle_test_order:
                    # We want to shuffle test names randomly
                    random.shuffle(filtered_ctest_test_list_keys,
                                   lambda: shuffle_random_seed)

                for test_bin in filtered_ctest_test_list_keys:
                    image_path = filtered_ctest_test_list[test_bin]
                    test = {"test_bin": test_bin, "image_path": image_path}
                    test_queue.put(test)

                #for test_bin, image_path in filtered_ctest_test_list.iteritems():
                #    test = {"test_bin":test_bin, "image_path":image_path}
                #    test_queue.put(test)

                number_of_threads = 0
                for mut in muts_to_test:
                    #################################################################
                    # Experimental, parallel test execution
                    #################################################################
                    if number_of_threads < parallel_test_exec:
                        args = (test_result_queue, test_queue, opts, mut,
                                mut_info, yotta_target_name, greentea_hooks)
                        t = Thread(target=run_test_thread, args=args)
                        execute_threads.append(t)
                        number_of_threads += 1

    gt_logger.gt_log_tab(
        "use %s instance%s for testing" %
        (len(execute_threads), 's' if len(execute_threads) != 1 else ''))
    for t in execute_threads:
        t.daemon = True
        t.start()

    # Merge partial test reports from different threads into the final test report
    for t in execute_threads:
        try:
            t.join()  #blocking
            test_return_data = test_result_queue.get(False)
        except Exception as e:
            # No test report generated
            gt_logger.gt_log_err("could not generate test report" + str(e))
            test_exec_retcode += -1000
            return test_exec_retcode

        test_platforms_match += test_return_data['test_platforms_match']
        test_exec_retcode += test_return_data['test_exec_retcode']
        partial_test_report = test_return_data['test_report']
        # todo: find better solution, maybe use extend
        for report_key in partial_test_report.keys():
            if report_key not in test_report:
                test_report[report_key] = {}
                test_report.update(partial_test_report)
            else:
                test_report[report_key].update(partial_test_report[report_key])

    if opts.verbose_test_configuration_only:
        print
        print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
        return (0)

    gt_logger.gt_log("all tests finished!")

    # We will execute post test hooks on tests
    for yotta_target in test_report:
        test_name_list = []  # All test case names for particular yotta target
        for test_name in test_report[yotta_target]:
            test = test_report[yotta_target][test_name]
            # Test completed with a reportable result (passed or failed)
            if test['single_test_result'] in [
                    TEST_RESULT_OK, TEST_RESULT_FAIL
            ]:
                test_name_list.append(test_name)
                # Call hook executed for each test, just after all tests are finished
                if greentea_hooks:
                    # We can execute this test hook just after all tests are finished ('hook_post_test_end')
                    format = {
                        "test_name": test_name,
                        "test_bin_name": test['test_bin_name'],
                        "image_path": test['image_path'],
                        "build_path": test['build_path'],
                        "build_path_abs": test['build_path_abs'],
                        "yotta_target_name": yotta_target,
                    }
                    greentea_hooks.run_hook_ext('hook_post_test_end', format)
        if greentea_hooks:
            # Call hook executed for each yotta target, just after all tests are finished
            build_path = os.path.join("./build", yotta_target)
            build_path_abs = os.path.abspath(build_path)
            # We can execute this test hook just after all tests are finished ('hook_post_test_end')
            format = {
                "build_path": build_path,
                "build_path_abs": build_path_abs,
                "test_name_list": test_name_list,
                "yotta_target_name": yotta_target,
            }
            greentea_hooks.run_hook_ext('hook_post_all_test_end', format)

    # This tool is designed to work in CI
    # We want to return success codes based on tool actions,
    # only if tests were executed and all of them passed do we want to
    # return 0 (success)
    if not opts.only_build_tests:
        # Prints shuffle seed
        gt_logger.gt_log("shuffle seed: %.*f" %
                         (SHUFFLE_SEED_ROUND, shuffle_random_seed))

        # Reports (to file)
        if opts.report_junit_file_name:
            gt_logger.gt_log("exporting to JUnit file '%s'..." %
                             gt_logger.gt_bright(opts.report_junit_file_name))
            junit_report = exporter_testcase_junit(
                test_report, test_suite_properties=yotta_module.get_data())
            with open(opts.report_junit_file_name, 'w') as f:
                f.write(junit_report)
        if opts.report_text_file_name:
            gt_logger.gt_log("exporting to text '%s'..." %
                             gt_logger.gt_bright(opts.report_text_file_name))

            text_report, text_results = exporter_text(test_report)
            text_testcase_report, text_testcase_results = exporter_testcase_text(
                test_report)
            with open(opts.report_text_file_name, 'w') as f:
                f.write('\n'.join([
                    text_report, text_results, text_testcase_report,
                    text_testcase_results
                ]))

        # Reports (to console)
        if opts.report_json:
            # We will not print summary and json report together
            gt_logger.gt_log("json test report:")
            print exporter_json(test_report)
        else:
            # Final summary
            if test_report:
                # Test suite report
                gt_logger.gt_log("test suite report:")
                text_report, text_results = exporter_text(test_report)
                print text_report
                gt_logger.gt_log("test suite results: " + text_results)
                # test case detailed report
                gt_logger.gt_log("test case report:")
                text_testcase_report, text_testcase_results = exporter_testcase_text(
                    test_report, test_suite_properties=yotta_module.get_data())
                print text_testcase_report
                gt_logger.gt_log("test case results: " + text_testcase_results)

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_logger.gt_log_warn(
                "no platform/target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_logger.gt_log_warn("no target matching platforms were found!")
            test_exec_retcode += -100

    return (test_exec_retcode)
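
Note: the per-thread report merging above (the loop over execute_threads) combines partial {build: {suite: data}} dictionaries into one report. A minimal, self-contained sketch of such a merge, not taken verbatim from the Greentea sources and using made-up sample data, could look like this:

def merge_test_reports(final_report, partial_reports):
    """Merge partial {build_name: {test_suite_name: data}} dicts into final_report (sketch)."""
    for partial in partial_reports:
        for build_name, suites in partial.items():
            # Create the build entry on first sight, then merge suite entries into it
            final_report.setdefault(build_name, {}).update(suites)
    return final_report

# Hypothetical usage with results coming from two worker threads
merged = merge_test_reports({}, [
    {"frdm-k64f-gcc": {"mbed-drivers-test-ticker": {"single_test_result": "OK"}}},
    {"frdm-k64f-gcc": {"mbed-drivers-test-dev_null": {"single_test_result": "FAIL"}}},
])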
Esempio n. 44
0
def run_test_thread(test_result_queue, test_queue, opts, mut, build, build_path, greentea_hooks):
    test_exec_retcode = 0
    test_platforms_match = 0
    test_report = {}

    while not test_queue.empty():
        try:
            test = test_queue.get(False)
        except Exception as e:
            gt_logger.gt_log_err(str(e))
            break

        test_result = "SKIPPED"

        disk = mut["mount_point"]
        port = mut["serial_port"]
        micro = mut["platform_name"]
        program_cycle_s = get_platform_property(micro, "program_cycle_s")
        forced_reset_timeout = get_platform_property(micro, "forced_reset_timeout")
        copy_method = opts.copy_method if opts.copy_method else "shell"
        verbose = opts.verbose_test_result_only
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

        test_platforms_match += 1
        host_test_result = run_host_test(
            test["image_path"],
            disk,
            port,
            build_path,
            mut["target_id"],
            micro=micro,
            copy_method=copy_method,
            program_cycle_s=program_cycle_s,
            forced_reset_timeout=forced_reset_timeout,
            digest_source=opts.digest_source,
            json_test_cfg=opts.json_test_configuration,
            enum_host_tests_path=enum_host_tests_path,
            global_resource_mgr=opts.global_resource_mgr,
            verbose=verbose,
        )

        # Some error in htrun, abort test execution
        if isinstance(host_test_result, int):
            # int(host_test_result) > 0 - Call to mbedhtrun failed
            # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
            gt_logger.gt_log_err("run_test_thread.run_host_test() failed, aborting...")
            break

        # If execution was successful 'run_host_test' return tuple with results
        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = (
            host_test_result
        )
        test_result = single_test_result

        build_path_abs = os.path.abspath(build_path)

        if single_test_result != TEST_RESULT_OK:
            test_exec_retcode += 1

        if single_test_result in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
            if greentea_hooks:
                # Test completed (passed or failed)
                # We can execute test hook just after test is finished ('hook_test_end')
                format = {
                    "test_name": test["test_bin"],
                    "test_bin_name": os.path.basename(test["image_path"]),
                    "image_path": test["image_path"],
                    "build_path": build_path,
                    "build_path_abs": build_path_abs,
                    "build_name": build,
                }
                greentea_hooks.run_hook_ext("hook_test_end", format)

        # Update report for optional reporting feature
        test_suite_name = test["test_bin"].lower()
        if build not in test_report:
            test_report[build] = {}

        if test_suite_name not in test_report[build]:
            test_report[build][test_suite_name] = {}

        if not test_cases_summary and not result_test_cases:
            gt_logger.gt_log_warn("test case summary event not found")
            gt_logger.gt_log_tab("no test case report present, assuming test suite to be a single test case!")

            # We will map test suite result to test case to
            # output valid test case in report

            # Generate "artificial" test case name from test suite name#
            # E.g:
            #   mbed-drivers-test-dev_null -> dev_null
            test_case_name = test_suite_name
            test_str_idx = test_suite_name.find("-test-")
            if test_str_idx != -1:
                test_case_name = test_case_name[test_str_idx + 6 :]

            gt_logger.gt_log_tab("test suite: %s" % test_suite_name)
            gt_logger.gt_log_tab("test case: %s" % test_case_name)

            # Test case result: OK, FAIL or ERROR
            tc_result_text = {"OK": "OK", "FAIL": "FAIL"}.get(single_test_result, "ERROR")

            # Test case integer success code OK, FAIL and ERROR: (0, >0, <0)
            tc_result = {"OK": 0, "FAIL": 1024, "ERROR": -1024}.get(tc_result_text, "-2048")

            # Test case passes and failures: (1 pass, 0 failures) or (0 passes, 1 failure)
            tc_passed, tc_failed = {0: (1, 0)}.get(tc_result, (0, 1))

            # Test case report build for whole binary
            # Add test case made from test suite result to test case report
            result_test_cases = {
                test_case_name: {
                    "duration": single_testduration,
                    "time_start": 0.0,
                    "time_end": 0.0,
                    "utest_log": single_test_output.splitlines(),
                    "result_text": tc_result_text,
                    "passed": tc_passed,
                    "failed": tc_failed,
                    "result": tc_result,
                }
            }

            # Test summary build for whole binary (as a test case)
            test_cases_summary = (tc_passed, tc_failed)

        gt_logger.gt_log("test on hardware with target id: %s" % (mut["target_id"]))
        gt_logger.gt_log(
            "test suite '%s' %s %s in %.2f sec"
            % (test["test_bin"], "." * (80 - len(test["test_bin"])), test_result, single_testduration)
        )

        # Test report build for whole binary
        test_report[build][test_suite_name]["single_test_result"] = single_test_result
        test_report[build][test_suite_name]["single_test_output"] = single_test_output
        test_report[build][test_suite_name]["elapsed_time"] = single_testduration
        test_report[build][test_suite_name]["platform_name"] = micro
        test_report[build][test_suite_name]["copy_method"] = copy_method
        test_report[build][test_suite_name]["testcase_result"] = result_test_cases

        test_report[build][test_suite_name]["build_path"] = build_path
        test_report[build][test_suite_name]["build_path_abs"] = build_path_abs
        test_report[build][test_suite_name]["image_path"] = test["image_path"]
        test_report[build][test_suite_name]["test_bin_name"] = os.path.basename(test["image_path"])

        passes_cnt, failures_cnt = 0, 0
        for tc_name in sorted(result_test_cases.keys()):
            gt_logger.gt_log_tab(
                "test case: '%s' %s %s in %.2f sec"
                % (
                    tc_name,
                    "." * (80 - len(tc_name)),
                    result_test_cases[tc_name].get("result_text", "_"),
                    result_test_cases[tc_name].get("duration", 0.0),
                )
            )
            if result_test_cases[tc_name].get("result_text", "_") == "OK":
                passes_cnt += 1
            else:
                failures_cnt += 1

        if test_cases_summary:
            passes, failures = test_cases_summary
            gt_logger.gt_log(
                "test case summary: %d pass%s, %d failur%s"
                % (passes, "" if passes == 1 else "es", failures, "e" if failures == 1 else "es")
            )
            if passes != passes_cnt or failures != failures_cnt:
                gt_logger.gt_log_err("utest test case summary mismatch: utest reported passes and failures miscount!")
                gt_logger.gt_log_tab("reported by utest: passes = %d, failures %d)" % (passes, failures))
                gt_logger.gt_log_tab("test case result count: passes = %d, failures %d)" % (passes_cnt, failures_cnt))

        if single_test_result != "OK" and not verbose and opts.report_fails:
            # In some cases we want to print console to see why test failed
            # even if we are not in verbose mode
            gt_logger.gt_log_tab("test failed, reporting console output (specified with --report-fails option)")
            print
            print single_test_output

    # greentea_release_target_id(mut['target_id'], gt_instance_uuid)
    test_result_queue.put(
        {
            "test_platforms_match": test_platforms_match,
            "test_exec_retcode": test_exec_retcode,
            "test_report": test_report,
        }
    )
    return
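
When no test case events are reported, run_test_thread above falls back to an "artificial" test case derived from the test suite name (the '-test-' split). A small sketch of just that naming rule, with hypothetical suite names:

def artificial_test_case_name(test_suite_name):
    """Derive a test case name from a suite name, e.g. 'mbed-drivers-test-dev_null' -> 'dev_null' (sketch)."""
    idx = test_suite_name.find("-test-")
    return test_suite_name[idx + len("-test-"):] if idx != -1 else test_suite_name

assert artificial_test_case_name("mbed-drivers-test-dev_null") == "dev_null"
assert artificial_test_case_name("my_suite") == "my_suite"  # no '-test-' marker, suite name is reused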
Esempio n. 45
0
def run_host_test(image_path,
                  disk,
                  port,
                  build_path,
                  target_id,
                  duration=10,
                  micro=None,
                  reset=None,
                  reset_tout=None,
                  verbose=False,
                  copy_method=None,
                  program_cycle_s=None,
                  forced_reset_timeout=None,
                  digest_source=None,
                  json_test_cfg=None,
                  max_failed_properties=5,
                  enum_host_tests_path=None,
                  global_resource_mgr=None,
                  run_app=None):
    """! This function runs host test supervisor (executes mbedhtrun) and checks output from host test process.
    @param image_path Path to binary file for flashing
    @param disk Currently mounted mbed-enabled devices disk (mount point)
    @param port Currently mounted mbed-enabled devices serial port (console)
    @param duration Test case timeout
    @param micro Mbed-enabled device name
    @param reset Reset type
    @param reset_tout Reset timeout (sec)
    @param verbose Verbose mode flag
    @param copy_method Copy method type (name)
    @param program_cycle_s Wait after flashing delay (sec)
    @param json_test_cfg Additional test configuration file path passed to host tests in JSON format
    @param max_failed_properties After how many unknown properties we will assume test is not ported
    @param enum_host_tests_path Directory where locally defined host tests may reside
    @param run_app Run application mode flag (we run application and grab serial port data)
    @param digest_source if None mbedhtrun will be executed. If 'stdin',
           stdin will be used via StdInObserver or file (if
           file name was given as switch option)
    @return Tuple with test results, test output, test duration times, test case results, and memory metrics.
            Return int > 0 if running mbedhtrun process failed.
            Return int < 0 if something went wrong during mbedhtrun execution.
    """
    def run_command(cmd):
        """! Runs command and prints proc stdout on screen
        @param cmd List with command line to execute e.g. ['ls', '-l']
        @return Value returned by subprocess.Popen, if failed return None
        """
        try:
            p = Popen(cmd, stdout=PIPE, stderr=STDOUT)
        except OSError as e:
            gt_logger.gt_log_err("run_host_test.run_command(%s) failed!" %
                                 str(cmd))
            gt_logger.gt_log_tab(str(e))
            return None
        return p

    def get_binary_host_tests_dir(binary_path, level=2):
        """! Checks if in binary test group has host_tests directory
        @param binary_path Path to binary in test specification
        @param level How many directories above test host_tests dir exists
        @return Path to host_tests dir in group binary belongs too, None if not found
        """
        try:
            binary_path_norm = os.path.normpath(binary_path)
            current_path_norm = os.path.normpath(os.getcwd())
            host_tests_path = binary_path_norm.split(
                os.sep)[:-level] + ['host_tests']
            build_dir_candidates = ['BUILD', '.build']
            idx = None

            for build_dir_candidate in build_dir_candidates:
                if build_dir_candidate in host_tests_path:
                    idx = host_tests_path.index(build_dir_candidate)
                    break

            if idx is None:
                msg = 'The following directories were not in the path: %s' % (
                    ', '.join(build_dir_candidates))
                raise Exception(msg)

            # Cut /<build dir>/tests/TOOLCHAIN/TARGET
            host_tests_path = host_tests_path[:idx] + host_tests_path[idx + 4:]
            host_tests_path = os.sep.join(host_tests_path)
        except Exception as e:
            gt_logger.gt_log_warn(
                "there was a problem while looking for host_tests directory")
            gt_logger.gt_log_tab("level %d, path: %s" % (level, binary_path))
            gt_logger.gt_log_tab(str(e))
            return None

        if os.path.isdir(host_tests_path):
            return host_tests_path
        return None

    if not enum_host_tests_path:
        # If -e is not specified we will try to find a host_tests path ourselves
        #
        # * Path to binary starts from the "build" directory and goes 4 levels
        #   deep: ./<build dir>/tests/<toolchain>/<target>
        # * Binary is inside test group.
        #   For example: <app>/tests/test_group_name/test_dir/*.cpp
        # * We will search for directory called host_tests on the level of test group (level=2)
        #   or on the level of tests directory (level=3).
        #
        # If a host_tests directory is found above the test code we will pass it to mbedhtrun
        # using the -e <path_to_host_tests_dir> switch
        gt_logger.gt_log(
            "checking for 'host_tests' directory above image directory structure",
            print_text=verbose)
        test_group_ht_path = get_binary_host_tests_dir(image_path, level=2)
        TESTS_dir_ht_path = get_binary_host_tests_dir(image_path, level=3)
        if test_group_ht_path:
            enum_host_tests_path = test_group_ht_path
        elif TESTS_dir_ht_path:
            enum_host_tests_path = TESTS_dir_ht_path

        if enum_host_tests_path:
            gt_logger.gt_log_tab("found 'host_tests' directory in: '%s'" %
                                 enum_host_tests_path,
                                 print_text=verbose)
        else:
            gt_logger.gt_log_tab(
                "'host_tests' directory not found: two directory levels above image path checked",
                print_text=verbose)

    gt_logger.gt_log("selecting test case observer...", print_text=verbose)
    if digest_source:
        gt_logger.gt_log_tab("selected digest source: %s" % digest_source,
                             print_text=verbose)

    # Select who will digest test case serial port data
    if digest_source == 'stdin':
        # When we want to scan stdin for test results
        raise NotImplementedError
    elif digest_source is not None:
        # When we want to open file to scan for test results
        raise NotImplementedError

    # Command executing CLI for host test supervisor (in detect-mode)
    cmd = [
        "mbedhtrun",
        '-m',
        micro,
        '-p',
        port,
        '-f',
        '"%s"' % image_path,
    ]

    if enum_host_tests_path:
        cmd += ["-e", '"%s"' % enum_host_tests_path]

    if global_resource_mgr:
        # Use global resource manager to execute test
        # Example:
        # $ mbedhtrun -p :9600 -f "tests-mbed_drivers-generic_tests.bin" -m K64F --grm raas_client:10.2.203.31:8000
        cmd += ['--grm', global_resource_mgr]
    else:
        # Use local resources to execute tests
        # Add extra parameters to host_test
        if disk:
            cmd += ["-d", disk]
        if program_cycle_s:
            cmd += ["-C", str(program_cycle_s)]
        if forced_reset_timeout:
            cmd += ["-R", str(forced_reset_timeout)]
        if copy_method:
            cmd += ["-c", copy_method]
        if target_id:
            cmd += ["-t", target_id]
        if reset:
            cmd += ["-r", reset]
        if reset_tout:
            cmd += ["-R", str(reset_tout)]
        if json_test_cfg:
            cmd += ["--test-cfg", '"%s"' % str(json_test_cfg)]
        if run_app:
            cmd += ["--run"]  # -f stores binary name!

    gt_logger.gt_log_tab("calling mbedhtrun: %s" % " ".join(cmd),
                         print_text=verbose)
    gt_logger.gt_log("mbed-host-test-runner: started")

    htrun_output = str()
    start_time = time()

    # run_command will return None if process can't be opened (Issue #134)
    p = run_command(cmd)
    if not p:
        # int value > 0 notifies caller that starting of host test process failed
        return RUN_HOST_TEST_POPEN_ERROR

    for line in iter(p.stdout.readline, b''):
        htrun_output += line
        # When dumping output to file both \r and \n will be a new line
        # To avoid this "extra new-line" we only use \n at the end
        if verbose:
            sys.stdout.write(line.rstrip() + '\n')
            sys.stdout.flush()

    # Check if process was terminated by signal
    returncode = p.wait()
    if returncode < 0:
        return returncode

    end_time = time()
    testcase_duration = end_time - start_time  # Test case duration from reset to {end}

    htrun_output = get_printable_string(htrun_output)
    result = get_test_result(htrun_output)
    result_test_cases = get_testcase_result(htrun_output)
    test_cases_summary = get_testcase_summary(htrun_output)
    max_heap, thread_stack_info = get_memory_metrics(htrun_output)

    thread_stack_summary = []

    if thread_stack_info:
        thread_stack_summary = get_thread_stack_info_summary(thread_stack_info)

    memory_metrics = {
        "max_heap": max_heap,
        "thread_stack_info": thread_stack_info,
        "thread_stack_summary": thread_stack_summary
    }
    get_coverage_data(build_path, htrun_output)

    gt_logger.gt_log("mbed-host-test-runner: stopped and returned '%s'" %
                     result,
                     print_text=verbose)
    return (result, htrun_output, testcase_duration, duration,
            result_test_cases, test_cases_summary, memory_metrics)
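
A hypothetical call to run_host_test, shown only to illustrate the shape of its return value; the binary path, mount point, serial port and target id below are placeholders, and the 7-element tuple layout follows the return statement above:

# Sketch only: all concrete values are made up
host_test_result = run_host_test(
    "./build/frdm-k64f-gcc/test/my-test.bin",  # image_path (hypothetical)
    "/media/MBED",                             # disk / mount point (hypothetical)
    "/dev/ttyACM0",                            # serial port (hypothetical)
    "./build/frdm-k64f-gcc",                   # build_path (hypothetical)
    "0240000031754e45000000000000000000000000",  # target_id (hypothetical)
    micro="K64F",
    copy_method="shell",
    verbose=True)

if isinstance(host_test_result, int):
    # > 0: starting the mbedhtrun process failed, < 0: mbedhtrun was terminated by a signal
    print("mbedhtrun error code: %d" % host_test_result)
else:
    (result, htrun_output, testcase_duration, duration,
     result_test_cases, test_cases_summary, memory_metrics) = host_test_result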
Esempio n. 46
0
from mbed_greentea.mbed_greentea_dlm import GREENTEA_KETTLE_PATH
from mbed_greentea.mbed_greentea_dlm import greentea_get_app_sem
from mbed_greentea.mbed_greentea_dlm import greentea_update_kettle
from mbed_greentea.mbed_greentea_dlm import greentea_clean_kettle
from mbed_greentea.mbed_yotta_api import build_with_yotta
from mbed_greentea.mbed_greentea_hooks import GreenteaHooks
from mbed_greentea.mbed_yotta_module_parse import YottaConfig
from mbed_greentea.mbed_yotta_module_parse import YottaModule

try:
    import mbed_lstools
    import mbed_host_tests
except ImportError as e:
    gt_logger.gt_log_err("Not all required Python modules were imported!")
    gt_logger.gt_log_err(str(e))
    gt_logger.gt_log("Check if:")
    gt_logger.gt_log_tab(
        "1. You've correctly installed dependency module using setup tools or pip:"
    )
    gt_logger.gt_log_tab("* python setup.py install", tab_count=2)
    gt_logger.gt_log_tab("* pip install <module-name>", tab_count=2)
    gt_logger.gt_log_tab(
        "2. There are no errors preventing import in dependency modules")
    gt_logger.gt_log_tab(
        "See: https://github.com/ARMmbed/greentea#installing-greentea")
    exit(-2342)

MBED_LMTOOLS = 'mbed_lstools' in sys.modules
MBED_HOST_TESTS = 'mbed_host_tests' in sys.modules

RET_NO_DEVICES = 1001
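
The MBED_LMTOOLS / MBED_HOST_TESTS flags above follow a guarded-import pattern: import the optional dependency inside try/except and then check sys.modules. A stripped-down sketch of the same pattern with a hypothetical module name:

import sys

try:
    import some_optional_module  # hypothetical dependency name, replace with a real package
except ImportError as e:
    print("missing dependency: %s" % str(e))

# True only if the import above succeeded and the module is now loaded
SOME_OPTIONAL_MODULE_AVAILABLE = 'some_optional_module' in sys.modules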
Esempio n. 47
0
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is main CLI function with all command line parameters
    @details This function also implements the CLI workflow depending on the CLI parameters provided
    @return Status code that is propagated to the environment as the process exit code
    """

    def filter_ready_devices(mbeds_list):
        """! Filters list of MUTs to check if all MUTs are correctly detected with mbed-ls module.
        @details This function logs a lot to help users figure out root cause of their problems
        @param mbeds_list List of MUTs to verify
        @return Tuple of (MUTS detected correctly, MUTs not detected fully)
        """
        ready_mbed_devices = []  # Devices which can be used (are fully detected)
        not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

        gt_logger.gt_log("detected %d device%s" % (len(mbeds_list), "s" if len(mbeds_list) != 1 else ""))
        for mut in mbeds_list:
            if not all(mut.values()):
                gt_logger.gt_log_err("mbed-ls was unable to enumerate correctly all properties of the device!")
                gt_logger.gt_log_tab(
                    "check with 'mbedls -j' command if all properties of your device are enumerated properly"
                )
                for prop in mut:
                    if not mut[prop]:
                        # Adding MUT to NOT DETECTED FULLY list
                        if mut not in not_ready_mbed_devices:
                            not_ready_mbed_devices.append(mut)
                        gt_logger.gt_log_err("mbed-ls property '%s' is '%s'" % (prop, str(mut[prop])))
                        if prop == "serial_port":
                            gt_logger.gt_log_tab("check if your serial port driver is correctly installed!")
                        if prop == "mount_point":
                            gt_logger.gt_log_tab("check if your OS can detect and mount mbed device mount point!")
            else:
                # Adding MUT to DETECTED CORRECTLY list
                ready_mbed_devices.append(mut)
        return (ready_mbed_devices, not_ready_mbed_devices)

    def get_parallel_value(value):
        """! Get correct value for parallel switch (--parallel)
        @param value Value passed from --parallel
        @return Sanitized integer value for the number of parallel instances (at least 1)
        """
        try:
            parallel_test_exec = int(value)
            if parallel_test_exec < 1:
                parallel_test_exec = 1
        except ValueError:
            gt_logger.gt_log_err("argument of mode --parallel is not a int, disabled parallel mode")
            parallel_test_exec = 1
        return parallel_test_exec

    if not MBED_LMTOOLS:
        gt_logger.gt_log_err("error: mbed-ls proprietary module not installed")
        return -1

    if not MBED_HOST_TESTS:
        gt_logger.gt_log_err("error: mbed-host-tests proprietary module not installed")
        return -1

    # This is how you magically control colours in this piece of art software
    gt_logger.colorful(not opts.plain)

    # Prints version and exits
    if opts.version:
        print_version()
        return 0

    # Load test specification or print warnings / info messages and exit CLI mode
    test_spec, ret = get_test_spec(opts)
    if not test_spec:
        return ret

    # Verbose flag
    verbose = opts.verbose_test_result_only

    # We will load hooks from JSON file to support extra behaviour during test execution
    greentea_hooks = GreenteaHooks(opts.hooks_json) if opts.hooks_json else None

    # Capture alternative test console inputs, used e.g. in 'yotta test command'
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(
            None,
            None,
            None,
            None,
            None,
            hooks=greentea_hooks,
            digest_source=opts.digest_source,
            enum_host_tests_path=enum_host_tests_path,
            verbose=verbose,
        )

        # Some error in htrun, abort test execution
        if isinstance(host_test_result, int):
            # int(host_test_result) > 0 - Call to mbedhtrun failed
            # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
            return host_test_result

        # If execution was successful 'run_host_test' return tuple with results
        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = (
            host_test_result
        )
        status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
        return status

    ### Query with mbedls for available mbed-enabled devices
    gt_logger.gt_log("detecting connected mbed-enabled devices...")

    # Detect devices connected to system
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds_ext()

    if opts.global_resource_mgr:
        # Mocking available platform requested by --grm switch
        grm_values = parse_global_resource_mgr(opts.global_resource_mgr)
        if grm_values:
            gt_logger.gt_log_warn("entering global resource manager mbed-ls dummy mode!")
            grm_platform_name, grm_module_name, grm_ip_name, grm_port_name = grm_values
            mbeds_list = []
            mbeds_list.append(mbeds.get_dummy_platform(grm_platform_name))
            opts.global_resource_mgr = ":".join(grm_values[1:])
            gt_logger.gt_log_tab("adding dummy platform '%s'" % grm_platform_name)
        else:
            gt_logger.gt_log("global resource manager switch '--grm %s' in wrong format!" % opts.global_resource_mgr)
            return -1

    ready_mbed_devices = []  # Devices which can be used (are fully detected)
    not_ready_mbed_devices = []  # Devices which can't be used (are not fully detected)

    if mbeds_list:
        ready_mbed_devices, not_ready_mbed_devices = filter_ready_devices(mbeds_list)
        if ready_mbed_devices:
            # devices in form of a pretty formatted table
            for line in log_mbed_devices_in_table(ready_mbed_devices).splitlines():
                gt_logger.gt_log_tab(line.strip(), print_text=verbose)
    else:
        gt_logger.gt_log_err("no compatible devices detected")
        return RET_NO_DEVICES

    ### We can filter in only specific target ids
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_logger.gt_log("filtering out target ids not on below list (specified with --use-tids switch)")
        accepted_target_ids = opts.use_target_ids.split(",")
        for tid in accepted_target_ids:
            gt_logger.gt_log_tab("accepting target id '%s'" % gt_logger.gt_bright(tid))

    test_exec_retcode = 0  # Incremented each time a test case result is not 'OK'
    test_platforms_match = 0  # Count how many tests were actually run with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings

    test_report = {}  # Test report used to export to Junit, HTML etc...
    muts_to_test = []  # MUTs to actually be tested
    test_queue = Queue()  # contains information about test_bin and image_path for each test case
    test_result_queue = Queue()  # used to store results of each thread
    execute_threads = []  # list of threads to run test cases

    ### Check if the argument of --parallel mode is an integer greater than or equal to 1

    parallel_test_exec = get_parallel_value(opts.parallel_test_exec)

    # Values used to generate random seed for test execution order shuffle
    SHUFFLE_SEED_ROUND = 10  # Value used to round float random seed
    shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND)

    # Set shuffle seed if it is provided with command line option
    if opts.shuffle_test_seed:
        shuffle_random_seed = round(float(opts.shuffle_test_seed), SHUFFLE_SEED_ROUND)

    ### Testing procedures, for each target, for each target's compatible platform
    # When using a test spec (--test-spec switch), the command line option -t <list_of_targets>
    # is used to enumerate builds from the test spec we are supplying
    filter_test_builds = opts.list_of_targets.split(",") if opts.list_of_targets else None
    for test_build in test_spec.get_test_builds(filter_test_builds):
        platform_name = test_build.get_platform()
        gt_logger.gt_log(
            "processing target '%s' toolchain '%s' compatible platforms... (note: switch set to --parallel %d)"
            % (
                gt_logger.gt_bright(platform_name),
                gt_logger.gt_bright(test_build.get_toolchain()),
                int(opts.parallel_test_exec),
            )
        )

        baudrate = test_build.get_baudrate()

        ### Select MUTS to test from list of available MUTS to start testing
        mut = None
        number_of_parallel_instances = 1
        for mbed_dev in ready_mbed_devices:
            if accepted_target_ids and mbed_dev["target_id"] not in accepted_target_ids:
                continue

            if mbed_dev["platform_name"] == platform_name:
                # We will force configuration specific baudrate by adding baudrate to serial port
                # Only add baudrate decoration for serial port if it's not already there
                # Format used by mbedhtrun: 'serial_port' = '<serial_port_name>:<baudrate>'
                if not mbed_dev["serial_port"].endswith(str(baudrate)):
                    mbed_dev["serial_port"] = "%s:%d" % (mbed_dev["serial_port"], baudrate)
                mut = mbed_dev
                muts_to_test.append(mbed_dev)
                if number_of_parallel_instances < parallel_test_exec:
                    number_of_parallel_instances += 1
                else:
                    break

        # devices in form of a pretty formatted table
        for line in log_mbed_devices_in_table(muts_to_test).splitlines():
            gt_logger.gt_log_tab(line.strip(), print_text=verbose)

        # Configuration print mode:
        if opts.verbose_test_configuration_only:
            continue

        ### If we have at least one available device we can proceed
        if mut:
            target_platforms_match += 1

            build = test_build.get_name()
            build_path = test_build.get_path()

            # Demo mode: --run implementation (already added --run to mbedhtrun)
            # We want to pass the file name to mbedhtrun (--run NAME => -f NAME) and run only one binary
            if opts.run_app:
                gt_logger.gt_log(
                    "running '%s' for '%s'-'%s'"
                    % (
                        gt_logger.gt_bright(opts.run_app),
                        gt_logger.gt_bright(platform_name),
                        gt_logger.gt_bright(test_build.get_toolchain()),
                    )
                )
                disk = mut["mount_point"]
                port = mut["serial_port"]
                micro = mut["platform_name"]
                program_cycle_s = get_platform_property(micro, "program_cycle_s")
                copy_method = opts.copy_method if opts.copy_method else "shell"
                enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

                test_platforms_match += 1
                host_test_result = run_host_test(
                    opts.run_app,
                    disk,
                    port,
                    build_path,
                    mut["target_id"],
                    micro=micro,
                    copy_method=copy_method,
                    program_cycle_s=program_cycle_s,
                    digest_source=opts.digest_source,
                    json_test_cfg=opts.json_test_configuration,
                    run_app=opts.run_app,
                    enum_host_tests_path=enum_host_tests_path,
                    verbose=True,
                )

                # Some error in htrun, abort test execution
                if isinstance(host_test_result, int):
                    # int(host_test_result) > 0 - Call to mbedhtrun failed
                    # int(host_test_result) < 0 - Something went wrong while executing mbedhtrun
                    return host_test_result

                # If execution was successful 'run_host_test' return tuple with results
                single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = (
                    host_test_result
                )
                status = TEST_RESULTS.index(single_test_result) if single_test_result in TEST_RESULTS else -1
                if single_test_result != TEST_RESULT_OK:
                    test_exec_retcode += 1

            test_list = test_build.get_tests()

            filtered_ctest_test_list = create_filtered_test_list(
                test_list, opts.test_by_names, opts.skip_test, test_spec=test_spec
            )

            gt_logger.gt_log(
                "running %d test%s for platform '%s' and toolchain '%s'"
                % (
                    len(filtered_ctest_test_list),
                    "s" if len(filtered_ctest_test_list) != 1 else "",
                    gt_logger.gt_bright(platform_name),
                    gt_logger.gt_bright(test_build.get_toolchain()),
                )
            )

            # Test execution order can be shuffled (also with provided random seed)
            # for test execution reproduction.
            filtered_ctest_test_list_keys = filtered_ctest_test_list.keys()
            if opts.shuffle_test_order:
                # We want to shuffle test names randomly
                random.shuffle(filtered_ctest_test_list_keys, lambda: shuffle_random_seed)

            for test_name in filtered_ctest_test_list_keys:
                image_path = (
                    filtered_ctest_test_list[test_name].get_binary(binary_type=TestBinary.BIN_TYPE_BOOTABLE).get_path()
                )
                if image_path is None:
                    gt_logger.gt_log_err("Failed to find test binary for test %s flash method %s" % (test_name, "usb"))
                else:
                    test = {"test_bin": test_name, "image_path": image_path}
                    test_queue.put(test)

            number_of_threads = 0
            for mut in muts_to_test:
                # Experimental, parallel test execution
                if number_of_threads < parallel_test_exec:
                    args = (test_result_queue, test_queue, opts, mut, build, build_path, greentea_hooks)
                    t = Thread(target=run_test_thread, args=args)
                    execute_threads.append(t)
                    number_of_threads += 1

        gt_logger.gt_log_tab(
            "use %s instance%s of execution threads for testing"
            % (len(execute_threads), "s" if len(execute_threads) != 1 else str()),
            print_text=verbose,
        )
        for t in execute_threads:
            t.daemon = True
            t.start()

        # merge partial test reports from different threads to final test report
        for t in execute_threads:
            try:
                t.join()  # blocking
                test_return_data = test_result_queue.get(False)
            except Exception as e:
                # No test report generated
                gt_logger.gt_log_err("could not generate test report" + str(e))
                test_exec_retcode += -1000
                return test_exec_retcode

            test_platforms_match += test_return_data["test_platforms_match"]
            test_exec_retcode += test_return_data["test_exec_retcode"]
            partial_test_report = test_return_data["test_report"]
            # todo: find better solution, maybe use extend
            for report_key in partial_test_report.keys():
                if report_key not in test_report:
                    test_report[report_key] = {}
                    test_report.update(partial_test_report)
                else:
                    test_report[report_key].update(partial_test_report[report_key])

        execute_threads = []

        if opts.verbose_test_configuration_only:
            print
            print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
            return 0

        gt_logger.gt_log("all tests finished!")

    # We will execute post test hooks on tests
    for build_name in test_report:
        test_name_list = []  # All test case names for a particular build
        for test_name in test_report[build_name]:
            test = test_report[build_name][test_name]
            # Test completed with a reportable result (passed or failed)
            if test["single_test_result"] in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
                test_name_list.append(test_name)
                # Call hook executed for each test, just after all tests are finished
                if greentea_hooks:
                    # We can execute this test hook just after all tests are finished ('hook_post_test_end')
                    format = {
                        "test_name": test_name,
                        "test_bin_name": test["test_bin_name"],
                        "image_path": test["image_path"],
                        "build_path": test["build_path"],
                        "build_path_abs": test["build_path_abs"],
                    }
                    greentea_hooks.run_hook_ext("hook_post_test_end", format)
        if greentea_hooks:
            build = test_spec.get_test_build(build_name)
            assert build is not None, "Failed to find build info for build %s" % build_name

            # Call hook executed for each yotta target, just after all tests are finished
            build_path = build.get_path()
            build_path_abs = os.path.abspath(build_path)
            # We can execute this test hook just after all tests are finished ('hook_post_test_end')
            format = {"build_path": build_path, "build_path_abs": build_path_abs, "test_name_list": test_name_list}
            greentea_hooks.run_hook_ext("hook_post_all_test_end", format)

    # This tool is designed to work in CI.
    # We want to return success codes based on tool actions:
    # only if tests were executed and all of them passed do we
    # return 0 (success).
    if not opts.only_build_tests:
        # Prints shuffle seed
        gt_logger.gt_log("shuffle seed: %.*f" % (SHUFFLE_SEED_ROUND, shuffle_random_seed))

        def dump_report_to_text_file(filename, content):
            """! Closure for report dumps to text files
            @param filename Name of destination file
            @param content Text content of the file to write
            @return True if write was successful, else return False
            """
            try:
                with open(filename, "w") as f:
                    f.write(content)
            except IOError as e:
                gt_logger.gt_log_err("can't export to '%s', reason:" % filename)
                gt_logger.gt_log_err(str(e))
                return False
            return True

        # Reports to JUNIT file
        if opts.report_junit_file_name:
            gt_logger.gt_log("exporting to JUNIT file '%s'..." % gt_logger.gt_bright(opts.report_junit_file_name))
            # This test specification will be used by the JUnit exporter to populate TestSuite.properties (useful meta-data for the viewer)
            test_suite_properties = {}
            for target_name in test_report:
                test_build_properties = get_test_build_properties(test_spec, target_name)
                if test_build_properties:
                    test_suite_properties[target_name] = test_build_properties
            junit_report = exporter_testcase_junit(test_report, test_suite_properties=test_suite_properties)
            dump_report_to_text_file(opts.report_junit_file_name, junit_report)

        # Reports to text file
        if opts.report_text_file_name:
            gt_logger.gt_log("exporting to TEXT '%s'..." % gt_logger.gt_bright(opts.report_text_file_name))
            # Useful text reporter for those who prefer not to copy-paste the results table into files
            text_report, text_results = exporter_text(test_report)
            text_testcase_report, text_testcase_results = exporter_testcase_text(test_report)
            text_final_report = "\n".join([text_report, text_results, text_testcase_report, text_testcase_results])
            dump_report_to_text_file(opts.report_text_file_name, text_final_report)

        # Reports to JSON file
        if opts.report_json_file_name:
            # We will not print summary and json report together
            gt_logger.gt_log("exporting to JSON '%s'..." % gt_logger.gt_bright(opts.report_json_file_name))
            json_report = exporter_json(test_report)
            dump_report_to_text_file(opts.report_json_file_name, json_report)

        # Reports to HTML file
        if opts.report_html_file_name:
            gt_logger.gt_log("exporting to HTML file '%s'..." % gt_logger.gt_bright(opts.report_html_file_name))
            # Generate an HTML page displaying all of the results
            html_report = exporter_html(test_report)
            dump_report_to_text_file(opts.report_html_file_name, html_report)

        # Final summary
        if test_report:
            # Test suite report
            gt_logger.gt_log("test suite report:")
            text_report, text_results = exporter_text(test_report)
            print text_report
            gt_logger.gt_log("test suite results: " + text_results)
            # test case detailed report
            gt_logger.gt_log("test case report:")
            text_testcase_report, text_testcase_results = exporter_testcase_text(test_report)
            print text_testcase_report
            gt_logger.gt_log("test case results: " + text_testcase_results)

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_logger.gt_log_warn("no platform/target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_logger.gt_log_warn("no matching platforms were found!")
            test_exec_retcode += -100

    return test_exec_retcode
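
A minimal sketch of the per-thread report merge flagged with the TODO above, assuming the two-level report layout (build name -> test suite name -> result fields) used throughout these examples; the helper name merge_partial_report is illustrative and not part of mbed-greentea.

# Sketch only: merge partial per-thread reports into the final report.
# Assumes the layout used above: {build_name: {test_suite_name: {...}}}.
def merge_partial_report(test_report, partial_test_report):
    for build_name, suites in partial_test_report.items():
        # Create the build entry on first sight, then merge its suite results
        test_report.setdefault(build_name, {}).update(suites)
    return test_report

final_report = {}
merge_partial_report(final_report, {"frdm-k64f-gcc": {"test-dev_null": {"single_test_result": "OK"}}})
merge_partial_report(final_report, {"frdm-k64f-gcc": {"test-timer": {"single_test_result": "FAIL"}}})
print(final_report)  # both suites end up under the same build key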
Esempio n. 48
0
def run_test_thread(test_result_queue, test_queue, opts, mut, mut_info,
                    yotta_target_name, greentea_hooks):
    test_exec_retcode = 0
    test_platforms_match = 0
    test_report = {}
    yotta_config_baudrate = None  # Default serial port baudrate forced by configuration

    yotta_config = YottaConfig()
    yotta_config.init(yotta_target_name)

    yotta_config_baudrate = yotta_config.get_baudrate()

    while not test_queue.empty():
        try:
            test = test_queue.get(False)
        except Exception as e:
            gt_logger.gt_log_err(str(e))
            break

        test_result = 'SKIPPED'

        disk = mut['mount_point']
        port = mut['serial_port']
        micro = mut['platform_name']
        program_cycle_s = mut_info['properties']['program_cycle_s']
        copy_method = opts.copy_method if opts.copy_method else 'shell'
        verbose = opts.verbose_test_result_only
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

        # We will force configuration specific baudrate
        if port:
            port = "%s:%d" % (port, yotta_config_baudrate)

        test_platforms_match += 1
        host_test_result = run_host_test(
            test['image_path'],
            disk,
            port,
            yotta_target_name,
            mut['target_id'],
            micro=micro,
            copy_method=copy_method,
            program_cycle_s=program_cycle_s,
            digest_source=opts.digest_source,
            json_test_cfg=opts.json_test_configuration,
            enum_host_tests_path=enum_host_tests_path,
            verbose=verbose)

        # Some error in htrun, abort test execution
        if host_test_result < 0:
            break

        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
        test_result = single_test_result

        build_path = os.path.join("./build", yotta_target_name)
        build_path_abs = os.path.abspath(build_path)

        if single_test_result != TEST_RESULT_OK:
            test_exec_retcode += 1

        if single_test_result in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
            if greentea_hooks:
                # Test was successful
                # We can execute test hook just after test is finished ('hook_test_end')
                format = {
                    "test_name": test['test_bin'],
                    "test_bin_name": os.path.basename(test['image_path']),
                    "image_path": test['image_path'],
                    "build_path": build_path,
                    "build_path_abs": build_path_abs,
                    "yotta_target_name": yotta_target_name,
                }
                greentea_hooks.run_hook_ext('hook_test_end', format)

        # Update report for optional reporting feature
        test_suite_name = test['test_bin'].lower()
        if yotta_target_name not in test_report:
            test_report[yotta_target_name] = {}

        if test_suite_name not in test_report[yotta_target_name]:
            test_report[yotta_target_name][test_suite_name] = {}

        if not test_cases_summary and not result_test_cases:
            gt_logger.gt_log_warn("test case summary event not found")
            gt_logger.gt_log_tab(
                "no test case report present, assuming test suite to be a single test case!"
            )

            # We will map the test suite result to a test case to
            # output a valid test case in the report
            # (a standalone sketch of this mapping follows this example)

            # Generate an "artificial" test case name from the test suite name
            # E.g.:
            #   mbed-drivers-test-dev_null -> dev_null
            test_case_name = test_suite_name
            test_str_idx = test_suite_name.find("-test-")
            if test_str_idx != -1:
                test_case_name = test_case_name[test_str_idx + 6:]

            gt_logger.gt_log_tab("test suite: %s" % test_suite_name)
            gt_logger.gt_log_tab("test case: %s" % test_case_name)

            # Test case result: OK, FAIL or ERROR
            tc_result_text = {
                "OK": "OK",
                "FAIL": "FAIL",
            }.get(single_test_result, 'ERROR')

            # Test case integer success code OK, FAIL and ERROR: (0, >0, <0)
            tc_result = {
                "OK": 0,
                "FAIL": 1024,
                "ERROR": -1024,
            }.get(tc_result_text, -2048)

            # Test case passes and failures: (1 pass, 0 failures) or (0 passes, 1 failure)
            tc_passed, tc_failed = {
                0: (1, 0),
            }.get(tc_result, (0, 1))

            # Test case report build for whole binary
            # Add test case made from test suite result to test case report
            result_test_cases = {
                test_case_name: {
                    'duration': single_testduration,
                    'time_start': 0.0,
                    'time_end': 0.0,
                    'utest_log': single_test_output.splitlines(),
                    'result_text': tc_result_text,
                    'passed': tc_passed,
                    'failed': tc_failed,
                    'result': tc_result,
                }
            }

            # Test summary build for whole binary (as a test case)
            test_cases_summary = (
                tc_passed,
                tc_failed,
            )

        gt_logger.gt_log("test on hardware with target id: %s" %
                         (mut['target_id']))
        gt_logger.gt_log(
            "test suite '%s' %s %s in %.2f sec" %
            (test['test_bin'], '.' *
             (80 - len(test['test_bin'])), test_result, single_testduration))

        # Test report build for whole binary
        test_report[yotta_target_name][test_suite_name][
            'single_test_result'] = single_test_result
        test_report[yotta_target_name][test_suite_name][
            'single_test_output'] = single_test_output
        test_report[yotta_target_name][test_suite_name][
            'elapsed_time'] = single_testduration
        test_report[yotta_target_name][test_suite_name][
            'platform_name'] = micro
        test_report[yotta_target_name][test_suite_name][
            'copy_method'] = copy_method
        test_report[yotta_target_name][test_suite_name][
            'testcase_result'] = result_test_cases

        test_report[yotta_target_name][test_suite_name][
            'build_path'] = build_path
        test_report[yotta_target_name][test_suite_name][
            'build_path_abs'] = build_path_abs
        test_report[yotta_target_name][test_suite_name]['image_path'] = test[
            'image_path']
        test_report[yotta_target_name][test_suite_name][
            'test_bin_name'] = os.path.basename(test['image_path'])

        passes_cnt, failures_cnt = 0, 0
        for tc_name in sorted(result_test_cases.keys()):
            gt_logger.gt_log_tab(
                "test case: '%s' %s %s in %.2f sec" %
                (tc_name, '.' * (80 - len(tc_name)),
                 result_test_cases[tc_name].get('result_text', '_'),
                 result_test_cases[tc_name].get('duration', 0.0)))
            if result_test_cases[tc_name].get('result_text', '_') == 'OK':
                passes_cnt += 1
            else:
                failures_cnt += 1

        if test_cases_summary:
            passes, failures = test_cases_summary
            gt_logger.gt_log("test case summary: %d pass%s, %d failur%s" %
                             (passes, '' if passes == 1 else 'es', failures,
                              'e' if failures == 1 else 'es'))
            if passes != passes_cnt or failures != failures_cnt:
                gt_logger.gt_log_err(
                    "test case summary mismatch: reported passes vs failures miscount!"
                )
                gt_logger.gt_log_tab(
                    "(%d, %d) vs (%d, %d)" %
                    (passes, failures, passes_cnt, failures_cnt))

        if single_test_result != 'OK' and not verbose and opts.report_fails:
            # In some cases we want to print console to see why test failed
            # even if we are not in verbose mode
            gt_logger.gt_log_tab(
                "test failed, reporting console output (specified with --report-fails option)"
            )
            print
            print single_test_output

    #greentea_release_target_id(mut['target_id'], gt_instance_uuid)
    test_result_queue.put({
        'test_platforms_match': test_platforms_match,
        'test_exec_retcode': test_exec_retcode,
        'test_report': test_report
    })
    return
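
The block above that synthesizes an "artificial" test case from the test suite result can be isolated into a small helper. A minimal sketch, assuming the same '-test-' naming convention and OK/FAIL/ERROR codes as the snippet above; the helper name is illustrative and not part of mbed-greentea.

# Sketch only: map a test suite result to a single artificial test case.
def suite_result_to_test_case(test_suite_name, single_test_result):
    # Derive a test case name from the suite name, e.g.
    # "mbed-drivers-test-dev_null" -> "dev_null"
    test_case_name = test_suite_name
    idx = test_suite_name.find("-test-")
    if idx != -1:
        test_case_name = test_suite_name[idx + len("-test-"):]

    result_text = {"OK": "OK", "FAIL": "FAIL"}.get(single_test_result, "ERROR")
    result_code = {"OK": 0, "FAIL": 1024, "ERROR": -1024}[result_text]
    passed, failed = (1, 0) if result_code == 0 else (0, 1)
    return test_case_name, result_text, result_code, passed, failed

print(suite_result_to_test_case("mbed-drivers-test-dev_null", "OK"))
# -> ('dev_null', 'OK', 0, 1, 0)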
Esempio n. 49
0
def run_test_thread(test_result_queue, test_queue, opts, mut, mut_info, yotta_target_name):
    test_exec_retcode = 0
    test_platforms_match = 0
    test_report = {}
    yotta_config_baudrate = None    # Default serial port baudrate forced by configuration

    yotta_config = YottaConfig()
    yotta_config.init(yotta_target_name)

    yotta_config_baudrate = yotta_config.get_baudrate()

    while not test_queue.empty():
        try:
            test = test_queue.get(False)
        except Exception as e:
            gt_logger.gt_log_err(str(e))
            break

        test_result = 'SKIPPED'

        disk = mut['mount_point']
        port = mut['serial_port']
        micro = mut['platform_name']
        program_cycle_s = mut_info['properties']['program_cycle_s']
        copy_method = opts.copy_method if opts.copy_method else 'shell'
        verbose = opts.verbose_test_result_only
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

        # We will force configuration specific baudrate
        if port:
            port = "%s:%d"% (port, yotta_config_baudrate)

        test_platforms_match += 1
        host_test_result = run_host_test(test['image_path'],
                                         disk,
                                         port,
                                         micro=micro,
                                         copy_method=copy_method,
                                         program_cycle_s=program_cycle_s,
                                         digest_source=opts.digest_source,
                                         json_test_cfg=opts.json_test_configuration,
                                         enum_host_tests_path=enum_host_tests_path,
                                         verbose=verbose)

        single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
        test_result = single_test_result
        if single_test_result != TEST_RESULT_OK:
            test_exec_retcode += 1

        # Update report for optional reporting feature
        test_name = test['test_bin'].lower()
        if yotta_target_name not in test_report:
            test_report[yotta_target_name] = {}
        if test_name not in test_report[yotta_target_name]:
            test_report[yotta_target_name][test_name] = {}

        test_report[yotta_target_name][test_name]['single_test_result'] = single_test_result
        test_report[yotta_target_name][test_name]['single_test_output'] = single_test_output
        test_report[yotta_target_name][test_name]['elapsed_time'] = single_testduration
        test_report[yotta_target_name][test_name]['platform_name'] = micro
        test_report[yotta_target_name][test_name]['copy_method'] = copy_method

        gt_logger.gt_log("test on hardware with target id: %s \n\ttest '%s' %s %s in %.2f sec"% (mut['target_id'], test['test_bin'], '.' * (80 - len(test['test_bin'])), test_result, single_testduration))

        if single_test_result != 'OK' and not verbose and opts.report_fails:
            # In some cases we want to print console to see why test failed
            # even if we are not in verbose mode
            gt_logger.gt_log_tab("test failed, reporting console output (specified with --report-fails option)")
            print
            print single_test_output

    #greentea_release_target_id(mut['target_id'], gt_instance_uuid)
    test_result_queue.put({'test_platforms_match': test_platforms_match,
                           'test_exec_retcode': test_exec_retcode,
                           'test_report': test_report})
    return
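
Both run_test_thread variants are written as queue workers: the caller fills a work queue with test descriptors, starts one thread per device under test, and drains a result queue with one partial report per thread. A minimal, self-contained sketch of that wiring with a stand-in worker; every name and value below is illustrative.

# Sketch only: the producer/consumer wiring around run_test_thread.
# The worker below just mirrors the pattern; the real code calls
# run_test_thread(test_result_queue, test_queue, opts, mut, mut_info, ...).
from threading import Thread
try:
    from Queue import Queue, Empty   # Python 2
except ImportError:
    from queue import Queue, Empty   # Python 3

def worker(result_queue, work_queue):
    report = {}
    while True:
        try:
            test = work_queue.get(False)
        except Empty:
            break
        report[test["test_bin"]] = "OK"   # placeholder for a real test run
    result_queue.put({"test_report": report})

test_queue = Queue()
test_result_queue = Queue()
for name in ["test-dev_null", "test-timer"]:
    test_queue.put({"test_bin": name, "image_path": "./build/%s.bin" % name})

threads = [Thread(target=worker, args=(test_result_queue, test_queue)) for _ in range(2)]
for t in threads:
    t.daemon = True
    t.start()
for t in threads:
    t.join()
    print(test_result_queue.get(False))  # one partial report per worker thread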
Esempio n. 50
0
def run_host_test(image_path,
                  disk,
                  port,
                  build_path,
                  target_id,
                  duration=10,
                  micro=None,
                  reset=None,
                  reset_tout=None,
                  verbose=False,
                  copy_method=None,
                  program_cycle_s=None,
                  forced_reset_timeout=None,
                  digest_source=None,
                  json_test_cfg=None,
                  max_failed_properties=5,
                  enum_host_tests_path=None,
                  global_resource_mgr=None,
                  run_app=None):
    """! This function runs host test supervisor (executes mbedhtrun) and checks output from host test process.
    @param image_path Path to binary file for flashing
    @param disk Currently mounted mbed-enabled devices disk (mount point)
    @param port Currently mounted mbed-enabled devices serial port (console)
    @param duration Test case timeout
    @param micro Mbed-enabled device name
    @param reset Reset type
    @param reset_tout Reset timeout (sec)
    @param verbose Verbose mode flag
    @param copy_method Copy method type (name)
    @param program_cycle_s Wait after flashing delay (sec)
    @param json_test_cfg Additional test configuration file path passed to host tests in JSON format
    @param max_failed_properties After how many unknown properties we will assume test is not ported
    @param enum_host_tests_path Directory where locally defined host tests may reside
    @param run_app Run application mode flag (we run application and grab serial port data)
    @param digest_source if None mbedhtrun will be executed. If 'stdin',
           stdin will be used via StdInObserver or file (if
           file name was given as switch option)
    @return Tuple with test results, test output, test duration times and test case results.
            Return int > 0 if running mbedhtrun process failed.
            Return int < 0 if something went wrong during mbedhtrun execution.
    """

    def run_command(cmd):
        """! Runs command and prints proc stdout on screen
        @param cmd List with command line to execute, e.g. ['ls', '-l']
        @return subprocess.Popen object, or None if starting the process failed
        """
        try:
            p = Popen(cmd,
                      stdout=PIPE,
                      stderr=STDOUT)
        except OSError as e:
            gt_logger.gt_log_err("run_host_test.run_command(%s) failed!"% str(cmd))
            gt_logger.gt_log_tab(str(e))
            return None
        return p

    def get_binary_host_tests_dir(binary_path, level=2):
        """! Checks if in binary test group has host_tests directory
        @param binary_path Path to binary in test specification
        @param level How many directories above test host_tests dir exists
        @return Path to host_tests dir in group binary belongs too, None if not found
        """
        try:
            binary_path_norm = os.path.normpath(binary_path)
            current_path_norm = os.path.normpath(os.getcwd())
            host_tests_path = binary_path_norm.split(os.sep)[:-level] + ['host_tests']

            idx = host_tests_path.index('.build')
            # Cut /.build/tests/TOOLCHAIN/TARGET
            host_tests_path = host_tests_path[:idx] + host_tests_path[idx+4:]
            host_tests_path = os.sep.join(host_tests_path)
        except Exception as e:
            gt_logger.gt_log_warn("there was a problem while looking for host_tests directory")
            gt_logger.gt_log_tab("level %d, path: %s"% (level, binary_path))
            gt_logger.gt_log_tab(str(e))
            return None

        if os.path.isdir(host_tests_path):
            return host_tests_path
        return None

    if not enum_host_tests_path:
        # If no -e was specified we will try to find a host_tests path ourselves
        #
        # * Path to binary starts from the "build" directory, and goes 4 levels
        #   deep: ./build/tests/TOOLCHAIN/TARGET
        # * Binary is inside a test group.
        #   For example: <app>/tests/test_group_name/test_dir/*.cpp.
        # * We will search for a directory called host_tests on the level of the test group (level=2)
        #   or on the level of the tests directory (level=3).
        #
        # If a host_tests directory is found above the test code we will pass it to mbedhtrun
        # using the -e <path_to_host_tests_dir> switch
        gt_logger.gt_log("checking for 'host_tests' directory above image directory structure", print_text=verbose)
        test_group_ht_path = get_binary_host_tests_dir(image_path, level=2)
        TESTS_dir_ht_path = get_binary_host_tests_dir(image_path, level=3)
        if test_group_ht_path:
            enum_host_tests_path = test_group_ht_path
        elif TESTS_dir_ht_path:
            enum_host_tests_path = TESTS_dir_ht_path

        if enum_host_tests_path:
            gt_logger.gt_log_tab("found 'host_tests' directory in: '%s'"% enum_host_tests_path, print_text=verbose)
        else:
            gt_logger.gt_log_tab("'host_tests' directory not found: two directory levels above image path checked", print_text=verbose)

    gt_logger.gt_log("selecting test case observer...", print_text=verbose)
    if digest_source:
        gt_logger.gt_log_tab("selected digest source: %s"% digest_source, print_text=verbose)

    # Select who will digest test case serial port data
    if digest_source == 'stdin':
        # When we want to scan stdin for test results
        raise NotImplementedError
    elif digest_source is not None:
        # When we want to open file to scan for test results
        raise NotImplementedError

    # Command executing CLI for host test supervisor (in detect-mode)
    cmd = ["mbedhtrun",
            '-m', micro,
            '-p', port,
            '-f', '"%s"'% image_path,
            ]

    if global_resource_mgr:
        # Use global resource manager to execute test
        # Example:
        # $ mbedhtrun -p :9600 -f "tests-mbed_drivers-generic_tests.bin" -m K64F --grm raas_client:10.2.203.31:8000
        cmd += ['--grm', global_resource_mgr]
    else:
        # Use local resources to execute tests
        # Add extra parameters to host_test
        if disk:
            cmd += ["-d", disk]
        if program_cycle_s:
            cmd += ["-C", str(program_cycle_s)]
        if forced_reset_timeout:
            cmd += ["-R", str(forced_reset_timeout)]
        if copy_method:
            cmd += ["-c", copy_method]
        if target_id:
            cmd += ["-t", target_id]
        if reset:
            cmd += ["-r", reset]
        if reset_tout:
            cmd += ["-R", str(reset_tout)]
        if json_test_cfg:
            cmd += ["--test-cfg", '"%s"' % str(json_test_cfg)]
        if run_app:
            cmd += ["--run"]    # -f stores binary name!
        if enum_host_tests_path:
            cmd += ["-e", '"%s"'% enum_host_tests_path]

    gt_logger.gt_log_tab("calling mbedhtrun: %s"% " ".join(cmd), print_text=verbose)
    gt_logger.gt_log("mbed-host-test-runner: started")

    htrun_output = str()
    start_time = time()

    # run_command will return None if process can't be opened (Issue #134)
    p = run_command(cmd)
    if not p:
        # int value > 0 notifies caller that starting of host test process failed
        return RUN_HOST_TEST_POPEN_ERROR

    for line in iter(p.stdout.readline, b''):
        htrun_output += line
        # When dumping output to file both \r and \n will be a new line
        # To avoid this "extra new-line" we only use \n at the end
        if verbose:
            sys.stdout.write(line.rstrip() + '\n')
            sys.stdout.flush()

    # Check if process was terminated by signal
    returncode = p.wait()
    if returncode < 0:
        return returncode

    end_time = time()
    testcase_duration = end_time - start_time   # Test case duration from reset to {end}

    result = get_test_result(htrun_output)
    result_test_cases = get_testcase_result(htrun_output)
    test_cases_summary = get_testcase_summary(htrun_output)
    get_coverage_data(build_path, htrun_output)

    gt_logger.gt_log("mbed-host-test-runner: stopped and returned '%s'"% result, print_text=verbose)
    return (result, htrun_output, testcase_duration, duration, result_test_cases, test_cases_summary)
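
The local-resource branch of the command assembly above can be tried in isolation. The sketch below re-creates the mbedhtrun invocation for a hypothetical K64F device, using only the flags that appear in the example (-m, -p, -f, -d, -C, -c, -t); every concrete value is made up for illustration, and the printed line mirrors what the "calling mbedhtrun: ..." log would show.

# Sketch only: rebuild the mbedhtrun command line with hypothetical values.
image_path = "./build/frdm-k64f-gcc/test/mbed-drivers-test-dev_null.bin"
cmd = ["mbedhtrun",
       "-m", "K64F",
       "-p", "COM4:9600",
       "-f", '"%s"' % image_path,
       "-d", "E:",                 # mount point
       "-C", str(4),               # program cycle (sec)
       "-c", "shell",              # copy method
       "-t", "0240000031754e45"]   # target id (made up)
print("calling mbedhtrun: %s" % " ".join(cmd))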
Esempio n. 51
0
def run_host_test(image_path,
                  disk,
                  port,
                  yotta_target,
                  target_id,
                  duration=10,
                  micro=None,
                  reset=None,
                  reset_tout=None,
                  verbose=False,
                  copy_method=None,
                  program_cycle_s=None,
                  digest_source=None,
                  json_test_cfg=None,
                  max_failed_properties=5,
                  enum_host_tests_path=None,
                  run_app=None):
    """! This function runs host test supervisor (executes mbedhtrun) and checks output from host test process.
    @return Tuple with test results, test output, test duration times and test case results
    @param image_path Path to binary file for flashing
    @param disk Currently mounted mbed-enabled devices disk (mount point)
    @param port Currently mounted mbed-enabled devices serial port (console)
    @param duration Test case timeout
    @param micro Mbed-enabled device name
    @param reset Reset type
    @param reset_tout Reset timeout (sec)
    @param verbose Verbose mode flag
    @param copy_method Copy method type (name)
    @param program_cycle_s Wait after flashing delay (sec)
    @param json_test_cfg Additional test configuration file path passed to host tests in JSON format
    @param max_failed_properties After how many unknown properties we will assume test is not ported
    @param enum_host_tests_path Directory where locally defined host tests may reside
    @param run_app Run application mode flag (we run application and grab serial port data)
    @param digest_source if None mbedhtrun will be executed. If 'stdin',
                           stdin will be used via StdInObserver or file (if
                           file name was given as switch option)
    """

    def run_command(cmd):
        """! Runs command and prints proc stdout on screen """
        p = None
        try:
            p = Popen(cmd,
                    stdout=PIPE,
                    stderr=STDOUT)
        except OSError as e:
            print "mbedgt: run_command(%s) failed: %s"% (str(cmd), str(e))
        return p

    if verbose:
        gt_logger.gt_log("selecting test case observer...")
        if digest_source:
            gt_logger.gt_log_tab("selected digest source: %s"% digest_source)

    # Select who will digest test case serial port data
    if digest_source == 'stdin':
        # When we want to scan stdin for test results
        raise NotImplementedError
    elif digest_source is not None:
        # When we want to open file to scan for test results
        raise NotImplementedError

    # Command executing CLI for host test supervisor (in detect-mode)
    cmd = ["mbedhtrun",
            '-d', disk,
            '-p', port,
            '-f', '"%s"'% image_path,
            ]

    # Add extra parameters to host_test
    if program_cycle_s is not None:
        cmd += ["-C", str(program_cycle_s)]
    if copy_method is not None:
        cmd += ["-c", copy_method]
    if micro is not None:
        cmd += ["-m", micro]
    if target_id is not None:
        cmd += ["-t", target_id]
    if reset is not None:
        cmd += ["-r", reset]
    if reset_tout is not None:
        cmd += ["-R", str(reset_tout)]
    if json_test_cfg is not None:
        cmd += ["--test-cfg", '"%s"' % str(json_test_cfg)]
    if run_app is not None:
        cmd += ["--run"]    # -f stores binary name!
    if enum_host_tests_path:
        cmd += ["-e", '"%s"'% enum_host_tests_path]

    if verbose:
        gt_logger.gt_log_tab("calling mbedhtrun: %s"% " ".join(cmd))
    gt_logger.gt_log("mbed-host-test-runner: started")

    htrun_output = ''
    start_time = time()

    p = run_command(cmd)
    for line in iter(p.stdout.readline, b''):
        htrun_output += line
        # When dumping output to file both \r and \n will be a new line
        # To avoid this "extra new-line" we only use \n at the end
        if verbose:
            sys.stdout.write(line.rstrip() + '\n')
            sys.stdout.flush()

    # Check if process was terminated by signal
    returncode = p.wait()
    if returncode < 0:
        return returncode

    end_time = time()
    testcase_duration = end_time - start_time   # Test case duration from reset to {end}

    result = get_test_result(htrun_output)
    result_test_cases = get_testcase_result(htrun_output)
    test_cases_summary = get_testcase_summary(htrun_output)
    get_coverage_data(yotta_target, htrun_output)

    if verbose:
        gt_logger.gt_log("mbed-host-test-runner: stopped")
        gt_logger.gt_log("mbed-host-test-runner: returned '%s'"% result)
    return (result, htrun_output, testcase_duration, duration, result_test_cases, test_cases_summary)
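
A minimal caller sketch for the run_host_test variant above; it requires a connected mbed board with mbedhtrun installed, all device values are hypothetical, and the guard only illustrates how the six-element result tuple (or a negative error code) is handled.

# Sketch only: call run_host_test and unpack its result.
host_test_result = run_host_test("./build/frdm-k64f-gcc/test/mbed-drivers-test-dev_null.bin",
                                 "E:",                # disk / mount point (hypothetical)
                                 "COM4:9600",         # serial port with baudrate
                                 "frdm-k64f-gcc",     # yotta_target
                                 "0240000031754e45",  # target_id (made up)
                                 micro="K64F",
                                 copy_method="shell",
                                 program_cycle_s=4,
                                 verbose=True)

if isinstance(host_test_result, tuple):
    (result, output, duration, timeout,
     result_test_cases, test_cases_summary) = host_test_result
    print("result=%s in %.2f sec, %d test case(s)" % (result, duration, len(result_test_cases)))
else:
    print("mbedhtrun terminated with code %d" % host_test_result)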