Example #1
def test_get_test_pins(self):
    yotta_config = YottaConfig()
    yotta_config.set_yotta_config(self.YOTTA_CONFIG_LONG)
    self.assertEqual(yotta_config.get_baudrate(), 9600)
    self.assertIn('spi', yotta_config.get_test_pins())
    self.assertIn('i2c', yotta_config.get_test_pins())
    self.assertIn('serial', yotta_config.get_test_pins())
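
The self.YOTTA_CONFIG_LONG fixture is not shown in this excerpt. A minimal sketch of a config dict that would satisfy the assertions above could look like the following; the key layout ("mbed-os" / "stdio" / "default-baud" and "hardware" / "test-pins") and the pin values are assumptions, not the verbatim fixture.

# Hypothetical sketch of a YOTTA_CONFIG_LONG-style fixture; only the asserted
# values (baudrate 9600 and the 'spi'/'i2c'/'serial' keys) come from the test.
YOTTA_CONFIG_LONG = {
    "mbed-os": {
        "stdio": {
            "default-baud": 9600,  # get_baudrate() should return this
        },
    },
    "hardware": {
        "test-pins": {  # get_test_pins() should expose these keys
            "spi": {"mosi": "PTD2", "miso": "PTD3", "sclk": "PTD1"},
            "i2c": {"sda": "PTE25", "scl": "PTE24"},
            "serial": {"tx": "PTC17", "rx": "PTC16"},
        },
    },
}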
Example #2
def main_cli(opts, args, gt_instance_uuid=None):
    """! This is main CLI function with all command line parameters
    @details This function also implements CLI workflow depending on CLI parameters inputed
    @return This function doesn't return, it exits to environment with proper success code
    """

    if not MBED_LMTOOLS:
        gt_logger.gt_log_err("error: mbed-ls proprietary module not installed")
        return (-1)

    if not MBED_HOST_TESTS:
        gt_logger.gt_log_err(
            "error: mbed-host-tests proprietary module not installed")
        return (-1)

    # Enable or disable colourised logger output (colours are disabled with --plain)
    gt_logger.colorful(not opts.plain)

    # List available test binaries (names, no extension)
    if opts.list_binaries:
        list_binaries_for_targets()
        return (0)

    # Prints version and exits
    if opts.version:
        print_version()
        return (0)

    # We will load hooks from JSON file to support extra behaviour during test execution
    greentea_hooks = GreenteaHooks(
        opts.hooks_json) if opts.hooks_json else None

    # Capture alternative test console inputs, used e.g. by the 'yotta test' command
    if opts.digest_source:
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
        host_test_result = run_host_test(
            None,
            None,
            None,
            None,
            None,
            hooks=greentea_hooks,
            digest_source=opts.digest_source,
            enum_host_tests_path=enum_host_tests_path,
            verbose=opts.verbose_test_result_only)

        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
        status = TEST_RESULTS.index(
            single_test_result) if single_test_result in TEST_RESULTS else -1
        return (status)

    ### Read yotta module basic information
    yotta_module = YottaModule()
    yotta_module.init()  # Read actual yotta module data

    # Abort if no 'greentea-client' dependency is present in the module.json of the repo under test
    if not yotta_module.check_greentea_client():
        gt_logger.gt_log("""
        *****************************************************************************************
        * We've noticed that NO 'greentea-client' module is specified in                        *
        * dependency/testDependency section of this module's 'module.json' file.                *
        *                                                                                       *
        * This version of Greentea requires 'greentea-client' module.                           *
        * Please downgrade to Greentea before v0.2.0:                                           *
        *                                                                                       *
        * $ pip install "mbed-greentea<0.2.0" --upgrade                                         *
        *                                                                                       *
        * or port your tests to new Async model: https://github.com/ARMmbed/greentea/pull/78    *
        *****************************************************************************************
        """)
        return (0)

    ### Selecting yotta targets to process
    yt_targets = []  # yotta targets specified by the user to process during this run
    if opts.list_of_targets:
        yt_targets = opts.list_of_targets.split(',')
    else:
        # Trying to use locally set yotta target
        gt_logger.gt_log("checking for yotta target in current directory")
        gt_logger.gt_log_tab("reason: no --target switch set")
        current_target = get_mbed_target_from_current_dir()
        if current_target:
            gt_logger.gt_log("assuming default target as '%s'" %
                             gt_logger.gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            yt_targets = [current_target]
        else:
            gt_logger.gt_log_tab(
                "yotta target in current directory is not set")
            gt_logger.gt_log_err(
                "yotta target is not specified. Use '%s' or '%s' command to set target"
                % (gt_logger.gt_bright('mbedgt -t <yotta_target>'),
                   gt_logger.gt_bright('yotta target <yotta_target>')))
            return (-1)

    ### Query with mbedls for available mbed-enabled devices
    gt_logger.gt_log("detecting connected mbed-enabled devices...")

    # Detect devices connected to system
    mbeds = mbed_lstools.create()
    mbeds_list = mbeds.list_mbeds_ext()

    ready_mbed_devices = []  # Devices which can be used (are fully detected)

    if mbeds_list:
        gt_logger.gt_log(
            "detected %d device%s" %
            (len(mbeds_list), 's' if len(mbeds_list) != 1 else ''))
        for mut in mbeds_list:
            if not all(mut.values()):
                gt_logger.gt_log_err(
                    "can't detect all properties of the device!")
                for prop in mut:
                    if not mut[prop]:
                        gt_logger.gt_log_tab("property '%s' is '%s'" %
                                             (prop, str(mut[prop])))
            else:
                ready_mbed_devices.append(mut)
                gt_logger.gt_log_tab(
                    "detected '%s' -> '%s', console at '%s', mounted at '%s', target id '%s'"
                    % (gt_logger.gt_bright(mut['platform_name']),
                       gt_logger.gt_bright(mut['platform_name_unique']),
                       gt_logger.gt_bright(mut['serial_port']),
                       gt_logger.gt_bright(mut['mount_point']),
                       gt_logger.gt_bright(mut['target_id'])))
    else:
        gt_logger.gt_log_err("no devices detected")
        return (RET_NO_DEVICES)

    ### Use yotta to search mapping between platform names and available platforms
    # Convert platform:target, ... mapping to data structure
    map_platform_to_yt_target = {}
    if opts.map_platform_to_yt_target:
        gt_logger.gt_log(
            "user defined platform -> target supported mapping definition (specified with --map-target switch)"
        )
        p_to_t_mappings = opts.map_platform_to_yt_target.split(',')
        for mapping in p_to_t_mappings:
            if len(mapping.split(':')) == 2:
                platform, yt_target = mapping.split(':')
                if platform not in map_platform_to_yt_target:
                    map_platform_to_yt_target[platform] = []
                map_platform_to_yt_target[platform].append(yt_target)
                gt_logger.gt_log_tab(
                    "mapped platform '%s' to be compatible with '%s'" %
                    (gt_logger.gt_bright(platform),
                     gt_logger.gt_bright(yt_target)))
            else:
                gt_logger.gt_log_tab(
                    "unknown format '%s', use 'platform:target' format" %
                    mapping)

    # Check if the mbed classic target name can be translated to a yotta target name

    mut_info_map = {}  # platform_name : mut_info dict; extract yt_targets with e.g. [k["yotta_target"] for k in d['K64F']["yotta_targets"]]

    for mut in ready_mbed_devices:
        platform_name = mut['platform_name']
        if platform_name not in mut_info_map:
            mut_info = get_mbed_clasic_target_info(
                platform_name,
                map_platform_to_yt_target,
                use_yotta_registry=opts.yotta_search_for_mbed_target)
            if mut_info:
                mut_info_map[platform_name] = mut_info

    ### List of unique ready platform names
    unique_mbed_devices = list(set(mut_info_map.keys()))

    ### Identify which targets have to be built because matching platforms are present
    yt_target_platform_map = {}  # yt_target_to_test : platforms to test on

    for yt_target in yt_targets:
        for platform_name in unique_mbed_devices:
            if yt_target in [
                    k["yotta_target"]
                    for k in mut_info_map[platform_name]["yotta_targets"]
            ]:
                if yt_target not in yt_target_platform_map:
                    yt_target_platform_map[yt_target] = []
                if platform_name not in yt_target_platform_map[yt_target]:
                    yt_target_platform_map[yt_target].append(platform_name)

    ### Optionally filter in only specific target IDs
    accepted_target_ids = None
    if opts.use_target_ids:
        gt_logger.gt_log(
            "filtering out target ids not on below list (specified with --use-tids switch)"
        )
        accepted_target_ids = opts.use_target_ids.split(',')
        for tid in accepted_target_ids:
            gt_logger.gt_log_tab("accepting target id '%s'" %
                                 gt_logger.gt_bright(tid))

    test_exec_retcode = 0  # Incremented each time a test case result is not 'OK'
    test_platforms_match = 0  # Count how many tests were actually run with current settings
    target_platforms_match = 0  # Count how many platforms were actually tested with current settings

    test_report = {}  # Test report used to export to Junit, HTML etc...
    muts_to_test = []  # MUTs to actually be tested
    test_queue = Queue()  # contains test_bin and image_path information for each test case
    test_result_queue = Queue()  # used to store results of each thread
    execute_threads = []  # list of threads to run test cases

    ### Check that the --parallel argument is an integer greater than or equal to 1
    try:
        parallel_test_exec = int(opts.parallel_test_exec)
        if parallel_test_exec < 1:
            parallel_test_exec = 1
    except ValueError:
        gt_logger.gt_log_err(
            "argument of mode --parallel is not a int, disable parallel mode")
        parallel_test_exec = 1

    # Values used to generate random seed for test execution order shuffle
    SHUFFLE_SEED_ROUND = 10  # Value used to round float random seed
    shuffle_random_seed = round(random.random(), SHUFFLE_SEED_ROUND)

    # Set shuffle seed if it is provided with command line option
    if opts.shuffle_test_seed:
        shuffle_random_seed = round(float(opts.shuffle_test_seed),
                                    SHUFFLE_SEED_ROUND)

    ### Testing procedures, for each target, for each target's compatible platform
    for yotta_target_name in yt_target_platform_map:
        gt_logger.gt_log(
            "processing '%s' yotta target compatible platforms..." %
            gt_logger.gt_bright(yotta_target_name))

        for platform_name in yt_target_platform_map[yotta_target_name]:
            gt_logger.gt_log("processing '%s' platform..." %
                             gt_logger.gt_bright(platform_name))

            ### Select MUTs to test from the list of available MUTs
            mut = None
            number_of_parallel_instances = 1
            for mbed_dev in ready_mbed_devices:
                if accepted_target_ids and mbed_dev[
                        'target_id'] not in accepted_target_ids:
                    continue

                if mbed_dev['platform_name'] == platform_name:
                    mut = mbed_dev
                    muts_to_test.append(mbed_dev)
                    gt_logger.gt_log("using platform '%s' for test:" %
                                     gt_logger.gt_bright(platform_name))
                    for k in mbed_dev:
                        gt_logger.gt_log_tab("%s = '%s'" % (k, mbed_dev[k]))
                    if number_of_parallel_instances < parallel_test_exec:
                        number_of_parallel_instances += 1
                    else:
                        break

            # Configuration print mode:
            if opts.verbose_test_configuration_only:
                continue

            if mut:
                target_platforms_match += 1

                # Demo mode: --run implementation (--run was already added to mbedhtrun)
                # We want to pass the binary name to mbedhtrun (--run NAME => -f NAME) and run only one binary
                if opts.run_app:
                    gt_logger.gt_log("running '%s' for '%s'" %
                                     (gt_logger.gt_bright(opts.run_app),
                                      gt_logger.gt_bright(yotta_target_name)))
                    disk = mut['mount_point']
                    port = mut['serial_port']
                    micro = mut['platform_name']
                    program_cycle_s = mut_info_map[platform_name][
                        'properties']['program_cycle_s']
                    copy_method = opts.copy_method if opts.copy_method else 'shell'
                    enum_host_tests_path = get_local_host_tests_dir(
                        opts.enum_host_tests)

                    yotta_config = YottaConfig()
                    yotta_config.init(yotta_target_name)

                    yotta_config_baudrate = yotta_config.get_baudrate()

                    # Force the configuration-specific baudrate
                    if port:
                        port = "%s:%d" % (port, yotta_config_baudrate)

                    test_platforms_match += 1
                    host_test_result = run_host_test(
                        opts.run_app,
                        disk,
                        port,
                        yotta_target_name,
                        mut['target_id'],
                        micro=micro,
                        copy_method=copy_method,
                        program_cycle_s=program_cycle_s,
                        digest_source=opts.digest_source,
                        json_test_cfg=opts.json_test_configuration,
                        run_app=opts.run_app,
                        enum_host_tests_path=enum_host_tests_path,
                        verbose=True)

                    single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
                    status = TEST_RESULTS.index(
                        single_test_result
                    ) if single_test_result in TEST_RESULTS else -1
                    if single_test_result != TEST_RESULT_OK:
                        test_exec_retcode += 1
                    continue

                # Regression test mode:
                # Building sources for given target and perform normal testing

                yotta_result, yotta_ret = True, 0  # Skip build and assume 'yotta build' was successful
                if opts.skip_yotta_build:
                    gt_logger.gt_log(
                        "skipping calling yotta (specified with --skip-build option)"
                    )
                else:
                    yotta_result, yotta_ret = build_with_yotta(
                        yotta_target_name,
                        verbose=opts.verbose,
                        build_to_release=opts.build_to_release,
                        build_to_debug=opts.build_to_debug)

                # We need to stop executing if yotta build fails
                if not yotta_result:
                    gt_logger.gt_log_err("yotta returned %d" % yotta_ret)
                    return (RET_YOTTA_BUILD_FAIL)

                if opts.only_build_tests:
                    continue

                # Build phase will be followed by test execution for each target
                if yotta_result and not opts.only_build_tests:
                    binary_type = mut_info_map[platform_name]['properties'][
                        'binary_type']
                    ctest_test_list = load_ctest_testsuite(
                        os.path.join('.', 'build', yotta_target_name),
                        binary_type=binary_type)
                    # TODO: handle the case where there are no tests to execute

                filtered_ctest_test_list = create_filtered_test_list(
                    ctest_test_list, opts.test_by_names, opts.skip_test)

                gt_logger.gt_log(
                    "running %d test%s for target '%s' and platform '%s'" %
                    (len(filtered_ctest_test_list),
                     "s" if len(filtered_ctest_test_list) != 1 else "",
                     gt_logger.gt_bright(yotta_target_name),
                     gt_logger.gt_bright(platform_name)))

                # Test execution order can be shuffled (also with provided random seed)
                # for test execution reproduction.
                filtered_ctest_test_list_keys = filtered_ctest_test_list.keys()
                if opts.shuffle_test_order:
                    # We want to shuffle test names randomly
                    random.shuffle(filtered_ctest_test_list_keys,
                                   lambda: shuffle_random_seed)

                for test_bin in filtered_ctest_test_list_keys:
                    image_path = filtered_ctest_test_list[test_bin]
                    test = {"test_bin": test_bin, "image_path": image_path}
                    test_queue.put(test)

                #for test_bin, image_path in filtered_ctest_test_list.iteritems():
                #    test = {"test_bin":test_bin, "image_path":image_path}
                #    test_queue.put(test)

                number_of_threads = 0
                for mut in muts_to_test:
                    #################################################################
                    # Experimental, parallel test execution
                    #################################################################
                    if number_of_threads < parallel_test_exec:
                        args = (test_result_queue, test_queue, opts, mut,
                                mut_info, yotta_target_name, greentea_hooks)
                        t = Thread(target=run_test_thread, args=args)
                        execute_threads.append(t)
                        number_of_threads += 1

    gt_logger.gt_log_tab(
        "use %s instance%s for testing" %
        (len(execute_threads), 's' if len(execute_threads) != 1 else ''))
    for t in execute_threads:
        t.daemon = True
        t.start()

    # Merge partial test reports from different threads into the final test report
    for t in execute_threads:
        try:
            t.join()  # blocking
            test_return_data = test_result_queue.get(False)
        except Exception as e:
            # No test report generated
            gt_logger.gt_log_err("could not generate test report" + str(e))
            test_exec_retcode += -1000
            return test_exec_retcode

        test_platforms_match += test_return_data['test_platforms_match']
        test_exec_retcode += test_return_data['test_exec_retcode']
        partial_test_report = test_return_data['test_report']
        # todo: find better solution, maybe use extend
        for report_key in partial_test_report.keys():
            if report_key not in test_report:
                test_report[report_key] = {}
                test_report.update(partial_test_report)
            else:
                test_report[report_key].update(partial_test_report[report_key])

    if opts.verbose_test_configuration_only:
        print
        print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
        return (0)

    gt_logger.gt_log("all tests finished!")

    # Execute post-test hooks for each test
    for yotta_target in test_report:
        test_name_list = []  # All test case names for particular yotta target
        for test_name in test_report[yotta_target]:
            test = test_report[yotta_target][test_name]
            # Test completed (passed or failed)
            if test['single_test_result'] in [
                    TEST_RESULT_OK, TEST_RESULT_FAIL
            ]:
                test_name_list.append(test_name)
                # Call hook executed for each test, just after all tests are finished
                if greentea_hooks:
                    # We can execute this test hook just after all tests are finished ('hook_post_test_end')
                    format = {
                        "test_name": test_name,
                        "test_bin_name": test['test_bin_name'],
                        "image_path": test['image_path'],
                        "build_path": test['build_path'],
                        "build_path_abs": test['build_path_abs'],
                        "yotta_target_name": yotta_target,
                    }
                    greentea_hooks.run_hook_ext('hook_post_test_end', format)
        if greentea_hooks:
            # Call hook executed for each yotta target, just after all tests are finished
            build_path = os.path.join("./build", yotta_target)
            build_path_abs = os.path.abspath(build_path)
            # We can execute this test hook just after all tests are finished ('hook_post_test_end')
            format = {
                "build_path": build_path,
                "build_path_abs": build_path_abs,
                "test_name_list": test_name_list,
                "yotta_target_name": yotta_target,
            }
            greentea_hooks.run_hook_ext('hook_post_all_test_end', format)

    # This tool is designed to work in CI.
    # We want to return success codes based on tool actions:
    # only if tests were executed and all of them passed do we
    # return 0 (success)
    if not opts.only_build_tests:
        # Prints shuffle seed
        gt_logger.gt_log("shuffle seed: %.*f" %
                         (SHUFFLE_SEED_ROUND, shuffle_random_seed))

        # Reports (to file)
        if opts.report_junit_file_name:
            gt_logger.gt_log("exporting to JUnit file '%s'..." %
                             gt_logger.gt_bright(opts.report_junit_file_name))
            junit_report = exporter_testcase_junit(
                test_report, test_suite_properties=yotta_module.get_data())
            with open(opts.report_junit_file_name, 'w') as f:
                f.write(junit_report)
        if opts.report_text_file_name:
            gt_logger.gt_log("exporting to text '%s'..." %
                             gt_logger.gt_bright(opts.report_text_file_name))

            text_report, text_results = exporter_text(test_report)
            text_testcase_report, text_testcase_results = exporter_testcase_text(
                test_report)
            with open(opts.report_text_file_name, 'w') as f:
                f.write('\n'.join([
                    text_report, text_results, text_testcase_report,
                    text_testcase_results
                ]))

        # Reports (to console)
        if opts.report_json:
            # We will not print summary and json report together
            gt_logger.gt_log("json test report:")
            print exporter_json(test_report)
        else:
            # Final summary
            if test_report:
                # Test suite report
                gt_logger.gt_log("test suite report:")
                text_report, text_results = exporter_text(test_report)
                print text_report
                gt_logger.gt_log("test suite results: " + text_results)
                # test case detailed report
                gt_logger.gt_log("test case report:")
                text_testcase_report, text_testcase_results = exporter_testcase_text(
                    test_report, test_suite_properties=yotta_module.get_data())
                print text_testcase_report
                gt_logger.gt_log("test case results: " + text_testcase_results)

        # This flag guards 'build only' so we expect only yotta errors
        if test_platforms_match == 0:
            # No tests were executed
            gt_logger.gt_log_warn(
                "no platform/target matching tests were found!")
            test_exec_retcode += -10
        if target_platforms_match == 0:
            # No platforms were tested
            gt_logger.gt_log_warn("no target matching platforms were found!")
            test_exec_retcode += -100

    return (test_exec_retcode)
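
The hook names used above ('hook_test_end', 'hook_post_test_end', 'hook_post_all_test_end') and the keys of the format dictionaries suggest a hooks file along these lines. This is only a sketch: the top-level "hooks" key and the {placeholder} expansion syntax are assumptions about what GreenteaHooks expects, not taken from this excerpt.

import json

# Hypothetical hooks file consumed via --hooks-json; the schema is assumed.
hooks = {
    "hooks": {
        # Run after each finished test; {test_name} and {image_path} mirror
        # the format dictionary keys passed to run_hook_ext() above.
        "hook_test_end": "echo test {test_name} finished, binary: {image_path}",
        # Run once per yotta target after all tests are finished.
        "hook_post_all_test_end": "echo all tests done for {yotta_target_name}",
    }
}

with open("greentea_hooks.json", "w") as f:
    json.dump(hooks, f, indent=4)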
Example #3
def run_test_thread(test_result_queue, test_queue, opts, mut, mut_info,
                    yotta_target_name, greentea_hooks):
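    """Worker run in a separate thread: pops tests from test_queue, flashes and
    runs each binary on the given MUT via run_host_test(), and pushes a partial
    test report onto test_result_queue.
    """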
    test_exec_retcode = 0
    test_platforms_match = 0
    test_report = {}
    yotta_config_baudrate = None  # Serial port baudrate forced by the yotta configuration

    yotta_config = YottaConfig()
    yotta_config.init(yotta_target_name)

    yotta_config_baudrate = yotta_config.get_baudrate()

    while not test_queue.empty():
        try:
            test = test_queue.get(False)
        except Exception as e:
            gt_logger.gt_log_err(str(e))
            break

        test_result = 'SKIPPED'

        disk = mut['mount_point']
        port = mut['serial_port']
        micro = mut['platform_name']
        program_cycle_s = mut_info['properties']['program_cycle_s']
        copy_method = opts.copy_method if opts.copy_method else 'shell'
        verbose = opts.verbose_test_result_only
        enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

        # Force the configuration-specific baudrate
        if port:
            port = "%s:%d" % (port, yotta_config_baudrate)

        test_platforms_match += 1
        host_test_result = run_host_test(
            test['image_path'],
            disk,
            port,
            yotta_target_name,
            mut['target_id'],
            micro=micro,
            copy_method=copy_method,
            program_cycle_s=program_cycle_s,
            digest_source=opts.digest_source,
            json_test_cfg=opts.json_test_configuration,
            enum_host_tests_path=enum_host_tests_path,
            verbose=verbose)

        # Some error in htrun, abort test execution
        if host_test_result < 0:
            break

        single_test_result, single_test_output, single_testduration, single_timeout, result_test_cases, test_cases_summary = host_test_result
        test_result = single_test_result

        build_path = os.path.join("./build", yotta_target_name)
        build_path_abs = os.path.abspath(build_path)

        if single_test_result != TEST_RESULT_OK:
            test_exec_retcode += 1

        if single_test_result in [TEST_RESULT_OK, TEST_RESULT_FAIL]:
            if greentea_hooks:
                # Test completed (passed or failed)
                # We can execute the test hook just after the test is finished ('hook_test_end')
                format = {
                    "test_name": test['test_bin'],
                    "test_bin_name": os.path.basename(test['image_path']),
                    "image_path": test['image_path'],
                    "build_path": build_path,
                    "build_path_abs": build_path_abs,
                    "yotta_target_name": yotta_target_name,
                }
                greentea_hooks.run_hook_ext('hook_test_end', format)

        # Update report for optional reporting feature
        test_suite_name = test['test_bin'].lower()
        if yotta_target_name not in test_report:
            test_report[yotta_target_name] = {}

        if test_suite_name not in test_report[yotta_target_name]:
            test_report[yotta_target_name][test_suite_name] = {}

        if not test_cases_summary and not result_test_cases:
            gt_logger.gt_log_warn("test case summary event not found")
            gt_logger.gt_log_tab(
                "no test case report present, assuming test suite to be a single test case!"
            )

            # We will map the test suite result to a test case to
            # output a valid test case in the report

            # Generate an "artificial" test case name from the test suite name
            # E.g.:
            #   mbed-drivers-test-dev_null -> dev_null
            test_case_name = test_suite_name
            test_str_idx = test_suite_name.find("-test-")
            if test_str_idx != -1:
                test_case_name = test_case_name[test_str_idx + 6:]

            gt_logger.gt_log_tab("test suite: %s" % test_suite_name)
            gt_logger.gt_log_tab("test case: %s" % test_case_name)

            # Test case result: OK, FAIL or ERROR
            tc_result_text = {
                "OK": "OK",
                "FAIL": "FAIL",
            }.get(single_test_result, 'ERROR')

            # Test case integer success code OK, FAIL and ERROR: (0, >0, <0)
            tc_result = {
                "OK": 0,
                "FAIL": 1024,
                "ERROR": -1024,
            }.get(tc_result_text, -2048)

            # Test case passes and failures: (1 pass, 0 failures) or (0 passes, 1 failure)
            tc_passed, tc_failed = {
                0: (1, 0),
            }.get(tc_result, (0, 1))

            # Test case report build for whole binary
            # Add test case made from test suite result to test case report
            result_test_cases = {
                test_case_name: {
                    'duration': single_testduration,
                    'time_start': 0.0,
                    'time_end': 0.0,
                    'utest_log': single_test_output.splitlines(),
                    'result_text': tc_result_text,
                    'passed': tc_passed,
                    'failed': tc_failed,
                    'result': tc_result,
                }
            }

            # Test summary build for whole binary (as a test case)
            test_cases_summary = (
                tc_passed,
                tc_failed,
            )

        gt_logger.gt_log("test on hardware with target id: %s" %
                         (mut['target_id']))
        gt_logger.gt_log(
            "test suite '%s' %s %s in %.2f sec" %
            (test['test_bin'], '.' *
             (80 - len(test['test_bin'])), test_result, single_testduration))

        # Test report build for whole binary
        test_report[yotta_target_name][test_suite_name][
            'single_test_result'] = single_test_result
        test_report[yotta_target_name][test_suite_name][
            'single_test_output'] = single_test_output
        test_report[yotta_target_name][test_suite_name][
            'elapsed_time'] = single_testduration
        test_report[yotta_target_name][test_suite_name][
            'platform_name'] = micro
        test_report[yotta_target_name][test_suite_name][
            'copy_method'] = copy_method
        test_report[yotta_target_name][test_suite_name][
            'testcase_result'] = result_test_cases

        test_report[yotta_target_name][test_suite_name][
            'build_path'] = build_path
        test_report[yotta_target_name][test_suite_name][
            'build_path_abs'] = build_path_abs
        test_report[yotta_target_name][test_suite_name]['image_path'] = test[
            'image_path']
        test_report[yotta_target_name][test_suite_name][
            'test_bin_name'] = os.path.basename(test['image_path'])

        passes_cnt, failures_cnt = 0, 0
        for tc_name in sorted(result_test_cases.keys()):
            gt_logger.gt_log_tab(
                "test case: '%s' %s %s in %.2f sec" %
                (tc_name, '.' * (80 - len(tc_name)),
                 result_test_cases[tc_name].get('result_text', '_'),
                 result_test_cases[tc_name].get('duration', 0.0)))
            if result_test_cases[tc_name].get('result_text', '_') == 'OK':
                passes_cnt += 1
            else:
                failures_cnt += 1

        if test_cases_summary:
            passes, failures = test_cases_summary
            gt_logger.gt_log("test case summary: %d pass%s, %d failur%s" %
                             (passes, '' if passes == 1 else 'es', failures,
                              'e' if failures == 1 else 'es'))
            if passes != passes_cnt or failures != failures_cnt:
                gt_logger.gt_log_err(
                    "test case summary mismatch: reported and counted passes/failures differ!"
                )
                gt_logger.gt_log_tab(
                    "(%d, %d) vs (%d, %d)" %
                    (passes, failures, passes_cnt, failures_cnt))

        if single_test_result != 'OK' and not verbose and opts.report_fails:
            # In some cases we want to print console to see why test failed
            # even if we are not in verbose mode
            gt_logger.gt_log_tab(
                "test failed, reporting console output (specified with --report-fails option)"
            )
            print
            print single_test_output

    #greentea_release_target_id(mut['target_id'], gt_instance_uuid)
    test_result_queue.put({
        'test_platforms_match': test_platforms_match,
        'test_exec_retcode': test_exec_retcode,
        'test_report': test_report
    })
    return
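
For reference, each per-suite entry that run_test_thread adds to its partial report has the shape below. The keys come directly from the assignments above; the values here are illustrative only.

# Illustrative shape of one partial test_report entry (made-up values):
example_test_report = {
    "frdm-k64f-gcc": {                      # yotta_target_name
        "mbed-drivers-test-dev_null": {     # test_suite_name
            "single_test_result": "OK",
            "single_test_output": "<captured console output>",
            "elapsed_time": 4.32,
            "platform_name": "K64F",
            "copy_method": "shell",
            "testcase_result": {},          # per-test-case results dict
            "build_path": "./build/frdm-k64f-gcc",
            "build_path_abs": "/work/module/build/frdm-k64f-gcc",
            "image_path": "./build/frdm-k64f-gcc/test/dev_null.bin",
            "test_bin_name": "dev_null.bin",
        },
    },
}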
Example #4
def test_get_baudrate_None(self):
    yotta_config = YottaConfig()
    yotta_config.set_yotta_config(None)
    self.assertEqual(yotta_config.get_baudrate(),
                     yotta_config.DEFAULT_BAUDRATE)
    self.assertEqual(115200, yotta_config.DEFAULT_BAUDRATE)
Example #5
def test_get_baudrate_default_115200_no_yotta_config(self):
    yotta_config = YottaConfig()
    self.assertEqual(yotta_config.get_baudrate(),
                     yotta_config.DEFAULT_BAUDRATE)
Example #6
def test_get_baudrate_38400(self):
    yotta_config = YottaConfig()
    yotta_config.set_yotta_config(self.YOTTA_CONFIG_SHORT)
    self.assertEqual(yotta_config.get_baudrate(), 38400)
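
As with YOTTA_CONFIG_LONG, the self.YOTTA_CONFIG_SHORT fixture is not shown here. A plausible minimal version, assuming get_baudrate() reads the same "mbed-os" / "stdio" / "default-baud" path, would be:

# Hypothetical YOTTA_CONFIG_SHORT fixture; only the 38400 value is known
# from the test, the key path is an assumption.
YOTTA_CONFIG_SHORT = {
    "mbed-os": {
        "stdio": {
            "default-baud": 38400,
        },
    },
}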
Example #7
def test_get_baudrate_9600(self):
    yotta_config = YottaConfig()
    yotta_config.set_yotta_config(self.YOTTA_CONFIG_LONG)
    self.assertEqual(yotta_config.get_baudrate(), 9600)
Example #8
def get_test_spec_from_yt_module(opts):
    """
    Returns a test specification created from the yotta module environment.

    :return TestSpec:
    """
    ### Read yotta module basic information
    yotta_module = YottaModule()
    yotta_module.init()  # Read actual yotta module data

    # Abort if no 'greentea-client' dependency is present in the module.json of the repo under test
    if not yotta_module.check_greentea_client():
        error = """
        *****************************************************************************************
        * We've noticed that NO 'greentea-client' module is specified in                        *
        * dependency/testDependency section of this module's 'module.json' file.                *
        *                                                                                       *
        * This version of Greentea requires 'greentea-client' module.                           *
        * Please downgrade to Greentea before v0.2.0:                                           *
        *                                                                                       *
        * $ pip install "mbed-greentea<0.2.0" --upgrade                                         *
        *                                                                                       *
        * or port your tests to new Async model: https://github.com/ARMmbed/greentea/pull/78    *
        *****************************************************************************************
        """
        raise YottaError(error)

    test_spec = TestSpec()

    ### Selecting yotta targets to process
    yt_targets = []  # yotta targets specified by the user to process during this run
    if opts.list_of_targets:
        yt_targets = opts.list_of_targets.split(',')
    else:
        # Trying to use locally set yotta target
        gt_logger.gt_log("checking for yotta target in current directory")
        gt_logger.gt_log_tab("reason: no --target switch set")
        current_target = get_mbed_target_from_current_dir()
        if current_target:
            gt_logger.gt_log("assuming default target as '%s'" %
                             gt_logger.gt_bright(current_target))
            # Assuming first target printed by 'yotta search' will be used
            yt_targets = [current_target]
        else:
            gt_logger.gt_log_tab(
                "yotta target in current directory is not set")
            gt_logger.gt_log_err(
                "yotta target is not specified. Use '%s' or '%s' command to set target"
                % (gt_logger.gt_bright('mbedgt -t <yotta_target>'),
                   gt_logger.gt_bright('yotta target <yotta_target>')))
            raise YottaError("Yotta target not set in current directory!")

    ### Use yotta to search mapping between platform names and available platforms
    # Convert platform:target, ... mapping to data structure
    yt_target_to_map_platform = {}
    if opts.map_platform_to_yt_target:
        gt_logger.gt_log(
            "user defined platform -> target supported mapping definition (specified with --map-target switch)"
        )
        for mapping in opts.map_platform_to_yt_target.split(','):
            if len(mapping.split(':')) == 2:
                yt_target, platform = mapping.split(':')
                yt_target_to_map_platform[yt_target] = platform
                gt_logger.gt_log_tab(
                    "mapped yotta target '%s' to be compatible with platform '%s'"
                    % (gt_logger.gt_bright(yt_target),
                       gt_logger.gt_bright(platform)))
            else:
                gt_logger.gt_log_tab(
                    "unknown format '%s', use 'target:platform' format" %
                    mapping)

    for yt_target in yt_targets:
        if yt_target in yt_target_to_map_platform:
            platform = yt_target_to_map_platform[yt_target]
        else:
            # get it from local Yotta target
            platform = get_platform_name_from_yotta_target(yt_target)

        # The toolchain doesn't matter here: unlike the platform, Greentea doesn't need to select anything based on it
        toolchain = yt_target
        yotta_config = YottaConfig()
        yotta_config.init(yt_target)
        baud_rate = yotta_config.get_baudrate()
        base_path = os.path.join('.', 'build', yt_target)
        tb = TestBuild(yt_target, platform, toolchain, baud_rate, base_path)
        test_spec.add_test_builds(yt_target, tb)

        # Find tests
        ctest_test_list = load_ctest_testsuite(
            base_path, binary_type=get_binary_type_for_platform(platform))
        for name, path in ctest_test_list.items():
            t = Test(name)
            t.add_binary(path, TestBinary.BIN_TYPE_BOOTABLE)
            tb.add_test(name, t)

    return test_spec
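
A minimal sketch of how this function might be invoked: the opts object only needs the two attributes read above, so argparse.Namespace works as a stand-in for the real CLI option parser.

from argparse import Namespace

# Hypothetical invocation; only the attributes the function reads are set.
opts = Namespace(
    list_of_targets='frdm-k64f-gcc',  # comma-separated yotta targets
    map_platform_to_yt_target=None,   # no extra target:platform mappings
)
test_spec = get_test_spec_from_yt_module(opts)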