Example #1
def run_multiple_devices_cases(env, extra_data):
    """
     extra_data can be two types of value
     1. as dict:
            e.g.
                {"name":  "gpio master/slave test example",
                "child case num": 2,
                "config": "release",
                "env_tag": "UT_T2_1"}
     2. as list dict:
            e.g.
               [{"name":  "gpio master/slave test example1",
                "child case num": 2,
                "config": "release",
                "env_tag": "UT_T2_1"},
               {"name":  "gpio master/slave test example2",
                "child case num": 2,
                "config": "release",
                "env_tag": "UT_T2_1"}]

    """
    failed_cases = []
    case_config = format_test_case_config(extra_data,
                                          env.default_dut_cls.TARGET)
    duts = {}
    for ut_config in case_config:
        Utility.console_log("Running unit test for config: " + ut_config, "O")
        for one_case in case_config[ut_config]:
            log_test_case("multi-device test", one_case, ut_config)
            result = False
            junit_test_case = TinyFW.JunitReport.create_test_case(
                format_case_name(one_case))
            try:
                result = run_one_multiple_devices_case(duts, ut_config, env,
                                                       one_case,
                                                       one_case.get('app_bin'),
                                                       junit_test_case)
            except TestCaseFailed:
                pass  # result is False, this is handled by the finally block
            except Exception as e:
                handle_unexpected_exception(junit_test_case, e)
            finally:
                if result:
                    Utility.console_log("Success: " +
                                        format_case_name(one_case),
                                        color="green")
                else:
                    failed_cases.append(format_case_name(one_case))
                    Utility.console_log("Failed: " +
                                        format_case_name(one_case),
                                        color="red")
                TinyFW.JunitReport.test_case_finish(junit_test_case)
        # close all DUTs when finished running all cases for one config
        for dut in duts:
            env.close_dut(dut)
        duts = {}

    # raise exception if any case fails
    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise TestCaseFailed(*failed_cases)
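A minimal invocation sketch (illustrative, not part of the original source): it assumes `env` is the test Env instance supplied by the surrounding framework, and the dict mirrors the single-case shape documented in the docstring above.

# Hypothetical usage: `env` is assumed to come from the test framework;
# the case entry only follows the documented dict shape.
run_multiple_devices_cases(env, {
    "name": "gpio master/slave test example",  # unit test case name
    "child case num": 2,                       # number of DUTs the case coordinates
    "config": "release",                       # app config the case was built with
    "env_tag": "UT_T2_1",                      # environment the case must run in
})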
Example #2
def run_unit_test_cases(env, extra_data):
    """
    extra_data can be one of three types of value
    1. as string:
               1. "case_name"
               2. "case_name [reset=RESET_REASON]"
    2. as dict:
               1. with key like {"name": "Intr_alloc test, shared ints"}
               2. with key like {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET", "config": "psram"}
    3. as list of string or dict:
               [case1, case2, case3, {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}, ...]

    :param env: test env instance
    :param extra_data: the case name or case list or case dictionary
    :return: None
    """

    case_config = format_test_case_config(extra_data)

    # We don't want to stop on a failed case (except for special scenarios we can't handle).
    # This list records every case that fails during execution; before the test
    # function exits it is used to report the failures and raise an exception.
    failed_cases = []

    for ut_config in case_config:
        Utility.console_log("Running unit test for config: " + ut_config, "O")
        dut = env.get_dut("unit-test-app", app_path=UT_APP_PATH, app_config_name=ut_config, allow_dut_exception=True)
        if len(case_config[ut_config]) > 0:
            # a case may ship its own app binary; if so, flash it instead of the default build
            replace_app_bin(dut, "unit-test-app", case_config[ut_config][0].get('app_bin'))
        dut.start_app()
        Utility.console_log("Download finished, start running test cases", "O")

        for one_case in case_config[ut_config]:
            log_test_case("test case", one_case, ut_config)
            performance_items = []
            # create junit report test case
            junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
            try:
                run_one_normal_case(dut, one_case, junit_test_case)
                performance_items = dut.get_performance_items()
            except TestCaseFailed:
                failed_cases.append(format_case_name(one_case))
            except Exception as e:
                handle_unexpected_exception(junit_test_case, e)
                failed_cases.append(format_case_name(one_case))
            finally:
                TinyFW.JunitReport.update_performance(performance_items)
                TinyFW.JunitReport.test_case_finish(junit_test_case)
        # close the DUT when finished running all cases for one config
        env.close_dut(dut.name)

    # raise exception if any case fails
    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise TestCaseFailed(*failed_cases)
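For comparison, a hedged usage sketch covering the string, dict, and list forms accepted above. The case names are taken from the docstring; `env` is again assumed to be provided by the framework.

# String form: a single case, optionally with an expected reset reason.
run_unit_test_cases(env, "Intr_alloc test, shared ints")

# List form mixing strings and dicts, as documented above.
run_unit_test_cases(env, [
    "Intr_alloc test, shared ints",
    {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET", "config": "psram"},
])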
Example #3
def run_multiple_stage_cases(env, extra_data):
    """
    extra_data can be one of two types of value
    1. as dict: mandatory keys: "name" and "child case num", optional keys: "reset" and others
    2. as list of string or dict:
               [case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]

    :param env: test env instance
    :param extra_data: the case name or case list or case dictionary
    :return: None
    """

    case_config = format_test_case_config(extra_data)

    # We don't want to stop on a failed case (except for special scenarios we can't handle).
    # This list records every case that fails during execution; before the test
    # function exits it is used to report the failures and raise an exception.
    failed_cases = []

    for ut_config in case_config:
        Utility.console_log("Running unit test for config: " + ut_config, "O")
        dut = env.get_dut("unit-test-app", app_path=UT_APP_PATH, app_config_name=ut_config, allow_dut_exception=True)
        if len(case_config[ut_config]) > 0:
            # a case may ship its own app binary; if so, flash it instead of the default build
            replace_app_bin(dut, "unit-test-app", case_config[ut_config][0].get('app_bin'))
        dut.start_app()

        for one_case in case_config[ut_config]:
            log_test_case("multi-stage test", one_case, ut_config)
            performance_items = []
            junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
            try:
                run_one_multiple_stage_case(dut, one_case, junit_test_case)
                performance_items = dut.get_performance_items()
            except TestCaseFailed:
                failed_cases.append(format_case_name(one_case))
            except Exception as e:
                handle_unexpected_exception(junit_test_case, e)
                failed_cases.append(format_case_name(one_case))
            finally:
                TinyFW.JunitReport.update_performance(performance_items)
                TinyFW.JunitReport.test_case_finish(junit_test_case)
        # close the DUT when finished running all cases for one config
        env.close_dut(dut.name)

    # raise exception if any case fails
    if failed_cases:
        Utility.console_log("Failed Cases:", color="red")
        for _case_name in failed_cases:
            Utility.console_log("\t" + _case_name, color="red")
        raise TestCaseFailed(*failed_cases)
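And a sketch for the multi-stage runner, reusing the example case from the docstring (`env` assumed to come from the framework as before).

# A multi-stage case declares how many stages ("child case num") it contains;
# the runner drives the DUT through each stage in sequence.
run_multiple_stage_cases(env, {"name": "restart from PRO CPU", "child case num": 2})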