Example #1
def pytest_generate_tests(metafunc):
    """Generate tests depending on command line options."""
    params = []
    ids = []

    with open(metafunc.config.getoption("test_conf"), "r") as file:
        test_cases = yaml.safe_load(file)

    for test in test_cases:
        model_list = []
        test_id_list = []
        extra_args = {}
        # Marks apply to the whole test case, so resolve them once per case
        if "marks" in test:
            extra_args["marks"] = test["marks"]
        for entry in test:
            model = entry["model"]
            is_omz = model.get("type") == "omz"
            if is_omz:
                test_id_list.append(f'{model["name"]}_{model["precision"]}')
            else:
                test_id_list.append(model["path"].split("/")[-1])
                model["path"] = Path(expand_env_vars(model["path"]))
            model_list.append(model)
        ids = ids + ['-'.join(test_id_list)]
        params.append(pytest.param('-'.join(test_id_list), model_list,
                                   **extra_args))

    metafunc.parametrize("test_id, models", params, ids=ids)
Example #2
def test_timetest(instance, executable, niter, cl_cache_dir, cpu_cache,
                  vpu_compiler, perf_hint, model_cache_dir, test_info,
                  temp_dir, validate_test_case, prepare_db_info):
    """Parameterized test.

    :param instance: test instance. Should not be changed during test run
    :param executable: timetest executable to run
    :param niter: number of times to run executable
    :param cl_cache_dir: directory to store OpenCL cache
    :param cpu_cache: flag to enable model CPU cache
    :param vpu_compiler: flag to change VPUX compiler type
    :param perf_hint: performance hint (optimize device for latency or throughput settings)
    :param model_cache_dir: directory to store IE model cache
    :param test_info: custom `test_info` field of built-in `request` pytest fixture
    :param temp_dir: path to a temporary directory. Will be cleaned up after test run
    :param validate_test_case: custom pytest fixture. Should be declared as test argument to be enabled
    :param prepare_db_info: custom pytest fixture. Should be declared as test argument to be enabled
    """
    # Prepare model to get model_path
    model_path = instance["model"].get("path")
    assert model_path, "Model path is empty"
    model_path = Path(expand_env_vars(model_path))

    # Copy model to a local temporary directory
    model_dir = temp_dir / "model"
    shutil.copytree(model_path.parent, model_dir)
    model_path = model_dir / model_path.name

    # Run executable
    exe_args = {
        "executable": Path(executable),
        "model": Path(model_path),
        "device": instance["device"]["name"],
        "niter": niter,
        "perf_hint": perf_hint,
        "cpu_cache": cpu_cache,
        "vpu_compiler": vpu_compiler if vpu_compiler else ""
    }
    logging.info("Run timetest once to generate any cache")
    retcode, msg, _, _ = run_timetest({**exe_args, "niter": 1}, log=logging)
    assert retcode == 0, f"Run of executable for warm up failed: {msg}"
    if cl_cache_dir:
        assert os.listdir(cl_cache_dir), "cl_cache isn't generated"
    if model_cache_dir:
        assert os.listdir(model_cache_dir), "model_cache isn't generated"

    retcode, msg, aggr_stats, raw_stats = run_timetest(exe_args, log=logging)
    assert retcode == 0, f"Run of executable failed: {msg}"

    # Add timetest results to submit to database and save in new test conf as references
    test_info["results"] = aggr_stats
    test_info["raw_results"] = raw_stats
Example #3
def test(instance, executable, niter, temp_dir, omz_models_conversion,
         validate_test_case, prepare_db_info):
    """Parameterized test.
    :param instance: test instance. Should not be changed during test run
    :param executable: test executable to run
    :param niter: number of times to run executable
    :param temp_dir: path to a temporary directory. Will be cleaned up after test run
    :param validate_test_case: custom pytest fixture. Should be declared as test argument to be enabled
    :param prepare_db_info: custom pytest fixture. Should be declared as test argument to be enabled
    :param omz_models_conversion: custom pytest fixture. Should be declared as test argument to be enabled
    """
    # Prepare model to get model_path
    model_path = ''
    cache_model_path = instance["instance"]["model"].get("cache_path")
    irs_model_path = instance["instance"]["model"].get("irs_out_path")

    if irs_model_path and os.path.isfile(irs_model_path):
        model_path = irs_model_path
    elif cache_model_path and os.path.isfile(cache_model_path):
        model_path = cache_model_path

    assert model_path, "Model path is empty"
    model_path = Path(expand_env_vars(model_path))

    # Copy model to a local temporary directory
    model_dir = temp_dir / "model"
    shutil.copytree(model_path.parent, model_dir)
    model_path = model_dir / model_path.name

    # Run executable
    exe_args = {
        "executable": Path(executable),
        "model": Path(model_path),
        "device": instance["instance"]["device"]["name"],
        "niter": niter
    }
    retcode, msg, aggr_stats, raw_stats = run_memorytest(exe_args, log=logging)
    assert retcode == 0, f"Run of executable failed: {msg}"

    # Add test results to submit to database and save in new test conf as references
    instance["results"] = aggr_stats
    instance["raw_results"] = raw_stats

    # Compare with references
    metrics_comparator_status = compare_with_references(
        aggr_stats, instance["orig_instance"]["references"])
    assert metrics_comparator_status == 0, "Comparison with references failed"
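
compare_with_references is another project helper whose implementation is not part of this excerpt. A plausible sketch, assuming both arguments are flat dicts mapping a metric name to a single number and that a non-zero return signals a mismatch (the relative tolerance is an assumption):

def compare_with_references(aggr_stats, references, rel_tol=0.05):
    """Sketch only: 0 if every aggregated metric is within rel_tol of its reference."""
    status = 0
    for metric, reference in references.items():
        measured = aggr_stats.get(metric)
        if measured is None or abs(measured - reference) > rel_tol * abs(reference):
            status = 1
    return status
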
Example #4
def pytest_generate_tests(metafunc):
    """Generate tests depending on command line options."""
    params = []
    ids = []

    with open(metafunc.config.getoption("test_conf"), "r") as file:
        test_cases = yaml.safe_load(file)

    for test in test_cases:
        extra_args = {}
        model_path = test["model"]["path"]
        if "marks" in test:
            extra_args["marks"] = test["marks"]

        test_id = model_path.replace("$", "").replace("{", "").replace("}", "")
        params.append(pytest.param(test_id, Path(expand_env_vars(model_path)), **extra_args))
        ids = ids + [test_id]
    metafunc.parametrize("test_id, model", params, ids=ids)
Example #5
def pytest_generate_tests(metafunc):
    """ Generate tests depending on command line options
    """
    params = []
    ids = []

    with open(metafunc.config.getoption('test_conf'), "r") as file:
        test_cases = yaml.safe_load(file)

    for test in test_cases:
        extra_args = {}
        model_path = test["model"]["path"]
        if "marks" in test:
            extra_args["marks"] = test["marks"]

        params.append(
            pytest.param(Path(expand_env_vars(model_path)), **extra_args))
        ids = ids + [model_path]
    metafunc.parametrize("model", params, ids=ids)
Example #6
def pytest_generate_tests(metafunc):
    """Generate tests depending on command line options."""
    params = []
    ids = []

    with open(metafunc.config.getoption("test_conf"), "r") as file:
        test_cases = yaml.safe_load(file)

    for test in test_cases:
        model_list = []
        test_id_list = []
        extra_args = {}
        # Marks apply to the whole test case, so resolve them once per case
        if "marks" in test:
            extra_args["marks"] = test["marks"]
        for entry in test:
            model_path = entry["model"]["path"]
            model_list.append(expand_env_vars(model_path))
            test_id_list.append(model_path.split("/")[-1])
        ids = ids + ['-'.join(test_id_list)]
        params.append(pytest.param('-'.join(test_id_list), model_list, **extra_args))

    metafunc.parametrize("test_id, models", params, ids=ids)