def prepare_omz_model(openvino_ref, model, omz_repo, omz_cache_dir, tmpdir):
    """Download an Open Model Zoo model, convert it to Intermediate
    Representation and return the path to the resulting model XML.
    """
    log = logging.getLogger("prepare_omz_model")
    python_exe = sys.executable
    out_root = tmpdir

    # Step 1: download the model (optionally reusing a shared cache)
    download_cmd = [
        python_exe,
        str(omz_repo / "tools" / "downloader" / "downloader.py"),
        "--name", str(model["name"]),
        f"--precisions={model['precision']}",
        "--num_attempts", str(OMZ_NUM_ATTEMPTS),
        "--output_dir", str(out_root),
    ]
    if omz_cache_dir:
        download_cmd += ["--cache_dir", str(omz_cache_dir)]
    return_code, output = cmd_exec(download_cmd, log=log)
    assert return_code == 0, "Downloading OMZ models has failed!"

    # Step 2: convert the downloaded model to IR
    # Note: remove --precisions if both precisions (FP32 & FP16) are required
    ir_path = out_root / "_IR"
    convert_cmd = [
        python_exe,
        str(omz_repo / "tools" / "downloader" / "converter.py"),
        "--name", str(model["name"]),
        "-p", python_exe,
        f"--precisions={model['precision']}",
        "--output_dir", str(ir_path),
        "--download_dir", str(out_root),
        "--mo", str(openvino_ref / "tools" / "model_optimizer" / "mo.py"),
    ]
    return_code, output = cmd_exec(
        convert_cmd, env=get_openvino_environment(openvino_ref), log=log)
    assert return_code == 0, "Converting OMZ models has failed!"

    # Step 3: dump model metadata (JSON) to learn its output subdirectory
    info_cmd = [
        python_exe,
        str(omz_repo / "tools" / "downloader" / "info_dumper.py"),
        "--name", str(model["name"]),
    ]
    return_code, output = cmd_exec(info_cmd, log=log)
    assert return_code == 0, "Getting information about OMZ models has failed!"
    model_info = json.loads(output)[0]

    # Step 4: assemble the path to the converted model XML
    return ir_path / model_info["subdirectory"] / model["precision"] / f'{model_info["name"]}.xml'
def omz_models_conversion(instance, request):
    """Fixture for preparing omz models and updating test config with new paths.

    Downloads the OMZ model named in the test instance, converts it to IR and
    records the model's framework, relative path and full IR path back into
    ``instance``.

    :param instance: test configuration dict; ``instance["instance"]["model"]``
                     is read and updated in place.
    :param request: pytest ``request`` fixture used to read command-line options.
    """
    # Check Open Model Zoo key
    omz_path = request.config.getoption("omz")
    if omz_path:
        cache_dir = request.config.getoption("omz_cache_dir")
        omz_models_out_dir = request.config.getoption("omz_models_out_dir")
        omz_irs_out_dir = request.config.getoption("omz_irs_out_dir")
        mo_path = request.config.getoption("mo")
        downloader_path = omz_path / "tools" / "downloader" / "downloader.py"
        converter_path = omz_path / "tools" / "downloader" / "converter.py"
        info_dumper_path = omz_path / "tools" / "downloader" / "info_dumper.py"
        if instance["instance"]["model"]["source"] == "omz":
            model_name = instance["instance"]["model"]["name"]
            model_precision = instance["instance"]["model"]["precision"]
            # get full model info
            cmd = f'"{sys.executable}" "{info_dumper_path}" --name {model_name}'
            # Fix: check the return code instead of discarding it — a failed
            # info_dumper run previously fell through to json.loads on garbage
            return_code, info = cmd_exec([cmd], shell=True, log=logging)
            assert return_code == 0, "Getting information about OMZ models has failed!"
            model_info = json.loads(info)[0]
            if model_precision not in model_info['precisions']:
                logging.error(
                    f"Please specify precision for the model "
                    f"{model_name} from the list: {model_info['precisions']}")
            model_path = Path(model_info["subdirectory"]) / model_precision / (
                model_name + ".xml")
            model_full_path = omz_irs_out_dir / model_info[
                "subdirectory"] / model_precision / (model_name + ".xml")
            # prepare models and convert models to IRs
            cmd = f'{sys.executable} {downloader_path}' \
                  f' --name {model_name}' \
                  f' --precisions={model_precision}' \
                  f' --num_attempts {OMZ_NUM_ATTEMPTS}' \
                  f' --output_dir {omz_models_out_dir}' \
                  f' --cache_dir {cache_dir}'
            # Fix: downloader/converter failures were silently ignored; fail
            # fast like the list-based variant of this fixture does
            return_code, _ = cmd_exec([cmd], shell=True, log=logging)
            assert return_code == 0, "Downloading OMZ models has failed!"
            cmd = f'{sys.executable} {converter_path}' \
                  f' --name {model_name}' \
                  f' -p {sys.executable}' \
                  f' --precisions={model_precision}' \
                  f' --output_dir {omz_irs_out_dir}' \
                  f' --download_dir {omz_models_out_dir}' \
                  f' --mo {mo_path}'
            return_code, _ = cmd_exec([cmd], shell=True, log=logging)
            assert return_code == 0, "Converting OMZ models has failed!"
            instance["instance"]["model"]["framework"] = model_info["framework"]
            instance["instance"]["model"]["path"] = model_path
            instance["instance"]["model"]["full_path"] = model_full_path
def omz_models_conversion(instance, request):
    """Fixture for preparing omz models and updating test config with new paths."""
    # Nothing to do unless the Open Model Zoo location was provided
    omz_path = request.config.getoption("omz")
    if not omz_path:
        return
    # TODO: After switch to wheel OV installation, omz tools should be accessible through command line
    tools_dir = omz_path / "tools" / "model_tools"
    model_cfg = instance["instance"]["model"]
    if model_cfg["source"] != "omz":
        return

    name = model_cfg["name"]
    precision = model_cfg["precision"]
    cache_dir = request.config.getoption("omz_cache_dir")
    models_out_dir = request.config.getoption("omz_models_out_dir")
    irs_out_dir = request.config.getoption("omz_irs_out_dir")

    # Query full model metadata from the OMZ info dumper (JSON output)
    return_code, info = cmd_exec(
        [sys.executable, str(tools_dir / "info_dumper.py"), "--name", str(name)],
        log=logging)
    assert return_code == 0, "Getting information about OMZ models has failed!"
    model_info = json.loads(info)[0]

    if precision not in model_info['precisions']:
        logging.error(f"Please specify precision for the model "
                      f"{name} from the list: {model_info['precisions']}")

    rel_xml_path = str(Path(model_info["subdirectory"]) / precision / (name + ".xml"))
    cached_model_path = models_out_dir / rel_xml_path
    converted_ir_path = irs_out_dir / rel_xml_path

    # Download the model into the cache directory, then convert it to IR
    return_code, _ = cmd_exec(
        [sys.executable, str(tools_dir / "downloader.py"),
         "--name", str(name),
         "--precisions", str(precision),
         "--num_attempts", str(OMZ_NUM_ATTEMPTS),
         "--output_dir", str(models_out_dir),
         "--cache_dir", str(cache_dir)],
        log=logging)
    assert return_code == 0, "Downloading OMZ models has failed!"

    return_code, _ = cmd_exec(
        [sys.executable, str(tools_dir / "converter.py"),
         "--name", str(name),
         "-p", sys.executable,
         "--precisions", str(precision),
         "--output_dir", str(irs_out_dir),
         "--download_dir", str(models_out_dir)],
        log=logging)
    assert return_code == 0, "Converting OMZ models has failed!"

    # Propagate the resolved model information back into the test config
    instance["orig_instance"]["model"]["framework"] = model_info["framework"]
    instance["orig_instance"]["model"]["path"] = rel_xml_path
    model_cfg["cache_path"] = cached_model_path
    model_cfg["irs_out_path"] = converted_ir_path
def test_cc_collect(test_id, model, sea_runtool, benchmark_app, collector_dir, artifacts):
    """Test conditional compilation statistics collection."""
    out = artifacts / test_id
    csv_pattern = f"{out}.pid*.csv"
    # drop stale per-PID CSVs from earlier runs
    for stale in glob.glob(csv_pattern):
        os.remove(stale)
    # launch benchmark_app under the sea runtool collector
    cmd = [
        sys.executable,
        str(sea_runtool),
        f"-o={out}",
        f"--bindir={collector_dir}",
        "!",
        str(benchmark_app),
        "-d=CPU",
        f"-m={model}",
        "-niter=1",
        "-nireq=1",
    ]
    exit_code, log_output = cmd_exec(cmd)
    assert exit_code == 0, f"Command exited with non-zero status {exit_code}:\n {log_output}"
    assert len(glob.glob(csv_pattern)) == 1, f'Multiple or none "{out}.pid*.csv" files'
def test_cc_collect(test_id, prepared_models, openvino_ref, test_info, save_session_info,
                    sea_runtool, collector_dir, artifacts):  # pylint: disable=unused-argument
    """Test conditional compilation statistics collection

    :param test_info: custom `test_info` field of built-in `request` pytest fixture.
                      contain a dictionary to store test metadata.
    """
    out = artifacts / test_id
    infer_out_dir = out / "inference_result"
    csv_pattern = f"{out / test_id}.pid*.csv"
    test_info["test_id"] = test_id

    # drop stale per-PID CSVs from earlier runs
    for stale in glob.glob(csv_pattern):
        os.remove(stale)
    # make sure the inference results directory exists
    infer_out_dir.mkdir(parents=True, exist_ok=True)

    # run the inference tool under the sea runtool collector, one -m per model
    cmd = [
        sys.executable,
        str(sea_runtool),
        f"--output={out / test_id}",
        f"--bindir={collector_dir}",
        "!",
        sys.executable,
        infer_tool,
    ]
    cmd.extend(f"-m={model}" for model in prepared_models)
    cmd.extend(["-d=CPU", f"-r={infer_out_dir}"])
    return_code, output = cmd_exec(cmd)

    out_csv = glob.glob(csv_pattern)
    test_info["out_csv"] = out_csv
    assert return_code == 0, f"Command exited with non-zero status {return_code}:\n {output}"
    assert len(out_csv) == 1, f'Multiple or none "{out / test_id}.pid*.csv" files'
def test_cc_collect(model, sea_runtool, benchmark_app, artifacts):
    """Test conditional compilation statistics collection.

    Runs ``benchmark_app`` under the sea runtool (stat format) and checks that
    exactly one per-PID statistics CSV is produced.
    """
    out = artifacts / model.parent / model.stem
    # cleanup old data if any
    prev_results = glob.glob(f"{out}.pid*.csv")
    for path in prev_results:
        os.remove(path)
    # run use case
    # Fix: use the current interpreter (sys.executable) instead of a bare
    # "python" literal, which depends on PATH and may resolve to a different
    # (or missing) interpreter; sibling variants of this test already do so.
    returncode, _ = cmd_exec(
        [
            sys.executable,
            str(sea_runtool),
            f"-o={out}",
            "-f=stat",
            "!",
            str(benchmark_app),
            "-d=CPU",
            f"-m={model}",
            "-niter=1",
            "-nireq=1",
        ]
    )
    assert returncode == 0, f"Command exited with non-zero status {returncode}"
    assert (
        len(glob.glob(f"{out}.pid*.csv")) == 1
    ), f'Multiple or none "{out}.pid*.csv" files'
def test_cc_collect(test_id, model, sea_runtool, benchmark_app, collector_dir, artifacts, test_info):
    """Test conditional compilation statistics collection

    :param test_info: custom `test_info` field of built-in `request` pytest fixture.
                      contain a dictionary to store test metadata.
    """
    out = artifacts / test_id
    csv_pattern = f"{out}.pid*.csv"
    test_info["test_id"] = test_id

    # drop stale per-PID CSVs from earlier runs
    for stale in glob.glob(csv_pattern):
        os.remove(stale)

    # launch benchmark_app under the sea runtool collector (app status tracked)
    cmd = [
        sys.executable,
        str(sea_runtool),
        f"--output={out}",
        f"--bindir={collector_dir}",
        "--app_status",
        "!",
        str(benchmark_app),
        "-d=CPU",
        f"-m={model}",
        "-niter=1",
        "-nireq=1",
    ]
    return_code, output = cmd_exec(cmd)

    out_csv = glob.glob(csv_pattern)
    test_info["out_csv"] = out_csv
    assert return_code == 0, f"Command exited with non-zero status {return_code}:\n {output}"
    assert len(out_csv) == 1, f'Multiple or none "{out}.pid*.csv" files'
def test_cc_collect(test_id, model, sea_runtool, collector_dir, artifacts, test_info, save_session_info):
    """Test conditional compilation statistics collection

    :param test_info: custom `test_info` field of built-in `request` pytest fixture.
                      contain a dictionary to store test metadata.
    """
    out = artifacts / test_id
    csv_pattern = f"{out}.pid*.csv"
    test_info["test_id"] = test_id

    # drop stale per-PID CSVs from earlier runs
    for stale in glob.glob(csv_pattern):
        os.remove(stale)

    # resolve the interpreter from sys.prefix (venv-aware), per platform
    if sys.platform == "win32":
        interpreter = os.path.join(sys.prefix, 'python.exe')
    else:
        interpreter = os.path.join(sys.prefix, 'bin', 'python')

    # run the inference tool under the sea runtool collector
    cmd = [
        interpreter,
        str(sea_runtool),
        f"--output={out}",
        f"--bindir={collector_dir}",
        "!",
        interpreter,
        infer_tool,
        f"-m={model}",
        "-d=CPU",
        f"-r={out}",
    ]
    return_code, output = cmd_exec(cmd)

    out_csv = glob.glob(csv_pattern)
    test_info["out_csv"] = out_csv
    assert return_code == 0, f"Command exited with non-zero status {return_code}:\n {output}"
    assert len(out_csv) == 1, f'Multiple or none "{out}.pid*.csv" files'
def prepare_omz_model(openvino_ref, model, omz_repo, omz_cache_dir, tmpdir):
    """Download and convert Open Model Zoo model to Intermediate
    Representation, get path to model XML.

    :param openvino_ref: OpenVINO location, used to build the run environment.
    :param model: dict with at least "name" and "precision" keys.
    :param omz_repo: path to the Open Model Zoo repository.
    :param omz_cache_dir: optional download cache directory (may be falsy).
    :param tmpdir: working directory for downloads and converted IRs.
    :return: path to the converted model XML file.
    """
    # Step 1: downloader
    omz_log = logging.getLogger("prepare_omz_model")
    python_executable = sys.executable
    downloader_path = omz_repo / "tools" / "downloader" / "downloader.py"
    model_path_root = tmpdir
    cmd = f'{python_executable} {downloader_path} --name {model["name"]}' \
          f' --precisions={model["precision"]}' \
          f' --num_attempts {OMZ_NUM_ATTEMPTS}' \
          f' --output_dir {model_path_root}'
    if omz_cache_dir:
        cmd += f' --cache_dir {omz_cache_dir}'
    # Fix: the return code was previously discarded — a failed download then
    # surfaced only as a confusing converter/json error later on
    return_code, _ = cmd_exec(cmd, log=omz_log)
    assert return_code == 0, "Downloading OMZ models has failed!"

    # Step 2: converter
    converter_path = omz_repo / "tools" / "downloader" / "converter.py"
    ir_path = model_path_root / "_IR"
    # Note: remove --precisions if both precisions (FP32 & FP16) are required
    # NOTE(review): the --mo path is resolved relative to the current working
    # directory — confirm tests always run from the expected location
    cmd = f'{python_executable} {converter_path} --name {model["name"]}' \
          f' -p {python_executable}' \
          f' --precisions={model["precision"]}' \
          f' --output_dir {ir_path}' \
          f' --download_dir {model_path_root}' \
          f' --mo {Path("../../model-optimizer/mo.py").resolve()}'
    # Fix: likewise fail fast if the conversion step fails
    return_code, _ = cmd_exec(cmd, env=get_openvino_environment(openvino_ref), log=omz_log)
    assert return_code == 0, "Converting OMZ models has failed!"

    # Step 3: info_dumper
    info_dumper_path = omz_repo / "tools" / "downloader" / "info_dumper.py"
    cmd = f'"{python_executable}" "{info_dumper_path}" --name {model["name"]}'
    return_code, output = cmd_exec(cmd, log=omz_log)
    assert return_code == 0, "Getting information about OMZ models has failed!"
    model_info = json.loads(output)[0]

    # Step 4: form model_path
    model_path = ir_path / model_info["subdirectory"] / model[
        "precision"] / f'{model_info["name"]}.xml'
    return model_path
def test_infer(model, models_root, benchmark_app):
    """Test inference with conditional compiled binaries."""
    # one-iteration, one-request benchmark run on CPU
    cmd = [
        str(benchmark_app),
        "-d=CPU",
        f"-m={models_root / model}",
        "-niter=1",
        "-nireq=1",
    ]
    returncode, _ = cmd_exec(cmd)
    assert returncode == 0, f"Command exited with non-zero status {returncode}"
def run_infer(model, out_file, install_dir):
    """Run the inference tool on *model* and return (return_code, output)."""
    # execute with the OpenVINO environment of the given install directory
    return cmd_exec(
        [sys.executable, infer_tool, "-d=CPU", f"-m={model}", f"-r={out_file}"],
        env=get_openvino_environment(install_dir),
    )
def test_infer(test_id, model, artifacts):
    """Test inference with conditional compiled binaries."""
    install_prefix = artifacts / test_id / "install_pkg"
    # benchmark_app binary carries an .exe suffix on Windows only
    suffix = ".exe" if sys.platform == "win32" else ""
    benchmark_app = install_prefix / "bin" / f"benchmark_app{suffix}"
    returncode, _ = cmd_exec(
        [str(benchmark_app), "-d=CPU", f"-m={model}", "-niter=1", "-nireq=1"],
        env=get_openvino_environment(install_prefix),
    )
    assert returncode == 0, f"Command exited with non-zero status {returncode}"
def make_build(openvino_root_dir, build_dir, install_dir, cmake_additional_args=None, log=None):
    """Parametrized build and install OpenVINO package.

    Configures, builds and installs via a single shell command chained with
    ``&&`` so later steps are skipped when an earlier one fails.

    :param openvino_root_dir: source tree root passed to ``cmake -S``.
    :param build_dir: out-of-source build directory (``cmake -B``).
    :param install_dir: installation prefix for ``cmake --install``.
    :param cmake_additional_args: optional list of extra cmake arguments.
    :param log: logger forwarded to ``cmd_exec``.
    :return: result of ``cmd_exec`` for the combined command.
    """
    additional_args_line = " ".join(cmake_additional_args) + " " if cmake_additional_args else ""
    nproc = multiprocessing.cpu_count()
    cmd = (
        f"cmake -DENABLE_PROFILING_ITT=ON -DCMAKE_BUILD_TYPE=Release "
        f"-DPYTHON_EXECUTABLE={sys.executable} {additional_args_line}"
        # Fix: add the missing space after the first "&&" — the pieces joined
        # as "…{build_dir} &&cmake --build…", inconsistent with the second
        # separator and fragile outside POSIX-sh operator tokenization
        f"-S {openvino_root_dir} -B {build_dir} && "
        f"cmake --build {build_dir} -j{nproc} && "
        f"cmake --install {build_dir} --prefix {install_dir}"
    )
    return cmd_exec([cmd], shell=True, log=log)
def run_infer(models, out_dir, install_dir):
    """Run the inference tool on each model in *models*; return (return_code, output)."""
    # make sure the results directory exists before inference writes into it
    out_dir.mkdir(parents=True, exist_ok=True)
    cmd = [sys.executable, infer_tool, "-d=CPU"]
    cmd.extend(f"-m={model}" for model in models)
    cmd.append(f"-r={out_dir}")
    return cmd_exec(cmd, env=get_openvino_environment(install_dir))
def run_infer(model, out_file, install_dir):
    """Run the inference tool on *model* and return (return_code, output)."""
    # resolve the interpreter from sys.prefix (venv-aware), per platform
    if sys.platform == "win32":
        interpreter = os.path.join(sys.prefix, 'python.exe')
    else:
        interpreter = os.path.join(sys.prefix, 'bin', 'python')
    return cmd_exec(
        [interpreter, infer_tool, "-d=CPU", f"-m={model}", f"-r={out_file}"],
        env=get_openvino_environment(install_dir),
    )
def run_timetest(args: dict, log=None):
    """Run provided executable several times and aggregate collected statistics.

    :param args: run configuration; must contain "executable" and "niter"
                 (other keys are consumed by ``prepare_executable_cmd``).
    :param log: optional logger; defaults to the 'run_timetest' logger.
    :return: tuple ``(retcode, error_message, aggregated_stats, raw_stats)``;
             on any failed iteration returns that iteration's retcode/message
             with empty stats dicts.
    """
    if log is None:
        log = logging.getLogger('run_timetest')
    # Build the command once; only the per-iteration "-s <file>" part varies.
    cmd_common = prepare_executable_cmd(args)
    # Run executable and collect statistics
    stats = {}
    for run_iter in range(args["niter"]):
        # NOTE(review): only the temp file's *name* is taken, so the executable
        # creates the file itself — this is race-prone (tempfile docs warn
        # against using .name this way); confirm acceptable for test tooling.
        tmp_stats_path = tempfile.NamedTemporaryFile().name
        retcode, msg = cmd_exec(cmd_common + ["-s", str(tmp_stats_path)], log=log)
        if retcode != 0:
            # Abort on the first failed iteration — partial stats are dropped.
            log.error(
                "Run of executable '{}' failed with return code '{}'. Error: {}\n"
                "Statistics aggregation is skipped.".format(
                    args["executable"], retcode, msg))
            return retcode, msg, {}, {}
        # Read raw statistics
        with open(tmp_stats_path, "r") as file:
            raw_data = list(yaml.load_all(file, Loader=yaml.SafeLoader))
        os.unlink(tmp_stats_path)
        # Parse raw data: flatten the first YAML document in-place into a
        # {step_name: duration} dict.
        flatten_data = {}
        parse_stats(raw_data[0], flatten_data)
        log.debug("Statistics after run of executable #{}: {}".format(
            run_iter, flatten_data))
        # Combine statistics from several runs: append this iteration's
        # duration to each step's list (rebuilds the dict each iteration).
        stats = dict((step_name, stats.get(step_name, []) + [duration])
                     for step_name, duration in flatten_data.items())
    # Remove outliers
    filtered_stats = filter_timetest_result(stats)
    # Aggregate results
    aggregated_stats = aggregate_stats(filtered_stats)
    log.debug(
        "Aggregated statistics after full run: {}".format(aggregated_stats))
    return 0, "", aggregated_stats, stats