def emit_ir(graph: Graph, argv: argparse.Namespace):
    """Emit the final IR (.xml/.bin) for the prepared graph and report status.

    Runs the last normalization passes over the graph, writes the IR via
    prepare_emit_ir(), best-effort runs the offline_transformations helper
    script over the result, sends a telemetry event with the outcome, and
    prints the success messages.

    :param graph: model graph to serialize (passes are also applied to every
                  sub-graph recursively)
    :param argv: parsed command-line parameters (output dir, model name,
                 framework, silent flag, ...)
    :return: 0 (failures of the offline transformations step are reported via
             a printed warning only, not via the return value)
    """
    NormalizeTI().find_and_replace_pattern(graph)
    # Const-node cleanup/replacement applied to the graph and all sub-graphs.
    for_graph_and_each_sub_graph_recursively(graph, RemoveConstOps().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(graph, CreateConstNodesReplacement().find_and_replace_pattern)

    prepare_emit_ir(graph=graph,
                    data_type=graph.graph['cmd_params'].data_type,
                    output_dir=argv.output_dir,
                    output_model_name=argv.model_name,
                    # mean image data and input names are optional graph attributes
                    mean_data=graph.graph['mf'] if 'mf' in graph.graph else None,
                    input_names=graph.graph['input_names'] if 'input_names' in graph.graph else [],
                    meta_info=get_meta_info(argv))

    # When only a TF custom-operations config update was requested, no IR
    # success messages are printed.
    if not (argv.framework == 'tf' and argv.tensorflow_custom_operations_config_update):
        output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
        orig_model_name = os.path.normpath(os.path.join(output_dir, argv.model_name))

        return_code = "not executed"
        # This try-except is additional reinsurance that the IE
        # dependency search does not break the MO pipeline
        try:
            if find_ie_version(silent=True):
                path_to_offline_transformations = os.path.join(os.path.realpath(os.path.dirname(__file__)),
                                                               'back', 'offline_transformations.py')
                # Run the helper in a separate interpreter; a hung child is cut
                # off by the 10 second timeout.
                status = subprocess.run([sys.executable, path_to_offline_transformations, orig_model_name],
                                        env=os.environ, timeout=10)
                return_code = status.returncode
                if return_code != 0 and not argv.silent:
                    print("[ WARNING ] offline_transformations return code {}".format(return_code))
        except Exception as e:
            # Deliberate best-effort: a failure here must not abort IR emission
            # (see the comment above the try).
            pass

        # Telemetry payload describing the offline transformations outcome;
        # note return_code may still be the string "not executed".
        message = str(dict({
            "platform": platform.system(),
            "mo_version": get_simplified_mo_version(),
            "ie_version": get_simplified_ie_version(env=os.environ),
            "python_version": sys.version,
            "return_code": return_code
        }))
        t = tm.Telemetry()
        t.send_event('mo', 'offline_transformations_status', message)

        print('[ SUCCESS ] Generated IR version {} model.'.format(get_ir_version(argv)))
        print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
        print('[ SUCCESS ] BIN file: {}.bin'.format(orig_model_name))
    return 0
def emit_ir(graph: Graph, argv: argparse.Namespace):
    """Emit the final IR (.xml/.bin) for the prepared graph.

    The IR is first written under a temporary ("_tmp") name, then — unless
    ``--legacy_ir_generation`` is set — the offline_transformations helper
    script is run over it; any non-zero result raises an Error. A telemetry
    event describing the outcome is always sent before the failure check.

    :param graph: model graph to serialize (passes are also applied to every
                  sub-graph recursively)
    :param argv: parsed command-line parameters (output dir, model name,
                 framework, transform, ...)
    :raises Error: when the offline transformations step returned non-zero
                   or did not run
    :return: 0 on success
    """
    NormalizeTI().find_and_replace_pattern(graph)
    # Const-node cleanup/replacement applied to the graph and all sub-graphs.
    for_graph_and_each_sub_graph_recursively(graph, RemoveConstOps().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(graph, CreateConstNodesReplacement().find_and_replace_pattern)

    # 'feManager' is an internal argv entry that must not end up in the IR
    # meta info produced by get_meta_info(argv) below.
    if 'feManager' in argv:
        del argv.feManager

    # Deep copies: the graph is cleared below, but this data is still needed
    # later by append_ir_info().
    mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None
    input_names = deepcopy(graph.graph['input_names']) if 'input_names' in graph.graph else []

    prepare_emit_ir(graph=graph,
                    data_type=graph.graph['cmd_params'].data_type,
                    output_dir=argv.output_dir,
                    output_model_name=argv.model_name,
                    mean_data=mean_data,
                    input_names=input_names,
                    meta_info=get_meta_info(argv),
                    # emit under the temporary "_tmp" name first
                    use_temporary_path=True)

    # This graph cleanup is required to avoid double memory consumption
    graph.clear()

    if not (argv.framework == 'tf' and argv.tensorflow_custom_operations_config_update):
        output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
        orig_model_name = os.path.normpath(os.path.join(output_dir, argv.model_name))

        return_code = "not executed"
        # This try-except is additional reinsurance that the IE
        # dependency search does not break the MO pipeline
        try:
            if not argv.legacy_ir_generation:
                path_to_offline_transformations = os.path.join(os.path.realpath(os.path.dirname(__file__)),
                                                               'back', 'offline_transformations.py')
                cmd = [sys.executable, path_to_offline_transformations,
                       "--input_model", orig_model_name,
                       "--framework", argv.framework,
                       "--transform", argv.transform]
                if "compress_fp16" in argv and argv.compress_fp16:
                    cmd += ["--compress_fp16"]
                    # restore data_type cmd parameter
                    argv.data_type = 'FP16'
                status = subprocess.run(cmd, env=os.environ)
                return_code = status.returncode
        except Exception as e:
            return_code = "failed"
            log.error(e)

        # Telemetry payload describing the offline transformations outcome;
        # note return_code may be the string "not executed" or "failed".
        message = str(dict({
            "platform": platform.system(),
            "mo_version": get_simplified_mo_version(),
            "ie_version": get_simplified_ie_version(env=os.environ),
            "python_version": sys.version,
            "return_code": return_code
        }))
        t = tm.Telemetry()
        t.send_event('mo', 'offline_transformations_status', message)

        if return_code != 0:
            raise Error("offline transformations step has failed.")

        for suf in [".xml", ".bin", ".mapping"]:
            # remove existing files
            path_to_file = orig_model_name + "_tmp" + suf
            if os.path.exists(path_to_file):
                os.remove(path_to_file)

        # add meta information to IR
        append_ir_info(file=orig_model_name,
                       meta_info=get_meta_info(argv),
                       mean_data=mean_data,
                       input_names=input_names)

        print('[ SUCCESS ] Generated IR version {} model.'.format(get_ir_version(argv)))
        print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
        print('[ SUCCESS ] BIN file: {}.bin'.format(orig_model_name))

    return 0
def import_core_modules(silent: bool, path_to_module: str):
    """
    This function checks that InferenceEngine Python API is available
    and necessary python modules exists. So the next list of imports must
    contain all IE/NG Python API imports that are used inside MO.

    :param silent: enables or disables logs printing to stdout
    :param path_to_module: path where python API modules were found
    :return: True if all imports were successful and False otherwise
    """
    try:
        from openvino.inference_engine import get_version, read_network  # pylint: disable=import-error,no-name-in-module
        from openvino.offline_transformations import ApplyMOCTransformations, ApplyLowLatencyTransformation, \
            ApplyMakeStatefulTransformation, GenerateMappingFile  # pylint: disable=import-error,no-name-in-module

        # TODO: it is temporary import to check that nGraph python API is available. But in future
        # we need to replace it with Frontend imports
        from ngraph.impl.op import Parameter  # pylint: disable=import-error,no-name-in-module
        from _pyngraph import PartialShape, Dimension  # pylint: disable=import-error,no-name-in-module

        import openvino  # pylint: disable=import-error,no-name-in-module
        import ngraph  # pylint: disable=import-error,no-name-in-module
        import ngraph.frontend  # pylint: disable=import-error,no-name-in-module

        if silent:
            return True

        ie_version = str(get_version())
        mo_version = str(v.get_version())  # pylint: disable=no-member,no-name-in-module

        print("\t- {}: \t{}".format("Inference Engine found in", os.path.dirname(openvino.__file__)))
        # TODO: when nGraph version will be available we need to start compare it to IE and MO versions. Ticket: 58091
        print("\t- {}: \t{}".format("nGraph found in", os.path.dirname(ngraph.__file__)))
        print("{}: \t{}".format("Inference Engine version", ie_version))
        print("{}: \t{}".format("Model Optimizer version", mo_version))

        versions_mismatch = False
        if mo_version != ie_version:
            versions_mismatch = True
            extracted_mo_release_version = v.extract_release_version(mo_version)
            # (None, None) means no release version could be extracted -> custom build
            mo_is_custom = extracted_mo_release_version == (None, None)

            print("[ WARNING ] Model Optimizer and Inference Engine versions do no match.")
            print("[ WARNING ] Consider building the Inference Engine Python API from sources or reinstall OpenVINO "
                  "(TM) toolkit using", end=" ")
            if mo_is_custom:
                print("\"pip install openvino\" (may be incompatible with the current Model Optimizer version)")
            else:
                print("\"pip install openvino=={}.{}\"".format(*extracted_mo_release_version))

        # Telemetry about whether this environment has matching MO/IE versions.
        simplified_mo_version = v.get_simplified_mo_version()
        message = str(dict({
            "platform": platform.system(),
            "mo_version": simplified_mo_version,
            "ie_version": v.get_simplified_ie_version(version=ie_version),
            "versions_mismatch": versions_mismatch,
        }))
        send_telemetry(simplified_mo_version, message, 'ie_version_check')

        return True
    except Exception as e:
        # Do not print a warning if module wasn't found or silent mode is on.
        # Fix: the `silent` flag was previously ignored here, so warnings were
        # printed even in silent mode (the other variant of this function
        # already checks `not silent`).
        if "No module named 'openvino'" not in str(e) and not silent:
            print("[ WARNING ] Failed to import Inference Engine Python API in: {}".format(path_to_module))
            print("[ WARNING ] {}".format(e))

        # Send telemetry message about warning
        simplified_mo_version = v.get_simplified_mo_version()
        message = str(dict({
            "platform": platform.system(),
            "mo_version": simplified_mo_version,
            "ie_version": v.get_simplified_ie_version(env=os.environ),
            "python_version": sys.version,
            "error_type": classify_error_type(e),
        }))
        send_telemetry(simplified_mo_version, message, 'ie_import_failed')
        return False
def emit_ir(graph: Graph, argv: argparse.Namespace):
    """Emit the final IR (.xml/.bin) for the prepared graph.

    The IR is first written under a temporary ("_tmp") name. When the IE is
    available and new-style generation is enabled, the offline_transformations
    helper script is run over it (presumably producing the final IR — the
    success branch below assumes files under the original name exist). On
    failure without ``--transform`` the temporary IR is renamed into place as a
    fallback; with ``--transform`` an Error is raised instead. A telemetry
    event with the outcome is always sent.

    :param graph: model graph to serialize (passes are also applied to every
                  sub-graph recursively)
    :param argv: parsed command-line parameters (output dir, model name,
                 framework, transform, ie_is_available, ...)
    :raises Error: when --transform was requested but could not be applied
    :return: 0 on success
    """
    NormalizeTI().find_and_replace_pattern(graph)
    # Const-node cleanup/replacement applied to the graph and all sub-graphs.
    for_graph_and_each_sub_graph_recursively(graph, RemoveConstOps().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(graph, CreateConstNodesReplacement().find_and_replace_pattern)

    # Deep copies: the graph is cleared below, but this data is still needed
    # later by append_ir_info().
    mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None
    input_names = deepcopy(graph.graph['input_names']) if 'input_names' in graph.graph else []

    # Remove temporary ie_is_available key from argv not to have it in IR
    ie_is_available = argv.ie_is_available
    del argv.ie_is_available

    prepare_emit_ir(graph=graph,
                    data_type=graph.graph['cmd_params'].data_type,
                    output_dir=argv.output_dir,
                    output_model_name=argv.model_name,
                    mean_data=mean_data,
                    input_names=input_names,
                    meta_info=get_meta_info(argv),
                    # emit under the temporary "_tmp" name first
                    use_temporary_path=True)

    # This graph cleanup is required to avoid double memory consumption
    graph.clear()

    if not (argv.framework == 'tf' and argv.tensorflow_custom_operations_config_update):
        output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
        orig_model_name = os.path.normpath(os.path.join(output_dir, argv.model_name))

        return_code = "not executed"
        # This try-except is additional reinsurance that the IE
        # dependency search does not break the MO pipeline
        try:
            if not argv.legacy_ir_generation and ie_is_available:
                path_to_offline_transformations = os.path.join(os.path.realpath(os.path.dirname(__file__)),
                                                               'back', 'offline_transformations.py')
                status = subprocess.run([sys.executable, path_to_offline_transformations,
                                         "--input_model", orig_model_name,
                                         "--framework", argv.framework,
                                         "--transform", argv.transform], env=os.environ)
                return_code = status.returncode
        except Exception as e:
            return_code = "failed"
            log.error(e, extra={'is_warning': True})

        # Telemetry payload describing the offline transformations outcome;
        # note return_code may be the string "not executed" or "failed".
        message = str(dict({
            "platform": platform.system(),
            "mo_version": get_simplified_mo_version(),
            "ie_version": get_simplified_ie_version(env=os.environ),
            "python_version": sys.version,
            "return_code": return_code
        }))
        t = tm.Telemetry()
        t.send_event('mo', 'offline_transformations_status', message)

        # if IR wasn't produced by offline_transformations step we need to fallback to IR
        # produced by prepare_ir. This IR needs to be renamed from XXX_tmp.xml to XXX.xml
        suffixes = [".xml", ".bin", ".mapping"]
        if return_code != 0:
            if len(argv.transform) != 0:
                # Remove temporary IR before throwing exception
                for suf in suffixes:
                    path_to_file = orig_model_name + "_tmp" + suf
                    if os.path.exists(path_to_file):
                        os.remove(path_to_file)
                raise Error("Failed to apply transformations: {}".format(argv.transform))

            log.error("Using fallback to produce IR.", extra={'is_warning': True})
            for suf in suffixes:
                # remove existing files
                path_to_file = orig_model_name + suf
                if os.path.exists(path_to_file):
                    os.remove(path_to_file)

                # rename tmp IR to original name
                os.rename(orig_model_name + "_tmp" + suf, orig_model_name + suf)
        else:
            for suf in suffixes:
                # remove existing files
                path_to_file = orig_model_name + "_tmp" + suf
                if os.path.exists(path_to_file):
                    os.remove(path_to_file)

            # add meta information to IR
            append_ir_info(file=orig_model_name,
                           meta_info=get_meta_info(argv),
                           mean_data=mean_data,
                           input_names=input_names)

        print('[ SUCCESS ] Generated IR version {} model.'.format(get_ir_version(argv)))
        print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
        print('[ SUCCESS ] BIN file: {}.bin'.format(orig_model_name))

    return 0
def test_simplify_ie_version_custom(self): self.assertEqual( get_simplified_ie_version(version="2.1.custom_my/branch/3_4c8eae"), "custom")
def test_simplify_ie_version_release_neg(self): self.assertEqual( get_simplified_ie_version(version="custom_releases/2021/3_4c8eae"), "custom")
def test_simplify_ie_version_release(self): self.assertEqual( get_simplified_ie_version( version="2.1.custom_releases/2021/3_4c8eae"), "2021.3")
def import_core_modules(silent: bool, path_to_module: str):
    """Check that the InferenceEngine Python API can be imported.

    Attempts the IE / offline-transformations imports used by MO. When not
    silent, prints the discovered locations and versions and warns if the
    Model Optimizer and Inference Engine versions do not match; a telemetry
    event is sent in both the success and the failure path.

    :param silent: suppress stdout reporting when True
    :param path_to_module: path where the python API modules were searched for
                           (used only in the failure warning message)
    :return: True if all imports were successful and False otherwise
    """
    try:
        from openvino.inference_engine import IECore, get_version  # pylint: disable=import-error
        from openvino.offline_transformations import ApplyMOCTransformations, CheckAPI  # pylint: disable=import-error
        import openvino  # pylint: disable=import-error

        if silent:
            return True

        ie_version = str(get_version())
        mo_version = str(v.get_version())  # pylint: disable=no-member

        print("\t- {}: \t{}".format("Inference Engine found in", os.path.dirname(openvino.__file__)))
        print("{}: \t{}".format("Inference Engine version", ie_version))
        print("{}: \t {}".format("Model Optimizer version", mo_version))

        versions_mismatch = False
        # MO and IE version have a small difference in the beginning of version because
        # IE version also includes API version. For example:
        #       Inference Engine version: 2.1.custom_HEAD_4c8eae0ee2d403f8f5ae15b2c9ad19cfa5a9e1f9
        #       Model Optimizer version: custom_HEAD_4c8eae0ee2d403f8f5ae15b2c9ad19cfa5a9e1f9
        # So to match this versions we skip IE API version.
        if not re.match(r"^([0-9]+).([0-9]+).{}$".format(mo_version), ie_version):
            versions_mismatch = True
            extracted_mo_release_version = v.extract_release_version(mo_version)
            # (None, None) means no release version could be extracted -> custom build
            mo_is_custom = extracted_mo_release_version == (None, None)

            print("[ WARNING ] Model Optimizer and Inference Engine versions do no match.")
            print("[ WARNING ] Consider building the Inference Engine Python API from sources or reinstall OpenVINO (TM) toolkit using",
                  end=" ")
            if mo_is_custom:
                print("\"pip install openvino\" (may be incompatible with the current Model Optimizer version)")
            else:
                print("\"pip install openvino=={}.{}\"".format(*extracted_mo_release_version))

        # Telemetry about whether this environment has matching MO/IE versions.
        simplified_mo_version = v.get_simplified_mo_version()
        message = str(dict({
            "platform": platform.system(),
            "mo_version": simplified_mo_version,
            "ie_version": v.get_simplified_ie_version(version=ie_version),
            "versions_mismatch": versions_mismatch,
        }))
        send_telemetry(simplified_mo_version, message, 'ie_version_check')

        return True
    except Exception as e:
        # Do not print a warning if module wasn't found or silent mode is on
        if "No module named 'openvino'" not in str(e) and not silent:
            print("[ WARNING ] Failed to import Inference Engine Python API in: {}".format(path_to_module))
            print("[ WARNING ] {}".format(e))

        # Send telemetry message about warning
        simplified_mo_version = v.get_simplified_mo_version()
        message = str(dict({
            "platform": platform.system(),
            "mo_version": simplified_mo_version,
            "ie_version": v.get_simplified_ie_version(env=os.environ),
            "python_version": sys.version,
            "error_type": classify_error_type(e),
        }))
        send_telemetry(simplified_mo_version, message, 'ie_import_failed')
        return False