def main(cli_parser: argparse.ArgumentParser, framework: str):
    """Parse CLI arguments and run the Model Optimizer conversion driver.

    :param cli_parser: pre-configured argument parser with MO options
    :param framework: framework name that overrides the parsed ``--framework``
    :return: return code of ``driver`` on success, 1 on any handled failure
    """
    try:
        # Initialize logger with 'ERROR' as default level to be able to form nice messages
        # before arg parser deliver log_level requested by user
        init_logger('ERROR', False)
        argv = cli_parser.parse_args()
        if framework:
            argv.framework = framework
        append_exp_keys_to_namespace(argv)
        return driver(argv)
    except (FileNotFoundError, NotADirectoryError) as e:
        # The OS message usually looks like "... No such file or directory: <path>".
        # Guard the split: indexing [1] unconditionally raised IndexError (masking
        # the real error) when the message did not contain the marker.
        parts = str(e).split('No such file or directory:')
        log.error('File {} was not found'.format(parts[1] if len(parts) > 1 else str(e)))
        log.debug(traceback.format_exc())
    except Error as err:
        log.error(err)
        log.debug(traceback.format_exc())
    except FrameworkError as err:
        log.error(err, extra={'framework_error': True})
        log.debug(traceback.format_exc())
    except Exception as err:
        log.error("-------------------------------------------------")
        log.error("----------------- INTERNAL ERROR ----------------")
        log.error("Unexpected exception happened.")
        log.error("Please contact Model Optimizer developers and forward the following information:")
        log.error(str(err))
        log.error(traceback.format_exc())
        log.error("---------------- END OF BUG REPORT --------------")
        log.error("-------------------------------------------------")
    # Every handled exception falls through to a non-zero exit code.
    return 1
def driver(argv: argparse.Namespace):
    """Run the IR conversion pipeline and report timing/memory statistics.

    :param argv: fully parsed Model Optimizer namespace
    :return: return code from ``emit_ir`` (0 on success)
    """
    init_logger(argv.log_level.upper(), argv.silent)
    started_at = datetime.datetime.now()

    ret_res = emit_ir(prepare_ir(argv), argv)
    if ret_res != 0:
        return ret_res

    total_seconds = (datetime.datetime.now() - started_at).total_seconds()
    print('[ SUCCESS ] Total execution time: {:.2f} seconds. '.format(total_seconds))

    try:
        # `resource` is POSIX-only; skip the memory report on platforms without it.
        import resource
        peak_mb = round(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
        # macOS reports ru_maxrss in bytes rather than kilobytes.
        if sys.platform == 'darwin':
            peak_mb = round(peak_mb / 1024)
        print('[ SUCCESS ] Memory consumed: {} MB. '.format(peak_mb))
    except ImportError:
        pass

    return ret_res
def main(cli_parser: argparse.ArgumentParser, fem: FrontEndManager, framework: str):
    """Entry point: parse args, run conversion, and report telemetry.

    :param cli_parser: pre-configured MO argument parser
    :param fem: frontend manager forwarded to the conversion pipeline via ``argv.feManager``
    :param framework: framework name that overrides the parsed ``--framework``
    :return: return code of ``driver`` on success, 1 on any handled failure
    """
    telemetry = tm.Telemetry(tid=get_tid(), app_name='Model Optimizer',
                             app_version=get_simplified_mo_version())
    telemetry.start_session('mo')
    telemetry.send_event('mo', 'version', get_simplified_mo_version())
    try:
        # Initialize logger with 'ERROR' as default level to be able to form nice messages
        # before arg parser deliver log_level requested by user
        init_logger('ERROR', False)
        argv = cli_parser.parse_args()
        send_params_info(argv, cli_parser)
        if framework:
            argv.framework = framework
        argv.feManager = fem

        # The update banner is suppressed in silent mode.
        ov_update_message = None
        if not hasattr(argv, 'silent') or not argv.silent:
            ov_update_message = get_ov_update_message()

        ret_code = driver(argv)
        if ov_update_message:
            print(ov_update_message)
        telemetry.send_event('mo', 'conversion_result', 'success')
        telemetry.end_session('mo')
        telemetry.force_shutdown(1.0)
        return ret_code
    except (FileNotFoundError, NotADirectoryError) as e:
        # Guard the split: indexing [1] unconditionally raised IndexError (masking
        # the real error) when the OS message lacked the expected marker.
        parts = str(e).split('No such file or directory:')
        log.error('File {} was not found'.format(parts[1] if len(parts) > 1 else str(e)))
        log.debug(traceback.format_exc())
    except Error as err:
        analysis_results = AnalysisResults()
        if analysis_results.get_messages() is not None:
            for el in analysis_results.get_messages():
                log.error(el, extra={'analysis_info': True})
        log.error(err)
        log.debug(traceback.format_exc())
    except FrameworkError as err:
        log.error(err, extra={'framework_error': True})
        log.debug(traceback.format_exc())
    except Exception as err:
        log.error("-------------------------------------------------")
        log.error("----------------- INTERNAL ERROR ----------------")
        log.error("Unexpected exception happened.")
        log.error("Please contact Model Optimizer developers and forward the following information:")
        log.error(str(err))
        log.error(traceback.format_exc())
        log.error("---------------- END OF BUG REPORT --------------")
        log.error("-------------------------------------------------")
    # Every failure path reports to telemetry and returns a non-zero exit code.
    telemetry.send_event('mo', 'conversion_result', 'fail')
    telemetry.end_session('mo')
    telemetry.force_shutdown(1.0)
    return 1
def convert_fp32(onnx_modelproto_bytes):
    """Convert a serialized ONNX ModelProto to an FP32 IR.

    :param onnx_modelproto_bytes: serialized ONNX model (bytes)
    :return: tuple of (weights as float32 numpy array, IR XML string) on success,
             1 on failure (kept for backward compatibility with callers)
    """
    try:
        init_logger('ERROR', False)
        weights, xml_string = driver_entry(onnx_modelproto_bytes, precision='FP32')
        float_array = np.asarray(weights, dtype=np.float32)
        return float_array, xml_string
    except Exception:
        # A bare `except:` also swallowed SystemExit/KeyboardInterrupt; narrow it.
        # Also dropped the unused local `framework = 'onnx'`.
        return 1
def convert_fp32(onnx_modelproto_bytes):
    """Convert a serialized ONNX ModelProto to an FP32 IR.

    :param onnx_modelproto_bytes: serialized ONNX model (bytes)
    :return: tuple of (weights as float32 numpy array, IR XML string) on success,
             1 on failure (kept for backward compatibility with callers)
    """
    try:
        init_logger('ERROR', False)
        weights, xml_string = driver_entry(onnx_modelproto_bytes, precision='FP32')
        float_array = np.asarray(weights, dtype=np.float32)
        return float_array, xml_string
    except Exception:
        # A bare `except:` also swallowed SystemExit/KeyboardInterrupt; narrow it.
        # Removed dead commented-out logging code and the unused local `framework`.
        return 1
def main(cli_parser: argparse.ArgumentParser, framework: str):
    """Parse CLI arguments and run the Model Optimizer conversion driver.

    :param cli_parser: pre-configured argument parser with MO options
    :param framework: framework name that overrides the parsed ``--framework``
    :return: return code of ``driver`` on success, 1 on any handled failure
    """
    try:
        # Initialize logger with 'ERROR' as default level to be able to form nice messages
        # before arg parser deliver log_level requested by user
        init_logger('ERROR', False)
        argv = cli_parser.parse_args()
        if framework:
            argv.framework = framework
        append_exp_keys_to_namespace(argv)

        # set output precision for operations producing bool values to be I32 as it was for the IRv7
        if argv.generate_deprecated_IR_V7:
            from mo.middle.passes.convert_data_type import SUPPORTED_DATA_TYPES
            # `np.bool` was a deprecated alias of the builtin `bool` and was removed
            # in NumPy 1.24; use the builtin directly (identical behavior).
            SUPPORTED_DATA_TYPES['bool'] = (bool, 'I32', 'boolean')

        # The update banner is suppressed in silent mode.
        ov_update_message = None
        if not hasattr(argv, 'silent') or not argv.silent:
            ov_update_message = get_ov_update_message()

        ret_code = driver(argv)
        if ov_update_message:
            print(ov_update_message)
        return ret_code
    except (FileNotFoundError, NotADirectoryError) as e:
        # Guard the split: indexing [1] unconditionally raised IndexError (masking
        # the real error) when the OS message lacked the expected marker.
        parts = str(e).split('No such file or directory:')
        log.error('File {} was not found'.format(parts[1] if len(parts) > 1 else str(e)))
        log.debug(traceback.format_exc())
    except Error as err:
        analysis_results = AnalysisResults()
        if analysis_results.get_messages() is not None:
            for el in analysis_results.get_messages():
                log.error(el, extra={'analysis_info': True})
        log.error(err)
        log.debug(traceback.format_exc())
    except FrameworkError as err:
        log.error(err, extra={'framework_error': True})
        log.debug(traceback.format_exc())
    except Exception as err:
        log.error("-------------------------------------------------")
        log.error("----------------- INTERNAL ERROR ----------------")
        log.error("Unexpected exception happened.")
        log.error("Please contact Model Optimizer developers and forward the following information:")
        log.error(str(err))
        log.error(traceback.format_exc())
        log.error("---------------- END OF BUG REPORT --------------")
        log.error("-------------------------------------------------")
    # Every handled exception falls through to a non-zero exit code.
    return 1
def main(cli_parser: argparse.ArgumentParser, framework: str):
    """Parse CLI arguments and run the Model Optimizer conversion driver.

    :param cli_parser: pre-configured argument parser with MO options
    :param framework: framework name that overrides the parsed ``--framework``
    :return: return code of ``driver`` on success, 1 on any handled failure
    """
    try:
        # Initialize logger with 'ERROR' as default level to be able to form nice messages
        # before arg parser deliver log_level requested by user
        init_logger('ERROR', False)
        argv = cli_parser.parse_args()
        if framework:
            argv.framework = framework

        # The update banner is suppressed in silent mode.
        ov_update_message = None
        if not hasattr(argv, 'silent') or not argv.silent:
            ov_update_message = get_ov_update_message()

        ret_code = driver(argv)
        if ov_update_message:
            print(ov_update_message)
        return ret_code
    except (FileNotFoundError, NotADirectoryError) as e:
        # Guard the split: indexing [1] unconditionally raised IndexError (masking
        # the real error) when the OS message lacked the expected marker.
        parts = str(e).split('No such file or directory:')
        log.error('File {} was not found'.format(parts[1] if len(parts) > 1 else str(e)))
        log.debug(traceback.format_exc())
    except Error as err:
        analysis_results = AnalysisResults()
        if analysis_results.get_messages() is not None:
            for el in analysis_results.get_messages():
                log.error(el, extra={'analysis_info': True})
        log.error(err)
        log.debug(traceback.format_exc())
    except FrameworkError as err:
        log.error(err, extra={'framework_error': True})
        log.debug(traceback.format_exc())
    except Exception as err:
        log.error("-------------------------------------------------")
        log.error("----------------- INTERNAL ERROR ----------------")
        log.error("Unexpected exception happened.")
        log.error("Please contact Model Optimizer developers and forward the following information:")
        log.error(str(err))
        log.error(traceback.format_exc())
        log.error("---------------- END OF BUG REPORT --------------")
        log.error("-------------------------------------------------")
    # Every handled exception falls through to a non-zero exit code.
    return 1
def driver(argv: argparse.Namespace):
    """Validate CLI arguments and dispatch conversion to the framework-specific pipeline.

    Deduces the source framework, checks required inputs, normalizes shape/mean/scale
    options, prepares the output directory and invokes the per-framework driver
    (tf / caffe / mxnet / kaldi / onnx).

    :param argv: fully parsed Model Optimizer namespace
    :return: return code of the framework pipeline (0 on success)
    :raises Error: on any invalid or inconsistent combination of arguments
    """
    init_logger(argv.log_level.upper(), argv.silent)
    start_time = datetime.datetime.now()

    is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx = deduce_framework_by_namespace(argv)

    # Exactly one framework flag must be deducible.
    if not any([is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx]):
        raise Error('Framework {} is not a valid target. Please use --framework with one from the list: caffe, tf, '
                    'mxnet, kaldi, onnx. ' + refer_to_faq_msg(15), argv.framework)

    # Per-framework checks that at least one model-source argument is present.
    if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph:
        raise Error('Path to input model or saved model dir is required: use --input_model, --saved_model_dir or '
                    '--input_meta_graph')
    elif is_mxnet and not argv.input_model and not argv.input_symbol and not argv.pretrained_model_name:
        raise Error('Path to input model or input symbol or pretrained_model_name is required: use --input_model or '
                    '--input_symbol or --pretrained_model_name')
    elif is_caffe and not argv.input_model and not argv.input_proto:
        raise Error('Path to input model or input proto is required: use --input_model or --input_proto')
    elif (is_kaldi or is_onnx) and not argv.input_model:
        raise Error('Path to input model is required: use --input_model.')

    log.debug(str(argv))
    log.debug("Model Optimizer started")

    # Derive the output IR name: explicit --model_name wins, then the input file stem.
    model_name = "<UNKNOWN_NAME>"
    if argv.model_name:
        model_name = argv.model_name
    elif argv.input_model:
        model_name = get_model_name(argv.input_model)
    elif is_tf and argv.saved_model_dir:
        model_name = "saved_model"
    elif is_tf and argv.input_meta_graph:
        model_name = get_model_name(argv.input_meta_graph)
    elif is_mxnet and argv.input_symbol:
        model_name = get_model_name(argv.input_symbol)

    log.debug('Output model name would be {}{{.xml, .bin}}'.format(model_name))

    # if --input_proto is not provided, try to retrieve another one
    # by suffix substitution from model file name
    if is_caffe and not argv.input_proto:
        argv.input_proto = replace_ext(argv.input_model, '.caffemodel', '.prototxt')
        if not argv.input_proto:
            raise Error("Cannot find prototxt file: for Caffe please specify --input_proto - a " +
                        "protobuf file that stores topology and --input_model that stores " +
                        "pretrained weights. " + refer_to_faq_msg(20))
        log.info('Deduced name for prototxt: {}'.format(argv.input_proto))

    if not argv.silent:
        print_argv(argv, is_caffe, is_tf, is_mxnet, is_kaldi, is_onnx, model_name)

    ret_code = check_requirements(framework=argv.framework)
    if ret_code:
        return ret_code

    if is_mxnet and not argv.input_shape:
        raise Error('Input shape is required to convert MXNet model. Please provide it with --input_shape. ' +
                    refer_to_faq_msg(16))

    # --mean_file and --mean_values are mutually exclusive (Caffe only).
    mean_file_offsets = None
    if is_caffe and argv.mean_file and argv.mean_values:
        raise Error('Both --mean_file and mean_values are specified. Specify either mean file or mean values. ' +
                    refer_to_faq_msg(17))
    elif is_caffe and argv.mean_file and argv.mean_file_offsets:
        values = get_tuple_values(argv.mean_file_offsets, t=int, num_exp_values=2)
        mean_file_offsets = np.array([int(x) for x in values[0].split(',')])
        if not all([offset >= 0 for offset in mean_file_offsets]):
            raise Error("Negative value specified for --mean_file_offsets option. "
                        "Please specify positive integer values in format '(x,y)'. " + refer_to_faq_msg(18))
        argv.mean_file_offsets = mean_file_offsets

    custom_layers_mapping_path = argv.k if is_caffe and argv.k else None

    # --scale and --scale_values are mutually exclusive.
    if argv.scale and argv.scale_values:
        raise Error('Both --scale and --scale_values are defined. Specify either scale factor or scale values per input ' +
                    'channels. ' + refer_to_faq_msg(19))
    if argv.scale and argv.scale < 1.0:
        # Likely user confusion: input values are divided by scale, not multiplied.
        log.error("The scale value is less than 1.0. This is most probably an issue because the scale value specifies "
                  "floating point value which all input values will be *divided*.",
                  extra={'is_warning': True})

    if argv.input_model and (is_tf and argv.saved_model_dir):
        raise Error('Both --input_model and --saved_model_dir are defined. '
                    'Specify either input model or saved model directory.')

    if is_tf:
        if argv.saved_model_tags is not None:
            if ' ' in argv.saved_model_tags:
                raise Error('Incorrect saved model tag was provided. Specify --saved_model_tags with no spaces in it')
            argv.saved_model_tags = argv.saved_model_tags.split(',')

    # Normalize comma-separated/tuple-style CLI options into structured values.
    argv.output = argv.output.split(',') if argv.output else None
    argv.placeholder_shapes = get_placeholder_shapes(argv.input, argv.input_shape, argv.batch)

    mean_values = parse_tuple_pairs(argv.mean_values)
    scale_values = parse_tuple_pairs(argv.scale_values)
    mean_scale = get_mean_scale_dictionary(mean_values, scale_values, argv.input)
    argv.mean_scale_values = mean_scale

    # Create the output directory if needed, and verify it is writable.
    if not os.path.exists(argv.output_dir):
        try:
            os.makedirs(argv.output_dir)
        except PermissionError as e:
            raise Error("Failed to create directory {}. Permission denied! " + refer_to_faq_msg(22),
                        argv.output_dir) from e
    else:
        if not os.access(argv.output_dir, os.W_OK):
            raise Error("Output directory {} is not writable for current user. " + refer_to_faq_msg(22),
                        argv.output_dir)

    log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes))

    ret_res = 1
    if hasattr(argv, 'extensions') and argv.extensions and argv.extensions != '':
        extensions = argv.extensions.split(',')
    else:
        extensions = None

    argv.freeze_placeholder_with_value, argv.input = get_freeze_placeholder_values(
        argv.input, argv.freeze_placeholder_with_value)

    # Dispatch to the framework-specific pipeline; imports are done lazily so only
    # the needed framework dependencies are loaded.
    if is_tf:
        import mo.pipeline.tf as mo_tf
        from mo.front.tf.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions, get_front_classes)
        ret_res = mo_tf.tf2nx(argv, argv.input_model, model_name, argv.output_dir,
                              is_binary=not argv.input_model_is_text)
    elif is_caffe:
        import mo.pipeline.caffe as mo_caffe
        from mo.front.caffe.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions, get_front_classes)
        ret_res = mo_caffe.driver(argv, argv.input_proto, argv.input_model, model_name, argv.output_dir,
                                  argv.caffe_parser_path,
                                  custom_layers_mapping_path=custom_layers_mapping_path)
    elif is_mxnet:
        import mo.pipeline.mx as mo_mxnet
        from mo.front.mxnet.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions, get_front_classes)
        ret_res = mo_mxnet.driver(argv, argv.input_model, model_name, argv.output_dir)
    elif is_kaldi:
        import mo.pipeline.kaldi as mo_kaldi
        from mo.front.kaldi.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions, get_front_classes)
        ret_res = mo_kaldi.driver(argv, argv.input_model, model_name, argv.output_dir)
    elif is_onnx:
        import mo.pipeline.onnx as mo_onnx
        from mo.front.onnx.register_custom_ops import get_front_classes
        import_extensions.load_dirs(argv.framework, extensions, get_front_classes)
        ret_res = mo_onnx.driver(argv, argv.input_model, model_name, argv.output_dir)

    if ret_res != 0:
        return ret_res

    # Print the success summary unless this run only updated the TF custom-ops config.
    if not (is_tf and argv.tensorflow_custom_operations_config_update):
        output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
        print('\n[ SUCCESS ] Generated IR model.')
        print('[ SUCCESS ] XML file: {}.xml'.format(os.path.join(output_dir, model_name)))
        print('[ SUCCESS ] BIN file: {}.bin'.format(os.path.join(output_dir, model_name)))
        elapsed_time = datetime.datetime.now() - start_time
        print('[ SUCCESS ] Total execution time: {:.2f} seconds. '.format(elapsed_time.total_seconds()))
    return ret_res
# Copyright (C) 2020-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os import tempfile from copy import deepcopy from mo.graph.graph import Graph from mo.utils.ir_reader.restore_graph import restore_graph_from_ir, save_restored_graph from mo.utils.logger import init_logger from openvino.inference_engine import IECore # pylint: disable=E0611 from openvino.offline_transformations import ApplyPOTTransformations # pylint: disable=import-error,no-name-in-module from ..graph.passes import ModelPreprocessor, remove_converts, add_removed_converts from ..utils.logger import stdout_redirect init_logger('ERROR', False) ie = IECore() def load_graph(model_config, target_device='ANY'): """ Loads model from specified path :return NetworkX model """ special_transform_devices = ['GNA'] serialized_bin_path = os.path.join(tempfile.gettempdir(), 'serialized_ir.bin') serialized_xml_path = os.path.join(tempfile.gettempdir(), 'serialized_ir.xml') bin_path = model_config.weights xml_path = model_config.model