def save_restored_graph(graph: Graph, path: str, meta_data, name=None):
    """
    Run the back-stage transforms required to serialize a graph that was
    restored from IR, then write the IR and its metadata to disk.

    :param graph: Graph to save
    :param path: Path to saved IR
    :param meta_data: Namespace with converting parameters restored from IR
    :param name: Name for saved IR
    :return:
    """
    name = graph.name if name is None else name

    if 'data_type' in meta_data:
        data_type = data_type_str_to_precision(
            graph.graph['cmd_params'].data_type)
    else:
        log.debug(
            'Provided `meta_data` does not contain `data_type` parameter. Set `data_type`'
            ' parameter value to `FP32`.')
        # Fall back to FP32; all restored constants are saved in this precision.
        data_type = 'FP32'
        # Required by the graph transformations below; this attribute is not
        # serialized into the IR. Constants and placeholders keep the types
        # they were restored with.
        graph.graph['cmd_params'].data_type = data_type

    assert data_type in ['FP16', 'FP32'], '`data_type` value {} is not supported by MO,' \
                                          ' cannot save graph'.format(data_type)

    # Ordered subset of MO back-stage passes; the order matters, do not change it.
    back_stage_transforms = [
        ConvolutionWithGroupsResolver,
        StridedSliceMasksNormalizer,
        PackBinaryWeights,
        BlobNormalizer,
        ConvolutionNormalizer,
        MarkNodesWithShapeValues,
    ]
    apply_replacements_list(graph, back_stage_transforms)

    # These transformations ship with enabled=False, so they are invoked
    # explicitly here (RemoveConstOps first, then CreateConstNodesReplacement).
    for pass_instance in (RemoveConstOps(), CreateConstNodesReplacement()):
        for_graph_and_each_sub_graph_recursively(
            graph, pass_instance.find_and_replace_pattern)

    prepare_emit_ir(graph, data_type, path, name,
                    meta_info=meta_data, used_by_ir_reader=True)
def emit_ir(graph: Graph, argv: argparse.Namespace):
    """
    Serialize the converted graph to IR, run offline transformations on the
    produced model, report status via telemetry, and print the result paths.

    :param graph: Graph to serialize; it is cleared as a side effect.
    :param argv: Namespace of command-line parameters; mutated in place
                 (`feManager` is removed, `data_type` may be overwritten).
    :return: 0 on success; raises Error if offline transformations fail.
    """
    NormalizeTI().find_and_replace_pattern(graph)
    # These passes are run explicitly here (presumably they are not part of
    # the regular enabled pipeline — see save_restored_graph for the same pair).
    for_graph_and_each_sub_graph_recursively(
        graph, RemoveConstOps().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(
        graph, CreateConstNodesReplacement().find_and_replace_pattern)

    # NOTE(review): feManager is dropped from argv before get_meta_info(argv)
    # is called — presumably so it is not recorded in the IR meta info; confirm.
    if 'feManager' in argv:
        del argv.feManager

    # Deep-copy mean data / input names now: graph.clear() below destroys the
    # graph, but these values are still needed for append_ir_info afterwards.
    mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None
    input_names = deepcopy(
        graph.graph['input_names']) if 'input_names' in graph.graph else []

    # First emit to a temporary path; offline transformations then produce the
    # final model files from it.
    prepare_emit_ir(graph=graph,
                    data_type=graph.graph['cmd_params'].data_type,
                    output_dir=argv.output_dir,
                    output_model_name=argv.model_name,
                    mean_data=mean_data,
                    input_names=input_names,
                    meta_info=get_meta_info(argv),
                    use_temporary_path=True)

    # This graph cleanup is required to avoid double memory consumption
    graph.clear()

    if not (argv.framework == 'tf' and argv.tensorflow_custom_operations_config_update):
        output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
        orig_model_name = os.path.normpath(
            os.path.join(output_dir, argv.model_name))

        # return_code is a status tag for telemetry: "not executed" / 0 / "failed".
        return_code = "not executed"
        try:
            if not argv.legacy_ir_generation:
                # Imported lazily: the offline-transformations backend is only
                # needed (and only loadable) on this path.
                from openvino.tools.mo.back.offline_transformations import apply_offline_transformations
                apply_offline_transformations(orig_model_name, argv)
                if "compress_fp16" in argv and argv.compress_fp16:
                    # restore data_type cmd parameter
                    argv.data_type = 'FP16'
            return_code = 0
        except Exception as e:
            return_code = "failed"
            log.error(e)

        # Report the offline-transformations outcome via telemetry regardless
        # of success or failure.
        message = str(
            dict({
                "platform": platform.system(),
                "mo_version": get_simplified_mo_version(),
                "ie_version": get_simplified_ie_version(env=os.environ),
                "python_version": sys.version,
                "return_code": return_code
            }))
        t = tm.Telemetry()
        t.send_event('mo', 'offline_transformations_status', message)

        if return_code != 0:
            raise Error("offline transformations step has failed.")

        # Clean up the temporary "_tmp" files produced by the first emit.
        for suf in [".xml", ".bin", ".mapping"]:
            # remove existing files
            path_to_file = orig_model_name + "_tmp" + suf
            if os.path.exists(path_to_file):
                os.remove(path_to_file)

        # add meta information to IR
        append_ir_info(file=orig_model_name,
                       meta_info=get_meta_info(argv),
                       mean_data=mean_data,
                       input_names=input_names)

        print('[ SUCCESS ] Generated IR version {} model.'.format(
            get_ir_version(argv)))
        print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
        print('[ SUCCESS ] BIN file: {}.bin'.format(orig_model_name))

    return 0