def _run_graph_optimizations(graph_def, input_arrays, output_arrays, graph=None):
  """Apply standard TensorFlow optimizations to the graph_def.

  Args:
    graph_def: Frozen GraphDef to be optimized.
    input_arrays: List of arrays that are considered inputs of the graph.
    output_arrays: List of arrays that are considered outputs of the graph.
    graph: TensorFlow Graph. Required when Eager mode is enabled. (default None)

  Returns:
    A new, optimized GraphDef.
  """
  meta_graph = _export_meta_graph(graph_def=graph_def, graph=graph)

  # Grappler discovers the fetch nodes through the 'train_op' collection,
  # so record every input and output tensor name there.
  fetch_collection = _meta_graph_pb2.CollectionDef()
  fetch_collection.node_list.value.extend(
      tensor.name for tensor in input_arrays + output_arrays)
  meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)

  config = _config_pb2.ConfigProto()
  rewriters = config.graph_options.rewrite_options
  rewriters.layout_optimizer = _rewriter_config_pb2.RewriterConfig.ON
  # Remapping produces fused ops such as _FusedConv2D, which TF Lite cannot
  # consume, so it is explicitly disabled.
  rewriters.remapping = _rewriter_config_pb2.RewriterConfig.OFF
  return _tf_optimizer.OptimizeGraph(config, meta_graph)
def run_graph_optimizations(graph_def, input_arrays, output_arrays, config, graph=None):
  """Apply standard TensorFlow optimizations to the graph_def.

  Args:
    graph_def: Frozen GraphDef to be optimized.
    input_arrays: List of arrays that are considered inputs of the graph.
    output_arrays: List of arrays that are considered outputs of the graph.
    config: tf.ConfigProto whose graph_options.rewrite_options select which
      Grappler passes to run.
    graph: TensorFlow Graph. Required when Eager mode is enabled. (default None)

  Returns:
    A new, optimized GraphDef.
  """
  meta_graph = _export_meta_graph(graph_def=graph_def, graph=graph)

  # We need to add a collection called 'train_op' so that grappler
  # knows what the outputs are.
  fetch_collection = _meta_graph_pb2.CollectionDef()
  for array in input_arrays + output_arrays:
    fetch_collection.node_list.value.append(array.name)
  meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)

  # Use the aliased module name `_tf_optimizer`, matching every sibling
  # helper in this file; the bare name `tf_optimizer` does not appear to be
  # bound here and would raise NameError at call time.
  return _tf_optimizer.OptimizeGraph(config, meta_graph)
def _run_graph_optimizations(graph_def, input_arrays, output_arrays):
  """Apply standard TensorFlow optimizations to the graph_def.

  Args:
    graph_def: Frozen GraphDef to be optimized.
    input_arrays: List of arrays that are considered inputs of the graph.
    output_arrays: List of arrays that are considered outputs of the graph.

  Returns:
    A new, optimized GraphDef.
  """
  meta_graph = _export_meta_graph(graph_def=graph_def)

  # Grappler reads its fetch set from the 'train_op' collection, so list the
  # name of every input and output tensor there.
  tensor_names = [tensor.name for tensor in input_arrays + output_arrays]
  fetch_collection = _meta_graph_pb2.CollectionDef()
  fetch_collection.node_list.value.extend(tensor_names)
  meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)

  config = _config_pb2.ConfigProto()
  rewrite_opts = config.graph_options.rewrite_options
  rewrite_opts.layout_optimizer = _rewriter_config_pb2.RewriterConfig.ON
  # Remapping would emit fused ops (e.g. _FusedConv2D) that TF Lite does not
  # support, so turn it off.
  rewrite_opts.remapping = _rewriter_config_pb2.RewriterConfig.OFF
  return _tf_optimizer.OptimizeGraph(config, meta_graph)
def _run_graph_optimizations(graph_def, output_arrays):
  """Apply standard TensorFlow optimizations to the graph_def.

  Args:
    graph_def: Frozen GraphDef to be optimized.
    output_arrays: List of arrays that are considered outputs of the graph.

  Returns:
    A new, optimized GraphDef.
  """
  meta_graph = _export_meta_graph(graph_def=graph_def)

  # Grappler determines the fetch nodes from the 'train_op' collection; the
  # entries of output_arrays are already node names, so add them directly.
  fetch_collection = _meta_graph_pb2.CollectionDef()
  fetch_collection.node_list.value.extend(output_arrays)
  meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)

  config = _config_pb2.ConfigProto()
  rewrite = config.graph_options.rewrite_options
  rewrite.layout_optimizer = _rewriter_config_pb2.RewriterConfig.ON
  return _tf_optimizer.OptimizeGraph(config, meta_graph)