def convert_model_to_onnx(frozen_graph_path, end_node_names, onnx_output_path):
    """Reimplementation of the TensorFlow-onnx official tutorial convert the proto buff to onnx file:

    Parameters
    -----------
    frozen_graph_path : string
        the path where your frozen graph file save.
    end_node_names : string
        the name of the end node in your graph you want to get in your proto buff
    onnx_output_path : string
        the path where you want to save the onnx file.

    References
    -----------
    - `onnx-tf exporting tutorial <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxTensorflowExport.ipynb>`
    """
    with tf.gfile.GFile(frozen_graph_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        onnx_model = tensorflow_graph_to_onnx_model(graph_def,
                                                    end_node_names,
                                                    opset=6)
        with open(onnx_output_path, "wb") as onnx_file:
            onnx_file.write(onnx_model.SerializeToString())
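A minimal usage sketch (the paths and node name below are illustrative, not from the original):

convert_model_to_onnx("frozen_graph.pb",   # hypothetical frozen graph
                      "output/Softmax",    # hypothetical output node name
                      "model.onnx")        # where to write the ONNX file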
Example #2
  def do_test_expected(self):
    tf.reset_default_graph()
    tf_op = test_data[1]
    output_name = test_data[2]
    inputs = test_data[3]
    attrs = test_data[4]

    # Now construct input feed dict
    # keyed by input name
    onnx_feed_dict = {}
    # keyed by placeholder op
    tf_feed_dict = {}
    tf_param_list = []
    for idx, input_tensor in enumerate(inputs):
      if isinstance(input_tensor, np.ndarray):
        placeholder = tf.placeholder(
            input_tensor.dtype, shape=input_tensor.shape, name="in_" + str(idx))
        onnx_feed_dict["in_" + str(idx)] = input_tensor
        tf_feed_dict[placeholder] = input_tensor
        tf_param_list.append(placeholder)
      else:
        tf_param_list.append(input_tensor)
    test_op = tf_op(*tf_param_list, **attrs)
    tf_graph = tf.get_default_graph().as_graph_def(add_shapes=True)
    # Construct onnx graph, run with backend.
    onnx_model = tensorflow_graph_to_onnx_model(
        tf_graph,
        output_name,
        ignore_unimplemented=test_option.get("ignore_unimplemented", False))
    if not test_option.get("ignore_unimplemented", False):
      checker.check_model(onnx_model)
      backend_rep = prepare(onnx_model)
      backend_output = []
      backend_rep_outputs = backend_rep.run(onnx_feed_dict)
      for output in backend_rep.outputs:
        backend_output.append(backend_rep_outputs[output])
      backend_output = np.asarray(backend_output)
      backend_output = np.squeeze(
          backend_output, 0) if backend_output.shape[0] == 1 else backend_output

      with tf.Session() as sess:
        tf_output = sess.run(test_op, tf_feed_dict)

      # make sure backend_output and tf_output are Iterable
      if backend_output.ndim == 0:
        backend_output = backend_output.reshape(1)
      if not isinstance(tf_output, Iterable):
        tf_output = [tf_output]

      # skip comparison if test_option specifies that
      # the test is call only.
      if test_option.get("call_only", False):
        return
      for backend_o, tf_o in zip(backend_output, tf_output):
        np.testing.assert_allclose(backend_o, tf_o, rtol=1e-3, atol=1e-7)
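For context, `test_data` and `test_option` come from the surrounding test harness. A minimal sketch of the layout this method assumes (the op, names, and values are illustrative):

import numpy as np
import tensorflow as tf

# Hypothetical fixture matching the indices used above.
test_data = [
    "test_relu",                                # [0] test name
    tf.nn.relu,                                 # [1] tf_op
    ["relu_out"],                               # [2] output_name
    [np.random.rand(2, 3).astype(np.float32)],  # [3] inputs
    {"name": "relu_out"},                       # [4] attrs
]
test_option = {"ignore_unimplemented": False, "call_only": False}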
Example #3
    def do_test_expected(self):
        tf.reset_default_graph()
        work_dir = "".join([test_model["name"], "-", "workspace"])
        work_dir_prefix = work_dir + "/"
        download_and_extract(test_model["asset_url"], work_dir)
        freeze_graph.freeze_graph(
            work_dir_prefix + test_model["graph_proto_path"], "", True,
            work_dir_prefix + test_model["checkpoint_path"],
            ",".join(test_model["outputs"]), "", "",
            work_dir_prefix + "frozen_graph.pb", "", "")

        with tf.gfile.GFile(work_dir_prefix + "frozen_graph.pb", "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        with tf.Graph().as_default() as graph:
            tf.import_graph_def(graph_def,
                                input_map=None,
                                return_elements=None,
                                name="",
                                producer_op_list=None)

        # TensorFlow feed dict is keyed by tensor.
        tf_feed_dict = {}
        # Backend feed dict is keyed by tensor names.
        backend_feed_dict = {}
        for name, shape in test_model["inputs"].items():
            x_val = get_rnd(shape)
            tf_feed_dict[graph.get_tensor_by_name(name + ":0")] = x_val
            backend_feed_dict[name] = x_val

        tf_output_tensors = []
        backend_output_names = []
        for name in test_model["outputs"]:
            tf_output_tensors.append(graph.get_tensor_by_name(name + ":0"))
            backend_output_names.append(name)

        with tf.Session(graph=graph) as sess:
            logging.debug("ops in the graph:")
            logging.debug(graph.get_operations())
            output_tf = sess.run(tf_output_tensors, feed_dict=tf_feed_dict)

        onnx_model = tensorflow_graph_to_onnx_model(graph_def,
                                                    backend_output_names)

        tf_rep = prepare(onnx_model)
        output_onnx_tf = tf_rep.run(backend_feed_dict)

        assert len(output_tf) == len(output_onnx_tf)
        for tf_output, onnx_backend_output in zip(output_tf, output_onnx_tf):
            np.testing.assert_allclose(tf_output,
                                       onnx_backend_output,
                                       rtol=1e-3,
                                       atol=1e-7)
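Here `test_model` is a fixture describing the model under test; a minimal sketch of the structure this method assumes (the URL and paths are placeholders):

# Hypothetical fixture matching the keys accessed above.
test_model = {
    "name": "mnist",
    "asset_url": "https://example.com/mnist_assets.tar.gz",  # illustrative URL
    "graph_proto_path": "graph.pb",
    "checkpoint_path": "model.ckpt",
    "inputs": {"input": [1, 784]},   # input node name -> shape
    "outputs": ["output"],           # output node names
}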
Example #4
def convert(infile, outfile, convert_to, **kwargs):
  """Convert pb.

  Args:
    infile: Input path.
    outfile: Output path.
    convert_to: Format converted to.
    **kwargs: Other args for converting.

  Returns:
    None.
  """
  if convert_to == "tf":
    logger.info("Start converting onnx pb to tf pb:")
    onnx_model = onnx.load(infile)
    tf_rep = backend.prepare(onnx_model, **kwargs)
    tf_rep.export_graph(outfile)
  elif convert_to == "onnx":
    ext = os.path.splitext(infile)[1]
    logger.info("Start converting tf pb to onnx pb:")
    if ext == ".pb":
      with open(infile, "rb") as f:
        graph_def = graph_pb2.GraphDef()
        graph_def.ParseFromString(f.read())
    elif ext == ".ckpt":
      latest_ckpt = tf.train.latest_checkpoint(os.path.dirname(infile))
      saver = tf.train.import_meta_graph(latest_ckpt + ".meta")
      with tf.Session() as sess:
        sess.run([
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ])
        saver.restore(sess, latest_ckpt)
        output_node_names = get_output_node_names(sess.graph.as_graph_def())
        graph_def = tf.graph_util.convert_variables_to_constants(
            sess, sess.graph.as_graph_def(add_shapes=True), output_node_names)
    else:
      raise ValueError(
          "Input file is not supported. Should be .pb or .ckpt, but get {}".
          format(ext))
    onnx_model = frontend.tensorflow_graph_to_onnx_model(
        graph_def, get_output_node_names(graph_def), **kwargs)
    onnx.save(onnx_model, outfile)
  logger.info("Converting completes successfully.")
Example #5
def front(pb_name):
    graph_def = graph_pb2.GraphDef()

    with open(os.path.join('pb', pb_name), "rb") as f:
        graph_def.ParseFromString(f.read())
        node_def = graph_def.node[-1]
    # _opset = 6
    # defs.ONNX_DOMAIN = 'io.leapmind'
    # defs.get_all_schemas_with_history()
    # model = tensorflow_graph_to_onnx_model(graph_def, node_def.name, opset=_opset)
    model = tensorflow_graph_to_onnx_model(graph_def,
                                           node_def.name,
                                           ignore_unimplemented=True)
    # ctx = checker.DEFAULT_CONTEXT
    # ctx.opset_imports = {'': _opset, 'io.leapmind': 1}
    checker.check_graph(model.graph)
    # Note: str.replace rewrites every 'tf' in the path,
    # e.g. 'pb/model_tf.pb' -> 'pb/model_onnx.pb'.
    out_path = os.path.join('pb', pb_name).replace('tf', 'onnx')
    with open(out_path, 'wb') as out_file:
        out_file.write(model.SerializeToString())
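A hypothetical invocation, assuming the frozen graph's filename contains a 'tf' marker so the path rewrite above yields a distinct output file:

front('model_tf.pb')  # reads pb/model_tf.pb, writes pb/model_onnx.pb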
Example #7
import tensorflow as tf

#from model_converters import KerasToTensorflow
from onnx_tf.frontend import tensorflow_graph_to_onnx_model

# convert a TensorFlow frozen .pb graph to ONNX format
with tf.gfile.GFile("models/model.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    onnx_model = tensorflow_graph_to_onnx_model(graph_def, "outputs", opset=6)

    with open("models/model.onnx", "wb") as out_file:
        out_file.write(onnx_model.SerializeToString())
Example #8
from onnx_tf.frontend import tensorflow_graph_to_onnx_model
import tensorflow as tf

with tf.gfile.GFile(
        "/home/muffadall/work/github_repo/tensorflow-to-onnx-convert/graphs/frozen_graph.pb",
        "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    onnx_model = tensorflow_graph_to_onnx_model(graph_def,
                                                "global_step/read",
                                                opset=0)

    with open(
            "/home/muffadall/work/github_repo/tensorflow-to-onnx-convert/graphs/mnist.onnx",
            "wb") as out_file:
        out_file.write(onnx_model.SerializeToString())
Example #9
import tensorflow as tf
from onnx_tf.frontend import tensorflow_graph_to_onnx_model

with tf.gfile.GFile("model/cosmoGAN_frozen.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

    onnx_model = tensorflow_graph_to_onnx_model(graph_def=graph_def,
                                                output="generator/Tanh",
                                                opset=0, ignore_unimplemented=True)

    with open("model/cosmoGAN.onnx", "wb") as out_file:
        out_file.write(onnx_model.SerializeToString())
Example #10
from tensorflow.core.framework import graph_pb2

from onnx_tf.frontend import tensorflow_graph_to_onnx_model
from onnx_tf.pb_wrapper import TensorflowGraph

graph_def = graph_pb2.GraphDef()
with open("input_path", "rb") as f:  # load tf graph def
    graph_def.ParseFromString(f.read())
output = TensorflowGraph.get_output_node_names(
    graph_def)  # get output node names

model = tensorflow_graph_to_onnx_model(
    graph_def, output)  # convert tf graph to onnx model
with open("output_path", 'wb') as f:
    f.write(model.SerializeToString())
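As a sanity check, the saved file can be loaded back and validated with the ONNX checker (a minimal sketch; "output_path" is the same placeholder path used above):

import onnx

saved_model = onnx.load("output_path")
onnx.checker.check_model(saved_model)  # raises if the model is malformed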
Example #11
def convert(infile, outfile, convert_to, graph=None, **kwargs):
    """Convert pb.

  Args:
    infile: Input path.
    outfile: Output path.
    convert_to: Format converted to.
    graph: Inference graph.
    **kwargs: Other args for converting.

  Returns:
    None.
  """
    if convert_to == "tf":
        logger.info("Start converting onnx pb to tf pb:")
        onnx_model = onnx.load(infile)
        tf_rep = backend.prepare(onnx_model, **kwargs)
        tf_rep.export_graph(outfile)
    elif convert_to == "onnx":
        ext = os.path.splitext(infile)[1]
        logger.info("Start converting tf pb to onnx pb:")
        if ext == ".pb":
            with open(infile, "rb") as f:
                graph_def = graph_pb2.GraphDef()
                graph_def.ParseFromString(f.read())
        elif ext == ".ckpt":
            latest_ckpt = tf.train.latest_checkpoint(os.path.dirname(infile))
            saver = tf.train.import_meta_graph(latest_ckpt + ".meta")
            temp_file_suffix = get_unique_suffix()
            workdir = 'onnx-tf_workdir_{}'.format(temp_file_suffix)
            with tf.Session() as sess:
                sess.run([
                    tf.global_variables_initializer(),
                    tf.local_variables_initializer()
                ])
                saver.restore(sess, latest_ckpt)
                # Take users' hint or deduce output node automatically.
                kwargs["output"] = kwargs.get(
                    "output", None) or TensorflowGraph.get_output_node_names(
                        sess.graph.as_graph_def())

                # Save the graph to disk for freezing.
                tf.train.write_graph(sess.graph.as_graph_def(add_shapes=True),
                                     workdir,
                                     "input_model.pb",
                                     as_text=False)

            # Freeze graph:
            freeze_graph.freeze_graph(
                input_graph=graph or workdir + "/input_model.pb",
                input_saver="",
                input_binary=True,
                input_checkpoint=latest_ckpt,
                output_node_names=",".join(kwargs["output"]),
                restore_op_name="",
                filename_tensor_name="",
                output_graph=workdir + "/frozen_model.pb",
                clear_devices=True,
                initializer_nodes="")

            # Load back the frozen graph.
            with open(workdir + "/frozen_model.pb", "rb") as f:
                graph_def = graph_pb2.GraphDef()
                graph_def.ParseFromString(f.read())

            # Remove work directory.
            shutil.rmtree(workdir)
        else:
            raise ValueError(
                "Input file is not supported. Should be .pb or .ckpt, but got {}"
                .format(ext))

        if "rnn_type" in kwargs:
            onnx_model = experiment_frontend.rnn_tf_graph_to_onnx_model(
                graph_def, **kwargs)
        else:
            onnx_model = frontend.tensorflow_graph_to_onnx_model(
                graph_def, **kwargs)
        onnx.save(onnx_model, outfile)
    logger.info("Converting completes successfully.")
Example #12
import tensorflow as tf
from onnx_tf.frontend import tensorflow_graph_to_onnx_model

with tf.gfile.GFile("ckpt/frozen_model.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    onnx_model = tensorflow_graph_to_onnx_model(graph_def,
                                                "output/add",
                                                opset=4)

    with open("mnist.onnx", "wb") as out_file:
        out_file.write(onnx_model.SerializeToString())

    print(onnx_model.graph.node[0])
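To verify the export end to end, the file can be loaded back and run with the onnx-tf backend (a sketch; the "input" tensor name and 1x784 shape are assumptions for an MNIST-style graph):

import numpy as np
import onnx
from onnx_tf.backend import prepare

tf_rep = prepare(onnx.load("mnist.onnx"))
dummy = np.random.rand(1, 784).astype(np.float32)  # match the real input shape
print(tf_rep.run({"input": dummy}))                # "input" is hypothetical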
Example #13
# Get the latest evaluation result
try:
    with open("run_id.json") as f:
        config = json.load(f)
    if not config["run_id"]:
        raise Exception(
            "No new model to register; the existing production model performs better")
except Exception:
    print("No new model to register; the existing production model performs better")
    # raise Exception('No new model to register; the existing production model performs better')
    sys.exit(0)

with tf.gfile.GFile("frozen_graph.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    onnx_model = tensorflow_graph_to_onnx_model(graph_def, "fc2/add", opset=6)

    with open("mnist.onnx", "wb") as out_file:
        out_file.write(onnx_model.SerializeToString())

print(onnx_model.graph.node[0])

run_id = config["run_id"]
experiment_name = config["experiment_name"]
exp = Experiment(workspace=ws, name=experiment_name)

run = Run(experiment=exp, run_id=run_id)
run.get_file_names()
print("Run ID for last run: {}".format(run_id))
Example #14
from onnx_tf.frontend import tensorflow_graph_to_onnx_model
import tensorflow as tf

with tf.gfile.GFile("yolov2_pb/frozen_yolov2.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    onnx_model = tensorflow_graph_to_onnx_model(graph_def,
                                                "conv_dec/BiasAdd",
                                                opset=0)

    with open("yolov2_pb/onnx_yolov2.onnx", "wb") as out_file:
        out_file.write(onnx_model.SerializeToString())
Example #15
import tensorflow as tf
from onnx_tf.frontend import tensorflow_graph_to_onnx_model

with tf.gfile.GFile("../model/original/hep_frozen_bs_64.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

    # Prune training-only nodes from the graph
    pruned_graph = tf.graph_util.remove_training_nodes(graph_def)
    print(type(pruned_graph))

    # Convert the pruned graph to an ONNX model
    onnx_model = tensorflow_graph_to_onnx_model(graph_def=pruned_graph,
                                                output="Softmax",
                                                opset=0, ignore_unimplemented=True)

    with open("../model/onnx/hep_frozen_bs_64.onnx", "wb") as out_file:
        out_file.write(onnx_model.SerializeToString())

    for node in onnx_model.graph.node:
        print(node)
    #print(onnx_model.graph.node[-1])