Example #1
def main():
    args = get_args()

    opset = tf2onnx.utils.find_opset(args.opset)
    print("using tensorflow={}, onnx={}, opset={}, tfonnx={}/{}".format(
        tf.__version__, onnx.__version__, opset, tf2onnx.__version__,
        tf2onnx.version.git_version[:6]))

    # override unknown dimensions from -1 to 1 (i.e. batch size 1) since not every
    # runtime supports unknown dimensions.
    tf2onnx.utils.ONNX_UNKNOWN_DIMENSION = args.unknown_dim

    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {
            op: default_custom_op_handler
            for op in args.custom_ops.split(",")
        }
        extra_opset = [helper.make_opsetid(_TENSORFLOW_DOMAIN, 1)]
    else:
        custom_ops = {}
        extra_opset = None

    graph_def = tf.GraphDef()
    with tf.gfile.GFile(args.input, 'rb') as f:
        graph_def.ParseFromString(f.read())

    # TODO: consider enabling const folding by default?
    graph_def = tf_optimize(args.inputs, args.outputs, graph_def,
                            args.fold_const)
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             verbose=args.verbose,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=args.inputs,
                             output_names=args.outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

    new_model_proto = GraphUtil.opt_transposes_with_graph(
        g,
        "converted from {}".format(args.input),
        optimize=not args.continue_on_error)
    if new_model_proto:
        model_proto = new_model_proto
    else:
        print("NON-CRITICAL: optimizers were not applied successfully")

    # write onnx graph
    if args.output:
        with open(args.output, "wb") as f:
            f.write(model_proto.SerializeToString())
            print("\nCompleted successfully; the ONNX model was generated at " +
                  args.output)
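
The main() above relies on a get_args() helper that the snippet does not show. A minimal argparse sketch covering only the flags this example actually reads (all flag names below are assumptions inferred from the args attributes used in main(), not the real tf2onnx CLI definition):

import argparse

def get_args():
    # Hypothetical argument parser; flag names mirror the attributes referenced in main()
    parser = argparse.ArgumentParser(description="convert a frozen TensorFlow graph to ONNX")
    parser.add_argument("--input", required=True, help="frozen .pb file to convert")
    parser.add_argument("--output", help="path of the .onnx file to write")
    parser.add_argument("--inputs", help="comma-separated input tensor names (with :0 ports)")
    parser.add_argument("--outputs", help="comma-separated output tensor names (with :0 ports)")
    parser.add_argument("--opset", type=int, default=None, help="target ONNX opset")
    parser.add_argument("--unknown-dim", type=int, default=1, help="value used for unknown dimensions")
    parser.add_argument("--custom-ops", help="comma-separated custom op names")
    parser.add_argument("--target", default=None)
    parser.add_argument("--shape-override", default=None)
    parser.add_argument("--inputs-as-nchw", default=None)
    parser.add_argument("--fold-const", action="store_true")
    parser.add_argument("--continue-on-error", action="store_true")
    parser.add_argument("--verbose", action="store_true")
    args = parser.parse_args()
    # the converter expects lists of tensor names
    if args.inputs:
        args.inputs = args.inputs.split(",")
    if args.outputs:
        args.outputs = args.outputs.split(",")
    return args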
Example #2
def main():
    args = get_args()

    print("using tensorflow={}, onnx={}".format(tf.__version__,
                                                onnx.__version__))

    # override unknown dimensions from -1 to 1 (i.e. batch size 1) since not every
    # runtime supports unknown dimensions.
    tf2onnx.utils.ONNX_UNKNOWN_DIMENSION = args.unknown_dim

    graph_def = tf.GraphDef()
    with tf.gfile.FastGFile(args.input, 'rb') as f:
        graph_def.ParseFromString(f.read())
    graph_def = tf_optimize(None, args.inputs, args.outputs, graph_def)
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph) as sess:
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             verbose=args.verbose,
                             target=args.target,
                             opset=args.opset)

    model_proto = g.make_model("converted from {}".format(args.input),
                               args.inputs, args.outputs)

    # write onnx graph
    if args.output:
        with open(args.output, "wb") as f:
            f.write(model_proto.SerializeToString())
Example #3
def convert_to_onnx(model_filepath, onnx_filepath, output_node_names):
    """
    Convert the model to an ONNX file, which can in turn be used for TensorRT inference

    Arguments:
    model_filepath: the path to the frozen .pb file
    onnx_filepath: the path where the ONNX file should be saved
    output_node_names: list of output node names
    """
    # tf2onnx expects the node names in the format "input/output_node_name:port_id".
    # Hence, we should provide the port ID before conversion.
    input_node_names = [kInputName + ":0"]
    output_node_names = list(map(lambda x: x + ":0", output_node_names))
    # Use in-built function from tf2onnx to import the graph and optimize for conversion
    graph_def, inputs, outputs = loader.from_graphdef(model_filepath,
                                                      input_node_names,
                                                      output_node_names)
    graph_def = tf_optimize(input_node_names, output_node_names, graph_def,
                            False)
    with tf.Graph().as_default() as default_graph:
        tf.import_graph_def(graph_def, name='')
    # Convert to ONNX
    with tf.Session(graph=default_graph):
        onnx_graph = process_tf_graph(default_graph,
                                      opset=8,
                                      input_names=inputs,
                                      output_names=outputs)
    onnx_graph = optimizer.optimize_graph(onnx_graph)
    onnx_model = onnx_graph.make_model("segmentation_onnx_model")
    # Save the ONNX model to disk
    utils.save_protobuf(onnx_filepath, onnx_model)
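
A hypothetical call of convert_to_onnx, assuming kInputName is defined elsewhere in the module; the paths and node names below are placeholders:

convert_to_onnx(
    model_filepath="frozen_segmentation.pb",
    onnx_filepath="segmentation.onnx",
    output_node_names=["sigmoid/Sigmoid"],
)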
Example #4
def convert_frozen_to_onnx(
    settings: SerializationSettings, frozen_graph_def: tf.GraphDef
) -> Any:
    # This is basically https://github.com/onnx/tensorflow-onnx/blob/master/tf2onnx/convert.py

    inputs = _get_input_node_names(frozen_graph_def)
    outputs = _get_output_node_names(frozen_graph_def)
    logger.info(f"onnx export - inputs:{inputs} outputs:{outputs}")

    frozen_graph_def = tf_optimize(
        inputs, outputs, frozen_graph_def, fold_constant=True
    )

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(frozen_graph_def, name="")
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(
            tf_graph,
            input_names=inputs,
            output_names=outputs,
            opset=settings.onnx_opset,
        )

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model(settings.brain_name)

    return model_proto
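
The _get_input_node_names and _get_output_node_names helpers are not part of the snippet. A plausible sketch, assuming inputs are the graph's Placeholder nodes and outputs are nodes that no other node consumes (the real project may use a different heuristic):

def _get_input_node_names(frozen_graph_def):
    # Assumption: every Placeholder node is a model input; tf2onnx expects "name:0" form.
    return [n.name + ":0" for n in frozen_graph_def.node if n.op == "Placeholder"]

def _get_output_node_names(frozen_graph_def):
    # Assumption: outputs are nodes whose result is consumed by no other node.
    consumed = {i.split(":")[0].lstrip("^") for n in frozen_graph_def.node for i in n.input}
    return [n.name + ":0" for n in frozen_graph_def.node if n.name not in consumed]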
Example #5
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    custom_ops = {}
    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {op: (default_custom_op_handler, []) for op in args.custom_ops.split(",")}
        extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = loader.from_graphdef(args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = loader.from_checkpoint(args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs = loader.from_saved_model(
            args.saved_model, args.inputs, args.outputs, args.signature_def)
        model_path = args.saved_model

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    # TODO: consider enabling const folding by default?
    graph_def = tf_optimize(inputs, outputs, graph_def, args.fold_const)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from {}".format(model_path))

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX", model_path)
    if args.output:
        utils.save_protobuf(args.output, model_proto)
        logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info("To export ONNX model to file, please run with `--output` option")
Example #6
    def run_test_case(self, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-07,
                      convert_var_to_const=True, constant_fold=True, check_value=True, check_shape=False,
                      check_dtype=False, process_args=None, onnx_feed_dict=None):
        # optional - passed to process_tf_graph
        if process_args is None:
            process_args = {}
        # optional - pass distinct feed_dict to onnx runtime
        if onnx_feed_dict is None:
            onnx_feed_dict = feed_dict
        graph_def = None
        if convert_var_to_const:
            with tf.Session() as sess:
                variables_lib.global_variables_initializer().run()
                output_name_without_port = [n.split(':')[0] for n in output_names_with_port]
                graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,
                                                                         output_name_without_port)

            tf.reset_default_graph()
            tf.import_graph_def(graph_def, name='')

        with tf.Session() as sess:
            variables_lib.global_variables_initializer().run()
            output_dict = []
            for out_name in output_names_with_port:
                output_dict.append(sess.graph.get_tensor_by_name(out_name))
            expected = sess.run(output_dict, feed_dict=feed_dict)

        if self.debug_mode():
            model_path = os.path.join(type(self).TMPPATH, self._testMethodName + "_original.pb")
            with open(model_path, "wb") as f:
                f.write(sess.graph_def.SerializeToString())
            self.log.debug("created file %s", model_path)

        graph_def = tf_optimize(input_names_with_port, output_names_with_port,
                                sess.graph_def, constant_fold)

        if self.debug_mode() and constant_fold:
            model_path = os.path.join(type(self).TMPPATH, self._testMethodName + "_after_tf_optimize.pb")
            with open(model_path, "wb") as f:
                f.write(graph_def.SerializeToString())
            self.log.debug("created file %s", model_path)

        tf.reset_default_graph()
        tf.import_graph_def(graph_def, name='')

        with tf.Session() as sess:
            g = process_tf_graph(sess.graph, opset=type(self).OPSET, output_names=output_names_with_port,
                                 **process_args)
            actual = self._run_backend(g, output_names_with_port, onnx_feed_dict)

        for expected_val, actual_val in zip(expected, actual):
            if check_value:
                self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=0.)
            if check_dtype:
                self.assertEqual(expected_val.dtype, actual_val.dtype)
            if check_shape:
                self.assertEqual(expected_val.shape, actual_val.shape)
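
A hypothetical test built on this harness, assuming the base class exposing run_test_case is named Tf2OnnxBackendTestBase (the name is a guess) and using a trivial ReLU graph:

import numpy as np
import tensorflow as tf

class ReluTests(Tf2OnnxBackendTestBase):  # hypothetical base class providing run_test_case
    def test_relu(self):
        x_val = np.random.rand(2, 4).astype(np.float32)
        x = tf.placeholder(tf.float32, shape=[2, 4], name="input")
        _ = tf.nn.relu(x, name="output")
        self.run_test_case({"input:0": x_val}, ["input:0"], ["output:0"], rtol=1e-6)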
Example #7
def convert_tensorflow(frozen_graph_def,
                       name=None,
                       input_names=None,
                       output_names=None,
                       doc_string='',
                       target_opset=None,
                       channel_first_inputs=None,
                       debug_mode=False,
                       custom_op_conversions=None):
    """
    Convert a frozen TensorFlow graph def into an ONNX model proto, similar to what the Keras converter does.
    :param frozen_graph_def: the frozen tensorflow graph
    :param name: the internal name of the converted onnx model
    :param input_names: the list of input names of the model
    :param output_names: the list of output names of the model
    :param doc_string: doc string
    :param target_opset: the targeted onnx opset
    :param channel_first_inputs: a list of channel-first inputs (not yet supported)
    :param debug_mode: enable debug logging and try to convert as much of the graph as possible
    :param custom_op_conversions: extra custom op handlers merged into the built-in ones
    :return: an ONNX ModelProto
    """
    set_logger_level(logging.DEBUG if debug_mode else logging.INFO)

    if target_opset is None:
        target_opset = get_opset_number_from_onnx()

    if not doc_string:
        doc_string = "converted from {}".format(name)

    graph_def = tfonnx.tf_optimize(input_names, output_names, frozen_graph_def,
                                   True)
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
        if get_tensorboard_writer() is not None:
            get_tensorboard_writer().add_graph(tf_graph)

    custom_op_handlers = tf2onnx_builtin_conversion(target_opset)
    if custom_op_conversions:
        custom_op_handlers.update(custom_op_conversions)
    with tf.Session(graph=tf_graph):
        if not input_names:
            input_nodes = list(_collect_input_nodes(tf_graph, output_names)[0])
            input_names = [nd_.outputs[0].name for nd_ in input_nodes]
        g = tfonnx.process_tf_graph(tf_graph,
                                    continue_on_error=debug_mode,
                                    opset=target_opset,
                                    custom_op_handlers=custom_op_handlers,
                                    inputs_as_nchw=channel_first_inputs,
                                    output_names=output_names,
                                    input_names=input_names)

        onnx_graph = tf2onnx.optimizer.optimize_graph(g)
        model_proto = onnx_graph.make_model(doc_string)

        return model_proto
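
A hypothetical call of convert_tensorflow, assuming a frozen GraphDef has already been loaded into graph_def; tensor names and opset are placeholders:

model_proto = convert_tensorflow(
    graph_def,
    name="my_model",
    input_names=["input:0"],
    output_names=["logits:0"],
    target_opset=10,
)
with open("my_model.onnx", "wb") as f:
    f.write(model_proto.SerializeToString())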
Example #8
def main():
    args = get_args()

    opset = tf2onnx.utils.find_opset(args.opset)
    print("using tensorflow={}, onnx={}, opset={}".format(
        tf.__version__, onnx.__version__, opset))

    # override unknown dimensions from -1 to 1 (i.e. batch size 1) since not every
    # runtime supports unknown dimensions.
    tf2onnx.utils.ONNX_UNKNOWN_DIMENSION = args.unknown_dim

    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {
            op: default_custom_op_handler
            for op in args.custom_ops.split(",")
        }
        extra_opset = [helper.make_opsetid(_TENSORFLOW_DOMAIN, 1)]
    else:
        custom_ops = {}
        extra_opset = None

    graph_def = tf.GraphDef()
    with tf.gfile.FastGFile(args.input, 'rb') as f:
        graph_def.ParseFromString(f.read())

    # TODO: consider enabling const folding by default?
    graph_def = tf_optimize(args.inputs, args.outputs, graph_def,
                            args.fold_const)
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             verbose=args.verbose,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override)

    optimizer = TransposeOptimizer(g, args.verbose is not None)
    optimizer.optimize()

    model_proto = g.make_model("converted from {}".format(args.input),
                               args.outputs,
                               optimize=not args.continue_on_error)

    # write onnx graph
    if args.output:
        with open(args.output, "wb") as f:
            f.write(model_proto.SerializeToString())
Example #9
def convert_onnx(sess, graph_def, input_path, inputs_op, outputs_op):

    graphdef = input_path

    if inputs_op:
        inputs_op, shape_override = utils.split_nodename_and_shape(inputs_op)
    if outputs_op:
        outputs_op = outputs_op.split(",")

    logging.basicConfig(level=logging.get_verbosity_level(True))

    utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    graph_def, inputs_op, outputs_op = from_graphdef(sess, graph_def, graphdef,
                                                     inputs_op, outputs_op)
    model_path = graphdef

    graph_def = tf_optimize(inputs_op, outputs_op, graph_def, True)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=False,
                             target=",".join(constants.DEFAULT_TARGET),
                             opset=10,
                             custom_op_handlers=None,
                             extra_opset=None,
                             shape_override=None,
                             input_names=inputs_op,
                             output_names=outputs_op,
                             inputs_as_nchw=None)

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from {}".format(model_path))

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)
    # if args.output:
    output_path = input_path.replace(".pb", ".onnx")
    utils.save_protobuf(output_path, model_proto)
    logger.info("ONNX model is saved at %s", output_path)
Example #10
def convert_frozen_to_onnx(settings: SerializationSettings,
                           frozen_graph_def: tf.GraphDef) -> Any:
    # This is basically https://github.com/onnx/tensorflow-onnx/blob/master/tf2onnx/convert.py

    # Some constants in the graph need to be read by the inference system.
    # These aren't used by the model anywhere, so trying to make sure they propagate
    # through conversion and import is a losing battle. Instead, save them now,
    # so that we can add them back later.
    constant_values = {}
    for n in frozen_graph_def.node:
        if n.name in MODEL_CONSTANTS:
            val = n.attr["value"].tensor.int_val[0]
            constant_values[n.name] = val

    inputs = _get_input_node_names(frozen_graph_def)
    outputs = _get_output_node_names(frozen_graph_def)
    logger.info(f"onnx export - inputs:{inputs} outputs:{outputs}")

    frozen_graph_def = tf_optimize(inputs,
                                   outputs,
                                   frozen_graph_def,
                                   fold_constant=True)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(frozen_graph_def, name="")
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(
            tf_graph,
            input_names=inputs,
            output_names=outputs,
            opset=settings.onnx_opset,
        )

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model(settings.brain_name)

    # Save the constant values back into the graph initializer.
    # This will ensure the importer gets them as global constants.
    constant_nodes = []
    for k, v in constant_values.items():
        constant_node = _make_onnx_node_for_constant(k, v)
        constant_nodes.append(constant_node)
    model_proto.graph.initializer.extend(constant_nodes)
    return model_proto
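
The _make_onnx_node_for_constant helper is not shown. Since its results are extended into graph.initializer, it presumably returns a TensorProto; a minimal sketch under that assumption:

from onnx import TensorProto, helper

def _make_onnx_node_for_constant(name, value):
    # Assumption: wrap a scalar int32 into a TensorProto so it can live in graph.initializer
    return helper.make_tensor(name=name, data_type=TensorProto.INT32, dims=[1], vals=[value])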
Example #11
    def _convert(self):
        logger.info("Converting...")
        self._restore_session_from_model_file()

        tf.reset_default_graph()
        graph_def = tf_optimize(self.original_input_names, self.original_output_names,
                                self.tf_sess.graph_def)
        tf.import_graph_def(graph_def, name="")
        onnx_graph = process_tf_graph(tf.get_default_graph(), opset=self.conversion_config.onnx_opset,
                                      input_names=self.original_input_names, output_names=self.original_output_names)
        try:
            opt_graph = optimizer.optimize_graph(onnx_graph)
        except Exception as e:
            opt_graph = None
            logger.warning("Failed to optimize ONNX graph, original un-optimized graph will be used, e = {}".format(e))
        onnx_graph = opt_graph if opt_graph is not None else onnx_graph
        model_proto = onnx_graph.make_model("onnx-proto")
        with open(self.conversion_config.onnx_model_path, "wb") as f:
            f.write(model_proto.SerializeToString())
Example #12
    def _run_test_case(self, input_names_with_port, output_names_with_port):
        graph_def = None
        with tf.Session() as sess:
            # freeze graph
            origin_graph = sess.graph
            variables_lib.global_variables_initializer().run()
            output_name_without_port = [
                n.split(':')[0] for n in output_names_with_port
            ]
            graph_def = tf.graph_util.convert_variables_to_constants(
                sess, sess.graph_def, output_name_without_port)

        tf.reset_default_graph()
        tf.import_graph_def(graph_def, name='')

        # optimize graph
        graph_def = tf_optimize(input_names_with_port, output_names_with_port,
                                sess.graph_def, True)

        with tf.Session() as sess:
            if self.config.is_debug_mode:
                if not os.path.exists(self.test_data_directory):
                    os.makedirs(self.test_data_directory)
                model_path = os.path.join(
                    self.test_data_directory,
                    self._testMethodName + "_after_tf_optimize.pb")
                utils.save_protobuf(model_path, graph_def)
                self.logger.debug("created file %s", model_path)

        tf.reset_default_graph()
        tf.import_graph_def(graph_def, name='')

        with tf.Session() as sess:
            inferred_graph = infer_shape_for_graph(sess.graph)
            # compare each operation
            for op in origin_graph.get_operations():
                inferred_op = None
                try:
                    inferred_op = inferred_graph.get_operation_by_name(op.name)
                except KeyError:
                    continue
                self._compare_shape_for_op(op, inferred_op)
Example #13
def convert_tf2onnx(model,
                    output,
                    inputs,
                    outputs,
                    signature_def=None,
                    opset=7):
    import tensorflow as tf
    from tf2onnx.tfonnx import process_tf_graph, tf_optimize
    from tf2onnx import constants, loader, logging, utils, optimizer
    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    if "pb" in model:
        graph_def, inputs, outputs = loader.from_graphdef(
            model, inputs, outputs)
    elif "meta" in model:
        graph_def, inputs, outputs = loader.from_checkpoint(
            model, inputs, outputs)
    elif "saved_model" in model:
        graph_def, inputs, outputs = loader.from_saved_model(
            model, inputs, outputs, signature_def)

    graph_def = tf_optimize(inputs, outputs, graph_def, None)
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             opset=opset,
                             input_names=inputs,
                             output_names=outputs)

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from {}".format(model))
    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX", model)
    utils.save_protobuf(output, model_proto)
    logger.info("ONNX model is saved at %s", output)
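
A hypothetical invocation of convert_tf2onnx; paths and tensor names are placeholders. Note that the function dispatches on whether the model path contains "pb", "meta", or "saved_model":

convert_tf2onnx(
    model="frozen_model.pb",
    output="model.onnx",
    inputs=["input:0"],
    outputs=["logits:0"],
    opset=11,
)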
Example #14
tf.import_graph_def(graph_def, name='')

# [array([[[-0.05846359 -0.06566401  0.02254938 -0.26033643 -0.07923548]],
#         [[ 0.04879569  0.04215769 -0.06720451 -0.60583305  0.06223793]],
#         [[-0.05626901 -0.06627436  0.00422506 -0.5533649  -0.0767431 ]]], dtype=float32)]
with tf.Session() as sess:
    # output_dict: get tensor by output name
    output_dict = []
    for out_name in output_names_with_port:
        output_dict.append(sess.graph.get_tensor_by_name(out_name))

    expected = sess.run(output_dict, feed_dict={"input_1:0": x_val})

# tf optimize
graph_def = tf_optimize(input_names_with_port,
                        output_names_with_port,
                        sess.graph_def,
                        fold_constant=True)

tf.reset_default_graph()
tf.import_graph_def(graph_def, name='')

# convert to onnx
with tf.Session() as sess:
    g = process_tf_graph(sess.graph, output_names=output_names_with_port)
    g = optimizer.optimize_graph(g)
    model_proto = g.make_model("lstm")
    utils.save_onnx_model("./models",
                          "lstm",
                          feed_dict={"input_1:0": x_val},
                          model_proto=model_proto)
Example #15
    def run_test_case(self,
                      feed_dict,
                      input_names_with_port,
                      output_names_with_port,
                      rtol=1e-07,
                      atol=1e-5,
                      convert_var_to_const=True,
                      constant_fold=True,
                      check_value=True,
                      check_shape=False,
                      check_dtype=True,
                      process_args=None,
                      onnx_feed_dict=None,
                      graph_validator=None):
        # optional - passed to process_tf_graph
        if process_args is None:
            process_args = {}
        # optional - pass distinct feed_dict to onnx runtime
        if onnx_feed_dict is None:
            onnx_feed_dict = feed_dict

        graph_def = None
        if convert_var_to_const:
            with tf.Session() as sess:
                variables_lib.global_variables_initializer().run()
                output_name_without_port = [
                    n.split(':')[0] for n in output_names_with_port
                ]
                graph_def = tf.graph_util.convert_variables_to_constants(
                    sess, sess.graph_def, output_name_without_port)

            tf.reset_default_graph()
            tf.import_graph_def(graph_def, name='')

        with tf.Session() as sess:
            variables_lib.global_variables_initializer().run()
            output_dict = []
            for out_name in output_names_with_port:
                output_dict.append(sess.graph.get_tensor_by_name(out_name))
            expected = sess.run(output_dict, feed_dict=feed_dict)

        if self.config.is_debug_mode:
            if not os.path.exists(self.test_data_directory):
                os.makedirs(self.test_data_directory)
            model_path = os.path.join(self.test_data_directory,
                                      self._testMethodName + "_original.pb")
            utils.save_protobuf(model_path, sess.graph_def)
            self.log.debug("created file %s", model_path)

        graph_def = tf_optimize(input_names_with_port, output_names_with_port,
                                sess.graph_def, constant_fold)

        if self.config.is_debug_mode and constant_fold:
            model_path = os.path.join(
                self.test_data_directory,
                self._testMethodName + "_after_tf_optimize.pb")
            utils.save_protobuf(model_path, graph_def)
            self.log.debug("created file %s", model_path)

        tf.reset_default_graph()
        tf.import_graph_def(graph_def, name='')

        with tf.Session() as sess:
            g = process_tf_graph(sess.graph,
                                 opset=self.config.opset,
                                 output_names=output_names_with_port,
                                 target=self.config.target,
                                 **process_args)
            g = optimizer.optimize_graph(g)
            actual = self._run_backend(g, output_names_with_port,
                                       onnx_feed_dict)

        for expected_val, actual_val in zip(expected, actual):
            if check_value:
                self.assertAllClose(expected_val,
                                    actual_val,
                                    rtol=rtol,
                                    atol=atol)
            if check_dtype:
                self.assertEqual(expected_val.dtype, actual_val.dtype)
            if check_shape:
                self.assertEqual(expected_val.shape, actual_val.shape)

        if graph_validator:
            self.assertTrue(graph_validator(g))

        return g
Example #16
    def pb_to_onnx(
        inputs: List[Union[str, tf_compat.Tensor]],
        outputs: List[Union[str, tf_compat.Tensor]],
        pb_path: str,
        onnx_path: str,
        opset: int = default_onnx_opset(),
        custom_op_handlers=None,
        extra_opset=None,
        shape_override: Dict[str, List] = None,
    ):
        """
        Export the graph from PB format to ONNX format.
        Should not be called within an active graph or session.

        :param inputs: the inputs the graph should be created for,
            can be either a list of names or a list of tensors
        :param outputs: the outputs the graph should be created for,
            can be either a list of names or a list of tensors
        :param pb_path: path to the existing PB file
        :param onnx_path: path to the output ONNX file
        :param opset: ONNX opset
        :param custom_op_handlers: dictionary of custom op handlers
        :param extra_opset: list of extra opsets
        :param shape_override: new shape to override
        """
        try:
            from tf2onnx import constants, optimizer, utils
            from tf2onnx.tfonnx import process_tf_graph, tf_optimize
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "tf2onnx must be installed on the system before using export_onnx"
            )

        try:
            from tf2onnx import tf_loader as loader
        except Exception:
            from tf2onnx import loader

        pb_path = clean_path(pb_path)

        if not os.path.exists(pb_path):
            raise FileNotFoundError(
                ("no pb file for the model found at {}").format(pb_path)
            )

        inputs = [inp if isinstance(inp, str) else inp.name for inp in inputs]
        outputs = [out if isinstance(out, str) else out.name for out in outputs]

        graph_def, inputs, outputs = loader.from_graphdef(pb_path, inputs, outputs)
        graph_def = tf_optimize(inputs, outputs, graph_def, fold_constant=True)

        with tf_compat.Graph().as_default() as tf_graph:
            tf_compat.import_graph_def(graph_def, name="")

        with tf_compat.Session(graph=tf_graph):
            graph = process_tf_graph(
                tf_graph,
                continue_on_error=False,
                target=",".join(constants.DEFAULT_TARGET),
                opset=opset,
                custom_op_handlers=custom_op_handlers,
                extra_opset=extra_opset,
                shape_override=shape_override,
                input_names=inputs,
                output_names=outputs,
            )

        onnx_graph = optimizer.optimize_graph(graph)
        model_proto = onnx_graph.make_model("converted from {}".format(pb_path))

        onnx_path = clean_path(onnx_path)
        create_parent_dirs(onnx_path)
        utils.save_protobuf(onnx_path, model_proto)
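
pb_to_onnx is written as a method without self, so it is presumably a staticmethod; a hypothetical call through its owning class (GraphExporter is a stand-in name), with placeholder paths and tensor names:

GraphExporter.pb_to_onnx(
    inputs=["input:0"],
    outputs=["softmax/Softmax:0"],
    pb_path="model.pb",
    onnx_path="model.onnx",
    opset=11,
)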
Example #17
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    # override unknown dimensions from -1 to 1 (i.e. batch size 1) since not every
    # runtime supports unknown dimensions.
    utils.ONNX_UNKNOWN_DIMENSION = args.unknown_dim

    extra_opset = args.extra_opset or []
    custom_ops = {}
    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {
            op: (default_custom_op_handler, [])
            for op in args.custom_ops.split(",")
        }
        extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs = loader.from_saved_model(
            args.saved_model, args.inputs, args.outputs)
        model_path = args.saved_model

    # TODO: consider enabling const folding by default?
    graph_def = tf_optimize(inputs, outputs, graph_def, args.fold_const)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

    model_proto = g.make_model("converted from {}".format(model_path))

    new_model_proto = GraphUtil.optimize_model_proto(model_proto)
    if new_model_proto:
        model_proto = new_model_proto
    else:
        print("NON-CRITICAL: optimizers were not applied successfully")

    # write onnx graph
    if args.output:
        utils.save_protobuf(args.output, model_proto)
        print("\nCompleted successfully; the ONNX model was generated at " +
              args.output)
Example #18
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    args = get_args()

    print("using tensorflow={}, onnx={}".format(tf.__version__, onnx.__version__))

    # override unknown dimensions from -1 to 1 (i.e. batch size 1) since not every
    # runtime supports unknown dimensions.
    tf2onnx.utils.ONNX_UNKNOWN_DIMENSION = args.unknown_dim

    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {op: default_custom_op_handler for op in args.custom_ops.split(",")}
        extra_opset = [helper.make_opsetid(_TENSORFLOW_DOMAIN, 1)]
    else:
        custom_ops = {}
        extra_opset = None

    graph_def = tf.GraphDef()

    with tf.gfile.FastGFile(args.input, 'rb') as f:
        graph_def.ParseFromString(f.read())
    print("args.inputs:", args.inputs)
    print("args.outputs:", args.outputs)
    print("args.middle_inputs:", args.middle_inputs)
    print("args.middle_outputs:", args.middle_outputs)

    if args.middle_outputs:
        graph_def = tf_optimize(None, args.inputs, args.middle_outputs, graph_def)
    else:
        graph_def = tf_optimize(None, args.inputs, args.outputs, graph_def)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')

    with tf.Session(graph=tf_graph) as sess:

        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             verbose=args.verbose,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             inputs=args.inputs,
                             middle_inputs=args.middle_inputs)

    final_inputs = args.inputs
    final_outputs = args.outputs

    if args.middle_outputs:
        final_outputs = args.middle_outputs
    if args.middle_inputs:
        final_inputs = args.middle_inputs

    model_proto = g.make_model(
        "converted from {}".format(args.input), final_inputs, final_outputs,
        optimize=not args.continue_on_error)

    # write onnx graph
    if args.output:
        with open(args.output, "wb") as f:
            f.write(model_proto.SerializeToString())
Example #19
    def convert2onnx(self):
        inputs = [
            '%s:0' % (layer.name) for layer in self.lwnn_model
            if layer.op == 'Input'
        ]
        if ('output_node' in self.kwargs):
            outputs = ['%s:0' % (o) for o in self.kwargs['output_node']]
        else:
            outputs = [
                '%s:0' % (layer.name) for layer in self.lwnn_model
                if len(self.get_consumers(layer)) == 0
            ]
        custom_ops = {}
        extra_opset = []
        graph_def = tf_optimize(inputs, outputs, self.graph_def, True)
        with tf.Graph().as_default() as tf_graph:
            tf.import_graph_def(graph_def, name='')
        with tf.Session(graph=tf_graph):
            g = process_tf_graph(tf_graph,
                                 continue_on_error=False,
                                 target=None,
                                 opset=None,
                                 custom_op_handlers=custom_ops,
                                 extra_opset=extra_opset,
                                 shape_override=None,
                                 input_names=inputs,
                                 output_names=outputs,
                                 inputs_as_nchw=None)

        onnx_graph = tf2onnx.optimizer.optimize_graph(g)
        model = onnx_graph.make_model(self.name)
        inps = []
        for inp in model.graph.input:
            if (inp.name in inputs):
                shape = [
                    int(dim.dim_value)
                    for dim in inp.type.tensor_type.shape.dim
                ]
                if (len(shape) == 0):
                    layer = self.get_layers(self.LN(inp.name))
                    x = onnx.helper.make_tensor_value_info(
                        inp.name, inp.type.tensor_type.elem_type, layer.shape)
                    inps.append(x)
                else:
                    inps.append(inp)
        outs = []
        for out in model.graph.output:
            if (out.name in outputs):
                shape = [
                    int(dim.dim_value)
                    for dim in out.type.tensor_type.shape.dim
                ]
                if (len(shape) == 0):
                    layer = self.get_layers(self.LN(out.name))[0]
                    x = onnx.helper.make_tensor_value_info(
                        out.name, out.type.tensor_type.elem_type, layer.shape)
                    outs.append(x)
                else:
                    outs.append(out)
        del model.graph.input[:]
        model.graph.input.extend(inps)
        del model.graph.output[:]
        model.graph.output.extend(outs)
        return model