Example #1
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    custom_ops = {}
    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {op: (default_custom_op_handler, []) for op in args.custom_ops.split(",")}
        extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = loader.from_graphdef(args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = loader.from_checkpoint(args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs = loader.from_saved_model(
            args.saved_model, args.inputs, args.outputs, args.signature_def)
        model_path = args.saved_model

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    # TODO: consider enabling const folding by default?
    graph_def = tf_optimize(inputs, outputs, graph_def, args.fold_const)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from {}".format(model_path))

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX", model_path)
    if args.output:
        utils.save_protobuf(args.output, model_proto)
        logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info("To export ONNX model to file, please run with `--output` option")
Example #2
def _is_legacy_keras_model(model):
    """Inspects model class to determine if it is from tf or legacy keras"""

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)
    unknown_type_err = "model is not instance of tf.keras.Model or keras.Model"
    if isinstance(model, tf.keras.Model):
        return False
    try:
        import keras  # pylint: disable=import-outside-toplevel
        if isinstance(model, keras.Model):
            return True
        logger.warning(unknown_type_err)
    except ImportError:
        logger.warning(unknown_type_err)
    return False
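
A minimal usage sketch of this check, assuming it is called with a freshly built tf.keras model (describe_model is a hypothetical helper added here only for illustration):

import tensorflow as tf

def describe_model(model):
    """Hypothetical helper: report which Keras flavour a model comes from."""
    if _is_legacy_keras_model(model):
        return "legacy standalone keras.Model"
    return "tf.keras.Model (or an unrecognized type, per the warning above)"

print(describe_model(tf.keras.Sequential([tf.keras.layers.Dense(1)])))
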
Example #3
def convert_onnx(sess, graph_def, input_path, inputs_op, outputs_op):

    graphdef = input_path

    if inputs_op:
        inputs_op, shape_override = utils.split_nodename_and_shape(inputs_op)
    if outputs_op:
        outputs_op = outputs_op.split(",")

    logging.basicConfig(level=logging.get_verbosity_level(True))

    utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    graph_def, inputs_op, outputs_op = from_graphdef(sess, graph_def, graphdef,
                                                     inputs_op, outputs_op)
    model_path = graphdef

    graph_def = tf_optimize(inputs_op, outputs_op, graph_def, True)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=False,
                             target=",".join(constants.DEFAULT_TARGET),
                             opset=10,
                             custom_op_handlers=None,
                             extra_opset=None,
                             shape_override=None,
                             input_names=inputs_op,
                             output_names=outputs_op,
                             inputs_as_nchw=None)

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from {}".format(model_path))

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)
    # if args.output:
    output_path = input_path.replace(".pb", ".onnx")
    utils.save_protobuf(output_path, model_proto)
    logger.info("ONNX model is saved at %s", output_path)
Example #4
def convert_tf2onnx(model,
                    output,
                    inputs,
                    outputs,
                    signature_def=None,
                    opset=7):
    import tensorflow as tf
    from tf2onnx.tfonnx import process_tf_graph, tf_optimize
    from tf2onnx import constants, loader, logging, utils, optimizer
    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    if "pb" in model:
        graph_def, inputs, outputs = loader.from_graphdef(
            model, inputs, outputs)
    elif "meta" in model:
        graph_def, inputs, outputs = loader.from_checkpoint(
            model, inputs, outputs)
    elif "saved_model" in model:
        graph_def, inputs, outputs = loader.from_saved_model(
            model, inputs, outputs, signature_def)

    graph_def = tf_optimize(inputs, outputs, graph_def, None)
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             opset=opset,
                             input_names=inputs,
                             output_names=outputs)

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from {}".format(model))
    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX", model)
    utils.save_protobuf(output, model_proto)
    logger.info("ONNX model is saved at %s", output)
Example #5
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
tf2onnx.rewriter - rewrite tensorflow subgraph to onnx dropout op
"""

import numpy as np
from tf2onnx import utils
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx import logging

logger = logging.getLogger(__name__)

# pylint: disable=missing-docstring


def rewrite_dropout(g, ops):
    patterns = [
        OpTypePattern(
            'Mul',
            name='outputs',
            inputs=[
                OpTypePattern('RealDiv', name="input2"),
                OpTypePattern(
                    'Floor',
                    inputs=[
                        OpTypePattern(
                            'Add',
                            inputs=[
                                OpTypePattern("*", name="input3"),
                                OpTypePattern(
                                    # the original listing breaks off here; closed with the
                                    # RandomUniform source that the upstream tf2onnx rewriter
                                    # matches (reconstructed, not verbatim)
                                    'RandomUniform|RandomUniformLike'),
                            ]),
                    ]),
            ]),
    ]
    # ... the rest of rewrite_dropout is cut off in the original listing
Example #6
# os, numpy and PIL are used below but their imports were missing from the listing
import os

import numpy as np
import PIL.Image

import tensorflow as tf

# contrib ops are registered only when their module is imported, so the following
# import is needed; otherwise a TF runtime error about unregistered ops shows up
# when a model is restored from a pb file.
try:
    import tensorflow.contrib.rnn  # pylint: disable=unused-import
except:  # pylint: disable=bare-except
    # not needed for tf-2.0
    pass

from tf2onnx import tf_loader, logging, optimizer, utils, tf_utils
from tf2onnx.tfonnx import process_tf_graph
from tf2onnx.tf_loader import tf_session, tf_reset_default_graph
from tf2onnx.graph import ExternalTensorStorage

logger = logging.getLogger("run_pretrained")

TEMP_DIR = os.path.join(utils.get_temp_directory(), "run_pretrained")
PERFITER = 1000


def get_beach(shape):
    """Get beach image as input."""
    resize_to = shape[1:3]
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        "beach.jpg")
    img = PIL.Image.open(path)
    img = img.resize(resize_to, PIL.Image.ANTIALIAS)
    img_np = np.array(img).astype(np.float32)
    img_np = np.stack([img_np] * shape[0], axis=0).reshape(shape)
    return img_np / 255
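
For example, to build an input batch for a typical NHWC image classifier (batch 1, 224x224 RGB), assuming beach.jpg sits next to the script as the function expects:

batch = get_beach((1, 224, 224, 3))
print(batch.shape, batch.dtype)  # (1, 224, 224, 3) float32, scaled to [0, 1]
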
Example #7
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    custom_ops = {}
    initialized_tables = None
    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {
            op: (default_custom_op_handler, [])
            for op in args.custom_ops.split(",")
        }
        extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = tf_loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = tf_loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs, initialized_tables = tf_loader.from_saved_model(
            args.saved_model,
            args.inputs,
            args.outputs,
            args.tag,
            args.signature_def,
            args.concrete_function,
            args.large_model,
            return_initialized_tables=True)
        model_path = args.saved_model
    if args.keras:
        graph_def, inputs, outputs = tf_loader.from_keras(
            args.keras, args.inputs, args.outputs)
        model_path = args.keras

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    with tf.Graph().as_default() as tf_graph:
        const_node_values = None
        if args.large_model:
            const_node_values = compress_graph_def(graph_def)
        if args.output_frozen_graph:
            utils.save_protobuf(args.output_frozen_graph, graph_def)
        tf.import_graph_def(graph_def, name='')
    with tf_loader.tf_session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw,
                             const_node_values=const_node_values,
                             initialized_tables=initialized_tables)

    onnx_graph = optimizer.optimize_graph(g)

    tensor_storage = ExternalTensorStorage() if args.large_model else None
    model_proto = onnx_graph.make_model("converted from {}".format(model_path),
                                        external_tensor_storage=tensor_storage)

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)
    if args.output:
        if args.large_model:
            utils.save_onnx_zip(args.output, model_proto, tensor_storage)
            logger.info(
                "Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.",
                args.output)
        else:
            utils.save_protobuf(args.output, model_proto)
            logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info(
            "To export ONNX model to file, please run with `--output` option")
Example #8
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    tflite_path = None
    custom_ops = {}
    initialized_tables = None
    tensors_to_rename = {}
    if args.custom_ops:
        using_tf_opset = False
        for op in args.custom_ops.split(","):
            if ":" in op:
                op, domain = op.split(":")
            else:
                # default custom ops for tensorflow-onnx are in the "tf" namespace
                using_tf_opset = True
                domain = constants.TENSORFLOW_OPSET.domain
            custom_ops[op] = (make_default_custom_op_handler(domain), [])
        if using_tf_opset:
            extra_opset.append(constants.TENSORFLOW_OPSET)

    if any(opset.domain == constants.CONTRIB_OPS_DOMAIN
           for opset in extra_opset):
        try:
            import tensorflow_text  # pylint: disable=import-outside-toplevel
        except ModuleNotFoundError:
            logger.warning(
                "tensorflow_text not installed. Model will fail to load if tensorflow_text ops are used."
            )

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    graph_def = None
    inputs = None
    outputs = None
    model_path = None

    if args.graphdef:
        graph_def, inputs, outputs = tf_loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = tf_loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs, initialized_tables, tensors_to_rename = tf_loader.from_saved_model(
            args.saved_model,
            args.inputs,
            args.outputs,
            args.tag,
            args.signature_def,
            args.concrete_function,
            args.large_model,
            return_initialized_tables=True,
            return_tensors_to_rename=True)
        model_path = args.saved_model
    if args.keras:
        graph_def, inputs, outputs = tf_loader.from_keras(
            args.keras, args.inputs, args.outputs)
        model_path = args.keras
    if args.tflite:
        # Optional, but used to cut graph if provided.
        inputs = args.inputs
        outputs = args.outputs
        tflite_path = args.tflite
        model_path = tflite_path

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    if args.rename_inputs:
        tensors_to_rename.update(zip(inputs, args.rename_inputs))
    if args.rename_outputs:
        tensors_to_rename.update(zip(outputs, args.rename_outputs))

    with tf.device("/cpu:0"):
        model_proto, _ = _convert_common(
            graph_def,
            name=model_path,
            continue_on_error=args.continue_on_error,
            target=args.target,
            opset=args.opset,
            custom_op_handlers=custom_ops,
            extra_opset=extra_opset,
            shape_override=args.shape_override,
            input_names=inputs,
            output_names=outputs,
            inputs_as_nchw=args.inputs_as_nchw,
            large_model=args.large_model,
            tensors_to_rename=tensors_to_rename,
            ignore_default=args.ignore_default,
            use_default=args.use_default,
            tflite_path=tflite_path,
            dequantize=args.dequantize,
            initialized_tables=initialized_tables,
            output_frozen_graph=args.output_frozen_graph,
            output_path=args.output)

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)

    logger.info("Model inputs: %s", [n.name for n in model_proto.graph.input])
    logger.info("Model outputs: %s",
                [n.name for n in model_proto.graph.output])
    if args.output:
        if args.large_model:
            logger.info(
                "Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.",
                args.output)
        else:
            logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info(
            "To export ONNX model to file, please run with `--output` option")
Example #9
_HELP_TEXT = """
Usage Examples:

python -m tf2onnx.convert --saved-model saved_model_dir --output model.onnx
python -m tf2onnx.convert --input frozen_graph.pb  --inputs X:0 --outputs output:0 --output model.onnx
python -m tf2onnx.convert --checkpoint checkpoint.meta  --inputs X:0 --outputs output:0 --output model.onnx

For help and additional information see:
    https://github.com/onnx/tensorflow-onnx

If you run into issues, open an issue here:
    https://github.com/onnx/tensorflow-onnx/issues
"""

logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)


def freeze_session(sess,
                   keep_var_names=None,
                   output_names=None,
                   clear_devices=True):
    """Freezes the state of a session into a pruned computation graph."""
    output_names = [i.split(':')[:-1][0] for i in output_names]
    graph = sess.graph
    with graph.as_default():
        freeze_var_names = list(
            set(v.op.name
                for v in tf.global_variables()).difference(keep_var_names
                                                           or []))
        output_names = output_names or []
        # The listing breaks off here; the continuation below follows the standard
        # freeze_session recipe this snippet is based on (reconstructed, not verbatim).
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ""
        frozen_graph = tf.graph_util.convert_variables_to_constants(
            sess, input_graph_def, output_names, freeze_var_names)
        return frozen_graph
Example #10
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    tflite_path = None
    custom_ops = {}
    initialized_tables = None
    if args.custom_ops:
        using_tf_opset = False
        for op in args.custom_ops.split(","):
            if ":" in op:
                op, domain = op.split(":")
            else:
                # default custom ops for tensorflow-onnx are in the "tf" namespace
                using_tf_opset = True
                domain = constants.TENSORFLOW_OPSET.domain
            custom_ops[op] = (make_default_custom_op_handler(domain), [])
        if using_tf_opset:
            extra_opset.append(constants.TENSORFLOW_OPSET)

    if any(opset.domain == constants.CONTRIB_OPS_DOMAIN
           for opset in extra_opset):
        try:
            import tensorflow_text  # pylint: disable=import-outside-toplevel
        except ModuleNotFoundError:
            logger.warning(
                "tensorflow_text not installed. Model will fail to load if tensorflow_text ops are used."
            )

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = tf_loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = tf_loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs, initialized_tables = tf_loader.from_saved_model(
            args.saved_model,
            args.inputs,
            args.outputs,
            args.tag,
            args.signature_def,
            args.concrete_function,
            args.large_model,
            return_initialized_tables=True)
        model_path = args.saved_model
    if args.keras:
        graph_def, inputs, outputs = tf_loader.from_keras(
            args.keras, args.inputs, args.outputs)
        model_path = args.keras
    if args.tflite:
        graph_def = None
        inputs = None
        outputs = None
        tflite_path = args.tflite
        model_path = tflite_path

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    tf_graph = None
    const_node_values = None
    if graph_def is not None:
        with tf.Graph().as_default() as tf_graph:
            const_node_values = None
            if args.large_model:
                const_node_values = compress_graph_def(graph_def)
            if args.output_frozen_graph:
                utils.save_protobuf(args.output_frozen_graph, graph_def)
            tf.import_graph_def(graph_def, name='')

    with tf_loader.tf_session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw,
                             ignore_default=args.ignore_default,
                             use_default=args.use_default,
                             const_node_values=const_node_values,
                             initialized_tables=initialized_tables,
                             tflite_path=tflite_path,
                             dequantize=args.dequantize)

    onnx_graph = optimizer.optimize_graph(g)

    tensor_storage = ExternalTensorStorage() if args.large_model else None
    model_proto = onnx_graph.make_model("converted from {}".format(model_path),
                                        external_tensor_storage=tensor_storage)

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)
    if args.output:
        if args.large_model:
            utils.save_onnx_zip(args.output, model_proto, tensor_storage)
            logger.info(
                "Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.",
                args.output)
        else:
            utils.save_protobuf(args.output, model_proto)
            logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info(
            "To export ONNX model to file, please run with `--output` option")
Example #11
    def __init__(self):
        self._logger = logging.getLogger(
            '.'.join(__name__.split('.')[:-1] + [self.__class__.__name__]))
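
To see what logger name this produces, the same expression can be evaluated with hypothetical module and class names:

# hypothetical values, only to illustrate how the logger name is assembled
module_name = "tf2onnx.optimizer.transpose_optimizer"  # stands in for __name__
class_name = "TransposeOptimizer"                      # stands in for self.__class__.__name__
print('.'.join(module_name.split('.')[:-1] + [class_name]))
# -> tf2onnx.optimizer.TransposeOptimizer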