def _convert_common(frozen_graph,
                    name="unknown",
                    large_model=False,
                    output_path=None,
                    output_frozen_graph=None,
                    **kwargs):
    """Common processing for conversion."""

    model_proto = None
    external_tensor_storage = None
    const_node_values = None

    with tf.Graph().as_default() as tf_graph:
        if large_model:
            const_node_values = compress_graph_def(frozen_graph)
            external_tensor_storage = ExternalTensorStorage()
        if output_frozen_graph:
            utils.save_protobuf(output_frozen_graph, frozen_graph)
        if not kwargs.get("tflite_path"):
            tf.import_graph_def(frozen_graph, name='')
        g = process_tf_graph(tf_graph,
                             const_node_values=const_node_values,
                             **kwargs)
        onnx_graph = optimizer.optimize_graph(g, catch_errors=not large_model)
        model_proto = onnx_graph.make_model(
            "converted from {}".format(name),
            external_tensor_storage=external_tensor_storage)
    if output_path:
        if large_model:
            utils.save_onnx_zip(output_path, model_proto,
                                external_tensor_storage)
        else:
            utils.save_protobuf(output_path, model_proto)

    return model_proto, external_tensor_storage
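
# Minimal usage sketch (paths, tensor names and opset below are placeholders, not from the
# original snippet); extra keyword arguments such as opset/input_names/output_names are
# assumed to be forwarded to process_tf_graph via **kwargs.
from tf2onnx import tf_loader

graph_def, inputs, outputs = tf_loader.from_graphdef("frozen.pb", ["input:0"], ["output:0"])
model_proto, _ = _convert_common(graph_def,
                                 name="frozen.pb",
                                 output_path="model.onnx",
                                 opset=13,
                                 input_names=inputs,
                                 output_names=outputs)
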
def convert_to_onnx(model_filepath, onnx_filepath, output_node_names):
    """
    Convert the model to an ONNX file, which can in turn be used for TensorRT inference

    Arguments:
    model_filepath: the path to the frozen .pb file
    onnx_filepath: the path where the ONNX file should be saved
    output_node_names: list of output node names
    """
    # tf2onnx expects tensor names in the "node_name:port_id" format,
    # so append the port ID to each input/output name before conversion.
    input_node_names = [kInputName + ":0"]
    output_node_names = list(map(lambda x: x + ":0", output_node_names))
    # Use in-built function from tf2onnx to import the graph and optimize for conversion
    graph_def, inputs, outputs = loader.from_graphdef(model_filepath,
                                                      input_node_names,
                                                      output_node_names)
    graph_def = tf_optimize(input_node_names, output_node_names, graph_def,
                            False)
    with tf.Graph().as_default() as default_graph:
        tf.import_graph_def(graph_def, name='')
    # Convert to ONNX
    with tf.Session(graph=default_graph):
        onnx_graph = process_tf_graph(default_graph,
                                      opset=8,
                                      input_names=inputs,
                                      output_names=outputs)
    onnx_graph = optimizer.optimize_graph(onnx_graph)
    onnx_model = onnx_graph.make_model("segmentation_onnx_model")
    # Save the ONNX model to disk
    utils.save_protobuf(onnx_filepath, onnx_model)
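
# Usage sketch with hypothetical paths and node names; kInputName is assumed to be a
# module-level constant naming the graph's input node (without the ":0" port suffix).
convert_to_onnx(model_filepath="frozen_segmentation.pb",
                onnx_filepath="segmentation.onnx",
                output_node_names=["softmax_output"])
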
Example #3
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    custom_ops = {}
    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {op: (default_custom_op_handler, []) for op in args.custom_ops.split(",")}
        extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = loader.from_graphdef(args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = loader.from_checkpoint(args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs = loader.from_saved_model(
            args.saved_model, args.inputs, args.outputs, args.signature_def)
        model_path = args.saved_model

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    # todo: consider enabling const folding by default?
    graph_def = tf_optimize(inputs, outputs, graph_def, args.fold_const)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from {}".format(model_path))

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX", model_path)
    if args.output:
        utils.save_protobuf(args.output, model_proto)
        logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info("To export ONNX model to file, please run with `--output` option")
Example #4
    def create_onnx_file(name, model_proto, inputs, outdir, external_tensor_storage=None):
        os.makedirs(outdir, exist_ok=True)
        if external_tensor_storage is None:
            model_path = os.path.join(outdir, name + ".onnx")
            utils.save_protobuf(model_path, model_proto)
        else:
            model_path = os.path.join(outdir, name + ".zip")
            utils.save_onnx_zip(model_path, model_proto, external_tensor_storage)
        logger.info("Created %s", model_path)
Example #5
    def _run_test_case(self, input_names_with_port, output_names_with_port):
        try:
            tf.compat.v1.disable_eager_execution()
        except:  # pylint: disable=bare-except
            pass
        graph_def = None
        with tf_session() as sess:
            # freeze graph
            origin_graph = sess.graph
            variables_lib.global_variables_initializer().run()
            output_name_without_port = [
                n.split(':')[0] for n in output_names_with_port
            ]
            graph_def = tf.graph_util.convert_variables_to_constants(
                sess, sess.graph_def, output_name_without_port)

        tf_reset_default_graph()
        tf.import_graph_def(graph_def, name='')

        # optimize graph
        input_tensors = {
            i: sess.graph.get_tensor_by_name(i)
            for i in input_names_with_port
        }
        output_tensors = {
            i: sess.graph.get_tensor_by_name(i)
            for i in output_names_with_port
        }
        graph_def = tf_optimize(input_tensors, output_tensors, sess.graph_def,
                                True)

        with tf_session() as sess:
            if self.config.is_debug_mode:
                if not os.path.exists(self.test_data_directory):
                    os.makedirs(self.test_data_directory)
                model_path = os.path.join(
                    self.test_data_directory,
                    self._testMethodName + "_after_tf_optimize.pb")
                utils.save_protobuf(model_path, graph_def)
                self.logger.debug("created file  %s", model_path)

        tf_reset_default_graph()
        tf.import_graph_def(graph_def, name='')

        with tf_session() as sess:
            inferred_graph = infer_shape_for_graph(sess.graph)
            # compare each operation
            for op in origin_graph.get_operations():
                inferred_op = None
                try:
                    inferred_op = inferred_graph.get_operation_by_name(op.name)
                except KeyError:
                    continue
                self._compare_shape_for_op(op, inferred_op)
Example #6
def convert(input_ops_dict, output_ops, input_model, output_model):
    '''Convert keras h5 to tensorflow pb
        Args:
            input_ops_dict: input ops dict including names and shapes
            output_ops: output op names
            input_model: input keras h5 model name
            output_model: output pb model name
    '''
    onnx_name = ".tmp.onnx"
    pb_name = ".tmp.pb"
    # keras --> onnx --> pb --> onnx --> pb

    # keras --> onnx
    model = tf.keras.models.load_model(input_model)
    onnx_model = keras2onnx.convert_keras(model, model.name, target_opset=8)
    keras2onnx.save_model(onnx_model, onnx_name)

    # onnx --> tf
    onnx_model = onnx.load(onnx_name)
    tf_rep = prepare(onnx_model, input_shape_dict=input_ops_dict)
    tf_rep.export_graph(pb_name)

    # tf --> onnx (fold constants)
    inputs = input_ops_dict.keys()
    inputs = [i + ":0" for i in inputs]
    outputs = output_ops
    graph_def, inputs, outputs = tf_loader.from_graphdef(
        pb_name, inputs, outputs)
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name="")

    g = tf2onnx.tfonnx.process_tf_graph(tf_graph,
                                        opset=8,
                                        input_names=inputs,
                                        output_names=outputs)
    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from %s" % pb_name)
    utils.save_protobuf(onnx_name, model_proto)

    # onnx --> tf
    onnx_model = onnx.load(onnx_name)
    tf_rep = prepare(onnx_model, input_shape_dict=input_ops_dict)
    tf_rep.export_graph(output_model)

    # remove tmp files
    if os.path.exists(onnx_name):
        os.remove(onnx_name)
    if os.path.exists(pb_name):
        os.remove(pb_name)
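
# Usage sketch with hypothetical file names, input shape and output op; the output op
# name must include the ":0" port suffix expected by tf_loader.from_graphdef.
convert(input_ops_dict={"input_1": [1, 224, 224, 3]},
        output_ops=["dense_1/Softmax:0"],
        input_model="model.h5",
        output_model="model_folded.pb")
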
Example #7
def convert_frozen_pb_to_onnx(frozen_pb_or_graph_def, opset=9, tf_graph_optimization=True, input_shape=None, name=None):
    try:
        from tf2onnx.tfonnx import process_tf_graph, tf_optimize
        from tf2onnx import constants, loader, logging, utils, optimizer
    except Exception:
        logger.error("Failed to import tf2onnx; install it with 'pip install tf2onnx'")
        exit(1)

    graph_def = frozen_pb_or_graph_def
    if isinstance(frozen_pb_or_graph_def, str):
        model_path = frozen_pb_or_graph_def
        output_dir = model_path.replace('.pb', '.onnx')
    else:
        model_path = 'graphdef_buffer'
        assert name, 'a name must be given to export an .onnx file when converting from a graphdef buffer'
        output_dir = '{}.onnx'.format(name)
    inputs, outputs = _auto_inputs_outputs_detect(graph_def)
    shape_override = {}
    if input_shape:
        assert isinstance(input_shape, list), 'input_shape must be a list of shape lists, one per input tensor'
        for idx, item in enumerate(input_shape):
            shape_override[inputs[idx]] = item
    # graph optimization with tf_graph_transform
    if tf_graph_optimization:
        graph_def = graph_optimization(graph_def)
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=False,
                             target='',
                             opset=opset,
                             custom_op_handlers={},
                             extra_opset=[],
                             shape_override=shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=None)
    # graph optimization with onnx optimizer
    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from {}".format(model_path))

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX", model_path)
    utils.save_protobuf(output_dir, model_proto)
    logger.info("ONNX model is saved at %s", output_dir)
Example #8
def convert_onnx(sess, graph_def, input_path, inputs_op, outputs_op):

    graphdef = input_path

    if inputs_op:
        inputs_op, shape_override = utils.split_nodename_and_shape(inputs_op)
    if outputs_op:
        outputs_op = outputs_op.split(",")

    logging.basicConfig(level=logging.get_verbosity_level(True))

    utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    graph_def, inputs_op, outputs_op = from_graphdef(sess, graph_def, graphdef,
                                                     inputs_op, outputs_op)
    model_path = graphdef

    graph_def = tf_optimize(inputs_op, outputs_op, graph_def, True)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=False,
                             target=",".join(constants.DEFAULT_TARGET),
                             opset=10,
                             custom_op_handlers=None,
                             extra_opset=None,
                             shape_override=None,
                             input_names=inputs_op,
                             output_names=outputs_op,
                             inputs_as_nchw=None)

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from {}".format(model_path))

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)
    # if args.output:
    output_path = input_path.replace(".pb", ".onnx")
    utils.save_protobuf(output_path, model_proto)
    logger.info("ONNX model is saved at %s", output_path)
Example #9
    def test_dropout(self):
        with tf.Session() as sess:
            x1 = tf.placeholder(tf.float32, [2, 3], name="input1")
            x2 = tf.placeholder(tf.float32, [1, 3], name="input2")
            prop = tf.placeholder(tf.float32, (), name="prob")
            x_ = tf.add(x1, x2)
            x_ = tf.nn.dropout(x_, prop)
            x_ = tf.identity(x_, name="output1")
            x_ = tf.identity(x_, name="output2")
            _ = tf.identity(x_, name="output")
            # feed output_names in order to remove unused nodes.
            g = process_tf_graph(sess.graph,
                                 opset=self.config.opset,
                                 output_names=["output:0"])
            utils.save_protobuf("./test.onnx", g.make_model("test"))
            actual = onnx_to_graphviz(g)
            expected = 'digraph { prob [op_type=Placeholder shape="[]"] input2 [op_type=Placeholder shape="[1, 3]"] ' \
                       'input1 [op_type=Placeholder shape="[2, 3]"] Add [op_type=Add] output1 [op_type=Identity] ' \
                       'output2 [op_type=Identity] output [op_type=Identity] output_graph_outputs_Identity__3 ' \
                       '[op_type=Identity] input1:0 -> Add input2:0 -> Add Add:0 -> output1 output1:0 -> output2 ' \
                       'output2:0 -> output output_raw_output___2:0 -> output_graph_outputs_Identity__3 }'
            self.assertEqual(expected, actual)
Example #10
def _convert_common(frozen_graph, name="unknown", large_model=False, output_path=None,
                    output_frozen_graph=None, custom_ops=None, custom_op_handlers=None, **kwargs):
    """Common processing for conversion."""

    model_proto = None
    external_tensor_storage = None
    const_node_values = None

    if custom_ops is not None:
        if custom_op_handlers is None:
            custom_op_handlers = {}
        custom_op_handlers.update(
            {op: (make_default_custom_op_handler(domain), []) for op, domain in custom_ops.items()})

    with tf.Graph().as_default() as tf_graph:
        if large_model:
            const_node_values = compress_graph_def(frozen_graph)
            external_tensor_storage = ExternalTensorStorage()
        if output_frozen_graph:
            utils.save_protobuf(output_frozen_graph, frozen_graph)
        if not kwargs.get("tflite_path") and not kwargs.get("tfjs_path"):
            tf.import_graph_def(frozen_graph, name='')
        g = process_tf_graph(tf_graph, const_node_values=const_node_values,
                             custom_op_handlers=custom_op_handlers, **kwargs)
        if constants.ENV_TF2ONNX_CATCH_ERRORS in os.environ:
            catch_errors = constants.ENV_TF2ONNX_CATCH_ERRORS.upper() == "TRUE"
        else:
            catch_errors = not large_model
        onnx_graph = optimizer.optimize_graph(g, catch_errors)
        model_proto = onnx_graph.make_model("converted from {}".format(name),
                                            external_tensor_storage=external_tensor_storage)
    if output_path:
        if large_model:
            utils.save_onnx_zip(output_path, model_proto, external_tensor_storage)
        else:
            utils.save_protobuf(output_path, model_proto)

    return model_proto, external_tensor_storage
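
# Sketch of the custom-op path (paths, op name, and ONNX domain are made up for
# illustration); each entry in custom_ops maps a TF op type to a target ONNX domain.
from tf2onnx import tf_loader

graph_def, inputs, outputs = tf_loader.from_graphdef("custom_op_model.pb", ["input:0"], ["output:0"])
model_proto, _ = _convert_common(graph_def,
                                 name="custom_op_model.pb",
                                 output_path="custom_op_model.onnx",
                                 custom_ops={"MyCustomOp": "ai.example"},
                                 opset=13,
                                 input_names=inputs,
                                 output_names=outputs)
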
def convert_tf2onnx(model,
                    output,
                    inputs,
                    outputs,
                    signature_def=None,
                    opset=7):
    import tensorflow as tf
    from tf2onnx.tfonnx import process_tf_graph, tf_optimize
    from tf2onnx import constants, loader, logging, utils, optimizer
    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    if "pb" in model:
        graph_def, inputs, outputs = loader.from_graphdef(
            model, inputs, outputs)
    elif "meta" in model:
        graph_def, inputs, outputs = loader.from_checkpoint(
            model, inputs, outputs)
    elif "saved_model" in model:
        graph_def, inputs, outputs = loader.from_saved_model(
            model, inputs, outputs, signature_def)

    graph_def = tf_optimize(inputs, outputs, graph_def, None)
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             opset=opset,
                             input_names=inputs,
                             output_names=outputs)

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from {}".format(model))
    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX", model)
    utils.save_protobuf(output, model_proto)
    logger.info("ONNX model is saved at %s", output)
    def run_test(self,
                 name,
                 backend="caffe2",
                 onnx_file=None,
                 opset=None,
                 extra_opset=None,
                 perf=None,
                 fold_const=None):
        """Run complete test against backend."""
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_model()
            logger.info("Downloaded to %s", dir_name)
            model_path = os.path.join(
                dir_name, self.local) if self.local != "." else dir_name
        else:
            model_path = self.local

        logger.info("Load model from %s", model_path)
        input_names = list(self.input_names.keys())
        outputs = self.output_names
        if self.model_type in ["checkpoint"]:
            graph_def, input_names, outputs = tf_loader.from_checkpoint(
                model_path, input_names, outputs)
        elif self.model_type in ["saved_model"]:
            loaded = tf_loader.from_saved_model(
                model_path,
                input_names,
                outputs,
                self.tag,
                self.signatures,
                self.concrete_function,
                self.large_model,
                return_concrete_func=self.large_model)
            if self.large_model:
                # Must maintain ref to imported since concrete_func uses weak refs
                # pylint: disable=unused-variable
                graph_def, input_names, outputs, concrete_func, imported = loaded
            else:
                graph_def, input_names, outputs = loaded
        elif self.model_type in ["keras"]:
            graph_def, input_names, outputs = tf_loader.from_keras(
                model_path, input_names, outputs)
        else:
            graph_def, input_names, outputs = tf_loader.from_graphdef(
                model_path, input_names, outputs)

        if utils.is_debug_mode():
            utils.save_protobuf(
                os.path.join(TEMP_DIR, name + "_after_tf_optimize.pb"),
                graph_def)

        if self.large_model:
            inputs = {}
            for k in input_names:
                v = self.input_names[k]
                inputs[k.split(":")[0]] = tf.constant(self.make_input(v))
            tf_func = tf.function(concrete_func)
            logger.info("Running TF")
            tf_results_d = tf_func(**inputs)
            if self.structured_outputs is None:
                tf_results = list(tf_results_d.values())
            else:
                tf_results = [
                    tf_results_d[output] for output in self.structured_outputs
                ]
            if self.perf:
                logger.info("Running TF perf")
                start = time.time()
                for _ in range(PERFITER):
                    _ = concrete_func(**inputs)
                self.tf_runtime = time.time() - start
            logger.info("TensorFlow OK")

        inputs = {}
        shape_override = {}
        tf_reset_default_graph()

        from tf2onnx.tf_utils import compress_graph_def
        const_node_values = None
        with tf.Graph().as_default() as tf_graph:
            if self.large_model:
                const_node_values = compress_graph_def(graph_def)
            tf.import_graph_def(graph_def, name='')

        with tf_session(graph=tf_graph) as sess:
            # create the input data
            for k in input_names:
                v = self.input_names[k]
                t = sess.graph.get_tensor_by_name(k)
                expected_dtype = tf.as_dtype(t.dtype).name
                if isinstance(v, six.text_type) and v.startswith("np."):
                    np_value = eval(v)  # pylint: disable=eval-used
                    if expected_dtype != np_value.dtype:
                        logger.warning(
                            "dtype mismatch for input %s: expected=%s, actual=%s",
                            k, expected_dtype, np_value.dtype)
                    inputs[k] = np_value.astype(expected_dtype)
                else:
                    inputs[k] = self.make_input(v).astype(expected_dtype)

            if self.force_input_shape:
                for k, v in inputs.items():
                    shape_override[k] = list(v.shape)

            # run the model with tensorflow
            if self.skip_tensorflow:
                logger.info("TensorFlow SKIPPED")
            elif not self.large_model:
                tf_results = self.run_tensorflow(sess, inputs)
                logger.info("TensorFlow OK")

        model_proto = None
        if self.skip_conversion:
            if self.large_model:
                external_tensor_storage = ExternalTensorStorage()
                model_proto = utils.model_proto_from_zip(
                    self.converted_model, external_tensor_storage)
            else:
                external_tensor_storage = None
                model_proto = utils.model_proto_from_file(self.converted_model)
            logger.info("ONNX loaded from file")
        else:
            try:
                # convert model to onnx
                onnx_graph = self.to_onnx(sess.graph,
                                          opset=opset,
                                          extra_opset=extra_opset,
                                          shape_override=shape_override,
                                          input_names=inputs.keys(),
                                          const_node_values=const_node_values)
                onnx_graph = optimizer.optimize_graph(onnx_graph)
                print("ONNX", onnx_graph.dump_node_statistics())
                external_tensor_storage = ExternalTensorStorage(
                ) if self.large_model else None
                model_proto = onnx_graph.make_model(
                    "converted from tf2onnx",
                    external_tensor_storage=external_tensor_storage)
                logger.info("To_ONNX, OK")
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file,
                                          external_tensor_storage)
                if self.converted_model:
                    if self.large_model:
                        utils.save_onnx_zip(self.converted_model, model_proto,
                                            external_tensor_storage)
                    else:
                        utils.save_protobuf(self.converted_model, model_proto)
                    logger.info("Created %s", self.converted_model)

            except Exception:
                logger.error("To_ONNX FAIL", exc_info=1)
                return False

        try:
            onnx_results = None
            if backend == "caffe2":
                onnx_results = self.run_caffe2(name, model_proto, inputs)
            elif backend == "onnxruntime":
                onnx_results = self.run_onnxruntime(name, model_proto, inputs,
                                                    external_tensor_storage)
            else:
                raise ValueError("unknown backend")
            logger.info("Run_ONNX OK")

            try:
                if self.skip_tensorflow:
                    logger.info("Results: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(
                                tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_allclose(tf_res,
                                                       onnx_res,
                                                       rtol=self.rtol,
                                                       atol=self.atol)
                    logger.info("Results: OK")
                return True
            except Exception:
                logger.error("Results", exc_info=1)

        except Exception:
            logger.error("Run_ONNX FAIL", exc_info=1)

        return False
Example #13
    def run_test(self, name, backend="caffe2", onnx_file=None, opset=None, extra_opset=None,
                 perf=None, fold_const=None):
        """Run complete test against backend."""
        logger.info("===================================")
        logger.info("Running %s", name)
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_file()
            model_path = os.path.join(dir_name, self.local)
        else:
            model_path = self.local
            dir_name = os.path.dirname(self.local)
        logger.info("Downloaded to %s", model_path)

        input_names = list(self.input_names.keys())
        outputs = self.output_names
        if self.model_type in ["checkpoint"]:
            graph_def, input_names, outputs = loader.from_checkpoint(model_path, input_names, outputs)
        elif self.model_type in ["saved_model"]:
            graph_def, input_names, outputs = loader.from_saved_model(model_path, input_names, outputs)
        else:
            graph_def, input_names, outputs = loader.from_graphdef(model_path, input_names, outputs)

        # create the input data
        inputs = {}
        for k, v in self.input_names.items():
            if k not in input_names:
                continue
            if isinstance(v, six.text_type) and v.startswith("np."):
                inputs[k] = eval(v)  # pylint: disable=eval-used
            else:
                inputs[k] = self.make_input(v)
        if self.more_inputs:
            for k, v in self.more_inputs.items():
                inputs[k] = v

        graph_def = tf2onnx.tfonnx.tf_optimize(inputs.keys(), self.output_names, graph_def, fold_const)
        if utils.is_debug_mode():
            utils.save_protobuf(os.path.join(TEMP_DIR, name + "_after_tf_optimize.pb"), graph_def)
        shape_override = {}
        g = tf.import_graph_def(graph_def, name='')
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True), graph=g) as sess:

            # fix inputs if needed
            for k in inputs.keys():  # pylint: disable=consider-iterating-dictionary
                t = sess.graph.get_tensor_by_name(k)
                dtype = tf.as_dtype(t.dtype).name
                v = inputs[k]
                if dtype != v.dtype:
                    logger.warning("input dtype doesn't match tensorflow's")
                    inputs[k] = np.array(v, dtype=dtype)
            if self.force_input_shape:
                for k, v in inputs.items():
                    shape_override[k] = list(v.shape)

            # run the model with tensorflow
            if self.skip_tensorflow:
                logger.info("TensorFlow SKIPPED")
            else:
                tf_results = self.run_tensorflow(sess, inputs)
                logger.info("TensorFlow OK")
            model_proto = None
            try:
                # convert model to onnx
                onnx_graph = self.to_onnx(sess.graph, opset=opset, extra_opset=extra_opset,
                                          shape_override=shape_override, input_names=inputs.keys())
                model_proto = onnx_graph.make_model("converted from tf2onnx")
                model_proto = optimizer.optimize_graph(onnx_graph).make_model("optimized")
                logger.info("To_ONNX, OK")
                if utils.is_debug_mode():
                    onnx_graph.dump_graph()
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file)
            except Exception:
                logger.error("To_ONNX FAIL", exc_info=1)

        try:
            onnx_results = None
            if backend == "caffe2":
                onnx_results = self.run_caffe2(name, model_proto, inputs)
            elif backend == "onnxmsrtnext":
                onnx_results = self.run_onnxmsrtnext(name, model_proto, inputs)
            elif backend == "onnxruntime":
                onnx_results = self.run_onnxruntime(name, model_proto, inputs)
            else:
                raise ValueError("unknown backend")
            logger.info("Run_ONNX OK")

            try:
                if self.skip_tensorflow:
                    logger.info("Results: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_allclose(tf_res, onnx_res, rtol=self.rtol, atol=self.atol)
                    logger.info("Results: OK")
                return True
            except Exception:
                logger.error("Results", exc_info=1)

        except Exception:
            logger.error("Run_ONNX FAIL", exc_info=1)

        return False
Example #14
    def create_onnx_file(name, model_proto, inputs, outdir):
        os.makedirs(outdir, exist_ok=True)
        model_path = os.path.join(outdir, name + ".onnx")
        utils.save_protobuf(model_path, model_proto)
        logger.info("Created %s", model_path)
    def freeze_and_run_tf(self, func, feed_dict, outputs, as_session,
                          premade_placeholders, large_model, constant_fold):
        np.random.seed(1)  # Make it reproducible.
        clean_feed_dict = {utils.node_name(k): v for k, v in feed_dict.items()}
        if is_tf2() and not as_session:
            #
            # use eager to execute the tensorflow func
            #
            # numpy doesn't work for all ops, make it tf.Tensor()
            input_tensors = [
                tf.TensorSpec(shape=v.shape,
                              dtype=tf.as_dtype(v.dtype),
                              name=utils.node_name(k))
                for k, v in feed_dict.items()
            ]
            input_list = [
                tf.convert_to_tensor(v,
                                     dtype=tf.as_dtype(v.dtype),
                                     name=utils.node_name(k))
                for k, v in feed_dict.items()
            ]
            tf.random.set_seed(1)
            result = func(*input_list)
            if isinstance(result, (list, tuple)):
                # list or tuple
                result = [x.numpy() for x in result]
            else:
                # single result
                result = [result.numpy()]

            # now make the eager functions a graph
            concrete_func = tf.function(func,
                                        input_signature=tuple(input_tensors))
            concrete_func = concrete_func.get_concrete_function()
            graph_def = from_function(concrete_func,
                                      input_names=list(feed_dict.keys()),
                                      output_names=outputs,
                                      large_model=large_model)
            initialized_tables = None
        else:
            #
            # use graph to execute the tensorflow func
            #
            with tf_session() as sess:
                tf_set_random_seed(1)
                input_list = []
                if not premade_placeholders:
                    for k, v in clean_feed_dict.items():
                        input_list.append(
                            tf_placeholder(name=k,
                                           shape=v.shape,
                                           dtype=tf.as_dtype(v.dtype)))
                func(*input_list)
                variables_lib.global_variables_initializer().run()
                tf_tables_initializer().run()

                output_dict = []
                for out_name in outputs:
                    output_dict.append(sess.graph.get_tensor_by_name(out_name))
                result = sess.run(output_dict, feed_dict=feed_dict)
                graph_def = freeze_session(sess,
                                           input_names=list(feed_dict.keys()),
                                           output_names=outputs)
                table_names, key_dtypes, value_dtypes = get_hash_table_info(
                    graph_def)
                initialized_tables = {}
                for n, k_dtype, val_dtype in zip(table_names, key_dtypes,
                                                 value_dtypes):
                    h = lookup_ops.hash_table_v2(k_dtype,
                                                 val_dtype,
                                                 shared_name=n)
                    k, v = lookup_ops.lookup_table_export_v2(
                        h, k_dtype, val_dtype)
                    initialized_tables[n] = (sess.run(k), sess.run(v))

            tf_reset_default_graph()
            with tf_session() as sess:
                tf.import_graph_def(graph_def, name='')
                graph_def = tf_optimize(list(feed_dict.keys()),
                                        outputs,
                                        graph_def,
                                        fold_constant=constant_fold)

        model_path = os.path.join(
            self.test_data_directory,
            self._testMethodName + "_after_tf_optimize.pb")
        utils.save_protobuf(model_path, graph_def)
        self.logger.debug("created file  %s", model_path)
        return result, graph_def, initialized_tables
    def run_test(self,
                 name,
                 backend="onnxruntime",
                 onnx_file=None,
                 opset=None,
                 extra_opset=None,
                 perf=None):
        """Run complete test against backend."""
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_model()
            logger.info("Downloaded to %s", dir_name)
            model_path = os.path.join(
                dir_name, self.local) if self.local != "." else dir_name
        else:
            model_path = self.local

        logger.info("Load model from %s", model_path)
        input_names = list(self.input_names.keys())
        initialized_tables = {}
        outputs = self.output_names
        tflite_path = None
        to_rename = None
        if self.model_type in ["checkpoint"]:
            graph_def, input_names, outputs = tf_loader.from_checkpoint(
                model_path, input_names, outputs)
        elif self.model_type in ["saved_model"]:
            loaded = tf_loader.from_saved_model(
                model_path,
                None,
                None,
                self.tag,
                self.signatures,
                self.concrete_function,
                self.large_model,
                return_concrete_func=not self.run_tf_frozen,
                return_initialized_tables=True,
                return_tensors_to_rename=True)
            if not self.run_tf_frozen:
                # Must maintain ref to imported since concrete_func uses weak refs
                # pylint: disable=unused-variable
                graph_def, input_names, outputs, concrete_func, imported, initialized_tables, to_rename = loaded
            else:
                graph_def, input_names, outputs, initialized_tables, to_rename = loaded
        elif self.model_type in ["keras"]:
            graph_def, input_names, outputs = tf_loader.from_keras(
                model_path, input_names, outputs)
        elif self.model_type in ["tflite"]:
            tflite_path = model_path
            graph_def = None
        else:
            graph_def, input_names, outputs = tf_loader.from_graphdef(
                model_path, input_names, outputs)

        if utils.is_debug_mode():
            utils.save_protobuf(
                os.path.join(TEMP_DIR, name + "_after_tf_optimize.pb"),
                graph_def)

        if tflite_path is not None:
            inputs = {}
            for k in input_names:
                v = self.input_names[k]
                inputs[k] = self.make_input(v)

            interpreter = tf.lite.Interpreter(tflite_path)
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
            input_name_to_index = {
                n['name'].split(':')[0]: n['index']
                for n in input_details
            }
            for k, v in inputs.items():
                interpreter.resize_tensor_input(input_name_to_index[k],
                                                v.shape)
            interpreter.allocate_tensors()

            def run_tflite():
                for k, v in inputs.items():
                    interpreter.set_tensor(input_name_to_index[k], v)
                interpreter.invoke()
                result = [
                    interpreter.get_tensor(output['index'])
                    for output in output_details
                ]
                return result

            tf_results = run_tflite()
            if self.perf:
                logger.info("Running TFLite perf")
                n = 0
                start = time.time()
                stop = start + PERF_TIME
                while time.time() < stop:
                    for _ in range(PERF_STEP):
                        _ = run_tflite()
                    n += PERF_STEP
                self.tf_runtime = 1000 * (time.time() - start) / n
                logger.info("TFLite perf {:.2f}ms/inference, n={}".format(
                    self.tf_runtime, n))
            logger.info("TFLite OK")

        if not self.run_tf_frozen:
            inputs = {}
            for k in input_names:
                v = self.input_names[k]
                inputs[k.split(":")[0]] = tf.constant(self.make_input(v))
            tf_func = tf.function(concrete_func)
            logger.info("Running TF")
            tf_results_d = tf_func(**inputs)
            # If there is only a single output a dict might not be returned
            if isinstance(tf_results_d, tf.Tensor):
                tf_results = [tf_results_d]
            else:
                tf_results = [
                    tf_results_d[k] for k in sorted(tf_results_d.keys())
                ]
            tf_results = [tf_res.numpy() for tf_res in tf_results]
            if self.perf:
                logger.info("Running TF perf")
                n = 0
                start = time.time()
                stop = start + PERF_TIME
                if self.tf_profile is not None:
                    tf.profiler.experimental.start(self.tf_profile)
                while time.time() < stop:
                    for _ in range(PERF_STEP):
                        _ = concrete_func(**inputs)
                    n += PERF_STEP
                if self.tf_profile is not None:
                    tf.profiler.experimental.stop()
                self.tf_runtime = 1000 * (time.time() - start) / n
                logger.info("TF perf {:.2f}ms/inference, n={}".format(
                    self.tf_runtime, n))
            logger.info("TensorFlow OK")

        shape_override = {}
        const_node_values = None
        tf_graph = None

        if graph_def is not None:
            inputs = {}
            tf_reset_default_graph()

            with tf.Graph().as_default() as tf_graph:
                from tf2onnx.tf_utils import compress_graph_def
                if self.large_model:
                    const_node_values = compress_graph_def(graph_def)
                tf.import_graph_def(graph_def, name='')

            with tf_session(graph=tf_graph) as sess:
                # create the input data
                for k in input_names:
                    v = self.input_names[k]
                    t = sess.graph.get_tensor_by_name(k)
                    expected_dtype = tf.as_dtype(t.dtype).name
                    if isinstance(v, six.text_type) and v.startswith("np."):
                        np_value = eval(v)  # pylint: disable=eval-used
                        if expected_dtype != np_value.dtype:
                            logger.warning(
                                "dtype mismatch for input %s: expected=%s, actual=%s",
                                k, expected_dtype, np_value.dtype)
                        inputs[k] = np_value.astype(expected_dtype)
                    else:
                        if expected_dtype == "string":
                            inputs[k] = self.make_input(v).astype(
                                np.str).astype(np.object)
                        else:
                            inputs[k] = self.make_input(v).astype(
                                expected_dtype)

                if self.force_input_shape:
                    for k, v in inputs.items():
                        shape_override[k] = list(v.shape)

                # run the model with tensorflow
                if self.skip_tensorflow:
                    logger.info("TensorFlow SKIPPED")
                elif self.run_tf_frozen:
                    if self.tf_profile is not None:
                        tf.profiler.experimental.start(self.tf_profile)
                    tf_results = self.run_tensorflow(sess, inputs)
                    if self.tf_profile is not None:
                        tf.profiler.experimental.stop()
                    logger.info("TensorFlow OK")
                tf_graph = sess.graph

        model_proto = None
        if self.skip_conversion:
            if self.large_model:
                external_tensor_storage = ExternalTensorStorage()
                model_proto = utils.model_proto_from_zip(
                    self.converted_model, external_tensor_storage)
            else:
                external_tensor_storage = None
                model_proto = utils.model_proto_from_file(self.converted_model)
            logger.info("ONNX loaded from file")
        else:
            try:
                # convert model to onnx
                onnx_graph = self.to_onnx(
                    tf_graph,
                    opset=opset,
                    extra_opset=extra_opset,
                    shape_override=shape_override,
                    input_names=inputs.keys(),
                    const_node_values=const_node_values,
                    initialized_tables=initialized_tables,
                    tflite_path=tflite_path,
                    tensors_to_rename=to_rename)
                onnx_graph = optimizer.optimize_graph(onnx_graph)
                print("ONNX", onnx_graph.dump_node_statistics())
                external_tensor_storage = ExternalTensorStorage(
                ) if self.large_model else None
                model_proto = onnx_graph.make_model(
                    "converted from tf2onnx",
                    external_tensor_storage=external_tensor_storage)
                logger.info("To_ONNX, OK")
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file,
                                          external_tensor_storage)
                if self.converted_model:
                    if self.large_model:
                        utils.save_onnx_zip(self.converted_model, model_proto,
                                            external_tensor_storage)
                    else:
                        utils.save_protobuf(self.converted_model, model_proto)
                    logger.info("Created %s", self.converted_model)

            except Exception:
                logger.error("To_ONNX FAIL", exc_info=1)
                return False

        try:
            onnx_results = None
            if backend == "onnxruntime":
                if to_rename is None:
                    struc_outputs = self.output_names
                else:
                    struc_outputs = [
                        to_rename.get(k, k) for k in self.output_names
                    ]
                onnx_results = self.run_onnxruntime(name, model_proto, inputs,
                                                    struc_outputs,
                                                    external_tensor_storage)
            else:
                raise ValueError("unknown backend")
            logger.info("Run_ONNX OK")

            try:
                if self.skip_tensorflow:
                    logger.info("Results: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(
                                tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            good_cnt = np.count_nonzero(
                                np.isclose(tf_res,
                                           onnx_res,
                                           rtol=self.rtol,
                                           atol=self.atol))
                            bad_cnt = tf_res.size - good_cnt
                            if bad_cnt > self.ptol / 100 * tf_res.size:
                                # Prints a nice error message with stats
                                np.testing.assert_allclose(tf_res,
                                                           onnx_res,
                                                           rtol=self.rtol,
                                                           atol=self.atol)
                    logger.info("Results: OK")
                return True
            except Exception:
                logger.error("Results", exc_info=1)

        except Exception:
            logger.error("Run_ONNX FAIL", exc_info=1)

        return False
Example #17
def main():
    args = get_args()

    # override unknown dimensions from -1 to 1 (aka batchsize 1) since not every runtime does
    # support unknown dimensions.
    utils.ONNX_UNKNOWN_DIMENSION = args.unknown_dim

    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {
            op: (default_custom_op_handler, [])
            for op in args.custom_ops.split(",")
        }
        extra_opset = [helper.make_opsetid(_TENSORFLOW_DOMAIN, 1)]
    else:
        custom_ops = {}
        extra_opset = None

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs = loader.from_saved_model(
            args.saved_model, args.inputs, args.outputs)
        model_path = args.saved_model

    # todo: consider enabling const folding by default?
    graph_def = tf_optimize(inputs, outputs, graph_def, args.fold_const)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             verbose=args.verbose,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

    model_proto = g.make_model("converted from {}".format(model_path))

    new_model_proto = GraphUtil.optimize_model_proto(model_proto)
    if new_model_proto:
        model_proto = new_model_proto
    else:
        print("NON-CRITICAL, optimizers are not applied successfully")

    # write onnx graph
    if args.output:
        utils.save_protobuf(args.output, model_proto)
        print("\nComplete successfully, the onnx model is generated at " +
              args.output)
Example #18
    def run_test(self,
                 name,
                 backend="caffe2",
                 onnx_file=None,
                 opset=None,
                 extra_opset=None,
                 perf=None,
                 fold_const=None):
        """Run complete test against backend."""
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_model()
            logger.info("Downloaded to %s", dir_name)
            model_path = os.path.join(dir_name, self.local)
        else:
            model_path = self.local

        logger.info("Load model from %s", model_path)
        input_names = list(self.input_names.keys())
        outputs = self.output_names
        if self.model_type in ["checkpoint"]:
            graph_def, input_names, outputs = tf_loader.from_checkpoint(
                model_path, input_names, outputs)
        elif self.model_type in ["saved_model"]:
            graph_def, input_names, outputs = tf_loader.from_saved_model(
                model_path, input_names, outputs)
        else:
            graph_def, input_names, outputs = tf_loader.from_graphdef(
                model_path, input_names, outputs)

        # remove unused input names
        input_names = list(
            set(input_names).intersection(self.input_names.keys()))
        graph_def = tf2onnx.tfonnx.tf_optimize(input_names, self.output_names,
                                               graph_def, fold_const)
        if utils.is_debug_mode():
            utils.save_protobuf(
                os.path.join(TEMP_DIR, name + "_after_tf_optimize.pb"),
                graph_def)

        inputs = {}
        shape_override = {}
        g = tf.import_graph_def(graph_def, name='')
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True),
                        graph=g) as sess:
            # create the input data
            for k in input_names:
                v = self.input_names[k]
                t = sess.graph.get_tensor_by_name(k)
                expected_dtype = tf.as_dtype(t.dtype).name
                if isinstance(v, six.text_type) and v.startswith("np."):
                    np_value = eval(v)  # pylint: disable=eval-used
                    if expected_dtype != np_value.dtype:
                        logger.warning(
                            "dtype mismatch for input %s: expected=%s, actual=%s",
                            k, expected_dtype, np_value.dtype)
                    inputs[k] = np_value.astype(expected_dtype)
                else:
                    inputs[k] = self.make_input(v).astype(expected_dtype)

            if self.force_input_shape:
                for k, v in inputs.items():
                    shape_override[k] = list(v.shape)

            # run the model with tensorflow
            if self.skip_tensorflow:
                logger.info("TensorFlow SKIPPED")
            else:
                tf_results = self.run_tensorflow(sess, inputs)
                logger.info("TensorFlow OK")

            model_proto = None
            try:
                # convert model to onnx
                onnx_graph = self.to_onnx(sess.graph,
                                          opset=opset,
                                          extra_opset=extra_opset,
                                          shape_override=shape_override,
                                          input_names=inputs.keys())
                onnx_graph = optimizer.optimize_graph(onnx_graph)
                model_proto = onnx_graph.make_model("converted from tf2onnx")
                logger.info("To_ONNX, OK")
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file)
            except Exception:
                logger.error("To_ONNX FAIL", exc_info=1)
                return False

        try:
            onnx_results = None
            if backend == "caffe2":
                onnx_results = self.run_caffe2(name, model_proto, inputs)
            elif backend == "onnxruntime":
                onnx_results = self.run_onnxruntime(name, model_proto, inputs)
            else:
                raise ValueError("unknown backend")
            logger.info("Run_ONNX OK")

            try:
                if self.skip_tensorflow:
                    logger.info("Results: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(
                                tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_allclose(tf_res,
                                                       onnx_res,
                                                       rtol=self.rtol,
                                                       atol=self.atol)
                    logger.info("Results: OK")
                return True
            except Exception:
                logger.error("Results", exc_info=1)

        except Exception:
            logger.error("Run_ONNX FAIL", exc_info=1)

        return False
Example #19
    def pb_to_onnx(
        inputs: List[Union[str, tf_compat.Tensor]],
        outputs: List[Union[str, tf_compat.Tensor]],
        pb_path: str,
        onnx_path: str,
        opset: int = default_onnx_opset(),
        custom_op_handlers=None,
        extra_opset=None,
        shape_override: Dict[str, List] = None,
    ):
        """
        Export an ONNX format for the graph from PB format.
        Should not be called within an active graph or session.

        :param inputs: the inputs the graph should be created for,
            can be either a list of names or a list of tensors
        :param outputs: the outputs the graph should be created for,
            can be either a list of names or a list of tensors
        :param pb_path: path to the existing PB file
        :param onnx_path: path to the output ONNX file
        :param opset: ONNX opset
        :param custom_op_handlers: dictionary of custom op handlers
        :param extra_opset: list of extra opsets
        :param shape_override: dict mapping input names to new shapes, used to
            override the shapes read from the graph
        """
        try:
            from tf2onnx import constants, optimizer, utils
            from tf2onnx.tfonnx import process_tf_graph, tf_optimize
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "tf2onnx must be installed on the system before using export_onnx"
            )

        try:
            from tf2onnx import tf_loader as loader
        except Exception:
            from tf2onnx import loader

        pb_path = clean_path(pb_path)

        if not os.path.exists(pb_path):
            raise FileNotFoundError(
                ("no pb file for the model found at {}").format(pb_path)
            )

        inputs = [inp if isinstance(inp, str) else inp.name for inp in inputs]
        outputs = [out if isinstance(out, str) else out.name for out in outputs]

        graph_def, inputs, outputs = loader.from_graphdef(pb_path, inputs, outputs)
        graph_def = tf_optimize(inputs, outputs, graph_def, fold_constant=True)

        with tf_compat.Graph().as_default() as tf_graph:
            tf_compat.import_graph_def(graph_def, name="")

        with tf_compat.Session(graph=tf_graph):
            graph = process_tf_graph(
                tf_graph,
                continue_on_error=False,
                target=",".join(constants.DEFAULT_TARGET),
                opset=opset,
                custom_op_handlers=custom_op_handlers,
                extra_opset=extra_opset,
                shape_override=shape_override,
                input_names=inputs,
                output_names=outputs,
            )

        onnx_graph = optimizer.optimize_graph(graph)
        model_proto = onnx_graph.make_model("converted from {}".format(pb_path))

        onnx_path = clean_path(onnx_path)
        create_parent_dirs(onnx_path)
        utils.save_protobuf(onnx_path, model_proto)
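        # Usage sketch (hypothetical paths and tensor names; assumes this helper
        # is reachable as a plain function or static method):
        #
        #   pb_to_onnx(
        #       inputs=["input:0"],
        #       outputs=["output:0"],
        #       pb_path="frozen_model.pb",
        #       onnx_path="model.onnx",
        #       opset=11,
        #   )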
    def create_onnx_file(name, model_proto, inputs, outdir):
        os.makedirs(outdir, exist_ok=True)
        model_path = os.path.join(outdir, name + ".onnx")
        utils.save_protobuf(model_path, model_proto)
        print("\tcreated", model_path)
Example #21
    def run_test_case(self,
                      func,
                      feed_dict,
                      input_names_with_port,
                      output_names_with_port,
                      rtol=1e-07,
                      atol=1e-5,
                      mtol=None,
                      convert_var_to_const=True,
                      constant_fold=True,
                      check_value=True,
                      check_shape=True,
                      check_dtype=True,
                      process_args=None,
                      onnx_feed_dict=None,
                      graph_validator=None,
                      as_session=False,
                      large_model=False,
                      premade_placeholders=False,
                      use_custom_ops=False,
                      optimize=True):
        """
        This function tests all scenarios available through the command line.
        The command line always runs the optimizers.
        However, they may modify the final graph into something different than the
        tested converter implements. Set `optimize=False` to keep the original
        set of nodes and helps debugging. However, the same function should
        be called with `optimize=True` to test what the user would actually get.
        """
        test_tf = not self.config.skip_tf_tests
        test_tflite = not self.config.skip_tflite_tests
        test_tfjs = not self.config.skip_tfjs_tests
        run_tfl_consistency_test = test_tf and test_tflite and self.config.run_tfl_consistency_test
        # optional - passed to process_tf_graph
        if process_args is None:
            process_args = {}
        # optional - pass distinct feed_dict to onnx runtime
        if onnx_feed_dict is None:
            onnx_feed_dict = feed_dict
        input_names_with_port = list(feed_dict)
        tf_reset_default_graph()
        if tf_lite is None:
            test_tflite = False
        g = None

        expected, graph_def, initialized_tables = \
            self.freeze_and_run_tf(func, feed_dict, output_names_with_port, as_session,
                                   premade_placeholders, large_model, constant_fold)

        graph_def_path = os.path.join(
            self.test_data_directory,
            self._testMethodName + "_after_tf_optimize.pb")
        utils.save_protobuf(graph_def_path, graph_def)
        self.logger.debug("created file  %s", graph_def_path)

        if test_tfjs:
            tfjs_path = self.convert_to_tfjs(graph_def_path,
                                             output_names_with_port)
            if tfjs_path is None:
                test_tfjs = False

        if test_tflite:
            tflite_path = self.convert_to_tflite(graph_def, feed_dict,
                                                 output_names_with_port)
            test_tflite = tflite_path is not None and self.tflite_has_supported_types(
                tflite_path)

        if test_tf:
            tf_reset_default_graph()
            with tf_session() as sess:
                const_node_values = None
                if large_model:
                    const_node_values = compress_graph_def(graph_def)
                tf.import_graph_def(graph_def, name='')

                g = process_tf_graph(sess.graph,
                                     opset=self.config.opset,
                                     input_names=list(feed_dict.keys()),
                                     output_names=output_names_with_port,
                                     target=self.config.target,
                                     const_node_values=const_node_values,
                                     initialized_tables=initialized_tables,
                                     **process_args)
                if optimize:
                    g = optimizer.optimize_graph(g, catch_errors=False)
                actual = self.run_backend(g,
                                          output_names_with_port,
                                          onnx_feed_dict,
                                          large_model,
                                          use_custom_ops=use_custom_ops)

            self.assert_results_equal(expected, actual, rtol, atol, mtol,
                                      check_value, check_shape, check_dtype)
            self.assert_shapes_correct(g, self.config.allow_missing_shapes,
                                       not self.config.skip_onnx_checker)

            if graph_validator:
                self.assertTrue(graph_validator(g))

        if test_tflite:
            tfl_res, tfl_outputs = self.run_tflite(tflite_path, feed_dict)
            test_tflite = tfl_res is not None

        if test_tflite:
            if run_tfl_consistency_test:
                self.assert_results_equal(expected, tfl_res, rtol, atol, mtol,
                                          check_value, check_shape,
                                          check_dtype)

            tfl_process_args = process_args.copy()
            if 'inputs_as_nchw' in tfl_process_args:
                nchw_inps_with_port = tfl_process_args['inputs_as_nchw']
                tfl_process_args['inputs_as_nchw'] = [
                    i.split(':')[0] for i in nchw_inps_with_port
                ]
            input_names_without_port = [
                inp.split(':')[0] for inp in feed_dict.keys()
            ]

            g = process_tf_graph(None,
                                 opset=self.config.opset,
                                 input_names=input_names_without_port,
                                 output_names=tfl_outputs,
                                 target=self.config.target,
                                 tflite_path=tflite_path,
                                 **tfl_process_args)
            if optimize:
                g = optimizer.optimize_graph(g)
            onnx_feed_dict_without_port = {
                k.split(':')[0]: v
                for k, v in onnx_feed_dict.items()
            }
            onnx_tfl_res = self.run_backend(g,
                                            tfl_outputs,
                                            onnx_feed_dict_without_port,
                                            postfix="_from_tflite",
                                            use_custom_ops=use_custom_ops)

            self.assert_results_equal(tfl_res, onnx_tfl_res, rtol, atol, mtol,
                                      check_value, check_shape, check_dtype)
            self.assert_shapes_correct(g, self.config.allow_missing_shapes,
                                       not self.config.skip_onnx_checker)

            if graph_validator:
                self.assertTrue(graph_validator(g))

        if test_tfjs:
            try:
                tfjs_res = run_tfjs(tfjs_path, feed_dict)
            except RuntimeError as e:
                ignored_errors = [
                    "is not yet supported",
                    "Operands could not be broadcast together",
                    "unknown dtype null", "must be [NaN",
                    "Cannot read property 'name' of undefined",
                    "Either strides or dilations must be 1", "does not support"
                ]
                if any(err in str(e) for err in ignored_errors):
                    test_tfjs = False
                else:
                    raise e

        if test_tfjs:
            g = process_tf_graph(None,
                                 opset=self.config.opset,
                                 input_names=list(feed_dict.keys()),
                                 output_names=None,
                                 target=self.config.target,
                                 tfjs_path=tfjs_path,
                                 **process_args)
            g = optimizer.optimize_graph(g)
            onnx_tfjs_res = self.run_backend(g,
                                             None,
                                             onnx_feed_dict,
                                             large_model,
                                             postfix="_from_tfjs",
                                             use_custom_ops=use_custom_ops)

            self.assert_results_equal(tfjs_res,
                                      onnx_tfjs_res,
                                      rtol,
                                      atol,
                                      mtol,
                                      check_value,
                                      check_shape,
                                      check_dtype=False)
            self.assert_shapes_correct(g, self.config.allow_missing_shapes,
                                       not self.config.skip_onnx_checker)

            if graph_validator:
                self.assertTrue(graph_validator(g))

        if g is None:
            raise unittest.SkipTest("tf, tflite, and tfjs marked to skip")
        return g
Example #22
    def run_test_case(self,
                      feed_dict,
                      input_names_with_port,
                      output_names_with_port,
                      rtol=1e-07,
                      atol=1e-5,
                      convert_var_to_const=True,
                      constant_fold=True,
                      check_value=True,
                      check_shape=False,
                      check_dtype=True,
                      process_args=None,
                      onnx_feed_dict=None,
                      graph_validator=None):
        # optional - passed to process_tf_graph
        if process_args is None:
            process_args = {}
        # optional - pass distinct feed_dict to onnx runtime
        if onnx_feed_dict is None:
            onnx_feed_dict = feed_dict

        graph_def = None
        if convert_var_to_const:
            with tf.Session() as sess:
                variables_lib.global_variables_initializer().run()
                output_name_without_port = [
                    n.split(':')[0] for n in output_names_with_port
                ]
                graph_def = tf.graph_util.convert_variables_to_constants(
                    sess, sess.graph_def, output_name_without_port)

            tf.reset_default_graph()
            tf.import_graph_def(graph_def, name='')

        with tf.Session() as sess:
            variables_lib.global_variables_initializer().run()
            output_dict = []
            for out_name in output_names_with_port:
                output_dict.append(sess.graph.get_tensor_by_name(out_name))
            expected = sess.run(output_dict, feed_dict=feed_dict)

        if self.config.is_debug_mode:
            if not os.path.exists(self.test_data_directory):
                os.makedirs(self.test_data_directory)
            model_path = os.path.join(self.test_data_directory,
                                      self._testMethodName + "_original.pb")
            utils.save_protobuf(model_path, sess.graph_def)
            self.log.debug("created file %s", model_path)

        graph_def = tf_optimize(input_names_with_port, output_names_with_port,
                                sess.graph_def, constant_fold)

        if self.config.is_debug_mode and constant_fold:
            model_path = os.path.join(
                self.test_data_directory,
                self._testMethodName + "_after_tf_optimize.pb")
            utils.save_protobuf(model_path, graph_def)
            self.log.debug("created file  %s", model_path)

        tf.reset_default_graph()
        tf.import_graph_def(graph_def, name='')

        with tf.Session() as sess:
            g = process_tf_graph(sess.graph,
                                 opset=self.config.opset,
                                 output_names=output_names_with_port,
                                 target=self.config.target,
                                 **process_args)
            g = optimizer.optimize_graph(g)
            actual = self._run_backend(g, output_names_with_port,
                                       onnx_feed_dict)

        for expected_val, actual_val in zip(expected, actual):
            if check_value:
                self.assertAllClose(expected_val,
                                    actual_val,
                                    rtol=rtol,
                                    atol=atol)
            if check_dtype:
                self.assertEqual(expected_val.dtype, actual_val.dtype)
            if check_shape:
                self.assertEqual(expected_val.shape, actual_val.shape)

        if graph_validator:
            self.assertTrue(graph_validator(g))

        return g
Example #23
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    custom_ops = {}
    initialized_tables = None
    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {
            op: (default_custom_op_handler, [])
            for op in args.custom_ops.split(",")
        }
        extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = tf_loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = tf_loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs, initialized_tables = tf_loader.from_saved_model(
            args.saved_model,
            args.inputs,
            args.outputs,
            args.tag,
            args.signature_def,
            args.concrete_function,
            args.large_model,
            return_initialized_tables=True)
        model_path = args.saved_model
    if args.keras:
        graph_def, inputs, outputs = tf_loader.from_keras(
            args.keras, args.inputs, args.outputs)
        model_path = args.keras

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    with tf.Graph().as_default() as tf_graph:
        const_node_values = None
        if args.large_model:
            const_node_values = compress_graph_def(graph_def)
        if args.output_frozen_graph:
            utils.save_protobuf(args.output_frozen_graph, graph_def)
        tf.import_graph_def(graph_def, name='')
    with tf_loader.tf_session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw,
                             const_node_values=const_node_values,
                             initialized_tables=initialized_tables)

    onnx_graph = optimizer.optimize_graph(g)

    tensor_storage = ExternalTensorStorage() if args.large_model else None
    model_proto = onnx_graph.make_model("converted from {}".format(model_path),
                                        external_tensor_storage=tensor_storage)

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)
    if args.output:
        if args.large_model:
            utils.save_onnx_zip(args.output, model_proto, tensor_storage)
            logger.info(
                "Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.",
                args.output)
        else:
            utils.save_protobuf(args.output, model_proto)
            logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info(
            "To export ONNX model to file, please run with `--output` option")
    def run_test_case(self,
                      func,
                      feed_dict,
                      input_names_with_port,
                      output_names_with_port,
                      rtol=1e-07,
                      atol=1e-5,
                      convert_var_to_const=True,
                      constant_fold=True,
                      check_value=True,
                      check_shape=True,
                      check_dtype=True,
                      process_args=None,
                      onnx_feed_dict=None,
                      graph_validator=None,
                      as_session=False,
                      large_model=False):
        # optional - passed to process_tf_graph
        if process_args is None:
            process_args = {}
        # optional - pass distinct feed_dict to onnx runtime
        if onnx_feed_dict is None:
            onnx_feed_dict = feed_dict
        input_names_with_port = list(feed_dict)
        tf_reset_default_graph()
        graph_def = None

        np.random.seed(1)  # Make it reproducible.
        clean_feed_dict = {utils.node_name(k): v for k, v in feed_dict.items()}
        if is_tf2() and not as_session:
            #
            # use eager to execute the tensorflow func
            #
            # numpy arrays don't work for all ops, so convert the inputs to tf.Tensor
            input_tensors = [
                tf.TensorSpec(shape=v.shape,
                              dtype=tf.as_dtype(v.dtype),
                              name=utils.node_name(k))
                for k, v in feed_dict.items()
            ]
            input_list = [
                tf.convert_to_tensor(v,
                                     dtype=tf.as_dtype(v.dtype),
                                     name=utils.node_name(k))
                for k, v in feed_dict.items()
            ]
            tf.random.set_seed(1)
            expected = func(*input_list)
            if isinstance(expected, (list, tuple)):
                # list or tuple
                expected = [x.numpy() for x in expected]
            else:
                # single result
                expected = [expected.numpy()]

            # now make the eager functions a graph
            concrete_func = tf.function(func,
                                        input_signature=tuple(input_tensors))
            concrete_func = concrete_func.get_concrete_function()
            graph_def = from_function(concrete_func,
                                      input_names=list(feed_dict.keys()),
                                      output_names=output_names_with_port,
                                      large_model=large_model)
        else:
            #
            # use graph to execute the tensorflow func
            #
            with tf_session() as sess:
                tf_set_random_seed(1)
                input_list = []
                for k, v in clean_feed_dict.items():
                    input_list.append(
                        tf_placeholder(name=k,
                                       shape=v.shape,
                                       dtype=tf.as_dtype(v.dtype)))
                func(*input_list)
                variables_lib.global_variables_initializer().run()
                tf_tables_initializer().run()
                output_dict = []
                for out_name in output_names_with_port:
                    output_dict.append(sess.graph.get_tensor_by_name(out_name))
                expected = sess.run(output_dict, feed_dict=feed_dict)
                graph_def = freeze_session(sess,
                                           input_names=list(feed_dict.keys()),
                                           output_names=output_names_with_port)

            tf_reset_default_graph()
            with tf_session() as sess:
                tf.import_graph_def(graph_def, name='')
                graph_def = tf_optimize(list(feed_dict.keys()),
                                        output_names_with_port,
                                        graph_def,
                                        fold_constant=constant_fold)

        tf_reset_default_graph()
        with tf_session() as sess:
            const_node_values = None
            if large_model:
                const_node_values = compress_graph_def(graph_def)
            tf.import_graph_def(graph_def, name='')

            if self.config.is_debug_mode:
                model_path = os.path.join(
                    self.test_data_directory,
                    self._testMethodName + "_after_tf_optimize.pb")
                utils.save_protobuf(model_path, graph_def)
                self.logger.debug("created file  %s", model_path)

            g = process_tf_graph(sess.graph,
                                 opset=self.config.opset,
                                 input_names=list(feed_dict.keys()),
                                 output_names=output_names_with_port,
                                 target=self.config.target,
                                 const_node_values=const_node_values,
                                 **process_args)
            g = optimizer.optimize_graph(g)
            actual = self.run_backend(g, output_names_with_port,
                                      onnx_feed_dict, large_model)

        for expected_val, actual_val in zip(expected, actual):
            if check_value:
                self.assertAllClose(expected_val,
                                    actual_val,
                                    rtol=rtol,
                                    atol=atol)
            if check_dtype:
                self.assertEqual(expected_val.dtype, actual_val.dtype)
            # why the shape check is needed: comparing [] with a scalar is an issue, see
            # https://github.com/numpy/numpy/issues/11071
            if check_shape:
                self.assertEqual(expected_val.shape, actual_val.shape)

        if graph_validator:
            self.assertTrue(graph_validator(g))

        return g
Example #25
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    tflite_path = None
    custom_ops = {}
    initialized_tables = None
    if args.custom_ops:
        using_tf_opset = False
        for op in args.custom_ops.split(","):
            if ":" in op:
                op, domain = op.split(":")
            else:
                # default custom ops for tensorflow-onnx are in the "tf" namespace
                using_tf_opset = True
                domain = constants.TENSORFLOW_OPSET.domain
            custom_ops[op] = (make_default_custom_op_handler(domain), [])
        if using_tf_opset:
            extra_opset.append(constants.TENSORFLOW_OPSET)
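    # The --custom-ops value parsed above is a comma-separated list; each entry
    # is either a bare op name (placed in the default "tf" namespace) or
    # "OpName:domain". For example (hypothetical op names):
    #
    #   --custom-ops "MyTfOp,MyContribOp:ai.onnx.contrib"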

    if any(opset.domain == constants.CONTRIB_OPS_DOMAIN
           for opset in extra_opset):
        try:
            import tensorflow_text  # pylint: disable=import-outside-toplevel
        except ModuleNotFoundError:
            logger.warning(
                "tensorflow_text not installed. Model will fail to load if tensorflow_text ops are used."
            )

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = tf_loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = tf_loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs, initialized_tables = tf_loader.from_saved_model(
            args.saved_model,
            args.inputs,
            args.outputs,
            args.tag,
            args.signature_def,
            args.concrete_function,
            args.large_model,
            return_initialized_tables=True)
        model_path = args.saved_model
    if args.keras:
        graph_def, inputs, outputs = tf_loader.from_keras(
            args.keras, args.inputs, args.outputs)
        model_path = args.keras
    if args.tflite:
        graph_def = None
        inputs = None
        outputs = None
        tflite_path = args.tflite
        model_path = tflite_path

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    tf_graph = None
    const_node_values = None
    if graph_def is not None:
        with tf.Graph().as_default() as tf_graph:
            const_node_values = None
            if args.large_model:
                const_node_values = compress_graph_def(graph_def)
            if args.output_frozen_graph:
                utils.save_protobuf(args.output_frozen_graph, graph_def)
            tf.import_graph_def(graph_def, name='')

    with tf_loader.tf_session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw,
                             ignore_default=args.ignore_default,
                             use_default=args.use_default,
                             const_node_values=const_node_values,
                             initialized_tables=initialized_tables,
                             tflite_path=tflite_path,
                             dequantize=args.dequantize)

    onnx_graph = optimizer.optimize_graph(g)

    tensor_storage = ExternalTensorStorage() if args.large_model else None
    model_proto = onnx_graph.make_model("converted from {}".format(model_path),
                                        external_tensor_storage=tensor_storage)

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)
    if args.output:
        if args.large_model:
            utils.save_onnx_zip(args.output, model_proto, tensor_storage)
            logger.info(
                "Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.",
                args.output)
        else:
            utils.save_protobuf(args.output, model_proto)
            logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info(
            "To export ONNX model to file, please run with `--output` option")