Example #1
    def execute(self):
        if self._tf_format == 'TF_PB':
            graph_def = graph_pb2.GraphDef()
            with tf.io.gfile.GFile(self._tf_file, 'rb') as f:
                graph_def.ParseFromString(f.read())
            inputs, outputs = find_out_terminal_node(graph_def, postfix=True)
            graph_def, inputs, outputs = tf_loader.from_graphdef(
                self._tf_file, inputs, outputs)
        elif self._tf_format == 'SAVED_MODEL':
            graph_def, inputs, outputs = tf_loader.from_saved_model(
                self._tf_file, None, None)
        else:
            if self._outputs is None:
                raise ImportError("Missing '--outputs' parameter.")
            if self._inputs is None:
                raise ImportError("Missing '--inputs' parameter.")

            inputs = [i + ":0" for i in self._inputs.split(",")]
            outputs = [i + ":0" for i in self._outputs.split(",")]
            if self._tf_format == 'TF_CKPT_V1':
                graph_def = self.load_checkpoint_v1()
            elif self._tf_format == 'TF_CKPT_V2':
                graph_def, inputs, outputs = tf_loader.from_checkpoint(
                    self._tf_file, inputs, outputs)
        onnx_model = self.convert_to_onnx(graph_def, inputs, outputs)
        onnx_importer = OnnxImporter()
        onnx_importer.import_from_onnx_model(onnx_model)
        return onnx_importer.execute()
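A note on the ":0" suffix used above: tf_loader expects tensor names (an op name plus an output index) rather than bare op names, which is why the comma-separated op names are suffixed before being passed on. A small hedged sketch with hypothetical names:

# Hypothetical op names as they might be passed via --inputs / --outputs.
op_names = "input_ids,attention_mask".split(",")
tensor_names = [name + ":0" for name in op_names]  # first output of each op
# tensor_names == ["input_ids:0", "attention_mask:0"]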
Example #2
def convert(input_ops_dict, output_ops, input_model, output_model):
    '''Convert a Keras .h5 model to a TensorFlow .pb model.
        Args:
            input_ops_dict: dict mapping input op names to their shapes
            output_ops: output tensor names (passed through unchanged)
            input_model: path to the input Keras .h5 model
            output_model: path to the output TensorFlow .pb model
    '''
    onnx_name = ".tmp.onnx"
    pb_name = ".tmp.pb"
    # keras --> onnx --> pb --> onnx --> pb

    # keras --> onnx
    model = tf.keras.models.load_model(input_model)
    onnx_model = keras2onnx.convert_keras(model, model.name, target_opset=8)
    keras2onnx.save_model(onnx_model, onnx_name)

    # onnx --> tf
    onnx_model = onnx.load(onnx_name)
    tf_rep = prepare(onnx_model, input_shape_dict=input_ops_dict)
    tf_rep.export_graph(pb_name)

    # tf --> onnx (fold constants)
    inputs = input_ops_dict.keys()
    inputs = [i + ":0" for i in inputs]
    outputs = output_ops
    graph_def, inputs, outputs = tf_loader.from_graphdef(
        pb_name, inputs, outputs)
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name="")

    g = tf2onnx.tfonnx.process_tf_graph(tf_graph,
                                        opset=8,
                                        input_names=inputs,
                                        output_names=outputs)
    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from %s" % pb_name)
    utils.save_protobuf(onnx_name, model_proto)

    # onnx --> tf
    onnx_model = onnx.load(onnx_name)
    tf_rep = prepare(onnx_model, input_shape_dict=input_ops_dict)
    tf_rep.export_graph(output_model)

    # remove tmp files
    if os.path.exists(onnx_name):
        os.remove(onnx_name)
    if os.path.exists(pb_name):
        os.remove(pb_name)
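A minimal usage sketch of convert(); the model path, op names, and shapes below are hypothetical placeholders. Note that the input dict keys are bare op names (the ":0" suffix is appended inside the function), while output_ops are passed through unchanged:

# Hypothetical Keras classifier with one input op and one softmax output tensor.
input_ops_dict = {"input_1": [1, 224, 224, 3]}
output_ops = ["dense_1/Softmax:0"]
convert(input_ops_dict, output_ops, "model.h5", "model.pb")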
Example #3
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    custom_ops = {}
    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {
            op: (default_custom_op_handler, [])
            for op in args.custom_ops.split(",")
        }
        extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = tf_loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = tf_loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs = tf_loader.from_saved_model(
            args.saved_model, args.inputs, args.outputs, args.signature_def)
        model_path = args.saved_model
    if args.keras:
        graph_def, inputs, outputs = tf_loader.from_keras(
            args.keras, args.inputs, args.outputs)
        model_path = args.keras

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf_loader.tf_session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from {}".format(model_path))

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)
    if args.output:
        utils.save_protobuf(args.output, model_proto)
        logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info(
            "To export ONNX model to file, please run with `--output` option")
Example #4
    def run_test(self,
                 name,
                 backend="caffe2",
                 onnx_file=None,
                 opset=None,
                 extra_opset=None,
                 perf=None,
                 fold_const=None):
        """Run complete test against backend."""
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_model()
            logger.info("Downloaded to %s", dir_name)
            model_path = os.path.join(
                dir_name, self.local) if self.local != "." else dir_name
        else:
            model_path = self.local

        logger.info("Load model from %s", model_path)
        input_names = list(self.input_names.keys())
        outputs = self.output_names
        if self.model_type in ["checkpoint"]:
            graph_def, input_names, outputs = tf_loader.from_checkpoint(
                model_path, input_names, outputs)
        elif self.model_type in ["saved_model"]:
            loaded = tf_loader.from_saved_model(
                model_path,
                input_names,
                outputs,
                self.tag,
                self.signatures,
                self.concrete_function,
                self.large_model,
                return_concrete_func=self.large_model)
            if self.large_model:
                # Must maintain ref to imported since concrete_func uses weak refs
                # pylint: disable=unused-variable
                graph_def, input_names, outputs, concrete_func, imported = loaded
            else:
                graph_def, input_names, outputs = loaded
        elif self.model_type in ["keras"]:
            graph_def, input_names, outputs = tf_loader.from_keras(
                model_path, input_names, outputs)
        else:
            graph_def, input_names, outputs = tf_loader.from_graphdef(
                model_path, input_names, outputs)

        if utils.is_debug_mode():
            utils.save_protobuf(
                os.path.join(TEMP_DIR, name + "_after_tf_optimize.pb"),
                graph_def)

        if self.large_model:
            inputs = {}
            for k in input_names:
                v = self.input_names[k]
                inputs[k.split(":")[0]] = tf.constant(self.make_input(v))
            tf_func = tf.function(concrete_func)
            logger.info("Running TF")
            tf_results_d = tf_func(**inputs)
            if self.structured_outputs is None:
                tf_results = list(tf_results_d.values())
            else:
                tf_results = [
                    tf_results_d[output] for output in self.structured_outputs
                ]
            if self.perf:
                logger.info("Running TF perf")
                start = time.time()
                for _ in range(PERFITER):
                    _ = concrete_func(**inputs)
                self.tf_runtime = time.time() - start
            logger.info("TensorFlow OK")

        inputs = {}
        shape_override = {}
        tf_reset_default_graph()

        from tf2onnx.tf_utils import compress_graph_def
        const_node_values = None
        with tf.Graph().as_default() as tf_graph:
            if self.large_model:
                const_node_values = compress_graph_def(graph_def)
            tf.import_graph_def(graph_def, name='')

        with tf_session(graph=tf_graph) as sess:
            # create the input data
            for k in input_names:
                v = self.input_names[k]
                t = sess.graph.get_tensor_by_name(k)
                expected_dtype = tf.as_dtype(t.dtype).name
                if isinstance(v, six.text_type) and v.startswith("np."):
                    np_value = eval(v)  # pylint: disable=eval-used
                    if expected_dtype != np_value.dtype:
                        logger.warning(
                            "dtype mismatch for input %s: expected=%s, actual=%s",
                            k, expected_dtype, np_value.dtype)
                    inputs[k] = np_value.astype(expected_dtype)
                else:
                    inputs[k] = self.make_input(v).astype(expected_dtype)

            if self.force_input_shape:
                for k, v in inputs.items():
                    shape_override[k] = list(v.shape)

            # run the model with tensorflow
            if self.skip_tensorflow:
                logger.info("TensorFlow SKIPPED")
            elif not self.large_model:
                tf_results = self.run_tensorflow(sess, inputs)
                logger.info("TensorFlow OK")

        model_proto = None
        if self.skip_conversion:
            if self.large_model:
                external_tensor_storage = ExternalTensorStorage()
                model_proto = utils.model_proto_from_zip(
                    self.converted_model, external_tensor_storage)
            else:
                external_tensor_storage = None
                model_proto = utils.model_proto_from_file(self.converted_model)
            logger.info("ONNX loaded from file")
        else:
            try:
                # convert model to onnx
                onnx_graph = self.to_onnx(sess.graph,
                                          opset=opset,
                                          extra_opset=extra_opset,
                                          shape_override=shape_override,
                                          input_names=inputs.keys(),
                                          const_node_values=const_node_values)
                onnx_graph = optimizer.optimize_graph(onnx_graph)
                print("ONNX", onnx_graph.dump_node_statistics())
                external_tensor_storage = ExternalTensorStorage(
                ) if self.large_model else None
                model_proto = onnx_graph.make_model(
                    "converted from tf2onnx",
                    external_tensor_storage=external_tensor_storage)
                logger.info("To_ONNX, OK")
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file,
                                          external_tensor_storage)
                if self.converted_model:
                    if self.large_model:
                        utils.save_onnx_zip(self.converted_model, model_proto,
                                            external_tensor_storage)
                    else:
                        utils.save_protobuf(self.converted_model, model_proto)
                    logger.info("Created %s", self.converted_model)

            except Exception:
                logger.error("To_ONNX FAIL", exc_info=1)
                return False

        try:
            onnx_results = None
            if backend == "caffe2":
                onnx_results = self.run_caffe2(name, model_proto, inputs)
            elif backend == "onnxruntime":
                onnx_results = self.run_onnxruntime(name, model_proto, inputs,
                                                    external_tensor_storage)
            else:
                raise ValueError("unknown backend")
            logger.info("Run_ONNX OK")

            try:
                if self.skip_tensorflow:
                    logger.info("Results: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(
                                tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_allclose(tf_res,
                                                       onnx_res,
                                                       rtol=self.rtol,
                                                       atol=self.atol)
                    logger.info("Results: OK")
                return True
            except Exception:
                logger.error("Results", exc_info=1)

        except Exception:
            logger.error("Run_ONNX FAIL", exc_info=1)

        return False
Example #5
    def run_test(self,
                 name,
                 backend="onnxruntime",
                 onnx_file=None,
                 opset=None,
                 extra_opset=None,
                 perf=None):
        """Run complete test against backend."""
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_model()
            logger.info("Downloaded to %s", dir_name)
            model_path = os.path.join(
                dir_name, self.local) if self.local != "." else dir_name
        else:
            model_path = self.local

        logger.info("Load model from %s", model_path)
        input_names = list(self.input_names.keys())
        initialized_tables = {}
        outputs = self.output_names
        tflite_path = None
        to_rename = None
        if self.model_type in ["checkpoint"]:
            graph_def, input_names, outputs = tf_loader.from_checkpoint(
                model_path, input_names, outputs)
        elif self.model_type in ["saved_model"]:
            loaded = tf_loader.from_saved_model(
                model_path,
                None,
                None,
                self.tag,
                self.signatures,
                self.concrete_function,
                self.large_model,
                return_concrete_func=not self.run_tf_frozen,
                return_initialized_tables=True,
                return_tensors_to_rename=True)
            if not self.run_tf_frozen:
                # Must maintain ref to imported since concrete_func uses weak refs
                # pylint: disable=unused-variable
                graph_def, input_names, outputs, concrete_func, imported, initialized_tables, to_rename = loaded
            else:
                graph_def, input_names, outputs, initialized_tables, to_rename = loaded
        elif self.model_type in ["keras"]:
            graph_def, input_names, outputs = tf_loader.from_keras(
                model_path, input_names, outputs)
        elif self.model_type in ["tflite"]:
            tflite_path = model_path
            graph_def = None
        else:
            graph_def, input_names, outputs = tf_loader.from_graphdef(
                model_path, input_names, outputs)

        if utils.is_debug_mode():
            utils.save_protobuf(
                os.path.join(TEMP_DIR, name + "_after_tf_optimize.pb"),
                graph_def)

        if tflite_path is not None:
            inputs = {}
            for k in input_names:
                v = self.input_names[k]
                inputs[k] = self.make_input(v)

            interpreter = tf.lite.Interpreter(tflite_path)
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
            input_name_to_index = {
                n['name'].split(':')[0]: n['index']
                for n in input_details
            }
            for k, v in inputs.items():
                interpreter.resize_tensor_input(input_name_to_index[k],
                                                v.shape)
            interpreter.allocate_tensors()

            def run_tflite():
                for k, v in inputs.items():
                    interpreter.set_tensor(input_name_to_index[k], v)
                interpreter.invoke()
                result = [
                    interpreter.get_tensor(output['index'])
                    for output in output_details
                ]
                return result

            tf_results = run_tflite()
            if self.perf:
                logger.info("Running TFLite perf")
                n = 0
                start = time.time()
                stop = start + PERF_TIME
                while time.time() < stop:
                    for _ in range(PERF_STEP):
                        _ = run_tflite()
                    n += PERF_STEP
                self.tf_runtime = 1000 * (time.time() - start) / n
                logger.info("TFLite perf {:.2f}ms/inference, n={}".format(
                    self.tf_runtime, n))
            logger.info("TFLite OK")

        if not self.run_tf_frozen:
            inputs = {}
            for k in input_names:
                v = self.input_names[k]
                inputs[k.split(":")[0]] = tf.constant(self.make_input(v))
            tf_func = tf.function(concrete_func)
            logger.info("Running TF")
            tf_results_d = tf_func(**inputs)
            # If there is only a single output a dict might not be returned
            if isinstance(tf_results_d, tf.Tensor):
                tf_results = [tf_results_d]
            else:
                tf_results = [
                    tf_results_d[k] for k in sorted(tf_results_d.keys())
                ]
            tf_results = [tf_res.numpy() for tf_res in tf_results]
            if self.perf:
                logger.info("Running TF perf")
                n = 0
                start = time.time()
                stop = start + PERF_TIME
                if self.tf_profile is not None:
                    tf.profiler.experimental.start(self.tf_profile)
                while time.time() < stop:
                    for _ in range(PERF_STEP):
                        _ = concrete_func(**inputs)
                    n += PERF_STEP
                if self.tf_profile is not None:
                    tf.profiler.experimental.stop()
                self.tf_runtime = 1000 * (time.time() - start) / n
                logger.info("TF perf {:.2f}ms/inference, n={}".format(
                    self.tf_runtime, n))
            logger.info("TensorFlow OK")

        shape_override = {}
        const_node_values = None
        tf_graph = None

        if graph_def is not None:
            inputs = {}
            tf_reset_default_graph()

            with tf.Graph().as_default() as tf_graph:
                from tf2onnx.tf_utils import compress_graph_def
                if self.large_model:
                    const_node_values = compress_graph_def(graph_def)
                tf.import_graph_def(graph_def, name='')

            with tf_session(graph=tf_graph) as sess:
                # create the input data
                for k in input_names:
                    v = self.input_names[k]
                    t = sess.graph.get_tensor_by_name(k)
                    expected_dtype = tf.as_dtype(t.dtype).name
                    if isinstance(v, six.text_type) and v.startswith("np."):
                        np_value = eval(v)  # pylint: disable=eval-used
                        if expected_dtype != np_value.dtype:
                            logger.warning(
                                "dtype mismatch for input %s: expected=%s, actual=%s",
                                k, expected_dtype, np_value.dtype)
                        inputs[k] = np_value.astype(expected_dtype)
                    else:
                        if expected_dtype == "string":
                            inputs[k] = self.make_input(v).astype(
                                str).astype(object)
                        else:
                            inputs[k] = self.make_input(v).astype(
                                expected_dtype)

                if self.force_input_shape:
                    for k, v in inputs.items():
                        shape_override[k] = list(v.shape)

                # run the model with tensorflow
                if self.skip_tensorflow:
                    logger.info("TensorFlow SKIPPED")
                elif self.run_tf_frozen:
                    if self.tf_profile is not None:
                        tf.profiler.experimental.start(self.tf_profile)
                    tf_results = self.run_tensorflow(sess, inputs)
                    if self.tf_profile is not None:
                        tf.profiler.experimental.stop()
                    logger.info("TensorFlow OK")
                tf_graph = sess.graph

        model_proto = None
        if self.skip_conversion:
            if self.large_model:
                external_tensor_storage = ExternalTensorStorage()
                model_proto = utils.model_proto_from_zip(
                    self.converted_model, external_tensor_storage)
            else:
                external_tensor_storage = None
                model_proto = utils.model_proto_from_file(self.converted_model)
            logger.info("ONNX loaded from file")
        else:
            try:
                # convert model to onnx
                onnx_graph = self.to_onnx(
                    tf_graph,
                    opset=opset,
                    extra_opset=extra_opset,
                    shape_override=shape_override,
                    input_names=inputs.keys(),
                    const_node_values=const_node_values,
                    initialized_tables=initialized_tables,
                    tflite_path=tflite_path,
                    tensors_to_rename=to_rename)
                onnx_graph = optimizer.optimize_graph(onnx_graph)
                print("ONNX", onnx_graph.dump_node_statistics())
                external_tensor_storage = ExternalTensorStorage(
                ) if self.large_model else None
                model_proto = onnx_graph.make_model(
                    "converted from tf2onnx",
                    external_tensor_storage=external_tensor_storage)
                logger.info("To_ONNX, OK")
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file,
                                          external_tensor_storage)
                if self.converted_model:
                    if self.large_model:
                        utils.save_onnx_zip(self.converted_model, model_proto,
                                            external_tensor_storage)
                    else:
                        utils.save_protobuf(self.converted_model, model_proto)
                    logger.info("Created %s", self.converted_model)

            except Exception:
                logger.error("To_ONNX FAIL", exc_info=1)
                return False

        try:
            onnx_results = None
            if backend == "onnxruntime":
                if to_rename is None:
                    struc_outputs = self.output_names
                else:
                    struc_outputs = [
                        to_rename.get(k, k) for k in self.output_names
                    ]
                onnx_results = self.run_onnxruntime(name, model_proto, inputs,
                                                    struc_outputs,
                                                    external_tensor_storage)
            else:
                raise ValueError("unknown backend")
            logger.info("Run_ONNX OK")

            try:
                if self.skip_tensorflow:
                    logger.info("Results: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(
                                tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            good_cnt = np.count_nonzero(
                                np.isclose(tf_res,
                                           onnx_res,
                                           rtol=self.rtol,
                                           atol=self.atol))
                            bad_cnt = tf_res.size - good_cnt
                            if bad_cnt > self.ptol / 100 * tf_res.size:
                                # Prints a nice error message with stats
                                np.testing.assert_allclose(tf_res,
                                                           onnx_res,
                                                           rtol=self.rtol,
                                                           atol=self.atol)
                    logger.info("Results: OK")
                return True
            except Exception:
                logger.error("Results", exc_info=1)

        except Exception:
            logger.error("Run_ONNX FAIL", exc_info=1)

        return False
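The ptol branch above allows a small percentage of elements to disagree before falling back to assert_allclose for a detailed error report; a self-contained sketch of that check with illustrative values:

import numpy as np

tf_res = np.array([1.0, 2.0, 3.0, 4.0])
onnx_res = np.array([1.0, 2.0, 3.5, 4.0])  # one element out of tolerance
rtol, atol, ptol = 1e-5, 1e-5, 30.0        # ptol is a percentage of elements

good_cnt = np.count_nonzero(np.isclose(tf_res, onnx_res, rtol=rtol, atol=atol))
bad_cnt = tf_res.size - good_cnt
if bad_cnt > ptol / 100 * tf_res.size:     # more than ptol% of elements mismatch
    np.testing.assert_allclose(tf_res, onnx_res, rtol=rtol, atol=atol)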
Example #6
    def run_test(self,
                 name,
                 backend="caffe2",
                 onnx_file=None,
                 opset=None,
                 extra_opset=None,
                 perf=None,
                 fold_const=None):
        """Run complete test against backend."""
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_model()
            logger.info("Downloaded to %s", dir_name)
            model_path = os.path.join(dir_name, self.local)
        else:
            model_path = self.local

        logger.info("Load model from %s", model_path)
        input_names = list(self.input_names.keys())
        outputs = self.output_names
        if self.model_type in ["checkpoint"]:
            graph_def, input_names, outputs = tf_loader.from_checkpoint(
                model_path, input_names, outputs)
        elif self.model_type in ["saved_model"]:
            graph_def, input_names, outputs = tf_loader.from_saved_model(
                model_path, input_names, outputs)
        else:
            graph_def, input_names, outputs = tf_loader.from_graphdef(
                model_path, input_names, outputs)

        # remove unused input names
        input_names = list(
            set(input_names).intersection(self.input_names.keys()))
        graph_def = tf2onnx.tfonnx.tf_optimize(input_names, self.output_names,
                                               graph_def, fold_const)
        if utils.is_debug_mode():
            utils.save_protobuf(
                os.path.join(TEMP_DIR, name + "_after_tf_optimize.pb"),
                graph_def)

        inputs = {}
        shape_override = {}
        g = tf.import_graph_def(graph_def, name='')
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True),
                        graph=g) as sess:
            # create the input data
            for k in input_names:
                v = self.input_names[k]
                t = sess.graph.get_tensor_by_name(k)
                expected_dtype = tf.as_dtype(t.dtype).name
                if isinstance(v, six.text_type) and v.startswith("np."):
                    np_value = eval(v)  # pylint: disable=eval-used
                    if expected_dtype != np_value.dtype:
                        logger.warning(
                            "dtype mismatch for input %s: expected=%s, actual=%s",
                            k, expected_dtype, np_value.dtype)
                    inputs[k] = np_value.astype(expected_dtype)
                else:
                    inputs[k] = self.make_input(v).astype(expected_dtype)

            if self.force_input_shape:
                for k, v in inputs.items():
                    shape_override[k] = list(v.shape)

            # run the model with tensorflow
            if self.skip_tensorflow:
                logger.info("TensorFlow SKIPPED")
            else:
                tf_results = self.run_tensorflow(sess, inputs)
                logger.info("TensorFlow OK")

            model_proto = None
            try:
                # convert model to onnx
                onnx_graph = self.to_onnx(sess.graph,
                                          opset=opset,
                                          extra_opset=extra_opset,
                                          shape_override=shape_override,
                                          input_names=inputs.keys())
                onnx_graph = optimizer.optimize_graph(onnx_graph)
                model_proto = onnx_graph.make_model("converted from tf2onnx")
                logger.info("To_ONNX, OK")
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file)
            except Exception:
                logger.error("To_ONNX FAIL", exc_info=1)
                return False

        try:
            onnx_results = None
            if backend == "caffe2":
                onnx_results = self.run_caffe2(name, model_proto, inputs)
            elif backend == "onnxruntime":
                onnx_results = self.run_onnxruntime(name, model_proto, inputs)
            else:
                raise ValueError("unknown backend")
            logger.info("Run_ONNX OK")

            try:
                if self.skip_tensorflow:
                    logger.info("Results: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(
                                tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_allclose(tf_res,
                                                       onnx_res,
                                                       rtol=self.rtol,
                                                       atol=self.atol)
                    logger.info("Results: OK")
                return True
            except Exception:
                logger.error("Results", exc_info=1)

        except Exception:
            logger.error("Run_ONNX FAIL", exc_info=1)

        return False
Example #7
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    tflite_path = None
    custom_ops = {}
    initialized_tables = None
    tensors_to_rename = {}
    if args.custom_ops:
        using_tf_opset = False
        for op in args.custom_ops.split(","):
            if ":" in op:
                op, domain = op.split(":")
            else:
                # default custom ops for tensorflow-onnx are in the "tf" namespace
                using_tf_opset = True
                domain = constants.TENSORFLOW_OPSET.domain
            custom_ops[op] = (make_default_custom_op_handler(domain), [])
        if using_tf_opset:
            extra_opset.append(constants.TENSORFLOW_OPSET)

    if any(opset.domain == constants.CONTRIB_OPS_DOMAIN
           for opset in extra_opset):
        try:
            import tensorflow_text  # pylint: disable=import-outside-toplevel
        except ModuleNotFoundError:
            logger.warning(
                "tensorflow_text not installed. Model will fail to load if tensorflow_text ops are used."
            )

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    graph_def = None
    inputs = None
    outputs = None
    model_path = None

    if args.graphdef:
        graph_def, inputs, outputs = tf_loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = tf_loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs, initialized_tables, tensors_to_rename = tf_loader.from_saved_model(
            args.saved_model,
            args.inputs,
            args.outputs,
            args.tag,
            args.signature_def,
            args.concrete_function,
            args.large_model,
            return_initialized_tables=True,
            return_tensors_to_rename=True)
        model_path = args.saved_model
    if args.keras:
        graph_def, inputs, outputs = tf_loader.from_keras(
            args.keras, args.inputs, args.outputs)
        model_path = args.keras
    if args.tflite:
        # Optional, but used to cut graph if provided.
        inputs = args.inputs
        outputs = args.outputs
        tflite_path = args.tflite
        model_path = tflite_path

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    if args.rename_inputs:
        tensors_to_rename.update(zip(inputs, args.rename_inputs))
    if args.rename_outputs:
        tensors_to_rename.update(zip(outputs, args.rename_outputs))

    with tf.device("/cpu:0"):
        model_proto, _ = _convert_common(
            graph_def,
            name=model_path,
            continue_on_error=args.continue_on_error,
            target=args.target,
            opset=args.opset,
            custom_op_handlers=custom_ops,
            extra_opset=extra_opset,
            shape_override=args.shape_override,
            input_names=inputs,
            output_names=outputs,
            inputs_as_nchw=args.inputs_as_nchw,
            large_model=args.large_model,
            tensors_to_rename=tensors_to_rename,
            ignore_default=args.ignore_default,
            use_default=args.use_default,
            tflite_path=tflite_path,
            dequantize=args.dequantize,
            initialized_tables=initialized_tables,
            output_frozen_graph=args.output_frozen_graph,
            output_path=args.output)

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)

    logger.info("Model inputs: %s", [n.name for n in model_proto.graph.input])
    logger.info("Model outputs: %s",
                [n.name for n in model_proto.graph.output])
    if args.output:
        if args.large_model:
            logger.info(
                "Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.",
                args.output)
        else:
            logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info(
            "To export ONNX model to file, please run with `--output` option")
Example #8
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    custom_ops = {}
    initialized_tables = None
    if args.custom_ops:
        using_tf_opset = False
        for op in args.custom_ops.split(","):
            if ":" in op:
                op, domain = op.split(":")
            else:
                # default custom ops for tensorflow-onnx are in the "tf" namespace
                using_tf_opset = True
                domain = constants.TENSORFLOW_OPSET.domain
            custom_ops[op] = (make_default_custom_op_handler(domain), [])
        if using_tf_opset:
            extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = tf_loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = tf_loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs, initialized_tables = tf_loader.from_saved_model(
            args.saved_model,
            args.inputs,
            args.outputs,
            args.tag,
            args.signature_def,
            args.concrete_function,
            args.large_model,
            return_initialized_tables=True)
        model_path = args.saved_model
    if args.keras:
        graph_def, inputs, outputs = tf_loader.from_keras(
            args.keras, args.inputs, args.outputs)
        model_path = args.keras

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    with tf.Graph().as_default() as tf_graph:
        const_node_values = None
        if args.large_model:
            const_node_values = compress_graph_def(graph_def)
        if args.output_frozen_graph:
            utils.save_protobuf(args.output_frozen_graph, graph_def)
        tf.import_graph_def(graph_def, name='')
    with tf_loader.tf_session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw,
                             ignore_default=args.ignore_default,
                             use_default=args.use_default,
                             const_node_values=const_node_values,
                             initialized_tables=initialized_tables)

    onnx_graph = optimizer.optimize_graph(g)

    tensor_storage = ExternalTensorStorage() if args.large_model else None
    model_proto = onnx_graph.make_model("converted from {}".format(model_path),
                                        external_tensor_storage=tensor_storage)

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)
    if args.output:
        if args.large_model:
            utils.save_onnx_zip(args.output, model_proto, tensor_storage)
            logger.info(
                "Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.",
                args.output)
        else:
            utils.save_protobuf(args.output, model_proto)
            logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info(
            "To export ONNX model to file, please run with `--output` option")
Example #9
    def pb_to_onnx(
        inputs: List[Union[str, tf_compat.Tensor]],
        outputs: List[Union[str, tf_compat.Tensor]],
        pb_path: str,
        onnx_path: str,
        opset: int = default_onnx_opset(),
        custom_op_handlers=None,
        extra_opset=None,
        shape_override: Dict[str, List] = None,
    ):
        """
        Export an ONNX format for the graph from PB format.
        Should not be called within an active graph or session.

        :param inputs: the inputs the graph should be created for,
            can be either a list of names or a list of tensors
        :param outputs: the outputs the graph should be created for,
            can be either a list of names or a list of tensors
        :param pb_path: path to the existing PB file
        :param onnx_path: path to the output ONNX file
        :param opset: ONNX opset
        :param custom_op_handlers: dictionary of custom op handlers
        :param extra_opset: list of extra opsets
        :param shape_override: new shape to override
        """
        try:
            from tf2onnx import constants, optimizer, utils
            from tf2onnx.tfonnx import process_tf_graph, tf_optimize
        except ModuleNotFoundError:
            raise ModuleNotFoundError(
                "tf2onnx must be installed on the system before using export_onnx"
            )

        try:
            from tf2onnx import tf_loader as loader
        except Exception:
            from tf2onnx import loader

        pb_path = clean_path(pb_path)

        if not os.path.exists(pb_path):
            raise FileNotFoundError(
                ("no pb file for the model found at {}").format(pb_path)
            )

        inputs = [inp if isinstance(inp, str) else inp.name for inp in inputs]
        outputs = [out if isinstance(out, str) else out.name for out in outputs]

        graph_def, inputs, outputs = loader.from_graphdef(pb_path, inputs, outputs)
        graph_def = tf_optimize(inputs, outputs, graph_def, fold_constant=True)

        with tf_compat.Graph().as_default() as tf_graph:
            tf_compat.import_graph_def(graph_def, name="")

        with tf_compat.Session(graph=tf_graph):
            graph = process_tf_graph(
                tf_graph,
                continue_on_error=False,
                target=",".join(constants.DEFAULT_TARGET),
                opset=opset,
                custom_op_handlers=custom_op_handlers,
                extra_opset=extra_opset,
                shape_override=shape_override,
                input_names=inputs,
                output_names=outputs,
            )

        onnx_graph = optimizer.optimize_graph(graph)
        model_proto = onnx_graph.make_model("converted from {}".format(pb_path))

        onnx_path = clean_path(onnx_path)
        create_parent_dirs(onnx_path)
        utils.save_protobuf(onnx_path, model_proto)
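A hedged usage sketch of pb_to_onnx (in the listing it is defined inside a class, so call it through that class in practice); the tensor names and paths are hypothetical, and inputs/outputs may also be given as tf.Tensor objects:

pb_to_onnx(
    inputs=["input:0"],
    outputs=["output/Softmax:0"],
    pb_path="./model/frozen.pb",
    onnx_path="./model/model.onnx",
    opset=11,
)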
Example #10
        for n in onnx_nodes:
            if n.input and n.input[0] not in nodes_outputs:
                continue
            if len(n.output) == 0:
                n.output.append(n.name + ':0')
                graph_output_names.add(n.output[0])
            else:
                output_name = n.output[0]
                if (output_name not in nodes_inputs) and (0 < len(n.input)):
                    graph_output_names.add(output_name)

    logging.info('Model Inputs: %s', str(list(graph_input_names)))
    logging.info('Model Outputs: %s', str(list(graph_output_names)))

    graph_def, inputs, outputs = tf_loader.from_graphdef(
        model_path=args.in_file,
        input_names=list(graph_input_names),
        output_names=list(graph_output_names))

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')

    if 160 <= TF2ONNX_VERSION:
        with tf_loader.tf_session(graph=tf_graph):
            onnx_graph = tf2onnx.tfonnx.process_tf_graph(tf_graph=tf_graph,
                                                         input_names=inputs,
                                                         output_names=outputs,
                                                         opset=9)
    else:
        with tf.Session(graph=tf_graph):
            onnx_graph = tf2onnx.tfonnx.process_tf_graph(tf_graph=tf_graph,
                                                         input_names=inputs,
                                                         output_names=outputs,
                                                         opset=9)
Example #11
def tf2onnx_flow(pb_path: str, test_mode: bool = False) -> onnx.ModelProto:
    """Convert a frozen-graph .pb file into ONNX.

    Args:
        pb_path (str): input pb file path
        test_mode (bool, optional): test mode. Defaults to False.

    Raises:
        Exception: invalid input file

    Returns:
        onnx.ModelProto: the converted ONNX model
    """
    TF2ONNX_VERSION = int(tf2onnx.version.version.replace('.', ''))

    if 160 <= TF2ONNX_VERSION:
        from tf2onnx import tf_loader
    else:
        from tf2onnx import loader as tf_loader
 
    if pb_path[-3:] == '.pb':
        model_name = pb_path.split('/')[-1][:-3]

        # always reset the default TensorFlow graph at the beginning
        tf.reset_default_graph()
        
        with tf.Session() as sess:
            with gfile.FastGFile(pb_path, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
            sess.graph.as_default()
            tf.import_graph_def(graph_def, name='')

            if 160 <= TF2ONNX_VERSION:
                onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes, functions = tf2onnx.tf_utils.tflist_to_onnx(
                    sess.graph,
                    {})
            else:
                onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes = tf2onnx.tfonnx.tflist_to_onnx(
                    sess.graph.get_operations(),
                    {})

            # iterate over a copy: removing from the list while iterating would skip nodes
            for n in list(onnx_nodes):
                if len(n.output) == 0:
                    onnx_nodes.remove(n)

            # find inputs and outputs of graph
            nodes_inputs = set()
            nodes_outputs = set()

            for n in onnx_nodes:
                if n.op_type == 'Placeholder':
                    continue
                for input_name in n.input:
                    nodes_inputs.add(input_name)
                for output_name in n.output:
                    nodes_outputs.add(output_name)

            graph_input_names = set()
            for input_name in nodes_inputs:
                if input_name not in nodes_outputs:
                    graph_input_names.add(input_name)

            graph_output_names = set()
            for n in onnx_nodes:
                if n.input and n.input[0] not in nodes_outputs:
                    continue
                if len(n.output) == 0:
                    n.output.append(n.name + ':0')
                    graph_output_names.add(n.output[0])
                else:
                    output_name = n.output[0]
                    if (output_name not in nodes_inputs) and (0 < len(n.input)):
                        graph_output_names.add(output_name)

        logging.info('Model Inputs: %s', str(list(graph_input_names)))
        logging.info('Model Outputs: %s', str(list(graph_output_names)))

        graph_def, inputs, outputs = tf_loader.from_graphdef(model_path=pb_path,
                                                             input_names=list(graph_input_names),
                                                             output_names=list(graph_output_names))

        with tf.Graph().as_default() as tf_graph:
            tf.import_graph_def(graph_def, name='')

        if 160 <= TF2ONNX_VERSION:
            with tf_loader.tf_session(graph=tf_graph):
                onnx_graph = tf2onnx.tfonnx.process_tf_graph(tf_graph=tf_graph,
                                                             input_names=inputs,
                                                             output_names=outputs,
                                                             opset=11)
        else:
            with tf.Session(graph=tf_graph):
                onnx_graph = tf2onnx.tfonnx.process_tf_graph(tf_graph=tf_graph,
                                                             input_names=inputs,
                                                             output_names=outputs,
                                                             opset=11)

        # Optimize with tf2onnx.optimizer
        onnx_graph = tf2onnx.optimizer.optimize_graph(onnx_graph)
        model_proto = onnx_graph.make_model(model_name)

        # Make tf2onnx output compatible with the spec. of onnx.utils.polish_model
        replacing.replace_initializer_with_Constant(model_proto.graph)
        model_proto = onnx.utils.polish_model(model_proto)

    else:
        raise Exception('expect .pb file as input, but got "' + str(pb_path) + '"')

    # rename
    m = model_proto

    m = combo.preprocess(m)
    m = combo.common_optimization(m)
    m = combo.tensorflow_optimization(m)
    m = combo.postprocess(m)

    if not test_mode:
        g = m.graph
        eliminating.eliminate_shape_changing_after_input(g)

    m = onnx.utils.polish_model(m)
    return m
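A hedged usage sketch of tf2onnx_flow; the file names are hypothetical and onnx is assumed to be imported as in the function above:

model_proto = tf2onnx_flow("frozen_model.pb")
onnx.save(model_proto, "frozen_model.onnx")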