Example #1: a unit-test helper that optimizes a model proto, checks how many ops of a given type remain, and compares onnxruntime outputs before and after optimization.
    def run_and_compare(self, output_names_with_port, onnx_feed_dict, origin_proto, op_type,
                        remaining_op_num, debug=False, rtol=1e-07):
        utils.make_sure(op_type is not None, "op_type should be specified")
        utils.make_sure(remaining_op_num is not None, "remaining_op_num should be specified")

        origin_model_path = self.save_onnx_model(origin_proto, onnx_feed_dict, postfix="_origin")

        new_proto = GraphUtil.optimize_model_proto(origin_proto)

        self.assertTrue(new_proto, msg="model proto after optimizer should not be None")

        new_model_path = self.save_onnx_model(new_proto, onnx_feed_dict, postfix="_opt")
        current = GraphUtil.get_node_count_from_onnx_graph(new_proto.graph)

        self.assertTrue(current[op_type] == remaining_op_num,
                        msg="Expected {} {} ops left, but {} remain".format(
                            remaining_op_num, op_type, current[op_type]))

        if self.config.is_onnxruntime_backend:
            expected = self.run_onnxruntime(origin_model_path, onnx_feed_dict, output_names_with_port)
            actual = self.run_onnxruntime(new_model_path, onnx_feed_dict, output_names_with_port)
        else:
            raise ValueError("only onnxruntime is supported to test transpose optimizer")

        for expected_val, actual_val in zip(expected, actual):
            self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=1e-5)
            self.assertEqual(expected_val.dtype, actual_val.dtype)
            self.assertEqual(expected_val.shape, actual_val.shape)
Example #2: a variant of the same helper that runs only the einsum optimizer via the optimizers argument and additionally validates shapes on the returned graph.
    def run_and_compare(self,
                        output_names_with_port,
                        onnx_feed_dict,
                        origin_proto,
                        op_type,
                        debug=False,
                        rtol=1e-07,
                        catch_errors=True):
        optimizers = OrderedDict([("optimize_einsum", EinsumOptimizer)])
        utils.make_sure(op_type is not None, "op_type should be specified")
        utils.make_sure(
            self.config.is_onnxruntime_backend,
            "only onnxruntime is supported to test the einsum optimizer")

        origin_model_path = self.save_onnx_model(origin_proto,
                                                 onnx_feed_dict,
                                                 postfix="_origin")
        expected = self.run_onnxruntime(origin_model_path, onnx_feed_dict,
                                        output_names_with_port)

        new_proto, new_graph = GraphUtil.optimize_model_proto(
            origin_proto,
            catch_errors=catch_errors,
            return_graph=True,
            optimizers=optimizers)
        self.assertTrue(new_proto,
                        msg="model proto after optimizer should not be None")

        new_model_path = self.save_onnx_model(new_proto,
                                              onnx_feed_dict,
                                              postfix="_opt")
        # current = GraphUtil.get_node_count_from_onnx_graph(new_proto.graph)
        actual = self.run_onnxruntime(new_model_path, onnx_feed_dict,
                                      output_names_with_port)

        for expected_val, actual_val in zip(expected, actual):
            self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=1e-5)
            self.assertEqual(expected_val.dtype, actual_val.dtype)
            self.assertEqual(expected_val.shape, actual_val.shape)

        self.assert_shapes_correct(new_graph,
                                   allow_missing=False,
                                   run_checker=True)
        return new_proto
Example #3: a test that an extra_opset survives the round trip through make_model, optimize_model_proto, and create_graph_from_onnx_model.
    def test_extra_opset(self):
        extra_opset = [
            utils.make_opsetid(constants.MICROSOFT_DOMAIN, 1),
            utils.make_opsetid("my.domain", 1024),
        ]
        with tf.Session() as sess:
            x = tf.placeholder(tf.float32, [2, 3], name="input1")
            x_ = tf.add(x, x)
            _ = tf.identity(x_, name="output")
            g = process_tf_graph(sess.graph,
                                 opset=self.config.opset,
                                 extra_opset=extra_opset)
            self.assertEqual(g.opset, self.config.opset)
            self.assertEqual(g.extra_opset, extra_opset)

            # convert between graph and model proto, make sure extra opset is preserved
            model_proto = g.make_model("test")
            model_proto = GraphUtil.optimize_model_proto(model_proto)
            g = GraphUtil.create_graph_from_onnx_model(model_proto)
            self.assertEqual(g.opset, self.config.opset)
            self.assertEqual(g.extra_opset, extra_opset)
Example #4: converting a frozen TensorFlow graph_def to ONNX, then optimizing the resulting proto while keeping the unoptimized one as a fallback.
    def convert_to_onnx(self, graph_def, inputs, outputs):

        # FIXME: folding const = False
        graph_def = tf2onnx.tfonnx.tf_optimize(
            inputs, outputs, graph_def, False)
        with tf.Graph().as_default() as tf_graph:
            tf.import_graph_def(graph_def, name='')
        with tf.Session(graph=tf_graph):
            onnx_graph = tf2onnx.tfonnx.process_tf_graph(tf_graph,
                                                         continue_on_error=False,
                                                         verbose=False,
                                                         target=",".join(
                                                             constants.DEFAULT_TARGET),
                                                         opset=9,
                                                         input_names=inputs,
                                                         output_names=outputs,
                                                         inputs_as_nchw=None)
        model_proto = onnx_graph.make_model(
            "converted from {}".format(self._tf_file))
        new_model_proto = GraphUtil.optimize_model_proto(model_proto)
        if new_model_proto:
            model_proto = new_model_proto
        return model_proto
Example #5: a command-line conversion entry point that loads a frozen model, converts it, optimizes the proto, and saves the result with save_protobuf.
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    # override unknown dimensions from -1 to 1 (i.e. batch size 1) since not every runtime
    # supports unknown dimensions.
    utils.ONNX_UNKNOWN_DIMENSION = args.unknown_dim

    extra_opset = args.extra_opset or []
    custom_ops = {}
    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {
            op: (default_custom_op_handler, [])
            for op in args.custom_ops.split(",")
        }
        extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs = loader.from_saved_model(
            args.saved_model, args.inputs, args.outputs)
        model_path = args.saved_model

    # todo: consider enabling const folding by default?
    graph_def = tf_optimize(inputs, outputs, graph_def, args.fold_const)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

    model_proto = g.make_model("converted from {}".format(model_path))

    new_model_proto = GraphUtil.optimize_model_proto(model_proto)
    if new_model_proto:
        model_proto = new_model_proto
    else:
        print("NON-CRITICAL, optimizers are not applied successfully")

    # write onnx graph
    if args.output:
        utils.save_protobuf(args.output, model_proto)
        print("\nComplete successfully, the onnx model is generated at " +
              args.output)
Example #6: a variant of the same entry point that logs through the tf2onnx logger and uses the optimized proto directly.
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    custom_ops = {}
    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {op: (default_custom_op_handler, []) for op in args.custom_ops.split(",")}
        extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = loader.from_graphdef(args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = loader.from_checkpoint(args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs = loader.from_saved_model(
            args.saved_model, args.inputs, args.outputs, args.signature_def)
        model_path = args.saved_model

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    # todo: consider enabling const folding by default?
    graph_def = tf_optimize(inputs, outputs, graph_def, args.fold_const)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

    model_proto = g.make_model("converted from {}".format(model_path))

    logger.info("")
    model_proto = GraphUtil.optimize_model_proto(model_proto)

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX", model_path)
    if args.output:
        utils.save_protobuf(args.output, model_proto)
        logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info("To export ONNX model to file, please run with `--output` option")