Example #1
    def test_node_attr_onnx(self):
        n1 = helper.make_node("Conv", ["X", "W"], ["Y"], name="n1", my_attr="my_attr")
        graph_proto = helper.make_graph(
            nodes=[n1],
            name="test",
            inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [2, 2]),
                    helper.make_tensor_value_info("W", TensorProto.FLOAT, [2, 2])],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [2, 2])],
            initializer=[]
        )
        g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
        n1 = g.get_node_by_name("n1")
        self.assertTrue("my_attr" in n1.attr)
        self.assertTrue("my_attr" not in n1.get_onnx_attrs())

        n1 = helper.make_node("Conv", ["X", "W"], ["Y"], name="n1", domain="my_domain", my_attr="my_attr")
        graph_proto = helper.make_graph(
            nodes=[n1],
            name="test",
            inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [2, 2]),
                    helper.make_tensor_value_info("W", TensorProto.FLOAT, [2, 2])],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [2, 2])],
            initializer=[]
        )
        g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
        n1 = g.get_node_by_name("n1")
        self.assertTrue("my_attr" in n1.attr)
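        # unlike the default-domain node above, a custom-domain node keeps my_attr in its ONNX attributes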
        self.assertTrue("my_attr" in n1.get_onnx_attrs())
Example #2
    def test_trans_output_as_graph_outputs(self):
        """
        If a Transpose node's output is a graph output, don't optimize it away.
        """
        trans = helper.make_node("Transpose", ["X"], ["Y"], name="trans", perm=[0, 2, 3, 1])
        graph_proto = helper.make_graph(
            [trans],
            "trans-to-graph-output",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 4, 5, 3))],
        )

        graph = GraphUtil.create_graph_from_onnx_graph(graph_proto)
        # remove the Identity node that feeds the graph output
        identity_op = graph.get_node_by_output(graph.outputs[0])
        graph.outputs = [identity_op.input[0]]
        graph.remove_node(identity_op.name)

        optimized_graph = GraphUtil.optimize_graph(graph)

        self.assertTrue(optimized_graph, msg="graph after optimizer should not be None")

        trans_cnt = len(group_nodes_by_type(optimized_graph)["Transpose"])

        self.assertTrue(trans_cnt == 1, msg="Expect 1 Transpose ops left, but actually " + str(trans_cnt) + " left")
Example #3
    def run_and_compare(self, output_names_with_port, onnx_feed_dict, origin_proto, op_type,
                        remaining_op_num, debug=False, rtol=1e-07):
        utils.make_sure(op_type is not None, "op_type should be specified")
        utils.make_sure(remaining_op_num is not None, "remaining_op_num should be specified")

        origin_model_path = self.save_onnx_model(origin_proto, onnx_feed_dict, postfix="_origin")

        new_proto = GraphUtil.optimize_graph_with_model_proto(origin_proto)

        self.assertTrue(new_proto, msg="model proto after optimizer should not be None")

        new_model_path = self.save_onnx_model(new_proto, onnx_feed_dict, postfix="_opt")
        current = GraphUtil.get_node_count_from_onnx_graph(new_proto.graph)

        self.assertTrue(current[op_type] == remaining_op_num,
                        msg="Expect " + str(remaining_op_num) + " " + op_type + " ops left, but actually " +
                        str(current[op_type]) + " left")

        if self.config.is_onnxruntime_backend:
            expected = self.run_onnxruntime(origin_model_path, onnx_feed_dict, output_names_with_port)
            actual = self.run_onnxruntime(new_model_path, onnx_feed_dict, output_names_with_port)
        else:
            raise ValueError("only onnxruntime is supported to test transpose optimizer")

        for expected_val, actual_val in zip(expected, actual):
            self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=0.)
            self.assertEqual(expected_val.dtype, actual_val.dtype)
            self.assertEqual(expected_val.shape, actual_val.shape)
Example #4
def main():
    args = get_args()

    opset = tf2onnx.utils.find_opset(args.opset)
    print("using tensorflow={}, onnx={}, opset={}, tfonnx={}/{}".format(
        tf.__version__, onnx.__version__, opset, tf2onnx.__version__,
        tf2onnx.version.git_version[:6]))

    # override unknown dimensions from -1 to 1 (aka batch size 1) since not every
    # runtime supports unknown dimensions.
    tf2onnx.utils.ONNX_UNKNOWN_DIMENSION = args.unknown_dim

    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {
            op: default_custom_op_handler
            for op in args.custom_ops.split(",")
        }
        extra_opset = [helper.make_opsetid(_TENSORFLOW_DOMAIN, 1)]
    else:
        custom_ops = {}
        extra_opset = None

    graph_def = tf.GraphDef()
    with tf.gfile.GFile(args.input, 'rb') as f:
        graph_def.ParseFromString(f.read())

    # todo: consider enabling const folding by default?
    graph_def = tf_optimize(args.inputs, args.outputs, graph_def,
                            args.fold_const)
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             verbose=args.verbose,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=args.inputs,
                             output_names=args.outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

    new_model_proto = GraphUtil.opt_transposes_with_graph(
        g,
        "converted from {}".format(args.input),
        optimize=not args.continue_on_error)
    if new_model_proto:
        model_proto = new_model_proto
    else:
        # fall back to the unoptimized model so serialization below still works
        model_proto = g.make_model("converted from {}".format(args.input))
        print("NON-CRITICAL, optimizers are not applied successfully")

    # write onnx graph
    if args.output:
        with open(args.output, "wb") as f:
            f.write(model_proto.SerializeToString())
            print("\nComplete successfully, the onnx model is generated at " +
                  args.output)
Example #5
def load_graph(fname, target):
    model_proto = onnx.ModelProto()
    with open(fname, "rb") as f:
        data = f.read()
        model_proto.ParseFromString(data)
    g = GraphUtil.create_graph_from_onnx_model(model_proto, target)
    return g, model_proto
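
A minimal usage sketch for this helper (the file name and the empty target string are hypothetical; GraphUtil.optimize_graph is taken from the other examples in this listing):

    g, model_proto = load_graph("model.onnx", "")
    optimized_graph = GraphUtil.optimize_graph(g)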
Example #6
    def test_match_flipped(self):
        n1 = helper.make_node("Sub", ["i1", "i1"], ["n1:0"], name="n1")
        n2 = helper.make_node("Add", ["i2", "i2"], ["n2:0"], name="n2")
        n3 = helper.make_node("Mul", ["n1:0", "n2:0"], ["n3:0"], name="n3")

        graph_proto = helper.make_graph(
            nodes=[n1, n2, n3],
            name="test",
            inputs=[
                helper.make_tensor_value_info("i1", TensorProto.FLOAT, [2, 2]),
                helper.make_tensor_value_info("i2", TensorProto.FLOAT, [2, 2])
            ],
            outputs=[
                helper.make_tensor_value_info("n2:0", TensorProto.FLOAT,
                                              [2, 2])
            ],
            initializer=[])
        g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
        pattern = OpTypePattern(
            'Mul', inputs=[OpTypePattern('Add'),
                           OpTypePattern('Sub')])
        ops = g.get_nodes()
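        # allow_reorder lets the pattern match even though Mul's actual inputs are (Sub, Add)
        # while the pattern lists them as (Add, Sub)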
        matcher = GraphMatcher(pattern, allow_reorder=True)
        match_results = list(matcher.match_ops(ops))
        self.assertEqual(1, len(match_results))
Example #7
 def test_rewrite_subgraph(self):
     graph_proto = self.sample_net()
     g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
     pattern = \
         OpTypePattern('Abs', name='output', inputs=[
             OpTypePattern('Add', name='input')
         ])
     ops = g.get_nodes()
     matcher = GraphMatcher(pattern)
     match_results = list(matcher.match_ops(ops))
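     # replace each matched Add -> Abs chain with a single Sub node and drop the matched nodes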
     for match in match_results:
         input_node = match.get_op('input')
         output_node = match.get_op('output')
         op_name = utils.make_name("ReplacedOp")
         out_name = utils.port_name(op_name)
         new_node = g.make_node("Sub", inputs=input_node.input, outputs=[out_name], name=op_name)
         g.replace_all_inputs(output_node.output[0], new_node.output[0])  # ops=ops
         for n in set(match.get_nodes()):
             g.remove_node(n.name)
     g.topological_sort(ops)
     result = onnx_to_graphviz(g)
     expected = 'digraph { Placeholder__4 [op_type=Placeholder] n1 [op_type=Abs] ' \
                'n3 [op_type=Abs] n2 [op_type=Abs] ReplacedOp__5 [op_type=Sub] ' \
                'n6 [op_type=Identity] n5_graph_outputs_Identity__3 [op_type=Identity] ' \
                'input -> n1 n1:0 -> n3 n1:0 -> n2 n2:0 -> ReplacedOp__5 n3:0 -> ReplacedOp__5 ' \
                'ReplacedOp__5:0 -> n6 ReplacedOp__5:0 -> n5_graph_outputs_Identity__3 }'
     self.assertEqual(expected, result)
Example #8
    def _import_from_tf_pb(self, graph_def):
        inputs, outputs = _find_out_terminal_node(graph_def, postfix=True)
        print("inputs:{}".format(inputs))
        print("outputs:{}".format(outputs))

        # FIXME: folding const = False
        graph_def = tf2onnx.tfonnx.tf_optimize(inputs, outputs, graph_def,
                                               False)
        with tf.Graph().as_default() as tf_graph:
            tf.import_graph_def(graph_def, name='')
        with tf.Session(graph=tf_graph):
            onnx_graph = tf2onnx.tfonnx.process_tf_graph(
                tf_graph,
                continue_on_error=False,
                verbose=False,
                target=",".join(tf2onnx.tfonnx.DEFAULT_TARGET),
                opset=6,
                input_names=inputs,
                output_names=outputs,
                inputs_as_nchw=None)
        model_proto = onnx_graph.make_model("tf_model")
        new_model_proto = GraphUtil.opt_transposes_with_graph(onnx_graph,
                                                              'tf_model',
                                                              optimize=True)
        if new_model_proto:
            model_proto = new_model_proto
        return model_proto
Example #9
 def test_remove_input(self):
     graph_proto = self.sample_net()
     g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
     n4 = g.get_node_by_name("n4")
     g.remove_input(n4, n4.input[1])
     result = onnx_to_graphviz(g)
     expected = 'digraph { n1 [op_type=Abs] n2 [op_type=Abs] n3 [op_type=Abs] n4 [op_type=Add] ' \
                'n5 [op_type=Abs] n6 [op_type=Identity] graph_outputs_Identity__3 ' \
                '[op_type=Identity] Placeholder__4 [op_type=Placeholder] input -> n1 ' \
                'n1:0 -> n2 n1:0 -> n3 n2:0 -> n4 n4:0 -> n5 raw_output___2:0 -> n6 ' \
                'raw_output___2:0 -> graph_outputs_Identity__3 }'
     self.assertEqual(expected, result)
Example #10
    def test_extra_opset(self):
        extra_opset = [
            utils.make_opsetid(constants.MICROSOFT_DOMAIN, 1),
            utils.make_opsetid("my.domain", 1024),
        ]
        with tf.Session() as sess:
            x = tf.placeholder(tf.float32, [2, 3], name="input1")
            x_ = tf.add(x, x)
            _ = tf.identity(x_, name="output")
            g = process_tf_graph(sess.graph,
                                 opset=self.config.opset,
                                 extra_opset=extra_opset)
            self.assertEqual(g.opset, self.config.opset)
            self.assertEqual(g.extra_opset, extra_opset)

            # convert between graph and model proto, make sure extra opset is preserved
            model_proto = g.make_model("test")
            model_proto = GraphUtil.optimize_model_proto(model_proto)
            g = GraphUtil.create_graph_from_onnx_model(model_proto)
            self.assertEqual(g.opset, self.config.opset)
            self.assertEqual(g.extra_opset, extra_opset)
Example #11
 def test_insert_node2(self):
     graph_proto = self.sample_net()
     g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
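     # inserting on an output rewires every consumer of "n1:0" to read from the new node n7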
     g.insert_new_node_on_output("Abs", "n1:0", name="n7")
     ops = g.get_nodes()
     g.topological_sort(ops)
     result = onnx_to_graphviz(g)
     expected = 'digraph { Placeholder__4 [op_type=Placeholder] n1 [op_type=Abs] n7 [op_type=Abs] ' \
                'n3 [op_type=Abs] n2 [op_type=Abs] n4 [op_type=Add] n5 [op_type=Abs] ' \
                'n6 [op_type=Identity] n5_graph_outputs_Identity__3 [op_type=Identity] ' \
                'input -> n1 n1:0 -> n7 n7:0 -> n3 n7:0 -> n2 n2:0 -> n4 n3:0 -> n4 n4:0 -> n5 ' \
                'n5_raw_output___2:0 -> n6 n5_raw_output___2:0 -> n5_graph_outputs_Identity__3 }'
     self.assertEqual(expected, result)
Example #12
    def run_and_compare(self,
                        output_names_with_port,
                        onnx_feed_dict,
                        origin_proto,
                        debug=False,
                        rtol=1e-07):
        origin_model_path = self.save_onnx_model(origin_proto,
                                                 onnx_feed_dict,
                                                 postfix="_origin")

        new_proto = GraphUtil.opt_transposes_with_model_proto(origin_proto)

        self.assertTrue(new_proto,
                        msg="model proto after optimizer should not be None")

        new_model_path = self.save_onnx_model(new_proto,
                                              onnx_feed_dict,
                                              postfix="_opt")

        previous = GraphUtil.get_node_count_from_onnx_graph(origin_proto.graph)
        current = GraphUtil.get_node_count_from_onnx_graph(new_proto.graph)

        self.assertTrue(current["Transpose"] < previous["Transpose"],
                        msg="transpose ops count not changed")

        if type(self).BACKEND == "onnxruntime":
            expected = self.run_onnxruntime(origin_model_path, onnx_feed_dict,
                                            output_names_with_port)
            actual = self.run_onnxruntime(new_model_path, onnx_feed_dict,
                                          output_names_with_port)
        else:
            raise ValueError(
                "only onnxruntime is supported to test transpose optimizer")

        for expected_val, actual_val in zip(expected, actual):
            self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=0.)
            self.assertEqual(expected_val.dtype, actual_val.dtype)
            self.assertEqual(expected_val.shape, actual_val.shape)
Example #13
 def test_data_format(self):
     n1 = helper.make_node("Conv", ["X", "W"], ["Y"], name="n1", data_format="NHWC")
     graph_proto = helper.make_graph(
         nodes=[n1],
         name="test",
         inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [2, 2]),
                 helper.make_tensor_value_info("W", TensorProto.FLOAT, [2, 2])],
         outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [2, 2])],
         initializer=[]
     )
     g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
     n = g.get_node_by_name("n1")
     self.assertEqual(n.data_format, "NHWC")
     self.assertTrue(n.is_nhwc())
Example #14
 def test_remove_input(self):
     graph_proto = self.sample_net()
     g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
     n4 = g.get_node_by_name("n4")
     g.remove_input(n4, n4.input[1])
     ops = g.get_nodes()
     g.topological_sort(ops)
     result = onnx_to_graphviz(g)
     expected = 'digraph { Placeholder__4 [op_type=Placeholder] n1 [op_type=Abs] n3 [op_type=Abs] ' \
                'n2 [op_type=Abs] n4 [op_type=Add] n5 [op_type=Abs] n6 [op_type=Identity] ' \
                'n5_graph_outputs_Identity__3 [op_type=Identity] input -> n1 n1:0 -> n3 ' \
                'n1:0 -> n2 n2:0 -> n4 n4:0 -> n5 n5_raw_output___2:0 -> n6 ' \
                'n5_raw_output___2:0 -> n5_graph_outputs_Identity__3 }'
     self.assertEqual(expected, result)
Example #15
 def test_make_const_string(self):
     graph_proto = self.sample_net()
     g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
     arr1 = np.array("test", np.object)
     arr2 = np.array([["A", "B"], ["C", "D"]], np.object)
     arr3 = np.array(b"test", np.object)
     arr4 = np.array([[b"A", b"B"], [b"C", b"D"]], np.object)
     const1 = g.make_const("const1", arr1)
     const2 = g.make_const("const2", arr2)
     const3 = g.make_const("const3", arr3)
     const4 = g.make_const("const4", arr4)
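     # bytes and str inputs are expected to round-trip to the same string tensors,
     # which is why const3/const4 are compared against arr1/arr2 below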
     np.testing.assert_equal(const1.get_tensor_value(False), arr1)
     np.testing.assert_equal(const2.get_tensor_value(False), arr2)
     np.testing.assert_equal(const3.get_tensor_value(False), arr1)
     np.testing.assert_equal(const4.get_tensor_value(False), arr2)
Example #16
 def test_insert_node1(self):
     graph_proto = self.sample_net()
     g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
     n2 = g.get_node_by_name("n2")
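     # inserting on an input only rewires n2; the other consumer n3 keeps reading "n1:0" directly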
     n7 = g.insert_new_node_on_input(n2, "Abs", "n1:0", name="n7")
     ops = g.get_nodes()
     ops.append(n7)
     g.topological_sort(ops)
     result = onnx_to_graphviz(g)
     expected = 'digraph { Placeholder__4 [op_type=Placeholder] ' \
                'n1 [op_type=Abs] n7 [op_type=Abs] n2 [op_type=Abs] n3 [op_type=Abs] ' \
                'n4 [op_type=Add] n5 [op_type=Abs] graph_outputs_Identity__3 [op_type=Identity] ' \
                'n6 [op_type=Identity] input -> n1 n1:0 -> n7 n7:0 -> n2 n1:0 -> n3 ' \
                'n2:0 -> n4 n3:0 -> n4 n4:0 -> n5 raw_output___2:0 -> graph_outputs_Identity__3 ' \
                'raw_output___2:0 -> n6 }'
     self.assertEqual(expected, result)
Example #17
    def run_and_compare(self,
                        output_names_with_port,
                        onnx_feed_dict,
                        origin_proto,
                        op_type,
                        debug=False,
                        rtol=1e-07,
                        catch_errors=True):
        optimizers = OrderedDict([("optimize_einsum", EinsumOptimizer)])
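        # restrict the optimization pass to the einsum optimizer only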
        utils.make_sure(op_type is not None, "op_type should be specified")
        utils.make_sure(
            self.config.is_onnxruntime_backend,
            "only onnxruntime is supported to test transpose optimizer")

        origin_model_path = self.save_onnx_model(origin_proto,
                                                 onnx_feed_dict,
                                                 postfix="_origin")
        expected = self.run_onnxruntime(origin_model_path, onnx_feed_dict,
                                        output_names_with_port)

        new_proto, new_graph = GraphUtil.optimize_model_proto(
            origin_proto,
            catch_errors=catch_errors,
            return_graph=True,
            optimizers=optimizers)
        self.assertTrue(new_proto,
                        msg="model proto after optimizer should not be None")

        new_model_path = self.save_onnx_model(new_proto,
                                              onnx_feed_dict,
                                              postfix="_opt")
        # current = GraphUtil.get_node_count_from_onnx_graph(new_proto.graph)
        actual = self.run_onnxruntime(new_model_path, onnx_feed_dict,
                                      output_names_with_port)

        for expected_val, actual_val in zip(expected, actual):
            self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=1e-5)
            self.assertEqual(expected_val.dtype, actual_val.dtype)
            self.assertEqual(expected_val.shape, actual_val.shape)

        self.assert_shapes_correct(new_graph,
                                   allow_missing=False,
                                   run_checker=True)
        return new_proto
Example #18
    def convert_to_onnx(self, graph_def, inputs, outputs):

        # FIXME: folding const = False
        graph_def = tf2onnx.tfonnx.tf_optimize(
            inputs, outputs, graph_def, False)
        with tf.Graph().as_default() as tf_graph:
            tf.import_graph_def(graph_def, name='')
        with tf.Session(graph=tf_graph):
            onnx_graph = tf2onnx.tfonnx.process_tf_graph(tf_graph,
                                                         continue_on_error=False,
                                                         verbose=False,
                                                         target=",".join(
                                                             constants.DEFAULT_TARGET),
                                                         opset=9,
                                                         input_names=inputs,
                                                         output_names=outputs,
                                                         inputs_as_nchw=None)
        model_proto = onnx_graph.make_model(
            "converted from {}".format(self._tf_file))
        new_model_proto = GraphUtil.optimize_model_proto(model_proto)
        if new_model_proto:
            model_proto = new_model_proto
        return model_proto
Example #19
def main():
    args = get_args()

    g, org_model_proto = load_graph(args.input, args.target)

    if g.is_target(constants.TARGET_CHANNELS_FIRST):
        g.reset_nodes(rewrite_channels_first(g, g.get_nodes()))
    if g.is_target(constants.TARGET_CHANNELS_LAST):
        g.reset_nodes(rewrite_channels_last(g, g.get_nodes()))

    g = optimizer.optimize_graph(g)

    onnx_graph = g.make_graph(org_model_proto.graph.doc_string +
                              " (+tf2onnx/onnx-optimize)")

    kwargs = GraphUtil.get_onnx_model_properties(org_model_proto)

    model_proto = helper.make_model(onnx_graph, **kwargs)

    # write onnx graph
    if args.output:
        with open(args.output, "wb") as f:
            f.write(model_proto.SerializeToString())
Example #20
    def run_test(self,
                 name,
                 backend="caffe2",
                 debug=False,
                 onnx_file=None,
                 opset=None,
                 perf=None,
                 fold_const=None):
        """Run complete test against backend."""
        print(name)
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_file()
            model_path = os.path.join(dir_name, self.local)
        else:
            model_path = self.local
            dir_name = os.path.dirname(self.local)
        print("\tdownloaded", model_path)

        inputs = list(self.input_names.keys())
        outputs = self.output_names
        if self.model_type in ["checkpoint"]:
            graph_def, inputs, outputs = loader.from_checkpoint(
                model_path, inputs, outputs)
        elif self.model_type in ["saved_model"]:
            graph_def, inputs, outputs = loader.from_saved_model(
                model_path, inputs, outputs)
        else:
            graph_def, inputs, outputs = loader.from_graphdef(
                model_path, inputs, outputs)

        # create the input data
        inputs = {}
        for k, v in self.input_names.items():
            if isinstance(v, six.text_type) and v.startswith("np."):
                inputs[k] = eval(v)  # pylint: disable=eval-used
            else:
                inputs[k] = self.make_input(v)
        if self.more_inputs:
            for k, v in self.more_inputs.items():
                inputs[k] = v

        graph_def = tf2onnx.tfonnx.tf_optimize(inputs.keys(),
                                               self.output_names, graph_def,
                                               fold_const)
        shape_override = {}
        g = tf.import_graph_def(graph_def, name='')
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True),
                        graph=g) as sess:

            # fix inputs if needed
            for k in inputs.keys():  # pylint: disable=consider-iterating-dictionary
                t = sess.graph.get_tensor_by_name(k)
                dtype = tf.as_dtype(t.dtype).name
                if dtype != "float32":
                    v = inputs[k]
                    inputs[k] = v.astype(dtype)
            if self.force_input_shape:
                for k, v in inputs.items():
                    shape_override[k] = list(v.shape)

            # run the model with tensorflow
            if self.skip_tensorflow:
                print("\ttensorflow", "SKIPPED")
            else:
                tf_results = self.run_tensorflow(sess, inputs)
                print("\ttensorflow", "OK")
            model_proto = None
            try:
                # convert model to onnx
                onnx_graph = self.to_onnx(sess.graph,
                                          opset=opset,
                                          shape_override=shape_override,
                                          input_names=inputs.keys())
                model_proto = onnx_graph.make_model("converted from tf2onnx")
                new_model_proto = GraphUtil.optimize_graph(onnx_graph,
                                                           "test",
                                                           debug=debug)
                if new_model_proto:
                    model_proto = new_model_proto
                else:
                    print(
                        "\tNON-CRITICAL, optimizers are not applied successfully"
                    )
                print("\tto_onnx", "OK")
                if debug:
                    onnx_graph.dump_graph()
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file)
            except Exception as ex:
                tb = traceback.format_exc()
                print("\tto_onnx", "FAIL", ex, tb)

        try:
            onnx_results = None
            if backend == "caffe2":
                onnx_results = self.run_caffe2(name, model_proto, inputs)
            elif backend == "onnxmsrtnext":
                onnx_results = self.run_onnxmsrtnext(name, model_proto, inputs)
            elif backend == "onnxruntime":
                onnx_results = self.run_onnxruntime(name, model_proto, inputs)
            else:
                raise ValueError("unknown backend")
            print("\trun_onnx OK")

            try:
                if self.skip_tensorflow:
                    print("\tResults: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(
                                tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_allclose(tf_res,
                                                       onnx_res,
                                                       rtol=self.rtol,
                                                       atol=self.atol)
                    print("\tResults: OK")
                return True
            except Exception as ex:
                print("\tResults: ", ex)

        except Exception as ex:
            print("\trun_onnx", "FAIL", ex)

        return False
Example #21
    def run_test(self,
                 name,
                 backend="caffe2",
                 debug=False,
                 onnx_file=None,
                 opset=None,
                 perf=None,
                 fold_const=None):
        """Run complete test against backend."""
        print(name)
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_file()
            model_path = os.path.join(dir_name, self.local)
        else:
            model_path = self.local
            dir_name = os.path.dirname(self.local)
        print("\tdownloaded", model_path)

        if self.model_type in ["checkpoint"]:
            # if the input model is a checkpoint, convert it to a frozen model
            saver = tf.train.import_meta_graph(model_path)
            with tf.Session() as sess:
                saver.restore(sess, model_path[:-5])
                frozen_graph = freeze_session(sess,
                                              output_names=self.output_names)
                tf.train.write_graph(frozen_graph,
                                     dir_name,
                                     "frozen.pb",
                                     as_text=False)
            model_path = os.path.join(dir_name, "frozen.pb")
        elif self.model_type in ["saved_model"]:
            try:
                from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
                get_signature_def = lambda meta_graph_def, k: \
                    signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
            except ImportError:
                # TF1.12 changed the api
                get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[
                    k]

            # saved_model format - convert to checkpoint
            with tf.Session() as sess:
                meta_graph_def = tf.saved_model.loader.load(
                    sess, [tf.saved_model.tag_constants.SERVING], model_path)
                inputs = {}
                outputs = {}
                for k in meta_graph_def.signature_def.keys():
                    inputs_tensor_info = get_signature_def(meta_graph_def,
                                                           k).inputs
                    for _, input_tensor in sorted(inputs_tensor_info.items()):
                        inputs[
                            input_tensor.name] = sess.graph.get_tensor_by_name(
                                input_tensor.name)
                    outputs_tensor_info = get_signature_def(meta_graph_def,
                                                            k).outputs
                    for _, output_tensor in sorted(
                            outputs_tensor_info.items()):
                        outputs[output_tensor.
                                name] = sess.graph.get_tensor_by_name(
                                    output_tensor.name)
                # freeze uses the node name derived from output:0 so only pass in output:0;
                # it will provide all outputs of that node.
                for o in list(outputs.keys()):
                    if not o.endswith(":0"):
                        del outputs[o]
                frozen_graph = freeze_session(sess,
                                              output_names=list(
                                                  outputs.keys()))
                tf.train.write_graph(frozen_graph,
                                     dir_name,
                                     "frozen.pb",
                                     as_text=False)
            model_path = os.path.join(dir_name, "frozen.pb")

        # create the input data
        inputs = {}
        for k, v in self.input_names.items():
            if isinstance(v, six.text_type) and v.startswith("np."):
                inputs[k] = eval(v)  # pylint: disable=eval-used
            else:
                inputs[k] = self.make_input(v)
        if self.more_inputs:
            for k, v in self.more_inputs.items():
                inputs[k] = v
        tf.reset_default_graph()
        graph_def = graph_pb2.GraphDef()
        with open(model_path, "rb") as f:
            graph_def.ParseFromString(f.read())

        graph_def = tf2onnx.tfonnx.tf_optimize(inputs, self.output_names,
                                               graph_def, fold_const)
        shape_override = {}
        g = tf.import_graph_def(graph_def, name='')
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True),
                        graph=g) as sess:

            # fix inputs if needed
            for k in inputs.keys():  # pylint: disable=consider-iterating-dictionary
                t = sess.graph.get_tensor_by_name(k)
                dtype = tf.as_dtype(t.dtype).name
                if dtype != "float32":
                    v = inputs[k]
                    inputs[k] = v.astype(dtype)
            if self.force_input_shape:
                shape_override = self.input_names

            # run the model with tensorflow
            if self.skip_tensorflow:
                print("\ttensorflow", "SKIPPED")
            else:
                tf_results = self.run_tensorflow(sess, inputs)
                print("\ttensorflow", "OK")
            model_proto = None
            try:
                # convert model to onnx
                onnx_graph = self.to_onnx(sess.graph,
                                          opset=opset,
                                          shape_override=shape_override)
                new_model_proto = GraphUtil.opt_transposes_with_graph(
                    onnx_graph, "test", debug=debug)
                if new_model_proto:
                    model_proto = new_model_proto
                else:
                    print(
                        "\tNON-CRITICAL, optimizers are not applied successfully"
                    )
                print("\tto_onnx", "OK")
                if debug:
                    onnx_graph.dump_graph()
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file)
            except Exception as ex:
                tb = traceback.format_exc()
                print("\tto_onnx", "FAIL", ex, tb)

        try:
            onnx_results = None
            if backend == "caffe2":
                onnx_results = self.run_caffe2(name, model_proto, inputs)
            elif backend == "onnxmsrtnext":
                onnx_results = self.run_onnxmsrtnext(name, model_proto, inputs)
            elif backend == "onnxruntime":
                onnx_results = self.run_onnxruntime(name, model_proto, inputs)
            else:
                raise ValueError("unknown backend")
            print("\trun_onnx OK")

            try:
                if self.skip_tensorflow:
                    print("\tResults: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(
                                tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_allclose(tf_res,
                                                       onnx_res,
                                                       rtol=self.rtol,
                                                       atol=self.atol)
                    print("\tResults: OK")
                return True
            except Exception as ex:
                print("\tResults: ", ex)

        except Exception as ex:
            print("\trun_onnx", "FAIL", ex)

        return False
Example #22
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    # override unknown dimensions from -1 to 1 (aka batch size 1) since not every
    # runtime supports unknown dimensions.
    utils.ONNX_UNKNOWN_DIMENSION = args.unknown_dim

    extra_opset = args.extra_opset or []
    custom_ops = {}
    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {
            op: (default_custom_op_handler, [])
            for op in args.custom_ops.split(",")
        }
        extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs = loader.from_saved_model(
            args.saved_model, args.inputs, args.outputs)
        model_path = args.saved_model

    # todo: consider enabling const folding by default?
    graph_def = tf_optimize(inputs, outputs, graph_def, args.fold_const)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

    model_proto = g.make_model("converted from {}".format(model_path))

    new_model_proto = GraphUtil.optimize_model_proto(model_proto)
    if new_model_proto:
        model_proto = new_model_proto
    else:
        print("NON-CRITICAL, optimizers are not applied successfully")

    # write onnx graph
    if args.output:
        utils.save_protobuf(args.output, model_proto)
        print("\nComplete successfully, the onnx model is generated at " +
              args.output)
Example #23
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    custom_ops = {}
    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {op: (default_custom_op_handler, []) for op in args.custom_ops.split(",")}
        extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = loader.from_graphdef(args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = loader.from_checkpoint(args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs = loader.from_saved_model(
            args.saved_model, args.inputs, args.outputs, args.signature_def)
        model_path = args.saved_model

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    # todo: consider enabling const folding by default?
    graph_def = tf_optimize(inputs, outputs, graph_def, args.fold_const)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

    model_proto = g.make_model("converted from {}".format(model_path))

    logger.info("")
    model_proto = GraphUtil.optimize_model_proto(model_proto)

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX", model_path)
    if args.output:
        utils.save_protobuf(args.output, model_proto)
        logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info("To export ONNX model to file, please run with `--output` option")
Example #24
def load_graph(fname):
    model_proto = onnx.ModelProto()
    # parse the model file into the proto, as in the load_graph helper in Example #5
    with open(fname, "rb") as f:
        model_proto.ParseFromString(f.read())
    g = GraphUtil.create_graph_from_onnx_model(model_proto)
    return g, model_proto.producer_name