Example 1
    def test_nested_graph(self):  # type: () -> None
        n1 = helper.make_node(
            "Scale", ["X"], ["Y"], scale=2., name="n1")
        n2 = helper.make_node(
            "Scale", ["Y"], ["Z"], scale=3., name="n2")

        graph = helper.make_graph(
            [n1, n2],
            "nested",
            inputs=[
                helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])
            ],
            outputs=[
                helper.make_tensor_value_info("Z", TensorProto.FLOAT, [1, 2])
            ]
        )

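        # The If node reuses the same two-node subgraph for both branches; the
        # checker must recurse into graph-valued attributes.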
        i1 = helper.make_node(
            "If", ["cond"], ["Z"], then_branch=graph, else_branch=graph)

        graph = helper.make_graph(
            [i1],
            "test",
            inputs=[
                helper.make_tensor_value_info("cond", TensorProto.BOOL, [1])
            ],
            outputs=[],
        )

        checker.check_graph(graph)
Example 2
def test_shape():
    in_shape = (4, 3, 3, 4)
    ref_shape = (6, 2, 4, 3)

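    # Supply the target shape as a Constant tensor feeding Reshape's second input.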
    ref_array = np.array(ref_shape)
    ref_node = onnx.helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['ref_in'],
                                 value=onnx.helper.make_tensor(name = 'const_tensor',
                                                               data_type = onnx.TensorProto.INT32,
                                                               dims = ref_array.shape,
                                                               vals = ref_array.flatten().astype(int)))
    reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])

    shape_node = helper.make_node("Shape", ['out'], ['final_out'])

    graph = helper.make_graph([ref_node, reshape_node, shape_node],
                              "shape_test",
                              inputs = [helper.make_tensor_value_info("in",
                                            TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("final_out",
                                            TensorProto.FLOAT, list(ref_shape))])

    model = helper.make_model(graph, producer_name='shape_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('int32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'int32')

    tvm.testing.assert_allclose(ref_shape, tvm_out)
Example 3
 def test_onnx_to_caffe2_loop(self):
     body_nodes = [helper.make_node(
         "MatMul", ["_X", "W"], ["_Y"])]
     nodes = self._make_fake_loop_op(body_nodes,
                                     [(TensorProto.FLOAT, (2, 2), "X")],
                                     [(TensorProto.FLOAT, (2, 2), "Y")])
     X = np.random.rand(2, 2).astype(np.float32)
     W = np.random.rand(2, 2).flatten().astype(np.float32)
     graph_def = helper.make_graph(
         nodes,
         "test",
         [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 2)),
          helper.make_tensor_value_info("W", TensorProto.FLOAT, (2, 2))],
         [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2))],
         initializer=[helper.make_tensor("W",
                                         TensorProto.FLOAT,
                                         [2, 2],
                                         W.tolist())]
     )
     model_def = helper.make_model(graph_def, producer_name='onnx-to-caffe2-test')
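     # NumPy reference: the fake Loop has trip count 10, so MatMul is applied ten times.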
     Y = X
     for _ in range(10):
         Y = np.matmul(Y, W.reshape(2, 2))
     p = c2.prepare(model_def)
     out = p.run(X)
     np.testing.assert_allclose(out.Y, Y)
Example 4
 def _make_fake_if_op(self, true_nodes, false_nodes, output_types):
     true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
     true_graph = helper.make_graph(true_nodes, "true_graph", [], [
         helper.make_tensor_value_info("_Y", TensorProto.FLOAT, (2, 2)),
     ])
     false_graph = helper.make_graph(false_nodes, "false_graph", [], [
         helper.make_tensor_value_info("_Y", TensorProto.FLOAT, (2, 2)),
     ])
     if_inputs = ["condition"]
     if_outputs = [name for _, _, name in output_types]
     retval_nodes = [
         helper.make_node("Constant", [], ["condition"], value=true),
         helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
                          else_branch=false_graph)
     ]
     return retval_nodes
Example 5
def verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None):
    in_array = np.random.uniform(size=shape).astype(dtype)

    if alpha is None and beta is None and bias is None:
        alpha = 0.0001
        beta = 0.75
        bias = 1.0
        node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], size=nsize)
    else:
        node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], alpha=alpha,
                                     beta=beta, bias=bias, size=nsize)

    graph = helper.make_graph([node],
                              "lrn_test",
                              inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(shape))],
                              outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))])
    model = helper.make_model(graph, producer_name='lrn_test')

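    # NumPy reference for LRN: sum of squares over a window of nsize channels around c.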
    def _get_python_lrn():
        square_sum = np.zeros(shape).astype(dtype)
        for n, c, h, w in np.ndindex(in_array.shape):
            square_sum[n, c, h, w] = sum(in_array[n,
                                         max(0, c - int(math.floor((nsize - 1) / 2))): \
                                             min(shape[1], c + int(math.ceil((nsize - 1) / 2)) + 1),
                                         h,
                                         w] ** 2)
        py_out = in_array / ((bias + (alpha / nsize) * square_sum) ** beta)
        return py_out

    for target, ctx in ctx_list():
        input_name = model.graph.input[0].name
        py_out = _get_python_lrn()
        tvm_out = get_tvm_output(model, in_array, target, ctx, py_out.shape, 'float32')
        tvm.testing.assert_allclose(py_out, tvm_out, rtol=1e-5, atol=1e-5)
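A possible invocation of this helper (shapes and parameters are illustrative, not taken from the source):

    verify_lrn((5, 5, 5, 5), 3, 'float32')
    verify_lrn((5, 5, 5, 5), 3, 'float32', alpha=0.0002, beta=0.5, bias=2.0)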
Example 6
def test_spacetodepth():
    n, c, h, w = shape = (1, 1, 4, 6)
    input1 = np.random.rand(n, c, h, w).astype("float32")
    blocksize = 2
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=shape)]

    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 4, 2, 3))]

    nodes = [helper.make_node("SpaceToDepth", ["input1"], ["output"], block_size=blocksize)]

    graph = helper.make_graph(nodes,
                              "spacetodepth_test",
                              inputs,
                              outputs)

    spacetodepth_model = helper.make_model(graph)

    bkd_rep = backend.prepare(spacetodepth_model)
    output = bkd_rep.run([input1])

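    # NumPy reference for SpaceToDepth: lift each blocksize x blocksize spatial tile
    # into the channel dimension.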
    tmp = np.reshape(input1, [n, c,
                    h // blocksize, blocksize,
                    w // blocksize, blocksize])
    tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
    numpy_op = np.reshape(tmp, [n, c * (blocksize**2),
                    h // blocksize,
                    w // blocksize])

    npt.assert_almost_equal(output[0], numpy_op)
Example 7
    def test_eliminate_identity_single_use(self):  # type: () -> None
        nodes = [helper.make_node("Identity", ["X"], ["Y"])]
        nodes.extend(self._make_fake_loop_op(
            [helper.make_node("Identity", ["_Y"], ["_Y2"])],
            [(TensorProto.FLOAT, (5,), "Y")],
            [(TensorProto.FLOAT, (5,), "Y2")]))
        graph = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
             helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
        optimized_model = self._optimized(graph, ["eliminate_identity"])

        # All identity nodes should have been eliminated
        def check_identity(node):  # type: (NodeProto) -> None
            assert node.op_type != "Identity"
        self._visit_all_nodes_recursive(optimized_model.graph, check_identity)
        # Use of the output from the Identity node in the main graph should
        # have been replaced with the input to the identity node
        assert len(optimized_model.graph.output) == 2
        assert optimized_model.graph.output[0].name == "X"
        # Use of the output from the Identity node in the loop graph should
        # have been replaced with the input to that identity node
        assert len(optimized_model.graph.node[2].attribute[0].g.output) == 2
        assert optimized_model.graph.node[2].attribute[0].g.output[1].name == "_Y"
Example 8
    def test_small_model(self):
        # Create one input
        X = helper.make_tensor_value_info('IN', TensorProto.FLOAT, [2, 3])
        # Create one output
        Y = helper.make_tensor_value_info('OUT', TensorProto.FLOAT, [2, 3])
        # Create a node
        node_def = helper.make_node('Abs', ['IN'], ['OUT'])

        # Create the model
        graph_def = helper.make_graph([node_def], "test-model", [X], [Y])
        onnx_model = helper.make_model(graph_def,
                                       producer_name='onnx-example')

        model = Model()
        model.BuildFromOnnxModel(onnx_model)
        schedule = model.OptimizeSchedule()
        schedule = schedule.replace('\n', ' ')
        expected_schedule = r'// Target: .+// MachineParams: .+// Delete this line if not using Generator Pipeline pipeline = get_pipeline\(\);.+Func OUT = pipeline.get_func\(1\);.+{.+}.+'
        self.assertRegex(schedule, expected_schedule)

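        # Run the compiled pipeline on random input and compare with NumPy's abs.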
        input = np.random.rand(2, 3) - 0.5
        outputs = model.run([input])
        self.assertEqual(1, len(outputs))
        output = outputs[0]
        expected = np.abs(input)
        np.testing.assert_allclose(expected, output)
Example 9
    def test_initializer(self):
        X = np.array([[1, 2], [3, 4]]).astype(np.float32)
        Y = np.array([[1, 2], [3, 4]]).astype(np.float32)
        weight = np.array([[1, 0], [0, 1]])
        graph_def = make_graph(
            [make_node("Add", ["X", "Y"], ["Z0"]),
             make_node("Cast", ["Z0"], ["Z"], to="float"),
             make_node("Mul", ["Z", "weight"], ["W0"]),
             make_node("Tanh", ["W0"], ["W1"]),
             make_node("Sigmoid", ["W1"], ["W2"]),
             make_node("Scale", ["W2"], ["W3"], scale=-1.0)],
            name="test_initializer",
            inputs=[
                make_tensor_value_info("X", onnx.TensorProto.FLOAT, (2, 2)),
                make_tensor_value_info("Y", onnx.TensorProto.FLOAT, (2, 2)),
                make_tensor_value_info("weight", onnx.TensorProto.FLOAT, (2, 2)),
            ],
            outputs=[
                make_tensor_value_info("W3", onnx.TensorProto.FLOAT, (2, 2))
            ],
            initializer=[make_tensor("weight",
                                     onnx.TensorProto.FLOAT,
                                     [2, 2],
                                     weight.flatten().astype(float))]
        )

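        # NumPy reference mirrors the graph: Add -> Cast -> Mul -> Tanh -> Sigmoid -> Scale(-1).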
        def sigmoid(x):
            return 1 / (1 + np.exp(-x))

        W_ref = -sigmoid(np.tanh((X + Y) * weight))
        c2_rep = c2.prepare(make_model(graph_def, producer_name='caffe2-ref-test'))
        output = c2_rep.run({"X": X, "Y": Y})
        np.testing.assert_almost_equal(output["W3"], W_ref)
Example 10
    def test_split(self):  # type: () -> None
        node = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=['X'],
            value=onnx.helper.make_tensor(
                name='X',
                data_type=TensorProto.FLOAT,
                dims=[1],
                vals=[5],
            ),
        )
        graph = helper.make_graph(
            [node],
            'test-optimize-split',
            [],
            [helper.make_tensor_value_info('X', TensorProto.FLOAT, (1,))])

        init_model = self._optimized(graph, ['split_init'])
        self.assertEqual(len(init_model.graph.node), 1)
        self.assertEqual(len(init_model.graph.output), 1)
        self.assertEqual(init_model.graph.node[0].op_type, 'Constant')

        predict_model = self._optimized(graph, ['split_predict'])
        self.assertEqual(len(predict_model.graph.node), 0)
        self.assertEqual(len(predict_model.graph.input), 1)
        self.assertEqual(predict_model.graph.input[0].name, 'X')
Example 11
    def test_lift_lex_if(self):  # type: () -> None
        nodes = [helper.make_node("Identity", ["X"], ["Y"])]
        nodes.extend(self._make_fake_if_op(
            [helper.make_node("Identity", ["X"], ["_Y2"]),
             helper.make_node("Identity", ["Y"], ["_Y3"])],
            [helper.make_node("Identity", ["X"], ["_Y2"]),
             helper.make_node("Identity", ["X"], ["_Y3"])],
            [(TensorProto.FLOAT, (5,), "Y2"),
             (TensorProto.FLOAT, (5,), "Y3")]))
        graph = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
             helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
        # "If" node now diverges from ONNX schema. Disable checking.
        optimized_model = self._optimized(graph, ["lift_lexical_references"])

        # Identity, Constant (condition), If
        assert len(optimized_model.graph.node) == 3
        # else_branch, then_branch, __control_inputs
        assert len(optimized_model.graph.node[2].attribute) == 3
        assert optimized_model.graph.node[2].attribute[2].name == "__control_inputs"
        assert optimized_model.graph.node[2].attribute[2].strings[0] == b"X"
        assert optimized_model.graph.node[2].attribute[2].strings[1] == b"Y"
Example 12
    def test_fuse_add_bias_into_conv_use_move_constant(self):  # type: () -> None
        conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
        constant = helper.make_node("Constant", [], ["A"],
                                    value=helper.make_tensor(
                                        name="bias",
                                        data_type=TensorProto.FLOAT,
                                        dims=(16,),
                                        vals=np.random.randn(16).astype(np.float32).tolist()))
        add = helper.make_node("Add", ["Z", "A"], ["B"])
        graph = helper.make_graph(
            [conv, constant, add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 3, 3))],
            value_info=[
                helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 1, 1)),
            ]
        )
        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])

        assert len(optimized_model.graph.node) == 3
        assert optimized_model.graph.node[0].op_type == 'Constant'
        assert optimized_model.graph.node[1].op_type == 'Squeeze'
        assert optimized_model.graph.node[2].op_type == 'Conv'
        assert optimized_model.graph.output[0].name == 'Z'
        assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
        assert len(optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
Example 13
 def _make_fake_if_op(self,
                      true_nodes,  # type: Sequence[NodeProto]
                      false_nodes,  # type: Sequence[NodeProto]
                      output_types  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                      ):  # type: (...) -> List[NodeProto]
     true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
     true_graph = helper.make_graph(true_nodes, "true_graph", [], [])
     false_graph = helper.make_graph(false_nodes, "false_graph", [], [])
     if_inputs = ["condition"]
     if_outputs = [name for _, _, name in output_types]
     retval_nodes = [
         helper.make_node("Constant", [], ["condition"], value=true),
         helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
                          else_branch=false_graph)
     ]
     return retval_nodes
Example 14
    def test_fuse_add_bias_into_conv_use_weight_shape(self):  # type: () -> None
        nodes = [helper.make_node("Conv", ["X", "Y"], ["Z"]),
                 helper.make_node("Add", ["Z", "A"], ["B"])]
        nodes.extend(self._make_fake_loop_op(
            [helper.make_node("Conv", ["_X", "_Y"], ["_Z"]),
             helper.make_node("Add", ["_Z", "_A"], ["_B2"])],
            [(TensorProto.FLOAT, (1, 5, 3, 3), "X"),
             (TensorProto.FLOAT, (16, 5, 3, 3), "Y"),
             (TensorProto.FLOAT, (16, 1, 1), "A")],
            [(TensorProto.FLOAT, (1, 16, 3, 3), "B2")]))
        graph = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 1, 1))],
            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 3, 3))],
        )
        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])

        # Squeeze, Conv, Constant (trip count), Constant (condition), Loop
        assert len(list(optimized_model.graph.node)) == 5
        assert optimized_model.graph.node[0].op_type == 'Squeeze'
        assert optimized_model.graph.node[1].op_type == 'Conv'
        assert optimized_model.graph.output[0].name == 'Z'
        # Squeeze, Conv
        assert len(optimized_model.graph.node[4].attribute[0].g.node) == 2
        assert optimized_model.graph.node[4].attribute[0].g.node[0].op_type == 'Squeeze'
        assert optimized_model.graph.node[4].attribute[0].g.node[1].op_type == 'Conv'
        # Output 1 since 0 is 'cond'
        assert optimized_model.graph.node[4].attribute[0].g.output[1].name == '_Z'
Example 15
    def test_fuse_add_bias_into_conv_use_conv_shape(self):  # type: () -> None
        sub = helper.make_node("Sub", ["M", "N"], ["Y"])
        conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
        add = helper.make_node("Add", ["Z", "A"], ["B"])
        graph = helper.make_graph(
            [sub, conv, add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
             helper.make_tensor_value_info("M", TensorProto.FLOAT, (16, 5, 3, 3)),
             helper.make_tensor_value_info("N", TensorProto.FLOAT, (16, 5, 3, 3)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 1, 1))],
            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 3, 3))],
            value_info=[
                helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 3, 3))
            ],
        )
        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])

        assert len(optimized_model.graph.node) == 3
        assert optimized_model.graph.node[0].op_type == 'Sub'
        assert optimized_model.graph.node[1].op_type == 'Squeeze'
        assert optimized_model.graph.node[2].op_type == 'Conv'
        assert optimized_model.graph.output[0].name == 'Z'
        assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
        assert len(optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4
Example 16
    def test_extract_constant_to_initializer(self):  # type: () -> None
        conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
        constant = helper.make_node("Constant", [], ["A"],
                                    value=helper.make_tensor(
                                        name="bias",
                                        data_type=TensorProto.FLOAT,
                                        dims=(16,),
                                        vals=np.random.randn(16).astype(np.float32).tolist()))
        add = helper.make_node("Add", ["Z", "A"], ["B"])
        graph = helper.make_graph(
            [conv, constant, add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 3, 3))],
        )
        optimized_model = self._optimized(graph, ["extract_constant_to_initializer"])
        self.assertEqual(
            set(vi.name for vi in optimized_model.graph.input),
            {'X', 'Y', 'A'})

        self.assertEqual(len(optimized_model.graph.initializer), 1)
        init = optimized_model.graph.initializer[0]
        self.assertEqual(init.name, 'A')
        self.assertEqual(init.dims, [16])
        self.assertEqual(init.data_type, TensorProto.FLOAT)

        self.assertEqual([n.op_type for n in optimized_model.graph.node], ['Conv', 'Add'])
Example 17
    def test_fuse_transpose_into_gemm(self):  # type: () -> None
        nodes = [helper.make_node("Transpose", ["X"], ["A"], perm=[1, 0]),
                 helper.make_node("Transpose", ["Y"], ["B"], perm=[1, 0]),
                 helper.make_node("Gemm", ["A", "B", "C"], ["Z"])]
        nodes.extend(self._make_fake_loop_op(
            [helper.make_node("Transpose", ["_X"], ["_A"], perm=[1, 0]),
             helper.make_node("Transpose", ["_Y"], ["_B"], perm=[1, 0]),
             helper.make_node("Gemm", ["_A", "_B", "_C"], ["_Z2"])],
            [(TensorProto.FLOAT, (2, 3), "X"),
             (TensorProto.FLOAT, (5, 2), "Y"),
             (TensorProto.FLOAT, (3, 5), "C")],
            [(TensorProto.FLOAT, (2, 3), "Z2")]))
        graph = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 2)),
             helper.make_tensor_value_info("C", TensorProto.FLOAT, (3, 5))],
            [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (3, 5))])
        optimized_model = self._optimized(graph, ["fuse_transpose_into_gemm"])

        # Gemm, Constant (trip count), Constant (cond), Loop
        assert len(list(optimized_model.graph.node)) == 4
        assert optimized_model.graph.node[0].op_type == "Gemm"
        # Gemm
        assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
        assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == "Gemm"
Example 18
    def test_eliminate_unused_initializer_no_eliminate_used(self):  # type: () -> None
        nodes = [helper.make_node("Add", ["X", "A"], ["Z"])]
        nodes.extend(self._make_fake_loop_op(
            [helper.make_node("Add", ["_X", "_A"], ["_Z2"])],
            [(TensorProto.FLOAT, (1, 2), "X"),
             (TensorProto.FLOAT, (1, 2), "A")],
            [(TensorProto.FLOAT, (1, 2), "Z2")]))
        graph = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 2)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 2))],
            [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 2))],
            [helper.make_tensor("A", TensorProto.FLOAT,
                                dims=(1, 2),
                                vals=np.random.randn(1, 2).astype(np.float32).tobytes(),
                                raw=True)])
        optimized_model = self._optimized(graph, ["eliminate_unused_initializer"])

        # Add, Constant (trip count), Constant (cond), Loop
        assert len(list(optimized_model.graph.node)) == 4
        assert optimized_model.graph.node[0].op_type == "Add"
        assert optimized_model.graph.output[0].name == "Z"
        # Add
        assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
        assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == 'Add'
        assert optimized_model.graph.node[3].attribute[0].g.output[1].name == '_Z2'

        assert len(list(optimized_model.graph.initializer)) == 1
Example 19
 def _make_fake_loop_op(self,
                        body_nodes,  # type: Sequence[NodeProto]
                        input_types,  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                        output_types  # type: Sequence[Tuple[TensorProto.DataType, Sequence[int], Text]]
                        ):  # type: (...) -> List[NodeProto]
     zero = helper.make_tensor("trip_count_value", TensorProto.INT32, (), [10])
     true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
     # lcd is a dummy loop-carried dependency that only exists because
     # right now the schema checker is broken and assumes a variadic
     # input needs at least one value.
     graph_inputs = [helper.make_tensor_value_info("i", TensorProto.INT32, ()),
                     helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
     for type, shape, name in input_types:
         graph_inputs.append(helper.make_tensor_value_info("_" + name, type, shape))
     graph_outputs = [helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
     for type, shape, name in output_types:
         graph_outputs.append(helper.make_tensor_value_info("_" + name, type, shape))
     body_graph = helper.make_graph(body_nodes, "body_graph", graph_inputs,
                                    graph_outputs)
     loop_inputs = ["trip_count", "condition"]
     loop_inputs.extend([name for _, _, name in input_types])
     # TODO: fix checker to accept 0-input variadic inputs
     if len(loop_inputs) == 2:
         loop_inputs.append("")
     loop_outputs = [name for _, _, name in output_types]
     retval_nodes = [
         helper.make_node("Constant", [], ["trip_count"], value=zero),
         helper.make_node("Constant", [], ["condition"], value=true),
         helper.make_node("Loop", loop_inputs, loop_outputs, body=body_graph)
     ]
     return retval_nodes
Example 20
def verify_split(indata, outdatas, split, axis=0):
    indata = np.array(indata).astype(np.float32)
    outdatas = [np.array(o).astype(np.float32) for o in outdatas]
    node = helper.make_node(
        'Split',
        inputs=['input'],
        outputs=['output_{}'.format(i) for i in range(len(split))],
        axis=axis,
        split=split
    )
    graph = helper.make_graph([node],
                              'split_test',
                              inputs = [helper.make_tensor_value_info("input",
                                            TensorProto.FLOAT, list(indata.shape))],
                              outputs = [helper.make_tensor_value_info("output_{}".format(i),
                                            TensorProto.FLOAT, list(outdatas[i].shape))
                                            for i in range(len(split))
                                         ])
    model = helper.make_model(graph, producer_name='split_test')

    for target, ctx in ctx_list():
        output_shape = [o.shape for o in outdatas]
        output_type = ['float32'] * len(split)
        tvm_out = get_tvm_output(model, indata, target, ctx, output_shape, output_type)
    for o, t in zip(outdatas, tvm_out):
        tvm.testing.assert_allclose(o, t)
Example 21
 def test_model_docstring(self):  # type: () -> None
     graph = helper.make_graph([], "my graph", [], [])
     model_def = helper.make_model(graph, doc_string='test')
     # models may have their own documentation, but don't have a name
     # their name is the domain-qualified name of the underlying graph.
     self.assertFalse(hasattr(model_def, "name"))
     self.assertEqual(model_def.doc_string, 'test')
Example 22
def verify_pad(indata, pads, value=0.0):
    indata = np.array(indata).astype(np.float32)
    # NumPy expected result
    len_dim = len(pads) // 2
    np_pads = [(pads[i], pads[i+len_dim]) for i in range(len_dim)]
    outdata = np.pad(indata, pad_width=np_pads, mode='constant', constant_values=value)
    #  onnx graph
    node = helper.make_node(
        'Pad',
        inputs=['input'],
        outputs=['output'],
        mode='constant',
        pads=pads,
        value=value
    )
    graph = helper.make_graph([node],
                              'pad_test',
                              inputs = [helper.make_tensor_value_info("input",
                                            TensorProto.FLOAT, list(indata.shape))],
                              outputs = [helper.make_tensor_value_info("output",
                                            TensorProto.FLOAT, list(outdata.shape))])
    model = helper.make_model(graph, producer_name='pad_test')
    #  tvm result
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32')
    tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
Example 23
def verify_reduce_x(name, indata, axis, keepdims):
    indata = np.array(indata).astype(np.float32)
    # NumPy expected result
    if name == 'ReduceMax':
        outdata = np.maximum.reduce(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceMin':
        outdata = np.minimum.reduce(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceSum':
        outdata = np.sum(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceMean':
        outdata = np.mean(indata, axis=axis, keepdims=keepdims == 1)
    else:
        raise Exception('unsupported op: {}'.format(name))
    if len(np.asarray(outdata).shape) == 0:
        outdata = np.asarray([outdata])
    #  onnx graph
    if axis is None:
        node = helper.make_node(name, inputs=['input'], outputs=['output'],
                                keepdims=keepdims)
    else:
        node = helper.make_node(name, inputs=['input'], outputs=['output'],
                                axis=axis, keepdims=keepdims)
    graph = helper.make_graph([node],
                              '{}_test'.format(name),
                              inputs = [helper.make_tensor_value_info("input",
                                            TensorProto.FLOAT, list(indata.shape))],
                              outputs = [helper.make_tensor_value_info("output",
                                            TensorProto.FLOAT, list(outdata.shape))])
    model = helper.make_model(graph, producer_name='{}_test'.format(name))
    #  tvm result
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32')
    tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
Example 24
def verify_mean(input_dim):
    dtype = 'float32'

    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim).astype(dtype)
    a_np3 = np.random.uniform(size=input_dim).astype(dtype)

    b_np = np.mean((a_np1, a_np2, a_np3), axis=0)

    mean_node = helper.make_node("Mean", ["a_np1", "a_np2", "a_np3"], ["out"])

    graph = helper.make_graph([mean_node],
                              "Mean_test",
                              inputs = [helper.make_tensor_value_info("a_np1",
                                            TensorProto.FLOAT, list(input_dim)),
                                        helper.make_tensor_value_info("a_np2",
                                            TensorProto.FLOAT, list(input_dim)),
                                        helper.make_tensor_value_info("a_np3",
                                            TensorProto.FLOAT, list(input_dim))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(b_np.shape))])

    model = helper.make_model(graph, producer_name='Mean_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape)
        tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
Example 25
    def test_scalars(self):
        # Create 2 inputs
        X = helper.make_tensor_value_info('A', TensorProto.INT32, [])
        Y = helper.make_tensor_value_info('B', TensorProto.INT32, [])
        # Create one output
        Z = helper.make_tensor_value_info('C', TensorProto.INT32, [])
        # Create a node
        node_def = helper.make_node('Add', ['A', 'B'], ['C'])

        # Create the model
        graph_def = helper.make_graph([node_def], "scalar-model", [X, Y], [Z])
        onnx_model = helper.make_model(graph_def,
                                       producer_name='onnx-example')

        model = Model()
        model.BuildFromOnnxModel(onnx_model)
        schedule = model.OptimizeSchedule()
        schedule = schedule.replace('\n', ' ')
        expected_schedule = r'// Target: .+// MachineParams: .+// Delete this line if not using Generator Pipeline pipeline = get_pipeline\(\);.+Func C = pipeline.get_func\(2\);.+{.+}.+'
        self.assertRegex(schedule, expected_schedule)

        input1 = np.random.randint(-10, 10, size=())
        input2 = np.random.randint(-10, 10, size=())
        outputs = model.run([input1, input2])
        self.assertEqual(1, len(outputs))
        output = outputs[0]
        expected = input1 + input2
        np.testing.assert_allclose(expected, output)
Example 26
def _test_power_iteration(x_shape, y_shape):
    if isinstance(y_shape, int):
        y_shape = [y_shape]

    x = np.random.uniform(size=x_shape).astype(np.float32)
    y = np.random.uniform(size=y_shape).astype(np.float32)

    np_res = np.power(x, y).astype(np.float32)

    res = helper.make_node("Pow", ['x', 'y'], ['out'])

    graph = helper.make_graph([res],
                              'power_test',
                              inputs = [helper.make_tensor_value_info("x",
                                            TensorProto.FLOAT, list(x_shape)),
                                        helper.make_tensor_value_info("y",
                                            TensorProto.FLOAT, list(y_shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(np_res.shape))])

    model = helper.make_model(graph, producer_name='power_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, y], target, ctx, np_res.shape)
        tvm.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5)
Example 27
def _test_upsample_bilinear_opset9():
    scale = 2
    in_shape = (1, 1, 3, 3)
    out_shape = (1, 1, 3*scale, 3*scale)
    y = helper.make_node("Upsample", ['in','scales'], ['out'], mode='linear')
    scales=[1.0, 1.0, 2.0, 2.0]
    in_array = np.random.uniform(size=in_shape).astype(np.float32)
    out_array = topi.testing.bilinear_resize_python(in_array, (3*scale, 3*scale), "NCHW")

    ref_array = np.array(scales)
    ref_node = helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['scales'],
                                 value=onnx.helper.make_tensor(name = 'const_tensor',
                                                               data_type = TensorProto.FLOAT,
                                                               dims = ref_array.shape,
                                                               vals = ref_array.flatten().astype(float)))

    graph = helper.make_graph([ref_node, y],
                              'upsample_bilinear_opset9_test',
                              inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))])

    model = helper.make_model(graph, producer_name='upsample_bilinear_opset9_test')
    inputs = []
    inputs.append(in_array)

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, inputs, target, ctx, out_shape, 'float32')
        tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
Example 28
def verify_constantfill(is_shape, input_dim, out_dim, value, dtype, **kwargs):
    input_a = np.random.uniform(size=input_dim).astype(dtype)
    out = np.empty(shape=out_dim, dtype=dtype)
    out.fill(value)

    if is_shape:
        fill_node = helper.make_node("ConstantFill", [], ["out"], shape=input_dim, value=value, **kwargs)
    else:
        fill_node = helper.make_node("ConstantFill", ["input_a"], ["out"], value=value, dtype=dtype, **kwargs)

    graph = helper.make_graph([fill_node],
                              "fill_test",
                              inputs = [helper.make_tensor_value_info("input_a",
                                            TensorProto.FLOAT, list(input_dim))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(out.shape))])

    model = helper.make_model(graph, producer_name='fill_test')

    for target, ctx in ctx_list():
        if is_shape:
            tvm_out = get_tvm_output(model, [], target, ctx, out.shape)
        else:
            tvm_out = get_tvm_output(model, [input_a], target, ctx, out.shape)

        tvm.testing.assert_allclose(out, tvm_out, rtol=1e-5, atol=1e-5)
Example 29
def test_reshape_like():
    in_shape = (4, 3, 3, 4)
    ref_shape = (3, 4, 4, 3)

    ref_array = np.random.uniform(size=ref_shape).astype('float32')
    ref_node = onnx.helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['ref_in'],
                                 value=onnx.helper.make_tensor(name = 'const_tensor',
                                                               data_type = onnx.TensorProto.FLOAT,
                                                               dims = ref_array.shape,
                                                               vals = ref_array.flatten().astype(float)))
    copy_node = helper.make_node("Identity", ["ref_in"], ["copy_in"])
    reshape_node = helper.make_node("Reshape", ["in", "copy_in"], ["out"])

    graph = helper.make_graph([ref_node, copy_node, reshape_node],
                              "reshape_like_test",
                              inputs = [helper.make_tensor_value_info("in",
                                            TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(ref_shape))])

    model = helper.make_model(graph, producer_name='reshape_like_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32')

    tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
Example 30
    def test_onnx_to_caffe2_zipfile(self):
        buf = tempfile.NamedTemporaryFile()
        onnx_model = zipfile.ZipFile(buf, 'w')
        output = tempfile.NamedTemporaryFile()
        init_net_output = tempfile.NamedTemporaryFile()

        node_def = helper.make_node(
            "MatMul", ["X", "W"], ["Y"])
        X = np.random.rand(2, 3).astype(np.float32)
        W = np.random.rand(3, 2).flatten().astype(np.float32)
        graph_def = helper.make_graph(
            [node_def],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
             helper.make_tensor_value_info("W", TensorProto.FLOAT, (3, 2))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2))],
            initializer=[helper.make_tensor("W",
                                            TensorProto.FLOAT,
                                            [3, 2],
                                            b'__EXTERNAL',
                                            raw=True)])
        model_def = helper.make_model(graph_def, producer_name='onnx-to-caffe2-test')
        onnx_model.writestr('__MODEL_PROTO', model_def.SerializeToString())
        onnx_model.writestr('W', W.tobytes())
        onnx_model.close()

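        # prepare_zip_archive should resolve the b'__EXTERNAL' raw initializer
        # from the zip entry named 'W'.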
        W = W.reshape((3, 2))
        Y_expect = np.matmul(X, W)

        c2_model = c2.prepare_zip_archive(buf)
        Y = c2_model.run(X).Y
        np.testing.assert_allclose(Y, Y_expect)
Example 31
opsets = []
onnxdomain = onnx.OperatorSetIdProto()  # assumed setup; the excerpt starts mid-script
onnxdomain.version = 12
onnxdomain.domain = ""  # The empty string ("") or absence of this field implies the operator set that is defined as part of the ONNX specification.
opsets.append(onnxdomain)

kwargs = {}
kwargs['opset_imports'] = opsets

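# A, B, C, ratio and training_mode are tensor value infos assumed to be defined
# earlier in the original script (not shown in this excerpt).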
# Create the model (ModelProto)
bias = helper.make_node("Add", ["A", "B"], ["add0_out"], "add0")
dropout_12 = helper.make_node("Dropout",
                              ["add0_out", "ratio_const", "training_mode"],
                              ["C", "mask"], "dropout0")

graph = helper.make_graph(
    [bias, dropout_12],
    "Bias_Dropout_Fusion",  #name
    [A, B],
    [C],
    [ratio, training_mode])

model = helper.make_model(graph, producer_name='onnx-example', **kwargs)
onnx.save(model, 'bias_dropout_fusion1.onnx')

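# Second variant: the Add inputs are commuted (B + A), which the fusion pass
# should also match.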
# Create the model (ModelProto)
bias = helper.make_node("Add", ["B", "A"], ["add0_out"], "add0")
dropout_12 = helper.make_node("Dropout",
                              ["add0_out", "ratio_const", "training_mode"],
                              ["C", "mask"], "dropout0")

graph = helper.make_graph(
    [bias, dropout_12],
    "Bias_Dropout_Fusion",  #name
    [A, B],
    [C],
    [ratio, training_mode])

model = helper.make_model(graph, producer_name='onnx-example', **kwargs)
# Filename assumed by analogy with the first model; the excerpt breaks off here.
onnx.save(model, 'bias_dropout_fusion2.onnx')
Example 32
    def test_slice(self):
        # test case 1 with normal inputs
        axes = [0, 1, 2]
        starts = [0, 0, 0]
        ends = [2, 2, 2]

        if legacy_opset_pre_ver(10):
            node_def = helper.make_node("Slice", ["X"], ["S"],
                                        axes=axes,
                                        starts=starts,
                                        ends=ends)
            graph_def = helper.make_graph(
                [node_def],
                name="test_unknown_shape",
                inputs=[
                    helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                                  [None, None, None])
                ],
                outputs=[
                    helper.make_tensor_value_info("S", TensorProto.FLOAT,
                                                  [None, None, None])
                ])
        else:
            node_def = helper.make_node("Slice",
                                        ["X", "starts", "ends", "axes"], ["S"])
            graph_def = helper.make_graph(
                [node_def],
                name="test_unknown_shape",
                inputs=[
                    helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                                  [None, None, None]),
                    helper.make_tensor_value_info("starts", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("ends", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("axes", TensorProto.INT32,
                                                  [None]),
                ],
                outputs=[
                    helper.make_tensor_value_info("S", TensorProto.FLOAT,
                                                  [None, None, None])
                ])
        tf_rep = onnx_graph_to_tensorflow_rep(graph_def)

        if legacy_opset_pre_ver(10):
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            output = tf_rep.run({"X": x})
            np.testing.assert_almost_equal(output["S"], x[0:2, 0:2, 0:2])
        else:
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            output = tf_rep.run({
                "X": x,
                "starts": starts,
                "ends": ends,
                "axes": axes
            })
            np.testing.assert_almost_equal(output["S"], x[0:2, 0:2, 0:2])

        # test case 2 with negative, out-of-bound and default inputs
        axes = [0, 2]
        starts = [0, -7]
        ends = [-8, 20]
        steps = [1, 1]

        if legacy_opset_pre_ver(10):
            node_def = helper.make_node("Slice", ["X"], ["S"],
                                        axes=axes,
                                        starts=starts,
                                        ends=ends)
            graph_def = helper.make_graph(
                [node_def],
                name="test_unknown_shape",
                inputs=[
                    helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                                  [None, None, None])
                ],
                outputs=[
                    helper.make_tensor_value_info("S", TensorProto.FLOAT,
                                                  [None, None, None])
                ])
        else:
            node_def = helper.make_node(
                "Slice", ["X", "starts", "ends", "axes", "steps"], ["S"])
            graph_def = helper.make_graph(
                [node_def],
                name="test_unknown_shape",
                inputs=[
                    helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                                  [None, None, None]),
                    helper.make_tensor_value_info("starts", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("ends", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("axes", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("steps", TensorProto.INT32,
                                                  [None]),
                ],
                outputs=[
                    helper.make_tensor_value_info("S", TensorProto.FLOAT,
                                                  [None, None, None])
                ])
        tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
        if legacy_opset_pre_ver(10):
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            output = tf_rep.run({"X": x})
            np.testing.assert_almost_equal(output["S"], x[0:-8, :, -7:20])
        else:
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            output = tf_rep.run({
                "X": x,
                "starts": starts,
                "ends": ends,
                "axes": axes,
                "steps": steps
            })
            np.testing.assert_almost_equal(output["S"], x[0:-8, :, -7:20])

        # test case 3 with non-default steps
        axes = [0, 1, 2]
        starts = [0, 0, 0]
        ends = [2, 2, 2]
        steps = [2, -2, -1]

        if not legacy_opset_pre_ver(10):
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            output = tf_rep.run({
                "X": x,
                "starts": starts,
                "ends": ends,
                "axes": axes,
                "steps": steps
            })
            np.testing.assert_almost_equal(output["S"], x[0:2:2, 0:2:-2,
                                                          0:2:-1])
Example 33
    def construct_model_conv_pad(self,
                                 output_model_path,
                                 conv_input_shape,
                                 conv_weight_shape,
                                 pad_input_shape,
                                 pad_mode,
                                 pad_dims,
                                 constant_value=None):
        #      (input)
        #          \
        #         Conv
        #        /    \
        #   Identity   Pad
        #    /            \
        # (identity_out)  (output)
        rank = len(pad_input_shape)
        self.assertEqual(rank * 2, len(pad_dims))

        input_tensor = helper.make_tensor_value_info('input',
                                                     TensorProto.FLOAT,
                                                     conv_input_shape)

        conv_weight_arr = np.random.randint(-1, 2, conv_weight_shape).astype(
            np.float32)
        conv_weight_initializer = onnx.numpy_helper.from_array(
            conv_weight_arr, name='conv1_weight')
        conv_node = onnx.helper.make_node('Conv', ['input', 'conv1_weight'],
                                          ['conv_output'],
                                          name='conv_node')

        identity_out = helper.make_tensor_value_info('identity_out',
                                                     TensorProto.FLOAT,
                                                     pad_input_shape)
        identity_node = helper.make_node('Identity', ['conv_output'],
                                         ['identity_out'],
                                         name='IdentityNode')

        pad_dims_initializer = helper.make_tensor('pad_dims',
                                                  TensorProto.INT64,
                                                  [2 * rank], pad_dims)
        output_shape = [
            sum(e) for e in list(
                zip(pad_input_shape, pad_dims[:rank], pad_dims[rank:]))
        ]
        output_tensor = helper.make_tensor_value_info('output',
                                                      TensorProto.FLOAT,
                                                      output_shape)
        pad_inputs = ['conv_output', 'pad_dims']
        initializers = [conv_weight_initializer, pad_dims_initializer]
        if (constant_value is not None) and (pad_mode is None
                                             or pad_mode == 'constant'):
            constant_value_tensor = helper.make_tensor('padding_value',
                                                       TensorProto.FLOAT, [],
                                                       [constant_value])
            pad_inputs.extend(['padding_value'])
            initializers.extend([constant_value_tensor])
        kwargs = {'mode': pad_mode} if pad_mode is not None else {}
        pad_node = helper.make_node('Pad',
                                    pad_inputs, ['output'],
                                    name='pad_node',
                                    **kwargs)

        graph = helper.make_graph([conv_node, identity_node, pad_node],
                                  'TestOpQuantizerPad_test_model',
                                  [input_tensor],
                                  [identity_out, output_tensor],
                                  initializer=initializers)
        model = helper.make_model(graph,
                                  opset_imports=[helper.make_opsetid("", 13)])
        model.ir_version = onnx.IR_VERSION
        onnx.save(model, output_model_path)
Example 34
def add_loss_output(onx,
                    score_name='squared_error',
                    loss_name='loss',
                    label_name='label',
                    weight_name=None,
                    penalty=None,
                    output_index=None,
                    **kwargs):
    """
    Modifies an ONNX graph to add operators to score and allow training.

    :param onx: onx graph
    :param score_name: name of the score
    :param loss_name: name of the output loss
    :param label_name: name of the label input
    :param weight_name: None or any value to consider weight
        while computing loss
    :param penalty: dictionary similar to the
        following one `{ weight_name: {'l1': alpha, 'l2': beta} }`
        or `{ weight_name: beta}`,
        it adds a L1 and/or L2 penalty to one input or initializer,
        penalty = :math:`|w| \\alpha + w^2 \\beta`
    :param output_index: the output used to compute the loss,
        if None, the function assumes there is only one output,
        it must be specified if there are more than 1,
        it can be an integer or a string (output name)
    :param kwargs: additional arguments for losses (see below)
    :return: modified graph

    Possible values for *score_name*:

    * `'squared_error'` or `'l2'`: :math:`\\sum_i{(f(x_i)-y_i)^2}` or
      :math:`\\sum_i{w_i (f(x_i)-y_i)^2}` if *weight_name*
      is not None
    * `'absolute_error'` or `'l1'`: :math:`\\sum_i{|f(x_i)-y_i|}` or
      :math:`\\sum_i{w_i |f(x_i)-y_i|}` if *weight_name*
      is not None
    * `'elastic'`: mixture of losses, kwargs must define
      *l1_weight* and *l2_weight*; if undefined, both default to 0.5
    * `'log'`: log loss :math:`-(1-yt)\\log(1-yp) - yt\\log(yp)`,
      this only works for binary classification, where *yt* is the
      expected binary label and *yp* is a two-column matrix of
      predicted probabilities whose rows each sum to 1.

    See example :ref:`l-orttraining-nn-gpu`.
    Next example shows the loss with L1 and L2 loss.

    .. gdot::
        :script: DOT-SECTION

        import numpy
        from sklearn.datasets import make_regression
        from sklearn.model_selection import train_test_split
        from sklearn.linear_model import LinearRegression
        from mlprodict.onnx_conv import to_onnx
        from mlprodict.onnxrt import OnnxInference
        from onnxcustom import __max_supported_opset__ as opset
        from onnxcustom.utils.orttraining_helper import add_loss_output
        from onnxcustom.training.optimizers import OrtGradientOptimizer

        X, y = make_regression(  # pylint: disable=W0632
            100, n_features=10, bias=2, random_state=0)
        X = X.astype(numpy.float32)
        y = y.astype(numpy.float32)
        w = (numpy.random.rand(y.shape[0]) + 1).astype(X.dtype)
        X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
        reg = LinearRegression()
        reg.fit(X_train, y_train, sample_weight=w_train)
        reg.coef_ = reg.coef_.reshape((1, -1))
        onx = to_onnx(reg, X_train, target_opset=opset,
                      black_op={'LinearRegressor'})

        onx_loss = add_loss_output(
            onx, weight_name='weight', score_name='elastic',
            l1_weight=0.1, l2_weight=0.9)

        print("DOT-SECTION", OnnxInference(onx_loss).to_dot())

    Next example shows how to add a L2 loss with L1 and L2 penalties
    on the coefficients.

    .. gdot::
        :script: DOT-SECTION

        import numpy
        from sklearn.datasets import make_regression
        from sklearn.model_selection import train_test_split
        from sklearn.linear_model import LinearRegression
        from mlprodict.onnx_conv import to_onnx
        from mlprodict.onnxrt import OnnxInference
        from onnxcustom import __max_supported_opset__ as opset
        from onnxcustom.utils.orttraining_helper import add_loss_output
        from onnxcustom.training.optimizers import OrtGradientOptimizer

        X, y = make_regression(  # pylint: disable=W0632
            100, n_features=10, bias=2, random_state=0)
        X = X.astype(numpy.float32)
        y = y.astype(numpy.float32)
        w = (numpy.random.rand(y.shape[0]) + 1).astype(X.dtype)
        X_train, _, y_train, __, w_train, ___ = train_test_split(X, y, w)
        reg = LinearRegression()
        reg.fit(X_train, y_train, sample_weight=w_train)
        reg.coef_ = reg.coef_.reshape((1, -1))
        onx = to_onnx(reg, X_train, target_opset=opset,
                      black_op={'LinearRegressor'})

        onx_loss = add_loss_output(
            onx, weight_name='weight', score_name='elastic',
            penalty={'coef': {'l1': 0.5, 'l2':0.5},
                     'intercept': {'l1': 0.5, 'l2':0.5}})

        print("DOT-SECTION", OnnxInference(onx_loss).to_dot())
    """
    from mlprodict.onnx_tools.optim import onnx_remove_node_unused

    # rename every intermediate result named 'label'
    def _replace(ens):
        for i in range(len(ens)):  # pylint: disable=C0200
            if ens[i] == 'label':
                ens[i] = '_label_'

    for node in onx.graph.node:
        if "_label_" in node.input or "_label_" in node.output:
            raise RuntimeError(  # pragma: no cover
                "One intermediate result contains '_label_'. "
                "It should be removed manually.\n%r" % node)
        _replace(node.input)
        _replace(node.output)

    if output_index is None:
        if len(onx.graph.output) != 1:
            raise ValueError(  # pragma: no cover
                "Unable to guess the output to compare to the "
                "expacted labels among %r." %
                ([o.name for o in onx.graph.output]))
        outputs = onx.graph.output
        output_index = 0
    elif isinstance(output_index, int):
        outputs = [onx.graph.output[output_index]]
    elif isinstance(output_index, str):
        outputs = [(i, o) for i, o in enumerate(onx.graph.output)
                   if o.name == output_index]
        if len(outputs) != 1:
            raise ValueError(  # pragma: no cover
                "Unable to find output %r in %r." %
                (output_index, [o.name for o in onx.graph.output]))
        output_index = outputs[0][0]
        outputs = [outputs[0][1]]
    else:
        raise TypeError(  # pragma: no cover
            f"output_index must be an integer or a str not {type(output_index)!r}."
        )

    existing_names = []
    for node in onx.graph.node:
        existing_names.extend(node.output)
        existing_names.extend(node.input)
    existing_names = set(existing_names)

    output_onx = onx.graph.output[output_index]
    output_name = output_onx.name
    elem = output_onx.type.tensor_type.elem_type
    if elem == 0:
        raise TypeError(  # pragma: no cover
            f"Unable to guess input tensor type from {output_onx!r}.")
    shape = []
    for d in output_onx.type.tensor_type.shape.dim:
        shape.append(d.dim_value if d.dim_value > 0 else None)

    if score_name in ('squared_error', 'l2'):
        inits, inputs, nodes, outputs = _loss_l2(existing_names, elem, shape,
                                                 output_name, label_name,
                                                 weight_name, loss_name)
    elif score_name in ('absolute_error', 'l1'):
        inits, inputs, nodes, outputs = _loss_l1(existing_names, elem, shape,
                                                 output_name, label_name,
                                                 weight_name, loss_name)
    elif score_name == 'elastic':
        inits, inputs, nodes, outputs = _loss_elastic(existing_names, elem,
                                                      shape, output_name,
                                                      label_name, weight_name,
                                                      loss_name, **kwargs)
    elif score_name == 'log':
        shape = (None, 1)
        inits, inputs, nodes, outputs = _loss_log(existing_names, elem, shape,
                                                  output_name, label_name,
                                                  weight_name, loss_name,
                                                  **kwargs)
    else:
        raise NotImplementedError(  # pragma: no cover
            f"Unexpected {score_name!r} value for score_name.")

    if penalty is not None:
        final_name = nodes[-1].output[0]
        loss_name = _unique_name(existing_names, "loss_diff")
        nodes[-1].output[0] = loss_name
        names = []
        for k, v in penalty.items():
            if isinstance(v, float):
                v = {'l2': v}
            inits_to_add, nodes_to_add = penalty_loss_onnx(
                k,
                dtype=TENSOR_TYPE_TO_NP_TYPE[elem],
                existing_names=existing_names,
                **v)
            names.append(nodes_to_add[-1].output[0])
            nodes.extend(nodes_to_add)
            inits.extend(inits_to_add)
        # Operator Sum does not have a gradient, so the penalties are
        # accumulated with a chain of binary Add nodes instead.
        if len(names) == 1:
            pen_name = names[0]
        else:
            current = names[0]
            for i in range(1, len(names)):
                new_name = _unique_name(existing_names, "sumop")
                nodes.append(make_node('Add', [current, names[i]], [new_name]))
                current = new_name
            pen_name = current

        cst_shape = _unique_name(existing_names, "shapevect")
        inits.append(
            from_array(numpy.array([-1, 1], dtype=numpy.int64),
                       name=cst_shape))
        loss_reshape = _unique_name(existing_names, "loss_reshape")
        pen_reshape = _unique_name(existing_names, "penalty_reshape")
        nodes.extend([
            make_node("Reshape", [pen_name, cst_shape], [pen_reshape]),
            make_node("Reshape", [loss_name, cst_shape], [loss_reshape])
        ])

        nodes.append(
            make_node('Add', [pen_reshape, loss_reshape], [final_name]))

    inits = list(onx.graph.initializer) + inits
    graph = make_graph(
        list(onx.graph.node) + nodes, onx.graph.name,
        list(onx.graph.input) + inputs,
        outputs + [onx.graph.output[output_index]], inits)
    onnx_model = make_model(graph)
    onnx_model.ir_version = onx.ir_version
    onnx_model.producer_name = onx.producer_name
    onnx_model.producer_version = onx.producer_version
    onnx_model.domain = onx.domain
    onnx_model.model_version = onx.model_version
    onnx_model.doc_string = onx.doc_string
    if len(onx.metadata_props) > 0:
        values = {p.key: p.value for p in onx.metadata_props}
        set_model_props(onnx_model, values)

    # fix opset import
    del onnx_model.opset_import[:]  # pylint: disable=E1101
    for oimp in onx.opset_import:
        op_set = onnx_model.opset_import.add()  # pylint: disable=E1101
        op_set.domain = oimp.domain
        op_set.version = oimp.version
    return _rewrite_op_no_grad(onnx_remove_node_unused(onnx_model))
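A minimal usage sketch of the function above, reusing names from the docstring examples; the call with default arguments and the trained-weight names passed to OrtGradientOptimizer are assumptions, not part of the original code:

# Hedged sketch: onx, X_train, y_train come from the docstring examples
# above; the initializer names 'coef' and 'intercept' are hypothetical.
onx_loss = add_loss_output(onx, score_name='l2')
train_session = OrtGradientOptimizer(onx_loss, ['coef', 'intercept'])
train_session.fit(X_train, y_train)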
Example n. 35
    def create_net(self, shape, epsilon, precision, ir_version):
        """
            ONNX net                                     IR net

            Input->InstanceNormalization->Output   =>    Input->MVN->ScaleShift(Power)
        """

        #
        #   Create ONNX model
        #

        from onnx import helper
        from onnx import TensorProto

        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)

        scale_const = np.random.randn(shape[1]).astype(np.float32)
        bias_const = np.random.randn(shape[1]).astype(np.float32)

        node_scale_def = helper.make_node(
            'Constant',
            inputs=[],
            outputs=['scale'],
            value=helper.make_tensor(
                name='const_tensor',
                data_type=TensorProto.FLOAT,
                dims=scale_const.shape,
                vals=scale_const.flatten(),
            ),
        )

        node_bias_def = helper.make_node(
            'Constant',
            inputs=[],
            outputs=['bias'],
            value=helper.make_tensor(
                name='const_tensor',
                data_type=TensorProto.FLOAT,
                dims=bias_const.shape,
                vals=bias_const.flatten(),
            ),
        )

        args = dict()
        if epsilon:
            args['epsilon'] = epsilon
        node_def = helper.make_node(
            'InstanceNormalization',
            inputs=['input', 'scale', 'bias'],
            outputs=['output'],
            **args
        )

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            [node_scale_def, node_bias_def, node_def],
            'test_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def, producer_name='test_model')

        #
        #   Create reference IR net
        #
        ref_net = None

        return onnx_net, ref_net
Example n. 36
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.

import onnx
from onnx import helper, TensorProto

INPUT1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [2, 3])
INPUT2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [3, 4])
OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, [2, 4])

nodes = [
    helper.make_node(
        'MatMul',
        ['input1', 'input2'],
        ['output'],
    ),
]
graph_def = helper.make_graph(
    nodes,
    'matmul',
    [
        INPUT1,
        INPUT2,
    ],
    [OUTPUT],
)
model_def = helper.make_model(
    graph_def,
    producer_name='matmul.py',
    opset_imports=[onnx.OperatorSetIdProto(version=12)])
onnx.save(model_def, 'matmul.onnx')
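Before saving, the generated model can also be validated with the standard ONNX checker:

onnx.checker.check_model(model_def)  # raises ValidationError if the model is malformed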
Example n. 37
    def create_net_const(self, input_value, output_value, precision,
                         ir_version):
        """
            ONNX net                                   IR net

            Input->Concat(+NonZero const)->Output   =>    Input->Concat(+const)->Result

        """

        #
        #   Create ONNX model
        #

        import onnx
        from onnx import helper
        from onnx import TensorProto

        concat_axis = 0
        output_shape = list(output_value.shape)
        output_shape[concat_axis] *= 2

        input = helper.make_tensor_value_info('input', TensorProto.FLOAT,
                                              output_value.shape)
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                               output_shape)

        node_const_def = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=['const1'],
            value=helper.make_tensor(
                name='const_tensor',
                data_type=TensorProto.FLOAT,
                dims=input_value.shape,
                vals=input_value.flatten(),
            ),
        )

        node_def = onnx.helper.make_node('NonZero',
                                         inputs=['const1'],
                                         outputs=['nonzero1'])

        node_concat_def = onnx.helper.make_node('Concat',
                                                inputs=['input', 'nonzero1'],
                                                outputs=['output'],
                                                axis=concat_axis)

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            [node_const_def, node_def, node_concat_def],
            'test_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def, producer_name='test_model')

        #
        #   Create reference IR net
        #
        ref_net = None
        if check_ir_version(10, None, ir_version):
            nodes_attributes = {
                'input': {
                    'kind': 'op',
                    'type': 'Parameter'
                },
                'input_data': {
                    'shape': output_value.shape,
                    'kind': 'data'
                },
                'input_const_data': {
                    'kind': 'data',
                    'value': output_value.flatten()
                },
                'const': {
                    'kind': 'op',
                    'type': 'Const'
                },
                'const_data': {
                    'shape': output_value.shape,
                    'kind': 'data'
                },
                'concat': {
                    'kind': 'op',
                    'type': 'Concat',
                    'axis': concat_axis
                },
                'concat_data': {
                    'shape': output_shape,
                    'kind': 'data'
                },
                'result': {
                    'kind': 'op',
                    'type': 'Result'
                }
            }
            ref_net = build_graph(nodes_attributes,
                                  [('input', 'input_data'),
                                   ('input_const_data', 'const'),
                                   ('const', 'const_data'),
                                   ('input_data', 'concat'),
                                   ('const_data', 'concat'),
                                   ('concat', 'concat_data'),
                                   ('concat_data', 'result')])

        return onnx_net, ref_net
Example n. 38
    def create_net(self, shape, ir_version):
        """
            ONNX net                    IR net

            Input->NonZero->Output   =>    Input->NonZero->Result

        """

        #
        #   Create ONNX model
        #

        import onnx
        from onnx import helper
        from onnx import TensorProto

        input = helper.make_tensor_value_info('input', TensorProto.FLOAT,
                                              shape)
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                               shape)

        node_def = onnx.helper.make_node('NonZero',
                                         inputs=['input'],
                                         outputs=['output'])

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            [node_def],
            'test_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def, producer_name='test_model')

        #
        #   Create reference IR net
        #

        ref_net = None
        if check_ir_version(10, None, ir_version):
            nodes_attributes = {
                'input': {
                    'kind': 'op',
                    'type': 'Parameter'
                },
                'input_data': {
                    'shape': shape,
                    'kind': 'data'
                },
                'node': {
                    'kind': 'op',
                    'type': 'NonZero',
                    'version': 'opset3',
                    'output_type': 'i64'
                },
                'node_data': {
                    'shape': [len(shape), np.prod(shape)],
                    'kind': 'data'
                },
                'result': {
                    'kind': 'op',
                    'type': 'Result'
                }
            }

            ref_net = build_graph(nodes_attributes, [('input', 'input_data'),
                                                     ('input_data', 'node'),
                                                     ('node', 'node_data'),
                                                     ('node_data', 'result')])
        return onnx_net, ref_net
Example n. 39
def test_cast_errors():
    from onnx.onnx_cpp2py_export.checker import ValidationError

    np.random.seed(133391)
    input_data = np.ceil(np.random.rand(2, 3, 4) * 16)

    # missing 'to' attribute
    node = onnx.helper.make_node("Cast", inputs=["A"], outputs=["B"])
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(node.output[0], onnx.TensorProto.FLOAT16,
                               input_data.shape)
    ]  # type: ignore

    graph = make_graph([node], "compute_graph", input_tensors, output_tensors)
    model = make_model(graph, producer_name="NgraphBackend")
    with pytest.raises(ValidationError):
        import_onnx_model(model)

    # unsupported data type representation
    node = onnx.helper.make_node("Cast",
                                 inputs=["A"],
                                 outputs=["B"],
                                 to=1.2345)
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(node.output[0], onnx.TensorProto.INT32,
                               input_data.shape)
    ]  # type: ignore

    graph = make_graph([node], "compute_graph", input_tensors, output_tensors)
    model = make_model(graph, producer_name="NgraphBackend")
    with pytest.raises(ValidationError):
        import_onnx_model(model)

    # unsupported input tensor data type:
    node = onnx.helper.make_node("Cast",
                                 inputs=["A"],
                                 outputs=["B"],
                                 to=onnx.TensorProto.INT32)
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.COMPLEX64, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(node.output[0], onnx.TensorProto.INT32,
                               input_data.shape)
    ]  # type: ignore

    graph = make_graph([node], "compute_graph", input_tensors, output_tensors)
    model = make_model(graph, producer_name="NgraphBackend")
    with pytest.raises((RuntimeError, NgraphTypeError)):
        import_onnx_model(model)

    # unsupported output tensor data type:
    node = onnx.helper.make_node("Cast",
                                 inputs=["A"],
                                 outputs=["B"],
                                 to=onnx.TensorProto.COMPLEX128)
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(node.output[0], onnx.TensorProto.COMPLEX128,
                               input_data.shape)
    ]  # type: ignore

    graph = make_graph([node], "compute_graph", input_tensors, output_tensors)
    model = make_model(graph, producer_name="NgraphBackend")
    with pytest.raises(RuntimeError):
        import_onnx_model(model)
Example n. 40
)
nodes.append(layer_norm1)

gathernd1 = helper.make_node(
    "GatherND",
    ["layer_norm1", "unsqueezed_masked_lm_positions"],
    ["output"],
    name="gathernd_1",
    batch_dims=1,
)
nodes.append(gathernd1)

graph_def = helper.make_graph(
    nodes,
    "test-model",
    [X, unsqueezed_masked_lm_positions],
    [Y],
    [layer_norm1_weight_initializer, layer_norm1_bias_initializer],
)

opsets = []
onnxdomain = OperatorSetIdProto()
onnxdomain.version = 12
onnxdomain.domain = ""  # The empty string ("") or absence of this field implies the operator set that is defined as part of the ONNX specification.
opsets.append(onnxdomain)

msdomain = OperatorSetIdProto()
msdomain.version = 1
msdomain.domain = "com.microsoft"

opsets.append(msdomain)
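The fragment above builds graph_def and the opset list but is truncated before a model is created; a hedged sketch of the usual final step (make_model's opset_imports keyword is standard onnx.helper API):

model_def = helper.make_model(graph_def, opset_imports=opsets)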
Example n. 41
    def create_net(self, shape, k, axis, ir_version, largest=None, sorted=None, opset=None):
        """
            ONNX net                    IR net

            Input->TopK->Output   =>    Input->TopK

        """

        #
        #   Create ONNX model
        #

        import onnx
        from onnx import helper
        from onnx import TensorProto

        output_shape = shape.copy()
        if axis is not None:
            output_shape[axis] = k
        else:
            output_shape[-1] = k
        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
        values = helper.make_tensor_value_info('cvalues', TensorProto.FLOAT, output_shape)
        indices = helper.make_tensor_value_info('cindices', TensorProto.INT64, output_shape)

        const1 = np.ones(output_shape).astype(np.int64)
        const2 = np.ones(output_shape).astype(np.float32)

        nodes = list()
        inputs = ['input']
        if opset > 9:
            node_k_def = onnx.helper.make_node(
                'Constant',
                inputs=[],
                outputs=['k'],
                value=helper.make_tensor(
                    name='const_tensor',
                    data_type=TensorProto.INT64,
                    dims=[1],
                    vals=[k],
                ),
            )
            nodes.append(node_k_def)
            inputs.append('k')

        args = dict()
        if opset < 10:
            args['k'] = k
        if axis is not None:
            args['axis'] = axis
        if sorted is not None:
            args['sorted'] = sorted
        if largest is not None:
            args['largest'] = largest

        node_def = onnx.helper.make_node(
            'TopK',
            inputs=inputs,
            outputs=['values', 'indices'],
            **args
        )

        node_const1_def = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=['const1'],
            value=helper.make_tensor(
                name='const_tensor2',
                data_type=TensorProto.INT64,
                dims=const1.shape,
                vals=const1.flatten(),
            ),
        )

        node_add1_def = onnx.helper.make_node(
            'Add',
            inputs=['indices', 'const1'],
            outputs=['cindices']
        )

        node_const2_def = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=['const2'],
            value=helper.make_tensor(
                name='const_tensor3',
                data_type=TensorProto.FLOAT,
                dims=const2.shape,
                vals=const2.flatten(),
            ),
        )

        node_add2_def = onnx.helper.make_node(
            'Add',
            inputs=['values', 'const2'],
            outputs=['cvalues']
        )

        nodes.extend([node_def, node_const1_def, node_add1_def, node_const2_def, node_add2_def])

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            nodes,
            'test_model',
            [input],
            [values, indices],
        )

        # Create the model (ModelProto)
        args = dict(producer_name='test_model')
        if opset:
            args['opset_imports'] = [helper.make_opsetid("", opset)]
        onnx_net = helper.make_model(graph_def, **args)

        #
        #   Create reference IR net
        #

        ref_net = None

        return onnx_net, ref_net
Example n. 42
  def tensorflow_graph_to_onnx_graph(cls, graph_def, output, name="graph"):
    """Function that converts a tensorflow graph to an onnx graph.

    Args:
        graph_def: Tensorflow Graph Proto object.
        output: A Tensorflow NodeDef object specifying which node
          to be taken as output of the ONNX graph.
        name: The name of the output ONNX Graph.

    Returns:
        The equivalent ONNX Graph Proto object.

    """

    # This list holds the protobuf objects of type ValueInfoProto
    # representing the input to the converted ONNX graph.
    inputs_proto = []

    # This list holds the protobuf objects of type NodeProto
    # representing the ops in the converted ONNX graph.
    ops_proto = []

    # This dictionary contains a map from the name of the constant
    # op to the array of values it holds.
    consts = {}

    for node in graph_def.node:
      node = TensorflowNode(node)
      if node.op == "Placeholder":
        # Tensorflow requires dtype to be known.
        # TODO: currently `dtype` is translated to `to`.
        onnx_type = node.attr["to"]
        shape = node.attr["shape"]
        input_proto = make_tensor_value_info(node.name,
                                             onnx_type,
                                             shape)
        inputs_proto.append(input_proto)
      if node.op == "Const":
        consts[node.name] = node.attr["value"]
      elif node.op in TF_OP_STR_TO_ONNX_OP.keys():
        # Remove tensorflow-specific attrs that are not
        # needed/allowed in ONNX.
        attr_to_remove = ["_output_shapes", "T"]
        node.attr = dict(filter(lambda pair: pair[0]
                                not in attr_to_remove, node.attr.items()))

        node_output = node.name
        ops_proto.append(make_node(TF_OP_STR_TO_ONNX_OP[node.op],
                                   node.inputs,
                                   [node_output],
                                   name=node.name,
                                   **node.attr))
      else:
        handler_name = "handle_" + op_name_to_lower(node.op)

        # Check if specialized handler exists.
        if handler_name in dir(cls):
          method_to_call = getattr(cls, handler_name)
          ops_proto.append(method_to_call(node, consts))

    output = TensorflowNode(output)
    # making output proto
    # TODO: deal with multi-output case.
    # TODO: default to BOOL, cf.
    # https://github.com/tensorflow/tensorflow/issues/14769
    output_onnx_type = output.attr.get("T", TensorProto.BOOL)
    output_proto = make_tensor_value_info(output.name,
                                          output_onnx_type,
                                          output.attr["_output_shapes"][0])

    return make_graph(ops_proto,
                      name,
                      inputs_proto,
                      [output_proto])
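A hedged usage sketch of the classmethod above; the enclosing converter class and the output node name are hypothetical:

# hypothetical: graph_def is a frozen tensorflow GraphDef and the
# converter class exposing the classmethod is named Converter
output_node = next(n for n in graph_def.node if n.name == "logits")
onnx_graph = Converter.tensorflow_graph_to_onnx_graph(
    graph_def, output_node, name="converted")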
Example n. 43
graph = helper.make_graph(
    [  # nodes
        # fusable, const_min_negative should be replaced
        helper.make_node("Conv", ["X", "W"], ["conv0_out"], "Conv0"),
        helper.make_node("Clip", ["conv0_out", "const_min", "const_max"],
                         ["clip0_out"], "Clip0"),

        # mutable input. no fusion.
        helper.make_node("Conv", ["X", "W"], ["conv1_out"], "Conv1"),
        helper.make_node("Clip", ["conv1_out", "mutable_min", "const_max"],
                         ["clip1_out"], "Clip1"),

        # fusable. default min/max.
        helper.make_node("Conv", ["X", "W"], ["conv2_out"], "Conv2"),
        helper.make_node("Clip", ["conv2_out"], ["clip2_out"], "Clip2"),
    ],
    "ConvClipFusion",  #name
    [  # inputs
        helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 7]),
        helper.make_tensor_value_info('W', TensorProto.FLOAT, [1, 1, 1]),
        helper.make_tensor_value_info('mutable_min', TensorProto.FLOAT, [1]),
    ],
    [  # outputs
        helper.make_tensor_value_info('clip0_out', TensorProto.FLOAT, None),
        helper.make_tensor_value_info('clip1_out', TensorProto.FLOAT, None),
        helper.make_tensor_value_info('clip2_out', TensorProto.FLOAT, None),
    ],
    [  # initializers
        helper.make_tensor('const_min', TensorProto.FLOAT, [1], [-1.0]),
        helper.make_tensor('const_max', TensorProto.FLOAT, [1], [10.0])
    ])
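make_graph only returns a GraphProto; to exercise the fusion, the graph would typically be wrapped into a ModelProto and checked. A hedged sketch (opset 13 is an assumption; Clip with min/max given as inputs requires opset 11 or later):

model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])
onnx.checker.check_model(model)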
Example n. 44
    def create_conv_transpose(self,
                              ir_version,
                              input_shape,
                              output_shape,
                              kernel_shape,
                              strides,
                              group=1,
                              dilations=None,
                              pads=None,
                              force_output_shape=False,
                              output_padding=None,
                              bias=False,
                              auto_pad=None):
        #
        #   Create ONNX model
        #

        import onnx
        from onnx import helper
        from onnx import TensorProto

        input = helper.make_tensor_value_info('input', TensorProto.FLOAT,
                                              input_shape)
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                               output_shape)

        weights = np.random.randn(*kernel_shape).astype(np.float32)

        node_weights_def = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=['kernel'],
            value=helper.make_tensor(
                name='const_tensor',
                data_type=TensorProto.FLOAT,
                dims=weights.shape,
                vals=weights.flatten(),
            ),
        )

        conv_attrs = {
            'strides': strides,
            'group': group,
            'kernel_shape': kernel_shape[2:],  # As we have NCHW layout
        }

        if pads is not None:
            if not force_output_shape:
                conv_attrs.update({'pads': pads})
        else:
            pads = np.zeros(2 * (len(input_shape) - 2))
        _pads = np.array(pads).reshape([2, -1])
        if output_padding is not None:
            conv_attrs.update({'output_padding': output_padding})
        if dilations is not None:
            conv_attrs.update({'dilations': dilations})
        else:
            dilations = np.ones(len(input_shape) - 2)
        if force_output_shape:
            conv_attrs.update({'output_shape': output_shape[2:]})

        if auto_pad:
            conv_attrs.update({'auto_pad': auto_pad})

        nodes = [node_weights_def]
        if bias:
            bias_const = np.random.randint(-10, 10,
                                           kernel_shape[0]).astype(np.float32)

            node_bias_def = onnx.helper.make_node(
                'Constant',
                inputs=[],
                outputs=['bias'],
                value=helper.make_tensor(
                    name='const_tensor',
                    data_type=TensorProto.FLOAT,
                    dims=bias_const.shape,
                    vals=bias_const.flatten(),
                ),
            )
            node_conv_transpose = onnx.helper.make_node(
                'ConvTranspose',
                inputs=['input', 'kernel', 'bias'],
                outputs=['output'],
                **conv_attrs)
            nodes.extend([node_bias_def, node_conv_transpose])
        else:
            node_conv_transpose = onnx.helper.make_node(
                'ConvTranspose',
                inputs=['input', 'kernel'],
                outputs=['output'],
                **conv_attrs)
            nodes.append(node_conv_transpose)

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            nodes,
            'test_conv_transpose_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def,
                                     producer_name='test_conv_transpose_model')

        #
        #   Create reference IR net
        #   Please specify 'type': 'Input' for the input node.
        #   Moreover, do not forget to validate ALL layer attributes!
        #
        ref_net = None

        return onnx_net, ref_net
Example n. 45
    def test_fan_in(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)),
                name="0")]
        nodes[1:] = [
            helper.make_node('Identity', ['const1'], ['identity1'], name="1")
        ]
        nodes[2:] = [
            helper.make_node('Identity', ['identity1'], ['identity2'],
                             name="2")
        ]
        nodes[3:] = [
            helper.make_node('Max', ['input1', 'identity2'], ['max0'],
                             name="3")
        ]
        nodes[4:] = [
            helper.make_node('LeakyRelu', ['max0'], ['leak0'], name="4")
        ]
        nodes[5:] = [
            helper.make_node('LeakyRelu', ['leak0'], ['leak1'], name="5")
        ]
        nodes[6:] = [
            helper.make_node('LeakyRelu', ['leak0'], ['leak2'], name="6")
        ]
        nodes[7:] = [
            helper.make_node('Transpose', ['leak1'], ['transpose0'],
                             perm=[0, 2, 3, 1],
                             name="7")
        ]
        nodes[8:] = [
            helper.make_node('Transpose', ['leak2'], ['transpose1'],
                             perm=[0, 2, 3, 1],
                             name="8")
        ]
        nodes[9:] = [
            helper.make_node('Add', ['transpose0', 'transpose1'], ['add0'],
                             name="9")
        ]
        nodes[10:] = [
            helper.make_node('Transpose', ['add0'], ['transpose2'],
                             perm=[0, 3, 1, 2],
                             name="10")
        ]
        nodes[11:] = [
            helper.make_node('Conv', ['transpose2'], ['output0'], name="11")
        ]

        input0 = helper.make_tensor_value_info('input1',
                                               onnx_proto.TensorProto.FLOAT,
                                               [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0',
                                                onnx_proto.TensorProto.FLOAT,
                                                [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)

        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
        new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
        new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
        graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
        self.assertEqual(len(new_nodes), 7)
        self.assertIsNotNone(model)
Example n. 46
# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.

import onnx
from onnx import helper, TensorProto

INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.INT64, [1])
INPUT_2 = helper.make_tensor_value_info('input2', TensorProto.INT64, [1])
OUTPUT = helper.make_tensor_value_info('output', TensorProto.INT64, [1])

nodes = [
    helper.make_node(
        'Add',
        ['input1', 'input2'],
        ['output'],
    ),
]
graph_def = helper.make_graph(
    nodes,
    'add',
    [INPUT_1, INPUT_2],
    [OUTPUT],
)
model_def = helper.make_model(
    graph_def,
    producer_name='add_int64.py',
    opset_imports=[onnx.OperatorSetIdProto(version=12)])
onnx.save(model_def, 'add_int64.onnx')
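To sanity-check the saved model end to end, it can be executed with onnxruntime (assuming the onnxruntime package is available):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('add_int64.onnx',
                            providers=['CPUExecutionProvider'])
result = sess.run(None, {'input1': np.array([2], dtype=np.int64),
                         'input2': np.array([3], dtype=np.int64)})
print(result[0])  # [5]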
Example n. 47
    def construct_model_attention_and_matmul(self, output_model_path):
        #      (input)
        #         |
        #     Attention
        #         |
        #       MatMul
        #         |
        #      (output)
        input_name = 'input'
        output_name = 'output'
        initializers = []

        def make_attention_node(input_name, weight_shape, weight_name,
                                bias_shape, bias_name, output_name):
            weight_data = np.random.normal(0, 0.1,
                                           weight_shape).astype(np.float32)
            initializers.append(
                onnx.numpy_helper.from_array(weight_data, name=weight_name))

            bias_data = np.random.normal(0, 0.1, bias_shape).astype(np.float32)
            initializers.append(
                onnx.numpy_helper.from_array(bias_data, name=bias_name))

            return onnx.helper.make_node('Attention',
                                         [input_name, weight_name, bias_name],
                                         [output_name])

        def make_matmul_node(input_name, weight_shape, weight_name,
                             output_name):
            weight_data = np.random.normal(0, 0.1,
                                           weight_shape).astype(np.float32)
            initializers.append(
                onnx.numpy_helper.from_array(weight_data, name=weight_name))

            return onnx.helper.make_node('MatMul', [input_name, weight_name],
                                         [output_name])

        # make attention node
        attention_output_name = "attention_output"
        attention_node = make_attention_node(input_name, [10, 30],
                                             'qkv.weight', [30], 'qkv.bias',
                                             attention_output_name)
        attention_node.domain = "com.microsoft"
        attention_node.attribute.extend(
            [helper.make_attribute("num_heads", 5)])

        # make matmul node
        matmul_node = make_matmul_node(attention_output_name, [10, 10],
                                       'matmul.weight', output_name)

        # make graph
        input_tensor = helper.make_tensor_value_info(input_name,
                                                     TensorProto.FLOAT,
                                                     [1, -1, 10])
        output_tensor = helper.make_tensor_value_info(output_name,
                                                      TensorProto.FLOAT,
                                                      [1, -1, 10])
        graph_name = 'attention_test'
        graph = helper.make_graph([attention_node, matmul_node],
                                  graph_name, [input_tensor], [output_tensor],
                                  initializer=initializers)
        model = helper.make_model(graph,
                                  opset_imports=[helper.make_opsetid("", 13)])
        model.ir_version = onnx.IR_VERSION
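        # Hedged note: the Attention node above lives in the com.microsoft
        # domain while the model only imports the default domain, so a strict
        # onnx.checker pass would also need that domain declared. Sketch
        # (domain version 1 is an assumption):
        ms_opset = model.opset_import.add()
        ms_opset.domain = "com.microsoft"
        ms_opset.version = 1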

        onnx.save(model, output_model_path)
Example n. 48
    def construct_model_gemm(self, output_model_path):
        #      (input)
        #         |
        #        GEMM
        #         |
        #        Clip
        #         |
        #        GEMM
        #         |
        #      (output)
        input_name = 'input'
        output_name = 'output'
        initializers = []

        def make_gemm(input_name, weight_shape, weight_name, bias_shape,
                      bias_name, output_name):
            weight_data = np.random.normal(0, 0.1,
                                           weight_shape).astype(np.float32)
            initializers.append(
                onnx.numpy_helper.from_array(weight_data, name=weight_name))

            bias_data = np.random.normal(0, 0.1, bias_shape).astype(np.float32)
            initializers.append(
                onnx.numpy_helper.from_array(bias_data, name=bias_name))

            return onnx.helper.make_node('Gemm',
                                         [input_name, weight_name, bias_name],
                                         [output_name],
                                         alpha=1.0,
                                         beta=1.0,
                                         transB=1)

        # make gemm1 node
        gemm1_output_name = "gemm1_output"
        gemm1_node = make_gemm(input_name, [100, 10], 'linear1.weight', [100],
                               'linear1.bias', gemm1_output_name)

        # make Clip
        clip_min_name = 'clip_min'
        clip_max_name = 'clip_max'
        clip_output_name = 'clip_output'
        clip_inputs = [gemm1_output_name, clip_min_name, clip_max_name]
        clip_outputs = [clip_output_name]
        initializers.append(
            onnx.numpy_helper.from_array(np.array(-1.0, dtype=np.float32),
                                         name=clip_min_name))
        initializers.append(
            onnx.numpy_helper.from_array(np.array(1.0, dtype=np.float32),
                                         name=clip_max_name))
        clip_node = onnx.helper.make_node('Clip', clip_inputs, clip_outputs)

        # make gemm2 node
        gemm2_node = make_gemm(clip_output_name, [10, 100], 'linear2.weight',
                               [10], 'linear2.bias', output_name)

        # make graph
        input_tensor = helper.make_tensor_value_info(input_name,
                                                     TensorProto.FLOAT,
                                                     [-1, 10])
        output_tensor = helper.make_tensor_value_info(output_name,
                                                      TensorProto.FLOAT,
                                                      [-1, 10])
        graph_name = 'gemm_test'
        graph = helper.make_graph([gemm1_node, clip_node, gemm2_node],
                                  graph_name, [input_tensor], [output_tensor],
                                  initializer=initializers)
        model = helper.make_model(graph,
                                  opset_imports=[helper.make_opsetid("", 13)])
        model.ir_version = 7  # use stable onnx ir version

        onnx.save(model, output_model_path)
Example n. 49
def convert_topology(topology,
                     model_name,
                     doc_string,
                     target_opset,
                     targeted_onnx,
                     channel_first_inputs=None):
    '''
    This function converts the Topology object defined in _parser.py into an ONNX model (type: ModelProto).
    :param topology: The Topology object we are going to convert
    :param model_name: GraphProto's name. Let "model" denote the returned model. The string "model_name" would be
    assigned to "model.graph.name."
    :param doc_string: A string attached to the produced model
    :param target_opset: number, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3.
    :param targeted_onnx: (deprecated) A string specifying the targeted ONNX version of the produced model. Possible
    values include '1.1.2', '1.2', and so on.
    :return: an ONNX ModelProto
    '''
    if targeted_onnx is not None and StrictVersion(
            targeted_onnx) != StrictVersion(onnx.__version__):
        warnings.warn(
            'targeted_onnx is deprecated, please specify target_opset for the target model.\n'
            +
            '*** ONNX version conflict found. The installed version is %s while the targeted version is %s'
            % (onnx.__version__, targeted_onnx))

    opset_from_onnx_version = onnx.defs.onnx_opset_version()
    if target_opset is None:
        target_opset = opset_from_onnx_version
    elif target_opset > opset_from_onnx_version:
        raise RuntimeError(
            "target_opset %d is higher than the latest opset %d supported "
            "by the installed onnx package." %
            (target_opset, opset_from_onnx_version))

    topology._initialize_graph_status_for_traversing()

    container = ModelComponentContainer(target_opset)

    # Put roots and leaves as ONNX's model into buffers. They will be added into ModelComponentContainer later.
    tensor_inputs = {}
    other_inputs = {}
    tensor_outputs = {}
    other_outputs = {}
    for scope in topology.scopes:
        for variable in scope.variables.values():
            if variable.is_root:
                if isinstance(variable.type,
                              (TensorType, Int64Type, FloatType, StringType)):
                    tensor_inputs[variable.raw_name] = variable
                else:
                    other_inputs[variable.raw_name] = variable
            if variable.is_leaf:
                if isinstance(variable.type,
                              (TensorType, Int64Type, FloatType, StringType)):
                    tensor_outputs[variable.raw_name] = variable
                else:
                    other_outputs[variable.raw_name] = variable

    # Add roots to the graph according to their order in the original model
    invalid_name = []
    nhwc_inputs = []
    if channel_first_inputs is None:
        channel_first_inputs = []
    for name in topology.raw_model.input_names:
        # Check input naming convention
        input_name = name.replace('_', '').replace(":", "").replace("/", "")
        if input_name and (input_name[0].isdigit() or
                           (not input_name.isalnum())):
            invalid_name.append(name)
        if name in tensor_inputs:
            onnx_input = tensor_inputs[name]  # type: Variable
            if name in channel_first_inputs or \
                    (name.endswith(':0') and name[:-2] in channel_first_inputs):
                nhwc_inputs.append(onnx_input.full_name)
                s = onnx_input.type.shape
                onnx_input.type.shape = [s[0], s[3], s[1], s[2]]
            container.add_input(onnx_input)

    if invalid_name:
        warnings.warn(
            'Some input names are not compliant with ONNX naming convention: %s'
            % invalid_name)
    for name in topology.raw_model.input_names:
        if name in other_inputs:
            container.add_input(other_inputs[name])

    # Add leaves to the graph according to their order in the original model
    invalid_name = []
    for name in topology.raw_model.output_names:
        # Check output naming convention
        output_name = name.replace('_', '').replace(":", "").replace("/", "")
        if output_name and (output_name[0].isdigit() or
                            (not output_name.isalnum())):
            invalid_name.append(name)
        if name in tensor_outputs:
            container.add_output(tensor_outputs[name])
    if invalid_name:
        warnings.warn(
            'Some output names are not compliant with ONNX naming convention: %s'
            % invalid_name)
    for name in topology.raw_model.output_names:
        if name in other_outputs:
            container.add_output(other_outputs[name])

    # Traverse the graph from roots to leaves
    for operator in topology.topological_operator_iterator():
        scope = next(scope for scope in topology.scopes
                     if scope.name == operator.scope)
        if operator.type in topology.custom_conversion_functions:
            topology.custom_conversion_functions[operator.type](scope,
                                                                operator,
                                                                container)
        else:
            # Convert the selected operator into some ONNX objects and save them into the container
            _registration.get_converter(operator.type)(scope, operator,
                                                       container)

    # When calling ModelComponentContainer's add_initializer(...), nothing is added into the input list.
    # However, for ONNX target opset < 9, initializers should also be model's (GraphProto) inputs.
    # Thus, we create ValueInfoProto objects from initializers (type: TensorProto) directly and
    # then add them into model's input list.
    extra_inputs = []  # ValueInfoProto list of the initializers
    for tensor in container.initializers:
        # Sometimes (especially when creating optional input values such as RNN's initial hidden state), an initializer
        # is also one of the original model's input, so it has been added into the container's input list. If this is
        # the case, we need to skip one iteration to avoid duplicated inputs.
        if tensor.name in [value_info.name for value_info in container.inputs]:
            continue

        # Initializers are always tensors so we can just call make_tensor_value_info(...)
        value_info = helper.make_tensor_value_info(tensor.name,
                                                   tensor.data_type,
                                                   tensor.dims)
        extra_inputs.append(value_info)

    # enable the ONNX optimizations
    nodes = optimize_onnx(container.nodes, nhwc_inputs,
                          container.inputs + extra_inputs, container.outputs)

    # Create a graph from its main components
    if container.target_opset < 9:
        # Before ONNX opset 9, initializers need to be passed in with inputs
        graph = helper.make_graph(nodes, model_name,
                                  container.inputs + extra_inputs,
                                  container.outputs, container.initializers)
    else:
        # In ONNX opset 9 and above, initializers are included as operator
        # inputs, and therefore do not need to be passed as extra_inputs
        graph = helper.make_graph(nodes, model_name, container.inputs,
                                  container.outputs, container.initializers)

    # Add extra information related to the graph
    graph.value_info.extend(container.value_info)

    # Create model
    onnx_model = helper.make_model(graph)

    # Merge operator sets for the same domain, the largest version number would be kept
    purified_operator_set = dict()
    for op_domain, op_version in container.node_domain_version_pair_sets:
        if op_domain not in purified_operator_set:
            purified_operator_set[op_domain] = op_version
        else:
            purified_operator_set[op_domain] = max(
                purified_operator_set[op_domain], op_version)

    # Fill operator sets
    i = 0
    for op_domain, op_version in purified_operator_set.items():
        if i == 0 and len(onnx_model.opset_import) == 1:
            # Overwrite the default operator set created by helper.make_model(...)
            op_set = onnx_model.opset_import[0]
        else:
            # Just create one ONNX element in opset_import
            op_set = onnx_model.opset_import.add()
        op_set.domain = op_domain
        op_set.version = op_version
        i += 1
        if container.target_opset < op_version:
            raise RuntimeError(
                ('The specified opset %d is too low to convert this model, ' +
                 'which requires at least opset %d.') %
                (container.target_opset, op_version))
        elif container.target_opset > op_version:
            getLogger('onnxmltools').warning(
                'The maximum opset needed by this model is only %d.' %
                op_version)

    # Add extra information
    add_metadata_props(onnx_model, topology.metadata_props, target_opset)
    onnx_model.ir_version = onnx_proto.IR_VERSION
    onnx_model.producer_name = utils.get_producer()
    onnx_model.producer_version = utils.get_producer_version()
    onnx_model.domain = utils.get_domain()
    onnx_model.model_version = utils.get_model_version()
    onnx_model.doc_string = doc_string

    return onnx_model
Example n. 50
    def make_graph(self, doc, graph_name="tf2onnx"):
        """
        Create GraphProto for onnx from internal graph.
        Args:
            doc: text for doc string of the graph
            graph_name: name of the produced ONNX graph
        """
        self.update_proto()

        # TODO: we'd want to do something like this so that transpose optimizer is active
        # for  all (unit) tests
        # if optimize:
        #    from tf2onnx.optimizer.transpose_optimizer import TransposeOptimizer
        #    optimizer = TransposeOptimizer(self, False)
        #    optimizer.optimize()

        # create output_tensor_values
        output_tensor_values = []
        for name in self.output_names:
            dtype = self.get_dtype(name)
            if not dtype:
                raise ValueError("cannot found the output dtype for " + name)
            v = helper.make_tensor_value_info(
                name, dtype, utils.make_onnx_shape(self.get_shape(name)))
            output_tensor_values.append(v)

        # update attributes
        ops = []
        all_inputs = set()
        for op in self.get_nodes():
            all_inputs |= set(op.input)
            all_inputs |= set(op.get_implicit_inputs())
            onnx_op = op.op
            ops.append(onnx_op)

        # create input_tensor_values, initializers
        # if initializer is not used as input by any node, then it will be ignored
        initializers = [
            i for i in list(self._initializers.values())
            if i.name in all_inputs
        ]
        input_with_initializers = []
        for initializer in initializers:
            shape = self.get_shape(initializer.name)
            if shape and list(shape) != initializer.dims:
                raise ValueError("initializer shape is inconsistent for " +
                                 initializer.name)
            val = helper.make_tensor_value_info(
                initializer.name, initializer.data_type,
                utils.make_onnx_shape(initializer.dims))
            input_with_initializers.append(val)

        input_with_initializers.extend(list(self._model_inputs.values()))

        # create model proto
        graph = helper.make_graph(ops,
                                  graph_name,
                                  input_with_initializers,
                                  output_tensor_values,
                                  initializer=initializers,
                                  doc_string=doc)

        return graph
def create_test_onnx_models():
    models = {}
    # Input model 1
    add = onnx.helper.make_node("Add", inputs=["in1", "in2"], outputs=["add_out"])
    split = onnx.helper.make_node("Split", inputs=["add_out"],
                                  outputs=["out1", "out2"], name="split1", axis=0)
    relu = onnx.helper.make_node("Relu", inputs=["in3"], outputs=["out3"])
    mul = onnx.helper.make_node("Mul", inputs=["add_out", "add_out"], outputs=["out4"])

    input_tensors = [
        make_tensor_value_info("in1", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("in2", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("in3", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    output_tensors = [
        make_tensor_value_info("out1", onnx.TensorProto.FLOAT, (1, 2)),
        make_tensor_value_info("out2", onnx.TensorProto.FLOAT, (1, 2)),
        make_tensor_value_info("out3", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("out4", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    graph = make_graph([add, split, relu, mul], "test_graph", input_tensors, output_tensors)
    models["input_model.onnx"] = make_model(graph, producer_name="ONNX Importer",
                                            opset_imports=[onnx.helper.make_opsetid("", 13)])

    # Input model 2
    split_2 = onnx.helper.make_node("Split", inputs=["add_out"],
                                    outputs=["sp_out1", "sp_out2"], name="split2", axis=0)
    abs = onnx.helper.make_node("Abs", inputs=["sp_out1"], outputs=["out1"], name="abs1")
    sin = onnx.helper.make_node("Sin", inputs=["sp_out2"], outputs=["out2"])

    input_tensors = [
        make_tensor_value_info("in1", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("in2", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    output_tensors = [
        make_tensor_value_info("out1", onnx.TensorProto.FLOAT, (1, 2)),
        make_tensor_value_info("out2", onnx.TensorProto.FLOAT, (1, 2)),
    ]
    graph = make_graph([add, split_2, abs, sin], "test_graph_2", input_tensors, output_tensors)
    models["input_model_2.onnx"] = make_model(graph, producer_name="ONNX Importer",
                                              opset_imports=[onnx.helper.make_opsetid("", 13)])

    # Expected for extract_subgraph
    input_tensors = [
        make_tensor_value_info("in1", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("in2", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    output_tensors = [
        make_tensor_value_info("add_out", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    graph = make_graph([add], "test_graph", input_tensors, output_tensors)
    models["extract_subgraph.onnx"] = make_model(graph, producer_name="ONNX Importer",
                                                 opset_imports=[onnx.helper.make_opsetid("", 13)])

    # Expected for extract_subgraph 2
    input_tensors = [
        make_tensor_value_info("in1", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("in2", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("in3", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    output_tensors = [
        make_tensor_value_info("out3", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("add_out", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    graph = make_graph([add, relu], "test_graph", input_tensors, output_tensors)
    models["extract_subgraph_2.onnx"] = make_model(graph, producer_name="ONNX Importer",
                                                   opset_imports=[onnx.helper.make_opsetid("", 13)])

    # Expected for extract_subgraph 3
    input_tensors = [
        make_tensor_value_info("out1/placeholder_port_0", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    output_tensors = [
        make_tensor_value_info("out1", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("out2", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    expected_split = onnx.helper.make_node("Split", inputs=["out1/placeholder_port_0"],
                                           outputs=["out1", "out2"], name="split1", axis=0)
    graph = make_graph([expected_split], "test_graph", input_tensors, output_tensors)
    models["extract_subgraph_3.onnx"] = make_model(graph, producer_name="ONNX Importer",
                                                   opset_imports=[onnx.helper.make_opsetid("", 13)])

    # Expected for extract_subgraph 4
    input_tensors = [
        make_tensor_value_info("out4/placeholder_port_0", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("out4/placeholder_port_1", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("out1/placeholder_port_0", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    output_tensors = [
        make_tensor_value_info("out1", onnx.TensorProto.FLOAT, (1, 2)),
        make_tensor_value_info("out2", onnx.TensorProto.FLOAT, (1, 2)),
        make_tensor_value_info("out4", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    expected_split = onnx.helper.make_node("Split", inputs=["out1/placeholder_port_0"],
                                           outputs=["out1", "out2"])
    expected_mul = onnx.helper.make_node("Mul", inputs=["out4/placeholder_port_0", "out4/placeholder_port_1"],
                                         outputs=["out4"])
    graph = make_graph([expected_split, expected_mul], "test_graph", input_tensors, output_tensors)
    models["extract_subgraph_4.onnx"] = make_model(graph, producer_name="ONNX Importer",
                                                   opset_imports=[onnx.helper.make_opsetid("", 13)])

    # Expected for test_override_all_outputs
    input_tensors = [
        make_tensor_value_info("in1", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("in2", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("in3", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    output_tensors = [
        make_tensor_value_info("out3", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("add_out", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    graph = make_graph([add, relu], "test_graph", input_tensors, output_tensors)
    models["test_override_all_outputs.onnx"] = make_model(graph, producer_name="ONNX Importer",
                                                          opset_imports=[onnx.helper.make_opsetid("", 13)])

    # Expected for test_override_all_outputs 2
    input_tensors = [
        make_tensor_value_info("in1", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("in2", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    output_tensors = [
        make_tensor_value_info("out4", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    graph = make_graph([add, mul], "test_graph", input_tensors, output_tensors)
    models["test_override_all_outputs_2.onnx"] = make_model(graph, producer_name="ONNX Importer",
                                                            opset_imports=[onnx.helper.make_opsetid("", 13)])

    # Expected for test_override_all_inputs
    input_tensors = [
        make_tensor_value_info("in3", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("out1/placeholder_port_0", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("out4/placeholder_port_0", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("out4/placeholder_port_1", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    output_tensors = [
        make_tensor_value_info("out1", onnx.TensorProto.FLOAT, (1, 2)),
        make_tensor_value_info("out2", onnx.TensorProto.FLOAT, (1, 2)),
        make_tensor_value_info("out3", onnx.TensorProto.FLOAT, (2, 2)),
        make_tensor_value_info("out4", onnx.TensorProto.FLOAT, (2, 2)),
    ]
    expected_split = onnx.helper.make_node("Split", inputs=["out1/placeholder_port_0"],
                                           outputs=["out1", "out2"])
    expected_mul = onnx.helper.make_node("Mul", inputs=["out4/placeholder_port_0", "out4/placeholder_port_1"],
                                         outputs=["out4"])
    graph = make_graph([expected_split, relu, expected_mul], "test_graph", input_tensors, output_tensors)
    models["test_override_all_inputs.onnx"] = make_model(graph, producer_name="ONNX Importer",
                                                         opset_imports=[onnx.helper.make_opsetid("", 13)])

    # test partial shape
    input_tensors = [
        make_tensor_value_info("in1", onnx.TensorProto.FLOAT, (8, 16)),
        make_tensor_value_info("in2", onnx.TensorProto.FLOAT, (8, 16)),
        make_tensor_value_info("in3", onnx.TensorProto.FLOAT, (4, 6)),
    ]
    output_tensors = [
        make_tensor_value_info("out1", onnx.TensorProto.FLOAT, (4, 16)),
        make_tensor_value_info("out2", onnx.TensorProto.FLOAT, (4, 16)),
        make_tensor_value_info("out3", onnx.TensorProto.FLOAT, (4, 6)),
        make_tensor_value_info("out4", onnx.TensorProto.FLOAT, (8, 16)),
    ]
    graph = make_graph([add, split, relu, mul], "test_graph", input_tensors, output_tensors)
    models["test_partial_shape.onnx"] = make_model(graph, producer_name="ONNX Importer",
                                                   opset_imports=[onnx.helper.make_opsetid("", 13)])

    return models
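# The dictionary returned above maps file names to in-memory ModelProto
# objects; a test harness would typically write them to disk before running
# the extraction scenarios. A minimal sketch (directory name hypothetical):
import os
import onnx

def save_models(models, directory="model_zoo"):
    os.makedirs(directory, exist_ok=True)
    for file_name, model in models.items():
        onnx.save(model, os.path.join(directory, file_name))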
Example n. 52
    def create_net(self, shape, ir_version, opset, min=None, max=None):
        """
            ONNX net                    IR net

            Input->Clip->Output   =>    Input->Clamp

        """

        #
        #   Create ONNX model
        #

        import onnx
        from onnx import helper
        from onnx import TensorProto

        input = helper.make_tensor_value_info('input', TensorProto.FLOAT,
                                              shape)
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                               shape)

        nodes = []
        if opset < 11:
            args = dict()
            if min is not None:
                args['min'] = min
            if max is not None:
                args['max'] = max
            node_def = onnx.helper.make_node('Clip',
                                             inputs=['input'],
                                             outputs=['output'],
                                             **args)
            nodes.append(node_def)
        else:
            clip_inputs = ['input']
            if min is not None:
                node_min_def = onnx.helper.make_node(
                    'Constant',
                    inputs=[],
                    outputs=['min_const'],
                    value=helper.make_tensor(
                        name='const_tensor',
                        data_type=TensorProto.FLOAT,
                        dims=[],
                        vals=[min],
                    ),
                )
                clip_inputs.append('min_const')
                nodes.append(node_min_def)
            else:
                clip_inputs.append('')
            if max is not None:
                node_max_def = onnx.helper.make_node(
                    'Constant',
                    inputs=[],
                    outputs=['max_const'],
                    value=helper.make_tensor(
                        name='const_tensor',
                        data_type=TensorProto.FLOAT,
                        dims=[],
                        vals=[max],
                    ),
                )
                clip_inputs.append('max_const')
                nodes.append(node_max_def)
            node_def = onnx.helper.make_node('Clip',
                                             inputs=clip_inputs,
                                             outputs=['output'])
            nodes.append(node_def)

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            nodes,
            'test_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        args = dict(producer_name='test_model')
        if opset:
            args['opset_imports'] = [helper.make_opsetid("", opset)]
        onnx_net = helper.make_model(graph_def, **args)

        #
        #   Create reference IR net
        #

        ref_net = None

        if check_ir_version(10, None, ir_version):
            if opset < 11 or (min is not None and max is not None):
                nodes_attributes = {
                    'input': {
                        'kind': 'op',
                        'type': 'Parameter'
                    },
                    'input_data': {
                        'shape': shape,
                        'kind': 'data'
                    },
                    'node': {
                        'kind': 'op',
                        'type': 'Clamp',
                        'min': min if min is not None else -3.4028235e+38,
                        'max': max if max is not None else 3.4028235e+38
                    },
                    'node_data': {
                        'shape': shape,
                        'kind': 'data'
                    },
                    'result': {
                        'kind': 'op',
                        'type': 'Result'
                    }
                }
                ref_net = build_graph(nodes_attributes,
                                      [('input', 'input_data'),
                                       ('input_data', 'node'),
                                       ('node', 'node_data'),
                                       ('node_data', 'result')])
            else:
                nodes_attributes = {
                    'input': {
                        'kind': 'op',
                        'type': 'Parameter'
                    },
                    'input_data': {
                        'shape': shape,
                        'kind': 'data'
                    },
                    'input_const_data': {
                        'kind': 'data',
                        'value': [min] if min is not None else [max]
                    },
                    'const': {
                        'kind': 'op',
                        'type': 'Const'
                    },
                    'const_data': {
                        'shape': [],
                        'kind': 'data'
                    },
                    'node': {
                        'kind': 'op',
                        'type': 'Minimum' if max is not None else 'Maximum'
                    },
                    'node_data': {
                        'shape': shape,
                        'kind': 'data'
                    },
                    'result': {
                        'kind': 'op',
                        'type': 'Result'
                    }
                }
                ref_net = build_graph(nodes_attributes,
                                      [('input', 'input_data'),
                                       ('input_const_data', 'const'),
                                       ('const', 'const_data'),
                                       ('input_data', 'node'),
                                       ('const_data', 'node'),
                                       ('node', 'node_data'),
                                       ('node_data', 'result')])

        return onnx_net, ref_net
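# The opset branch in create_net mirrors the ONNX Clip change: through opset
# 10 the bounds are attributes, from opset 11 they are optional inputs, and
# an empty input name skips a bound. A minimal sketch of both encodings:
from onnx import helper

clip_v6 = helper.make_node('Clip', inputs=['input'], outputs=['output'],
                           min=0.0, max=6.0)            # opset < 11
clip_v11 = helper.make_node('Clip', inputs=['input', 'min_const', ''],
                            outputs=['output'])          # opset >= 11, no max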
Example n. 53
    def _test_overlapping_names(
        self,
        inputs0: List[Text] = ['i0', 'i1'],
        inputs1: List[Text] = ['i2', 'i3'],
        outputs0: List[Text] = ['o0', 'o1'],
        outputs1: List[Text] = ['o2', 'o3'],
        value_info0: List[Text] = ['v0', 'v1'],
        value_info1: List[Text] = ['v2', 'v3'],
        initializer0: List[Text] = ['init0', 'init1'],
        initializer1: List[Text] = ['init2', 'init3'],
        sparse_initializer0: List[Text] = ['sparse_init0', 'sparse_init1'],
        sparse_initializer1: List[Text] = ['sparse_init2', 'sparse_init3'],
    ) -> None:
        n0 = [
            helper.make_node('Identity',
                             inputs=[inputs0[i]],
                             outputs=[outputs0[i]])
            for i in range(len(inputs0))
        ]
        i0 = [
            helper.make_tensor_value_info(inputs0[i], TensorProto.FLOAT, [])
            for i in range(len(inputs0))
        ]
        o0 = [
            helper.make_tensor_value_info(outputs0[i], TensorProto.FLOAT, [])
            for i in range(len(outputs0))
        ]
        vi0 = [
            helper.make_tensor_value_info(value_info0[i], TensorProto.FLOAT,
                                          []) for i in range(len(value_info0))
        ]
        init0 = [
            helper.make_tensor(name=initializer0[i],
                               data_type=TensorProto.INT64,
                               dims=(),
                               vals=[1]) for i in range(len(initializer0))
        ]

        sparse_init0 = [
            _make_sparse_tensor(sparse_initializer0[i])
            for i in range(len(sparse_initializer0))
        ]

        n1 = [
            helper.make_node('Identity',
                             inputs=[inputs1[i]],
                             outputs=[outputs1[i]])
            for i in range(len(inputs1))
        ]
        i1 = [
            helper.make_tensor_value_info(inputs1[i], TensorProto.FLOAT, [])
            for i in range(len(inputs1))
        ]
        o1 = [
            helper.make_tensor_value_info(outputs1[i], TensorProto.FLOAT, [])
            for i in range(len(outputs1))
        ]
        vi1 = [
            helper.make_tensor_value_info(value_info1[i], TensorProto.FLOAT,
                                          []) for i in range(len(value_info1))
        ]
        init1 = [
            helper.make_tensor(name=initializer1[i],
                               data_type=TensorProto.INT64,
                               dims=(),
                               vals=[1]) for i in range(len(initializer1))
        ]
        sparse_init1 = [
            _make_sparse_tensor(sparse_initializer1[i])
            for i in range(len(sparse_initializer1))
        ]

        ops = [helper.make_opsetid("", 10)]
        m0 = helper.make_model(helper.make_graph(
            nodes=n0,
            name='g0',
            inputs=i0,
            outputs=o0,
            value_info=vi0,
            initializer=init0,
            sparse_initializer=sparse_init0),
                               producer_name='test',
                               opset_imports=ops)
        m1 = helper.make_model(helper.make_graph(
            nodes=n1,
            name='g1',
            inputs=i1,
            outputs=o1,
            value_info=vi1,
            initializer=init1,
            sparse_initializer=sparse_init1),
                               producer_name='test',
                               opset_imports=ops)

        overlap = compose.check_overlapping_names(m0.graph, m1.graph)
        i = 0

        overlapping_inputs = list(set(inputs0) & set(inputs1))
        overlapping_outputs = list(set(outputs0) & set(outputs1))
        overlapping_edges = list(set(overlapping_inputs + overlapping_outputs))
        if len(overlapping_edges) > 0:
            self.assertEqual(overlap[i], ('edge', overlapping_edges))
            i += 1

        overlapping_vis = list(set(value_info0) & set(value_info1))
        if len(overlapping_vis) > 0:
            self.assertEqual(overlap[i], ('value_info', overlapping_vis))
            i += 1

        overlapping_init = list(set(initializer0) & set(initializer1))
        if len(overlapping_init) > 0:
            self.assertEqual(overlap[i], ('initializer', overlapping_init))
            i += 1

        overlapping_sparse_init = list(
            set(sparse_initializer0) & set(sparse_initializer1))
        if len(overlapping_sparse_init) > 0:
            expected_overlap = []
            for overlapping_name in overlapping_sparse_init:
                expected_overlap.append(overlapping_name + '_values')
                expected_overlap.append(overlapping_name + '_idx')
            self.assertEqual(overlap[i],
                             ('sparse_initializer', expected_overlap))
            i += 1

        m0_new = compose.add_prefix(m0, prefix='g0/')
        overlap = compose.check_overlapping_names(m0_new.graph, m1.graph)
        self.assertEqual(0, len(overlap))
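# check_overlapping_names and add_prefix come from onnx.compose; prefixing,
# as at the end of the test above, is the standard remedy before merging two
# models that reuse names. A small sketch (tiny hypothetical models):
from onnx import TensorProto, compose, helper

def _tiny(graph_name):
    node = helper.make_node('Identity', ['x'], ['y'])
    graph = helper.make_graph(
        [node], graph_name,
        [helper.make_tensor_value_info('x', TensorProto.FLOAT, [1])],
        [helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])])
    return helper.make_model(graph, opset_imports=[helper.make_opsetid("", 10)])

m0 = compose.add_prefix(_tiny('g0'), prefix='g0/')
m1 = compose.add_prefix(_tiny('g1'), prefix='g1/')
merged = compose.merge_models(m0, m1, io_map=[('g0/y', 'g1/x')])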
Example n. 54
    def test_embed_layer_norm(self):
        hidden_size = 32
        initializers = [
            helper.make_tensor(
                "word_embedding",
                TensorProto.FLOAT,
                [100, hidden_size],
                [1.0] * (100 * hidden_size),
            ),
            helper.make_tensor(
                "position_embedding",
                TensorProto.FLOAT,
                [20, hidden_size],
                [1.0] * (20 * hidden_size),
            ),
            helper.make_tensor(
                "segment_embedding",
                TensorProto.FLOAT,
                [2, hidden_size],
                [1.0] * (2 * hidden_size),
            ),
            helper.make_tensor("gamma", TensorProto.FLOAT, [hidden_size], [1.0] * hidden_size),
            helper.make_tensor("beta", TensorProto.FLOAT, [hidden_size], [1.0] * hidden_size),
        ]

        nodes = [
            helper.make_node(
                "EmbedLayerNormalization",
                inputs=[
                    "input_ids",
                    "segment_ids",
                    "word_embedding",
                    "position_embedding",
                    "segment_embedding",
                    "gamma",
                    "beta",
                ],
                outputs=["output", "mask_index"],
                domain="com.microsoft",
            ),
        ]

        inputs = [
            # The com.microsoft EmbedLayerNormalization schema takes int32
            # token and segment ids.
            helper.make_tensor_value_info("input_ids", TensorProto.INT32, ["b", "s"]),
            helper.make_tensor_value_info("segment_ids", TensorProto.INT32, ["b", "s"]),
        ]

        outputs = [
            helper.make_tensor_value_info("output", TensorProto.FLOAT, None),
            helper.make_tensor_value_info("mask_index", TensorProto.INT32, None),
        ]

        graph = helper.make_graph(nodes, "EmbedLayerNorm_Test", inputs, outputs, initializers)
        model = helper.make_model(graph)

        inferred = SymbolicShapeInference.infer_shapes(model, auto_merge=True)
        expected_shapes = [
            helper.make_tensor_value_info("output", TensorProto.FLOAT, ["b", "s", hidden_size]),
            helper.make_tensor_value_info("mask_index", TensorProto.INT32, ["b"]),
        ]
        self._check_shapes(graph, inferred.graph, expected_shapes)
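# SymbolicShapeInference, exercised above, ships with onnxruntime. A
# standalone sketch, assuming the module path of recent onnxruntime releases
# (verify against the installed version):
import onnx
from onnxruntime.tools.symbolic_shape_infer import SymbolicShapeInference

model = onnx.load('model.onnx')  # hypothetical path
inferred = SymbolicShapeInference.infer_shapes(model, auto_merge=True)
onnx.save(inferred, 'model_with_shapes.onnx')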
Example n. 55
def make_multi_fclayer_model(ch, wdt, adt, tdt, nnodes):

    W = np.random.randint(wdt.min(), wdt.max() + 1, size=(ch, ch))
    W = W.astype(np.float32)

    T = np.random.randint(tdt.min(),
                          tdt.max() + 1,
                          size=(ch, 2**adt.bitwidth() - 1))
    T = T.astype(np.float32)

    tensors = []
    tensors.append(
        helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ch]))
    for i in range(1, nnodes):
        inter = helper.make_tensor_value_info("inter_" + str(i),
                                              TensorProto.FLOAT, [1, ch])
        tensors.append(inter)
    tensors.append(
        helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, ch]))

    FCLayer_nodes = []
    for i in range(nnodes):
        pe = 1
        simd = 1
        FCLayer_nodes += [
            helper.make_node(
                "StreamingFCLayer_Batch",
                [tensors[i].name, "weights_" + str(i), "thresh_" + str(i)],
                [tensors[i + 1].name],
                domain="finn.custom_op.fpgadataflow",
                backend="fpgadataflow",
                MW=ch,
                MH=ch,
                SIMD=simd,
                PE=pe,
                inputDataType=adt.name,
                weightDataType=wdt.name,
                outputDataType=adt.name,
                ActVal=0,
                binaryXnorMode=0,
                noActivation=0,
            )
        ]

    graph = helper.make_graph(
        nodes=FCLayer_nodes,
        name="fclayer_graph",
        inputs=[tensors[0]],
        outputs=[tensors[-1]],
    )

    model = helper.make_model(graph, producer_name="fclayer-model")
    model = ModelWrapper(model)

    model.set_tensor_datatype("inp", adt)
    model.set_tensor_datatype("outp", adt)

    for i in range(1, nnodes + 1):
        model.graph.value_info.append(tensors[i])
        model.set_initializer("weights_" + str(i - 1), W)
        model.set_initializer("thresh_" + str(i - 1), T)
        model.set_tensor_datatype("weights_" + str(i - 1), wdt)
        model.set_tensor_datatype("thresh_" + str(i - 1), tdt)

    return model
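# A usage sketch, assuming FINN's DataType enum (import path matching the
# finn.custom_op.fpgadataflow domain used above):
from finn.core.datatype import DataType

model = make_multi_fclayer_model(
    ch=4, wdt=DataType.INT2, adt=DataType.UINT2, tdt=DataType.INT16, nnodes=3)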
Example n. 56
import onnx
from onnx import helper, TensorProto

INPUT_1 = helper.make_tensor_value_info('first_input', TensorProto.FLOAT, [2])
INPUT_2 = helper.make_tensor_value_info('second/input:0', TensorProto.FLOAT,
                                        [2])
INPUT_3 = helper.make_tensor_value_info('third_input', TensorProto.FLOAT, [2])
OUTPUT_1 = helper.make_tensor_value_info('path/to/output:0', TensorProto.FLOAT,
                                         [2])
OUTPUT_2 = helper.make_tensor_value_info('path/to/output:1', TensorProto.FLOAT,
                                         [2])
OUTPUT_3 = helper.make_tensor_value_info('path/to/output:2', TensorProto.FLOAT,
                                         [2])

nodes = [
    helper.make_node(
        'Add',
        ['first_input', 'second/input:0'],
        ['path/to/output:0'],
    ),
    helper.make_node('Add', ['third_input', 'second/input:0'],
                     ['path/to/output:1']),
    helper.make_node('Add', ['path/to/output:0', 'path/to/output:1'],
                     ['path/to/output:2']),
]
graph_def = helper.make_graph(nodes, 'simple_scoring',
                              [INPUT_1, INPUT_2, INPUT_3],
                              [OUTPUT_1, OUTPUT_2, OUTPUT_3])
model_def = helper.make_model(graph_def, producer_name='create_model.py')
onnx.save(model_def, 'model.onnx')
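# A quick sanity check of the file written above, using the standard onnx
# loader and checker:
loaded = onnx.load('model.onnx')
onnx.checker.check_model(loaded)
print([o.name for o in loaded.graph.output])
# -> ['path/to/output:0', 'path/to/output:1', 'path/to/output:2']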
Example n. 57
import onnx
from onnx import helper, TensorProto

# QUERY_TENSOR was not defined in this snippet; a [1, 4] shape is inferred
# here from the MatMul with ATTRIBUTE_TENSOR ([4, 1]) producing the declared
# [1, 1] output.
QUERY_TENSOR = helper.make_tensor_value_info('query_tensor', TensorProto.FLOAT,
                                             [1, 4])
ATTRIBUTE_TENSOR = helper.make_tensor_value_info('attribute_tensor',
                                                 TensorProto.FLOAT, [4, 1])
BIAS_TENSOR = helper.make_tensor_value_info('bias_tensor', TensorProto.FLOAT,
                                            [1, 1])
OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 1])

nodes = [
    helper.make_node(
        'MatMul',
        ['query_tensor', 'attribute_tensor'],
        ['matmul'],
    ),
    helper.make_node(
        'Add',
        ['matmul', 'bias_tensor'],
        ['output'],
    ),
]
graph_def = helper.make_graph(
    nodes,
    'simple_scoring',
    [
        QUERY_TENSOR,
        ATTRIBUTE_TENSOR,
        BIAS_TENSOR,
    ],
    [OUTPUT],
)
model_def = helper.make_model(graph_def, producer_name='simple.py')
onnx.save(model_def, 'simple.onnx')
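# A hypothetical scoring run with onnxruntime (not part of the original
# snippet); with all-ones inputs the (1, 4) x (4, 1) MatMul plus zero bias
# yields 4.0:
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('simple.onnx', providers=['CPUExecutionProvider'])
result = sess.run(None, {
    'query_tensor': np.ones((1, 4), dtype=np.float32),
    'attribute_tensor': np.ones((4, 1), dtype=np.float32),
    'bias_tensor': np.zeros((1, 1), dtype=np.float32),
})
print(result[0])  # [[4.]]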
Example n. 58
    def test_matmul_integer(self):
        if legacy_opset_pre_ver(10):
            raise unittest.SkipTest(
                "ONNX version {} doesn't support MatMulInteger.".format(
                    defs.onnx_opset_version()))

        node_def = helper.make_node("MatMulInteger",
                                    ["A", "B", "a_zero_point", "b_zero_point"],
                                    ["Z"])
        # A & B are 3-D tensor and a_zero_point & b_zero_point are scalar
        A = self._get_rnd_int(-20, 20, shape=(2, 3, 4), dtype=np.int8)
        B = self._get_rnd_int(-20, 20, shape=(2, 4, 6), dtype=np.int8)
        a_zero_point = self._get_rnd_int(-20, 20, dtype=np.int8)
        b_zero_point = self._get_rnd_int(-20, 20, dtype=np.int8)
        A_minus_zero_point = np.subtract(A.astype(np.int32),
                                         a_zero_point.astype(np.int32))
        B_minus_zero_point = np.subtract(B.astype(np.int32),
                                         b_zero_point.astype(np.int32))
        z = np.matmul(A_minus_zero_point, B_minus_zero_point)
        graph_def = helper.make_graph(
            [node_def],
            name="test_unknown_shape",
            inputs=[
                helper.make_tensor_value_info("A", TensorProto.INT8,
                                              [None, None, None]),
                helper.make_tensor_value_info("B", TensorProto.INT8,
                                              [None, None, None]),
                helper.make_tensor_value_info("a_zero_point", TensorProto.INT8,
                                              []),
                helper.make_tensor_value_info("b_zero_point", TensorProto.INT8,
                                              [])
            ],
            outputs=[
                helper.make_tensor_value_info("Z", TensorProto.INT32,
                                              [None, None, None])
            ])
        tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
        output = tf_rep.run({
            "A": A,
            "B": B,
            "a_zero_point": a_zero_point,
            "b_zero_point": b_zero_point
        })
        np.testing.assert_almost_equal(output["Z"], z)
        # A & B are 4-D tensor and a_zero_point & b_zero_point are 1-D tensor
        A = self._get_rnd_int(-20, 20, shape=(2, 5, 3, 4), dtype=np.int8)
        B = self._get_rnd_int(-20, 20, shape=(2, 1, 4, 6), dtype=np.int8)
        a_zero_point = self._get_rnd_int(-20,
                                         20,
                                         shape=(A.shape[-2]),
                                         dtype=np.int8)
        b_zero_point = self._get_rnd_int(-20,
                                         20,
                                         shape=(B.shape[-1]),
                                         dtype=np.int8)
        a_zero_point_with_reshape = np.reshape(a_zero_point, [A.shape[-2], 1])
        A_minus_zero_point = np.subtract(
            A.astype(np.int32), a_zero_point_with_reshape.astype(np.int32))
        B_minus_zero_point = np.subtract(B.astype(np.int32),
                                         b_zero_point.astype(np.int32))
        z = np.matmul(A_minus_zero_point, B_minus_zero_point)
        graph_def = helper.make_graph(
            [node_def],
            name="test_unknown_shape",
            inputs=[
                helper.make_tensor_value_info("A", TensorProto.INT8,
                                              [None, None, None, None]),
                helper.make_tensor_value_info("B", TensorProto.INT8,
                                              [None, None, None, None]),
                helper.make_tensor_value_info("a_zero_point", TensorProto.INT8,
                                              [None]),
                helper.make_tensor_value_info("b_zero_point", TensorProto.INT8,
                                              [None])
            ],
            outputs=[
                helper.make_tensor_value_info("Z", TensorProto.INT32,
                                              [None, None, None, None])
            ])
        tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
        output = tf_rep.run({
            "A": A,
            "B": B,
            "a_zero_point": a_zero_point,
            "b_zero_point": b_zero_point
        })
        np.testing.assert_almost_equal(output["Z"], z)
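        # A tiny worked instance of the zero-point arithmetic verified above:
        # MatMulInteger computes (A - a_zero_point) @ (B - b_zero_point) in
        # int32, e.g. (1 - 1) * (3 - 2) + (2 - 1) * (4 - 2) == 2:
        A = np.array([[1, 2]], dtype=np.int8)
        B = np.array([[3], [4]], dtype=np.int8)
        z = (A.astype(np.int32) - 1) @ (B.astype(np.int32) - 2)
        np.testing.assert_array_equal(z, [[2]])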
Example n. 59
def test_cast_errors():
    np.random.seed(133391)
    input_data = np.ceil(np.random.rand(2, 3, 4) * 16)

    # missing 'to' attribute
    node = onnx.helper.make_node('Cast', inputs=['A'], outputs=['B'])
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT16, value.shape)
        for name, value in zip(node.output, ())
    ]  # type: ignore

    graph = make_graph([node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    with pytest.raises(ValueError):
        import_onnx_model(model)[0]

    # unsupported data type representation
    node = onnx.helper.make_node('Cast',
                                 inputs=['A'],
                                 outputs=['B'],
                                 to=1.2345)
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.INT32, value.shape)
        for name, value in zip(node.output, ())
    ]  # type: ignore

    graph = make_graph([node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    with pytest.raises(ValueError):
        import_onnx_model(model)[0]

    # unsupported input tensor data type:
    node = onnx.helper.make_node('Cast',
                                 inputs=['A'],
                                 outputs=['B'],
                                 to=onnx.TensorProto.INT32)
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.COMPLEX64, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.INT32, value.shape)
        for name, value in zip(node.output, ())
    ]  # type: ignore

    graph = make_graph([node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    with pytest.raises((ValueError, NgraphTypeError)):
        import_onnx_model(model)[0]

    # unsupported output tensor data type:
    node = onnx.helper.make_node('Cast',
                                 inputs=['A'],
                                 outputs=['B'],
                                 to=onnx.TensorProto.COMPLEX128)
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.COMPLEX128, value.shape)
        for name, value in zip(node.output, ())
    ]  # type: ignore

    graph = make_graph([node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    with pytest.raises(ValueError):
        import_onnx_model(model)[0]
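    # For contrast, a well-formed Cast should import cleanly: 'to' must be an
    # integer TensorProto.DataType value and both dtypes must be supported.
    node = onnx.helper.make_node('Cast', inputs=['A'], outputs=['B'],
                                 to=onnx.TensorProto.FLOAT16)
    graph = make_graph(
        [node], 'compute_graph',
        [make_tensor_value_info('A', onnx.TensorProto.FLOAT, input_data.shape)],
        [make_tensor_value_info('B', onnx.TensorProto.FLOAT16, input_data.shape)])
    import_onnx_model(make_model(graph, producer_name='NgraphBackend'))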
Example n. 60
    def construct_model(self, model_path):
        #          (input)
        #         /    |  \
        #        /     |   \
        #       /      |    \
        #      /       |     \
        #  Conv(1)  Conv(2)  conv(3)
        #       \      |     /
        #         \    |    /
        #           \  |   /
        #            Concat
        #              |
        #             Relu
        #              |
        #           Identity
        #              |
        #           (output)
        initializers = []
        input = helper.make_tensor_value_info("input", TensorProto.FLOAT,
                                              [1, 3, 15, 15])
        output = helper.make_tensor_value_info("output", TensorProto.FLOAT,
                                               [1, 13, 13, 13])

        # Conv1 output [1, 2, 13, 13]
        conv1_weight_initializer = numpy_helper.from_array(
            np.random.randint(-1, 2, [2, 3, 3, 3]).astype(np.float32),
            name="conv1_weight",
        )
        conv1_node = helper.make_node("Conv", ["input", "conv1_weight"],
                                      ["conv1_output"],
                                      name="conv1_node")

        # Conv2 output [1, 5, 13, 13]
        conv2_weight_initializer = numpy_helper.from_array(
            np.random.randint(-1, 2, [5, 3, 3, 3]).astype(np.float32),
            name="conv2_weight",
        )
        conv2_node = helper.make_node("Conv", ["input", "conv2_weight"],
                                      ["conv2_output"],
                                      name="conv2_node")

        # Conv3 output [1, 6, 13, 13]
        conv3_weight_initializer = numpy_helper.from_array(
            np.random.randint(-1, 2, [6, 3, 3, 3]).astype(np.float32),
            name="conv3_weight",
        )
        conv3_node = helper.make_node("Conv", ["input", "conv3_weight"],
                                      ["conv3_output"],
                                      name="conv3_node")

        concat_node = helper.make_node(
            "Concat",
            ["conv1_output", "conv2_output", "conv3_output"],
            ["concat_output"],
            name="concat_node",
            axis=1,
        )

        relu_node = helper.make_node("Relu", ["concat_output"],
                                     ["relu_output"],
                                     name="relu_node")
        identity_node = helper.make_node("Identity", ["relu_output"],
                                         ["output"],
                                         name="identity_node")

        initializers = [
            conv1_weight_initializer,
            conv2_weight_initializer,
            conv3_weight_initializer,
        ]
        graph = helper.make_graph(
            [
                conv1_node, conv2_node, conv3_node, concat_node, relu_node,
                identity_node
            ],
            "qlinear_concat_op_test",
            [input],
            [output],
            initializer=initializers,
        )
        model = helper.make_model(graph,
                                  opset_imports=[helper.make_opsetid("", 13)])
        save(model, model_path)
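        # Appended check (not in the original test): shape inference should
        # confirm the channel arithmetic from the diagram, 2 + 5 + 6 = 13.
        import onnx
        inferred = onnx.shape_inference.infer_shapes(model)
        concat_vi = next(vi for vi in inferred.graph.value_info
                         if vi.name == "concat_output")
        print(concat_vi.type.tensor_type.shape)  # 1 x 13 x 13 x 13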