Example #1
def verify_pad(indata, pads, value=0.0):
    indata = np.array(indata).astype(np.float32)
    # NumPy expected result
    len_dim = len(pads) // 2
    np_pads = [(pads[i], pads[i+len_dim]) for i in range(len_dim)]
    outdata = np.pad(indata, pad_width=np_pads, mode='constant', constant_values=value)
    # ONNX graph
    node = helper.make_node(
        'Pad',
        inputs=['input'],
        outputs=['output'],
        mode='constant',
        pads=pads,
        value=value
    )
    graph = helper.make_graph([node],
                              'pad_test',
                              inputs = [helper.make_tensor_value_info("input",
                                            TensorProto.FLOAT, list(indata.shape))],
                              outputs = [helper.make_tensor_value_info("output",
                                            TensorProto.FLOAT, list(outdata.shape))])
    model = helper.make_model(graph, producer_name='pad_test')
    # TVM result
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32')
        tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
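A minimal usage sketch for verify_pad (hypothetical call; assumes the surrounding TVM test harness provides np, helper, TensorProto, ctx_list and get_tvm_output). Note that ONNX lists all begin offsets first, then all end offsets:

# pads = [x1_begin, x2_begin, x1_end, x2_end] -> np_pads = [(1, 0), (0, 1)]
verify_pad(np.random.randn(2, 2).astype(np.float32), [1, 0, 0, 1], 5.0)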
Example #2
 def test_onnx_to_caffe2_loop(self):
     body_nodes = [helper.make_node(
         "MatMul", ["_X", "W"], ["_Y"])]
     nodes = self._make_fake_loop_op(body_nodes,
                                     [(TensorProto.FLOAT, (2, 2), "X")],
                                     [(TensorProto.FLOAT, (2, 2), "Y")])
     X = np.random.rand(2, 2).astype(np.float32)
     W = np.random.rand(2, 2).flatten().astype(np.float32)
     graph_def = helper.make_graph(
         nodes,
         "test",
         [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 2)),
          helper.make_tensor_value_info("W", TensorProto.FLOAT, (2, 2))],
         [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2))],
         initializer=[helper.make_tensor("W",
                                         TensorProto.FLOAT,
                                         [2, 2],
                                         W.tolist())]
     )
     model_def = helper.make_model(graph_def, producer_name='onnx-to-caffe2-test')
     Y = X
     for _ in range(10):
         Y = np.matmul(Y, W.reshape(2, 2))
     p = c2.prepare(model_def)
     out = p.run(X)
     np.testing.assert_allclose(out.Y, Y)
Example #3
def _test_upsample_bilinear_opset9():
    scale = 2
    in_shape = (1, 1, 3, 3)
    out_shape = (1, 1, 3*scale, 3*scale)
    y = helper.make_node("Upsample", ['in','scales'], ['out'], mode='linear')
    scales=[1.0, 1.0, 2.0, 2.0]
    in_array = np.random.uniform(size=in_shape).astype(np.float32)
    out_array = topi.testing.bilinear_resize_python(in_array, (3*scale, 3*scale), "NCHW")

    ref_array = np.array(scales)
    ref_node = helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['scales'],
                                 value=onnx.helper.make_tensor(name = 'const_tensor',
                                                               data_type = TensorProto.FLOAT,
                                                               dims = ref_array.shape,
                                                               vals = ref_array.flatten().astype(float)))

    graph = helper.make_graph([ref_node, y],
                              'upsample_bilinear_opset9_test',
                              inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))])

    model = helper.make_model(graph, producer_name='upsample_bilinear_opset9_test')
    inputs = []
    inputs.append(in_array)

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, inputs, target, ctx, out_shape, 'float32')
        tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
Example #4
def _test_power_iteration(x_shape, y_shape):
    if isinstance(y_shape, int):
        y_shape = [y_shape]

    x = np.random.uniform(size=x_shape).astype(np.float32)
    y = np.random.uniform(size=y_shape).astype(np.float32)

    np_res = np.power(x, y).astype(np.float32)

    res = helper.make_node("Pow", ['x', 'y'], ['out'])

    graph = helper.make_graph([res],
                              'power_test',
                              inputs = [helper.make_tensor_value_info("x",
                                            TensorProto.FLOAT, list(x_shape)),
                                        helper.make_tensor_value_info("y",
                                            TensorProto.FLOAT, list(y_shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(np_res.shape))])

    model = helper.make_model(graph, producer_name='power_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, y], target, ctx, np_res.shape)
        tvm.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5)
Example #5
def test_spacetodepth():
    n, c, h, w = shape = (1, 1, 4, 6)
    input1 = np.random.rand(n, c, h, w).astype("float32")
    blocksize = 2
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=shape)]

    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 4, 2, 3))]

    nodes = [helper.make_node("SpaceToDepth", ["input1"], ["output"], block_size=blocksize)]

    graph = helper.make_graph(nodes,
                              "spacetodepth_test",
                              inputs,
                              outputs)

    spacetodepth_model = helper.make_model(graph)

    bkd_rep = backend.prepare(spacetodepth_model)
    output = bkd_rep.run([input1])

    tmp = np.reshape(input1, [n, c,
                    h // blocksize, blocksize,
                    w // blocksize, blocksize])
    tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
    numpy_op = np.reshape(tmp, [n, c * (blocksize**2),
                    h // blocksize,
                    w // blocksize])

    npt.assert_almost_equal(output[0], numpy_op)
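For reference, the reshape/transpose pair above is the standard SpaceToDepth identity, mapping (n, c, h, w) to (n, c*b*b, h//b, w//b). A standalone NumPy check of the shape math:

import numpy as np

n, c, h, w, b = 1, 1, 4, 6, 2
x = np.arange(n * c * h * w).reshape(n, c, h, w)
tmp = x.reshape(n, c, h // b, b, w // b, b)
y = np.transpose(tmp, [0, 3, 5, 1, 2, 4]).reshape(n, c * b * b, h // b, w // b)
assert y.shape == (1, 4, 2, 3)  # matches the 'output' value_info above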
Example #6
def verify_mean(input_dim):
    dtype = 'float32'

    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim).astype(dtype)
    a_np3 = np.random.uniform(size=input_dim).astype(dtype)

    b_np = np.mean((a_np1, a_np2, a_np3), axis=0)

    mean_node = helper.make_node("Mean", ["a_np1", "a_np2", "a_np3"], ["out"])

    graph = helper.make_graph([mean_node],
                              "Mean_test",
                              inputs = [helper.make_tensor_value_info("a_np1",
                                            TensorProto.FLOAT, list(input_dim)),
                                        helper.make_tensor_value_info("a_np2",
                                            TensorProto.FLOAT, list(input_dim)),
                                        helper.make_tensor_value_info("a_np3",
                                            TensorProto.FLOAT, list(input_dim))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(b_np.shape))])

    model = helper.make_model(graph, producer_name='Mean_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape)
        tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
Example #7
def verify_reduce_x(name, indata, axis, keepdims):
    indata = np.array(indata).astype(np.float32)
    # NumPy expected result
    if name == 'ReduceMax':
        outdata = np.maximum.reduce(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceMin':
        outdata = np.minimum.reduce(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceSum':
        outdata = np.sum(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceMean':
        outdata = np.mean(indata, axis=axis, keepdims=keepdims == 1)
    else:
        raise Exception('unsupported op: {}'.format(name))
    if len(np.asarray(outdata).shape) == 0:
        outdata = np.asarray([outdata])
    # ONNX graph
    if axis is None:
        node = helper.make_node(name, inputs=['input'], outputs=['output'],
                                keepdims=keepdims)
    else:
        node = helper.make_node(name, inputs=['input'], outputs=['output'],
                                axis=axis, keepdims=keepdims)
    graph = helper.make_graph([node],
                              '{}_test'.format(name),
                              inputs = [helper.make_tensor_value_info("input",
                                            TensorProto.FLOAT, list(indata.shape))],
                              outputs = [helper.make_tensor_value_info("output",
                                            TensorProto.FLOAT, list(outdata.shape))])
    model = helper.make_model(graph, producer_name='{}_test'.format(name))
    # TVM result
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32')
        tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
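Usage sketches (hypothetical calls, same assumed harness as above); axis follows NumPy semantics here, so a tuple selects multiple axes and None reduces everything:

data = np.random.uniform(size=(3, 4, 5))
verify_reduce_x('ReduceMax', data, axis=(1,), keepdims=1)
verify_reduce_x('ReduceSum', data, axis=None, keepdims=0)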
Example #8
    def test_onnx_to_caffe2_zipfile(self):
        buf = tempfile.NamedTemporaryFile()
        onnx_model = zipfile.ZipFile(buf, 'w')
        output = tempfile.NamedTemporaryFile()
        init_net_output = tempfile.NamedTemporaryFile()

        node_def = helper.make_node(
            "MatMul", ["X", "W"], ["Y"])
        X = np.random.rand(2, 3).astype(np.float32)
        W = np.random.rand(3, 2).flatten().astype(np.float32)
        graph_def = helper.make_graph(
            [node_def],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
             helper.make_tensor_value_info("W", TensorProto.FLOAT, (3, 2))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2))],
            initializer=[helper.make_tensor("W",
                                            TensorProto.FLOAT,
                                            [3, 2],
                                            b'__EXTERNAL',
                                            raw=True)])
        model_def = helper.make_model(graph_def, producer_name='onnx-to-caffe2-test')
        onnx_model.writestr('__MODEL_PROTO', model_def.SerializeToString())
        onnx_model.writestr('W', W.tobytes())
        onnx_model.close()

        W = W.reshape((3, 2))
        Y_expect = np.matmul(X, W)

        c2_model = c2.prepare_zip_archive(buf)
        Y = c2_model.run(X).Y
        np.testing.assert_allclose(Y, Y_expect)
Example #9
 def test_model_docstring(self):  # type: () -> None
     graph = helper.make_graph([], "my graph", [], [])
     model_def = helper.make_model(graph, doc_string='test')
     # models may have their own documentation, but don't have a name
     # their name is the domain-qualified name of the underlying graph.
     self.assertFalse(hasattr(model_def, "name"))
     self.assertEqual(model_def.doc_string, 'test')
Example #10
def verify_split(indata, outdatas, split, axis=0):
    indata = np.array(indata).astype(np.float32)
    outdatas = [np.array(o).astype(np.float32) for o in outdatas]
    node = helper.make_node(
        'Split',
        inputs=['input'],
        outputs=['output_{}'.format(i) for i in range(len(split))],
        axis=axis,
        split=split
    )
    graph = helper.make_graph([node],
                              'split_test',
                              inputs = [helper.make_tensor_value_info("input",
                                            TensorProto.FLOAT, list(indata.shape))],
                              outputs = [helper.make_tensor_value_info("output_{}".format(i),
                                            TensorProto.FLOAT, list(outdatas[i].shape))
                                            for i in range(len(split))
                                         ])
    model = helper.make_model(graph, producer_name='split_test')

    for target, ctx in ctx_list():
        output_shape = [o.shape for o in outdatas]
        output_type = ['float32'] * len(split)
        tvm_out = get_tvm_output(model, indata, target, ctx, output_shape, output_type)
        for o, t in zip(outdatas, tvm_out):
            tvm.testing.assert_allclose(o, t)
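Usage sketch (hypothetical call): split a length-6 vector into three equal pieces along axis 0:

verify_split([1., 2., 3., 4., 5., 6.],
             [[1., 2.], [3., 4.], [5., 6.]],
             [2, 2, 2], axis=0)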
Example #11
    def test_initializer(self):
        X = np.array([[1, 2], [3, 4]]).astype(np.float32)
        Y = np.array([[1, 2], [3, 4]]).astype(np.float32)
        weight = np.array([[1, 0], [0, 1]])
        graph_def = make_graph(
            [make_node("Add", ["X", "Y"], ["Z0"]),
             make_node("Cast", ["Z0"], ["Z"], to="float"),
             make_node("Mul", ["Z", "weight"], ["W0"]),
             make_node("Tanh", ["W0"], ["W1"]),
             make_node("Sigmoid", ["W1"], ["W2"]),
             make_node("Scale", ["W2"], ["W3"], scale=-1.0)],
            name="test_initializer",
            inputs=[
                make_tensor_value_info("X", onnx.TensorProto.FLOAT, (2, 2)),
                make_tensor_value_info("Y", onnx.TensorProto.FLOAT, (2, 2)),
                make_tensor_value_info("weight", onnx.TensorProto.FLOAT, (2, 2)),
            ],
            outputs=[
                make_tensor_value_info("W3", onnx.TensorProto.FLOAT, (2, 2))
            ],
            initializer=[make_tensor("weight",
                                     onnx.TensorProto.FLOAT,
                                     [2, 2],
                                     weight.flatten().astype(float))]
        )

        def sigmoid(x):
            return 1 / (1 + np.exp(-x))

        W_ref = -sigmoid(np.tanh((X + Y) * weight))
        c2_rep = c2.prepare(make_model(graph_def, producer_name='caffe2-ref-test'))
        output = c2_rep.run({"X": X, "Y": Y})
        np.testing.assert_almost_equal(output["W3"], W_ref)
Example #12
    def test_small_model(self):
        # Create one input
        X = helper.make_tensor_value_info('IN', TensorProto.FLOAT, [2, 3])
        # Create one output
        Y = helper.make_tensor_value_info('OUT', TensorProto.FLOAT, [2, 3])
        # Create a node
        node_def = helper.make_node('Abs', ['IN'], ['OUT'])

        # Create the model
        graph_def = helper.make_graph([node_def], "test-model", [X], [Y])
        onnx_model = helper.make_model(graph_def,
                                       producer_name='onnx-example')

        model = Model()
        model.BuildFromOnnxModel(onnx_model)
        schedule = model.OptimizeSchedule()
        schedule = schedule.replace('\n', ' ')
        expected_schedule = r'// Target: .+// MachineParams: .+// Delete this line if not using Generator Pipeline pipeline = get_pipeline\(\);.+Func OUT = pipeline.get_func\(1\);.+{.+}.+'
        self.assertRegex(schedule, expected_schedule)

        input = np.random.rand(2, 3) - 0.5
        outputs = model.run([input])
        self.assertEqual(1, len(outputs))
        output = outputs[0]
        expected = np.abs(input)
        np.testing.assert_allclose(expected, output)
Example #13
    def test_scalars(self):
        # Create 2 inputs
        X = helper.make_tensor_value_info('A', TensorProto.INT32, [])
        Y = helper.make_tensor_value_info('B', TensorProto.INT32, [])
        # Create one output
        Z = helper.make_tensor_value_info('C', TensorProto.INT32, [])
        # Create a node
        node_def = helper.make_node('Add', ['A', 'B'], ['C'])

        # Create the model
        graph_def = helper.make_graph([node_def], "scalar-model", [X, Y], [Z])
        onnx_model = helper.make_model(graph_def,
                                       producer_name='onnx-example')

        model = Model()
        model.BuildFromOnnxModel(onnx_model)
        schedule = model.OptimizeSchedule()
        schedule = schedule.replace('\n', ' ')
        expected_schedule = r'// Target: .+// MachineParams: .+// Delete this line if not using Generator Pipeline pipeline = get_pipeline\(\);.+Func C = pipeline.get_func\(2\);.+{.+}.+'
        self.assertRegex(schedule, expected_schedule)

        input1 = np.random.randint(-10, 10, size=())
        input2 = np.random.randint(-10, 10, size=())
        outputs = model.run([input1, input2])
        self.assertEqual(1, len(outputs))
        output = outputs[0]
        expected = input1 + input2
        np.testing.assert_allclose(expected, output)
Example #14
def test_reshape_like():
    in_shape = (4, 3, 3, 4)
    ref_shape = (3, 4, 4, 3)

    ref_array = np.random.uniform(size=ref_shape).astype('float32')
    ref_node = onnx.helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['ref_in'],
                                 value=onnx.helper.make_tensor(name = 'const_tensor',
                                                               data_type = onnx.TensorProto.FLOAT,
                                                               dims = ref_array.shape,
                                                               vals = ref_array.flatten().astype(float)))
    copy_node = helper.make_node("Identity", ["ref_in"], ["copy_in"])
    reshape_node = helper.make_node("Reshape", ["in", "copy_in"], ["out"])

    graph = helper.make_graph([ref_node, copy_node, reshape_node],
                              "reshape_like_test",
                              inputs = [helper.make_tensor_value_info("in",
                                            TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(ref_shape))])

    model = helper.make_model(graph, producer_name='reshape_like_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32')
        tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
Example #15
def verify_constantfill(is_shape, input_dim, out_dim, value, dtype, **kwargs):
    input_a = np.random.uniform(size=input_dim).astype(dtype)
    out = np.empty(shape=out_dim, dtype=dtype)
    out.fill(value)

    if is_shape:
        fill_node = helper.make_node("ConstantFill", [], ["out"], shape=input_dim, value=value, **kwargs)
    else:
        fill_node = helper.make_node("ConstantFill", ["input_a"], ["out"], value=value, dtype=dtype, **kwargs)

    graph = helper.make_graph([fill_node],
                              "fill_test",
                              inputs = [helper.make_tensor_value_info("input_a",
                                            TensorProto.FLOAT, list(input_dim))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(out.shape))])

    model = helper.make_model(graph, producer_name='fill_test')

    for target, ctx in ctx_list():
        if is_shape:
            tvm_out = get_tvm_output(model, [], target, ctx, out.shape)
        else:
            tvm_out = get_tvm_output(model, [input_a], target, ctx, out.shape)

        tvm.testing.assert_allclose(out, tvm_out, rtol=1e-5, atol=1e-5)
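Usage sketches (hypothetical calls, assumed harness as above); with is_shape=True the output shape comes from the node's shape attribute, otherwise it is taken from the 'input_a' tensor:

verify_constantfill(True, (2, 3, 4, 5), (2, 3, 4, 5), 10, 'float32')
verify_constantfill(False, (2, 3, 4, 5), (2, 3, 4, 5), 10, 'float32')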
Example #16
 def _optimized(self, graph):
     orig_model = helper.make_model(graph, producer_name='onnx-to-caffe2-test')
     orig_model_str = orig_model.SerializeToString()
     optimized_model_str = c2.Caffe2Backend.optimize_onnx(orig_model_str)
     optimized_model = ModelProto()
     optimized_model.ParseFromString(optimized_model_str)
     return optimized_model
Example #17
def verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None):
    in_array = np.random.uniform(size=shape).astype(dtype)

    if alpha is None and beta is None and bias is None:
        alpha = 0.0001
        beta = 0.75
        bias = 1.0
        node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], size=nsize)
    else:
        node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], alpha=alpha,
                                     beta=beta, bias=bias, size=nsize)

    graph = helper.make_graph([node],
                              "lrn_test",
                              inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(shape))],
                              outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))])
    model = helper.make_model(graph, producer_name='lrn_test')

    def _get_python_lrn():
        square_sum = np.zeros(shape).astype(dtype)
        for n, c, h, w in np.ndindex(in_array.shape):
            # sum of squares over a window of nsize channels, clamped to the channel count
            square_sum[n, c, h, w] = sum(in_array[n,
                                         max(0, c - int(math.floor((nsize - 1) / 2))): \
                                             min(shape[1], c + int(math.ceil((nsize - 1) / 2)) + 1),
                                         h,
                                         w] ** 2)
        py_out = in_array / ((bias + (alpha / nsize) * square_sum) ** beta)
        return py_out

    for target, ctx in ctx_list():
        input_name = model.graph.input[0].name
        py_out = _get_python_lrn()
        tvm_out = get_tvm_output(model, in_array, target, ctx, py_out.shape, 'float32')
        tvm.testing.assert_allclose(py_out, tvm_out, rtol=1e-5, atol=1e-5)
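The Python reference implements the ONNX LRN formula out = x / (bias + (alpha / size) * square_sum) ** beta. Usage sketches (hypothetical calls, assumed harness as above):

verify_lrn((5, 5, 5, 5), 3, 'float32')
verify_lrn((5, 5, 5, 5), 3, 'float32', alpha=0.0002, beta=0.5, bias=2.0)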
Example #18
def test_shape():
    in_shape = (4, 3, 3, 4)
    ref_shape = (6, 2, 4, 3)

    ref_array = np.array(ref_shape)
    ref_node = onnx.helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['ref_in'],
                                 value=onnx.helper.make_tensor(name = 'const_tensor',
                                                               data_type = onnx.TensorProto.INT32,
                                                               dims = ref_array.shape,
                                                               vals = ref_array.flatten().astype(int)))
    reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])

    shape_node = helper.make_node("Shape", ['out'], ['final_out'])

    graph = helper.make_graph([ref_node, reshape_node, shape_node],
                              "shape_test",
                              inputs = [helper.make_tensor_value_info("in",
                                            TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("final_out",
                                            TensorProto.FLOAT, list(ref_shape))])

    model = helper.make_model(graph, producer_name='shape_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'int32')
        tvm.testing.assert_allclose(ref_shape, tvm_out)
Example #19
def get_onnx_graph(testname, input_names, inputs, output_name, output_shape, attr):
    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=output_shape)]

    nodes = [helper.make_node(output_name, input_names, ["output"], **attr)]

    graph = helper.make_graph(nodes, testname, inputs, outputs)

    model = helper.make_model(graph)
    return model
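Usage sketch (hypothetical call). Despite its name, output_name is really the op type of the single node:

inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(2, 3))]
model = get_onnx_graph("sigmoid_test", ["input1"], inputs, "Sigmoid", (2, 3), {})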
Example #20
 def caffe2_net_to_onnx_model(cls, *args, **kwargs):
     opset_id = OperatorSetIdProto()
     opset_id.domain = ''  # ONNX default domain
     opset_id.version = cls.target_opset_version
     model = make_model(cls.caffe2_net_to_onnx_graph(*args, **kwargs),
                        opset_imports=[opset_id],  # current supported opset version
                        producer_name='onnx-caffe2',  # producer name
                        )
     checker.check_model(model)
     return model
Example #21
    def test_check_model(self):  # type: () -> None
        node = helper.make_node(
            "Relu", ["X"], ["Y"], name="test")
        graph = helper.make_graph(
            [node],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
        model = helper.make_model(graph, producer_name='test')

        checker.check_model(model)
Example #22
 def test_ops(op_name, inputs, input_tensors, numpy_op):
     outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=np.shape(inputs[0]))]
     nodes = [helper.make_node(op_name, ["input"+str(i+1) for i in range(len(inputs))], ["output"])]
     graph = helper.make_graph(nodes,
                               op_name + "_test",
                               input_tensors,
                               outputs)
     model = helper.make_model(graph)
     bkd_rep = backend.prepare(model)
     output = bkd_rep.run(inputs)
     npt.assert_almost_equal(output[0], numpy_op)
Example #23
 def test_model(self):  # type: () -> None
     node_def = helper.make_node(
         "Relu", ["X"], ["Y"])
     graph_def = helper.make_graph(
         [node_def],
         "test",
         [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
         [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
     self.assertRaises(AttributeError, helper.make_model, graph_def, xxx=1)
     model_def = helper.make_model(graph_def, producer_name='test')
     self.assertEqual(model_def.producer_name, 'test')
Example #24
    def test_model_metadata_props(self):  # type: () -> None
        graph = helper.make_graph([], "my graph", [], [])
        model_def = helper.make_model(graph, doc_string='test')
        helper.set_model_props(model_def, {'Title': 'my graph', 'Keywords': 'test;graph'})
        checker.check_model(model_def)
        helper.set_model_props(model_def, {'Title': 'my graph', 'Keywords': 'test;graph'})
        checker.check_model(model_def)  # helper replaces, so no dupe

        dupe = model_def.metadata_props.add()
        dupe.key = 'Title'
        dupe.value = 'Other'
        self.assertRaises(checker.ValidationError, checker.check_model, model_def)
Example #25
 def verify_single_ops(op, x, out_np, rtol=1e-7, atol=1e-7):
     z = helper.make_node(op, ['in1'], ['out'])
     graph = helper.make_graph([z],
                                '_test',
                               inputs = [helper.make_tensor_value_info("in1",
                                             TensorProto.FLOAT, list(in_shape)),],
                               outputs = [helper.make_tensor_value_info("out",
                                             TensorProto.FLOAT, list(out_shape))])
     model = helper.make_model(graph, producer_name='_test')
     for target, ctx in ctx_list():
         tvm_out = get_tvm_output(model, [x], target, ctx)
         tvm.testing.assert_allclose(out_np, tvm_out, rtol=rtol, atol=atol)
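Note that in_shape and out_shape are free names above; in the original suite this helper is nested inside a test that defines them. A hypothetical driver:

in_shape = (1, 2, 3, 3)
out_shape = in_shape
x = np.random.uniform(size=in_shape).astype(np.float32)
verify_single_ops("Neg", x, np.negative(x))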
Example #26
    def test_check_old_model(self):  # type: () -> None
        node = helper.make_node(
            "Pad", ["X"], ["Y"], paddings=(0, 0, 0, 0))
        graph = helper.make_graph(
            [node],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
        onnx_id = helper.make_opsetid("", 1)
        model = helper.make_model(graph, producer_name='test', opset_imports=[onnx_id])

        checker.check_model(model)
Example #27
 def test_polish_model(self):  # type: () -> None
     node_def = helper.make_node(
         "Relu", ["X"], ["Y"], doc_string="ABC")
     graph_def = helper.make_graph(
         [node_def],
         "test",
         [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
         [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
     model_def = helper.make_model(graph_def, producer_name='test')
     polished_def = onnx.utils.polish_model(model_def)
     self.assertEqual(polished_def.producer_name, 'test')
     self.assertEqual(len(polished_def.graph.node), 1)
     self.assertFalse(polished_def.graph.node[0].HasField('doc_string'))
Example #28
    def run_node(cls, node, input, device='CPU'):
        input_tensors = []

        if len(node.input) != len(input):
            raise ValueError(
                "Unexpected Input Size: Op_Type = {0}, Expected = {1}, Received = {2}"
                .format(node.op_type, len(node.input), len(input)))

        for i in range(len(input)):
            input_tensors.append(helper.make_tensor_value_info(node.input[i], TensorProto.FLOAT, input[i].shape))

        onnx_graph = helper.make_graph([node], "test_{}".format(node.op_type), input_tensors, [])
        onnx_model = helper.make_model(onnx_graph)
        return CNTKBackend.run_model(onnx_model, input, device)
Example #29
 def verify_binary_ops(op, x, y, out_np, broadcast=None, rtol=1e-7, atol=1e-7):
     if broadcast is None:
         z = helper.make_node(op, ['in1', 'in2'], ['out'])
     else:
         z = helper.make_node(op, ['in1', 'in2'], ['out'], broadcast=1)
     graph = helper.make_graph([z],
                                '_test',
                               inputs = [helper.make_tensor_value_info("in1",
                                             TensorProto.FLOAT, list(in_shape)),
                                         helper.make_tensor_value_info("in2",
                                             TensorProto.FLOAT, list(in_shape))],
                               outputs = [helper.make_tensor_value_info("out",
                                             TensorProto.FLOAT, list(out_shape))])
     model = helper.make_model(graph, producer_name='_test')
     for target, ctx in ctx_list():
         tvm_out = get_tvm_output(model, [x, y], target, ctx)
         tvm.testing.assert_allclose(out_np, tvm_out, rtol=rtol, atol=atol)
Example #30
def verify_argmax(input_dim, axis=None, keepdims=None):
    def _argmax_numpy(data, axis=0, keepdims=True):
        result = np.argmax(data, axis=axis)
        if (keepdims == 1):
            result = np.expand_dims(result, axis)
        return result.astype(data.dtype)

    a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)

    if keepdims is None and axis is None:
        b_np = _argmax_numpy(a_np1)
        node = onnx.helper.make_node('ArgMax',
                                     inputs=['a_np1'],
                                     outputs=['out'])
    elif axis is None:
        b_np = _argmax_numpy(a_np1, keepdims=keepdims)
        node = onnx.helper.make_node('ArgMax',
                                     inputs=['a_np1'],
                                     outputs=['out'],
                                     keepdims=keepdims)
    elif keepdims is None:
        b_np = _argmax_numpy(a_np1, axis=axis)
        node = onnx.helper.make_node('ArgMax',
                                     inputs=['a_np1'],
                                     outputs=['out'],
                                     axis=axis)
    else:
        b_np = _argmax_numpy(a_np1, axis=axis, keepdims=keepdims)
        node = onnx.helper.make_node('ArgMax',
                                     inputs=['a_np1'],
                                     outputs=['out'],
                                     axis=axis,
                                     keepdims=keepdims)

    graph = helper.make_graph([node],
                              "argmax_test",
                              inputs = [helper.make_tensor_value_info("a_np1",
                                            TensorProto.INT32, list(a_np1.shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.INT32, list(b_np.shape))])

    model = helper.make_model(graph, producer_name='argmax_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape, b_np.dtype)
        tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
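Usage sketches exercising all four attribute combinations (hypothetical calls, assumed harness as above):

verify_argmax((3, 4, 4))
verify_argmax((3, 4, 4), axis=1)
verify_argmax((3, 4, 4), keepdims=0)
verify_argmax((3, 4, 4), axis=2, keepdims=1)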
Example #31
def gen_gru_onnx_test_model(model_path,
                            seq_length,
                            batch_size,
                            hidden_size,
                            input_size,
                            direction,
                            has_bias,
                            has_sequence_lens,
                            has_initial_h,
                            linear_before_reset=False):

    # Validate parameters
    assert direction in GRU_DIRS, 'ONNX GRU direction invalid!'
    assert not has_sequence_lens, 'ONNX GRU Variable sequence length not supported'

    # Get number of directions
    num_directions = 2 if (direction == GRU_DIR_BIDIRECTIONAL) else 1

    # Tensor sizes
    X_shape = [seq_length, batch_size, input_size]
    W_shape = [num_directions, 3 * hidden_size, input_size]
    R_shape = [num_directions, 3 * hidden_size, hidden_size]
    B_shape = [num_directions, 6 * hidden_size]
    sequence_lens_shape = [batch_size]
    initial_h_shape = [num_directions, batch_size, hidden_size]
    Y_shape = [seq_length, num_directions, batch_size, hidden_size]

    # Generate random inputs (weights are assumed concatenated in ONNX format: z,r,h)
    np.random.seed(1)
    X = np.random.randn(*X_shape)
    W = np.random.randn(*W_shape)
    R = np.random.randn(*R_shape)
    B = np.random.randn(*B_shape) if has_bias else np.zeros(B_shape)
    sequence_lens = np.random.randint(
        1, seq_length, batch_size) if has_sequence_lens else np.tile(
            seq_length, batch_size)
    initial_h = np.random.randn(
        *initial_h_shape) if has_initial_h else np.zeros(initial_h_shape)

    # Function to get all the weight components for the given direction
    def get_weights(dir_idx):
        Wz = np.reshape(W[dir_idx, 0 * hidden_size:1 * hidden_size, :],
                        [hidden_size, input_size])
        Wr = np.reshape(W[dir_idx, 1 * hidden_size:2 * hidden_size, :],
                        [hidden_size, input_size])
        Wh = np.reshape(W[dir_idx, 2 * hidden_size:3 * hidden_size, :],
                        [hidden_size, input_size])
        Rz = np.reshape(R[dir_idx, 0 * hidden_size:1 * hidden_size, :],
                        [hidden_size, hidden_size])
        Rr = np.reshape(R[dir_idx, 1 * hidden_size:2 * hidden_size, :],
                        [hidden_size, hidden_size])
        Rh = np.reshape(R[dir_idx, 2 * hidden_size:3 * hidden_size, :],
                        [hidden_size, hidden_size])
        bWz = np.reshape(B[dir_idx, 0 * hidden_size:1 * hidden_size],
                         [hidden_size])
        bWr = np.reshape(B[dir_idx, 1 * hidden_size:2 * hidden_size],
                         [hidden_size])
        bWh = np.reshape(B[dir_idx, 2 * hidden_size:3 * hidden_size],
                         [hidden_size])
        bRz = np.reshape(B[dir_idx, 3 * hidden_size:4 * hidden_size],
                         [hidden_size])
        bRr = np.reshape(B[dir_idx, 4 * hidden_size:5 * hidden_size],
                         [hidden_size])
        bRh = np.reshape(B[dir_idx, 5 * hidden_size:6 * hidden_size],
                         [hidden_size])
        return Wz, Wr, Wh, Rz, Rr, Rh, bWz, bWr, bWh, bRz, bRr, bRh

    # Function to get PyTorch weights (which are in the r,z,h order)
    def get_torch_weights(dir_idx):
        Wz, Wr, Wh, Rz, Rr, Rh, bWz, bWr, bWh, bRz, bRr, bRh = get_weights(
            dir_idx)
        W_torch = np.concatenate((Wr, Wz, Wh), 0)
        R_torch = np.concatenate((Rr, Rz, Rh), 0)
        bW_torch = np.concatenate((bWr, bWz, bWh), 0)
        bR_torch = np.concatenate((bRr, bRz, bRh), 0)
        return (W_torch, R_torch, bW_torch, bR_torch)

    # ----------------------------------------- COMPUTE pyTORCH REFERENCE ----------------------------------------------
    # Compute reference using Pytorch. Pytorch GRU has only forward/bidirectional so we will do the reverse GRU using
    # a Pytorch forward GRU.
    gru = torch.nn.GRU(input_size=input_size,
                       hidden_size=hidden_size,
                       num_layers=1,
                       bias=True,
                       batch_first=False,
                       dropout=0,
                       bidirectional=(direction == GRU_DIR_BIDIRECTIONAL))

    # Get GRU state dictionary
    gru_state_dict = gru.state_dict()

    # Assign forward weights
    forwardEnabled = direction in [GRU_DIR_FORWARD, GRU_DIR_BIDIRECTIONAL]
    if forwardEnabled:
        forward_dir_idx = 0
        (W_torch, R_torch, bW_torch,
         bR_torch) = get_torch_weights(forward_dir_idx)
        gru_state_dict['weight_ih_l0'] = torch.tensor(W_torch,
                                                      dtype=torch.float32)
        gru_state_dict['weight_hh_l0'] = torch.tensor(R_torch,
                                                      dtype=torch.float32)
        gru_state_dict['bias_ih_l0'] = torch.tensor(bW_torch,
                                                    dtype=torch.float32)
        gru_state_dict['bias_hh_l0'] = torch.tensor(bR_torch,
                                                    dtype=torch.float32)

    # Assign reverse weights
    reverseEnabled = direction in [GRU_DIR_REVERSE, GRU_DIR_BIDIRECTIONAL]
    if reverseEnabled:
        if direction == GRU_DIR_REVERSE:
            reverse_dir_idx = 0
            (W_torch, R_torch, bW_torch,
             bR_torch) = get_torch_weights(reverse_dir_idx)
            gru_state_dict['weight_ih_l0'] = torch.tensor(W_torch,
                                                          dtype=torch.float32)
            gru_state_dict['weight_hh_l0'] = torch.tensor(R_torch,
                                                          dtype=torch.float32)
            gru_state_dict['bias_ih_l0'] = torch.tensor(bW_torch,
                                                        dtype=torch.float32)
            gru_state_dict['bias_hh_l0'] = torch.tensor(bR_torch,
                                                        dtype=torch.float32)
        else:
            reverse_dir_idx = 1
            (W_torch, R_torch, bW_torch,
             bR_torch) = get_torch_weights(reverse_dir_idx)
            gru_state_dict['weight_ih_l0_reverse'] = torch.tensor(
                W_torch, dtype=torch.float32)
            gru_state_dict['weight_hh_l0_reverse'] = torch.tensor(
                R_torch, dtype=torch.float32)
            gru_state_dict['bias_ih_l0_reverse'] = torch.tensor(
                bW_torch, dtype=torch.float32)
            gru_state_dict['bias_hh_l0_reverse'] = torch.tensor(
                bR_torch, dtype=torch.float32)

    # Set GRU state dictionary
    gru.load_state_dict(gru_state_dict, strict=True)

    # Perform inference
    X_torch = torch.tensor(X, dtype=torch.float32)
    initial_h_torch = torch.tensor(initial_h, dtype=torch.float32)
    if direction == GRU_DIR_REVERSE:
        Y, next_h = gru(X_torch.flip([0]), initial_h_torch)
        Y = Y.flip([0])
    else:
        Y, next_h = gru(X_torch, initial_h_torch)

    # Reshape output to ONNX format [seq_length, num_directions, batch_size, hidden_size]
    Y_ref = Y.detach().numpy()
    Y_ref = np.reshape(Y_ref,
                       [seq_length, batch_size, num_directions, hidden_size])
    Y_ref = np.transpose(Y_ref, [0, 2, 1, 3])

    # Reshape states to ONNX format
    Y_h_ref = next_h.detach().numpy()

    # --------------------------------------- COMPUTE PYTHON-NUMPY REFERENCE -------------------------------------------
    # Create X slices
    Xslices = list()
    for t in range(seq_length):
        Xslices.append(np.reshape(X[t, :, :], [batch_size, input_size]))

    # Function to compute one GRU cell
    def compute_gru(forward):
        dir_idx = 0 if forward else (0 if direction == GRU_DIR_REVERSE else 1)
        Wz, Wr, Wh, Rz, Rr, Rh, bWz, bWr, bWh, bRz, bRr, bRh = get_weights(
            dir_idx)

        def f(x):
            return (1 / (1 + np.exp(-x)))

        def g(x):
            return np.tanh(x)

        def mm(x, w):
            return np.matmul(x, w.transpose())

        Ht = np.reshape(initial_h[dir_idx, :, :], [batch_size, hidden_size])

        Yslices = list()
        for t in range(seq_length):
            xt = Xslices[t] if forward else Xslices[seq_length - 1 - t]
            zt = f(mm(xt, Wz) + bWz + mm(Ht, Rz) + bRz)
            rt = f(mm(xt, Wr) + bWr + mm(Ht, Rr) + bRr)
            if linear_before_reset:
                htild = g(mm(xt, Wh) + bWh + rt * (mm(Ht, Rh) + bRh))
            else:
                htild = g(mm(xt, Wh) + bWh + mm(rt * Ht, Rh) + bRh)
            Ht = (1 - zt) * htild + zt * Ht
            Yslices.append(Ht)
        return Yslices, Ht

    Yslices = list()
    Hslices = list()

    # Compute forward GRU
    forwardYslices = list()
    if forwardEnabled:
        Yt, Ht = compute_gru(True)
        forwardYslices += Yt
        Hslices.append(Ht)

    # Compute reverse GRU
    reverseYslices = list()
    if reverseEnabled:
        Yt, Ht = compute_gru(False)
        reverseYslices += Yt
        Hslices.append(Ht)

    # Concatenate slices
    for t in range(seq_length):
        if forwardEnabled:
            Yslices.append(forwardYslices[t])
        if reverseEnabled:
            Yslices.append(reverseYslices[seq_length - 1 - t])
    Y_ref_np = np.concatenate(Yslices, 0).reshape(
        [seq_length, num_directions, batch_size, hidden_size])
    Y_h_ref_np = np.concatenate(Hslices, 0).reshape(
        [num_directions, batch_size, hidden_size])

    # Use the NumPy implementation when linear_before_reset is False; otherwise check that the two references agree
    if linear_before_reset is False:
        Y_ref = Y_ref_np
        Y_h_ref = Y_h_ref_np
    else:
        assert np.max(
            np.abs(Y_ref - Y_ref_np)
        ) < 1e-6, "Mismatch between Pytorch and Numpy GRU implementation"
        assert np.max(
            np.abs(Y_h_ref - Y_h_ref_np)
        ) < 1e-6, "Mismatch between Pytorch and Numpy GRU implementation"

    # ---------------------------------------------- NODE DEFINITION  --------------------------------------------------
    # Node inputs
    node_inputs = [
        'X', 'W', 'R', 'B' if has_bias else '', '',
        'initial_h' if has_initial_h else ''
    ]

    # Node outputs
    node_outputs = ['Y', 'Y_h']

    # GRU node definition
    gru_node_def = onnx.helper.make_node(
        'GRU',
        name='gru',
        inputs=node_inputs,
        outputs=node_outputs,
        hidden_size=hidden_size,
        direction=direction,
        linear_before_reset=linear_before_reset)

    # Error node definition
    err_node_def = onnx.helper.make_node('Sub',
                                         name='error',
                                         inputs=['Y', 'Y_ref'],
                                         outputs=['Y_err'])

    # --------------------------------------------- GRAPH DEFINITION  --------------------------------------------------
    graph_input = list()
    graph_init = list()
    graph_output = list()

    # GRU inputs
    graph_input.append(
        helper.make_tensor_value_info('X', TensorProto.FLOAT, X_shape))
    graph_input.append(
        helper.make_tensor_value_info('W', TensorProto.FLOAT, W_shape))
    graph_input.append(
        helper.make_tensor_value_info('R', TensorProto.FLOAT, R_shape))
    if has_bias:
        graph_input.append(
            helper.make_tensor_value_info('B', TensorProto.FLOAT, B_shape))
    if has_sequence_lens:
        graph_input.append(
            helper.make_tensor_value_info('sequence_lens', TensorProto.INT32,
                                          sequence_lens_shape))
    if has_initial_h:
        graph_input.append(
            helper.make_tensor_value_info('initial_h', TensorProto.FLOAT,
                                          initial_h_shape))

    # Reference input
    graph_input.append(
        helper.make_tensor_value_info('Y_ref', TensorProto.FLOAT, Y_shape))

    # GRU initializers
    graph_init.append(make_init('X', TensorProto.FLOAT, X))
    graph_init.append(make_init('W', TensorProto.FLOAT, W))
    graph_init.append(make_init('R', TensorProto.FLOAT, R))
    if has_bias:
        graph_init.append(make_init('B', TensorProto.FLOAT, B))
    if has_sequence_lens:
        graph_init.append(
            make_init('sequence_lens', TensorProto.INT32, sequence_lens))
    if has_initial_h:
        graph_init.append(make_init('initial_h', TensorProto.FLOAT, initial_h))

    # Reference initializer
    graph_init.append(make_init('Y_ref', TensorProto.FLOAT, Y_ref))

    # Graph outputs
    graph_output.append(
        helper.make_tensor_value_info('Y_err', TensorProto.FLOAT, Y_shape))

    # Define graph (GraphProto)
    graph_name = 'gru_test'
    graph_def = helper.make_graph([gru_node_def, err_node_def],
                                  graph_name,
                                  inputs=graph_input,
                                  outputs=graph_output)

    # Set initializers
    graph_def.initializer.extend(graph_init)

    # --------------------------------------------- MODEL DEFINITION  --------------------------------------------------
    # Define model (ModelProto)
    model_def = helper.make_model(graph_def, producer_name='onnx-gru')

    # Check model
    onnx.checker.check_model(model_def)

    # Save model in text form
    with open(model_path, 'w') as f:
        f.write(str(model_def))
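A usage sketch (hypothetical file name; GRU_DIRS, the GRU_DIR_* direction constants, and make_init are module-level helpers this generator assumes):

gen_gru_onnx_test_model('gru_bidir_test.onnxtxt',
                        seq_length=2,
                        batch_size=5,
                        hidden_size=4,
                        input_size=3,
                        direction=GRU_DIR_BIDIRECTIONAL,
                        has_bias=True,
                        has_sequence_lens=False,
                        has_initial_h=True)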
Example #32
import onnx
from onnx import helper, TensorProto

INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [1])
INPUT_2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [1])
OUTPUT_1 = helper.make_tensor_value_info('output1', TensorProto.FLOAT, [1])
OUTPUT_2 = helper.make_tensor_value_info('output2', TensorProto.FLOAT, [1])

nodes = [
    helper.make_node(
        'Mul',
        ['input1', 'input2'],
        ['output1'],
    ),
    helper.make_node(
        'Add',
        ['input1', 'input2'],
        ['output2'],
    ),
]
graph_def = helper.make_graph(
    nodes,
    'add_mul',
    [INPUT_1, INPUT_2],
    [OUTPUT_1, OUTPUT_2],
)
model_def = helper.make_model(
    graph_def,
    producer_name='add_mul.py',
    opset_imports=[onnx.OperatorSetIdProto(version=12)])
onnx.save(model_def, 'add_mul.onnx')
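A quick sanity check of the saved model with onnxruntime (a hypothetical follow-up, not part of the original script):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('add_mul.onnx')
feeds = {'input1': np.array([2.0], dtype=np.float32),
         'input2': np.array([3.0], dtype=np.float32)}
mul_out, add_out = sess.run(['output1', 'output2'], feeds)
assert np.allclose(mul_out, [6.0]) and np.allclose(add_out, [5.0])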
Example #33
    def construct_model_conv_squeezes(self,
                                      output_model_path,
                                      conv_input_shape,
                                      conv_weight_shape,
                                      conv_output_shape,
                                      opset=13):
        #             (input)
        #            /   |     \
        #         Conv1 conv2    conv3
        #           |     |         |
        #       Squeeze1 Squeeze2   |
        #           \      /        |
        #             add1          |
        #               |           |
        #              Unsqueeze    |
        #                      \    |
        #                       add2
        #                         |
        #                      (output)
        input_tensor = helper.make_tensor_value_info('input',
                                                     TensorProto.FLOAT,
                                                     conv_input_shape)

        conv1_weight_arr = np.random.randint(-1, 2, conv_weight_shape).astype(
            np.float32)
        conv1_weight_initializer = onnx.numpy_helper.from_array(
            conv1_weight_arr, name='conv1_weight')
        conv1_node = onnx.helper.make_node('Conv', ['input', 'conv1_weight'],
                                           ['conv1_output'],
                                           name='conv1_node')

        conv2_weight_arr = np.random.randint(-1, 2, conv_weight_shape).astype(
            np.float32)
        conv2_weight_initializer = onnx.numpy_helper.from_array(
            conv2_weight_arr, name='conv2_weight')
        conv2_node = onnx.helper.make_node('Conv', ['input', 'conv2_weight'],
                                           ['conv2_output'],
                                           name='conv2_node')

        conv3_weight_arr = np.random.randint(-1, 2, conv_weight_shape).astype(
            np.float32)
        conv3_weight_initializer = onnx.numpy_helper.from_array(
            conv3_weight_arr, name='conv3_weight')
        conv3_node = onnx.helper.make_node('Conv', ['input', 'conv3_weight'],
                                           ['conv3_output'],
                                           name='conv3_node')

        if (opset >= 13):
            squeeze_axes_initializer = onnx.numpy_helper.from_array(
                np.array([0], dtype=np.int64), name='squeeze_axes')
            squeeze1_node = helper.make_node('Squeeze',
                                             ['conv1_output', 'squeeze_axes'],
                                             ['squeeze1_output'],
                                             name='squeeze1_node')
            squeeze2_node = helper.make_node('Squeeze',
                                             ['conv2_output', 'squeeze_axes'],
                                             ['squeeze2_output'],
                                             name='squeeze2_node')
        else:
            squeeze1_node = helper.make_node('Squeeze', ['conv1_output'],
                                             ['squeeze1_output'],
                                             name='squeeze1_node',
                                             axes=[0])
            squeeze2_node = helper.make_node('Squeeze', ['conv2_output'],
                                             ['squeeze2_output'],
                                             name='squeeze2_node',
                                             axes=[0])

        add1_node = helper.make_node('Add',
                                     ['squeeze1_output', 'squeeze2_output'],
                                     ['add1_output'],
                                     name='add1_node')
        if (opset >= 13):
            unsqueeze_node = helper.make_node('Unsqueeze',
                                              ['add1_output', 'squeeze_axes'],
                                              ['unsqueeze_output'],
                                              name='unsqueeze_node')
        else:
            unsqueeze_node = helper.make_node('Unsqueeze', ['add1_output'],
                                              ['unsqueeze_output'],
                                              name='unsqueeze_node',
                                              axes=[0])

        output_tensor = helper.make_tensor_value_info('output',
                                                      TensorProto.FLOAT,
                                                      conv_output_shape)
        add2_node = helper.make_node('Add',
                                     ['unsqueeze_output', 'conv3_output'],
                                     ['output'],
                                     name='add2_node')

        initializers = [
            conv1_weight_initializer, conv2_weight_initializer,
            conv3_weight_initializer
        ]
        if (opset >= 13):
            initializers.append(squeeze_axes_initializer)
        graph = helper.make_graph([
            conv1_node, conv2_node, conv3_node, squeeze1_node, squeeze2_node,
            add1_node, unsqueeze_node, add2_node
        ],
                                  'TestOpSqueezes_test_model', [input_tensor],
                                  [output_tensor],
                                  initializer=initializers)
        model = helper.make_model(
            graph, opset_imports=[helper.make_opsetid("", opset)])
        model.ir_version = 7  # use stable onnx ir version
        onnx.save(model, output_model_path)
Example #34
        pdf_temp_2,
        max_score,
        normalized_scores,  # top_action_1, # top_action_2,
        # one_hot_top_action_int, one_hot_top_action_float,
        one_hot_top_action,
        exploit_prob,
        exploit_top_action,
        pdf,  # node_rand_sub, 
        node,
        node_rand,
        add_node,
        clip_node,
        topk_node
    ],
    'compute_graph',
    input_tensors,
    output_tensors,
    initializer_tensors)
model = oh.make_model(graph, producer_name='explore')

f = open("model1.onnx", "wb")
f.write(model.SerializeToString())
f.close()

tf_model = onnx.load('model1.onnx')

tf_rep = prepare(tf_model)

sample = tf_rep.run(np.asarray([[.3, .3, .4]]))
print(sample)
Example #35
def main(model_path,
         model_save_path=None,
         add_transpose_for_channel_last_first_issue=True,
         bottom_nodes_name=None):

    onnx_weight_node_list = []
    output_tensor_value_info = []
    onnx_node_list = []
    inner_node_shape_value_info = []

    # parse node information through tflite interpreter (tflite interpreter can't parse operator information in our target tensorflow version 1.15)
    interpreter = tf.lite.Interpreter(model_path)
    interpreter.allocate_tensors()

    # get model input info (assume there is only one input)
    input_details = interpreter.get_input_details()
    model_input_name = input_details[0]['name']
    input_tensor_value_info = None

    # generate tree
    tree_graph = Tree(model_path=model_path,
                      bottom_nodes_name=bottom_nodes_name,
                      defused=True)

    # get sequential node name
    sequential_keys = tree_graph.get_sequential_nodes_key()

    # get tree node in the form of {node_name: op_node_obj}
    tree_dict = tree_graph.get_nodes()

    #############################
    # build head transpose node #
    #############################
    for h_node in tree_graph.get_head_nodes():
        # transpose for channel last to channel first
        if add_transpose_for_channel_last_first_issue is True:
            logging.getLogger('tflite2onnx').debug(
                "generating transpose node for channel last first issue: " +
                h_node.node_name)
            input_tensor_value_info = helper.make_tensor_value_info(
                model_input_name, TensorProto.FLOAT,
                h_node.node_input_shape.tolist())
            h_transpose_node = build_head_transpose_node_for_channel_last_2_channel_first(
                input_tensor_value_info.name, h_node.node_name)

            onnx_node_list.append(h_transpose_node)
            h_node.input_nodes_name = [h_transpose_node.name]
        else:
            input_tensor_value_info = helper.make_tensor_value_info(
                model_input_name, TensorProto.FLOAT,
                tflite_utils.tflite2onnx_shape_map(
                    h_node.node_input_shape.tolist()))
            h_node.input_nodes_name = [input_tensor_value_info.name]

    ############################
    # build model node by node #
    ############################
    for key in sequential_keys:
        logging.getLogger('tflite2onnx').debug("generating: " + key)
        nodes, val, weight = tree_dict[key].generate()

        if (len(val) != 0) and (tree_dict[key].is_bottom_node is False):
            inner_node_shape_value_info.extend(val)
        if len(weight) != 0:
            onnx_weight_node_list.extend(weight)
        if len(nodes) != 0:
            onnx_node_list.extend(nodes)

    # sometimes there are sub-nodes inside one tree node, so we need to find the last one
    b_nodes = [node for node in tree_graph.get_bottom_nodes()]

    ###############################
    # build bottom transpose node #
    ###############################
    for b_node in b_nodes:

        out_value_info = None
        if add_transpose_for_channel_last_first_issue is True:
            logging.getLogger('tflite2onnx').debug(
                "generating transpose node for channel last first issue: " +
                b_node.node_name)
            out_value_info, transpose_node = build_button_transpose_node_for_channel_first_2_channel_last(
                b_node.node_list[-1], b_node.node_output_shape.tolist())

            if transpose_node is not None:
                onnx_node_list.append(transpose_node)
        else:
            out_value_info = set_end_node(b_node.node_list[-1],
                                          b_node.node_output_shape.tolist())
        output_tensor_value_info.append(out_value_info)

    input_init = [input_tensor_value_info]
    input_init.extend(onnx_weight_node_list)
    onnx_inputs = tflite_utils.make_kneron_valid_onnx_input(input_init)

    graph_cnn = helper.make_graph(onnx_node_list,
                                  'cnn_test',
                                  onnx_inputs,
                                  output_tensor_value_info,
                                  onnx_weight_node_list,
                                  value_info=inner_node_shape_value_info)

    cnn_model = helper.make_model(graph_cnn, producer_name='Kneron')
    cnn_model.opset_import[0].version = 11

    # add generated time to model meta data
    helper.set_model_props(
        cnn_model, {
            'Generated Time':
            datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S") + " (UTC+0)"
        })

    cnn_model = onnx.utils.polish_model(cnn_model)

    # save
    if model_save_path is not None:
        onnx.save(cnn_model, model_save_path)
    return cnn_model
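The head transpose above exists because TFLite tensors are channels-last
(NHWC) while ONNX convolutions expect channels-first (NCHW). A minimal sketch
of such a node (tensor names assumed, not the converter's actual helper):

from onnx import helper

nhwc_to_nchw = helper.make_node(
    'Transpose',
    inputs=['input'],        # NHWC tensor, e.g. [1, 32, 32, 3]
    outputs=['input_nchw'],  # NCHW tensor, e.g. [1, 3, 32, 32]
    perm=[0, 3, 1, 2],       # move the channel axis next to the batch axis
)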
Exemplo n.º 36
0
import numpy as np
import onnx
from onnx import helper, TensorProto, shape_inference, optimizer

# this snippet is an excerpt; the graph inputs it references are assumed to
# look roughly like this (a 3-channel input and five 3x3 filters)
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 32, 32])
W_info = helper.make_tensor_value_info('W', TensorProto.FLOAT, [5, 3, 3, 3])
W = helper.make_tensor('W', TensorProto.FLOAT, [5, 3, 3, 3],
                       np.random.rand(5, 3, 3, 3).astype(np.float32).flatten().tolist())

Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 5, 32, 32])

conv1 = helper.make_node(
        'Conv',         # operator type
        ['X', 'W'],     # inputs
        ['Y'],          # outputs
        # node attributes
        kernel_shape=[3, 3],
        strides=[1, 1],
        pads=[1, 1, 1, 1],
        )


graph_def = helper.make_graph(
        nodes=[conv1],           # graph nodes
        name='conv_model',       # graph name
        inputs=[X, W_info],      # graph inputs
        outputs=[Y],             # graph outputs
        initializer=[W],
        )

model_def = helper.make_model(graph_def, producer_name='benchmarks')

onnx.checker.check_model(model_def)
model_def = shape_inference.infer_shapes(model_def)
onnx.checker.check_model(model_def)
model_def = optimizer.optimize(model_def)  # note: onnx.optimizer was split out into the onnxoptimizer package in onnx >= 1.9
onnx.checker.check_model(model_def)

onnx.save_model(model_def, 'e2_conv.onnx')
Exemplo n.º 37
0
def _make_module(in_shape, kernel_output_channel, bias_shape, auto_pad_mode,
                 dilation, group, kernel_shape, pad, stride):
    inputs = []
    initializers = []

    # input
    input = helper.make_tensor_value_info('input', TensorProto.FLOAT, in_shape)
    inputs.append('input')

    # weight
    w_shape = []
    w_shape.append(kernel_output_channel)
    w_shape.append(in_shape[1])
    w_shape.extend(kernel_shape)
    weight = helper.make_tensor('weight',
                                TensorProto.FLOAT,
                                dims=w_shape,
                                vals=np.random.rand(*w_shape).astype(
                                    np.float32).flatten().tolist())
    inputs.append('weight')
    initializers.append(weight)

    # bias
    if bias_shape is not None:
        bias = helper.make_tensor('bias',
                                  TensorProto.FLOAT,
                                  dims=bias_shape,
                                  vals=np.random.rand(*bias_shape).astype(
                                      np.float32).flatten().tolist())
        inputs.append('bias')
        initializers.append(bias)

    # dilation
    d = [1, 1] if dilation is None else dilation

    # stride
    s = [1, 1] if stride is None else stride

    # pad
    padding = [0, 0, 0, 0]
    if (auto_pad_mode is None
            or auto_pad_mode == 'NOTSET') and pad is not None:
        padding = pad
    elif auto_pad_mode == 'SAME_UPPER':
        output_h = math.floor((in_shape[2] + s[0] - 1) / s[0])
        pad_nums = max(0, (output_h - 1) * s[0] + (w_shape[2] - 1) * d[0] + 1 -
                       in_shape[2])
        padding[0] = math.floor(pad_nums / 2)
        padding[2] = pad_nums - padding[0]

        output_w = math.floor((in_shape[3] + s[1] - 1) / s[1])
        pad_nums = max(0, (output_w - 1) * s[1] + (w_shape[3] - 1) * d[1] + 1 -
                       in_shape[3])
        padding[1] = math.floor(pad_nums / 2)
        padding[3] = pad_nums - padding[1]
    elif auto_pad_mode == 'SAME_LOWER':
        output_h = math.floor((in_shape[2] + s[0] - 1) / s[0])
        pad_nums = max(0, (output_h - 1) * s[0] + (w_shape[2] - 1) * d[0] + 1 -
                       in_shape[2])
        padding[0] = math.ceil(pad_nums / 2)
        padding[2] = pad_nums - padding[0]

        output_w = math.floor((in_shape[3] + s[1] - 1) / s[1])
        pad_nums = max(0, (output_w - 1) * s[1] + (w_shape[3] - 1) * d[1] + 1 -
                       in_shape[3])
        padding[1] = math.ceil(pad_nums / 2)
        padding[3] = pad_nums - padding[1]

    # output
    out_shape = []
    out_shape.append(in_shape[0])
    out_shape.append(w_shape[0])
    out_shape.append(
        math.floor((in_shape[2] + padding[0] + padding[2] -
                    ((w_shape[2] - 1) * d[0] + 1) + s[0]) / s[0]))
    out_shape.append(
        math.floor((in_shape[3] + padding[1] + padding[3] -
                    ((w_shape[3] - 1) * d[1] + 1) + s[1]) / s[1]))
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                           out_shape)

    attributes_dict = {}

    if auto_pad_mode is not None:
        attributes_dict['auto_pad'] = auto_pad_mode

    if dilation is not None:
        attributes_dict['dilations'] = dilation

    if group is not None:
        attributes_dict['group'] = group

    if kernel_shape is not None:
        attributes_dict['kernel_shape'] = kernel_shape

    if pad is not None:
        attributes_dict['pads'] = padding

    if stride is not None:
        attributes_dict['strides'] = stride

    node = onnx.helper.make_node('Conv',
                                 inputs=inputs,
                                 outputs=['output'],
                                 **attributes_dict)

    nodes = []
    nodes.append(node)

    graph_def = helper.make_graph(nodes,
                                  'test-model', [input], [output],
                                  initializer=initializers)

    model_def = helper.make_model(graph_def, producer_name='kendryte')

    return model_def
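A hypothetical invocation of _make_module above, exercising the SAME_UPPER
branch (all argument values are made up for illustration):

model = _make_module(in_shape=[1, 3, 32, 32],
                     kernel_output_channel=8,
                     bias_shape=[8],
                     auto_pad_mode='SAME_UPPER',
                     dilation=None,
                     group=None,
                     kernel_shape=[3, 3],
                     pad=None,
                     stride=[2, 2])
onnx.checker.check_model(model)  # declared output shape: [1, 8, 16, 16]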
Exemplo n.º 38
0
    def create_const_net(self, const_shapes, ir_version, opset=None):
        """
            ONNX net                                          IR net

            Inputs->Concat with Sum of consts->Output   =>    Input->Concat with consts
        """

        #
        #   Create ONNX model
        #

        from onnx import helper
        from onnx import TensorProto

        shape_len = 0
        for shape in const_shapes:
            if len(shape) > shape_len:
                shape_len = len(shape)
                input_shape = shape

        concat_axis = 0
        output_shape = input_shape.copy()
        output_shape[concat_axis] *= 2

        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)

        nodes = list()
        input_names = list()
        consts = list()
        for i, shape in enumerate(const_shapes):
            const = np.random.randint(-127, 127, shape).astype(np.float32)  # np.float was removed in NumPy 1.24
            const_name = 'const{}'.format(i + 1)
            nodes.append(helper.make_node(
                'Constant',
                inputs=[],
                outputs=[const_name],
                value=helper.make_tensor(
                    name='const_tensor',
                    data_type=TensorProto.FLOAT,
                    dims=const.shape,
                    vals=const.flatten(),
                ),
            ))
            input_names.append(const_name)
            consts.append(const)

        nodes.append(helper.make_node(
            'Sum',
            inputs=input_names,
            outputs=['sum']
        ))

        nodes.append(helper.make_node(
            'Concat',
            inputs=['input', 'sum'],
            outputs=['output'],
            axis=concat_axis
        ))

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            nodes,
            'test_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        args = dict(producer_name='test_model')
        if opset:
            args['opset_imports'] = [helper.make_opsetid("", opset)]
        onnx_net = helper.make_model(graph_def, **args)

        #   Create reference IR net

        ref_net = None

        return onnx_net, ref_net
Exemplo n.º 39
0
    def create_net(self, dyn_shapes, const_shapes, precision, ir_version, opset=None):
        """
            ONNX net                                IR net

            Inputs->Sum with consts->Output   =>    Input->Eltwise
        """

        #
        #   Create ONNX model
        #

        from onnx import helper
        from onnx import TensorProto

        inputs = list()
        input_names = list()
        out_shape_len = 0
        for i, shape in enumerate(dyn_shapes):
            input_name = 'input{}'.format(i + 1)
            inputs.append(helper.make_tensor_value_info(input_name, TensorProto.FLOAT, shape))
            input_names.append(input_name)
            if len(shape) > out_shape_len:
                out_shape_len = len(shape)
                output_shape = shape
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)

        nodes = list()
        consts = list()
        for i, shape in enumerate(const_shapes):
            const = np.random.randint(-127, 127, shape).astype(np.float32)  # np.float was removed in NumPy 1.24
            const_name = 'const{}'.format(i + 1)
            nodes.append(helper.make_node(
                'Constant',
                inputs=[],
                outputs=[const_name],
                value=helper.make_tensor(
                    name='const_tensor',
                    data_type=TensorProto.FLOAT,
                    dims=const.shape,
                    vals=const.flatten(),
                ),
            ))
            input_names.append(const_name)
            consts.append(const)

        nodes.append(helper.make_node(
            'Sum',
            inputs=input_names,
            outputs=['output']
        ))

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            nodes,
            'test_model',
            inputs,
            [output],
        )

        # Create the model (ModelProto)
        args = dict(producer_name='test_model')
        if opset:
            args['opset_imports'] = [helper.make_opsetid("", opset)]
        onnx_net = helper.make_model(graph_def, **args)

        #   Create reference IR net

        ref_net = None
        # Too complicated IR to generate by hand

        return onnx_net, ref_net
Exemplo n.º 40
0
def convert(args):
    # Read the model files.
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(args.fluid_model, exe)

        # Walk the blocks in the program and create the corresponding ONNX nodes
        onnx_nodes = []

        # Load parameters
        global_block = inference_program.global_block()
        for var_name in global_block.vars:
            var = global_block.var(var_name)
            if var_name not in ['feed', 'fetch'] and var.persistable:
                param = fluid.executor.fetch_var(var_name, inference_scope)
                param_node = helper.make_node(
                    'Constant',
                    inputs=[],
                    outputs=[var_name],
                    value=helper.make_tensor(
                        name=var_name,
                        dims=var.shape,
                        data_type=PADDLE_TO_ONNX_DTYPE[var.dtype],
                        vals=param.flatten().tolist()))
                onnx_nodes.append(param_node)

        # Create inputs
        inputs = [
            paddle_variable_to_onnx_tensor(v, global_block)
            for v in feed_target_names
        ]

        # Create outputs
        fetch_target_names = [
            fetch_target.name for fetch_target in fetch_targets
        ]
        outputs = [
            paddle_variable_to_onnx_tensor(v, global_block)
            for v in fetch_target_names
        ]

        # Create nodes
        for block in inference_program.blocks:
            for op in block.ops:
                if op.type in ops.node_maker:
                    # TODO(kuke): deal with the corner case that vars in
                    #     different blocks have the same name
                    node_proto = ops.node_maker[op.type](
                        inputs=op.input_arg_names,
                        attrs=op.attr_names,
                        outputs=op.output_arg_names)

                    onnx_nodes.append(node_proto)
                else:
                    if op.type not in ['feed', 'fetch']:
                        raise NotImplementedError("OP[%s] is not supported in "
                                                  "the converter!" % op.type)

        # Make graph
        model_name = os.path.basename(
            args.fluid_model.strip('/')).split('.')[0]
        onnx_graph = helper.make_graph(onnx_nodes, model_name, inputs, outputs)

        # Make model
        onnx_model = helper.make_model(onnx_graph,
                                       producer_name='PaddlePaddle')

        # Model check
        checker.check_model(onnx_model)

        # Output readable model
        print("The converted model is:\n{}".format(onnx_model))

        # Save converted model
        if args.onnx_model is not None:
            try:
                with open(args.onnx_model, 'wb') as f:
                    f.write(onnx_model.SerializeToString())
                print("Saved converted model to path: %s" % args.onnx_model)
            except IOError:
                print("Invalid ONNX model saving path: %s" % args.onnx_model)
Exemplo n.º 41
0
def test_cast_errors():
    np.random.seed(133391)
    input_data = np.ceil(np.random.rand(2, 3, 4) * 16)

    # missing 'to' attribute
    node = onnx.helper.make_node('Cast', inputs=['A'], outputs=['B'])
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT16, value.shape)
        for name, value in zip(node.output, ())
    ]  # type: ignore

    graph = make_graph([node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    with pytest.raises(RuntimeError):
        import_onnx_model(model)

    # unsupported data type representation
    node = onnx.helper.make_node('Cast',
                                 inputs=['A'],
                                 outputs=['B'],
                                 to=1.2345)
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.INT32, value.shape)
        for name, value in zip(node.output, ())
    ]  # type: ignore

    graph = make_graph([node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    with pytest.raises(RuntimeError):
        import_onnx_model(model)

    # unsupported input tensor data type:
    node = onnx.helper.make_node('Cast',
                                 inputs=['A'],
                                 outputs=['B'],
                                 to=onnx.TensorProto.INT32)
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.COMPLEX64, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.INT32, value.shape)
        for name, value in zip(node.output, ())
    ]  # type: ignore

    graph = make_graph([node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    with pytest.raises((RuntimeError, NgraphTypeError)):
        import_onnx_model(model)

    # unsupported output tensor data type:
    node = onnx.helper.make_node('Cast',
                                 inputs=['A'],
                                 outputs=['B'],
                                 to=onnx.TensorProto.COMPLEX128)
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.COMPLEX128, value.shape)
        for name, value in zip(node.output, ())
    ]  # type: ignore

    graph = make_graph([node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    with pytest.raises(RuntimeError):
        import_onnx_model(model)
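For contrast with the failure cases above: a well-formed Cast passes one of
the integer onnx.TensorProto data-type enums as the 'to' attribute, e.g.

node = onnx.helper.make_node('Cast', inputs=['A'], outputs=['B'],
                             to=onnx.TensorProto.FLOAT16)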
Exemplo n.º 42
0
    def construct_model_gemm(self, output_model_path):
        #      (input)
        #         |
        #        GEMM
        #         |
        #        Clip
        #         |
        #        GEMM
        #         |
        #      (output)
        input_name = "input"
        output_name = "output"
        initializers = []

        def make_gemm(input_name, weight_shape, weight_name, bias_shape,
                      bias_name, output_name):
            weight_data = np.random.normal(0, 0.1,
                                           weight_shape).astype(np.float32)
            initializers.append(
                onnx.numpy_helper.from_array(weight_data, name=weight_name))

            bias_data = np.random.normal(0, 0.1, bias_shape).astype(np.float32)
            initializers.append(
                onnx.numpy_helper.from_array(bias_data, name=bias_name))

            return onnx.helper.make_node(
                "Gemm",
                [input_name, weight_name, bias_name],
                [output_name],
                alpha=1.0,
                beta=1.0,
                transB=1,
            )

        # make gemm1 node
        gemm1_output_name = "gemm1_output"
        gemm1_node = make_gemm(
            input_name,
            [100, 10],
            "linear1.weight",
            [100],
            "linear1.bias",
            gemm1_output_name,
        )

        # make Clip
        clip_min_name = "clip_min"
        clip_max_name = "clip_max"
        clip_output_name = "clip_output"
        clip_inputs = [gemm1_output_name, clip_min_name, clip_max_name]
        clip_outputs = [clip_output_name]
        initializers.append(
            onnx.numpy_helper.from_array(np.array(-1.0, dtype=np.float32),
                                         name=clip_min_name))
        initializers.append(
            onnx.numpy_helper.from_array(np.array(1.0, dtype=np.float32),
                                         name=clip_max_name))
        clip_node = onnx.helper.make_node("Clip", clip_inputs, clip_outputs)

        # make gemm2 node
        gemm2_node = make_gemm(
            clip_output_name,
            [10, 100],
            "linear2.weight",
            [10],
            "linear2.bias",
            output_name,
        )

        # make graph
        input_tensor = helper.make_tensor_value_info(input_name,
                                                     TensorProto.FLOAT,
                                                     [-1, 10])
        output_tensor = helper.make_tensor_value_info(output_name,
                                                      TensorProto.FLOAT,
                                                      [-1, 10])
        graph_name = "gemm_test"
        graph = helper.make_graph(
            [gemm1_node, clip_node, gemm2_node],
            graph_name,
            [input_tensor],
            [output_tensor],
            initializer=initializers,
        )
        model = helper.make_model(graph,
                                  opset_imports=[helper.make_opsetid("", 13)])
        model.ir_version = 7  # use stable onnx ir version

        onnx.save(model, output_model_path)
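Since opset 11, Clip takes its bounds as optional inputs instead of
attributes, which is why construct_model_gemm registers clip_min/clip_max as
initializers. For comparison, the pre-opset-11 attribute form would be:

clip_attr_node = onnx.helper.make_node(
    "Clip", ["gemm1_output"], ["clip_output"],
    min=-1.0, max=1.0)  # valid only for opset <= 10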
Exemplo n.º 43
0
def GenerateModel3(model_name):
    nodes = [  # LayerNorm subgraph
        helper.make_node("Shape", ["input_ids"], ["shape1_out"], "shape1"),
        helper.make_node("Gather", ["shape1_out", "indices_0"],
                         ["gather0_out"], "gather0"),
        helper.make_node("Unsqueeze", ["gather0_out"], ["unsqueeze0_out"],
                         "unsqueeze0",
                         axes=[0]),
        helper.make_node("Shape", ["input_ids"], ["shape2_out"], "shape2"),
        helper.make_node("Gather", ["shape2_out", "indices_1"],
                         ["gather1_out"], "gather1"),
        helper.make_node("Unsqueeze", ["gather1_out"], ["unsqueeze1_out"],
                         "unsqueeze1",
                         axes=[0]),
        helper.make_node("Concat", ["unsqueeze0_out", "unsqueeze1_out"],
                         ["concat_out"],
                         "concat",
                         axis=0),
        helper.make_node("Cast", ["gather1_out"], ["cast_out"], "cast", to=7),
        helper.make_node("Range", ["start_0", "cast_out", "delta_1"],
                         ["range_out"], "range"),
        helper.make_node("Unsqueeze", ["range_out"], ["unsqueeze2_out"],
                         "unsqueeze2",
                         axes=[0]),
        helper.make_node("Expand", ["unsqueeze2_out", "concat_out"],
                         ["expand_out"], "expand"),
        helper.make_node("Gather", ["pos_embed", "expand_out"],
                         ["pos_gather_out"], "pos_gather"),
        helper.make_node("Gather", ["word_embed", "input_ids"],
                         ["word_gather_out"], "word_gather"),
        helper.make_node("Add", ["word_gather_out", "pos_gather_out"],
                         ["word_add_pos_out"], "word_add_pos"),
        helper.make_node("Gather", ["seg_embed", "segment_ids"],
                         ["seg_gather_out"], "seg_gather"),
        helper.make_node("Add", ["word_add_pos_out", "seg_gather_out"],
                         ["add3_out"], "add3"),
        helper.make_node("LayerNormalization",
                         ["add3_out", "layer_norm_weight", "layer_norm_bias"],
                         ["layernorm_out"],
                         "layernorm",
                         axis=-1,
                         epsilon=0.000009999999747378752),
        helper.make_node("Cast", ["input_mask"], ["mask_cast_out"],
                         "mask_cast",
                         to=6),
        helper.make_node("ReduceSum", ["mask_cast_out"], ["mask_index_out"],
                         "mask_index",
                         axes=[1],
                         keepdims=0),
        helper.make_node(
            "Attention",
            ["layernorm_out", "qkv_weights", "qkv_bias", "mask_index_out"],
            ["att_out"],
            "att",
            domain="com.microsoft",
            num_heads=2),
        helper.make_node("MatMul", ["att_out", "matmul_weight"],
                         ["matmul_out"], "matmul"),
        helper.make_node("Add", ["matmul_out", "add_bias"], ["add_out"],
                         "add"),
        helper.make_node("Add", ["add_out", "layernorm_out"], ["add2_out"],
                         "add2")
    ]

    # hidden_size=4, num_heads=2, max_seq_length=3
    initializers = [  # initializers
        helper.make_tensor('indices_0', TensorProto.INT64, [], [0]),
        helper.make_tensor('indices_1', TensorProto.INT64, [], [1]),
        helper.make_tensor('start_0', TensorProto.INT64, [], [0]),
        helper.make_tensor('delta_1', TensorProto.INT64, [], [1]),
        helper.make_tensor('word_embed', TensorProto.FLOAT, [2, 4],
                           [1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0]),
        helper.make_tensor('pos_embed', TensorProto.FLOAT, [4, 4], [
            1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0,
            2.0, 3.0, 4.0
        ]),
        helper.make_tensor('seg_embed', TensorProto.FLOAT, [2, 4],
                           [1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0]),
        helper.make_tensor('layer_norm_weight', TensorProto.FLOAT, [4],
                           [1.0, 2.0, 3.0, 4.0]),
        helper.make_tensor('layer_norm_bias', TensorProto.FLOAT, [4],
                           [0.1, 0.2, 0.3, 0.4]),
        helper.make_tensor('qkv_weights', TensorProto.FLOAT, [4, 4], [
            1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0,
            2.0, 3.0, 4.0
        ]),
        helper.make_tensor('qkv_bias', TensorProto.FLOAT, [4],
                           [0.1, 0.2, 0.3, 0.4]),
        helper.make_tensor('matmul_weight', TensorProto.FLOAT, [4, 4], [
            1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0,
            2.0, 3.0, 4.0
        ]),
        helper.make_tensor('add_bias', TensorProto.FLOAT, [4],
                           [0.1, 0.2, 0.3, 0.4]),
    ]

    graph = helper.make_graph(
        nodes,
        "EmbedLayerNorm_format3",  #name
        [  # inputs
            helper.make_tensor_value_info('input_ids', TensorProto.INT64,
                                          ['batch', 3]),
            helper.make_tensor_value_info('segment_ids', TensorProto.INT64,
                                          ['batch', 3]),
            helper.make_tensor_value_info('input_mask', TensorProto.INT64,
                                          ['batch', 3]),
        ],
        [  # outputs
            helper.make_tensor_value_info('add2_out', TensorProto.FLOAT,
                                          ['batch', 3, 4]),
        ],
        initializers)

    model = helper.make_model(graph)
    onnx.save(model, model_name)
Exemplo n.º 44
0
    def create_net_const(self, shape, scale, precision, ir_version):
        """
            ONNX net                                     IR net

            Input->Concat(+scaled const)->Output   =>    Input->Concat(+const)

        """

        #
        #   Create ONNX model
        #

        import onnx
        from onnx import helper
        from onnx import TensorProto
        import numpy as np

        concat_axis = 0
        output_shape = shape.copy()
        output_shape[concat_axis] *= 2

        input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)

        constant = np.random.randint(-127, 127, shape).astype(np.float32)  # np.float was removed in NumPy 1.24

        node_const_def = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=['const1'],
            value=helper.make_tensor(
                name='const_tensor',
                data_type=TensorProto.FLOAT,
                dims=constant.shape,
                vals=constant.flatten(),
            ),
        )

        node_def = onnx.helper.make_node(
            'Scale',
            inputs=['const1'],
            outputs=['scale'],
            scale=scale
        )

        node_concat_def = onnx.helper.make_node(
            'Concat',
            inputs=['input', 'scale'],
            outputs=['output'],
            axis=concat_axis
        )

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            [node_const_def, node_def, node_concat_def],
            'test_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def, producer_name='test_model')

        #
        #   Create reference IR net
        #
        ir_const = constant.flatten() * scale
        if precision == 'FP16':
            ir_const = ir_const.astype(np.float16)

        ref_net = None

        return onnx_net, ref_net
Exemplo n.º 45
0
def GenerateModel5(model_name):
    batch_size = 2
    hidden_size = 4
    attention_heads = 2
    sequence_length = 3

    nodes = [
        helper.make_node("Gather", ["word_embed", "input_ids"],
                         ["word_gather_out"],
                         "word_gather",
                         axis=0),
        helper.make_node("Add", ["word_gather_out", "pos_gather_out"],
                         ["word_add_pos_out"], "word_add_pos"),
        helper.make_node("Gather", ["seg_embed", "segment_ids"],
                         ["seg_gather_out"],
                         "seg_gather",
                         axis=0),
        helper.make_node("Add", ["word_add_pos_out", "seg_gather_out"],
                         ["add3_out"], "add3"),
        helper.make_node("LayerNormalization",
                         ["add3_out", "layer_norm_weight", "layer_norm_bias"],
                         ["layernorm_out"],
                         "layernorm",
                         axis=-1,
                         epsilon=0.000009999999747378752),
        helper.make_node("Cast", ["input_mask"], ["mask_cast_out"],
                         "mask_cast",
                         to=6),
        helper.make_node("ReduceSum", ["mask_cast_out"], ["mask_index_out"],
                         "mask_index",
                         axes=[1],
                         keepdims=0),
        helper.make_node(
            "Attention",
            ["layernorm_out", "qkv_weights", "qkv_bias", "mask_index_out"],
            ["att_out"],
            "att",
            domain="com.microsoft",
            num_heads=attention_heads),
        helper.make_node("MatMul", ["att_out", "matmul_weight"],
                         ["matmul_out"], "matmul"),
        helper.make_node("Add", ["matmul_out", "add_bias"], ["add_out"],
                         "add"),
        helper.make_node("Add", ["add_out", "layernorm_out"], ["add2_out"],
                         "add2")
    ]

    qkv_weights = [
        1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0,
        3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0,
        1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0,
        3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0,
        1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0
    ]

    initializers = [  # initializers
        helper.make_tensor('word_embed', TensorProto.FLOAT, [2, hidden_size],
                           [1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0]),
        helper.make_tensor(
            'pos_gather_out', TensorProto.FLOAT,
            [batch_size, sequence_length, hidden_size], [
                1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 8.0, 7.0, 6.0,
                1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 8.0, 7.0, 6.0
            ]),
        helper.make_tensor('seg_embed', TensorProto.FLOAT, [2, hidden_size],
                           [1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0]),
        helper.make_tensor('layer_norm_weight', TensorProto.FLOAT,
                           [hidden_size], [1.0, 2.0, 3.0, 4.0]),
        helper.make_tensor('layer_norm_bias', TensorProto.FLOAT, [hidden_size],
                           [0.1, 0.2, 0.3, 0.4]),
        helper.make_tensor('qkv_weights', TensorProto.FLOAT,
                           [hidden_size, 3 * hidden_size], qkv_weights),
        helper.make_tensor(
            'qkv_bias', TensorProto.FLOAT, [3 * hidden_size],
            [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4]),
        helper.make_tensor('matmul_weight', TensorProto.FLOAT,
                           [hidden_size, hidden_size], [
                               1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0,
                               2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0
                           ]),
        helper.make_tensor('add_bias', TensorProto.FLOAT, [hidden_size],
                           [0.1, 0.2, 0.3, 0.4]),
    ]

    graph = helper.make_graph(
        nodes,
        "EmbedLayerNorm_format5",  #name
        [  # inputs
            helper.make_tensor_value_info('input_ids', TensorProto.INT64,
                                          [batch_size, sequence_length]),
            helper.make_tensor_value_info('segment_ids', TensorProto.INT64,
                                          [batch_size, sequence_length]),
            helper.make_tensor_value_info('input_mask', TensorProto.INT64,
                                          [batch_size, sequence_length]),
        ],
        [  # outputs
            helper.make_tensor_value_info(
                'add2_out', TensorProto.FLOAT,
                [batch_size, sequence_length, hidden_size]),
        ],
        initializers)

    model = helper.make_model(graph)
    onnx.save(model, model_name)
Exemplo n.º 46
0
import onnx
from onnx import helper, TensorProto

# this snippet is an excerpt; the truncated beginning presumably defined the
# graph inputs/outputs and the Pad node along these lines (shapes assumed)
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [3, 2])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [3, 4])

node_def = helper.make_node(
    'Pad',  # node type
    ['X'],  # inputs
    ['Y'],  # outputs
    mode='constant',  # attributes
    value=1.5,
    pads=[0, 1, 0, 1],
)

# Create the graph (GraphProto)
graph_def = helper.make_graph(
    [node_def],
    'test-model',
    [X],
    [Y],
)

# Create the model (ModelProto)
model_def = helper.make_model(graph_def, producer_name='onnx-example')
model_def.opset_import[0].version = 10

print('The model is:\n{}'.format(model_def))
onnx.checker.check_model(model_def)
print('The model is checked!')

#####################################
# Same example with sklearn-onnx
# ++++++++++++++++++++++++++++++
#
# Every operator has its own class in *sklearn-onnx*.
# The list is dynamically created based on the installed
# onnx package.

from skl2onnx.algebra.onnx_ops import OnnxPad  # noqa
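The snippet is cut off here; judging by the import, the continuation builds
the same Pad through the skl2onnx algebra class, roughly as follows (API
usage assumed, not verified against a specific skl2onnx version):

import numpy as np

pad = OnnxPad('X', mode='constant', value=1.5, pads=[0, 1, 0, 1],
              output_names=['Y'], op_version=10)
model_def = pad.to_onnx({'X': np.empty((3, 2), dtype=np.float32)},
                        target_opset=10)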
Exemplo n.º 47
0
def gen_lstm_onnx_test_model(
    model_path,
    seq_length,
    batch_size,
    hidden_size,
    input_size,
    direction,
    has_bias,
    has_sequence_lens,
    has_initial_h,
    has_initial_c,
    has_peephole,
    input_forget=False,
):

    # Validate parameters
    assert direction in LSTM_DIRS, "ONNX LSTM direction invalid!"
    assert not has_sequence_lens, "ONNX LSTM Variable sequence length not supported"

    # Get number of directions
    num_directions = 2 if (direction == LSTM_DIR_BIDIRECTIONAL) else 1

    # Tensor sizes
    X_shape = [seq_length, batch_size, input_size]
    W_shape = [num_directions, 4 * hidden_size, input_size]
    R_shape = [num_directions, 4 * hidden_size, hidden_size]
    B_shape = [num_directions, 8 * hidden_size]
    sequence_lens_shape = [batch_size]
    initial_h_shape = [num_directions, batch_size, hidden_size]
    initial_c_shape = [num_directions, batch_size, hidden_size]
    P_shape = [num_directions, 3 * hidden_size]
    Y_shape = [seq_length, num_directions, batch_size, hidden_size]

    # Generate random inputs (weights are assumed concatenated in ONNX format: i,o,f,c)
    np.random.seed(1)
    X = np.random.randn(*X_shape)
    W = np.random.randn(*W_shape)
    R = np.random.randn(*R_shape)
    B = np.random.randn(*B_shape) if has_bias else np.zeros(B_shape)
    sequence_lens = (np.random.randint(1, seq_length, batch_size)
                     if has_sequence_lens else np.tile(seq_length, batch_size))
    initial_h = (np.random.randn(
        *initial_h_shape) if has_initial_h else np.zeros(initial_h_shape))
    initial_c = (np.random.randn(
        *initial_c_shape) if has_initial_c else np.zeros(initial_c_shape))
    P = np.random.randn(*P_shape) if has_peephole else np.zeros(P_shape)

    # Function to get all the weight components for the given direction
    def get_weights(dir_idx):
        Wi = np.reshape(W[dir_idx, 0 * hidden_size:1 * hidden_size, :],
                        [hidden_size, input_size])
        Wo = np.reshape(W[dir_idx, 1 * hidden_size:2 * hidden_size, :],
                        [hidden_size, input_size])
        Wf = np.reshape(W[dir_idx, 2 * hidden_size:3 * hidden_size, :],
                        [hidden_size, input_size])
        Wc = np.reshape(W[dir_idx, 3 * hidden_size:4 * hidden_size, :],
                        [hidden_size, input_size])
        Ri = np.reshape(R[dir_idx, 0 * hidden_size:1 * hidden_size, :],
                        [hidden_size, hidden_size])
        Ro = np.reshape(R[dir_idx, 1 * hidden_size:2 * hidden_size, :],
                        [hidden_size, hidden_size])
        Rf = np.reshape(R[dir_idx, 2 * hidden_size:3 * hidden_size, :],
                        [hidden_size, hidden_size])
        Rc = np.reshape(R[dir_idx, 3 * hidden_size:4 * hidden_size, :],
                        [hidden_size, hidden_size])
        bWi = np.reshape(B[dir_idx, 0 * hidden_size:1 * hidden_size],
                         [hidden_size])
        bWo = np.reshape(B[dir_idx, 1 * hidden_size:2 * hidden_size],
                         [hidden_size])
        bWf = np.reshape(B[dir_idx, 2 * hidden_size:3 * hidden_size],
                         [hidden_size])
        bWc = np.reshape(B[dir_idx, 3 * hidden_size:4 * hidden_size],
                         [hidden_size])
        bRi = np.reshape(B[dir_idx, 4 * hidden_size:5 * hidden_size],
                         [hidden_size])
        bRo = np.reshape(B[dir_idx, 5 * hidden_size:6 * hidden_size],
                         [hidden_size])
        bRf = np.reshape(B[dir_idx, 6 * hidden_size:7 * hidden_size],
                         [hidden_size])
        bRc = np.reshape(B[dir_idx, 7 * hidden_size:8 * hidden_size],
                         [hidden_size])
        Pi = np.tile(P[dir_idx, 0 * hidden_size:1 * hidden_size],
                     (batch_size, 1))
        Po = np.tile(P[dir_idx, 1 * hidden_size:2 * hidden_size],
                     (batch_size, 1))
        Pf = np.tile(P[dir_idx, 2 * hidden_size:3 * hidden_size],
                     (batch_size, 1))
        return (
            Wi,
            Wo,
            Wf,
            Wc,
            Ri,
            Ro,
            Rf,
            Rc,
            bWi,
            bWo,
            bWf,
            bWc,
            bRi,
            bRo,
            bRf,
            bRc,
            Pi,
            Po,
            Pf,
        )

    # Function to get PyTorch weights (which are in the i, f, c, o order)
    def get_torch_weights(dir_idx):
        (
            Wi,
            Wo,
            Wf,
            Wc,
            Ri,
            Ro,
            Rf,
            Rc,
            bWi,
            bWo,
            bWf,
            bWc,
            bRi,
            bRo,
            bRf,
            bRc,
            Pi,
            Po,
            Pf,
        ) = get_weights(dir_idx)
        W_torch = np.concatenate((Wi, Wf, Wc, Wo), 0)
        R_torch = np.concatenate((Ri, Rf, Rc, Ro), 0)
        bW_torch = np.concatenate((bWi, bWf, bWc, bWo), 0)
        bR_torch = np.concatenate((bRi, bRf, bRc, bRo), 0)
        return (W_torch, R_torch, bW_torch, bR_torch)

    # ----------------------------------------- COMPUTE PYTORCH REFERENCE ----------------------------------------------
    # Compute the reference using PyTorch. PyTorch's LSTM only supports forward/bidirectional, so the reverse LSTM is
    # emulated by running a forward LSTM over the time-reversed input.
    lstm = torch.nn.LSTM(
        input_size=input_size,
        hidden_size=hidden_size,
        num_layers=1,
        bias=True,
        batch_first=False,
        dropout=0,
        bidirectional=(direction == LSTM_DIR_BIDIRECTIONAL),
    )

    # Get LSTM state dictionary
    lstm_state_dict = lstm.state_dict()

    # Assign forward weights
    forwardEnabled = direction in [LSTM_DIR_FORWARD, LSTM_DIR_BIDIRECTIONAL]
    if forwardEnabled:
        forward_dir_idx = 0
        (W_torch, R_torch, bW_torch,
         bR_torch) = get_torch_weights(forward_dir_idx)
        lstm_state_dict["weight_ih_l0"] = torch.tensor(W_torch,
                                                       dtype=torch.float32)
        lstm_state_dict["weight_hh_l0"] = torch.tensor(R_torch,
                                                       dtype=torch.float32)
        lstm_state_dict["bias_ih_l0"] = torch.tensor(bW_torch,
                                                     dtype=torch.float32)
        lstm_state_dict["bias_hh_l0"] = torch.tensor(bR_torch,
                                                     dtype=torch.float32)

    # Assign reverse weights
    reverseEnabled = direction in [LSTM_DIR_REVERSE, LSTM_DIR_BIDIRECTIONAL]
    if reverseEnabled:
        if direction == LSTM_DIR_REVERSE:
            reverse_dir_idx = 0
            (W_torch, R_torch, bW_torch,
             bR_torch) = get_torch_weights(reverse_dir_idx)
            lstm_state_dict["weight_ih_l0"] = torch.tensor(W_torch,
                                                           dtype=torch.float32)
            lstm_state_dict["weight_hh_l0"] = torch.tensor(R_torch,
                                                           dtype=torch.float32)
            lstm_state_dict["bias_ih_l0"] = torch.tensor(bW_torch,
                                                         dtype=torch.float32)
            lstm_state_dict["bias_hh_l0"] = torch.tensor(bR_torch,
                                                         dtype=torch.float32)
        else:
            reverse_dir_idx = 1
            (W_torch, R_torch, bW_torch,
             bR_torch) = get_torch_weights(reverse_dir_idx)
            lstm_state_dict["weight_ih_l0_reverse"] = torch.tensor(
                W_torch, dtype=torch.float32)
            lstm_state_dict["weight_hh_l0_reverse"] = torch.tensor(
                R_torch, dtype=torch.float32)
            lstm_state_dict["bias_ih_l0_reverse"] = torch.tensor(
                bW_torch, dtype=torch.float32)
            lstm_state_dict["bias_hh_l0_reverse"] = torch.tensor(
                bR_torch, dtype=torch.float32)

    # Set LSTM state dictionary
    lstm.load_state_dict(lstm_state_dict, strict=True)

    # Perform inference
    X_torch = torch.tensor(X, dtype=torch.float32)
    initial_h_torch = torch.tensor(initial_h, dtype=torch.float32)
    initial_c_torch = torch.tensor(initial_c, dtype=torch.float32)
    if direction == LSTM_DIR_REVERSE:
        Y, (next_h, next_c) = lstm(X_torch.flip([0]),
                                   (initial_h_torch, initial_c_torch))
        Y = Y.flip([0])
    else:
        Y, (next_h, next_c) = lstm(X_torch, (initial_h_torch, initial_c_torch))

    # Reshape output to ONNX format [seq_length, num_directions, batch_size, hidden_size]
    Y_ref = Y.detach().numpy()
    Y_ref = np.reshape(Y_ref,
                       [seq_length, batch_size, num_directions, hidden_size])
    Y_ref = np.transpose(Y_ref, [0, 2, 1, 3])

    # Reshape states to ONNX format
    Y_h_ref = next_h.detach().numpy()
    Y_c_ref = next_c.detach().numpy()

    # --------------------------------------- COMPUTE PYTHON-NUMPY REFERENCE -------------------------------------------
    # Create X slices
    Xslices = list()
    for t in range(seq_length):
        Xslices.append(np.reshape(X[t, :, :], [batch_size, input_size]))

    # Function to compute one LSTM cell
    def compute_lstm(forward):
        dir_idx = 0 if forward else (0 if direction == LSTM_DIR_REVERSE else 1)
        (
            Wi,
            Wo,
            Wf,
            Wc,
            Ri,
            Ro,
            Rf,
            Rc,
            bWi,
            bWo,
            bWf,
            bWc,
            bRi,
            bRo,
            bRf,
            bRc,
            Pi,
            Po,
            Pf,
        ) = get_weights(dir_idx)

        def f(x):
            return 1 / (1 + np.exp(-x))

        def g(x):
            return np.tanh(x)

        def h(x):
            return np.tanh(x)

        def mm(x, w):
            return np.matmul(x, w.transpose())

        Ht = np.reshape(initial_h[dir_idx, :, :], [batch_size, hidden_size])
        Ct = np.reshape(initial_c[dir_idx, :, :], [batch_size, hidden_size])

        Yslices = list()
        for t in range(seq_length):
            xt = Xslices[t] if forward else Xslices[seq_length - 1 - t]
            ft = f(mm(xt, Wf) + bWf + mm(Ht, Rf) + bRf + Pf * Ct)
            if input_forget:
                it = 1 - ft
            else:
                it = f(mm(xt, Wi) + bWi + mm(Ht, Ri) + bRi + Pi * Ct)
            ctild = g(mm(xt, Wc) + bWc + mm(Ht, Rc) + bRc)
            Ct = ft * Ct + it * ctild
            ot = f(mm(xt, Wo) + bWo + mm(Ht, Ro) + bRo + Po * Ct)
            Ht = ot * h(Ct)
            Yslices.append(Ht)
        return Yslices, Ht, Ct

    Yslices = list()
    Hslices = list()
    Cslices = list()

    # Compute forward LSTM
    forwardYslices = list()
    if forwardEnabled:
        Yt, Ht, Ct = compute_lstm(True)
        forwardYslices += Yt
        Hslices.append(Ht)
        Cslices.append(Ct)

    # Compute reverse LSTM
    reverseYslices = list()
    if reverseEnabled:
        Yt, Ht, Ct = compute_lstm(False)
        reverseYslices += Yt
        Hslices.append(Ht)
        Cslices.append(Ct)

    # Concatenate slices
    for t in range(seq_length):
        if forwardEnabled:
            Yslices.append(forwardYslices[t])
        if reverseEnabled:
            Yslices.append(reverseYslices[seq_length - 1 - t])
    Y_ref_np = np.concatenate(Yslices, 0).reshape(
        [seq_length, num_directions, batch_size, hidden_size])
    Y_h_ref_np = np.concatenate(Hslices, 0).reshape(
        [num_directions, batch_size, hidden_size])
    Y_c_ref_np = np.concatenate(Cslices, 0).reshape(
        [num_directions, batch_size, hidden_size])

    # Use the numpy reference when peepholes or input_forget are used (PyTorch supports neither); otherwise check that the PyTorch and numpy results agree
    if has_peephole or input_forget:
        Y_ref = Y_ref_np
        Y_h_ref = Y_h_ref_np
        Y_c_ref = Y_c_ref_np
    else:
        assert (np.max(np.abs(Y_ref - Y_ref_np)) <
                1e-6), "Mismatch between Pytorch and Numpy LSTM implementation"
        assert (np.max(np.abs(Y_h_ref - Y_h_ref_np)) <
                1e-6), "Mismatch between Pytorch and Numpy LSTM implementation"
        assert (np.max(np.abs(Y_c_ref - Y_c_ref_np)) <
                1e-6), "Mismatch between Pytorch and Numpy LSTM implementation"

    # ---------------------------------------------- NODE DEFINITION  --------------------------------------------------
    # Node inputs
    node_inputs = [
        "X",
        "W",
        "R",
        "B" if has_bias else "",
        "",
        "initial_h" if has_initial_h else "",
        "initial_c" if has_initial_c else "",
        "P" if has_peephole else "",
    ]

    # Node outputs
    node_outputs = ["Y", "Y_h", "Y_c"]

    # LSTM node definition
    lstm_node_def = onnx.helper.make_node(
        "LSTM",
        name="lstm",
        inputs=node_inputs,
        outputs=node_outputs,
        hidden_size=hidden_size,
        direction=direction,
        input_forget=input_forget,
    )

    # Error node definition
    err_node_def = onnx.helper.make_node("Sub",
                                         name="error",
                                         inputs=["Y", "Y_ref"],
                                         outputs=["Y_err"])

    # --------------------------------------------- GRAPH DEFINITION  --------------------------------------------------
    graph_input = list()
    graph_init = list()
    graph_output = list()

    # LSTM inputs
    graph_input.append(
        helper.make_tensor_value_info("X", TensorProto.FLOAT, X_shape))
    graph_input.append(
        helper.make_tensor_value_info("W", TensorProto.FLOAT, W_shape))
    graph_input.append(
        helper.make_tensor_value_info("R", TensorProto.FLOAT, R_shape))
    if has_bias:
        graph_input.append(
            helper.make_tensor_value_info("B", TensorProto.FLOAT, B_shape))
    if has_sequence_lens:
        graph_input.append(
            helper.make_tensor_value_info("sequence_lens", TensorProto.INT32,
                                          sequence_lens_shape))
    if has_initial_h:
        graph_input.append(
            helper.make_tensor_value_info("initial_h", TensorProto.FLOAT,
                                          initial_h_shape))
    if has_initial_c:
        graph_input.append(
            helper.make_tensor_value_info("initial_c", TensorProto.FLOAT,
                                          initial_c_shape))
    if has_peephole:
        graph_input.append(
            helper.make_tensor_value_info("P", TensorProto.FLOAT, P_shape))

    # Reference input
    graph_input.append(
        helper.make_tensor_value_info("Y_ref", TensorProto.FLOAT, Y_shape))

    # LSTM initializers
    graph_init.append(make_init("X", TensorProto.FLOAT, X))
    graph_init.append(make_init("W", TensorProto.FLOAT, W))
    graph_init.append(make_init("R", TensorProto.FLOAT, R))
    if has_bias:
        graph_init.append(make_init("B", TensorProto.FLOAT, B))
    if has_sequence_lens:
        graph_init.append(
            make_init("sequence_lens", TensorProto.INT32, sequence_lens))
    if has_initial_h:
        graph_init.append(make_init("initial_h", TensorProto.FLOAT, initial_h))
    if has_initial_c:
        graph_init.append(make_init("initial_c", TensorProto.FLOAT, initial_c))
    if has_peephole:
        graph_init.append(make_init("P", TensorProto.FLOAT, P))

    # Reference initializer
    graph_init.append(make_init("Y_ref", TensorProto.FLOAT, Y_ref))

    # Graph outputs
    graph_output.append(
        helper.make_tensor_value_info("Y_err", TensorProto.FLOAT, Y_shape))

    # Define graph (GraphProto)
    graph_name = "lstm_test"
    graph_def = helper.make_graph(
        [lstm_node_def, err_node_def],
        graph_name,
        inputs=graph_input,
        outputs=graph_output,
    )

    # Set initializers
    graph_def.initializer.extend(graph_init)

    # --------------------------------------------- MODEL DEFINITION  --------------------------------------------------
    # Define model (ModelProto)
    model_def = helper.make_model(graph_def, producer_name="onnx-lstm")

    # Check model
    onnx.checker.check_model(model_def)

    # Save the model in text form
    with open(model_path, "w") as f:
        f.write(str(model_def))
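A hypothetical invocation of the generator above (assuming the module defines
LSTM_DIR_FORWARD == 'forward'):

gen_lstm_onnx_test_model(
    model_path='lstm_forward.onnxtxt',
    seq_length=3,
    batch_size=2,
    hidden_size=4,
    input_size=5,
    direction='forward',
    has_bias=True,
    has_sequence_lens=False,
    has_initial_h=True,
    has_initial_c=True,
    has_peephole=False,
)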
Exemplo n.º 48
0
opsets = []
onnxdomain = OperatorSetIdProto()
onnxdomain.version = 10
onnxdomain.domain = ""  # The empty string ("") or absence of this field implies the operator set that is defined as part of the ONNX specification.
opsets.append(onnxdomain)

msdomain = OperatorSetIdProto()
msdomain.version = 1
msdomain.domain = "com.microsoft"

opsets.append(msdomain)
kwargs = {}
kwargs["opset_imports"] = opsets

model_def = helper.make_model(graph_def,
                              producer_name="onnx-example",
                              **kwargs)

file_name = "gelu_format2_0" if switch_order else "gelu_format2_1"
if has_bias:
    file_name += "_with_bias"

if gelu_use_graph_input:
    file_name += "_use_graph_input"

if node_has_graph_output:
    file_name += "_use_graph_output"

file_name += ".onnx"
onnx.save(model_def, file_name)
print(file_name)
Exemplo n.º 49
0
import numpy as np
import onnx
from onnx import helper, TensorProto

# this snippet is an excerpt; the truncated beginning presumably created the
# Conv node and the weight/bias arrays along these lines (values assumed)
W = np.random.rand(1, 1, 2, 2).astype(np.float32)
B = np.random.rand(1).astype(np.float32)
node_def = helper.make_node('Conv', inputs=['data', 'W', 'B'], outputs=['y'],
                            kernel_shape=[2, 2], pads=[1, 1, 1, 1])
weight_tensor = helper.make_tensor(name='weight',
                                   data_type=TensorProto.FLOAT,
                                   dims=(1, 1, 2, 2),
                                   vals=W.flatten().tolist())
bias_tensor = helper.make_tensor(name='bias',
                                 data_type=TensorProto.FLOAT,
                                 dims=(1, ),
                                 vals=B.reshape(1).tolist())
# Create the graph (GraphProto)
graph_def = helper.make_graph(
    [node_def],
    "test-model",
    inputs=[
        helper.make_tensor_value_info("data", TensorProto.FLOAT, [1, 1, 3, 3]),
        helper.make_tensor_value_info("W", TensorProto.FLOAT, [1, 1, 2, 2]),
        helper.make_tensor_value_info("B", TensorProto.FLOAT, [
            1,
        ])
    ],
    outputs=[
        helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 1, 4, 4])
    ])

graph_def.initializer.extend([weight_tensor])
graph_def.initializer.extend([bias_tensor])
graph_def.initializer[0].name = 'W'
graph_def.initializer[1].name = 'B'
# Create the model (ModelProto)
model_def = helper.make_model(graph_def, producer_name='onnx-conv')

with open('simpleConv.onnxtxt', 'w') as f:
    f.write(str(model_def))

onnx.checker.check_model(model_def)
print('The model is checked!')
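The post-hoc renaming of graph_def.initializer above can be avoided by naming
the tensors when they are created:

weight_tensor = helper.make_tensor(name='W', data_type=TensorProto.FLOAT,
                                   dims=(1, 1, 2, 2), vals=W.flatten().tolist())
bias_tensor = helper.make_tensor(name='B', data_type=TensorProto.FLOAT,
                                 dims=(1,), vals=B.reshape(1).tolist())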
Exemplo n.º 50
0
def save_model(graph, file_name):
    model = helper.make_model(graph)
    onnx.checker.check_model(model)
    onnx.save(model, file_name)
Exemplo n.º 51
0
    def test_merge_drop_unnecessary_initializers_and_value_info(
            self):  # type: () -> None
        '''
        Tests automatic removal of initializers when merging graphs
        '''
        ops = [helper.make_opsetid("", 10)]

        g = GraphProto()
        g.input.extend(
            [helper.make_tensor_value_info('x', TensorProto.FLOAT, [])])
        g.output.extend(
            [helper.make_tensor_value_info('y', TensorProto.FLOAT, [])])
        g.node.extend(
            [helper.make_node('Identity', inputs=['x'], outputs=['y'])])

        g1 = GraphProto()
        g1.CopyFrom(g)
        g1.name = 'g1'
        m1 = helper.make_model(g1, producer_name='test', opset_imports=ops)
        checker.check_model(m1)

        g2 = GraphProto()
        g2.CopyFrom(g)
        g2.name = 'g2'
        g2.initializer.extend([
            helper.make_tensor(name='x',
                               data_type=TensorProto.FLOAT,
                               dims=(),
                               vals=[0])
        ])
        m2 = helper.make_model(g2, producer_name='test', opset_imports=ops)
        checker.check_model(m2)

        g3 = GraphProto()
        g3.CopyFrom(g)
        g3.name = 'g3'
        g3.sparse_initializer.extend([_make_sparse_tensor('x')])
        m3 = helper.make_model(g3, producer_name='test', opset_imports=ops)
        checker.check_model(m3)

        g4 = GraphProto()
        g4.CopyFrom(g)
        g4.name = 'g4'
        g4.value_info.extend(
            [helper.make_tensor_value_info('x', TensorProto.FLOAT, [])])
        m4 = helper.make_model(g4, producer_name='test', opset_imports=ops)
        checker.check_model(m4)

        # Initializer 'x' from m1 is removed, because there is no longer an input with that name
        out_m1 = compose.merge_models(m1,
                                      m2,
                                      prefix1='m1/',
                                      io_map=[('y', 'x')])
        self.assertEqual(0, len(out_m1.graph.initializer))

        # Sparse initializer 'x' from m1 is removed, because there is no longer an input with that name
        out_m2 = compose.merge_models(m1,
                                      m3,
                                      prefix1='m1/',
                                      io_map=[('y', 'x')])
        self.assertEqual(0, len(out_m2.graph.initializer))

        # Value info 'x' from m1 is removed, because there is no longer an input with that name
        out_m3 = compose.merge_models(m1,
                                      m4,
                                      prefix1='m1/',
                                      io_map=[('y', 'x')])
        self.assertEqual(0, len(out_m3.graph.value_info))
Exemplo n.º 52
0
    def test_training_info_proto(self):  # type: () -> None
        # Inference graph.
        A_shape = [2, 2]
        A_name = 'A'
        A = np.random.rand(*A_shape).astype(np.float32)
        A_initializer = numpy_helper.from_array(A, name=A_name)
        A_value_info = helper.make_tensor_value_info(A_name, TensorProto.FLOAT,
                                                     A_shape)

        B_shape = [2, 2]
        B_name = 'B'
        B = np.random.rand(*B_shape).astype(np.float32)
        B_initializer = numpy_helper.from_array(B, name=B_name)
        B_value_info = helper.make_tensor_value_info(B_name, TensorProto.FLOAT,
                                                     B_shape)

        C_shape = [2, 2]
        C_name = 'C'
        C_value_info = helper.make_tensor_value_info(C_name, TensorProto.FLOAT,
                                                     C_shape)

        inference_node = helper.make_node('MatMul',
                                          inputs=[A_name, B_name],
                                          outputs=[C_name])

        inference_graph = helper.make_graph([inference_node],
                                            'simple_inference',
                                            [A_value_info, B_value_info],
                                            [C_value_info],
                                            [A_initializer, B_initializer])

        # Training graph
        X_shape = [2, 2]
        X_name = 'X'
        X = np.random.rand(*X_shape).astype(np.float32)
        X_initializer = numpy_helper.from_array(X, name=X_name)
        X_value_info = helper.make_tensor_value_info(X_name, TensorProto.FLOAT,
                                                     X_shape)

        Y_shape = [2, 2]
        Y_name = 'Y'
        Y_value_info = helper.make_tensor_value_info(Y_name, TensorProto.FLOAT,
                                                     Y_shape)

        node = helper.make_node(
            'MatMul',
            inputs=[X_name, C_name],  # tensor "C" is from inference graph.
            outputs=[Y_name])

        training_graph = helper.make_graph([node], 'simple_training',
                                           [X_value_info], [Y_value_info],
                                           [X_initializer])

        # Capture assignment of B <--- Y.
        training_info = helper.make_training_info(training_graph,
                                                  [(B_name, Y_name)], None,
                                                  None)

        # Create a model with both inference and training information.
        model = helper.make_model(inference_graph)
        # Check if the inference-only part is correct.
        onnx.checker.check_model(model)
        # Insert training information.
        new_training_info = model.training_info.add()
        new_training_info.CopyFrom(training_info)

        # Generate the actual training graph from the training information so
        # that we can run the onnx checker and verify that the full training
        # graph is valid. As defined in the spec, the full training graph is
        # formed by concatenating the corresponding fields.
        full_training_graph = helper.make_graph(
            list(model.graph.node) +
            list(model.training_info[0].algorithm.node), 'full_training_graph',
            list(model.graph.input) +
            list(model.training_info[0].algorithm.input),
            list(model.graph.output) +
            list(model.training_info[0].algorithm.output),
            list(model.graph.initializer) +
            list(model.training_info[0].algorithm.initializer))

        # Wrap full training graph as a ModelProto so that we can run checker.
        full_training_model = helper.make_model(full_training_graph)
        full_training_model_with_shapes = shape_inference.infer_shapes(
            full_training_model)
        onnx.checker.check_model(full_training_model_with_shapes)
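The binding captured above can be read back out of the model; a small sketch, assuming only that TrainingInfoProto stores the pairs passed to make_training_info in its update_binding field:

for kv in model.training_info[0].update_binding:
    # expected to print 'B <- Y' for the binding created above
    print('%s <- %s' % (kv.key, kv.value))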
Exemplo n.º 53
0
    def test_overlapping_function_names(self):  # type: () -> None
        '''
        Tests that models whose local function names overlap can be merged,
        with prefixes disambiguating the local function entries
        '''
        ops = [helper.make_opsetid("", 10), helper.make_opsetid("local", 10)]

        def _make_function(
                domain,  # type: Text
                fname,  # type: Text
                inputs,  # type: List[Text]
                outputs,  # type: List[Text]
                nodes,  # type: List[NodeProto]
        ):  # type: (...) -> FunctionProto
            f = FunctionProto()
            f.domain = domain
            f.name = fname
            f.input.extend(inputs)
            f.output.extend(outputs)
            f.node.extend(nodes)
            f.opset_import.extend(ops)
            return f

        g = GraphProto()
        g.input.extend([
            helper.make_tensor_value_info('x0', TensorProto.FLOAT, []),
            helper.make_tensor_value_info('x1', TensorProto.FLOAT, [])
        ])
        g.output.extend([
            helper.make_tensor_value_info('y', TensorProto.FLOAT, []),
        ])
        g.node.extend([
            helper.make_node('f1',
                             domain='local',
                             inputs=['x0', 'x1'],
                             outputs=['y'])
        ])

        g1 = GraphProto()
        g1.CopyFrom(g)
        g1.name = 'g1'
        m1 = helper.make_model(g1, producer_name='test', opset_imports=ops)
        m1.functions.extend([
            _make_function(
                'local', 'f1', ['x0', 'x1'], ['y'],
                [helper.make_node('Add', inputs=['x0', 'x1'], outputs=['y'])])
        ])
        checker.check_model(m1)

        g2 = GraphProto()
        g2.CopyFrom(g)
        g2.name = 'g2'
        m2 = helper.make_model(g2, producer_name='test', opset_imports=ops)
        m2.functions.extend([
            _make_function(
                'local', 'f1', ['x0', 'x1'], ['y'],
                [helper.make_node('Mul', inputs=['x0', 'x1'], outputs=['y'])])
        ])
        checker.check_model(m2)

        m = compose.merge_models(m1,
                                 m2,
                                 io_map=[('y', 'x0'), ('y', 'x1')],
                                 prefix1='m1/',
                                 prefix2='m2/')
        checker.check_model(m)

        nodes = [n.op_type for n in m.graph.node]
        self.assertEqual(['m1/f1', 'm2/f1'], nodes)

        functions = [f.name for f in m.functions]
        self.assertEqual(['m1/f1', 'm2/f1'], functions)

        g3 = GraphProto()
        g3.CopyFrom(g)
        g3.name = 'g3'
        g3.node[0].op_type = 'f2'
        m3 = helper.make_model(g3, producer_name='test', opset_imports=ops)
        m3.functions.extend([
            _make_function('local', 'f1', ['x0', 'x1'], ['y'], [
                helper.make_node('Add', inputs=['x0', 'x1'], outputs=['y0']),
                helper.make_node('Mul', inputs=['x0', 'x1'], outputs=['y1']),
                helper.make_node('Add', inputs=['y0', 'y1'], outputs=['y'])
            ]),
            _make_function('local', 'f2', ['x0', 'x1'], ['y'], [
                helper.make_node(
                    'f1', domain='local', inputs=['x0', 'x1'], outputs=['y0']),
                helper.make_node('Mul', inputs=['x0', 'x1'], outputs=['y1']),
                helper.make_node('Add', inputs=['y0', 'y1'], outputs=['y'])
            ])
        ])
        checker.check_model(m3)

        m = compose.merge_models(m1,
                                 m3,
                                 io_map=[('y', 'x0'), ('y', 'x1')],
                                 prefix1='m1/',
                                 prefix2='m3/')
        checker.check_model(m)

        nodes = [n.op_type for n in m.graph.node]
        self.assertEqual(['m1/f1', 'm3/f2'], nodes)

        functions = [f.name for f in m.functions]
        self.assertEqual(['m1/f1', 'm3/f1', 'm3/f2'], functions)

        self.assertEqual(['Add'], [n.op_type for n in m.functions[0].node])
        self.assertEqual(['Add', 'Mul', 'Add'],
                         [n.op_type for n in m.functions[1].node])
        self.assertEqual(['m3/f1', 'Mul', 'Add'],
                         [n.op_type for n in m.functions[2].node])
Exemplo n.º 54
0
    def _test_overlapping_names(
            self,
            inputs0=['i0', 'i1'],  # type: List[Text]
            inputs1=['i2', 'i3'],  # type: List[Text]
            outputs0=['o0', 'o1'],  # type: List[Text]
            outputs1=['o2', 'o3'],  # type: List[Text]
            value_info0=['v0', 'v1'],  # type: List[Text]
            value_info1=['v2', 'v3'],  # type: List[Text]
            initializer0=['init0', 'init1'],  # type: List[Text]
            initializer1=['init2', 'init3'],  # type: List[Text]
            sparse_initializer0=['sparse_init0',
                                 'sparse_init1'],  # type: List[Text]
            sparse_initializer1=['sparse_init2',
                                 'sparse_init3'],  # type: List[Text]
    ):  # type: (...) -> None
        n0 = [
            helper.make_node('Identity',
                             inputs=[inputs0[i]],
                             outputs=[outputs0[i]])
            for i in range(len(inputs0))
        ]
        i0 = [
            helper.make_tensor_value_info(inputs0[i], TensorProto.FLOAT, [])
            for i in range(len(inputs0))
        ]
        o0 = [
            helper.make_tensor_value_info(outputs0[i], TensorProto.FLOAT, [])
            for i in range(len(outputs0))
        ]
        vi0 = [
            helper.make_tensor_value_info(value_info0[i], TensorProto.FLOAT,
                                          []) for i in range(len(value_info0))
        ]
        init0 = [
            helper.make_tensor(name=initializer0[i],
                               data_type=TensorProto.INT64,
                               dims=(),
                               vals=[1]) for i in range(len(initializer0))
        ]

        sparse_init0 = [
            _make_sparse_tensor(sparse_initializer0[i])
            for i in range(len(sparse_initializer0))
        ]

        n1 = [
            helper.make_node('Identity',
                             inputs=[inputs1[i]],
                             outputs=[outputs1[i]])
            for i in range(len(inputs1))
        ]
        i1 = [
            helper.make_tensor_value_info(inputs1[i], TensorProto.FLOAT, [])
            for i in range(len(inputs1))
        ]
        o1 = [
            helper.make_tensor_value_info(outputs1[i], TensorProto.FLOAT, [])
            for i in range(len(outputs1))
        ]
        vi1 = [
            helper.make_tensor_value_info(value_info1[i], TensorProto.FLOAT,
                                          []) for i in range(len(value_info1))
        ]
        init1 = [
            helper.make_tensor(name=initializer1[i],
                               data_type=TensorProto.INT64,
                               dims=(),
                               vals=[1]) for i in range(len(initializer1))
        ]
        sparse_init1 = [
            _make_sparse_tensor(sparse_initializer1[i])
            for i in range(len(sparse_initializer1))
        ]

        ops = [helper.make_opsetid("", 10)]
        m0 = helper.make_model(helper.make_graph(
            nodes=n0,
            name='g0',
            inputs=i0,
            outputs=o0,
            value_info=vi0,
            initializer=init0,
            sparse_initializer=sparse_init0),
                               producer_name='test',
                               opset_imports=ops)
        m1 = helper.make_model(helper.make_graph(
            nodes=n1,
            name='g1',
            inputs=i1,
            outputs=o1,
            value_info=vi1,
            initializer=init1,
            sparse_initializer=sparse_init1),
                               producer_name='test',
                               opset_imports=ops)

        overlap = compose.check_overlapping_names(m0.graph, m1.graph)
        i = 0

        overlapping_inputs = list(set(inputs0) & set(inputs1))
        overlapping_outputs = list(set(outputs0) & set(outputs1))
        overlapping_edges = list(set(overlapping_inputs + overlapping_outputs))
        if len(overlapping_edges) > 0:
            self.assertEqual(overlap[i], ('edge', overlapping_edges))
            i += 1

        overlapping_vis = list(set(value_info0) & set(value_info1))
        if len(overlapping_vis) > 0:
            self.assertEqual(overlap[i], ('value_info', overlapping_vis))
            i += 1

        overlapping_init = list(set(initializer0) & set(initializer1))
        if len(overlapping_init) > 0:
            self.assertEqual(overlap[i], ('initializer', overlapping_init))
            i += 1

        overlapping_sparse_init = list(
            set(sparse_initializer0) & set(sparse_initializer1))
        if len(overlapping_sparse_init) > 0:
            expected_overlap = []
            for overlapping_name in overlapping_sparse_init:
                expected_overlap.append(overlapping_name + '_values')
                expected_overlap.append(overlapping_name + '_idx')
            self.assertEqual(overlap[i],
                             ('sparse_initializer', expected_overlap))
            i += 1

        m0_new = compose.add_prefix(m0, prefix='g0/')
        overlap = compose.check_overlapping_names(m0_new.graph, m1.graph)
        self.assertEqual(0, len(overlap))
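A caller's sketch (the colliding names are illustrative): invoking the helper from the same TestCase with overlapping input names exercises the 'edge' branch above.

self._test_overlapping_names(inputs0=['i0', 'i1'], inputs1=['i1', 'i2'])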
Exemplo n.º 55
0
def test_cast_errors():
    from onnx.onnx_cpp2py_export.checker import ValidationError

    np.random.seed(133391)
    input_data = np.ceil(np.random.rand(2, 3, 4) * 16)

    # missing 'to' attribute
    node = onnx.helper.make_node("Cast", inputs=["A"], outputs=["B"])
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(node.output[0], onnx.TensorProto.FLOAT16,
                               input_data.shape)
    ]  # type: ignore

    graph = make_graph([node], "compute_graph", input_tensors, output_tensors)
    model = make_model(graph, producer_name="NgraphBackend")
    with pytest.raises(ValidationError):
        import_onnx_model(model)

    # unsupported data type representation
    node = onnx.helper.make_node("Cast",
                                 inputs=["A"],
                                 outputs=["B"],
                                 to=1.2345)
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(node.output[0], onnx.TensorProto.INT32,
                               input_data.shape)
    ]  # type: ignore

    graph = make_graph([node], "compute_graph", input_tensors, output_tensors)
    model = make_model(graph, producer_name="NgraphBackend")
    with pytest.raises(ValidationError):
        import_onnx_model(model)

    # unsupported input tensor data type:
    node = onnx.helper.make_node("Cast",
                                 inputs=["A"],
                                 outputs=["B"],
                                 to=onnx.TensorProto.INT32)
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.COMPLEX64, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(node.output[0], onnx.TensorProto.INT32,
                               input_data.shape)
    ]  # type: ignore

    graph = make_graph([node], "compute_graph", input_tensors, output_tensors)
    model = make_model(graph, producer_name="NgraphBackend")
    with pytest.raises((RuntimeError, NgraphTypeError)):
        import_onnx_model(model)

    # unsupported output tensor data type:
    node = onnx.helper.make_node("Cast",
                                 inputs=["A"],
                                 outputs=["B"],
                                 to=onnx.TensorProto.COMPLEX128)
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(node.input, [input_data])
    ]
    output_tensors = [
        make_tensor_value_info(node.output[0], onnx.TensorProto.COMPLEX128,
                               input_data.shape)
    ]  # type: ignore

    graph = make_graph([node], "compute_graph", input_tensors, output_tensors)
    model = make_model(graph, producer_name="NgraphBackend")
    with pytest.raises(RuntimeError):
        import_onnx_model(model)
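For contrast, a sketch of a well-formed Cast built with the same helpers and names as above; with a valid integer 'to' value taken from the TensorProto.DataType enum, the ONNX checker accepts the model.

node = onnx.helper.make_node("Cast", inputs=["A"], outputs=["B"],
                             to=onnx.TensorProto.FLOAT16)
input_tensors = [make_tensor_value_info("A", onnx.TensorProto.FLOAT,
                                        input_data.shape)]
output_tensors = [make_tensor_value_info("B", onnx.TensorProto.FLOAT16,
                                         input_data.shape)]
graph = make_graph([node], "compute_graph", input_tensors, output_tensors)
model = make_model(graph, producer_name="NgraphBackend")
onnx.checker.check_model(model)  # no ValidationError raised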
Exemplo n.º 56
0
def convert_topology(topology,
                     model_name,
                     doc_string,
                     target_opset,
                     targeted_onnx,
                     channel_first_inputs=None):
    '''
    This function converts our Topology object defined in _parser.py into an ONNX model (type: ModelProto).
    :param topology: The Topology object we are going to convert
    :param model_name: GraphProto's name. Let "model" denote the returned model; the string "model_name" is
    assigned to "model.graph.name"
    :param doc_string: A string attached to the produced model
    :param target_opset: A number, for example, 7 for ONNX 1.2 and 8 for ONNX 1.3
    :param targeted_onnx: (deprecated) A string specifying the targeted ONNX version of the produced model.
    Possible values include '1.1.2', '1.2', and so on.
    :return: An ONNX ModelProto
    '''
    if targeted_onnx is not None and StrictVersion(
            targeted_onnx) != StrictVersion(onnx.__version__):
        warnings.warn(
            'targeted_onnx is deprecated, please specify target_opset for the target model.\n'
            +
            '*** ONNX version conflict found. The installed version is %s while the targeted version is %s'
            % (onnx.__version__, targeted_onnx))

    opset_from_onnx_version = onnx.defs.onnx_opset_version()
    if target_opset is None:
        target_opset = opset_from_onnx_version
    elif target_opset > opset_from_onnx_version:
        raise RuntimeError(
            "target_opset %d is higher than the latest opset %d supported by the installed onnx package."
            % (target_opset, opset_from_onnx_version))

    topology._initialize_graph_status_for_traversing()

    container = ModelComponentContainer(target_opset)

    # Buffer the model's roots and leaves as ONNX objects. They will be added into the ModelComponentContainer later.
    tensor_inputs = {}
    other_inputs = {}
    tensor_outputs = {}
    other_outputs = {}
    for scope in topology.scopes:
        for variable in scope.variables.values():
            if variable.is_root:
                if isinstance(variable.type,
                              (TensorType, Int64Type, FloatType, StringType)):
                    tensor_inputs[variable.raw_name] = variable
                else:
                    other_inputs[variable.raw_name] = variable
            if variable.is_leaf:
                if isinstance(variable.type,
                              (TensorType, Int64Type, FloatType, StringType)):
                    tensor_outputs[variable.raw_name] = variable
                else:
                    other_outputs[variable.raw_name] = variable

    # Add roots to the graph according to their order in the original model
    invalid_name = []
    nhwc_inputs = []
    if channel_first_inputs is None:
        channel_first_inputs = []
    for name in topology.raw_model.input_names:
        # Check input naming convention
        input_name = name.replace('_', '').replace(":", "").replace("/", "")
        if input_name and (input_name[0].isdigit() or
                           (not input_name.isalnum())):
            invalid_name.append(name)
        if name in tensor_inputs:
            onnx_input = tensor_inputs[name]  # type: Variable
            if name in channel_first_inputs or \
                    (name.endswith(':0') and name[:-2] in channel_first_inputs):
                nhwc_inputs.append(onnx_input.full_name)
                s = onnx_input.type.shape
                onnx_input.type.shape = [s[0], s[3], s[1], s[2]]
            container.add_input(onnx_input)

    if invalid_name:
        warnings.warn(
            'Some input names are not compliant with ONNX naming convention: %s'
            % invalid_name)
    for name in topology.raw_model.input_names:
        if name in other_inputs:
            container.add_input(other_inputs[name])

    # Add leaves to the graph according to their order in the original model
    invalid_name = []
    for name in topology.raw_model.output_names:
        # Check output naming convention
        output_name = name.replace('_', '').replace(":", "").replace("/", "")
        if output_name and (output_name[0].isdigit() or
                            (not output_name.isalnum())):
            invalid_name.append(name)
        if name in tensor_outputs:
            container.add_output(tensor_outputs[name])
    if invalid_name:
        warnings.warn(
            'Some output names are not compliant with ONNX naming convention: %s'
            % invalid_name)
    for name in topology.raw_model.output_names:
        if name in other_outputs:
            container.add_output(other_outputs[name])

    # Traverse the graph from roots to leaves
    for operator in topology.topological_operator_iterator():
        scope = next(scope for scope in topology.scopes
                     if scope.name == operator.scope)
        if operator.type in topology.custom_conversion_functions:
            topology.custom_conversion_functions[operator.type](scope,
                                                                operator,
                                                                container)
        else:
            # Convert the selected operator into some ONNX objects and save them into the container
            registration.get_converter(operator.type)(scope, operator,
                                                      container)

    # When calling ModelComponentContainer's add_initializer(...), nothing is added to the input list.
    # However, for ONNX target opset < 9, initializers must also appear among the model's (GraphProto) inputs.
    # Thus, we create ValueInfoProto objects from the initializers (type: TensorProto) directly and
    # then add them to the model's input list.
    extra_inputs = []  # ValueInfoProto list of the initializers
    for tensor in container.initializers:
        # Sometimes (especially when creating optional input values such as an RNN's initial hidden state), an
        # initializer is also one of the original model's inputs, so it has already been added to the container's
        # input list. If this is the case, we skip the iteration to avoid duplicated inputs.
        if tensor.name in [value_info.name for value_info in container.inputs]:
            continue

        # Initializers are always tensors so we can just call make_tensor_value_info(...)
        value_info = helper.make_tensor_value_info(tensor.name,
                                                   tensor.data_type,
                                                   tensor.dims)
        extra_inputs.append(value_info)

    # Enable the ONNX optimizations
    nodes = optimize_onnx(container.nodes, nhwc_inputs,
                          container.inputs + extra_inputs, container.outputs)

    # Create a graph from its main components
    if container.target_opset < 9:
        # Before ONNX opset 9, initializers need to be passed in with inputs
        graph = helper.make_graph(nodes, model_name,
                                  container.inputs + extra_inputs,
                                  container.outputs, container.initializers)
    else:
        # In ONNX opset 9 and above, initializers are included as operator
        # inputs, and therefore do not need to be passed as extra_inputs
        graph = helper.make_graph(nodes, model_name, container.inputs,
                                  container.outputs, container.initializers)

    # Add extra information related to the graph
    graph.value_info.extend(container.value_info)

    # Create model
    onnx_model = helper.make_model(graph)

    # Merge operator sets for the same domain; only the largest version number is kept
    purified_operator_set = dict()
    for op_domain, op_version in container.node_domain_version_pair_sets:
        if op_domain not in purified_operator_set:
            purified_operator_set[op_domain] = op_version
        else:
            purified_operator_set[op_domain] = max(
                purified_operator_set[op_domain], op_version)

    # Fill operator sets
    i = 0
    for op_domain, op_version in purified_operator_set.items():
        if i == 0 and len(onnx_model.opset_import) == 1:
            # Overwrite the default operator set created by helper.make_model(...)
            op_set = onnx_model.opset_import[0]
        else:
            # Just create one ONNX element in opset_import
            op_set = onnx_model.opset_import.add()
        op_set.domain = op_domain
        op_set.version = op_version
        i += 1
        if container.target_opset < op_version:
            raise RuntimeError(
                ('The specified opset %d is too low to convert this model, ' +
                 'which requires at least opset %d.') %
                (container.target_opset, op_version))
        elif container.target_opset > op_version:
            getLogger('onnxmltools').warning(
                'The maximum opset needed by this model is only %d.' %
                op_version)

    # Add extra information
    add_metadata_props(onnx_model, topology.metadata_props, target_opset)
    onnx_model.ir_version = onnx_proto.IR_VERSION
    onnx_model.producer_name = utils.get_producer()
    onnx_model.producer_version = utils.get_producer_version()
    onnx_model.domain = utils.get_domain()
    onnx_model.model_version = utils.get_model_version()
    onnx_model.doc_string = doc_string

    return onnx_model
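The opset merge performed above can be shown in isolation; a self-contained sketch (the domain/version pairs below are made up) of keeping only the largest version per domain:

pairs = {('', 9), ('', 11), ('com.microsoft', 1)}
purified = {}
for domain, version in pairs:
    # keep the largest version seen so far for each domain
    purified[domain] = max(purified.get(domain, version), version)
assert purified == {'': 11, 'com.microsoft': 1}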
Exemplo n.º 57
0
    def test_imagescaler1(self):
        # test import model with imagescaler
        try:
            import onnx
            from onnx import helper, TensorProto
        except ImportError:
            unittest.TestCase.skipTest(self, 'onnx not found')

        if self.data_dir_local is None:
            unittest.TestCase.skipTest(self, 'DLPY_DATA_DIR_LOCAL is not set in '
                                             'the environment variables')

        import numpy as np
        n1 = helper.make_node('ImageScaler',
                              ['X'],
                              ['X1'],
                              bias=[0., 0., 0.],
                              scale=1.)
        n2 = helper.make_node('Conv',
                              inputs=['X1', 'W1'],
                              outputs=['X2'],
                              kernel_shape=[3, 3],
                              pads=[0, 0, 0, 0])
        n3 = helper.make_node('MatMul',
                              inputs=['X2', 'W2'],
                              outputs=['X3'])

        W1 = np.ones((3, 3, 3)).astype(np.float32)
        W2 = np.ones((9, 2)).astype(np.float32)

        graph_def = helper.make_graph(
            [n1, n2, n3],
            name='test',
            inputs=[
                helper.make_tensor_value_info('X',
                                              TensorProto.FLOAT,
                                              [1, 3, 10, 10]),
                helper.make_tensor_value_info('W1',
                                              TensorProto.FLOAT,
                                              [3, 3, 3]),
                helper.make_tensor_value_info('W2',
                                              TensorProto.FLOAT,
                                              [9, 2])],
            outputs=[
                helper.make_tensor_value_info('X3',
                                              TensorProto.FLOAT,
                                              [1, 2])],
            initializer=[
                helper.make_tensor('W1',
                                   TensorProto.FLOAT,
                                   [3, 3, 3],
                                   W1.flatten().astype(np.float32)),
                helper.make_tensor('W2',
                                   TensorProto.FLOAT,
                                   [9, 2],
                                   W2.flatten().astype(np.float32))])
        onnx_model = helper.make_model(graph_def)

        model1 = Model.from_onnx_model(self.s, onnx_model)

        l1 = model1.layers[0]
        self.assertTrue(l1.type == 'input')
        self.assertTrue(l1.config['offsets'] == [0., 0., 0.])
        self.assertTrue(l1.config['scale'] == 1.)
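For reference, ImageScaler's arithmetic (per its deprecated ONNX definition, which this importer maps onto the input layer's offsets/scale) is output = input * scale + bias, with the bias broadcast per channel; a quick numpy sketch:

import numpy as np
X = np.random.rand(1, 3, 10, 10).astype(np.float32)
bias = np.array([0., 0., 0.], dtype=np.float32)
# scale=1 and a zero bias leave the input unchanged, matching the test above
Y = X * 1. + bias.reshape(1, 3, 1, 1)
assert np.allclose(Y, X)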
Exemplo n.º 58
0
    def test_sequence_ops(self):
        # test SequenceConstruct and SequenceAt
        a = np.random.randn(2, 1, 2).astype(np.float32)
        b = np.random.randn(1, 1, 2).astype(np.float32)
        c = np.random.randn(3, 1, 2).astype(np.float32)
        seq_construct_node = helper.make_node('SequenceConstruct',
                                              ['a', 'b', 'c'], ['S'])
        seq_at_node = helper.make_node('SequenceAt', ['S', 'at'], ['Y'])
        out_value_info = helper.make_tensor_value_info('Y',
                                                       onnx.TensorProto.FLOAT,
                                                       [None])
        a_value_info = helper.make_tensor_value_info('a',
                                                     onnx.TensorProto.FLOAT,
                                                     [2, 1, 2])
        b_value_info = helper.make_tensor_value_info('b',
                                                     onnx.TensorProto.FLOAT,
                                                     [1, 1, 2])
        c_value_info = helper.make_tensor_value_info('c',
                                                     onnx.TensorProto.FLOAT,
                                                     [3, 1, 2])
        at_value_info = helper.make_tensor_value_info('at',
                                                      onnx.TensorProto.INT32,
                                                      [])

        graph = helper.make_graph(
            [seq_construct_node, seq_at_node],
            name='seq_construct_at_test',
            inputs=[a_value_info, b_value_info, c_value_info, at_value_info],
            outputs=[out_value_info])
        model = helper.make_model(graph, producer_name='backend-test')
        tf_rep = prepare(model)
        output = tf_rep.run({'a': a, 'b': b, 'c': c, 'at': 0})
        np.testing.assert_almost_equal(output["Y"], a)
        output = tf_rep.run({'a': a, 'b': b, 'c': c, 'at': -2})
        np.testing.assert_almost_equal(output["Y"], b)
        output = tf_rep.run({'a': a, 'b': b, 'c': c, 'at': 2})
        np.testing.assert_almost_equal(output["Y"], c)

        # test SequenceEmpty, SequenceInsert, and SequenceAt
        p = np.int32(0)
        seq_empty_node = helper.make_node('SequenceEmpty', [], ['S'])
        seq_insert_node1 = helper.make_node('SequenceInsert', ['S', 'a'],
                                            ['S1'])
        seq_insert_node2 = helper.make_node('SequenceInsert', ['S1', 'b'],
                                            ['S2'])
        seq_insert_node3 = helper.make_node('SequenceInsert', ['S2', 'c', 'p'],
                                            ['S3'])
        seq_at_node = helper.make_node('SequenceAt', ['S3', 'at'], ['Y'])

        p_value_info = helper.make_tensor_value_info('p',
                                                     onnx.TensorProto.INT32,
                                                     [])

        graph = helper.make_graph([
            seq_empty_node, seq_insert_node1, seq_insert_node2,
            seq_insert_node3, seq_at_node
        ],
                                  name='seq_empty_insert_at_test',
                                  inputs=[
                                      a_value_info, b_value_info, c_value_info,
                                      p_value_info, at_value_info
                                  ],
                                  outputs=[out_value_info])
        model = helper.make_model(graph, producer_name='backend-test')
        tf_rep = prepare(model)
        output = tf_rep.run({'a': a, 'b': b, 'c': c, 'p': p, 'at': 0})
        np.testing.assert_almost_equal(output["Y"], c)

        # test SequenceConstruct, SequenceErase, and SequenceLength
        seq_construct_node = helper.make_node('SequenceConstruct',
                                              ['a', 'b', 'c'], ['S'])
        seq_erase_node = helper.make_node('SequenceErase', ['S', 'p'], ['S1'])
        seq_length_node = helper.make_node('SequenceLength', ['S1'], ['Y'])

        graph = helper.make_graph(
            [seq_construct_node, seq_erase_node, seq_length_node],
            name='seq_construct_erase_length_test',
            inputs=[a_value_info, b_value_info, c_value_info, p_value_info],
            outputs=[out_value_info])
        model = helper.make_model(graph, producer_name='backend-test')
        tf_rep = prepare(model)
        output = tf_rep.run({'a': a, 'b': b, 'c': c, 'p': p})
        np.testing.assert_almost_equal(output["Y"], 2)

        # test SequenceConstruct and SequenceErase
        seq_construct_node = helper.make_node('SequenceConstruct',
                                              ['a', 'b', 'c'], ['S'])
        seq_erase_node = helper.make_node('SequenceErase', ['S', 'p'], ['S1'])
        seq_at_node = helper.make_node('SequenceAt', ['S1', 'at'], ['Y'])

        graph = helper.make_graph(
            [seq_construct_node, seq_erase_node, seq_at_node],
            name='seq_construct_erase_test',
            inputs=[
                a_value_info, b_value_info, c_value_info, p_value_info,
                at_value_info
            ],
            outputs=[out_value_info])
        model = helper.make_model(graph, producer_name='backend-test')
        tf_rep = prepare(model)
        output = tf_rep.run({'a': a, 'b': b, 'c': c, 'p': p, 'at': 0})
        np.testing.assert_almost_equal(output["Y"], b)
        output = tf_rep.run({'a': a, 'b': b, 'c': c, 'p': p, 'at': 1})
        np.testing.assert_almost_equal(output["Y"], c)

        # test SequenceConstruct and ConcatFromSequence
        seq_construct_node = helper.make_node('SequenceConstruct',
                                              ['a', 'b', 'c'], ['S'])
        concat_from_seq_node = helper.make_node('ConcatFromSequence', ['S'],
                                                ['Y'],
                                                axis=1)
        a = np.array([[1, 2], [3, 4]]).astype(np.float32)
        b = np.array([[5, 6], [7, 8]]).astype(np.float32)
        c = np.array([[9, 10], [11, 12]]).astype(np.float32)
        a_value_info = helper.make_tensor_value_info('a',
                                                     onnx.TensorProto.FLOAT,
                                                     [2, 2])
        b_value_info = helper.make_tensor_value_info('b',
                                                     onnx.TensorProto.FLOAT,
                                                     [2, 2])
        c_value_info = helper.make_tensor_value_info('c',
                                                     onnx.TensorProto.FLOAT,
                                                     [2, 2])

        graph = helper.make_graph(
            [seq_construct_node, concat_from_seq_node],
            name='seq_construct_concat_test',
            inputs=[a_value_info, b_value_info, c_value_info],
            outputs=[out_value_info])
        model = helper.make_model(graph, producer_name='backend-test')
        tf_rep = prepare(model)
        output = tf_rep.run({'a': a, 'b': b, 'c': c})
        d = np.concatenate((a, b, c), axis=1).astype(np.float32)
        np.testing.assert_almost_equal(output["Y"], d)

        # test SplitToSequence and SequenceAt
        a = np.array([[1, 2, 3, 4, 5, 6, 7], [11, 12, 13, 14, 15, 16, 17],
                      [21, 22, 23, 24, 25, 26, 27]]).astype(np.float32)
        b = np.int32([2, 1])
        seq_split_node = helper.make_node('SplitToSequence', ['a', 'b'], ['S'])
        seq_at_node = helper.make_node('SequenceAt', ['S', 'at'], ['Y'])
        a_value_info = helper.make_tensor_value_info('a',
                                                     onnx.TensorProto.FLOAT,
                                                     [3, 7])
        b_value_info = helper.make_tensor_value_info('b',
                                                     onnx.TensorProto.INT32,
                                                     [2])
        at_value_info = helper.make_tensor_value_info('at',
                                                      onnx.TensorProto.INT32,
                                                      [])

        graph = helper.make_graph(
            [seq_split_node, seq_at_node],
            name='split_to_seq_test',
            inputs=[a_value_info, b_value_info, at_value_info],
            outputs=[out_value_info])
        model = helper.make_model(graph, producer_name='backend-test')
        tf_rep = prepare(model)
        output = tf_rep.run({'a': a, 'b': b, 'at': 1})
        np.testing.assert_almost_equal(output["Y"], np.split(a, [2, 3])[1])

        axis = 1
        seq_split_node = helper.make_node('SplitToSequence', ['a'], ['S'],
                                          axis=axis)
        seq_at_node = helper.make_node('SequenceAt', ['S', 'at'], ['Y'])
        at_value_info = helper.make_tensor_value_info('at',
                                                      onnx.TensorProto.INT32,
                                                      [])

        graph = helper.make_graph([seq_split_node, seq_at_node],
                                  name='split_to_seq_test',
                                  inputs=[a_value_info, at_value_info],
                                  outputs=[out_value_info])
        model = helper.make_model(graph, producer_name='backend-test')
        tf_rep = prepare(model)
        output = tf_rep.run({'a': a, 'at': 0})
        np.testing.assert_almost_equal(output["Y"], np.split(a, 7, axis=1)[0])

        seq_split_node = helper.make_node('SplitToSequence', ['a'], ['S'],
                                          keepdims=0)
        seq_at_node = helper.make_node('SequenceAt', ['S', 'at'], ['Y'])
        at_value_info = helper.make_tensor_value_info('at',
                                                      onnx.TensorProto.INT32,
                                                      [])

        graph = helper.make_graph([seq_split_node, seq_at_node],
                                  name='split_to_seq_test',
                                  inputs=[a_value_info, at_value_info],
                                  outputs=[out_value_info])
        model = helper.make_model(graph, producer_name='backend-test')
        tf_rep = prepare(model)
        output = tf_rep.run({'a': a, 'at': 0})
        expected = [np.squeeze(res) for res in np.split(a, 3)]
        np.testing.assert_almost_equal(output["Y"], expected[0])
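A numpy sketch of the SplitToSequence defaults relied on above (an assumption from the ONNX spec: with no 'split' input the tensor is cut into length-1 slices along 'axis', and keepdims=0 additionally squeezes that axis away):

import numpy as np
a = np.arange(6).reshape(3, 2).astype(np.float32)
slices = [a[i:i + 1] for i in range(a.shape[0])]    # keepdims=1: shape (1, 2)
squeezed = [s.squeeze(axis=0) for s in slices]      # keepdims=0: shape (2,)
assert slices[0].shape == (1, 2) and squeezed[0].shape == (2,)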
Exemplo n.º 59
0
    def construct_model_attention_and_matmul(self, output_model_path):
        #      (input)
        #         |
        #     Attention
        #         |
        #       MatMul
        #         |
        #      (output)
        input_name = "input"
        output_name = "output"
        initializers = []

        def make_attention_node(input_name, weight_shape, weight_name,
                                bias_shape, bias_name, output_name):
            weight_data = np.random.normal(0, 0.1,
                                           weight_shape).astype(np.float32)
            initializers.append(
                onnx.numpy_helper.from_array(weight_data, name=weight_name))

            bias_data = np.random.normal(0, 0.1, bias_shape).astype(np.float32)
            initializers.append(
                onnx.numpy_helper.from_array(bias_data, name=bias_name))

            return onnx.helper.make_node("Attention",
                                         [input_name, weight_name, bias_name],
                                         [output_name])

        def make_matmul_node(input_name, weight_shape, weight_name,
                             output_name):
            weight_data = np.random.normal(0, 0.1,
                                           weight_shape).astype(np.float32)
            initializers.append(
                onnx.numpy_helper.from_array(weight_data, name=weight_name))

            return onnx.helper.make_node("MatMul", [input_name, weight_name],
                                         [output_name])

        # make attention node
        attention_output_name = "attention_output"
        attention_node = make_attention_node(input_name, [10, 30],
                                             "qkv.weight", [30], "qkv.bias",
                                             attention_output_name)
        attention_node.domain = "com.microsoft"
        attention_node.attribute.extend(
            [helper.make_attribute("num_heads", 5)])

        # make matmul node
        matmul_node = make_matmul_node(attention_output_name, [10, 10],
                                       "matmul.weight", output_name)

        # make graph
        input_tensor = helper.make_tensor_value_info(input_name,
                                                     TensorProto.FLOAT,
                                                     [1, -1, 10])
        output_tensor = helper.make_tensor_value_info(output_name,
                                                      TensorProto.FLOAT,
                                                      [1, -1, 10])
        graph_name = "attention_test"
        graph = helper.make_graph(
            [attention_node, matmul_node],
            graph_name,
            [input_tensor],
            [output_tensor],
            initializer=initializers,
        )
        model = helper.make_model(graph,
                                  opset_imports=[helper.make_opsetid("", 13)])
        model.ir_version = onnx.IR_VERSION

        onnx.save(model, output_model_path)
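A usage sketch for the helper above (the file name is illustrative, and the call is assumed to run inside the same test class): after saving, the model can be reloaded and its node sequence sanity-checked.

path = 'attention_matmul.onnx'  # hypothetical output path
self.construct_model_attention_and_matmul(path)
reloaded = onnx.load(path)
assert [n.op_type for n in reloaded.graph.node] == ['Attention', 'MatMul']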
Exemplo n.º 60
0
    def create_neg(self, shape, ir_version):
        """
            ONNX net                   IR net

            Input->Neg->Output   =>    Input->Power(scale=-1, shift=0, power=1)

        """

        #
        #   Create ONNX model
        #

        import onnx
        from onnx import helper
        from onnx import TensorProto

        input = helper.make_tensor_value_info('input', TensorProto.FLOAT,
                                              shape)
        output = helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                               shape)

        node_neg_def = onnx.helper.make_node(
            'Neg',
            inputs=['input'],
            outputs=['output'],
        )

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            [node_neg_def],
            'test_neg_model',
            [input],
            [output],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def, producer_name='test_neg_model')

        #
        #   Create reference IR net
        #   Please specify 'type': 'Input' for the input node.
        #   Do not forget to validate ALL layer attributes!
        #

        ref_net = None

        if check_ir_version(10, None, ir_version):
            nodes_attributes = {
                'input': {
                    'kind': 'op',
                    'type': 'Parameter'
                },
                'input_data': {
                    'shape': shape,
                    'kind': 'data'
                },
                'neg': {
                    'kind': 'op',
                    'type': 'Negative'
                },
                'neg_data': {
                    'shape': shape,
                    'kind': 'data'
                },
                'result': {
                    'kind': 'op',
                    'type': 'Result'
                }
            }
            ref_net = build_graph(nodes_attributes, [('input', 'input_data'),
                                                     ('input_data', 'neg'),
                                                     ('neg', 'neg_data'),
                                                     ('neg_data', 'result')])

        return onnx_net, ref_net
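A numeric sketch of the mapping stated in the docstring, where the IR's Power layer computes (x * scale + shift) ** power: Neg(x) equals Power with scale=-1, shift=0, power=1.

import numpy as np
x = np.random.rand(2, 3).astype(np.float32)
assert np.allclose(-x, np.power(x * -1.0 + 0.0, 1.0))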