Example #1
def verify_mxnet_frontend_impl(mx_symbol, data_shape=(1, 3, 224, 224), out_shape=(1, 1000),
                               gluon_impl=False, name=None, dtype='float32'):
    """Use name different from test to avoid let nose pick it up"""
    if gluon_impl:
        def get_gluon_output(name, x):
            net = vision.get_model(name)
            net.collect_params().initialize(mx.init.Xavier())
            net_sym = gluon.nn.SymbolBlock(outputs=net(mx.sym.var('data')),
                                           inputs=mx.sym.var('data'),
                                           params=net.collect_params())
            out = net_sym(mx.nd.array(x.astype(dtype))).asnumpy()
            return out, net_sym
    else:
        def get_mxnet_output(symbol, x, dtype='float32'):
            from collections import namedtuple
            Batch = namedtuple('Batch', ['data'])
            mod = mx.mod.Module(symbol, label_names=None)
            mod.bind(data_shapes=[('data', x.shape)], for_training=False)
            mod.init_params()
            mod.forward(Batch([mx.nd.array(x.astype(dtype))]))
            out = mod.get_outputs()[0].asnumpy()
            args, auxs = mod.get_params()
            return out, args, auxs

    def get_tvm_output(symbol, x, args, auxs, target, ctx, dtype='float32'):
        if gluon_impl:
            new_sym, params = frontend.from_mxnet(symbol)
        else:
            new_sym, params = frontend.from_mxnet(symbol, args, auxs)

        dshape = x.shape
        shape_dict = {'data': dshape}
        with nnvm.compiler.build_config(opt_level=3):
            graph, lib, params = nnvm.compiler.build(new_sym, target, shape_dict, params=params)
        m = graph_runtime.create(graph, lib, ctx)
        # set inputs
        m.set_input("data", tvm.nd.array(x.astype(dtype)))
        m.set_input(**params)
        m.run()
        # get outputs
        out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
        return out.asnumpy()

    # random input
    x = np.random.uniform(size=data_shape)
    if gluon_impl:
        gluon_out, gluon_sym = get_gluon_output(name, x)
        for target, ctx in ctx_list():
            tvm_out = get_tvm_output(gluon_sym, x, None, None, target, ctx, dtype)
            tvm.testing.assert_allclose(gluon_out, tvm_out, rtol=1e-5, atol=1e-5)
    else:
        mx_out, args, auxs = get_mxnet_output(mx_symbol, x, dtype)
        assert "data" not in args
        for target, ctx in ctx_list():
            tvm_out = get_tvm_output(mx_symbol, x, args, auxs, target, ctx, dtype)
            tvm.testing.assert_allclose(mx_out, tvm_out, rtol=1e-5, atol=1e-5)
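A minimal driver sketch for the harness above, assuming the test module's usual imports (mx, np, etc.); the symbol and the Gluon model-zoo name are illustrative, not taken from the original suite:

# Hypothetical invocations; any MXNet symbol or Gluon model name could be used.
mx_sym = mx.sym.softmax(mx.sym.var('data'))
verify_mxnet_frontend_impl(mx_sym, data_shape=(1, 3, 224, 224), out_shape=(1, 3, 224, 224))
verify_mxnet_frontend_impl(None, (1, 3, 224, 224), (1, 1000), gluon_impl=True, name='resnet18_v1')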
Example #2
def verify_hardsigmoid(input_dim, alpha, beta):
    dtype = 'float32'

    a_np1 = np.random.uniform(size=input_dim).astype(dtype)

    b_np = np.clip(a_np1 * alpha + beta, 0, 1)

    hardsigmoid_node = helper.make_node("HardSigmoid", ["a_np1"], ["out"],
                                        alpha=alpha,
                                        beta=beta)

    graph = helper.make_graph(
        [hardsigmoid_node],
        "HardSigmoid_test",
        inputs=[
            helper.make_tensor_value_info("a_np1", TensorProto.FLOAT,
                                          list(input_dim))
        ],
        outputs=[
            helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                          list(b_np.shape))
        ])

    model = helper.make_model(graph, producer_name='HardSigmoid_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape)
        tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
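A typical invocation might look like this (the dimensions and slopes are assumptions for illustration):

verify_hardsigmoid((1, 3, 4, 5), 0.2, 0.5)
verify_hardsigmoid((10, 10), 0.3, 0.7)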
Example #3
def verify_binary_ops(op,
                      x,
                      y,
                      out_np,
                      broadcast=None,
                      rtol=1e-7,
                      atol=1e-7):
    if broadcast is None:
        z = helper.make_node(op, ['in1', 'in2'], ['out'])
    else:
        z = helper.make_node(op, ['in1', 'in2'], ['out'], broadcast=1)
    graph = helper.make_graph(
        [z],
        '_test',
        inputs=[
            helper.make_tensor_value_info("in1", TensorProto.FLOAT,
                                          list(x.shape)),
            helper.make_tensor_value_info("in2", TensorProto.FLOAT,
                                          list(y.shape))
        ],
        outputs=[
            helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                          list(out_np.shape))
        ])
    model = helper.make_model(graph, producer_name='_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, y], target, ctx)
        tvm.testing.assert_allclose(out_np, tvm_out, rtol=rtol, atol=atol)
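For instance, with two float32 operands of matching shape (shapes and ops chosen for illustration):

x = np.random.uniform(size=(1, 2, 3, 3)).astype(np.float32)
y = np.random.uniform(size=(1, 2, 3, 3)).astype(np.float32)
verify_binary_ops("Add", x, y, x + y)
verify_binary_ops("Sub", x, y, x - y)
verify_binary_ops("Mul", x, y, x * y)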
Example #4
def verify_take(src_shape, indices_src, axis=None):
    src_dtype = "float32"
    indices_dtype = "int32"
    indices_src = np.array(indices_src, dtype=indices_dtype)
    a = sym.Variable("a")
    indices = sym.Variable("indices")
    if axis is None:
        out = sym.take(a, indices)
    else:
        out = sym.take(a, indices, axis=axis)
    for target, ctx in ctx_list():
        # set input
        shape_dict = {"a": src_shape, "indices": indices_src.shape}
        type_dict = {"a": src_dtype, "indices": indices_dtype}
        graph, lib, _ = nnvm.compiler.build(out,
                                            target,
                                            shape=shape_dict,
                                            dtype=type_dict)
        m = graph_runtime.create(graph, lib, ctx)

        shape_size = 1
        for i in range(len(src_shape)):
            shape_size = shape_size * src_shape[i]
        a_src = np.arange(shape_size, dtype=src_dtype).reshape((src_shape))
        if axis is None:
            out_np = np.take(a_src, indices_src)
        else:
            out_np = np.take(a_src, indices_src, axis=axis)
        #print("out_np:", out_np.shape, "\n", out_np)
        m.run(a=a_src, indices=indices_src)
        out = m.get_output(0, tvm.nd.empty(out_np.shape, dtype=src_dtype))
        #print("out:", out.shape, "\n", out.asnumpy())
        np.testing.assert_allclose(out.asnumpy(), out_np, atol=1e-5, rtol=1e-5)
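Illustrative calls (the shapes and index patterns are assumptions):

verify_take((4,), [1])                    # flat (no-axis) take
verify_take((3, 3, 3), [[0, 2]], axis=1)  # gather along axis 1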
Example #5
def _test_power_iteration(x_shape, y_shape):
    if isinstance(y_shape, int):
        y_shape = [y_shape]

    x = np.random.uniform(size=x_shape).astype(np.float32)
    y = np.random.uniform(size=y_shape).astype(np.float32)

    np_res = np.power(x, y).astype(np.float32)

    res = helper.make_node("Pow", ['x', 'y'], ['out'])

    graph = helper.make_graph([res],
                              'power_test',
                              inputs = [helper.make_tensor_value_info("x",
                                            TensorProto.FLOAT, list(x_shape)),
                                        helper.make_tensor_value_info("y",
                                            TensorProto.FLOAT, list(y_shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(np_res.shape))])

    model = helper.make_model(graph, producer_name='power_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, y], target, ctx, np_res.shape)
        np.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5)
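For example (shapes assumed; an int y_shape exercises the scalar-exponent branch):

_test_power_iteration((1, 3), 1)
_test_power_iteration((2, 3), (2, 3))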
Example #6
def test_matmul():
    a_shape = (4, 3)
    b_shape = (3, 4)

    a_array = np.random.uniform(size=a_shape).astype('float32')
    b_array = np.random.uniform(size=b_shape).astype('float32')
    out_np = np.matmul(a_array, b_array)

    mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])

    graph = helper.make_graph(
        [mul_node],
        "matmul_test",
        inputs=[
            helper.make_tensor_value_info("a", TensorProto.FLOAT,
                                          list(a_shape)),
            helper.make_tensor_value_info("b", TensorProto.FLOAT,
                                          list(b_shape))
        ],
        outputs=[
            helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                          list(out_np.shape))
        ])

    model = helper.make_model(graph, producer_name='matmul_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_array, b_array], target, ctx,
                                 out_np.shape)
        np.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
Example #7
def _test_slice_iteration(indata, outdata, starts, ends, axes=None):
    if axes:
        y = helper.make_node("Slice", ['in'], ['out'],
                             axes=axes,
                             starts=starts,
                             ends=ends)
    else:
        y = helper.make_node("Slice", ['in'], ['out'],
                             starts=starts,
                             ends=ends)

    graph = helper.make_graph(
        [y],
        'slice_test',
        inputs=[
            helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                          list(indata.shape))
        ],
        outputs=[
            helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                          list(outdata.shape))
        ])

    model = helper.make_model(graph, producer_name='slice_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape,
                                 'float32')
        np.testing.assert_allclose(outdata, tvm_out)
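A call might look like the following, with the expected output computed by plain NumPy slicing (values assumed):

x = np.random.randn(20, 10, 5).astype(np.float32)
_test_slice_iteration(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))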
Example #8
def test_unsqueeze():
    in_shape = (3, 3)
    axis = (0, 3, 4)
    out_shape = (1, 3, 3, 1, 1)
    y = helper.make_node("Unsqueeze", ['in'], ['out'], axes=list(axis))

    graph = helper.make_graph(
        [y],
        'squeeze_test',
        inputs=[
            helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                          list(in_shape))
        ],
        outputs=[
            helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                          list(out_shape))
        ])

    model = helper.make_model(graph, producer_name='squeeze_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, out_shape, 'float32')
        np.testing.assert_allclose(out_shape, tvm_out.shape)
Example #9
def verify_reduce_x(name, indata, axis, keepdims):
    indata = np.array(indata).astype(np.float32)
    #  numpy expect result
    if name == 'ReduceMax':
        outdata = np.maximum.reduce(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceMin':
        outdata = np.minimum.reduce(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceSum':
        outdata = np.sum(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceMean':
        outdata = np.mean(indata, axis=axis, keepdims=keepdims == 1)
    else:
        raise Exception('unsupported op: {}'.format(name))
    if len(np.asarray(outdata).shape) == 0:
        outdata = np.asarray([outdata])
    #  onnx graph
    if axis is None:
        node = helper.make_node(name, inputs=['input'], outputs=['output'],
                                keepdims=keepdims)
    else:
        node = helper.make_node(name, inputs=['input'], outputs=['output'],
                                axes=axis, keepdims=keepdims)
    graph = helper.make_graph([node],
                              '{}_test'.format(name),
                              inputs = [helper.make_tensor_value_info("input",
                                            TensorProto.FLOAT, list(indata.shape))],
                              outputs = [helper.make_tensor_value_info("output",
                                            TensorProto.FLOAT, list(outdata.shape))])
    model = helper.make_model(graph, producer_name='{}_test'.format(name))
    #  tvm result
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32')
        tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
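Illustrative drivers (input data and axes are assumptions):

data = np.random.uniform(size=(3, 4, 5)).astype(np.float32)
verify_reduce_x('ReduceMax', data, axis=(1,), keepdims=1)
verify_reduce_x('ReduceSum', data, axis=None, keepdims=0)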
Example #10
def verify_pad(indata, pads, value=0.0):
    indata = np.array(indata).astype(np.float32)
    #  numpy expect result
    len_dim = len(pads) // 2
    np_pads = [(pads[i], pads[i+len_dim]) for i in range(len_dim)]
    outdata = np.pad(indata, pad_width=np_pads, mode='constant', constant_values=value)
    #  onnx graph
    node = helper.make_node(
        'Pad',
        inputs=['input'],
        outputs=['output'],
        mode='constant',
        pads=pads,
        value=value
    )
    graph = helper.make_graph([node],
                              'pad_test',
                              inputs = [helper.make_tensor_value_info("input",
                                            TensorProto.FLOAT, list(indata.shape))],
                              outputs = [helper.make_tensor_value_info("output",
                                            TensorProto.FLOAT, list(outdata.shape))])
    model = helper.make_model(graph, producer_name='pad_test')
    #  tvm result
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32')
        tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
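For example, padding a 2-D input (pads uses the ONNX layout: all begin-pads, then all end-pads; values assumed):

verify_pad(np.random.randn(2, 2).astype(np.float32), [0, 1, 0, 0], 0.0)
verify_pad(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], 0.0)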
Example #11
def verify_constantfill(is_shape, input_dim, out_dim, value, dtype, **kwargs):
    input_a = np.random.uniform(size=input_dim).astype(dtype)
    out = np.empty(shape=out_dim, dtype=dtype)
    out.fill(value)

    if is_shape:
        fill_node = helper.make_node("ConstantFill", [], ["out"], shape=input_dim, value=value, **kwargs)
    else:
        fill_node = helper.make_node("ConstantFill", ["input_a"], ["out"], value=value, dtype=dtype, **kwargs)

    if is_shape:
        inputs = []
    else:
        inputs = [helper.make_tensor_value_info("input_a",
                  TensorProto.FLOAT, list(input_dim))]

    graph = helper.make_graph([fill_node],
                              "fill_test",
                              inputs,
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(out.shape))])

    model = helper.make_model(graph, producer_name='fill_test')

    for target, ctx in ctx_list():
        if is_shape:
            tvm_out = get_tvm_output(model, [], target, ctx, out.shape)
        else:
            tvm_out = get_tvm_output(model, [input_a], target, ctx, out.shape)

        tvm.testing.assert_allclose(out, tvm_out, rtol=1e-5, atol=1e-5)
Example #12
def _test_upsample_bilinear_opset9():
    scale = 2
    in_shape = (1, 1, 3, 3)
    out_shape = (1, 1, 3*scale, 3*scale)
    y = helper.make_node("Upsample", ['in', 'scales'], ['out'], mode='linear')
    scales = [1.0, 1.0, 2.0, 2.0]
    in_array = np.random.uniform(size=in_shape).astype(np.float32)
    out_array = topi.testing.bilinear_resize_python(in_array, (3*scale, 3*scale), "NCHW")

    ref_array = np.array(scales)
    ref_node = helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['scales'],
                                 value=onnx.helper.make_tensor(name = 'const_tensor',
                                                               data_type = TensorProto.FLOAT,
                                                               dims = ref_array.shape,
                                                               vals = ref_array.flatten().astype(float)))

    graph = helper.make_graph([ref_node, y],
                              'upsample_bilinear_opset9_test',
                              inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))])

    model = helper.make_model(graph, producer_name='upsample_bilinear_opset9_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, in_array, target, ctx, out_shape, 'float32')
        tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
Example #13
def verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None):
    in_array = np.random.uniform(size=shape).astype(dtype)

    if alpha is None and beta is None and bias is None:
        alpha = 0.0001
        beta = 0.75
        bias = 1.0
        node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], size=nsize)
    else:
        node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], alpha=alpha,
                                     beta=beta, bias=bias, size=nsize)

    graph = helper.make_graph([node],
                              "lrn_test",
                              inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(shape))],
                              outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))])
    model = helper.make_model(graph, producer_name='lrn_test')

    def _get_python_lrn():
        square_sum = np.zeros(shape).astype(dtype)
        for n, c, h, w in np.ndindex(in_array.shape):
            square_sum[n, c, h, w] = sum(in_array[n,
                                         max(0, c - int(math.floor((nsize - 1) / 2))): \
                                             min(shape[1], c + int(math.ceil((nsize - 1) / 2)) + 1),
                                         h,
                                         w] ** 2)
        py_out = in_array / ((bias + (alpha / nsize) * square_sum) ** beta)
        return py_out

    for target, ctx in ctx_list():
        input_name = model.graph.input[0].name
        py_out = _get_python_lrn()
        tvm_out = get_tvm_output(model, in_array, target, ctx, py_out.shape, 'float32')
        tvm.testing.assert_allclose(py_out, tvm_out, rtol=1e-5, atol=1e-5)
Example #14
def test_shape():
    in_shape = (4, 3, 3, 4)
    ref_shape = (6, 2, 4, 3)

    ref_array = np.array(ref_shape)
    ref_node = onnx.helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['ref_in'],
                                 value=onnx.helper.make_tensor(name = 'const_tensor',
                                                               data_type = onnx.TensorProto.INT32,
                                                               dims = ref_array.shape,
                                                               vals = ref_array.flatten().astype(int)))
    reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])

    shape_node = helper.make_node("Shape", ['out'], ['final_out'])

    graph = helper.make_graph([ref_node, reshape_node, shape_node],
                              "shape_test",
                              inputs = [helper.make_tensor_value_info("in",
                                            TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("final_out",
                                            TensorProto.FLOAT, list(ref_shape))])

    model = helper.make_model(graph, producer_name='shape_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('int32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'int32')
        tvm.testing.assert_allclose(ref_shape, tvm_out)
Example #15
def verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None):
    in_array = np.random.uniform(size=shape).astype(dtype)

    if alpha is None and beta is None and bias is None:
        alpha = 0.0001
        beta = 0.75
        bias = 1.0
        node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], size=nsize)
    else:
        node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], alpha=alpha,
                                     beta=beta, bias=bias, size=nsize)

    graph = helper.make_graph([node],
                              "lrn_test",
                              inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(shape))],
                              outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))])
    model = helper.make_model(graph, producer_name='lrn_test')

    def _get_python_lrn():
        square_sum = np.zeros(shape).astype(dtype)
        for n, c, h, w in np.ndindex(in_array.shape):
            square_sum[n, c, h, w] = sum(in_array[n,
                                         max(0, c - int(math.floor((nsize - 1) / 2))): \
                                             min(shape[1], c + int(math.ceil((nsize - 1) / 2)) + 1),
                                         h,
                                         w] ** 2)
        py_out = in_array / ((bias + (alpha / nsize) * square_sum) ** beta)
        return py_out

    for target, ctx in ctx_list():
        input_name = model.graph.input[0].name
        py_out = _get_python_lrn()
        tvm_out = get_tvm_output(model, in_array, target, ctx, py_out.shape, 'float32')
        tvm.testing.assert_allclose(py_out, tvm_out, rtol=1e-5, atol=1e-5)
Example #16
def test_non_max_suppression():
    dshape = (1, 5, 6)
    data = sym.Variable("data")
    valid_count = sym.Variable("valid_count", dtype="int32")
    iou_threshold = 0.7
    force_suppress = True
    top_k = 2
    out = sym.non_max_suppression(data=data, valid_count=valid_count, return_indices=False,
                                  iou_threshold=iou_threshold, force_suppress=force_suppress, top_k=top_k)

    np_data = np.array([[[0, 0.8, 1, 20, 25, 45], [1, 0.7, 30, 60, 50, 80],
                         [0, 0.4, 4, 21, 19, 40], [2, 0.9, 35, 61, 52, 79],
                         [1, 0.5, 100, 60, 70, 110]]]).astype("float32")
    np_valid_count = np.array([4]).astype("int32")
    np_result = np.array([[[2, 0.9, 35, 61, 52, 79], [0, 0.8, 1, 20, 25, 45],
                           [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1],
                           [-1, -1, -1, -1, -1, -1]]])

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(out, target, {"data": dshape, "valid_count": (dshape[0],)},
                                            dtype={"data": "float32", "valid_count": "int32"})
        m = graph_runtime.create(graph, lib, ctx)
        m.set_input(**{"data": np_data, "valid_count": np_valid_count})
        m.run()
        tvm_out = m.get_output(0, tvm.nd.empty(np_result.shape, "float32"))
        tvm.testing.assert_allclose(tvm_out.asnumpy(), np_result, atol=1e-5, rtol=1e-5)
Example #17
def test_forward_where():
    cond = mx.sym.var('cond')
    x = mx.sym.var('x')
    y = mx.sym.var('y')
    dshape = (2, 2)
    dtype = 'float32'
    mx_sym = mx.sym.where(cond, x, y)
    np_cond = np.array([[0, 1], [-1, 0]]).astype(dtype)
    np_x = np.random.uniform(size=dshape).astype(dtype)
    np_y = np.random.uniform(size=dshape).astype(dtype)
    mx_cond = mx.nd.array(np_cond)
    mx_x = mx.nd.array(np_x)
    mx_y = mx.nd.array(np_y)
    mod = mx.mod.Module(mx_sym, label_names=None, data_names=['cond', 'x', 'y'])
    mod.bind(data_shapes=[('cond', dshape), ('x', dshape), ('y', dshape)], for_training=False)
    mod.init_params()
    args, auxs = mod.get_params()
    mx_out = mx.nd.where(mx_cond, mx_x, mx_y).asnumpy()
    out_shape = dshape
    new_sym, params = frontend.from_mxnet(mx_sym, args, auxs)
    shape_dict = {'cond': dshape, 'x': dshape, 'y': dshape}
    for target, ctx in ctx_list():
        with nnvm.compiler.build_config(opt_level=3):
            graph, lib, params = nnvm.compiler.build(new_sym, target, shape_dict, params=params)
        m = graph_runtime.create(graph, lib, ctx)
        # set inputs
        m.set_input("cond", tvm.nd.array(np_cond))
        m.set_input("x", tvm.nd.array(np_x))
        m.set_input("y", tvm.nd.array(np_y))
        m.set_input(**params)
        m.run()
        # get outputs
        tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
        tvm.testing.assert_allclose(mx_out, tvm_out, rtol=1e-5, atol=1e-5)
Example #18
def verify_gather(in_shape, indices, axis, dtype):
    x = np.random.uniform(size=in_shape).astype(dtype)
    indices = np.array(indices, dtype="int32")
    out_np = np.take(x, indices, axis=axis)

    y = helper.make_node("Gather", ['in', 'indices'], ['out'], axis=axis)

    graph = helper.make_graph(
        [y],
        'gather_test',
        inputs=[
            helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                          list(in_shape)),
            helper.make_tensor_value_info("indices", TensorProto.INT32,
                                          list(indices.shape))
        ],
        outputs=[
            helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                          list(out_np.shape))
        ])
    model = helper.make_model(graph, producer_name='gather_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, indices], target, ctx,
                                 out_np.shape)
        np.testing.assert_allclose(out_np, tvm_out)
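Example invocations (shapes, indices, and dtypes are illustrative assumptions):

verify_gather((4,), [1], 0, 'float32')
verify_gather((4, 3), [0, 2], 1, 'float32')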
Example #19
def test_forward_minimum():
    a = mx.sym.var('a')
    b = mx.sym.var('b')
    dshape = (10, 20)
    dtype = 'float32'
    mx_sym = mx.sym._internal._minimum(a, b)
    np_a = np.random.uniform(size=dshape).astype(dtype)
    np_b = np.random.uniform(size=dshape).astype(dtype)
    mx_a = mx.nd.array(np_a)
    mx_b = mx.nd.array(np_b)
    mod = mx.mod.Module(mx_sym, label_names=None, data_names=['a', 'b'])
    mod.bind(data_shapes=[('a', dshape), ('b', dshape)], for_training=False)
    mod.init_params()
    args, auxs = mod.get_params()
    mx_out = mx.nd._internal._minimum(mx_a, mx_b).asnumpy()
    out_shape = dshape
    new_sym, params = frontend.from_mxnet(mx_sym, args, auxs)
    shape_dict = {'a': dshape, 'b': dshape}
    for target, ctx in ctx_list():
        with nnvm.compiler.build_config(opt_level=3):
            graph, lib, params = nnvm.compiler.build(new_sym, target, shape_dict, params=params)
        m = graph_runtime.create(graph, lib, ctx)
        # set inputs
        m.set_input("a", tvm.nd.array(np_a))
        m.set_input("b", tvm.nd.array(np_b))
        m.set_input(**params)
        m.run()
        # get outputs
        tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
        tvm.testing.assert_allclose(mx_out, tvm_out, rtol=1e-5, atol=1e-5)
Example #20
def _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs):
    indata = np.random.uniform(size=inshape).astype(dtype)
    outdata = outfunc(indata, **npargs)

    y = helper.make_node(opname, ['in'], ['out'], **kwargs)

    graph = helper.make_graph(
        [y],
        opname + '_test',
        inputs=[
            helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                          list(indata.shape))
        ],
        outputs=[
            helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                          list(outdata.shape))
        ])

    model = helper.make_model(graph, producer_name=opname + '_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape,
                                 dtype)
        np.testing.assert_allclose(outdata, tvm_out)
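As one concrete use, this harness can check ONNX Clip against np.clip; the attribute spellings follow the pre-opset-11 Clip schema, and the bounds are assumptions:

_test_onnx_op_elementwise((2, 4, 5, 6),
                          np.clip,
                          {'a_min': -1.0, 'a_max': 1.0},
                          'float32',
                          'Clip',
                          {'min': -1.0, 'max': 1.0})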
Example #21
def test_forward_minimum():
    a = mx.sym.var('a')
    b = mx.sym.var('b')
    dshape = (10, 20)
    dtype = 'float32'
    mx_sym = mx.sym._internal._minimum(a, b)
    np_a = np.random.uniform(size=dshape).astype(dtype)
    np_b = np.random.uniform(size=dshape).astype(dtype)
    mx_a = mx.nd.array(np_a)
    mx_b = mx.nd.array(np_b)
    mod = mx.mod.Module(mx_sym, label_names=None, data_names=['a', 'b'])
    mod.bind(data_shapes=[('a', dshape), ('b', dshape)], for_training=False)
    mod.init_params()
    args, auxs = mod.get_params()
    mx_out = mx.nd._internal._minimum(mx_a, mx_b).asnumpy()
    out_shape = dshape
    new_sym, params = frontend.from_mxnet(mx_sym, args, auxs)
    shape_dict = {'a': dshape, 'b': dshape}
    for target, ctx in ctx_list():
        with nnvm.compiler.build_config(opt_level=3):
            graph, lib, params = nnvm.compiler.build(new_sym,
                                                     target,
                                                     shape_dict,
                                                     params=params)
        m = graph_runtime.create(graph, lib, ctx)
        # set inputs
        m.set_input("a", tvm.nd.array(np_a))
        m.set_input("b", tvm.nd.array(np_b))
        m.set_input(**params)
        m.run()
        # get outputs
        tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
        tvm.testing.assert_allclose(mx_out, tvm_out, rtol=1e-5, atol=1e-5)
Example #22
def _test_upsample_bilinear():
    scale = 2
    in_shape = (1, 1, 3, 3)
    out_shape = (1, 1, 3 * scale, 3 * scale)
    y = helper.make_node("Upsample", ['in'], ['out'],
                         mode='linear',
                         scales=[1.0, 1.0, 2.0, 2.0])

    in_array = np.random.uniform(size=in_shape).astype(np.float32)
    out_array = topi.testing.bilinear_resize_python(in_array,
                                                    (3 * scale, 3 * scale),
                                                    "NCHW")

    graph = helper.make_graph(
        [y],
        'upsample_bilinear_test',
        inputs=[
            helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                          list(in_shape))
        ],
        outputs=[
            helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                          list(out_shape))
        ])

    model = helper.make_model(graph, producer_name='upsample_bilinear_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, in_array, target, ctx, out_shape,
                                 'float32')
        np.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
Example #23
def verify_UpsampleLayerParams(input_dim, scale, mode):
    dtype = "float32"

    a_np = np.full(input_dim, 1, dtype=dtype)
    if mode == 'NN':
        b_np = topi.testing.upsampling_python(a_np, (scale, scale))
    else:
        new_h = input_dim[2] * scale
        new_w = input_dim[3] * scale
        b_np = topi.testing.bilinear_resize_python(a_np, (new_h, new_w),
                                                   'NCHW')

    input = [('input', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(input, output)
    builder.add_upsample(name='Upsample',
                         scaling_factor_h=scale,
                         scaling_factor_w=scale,
                         mode=mode,
                         input_name='input',
                         output_name='output')

    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, a_np, 'input', b_np.shape, dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
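Typical calls (input layout NCHW; the dimensions and modes are assumptions):

verify_UpsampleLayerParams((1, 16, 32, 32), 2, 'NN')
verify_UpsampleLayerParams((1, 16, 32, 32), 2, 'BILINEAR')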
Example #24
def verify_mean(input_dim):
    dtype = 'float32'

    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim).astype(dtype)
    a_np3 = np.random.uniform(size=input_dim).astype(dtype)

    b_np = np.mean((a_np1, a_np2, a_np3), axis=0)

    mean_node = helper.make_node("Mean", ["a_np1", "a_np2", "a_np3"], ["out"])

    graph = helper.make_graph([mean_node],
                              "Mean_test",
                              inputs = [helper.make_tensor_value_info("a_np1",
                                            TensorProto.FLOAT, list(input_dim)),
                                        helper.make_tensor_value_info("a_np2",
                                            TensorProto.FLOAT, list(input_dim)),
                                        helper.make_tensor_value_info("a_np3",
                                            TensorProto.FLOAT, list(input_dim))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(b_np.shape))])

    model = helper.make_model(graph, producer_name='Mean_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape)
        np.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
Example #25
def verify_keras_frontend(keras_model):
    in_shape = [
        dim.value if dim.value is not None else 1
        for dim in keras_model.input_layers[0].input.shape
    ]
    out_shape = [
        dim.value if dim.value is not None else 1
        for dim in keras_model.output_layers[0].output.shape
    ]

    def get_keras_output(x, dtype='float32'):
        return keras_model.predict(x)

    def get_tvm_output(x, target, ctx, input_name='data', dtype='float32'):
        sym, params = nnvm.frontend.from_keras(keras_model)
        shape_dict = {input_name: x.shape}
        with nnvm.compiler.build_config(opt_level=2):
            graph, lib, params = nnvm.compiler.build(sym,
                                                     target,
                                                     shape_dict,
                                                     params=params)
        m = graph_runtime.create(graph, lib, ctx)
        m.set_input(input_name, tvm.nd.array(x.astype(dtype)))
        m.set_input(**params)
        m.run()
        out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
        return out.asnumpy()

    x = np.random.uniform(size=in_shape)
    keras_out = get_keras_output(x)
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(x.transpose([0, 3, 1, 2]), target, ctx)
        np.testing.assert_allclose(keras_out, tvm_out, rtol=1e-5, atol=1e-5)
Example #26
def verify_split(indata, outdatas, split, axis=0):
    indata = np.array(indata).astype(np.float32)
    outdatas = [np.array(o).astype(np.float32) for o in outdatas]
    node = helper.make_node(
        'Split',
        inputs=['input'],
        outputs=['output_{}'.format(i) for i in range(len(split))],
        axis=axis,
        split=split)
    graph = helper.make_graph(
        [node],
        'split_test',
        inputs=[
            helper.make_tensor_value_info("input", TensorProto.FLOAT,
                                          list(indata.shape))
        ],
        outputs=[
            helper.make_tensor_value_info("output_{}".format(i),
                                          TensorProto.FLOAT,
                                          list(outdatas[i].shape))
            for i in range(len(split))
        ])
    model = helper.make_model(graph, producer_name='split_test')

    for target, ctx in ctx_list():
        output_shape = [o.shape for o in outdatas]
        output_type = ['float32'] * len(outdatas)
        tvm_out = get_tvm_output(model, indata, target, ctx, output_shape,
                                 output_type)
        for o, t in zip(outdatas, tvm_out):
            tvm.testing.assert_allclose(o, t)
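For instance, a three-way split of a 1-D tensor (values assumed):

verify_split([1., 2., 3., 4., 5., 6.],
             [[1., 2.], [3., 4.], [5., 6.]],
             [2, 2, 2])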
Example #27
def verify_onnx_forward_impl(graph_file, data_shape, out_shape):
    import onnx_caffe2.backend

    def get_caffe2_output(model, x, dtype='float32'):
        prepared_backend = onnx_caffe2.backend.prepare(model)
        W = {model.graph.input[0].name: x.astype(dtype)}
        c2_out = prepared_backend.run(W)[0]
        return c2_out

    def get_tvm_output(graph, x, target, ctx, dtype='float32'):
        new_sym, params = nnvm.frontend.from_onnx(graph)
        shape_dict = {'input_0': x.shape}
        graph, lib, params = nnvm.compiler.build(new_sym,
                                                 target,
                                                 shape_dict,
                                                 params=params)
        m = graph_runtime.create(graph, lib, ctx)
        # set inputs
        m.set_input('input_0', tvm.nd.array(x.astype(dtype)))
        m.set_input(**params)
        m.run()
        # get outputs
        out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
        return out.asnumpy()

    dtype = 'float32'
    x = np.random.uniform(size=data_shape)
    model = onnx.load(graph_file)
    c2_out = get_caffe2_output(model, x, dtype)
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, x, target, ctx, dtype)
        np.testing.assert_allclose(c2_out, tvm_out, rtol=1e-5, atol=1e-5)
Example #28
def test_reshape_like():
    in_shape = (4, 3, 3, 4)
    ref_shape = (3, 4, 4, 3)

    ref_array = np.random.uniform(size=ref_shape).astype('float32')
    ref_node = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=['ref_in'],
        value=onnx.helper.make_tensor(name='const_tensor',
                                      data_type=onnx.TensorProto.FLOAT,
                                      dims=ref_array.shape,
                                      vals=ref_array.flatten().astype(float)))
    copy_node = helper.make_node("Identity", ["ref_in"], ["copy_in"])
    reshape_node = helper.make_node("Reshape", ["in", "copy_in"], ["out"])

    graph = helper.make_graph(
        [ref_node, copy_node, reshape_node],
        "reshape_like_test",
        inputs=[
            helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                          list(in_shape))
        ],
        outputs=[
            helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                          list(ref_shape))
        ])

    model = helper.make_model(graph, producer_name='reshape_like_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32')
        tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
Example #29
def test_update():
    w = sym.Variable("w")
    w2 = sym.Variable("w2")
    w = sym._assign(w, w + 1)
    w2 = sym._assign(w2, w + 1)

    dshape = (5, 3, 18, 18)
    shape_dict = {"w": dshape, "w2": dshape}
    dtype = "float32"

    def check(target, ctx):
        graph, lib, _ = nnvm.compiler.build(w2, target, shape_dict)

        m = graph_runtime.create(graph, lib, ctx)

        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        m.set_input("w", data)
        m.run()
        out = m.get_input("w2", tvm.nd.empty(dshape, dtype))
        tvm.testing.assert_allclose(out.asnumpy(),
                                    data.asnumpy() + 2,
                                    rtol=1e-5)

        m.run()
        out = m.get_input("w2", tvm.nd.empty(dshape, dtype))
        tvm.testing.assert_allclose(out.asnumpy(),
                                    data.asnumpy() + 3,
                                    rtol=1e-5)

    for target, ctx in ctx_list():
        check(target, ctx)
Example #30
def test_shape():
    in_shape = (4, 3, 3, 4)
    ref_shape = (6, 2, 4, 3)

    ref_array = np.array(ref_shape)
    ref_node = onnx.helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['ref_in'],
                                 value=onnx.helper.make_tensor(name = 'const_tensor',
                                                               data_type = onnx.TensorProto.INT32,
                                                               dims = ref_array.shape,
                                                               vals = ref_array.flatten().astype(int)))
    reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])

    shape_node = helper.make_node("Shape", ['out'], ['final_out'])

    graph = helper.make_graph([ref_node, reshape_node, shape_node],
                              "shape_test",
                              inputs = [helper.make_tensor_value_info("in",
                                            TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("final_out",
                                            TensorProto.FLOAT, list(ref_shape))])

    model = helper.make_model(graph, producer_name='shape_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('int32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'int32')
        tvm.testing.assert_allclose(ref_shape, tvm_out)
Example #31
def test_grouped_conv2d():
    x = sym.Variable("x")
    y = sym.conv2d(x,
                   channels=32,
                   kernel_size=(3, 3),
                   groups=32,
                   name="y",
                   padding=(1, 1))
    dtype = "float32"
    dshape = (1, 32, 18, 18)
    kshape = (32, 1, 3, 3)
    oshape = (1, 32, 18, 18)
    shape_dict = {"x": dshape}
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
        bias = tvm.nd.array(np.random.uniform(size=kshape[0]).astype(dtype))
        m.run(x=data, y_weight=kernel, y_bias=bias)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
        c_np = topi.testing.depthwise_conv2d_python_nchw(
            data.asnumpy(), kernel.asnumpy(), (1, 1), 'SAME')
        c_np = c_np + bias.asnumpy().reshape(kshape[0], 1, 1)
        np.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)
Example #32
def test_multibox_transform_loc():
    batch_size = 1
    num_anchors = 3
    num_classes = 3
    cls_prob = sym.Variable("cls_prob")
    loc_preds = sym.Variable("loc_preds")
    anchors = sym.Variable("anchors")
    transform_loc_data, valid_count = sym.multibox_transform_loc(cls_prob=cls_prob, loc_pred=loc_preds,
                                                                 anchor=anchors)
    out = sym.non_max_suppression(data=transform_loc_data, valid_count=valid_count, return_indices=False)

    # Manually create test case
    np_cls_prob = np.array([[[0.2, 0.5, 0.3], [0.25, 0.3, 0.45], [0.7, 0.1, 0.2]]])
    np_loc_preds = np.array([[0.1, -0.2, 0.3, 0.2, 0.2, 0.4, 0.5, -0.3, 0.7, -0.2, -0.4, -0.8]])
    np_anchors = np.array([[[-0.1, -0.1, 0.1, 0.1], [-0.2, -0.2, 0.2, 0.2], [1.2, 1.2, 1.5, 1.5]]])

    expected_np_out = np.array([[[1, 0.69999999, 0, 0, 0.10818365, 0.10008108],
                                 [0, 0.44999999, 1, 1, 1, 1],
                                 [0, 0.30000001, 0, 0, 0.22903419, 0.20435292]]])

    dtype = "float32"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(out, target, {"cls_prob": (batch_size, num_anchors, num_classes),
                                                          "loc_preds": (batch_size, num_anchors * 4),
                                                          "anchors": (1, num_anchors, 4)})
        m = graph_runtime.create(graph, lib, ctx)
        m.set_input(**{"cls_prob": np_cls_prob.astype(dtype), "loc_preds": np_loc_preds.astype(dtype), "anchors": np_anchors.astype(dtype)})
        m.run()
        tvm_out = m.get_output(0, tvm.nd.empty(expected_np_out.shape, dtype))
        tvm.testing.assert_allclose(tvm_out.asnumpy(), expected_np_out, atol=1e-5, rtol=1e-5)
Example #33
def test_conv2d_transpose():
    x = sym.Variable("x")
    y = sym.conv2d_transpose(x,
                             channels=10,
                             kernel_size=(3, 3),
                             strides=(2, 2),
                             name="y",
                             padding=(1, 1),
                             output_padding=(2, 2))
    dtype = "float32"
    dshape = (1, 3, 18, 18)
    kshape = (3, 10, 3, 3)
    oshape = (1, 10, 37, 37)
    shape_dict = {"x": dshape}
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
        bias = tvm.nd.array(np.random.uniform(size=kshape[1]).astype(dtype))
        m.run(x=data, y_weight=kernel, y_bias=bias)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
        c_np = topi.testing.conv2d_transpose_nchw_python(
            data.asnumpy(), kernel.asnumpy(), 2, 1)
        c_np = c_np + bias.asnumpy().reshape(kshape[1], 1, 1)
        d_np = np.zeros(shape=oshape)
        d_np[:, :, 0:c_np.shape[2], 0:c_np.shape[3]] = c_np
        np.testing.assert_allclose(out.asnumpy(), d_np, rtol=1e-5)
Example #34
def _test_softmax(inshape, axis):
    opname = 'Softmax'
    indata = np.random.uniform(size=inshape).astype(np.float32)
    outshape = inshape
    outdata = topi.testing.softmax_python(indata)
    if isinstance(axis, int):
        y = helper.make_node(opname, ['in'], ['out'], axis=axis)
    elif axis is None:
        y = helper.make_node(opname, ['in'], ['out'])

    graph = helper.make_graph(
        [y],
        opname + '_test',
        inputs=[
            helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                          list(indata.shape))
        ],
        outputs=[
            helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                          list(outdata.shape))
        ])

    model = helper.make_model(graph, producer_name=opname + '_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outshape,
                                 'float32')
        tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
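For example (shapes assumed; axis=None exercises the attribute-less branch):

_test_softmax((1, 10), None)
_test_softmax((1, 10), 1)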
Example #35
def run_model_checkonly(model_file, model_name=''):
    model = cm.models.MLModel(model_file)
    sym, params = nnvm.frontend.from_coreml(model)
    x = model_zoo.get_cat_image()
    for target, ctx in ctx_list():
        tvm_output = get_tvm_output(sym, x, params, target, ctx)
        print(target, ctx, model_name, 'prediction id: ', np.argmax(tvm_output.flat))
Example #36
def test_update():
    w = sym.Variable("w")
    w2 = sym.Variable("w2")
    w = sym._assign(w, w + 1)
    w2 = sym._assign(w2, w + 1)

    dshape = (5, 3, 18, 18)
    shape_dict = {"w": dshape, "w2":dshape}
    dtype = "float32"

    def check(target, ctx):
        graph, lib, _ = nnvm.compiler.build(w2, target, shape_dict)

        m = graph_runtime.create(graph, lib, ctx)

        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        m.set_input("w", data)
        m.run()
        out = m.get_input("w2", tvm.nd.empty(dshape, dtype))
        tvm.testing.assert_allclose(out.asnumpy(), data.asnumpy() + 2, rtol=1e-5)

        m.run()
        out = m.get_input("w2", tvm.nd.empty(dshape, dtype))
        tvm.testing.assert_allclose(out.asnumpy(), data.asnumpy() + 3, rtol=1e-5)

    for target, ctx in ctx_list():
        check(target, ctx)
Example #37
def verify_UpsampleLayerParams(input_dim, scale, mode):
    dtype = "float32"

    a_np = np.full(input_dim, 1, dtype=dtype)
    if mode == 'NN':
        b_np = topi.testing.upsampling_python(a_np, scale)
    else:
        new_h = input_dim[2] * scale
        new_w = input_dim[3] * scale
        b_np = topi.testing.bilinear_resize_python(a_np, (new_h, new_w), 'NCHW')

    input = [('input', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(input, output)
    builder.add_upsample(name='Upsample',
                         scaling_factor_h=scale,
                         scaling_factor_w=scale,
                         mode=mode,
                         input_name='input',
                         output_name='output')

    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, a_np, 'input', b_np.shape, dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
Example #38
def test_reshape_like():
    in_shape = (4, 3, 3, 4)
    ref_shape = (3, 4, 4, 3)

    ref_array = np.random.uniform(size=ref_shape).astype('float32')
    ref_node = onnx.helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['ref_in'],
                                 value=onnx.helper.make_tensor(name = 'const_tensor',
                                                               data_type = onnx.TensorProto.FLOAT,
                                                               dims = ref_array.shape,
                                                               vals = ref_array.flatten().astype(float)))
    copy_node = helper.make_node("Identity", ["ref_in"], ["copy_in"])
    reshape_node = helper.make_node("Reshape", ["in", "copy_in"], ["out"])

    graph = helper.make_graph([ref_node, copy_node, reshape_node],
                              "reshape_like_test",
                              inputs = [helper.make_tensor_value_info("in",
                                            TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(ref_shape))])

    model = helper.make_model(graph, producer_name='reshape_like_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32')
        tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
Example #39
def verify_split(indata, outdatas, split, axis=0):
    indata = np.array(indata).astype(np.float32)
    outdatas = [np.array(o).astype(np.float32) for o in outdatas]
    node = helper.make_node(
        'Split',
        inputs=['input'],
        outputs=['output_{}'.format(i) for i in range(len(split))],
        axis=axis,
        split=split
    )
    graph = helper.make_graph([node],
                              'split_test',
                              inputs = [helper.make_tensor_value_info("input",
                                            TensorProto.FLOAT, list(indata.shape))],
                              outputs = [helper.make_tensor_value_info("output_{}".format(i),
                                            TensorProto.FLOAT, list(outdatas[i].shape))
                                            for i in range(len(split))
                                         ])
    model = helper.make_model(graph, producer_name='split_test')

    for target, ctx in ctx_list():
        output_shape = [o.shape for o in outdatas]
        output_type = ['float32'] * len(outdatas)
        tvm_out = get_tvm_output(model, indata, target, ctx, output_shape, output_type)
        for o, t in zip(outdatas, tvm_out):
            tvm.testing.assert_allclose(o, t)
Example #40
def verify_caffe2_forward_impl(model, data_shape, out_shape):
    dtype = 'float32'
    data = np.random.uniform(size=data_shape).astype(dtype)
    c2_out = get_caffe2_output(model, data, dtype)
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, data, target, ctx, out_shape, dtype)
        tvm.testing.assert_allclose(c2_out, tvm_out, rtol=1e-5, atol=1e-5)
Example #41
def verify_reduce_x(name, indata, axis, keepdims):
    indata = np.array(indata).astype(np.float32)
    #  numpy expect result
    if name == 'ReduceMax':
        outdata = np.maximum.reduce(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceMin':
        outdata = np.minimum.reduce(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceSum':
        outdata = np.sum(indata, axis=axis, keepdims=keepdims == 1)
    elif name == 'ReduceMean':
        outdata = np.mean(indata, axis=axis, keepdims=keepdims == 1)
    else:
        raise Exception('unsupported op: {}'.format(name))
    if len(np.asarray(outdata).shape) == 0:
        outdata = np.asarray([outdata])
    #  onnx graph
    if axis is None:
        node = helper.make_node(name, inputs=['input'], outputs=['output'],
                                keepdims=keepdims)
    else:
        node = helper.make_node(name, inputs=['input'], outputs=['output'],
                                axes=axis, keepdims=keepdims)
    graph = helper.make_graph([node],
                              '{}_test'.format(name),
                              inputs = [helper.make_tensor_value_info("input",
                                            TensorProto.FLOAT, list(indata.shape))],
                              outputs = [helper.make_tensor_value_info("output",
                                            TensorProto.FLOAT, list(outdata.shape))])
    model = helper.make_model(graph, producer_name='{}_test'.format(name))
    #  tvm result
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32')
        tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
Example #42
def test_avg_pool2d_no_count_pad():
    kh, kw = (4, 4)
    sh, sw = (2, 2)
    ph, pw = (2, 2)

    x = sym.Variable("x")
    y = sym.avg_pool2d(x, pool_size=(kh, kw), strides=(sh, sw), padding=(ph, pw),
                       name="y", count_include_pad=False)
    dtype = "float32"
    n = 1
    (ic, ih, iw) = (3, 28, 28)
    (oc, oh, ow) = (3, 15, 15)

    a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
    pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype)
    no_zero = (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw)))
    pad_np[np.ix_(*no_zero)] = a_np
    b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)

    for i in range(oh):
        for j in range(ow):
            pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3))
            b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw],
                                   axis=(2,3)) / np.maximum(pad_count, 1)
    b_np = np.maximum(b_np, 0.0)
    shape_dict = {"x": (n, ic, ih, iw)}
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(a_np)
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty((n, oc, oh, ow), dtype))
        tvm.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)
Example #43
def verify_constantfill(is_shape, input_dim, out_dim, value, dtype, **kwargs):
    input_a = np.random.uniform(size=input_dim).astype(dtype)
    out = np.empty(shape=out_dim, dtype=dtype)
    out.fill(value)

    if is_shape:
        fill_node = helper.make_node("ConstantFill", [], ["out"], shape=input_dim, value=value, **kwargs)
        inputs = []
    else:
        fill_node = helper.make_node("ConstantFill", ["input_a"], ["out"], value=value, dtype=dtype, **kwargs)
        inputs = [helper.make_tensor_value_info("input_a",
                  TensorProto.FLOAT, list(input_dim))]

    graph = helper.make_graph([fill_node],
                              "fill_test",
                              inputs,
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(out.shape))])

    model = helper.make_model(graph, producer_name='fill_test')

    for target, ctx in ctx_list():
        if is_shape:
            tvm_out = get_tvm_output(model, [], target, ctx, out.shape)
        else:
            tvm_out = get_tvm_output(model, [input_a], target, ctx, out.shape)

        tvm.testing.assert_allclose(out, tvm_out, rtol=1e-5, atol=1e-5)
Example #44
def verify_pad(indata, pads, value=0.0):
    indata = np.array(indata).astype(np.float32)
    #  numpy expect result
    len_dim = len(pads) // 2
    np_pads = [(pads[i], pads[i+len_dim]) for i in range(len_dim)]
    outdata = np.pad(indata, pad_width=np_pads, mode='constant', constant_values=value)
    #  onnx graph
    node = helper.make_node(
        'Pad',
        inputs=['input'],
        outputs=['output'],
        mode='constant',
        pads=pads,
        value=value
    )
    graph = helper.make_graph([node],
                              'pad_test',
                              inputs = [helper.make_tensor_value_info("input",
                                            TensorProto.FLOAT, list(indata.shape))],
                              outputs = [helper.make_tensor_value_info("output",
                                            TensorProto.FLOAT, list(outdata.shape))])
    model = helper.make_model(graph, producer_name='pad_test')
    #  tvm result
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32')
        tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
Example #45
def verify_mean(input_dim):
    dtype = 'float32'

    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim).astype(dtype)
    a_np3 = np.random.uniform(size=input_dim).astype(dtype)

    b_np = np.mean((a_np1, a_np2, a_np3), axis=0)

    mean_node = helper.make_node("Mean", ["a_np1", "a_np2", "a_np3"], ["out"])

    graph = helper.make_graph([mean_node],
                              "Mean_test",
                              inputs = [helper.make_tensor_value_info("a_np1",
                                            TensorProto.FLOAT, list(input_dim)),
                                        helper.make_tensor_value_info("a_np2",
                                            TensorProto.FLOAT, list(input_dim)),
                                        helper.make_tensor_value_info("a_np3",
                                            TensorProto.FLOAT, list(input_dim))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(b_np.shape))])

    model = helper.make_model(graph, producer_name='Mean_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape)
        tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
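An illustrative call; any shared shape works, since Mean reduces elementwise across its inputs:

verify_mean((1, 3, 20, 20))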
Example #46
def _test_upsample_bilinear_opset9():
    scale = 2
    in_shape = (1, 1, 3, 3)
    out_shape = (1, 1, 3*scale, 3*scale)
    y = helper.make_node("Upsample", ['in','scales'], ['out'], mode='linear')
    scales=[1.0, 1.0, 2.0, 2.0]
    in_array = np.random.uniform(size=in_shape).astype(np.float32)
    out_array = topi.testing.bilinear_resize_python(in_array, (3*scale, 3*scale), "NCHW")

    ref_array = np.array(scales)
    ref_node = helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['scales'],
                                 value=onnx.helper.make_tensor(name = 'const_tensor',
                                                               data_type = TensorProto.FLOAT,
                                                               dims = ref_array.shape,
                                                               vals = ref_array.flatten().astype(float)))

    graph = helper.make_graph([ref_node, y],
                              'upsample_bilinear_opset9_test',
                              inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
                              outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))])

    model = helper.make_model(graph, producer_name='upsample_bilinear_opset9_test')
    inputs = []
    inputs.append(in_array)

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, inputs, target, ctx, out_shape, 'float32')
        tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
Example #47
def _test_power_iteration(x_shape, y_shape):
    if isinstance(y_shape, int):
        y_shape = [y_shape]

    x = np.random.uniform(size=x_shape).astype(np.float32)
    y = np.random.uniform(size=y_shape).astype(np.float32)

    np_res = np.power(x, y).astype(np.float32)

    res = helper.make_node("Pow", ['x', 'y'], ['out'])

    graph = helper.make_graph([res],
                              'power_test',
                              inputs = [helper.make_tensor_value_info("x",
                                            TensorProto.FLOAT, list(x_shape)),
                                        helper.make_tensor_value_info("y",
                                            TensorProto.FLOAT, list(y_shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(np_res.shape))])

    model = helper.make_model(graph, producer_name='power_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, y], target, ctx, np_res.shape)
        tvm.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5)
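Pow follows NumPy-style broadcasting between base and exponent, so both matched and broadcast shapes are worth exercising. Illustrative calls:

_test_power_iteration((1, 3), (1))
_test_power_iteration((2, 3), (2, 3))
_test_power_iteration((1, 2, 3), (2))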
Example #48
def helper(symbol, inputs, dtype,
           np_forward, np_backward=None):
    ishapes = {}
    input_syms = []
    np_inputs = {}
    for (k, v) in inputs.items():
        ishapes.update({k: v[0]})
        np_inputs.update({k: np.random.uniform(size=v[0]).astype(dtype)})
        if len(v) > 1:
            input_syms.append(v[1])

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(symbol, target, ishapes)
        m = graph_runtime.create(graph, lib, ctx)
        m.run(**np_inputs)
        y_np = np_forward(**np_inputs)
        out = m.get_output(0, tvm.nd.empty(y_np.shape, dtype))
        np.testing.assert_allclose(out.asnumpy(), y_np, atol=1e-5, rtol=1e-5)

        # backward
        if np_backward:
            graph._set_symbol_list_attr("grad_ys", symbol)
            for x in input_syms:
                graph._set_symbol_list_attr("grad_xs", x)
            graph._set_symbol_list_attr("grad_ys_out_grad", sym.Variable("head_grads"))
            graph = graph.apply("Gradient")
            ishapes.update({"head_grads": y_np.shape})
            graph, lib, _ = nnvm.compiler.build(graph, target, ishapes)
            m = graph_runtime.create(graph, lib, ctx)
            head_grads = np.random.uniform(size=y_np.shape).astype(dtype)
            y_np = head_grads * np_backward(**np_inputs)
            m.run(head_grads=head_grads, **np_inputs)
            out = m.get_output(0, tvm.nd.empty(y_np.shape, dtype))

            np.testing.assert_allclose(out.asnumpy(), y_np, atol=1e-5, rtol=1e-5)
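A sketch of how this helper is typically driven, assuming nnvm's `sym.sigmoid`; the backward check uses the analytic gradient sigmoid(x) * (1 - sigmoid(x)):

x = sym.Variable("x")
y = sym.sigmoid(x)

def forward(x):
    return 1.0 / (1.0 + np.exp(-x))

def backward(x):
    s = forward(x)
    return s * (1.0 - s)

# Each input maps to (shape, symbol); the symbol is only consulted when a
# backward pass is requested.
helper(y, {'x': ((1, 3, 32, 32), x)}, 'float32', forward, backward)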
Example #49
def test_mixed_precision():
    x = sym.Variable("x")
    dtype = "int8"
    out_dtype="int32"
    y = sym.conv2d(x,
                   channels=10,
                   kernel_size=(3,3),
                   name="y",
                   padding=(1,1),
                   use_bias=False,
                   out_dtype="int32")
    dshape = (1, 3, 18, 18)
    kshape = (10, 3, 3, 3)
    oshape = (1, 10, 18, 18)
    shape_dict = {"x": dshape}
    dtype_dict = {"x": dtype}
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict, dtype_dict)
        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(-127, 127, size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(-127, 127, size=kshape).astype(dtype))
        m.run(x=data, y_weight=kernel)
        out = m.get_output(0, tvm.nd.empty(oshape, out_dtype))
        c_np = topi.testing.conv2d_nchw_python(
            data.asnumpy().astype(out_dtype),
            kernel.asnumpy().astype(out_dtype), 1, 1)
        tvm.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)
Example #50
def verify_min(input_dim):
    dtype = 'float32'

    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim).astype(dtype)
    a_np3 = np.random.uniform(size=input_dim).astype(dtype)

    b_np = np.min((a_np1, a_np2, a_np3), axis=0)

    inputs = [('input1', datatypes.Array(*input_dim)),
              ('input2', datatypes.Array(*input_dim)),
              ('input3', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Min',
                            input_names=['input1', 'input2', 'input3'],
                            output_name='output',
                            mode='MIN')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model,
                           [a_np1, a_np2, a_np3],
                           ['input1', 'input2', 'input3'],
                           b_np.shape,
                           dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
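An illustrative call (the shape is hypothetical); all three inputs share it by construction:

verify_min((1, 3, 20, 20))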
Example #51
def verify_onnx_forward_impl(graph_file, data_shape, out_shape):
    dtype = 'float32'
    x = np.random.uniform(size=data_shape)
    model = onnx.load_model(graph_file)
    c2_out = get_caffe2_output(model, x, dtype)
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, x, target, ctx, out_shape, dtype)
        tvm.testing.assert_allclose(c2_out, tvm_out, rtol=1e-5, atol=1e-5)
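An illustrative call with a hypothetical model path; the shapes correspond to a 3x super-resolution graph:

# 'super_resolution.onnx' is a placeholder -- substitute a real exported model.
verify_onnx_forward_impl('super_resolution.onnx', (1, 1, 224, 224), (1, 1, 672, 672))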
Example #53
def test_model_checkonly(model_file, model_name=''):
    model = cm.models.MLModel(model_file)
    sym, params = nnvm.frontend.from_coreml(model)
    x = model_zoo.get_cat_image()
    for target, ctx in ctx_list():
        tvm_output = get_tvm_output(sym, x, params, target, ctx)
        print(target, ctx, model_name, 'prediction id: ',
              np.argmax(tvm_output.flat))
Example #54
def verify_single_ops(op, x, out_np, rtol=1e-7, atol=1e-7):
    z = helper.make_node(op, ['in1'], ['out'])
    graph = helper.make_graph([z],
                              '_test',
                              inputs = [helper.make_tensor_value_info("in1",
                                            TensorProto.FLOAT, list(x.shape))],
                              outputs = [helper.make_tensor_value_info("out",
                                            TensorProto.FLOAT, list(out_np.shape))])
    model = helper.make_model(graph, producer_name='_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x], target, ctx)
        tvm.testing.assert_allclose(out_np, tvm_out, rtol=rtol, atol=atol)
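Illustrative calls, pairing each ONNX unary op with its NumPy reference (the input shape is hypothetical):

x = np.random.uniform(size=(1, 3, 3, 3)).astype(np.float32)
verify_single_ops("Neg", x, -x)
verify_single_ops("Abs", x, np.abs(x))
verify_single_ops("Exp", x, np.exp(x), rtol=1e-5, atol=1e-5)  # looser tolerance for transcendentals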
Example #55
def verify_reduce(dshape, fnp, fsym, **kwargs):
    x = sym.Variable("x")
    y = fsym(x + 1, **kwargs)
    dtype = "float32"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
        m = graph_runtime.create(graph, lib, ctx)
        # set input
        data = np.random.uniform(size=dshape).astype(dtype)
        out_np = fnp(data + 1, **kwargs)
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(out_np.shape))
        np.testing.assert_allclose(out.asnumpy(), out_np, atol=1e-5, rtol=1e-5)
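Illustrative calls, assuming nnvm's `sym.max`/`sym.sum` mirror the NumPy reductions they are checked against:

verify_reduce((1, 3, 20, 20), np.max, sym.max, axis=1, keepdims=True)
verify_reduce((4, 4), np.sum, sym.sum, axis=1)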
Example #56
def run_test_conv2d(sym, dtype, dshape, kshape, oshape, shape_dict, padding):
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(sym, target, shape_dict)
        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
        bias = tvm.nd.array(np.random.uniform(size=kshape[0]).astype(dtype))
        m.run(x=data, y_weight=kernel, y_bias=bias)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
        c_np = topi.testing.conv2d_nchw_python(
            data.asnumpy(), kernel.asnumpy(), 1, padding)
        c_np = c_np + bias.asnumpy().reshape(kshape[0], 1, 1)
        tvm.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)
Example #57
def verify_collapse(dshape, target_shape, fnp):
    x = sym.Variable("x", shape=dshape)
    t = sym.Variable("t", shape=target_shape)
    y = sym.collapse_sum(x, t)
    dtype = "float32"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target,
                                            {"x": dshape, "t": target_shape})
        m = graph_runtime.create(graph, lib, ctx)
        data = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(target_shape))
        out_np = fnp(data)
        tvm.testing.assert_allclose(out.asnumpy(), out_np, atol=1e-5, rtol=1e-5)
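Illustrative calls; `fnp` must reduce the data down to exactly `target_shape`, mirroring what collapse_sum does against the broadcastable target:

verify_collapse((2, 3, 4), (1,), lambda x: x.sum().reshape(1,))
verify_collapse((2, 3, 4), (1, 3, 4), lambda x: x.sum(0, keepdims=True))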
Example #58
def verify_reshape(dshape, oshape):
    x = sym.Variable("x")
    y = sym.reshape(x, shape=oshape)
    y = y + 1
    dtype = "float32"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
        m = graph_runtime.create(graph, lib, ctx)
        # set input
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        m.run(x=data)
        out_np = data.asnumpy().reshape(oshape) + 1
        out = m.get_output(0, tvm.nd.empty(out_np.shape))
        np.testing.assert_allclose(out.asnumpy(), out_np, atol=1e-5, rtol=1e-5)
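Illustrative calls; the element counts must agree, and -1 lets one dimension be inferred:

verify_reshape((2, 3, 4), (8, 3))
verify_reshape((2, 3, 4), (-1, 2, 1))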
Example #59
def verify_split(ishape, indices_or_sections, axis):
    x = sym.Variable("x")
    y = sym.split(x, indices_or_sections=indices_or_sections, axis=axis)
    dtype = "float32"
    x_np = np.random.uniform(size=ishape).astype(dtype)
    res = np.split(x_np, indices_or_sections, axis=axis)
    for target, ctx in ctx_list():
        # set input
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": ishape})
        m = graph_runtime.create(graph, lib, ctx)
        m.run(x=x_np)
        for i, arr in enumerate(res):
            out = m.get_output(i, tvm.nd.empty(arr.shape))
            np.testing.assert_allclose(out.asnumpy(), arr, atol=1e-5, rtol=1e-5)
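Illustrative calls covering both split styles: explicit indices and an equal number of sections:

verify_split((5, 3), [3], axis=0)
verify_split((2, 6), 3, axis=1)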