Code Example #1
File: test_forward.py Project: bddppq/tvm
def test_forward_scalar_ops():
    for op in [operator.add, operator.sub, operator.mul, operator.truediv,
               operator.pow, operator.lt, operator.le, operator.eq,
               operator.ne, operator.gt, operator.ge]:
        dtype='float32'
        a_shape = (3, 4, 5)
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        b_scalar = 2.3
        mx_sym = op(mx.sym.var('a'), b_scalar)
        ref_res = op(mx.nd.array(a_np), b_scalar)
        shapes = {'a': a_shape}
        new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                op_res = intrp.evaluate(new_sym)(a_np)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
    for op in ["maximum", "minimum"]:
        dtype='float32'
        a_shape = (3, 4, 5)
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        b_scalar = 2.3
        mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), b_scalar])
        ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), b_scalar])
        shapes = {'a': a_shape}
        new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                op_res = intrp.evaluate(new_sym)(a_np)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
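Note: several of the MXNet examples above and below call a _mx_symbol helper that is not part of these listings. A minimal sketch, assuming it only resolves the named operator on the given MXNet namespace (mx.sym or mx.nd) and applies it to the listed inputs:

def _mx_symbol(F, op_name, inputs):
    # F is either mx.sym or mx.nd; op_name is e.g. "maximum" or "broadcast_add".
    op = getattr(F, op_name)
    return op(*inputs)

The arange-style Examples #9 and #10 appear to rely on a different, locally defined _mx_symbol(F, start, stop, step) variant, so this sketch covers only the calls that pass an operator name and an input list.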
Code Example #2
File: test_forward.py Project: bddppq/tvm
def verify_UpsampleLayerParams(input_dim, scale, mode):
    dtype = "float32"

    a_np = np.full(input_dim, 1, dtype=dtype)
    if mode == 'NN':
        b_np = topi.testing.upsampling_python(a_np, scale)
    else:
        new_h = input_dim[2] * scale
        new_w = input_dim[3] * scale
        b_np = topi.testing.bilinear_resize_python(a_np, (new_h, new_w), 'NCHW')

    input = [('input', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(input, output)
    builder.add_upsample(name='Upsample',
                         scaling_factor_h=scale,
                         scaling_factor_w=scale,
                         mode=mode,
                         input_name='input',
                         output_name='output')

    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, a_np, 'input', b_np.shape, dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
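Hypothetical calls to the helper above; the input shape and scale factor are illustrative only:

verify_UpsampleLayerParams((1, 16, 32, 32), 2, 'NN')        # nearest-neighbor upsampling
verify_UpsampleLayerParams((1, 16, 32, 32), 2, 'BILINEAR')  # bilinear upsampling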
Code Example #3
File: test_forward.py Project: bddppq/tvm
def test_forward_broadcast_ops():
    for op in ["broadcast_add", "broadcast_sub", "broadcast_mul",
               "broadcast_div", "broadcast_mod", "broadcast_maximum",
               "broadcast_minimum", "broadcast_equal", "broadcast_not_equal",
               "broadcast_greater", "broadcast_greater_equal",
               "broadcast_lesser", "broadcast_lesser_equal"]:
        a_shape = (3, 4, 5)
        b_shape = (4, 5)
        if op == "broadcast_mod":
            dtype = 'int32'
            a_np = np.random.randint(1, 100, size=a_shape).astype(dtype)
            b_np = np.random.randint(1, 100, size=b_shape).astype(dtype)
        else:
            dtype = 'float32'
            a_np = np.random.uniform(size=a_shape).astype(dtype)
            b_np = np.random.uniform(size=b_shape).astype(dtype)
        mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), mx.sym.var('b')])
        ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), mx.nd.array(b_np)])
        shapes = {'a': a_shape, 'b': b_shape}
        new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                op_res = intrp.evaluate(new_sym)(a_np, b_np)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #4
File: test_forward.py Project: bddppq/tvm
def test_forward_where():
    cond = mx.sym.var('cond')
    x = mx.sym.var('x')
    y = mx.sym.var('y')
    dshape = (2, 2)
    dtype = 'float32'
    mx_sym = mx.sym.where(cond, x, y)
    np_cond = np.array([[0, 1], [-1, 0]]).astype(dtype)
    np_x = np.random.uniform(size=dshape).astype(dtype)
    np_y = np.random.uniform(size=dshape).astype(dtype)
    mx_cond = mx.nd.array(np_cond)
    mx_x = mx.nd.array(np_x)
    mx_y = mx.nd.array(np_y)
    shapes = {'cond': dshape, 'x': dshape, 'y': dshape}
    mod = mx.mod.Module(mx_sym, label_names=None, data_names=['cond', 'x', 'y'])
    mod.bind(data_shapes=shapes.items(), for_training=False)
    mod.init_params()
    args, auxs = mod.get_params()
    mx_out = mx.nd.where(mx_cond, mx_x, mx_y).asnumpy()

    new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, arg_params=args, aux_params=auxs)
    for target, ctx in ctx_list():
        for kind in ["graph", "debug"]:
            intrp = relay.create_executor(kind, ctx=ctx, target=target)
            op_res = intrp.evaluate(new_sym)(np_cond, np_x, np_y)
            tvm.testing.assert_allclose(op_res.asnumpy(), mx_out)
Code Example #5
def test_gru_like():
    def unit(rnn_dim):
        X = relay.var("X", shape=(1, rnn_dim))
        W = relay.var("y", shape=(3 * rnn_dim, rnn_dim))
        matmul = relay.nn.dense(X, W)
        splitted = relay.split(matmul, indices_or_sections=3, axis=1)
        out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
        return relay.Function([X, W], out)

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def unit_numpy(X, W):
        prod = np.dot(X, W.transpose())
        splits = np.split(prod, indices_or_sections=3, axis=1)
        return sigmoid(splits[0]) + np.tanh(splits[1]) * np.exp(splits[2])

    dtype = "float32"
    rnn_dim = 1000
    x = np.random.rand(1, rnn_dim).astype(dtype)
    y = np.random.rand(3*rnn_dim, rnn_dim).astype(dtype) * 0.01 - 0.005
    out_shape = (1, rnn_dim)
    z = unit(rnn_dim)

    for target, ctx in ctx_list():
        with relay.build_config(opt_level=2):
            graph, lib, params = relay.build(z, target)
            m = graph_runtime.create(graph, lib, ctx)
            m.set_input("X", tvm.nd.array(x.astype(dtype)))
            m.set_input("y", tvm.nd.array(y.astype(dtype)))
            m.set_input(**params)
            m.run()
            out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
            ref = unit_numpy(x, y)
            tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
Code Example #6
File: test_forward.py Project: bddppq/tvm
def run_model_checkonly(model_file, model_name='', input_name='image'):
    model = cm.models.MLModel(model_file)
    x = model_zoo.get_cat_image()
    shape_dict = {input_name : x.shape}
    func, params = relay.frontend.from_coreml(model, shape_dict)
    for target, ctx in ctx_list():
        tvm_output = get_tvm_output(func, x, params, target, ctx)
        print(target, ctx, model_name, 'prediction id: ', np.argmax(tvm_output.flat))
Code Example #7
File: test_forward.py Project: yongwww/tvm
def verify_keras_frontend(keras_model, need_transpose=True, layout='NCHW'):
    # Keras frontend currently supports tensorflow backend only.
    assert (keras.backend.backend() == 'tensorflow')

    if layout != 'NCHW':
        need_transpose = False

    in_shapes = []
    for layer in keras_model._input_layers:
        if tf.executing_eagerly():
            in_shapes.append(
                tuple(dim if dim is not None else 1
                      for dim in layer.input.shape))
        else:
            in_shapes.append(
                tuple(dim.value if dim.value is not None else 1
                      for dim in layer.input.shape))

    def get_keras_output(xs, dtype='float32'):
        return keras_model.predict(xs)

    def get_tvm_output(xs, target, ctx, dtype='float32'):
        shape_dict = {
            name: x.shape
            for (name, x) in zip(keras_model.input_names, xs)
        }
        mod, params = relay.frontend.from_keras(keras_model,
                                                shape_dict,
                                                layout=layout)
        with tvm.transform.PassContext(opt_level=2):
            graph, lib, params = relay.build(mod, target, params=params)
        m = graph_runtime.create(graph, lib, ctx)
        for name, x in zip(keras_model.input_names, xs):
            m.set_input(name, tvm.nd.array(x.astype(dtype)))
        m.set_input(**params)
        m.run()
        return [m.get_output(i).asnumpy() for i in range(m.get_num_outputs())]

    def to_channels_first(arr):
        return arr.transpose([0, -1] + list(range(1, arr.ndim - 1)))

    def to_channels_last(arr):
        return arr.transpose([0] + list(range(2, arr.ndim)) + [1])

    xs = [
        np.random.uniform(size=shape, low=-1.0, high=1.0)
        for shape in in_shapes
    ]
    keras_out = get_keras_output(xs)
    keras_out = keras_out if isinstance(keras_out, list) else [keras_out]
    for target, ctx in ctx_list():
        inputs = [to_channels_first(x) for x in xs] if need_transpose else xs
        tvm_out = get_tvm_output(inputs, target, ctx)
        for kout, tout in zip(keras_out, tvm_out):
            if need_transpose:
                tout = to_channels_last(tout)
            tvm.testing.assert_allclose(kout, tout, rtol=1e-5, atol=1e-5)
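A hypothetical usage sketch for the helper above, assuming the standard Keras/tf.keras API; the small Conv2D model is illustrative only:

# Build a tiny NHWC Keras model and compare its output against the TVM build.
model = keras.models.Sequential([
    keras.layers.Conv2D(4, (3, 3), padding='same', input_shape=(16, 16, 3)),
    keras.layers.Activation('relu'),
])
verify_keras_frontend(model)  # default layout='NCHW' transposes inputs and outputs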
Code Example #8
def run_model_checkonly(model_file, model_name='', input_name='image'):
    model = cm.models.MLModel(model_file)
    x = model_zoo.get_cat_image()
    shape_dict = {input_name: x.shape}
    mod, params = relay.frontend.from_coreml(model, shape_dict)
    for target, ctx in ctx_list():
        tvm_output = get_tvm_output(mod["main"], x, params, target, ctx)
        print(target, ctx, model_name, 'prediction id: ',
              np.argmax(tvm_output.flat))
Code Example #9
File: test_forward.py Project: bddppq/tvm
 def verify(start, stop, step):
     ref_res = _mx_symbol(mx.nd, start, stop, step).asnumpy()
     mx_sym = _mx_symbol(mx.sym, start, stop, step)
     new_sym, _ = relay.frontend.from_mxnet(mx_sym, {})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, ctx=ctx, target=target)
             op_res = intrp.evaluate(new_sym)()
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
Code Example #10
 def verify(start, stop, step):
     ref_res = _mx_symbol(mx.nd, start, stop, step).asnumpy()
     mx_sym = _mx_symbol(mx.sym, start, stop, step)
     mod, _ = relay.frontend.from_mxnet(mx_sym, {})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
             op_res = intrp.evaluate()()
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
Code Example #11
File: test_forward.py Project: tienln4/tvm
    def verify(mode, seq_len, input_size, hidden_size, num_layers,
               batch=1, init_states=True, bidirectional=False):
        if mode == "rnn":
            layer = gluon.rnn.RNN(hidden_size, num_layers, bidirectional=bidirectional)
        elif mode == "gru":
            layer = gluon.rnn.GRU(hidden_size, num_layers, bidirectional=bidirectional)
        else: # mode == "lstm"
            layer = gluon.rnn.LSTM(hidden_size, num_layers, bidirectional=bidirectional)
        num_states = 2 if mode == "lstm" else 1
        layer.initialize()
        layer.hybridize()

        dtype = "float32"
        directions = 2 if bidirectional else 1
        data_np = np.random.uniform(size=(seq_len, batch, input_size)).astype(dtype)
        data_mx = mx.nd.array(data_np)

        if init_states:
            shape_dict = {'data0': data_np.shape}
            inputs = {'data0': data_np}
            state_shape = (num_layers*directions, batch, hidden_size)
            states_np = []
            states_mx = []
            for i in range(num_states):
                s = np.random.uniform(size=state_shape).astype(dtype)
                states_np.append(s)
                states_mx.append(mx.nd.array(s))
                shape_dict['data%s' % (i+1)] = s.shape
                inputs['data%s' % (i+1)] = s
            mx_out, mx_states = layer(data_mx, states_mx)
            mx_res = [mx_out] + mx_states
        else:
            shape_dict = {'data': data_np.shape}
            inputs = {'data': data_np}
            mx_res = layer(data_mx)

        mx_sym = layer._cached_graph[1]
        mx_params = {}
        for name, param in layer.collect_params().items():
            mx_params[name] = param._reduce()

        mod, params = relay.frontend.from_mxnet(
            mx_sym, shape=shape_dict, arg_params=mx_params)
        for target, ctx in ctx_list():
            # only test graph runtime because debug runtime is too slow
            for kind in ["graph"]:
                intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
                op_res = intrp.evaluate()(**inputs, **params)
                if init_states:
                    assert len(op_res) == len(mx_res)
                    for i, val in enumerate(op_res):
                        tvm.testing.assert_allclose(
                            val.asnumpy(), mx_res[i].asnumpy(), rtol=1e-3)
                else:
                    tvm.testing.assert_allclose(
                        op_res.asnumpy(), mx_res.asnumpy(), rtol=1e-3)
Code Example #12
File: test_forward.py Project: bddppq/tvm
 def verify(xshape, yshape, y_data):
     x_data = np.random.uniform(size=xshape).astype("float32")
     ref_res = mx.nd.gather_nd(mx.nd.array(x_data), mx.nd.array(y_data))
     mx_sym = mx.sym.gather_nd(mx.sym.var("x_data"), mx.sym.var("y_data"))
     new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"x_data": xshape, "y_data": yshape}, {"x_data": "float32", "y_data": "int32"})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, ctx=ctx, target=target)
             op_res = intrp.evaluate(new_sym)(x_data, y_data)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #13
File: test_forward.py Project: bddppq/tvm
 def verify(shape, axis, size):
     x_np = np.random.uniform(size=shape).astype("float32")
     ref_res = mx.nd.broadcast_axis(mx.nd.array(x_np), axis=axis, size=size)
     mx_sym = mx.sym.broadcast_axis(mx.sym.var("x"), axis=axis, size=size)
     new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, ctx=ctx, target=target)
             op_res = intrp.evaluate(new_sym)(x_np)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #14
File: test_forward.py Project: mirsci/tvm
 def verify(shape, axis, begin, end):
     data_np = np.random.uniform(size=shape).astype("float32")
     ref_res = mx.nd.slice_axis(mx.nd.array(data_np), axis, begin, end)
     mx_sym = mx.sym.slice_axis(mx.sym.var("data"), axis, begin, end)
     new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"data": shape})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, ctx=ctx, target=target)
             op_res = intrp.evaluate(new_sym)(data_np)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #15
File: test_forward.py Project: tienln4/tvm
 def verify(shape):
     x_np = np.random.uniform(size=shape).astype("float32")
     ref_res = mx.nd.contrib.div_sqrt_dim(mx.nd.array(x_np))
     mx_sym = mx.sym.contrib.div_sqrt_dim(mx.sym.var("x"))
     mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
             op_res = intrp.evaluate()(x_np)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #16
def run_model_checkonly(model_file, model_name='', input_name='image'):
    model = cm.models.MLModel(model_file)
    x = model_zoo.get_cat_image()
    shape_dict = {input_name : x.shape}
    # Some Relay passes change operators on the fly. Ensuring that we generate
    # new graph for each target.
    for target, ctx in ctx_list():
        mod, params = relay.frontend.from_coreml(model, shape_dict)
        tvm_output = get_tvm_output(mod["main"], x, params, target, ctx)
        print(target, ctx, model_name, 'prediction id: ', np.argmax(tvm_output.flat))
Code Example #17
File: test_forward.py Project: xchani/tvm
 def verify(shape, axis, is_ascend, dtype="float32"):
     x_np = np.random.uniform(size=shape).astype("float32")
     ref_res = mx.nd.argsort(mx.nd.array(x_np), axis=axis, is_ascend=is_ascend, dtype=dtype)
     mx_sym = mx.sym.argsort(mx.sym.var("x"), axis=axis, is_ascend=is_ascend, dtype=dtype)
     new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, ctx=ctx, target=target)
             op_res = intrp.evaluate(new_sym)(x_np)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #18
File: test_forward.py Project: tienln4/tvm
 def verify(xshape, yshape, y_data):
     x_data = np.random.uniform(size=xshape).astype("float32")
     ref_res = mx.nd.gather_nd(mx.nd.array(x_data), mx.nd.array(y_data))
     mx_sym = mx.sym.gather_nd(mx.sym.var("x_data"), mx.sym.var("y_data"))
     mod, _ = relay.frontend.from_mxnet(mx_sym, {"x_data": xshape, "y_data": yshape}, {"x_data": "float32", "y_data": "int32"})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
             op_res = intrp.evaluate()(x_data, y_data)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #19
File: test_forward.py Project: blacklong28/tvm
 def verify(shape, use_sequence_length, value, axis, dtype, itype):
     data_np = np.random.uniform(size=shape).astype(dtype)
     valid_length_np = np.random.randint(0,
                                         shape[axis],
                                         size=shape[1 - axis]).astype(itype)
     if use_sequence_length:
         ref_res = mx.nd.SequenceMask(
             mx.nd.array(data_np, dtype=dtype),
             sequence_length=mx.nd.array(valid_length_np, dtype=itype),
             use_sequence_length=use_sequence_length,
             value=value,
             axis=axis)
         mx_sym = mx.sym.SequenceMask(
             mx.sym.var('data'),
             sequence_length=mx.sym.var('valid_length'),
             use_sequence_length=use_sequence_length,
             value=value,
             axis=axis)
         mod, _ = relay.frontend.from_mxnet(
             mx_sym, {
                 "data": shape,
                 'valid_length': valid_length_np.shape
             },
             dtype={
                 "data": dtype,
                 "valid_length": itype
             })
     else:
         ref_res = mx.nd.SequenceMask(
             mx.nd.array(data_np, dtype=dtype),
             use_sequence_length=use_sequence_length,
             value=value,
             axis=axis)
         mx_sym = mx.sym.SequenceMask(
             mx.sym.var('data'),
             use_sequence_length=use_sequence_length,
             value=value,
             axis=axis)
         mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": shape},
                                            dtype={"data": dtype})
     for target, ctx in ctx_list():
         for kind in ['graph', 'debug']:
             if use_sequence_length is False and kind == 'graph':
                 # Disable the test for 'graph' when it's identity.
                 continue
             intrp = relay.create_executor(kind,
                                           mod=mod,
                                           ctx=ctx,
                                           target=target)
             if use_sequence_length:
                 op_res = intrp.evaluate()(data_np, valid_length_np)
             else:
                 op_res = intrp.evaluate()(data_np)
             tvm.testing.assert_allclose(op_res.asnumpy(),
                                         ref_res.asnumpy())
Code Example #20
File: test_forward.py Project: lemonqueen/TVM
def verify_caffe_forward_impl_twoinput(net_path, model_path, data_shape):
    dtype = 'float32'
    data1 = np.random.uniform(size=data_shape).astype(dtype)  # generate test data
    data2 = np.random.uniform(size=data_shape).astype(dtype)
    c_out = get_caffe_output_twoinput(net_path, model_path, data1, data2,
                                      dtype)
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output_twoinput(net_path, model_path, data1, data2,
                                          target, ctx)
        tvm.testing.assert_allclose(c_out, tvm_out, rtol=1e-5, atol=1e-5)
    print("完成!!!!")
Code Example #21
File: test_forward.py Project: tienln4/tvm
 def verify(shape, indices_src, axis, mode="clip"):
     x_np = np.random.uniform(size=shape).astype("float32")
     indices_np = np.array(indices_src, dtype="float32")
     ref_res = mx.nd.take(mx.nd.array(x_np), mx.nd.array(indices_np), axis, mode)
     mx_sym = mx.sym.take(mx.sym.var("x"), mx.sym.var("y"), axis, mode)
     mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape, "y": indices_np.shape})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
             op_res = intrp.evaluate()(x_np, indices_np)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #22
File: test_forward.py Project: bddppq/tvm
 def verify(shape, indices_src, axis, mode="clip"):
     x_np = np.random.uniform(size=shape).astype("float32")
     indices_np = np.array(indices_src, dtype="float32")
     ref_res = mx.nd.take(mx.nd.array(x_np), mx.nd.array(indices_np), axis, mode)
     mx_sym = mx.sym.take(mx.sym.var("x"), mx.sym.var("y"), axis, mode)
     new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape, "y": indices_np.shape})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, ctx=ctx, target=target)
             op_res = intrp.evaluate(new_sym)(x_np, indices_np)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #23
File: test_forward.py Project: tienln4/tvm
 def verify(indices_shape, depth, on_value, off_value, dtype):
     x = np.random.randint(0, 5, size=indices_shape)
     ref_res = mx.nd.one_hot(mx.nd.array(x), depth, on_value, off_value, dtype)
     mx_sym = mx.sym.one_hot(mx.sym.var("x"), depth, on_value, off_value, dtype)
     shape_dict = {"x": x.shape}
     mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
             op_res = intrp.evaluate()(x.astype("float32"))
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3)
Code Example #24
File: test_forward.py Project: blacklong28/tvm
def test_forward_scalar_ops():
    for op in [
            operator.add, operator.sub, operator.mul, operator.truediv,
            operator.pow, operator.lt, operator.le, operator.eq, operator.ne,
            operator.gt, operator.ge
    ]:
        dtype = 'float32'
        a_shape = (3, 4, 5)
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        b_scalar = 2.3
        mx_sym = op(mx.sym.var('a'), b_scalar)
        ref_res = op(mx.nd.array(a_np), b_scalar)
        shapes = {'a': a_shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind,
                                              mod=mod,
                                              ctx=ctx,
                                              target=target)
                op_res = intrp.evaluate()(a_np)
                tvm.testing.assert_allclose(op_res.asnumpy(),
                                            ref_res.asnumpy())
    for op in ["maximum", "minimum"]:
        dtype = 'float32'
        a_shape = (3, 4, 5)
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        b_scalar = 2.3
        mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), b_scalar])
        ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), b_scalar])
        shapes = {'a': a_shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind,
                                              mod=mod,
                                              ctx=ctx,
                                              target=target)
                op_res = intrp.evaluate()(a_np)
                tvm.testing.assert_allclose(op_res.asnumpy(),
                                            ref_res.asnumpy())
Code Example #25
File: test_forward.py Project: tienln4/tvm
 def verify(val, shape, dtype):
     ctx = mx.cpu()
     ref_res = mx.nd.full(shape, val, dtype=dtype)
     mx_sym = mx.sym.full(shape, val, dtype=dtype)
     mod, _ = relay.frontend.from_mxnet(mx_sym, {})
     for target, ctx in ctx_list():
         # Skip testing graph runtime because this op will be optimized out
         # by constant folding.
         for kind in ["debug"]:
             intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
             op_res = intrp.evaluate()()
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #26
File: test_forward.py Project: bddppq/tvm
 def verify(val, shape, dtype):
     ctx = mx.cpu()
     ref_res = mx.nd.full(shape, val, dtype=dtype)
     mx_sym = mx.sym.full(shape, val, dtype=dtype)
     new_sym, _ = relay.frontend.from_mxnet(mx_sym, {})
     for target, ctx in ctx_list():
         # Skip testing graph runtime because this op will be optimized out
         # by constant folding.
         for kind in ["debug"]:
             intrp = relay.create_executor(kind, ctx=ctx, target=target)
             op_res = intrp.evaluate(new_sym)()
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #27
    def verify(x, shape, dtype):
        a_np = np.array(x).astype(dtype)
        mx_sym = _mx_symbol(mx.sym, 'unravel_index', [mx.sym.var('a'), shape])
        ref_res = _mx_symbol(mx.nd, 'unravel_index', [mx.nd.array(a_np), shape])
        shapes = {'a': a_np.shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)

        for target, ctx in ctx_list():
            for kind in ["graph", "vm", "debug"]:
                intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
                op_res = intrp.evaluate()(a_np)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #28
File: test_forward.py Project: Manikant92/tvm
def verify_model(model_name, input_data=[]):
    """Assert that the output of a compiled model matches with that of its
    baseline."""
    if len(input_data) == 0:
        baseline_model, baseline_input = load_model(model_name)
    else:
        baseline_model = model_name
        baseline_input = input_data
    if torch.cuda.is_available():
        baseline_model = baseline_model.cuda()
        baseline_input = baseline_input.cuda()
    with torch.no_grad():
        baseline_outputs = baseline_model(baseline_input)
    if isinstance(baseline_outputs, tuple):
        baseline_outputs = tuple(out.cpu().numpy() for out in baseline_outputs)
    else:
        baseline_outputs = (baseline_outputs.float().cpu().numpy(), )
    output_shapes = [out.shape for out in baseline_outputs]
    dtype = "float32"
    input_name = "input0"
    input_shapes = {input_name: list(baseline_input.shape)}
    trace = torch.jit.trace(baseline_model, baseline_input).float().eval()
    if torch.cuda.is_available():
        trace = trace.cuda()
    else:
        trace = trace.cpu()

    mod, params = relay.frontend.from_pytorch(trace, input_shapes)
    compiled_input = {input_name: tvm.nd.array(baseline_input.cpu().numpy())}

    with relay.build_config(opt_level=3):
        for target, ctx in ctx_list():
            relay_graph, relay_lib, relay_params = relay.build(mod,
                                                               target=target,
                                                               params=params)
            relay_model = graph_runtime.create(relay_graph, relay_lib, ctx)
            relay_model.set_input(**relay_params)
            relay_model.set_input(**compiled_input)
            relay_model.run()

            for i, baseline_output in enumerate(baseline_outputs):
                compiled_output = relay_model.get_output(i).asnumpy()

                assert_shapes_match(baseline_output, compiled_output)
                tvm.testing.assert_allclose(baseline_output,
                                            compiled_output,
                                            rtol=1e-3,
                                            atol=1e-3)

    del model_name
    del baseline_model
    torch.cuda.empty_cache()
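Hypothetical calls to the helper above; 'resnet18' and the explicit module are illustrative only, and load_model is assumed to return a torchvision model together with a matching input tensor:

verify_model('resnet18')                 # look up the model and its input via load_model
verify_model(torch.nn.ReLU().eval(),     # or pass a module plus an explicit input tensor
             input_data=torch.rand(1, 3, 8, 8))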
Code Example #29
def verify_model(model_name):
    """Assert that the output of a compiled model matches with that of its
    baseline."""
    baseline_model, baseline_input = load_model(model_name)
    if torch.cuda.is_available():
        baseline_model = baseline_model.cuda()
        baseline_input = baseline_input.cuda()
    baseline_outputs = baseline_model(baseline_input)
    if isinstance(baseline_outputs, tuple):
        baseline_outputs = tuple(out.detach().cpu().numpy()
                                 for out in baseline_outputs)
    else:
        baseline_outputs = (baseline_outputs.detach().float().cpu().numpy(), )
    output_shapes = [out.shape for out in baseline_outputs]
    dtype = 'float32'
    input_name = 'input0'
    input_shapes = {input_name: list(baseline_input.shape)}
    baseline_model(baseline_input)
    trace = torch.jit.trace(baseline_model, baseline_input).float().eval()
    if torch.cuda.is_available():
        trace = trace.cuda()
    else:
        trace = trace.cpu()

    mod, params = relay.frontend.from_pytorch_neo(trace, input_shapes)

    compiled_input = {input_name: tvm.nd.array(baseline_input.cpu().numpy())}

    with relay.build_config(opt_level=3):
        for target, ctx in ctx_list():
            relay_graph, relay_lib, relay_params = relay.build(mod,
                                                               target=target,
                                                               params=params)
            relay_model = graph_runtime.create(relay_graph, relay_lib, ctx)
            relay_model.set_input(**relay_params)
            relay_model.set_input(**compiled_input)
            relay_model.run()

            for i, baseline_output in enumerate(baseline_outputs):
                output_shape = baseline_output.shape
                compiled_output = relay_model.get_output(
                    i, tvm.nd.array(np.zeros(output_shape).astype(dtype),
                                    ctx)).asnumpy()

                assert_shapes_match(baseline_output, compiled_output)
                tvm.testing.assert_allclose(baseline_output,
                                            compiled_output,
                                            rtol=1e-3,
                                            atol=1e-3)

    from subprocess import call
    call('rm -rf ~/.torch/models/*', shell=True)
Code Example #30
 def verify(shape, axis=1, epsilon=1e-5):
     x = np.random.uniform(size=shape).astype("float32")
     gamma = np.random.uniform(size=(shape[axis])).astype("float32")
     beta = np.random.uniform(size=(shape[axis])).astype("float32")
     ref_res = mx.nd.InstanceNorm(mx.nd.array(x), mx.nd.array(gamma), mx.nd.array(beta), epsilon)
     mx_sym = mx.sym.InstanceNorm(mx.sym.var("x"), mx.sym.var("gamma"), mx.sym.var("beta"), epsilon)
     shape_dict = {"x": x.shape, "gamma": gamma.shape, "beta": beta.shape}
     mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
             op_res = intrp.evaluate()(x, gamma, beta)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-5, atol=1e-5)
Code Example #31
File: test_forward.py Project: lemonqueen/TVM
def verify_caffe_forward_impl(net_path, model_path, data_shape):
    dtype = 'float32'
    data = np.random.uniform(size=data_shape).astype(dtype)  # generate test data
    c_out = get_caffe_output(net_path, model_path, data, dtype)
    # print("c_out\n",c_out,type(c_out) )
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(net_path, model_path, data, target, ctx)
        # print("c_out\n",c_out,type(c_out) )
        # print("tvm_out\n",tvm_out,type(tvm_out) )
        # print("c_out\n",c_out.shape)
        # print("tvm_out\n",np.array(tvm_out).shape)
        # import ipdb; ipdb.set_trace()
        tvm.testing.assert_allclose(c_out, tvm_out, rtol=1e-5, atol=1e-5)
    print("完成!!!!")
Code Example #32
File: test_forward.py Project: bddppq/tvm
def verify_l2_normalize(input_dim, eps):
    dtype = "float32"

    a_np = np.random.uniform(size=input_dim).astype(dtype)
    b_np = topi.testing.l2_normalize_python(a_np, eps, 1)

    input = [('input', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(input, output)
    builder.add_l2_normalize(name='L2', epsilon=eps, input_name='input', output_name='output')

    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, a_np, 'input', b_np.shape, dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
Code Example #33
File: test_forward.py Project: bddppq/tvm
 def verify(x_shape, y_shape, axes):
     x_np = np.random.uniform(size=x_shape).astype("float32")
     y_np = np.random.uniform(size=y_shape).astype("float32")
     if axes is None:
         ref_res = mx.nd.slice_like(mx.nd.array(x_np), mx.nd.array(y_np))
         mx_sym = mx.sym.slice_like(mx.sym.var("x"), mx.sym.var("y"))
     else:
         ref_res = mx.nd.slice_like(mx.nd.array(x_np), mx.nd.array(y_np), axes=axes)
         mx_sym = mx.sym.slice_like(mx.sym.var("x"), mx.sym.var("y"), axes=axes)
     new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"x": x_shape, "y": y_shape})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, ctx=ctx, target=target)
             op_res = intrp.evaluate(new_sym)(x_np, y_np)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #34
File: test_forward.py Project: bddppq/tvm
 def verify(data_shape, weight_shape):
     in_dim, out_dim = weight_shape
     x_np = np.random.randint(0, weight_shape[0], size=data_shape).astype("float32")
     w_np = np.random.uniform(size=weight_shape).astype("float32")
     ref_res = mx.nd.Embedding(mx.nd.array(x_np), mx.nd.array(w_np),
                               input_dim=in_dim, output_dim=out_dim)
     mx_sym = mx.sym.Embedding(mx.sym.var("x"), mx.sym.var("w"),
                               input_dim=in_dim, output_dim=out_dim)
     new_sym, _ = relay.frontend.from_mxnet(
         mx_sym, {"x": data_shape, "w": weight_shape})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, ctx=ctx, target=target)
             op_res = intrp.evaluate(new_sym)(x=x_np, w=w_np)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #35
File: test_forward.py Project: hzhang57/tvm
def verify_l2_normalize(input_dim, eps):
    dtype = "float32"

    a_np = np.random.uniform(size=input_dim).astype(dtype)
    b_np = topi.testing.l2_normalize_python(a_np, eps, 1)

    input = [('input', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(input, output)
    builder.add_l2_normalize(name='L2', epsilon=eps, input_name='input', output_name='output')

    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, a_np, 'input', b_np.shape, dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
Code Example #36
File: test_forward.py Project: tienln4/tvm
 def verify(data_shape, weight_shape):
     in_dim, out_dim = weight_shape
     x_np = np.random.randint(0, weight_shape[0], size=data_shape).astype("float32")
     w_np = np.random.uniform(size=weight_shape).astype("float32")
     ref_res = mx.nd.Embedding(mx.nd.array(x_np), mx.nd.array(w_np),
                               input_dim=in_dim, output_dim=out_dim)
     mx_sym = mx.sym.Embedding(mx.sym.var("x"), mx.sym.var("w"),
                               input_dim=in_dim, output_dim=out_dim)
     mod, _ = relay.frontend.from_mxnet(
         mx_sym, {"x": data_shape, "w": weight_shape})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
             op_res = intrp.evaluate()(x=x_np, w=w_np)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #37
File: test_forward.py Project: mirsci/tvm
 def verify(x_shape, y_shape, axes):
     x_np = np.random.uniform(size=x_shape).astype("float32")
     y_np = np.random.uniform(size=y_shape).astype("float32")
     if axes is None:
         ref_res = mx.nd.slice_like(mx.nd.array(x_np), mx.nd.array(y_np))
         mx_sym = mx.sym.slice_like(mx.sym.var("x"), mx.sym.var("y"))
     else:
         ref_res = mx.nd.slice_like(mx.nd.array(x_np), mx.nd.array(y_np), axes=axes)
         mx_sym = mx.sym.slice_like(mx.sym.var("x"), mx.sym.var("y"), axes=axes)
     new_sym, _ = relay.frontend.from_mxnet(mx_sym, {"x": x_shape, "y": y_shape})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, ctx=ctx, target=target)
             op_res = intrp.evaluate(new_sym)(x_np, y_np)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #38
File: test_forward.py Project: bddppq/tvm
def test_forward_elemwise_ops():
    for op in ["elemwise_add", "elemwise_sub", "elemwise_mul",
               "elemwise_div", "maximum", "minimum"]:
        shape = (3, 4, 5)
        dtype = 'float32'
        a_np = np.random.uniform(size=shape).astype(dtype)
        b_np = np.random.uniform(size=shape).astype(dtype)
        mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), mx.sym.var('b')])
        ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), mx.nd.array(b_np)])
        shapes = {'a': shape, 'b': shape}
        new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                op_res = intrp.evaluate(new_sym)(a_np, b_np)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #39
File: test_forward.py Project: mirsci/tvm
def test_forward_elemwise_ops():
    for op in ["elemwise_add", "elemwise_sub", "elemwise_mul",
               "elemwise_div", "maximum", "minimum"]:
        shape = (3, 4, 5)
        dtype = 'float32'
        a_np = np.random.uniform(size=shape).astype(dtype)
        b_np = np.random.uniform(size=shape).astype(dtype)
        mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), mx.sym.var('b')])
        ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), mx.nd.array(b_np)])
        shapes = {'a': shape, 'b': shape}
        new_sym, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                op_res = intrp.evaluate(new_sym)(a_np, b_np)
                tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #40
File: test_forward.py Project: tienln4/tvm
 def verify(shape, k, axis, ret_type, is_ascend=False, dtype="float32"):
     x_np = np.random.uniform(size=shape).astype("float32")
     ref_res = mx.nd.topk(mx.nd.array(x_np), k=k, axis=axis, ret_typ=ret_type,
                          is_ascend=is_ascend, dtype=dtype)
     mx_sym = mx.sym.topk(mx.sym.var("x"), k=k, axis=axis, ret_typ=ret_type,
                          is_ascend=is_ascend, dtype=dtype)
     mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
             op_res = intrp.evaluate()(x_np)
             if isinstance(ref_res, list):
                 assert len(op_res) == len(ref_res)
                 for i, t in enumerate(op_res):
                     tvm.testing.assert_allclose(t.asnumpy(), ref_res[i].asnumpy())
             else:
                 tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #41
def check_result(args, expected_result, mod=None):
    """
    Check that evaluating the function in `mod` applied to `args` produces
    `expected_result` on the Relay VM.

    Parameters
    ----------
    args: list of Expr
        The arguments to supply to the expression.

    expected_result:
        The expected result of running the expression.
    """
    for target, ctx in ctx_list():
        vm = relay.create_executor('vm', ctx=ctx, target=target, mod=mod)

        rts_result = vm.evaluate()(*args)
        tvm.testing.assert_allclose(expected_result, rts_result.asnumpy())
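A hypothetical usage sketch for check_result, assuming a TVM version where tvm.IRModule.from_expr is available (older releases expose relay.Module.from_expr instead); the addition expression is illustrative only:

x = relay.var("x", shape=(2, 2), dtype="float32")
y = relay.var("y", shape=(2, 2), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x, y], x + y))
a = np.full((2, 2), 1, dtype="float32")
b = np.full((2, 2), 2, dtype="float32")
check_result([a, b], a + b, mod=mod)  # VM result should equal the numpy sum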
Code Example #42
File: test_forward.py Project: tienln4/tvm
 def verify(data_shape, kernel_size, stride, pad, num_filter):
     weight_shape=(1, num_filter) + kernel_size
     x = np.random.uniform(size=data_shape).astype("float32")
     weight = np.random.uniform(size=weight_shape).astype("float32")
     bias = np.random.uniform(size=num_filter).astype("float32")
     ref_res = mx.nd.Deconvolution(data=mx.nd.array(x), weight=mx.nd.array(weight), bias=mx.nd.array(bias),
                                   kernel=kernel_size, stride=stride,
                                   pad=pad, num_filter=num_filter, no_bias=False)
     mx_sym = mx.sym.Deconvolution(mx.sym.var("x"), mx.sym.var("weight"), mx.sym.var("bias"),
                                   kernel=kernel_size, stride=stride,
                                   pad=pad, num_filter=num_filter, no_bias=False)
     shape_dict = {"x": x.shape, "weight": weight.shape, "bias": bias.shape}
     mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
             op_res = intrp.evaluate()(x, weight, bias)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy(), rtol=1e-3)
Code Example #43
File: test_forward.py Project: tienln4/tvm
 def verify(xshape, yshape, offset=None):
     x_data = np.random.uniform(size=xshape).astype("float32")
     y_data = np.random.uniform(size=yshape).astype("float32")
     if offset is None:
         mx_sym = mx.sym.Crop(mx.sym.var("x"), mx.sym.var("y"))
         ref_res = mx.nd.Crop(mx.nd.array(x_data), mx.nd.array(y_data))
     else:
         mx_sym = mx.sym.Crop(mx.sym.var("x"), mx.sym.var("y"), offset=offset)
         ref_res = mx.nd.Crop(mx.nd.array(x_data), mx.nd.array(y_data), offset=offset)
     mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": xshape, "y": yshape})
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
             if offset is None or offset == (0, 0):
                 op_res = intrp.evaluate()(x_data, y_data)
             else:
                 op_res = intrp.evaluate()(x_data)
             tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
Code Example #44
File: test_forward.py Project: bddppq/tvm
def verify_ConcatLayerParams(input1_dim, input2_dim):
    dtype = 'float32'

    a_np1 = np.random.uniform(size=input1_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input2_dim).astype(dtype)

    b_np = np.concatenate((a_np1, a_np2), axis=1)
    inputs = [('input1', datatypes.Array(*input1_dim)),
              ('input2', datatypes.Array(*input2_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Concate',
                            input_names=['input1', 'input2'],
                            output_name='output',
                            mode='CONCAT')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, [a_np1, a_np2], ['input1', 'input2'], b_np.shape, dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
Code Example #45
File: test_forward.py Project: hzhang57/tvm
def verify_ConcatLayerParams(input1_dim, input2_dim):
    dtype = 'float32'

    a_np1 = np.random.uniform(size=input1_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input2_dim).astype(dtype)

    b_np = np.concatenate((a_np1, a_np2), axis=1)
    inputs = [('input1', datatypes.Array(*input1_dim)),
              ('input2', datatypes.Array(*input2_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Concate',
                            input_names=['input1', 'input2'],
                            output_name='output',
                            mode='CONCAT')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, [a_np1, a_np2], ['input1', 'input2'], b_np.shape, dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
Code Example #46
File: test_forward.py Project: ritesh2212/tvm-1
    def verify(shape, axis=1, fix_gamma=False):
        x = np.random.uniform(size=shape).astype("float32")
        gamma = np.random.uniform(size=(shape[axis])).astype("float32")
        beta = np.random.uniform(size=(shape[axis])).astype("float32")
        moving_mean = np.random.uniform(size=(shape[axis])).astype("float32")
        moving_var = np.random.uniform(size=(shape[axis])).astype("float32")
        ref_res = mx.nd.BatchNorm(mx.nd.array(x),
                                  mx.nd.array(gamma),
                                  mx.nd.array(beta),
                                  mx.nd.array(moving_mean),
                                  mx.nd.array(moving_var),
                                  axis=axis,
                                  use_global_stats=True,
                                  fix_gamma=fix_gamma)
        mx_sym = mx.sym.BatchNorm(mx.sym.var("x"),
                                  mx.sym.var("gamma"),
                                  mx.sym.var("beta"),
                                  mx.sym.var("mean"),
                                  mx.sym.var("var"),
                                  axis=axis,
                                  use_global_stats=True,
                                  fix_gamma=fix_gamma)

        shape_dict = {
            "x": x.shape,
            "gamma": gamma.shape,
            "beta": beta.shape,
            "mean": moving_mean.shape,
            "var": moving_var.shape
        }
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        #print(mod)
        for target, ctx in ctx_list():
            for kind in ["graph", "debug"]:
                intrp = relay.create_executor(kind,
                                              mod=mod,
                                              ctx=ctx,
                                              target=target)
                op_res = intrp.evaluate()(x, gamma, beta, moving_mean,
                                          moving_var)
                tvm.testing.assert_allclose(op_res.asnumpy(),
                                            ref_res.asnumpy(),
                                            rtol=1e-3)
Code Example #47
File: test_forward.py Project: bddppq/tvm
def verify_average(input_dim1, input_dim2, axis=0):
    dtype = 'float32'

    a_np1 = np.random.uniform(size=input_dim1).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim2).astype(dtype)

    b_np = np.mean((a_np1, a_np2), axis=axis)

    inputs = [('input1', datatypes.Array(*input_dim1)),
              ('input2', datatypes.Array(*input_dim2))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='MEAN',
                            input_names=['input1', 'input2'],
                            output_name='output',
                            mode='AVE')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, [a_np1, a_np2], ['input1', 'input2'], b_np.shape, dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
Code Example #48
File: test_forward.py Project: bddppq/tvm
def verify_MultiplyLayerParams(input_dim, alpha):
    dtype = 'float32'

    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim).astype(dtype)

    b_np = np.multiply(a_np1, a_np2) * alpha
    inputs = [('input1', datatypes.Array(*input_dim)),
              ('input2', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Mul',
                            alpha=alpha,
                            input_names=['input1', 'input2'],
                            output_name='output',
                            mode='MULTIPLY')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, [a_np1, a_np2], ['input1', 'input2'], b_np.shape, dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
Code Example #49
File: test_forward.py Project: bddppq/tvm
    def verify(mode, input_size, seq_len, hidden_size, num_layers, batch=1):
        if mode == "rnn":
            layer = gluon.rnn.RNN(hidden_size, num_layers)
        elif mode == "gru":
            layer = gluon.rnn.GRU(hidden_size, num_layers)
        else: # mode == "lstm"
            layer = gluon.rnn.LSTM(hidden_size, num_layers)
        num_states = 2 if mode == "lstm" else 1
        layer.initialize()

        dtype = "float32"
        data_np = np.random.uniform(size=(seq_len, batch, input_size)).astype(dtype)
        states_np = []
        states_mx = []
        shape_dict = {'data0': data_np.shape}
        inputs = {'data0': data_np}
        for i in range(num_states):
            s = np.random.uniform(size=(num_layers, batch, hidden_size)).astype(dtype)
            states_np.append(s)
            states_mx.append(mx.nd.array(s))
            shape_dict['data%s' % (i+1)] = s.shape
            inputs['data%s' % (i+1)] = s

        layer.hybridize()
        mx_out, mx_states = layer(mx.nd.array(data_np), states_mx)
        mx_res = [mx_out] + mx_states
        mx_sym = layer._cached_graph[1]
        mx_params = {}
        for name, param in layer.collect_params().items():
            mx_params[name] = param._reduce()

        new_sym, params = relay.frontend.from_mxnet(
            mx_sym, shape=shape_dict, arg_params=mx_params)
        for target, ctx in ctx_list():
            # only test graph runtime because debug runtime is too slow
            for kind in ["graph"]:
                intrp = relay.create_executor(kind, ctx=ctx, target=target)
                op_res = intrp.evaluate(new_sym)(**inputs, **params)
                assert len(op_res) == len(mx_res)
                for i, val in enumerate(op_res):
                    tvm.testing.assert_allclose(val.asnumpy(), mx_res[i].asnumpy(), rtol=1e-3)
Code Example #50
File: test_forward.py Project: bddppq/tvm
def verify_lrn(input_dim, size, bias, alpha, beta):
    dtype = "float32"
    axis=1
    a_np = np.random.uniform(size=input_dim).astype(dtype)
    b_np = topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)

    input = [('input', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(input, output)
    builder.add_lrn(name='LRN',
                    input_name='input',
                    output_name='output',
                    alpha=alpha,
                    beta=beta,
                    k=bias,
                    local_size=size)

    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, a_np, 'input', b_np.shape, dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
Code Example #51
File: test_forward.py Project: bddppq/tvm
def verify_keras_frontend(keras_model, need_transpose=True):
    # Keras frontend currently supports tensorflow backend only.
    assert(keras.backend.backend() == 'tensorflow')

    in_shapes = []
    for layer in keras_model._input_layers:
        in_shapes.append(tuple(dim.value if dim.value is not None else 1 for dim in layer.input.shape))

    def get_keras_output(xs, dtype='float32'):
        return keras_model.predict(xs)

    def get_tvm_output(xs, target, ctx, dtype='float32'):
        shape_dict = {name: x.shape for (name, x) in zip(keras_model.input_names, xs)}
        func, params = relay.frontend.from_keras(keras_model, shape_dict)
        with relay.build_module.build_config(opt_level=2):
            graph, lib, params = relay.build(func, target, params=params)
        m = graph_runtime.create(graph, lib, ctx)
        for name, x in zip(keras_model.input_names, xs):
            m.set_input(name, tvm.nd.array(x.astype(dtype)))
        m.set_input(**params)
        m.run()
        return [m.get_output(i).asnumpy() for i in range(m.get_num_outputs())]

    def to_channels_first(arr):
        return arr.transpose([0, -1] + list(range(1, arr.ndim - 1)))

    def to_channels_last(arr):
        return arr.transpose([0] + list(range(2, arr.ndim)) + [1])

    xs = [np.random.uniform(size=shape, low=-1.0, high=1.0) for shape in in_shapes]
    keras_out = get_keras_output(xs)
    keras_out = keras_out if isinstance(keras_out, list) else [keras_out]
    for target, ctx in ctx_list():
        inputs = [to_channels_first(x) for x in xs] if need_transpose else xs
        tvm_out = get_tvm_output(inputs, target, ctx)
        for kout, tout in zip(keras_out, tvm_out):
            if need_transpose:
                tout = to_channels_last(tout)
            tvm.testing.assert_allclose(kout, tout, rtol=1e-5, atol=1e-5)
Code Example #52
File: test_forward.py Project: LANHUIYING/tvm
def verify_mxnet_frontend_impl(mx_symbol,
                               data_shape=(1, 3, 224, 224),
                               out_shape=(1, 1000),
                               gluon_impl=False,
                               name=None,
                               dtype='float32'):
    """Use name different from test to avoid let nose pick it up"""
    if gluon_impl:
        def get_gluon_output(name, x):
            net = vision.get_model(name)
            net.collect_params().initialize(mx.init.Xavier())
            net_sym = gluon.nn.SymbolBlock(outputs=net(mx.sym.var('data')),
                                           inputs=mx.sym.var('data'),
                                           params=net.collect_params())
            out = net_sym(mx.nd.array(x.astype(dtype))).asnumpy()
            return out, net_sym
    else:
        def get_mxnet_output(symbol, x, dtype='float32'):
            from collections import namedtuple
            Batch = namedtuple('Batch', ['data'])
            mod = mx.mod.Module(symbol, label_names=None)
            mod.bind(data_shapes=[('data', x.shape)], for_training=False)
            mod.init_params()
            mod.forward(Batch([mx.nd.array(x.astype(dtype))]))
            out = mod.get_outputs()[0].asnumpy()
            args, auxs = mod.get_params()
            return out, args, auxs

    def get_tvm_output(symbol, x, args, auxs, target, ctx, dtype='float32'):
        shape_dict = {"data": x.shape}
        if gluon_impl:
            new_sym, params = relay.frontend.from_mxnet(symbol, shape_dict)
        else:
            new_sym, params = relay.frontend.from_mxnet(symbol,
                                                        shape_dict,
                                                        arg_params=args,
                                                        aux_params=auxs)
        with relay.build_config(opt_level=3):
            graph, lib, params = relay.build(new_sym, target, params=params)
        m = graph_runtime.create(graph, lib, ctx)
        # set inputs
        m.set_input("data", tvm.nd.array(x.astype(dtype)))
        m.set_input(**params)
        m.run()
        # get outputs
        out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
        return out.asnumpy()

    # random input
    x = np.random.uniform(size=data_shape)
    if gluon_impl:
        gluon_out, gluon_sym = get_gluon_output(name, x)
        for target, ctx in ctx_list():
            tvm_out = get_tvm_output(gluon_sym, x, None, None, target, ctx, dtype)
            tvm.testing.assert_allclose(gluon_out, tvm_out, rtol=1e-5, atol=1e-5)
    else:
        mx_out, args, auxs = get_mxnet_output(mx_symbol, x, dtype)
        assert "data" not in args
        for target, ctx in ctx_list():
            tvm_out = get_tvm_output(mx_symbol, x, args, auxs, target, ctx, dtype)
            tvm.testing.assert_allclose(mx_out, tvm_out, rtol=1e-5, atol=1e-5)
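Hypothetical usage sketches for the helper above; the model names and shapes are illustrative, and model_zoo.mx_mlp / the gluon vision model zoo are assumed to be available in this test setup:

# Plain MXNet symbol path: compare Module output against the TVM build.
verify_mxnet_frontend_impl(model_zoo.mx_mlp(),
                           data_shape=(1, 1, 28, 28), out_shape=(1, 10))
# Gluon path: the model is fetched by name, so the symbol argument is unused.
verify_mxnet_frontend_impl(mx.sym.var('data'), gluon_impl=True, name='resnet18_v1')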