Example #1
def test_scalar_sym_pow():
    scalar = 3
    x = sym.Variable("x")
    y = scalar**x

    def forward(x):
        return scalar**x

    def backward(head_grads, x):
        return [np.log(scalar) * scalar**x * head_grads]

    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)
Example #2
def test_pad():
    x = sym.Variable("x")
    y = sym.pad(x, pad_width=((0, 0), (0, 0), (0, 1), (2, 3)), pad_value=1.)

    def forward(x):
        return np.pad(x,
                      pad_width=((0, 0), (0, 0), (0, 1), (2, 3)),
                      mode='constant',
                      constant_values=1.)

    dtype = "float32"
    inputs = [('x', (1, 3, 28, 28), x)]
    helper(y, inputs, dtype, forward)
Example #3
def test_dense():
    x = sym.Variable("data", shape=(10, 20))
    y = sym.dense(x, units=30, name="fc")
    g, ldict = correct_layout(y, "HW")
    assert (ldict["data"][0] == "HW")
    assert (ldict["fc"][0] == "HW")
    assert (ldict["fc_bias"][0] == "__undef__")
    # second pass will insert layout transform
    _, ldict = correct_layout(g, "HW16w")
    assert (ldict["data"][0] == "HW16w")
    assert (ldict["data_HW"][0] == "HW")
    assert (ldict["fc"][0] == "HW")
    assert (ldict["fc_bias"][0] == "__undef__")
Example #4
def test_batchnorm():
    x = sym.Variable("x")
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    eps = 1e-5
    y = sym.batch_norm(x, gamma, beta, moving_mean, moving_var, epsilon=eps)

    def forward(x, gamma, beta, moving_mean, moving_var):
        return (x - moving_mean) / np.sqrt(moving_var + eps) * gamma + beta

    dtype = "float32"
    inputs = {
        'x': ((10, 20), x),
        'gamma': ((20, ), ),
        'beta': ((20, ), ),
        'moving_mean': ((20, ), ),
        'moving_var': ((20, ), )
    }

    helper(y, inputs, dtype, forward)
Example #5
def test_batchnorm():
    x = sym.Variable("x", shape=(10, 20))
    y = sym.batch_norm(1 / x, name="bn")
    sdict = infer_shape(y)
    assert (sdict["bn_gamma"][0] == [20])

    x = sym.Variable("x", shape=(10, 20, 30, 40))
    y = sym.batch_norm(data=x, axis=0, epsilon=2e-5, name='bn')
    sdict = infer_shape(y)
    assert (sdict['bn_moving_var'][0] == [10])

    y = sym.batch_norm(data=x, axis=1, epsilon=2e-5, name='bn')
    sdict = infer_shape(y)
    assert (sdict['bn_gamma'][0] == [20])

    y = sym.batch_norm(data=x, axis=2, epsilon=2e-5, name='bn')
    sdict = infer_shape(y)
    assert (sdict['bn_beta'][0] == [30])

    y = sym.batch_norm(data=x, axis=3, epsilon=2e-5, name='bn')
    sdict = infer_shape(y)
    assert (sdict['bn_moving_mean'][0] == [40])
Example #6
def test_infer_shape():
    x = sym.Variable('x', shape=(2, 4, 2))
    y = sym.elemwise_add(x, x, name='add1')
    y = sym.flatten(y, name="flatten")
    g = graph.create(y)
    g._set_json_attr("shape_attr_key", "shape")
    g = g.apply('InferShape')
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert g.json_attr('shape')[jnode_row_ptr[nindex["flatten"]]] == [2, 8]
    assert g.json_attr('shape')[jnode_row_ptr[nindex["add1"]]] == [2, 4, 2]
Example #7
def test_infer_type():
    x = sym.Variable('x', dtype=0)
    y = sym.elemwise_add(x, x, name='add1')
    y = sym.cast(y, dtype="float64", name="cast1")
    g = graph.create(y)
    g._set_json_attr("dtype_attr_key", "dtype")
    g = g.apply('InferType')
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert g.json_attr('dtype')[jnode_row_ptr[nindex["cast1"]]] == 1
    assert g.json_attr('dtype')[jnode_row_ptr[nindex["add1"]]] == 0
Example #8
def verify_reduce(dshape, fnp, fsym, **kwargs):
    x = sym.Variable("x")
    y = fsym(x + 1, **kwargs)
    dtype = "float32"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
        m = graph_runtime.create(graph, lib, ctx)
        # set input
        data = np.random.uniform(size=dshape).astype(dtype)
        out_np = fnp(data + 1, **kwargs)
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(out_np.shape))
        np.testing.assert_allclose(out.asnumpy(), out_np, atol=1e-5, rtol=1e-5)
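A sample invocation of this helper (parameters chosen here for illustration, assuming the usual imports of numpy as np and nnvm.symbol as sym):

verify_reduce((4, 4, 4), np.max, sym.max, axis=1, keepdims=True)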
Example #9
def test_precompute_prune():
    x = sym.Variable("x") + 1
    a = sym.Variable("a")
    y = sym.Variable("y")
    z = y + x + a
    shape = (10, 10)
    dtype = tvm.float32
    nx = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    na = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    ny = tvm.nd.array(np.random.uniform(size=shape).astype(dtype))
    params = {"x": nx, "a": na}
    graph, lib, params = nnvm.compiler.build(
        z, "llvm", shape={"y": ny.shape}, params=params)
    assert graph.index.num_nodes == 4
    m = graph_runtime.create(graph, lib, tvm.cpu(0))
    params["y"] = ny
    res = tvm.nd.empty(shape)
    m["load_params"](nnvm.compiler.save_param_dict(params))
    m.run()
    out = m.get_output(0, out=res)
    np.testing.assert_allclose(
        res.asnumpy(), nx.asnumpy() + 1 + ny.asnumpy() + na.asnumpy())
Example #10
def test1():

    in_shape = [3, 3, 3]
    out_shape = [3, 3, 3, 2]
    data = {
        "x": np.arange(np.prod(in_shape), dtype=np.float32).reshape(in_shape),
        "y": np.arange(np.prod(in_shape), dtype=np.float32).reshape(in_shape)
    }

    axis = -4
    x = sym.Variable("x")
    y = sym.Variable("y")
    x = sym.expand_dims(x, axis=axis, num_newaxis=1)
    y = sym.expand_dims(y, axis=axis, num_newaxis=1)
    z = sym.concatenate(x, y, axis=-4)

    nnvm_graph = nnvm.graph.create(z)
    print('Got NNVM graph')
    print(nnvm_graph.json())

    in_shapes_dict = {n: list(np.shape(v)) for n, v in data.items()}
    tvm_graph, lib, params = nnvm.compiler.build(nnvm_graph, 'llvm',
                                                 in_shapes_dict)
    print('Got TVM graph')

    graph_module = graph_runtime.create(tvm_graph, lib, tvm.cpu(0))
    print('Got graph module')

    for name, value in data.items():
        graph_module.set_input(name, value)

    graph_module.run()

    out_value = graph_module.get_output(0, tvm.nd.empty(out_shape,
                                                        'float32'))

    # print('Inputs:\nX:', data["x"], "\nY:", data["y"])
    print('Output value:', type(out_value), '\nShape:', out_value.shape,
          '\nO:', out_value)
Example #11
def test_block_grad():
    x = sym.Variable("x")
    y = sym.block_grad(x)

    def forward(x):
        return x

    def backward(head_grads, x):
        return [np.zeros_like(head_grads)]

    dtype = "float32"
    inputs = [('x', (3, 4, 5), x)]
    helper(y, inputs, dtype, forward, backward, need_head_grads=False)
Example #12
def test_batchnorm():
    x = sym.Variable("x")
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    eps = 1e-5
    y = sym.batch_norm(
        x, gamma, beta, moving_mean, moving_var, epsilon=eps)

    def forward(x, gamma, beta, moving_mean, moving_var):
        return (x - moving_mean) / np.sqrt(moving_var + eps) * gamma + beta

    shape = {
        'x': (10, 20),
        'gamma': (20,),
        'beta': (20,),
        'moving_mean': (20,),
        'moving_var': (20,)
    }

    check_function(y, forward, in_range=(0.001, 1.0), shape=shape)
Example #13
def test_sigmoid():
    x = sym.Variable("x")
    y = sym.sigmoid(x)

    def forward(x):
        return 1.0 / (1.0 + np.exp(-x))

    def backward(head_grads, x):
        y_np = forward(x)
        return [y_np * (1 - y_np) * head_grads]

    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)
Example #14
def test_tanh():
    x = sym.Variable("x")
    y = sym.tanh(x)

    def forward(x):
        return np.sinh(x) / np.cosh(x)

    def backward(head_grads, x):
        y_np = forward(x)
        return [(1 - y_np**2) * head_grads]

    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)
Example #15
def test_block_grad():
    x = sym.Variable("x")
    y = sym.block_grad(x)

    def forward(x):
        return x

    def backward(head_grads, x):
        return [np.zeros_like(head_grads)]

    shape = {'x': (3, 4, 5)}
    # Numerical grad checking would fail for this function
    check_function(y, forward, backward, shape=shape, numerical_grads=False)
Example #16
def test_conv2d_transpose():
    x = sym.Variable("data", shape=(1, 32, 512, 512))
    y = sym.conv2d_transpose(x,
                             name="conv",
                             channels=12,
                             kernel_size=(3, 3),
                             padding=(1, 1),
                             layout="NCHW")
    _, ldict = correct_layout(y)
    assert (ldict["data"][0] == "NCHW")
    assert (ldict["conv_weight"][0] == "OIHW")
    assert (ldict["conv_bias"][0] == "C")
    assert (ldict["conv"][0] == "NCHW")
Example #17
def test_sym_scalar_pow():
    scalar = 3
    x = sym.Variable("x")
    y = x**scalar

    def forward(x):
        return x**scalar

    def backward(head_grads, x):
        return [scalar * x**(scalar - 1) * head_grads]

    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)
Example #18
def verify_elemwise_sum(num_args):
    s = [sym.Variable("input" + str(i)) for i in range(num_args)]
    y = sym.elemwise_sum(*s, num_args=num_args)

    def forward(**inputs):
        return np.sum(np.array(list(inputs.values())), axis=0)

    def backward(head_grads, **inputs):
        return [head_grads] * num_args

    dtype = "float32"
    inputs = [("input" + str(i), (3, 4, 5), s[i]) for i in range(num_args)]
    helper(y, inputs, dtype, forward, backward, need_input=False)
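A sample call (illustrative only; any positive num_args should work):

verify_elemwise_sum(3)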
Example #19
def test_non_max_suppression():
    dshape = (1, 5, 6)
    data = sym.Variable("data")
    valid_count = sym.Variable("valid_count", dtype="int32")
    iou_threshold = 0.7
    force_suppress = True
    top_k = 2
    out = sym.non_max_suppression(data=data,
                                  valid_count=valid_count,
                                  return_indices=False,
                                  iou_threshold=iou_threshold,
                                  force_suppress=force_suppress,
                                  top_k=top_k)

    np_data = np.array([[[0, 0.8, 1, 20, 25, 45], [1, 0.7, 30, 60, 50, 80],
                         [0, 0.4, 4, 21, 19, 40], [2, 0.9, 35, 61, 52, 79],
                         [1, 0.5, 100, 60, 70, 110]]]).astype("float32")
    np_valid_count = np.array([4]).astype("int32")
    np_result = np.array([[[2, 0.9, 35, 61, 52, 79], [0, 0.8, 1, 20, 25, 45],
                           [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1],
                           [-1, -1, -1, -1, -1, -1]]])

    target = "llvm"
    ctx = tvm.cpu()
    graph, lib, _ = nnvm.compiler.build(out,
                                        target, {
                                            "data": dshape,
                                            "valid_count": (dshape[0], )
                                        },
                                        dtype={
                                            "data": "float32",
                                            "valid_count": "int32"
                                        })
    m = graph_runtime.create(graph, lib, ctx)
    m.set_input(**{"data": np_data, "valid_count": np_valid_count})
    m.run()
    out = m.get_output(0, tvm.nd.empty(np_result.shape, "float32"))
    tvm.testing.assert_allclose(out.asnumpy(), np_result, atol=1e-5, rtol=1e-5)
Example #20
def test_split():
    x1 = sym.Variable("x", shape=(10, 20))
    z = sym.split(x1, indices_or_sections=[11], name="y")
    sdict = infer_shape(z)
    assert(sdict["y"][0] == [10, 11])
    assert(sdict["y"][1] == [10, 9])
    z = sym.split(x1, indices_or_sections=2, name="y")
    sdict = infer_shape(z)
    assert(sdict["y"][0] == [10, 10])
    assert(sdict["y"][1] == [10, 10])
    z = sym.split(x1, indices_or_sections=[6], axis=-1, name="y")
    sdict = infer_shape(z)
    assert(sdict["y"][0] == [10, 6])
    assert(sdict["y"][1] == [10, 14])
Example #21
def test_infer_shape_known_partial():
    x = sym.Variable('x')
    y = sym.elemwise_add(x, x, name='add1')
    y = sym.flatten(y, name="flatten1")
    g = graph.create(y)
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    shape = [[2, 4, 2], [], []]
    g._set_json_attr("shape", shape, 'list_shape')
    g = g.apply("InferShape")
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert g.json_attr('shape')[jnode_row_ptr[nindex["flatten1"]]] == [2, 8]
    assert g.json_attr('shape')[jnode_row_ptr[nindex["add1"]]] == [2, 4, 2]
Example #22
def verify_reshape(dshape, oshape):
    x = sym.Variable("x")
    y = sym.reshape(x, shape=oshape)
    y = y + 1
    dtype = "float32"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
        m = graph_runtime.create(graph, lib, ctx)
        # set input
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        m.run(x=data)
        out_np = data.asnumpy().reshape(oshape) + 1
        out = m.get_output(0, tvm.nd.empty(out_np.shape))
        np.testing.assert_allclose(out.asnumpy(), out_np, atol=1e-5, rtol=1e-5)
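A sample call (illustrative; dshape and oshape must contain the same number of elements):

verify_reshape((2, 3, 4), (4, 6))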
Example #23
def verify_flip(ishape, axis):
    x = sym.Variable("x")
    y = sym.flip(x, axis=axis) + 1
    dtype = "float32"
    x_np = np.random.uniform(size=ishape).astype(dtype)
    res = np.flip(x_np, axis) + 1

    for target, ctx in ctx_list():
        # set input
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": ishape})
        m = graph_runtime.create(graph, lib, ctx)
        m.run(x=x_np)
        out = m.get_output(0, tvm.nd.empty(res.shape))
        np.testing.assert_allclose(out.asnumpy(), res, atol=1e-5, rtol=1e-5)
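A sample call (illustrative; axis must be a valid axis of ishape):

verify_flip((3, 4, 3), 1)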
Example #24
def test_residual_block_layout_transform():
    ch = 16
    size = 32
    data = sym.Variable(name="data")
    conv1 = sym.conv2d(data=data,
                       kernel_size=(3, 3),
                       channels=ch,
                       padding=(1, 1),
                       use_bias=False,
                       name="conv1")
    layout_transform1 = sym.__layout_transform__(data=conv1,
                                                 src_layout="NCHW",
                                                 dst_layout="NCHW8c")
    layout_transform2 = sym.__layout_transform__(data=layout_transform1,
                                                 src_layout="NCHW8c",
                                                 dst_layout="NCHW")
    conv2 = sym.conv2d(data=conv1,
                       kernel_size=(3, 3),
                       channels=ch,
                       padding=(1, 1),
                       use_bias=False,
                       name="conv2")
    elemwise_sum = sym.elemwise_add(layout_transform2, conv2)
    out = sym.relu(elemwise_sum)

    dtype = "float32"
    dshape = (1, ch, size, size)
    kshape = (ch, ch, 3, 3)
    oshape = (1, ch, size, size)
    shape_dict = {"data": dshape}

    target = "llvm"  # only test on llvm since it involves NCHW8c layout
    ctx = tvm.context(target, 0)
    graph, lib, _ = nnvm.compiler.build(out, target, shape_dict)
    # data, conv1 weight, conv1, layout transform + elemwise add + relu, conv2 weight, conv2 op
    assert graph.index.num_nodes == 6

    data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
    kernel1 = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
    kernel2 = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
    m = graph_runtime.create(graph, lib, ctx)
    m.run(data=data, conv1_weight=kernel1, conv2_weight=kernel2)
    out = m.get_output(0, tvm.nd.empty(oshape, dtype))

    conv1 = topi.testing.conv2d_nchw_python(data.asnumpy(), kernel1.asnumpy(),
                                            (1, 1), 'SAME')
    conv2 = topi.testing.conv2d_nchw_python(conv1, kernel2.asnumpy(), (1, 1),
                                            'SAME')
    ref = np.maximum(conv1 + conv2, 0)
    np.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5)
Example #25
def test_log():
    x = sym.Variable("x")
    y = sym.log(x)

    def forward(x):
        return np.log(x)

    def backward(head_grads, x):
        return [1. / x * head_grads]

    dtype = "float32"
    dshape = (1, 3, 32, 32)
    inputs = [('x', dshape, x)]
    helper(y, inputs, dtype, forward, backward, rnd_min=0.001)
Example #26
def test_exp():
    x = sym.Variable("x")
    y = sym.exp(x)

    def forward(x):
        return np.exp(x)

    def backward(head_grads, x):
        return [np.exp(x) * head_grads]

    dtype = "float32"
    dshape = (1, 3, 32, 32)
    inputs = [('x', dshape, x)]
    helper(y, inputs, dtype, forward, backward)
Example #27
def verify_gather_nd(src_shape, indices_src):
    src_dtype = "float32"
    indices_dtype = "int32"
    indices_src = np.array(indices_src, dtype=indices_dtype)
    a = sym.Variable("a", shape=src_shape)
    indices = sym.Variable("indices", shape=indices_src.shape)
    y = sym.gather_nd(a, indices)

    def forward(a, indices):
        return topi.testing.gather_nd_python(a, indices)

    a_src = np.arange(np.prod(src_shape), dtype=src_dtype).reshape(src_shape)

    check_function(y,
                   forward,
                   dtype={
                       'a': src_dtype,
                       'indices': indices_dtype
                   },
                   values={
                       'a': a_src,
                       'indices': indices_src
                   })
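A sample call (illustrative; the first dimension of indices_src determines how many leading axes of src_shape are indexed, so this picks a[0, 1] and a[1, 0]):

verify_gather_nd((2, 2), [[0, 1], [1, 0]])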
Example #28
def verify_take(src_shape, indices_src, axis=None):
    src_dtype = "float32"
    indices_dtype = "int32"
    indices_src = np.array(indices_src, dtype=indices_dtype)
    a = sym.Variable("a", shape=src_shape)
    indices = sym.Variable("indices", shape=indices_src.shape)
    y = sym.take(a, indices, axis=axis)

    def forward(a, indices):
        return np.take(a, indices=indices, axis=axis)

    a_src = np.arange(np.prod(src_shape), dtype=src_dtype).reshape(src_shape)

    check_function(y,
                   forward,
                   dtype={
                       'a': src_dtype,
                       'indices': indices_dtype
                   },
                   values={
                       'a': a_src,
                       'indices': indices_src
                   })
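Sample calls (illustrative; indices must be valid for the given axis, or for the flattened array when axis is None):

verify_take((4,), [1])
verify_take((3, 3, 3), [[0, 2]], axis=1)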
Example #29
def test_exp():
    x = sym.Variable("x")
    y = sym.exp(x)

    def forward(x):
        return np.exp(x)

    def backward(head_grads, x):
        return [np.exp(x) * head_grads]

    dtype = "float32"
    dshape = (1, 3, 32, 32)
    inputs = {'x': (dshape, x)}
    helper(y, inputs, dtype, forward, backward)
Example #30
def verify_squeeze(dshape, axis):
    x = sym.Variable("x")
    if axis is not None:
        y = sym.squeeze(x, axis=axis)
    else:
        y = sym.squeeze(x)
    y = y + 1

    def forward(x):
        return np.squeeze(x, axis=axis) + 1

    dtype = "float32"
    inputs = {'x': (dshape, x)}
    helper(y, inputs, dtype, forward)
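Sample calls (illustrative; every squeezed axis must have extent 1):

verify_squeeze((1, 3, 2, 5), None)
verify_squeeze((1, 3, 1), 2)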