Example #1
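Note on imports: these snippets appear to come from the NNVM (pre-Relay TVM) test suite and Python frontend. Each assumes a module-level preamble along these lines; the exact module paths are an assumption based on NNVM-era TVM and may need adjusting:

import json
import numpy as np
import tvm
import topi.testing
import nnvm
import nnvm.compiler
import nnvm.symbol as sym
import nnvm.graph as graph
from nnvm.compiler import graph_util
from nnvm.testing.config import ctx_list
from tvm.contrib import graph_runtime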
def test_conv_ewise_injective():
    x = sym.Variable("x")
    y = sym.conv2d(x, channels=32, kernel_size=(3, 3), groups=32,
                   name="y", padding=(1,1))
    y = sym.flatten(y + 1) + 1
    dtype = "float32"
    dshape = (1, 32, 18, 18)
    kshape = (32, 1, 3, 3)
    oshape = (1, 32 * 18 * 18)
    shape_dict = {"x": dshape}

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
        m = graph_runtime.create(graph, lib, ctx)
        # print(graph.ir(join_entry_attrs=["shape"]))
        assert graph.index.num_nodes == 5
        # set input
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
        bias = tvm.nd.array(np.random.uniform(size=kshape[0]).astype(dtype))
        m.run(x=data, y_weight=kernel, y_bias=bias)
        # get output
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
        c_np = topi.testing.depthwise_conv2d_python_nchw(
            data.asnumpy(), kernel.asnumpy(), (1,1), 'SAME')
        c_np = c_np + bias.asnumpy().reshape(kshape[0], 1, 1) + 1
        c_np = c_np.reshape(c_np.shape[0], np.prod(c_np.shape[1:])) + 1
        np.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)
Example #2
def _impl(cls, inputs, args, params):
    inputs[0] = _sym.flatten(inputs[0])
    args['units'] = infer_channels(inputs[1], params)
    return AttrCvt(
        'dense',
        ignores=['axis', 'axis_w'],
        extras={'use_bias': len(inputs) == 3},
    )(inputs, args, params)
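For reference, the AttrCvt call above rewrites a framework fully-connected op into NNVM's dense: flatten the input, infer the output width from the weight, and keep the bias only when a third input is present. A hand-rolled equivalent might look like this (a sketch only; _sym is nnvm.symbol and infer_channels is the frontend helper the snippet already assumes):

def _fully_connected_impl(inputs, args, params):
    # flatten the data input, then pass data (+ weight and optional bias) to dense
    data = _sym.flatten(inputs[0])
    units = infer_channels(inputs[1], params)
    # a third input means the framework op carries a bias term
    return _sym.dense(data, *inputs[1:], units=units,
                      use_bias=(len(inputs) == 3))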
Example #3
def test_flatten():
    x = sym.Variable("x", shape=(10, 20, 10, 10))
    y = sym.flatten(x, name="y")
    g, ldict = correct_layout(y, "NCHW")
    assert(ldict["x"][0] == "NCHW")
    assert(ldict["y"][0] == "__undef__")
    # second pass will insert layout transform
    _, ldict = correct_layout(g, "NCHW16c")
    assert(ldict["x"][0] == "NCHW16c")
    assert(ldict["x_NCHW"][0] == "NCHW")
    assert(ldict["y"][0] == "__undef__")
Example #4
def test_infer_shape():
    x = sym.Variable('x', shape=(2, 4, 2))
    y = sym.elemwise_add(x, x, name='add1')
    y = sym.flatten(y, name="flatten")
    g = graph.create(y)
    g._set_json_attr("shape_attr_key", "shape")
    g = g.apply('InferShape')
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    jnodes = jgraph['nodes']
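    # node_row_ptr maps a node index to its first entry in the flat
    # per-entry attribute arrays (a node may have more than one output)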
    jnode_row_ptr = jgraph['node_row_ptr']
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert g.json_attr('shape')[jnode_row_ptr[nindex["flatten"]]] == [2, 8]
    assert g.json_attr('shape')[jnode_row_ptr[nindex["add1"]]] == [2, 4, 2]
Example #5
def test_infer_shape_known_partial():
    x = sym.Variable('x')
    y = sym.elemwise_add(x, x, name='add1')
    y = sym.flatten(y, name="flatten1")
    g = graph.create(y)
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    shape = [[2, 4, 2], [], []]
    g._set_json_attr("shape", shape, 'list_shape')
    g = g.apply("InferShape")
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    assert g.json_attr('shape')[jnode_row_ptr[nindex["flatten1"]]] == [2, 8]
    assert g.json_attr('shape')[jnode_row_ptr[nindex["add1"]]] == [2, 4, 2]
Example #6
def test_dense():
    x = sym.Variable("x")
    y = sym.dense(x, units=3, name="dense")
    y = sym.flatten(y)

    def forward(x, dense_weight, dense_bias):
        return np.dot(x, dense_weight.T) + dense_bias

    dtype = "float32"
    inputs = {
        'x': ((10, 100), x),
        'dense_weight': ((3, 100),),
        'dense_bias': ((3,),)
    }
    helper(y, inputs, dtype, forward)
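helper here is an older NNVM test utility (the ancestor of the check_function used in Example #10). A minimal sketch of what it does, assuming the preamble imports above; the real utility also checks gradients:

def helper(symbol, inputs, dtype, forward):
    # inputs maps name -> (shape, [optional symbol]); compile per target,
    # run on random data, and compare with the NumPy reference `forward`
    shapes = {name: spec[0] for name, spec in inputs.items()}
    for target, ctx in ctx_list():
        g, lib, _ = nnvm.compiler.build(symbol, target, shapes)
        m = graph_runtime.create(g, lib, ctx)
        np_inputs = {name: np.random.uniform(size=shape).astype(dtype)
                     for name, shape in shapes.items()}
        m.run(**{k: tvm.nd.array(v) for k, v in np_inputs.items()})
        expected = forward(**np_inputs)
        out = m.get_output(0, tvm.nd.empty(expected.shape, dtype))
        np.testing.assert_allclose(out.asnumpy(), expected,
                                   atol=1e-5, rtol=1e-5)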
Example #7
def test_ewise_injective():
    x = sym.Variable("x")
    y = x * 2
    y = sym.flatten(y) + 1
    dshape = (10, 2, 3)
    shape_dict = {"x": dshape}
    dtype = "float32"
    target = "llvm"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
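        # x*2, flatten, and +1 are all injective and fuse into one kernel,
        # so the graph is just the input plus a single fused op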
        assert graph.index.num_nodes == 2
        m = graph_runtime.create(graph, lib, ctx)
        x_np = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=x_np)
        out = m.get_output(0, tvm.nd.empty((10, 6)))
        np.testing.assert_allclose(
            out.asnumpy(),  x_np.reshape(out.shape) * 2 + 1,
            atol=1e-5, rtol=1e-5)
Example #8
def test_injective_reduce_injective():
    x = sym.Variable("x")
    x = sym.flatten(x) + 1
    y = sym.sum(x, axis=1)
    dtype = "float32"
    dshape = (32, 1, 18, 18)
    shape_dict = {"x": dshape}

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
        m = graph_runtime.create(graph, lib, ctx)
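        # the injective flatten/+1 fuses into the sum reduction: input + one op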
        assert graph.index.num_nodes == 2
        data = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=data)
        c_np = np.sum(data.reshape(32, 18 * 18) + 1, axis=1)
        # get output
        out = m.get_output(0, tvm.nd.empty(c_np.shape, dtype))
        np.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)
Example #9
def test_plan_memory():
    x = sym.Variable('x', shape=(4, 2))
    x2 = sym.elemwise_add(x, x, name='addk')
    y = sym.flatten(x2, name="reshapek")
    y = sym.elemwise_add(y, x2, name="add2")
    y = sym.elemwise_add(y, y)
    g = graph.create(y)
    g._set_json_attr("shape_attr_key", "shape")
    g = g.apply(["InferShape", "InferType", "PlanMemory"])
    jgraph = json.loads(g.apply('SaveJSON').json_attr('json'))
    jnodes = jgraph['nodes']
    jnode_row_ptr = jgraph['node_row_ptr']
    storage_id = g.json_attr('storage_id')
    nindex = {n['name']: i for i, n in enumerate(jnodes)}
    # the flatten (reshapek) gets its own buffer rather than aliasing addk
    assert (storage_id[jnode_row_ptr[nindex["addk"]]] !=
            storage_id[jnode_row_ptr[nindex["reshapek"]]])
    # add2 is reshapek's last consumer, so it can write in place over its buffer
    assert (storage_id[jnode_row_ptr[nindex["add2"]]] ==
            storage_id[jnode_row_ptr[nindex["reshapek"]]])
Example #10
def test_dense():
    x = sym.Variable("x", shape=(10, 100))
    w = sym.Variable("dense_weight", shape=(3, 100))
    b = sym.Variable("dense_bias", shape=(3,))
    y = sym.dense(x, w, b, use_bias=True, units=3, name="dense")
    y = sym.flatten(y)

    def forward(x, dense_weight, dense_bias):
        return np.dot(x, dense_weight.T) + dense_bias
    shape = {
        'x': (10, 100),
        'w': (3, 100),
        'b': (3,)
    }
    # Don't check gradients on cuda because it doesn't yet support ewise after reduce
    check_function(y, forward, shape=shape,
                   exclude_targets={'cuda'}, numerical_grads=True)
    check_function(y, forward, shape=shape,
                   only_targets={'cuda'}, numerical_grads=False)
Example #11
def yolo(num_classes=1470):
    data = sym.Variable("data")
    body = conv2d_block(data,
                        "conv1",
                        64,
                        kernel_size=(7, 7),
                        strides=(2, 2),
                        padding=(3, 3))
    body = sym.max_pool2d(data=body,
                          pool_size=(2, 2),
                          strides=(2, 2),
                          name="pool1")
    body = conv2d_block(body,
                        "conv2",
                        192,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = sym.max_pool2d(data=body,
                          pool_size=(2, 2),
                          strides=(2, 2),
                          name="pool2")

    body = conv2d_block(body,
                        "conv3",
                        128,
                        kernel_size=(1, 1),
                        strides=(1, 1),
                        padding=(0, 0))
    body = conv2d_block(body,
                        "conv4",
                        256,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = conv2d_block(body,
                        "conv5",
                        256,
                        kernel_size=(1, 1),
                        strides=(1, 1),
                        padding=(0, 0))
    body = conv2d_block(body,
                        "conv6",
                        512,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = sym.max_pool2d(data=body,
                          pool_size=(2, 2),
                          strides=(2, 2),
                          name="pool3")

    body = conv2d_block(body,
                        "conv7",
                        256,
                        kernel_size=(1, 1),
                        strides=(1, 1),
                        padding=(0, 0))
    body = conv2d_block(body,
                        "conv8",
                        512,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = conv2d_block(body,
                        "conv9",
                        256,
                        kernel_size=(1, 1),
                        strides=(1, 1),
                        padding=(0, 0))
    body = conv2d_block(body,
                        "conv10",
                        512,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = conv2d_block(body,
                        "conv11",
                        256,
                        kernel_size=(1, 1),
                        strides=(1, 1),
                        padding=(0, 0))
    body = conv2d_block(body,
                        "conv12",
                        512,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = conv2d_block(body,
                        "conv13",
                        256,
                        kernel_size=(1, 1),
                        strides=(1, 1),
                        padding=(0, 0))
    body = conv2d_block(body,
                        "conv14",
                        512,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = conv2d_block(body,
                        "conv15",
                        512,
                        kernel_size=(1, 1),
                        strides=(1, 1),
                        padding=(0, 0))
    body = conv2d_block(body,
                        "conv16",
                        1024,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = sym.max_pool2d(data=body,
                          pool_size=(2, 2),
                          strides=(2, 2),
                          name="pool4")

    body = conv2d_block(body,
                        "conv17",
                        512,
                        kernel_size=(1, 1),
                        strides=(1, 1),
                        padding=(0, 0))
    body = conv2d_block(body,
                        "conv18",
                        1024,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = conv2d_block(body,
                        "conv19",
                        512,
                        kernel_size=(1, 1),
                        strides=(1, 1),
                        padding=(0, 0))
    body = conv2d_block(body,
                        "conv20",
                        1024,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = conv2d_block(body,
                        "conv21",
                        1024,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = conv2d_block(body,
                        "conv22",
                        1024,
                        kernel_size=(3, 3),
                        strides=(2, 2),
                        padding=(1, 1))
    body = conv2d_block(body,
                        "conv23",
                        1024,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = conv2d_block(body,
                        "conv24",
                        1024,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))

    flatten = sym.flatten(data=body, name="flatten")
    fc = sym.dense(data=flatten, units=4096, use_bias=False, name="fc1")
    act = sym.relu(data=fc, name="relu1")
    fc = sym.dense(data=act, units=num_classes, use_bias=False, name="fc2")

    return fc
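conv2d_block is not defined in this listing. A plausible sketch consistent with the calls above; the helper's name is from the snippet, but the bias use and the leaky-ReLU activation are assumptions based on YOLOv1:

def conv2d_block(data, name, channels, kernel_size=(3, 3),
                 strides=(1, 1), padding=(1, 1)):
    # convolution followed by the leaky ReLU used throughout YOLOv1
    conv = sym.conv2d(data=data, channels=channels, kernel_size=kernel_size,
                      strides=strides, padding=padding, use_bias=True,
                      name=name + "_conv")
    return sym.leaky_relu(data=conv, alpha=0.1, name=name + "_leaky")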
Example #12
def test_dense():
    x = sym.Variable('x')
    x1 = sym.dense(x, units=3, name="dense")
    x2 = sym.flatten(x1)
    x3 = sym.softmax(x2)
    assert x3.list_input_names() == ['x', 'dense_weight', 'dense_bias']
Example #13
def test_dense():
    x = sym.Variable('x')
    x1 = sym.dense(x, units=3, name="dense")
    x2 = sym.flatten(x1)
    x3 = sym.softmax(x2)
    assert x3.list_input_names() == ['x', 'dense_weight', 'dense_bias']
Example #14
def test_cnn_gradients():
    # input data
    h = 128
    w = 128
    data_shape = (1000, 3, h, w)
    data = sym.Variable('data', shape=data_shape, dtype=0)

    # conv2d
    num_channels = 64
    kernel_size = 32
    conv_w_shape = (num_channels, 3, kernel_size, kernel_size)
    conv_b_shape = (num_channels, )
    conv_w = sym.Variable('conv_w', shape=conv_w_shape)
    conv_b = sym.Variable('conv_b', shape=conv_b_shape)
    conv1 = sym.conv2d(data=data,
                       weight=conv_w,
                       bias=conv_b,
                       channels=num_channels,
                       kernel_size=(kernel_size, kernel_size),
                       name='conv1')
    # relu1
    relu1 = sym.relu(data=conv1, name='relu1')
    # max pooling
    max_pooling1 = sym.max_pool2d(data=relu1,
                                  pool_size=(2, 2),
                                  name='max_pooling1')
    # flatten
    flatten1 = sym.flatten(data=max_pooling1)
    # shape after flatten: the unpadded conv gives (h - kernel_size + 1), and
    # max_pool2d with pool (2, 2) and default strides (1, 1) gives (h - kernel_size)
    flatten_out_shape = (h - kernel_size) * (w - kernel_size) * num_channels
    # dense1
    dense1_hidden_units = 100
    dense1 = sym.dense(data=flatten1, name='dense1', units=dense1_hidden_units)
    # relu2
    relu2 = sym.relu(data=dense1, name='relu2')
    # dense2
    dense2_hidden_units = 10
    dense2 = sym.dense(data=relu2, name='dense2', units=dense2_hidden_units)
    # softmax
    mlp = sym.softmax(data=dense2, name='softmax')
    # fake non-sparse label
    label = sym.full_like(mlp, fill_value=1)
    # cross entropy loss
    ce_loss = sym.sum(sym.elemwise_mul(sym.log_softmax(dense2), label),
                      axis=1,
                      keepdims=True,
                      name="ce_loss")

    # input variables:
    # print grad_g.symbol.list_input_names()
    # >> ['data', 'conv_w', 'conv_b',
    #     'dense1_weight', 'dense1_bias',
    #     'dense2_weight', 'dense2_bias']

    # output gradient variables:
    # print grad_g.symbol.list_output_names()
    # >> ['conv1_grad_data', 'conv1_grad_weight', 'conv1_grad_bias',
    #     'dense1_grad_weight', 'dense1_grad_bias',
    #     'dense2_grad_weight', 'dense2_grad_bias']
    grad_g = graph_util.get_gradient_graph(ce_loss,
                                           ce_loss.list_input_variables())

    # infer shape
    in_shapes, out_shapes = graph_util.infer_shape(grad_g)

    # forward graph shape
    assert in_shapes == [
        list(data_shape),
        list(conv_w_shape),
        list(conv_b_shape), [dense1_hidden_units, flatten_out_shape],
        [dense1_hidden_units], [dense2_hidden_units, dense1_hidden_units],
        [dense2_hidden_units]
    ]
    # input grads shape should be equal with input shape
    assert in_shapes == out_shapes

    # output grads w.r.t input variables
    grads = graph_util.gradients(ce_loss, ce_loss.list_input_variables())

    # gradients number should be equal with grad_input number
    assert len(grads) == len(ce_loss.list_input_variables())

    # infer type
    in_dtypes, out_dtypes = graph_util.infer_dtype(grad_g)
    assert out_dtypes == [
        'float32', 'float32', 'float32', 'float32', 'float32', 'float32',
        'float32'
    ]
Example #15
    # Don't forget that the label_names and filenames are in binary and need conversion if used.

    a = tvm.placeholder((1, 3, 32, 32), name="a")
    b = tvm.placeholder((1, 10), name="b")

    dense_weight = sym.Variable("dense_weight",
                                init=np.empty((900, 10), dtype=dtype))
    # define network
    data = sym.Variable("data")
    y1 = sym.conv2d(data=data,
                    channels=1,
                    kernel_size=(3, 3),
                    padding=(0, 0),
                    use_bias=False,
                    out_layout='NCHW')
    y2 = sym.flatten(y1)
    #y3 = sym.dense(y2, units=10, use_bias=False)
    y3 = sym.dense(y2, weight=dense_weight, use_bias=False)
    y4 = sym.softmax(y3)
    out = y4  # y4 stands in for the loss function here

    # create workload
    net, params = create_workload(out, batch_size, image_shape, dtype)
    #print(net.debug_str())

    target = tvm.target.create('llvm')
    #target = tvm.target.create('opencl')
    with nnvm.compiler.build_config(opt_level=0):
        graph, lib, params = nnvm.compiler.build(net,
                                                 target,
                                                 shape={"data": data_shape},
Example #16
def test_flatten():
    x = sym.Variable("x", shape=(10, 20, 10))
    y = sym.flatten(x) * 2
    y = sym.exp(y, name="y")
    sdict = infer_shape(y)
    assert (sdict["y"][0] == [10, 200])
Example #17
out_shape = (batch_size, num_class)
dtype = "float32"

# name "data" is preferred!
data = sym.Variable("data")

# To use create_workload, you may have to let the system generate the layer
# kernels automatically; passing in a self-defined kernel raises an error.
#conv_kernel = sym.Variable("conv_kernel")
x = sym.conv2d(data=data,
               channels=1,
               kernel_size=(3, 3),
               padding=(0, 0),
               use_bias=False,
               out_layout='NCHW')
x = sym.flatten(data=x)
x = sym.dense(data=x, units=num_class, use_bias=False)
params = {}
g = graph.create(x)
input_shapes, _ = graph_util.infer_shape(g, data=data_shape)
shape_dict = dict(zip(g.index.input_names, input_shapes))
np.random.seed(0)
initializer = Xavier()
for k, v in shape_dict.items():
    if k == 'data':
        print(k)
        continue
    print(k, end='\t')
    print(v)
    init_value = np.zeros(v).astype(dtype)
    # the snippet is cut off here; the usual create_workload pattern fills the
    # array with the initializer and stores it as a parameter (an assumption)
    initializer(k, init_value)
    params[k] = tvm.nd.array(init_value)
Example #18
def test_flatten():
    x = sym.Variable("x", shape=(10, 20, 10))
    y = sym.flatten(x) * 2
    y = sym.exp(y, name="y")
    sdict = infer_shape(y)
    assert(sdict["y"][0] == [10, 200])
Example #19
def test_cnn_gradients():
    # input data
    h = 128
    w = 128
    data_shape = (1000, 3, h, w)
    data = sym.Variable('data', shape=data_shape, dtype=0)

    # conv2d
    num_channels = 64
    kernel_size = 32
    conv_w_shape = (num_channels, 3, kernel_size, kernel_size)
    conv_b_shape = (num_channels,)
    conv_w = sym.Variable('conv_w', shape=conv_w_shape)
    conv_b = sym.Variable('conv_b', shape=conv_b_shape)
    conv1 = sym.conv2d(data=data, weight=conv_w, bias=conv_b,
                       channels=num_channels,
                       kernel_size=(kernel_size, kernel_size),
                       name='conv1')
    # relu1
    relu1 = sym.relu(data=conv1, name='relu1')
    # max pooling
    max_pooling1 = sym.max_pool2d(data=relu1, pool_size=(2, 2), name='max_pooling1')
    # flatten
    flatten1 = sym.flatten(data=max_pooling1)
    # shape after flatten: unpadded conv gives (h - kernel_size + 1), then
    # max_pool2d with pool (2, 2) and default strides (1, 1) gives (h - kernel_size)
    flatten_out_shape = (h - kernel_size) * (w - kernel_size) * num_channels
    # dense1
    dense1_hidden_units = 100
    dense1 = sym.dense(data=flatten1, name='dense1', units=dense1_hidden_units)
    # relu2
    relu2 = sym.relu(data=dense1, name='relu2')
    # dense2
    dense2_hidden_units = 10
    dense2 = sym.dense(data=relu2, name='dense2', units=dense2_hidden_units)
    # softmax
    mlp = sym.softmax(data=dense2, name='softmax')
    # fake non-sparse label
    label = sym.full_like(mlp, fill_value=1)
    # cross entropy loss
    ce_loss = sym.sum(
        sym.elemwise_mul(sym.log_softmax(dense2), label),
        axis=1,
        keepdims=True,
        name="ce_loss")

    # input variables:
    # print grad_g.symbol.list_input_names()
    # >> ['data', 'conv_w', 'conv_b',
    #     'dense1_weight', 'dense1_bias',
    #     'dense2_weight', 'dense2_bias']

    # output gradient variables:
    # print grad_g.symbol.list_output_names()
    # >> ['conv1_grad_data', 'conv1_grad_weight', 'conv1_grad_bias',
    #     'dense1_grad_weight', 'dense1_grad_bias',
    #     'dense2_grad_weight', 'dense2_grad_bias']
    grad_g = graph_util.get_gradient_graph(ce_loss, ce_loss.list_input_variables())

    # infer shape
    in_shapes, out_shapes = graph_util.infer_shape(grad_g)

    # forward graph shape
    assert in_shapes == [list(data_shape), list(conv_w_shape), list(conv_b_shape),
                          [dense1_hidden_units, flatten_out_shape], [dense1_hidden_units],
                          [dense2_hidden_units, dense1_hidden_units], [dense2_hidden_units]]
    # input grads shape should be equal with input shape
    assert in_shapes == out_shapes

    # output grads w.r.t input variables
    grads = graph_util.gradients(ce_loss, ce_loss.list_input_variables())

    # gradients number should be equal with grad_input number
    assert len(grads) == len(ce_loss.list_input_variables())

    # infer type
    in_dtypes, out_dtypes = graph_util.infer_dtype(grad_g)
    assert out_dtypes == ['float32', 'float32', 'float32', 'float32', 'float32', 'float32', 'float32']