Code Example #1
def test_batchnorm():
    x = sym.Variable("x")
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    shape = (10, 20)
    eps = 1e-5
    dtype = "float32"
    y = sym.batch_norm(
        x, gamma, beta, moving_mean, moving_var, epsilon=eps)

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": shape})
        m = graph_runtime.create(graph, lib, ctx)
        x_np = np.random.uniform(size=shape).astype(dtype)
        mean_np = np.random.uniform(size=shape[1]).astype(dtype)
        var_np = np.random.uniform(size=shape[1]).astype(dtype)
        gamma_np = np.random.uniform(size=shape[1]).astype(dtype)
        beta_np = np.random.uniform(size=shape[1]).astype(dtype)
        res = tvm.nd.empty(shape)
        m.run(x=x_np, moving_mean=mean_np, moving_var=var_np,
              gamma=gamma_np, beta=beta_np)
        m.get_output(0, res)
        res_np = (x_np - mean_np) / np.sqrt(var_np + eps) * gamma_np + beta_np
        np.testing.assert_allclose(
            res.asnumpy(), res_np, atol=1e-5, rtol=1e-5)
Code Example #2
def get_sym(out_channel):
    data = sym.Variable(name="data")
    data = sym.conv2d(data=data, kernel_size=(3, 3), channels=out_channel, padding=(1, 1),
                      layout="NCHW", kernel_layout="OIHW", use_bias=True)
    data = sym.batch_norm(data)
    data = elu(data)
    return data
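Here `elu` is a helper defined elsewhere in the source file and not shown in this excerpt. A minimal sketch of such a helper in terms of NNVM symbol ops (an assumption for illustration, not the project's verbatim code):

def elu(data, alpha=1.0):
    # ELU: x for x > 0, alpha * (exp(x) - 1) for x <= 0, expressed with
    # relu/exp so the whole thing stays a symbolic NNVM expression
    return -alpha * sym.relu(1 - sym.exp(data)) + sym.relu(data)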
Code Example #3
    def test_duplex_data_transfer():
        """ This unittest tests duplex communication between the host and
        accelerator device. The network is as follows:
                    data
                      |
                    conv2d  (acc)
                      |
                 batch_norm (cpu)
                      |
                    conv2d  (acc)
        """
        out_channels = 16
        data = symbol.Variable(name="data")
        simple_net = symbol.conv2d(data=data, kernel_size=(3, 3),
                                   channels=out_channels, padding=(1, 1),
                                   use_bias=False)
        simple_net = symbol.batch_norm(simple_net)
        simple_net = symbol.conv2d(data=simple_net, kernel_size=(3, 3),
                                   channels=out_channels, padding=(1, 1),
                                   use_bias=False)

        batch_size = 1
        data_shape = (batch_size, 3, 224, 224)
        shape_dict = {"data": data_shape}
        net, params = utils.create_workload(simple_net, batch_size,
                                            data_shape[1:])
        params["data"] = data = np.random.uniform(-1, 1,
                                                  size=data_shape).astype(
            "float32")

        check_graph(net, ['batch_norm'], shape_dict, params)
Code Example #4
def Conv(data,
         num_filter,
         kernel=(1, 1),
         stride=(1, 1),
         pad=(0, 0),
         name=None,
         suffix=''):
    if pad[0] != 0 or pad[1] != 0:
        data = sym.pad(data=data,
                       pad_width=((0, 0), (pad[0], pad[0]),
                                  (pad[1], pad[1]), (0, 0)))
    conv = sym.conv2d(data=data,
                      channels=num_filter,
                      kernel_size=kernel,
                      strides=stride,
                      padding=(0, 0),
                      use_bias=False,
                      layout='NHWC',
                      kernel_layout='HWOI',
                      name="%s%s_conv2d" % (name, suffix))
    bn = sym.batch_norm(data=conv,
                        name="%s%s_batchnorm" % (name, suffix),
                        epsilon=2e-5,
                        axis=3)
    act = sym.relu(data=bn, name="%s%s_relu" % (name, suffix))
    return act
Code Example #5
def check(dim, axis, nstep):
    eps = 0.01
    x = sym.Variable("x") + 1
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    y1, y2 = x, sym.Variable("xx") + 1
    ishape = {"x": tuple(10 for i in range(dim))}
    for i in range(nstep):
        y1 = sym.batch_norm(y1 + 1,
                            gamma,
                            beta,
                            moving_mean,
                            moving_var,
                            epsilon=eps,
                            axis=axis)
        y1 = sym.dropout(y1)
        y2 = simple_bn(y2 + 1,
                       gamma,
                       beta,
                       moving_mean,
                       moving_var,
                       epsilon=eps,
                       axis=axis,
                       shape=ishape["x"])
    g = nnvm.graph.create(y1)
    g2 = nnvm.graph.create(y2)
    graph_attr.set_shape_inputs(g, ishape)
    g1 = g.apply("InferShape").apply("SimplifyInference")
    # the simplified graph should match the manually expanded one
    graph_util.check_graph_equal(g1, g2)
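`simple_bn` is the test's manual expansion of inference-time batch_norm, defined elsewhere in the file. A sketch of what such a helper looks like, assuming NNVM's broadcast ops (note that for `check_graph_equal` to pass, the real helper has to mirror the exact operator sequence that SimplifyInference emits):

def simple_bn(x, gamma, beta, moving_mean, moving_var,
              axis=1, epsilon=1e-5, shape=None):
    # inference-time batch_norm folded into a per-channel scale and shift:
    # (x - moving_mean) / sqrt(moving_var + epsilon) * gamma + beta
    scale = gamma / sym.sqrt(moving_var + epsilon)
    shift = beta - moving_mean * scale
    # reshape (C,) to (C, 1, ..., 1) so it right-aligns with the channel axis
    num_newaxis = len(shape) - axis - 1
    if num_newaxis:
        scale = sym.expand_dims(scale, axis=1, num_newaxis=num_newaxis)
        shift = sym.expand_dims(shift, axis=1, num_newaxis=num_newaxis)
    return sym.broadcast_add(sym.broadcast_mul(x, scale), shift)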
Code Example #6
File: test_op_fusion.py Project: LANHUIYING/tvm
def get_sym(out_channel):
    data = sym.Variable(name="data")
    data = sym.conv2d(data=data, kernel_size=(3, 3), channels=out_channel, padding=(1, 1),
                      layout="NCHW", kernel_layout="OIHW", use_bias=True)
    data = sym.batch_norm(data)
    data = elu(data)
    return data
Code Example #7
File: mlp.py Project: lyqscmy/tvm_model_zoo
def mlp(units):
    data = sym.Variable("data")

    deep = fc_layer(data, units[0], "fc_layer1")
    deep = fc_layer(deep, units[1], "fc_layer2")

    name = "output_layer"
    w = sym.Variable(name + "_fc_weight")
    b = sym.Variable(name + "_fc_bias")
    fc = sym.dense(data=deep,
                   weight=w,
                   bias=b,
                   units=units[2],
                   name=name + "_fc")

    gamma = sym.Variable(name + "_bn_gamma")
    beta = sym.Variable(name + "_bn_beta")
    moving_mean = sym.Variable(name + "_bn_moving_mean")
    moving_var = sym.Variable(name + "_bn_moving_var")

    bn = sym.batch_norm(data=fc,
                        gamma=gamma,
                        beta=beta,
                        moving_mean=moving_mean,
                        moving_var=moving_var,
                        name=name + '_bn')

    mlp = sym.softmax(data=bn, name=name + '_softmax')
    return mlp
Code Example #8
def test_batch_norm():
    x = sym.Variable('x')
    y = sym.dense(x, units=30, name="fc")
    z = sym.batch_norm(x, name='bn')
    assert z.list_input_names('aux_state') == [
        'bn_moving_mean', 'bn_moving_var'
    ]
    assert z.list_input_names('read_only') == ['x', 'bn_gamma', 'bn_beta']
Code Example #9
def test_batchnorm():
    x = sym.Variable("x", shape=(10, 20))
    y = sym.batch_norm(1 / x, name="bn")
    sdict = infer_shape(y)
    assert(sdict["bn_gamma"][0] == [20])

    x = sym.Variable("x", shape=(10, 20, 30, 40))
    y = sym.batch_norm(data=x, axis=0, epsilon=2e-5, name='bn')
    sdict = infer_shape(y)
    assert(sdict['bn_moving_var'][0] == [10])

    y = sym.batch_norm(data=x, axis=1, epsilon=2e-5, name='bn')
    sdict = infer_shape(y)
    assert(sdict['bn_gamma'][0] == [20])

    y = sym.batch_norm(data=x, axis=2, epsilon=2e-5, name='bn')
    sdict = infer_shape(y)
    assert(sdict['bn_beta'][0] == [30])

    y = sym.batch_norm(data=x, axis=3, epsilon=2e-5, name='bn')
    sdict = infer_shape(y)
    assert(sdict['bn_moving_mean'][0] == [40])
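`infer_shape` is a small utility from the test module, not shown in the excerpt. A sketch of how it presumably works, assuming NNVM's graph API (`apply("InferShape")` plus the `shape` graph attribute); treat the details as an approximation of the real helper:

from nnvm import graph

def infer_shape(sym):
    g = graph.create(sym)
    g._set_json_attr("shape_attr_key", "shape")  # pick up shapes from Variable attrs
    g = g.apply("InferShape")
    sdict = {}
    vshape = g.json_attr("shape")
    entry_ptr = g.index.entry_ptr
    for i, n in enumerate(g.index.nodes):
        # collect the inferred shapes of every output entry of each node
        sdict[n["name"]] = vshape[entry_ptr[i]:entry_ptr[i + 1]]
    return sdict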
Code Example #10
File: test_infer_shape.py Project: LANHUIYING/tvm
def test_batchnorm():
    x = sym.Variable("x", shape=(10, 20))
    y = sym.batch_norm(1 / x, name="bn")
    sdict = infer_shape(y)
    assert(sdict["bn_gamma"][0] == [20])

    x = sym.Variable("x", shape=(10, 20, 30, 40))
    y = sym.batch_norm(data=x, axis=0, epsilon=2e-5, name='bn')
    sdict = infer_shape(y)
    assert(sdict['bn_moving_var'][0] == [10])

    y = sym.batch_norm(data=x, axis=1, epsilon=2e-5, name='bn')
    sdict = infer_shape(y)
    assert(sdict['bn_gamma'][0] == [20])

    y = sym.batch_norm(data=x, axis=2, epsilon=2e-5, name='bn')
    sdict = infer_shape(y)
    assert(sdict['bn_beta'][0] == [30])

    y = sym.batch_norm(data=x, axis=3, epsilon=2e-5, name='bn')
    sdict = infer_shape(y)
    assert(sdict['bn_moving_mean'][0] == [40])
Code Example #11
def separable_conv_block(data,
                         name,
                         depthwise_channels,
                         pointwise_channels,
                         kernel_size=(3, 3),
                         downsample=False,
                         padding=(1, 1),
                         epsilon=1e-5):
    """Helper function to get a separable conv block"""
    if downsample:
        strides = (2, 2)
    else:
        strides = (1, 1)
    # depthwise convolution + bn + relu
    conv1 = sym.conv2d(data=data,
                       channels=depthwise_channels,
                       groups=depthwise_channels,
                       kernel_size=kernel_size,
                       strides=strides,
                       padding=padding,
                       use_bias=False,
                       layout="NCHW",
                       name=name + "_depthwise_conv1")
    bn1 = sym.batch_norm(data=conv1, epsilon=epsilon, name=name + "_bn1")
    act1 = sym.relu(data=bn1, name=name + "_relu1")
    # pointwise convolution + bn + relu
    conv2 = sym.conv2d(data=act1,
                       channels=pointwise_channels,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       padding=(0, 0),
                       use_bias=False,
                       layout="NCHW",
                       name=name + "_conv2")
    bn2 = sym.batch_norm(data=conv2, epsilon=epsilon, name=name + "_bn2")
    act2 = sym.relu(data=bn2, name=name + "_relu2")
    return act2
Code Example #12
File: fc.py Project: lyqscmy/tvm_model_zoo
def fc_layer(data, units, name):
    w = sym.Variable(name + "_w")
    b = sym.Variable(name + "_b")
    fc = sym.dense(data=data, weight=w, bias=b, units=units, name=name + '_fc')
    relu = sym.relu(data=fc, name=name + '_relu')

    gamma = sym.Variable(name + "_gamma")
    beta = sym.Variable(name + "_beta")
    moving_mean = sym.Variable(name + "_moving_mean")
    moving_var = sym.Variable(name + "_moving_var")
    bn = sym.batch_norm(data=relu,
                        gamma=gamma,
                        beta=beta,
                        moving_mean=moving_mean,
                        moving_var=moving_var,
                        name=name + '_bn')
    return bn
Code Example #13
def test_duplex_data_transfer(device, target):
    R""" This unittest tests duplex communication between the host and
    accelerator device. The network is as follows:
                data
                  |
                conv2d  (acc)
                  |
             batch_norm (cpu)
                  |
                conv2d  (acc)
    """
    if not tvm.module.enabled(device):
        print("Skip test because %s is not enabled." % device)
        return

    out_channels = 16
    data = symbol.Variable(name="data")
    simple_net = symbol.conv2d(data=data,
                               kernel_size=(3, 3),
                               channels=out_channels,
                               padding=(1, 1),
                               use_bias=False)
    simple_net = symbol.batch_norm(simple_net)
    simple_net = symbol.conv2d(data=simple_net,
                               kernel_size=(3, 3),
                               channels=out_channels,
                               padding=(1, 1),
                               use_bias=False)

    batch_size = 1
    data_shape = (batch_size, 3, 224, 224)
    shape_dict = {"data": data_shape}
    net, params = utils.create_workload(simple_net, batch_size, data_shape[1:])
    params["data"] = data = np.random.uniform(
        -1, 1, size=data_shape).astype("float32")

    target = {"cpu": "llvm", device: target}
    op_name_device = {
        "conv2d": device,
        "batch_norm": "cpu",
        "broadcast_add": "cpu",
        "elemwise_mul": "cpu"
    }
    fallback_device = tvm.context("cpu")
    check_graph(net, target, op_name_device, fallback_device, shape_dict,
                params)
Code Example #14
File: test_top_level1.py Project: aswinjohn/tvm
def test_batchnorm():
    x = sym.Variable("x")
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    eps = 1e-5
    y = sym.batch_norm(x, gamma, beta, moving_mean, moving_var, epsilon=eps)

    def forward(x, gamma, beta, moving_mean, moving_var):
        return (x - moving_mean) / np.sqrt(moving_var + eps) * gamma + beta

    dtype = "float32"
    inputs = [('x', (10, 20), x), ('gamma', (20, ), gamma),
              ('beta', (20, ), beta), ('moving_mean', (20, ), moving_mean),
              ('moving_var', (20, ), moving_var)]

    helper(y, inputs, dtype, forward, rnd_min=0.001)
Code Example #15
def conv_block(data,
               name,
               channels,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding=(1, 1),
               epsilon=1e-5):
    """Helper function to construct conv-bn-relu"""
    # convolution + bn + relu
    conv = sym.conv2d(data=data,
                      channels=channels,
                      kernel_size=kernel_size,
                      strides=strides,
                      padding=padding,
                      use_bias=False,
                      layout="NCHW",
                      name=name + "_conv")
    bn = sym.batch_norm(data=conv, epsilon=epsilon, name=name + "_bn")
    act = sym.relu(data=bn, name=name + "_relu")
    return act
Code Example #16
File: test_correct_layout.py Project: bddppq/tvm
def test_batchnorm():
    x = sym.Variable("data", shape=(10, 20, 30, 40))
    y = sym.batch_norm(x, axis=1, epsilon=2e-5, name="bn")
    g, ldict = correct_layout(y, "NCHW")
    assert(ldict["data"][0] == "NCHW")
    assert(ldict["bn"][0] == "NCHW")
    assert(ldict["bn"][1] == "C")
    assert(ldict["bn"][2] == "C")
    assert(ldict["bn_beta"][0] == "C")
    assert(ldict["bn_gamma"][0] == "C")
    assert(ldict["bn_moving_mean"][0] == "C")
    assert(ldict["bn_moving_var"][0] == "C")
    # batch_norm can handle a sub-dimension of C as the last dimension.
    g, ldict = correct_layout(g, "NCHW16c")
    assert(ldict["data"][0] == "NCHW16c")
    assert(ldict["bn"][0] == "NCHW16c")
    assert(ldict["bn"][1] == "C16c")
    assert(ldict["bn"][2] == "C16c")
    assert(ldict["bn_beta"][0] == "C")
    assert(ldict["bn_beta_C16c"][0] == "C16c")
    assert(ldict["bn_gamma"][0] == "C")
    assert(ldict["bn_gamma_C16c"][0] == "C16c")
    assert(ldict["bn_moving_mean"][0] == "C")
    assert(ldict["bn_moving_mean_C16c"][0] == "C16c")
    assert(ldict["bn_moving_var"][0] == "C")
    assert(ldict["bn_moving_var_C16c"][0] == "C16c")
    # for any other layout, a layout transform is inserted for the data
    g, ldict = correct_layout(g, "NCH16cW")
    assert(ldict["data"][0] == "NCH16cW")
    assert(ldict["data_NCHW16c"][0] == "NCHW16c")
    assert(ldict["bn"][0] == "NCHW16c")
    assert(ldict["bn"][1] == "C16c")
    assert(ldict["bn"][2] == "C16c")
    assert(ldict["bn_beta"][0] == "C")
    assert(ldict["bn_beta_C16c"][0] == "C16c")
    assert(ldict["bn_gamma"][0] == "C")
    assert(ldict["bn_gamma_C16c"][0] == "C16c")
    assert(ldict["bn_moving_mean"][0] == "C")
    assert(ldict["bn_moving_mean_C16c"][0] == "C16c")
    assert(ldict["bn_moving_var"][0] == "C")
    assert(ldict["bn_moving_var_C16c"][0] == "C16c")
Code Example #17
def test_batchnorm():
    x = sym.Variable("data", shape=(10, 20, 30, 40))
    y = sym.batch_norm(x, axis=1, epsilon=2e-5, name="bn")
    g, ldict = correct_layout(y, "NCHW")
    assert (ldict["data"][0] == "NCHW")
    assert (ldict["bn"][0] == "NCHW")
    assert (ldict["bn"][1] == "C")
    assert (ldict["bn"][2] == "C")
    assert (ldict["bn_beta"][0] == "C")
    assert (ldict["bn_gamma"][0] == "C")
    assert (ldict["bn_moving_mean"][0] == "C")
    assert (ldict["bn_moving_var"][0] == "C")
    # batch_norm can handle a sub-dimension of C as the last dimension.
    g, ldict = correct_layout(g, "NCHW16c")
    assert (ldict["data"][0] == "NCHW16c")
    assert (ldict["bn"][0] == "NCHW16c")
    assert (ldict["bn"][1] == "C16c")
    assert (ldict["bn"][2] == "C16c")
    assert (ldict["bn_beta"][0] == "C")
    assert (ldict["bn_beta_C16c"][0] == "C16c")
    assert (ldict["bn_gamma"][0] == "C")
    assert (ldict["bn_gamma_C16c"][0] == "C16c")
    assert (ldict["bn_moving_mean"][0] == "C")
    assert (ldict["bn_moving_mean_C16c"][0] == "C16c")
    assert (ldict["bn_moving_var"][0] == "C")
    assert (ldict["bn_moving_var_C16c"][0] == "C16c")
    # for any other layout, a layout transform is inserted for the data
    g, ldict = correct_layout(g, "NCH16cW")
    assert (ldict["data"][0] == "NCH16cW")
    assert (ldict["data_NCHW16c"][0] == "NCHW16c")
    assert (ldict["bn"][0] == "NCHW16c")
    assert (ldict["bn"][1] == "C16c")
    assert (ldict["bn"][2] == "C16c")
    assert (ldict["bn_beta"][0] == "C")
    assert (ldict["bn_beta_C16c"][0] == "C16c")
    assert (ldict["bn_gamma"][0] == "C")
    assert (ldict["bn_gamma_C16c"][0] == "C16c")
    assert (ldict["bn_moving_mean"][0] == "C")
    assert (ldict["bn_moving_mean_C16c"][0] == "C16c")
    assert (ldict["bn_moving_var"][0] == "C")
    assert (ldict["bn_moving_var_C16c"][0] == "C16c")
Code Example #18
def test_batchnorm():
    x = sym.Variable("x")
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    eps = 1e-5
    y = sym.batch_norm(x, gamma, beta, moving_mean, moving_var, epsilon=eps)

    def forward(x, gamma, beta, moving_mean, moving_var):
        return (x - moving_mean) / np.sqrt(moving_var + eps) * gamma + beta

    shape = {
        'x': (10, 20),
        'gamma': (20, ),
        'beta': (20, ),
        'moving_mean': (20, ),
        'moving_var': (20, )
    }

    check_function(y, forward, in_range=(0.001, 1.0), shape=shape)
Code Example #19
File: test_simplify_inference.py Project: bddppq/tvm
def check(dim, axis, nstep):
    eps = 0.01
    x = sym.Variable("x") + 1
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    y1, y2 = x, sym.Variable("xx") + 1
    ishape = {"x": tuple(10 for i in range(dim))}
    for i in range(nstep):
        y1 = sym.batch_norm(
            y1 + 1, gamma, beta, moving_mean, moving_var, epsilon=eps, axis=axis)
        y1 = sym.dropout(y1)
        y2 = simple_bn(y2 + 1, gamma, beta, moving_mean, moving_var,
                       epsilon=eps, axis=axis, shape=ishape["x"])
    g = nnvm.graph.create(y1)
    g2 = nnvm.graph.create(y2)
    graph_attr.set_shape_inputs(g, ishape)
    g1 = g.apply("InferShape").apply("SimplifyInference")
    # the simplified graph should match the manually expanded one
    graph_util.check_graph_equal(g1, g2)
Code Example #20
File: test_top_level1.py Project: LANHUIYING/tvm
def test_batchnorm():
    x = sym.Variable("x")
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    eps = 1e-5
    y = sym.batch_norm(
        x, gamma, beta, moving_mean, moving_var, epsilon=eps)

    def forward(x, gamma, beta, moving_mean, moving_var):
        return (x - moving_mean) / np.sqrt(moving_var + eps) * gamma + beta

    shape = {
        'x': (10, 20),
        'gamma': (20,),
        'beta': (20,),
        'moving_mean': (20,),
        'moving_var': (20,)
    }

    check_function(y, forward, in_range=(0.001, 1.0), shape=shape)
Code Example #21
def test_batchnorm():
    x = sym.Variable("x")
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    eps = 1e-5
    y = sym.batch_norm(x, gamma, beta, moving_mean, moving_var, epsilon=eps)

    def forward(x, gamma, beta, moving_mean, moving_var):
        return (x - moving_mean) / np.sqrt(moving_var + eps) * gamma + beta

    dtype = "float32"
    inputs = {
        'x': ((10, 20), x),
        'gamma': ((20, ), ),
        'beta': ((20, ), ),
        'moving_mean': ((20, ), ),
        'moving_var': ((20, ), )
    }

    helper(y, inputs, dtype, forward)
Code Example #22
    def get_feature(internel_layer, layers, filters, batch_norm=False):
        """
        Get the VGG feature body as stacks of convolutions.
        layers  : [1, 1, 2, 2, 2]
        filters : [64, 128, 256, 512, 512]
        """
        for i, num in enumerate(layers):
            # i = 0, num = 1
            # i = 1, num = 1
            # i = 2, num = 2
            # i = 3, num = 2
            # i = 4, num = 2
            for j in range(num):
                internel_layer = sym.pad(data=internel_layer,
                                         pad_width=((0, 0), (1, 1), (1, 1),
                                                    (0, 0)))
                internel_layer = sym.conv2d(data=internel_layer,
                                            kernel_size=(3, 3),
                                            channels=filters[i],
                                            layout='NHWC',
                                            kernel_layout='HWOI',
                                            name="conv%s_%s" % (i + 1, j + 1))
                if batch_norm:
                    internel_layer = sym.batch_norm(data=internel_layer,
                                                    axis=3,
                                                    name="bn%s_%s" %
                                                    (i + 1, j + 1))
                internel_layer = sym.relu(data=internel_layer,
                                          name="relu%s_%s" % (i + 1, j + 1))

            internel_layer = sym.max_pool2d(data=internel_layer,
                                            pool_size=(2, 2),
                                            strides=(2, 2),
                                            layout="NHWC",
                                            name="pool%s" % (i + 1))
        return internel_layer
Code Example #23
File: test_top_level1.py Project: masa-ito-fj/nnvm
def test_batchnorm():
    x = sym.Variable("x")
    beta = sym.Variable("beta")
    gamma = sym.Variable("gamma")
    moving_var = sym.Variable("moving_var")
    moving_mean = sym.Variable("moving_mean")
    eps = 1e-5
    y = sym.batch_norm(
        x, gamma, beta, moving_mean, moving_var, epsilon=eps)

    def forward(x, gamma, beta, moving_mean, moving_var):
        return (x - moving_mean) / np.sqrt(moving_var + eps) * gamma + beta

    dtype = "float32"
    inputs = {
        'x': ((10, 20), x),
        'gamma': ((20,),),
        'beta': ((20,),),
        'moving_mean': ((20,),),
        'moving_var': ((20,),)
    }

    helper(y, inputs, dtype, forward)
Code Example #24
File: tvm_gradient.py Project: pombredanne/test
def nnvm_bn():
    x = sym.Variable("x")
    z = sym.batch_norm(x)
    grad = graph_util.gradients([z], [x])
    print(grad)
Code Example #25
def test_batchnorm():
    x = sym.Variable("x", shape=(10, 20))
    y = sym.batch_norm(1 / x, name="bn")
    sdict = infer_shape(y)
    assert (sdict["bn_gamma"][0] == [20])
Code Example #26
File: test_symbol.py Project: bddppq/tvm
def test_batch_norm():
    x = sym.Variable('x')
    y = sym.dense(x, units=30, name="fc")
    z = sym.batch_norm(x, name='bn')
    assert z.list_input_names('aux_state') == ['bn_moving_mean', 'bn_moving_var']
    assert z.list_input_names('read_only') == ['x', 'bn_gamma', 'bn_beta']
Code Example #27
File: tvm_gradient.py Project: shinh/test
def nnvm_bn():
    x = sym.Variable("x")
    z = sym.batch_norm(x)
    grad = graph_util.gradients([z], [x])
    print(grad)
Code Example #28
import numpy as np
from tvm.contrib import graph_runtime as runtime
import nnvm.symbol as sym
import nnvm.compiler
from nnvm.testing import utils

######################################################################
# Create a simple network
# -----------------------
# Let's create a very simple network for demonstration.
# It consists of convolution, batch normalization, and ReLU activation.

out_channels = 16
data = sym.Variable(name="data")
simple_net = sym.conv2d(data=data, kernel_size=(3, 3), channels=out_channels, padding=(1, 1), use_bias=True)
simple_net = sym.batch_norm(data=simple_net)
simple_net = sym.relu(data=simple_net)

batch_size = 1
data_shape = (batch_size, 3, 224, 224)
net, params = utils.create_workload(simple_net, batch_size, data_shape[1:])

######################################################################
# Build and run with cuda backend
# -------------------------------
# We build and run this network with cuda backend, as usual.
# By setting the logging level to DEBUG, the result of NNVM graph compilation will be dumped as pseudo code.
import logging
logging.basicConfig(level=logging.DEBUG) # to dump TVM IR after fusion

target = "cuda"
Code Example #29
def forward(self, inputs):
    return sym.batch_norm(inputs)
Code Example #30
File: test_top_level1.py Project: bddppq/tvm
def test_batchnorm():
    x = sym.Variable('x')
    x = sym.batch_norm(x, name="bn")
    assert x.list_input_names() == [
        "x", "bn_gamma", "bn_beta", "bn_moving_mean", "bn_moving_var"]
Code Example #31
def test_batchnorm():
    x = sym.Variable('x')
    x = sym.batch_norm(x, name="bn")
    assert x.list_input_names() == [
        "x", "bn_gamma", "bn_beta", "bn_moving_mean", "bn_moving_var"
    ]
Code Example #32
import nnvm
from nnvm import symbol as sym


if __name__ == "__main__":
    x = sym.Variable("x", shape=[4, 5, 7, 9])
    y = sym.Variable("y", shape=[6, 5, 3, 3])
    z = sym.conv2d(name="z", channels=6, kernel_size=(1, 3), strides=(1, 1), padding=(1, 1), data=x)
    a = sym.batch_norm(z)

    compute_graph = nnvm.graph.create(a)
    print(compute_graph.ir())

    deploy_graph, lib, params = nnvm.compiler.build(
        compute_graph, target="cuda")
    print(deploy_graph.ir())

    print(lib.imported_modules[0].get_source())
Code Example #33
File: bn.py Project: lyqscmy/tvm_model_zoo
import nnvm.symbol as sym
import numpy as np
import tvm

data = sym.Variable("data")
gamma = sym.Variable("gamma")
beta = sym.Variable("beta")
moving_mean = sym.Variable("moving_mean")
moving_var = sym.Variable("moving_var")
net = sym.batch_norm(data=data,
                     gamma=gamma,
                     beta=beta,
                     moving_mean=moving_mean,
                     moving_var=moving_var)
input_shape = (2, 3)
output_shape = input_shape
gamma_np = np.ones(input_shape[1], dtype="float32")
beta_np = np.zeros(input_shape[1], dtype="float32")
params = {
    "gamma": tvm.ndarray.array(gamma_np),
    "beta": tvm.ndarray.array(beta_np),
    "moving_mean": tvm.ndarray.array(beta_np),
    "moving_var": tvm.ndarray.array(gamma_np),
}
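
The file stops at parameter setup. A minimal sketch of how it might be compiled and executed, assuming the standard NNVM compile/run flow (this continuation is an illustration, not part of the original file):

import nnvm.compiler
from tvm.contrib import graph_runtime

graph, lib, params = nnvm.compiler.build(
    net, target="llvm", shape={"data": input_shape}, params=params)

m = graph_runtime.create(graph, lib, tvm.cpu(0))
m.set_input(**params)
x_np = np.random.uniform(size=input_shape).astype("float32")
m.set_input("data", x_np)
m.run()
out = m.get_output(0, tvm.nd.empty(output_shape)).asnumpy()
# with gamma=1, beta=0, mean=0, var=1 the result is x / sqrt(1 + epsilon),
# assuming batch_norm's default epsilon of 1e-5
np.testing.assert_allclose(out, x_np / np.sqrt(1.0 + 1e-5), rtol=1e-5)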