Example #1
def get_symbol(num_classes, version, **kwargs):
    """Get symbol of SqueezeNet

    Parameters
    ----------
    num_classes: int
        The number of classification results

    version : str, optional
        Version of SqueezeNet; only "1.1" is supported by this implementation
    """
    assert version == '1.1', ("Unsupported SqueezeNet version {version}: "
                              "1.1 expected".format(version=version))
    net = sym.Variable("data")

    net = sym.conv2d(net, channels=64, kernel_size=(3, 3), strides=(2, 2))
    net = sym.relu(net)
    net = sym.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
    net = _make_fire(net, 16, 64, 64)
    net = _make_fire(net, 16, 64, 64)
    net = sym.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
    net = _make_fire(net, 32, 128, 128)
    net = _make_fire(net, 32, 128, 128)
    net = sym.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
    net = _make_fire(net, 48, 192, 192)
    net = _make_fire(net, 48, 192, 192)
    net = _make_fire(net, 64, 256, 256)
    net = _make_fire(net, 64, 256, 256)

    net = sym.dropout(net, rate=0.5)
    net = sym.conv2d(net, channels=num_classes, kernel_size=(1, 1))
    net = sym.relu(net)
    net = sym.global_avg_pool2d(net)
    return sym.softmax(net, axis=1)
Example #2
def get_symbol(num_classes, version, **kwargs):
    """Get symbol of SqueezeNet

    Parameters
    ----------
    num_classes: int
        The number of classification results

    version : str, optional
        Version of SqueezeNet; only "1.1" is supported by this implementation
    """
    assert version == '1.1', ("Unsupported SqueezeNet version {version}: "
                              "1.1 expected".format(version=version))
    net = sym.Variable("data")

    net = sym.conv2d(net, channels=64, kernel_size=(3, 3), strides=(2, 2))
    net = sym.relu(net)
    net = sym.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
    net = _make_fire(net, 16, 64, 64)
    net = _make_fire(net, 16, 64, 64)
    net = sym.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
    net = _make_fire(net, 32, 128, 128)
    net = _make_fire(net, 32, 128, 128)
    net = sym.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
    net = _make_fire(net, 48, 192, 192)
    net = _make_fire(net, 48, 192, 192)
    net = _make_fire(net, 64, 256, 256)
    net = _make_fire(net, 64, 256, 256)

    net = sym.dropout(net, rate=0.5)
    net = sym.conv2d(net, channels=num_classes, kernel_size=(1, 1))
    net = sym.relu(net)
    net = sym.global_avg_pool2d(net)
    return sym.softmax(net, axis=1)
Example #3
    def get_classifier(input_data, num_classes):
        """
		Get VGG classifier layers as fc layers.
		"""
        flatten = sym.flatten(data=input_data, name="flatten")
        fc1 = sym.dense(data=flatten, units=32, name="fc1")
        relu1 = sym.relu(data=fc1, name="relu1")
        drop1 = sym.dropout(data=relu1, rate=0.5, name="drop1")
        fc2 = sym.dense(data=drop1, units=32, name="fc2")
        relu2 = sym.relu(data=fc2, name="relu2")
        drop2 = sym.dropout(data=relu2, rate=0.5, name="drop2")
        fc3 = sym.dense(data=drop2, units=num_classes, name="fc3")
        return fc3
Example #4
def verify_l2_normalize(ishape, eps, axis):
    x = sym.Variable("x")
    y = sym.l2_normalize(x, eps=eps, axis=axis)
    dtype = "float32"
    x_np = np.random.uniform(size=ishape).astype(dtype)

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": ishape})
        m = graph_runtime.create(graph, lib, ctx)
        m.run(x=x_np)
        out = m.get_output(0, tvm.nd.empty(ishape))
        out_np = topi.testing.l2_normalize_python(x_np, eps, axis)
        np.testing.assert_allclose(out.asnumpy(), out_np, atol=1e-5, rtol=1e-5)

    # Checking L2 normalization op followed by elementwise op relu
    z = sym.relu(y)
    x_np = np.random.uniform(low=-10.0, high=10.0, size=ishape).astype(dtype)
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(z, target, {"x": ishape})
        m = graph_runtime.create(graph, lib, ctx)
        m.run(x=x_np)
        out = m.get_output(0, tvm.nd.empty(ishape))
        out_np = topi.testing.l2_normalize_python(x_np, eps, axis)
        out_np = (out_np > 0) * out_np
        np.testing.assert_allclose(out.asnumpy(), out_np, atol=1e-5, rtol=1e-5)
Example #5
def _make_fire_conv(net, channels, kernel_size, padding=0):
    net = sym.conv2d(net,
                     channels=channels,
                     kernel_size=(kernel_size, kernel_size),
                     padding=(padding, padding))
    net = sym.relu(net)
    return net
Example #6
 def Conv(data,
          num_filter,
          kernel=(1, 1),
          stride=(1, 1),
          pad=(0, 0),
          name=None,
          suffix=''):
     if pad[0] != 0 or pad[1] != 0:
         data = sym.pad(data=data,
                        pad_width=((0, 0), (pad[0], pad[0]),
                                   (pad[1], pad[1]), (0, 0)))
     conv = sym.conv2d(data=data,
                       channels=num_filter,
                       kernel_size=kernel,
                       strides=stride,
                       padding=(0, 0),
                       use_bias=False,
                       layout='NHWC',
                       kernel_layout='HWOI',
                       name="%s%s_conv2d" % (name, suffix))
     bn = sym.batch_norm(data=conv,
                         name="%s%s_batchnorm" % (name, suffix),
                         epsilon=2e-5,
                         axis=3)
     act = sym.relu(data=bn, name="%s%s_relu" % (name, suffix))
     return act
Example #7
def test_alter_conv2d_layout():
    data = sym.Variable("data", shape=(1, 32, 512, 512))
    conv = sym.conv2d(data,
                      name="conv",
                      channels=16,
                      kernel_size=(3, 3),
                      padding=(1, 1),
                      use_bias=False,
                      layout="NCHW")
    relu = sym.relu(conv, name="relu")
    flatten = sym.flatten(relu, name="flatten")
    softmax = sym.softmax(flatten, name="softmax")
    g = graph.create(softmax)
    g = g.apply("CorrectLayout")
    g = graph_attr.set_dtype_inputs(g, "float32")
    g = g.apply(["InferShape", "InferType"])
    layouts_origin = get_layouts(g)

    @reg.register_alter_op_layout("conv2d")
    def alter_conv2d_layout(attrs, inputs, tinfos):
        new_attrs = {k: attrs[k] for k in attrs.keys()}
        new_attrs["layout"] = "NCHW16c"
        new_attrs["kernel_layout"] = "NCHW16c"
        new_attrs["name"] = "conv_alter"
        return sym.conv2d(inputs[0], inputs[1], **new_attrs)

    g = g.apply("AlterOpLayout")
    layouts = get_layouts(g)

    # check copy layouts
    for node in ["data", "relu", "flatten", "softmax", "conv_weight"]:
        assert (layouts[node] == layouts_origin[node])
    assert (layouts["conv_alter"] == layouts_origin["conv"])
Example #8
def test_consecutive_alter_layout():
    data = sym.Variable("data", shape=(1, 32, 512, 512))
    pool1 = sym.global_avg_pool2d(data,
                                  name="global_avg_pool2d_1",
                                  layout="NCHW")
    pool2 = sym.global_avg_pool2d(pool1,
                                  name="global_avg_pool2d_2",
                                  layout="NCHW")
    relu = sym.relu(pool2, name="relu")

    g = graph.create(relu)
    g = g.apply("CorrectLayout")
    g = graph_attr.set_dtype_inputs(g, "float32")
    g = g.apply(["InferShape", "InferType"])
    assert g.json_attr("layout") == ['NCHW', 'NCHW', 'NCHW', 'NCHW']

    @reg.register_alter_op_layout("global_avg_pool2d", level=100)
    def alter_global_avg_pool2d_layout(attrs, inputs, tinfos):
        new_attrs = {k: attrs[k] for k in attrs.keys()}
        new_attrs["layout"] = "NCHW16c"
        return sym.global_avg_pool2d(inputs[0], **new_attrs)

    g = g.apply("AlterOpLayout")

    # pool1 get replaced - output layout of pool1 is not recorded
    # pool2 get replaced - input layout of pool2 is not recorded
    # thus the second entry must be undefined - it can neither recover from pool1's output,
    # nor from pool2's input.
    assert g.json_attr("layout") == ['NCHW', '__undef__', 'NCHW', 'NCHW']
Example #9
def test_unary():
    x = sym.Variable('x')
    x = sym.exp(x)
    x = sym.log(x)
    x = sym.sigmoid(x)
    x = sym.tanh(x)
    x = sym.relu(x)
    assert x.list_input_names() == ['x']
Example #10
def test_unary():
    x = sym.Variable('x')
    x = sym.exp(x)
    x = sym.log(x)
    x = sym.sigmoid(x)
    x = sym.tanh(x)
    x = sym.relu(x)
    assert x.list_input_names() == ['x']
Example #11
 def before(x, conv_weight, conv_bias, scale, channels):
     y = sym.conv2d(x, conv_weight, conv_bias,
                    channels=channels,
                    kernel_size=(3, 3),
                    padding=(1, 1),
                    name="conv")
     y = sym.relu(y)
     y = y * sym.expand_dims(scale, axis=1, num_newaxis=2)
     return y
Example #12
 def before(x, conv_weight, conv_bias, in_scale, out_scale, channels):
     x = x * sym.expand_dims(in_scale, axis=1, num_newaxis=2)
     y = sym.conv2d(x, conv_weight, conv_bias,
                    channels=channels,
                    kernel_size=(3, 3),
                    padding=(1, 1),
                    name="conv")
     y = sym.relu(y)
     y = y * sym.expand_dims(out_scale, axis=1, num_newaxis=2)
     return y
Example #13
 def before(x, conv_weight, conv_bias, in_scale, out_scale, channels):
     x = x * sym.expand_dims(in_scale, axis=1, num_newaxis=2)
     y = sym.conv2d(x, conv_weight, conv_bias,
                    channels=channels,
                    kernel_size=(3, 3),
                    padding=(1, 1),
                    groups=54,
                    name="depthiwise_conv")
     y = sym.relu(y)
     y = y * sym.expand_dims(out_scale, axis=1, num_newaxis=2)
     return y
Example #14
 def expected(x, conv_weight, conv_bias, scale, channels):
     conv_weight = conv_weight * sym.expand_dims(scale, axis=1, num_newaxis=3)
     conv_bias = conv_bias * scale
     y = sym.conv2d(x,
                    conv_weight,
                    conv_bias,
                    channels=channels,
                    kernel_size=(3, 3),
                    padding=(1, 1),
                    name="conv")
     y = sym.relu(y)
     return y
Example #15
def test_relu():
    x = sym.Variable("x")
    y = sym.relu(sym.leaky_relu(x, alpha=0.3) - 0.2)

    def forward(x):
        x = (x < 0) * x * 0.3 + (x > 0) * x - 0.2
        return (x > 0) * x

    dtype = "float32"
    dshape = (1, 3, 32, 32)
    inputs = {'x': (dshape, x)}
    helper(y, inputs, dtype, forward)
Example #16
def test_relu():
    x = sym.Variable("x")
    y = sym.relu(sym.leaky_relu(x, alpha=0.3) - 0.2)

    def forward(x):
        x = (x < 0) * x * 0.3 + (x > 0) * x - 0.2
        return (x > 0) * x

    dtype = "float32"
    dshape = (1, 3, 32, 32)
    inputs = {'x': (dshape, x)}
    helper(y, inputs, dtype, forward)
Example #17
 def expected(x, conv_weight, conv_bias, in_scale, out_scale, channels):
     conv_weight = conv_weight * sym.expand_dims(out_scale, axis=1, num_newaxis=3)
     conv_weight = conv_weight * sym.expand_dims(in_scale, axis=1, num_newaxis=2)
     conv_bias = conv_bias * out_scale
     y = sym.conv2d(x,
                    conv_weight,
                    conv_bias,
                    channels=channels,
                    kernel_size=(3, 3),
                    padding=(1, 1),
                    name="conv")
     y = sym.relu(y)
     return y
Example #18
def test_multi_loss_graph_gradients():
    # input data
    shape1 = (1000, 100)
    data1 = sym.Variable('data1', shape=(1000, 100), dtype=0)

    # fake non-sparse label
    label = sym.full(fill_value=3)

    # square loss
    sub1 = sym.elemwise_sub(data1, label, name="sub1")
    square_loss = sym.sum(data=sub1**2, axis=1, name="square_loss")

    # fake loss1
    shape2 = (1000, )
    data2 = sym.Variable('data2', shape=shape2, dtype=0)
    loss1 = sym.sqrt(data2, name="loss1")

    # fake loss2
    loss2 = sym.relu(data1, name='loss2')

    # block loss1
    total_loss = sym.elemwise_sum(sym.block_grad(loss1),
                                  square_loss,
                                  num_args=2,
                                  name="total_loss")

    # grad_g.symbol.list_output_names()
    # >> ['loss1_grad_0_output', 'grad_sum_output']
    grad_g = graph_util.get_gradient_graph([total_loss, loss2],
                                           total_loss.list_input_variables())
    # infer shape
    in_shapes, out_shapes = graph_util.infer_shape(grad_g)
    assert out_shapes == [list(shape2), list(shape1)]

    # grad_data1 is elemwise_sum of grad_loss2, grad_square_loss
    grad_data1 = grad_g.symbol[1]
    assert grad_data1.list_attr()['num_args'] == '2'

    # block grad should return zero grad
    grad_data2 = grad_g.symbol[0]
    assert 'zeros_like' in grad_g.ir()

    # test reverse infer shape for label
    assert grad_g.apply('InferShape').json_attr('shape_num_unknown_nodes') == 0

    # infer type
    in_dtypes, out_dtypes = graph_util.infer_dtype(grad_g)
    assert out_dtypes == ['float32', 'float32']

    # test reverse infer type for label
    assert grad_g.apply('InferType').json_attr('dtype_num_unknown_nodes') == 0
Example #19
def test_multi_loss_graph_gradients():
    # input data
    shape1 = (1000, 100)
    data1 = sym.Variable('data1', shape=(1000, 100), dtype=0)

    # fake non-sparse label
    label = sym.full(fill_value=3)

    # square loss
    sub1 = sym.elemwise_sub(data1, label, name="sub1")
    square_loss = sym.sum(data=sub1**2, axis=1, name="square_loss")

    # fake loss1
    shape2 = (1000, )
    data2 = sym.Variable('data2', shape=shape2, dtype=0)
    loss1 = sym.sqrt(data2, name="loss1")

    # fake loss2
    loss2 = sym.relu(data1, name='loss2')

    # block loss1
    total_loss = sym.elemwise_sum(
        sym.block_grad(loss1),
        square_loss,
        num_args=2,
        name="total_loss")

    # grad_g.symbol.list_output_names()
    # >> ['loss1_grad_0_output', 'grad_sum_output']
    grad_g = graph_util.get_gradient_graph([total_loss, loss2], total_loss.list_input_variables())
    # infer shape
    in_shapes, out_shapes = graph_util.infer_shape(grad_g)
    assert out_shapes == [list(shape2), list(shape1)]

    # grad_data1 is elemwise_sum of grad_loss2, grad_square_loss
    grad_data1 = grad_g.symbol[1]
    assert grad_data1.list_attr()['num_args'] == '2'

    # block grad should return zero grad
    grad_data2 = grad_g.symbol[0]
    assert 'zeros_like' in grad_g.ir()

    # test reverse infer shape for label
    assert grad_g.apply('InferShape').json_attr('shape_num_unknown_nodes') == 0

    # infer type
    in_dtypes, out_dtypes = graph_util.infer_dtype(grad_g)
    assert out_dtypes == ['float32', 'float32']
    
    # test reverse infer type for label
    assert grad_g.apply('InferType').json_attr('dtype_num_unknown_nodes') == 0
Example #20
def test_residual_block_layout_transform():
    ch = 16
    size = 32
    data = sym.Variable(name="data")
    conv1 = sym.conv2d(data=data,
                       kernel_size=(3, 3),
                       channels=ch,
                       padding=(1, 1),
                       use_bias=False,
                       name="conv1")
    layout_transform1 = sym.__layout_transform__(data=conv1,
                                                 src_layout="NCHW",
                                                 dst_layout="NCHW8c")
    layout_transform2 = sym.__layout_transform__(data=layout_transform1,
                                                 src_layout="NCHW8c",
                                                 dst_layout="NCHW")
    conv2 = sym.conv2d(data=conv1,
                       kernel_size=(3, 3),
                       channels=ch,
                       padding=(1, 1),
                       use_bias=False,
                       name="conv2")
    elemwise_sum = sym.elemwise_add(layout_transform2, conv2)
    out = sym.relu(elemwise_sum)

    dtype = "float32"
    dshape = (1, ch, size, size)
    kshape = (ch, ch, 3, 3)
    oshape = (1, ch, size, size)
    shape_dict = {"data": dshape}

    target = "llvm"  # only test on llvm since it involves NCHW8c layout
    ctx = tvm.context(target, 0)
    graph, lib, _ = nnvm.compiler.build(out, target, shape_dict)
    # data, conv1 weight, conv1, layout transform + elemwise add + relu, conv2 weight, conv2 op
    assert graph.index.num_nodes == 6

    data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
    kernel1 = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
    kernel2 = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
    m = graph_runtime.create(graph, lib, ctx)
    m.run(data=data, conv1_weight=kernel1, conv2_weight=kernel2)
    out = m.get_output(0, tvm.nd.empty(oshape, dtype))

    conv1 = topi.testing.conv2d_nchw_python(data.asnumpy(), kernel1.asnumpy(),
                                            (1, 1), 'SAME')
    conv2 = topi.testing.conv2d_nchw_python(conv1, kernel2.asnumpy(), (1, 1),
                                            'SAME')
    ref = np.maximum(conv1 + conv2, 0)
    np.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5)
Example #21
 def compile(self, **kwargs):
     if kwargs['op'] == 'dense':
         return sym.dense(data=kwargs['data'],
                          weight=kwargs['weight'],
                          bias=kwargs['bias'],
                          units=kwargs['units'])
     elif kwargs['op'] == 'relu':
         return sym.relu(data=kwargs['data'])
     elif kwargs['op'] == 'leaky_relu':
         return sym.leaky_relu(data=kwargs['data'], alpha=kwargs['alpha'])
     elif kwargs['op'] == 'sigmoid':
         return sym.sigmoid(data=kwargs['data'])
     else:
         raise RuntimeError('invalid operator')
Example #22
 def expected(x, conv_weight, conv_bias, in_scale, out_scale, channels):
     conv_weight = conv_weight * sym.expand_dims(out_scale, axis=1, num_newaxis=3)
     conv_weight = conv_weight * sym.expand_dims(in_scale, axis=1, num_newaxis=3)
     conv_bias = conv_bias * out_scale
     y = sym.conv2d(x,
                    conv_weight,
                    conv_bias,
                    channels=channels,
                    kernel_size=(3, 3),
                    padding=(1, 1),
                    groups=54,
                    name="depthiwise_conv")
     y = sym.relu(y)
     return y
Example #23
def separable_conv_block(data,
                         name,
                         depthwise_channels,
                         pointwise_channels,
                         kernel_size=(3, 3),
                         downsample=False,
                         padding=(1, 1),
                         epsilon=1e-5):
    """Helper function to get a separable conv block"""
    if downsample:
        strides = (2, 2)
    else:
        strides = (1, 1)
    # depthwise convolution + bn + relu
    conv1 = sym.conv2d(data=data,
                       channels=depthwise_channels,
                       groups=depthwise_channels,
                       kernel_size=kernel_size,
                       strides=strides,
                       padding=padding,
                       use_bias=False,
                       layout="NCHW",
                       name=name + "_depthwise_conv1")
    bn1 = sym.batch_norm(data=conv1, epsilon=epsilon, name=name + "_bn1")
    act1 = sym.relu(data=bn1, name=name + "_relu1")
    # pointwise convolution + bn + relu
    conv2 = sym.conv2d(data=act1,
                       channels=pointwise_channels,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       padding=(0, 0),
                       use_bias=False,
                       layout="NCHW",
                       name=name + "_conv2")
    bn2 = sym.batch_norm(data=conv2, epsilon=epsilon, name=name + "_bn2")
    act2 = sym.relu(data=bn2, name=name + "_relu2")
    return act2
Example #24
def verify_l2_normalize(ishape, eps, axis):
    x = sym.Variable("x", shape=ishape)
    y = sym.l2_normalize(x, eps=eps, axis=axis)

    def forward1(x):
        return topi.testing.l2_normalize_python(x, eps, axis)

    check_function(y, forward1)

    def forward2(x):
        y = forward1(x)
        return (y > 0) * y

    # Checking L2 normalization op followed by elementwise op relu
    check_function(sym.relu(y), forward2, in_range={'x': (-10.0, 10.0)})
Example #25
def verify_lrn(ishape, size, axis, bias, alpha, beta):
    x = sym.Variable("x", shape=ishape)
    y = sym.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)

    def forward1(x):
        return topi.testing.lrn_python(x, size, axis, bias, alpha, beta)

    check_function(y, forward1)

    def forward2(x):
        y = forward1(x)
        return (y > 0) * y

    # Checking LRN op followed by elementwise op relu
    check_function(sym.relu(y), forward2, in_range={'x': (-10.0, 10.0)})
Example #26
def test_relu():
    x = sym.Variable("x")
    y = sym.relu(sym.leaky_relu(x, alpha=0.3) - 0.2)

    def forward(x):
        x = (x < 0) * x * 0.3 + (x > 0) * x - 0.2
        return (x > 0) * x

    def backward(head_grads, x):
        sub = (x < 0) * x * 0.3 + (x > 0) * x - 0.2
        return [(sub > 0).astype("float") * \
                ((x > 0).astype("float") + 0.3 * (x < 0).astype("float")) * head_grads]

    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)
Example #27
def verify_lrn(ishape, size, axis, bias, alpha, beta):
    x = sym.Variable("x", shape=ishape)
    y = sym.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)

    def forward1(x):
        return topi.testing.lrn_python(x, size, axis, bias, alpha, beta)

    check_function(y, forward1)

    def forward2(x):
        y = forward1(x)
        return (y > 0)*y

    # Checking LRN op followed by elementwise op relu
    check_function(sym.relu(y), forward2, in_range={'x': (-10.0, 10.0)})
Example #28
def verify_l2_normalize(ishape, eps, axis):
    x = sym.Variable("x", shape=ishape)
    y = sym.l2_normalize(x, eps=eps, axis=axis)

    def forward1(x):
        return topi.testing.l2_normalize_python(x, eps, axis)

    check_function(y, forward1)

    def forward2(x):
        y = forward1(x)
        return (y > 0)*y

    # Checking L2 normalization op followed by elementwise op relu
    check_function(sym.relu(y), forward2, in_range={'x': (-10.0, 10.0)})
Example #29
def test_relu():
    x = sym.Variable("x")
    y = sym.relu(sym.leaky_relu(x, alpha=0.3) - 0.2)

    def forward(x):
        x = (x < 0) * x * 0.3 + (x > 0) * x - 0.2
        return (x > 0) * x

    def backward(head_grads, x):
        sub = (x < 0) * x * 0.3 + (x > 0) * x - 0.2
        return [(sub > 0).astype("float") * \
                ((x > 0).astype("float") + 0.3 * (x < 0).astype("float")) * head_grads]

    shape = {'x': (1, 3, 32, 32)}
    check_function(y, forward, backward, shape=shape)
Example #30
def test_relu():
    x = sym.Variable("x")
    y = sym.leaky_relu(x, alpha=0.3) - 0.2
    y = sym.relu(y)
    dtype = "float32"
    dshape = (1, 3, 32, 32)
    oshape = dshape
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, {"x": dshape})
        m = graph_runtime.create(graph, lib, ctx)
        data = np.random.uniform(size=dshape).astype(dtype)
        m.run(x=data)
        data = (data < 0) * data * 0.3 + (data > 0) * data - 0.2
        data = (data > 0) * data
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
        np.testing.assert_allclose(out.asnumpy(), data, atol=1e-5, rtol=1e-5)
Example #31
def test_dtypes():
    x = sym.Variable("x")
    y = sym.relu(x)
    dshape = (1, 3, 32, 32)
    oshape = dshape
    for dtype in ['float32', 'float64', 'int32', 'int16', 'int8', 'int64']:
        graph, lib, _ = nnvm.compiler.build(y, 'llvm', {"x": dshape}, dtype=dtype)
        m = graph_runtime.create(graph, lib, tvm.cpu())
        if 'float' in dtype:
            data = np.random.uniform(size=dshape).astype(dtype)
        elif 'int' in dtype:
            data = np.random.randint(-127, 127, dshape).astype(dtype)
        m.run(x=data)
        data = (data > 0) * data
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
        tvm.testing.assert_allclose(out.asnumpy(), data, atol=1e-5, rtol=1e-5)
Example #32
def test_dtypes():
    x = sym.Variable("x")
    y = sym.relu(x)
    dshape = (1, 3, 32, 32)
    oshape = dshape
    for dtype in ['float32', 'float64', 'int32', 'int16', 'int8', 'int64']:
        graph, lib, _ = nnvm.compiler.build(y, 'llvm', {"x": dshape}, dtype=dtype)
        m = graph_runtime.create(graph, lib, tvm.cpu())
        if 'float' in dtype:
            data = np.random.uniform(size=dshape).astype(dtype)
        elif 'int' in dtype:
            data = np.random.randint(-127, 127, dshape).astype(dtype)
        m.run(x=data)
        data = (data > 0) * data
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
        np.testing.assert_allclose(out.asnumpy(), data, atol=1e-5, rtol=1e-5)
Example #33
def test_relu():
    x = sym.Variable("x")
    y = sym.relu(sym.leaky_relu(x, alpha=0.3) - 0.2)

    def forward(x):
        x = (x < 0) * x * 0.3 + (x > 0) * x - 0.2
        return (x > 0) * x

    def backward(head_grads, x):
        sub = (x < 0) * x * 0.3 + (x > 0) * x - 0.2
        return [(sub > 0).astype("float") * \
                ((x > 0).astype("float") + 0.3 * (x < 0).astype("float")) * head_grads]

    dtype = "float32"
    dshape = (1, 3, 32, 32)
    inputs = [('x', dshape, x)]
    helper(y, inputs, dtype, forward, backward)
Example #34
def fc_layer(data, units, name):
    w = sym.Variable(name + "_w")
    b = sym.Variable(name + "_b")
    fc = sym.dense(data=data, weight=w, bias=b, units=units, name=name + '_fc')
    relu = sym.relu(data=fc, name=name + '_relu')

    gamma = sym.Variable(name + "_gamma")
    beta = sym.Variable(name + "_beta")
    moving_mean = sym.Variable(name + "_moving_mean")
    moving_var = sym.Variable(name + "_moving_var")
    bn = sym.batch_norm(data=relu,
                        gamma=gamma,
                        beta=beta,
                        moving_mean=moving_mean,
                        moving_var=moving_var,
                        name=name + '_bn')
    return bn
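A short usage sketch for the helper above (the layer names, unit counts, and the final softmax head are illustrative assumptions, not part of the original snippet): stacking two of these dense-relu-batch_norm blocks into a small classifier could look like this.

# hypothetical usage of fc_layer: a tiny two-block MLP head
data = sym.Variable("data")
net = fc_layer(data, units=128, name="layer1")
net = fc_layer(net, units=64, name="layer2")
net = sym.dense(data=net, units=10, name="out_fc")
net = sym.softmax(net, name="out_softmax")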
Example #35
def conv_block(data,
               name,
               channels,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding=(1, 1),
               epsilon=1e-5):
    """Helper function to construct conv-bn-relu"""
    # convolution + bn + relu
    conv = sym.conv2d(data=data,
                      channels=channels,
                      kernel_size=kernel_size,
                      strides=strides,
                      padding=padding,
                      use_bias=False,
                      layout="NCHW",
                      name=name + "_conv")
    bn = sym.batch_norm(data=conv, epsilon=epsilon, name=name + "_bn")
    act = sym.relu(data=bn, name=name + "_relu")
    return act
Example #36
def test_alter_func_return_none():
    data = sym.Variable("data", shape=(1, 32, 512, 512))
    pool1 = sym.global_max_pool2d(data, name="pool1", layout="NCHW")
    pool2 = sym.global_max_pool2d(pool1, name="pool2", layout="NCHW")
    relu = sym.relu(pool2, name="relu")

    g = graph.create(relu)
    g = g.apply("CorrectLayout")
    g = graph_attr.set_dtype_inputs(g, "float32")
    g = g.apply(["InferShape", "InferType"])
    assert g.json_attr("layout") == ['NCHW', 'NCHW', 'NCHW', 'NCHW']

    @reg.register_alter_op_layout("global_max_pool2d", level=100)
    def alter_global_max_pool2d_layout(attrs, inputs, tinfos):
        return None

    g = g.apply("AlterOpLayout")

    # alter func return none, nothing get replaced,
    # the layouts should remain the same
    assert g.json_attr("layout") == ['NCHW', 'NCHW', 'NCHW', 'NCHW']
Example #37
def yolo(num_classes=1470):
    data = sym.Variable("data")
    body = conv2d_block(data, "conv1", 64, kernel_size=(7, 7), strides=(2, 2), padding=(3, 3))
    body = sym.max_pool2d(data=body, pool_size=(2, 2), strides=(2, 2), name="pool1")
    body = conv2d_block(body, "conv2", 192, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = sym.max_pool2d(data=body, pool_size=(2, 2), strides=(2, 2), name="pool2")

    body = conv2d_block(body, "conv3", 128, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv4", 256, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv5", 256, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv6", 512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = sym.max_pool2d(data=body, pool_size=(2, 2), strides=(2, 2), name="pool3")

    body = conv2d_block(body, "conv7", 256, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv8", 512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv9", 256, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv10", 512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv11", 256, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv12", 512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv13", 256, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv14", 512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv15", 512, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv16", 1024, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = sym.max_pool2d(data=body, pool_size=(2, 2), strides=(2, 2), name="pool4")

    body = conv2d_block(body, "conv17", 512, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv18", 1024, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv19", 512, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv20", 1024, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv21", 1024, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv22", 1024, kernel_size=(3, 3), strides=(2, 2), padding=(1, 1))
    body = conv2d_block(body, "conv23", 1024, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv24", 1024, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))

    flatten = sym.flatten(data=body, name="flatten")
    fc = sym.dense(data=flatten, units=4096, use_bias=False, name="fc1")
    act = sym.relu(data=fc, name="relu1")
    fc = sym.dense(data=act, units=num_classes, use_bias=False, name="fc2")

    return fc 
Example #38
    def get_feature(internel_layer, layers, filters, batch_norm=False):
        """
		Get VGG feature body as stacks of convoltions.
		layers  : [1, 1, 2, 2, 2]
		filters : [64, 128, 256, 512, 512]
		"""
        for i, num in enumerate(layers):
            """
			i = 0, num = 1
			i = 1, num = 1
			i = 2, num = 2
			i = 3, num = 2
			i = 4, num = 2
			"""
            for j in range(num):
                internel_layer = sym.pad(data=internel_layer,
                                         pad_width=((0, 0), (1, 1), (1, 1),
                                                    (0, 0)))
                internel_layer = sym.conv2d(data=internel_layer,
                                            kernel_size=(3, 3),
                                            channels=filters[i],
                                            layout='NHWC',
                                            kernel_layout='HWOI',
                                            name="conv%s_%s" % (i + 1, j + 1))
                if batch_norm:
                    internel_layer = sym.batch_norm(data=internel_layer,
                                                    axis=3,
                                                    name="bn%s_%s" %
                                                    (i + 1, j + 1))
                internel_layer = sym.relu(data=internel_layer,
                                          name="relu%s_%s" % (i + 1, j + 1))

            internel_layer = sym.max_pool2d(data=internel_layer,
                                            pool_size=(2, 2),
                                            strides=(2, 2),
                                            layout="NHWC",
                                            name="pool%s" % (i + 1))
        return internel_layer
Example #39
def test_residual_block_layout_transform():
    ch = 16
    size = 32
    data = sym.Variable(name="data")
    conv1 = sym.conv2d(data=data, kernel_size=(3,3), channels=ch, padding = (1, 1), use_bias=False, name="conv1")
    layout_transform1 = sym.__layout_transform__(data=conv1, src_layout="NCHW", dst_layout="NCHW8c")
    layout_transform2 = sym.__layout_transform__(data=layout_transform1, src_layout="NCHW8c", dst_layout="NCHW")
    conv2 = sym.conv2d(data=conv1, kernel_size=(3,3), channels=ch, padding = (1, 1), use_bias=False, name="conv2")
    elemwise_sum = sym.elemwise_add(layout_transform2, conv2)
    out = sym.relu(elemwise_sum)

    dtype="float32"
    dshape = (1, ch, size, size)
    kshape = (ch, ch, 3, 3)
    oshape = (1, ch, size, size)
    shape_dict = {"data": dshape}

    target = "llvm" # only test on llvm since it involves NCHW8c layout
    ctx = tvm.context(target, 0)
    graph, lib, _ = nnvm.compiler.build(out, target, shape_dict)
    # data, conv1 weight, conv1, layout transform + elemwise add + relu, conv2 weight, conv2 op
    assert graph.index.num_nodes == 6

    data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
    kernel1 = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
    kernel2 = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
    m = graph_runtime.create(graph, lib, ctx)
    m.run(data=data, conv1_weight=kernel1, conv2_weight=kernel2)
    out = m.get_output(0, tvm.nd.empty(oshape, dtype))

    conv1 = topi.testing.conv2d_nchw_python(
        data.asnumpy(), kernel1.asnumpy(), (1,1), 'SAME')
    conv2 = topi.testing.conv2d_nchw_python(
        conv1, kernel2.asnumpy(), (1,1), 'SAME')
    ref = np.maximum(conv1 + conv2, 0)
    tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5)
Example #40
 def elu(data):
     return -0.5 * sym.relu(1 - sym.exp(data)) + sym.relu(data)
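For data < 0 the expression above reduces to 0.5 * (exp(data) - 1), and for data >= 0 it is simply data, i.e. an ELU with alpha = 0.5. A small NumPy check of that identity (standalone, independent of nnvm; the helper names are chosen here for illustration):

import numpy as np

def elu_reference(x, alpha=0.5):
    # piecewise ELU definition
    return np.where(x < 0, alpha * (np.exp(x) - 1), x)

def elu_via_relu(x):
    # same composition of relu/exp as the nnvm expression above
    relu = lambda v: np.maximum(v, 0)
    return -0.5 * relu(1 - np.exp(x)) + relu(x)

x = np.linspace(-3.0, 3.0, 101)
np.testing.assert_allclose(elu_via_relu(x), elu_reference(x), rtol=1e-6, atol=1e-12)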
Example #41
from tvm.contrib import graph_runtime as runtime
import nnvm.symbol as sym
import nnvm.compiler
from nnvm.testing import utils

######################################################################
# Create a simple network
# -----------------------
# Let's create a very simple network for demonstration.
# It consists of convolution, batch normalization, and ReLU activation.

out_channels = 16
data = sym.Variable(name="data")
simple_net = sym.conv2d(data=data, kernel_size=(3,3), channels=out_channels, padding = (1, 1), use_bias=True)
simple_net = sym.batch_norm(data=simple_net)
simple_net = sym.relu(data=simple_net)

batch_size = 1
data_shape = (batch_size, 3, 224, 224)
net, params = utils.create_workload(simple_net, batch_size, data_shape[1:])

######################################################################
# Build and run with cuda backend
# -------------------------------
# We build and run this network with cuda backend, as usual.
# By setting the logging level to DEBUG, the result of NNVM graph compilation will be dumped as pseudo code.
import logging
logging.basicConfig(level=logging.DEBUG) # to dump TVM IR after fusion

target = "cuda"
graph, lib, params = nnvm.compiler.build(
    net, target, shape={"data": data_shape}, params=params)
Example #42
def test_cnn_gradients():
    # input data
    h = 128
    w = 128
    data_shape = (1000, 3, h, w)
    data = sym.Variable('data', shape=data_shape, dtype=0)

    # conv2d
    num_channels = 64
    kernel_size = 32
    conv_w_shape = (num_channels, 3, kernel_size, kernel_size)
    conv_b_shape = (num_channels,)
    conv_w = sym.Variable('conv_w', shape=conv_w_shape)
    conv_b = sym.Variable('conv_b', shape=conv_b_shape)
    conv1 = sym.conv2d(data=data, weight=conv_w, bias=conv_b,
                      channels=num_channels, kernel_size=(kernel_size, kernel_size),
                      name='conv1')
    # relu1
    relu1 = sym.relu(data=conv1, name='relu1')
    # max pooling
    max_pooling1 = sym.max_pool2d(data=relu1, pool_size=(2, 2), name='max_pooling1')
    # flatten
    flatten1 = sym.flatten(data=max_pooling1)
    # shape after flatten
    flatten_out_shape = (h - kernel_size) * (w - kernel_size) * num_channels
    # dense1
    dense1_hidden_units = 100
    dense1 = sym.dense(data=flatten1, name='dense1', units=dense1_hidden_units)
    # relu2
    relu2 = sym.relu(data=dense1, name='relu2')
    # dense2
    dense2_hidden_units = 10
    dense2 = sym.dense(data=relu2, name='dense2', units=dense2_hidden_units)
    # softmax
    mlp = sym.softmax(data=dense2, name='softmax')
    # fake non-sparse label
    label = sym.full_like(mlp, fill_value=1)
    # cross entropy loss
    ce_loss = sym.sum(
        sym.elemwise_mul(sym.log_softmax(dense2), label),
        axis=1,
        keepdims=True,
        name="ce_loss")

    # input variables:
    # print grad_g.symbol.list_input_names()
    # >> ['data', 'conv_w', 'conv_b',
    #     'dense1_weight', 'dense1_bias',
    #     'dense2_weight', 'dense2_bias']

    # output gradient variables:
    # print grad_g.symbol.list_output_names()
    # >> ['conv1_grad_data', 'conv1_grad_weight', 'conv1_grad_bias',
    #     'dense1_grad_weight', 'dense1_grad_bias',
    #     'dense2_grad_weight', 'dense2_grad_bias']
    grad_g = graph_util.get_gradient_graph(ce_loss, ce_loss.list_input_variables())

    # infer shape
    in_shapes, out_shapes = graph_util.infer_shape(grad_g)

    # forward graph shape
    assert in_shapes == [list(data_shape), list(conv_w_shape), list(conv_b_shape),
                          [dense1_hidden_units, flatten_out_shape], [dense1_hidden_units],
                          [dense2_hidden_units, dense1_hidden_units], [dense2_hidden_units]]
    # input grads shape should be equal with input shape
    assert in_shapes == out_shapes

    # output grads w.r.t input variables
    grads = graph_util.gradients(ce_loss, ce_loss.list_input_variables())

    # gradients number should be equal with grad_input number
    assert len(grads) == len(ce_loss.list_input_variables())

    # infer type
    in_dtypes, out_dtypes = graph_util.infer_dtype(grad_g)
    assert out_dtypes == ['float32', 'float32', 'float32', 'float32', 'float32', 'float32', 'float32']
Example #43
 def forward(self, inputs):
     return sym.relu(inputs)
Example #44
import nnvm
import nnvm.compiler
import nnvm.symbol as sym

data = sym.Variable('data')
weight = sym.Variable('weight')
c1 = sym.conv2d(data=data,
                weight=weight,
                use_bias=False,
                channels=32,
                kernel_size=(4, 4))

p1 = sym.avg_pool2d(data=c1, pool_size=(4, 4))
s1 = sym.relu(data=p1)

compute_graph = nnvm.graph.create(s1)
print('-------original')
print(compute_graph.ir())
print('-------after')

deploy_graph, lib, params = nnvm.compiler.build(
    compute_graph,
    target='llvm',
    shape={'data': (1, 32, 32, 32)},
    dtype='float32')
print(deploy_graph.ir())
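As a follow-up, the compiled artifacts could be executed with the graph runtime, mirroring the pattern used in the test examples above (the random input here is purely illustrative):

import numpy as np
import tvm
from tvm.contrib import graph_runtime

# feed random data and the compiled parameters into the runtime module
module = graph_runtime.create(deploy_graph, lib, tvm.cpu(0))
module.set_input('data', np.random.uniform(size=(1, 32, 32, 32)).astype('float32'))
module.set_input(**params)  # compiled parameters, if any
module.run()
print(module.get_output(0).asnumpy().shape)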
Example #45
def _make_fire_conv(net, channels, kernel_size, padding=0):
    net = sym.conv2d(net, channels=channels, kernel_size=(kernel_size, kernel_size),
                     padding=(padding, padding))
    net = sym.relu(net)
    return net
Example #46
 def elu(data):
     return -0.5 * sym.relu(1 - sym.exp(data)) + sym.relu(data)