Example No. 1
def get_symbol(num_classes, version, **kwargs):
    """Get symbol of SqueezeNet

    Parameters
    ----------
    num_classes: int
        The number of output classes

    version : str, optional
        "1.0" or "1.1" of SqueezeNet
    """
    assert version == '1.1', ("Unsupported SqueezeNet version {version}: "
                              "1.1 expected".format(version=version))
    net = sym.Variable("data")

    net = sym.conv2d(net, channels=64, kernel_size=(3, 3), strides=(2, 2))
    net = sym.relu(net)
    net = sym.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
    net = _make_fire(net, 16, 64, 64)
    net = _make_fire(net, 16, 64, 64)
    net = sym.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
    net = _make_fire(net, 32, 128, 128)
    net = _make_fire(net, 32, 128, 128)
    net = sym.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
    net = _make_fire(net, 48, 192, 192)
    net = _make_fire(net, 48, 192, 192)
    net = _make_fire(net, 64, 256, 256)
    net = _make_fire(net, 64, 256, 256)

    net = sym.dropout(net, rate=0.5)
    net = sym.conv2d(net, channels=num_classes, kernel_size=(1, 1))
    net = sym.relu(net)
    net = sym.global_avg_pool2d(net)
    return sym.softmax(net, axis=1)
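A symbol like this is typically compiled with nnvm.compiler.build, as the test examples further down do. A minimal usage sketch, not from the source: the "llvm" target and the 1x3x224x224 input shape are assumptions, and _make_fire must be in scope.

import nnvm.compiler

net = get_symbol(num_classes=1000, version='1.1')
# "llvm" targets the local CPU; the input shape is an assumption
graph, lib, params = nnvm.compiler.build(
    net, target="llvm", shape={"data": (1, 3, 224, 224)})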
Example No. 2
def overfeat(num_classes=1000):
    data = sym.Variable("data")
    body = conv2d_block(data,
                        "conv1",
                        96,
                        kernel_size=(11, 11),
                        strides=(4, 4),
                        padding=(5, 5))
    body = sym.max_pool2d(data=body,
                          pool_size=(2, 2),
                          strides=(2, 2),
                          name="pool1")
    body = conv2d_block(body,
                        "conv2",
                        256,
                        kernel_size=(5, 5),
                        strides=(1, 1),
                        padding=(2, 2))
    body = sym.max_pool2d(data=body,
                          pool_size=(2, 2),
                          strides=(2, 2),
                          name="pool2")

    body = conv2d_block(body,
                        "conv3",
                        512,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = conv2d_block(body,
                        "conv4",
                        1024,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))
    body = conv2d_block(body,
                        "conv5",
                        1024,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding=(1, 1))

    body = sym.max_pool2d(data=body,
                          pool_size=(2, 2),
                          strides=(2, 2),
                          name="pool3")

    flatten = sym.flatten(data=body, name="flatten")
    fc = sym.dense(data=flatten, units=3072, use_bias=False, name="fc1")
    fc = sym.dense(data=fc, units=4096, use_bias=False, name="fc2")
    fc = sym.dense(data=fc, units=num_classes, use_bias=False, name="fc3")

    return fc
Example No. 3
def get_sym(layout, kernel_layout, channels):
    data = sym.Variable(name="data")
    data = sym.conv2d(data=data, kernel_size=(3,3), channels=channels, padding=(1, 1),
                      layout=layout, kernel_layout=kernel_layout, use_bias=True)
    data = sym.max_pool2d(data=data, pool_size=(2, 2), strides=(2, 2), layout=layout)
    data = sym.upsampling(data=data, scale=2, layout=layout)
    softmax_axis = 1
    if layout == "NHWC":
        softmax_axis = 3
    data = sym.softmax(data=data, axis=softmax_axis)
    return data
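The helper above is parameterized so the same graph can be built in either layout. An illustrative pair of calls (the kernel layouts are assumed values; note that Example No. 10 below uses 'HWOI' for NHWC instead):

# kernel layouts chosen to match each data layout (assumptions)
net_nchw = get_sym(layout="NCHW", kernel_layout="OIHW", channels=32)
net_nhwc = get_sym(layout="NHWC", kernel_layout="HWIO", channels=32)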
Example No. 4
def _alter_max_pool2d_layout(attrs, inputs, tinfo):
    import nnvm.symbol as sym
    copy_inputs = [s for s in inputs]
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    # NHWC -> NCHW
    if attrs["layout"] != "NHWC":
        return None
    new_attrs["layout"] = "NCHW"
    if "target" in new_attrs:
        del new_attrs["target"]
    return sym.max_pool2d(*copy_inputs, **new_attrs)
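Callbacks like this are invoked by the compiler's AlterOpLayout pass once registered with the operator registry. A sketch of the registration, assuming the decorator pattern NNVM uses for conv2d also applies to max_pool2d:

from nnvm.top import registry as reg

# level gives this registration priority over any built-in one (assumed)
@reg.register_alter_op_layout("max_pool2d", level=100)
def alter_max_pool2d_layout(attrs, inputs, tinfos):
    return _alter_max_pool2d_layout(attrs, inputs, tinfos)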
Example No. 5
def yolo(num_classes=1470):
    data = sym.Variable("data")
    body = conv2d_block(data, "conv1", 64, kernel_size=(7, 7), strides=(2, 2), padding=(3, 3))
    body = sym.max_pool2d(data=body, pool_size=(2, 2), strides=(2, 2), name="pool1")
    body = conv2d_block(body, "conv2", 192, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = sym.max_pool2d(data=body, pool_size=(2, 2), strides=(2, 2), name="pool2")

    body = conv2d_block(body, "conv3", 128, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv4", 256, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv5", 256, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv6", 512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = sym.max_pool2d(data=body, pool_size=(2, 2), strides=(2, 2), name="pool3")

    body = conv2d_block(body, "conv7", 256, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv8", 512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv9", 256, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv10", 512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv11", 256, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv12", 512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv13", 256, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv14", 512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv15", 512, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv16", 1024, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = sym.max_pool2d(data=body, pool_size=(2, 2), strides=(2, 2), name="pool4")

    body = conv2d_block(body, "conv17", 512, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv18", 1024, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv19", 512, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0))
    body = conv2d_block(body, "conv20", 1024, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv21", 1024, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv22", 1024, kernel_size=(3, 3), strides=(2, 2), padding=(1, 1))
    body = conv2d_block(body, "conv23", 1024, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))
    body = conv2d_block(body, "conv24", 1024, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1))

    flatten = sym.flatten(data=body, name="flatten")
    fc = sym.dense(data=flatten, units=4096, use_bias=False, name="fc1")
    act = sym.relu(data=fc, name="relu1")
    fc = sym.dense(data=act, units=num_classes, use_bias=False, name="fc2")

    return fc
Example No. 6
def test_max_pool2d():
    x = sym.Variable("x")
    y = sym.max_pool2d(x, pool_size=(2,2), strides=(2,2),
                       padding=(0,0), name="y", ceil_mode=True)
    dtype = "float32"
    dshape = (1, 3, 28, 28)
    oshape = (1, 3, 14, 14)
    shape_dict = {"x": dshape}
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
        # reference result: 2x2/stride-2 max pool via reshape + max over the window axes
        b_np = np.max(data.asnumpy().reshape(1,3,14,2,14,2), axis=(3,5))
        np.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)
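The expected 14x14 output follows the pooling shape formula: with ceil_mode=True each spatial dim is ceil((in + 2 * pad - pool) / stride) + 1. A quick hand check (not part of the test):

import math
in_hw, pool, stride, pad = 28, 2, 2, 0
out_hw = math.ceil((in_hw + 2 * pad - pool) / stride) + 1
assert out_hw == 14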
Example No. 7
def test_max_pool2d():
    x = sym.Variable("x")
    y = sym.max_pool2d(x, pool_size=(2,2), strides=(2,2),
                       padding=(0,0), name="y", ceil_mode=True)
    dtype = "float32"
    dshape = (1, 3, 28, 28)
    oshape = (1, 3, 14, 14)
    shape_dict = {"x": dshape}
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(y, target, shape_dict)
        m = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        m.run(x=data)
        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
        b_np = np.max(data.asnumpy().reshape(1,3,14,2,14,2), axis=(3,5))
        tvm.testing.assert_allclose(out.asnumpy(), b_np, rtol=1e-5)
Example No. 8
def test_max_pool2d():
    x = sym.Variable("data", shape=(1, 32, 512, 512))
    y = sym.max_pool2d(x, name="pool", pool_size=(3,3),
                       padding=(1,1), layout="NCHW")
    g, ldict = correct_layout(y)
    assert(ldict["data"][0] == "NCHW")
    assert(ldict["pool"][0] == "NCHW")
    # if the indices of H and W remain the same,
    # pool2d does not convert the layout.
    g, ldict = correct_layout(g, "NCHW16c")
    assert(ldict["data"][0] == "NCHW16c")
    assert(ldict["pool"][0] == "NCHW16c")
    # other layouts require a layout transform.
    g, ldict = correct_layout(g, "NHWC")
    assert(ldict["data"][0] == "NHWC")
    assert(ldict["data_NCHW"][0] == "NCHW")
    assert(ldict["pool"][0] == "NCHW")
Example No. 9
def Pooling(data, kernel, stride, pad, pool_type, name):
    if pad[0] != 0 or pad[1] != 0:
        data = sym.pad(data=data,
                       pad_width=((0, 0), (pad[0], pad[0]),
                                  (pad[1], pad[1]), (0, 0)))
    if pool_type == 'max':
        return sym.max_pool2d(data=data,
                              pool_size=kernel,
                              strides=stride,
                              name=name,
                              layout='NHWC')
    if pool_type == 'avg':
        return sym.avg_pool2d(data=data,
                              pool_size=kernel,
                              strides=stride,
                              name=name,
                              layout='NHWC')
    raise ValueError("Invalid pooling type: " + pool_type)
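An illustrative call (the sizes are assumptions; note that the pad tuple is applied symmetrically to the H and W axes of the NHWC input before pooling):

data = sym.Variable("data")  # NHWC input, e.g. (1, 28, 28, 16)
out = Pooling(data, kernel=(3, 3), stride=(2, 2), pad=(1, 1),
              pool_type='max', name='pool1')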
Example No. 10
def get_feature(internal_layer, layers, filters, batch_norm=False):
    """Get VGG feature body as stacks of convolutions.

    layers  : [1, 1, 2, 2, 2]
    filters : [64, 128, 256, 512, 512]
    """
    for i, num in enumerate(layers):
        # (i, num) runs over (0, 1), (1, 1), (2, 2), (3, 2), (4, 2)
        for j in range(num):
            internal_layer = sym.pad(data=internal_layer,
                                     pad_width=((0, 0), (1, 1), (1, 1),
                                                (0, 0)))
            internal_layer = sym.conv2d(data=internal_layer,
                                        kernel_size=(3, 3),
                                        channels=filters[i],
                                        layout='NHWC',
                                        kernel_layout='HWOI',
                                        name="conv%s_%s" % (i + 1, j + 1))
            if batch_norm:
                internal_layer = sym.batch_norm(data=internal_layer,
                                                axis=3,
                                                name="bn%s_%s" %
                                                (i + 1, j + 1))
            internal_layer = sym.relu(data=internal_layer,
                                      name="relu%s_%s" % (i + 1, j + 1))

        internal_layer = sym.max_pool2d(data=internal_layer,
                                        pool_size=(2, 2),
                                        strides=(2, 2),
                                        layout="NHWC",
                                        name="pool%s" % (i + 1))
    return internal_layer
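A usage sketch with the layer/filter configuration named in the docstring (the NHWC input variable is an assumption):

data = sym.Variable("data")  # NHWC input
body = get_feature(data, layers=[1, 1, 2, 2, 2],
                   filters=[64, 128, 256, 512, 512], batch_norm=True)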
Example No. 11
def test_max_pool2d():
    x = sym.Variable('x')
    y = sym.max_pool2d(x, pool_size=(3, 3), name="y")
    y = sym.global_max_pool2d(y)
    assert y.list_input_names() == ["x"]
Example No. 12
def check(in_shape, out_shape, **kwargs):
    x = sym.Variable("x", shape=in_shape)
    y = sym.max_pool2d(x, name="y", **kwargs)
    sdict = infer_shape(y)
    assert tuple(sdict["y"][0]) == tuple(out_shape)
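Illustrative invocations, with expected shapes worked out from the standard pooling formula (these specific calls are examples, not from the source):

# 2x2 pooling with stride 2 halves H and W: (28 - 2) / 2 + 1 = 14
check((1, 3, 28, 28), (1, 3, 14, 14), pool_size=(2, 2), strides=(2, 2))
# 3x3 pooling with padding 1 and default stride (1, 1) preserves H and W
check((1, 3, 28, 28), (1, 3, 28, 28), pool_size=(3, 3), padding=(1, 1))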
Example No. 13
def max_pool2d_callback(attrs, inputs, tinfos):
    print("MAX_POOL2D!!!")
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    new_attrs['layout'] = 'NCHWc'
    return sym.max_pool2d(inputs[0], **new_attrs)
Example No. 14
def test_cnn_gradients():
    # input data
    h = 128
    w = 128
    data_shape = (1000, 3, h, w)
    data = sym.Variable('data', shape=data_shape, dtype=0)  # dtype code 0 is float32

    # conv2d
    num_channels = 64
    kernel_size = 32
    conv_w_shape = (num_channels, 3, kernel_size, kernel_size)
    conv_b_shape = (num_channels, )
    conv_w = sym.Variable('conv_w', shape=conv_w_shape)
    conv_b = sym.Variable('conv_b', shape=conv_b_shape)
    conv1 = sym.conv2d(data=data,
                       weight=conv_w,
                       bias=conv_b,
                       channels=num_channels,
                       kernel_size=(kernel_size, kernel_size),
                       name='conv1')
    # relu1
    relu1 = sym.relu(data=conv1, name='relu1')
    # max pooling
    max_pooling1 = sym.max_pool2d(data=relu1,
                                  pool_size=(2, 2),
                                  name='max_pooling1')
    # flatten
    flatten1 = sym.flatten(data=max_pooling1)
    # shape after flatten: the 'valid' conv leaves (h - kernel_size + 1) per
    # spatial dim; the 2x2 max pool with default (1, 1) strides then leaves
    # (h - kernel_size)
    flatten_out_shape = (h - kernel_size) * (w - kernel_size) * num_channels
    # dense1
    dense1_hidden_units = 100
    dense1 = sym.dense(data=flatten1, name='dense1', units=dense1_hidden_units)
    # relu2
    relu2 = sym.relu(data=dense1, name='relu2')
    # dense2
    dense2_hidden_units = 10
    dense2 = sym.dense(data=relu2, name='dense2', units=dense2_hidden_units)
    # softmax
    mlp = sym.softmax(data=dense2, name='softmax')
    # fake non-sparse label
    label = sym.full_like(mlp, fill_value=1)
    # cross entropy loss
    ce_loss = sym.sum(sym.elemwise_mul(sym.log_softmax(dense2), label),
                      axis=1,
                      keepdims=True,
                      name="ce_loss")

    # input variables:
    # print grad_g.symbol.list_input_names()
    # >> ['data', 'conv_w', 'conv_b',
    #     'dense1_weight', 'dense1_bias',
    #     'dense2_weight', 'dense2_bias']

    # output gradient variables:
    # print grad_g.symbol.list_output_names()
    # >> ['conv1_grad_data', 'conv1_grad_weight', 'conv1_grad_bias',
    #     'dense1_grad_weight', 'dense1_grad_bias',
    #     'dense2_grad_weight', 'dense2_grad_bias']
    grad_g = graph_util.get_gradient_graph(ce_loss,
                                           ce_loss.list_input_variables())

    # infer shape
    in_shapes, out_shapes = graph_util.infer_shape(grad_g)

    # forward graph shape
    assert in_shapes == [
        list(data_shape),
        list(conv_w_shape),
        list(conv_b_shape), [dense1_hidden_units, flatten_out_shape],
        [dense1_hidden_units], [dense2_hidden_units, dense1_hidden_units],
        [dense2_hidden_units]
    ]
    # input grads shape should be equal with input shape
    assert in_shapes == out_shapes

    # output grads w.r.t input variables
    grads = graph_util.gradients(ce_loss, ce_loss.list_input_variables())

    # gradients number should be equal with grad_input number
    assert len(grads) == len(ce_loss.list_input_variables())

    # infer type
    in_dtypes, out_dtypes = graph_util.infer_dtype(grad_g)
    assert out_dtypes == [
        'float32', 'float32', 'float32', 'float32', 'float32', 'float32',
        'float32'
    ]
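A quick hand check of the flatten shape used above (assuming NNVM's default max_pool2d strides of (1, 1), which is what the test's shape assertion relies on):

conv_hw = 128 - 32 + 1     # 'valid' conv output per spatial dim: 97
pool_hw = conv_hw - 2 + 1  # 2x2 pool, stride (1, 1): 96 == h - kernel_size
assert pool_hw * pool_hw * 64 == (128 - 32) * (128 - 32) * 64  # 589824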