def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix=''):
    """Conv -> BatchNorm -> ReLU building block in NHWC layout.

    Padding is materialized as an explicit ``sym.pad`` op (symmetric on the
    spatial H/W axes) so the convolution itself always runs with padding=(0, 0).
    Ops are named "<name><suffix>_conv2d" / "_batchnorm" / "_relu".
    """
    # Insert the explicit pad op only when some padding was requested.
    if pad[0] or pad[1]:
        data = sym.pad(data=data,
                       pad_width=((0, 0), (pad[0], pad[0]), (pad[1], pad[1]), (0, 0)))
    conv = sym.conv2d(data=data, channels=num_filter, kernel_size=kernel,
                      strides=stride, padding=(0, 0), use_bias=False,
                      layout='NHWC', kernel_layout='HWOI',
                      name="%s%s_conv2d" % (name, suffix))
    bn = sym.batch_norm(data=conv, name="%s%s_batchnorm" % (name, suffix),
                        epsilon=2e-5, axis=3)
    return sym.relu(data=bn, name="%s%s_relu" % (name, suffix))
def test_pad():
    """Pad op with constant fill must match numpy.pad."""
    pad_width = ((0, 0), (0, 0), (0, 1), (2, 3))
    inp = sym.Variable("x")
    out = sym.pad(inp, pad_width=pad_width, pad_value=1.)

    def forward(x):
        # Reference implementation: numpy constant padding with the same widths.
        return np.pad(x, pad_width=pad_width, mode='constant', constant_values=1.)

    check_function(out, forward, shape={'x': (1, 3, 28, 28)})
def test_pad():
    """Pad op with constant fill must match numpy.pad."""
    pad_width = ((0, 0), (0, 0), (0, 1), (2, 3))
    inp = sym.Variable("x")
    out = sym.pad(inp, pad_width=pad_width, pad_value=1.)

    def forward(x):
        # Reference implementation: numpy constant padding with the same widths.
        return np.pad(x, pad_width=pad_width, mode='constant', constant_values=1.)

    helper(out, {'x': ((1, 3, 28, 28), inp)}, "float32", forward)
def test_pad():
    """Pad op with constant fill must match numpy.pad."""
    pad_width = ((0, 0), (0, 0), (0, 1), (2, 3))
    inp = sym.Variable("x")
    out = sym.pad(inp, pad_width=pad_width, pad_value=1.)

    def forward(x):
        # Reference implementation: numpy constant padding with the same widths.
        return np.pad(x, pad_width=pad_width, mode='constant', constant_values=1.)

    helper(out, [('x', (1, 3, 28, 28), inp)], "float32", forward)
def test_pad():
    """Compile and run the pad op on every target; compare against numpy.pad."""
    pad_width = ((0, 0), (0, 0), (0, 1), (2, 3))
    inp = sym.Variable("x")
    out = sym.pad(inp, pad_width=pad_width, pad_value=1.)

    dtype = "float32"
    in_shape = (1, 3, 28, 28)
    # Output grows by the per-axis pad totals: H 28+0+1=29, W 28+2+3=33.
    out_shape = (1, 3, 29, 33)

    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(out, target, {"x": in_shape})
        module = graph_runtime.create(graph, lib, ctx)
        data = tvm.nd.array(np.random.uniform(size=in_shape).astype(dtype))
        module.run(x=data)
        result = module.get_output(0, tvm.nd.empty(out_shape, dtype))
        expected = np.pad(data.asnumpy(), pad_width=pad_width,
                          mode='constant', constant_values=1.)
        np.testing.assert_allclose(result.asnumpy(), expected, rtol=1e-5)
def Pooling(data, kernel, stride, pad, pool_type, name):
    """Max or average 2-D pooling in NHWC layout with explicit padding.

    Raises ValueError for an unknown ``pool_type``.
    """
    # Materialize symmetric spatial padding as its own op when requested.
    if pad[0] or pad[1]:
        data = sym.pad(data=data,
                       pad_width=((0, 0), (pad[0], pad[0]), (pad[1], pad[1]), (0, 0)))
    pool_ops = {'max': sym.max_pool2d, 'avg': sym.avg_pool2d}
    if pool_type not in pool_ops:
        raise ValueError("Invalid pooling type: " + pool_type)
    return pool_ops[pool_type](data=data, pool_size=kernel, strides=stride,
                               name=name, layout='NHWC')
def get_feature(internel_layer, layers, filters, batch_norm=False):
    """Build the VGG feature body as stacks of 3x3 convolutions (NHWC).

    layers  -- conv repetitions per stage, e.g. [1, 1, 2, 2, 2]
    filters -- output channels per stage, e.g. [64, 128, 256, 512, 512]
    Each stage ends with a stride-2 2x2 max-pool.
    """
    net = internel_layer
    for stage, reps in enumerate(layers):
        for rep in range(reps):
            # Explicit symmetric 1-pixel spatial pad, then an unpadded 3x3 conv.
            net = sym.pad(data=net, pad_width=((0, 0), (1, 1), (1, 1), (0, 0)))
            net = sym.conv2d(data=net, kernel_size=(3, 3), channels=filters[stage],
                             layout='NHWC', kernel_layout='HWOI',
                             name="conv%s_%s" % (stage + 1, rep + 1))
            if batch_norm:
                net = sym.batch_norm(data=net, axis=3,
                                     name="bn%s_%s" % (stage + 1, rep + 1))
            net = sym.relu(data=net, name="relu%s_%s" % (stage + 1, rep + 1))
        net = sym.max_pool2d(data=net, pool_size=(2, 2), strides=(2, 2),
                             layout="NHWC", name="pool%s" % (stage + 1))
    return net