Code Example #1
File: test_legacy_nn.py  Project: wangg12/pytorch
    def test_DepthConcat(self):
        outputSize = torch.IntTensor((5, 6, 7, 8))
        input = torch.randn(2, 3, 12, 12)
        gradOutput = torch.randn(2, int(outputSize.sum()), 12, 12)
        concat = nn.DepthConcat(1)
        concat.add(nn.SpatialConvolution(3, outputSize[0], 1, 1, 1, 1))  # > 2, 5, 12, 12
        concat.add(nn.SpatialConvolution(3, outputSize[1], 3, 3, 1, 1))  # > 2, 6, 10, 10
        concat.add(nn.SpatialConvolution(3, outputSize[2], 4, 4, 1, 1))  # > 2, 7, 9, 9
        concat.add(nn.SpatialConvolution(3, outputSize[3], 5, 5, 1, 1))  # > 2, 8, 8, 8
        concat.zeroGradParameters()
        # forward/backward
        outputConcat = concat.forward(input)
        gradInputConcat = concat.backward(input, gradOutput)
        # the spatial dims are the largest, the nFilters is the sum
        output = torch.Tensor(2, int(outputSize.sum()), 12, 12).zero_()  # zero for padding
        narrows = ((slice(None), slice(0, 5), slice(None), slice(None)),
                   (slice(None), slice(5, 11), slice(1, 11), slice(1, 11)),
                   (slice(None), slice(11, 18), slice(1, 10), slice(1, 10)),
                   (slice(None), slice(18, 26), slice(2, 10), slice(2, 10)))
        gradInput = input.clone().zero_()
        for i in range(4):
            conv = concat.get(i)
            gradWeight = conv.gradWeight.clone()
            conv.zeroGradParameters()
            output[narrows[i]].copy_(conv.forward(input))
            gradInput.add_(conv.backward(input, gradOutput[narrows[i]]))
            self.assertEqual(gradWeight, conv.gradWeight)

        self.assertEqual(output, outputConcat)
        self.assertEqual(gradInput, gradInputConcat)

        # Check that these don't raise errors
        concat.__repr__()
        str(concat)
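
For reference, a minimal standalone sketch (not part of the test file, and assuming torch.legacy.nn is importable) of the DepthConcat behavior this test exercises: child-module outputs are concatenated along dimension 1, with smaller feature maps zero-padded up to the largest spatial size.

import torch
import torch.legacy.nn as nn

concat = nn.DepthConcat(1)                           # concatenate along the channel dimension
concat.add(nn.SpatialConvolution(3, 5, 1, 1, 1, 1))  # 1x1 conv keeps 12x12
concat.add(nn.SpatialConvolution(3, 6, 3, 3, 1, 1))  # 3x3 conv shrinks to 10x10, zero-padded back to 12x12
out = concat.forward(torch.randn(2, 3, 12, 12))
print(out.size())  # torch.Size([2, 11, 12, 12])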
Code Example #2
def build_conv_block(dim, padding_type, use_instance_norm):
    conv_block = nn.Sequential()
    p = 0
    if padding_type == 'reflect':
        conv_block.add(nn.SpatialReflectionPadding(1,1,1,1))
    elif padding_type == 'replicate':
        conv_block.add(nn.SpatialReplicationPadding(1,1,1,1))
    elif padding_type == 'zero':
        p = 1

    conv_block.add(nn.SpatialConvolution(dim, dim, 3, 3, 1, 1, p, p))

    if use_instance_norm == 1:
        conv_block.add(nn.InstanceNormalization(dim))
    else:
        conv_block.add(nn.SpatialBatchNormalization(dim))

    conv_block.add(nn.ReLU(True))

    if padding_type == 'reflect':
        conv_block.add(nn.SpatialReflectionPadding(1, 1, 1, 1))
    elif padding_type == 'replicate':
        conv_block.add(nn.SpatialReplicationPadding(1, 1, 1, 1))

    conv_block.add(nn.SpatialConvolution(dim, dim, 3, 3, 1, 1, p, p))

    if use_instance_norm == 1:
        conv_block.add(nn.InstanceNormalization(dim))
    else:
        conv_block.add(nn.SpatialBatchNormalization(dim))

    return conv_block
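
A hypothetical usage sketch (not from the source project): it assumes the snippet's nn refers to torch.legacy.nn, which provides the Spatial* padding, convolution, and batch-normalization modules used above (InstanceNormalization is a project-specific module, so the batch-norm branch is exercised here).

import torch
import torch.legacy.nn as nn

block = build_conv_block(64, 'reflect', use_instance_norm=0)  # batch-norm variant
y = block.forward(torch.randn(1, 64, 32, 32))  # legacy modules are driven via forward()/backward()
print(y.size())  # reflection padding + 3x3 convs preserve the 32x32 spatial size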
Code Example #3
def FCNN():
	num_classes = 2
	n_layers_enc = 32
	n_layers_ctx = 128
	n_input = 5
	prob_drop = 0.25
	layers = []
	# Encoder
	pool = nn2.SpatialMaxPooling(2,2,2,2)
	layers.append(nn2.SpatialConvolution(n_input, n_layers_enc, 3, 3, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialConvolution(n_layers_enc, n_layers_enc, 3, 3, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(pool)
	# Context Module
	layers.append(nn2.SpatialDilatedConvolution(n_layers_enc, n_layers_ctx, 3, 3, 1, 1, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 2, 2, 2, 2))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 4, 4, 4, 4))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 8, 8, 8, 8))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 16, 16, 16, 16))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 32, 32, 32, 32))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 64, 64, 64, 64))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_enc, 1, 1))
	layers.append(nn.ELU())  # Not in the original paper
	# Decoder
	layers.append(nn2.SpatialMaxUnpooling(pool))
	layers.append(nn2.SpatialConvolution(n_layers_enc, n_layers_enc, 3, 3, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialConvolution(n_layers_enc, num_classes, 3, 3, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(nn.SoftMax())  # Not in the original paper
	return nn.Sequential(*layers)
Code Example #4
def build_model(opt):
    model = nn.Sequential()
    prev_dim = 3
    arch = opt.arch.split(',')

    arch_len = len(arch)

    for i in range(arch_len):
        v = arch[i]
        first_char = v[0]
        needs_relu = True
        needs_bn = True
        next_dim = None
        layer = None
        if first_char == 'c':
            f = int(v[1])
            p = (f - 1) // 2
            s = int(v[3])
            next_dim = int(v[5])
            if opt.padding_type == 'reflect':
                model.add(nn.SpatialReflectionPadding(p, p, p, p))
                p = 0
            elif opt.padding_type == 'replicate':
                model.add(nn.SpatialReplicationPadding(p, p, p, p))
                p = 0
            elif opt.padding_type == 'none':
                p = 0
            layer = nn.SpatialConvolution(prev_dim, next_dim, f, f, s, s, p, p)
        elif first_char == 'f':
            f = int(v[1])
            p = (f - 1) // 2
            s = int(v[3])
            a = s - 1
            next_dim = int(v[5])
            layer = nn.SpatialFullConvolution(prev_dim, next_dim, f, f, s, s, p, p, a, a)
        elif first_char == 'd':
            next_dim = int(v[1])
            layer = nn.SpatialConvolution(prev_dim, next_dim, 3, 3, 2, 2, 1, 1)
        elif first_char == 'U':
            next_dim = prev_dim
            scale = int(v[1])
            layer = nn.SpatialFullConvolution(prev_dim, next_dim, 3, 3, 2, 2, 1, 1, 1, 1)
            needs_bn = False
            needs_relu = True

        model.add(layer)
        if i == arch_len - 1:
            needs_bn = False
            needs_relu = False

        if needs_bn:
            if opt.use_instance_norm == 1:
                model.add(nn.InstanceNormalization(next_dim))
            else:
                model.add(nn.SpatialBatchNormalization(next_dim))
            
        if needs_relu:
            model.add(nn.ReLU(True))
        
        prev_dim = next_dim
    
    model.add(nn.Tanh())
    model.add(nn.MulConstant(opt.tanh_constant))
    model.add(nn.TotalVariation(opt.tv_strength))

    return model
Code Example #5
    def test_convolution(self):
        # SpatialConvolution(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH)
        self._test_single_layer(
            nn.SpatialConvolution(3, 64, 7, 7, 2, 2, 3, 3),
            decimal=6
        )