Example #1
    def test_pad(self):
        # ***************************************************************
        # Test ReplicationPad2d Layer
        # ***************************************************************
        arr = np.random.randn(16, 3, 224, 224)
        check_equal(arr, jnn.ReplicationPad2d(10), tnn.ReplicationPad2d(10))
        check_equal(arr, jnn.ReplicationPad2d((1, 23, 4, 5)), tnn.ReplicationPad2d((1, 23, 4, 5)))
        check_equal(arr, jnn.ReplicationPad2d((1, 0, 1, 5)), tnn.ReplicationPad2d((1, 0, 1, 5)))
        check_equal(arr, jnn.ReplicationPad2d(100), tnn.ReplicationPad2d(100))

        # ***************************************************************
        # Test ConstantPad2d Layer
        # ***************************************************************
        arr = np.random.randn(16, 3, 224, 224)
        check_equal(arr, jnn.ConstantPad2d(10, -2), tnn.ConstantPad2d(10, -2))
        check_equal(arr, jnn.ConstantPad2d((2, 3, 34, 1), 10.2), tnn.ConstantPad2d((2, 3, 34, 1), 10.2))

        # ***************************************************************
        # Test ZeroPad2d Layer
        # ***************************************************************
        arr = np.random.randn(16, 3, 224, 224)
        check_equal(arr, jnn.ZeroPad2d(1), tnn.ZeroPad2d(1))
        check_equal(arr, jnn.ZeroPad2d((2, 3, 34, 1)), tnn.ZeroPad2d((2, 3, 34, 1)))

        # ***************************************************************
        # Test ReflectionPad2d Layer
        # ***************************************************************
        arr = np.random.randn(16, 3, 224, 224)
        check_equal(arr, jnn.ReflectionPad2d(20), tnn.ReflectionPad2d(20))
        check_equal(arr, jnn.ReflectionPad2d((2, 3, 34, 1)), tnn.ReflectionPad2d((2, 3, 34, 1)))
        check_equal(arr, jnn.ReflectionPad2d((10, 123, 34, 1)), tnn.ReflectionPad2d((10, 123, 34, 1)))
        check_equal(arr, jnn.ReflectionPad2d(100), tnn.ReflectionPad2d(100))
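These tests assume jnn and tnn are the Jittor and PyTorch nn modules, plus a check_equal helper that runs the same input through a layer from each framework and compares the results. A minimal sketch of such a helper (its exact definition is not shown in the snippet):

import numpy as np
import jittor as jt
import torch

def check_equal(arr, j_layer, t_layer):
    # Run the same NumPy array through a Jittor layer and its PyTorch
    # counterpart, then verify the outputs agree numerically.
    j_out = j_layer(jt.array(arr)).numpy()
    t_out = t_layer(torch.tensor(arr)).detach().numpy()
    assert np.allclose(j_out, t_out)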
Example #2
    def test_pad(self):
        # ***************************************************************
        # Test ReplicationPad2d Layer
        # ***************************************************************
        arr = np.random.randn(16, 3, 224, 224)
        check_equal(arr, jnn.ReplicationPad2d(10), tnn.ReplicationPad2d(10))
        check_equal(arr, jnn.ReplicationPad2d((1, 23, 4, 5)),
                    tnn.ReplicationPad2d((1, 23, 4, 5)))
        check_equal(arr, jnn.ReplicationPad2d((1, 0, 1, 5)),
                    tnn.ReplicationPad2d((1, 0, 1, 5)))
        check_equal(arr, jnn.ReplicationPad2d(100), tnn.ReplicationPad2d(100))

        # ***************************************************************
        # Test ConstantPad2d Layer
        # ***************************************************************
        arr = np.random.randn(16, 3, 224, 224)
        check_equal(arr, jnn.ConstantPad2d(10, -2), tnn.ConstantPad2d(10, -2))
        check_equal(arr, jnn.ConstantPad2d((2, 3, 34, 1), 10.2),
                    tnn.ConstantPad2d((2, 3, 34, 1), 10.2))

        # ConstantPad2d should also handle inputs with more than 4 dims
        arr = np.random.randn(16, 3, 224, 10, 10)
        check_equal(arr, jnn.ConstantPad2d(10, -2), tnn.ConstantPad2d(10, -2))
        check_equal(arr, jnn.ConstantPad2d((2, 3, 34, 1), 10.2),
                    tnn.ConstantPad2d((2, 3, 34, 1), 10.2))

        # ***************************************************************
        # Test ZeroPad2d Layer
        # ***************************************************************
        arr = np.random.randn(16, 3, 224, 224)
        check_equal(arr, jnn.ZeroPad2d(1), tnn.ZeroPad2d(1))
        check_equal(arr, jnn.ZeroPad2d((2, 3, 34, 1)),
                    tnn.ZeroPad2d((2, 3, 34, 1)))

        # ***************************************************************
        # Test ReflectionPad2d Layer
        # ***************************************************************
        arr = np.random.randn(16, 3, 224, 224)
        check_equal(arr, jnn.ReflectionPad2d(20), tnn.ReflectionPad2d(20))
        check_equal(arr, jnn.ReflectionPad2d((2, 3, 34, 1)),
                    tnn.ReflectionPad2d((2, 3, 34, 1)))
        check_equal(arr, jnn.ReflectionPad2d((10, 123, 34, 1)),
                    tnn.ReflectionPad2d((10, 123, 34, 1)))
        check_equal(arr, jnn.ReflectionPad2d(100), tnn.ReflectionPad2d(100))

        # ***************************************************************
        # Test function pad
        # ***************************************************************
        arr = np.random.randn(16, 3, 224, 224)
        padding = (10, 11, 2, 3)
        for mode in ['constant', 'replicate', 'reflect', 'circular']:
            j_data = jt.array(arr)
            t_data = torch.tensor(arr)
            t_output = tnn.functional.pad(t_data, padding,
                                          mode=mode).detach().numpy()
            j_output = jnn.pad(j_data, padding, mode).numpy()
            assert np.allclose(t_output, j_output)
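The 4-tuples used throughout these tests follow the (left, right, top, bottom) convention of PyTorch's 2D padding layers; a small shape check makes the order explicit (illustrative only):

import jittor as jt
from jittor import nn

x = jt.random((1, 1, 5, 7))
y = nn.ZeroPad2d((1, 2, 3, 4))(x)  # (left, right, top, bottom)
print(y.shape)  # [1, 1, 12, 10]: height 5 + 3 + 4, width 7 + 1 + 2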
Example #3
def fixed_padding(inputs, kernel_size, dilation):
    kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    pad_func = nn.ZeroPad2d((pad_beg, pad_end, pad_beg, pad_end))
    padded_inputs = pad_func(inputs)
    return padded_inputs
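Assuming nn here is jittor.nn, a quick shape check of the helper: with kernel_size=3 and dilation=2 the effective kernel is 3 + 2 * 1 = 5, so pad_total = 4 and each side gets 2.

import jittor as jt
from jittor import nn

x = jt.random((1, 3, 32, 32))
y = fixed_padding(x, kernel_size=3, dilation=2)
print(y.shape)  # [1, 3, 36, 36]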
Example #4
    def __init__(self,
                 input_dim,
                 output_dim,
                 kernel_size,
                 stride,
                 padding=0,
                 norm='none',
                 activation='relu',
                 pad_type='zero'):
        super(ConvBlock, self).__init__()
        self.use_bias = True
        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)

        # initialize normalization
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

        # padding is applied separately via self.pad, so the conv itself adds none
        self.conv = nn.Conv(input_dim,
                            output_dim,
                            kernel_size,
                            stride,
                            bias=self.use_bias)
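The three pad_type options differ only in what fills the border: 'zero' writes zeros, 'reflect' mirrors the interior values, and 'replicate' repeats the edge values. A small Jittor comparison (illustrative, independent of this class):

import jittor as jt
from jittor import nn

x = jt.array([[[[1., 2.], [3., 4.]]]])  # shape (1, 1, 2, 2)
for Pad in (nn.ZeroPad2d, nn.ReflectionPad2d, nn.ReplicationPad2d):
    print(Pad.__name__)
    print(Pad(1)(x))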
Example #5
    def __init__(self, in_channels=3):
        super(Discriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, normalization=True):
            """Returns downsampling layers of each discriminator block"""
            layers = [nn.Conv(in_filters, out_filters, 4, stride=2, padding=1)]
            if normalization:
                layers.append(nn.InstanceNorm2d(out_filters, affine=None))
            layers.append(nn.LeakyReLU(scale=0.2))
            return layers

        self.model = nn.Sequential(
            *discriminator_block(in_channels * 2, 64, normalization=False),  # two images concatenated channel-wise
            *discriminator_block(64, 128),
            *discriminator_block(128, 256),
            *discriminator_block(256, 512),
            nn.ZeroPad2d((1, 0, 1, 0)),
            nn.Conv(512, 1, 4, padding=1, bias=False))

        for m in self.modules():
            weights_init_normal(m)
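The ZeroPad2d((1, 0, 1, 0)) pads only the left and top edges; together with the conv's own padding=1 it exactly offsets the size loss of the even 4x4 kernel, so the final patch map keeps the feature map's spatial size. A standalone check (shapes assumed for illustration):

import jittor as jt
from jittor import nn

x = jt.random((1, 512, 16, 16))
y = nn.Conv(512, 1, 4, padding=1)(nn.ZeroPad2d((1, 0, 1, 0))(x))
print(y.shape)  # [1, 1, 16, 16]: 16 + 1 + 2 - 4 + 1 = 16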
Example #6
    def __init__(self, input_shape):
        super(Discriminator, self).__init__()
        channels, height, width = input_shape
        # four stride-2 conv blocks downsample H and W by a factor of 2**4
        self.output_shape = (1, height // 2 ** 4, width // 2 ** 4)

        def discriminator_block(in_filters, out_filters, normalize=True):
            """Returns downsampling layers of each discriminator block"""
            layers = [nn.Conv(in_filters, out_filters, 4, stride=2, padding=1)]
            if normalize:
                layers.append(nn.InstanceNorm2d(out_filters, affine=None))
            layers.append(nn.LeakyReLU(scale=0.2))
            return layers

        self.model = nn.Sequential(
            *discriminator_block(channels, 64, normalize=False),
            *discriminator_block(64, 128),
            *discriminator_block(128, 256),
            *discriminator_block(256, 512),
            nn.ZeroPad2d((1, 0, 1, 0)),
            nn.Conv(512, 1, 4, padding=1))

        for m in self.modules():
            weights_init_normal(m)
Example #7
    def __init__(self, in_channels=3, out_channels=3):
        super(GeneratorUNet, self).__init__()
        self.down1 = UNetDown(in_channels, 64, normalize=False)
        self.down2 = UNetDown(64, 128)
        self.down3 = UNetDown(128, 256)
        self.down4 = UNetDown(256, 512, dropout=0.5)
        self.down5 = UNetDown(512, 512, dropout=0.5)
        self.down6 = UNetDown(512, 512, dropout=0.5)
        self.down7 = UNetDown(512, 512, dropout=0.5)
        self.down8 = UNetDown(512, 512, normalize=False, dropout=0.5)
        self.up1 = UNetUp(512, 512, dropout=0.5)
        self.up2 = UNetUp(1024, 512, dropout=0.5)
        self.up3 = UNetUp(1024, 512, dropout=0.5)
        self.up4 = UNetUp(1024, 512, dropout=0.5)
        self.up5 = UNetUp(1024, 256)
        self.up6 = UNetUp(512, 128)
        self.up7 = UNetUp(256, 64)
        self.final = nn.Sequential(nn.Upsample(scale_factor=2),
                                   nn.ZeroPad2d((1, 0, 1, 0)),
                                   nn.Conv(128, out_channels, 4, padding=1),
                                   nn.Tanh())

        for m in self.modules():
            weights_init_normal(m)
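In the final block, Upsample doubles the spatial size and the asymmetric ZeroPad2d again compensates for the even 4x4 kernel, so the output matches the upsampled size. A standalone shape check (out_channels=3 assumed):

import jittor as jt
from jittor import nn

final = nn.Sequential(nn.Upsample(scale_factor=2),
                      nn.ZeroPad2d((1, 0, 1, 0)),
                      nn.Conv(128, 3, 4, padding=1),
                      nn.Tanh())
x = jt.random((1, 128, 64, 64))
print(final(x).shape)  # [1, 3, 128, 128]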