Example #1
    def __init__(self, dim, use_bias):
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            nn.ReflectionPad2D(1),
            nn.Conv2D(dim,
                      kernel_size=3,
                      strides=1,
                      padding=0,
                      use_bias=use_bias,
                      in_channels=dim),
            InstanceNorm2D(),
            nn.Activation('relu')
        ]

        conv_block += [
            nn.ReflectionPad2D(1),
            nn.Conv2D(dim,
                      kernel_size=3,
                      strides=1,
                      padding=0,
                      use_bias=use_bias,
                      in_channels=dim),
            InstanceNorm2D()
        ]

        self.conv_block = nn.HybridSequential()
        self.conv_block.add(*conv_block)
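
The snippet ends with the container; the residual connection itself lives in the forward pass, which is not shown. A minimal sketch, assuming the standard skip-connection pattern (the same pattern the docstring in Example #6 describes):

    def hybrid_forward(self, F, x):
        # Skip connection: add the block's input to the conv stack's output.
        return x + self.conv_block(x)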
Example #2
    def build_conv_block(self, dim, padding_type, use_dropout):
        conv_block = nn.HybridSequential()
        p = 0
        with self.name_scope():
            if padding_type == 'reflect':
                conv_block.add(nn.ReflectionPad2D(1))
            elif padding_type == 'zero':
                p = 1
            else:
                raise NotImplementedError('padding [%s] is not implemented' %
                                          padding_type)

            conv_block.add(nn.Conv2D(dim, kernel_size=3, padding=p),
                           nn.InstanceNorm(), nn.Activation('relu'))
            if use_dropout:
                conv_block.add(nn.Dropout(0.5))

            p = 0
            if padding_type == 'reflect':
                conv_block.add(nn.ReflectionPad2D(1))
            elif padding_type == 'zero':
                p = 1
            else:
                raise NotImplementedError('padding [%s] is not implemented' %
                                          padding_type)
            conv_block.add(nn.Conv2D(dim, kernel_size=3, padding=p),
                           nn.InstanceNorm())

        return conv_block
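
A quick shape check as a hypothetical usage sketch (the `net` instance and the 256-channel width are assumptions): with padding_type='reflect', ReflectionPad2D(1) exactly offsets the 3x3 convolution, so spatial size is preserved.

import mxnet as mx

block = net.build_conv_block(dim=256, padding_type='reflect', use_dropout=False)
block.initialize()
out = block(mx.nd.zeros((1, 256, 64, 64)))
print(out.shape)  # (1, 256, 64, 64): reflect padding preserves H and W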
Example #3
    def __init__(self, **kwargs):
        super(ResnetGenerator, self).__init__(**kwargs)
        num_blocks = 9
        with self.name_scope():
            self.module = nn.Sequential()
            with self.module.name_scope():
                self.module.add(nn.ReflectionPad2D(3))
                self.module.add(nn.Conv2D(64, kernel_size=7, padding=0))
                self.module.add(nn.InstanceNorm())
                self.module.add(nn.Activation(activation='relu'))
                n_downsampling = 2
                for i in range(n_downsampling):
                    mult = 2**i
                    self.module.add(
                        nn.Conv2D(128 * mult,
                                  kernel_size=3,
                                  strides=2,
                                  padding=1))
                    self.module.add(nn.InstanceNorm())
                    self.module.add(nn.Activation(activation='relu'))
                for i in range(num_blocks):
                    self.module.add(ResnetBlock())
                for i in range(n_downsampling):
                    mult = 2**(n_downsampling - i)
                    self.module.add(
                        nn.Conv2DTranspose(int(64 * mult / 2),
                                           kernel_size=3,
                                           strides=2,
                                           padding=1,
                                           output_padding=1))
                    self.module.add(nn.InstanceNorm())
                    self.module.add(nn.Activation(activation='relu'))
                self.module.add(nn.ReflectionPad2D(3))
                self.module.add(nn.Conv2D(3, kernel_size=7, padding=0))
                self.module.add(nn.Activation(activation='tanh'))
Example #4
    def __init__(self, input_nc, ndf=64, n_layers=5):
        super(Discriminator, self).__init__()
        model = [
            nn.ReflectionPad2D(1),
            SNConv2D(ndf,
                     kernel_size=4,
                     strides=2,
                     padding=0,
                     use_bias=True,
                     in_channels=input_nc),
            nn.LeakyReLU(0.2)
        ]

        for i in range(1, n_layers - 2):
            mult = 2**(i - 1)
            model += [
                nn.ReflectionPad2D(1),
                SNConv2D(ndf * mult * 2,
                         kernel_size=4,
                         strides=2,
                         padding=0,
                         use_bias=True,
                         in_channels=ndf * mult),
                nn.LeakyReLU(0.2)
            ]

        mult = 2**(n_layers - 2 - 1)
        model += [
            nn.ReflectionPad2D(1),
            SNConv2D(ndf * mult * 2,
                     kernel_size=4,
                     strides=1,
                     padding=0,
                     use_bias=True,
                     in_channels=ndf * mult),
            nn.LeakyReLU(0.2)
        ]

        # Class Activation Map
        mult = 2**(n_layers - 2)
        self.gap_fc = SNDense(1, use_bias=False, in_units=ndf * mult)
        self.gmp_fc = SNDense(1, use_bias=False, in_units=ndf * mult)
        self.conv1x1 = nn.Conv2D(ndf * mult,
                                 kernel_size=1,
                                 strides=1,
                                 use_bias=True,
                                 in_channels=ndf * mult * 2)
        self.leaky_relu = nn.LeakyReLU(0.2)

        self.pad = nn.ReflectionPad2D(1)
        self.conv = SNConv2D(1,
                             kernel_size=4,
                             strides=1,
                             padding=0,
                             use_bias=False,
                             in_channels=ndf * mult)

        self.model = nn.HybridSequential()
        self.model.add(*model)
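
The ReflectionPad2D(1) + 4x4/stride-2 pairs stand in for the usual padding=1 argument; for even inputs they halve the spatial size. A standalone sketch of that shape arithmetic (the channel counts here are illustrative):

import mxnet as mx
from mxnet.gluon import nn

pad_conv = nn.HybridSequential()
pad_conv.add(nn.ReflectionPad2D(1),
             nn.Conv2D(64, kernel_size=4, strides=2, padding=0))
pad_conv.initialize()
# H_out = floor((256 + 2 - 4) / 2) + 1 = 128
print(pad_conv(mx.nd.zeros((1, 3, 256, 256))).shape)  # (1, 64, 128, 128)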
Example #5
    def get_blocks(self):
        model = nn.Sequential()
        with model.name_scope():
            model.add(nn.ReflectionPad2D(1))
            model.add(nn.Conv2D(256, kernel_size=3))
            model.add(nn.InstanceNorm())
            model.add(nn.Activation(activation='relu'))
            model.add(nn.ReflectionPad2D(1))
            model.add(nn.Conv2D(256, kernel_size=3))
            model.add(nn.InstanceNorm())
        return model
Example #6
    def __init__(self, nc, padding_type, norm_layer, use_dropout, use_bias):
        """Initialize the residual block

        A residual block is a conv block with skip connections,
        Implement conv block in __init__, and implement skip connection in forward.
        """
        super(ResidualBlock, self).__init__()

        p = 0
        self.conv_block = nn.Sequential()
        if padding_type == 'reflect':
            self.conv_block.add(nn.ReflectionPad2D(1))
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        residual_conv1 = nn.Sequential()
        with residual_conv1.name_scope():
            residual_conv1.add(
                nn.Conv2D(in_channels=nc,
                          channels=nc,
                          kernel_size=3,
                          padding=p,
                          use_bias=use_bias))
            residual_conv1.add(norm_layer(in_channels=nc))
            residual_conv1.add(nn.LeakyReLU(0))  # slope 0: behaves as a plain ReLU
        self.conv_block.add(residual_conv1)

        p = 0
        if padding_type == 'reflect':
            self.conv_block.add(nn.ReflectionPad2D(1))
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        residual_conv2 = nn.Sequential()
        with residual_conv2.name_scope():
            residual_conv2.add(
                nn.Conv2D(in_channels=nc,
                          channels=nc,
                          kernel_size=3,
                          padding=p,
                          use_bias=use_bias))
            residual_conv2.add(norm_layer(in_channels=nc))
        self.conv_block.add(residual_conv2)
Example #7
    def __init__(self,
                 output_nc,
                 ngf=64,
                 use_dropout=False,
                 n_blocks=6,
                 padding_type='reflect'):
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.output_nc = output_nc
        self.ngf = ngf
        self.model = nn.HybridSequential()
        with self.name_scope():
            self.model.add(nn.ReflectionPad2D(3),
                           nn.Conv2D(ngf, kernel_size=7, padding=0),
                           nn.InstanceNorm(), nn.Activation('relu'))

            n_downsampling = 2
            for i in range(n_downsampling):
                mult = 2**i
                self.model.add(
                    nn.Conv2D(ngf * mult * 2,
                              kernel_size=3,
                              strides=2,
                              padding=1), nn.InstanceNorm(),
                    nn.Activation('relu'))

            mult = 2**n_downsampling
            for i in range(n_blocks):
                self.model.add(
                    ResnetBlock(ngf * mult,
                                padding_type=padding_type,
                                use_dropout=use_dropout))

            for i in range(n_downsampling):
                mult = 2**(n_downsampling - i)
                self.model.add(
                    nn.Conv2DTranspose(int(ngf * mult / 2),
                                       kernel_size=3,
                                       strides=2,
                                       padding=1,
                                       output_padding=1), nn.InstanceNorm(),
                    nn.Activation('relu'))
            self.model.add(nn.ReflectionPad2D(3),
                           nn.Conv2D(output_nc, kernel_size=7, padding=0),
                           nn.Activation('tanh'))
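
A hypothetical usage sketch (input shape assumed): the generator is fully convolutional and preserves resolution, since each stride-2 downsampling is mirrored by a stride-2 transposed convolution.

import mxnet as mx

netG = ResnetGenerator(output_nc=3, ngf=64, n_blocks=9)
netG.initialize()
fake = netG(mx.nd.zeros((1, 3, 256, 256)))
print(fake.shape)  # (1, 3, 256, 256)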
Example #8
    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        self.use_refl = use_refl

        with self.name_scope():
            self.pad = nn.ReflectionPad2D(1)
            self.conv = nn.Conv2D(in_channels=int(in_channels),
                                  channels=int(out_channels),
                                  kernel_size=3)
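
The stored use_refl flag suggests the full class can fall back to zero padding; a minimal forward consistent with the snippet as shown (an assumption):

    def hybrid_forward(self, F, x):
        # Reflection-pad by 1, then the 3x3 convolution: H and W are preserved.
        return self.conv(self.pad(x))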
Example #9
    def __init__(self, dim, use_bias):
        super(ResnetAdaILNBlock, self).__init__()
        self.pad1 = nn.ReflectionPad2D(1)
        self.conv1 = nn.Conv2D(dim,
                               kernel_size=3,
                               strides=1,
                               padding=0,
                               use_bias=use_bias,
                               in_channels=dim)
        self.norm1 = adaILN(dim)
        self.relu1 = nn.Activation('relu')

        self.pad2 = nn.ReflectionPad2D(1)
        self.conv2 = nn.Conv2D(dim,
                               kernel_size=3,
                               strides=1,
                               padding=0,
                               use_bias=use_bias,
                               in_channels=dim)
        self.norm2 = adaILN(dim)
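
adaILN takes externally supplied gamma and beta, so the forward pass needs them as extra inputs. A sketch of the usual U-GAT-IT-style wiring (an assumption, not confirmed by the snippet):

    def hybrid_forward(self, F, x, gamma, beta):
        out = self.relu1(self.norm1(self.conv1(self.pad1(x)), gamma, beta))
        out = self.norm2(self.conv2(self.pad2(out)), gamma, beta)
        return out + x  # residual connection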
Example #10
    def __init__(self):
        super(SSIM, self).__init__()
        self.mu_x_pool = nn.AvgPool2D(3, 1)
        self.mu_y_pool = nn.AvgPool2D(3, 1)
        self.sig_x_pool = nn.AvgPool2D(3, 1)
        self.sig_y_pool = nn.AvgPool2D(3, 1)
        self.sig_xy_pool = nn.AvgPool2D(3, 1)

        self.refl = nn.ReflectionPad2D(1)

        self.C1 = 0.01**2
        self.C2 = 0.03**2
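
Only the pooling layers and constants are defined here; the SSIM computation itself would sit in the forward pass. A sketch following the common monodepth2-style formulation (an assumption):

    def hybrid_forward(self, F, x, y):
        x = self.refl(x)
        y = self.refl(y)
        mu_x = self.mu_x_pool(x)
        mu_y = self.mu_y_pool(y)
        sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
        sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
        sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
        ssim_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
        ssim_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2)
        # Per-pixel dissimilarity in [0, 1].
        return F.clip((1 - ssim_n / ssim_d) / 2, 0, 1)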
Example #11
    def __init__(self):
        super(Generator_256, self).__init__()
        self.net = nn.HybridSequential()
        with self.net.name_scope():
            self.net.add(
                nn.ReflectionPad2D(3),
                nn.Conv2D(32, kernel_size=7, strides=1),
                nn.InstanceNorm(),
                nn.Activation('relu'),  # c7s1-32
                conv_inst_relu(64),
                conv_inst_relu(128),
            )
            for _ in range(9):
                self.net.add(ResBlock(128))
            self.net.add(
                upconv_inst_relu(64),
                upconv_inst_relu(32),
                nn.ReflectionPad2D(3),
                nn.Conv2D(3, kernel_size=7, strides=1),
                nn.Activation('sigmoid')
            )
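
conv_inst_relu and upconv_inst_relu are project helpers not shown here; plausible definitions, assuming the standard CycleGAN dk/uk blocks:

from mxnet.gluon import nn

def conv_inst_relu(channels):
    # Hypothetical dk block: stride-2 conv, instance norm, ReLU.
    block = nn.HybridSequential()
    block.add(nn.Conv2D(channels, kernel_size=3, strides=2, padding=1),
              nn.InstanceNorm(),
              nn.Activation('relu'))
    return block

def upconv_inst_relu(channels):
    # Hypothetical uk block: stride-2 transposed conv, instance norm, ReLU.
    block = nn.HybridSequential()
    block.add(nn.Conv2DTranspose(channels, kernel_size=3, strides=2,
                                 padding=1, output_padding=1),
              nn.InstanceNorm(),
              nn.Activation('relu'))
    return block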
Example #12
    def __init__(self,
                 filt_size=3,
                 stride=2,
                 channels=None,
                 pad_off=0,
                 context=mx.cpu(),
                 **kwargs):
        super(Downsample, self).__init__(**kwargs)
        self.filt_size = filt_size
        assert self.filt_size in [1, 3, 5, 7]
        self.pad_off = pad_off
        self.pad_size = (filt_size - 1) // 2 + pad_off
        self.stride = stride
        self.channels = channels

        if self.filt_size == 1:
            filt = nd.array([
                1.0,
            ])
        elif self.filt_size == 3:
            filt = nd.array([1.0, 2.0, 1.0])
        elif self.filt_size == 5:
            filt = nd.array([1.0, 4.0, 6.0, 4.0, 1.0])
        elif self.filt_size == 7:
            filt = nd.array([1.0, 6.0, 15.0, 20.0, 15.0, 6.0, 1.0])
        kernel = filt[:, None] * filt[None, :]
        kernel = kernel / nd.sum(kernel)
        kernel = kernel[None, None, :, :].repeat(channels, axis=0)

        with self.name_scope():
            self.pad = nn.ReflectionPad2D(self.pad_size)
            self.blur_conv = nn.Conv2D(channels=channels,
                                       kernel_size=self.filt_size,
                                       strides=self.stride,
                                       use_bias=False,
                                       groups=channels,
                                       in_channels=channels)
            self.blur_conv.initialize(ctx=context)
            self.blur_conv.weight.set_data(kernel)
            self.blur_conv.weight.grad_req = 'null'
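
A minimal forward, assuming the usual blur-pool wiring of pad followed by the fixed grouped convolution:

    def hybrid_forward(self, F, x):
        # Reflection-pad, then blur and subsample with the frozen binomial kernel.
        return self.blur_conv(self.pad(x))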
Example #13
def test_reflectionpad():
    layer = nn.ReflectionPad2D(3)
    check_layer_forward(layer, (2, 3, 24, 24))
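
check_layer_forward comes from the MXNet Gluon test utilities; a simplified, hypothetical stand-in with the same intent:

import mxnet as mx

def check_layer_forward(layer, dshape):
    # Run a forward/backward pass, then verify the layer still
    # produces the same output shape after hybridization.
    layer.initialize()
    x = mx.nd.ones(shape=dshape)
    x.attach_grad()
    with mx.autograd.record():
        out = layer(x)
    out.backward()
    layer.hybridize()
    assert layer(x).shape == out.shape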
Example #14
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 norm_type='batch',
                 use_dropout=False,
                 netG_arch='resnet_9blocks',
                 padding_type='reflect'):
        super(CycleGAN_G, self).__init__()

        norm_layer = get_norm_layer(norm_type=norm_type)
        use_bias = norm_layer == nn.InstanceNorm

        if netG_arch == 'resnet_9blocks':
            n_blocks = 9
        elif netG_arch == 'resnet_6blocks':
            n_blocks = 6
        else:
            raise ValueError('Unknown netG_arch.')

        self.block_c7s1_64 = nn.Sequential()
        block_c7s1_64 = [
            nn.ReflectionPad2D(3),
            nn.Conv2D(channels=ngf,
                      in_channels=input_nc,
                      kernel_size=7,
                      strides=1,
                      padding=0,
                      use_bias=use_bias),
            norm_layer(in_channels=ngf),
            nn.LeakyReLU(0)
        ]
        self.block_c7s1_64.add(*block_c7s1_64)

        self.block_dk = nn.Sequential()
        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2**i
            block_dk = [
                nn.Conv2D(in_channels=ngf * mult,
                          channels=ngf * mult * 2,
                          kernel_size=3,
                          strides=2,
                          padding=1,
                          use_bias=use_bias),
                norm_layer(in_channels=ngf * mult * 2),
                nn.LeakyReLU(0)
            ]
            self.block_dk.add(*block_dk)

        self.block_Rk = nn.Sequential()
        mult = 2**n_downsampling
        for i in range(n_blocks):  # add ResNet blocks
            block_Rk = [
                ResidualBlock(ngf * mult,
                              padding_type=padding_type,
                              norm_layer=norm_layer,
                              use_dropout=use_dropout,
                              use_bias=use_bias)
            ]
            self.block_Rk.add(*block_Rk)

        self.block_uk = nn.Sequential()
        n_upsampling = 2
        for i in range(n_upsampling):  # add upsampling layers
            mult = 2**(n_upsampling - i)
            block_uk = [
                nn.Conv2DTranspose(in_channels=ngf * mult,
                                   channels=ngf * mult // 2,
                                   kernel_size=3,
                                   strides=2,
                                   padding=1,
                                   output_padding=1,
                                   use_bias=use_bias),
                norm_layer(in_channels=ngf * mult // 2),
                nn.LeakyReLU(0)
            ]
            self.block_uk.add(*block_uk)

        self.block_c7s1_3 = nn.Sequential()
        block_c7s1_3 = [
            nn.ReflectionPad2D(3),
            nn.Conv2D(in_channels=ngf,
                      channels=output_nc,
                      kernel_size=7,
                      padding=0),
            nn.HybridLambda('tanh')
        ]

        self.block_c7s1_3.add(*block_c7s1_3)
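
An assumed forward pass, chaining the named sub-blocks in construction order:

    def forward(self, x):
        x = self.block_c7s1_64(x)
        x = self.block_dk(x)
        x = self.block_Rk(x)
        x = self.block_uk(x)
        return self.block_c7s1_3(x)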
Example #15
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 n_blocks=6,
                 img_size=256,
                 light=False):
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size
        self.light = light

        DownBlock = []
        DownBlock += [
            nn.ReflectionPad2D(3),
            nn.Conv2D(ngf,
                      kernel_size=7,
                      strides=1,
                      padding=0,
                      use_bias=False,
                      in_channels=input_nc),
            InstanceNorm2D(),
            nn.Activation('relu')
        ]

        # Down-Sampling
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            DownBlock += [
                nn.ReflectionPad2D(1),
                nn.Conv2D(ngf * mult * 2,
                          kernel_size=3,
                          strides=2,
                          padding=0,
                          use_bias=False,
                          in_channels=ngf * mult),
                InstanceNorm2D(),
                nn.Activation('relu')
            ]

        # Down-Sampling Bottleneck
        mult = 2**n_downsampling
        for i in range(n_blocks):
            DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]

        # Class Activation Map
        self.gap_fc = nn.Dense(1, use_bias=False)
        self.gmp_fc = nn.Dense(1, use_bias=False)
        self.conv1x1 = nn.Conv2D(ngf * mult,
                                 kernel_size=1,
                                 strides=1,
                                 use_bias=True,
                                 in_channels=ngf * mult * 2)
        self.relu = nn.Activation('relu')

        # Gamma, Beta block
        FC = [
            nn.Dense(ngf * mult, use_bias=False),
            nn.Activation('relu'),
            nn.Dense(ngf * mult, use_bias=False),
            nn.Activation('relu')
        ]
        self.gamma = nn.Dense(ngf * mult, use_bias=False)
        self.beta = nn.Dense(ngf * mult, use_bias=False)

        # Up-Sampling Bottleneck
        self.UpBlock1s = nn.HybridSequential()
        for i in range(n_blocks):
            self.UpBlock1s.add(ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling
        UpBlock2 = []
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            UpBlock2 += [
                nn.HybridLambda(lambda F, x: F.UpSampling(
                    x, scale=2, sample_type='nearest')),
                nn.ReflectionPad2D(1),
                nn.Conv2D(int(ngf * mult / 2),
                          kernel_size=3,
                          strides=1,
                          padding=0,
                          use_bias=False,
                          in_channels=ngf * mult),
                ILN(int(ngf * mult / 2)),
                nn.Activation('relu')
            ]

        UpBlock2 += [
            nn.ReflectionPad2D(3),
            nn.Conv2D(output_nc,
                      kernel_size=7,
                      strides=1,
                      padding=0,
                      use_bias=False,
                      in_channels=ngf),
            nn.Activation('tanh')
        ]

        self.DownBlock = nn.HybridSequential()
        self.DownBlock.add(*DownBlock)
        self.FC = nn.HybridSequential()
        self.FC.add(*FC)
        self.UpBlock2 = nn.HybridSequential()
        self.UpBlock2.add(*UpBlock2)
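
How these pieces connect is not shown; in U-GAT-IT-style generators the FC stack maps the encoded feature to a style code, from which gamma and beta are derived for every ResnetAdaILNBlock. A hypothetical helper sketching that wiring (the CAM logits branch is omitted):

    def decode(self, x):
        # Assumed wiring: style code from FC, gamma/beta fed to each
        # AdaILN bottleneck block, then the up-sampling stack.
        style = self.FC(x.reshape((x.shape[0], -1)))
        gamma, beta = self.gamma(style), self.beta(style)
        for block in self.UpBlock1s:
            x = block(x, gamma, beta)
        return self.UpBlock2(x)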