def __init__(self, input_num, num1, num2, dilation_rate, drop_out,
             norm_type):
    super(_DenseAsppBlock, self).__init__()
    # 1x1 conv reduces the input to num1 channels.
    self.add_module(
        'conv1',
        nn.Conv2d(in_channels=input_num, out_channels=num1,
                  kernel_size=1))
    self.add_module(
        'norm1',
        ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_features=num1))
    self.add_module('relu1', nn.ReLU(inplace=False))
    # 3x3 atrous conv; padding == dilation keeps the spatial size unchanged.
    self.add_module(
        'conv2',
        nn.Conv2d(in_channels=num1,
                  out_channels=num2,
                  kernel_size=3,
                  dilation=dilation_rate,
                  padding=dilation_rate))
    # norm2 must match conv2's num2 output channels.
    self.add_module(
        'norm2',
        ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_features=num2))
    self.add_module('relu2', nn.ReLU(inplace=False))

    self.drop_rate = drop_out
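A quick shape check for the block above, as a hedged sketch: it assumes _DenseAsppBlock subclasses nn.Sequential (so calling it runs the registered modules in order; the dropout controlled by self.drop_rate would live in a forward override that is not shown) and that 'batchnorm' is a valid norm_type value for ModuleHelper.

import torch

# Hypothetical usage; _DenseAsppBlock and ModuleHelper come from the
# surrounding codebase, and norm_type='batchnorm' is an assumed value.
block = _DenseAsppBlock(input_num=512, num1=256, num2=128,
                        dilation_rate=3, drop_out=0.1, norm_type='batchnorm')
x = torch.randn(1, 512, 32, 32)
# conv2 uses padding == dilation, so the spatial size is preserved:
# (1, 512, 32, 32) -> (1, 128, 32, 32)
y = block(x)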
Example #2
    def __init__(self, input_nc, ndf=64, n_layers=3, norm_type=None):
        """Construct a PatchGAN discriminator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_type (str) -- the type of normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        # InstanceNorm2d has no affine parameters by default, so the convs
        # then need their own bias terms.
        use_bias = (norm_type == 'instancenorm')
        kw = 4
        padw = 1
        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                ModuleHelper.BatchNorm2d(norm_type=norm_type)(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]

        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]

        sequence += [
            nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]  # output 1 channel prediction map
        self.model = nn.Sequential(*sequence)
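For orientation, a usage sketch. With the defaults (n_layers=3, kw=4) this is the classic 70x70 PatchGAN from pix2pix; calling self.model directly avoids relying on a forward method, which the snippet does not show. The norm_type string 'batchnorm' is an assumption.

import torch

netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3,
                           norm_type='batchnorm')
x = torch.randn(1, 3, 256, 256)
# Three stride-2 convs, then two stride-1 convs:
# 256 -> 128 -> 64 -> 32 -> 31 -> 30; each output pixel scores one input patch.
pred = netD.model(x)   # -> (1, 1, 30, 30)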
Example #3
    def __init__(self,
                 low_in_channels,
                 high_in_channels,
                 out_channels,
                 key_channels,
                 value_channels,
                 dropout,
                 sizes=(1,),
                 norm_type=None,
                 psp_size=(1, 3, 6, 8)):
        super(AFNB, self).__init__()
        self.norm_type = norm_type
        self.psp_size = psp_size
        # One fusion stage per pooling size.
        self.stages = nn.ModuleList([
            self._make_stage([low_in_channels, high_in_channels], out_channels,
                             key_channels, value_channels, size)
            for size in sizes
        ])
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(out_channels + high_in_channels,
                      out_channels,
                      kernel_size=1,
                      padding=0),
            ModuleHelper.BatchNorm2d(norm_type=self.norm_type)(out_channels),
            nn.Dropout2d(dropout))
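A construction-only sketch: _make_stage and forward are not shown above, so this just illustrates plausible channel bookkeeping for fusing a low-level and a high-level feature map; the concrete numbers and the 'batchnorm' value are assumptions.

# Hypothetical configuration; forward presumably takes (low_feats, high_feats)
# and concatenates the attention output with high_feats before
# conv_bn_dropout, but that code is not part of this snippet.
fusion = AFNB(low_in_channels=1024, high_in_channels=2048, out_channels=2048,
              key_channels=256, value_channels=256, dropout=0.05,
              norm_type='batchnorm')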
Example #4
    def build_conv_block(self, dim, padding_type, norm_type, use_dropout,
                         use_bias):
        """Construct a convolutional block.
        Parameters:
            dim (int)           -- the number of channels in the conv layer.
            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
            norm_type (str)     -- the type of normalization layer
            use_dropout (bool)  -- whether to use dropout layers
            use_bias (bool)     -- whether the conv layer uses bias
        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
        """
        conv_block = []
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(dim),
            nn.ReLU(True)
        ]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)
        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(dim)
        ]

        return nn.Sequential(*conv_block)
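The returned nn.Sequential preserves both channel count and spatial size (pad-by-1 followed by a 3x3 conv, twice), which is what makes a residual addition in ResnetBlock.forward well-typed. A standalone sketch of the 'reflect' path, with nn.BatchNorm2d standing in for ModuleHelper (an assumption):

import torch
import torch.nn as nn

conv_block = nn.Sequential(
    nn.ReflectionPad2d(1),
    nn.Conv2d(256, 256, kernel_size=3, padding=0, bias=False),
    nn.BatchNorm2d(256),
    nn.ReLU(True),
    nn.ReflectionPad2d(1),
    nn.Conv2d(256, 256, kernel_size=3, padding=0, bias=False),
    nn.BatchNorm2d(256),
)
x = torch.randn(1, 256, 64, 64)
out = x + conv_block(x)   # residual add; shape stays (1, 256, 64, 64)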
Example #5
    def __init__(self, num_input_features, num_output_features, norm_type):
        super(_Transition, self).__init__()
        # 1x1 conv changes the channel count between dense blocks.
        self.add_module(
            'conv',
            nn.Conv2d(num_input_features,
                      num_output_features,
                      kernel_size=1,
                      stride=1,
                      bias=False))
        self.add_module(
            'norm',
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(
                num_features=num_output_features))
        self.add_module('relu', nn.ReLU(inplace=False))
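A shape sketch, assuming _Transition subclasses nn.Sequential (as DenseNet transition layers typically do) so that calling it applies conv, norm, and relu in order; 'batchnorm' is an assumed norm_type value.

import torch

trans = _Transition(num_input_features=512, num_output_features=256,
                    norm_type='batchnorm')
x = torch.randn(1, 512, 16, 16)
y = trans(x)   # the 1x1 conv only changes channels: -> (1, 256, 16, 16)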
Example #6
    def __init__(self, input_nc, ndf=64, norm_type=None):
        """Construct a 1x1 PatchGAN discriminator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            norm_type (str) -- the type of normalization layer
        """
        super(PixelDiscriminator, self).__init__()
        use_bias = (norm_type == 'instancenorm')

        self.net = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]

        self.net = nn.Sequential(*self.net)
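Because every layer is a 1x1 conv, the prediction map keeps the input's spatial resolution exactly. A sketch, calling self.net directly since no forward is shown ('batchnorm' assumed):

import torch

netD = PixelDiscriminator(input_nc=3, ndf=64, norm_type='batchnorm')
x = torch.randn(1, 3, 256, 256)
pred = netD.net(x)   # per-pixel real/fake scores: (1, 1, 256, 256)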
Example #7
    def __init__(self,
                 outer_nc,
                 inner_nc,
                 input_nc=None,
                 submodule=None,
                 outermost=False,
                 innermost=False,
                 norm_type=None,
                 use_dropout=False):
        """Construct a Unet submodule with skip connections.
        Parameters:
            outer_nc (int) -- the number of filters in the outer conv layer
            inner_nc (int) -- the number of filters in the inner conv layer
            input_nc (int) -- the number of channels in input images/features
            submodule (UnetSkipConnectionBlock) -- previously defined submodules
            outermost (bool)    -- if this module is the outermost module
            innermost (bool)    -- if this module is the innermost module
            norm_type (str)     -- the type of normalization layer
            use_dropout (bool)  -- whether to use dropout layers
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        use_bias = (norm_type == 'instancenorm')
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc,
                             inner_nc,
                             kernel_size=4,
                             stride=2,
                             padding=1,
                             bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = ModuleHelper.BatchNorm2d(norm_type=norm_type)(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = ModuleHelper.BatchNorm2d(norm_type=norm_type)(outer_nc)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1,
                                        bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1,
                                        bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)
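The block is designed to be nested recursively, innermost first. A construction sketch; forward is not shown above, but in pix2pix-style code non-outermost blocks return torch.cat([x, self.model(x)], 1), which is why the non-innermost upconvs take inner_nc * 2 input channels ('batchnorm' assumed):

ngf = 64
# innermost: ngf*2 -> ngf*4 down, back up to ngf*2; the skip cat -> ngf*4
block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, innermost=True,
                                norm_type='batchnorm')
# middle: wraps the innermost block; its skip cat doubles channels again
block = UnetSkipConnectionBlock(ngf, ngf * 2, submodule=block,
                                norm_type='batchnorm')
# outermost: RGB in, RGB out, ends with Tanh instead of a norm layer
net = UnetSkipConnectionBlock(3, ngf, input_nc=3, submodule=block,
                              outermost=True, norm_type='batchnorm')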
Example #8
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 norm_type=None,
                 use_dropout=False,
                 n_blocks=6,
                 padding_type='reflect'):
        """Construct a Resnet-based generator
        Parameters:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_type (str)     -- the type of normalization layer
            use_dropout (bool)  -- whether to use dropout layers
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert n_blocks >= 0
        super(ResNetGenerator, self).__init__()
        use_bias = (norm_type == 'instancenorm')

        model = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
            ModuleHelper.BatchNorm2d(norm_type=norm_type)(ngf),
            nn.ReLU(True)
        ]

        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2**i
            model += [
                nn.Conv2d(ngf * mult,
                          ngf * mult * 2,
                          kernel_size=3,
                          stride=2,
                          padding=1,
                          bias=use_bias),
                ModuleHelper.BatchNorm2d(norm_type=norm_type)(ngf * mult * 2),
                nn.ReLU(True)
            ]

        mult = 2**n_downsampling
        for i in range(n_blocks):  # add ResNet blocks

            model += [
                ResnetBlock(ngf * mult,
                            padding_type=padding_type,
                            norm_type=norm_type,
                            use_dropout=use_dropout,
                            use_bias=use_bias)
            ]

        for i in range(n_downsampling):  # add upsampling layers
            mult = 2**(n_downsampling - i)
            model += [
                nn.ConvTranspose2d(ngf * mult,
                                   int(ngf * mult / 2),
                                   kernel_size=3,
                                   stride=2,
                                   padding=1,
                                   output_padding=1,
                                   bias=use_bias),
                ModuleHelper.BatchNorm2d(norm_type=norm_type)(
                    int(ngf * mult / 2)),
                nn.ReLU(True)
            ]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]

        self.model = nn.Sequential(*model)
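End to end, the generator is shape-preserving: the two stride-2 downsamplings are undone by the two stride-2 transposed convs. A sketch, calling self.model directly since no forward is shown ('batchnorm' assumed):

import torch

netG = ResNetGenerator(input_nc=3, output_nc=3, ngf=64,
                       norm_type='batchnorm', n_blocks=6)
x = torch.randn(1, 3, 256, 256)
y = netG.model(x)   # Tanh output in [-1, 1], shape (1, 3, 256, 256)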
Example #9
    def _make_stage(self, features, out_features, size, norm_type):
        # Pool to a fixed (size x size) grid, then project with a 1x1 conv.
        prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
        conv = nn.Conv2d(features, out_features, kernel_size=1, bias=False)
        bn = ModuleHelper.BatchNorm2d(norm_type=norm_type)(out_features)
        return nn.Sequential(prior, conv, bn)
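Each stage pools the features to a fixed grid before the 1x1 projection, so callers typically upsample the result back to the input resolution. A standalone sketch of one size-1 stage, with nn.BatchNorm2d standing in for ModuleHelper (an assumption):

import torch
import torch.nn as nn
import torch.nn.functional as F

stage = nn.Sequential(
    nn.AdaptiveAvgPool2d(output_size=(1, 1)),
    nn.Conv2d(2048, 512, kernel_size=1, bias=False),
    nn.BatchNorm2d(512),
)
x = torch.randn(2, 2048, 33, 33)   # batch of 2 so BatchNorm has statistics
pooled = stage(x)                  # -> (2, 512, 1, 1)
ctx = F.interpolate(pooled, size=x.shape[2:],
                    mode='bilinear', align_corners=True)   # back to 33x33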