Example 1. Discriminator.__init__: stacks stride-2 convolution blocks and ends with a single-channel convolution that produces a patch-level prediction map.
 def __init__(self,
              in_planes=3,
              ndf=64,
              n_layers=3,
              alpha=0.2,
              norm_mode='batch'):
     super(Discriminator, self).__init__()
     kernel_size = 4
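     # First block: stride-2 convolution from in_planes to ndf channels,
     # followed by LeakyReLU; no normalization on the first layer.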
     layer_list = [
         layers.Conv2d(in_planes,
                       ndf,
                       kernel_size,
                       2,
                       pad_mode='pad',
                       padding=1),
         layers.LeakyReLU(alpha)
     ]
     nf_mult = ndf
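     # Intermediate blocks: stride-2 ConvNormReLU layers whose channel count
     # doubles at each step, capped at 8 * ndf.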
     for i in range(1, n_layers):
         nf_mult_prev = nf_mult
         nf_mult = min(2**i, 8) * ndf
         layer_list.append(
             ConvNormReLU(nf_mult_prev,
                          nf_mult,
                          kernel_size,
                          2,
                          alpha,
                          norm_mode,
                          padding=1))
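     # One more ConvNormReLU block at stride 1, then a final convolution down
     # to a single channel that produces the patch-level output.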
     nf_mult_prev = nf_mult
     nf_mult = min(2**n_layers, 8) * ndf
     layer_list.append(
         ConvNormReLU(nf_mult_prev,
                      nf_mult,
                      kernel_size,
                      1,
                      alpha,
                      norm_mode,
                      padding=1))
     layer_list.append(
         layers.Conv2d(nf_mult,
                       1,
                       kernel_size,
                       1,
                       pad_mode='pad',
                       padding=1))
     self.features = layers.SequentialLayer(layer_list)
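The excerpt stops at __init__; the forward pass is not part of the example. A minimal sketch of what it could look like, assuming a MindSpore-style cell whose computation is defined in a construct method (the method name and the callability of self.features are assumptions, not shown above):

 def construct(self, x):
     # Hypothetical forward pass: run the input through the stacked
     # convolutions and return the prediction map.
     return self.features(x)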
Example 2. ConvTransposeNormReLU.__init__: a transposed convolution followed by normalization and an optional (Leaky)ReLU, with an explicit Pad layer when pad_mode is not 'CONSTANT'.
 def __init__(self,
              in_planes,
              out_planes,
              kernel_size=4,
              stride=2,
              alpha=0.2,
              norm_mode='batch',
              pad_mode='CONSTANT',
              use_relu=True,
              padding=None):
     super(ConvTransposeNormReLU, self).__init__()
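     # Defaults: the conv is rebuilt below once the padding strategy is known,
     # and the norm is swapped out when instance normalization is requested.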
     conv = layers.Conv2dTranspose(in_planes,
                                   out_planes,
                                   kernel_size,
                                   stride=stride,
                                   pad_mode='same')
     norm = layers.BatchNorm2d(out_planes)
     if norm_mode == 'instance':
         # Use BatchNorm2d with batchsize=1, affine=False, training=True instead of InstanceNorm2d
         norm = layers.BatchNorm2d(out_planes, affine=False)
     has_bias = (norm_mode == 'instance')
     if padding is None:
         padding = (kernel_size - 1) // 2
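     # With 'CONSTANT' padding the transposed conv pads itself (pad_mode='same');
     # any other mode gets an explicit Pad layer in front of a conv in 'pad' mode.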
     if pad_mode == 'CONSTANT':
         conv = layers.Conv2dTranspose(in_planes,
                                       out_planes,
                                       kernel_size,
                                       stride,
                                       pad_mode='same',
                                       has_bias=has_bias)
         layer_list = [conv, norm]
     else:
         paddings = ((0, 0), (0, 0), (padding, padding), (padding, padding))
         pad = layers.Pad(paddings=paddings, mode=pad_mode)
         conv = layers.Conv2dTranspose(in_planes,
                                       out_planes,
                                       kernel_size,
                                       stride,
                                       pad_mode='pad',
                                       has_bias=has_bias)
         layer_list = [pad, conv, norm]
     if use_relu:
         relu = layers.ReLU()
         if alpha > 0:
             relu = layers.LeakyReLU(alpha)
         layer_list.append(relu)
     self.features = layers.SequentialLayer(layer_list)
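As in Example 1, the forward pass is not shown. A minimal sketch under the same assumption (a construct method that simply applies the assembled SequentialLayer):

 def construct(self, x):
     # Hypothetical forward pass: apply padding (if any), the transposed
     # convolution, normalization and activation in sequence.
     return self.features(x)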
Example 3. UnetSkipConnectionBlock.__init__: one level of a U-Net that wraps an inner submodule between a downsampling convolution and an upsampling transposed convolution; every block except the outermost adds a skip connection.
    def __init__(self, outer_nc, inner_nc, in_planes=None, dropout=False,
                 submodule=None, outermost=False, innermost=False, alpha=0.2, norm_mode='batch'):
        super(UnetSkipConnectionBlock, self).__init__()
        downnorm = layers.BatchNorm2d(inner_nc)
        upnorm = layers.BatchNorm2d(outer_nc)
        use_bias = False
        if norm_mode == 'instance':
            downnorm = layers.BatchNorm2d(inner_nc, affine=False)
            upnorm = layers.BatchNorm2d(outer_nc, affine=False)
            use_bias = True
        if in_planes is None:
            in_planes = outer_nc
        downconv = layers.Conv2d(in_planes, inner_nc, kernel_size=4,
                                 stride=2, padding=1, has_bias=use_bias, pad_mode='pad')
        downrelu = layers.LeakyReLU(alpha)
        uprelu = layers.ReLU()

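        # Three cases: the outermost block (bare downconv on the way down, Tanh
        # on the output), the innermost block (no submodule), and intermediate
        # blocks (down/up normalization plus optional dropout).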
        if outermost:
            upconv = layers.Conv2dTranspose(inner_nc * 2, outer_nc,
                                            kernel_size=4, stride=2,
                                            padding=1, pad_mode='pad')
            down = [downconv]
            up = [uprelu, upconv, layers.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = layers.Conv2dTranspose(inner_nc, outer_nc,
                                            kernel_size=4, stride=2,
                                            padding=1, has_bias=use_bias, pad_mode='pad')
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = layers.Conv2dTranspose(inner_nc * 2, outer_nc,
                                            kernel_size=4, stride=2,
                                            padding=1, has_bias=use_bias, pad_mode='pad')
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            model = down + [submodule] + up
            if dropout:
                model.append(layers.Dropout(0.5))

        self.model = layers.SequentialLayer(model)
        self.skip_connections = not outermost
        self.concat = Concat(axis=1)
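self.skip_connections and self.concat are created here but never used in the excerpt; they only matter in the forward pass, which is not shown. A sketch of the behaviour they imply, again assuming a construct method (an assumption, not part of the example):

    def construct(self, x):
        out = self.model(x)
        if self.skip_connections:
            # Hypothetical skip connection: concatenate the block's input with
            # its output along the channel axis.
            out = self.concat((out, x))
        return out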