def __init__(self, args, normalize=False, gpu_ids=[]):
     super(DCGAN_Encoder, self).__init__()
     self.args = args
     self.normalize = normalize
     self.gpu_ids = gpu_ids
     norm_layer = get_norm_layer(args.norm_type)
     n_downsampling = int(math.log2(args.width)) - 3
     layer = list()
     layer += [
         ConvBlock(ch_in=args.ch_in, ch_out=args.ngf, norm_layer=norm_layer)
     ]
     for i in range(n_downsampling):
         layer += [
             ConvBlock(ch_in=args.ngf * (2**i),
                       ch_out=args.ngf * (2**(i + 1)),
                       norm_layer=norm_layer)
         ]
     layer += [
         ConvBlock(ch_in=args.ngf * (2**n_downsampling),
                   ch_out=args.pose_dim,
                   kernel_size=4,
                   stride=1,
                   padding=0,
                   bias=False,
                   norm_layer=norm_layer,
                   activation_fn=nn.Tanh())
     ]
     self.layer = nn.Sequential(*layer)
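A quick sanity check of the n_downsampling formula above (not part of the original example): each stride-2 block halves the spatial size, and the final 4x4 / stride-1 / padding-0 convolution collapses a 4x4 map to the 1x1 pose vector. The sketch assumes ConvBlock defaults to a stride-2, size-halving convolution.

import math

for width in (32, 64, 128):
    n_downsampling = int(math.log2(width)) - 3
    size = width // 2                # first ConvBlock (assumed stride 2)
    for _ in range(n_downsampling):
        size //= 2                   # each extra ConvBlock halves the map
    print(width, n_downsampling, size)  # size is 4 before the final conv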

 def __init__(self, args, gpu_ids=[]):
     super(DCGAN_Decoder, self).__init__()
     self.args = args
     self.gpu_ids = gpu_ids
     norm_layer = get_norm_layer(args.norm_type)
     n_upsampling = int(math.log2(args.width)) - 3
     layer = list()
     layer += [
         ConvTransBlock(ch_in=(args.content_dim + args.pose_dim),
                        ch_out=args.ngf * (2**n_upsampling),
                        kernel_size=4,
                        stride=1,
                        padding=0,
                        bias=False,
                        norm_layer=norm_layer)
     ]
     for i in range(n_upsampling, 0, -1):
         layer += [
             ConvTransBlock(ch_in=args.ngf * (2**i),
                            ch_out=args.ngf * (2**(i - 1)),
                            norm_layer=norm_layer)
         ]
     layer += [
         ConvTransBlock(ch_in=args.ngf,
                        ch_out=args.ch_out,
                        activation_fn=nn.Sigmoid())
     ]
     self.layer = nn.Sequential(*layer)
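The decoder mirrors the encoder: a 4x4 / stride-1 / padding-0 transposed convolution expands the 1x1 latent (content plus pose) to 4x4, and every later block doubles the spatial size. A standalone shape check with placeholder channel counts, assuming ConvTransBlock defaults to kernel 4 / stride 2 / padding 1:

import torch
import torch.nn as nn

z = torch.randn(1, 16, 1, 1)                         # placeholder latent
print(nn.ConvTranspose2d(16, 8, 4, 1, 0)(z).shape)   # torch.Size([1, 8, 4, 4])
x = torch.randn(1, 8, 4, 4)
print(nn.ConvTranspose2d(8, 4, 4, 2, 1)(x).shape)    # torch.Size([1, 4, 8, 8])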
Example #3
    def __init__(self, args, gpu_ids=[], n_layers=3):
        super(NLayerD, self).__init__()
        self.args = args
        self.gpu_ids = gpu_ids
        norm_layer = get_norm_layer(self.args.norm_type)
        # use_bias depends on the normalization type
        if norm_layer == nn.InstanceNorm2d:
            use_bias = True
        else:
            use_bias = False

        conv_layer = nn.Conv2d(in_channels=args.ch_in,
                               out_channels=args.ndf,
                               kernel_size=4,
                               stride=2,
                               padding=1)
        layers = [conv_layer, nn.LeakyReLU(0.2, True)]

        nf_mult_prev = 1
        nf_mult = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            conv_layer = nn.Conv2d(in_channels=args.ndf * nf_mult_prev,
                                   out_channels=args.ndf * nf_mult,
                                   kernel_size=4,
                                   stride=2,
                                   padding=1,
                                   bias=use_bias)
            layers += [
                conv_layer,
                norm_layer(args.ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        conv_layer = nn.Conv2d(in_channels=args.ndf * nf_mult_prev,
                               out_channels=args.ndf * nf_mult,
                               kernel_size=4,
                               stride=1,
                               padding=1,
                               bias=use_bias)
        final_layer = nn.Conv2d(in_channels=args.ndf * nf_mult,
                                out_channels=1,
                                kernel_size=4,
                                stride=1,
                                padding=1)
        layers += [
            conv_layer,
            norm_layer(args.ndf * nf_mult),
            nn.LeakyReLU(0.2, True), final_layer
        ]
        if args.use_sigmoid:
            layers += [nn.Sigmoid()]
        self.model = nn.Sequential(*layers)
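This is the usual 3-layer PatchGAN discriminator: each output unit scores one overlapping patch of the input rather than the whole image. The following standalone sketch reproduces the layer shapes with hypothetical ndf/ch_in values, using BatchNorm2d as a stand-in for whatever get_norm_layer returns:

import torch
import torch.nn as nn

ndf, ch_in, n_layers = 64, 3, 3
layers = [nn.Conv2d(ch_in, ndf, 4, 2, 1), nn.LeakyReLU(0.2, True)]
nf_mult = 1
for n in range(1, n_layers):
    nf_mult_prev, nf_mult = nf_mult, min(2**n, 8)
    layers += [nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, 4, 2, 1),
               nn.BatchNorm2d(ndf * nf_mult),
               nn.LeakyReLU(0.2, True)]
nf_mult_prev, nf_mult = nf_mult, min(2**n_layers, 8)
layers += [nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, 4, 1, 1),
           nn.BatchNorm2d(ndf * nf_mult),
           nn.LeakyReLU(0.2, True),
           nn.Conv2d(ndf * nf_mult, 1, 4, 1, 1)]
netD = nn.Sequential(*layers)
print(netD(torch.randn(1, ch_in, 256, 256)).shape)  # torch.Size([1, 1, 30, 30])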
Example #4
def generator(args, gpu_ids=[]):
    norm_layer = get_norm_layer(args.norm_type)
    if args.modelG == 'resnet_9blocks' or args.modelG == 'resnet_6blocks':
        netG = ResnetG(args, gpu_ids=gpu_ids)
    elif args.modelG == 'UnetG':
        netG = UnetG(args, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError(
            '{} is not an appropriate modelG name'.format(args.modelG))
    if len(gpu_ids) > 0:
        netG = netG.cuda(gpu_ids[0])
    netG.apply(initialize_weights)
    return netG
Example #5
def discriminator(args, gpu_ids=[]):
    norm_layer = get_norm_layer(args.norm_type)
    if args.modelD == 'n_layer':
        netD = NLayerD(args, gpu_ids=gpu_ids, n_layers=3)
    elif args.modelD == 'pixel':
        netD = PixelD(args, gpu_ids)
    else:
        raise NotImplementedError(
            '{} is not an appropriate modelD name'.format(args.modelD))
    if len(gpu_ids) > 0:
        netD = netD.cuda(gpu_ids[0])
    netD.apply(initialize_weights)
    return netD
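A hypothetical usage sketch for the two factories above, assuming they and their dependencies (ResnetG, NLayerD, initialize_weights, ...) are importable from the same module. The Namespace fields are illustrative values, not the repository's defaults:

from argparse import Namespace

args = Namespace(modelG='resnet_9blocks', modelD='n_layer',
                 norm_type='instance', ch_in=3, ch_out=3,
                 ngf=64, ndf=64, padding_type='reflect',
                 use_dropout=False, use_sigmoid=False)
netG = generator(args, gpu_ids=[])       # empty gpu_ids keeps the model on CPU
netD = discriminator(args, gpu_ids=[])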
Example #6
 def __init__(self, args, num_downsamples=7, gpu_ids=[]):
     super(UnetG, self).__init__()
     norm_layer = get_norm_layer(args.norm_type)
     # use_bias depends on the normalization type
     if norm_layer == nn.InstanceNorm2d:
         use_bias = True
     else:
         use_bias = False
     ### Start from innermost
     unet_block = UnetSkipConnectionBlock(ch_input=args.ngf * 8,
                                          ch_inner=args.ngf * 8,
                                          submodule=None,
                                          innermost=True,
                                          norm_layer=norm_layer)
     for i in range(num_downsamples - 5):
         unet_block = UnetSkipConnectionBlock(ch_input=args.ngf * 8,
                                              ch_inner=args.ngf * 8,
                                              submodule=unet_block,
                                              norm_layer=norm_layer,
                                              use_dropout=args.use_dropout)
     unet_block = UnetSkipConnectionBlock(ch_input=args.ngf * 4,
                                          ch_inner=args.ngf * 8,
                                          submodule=unet_block,
                                          norm_layer=norm_layer)
     unet_block = UnetSkipConnectionBlock(ch_input=args.ngf * 2,
                                          ch_inner=args.ngf * 4,
                                          submodule=unet_block,
                                          norm_layer=norm_layer)
     unet_block = UnetSkipConnectionBlock(ch_input=args.ngf,
                                          ch_inner=args.ngf * 2,
                                          submodule=unet_block,
                                          norm_layer=norm_layer)
     unet_block = UnetSkipConnectionBlock(ch_input=args.ch_in,
                                          ch_inner=args.ngf,
                                          submodule=unet_block,
                                          outermost=True,
                                          norm_layer=norm_layer)
     self.model = unet_block
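A quick count (not from the repository) of the downsampling stages built above: 1 innermost + (num_downsamples - 5) middle + 3 named + 1 outermost block. Assuming each UnetSkipConnectionBlock halves the spatial size on the way down, an input that is 2**num_downsamples pixels wide (128 for the default of 7) reaches 1x1 at the bottleneck.

num_downsamples = 7
n_blocks = 1 + (num_downsamples - 5) + 3 + 1  # innermost + middle + named + outermost
print(n_blocks, 2**num_downsamples)           # 7 128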
Example #7
 def __init__(self, args, gpu_ids=[]):
     super(PixelD, self).__init__()
     self.args = args
     self.gpu_ids = gpu_ids
     norm_layer = get_norm_layer(self.args.norm_type)
     # use_bias depends on the normalization type
     if norm_layer == nn.InstanceNorm2d:
         use_bias = True
     else:
         use_bias = False
     conv_layer1 = nn.Conv2d(in_channels=args.ch_in,
                             out_channels=args.ndf,
                             kernel_size=1,
                             stride=1,
                             padding=0)
     conv_layer2 = nn.Conv2d(in_channels=args.ndf,
                             out_channels=args.ndf * 2,
                             kernel_size=1,
                             stride=1,
                             padding=0,
                             bias=use_bias)
     conv_layer3 = nn.Conv2d(in_channels=args.ndf * 2,
                             out_channels=1,
                             kernel_size=1,
                             stride=1,
                             padding=0,
                             bias=use_bias)
     layers = [
         conv_layer1,
         nn.LeakyReLU(0.2, True), conv_layer2,
         norm_layer(args.ndf * 2),
         nn.LeakyReLU(0.2, True), conv_layer3
     ]
     if args.use_sigmoid:
         layers += [nn.Sigmoid()]
     self.model = nn.Sequential(*layers)
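Because every convolution here is 1x1 with stride 1 and no padding, PixelD classifies each pixel independently and preserves the spatial size of its input. A standalone shape check with placeholder channel counts (BatchNorm2d standing in for the configured norm layer):

import torch
import torch.nn as nn

ndf, ch_in = 64, 3
pixel_d = nn.Sequential(
    nn.Conv2d(ch_in, ndf, 1, 1, 0), nn.LeakyReLU(0.2, True),
    nn.Conv2d(ndf, ndf * 2, 1, 1, 0), nn.BatchNorm2d(ndf * 2),
    nn.LeakyReLU(0.2, True),
    nn.Conv2d(ndf * 2, 1, 1, 1, 0))
print(pixel_d(torch.randn(1, ch_in, 70, 70)).shape)  # torch.Size([1, 1, 70, 70])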
Example #8
 def __init__(self, args, gpu_ids=[]):
     super(ResnetG, self).__init__()
     self.args = args
     self.gpu_ids = gpu_ids
     norm_layer = get_norm_layer(self.args.norm_type)
     # use_bias depends on the normalization type
     if norm_layer == nn.InstanceNorm2d:
         use_bias = True
     else:
         use_bias = False
     # Type of Generator
     if args.modelG == 'resnet_9blocks':
         n_blocks = 9
     elif args.modelG == 'resnet_6blocks':
         n_blocks = 6
     else:
         raise NotImplementedError(
             '{} is not an appropriate modelG name'.format(args.modelG))
     assert (n_blocks >= 0)
     # Construct the model
     n_downsampling = 2
     mult = 2**n_downsampling
     # First Module
     conv_layer = nn.Conv2d(in_channels=args.ch_in,
                            out_channels=args.ngf,
                            kernel_size=7,
                            padding=0,
                            bias=use_bias)
     layers = [
         nn.ReflectionPad2d(3), conv_layer,
         norm_layer(args.ngf, affine=True),
         nn.ReLU(True)
     ]
     # Downsamples with conv layers
     for i in range(n_downsampling):
         mult = 2**i
         conv_layer = nn.Conv2d(in_channels=args.ngf * mult,
                                out_channels=args.ngf * mult * 2,
                                kernel_size=3,
                                stride=2,
                                padding=1,
                                bias=use_bias)
         layers += [
             conv_layer,
             norm_layer(args.ngf * mult * 2, affine=True),
             nn.ReLU(True)
         ]
     # Construct Resnet blocks
     for i in range(n_blocks):
         resnet_block = ResnetBlock(dim=args.ngf * mult * 2,
                                    padding_type=args.padding_type,
                                    norm_layer=norm_layer,
                                    use_dropout=args.use_dropout,
                                    use_bias=use_bias)
         layers += [resnet_block]
     # Construct TransposeConv layers
     for i in range(n_downsampling):
         mult = 2**(n_downsampling - i)
         trans_layer = nn.ConvTranspose2d(in_channels=args.ngf * mult,
                                          out_channels=int(args.ngf * mult /
                                                           2),
                                          kernel_size=3,
                                          stride=2,
                                          padding=1,
                                          output_padding=1,
                                          bias=use_bias)
         layers += [
             trans_layer,
             norm_layer(int(args.ngf * mult / 2)),
             nn.ReLU(True)
         ]
     layers += [nn.ReflectionPad2d(3)]
     layers += [nn.Conv2d(args.ngf, args.ch_out, kernel_size=7, padding=0)]
     layers += [nn.Tanh()]
     self.model = nn.Sequential(*layers)
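One detail worth noting in the first and last stages above: ReflectionPad2d(3) followed by a 7x7 / padding-0 convolution leaves the spatial size unchanged, so only the two stride-2 stages (and their matching transposed convolutions) change resolution. A standalone check with placeholder channel counts:

import torch
import torch.nn as nn

x = torch.randn(1, 3, 64, 64)
y = nn.Conv2d(3, 8, kernel_size=7, padding=0)(nn.ReflectionPad2d(3)(x))
print(y.shape)  # torch.Size([1, 8, 64, 64])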