Example #1
import torch.nn as nn

# TwoInputSequential and a conditional norm layer (e.g. CondInstanceNorm) must
# be provided by the surrounding project for this module to run.
class CINDiscriminator(nn.Module):
    def __init__(self,
                 nlatent,
                 input_nc,
                 ndf=64,
                 norm_layer=nn.BatchNorm2d,
                 use_sigmoid=False,
                 gpu_ids=None):
        """
        nlatent: number of channles in both latent codes (or one of them - depending on the model)
        input_nc: number of channels in input and output (assumes both inputs are concatenated)
        """
        super(CINDiscriminator, self).__init__()
        # Guard against the mutable-default-argument pitfall.
        self.gpu_ids = gpu_ids if gpu_ids is not None else []

        use_bias = True

        kw = 4  # 4x4 kernels throughout, PatchGAN-style
        sequence = [
            # First downsampling layer; by convention no norm is applied here.
            nn.Conv2d(input_nc,
                      ndf,
                      kernel_size=kw,
                      stride=2,
                      padding=1,
                      bias=use_bias),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf,
                      2 * ndf,
                      kernel_size=kw,
                      stride=2,
                      padding=1,
                      bias=use_bias),
            # norm_layer is called as norm_layer(num_features, nlatent), so it
            # must be a conditional norm such as CondInstanceNorm; the default
            # nn.BatchNorm2d would silently interpret nlatent as its eps.
            norm_layer(2 * ndf, nlatent),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(2 * ndf,
                      4 * ndf,
                      kernel_size=kw,
                      stride=1,
                      padding=1,
                      bias=use_bias),
            norm_layer(4 * ndf, nlatent),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(4 * ndf,
                      5 * ndf,
                      kernel_size=kw,
                      stride=1,
                      padding=1,
                      bias=use_bias),
            norm_layer(5 * ndf, nlatent),
            nn.LeakyReLU(0.2, True),
            # Final layer maps to a one-channel patch "realness" map.
            nn.Conv2d(5 * ndf, 1, kernel_size=kw, stride=1, padding=1)
        ]

        if use_sigmoid:
            sequence += [nn.Sigmoid()]

        self.model = TwoInputSequential(*sequence)
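
Both examples hand their layer list to a project-specific TwoInputSequential, which has to thread the latent code z alongside the activations. Its implementation is not shown on this page; the following is a minimal sketch, assuming that conditional layers take (x, z) while ordinary layers take only x, and that dispatching on the arity of forward is an acceptable stand-in for whatever marker the real container uses.

import inspect

import torch
import torch.nn as nn


class TwoInputSequential(nn.Module):
    """Sequential container that threads a latent code z to layers that accept it."""

    def __init__(self, *layers):
        super(TwoInputSequential, self).__init__()
        self.layers = nn.ModuleList(layers)

    def forward(self, x, z):
        for layer in self.layers:
            # Hypothetical dispatch: pass z only to layers whose forward
            # takes a second argument (e.g. a conditional instance norm).
            n_args = len(inspect.signature(layer.forward).parameters)
            x = layer(x, z) if n_args > 1 else layer(x)
        return x


# Smoke test with plain (unconditional) layers; shapes are illustrative only.
net = TwoInputSequential(nn.Conv2d(3, 8, kernel_size=3, padding=1),
                         nn.LeakyReLU(0.2, True))
out = net(torch.randn(1, 3, 32, 32), torch.randn(1, 16, 1, 1))
print(out.shape)  # torch.Size([1, 8, 32, 32])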
Example #2
import functools

import torch.nn as nn

# CINResnetBlock, CondInstanceNorm, InstanceNorm2d and TwoInputSequential are
# provided by the surrounding project.
class CINResnetGenerator(nn.Module):
    def __init__(self, nlatent, input_nc, output_nc, ngf=64, norm_layer=CondInstanceNorm,
                 use_dropout=False, n_blocks=9, gpu_ids=None, padding_type='reflect'):
        assert n_blocks >= 0
        super(CINResnetGenerator, self).__init__()
        # Guard against the mutable-default-argument pitfall.
        self.gpu_ids = gpu_ids if gpu_ids is not None else []

        instance_norm = functools.partial(InstanceNorm2d, affine=True)  # note: unused below

        # Encoder: a 7x7 stem followed by two stride-2 downsampling convs,
        # each conditioned on the latent code through norm_layer.
        model = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, stride=1, bias=True),
            norm_layer(ngf, nlatent),
            nn.ReLU(True),

            nn.Conv2d(ngf, 2*ngf, kernel_size=3, padding=1, stride=2, bias=True),
            norm_layer(2*ngf, nlatent),
            nn.ReLU(True),

            nn.Conv2d(2*ngf, 4*ngf, kernel_size=3, padding=1, stride=2, bias=True),
            norm_layer(4*ngf, nlatent),
            nn.ReLU(True)
        ]
        
        # Bottleneck: n_blocks conditional residual blocks at 4*ngf channels.
        for _ in range(n_blocks):
            model += [CINResnetBlock(x_dim=4*ngf, z_dim=nlatent, padding_type=padding_type,
                                     norm_layer=norm_layer, use_dropout=use_dropout, use_bias=True)]

        # Decoder: two stride-2 transposed convs mirror the encoder, then a
        # 7x7 conv and Tanh produce the output image at the input resolution.
        model += [
            nn.ConvTranspose2d(4*ngf, 2*ngf,
                               kernel_size=3, stride=2,
                               padding=1, output_padding=1,
                               bias=True),
            norm_layer(2*ngf, nlatent),
            nn.ReLU(True),

            nn.ConvTranspose2d(2*ngf, ngf, kernel_size=3, stride=2, padding=1, output_padding=1, bias=True),
            norm_layer(ngf, nlatent),
            nn.ReLU(True),

            nn.ReflectionPad2d(3),
            nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
            nn.Tanh()
        ]

        self.model = TwoInputSequential(*model)
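
For orientation, here is a hedged usage sketch. The latent-code shape (N, nlatent, 1, 1) and the call through .model are assumptions inferred from the constructors above, not confirmed by this page; CondInstanceNorm comes from the surrounding project.

import torch

# Hypothetical instantiation; all argument values are illustrative.
netG = CINResnetGenerator(nlatent=16, input_nc=3, output_nc=3,
                          norm_layer=CondInstanceNorm, n_blocks=9)
img = torch.randn(1, 3, 256, 256)  # input image batch
z = torch.randn(1, 16, 1, 1)       # latent code (assumed shape)
fake = netG.model(img, z)          # Tanh output in [-1, 1]
print(fake.shape)                  # torch.Size([1, 3, 256, 256])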