    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        nf = opt.ngf

        # spatial size of the coarsest feature map (helper method defined elsewhere in the class)
        self.sw, self.sh = self.compute_latent_vector_size(opt)

        # conditioning channels: 3 for the warped image, semantic_nc for the segmentation mask
        ic = ((3 if 'warp' in self.opt.CBN_intype else 0)
              + (self.opt.semantic_nc if 'mask' in self.opt.CBN_intype else 0))
        self.fc = nn.Conv2d(ic, 16 * nf, 3, padding=1)
        if opt.eqlr_sn:
            self.fc = equal_lr(self.fc)

        # SPADE residual blocks at the coarsest resolution
        self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)

        self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)

        # upsampling path: channel count shrinks from 16 * nf down to nf
        self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
        self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
        if opt.use_attention:
            self.attn = Attention(4 * nf, 'spectral' in opt.norm_G)
        self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
        self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)

        final_nc = nf

        # final projection to RGB and the shared nearest-neighbour x2 upsampler
        self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
        self.up = nn.Upsample(scale_factor=2)
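
A hedged sketch of how SPADE-style generators typically chain the blocks built above (not the repository's actual forward): the conditioning tensor is resized to the latent grid computed in __init__, projected by fc, and refined coarse-to-fine with the shared upsampler between stages. The function name and the cond argument (a tensor with the ic channels assumed by fc) are illustrative.

import torch
import torch.nn.functional as F

def generator_forward_sketch(gen, cond):
    # shrink the conditioning input to the coarsest grid and project to 16 * nf channels
    x = F.interpolate(cond, size=(gen.sh, gen.sw))
    x = gen.fc(x)
    # coarse-resolution SPADE blocks
    x = gen.head_0(x, cond)
    x = gen.up(x)
    x = gen.G_middle_0(x, cond)
    x = gen.G_middle_1(x, cond)
    # upsampling path, halving the channel count at each stage
    x = gen.up(x)
    x = gen.up_0(x, cond)
    x = gen.up(x)
    x = gen.up_1(x, cond)
    if gen.opt.use_attention:
        x = gen.attn(x)
    x = gen.up(x)
    x = gen.up_2(x, cond)
    x = gen.up(x)
    x = gen.up_3(x, cond)
    # final RGB projection, squashed to [-1, 1]
    x = gen.conv_img(F.leaky_relu(x, 2e-1))
    return torch.tanh(x)
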
Example No. 2
    def __init__(self, fin, fout, opt, use_se=False, dilation=1):
        super().__init__()
        # Attributes
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)
        self.opt = opt
        self.pad_type = 'zero'
        self.use_se = use_se

        # create conv layers
        if self.pad_type != 'zero':
            self.pad = nn.ReflectionPad2d(dilation)
            self.conv_0 = nn.Conv2d(fin,
                                    fmiddle,
                                    kernel_size=3,
                                    padding=0,
                                    dilation=dilation)
            self.conv_1 = nn.Conv2d(fmiddle,
                                    fout,
                                    kernel_size=3,
                                    padding=0,
                                    dilation=dilation)
        else:
            self.conv_0 = nn.Conv2d(fin,
                                    fmiddle,
                                    kernel_size=3,
                                    padding=dilation,
                                    dilation=dilation)
            self.conv_1 = nn.Conv2d(fmiddle,
                                    fout,
                                    kernel_size=3,
                                    padding=dilation,
                                    dilation=dilation)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)

        # apply spectral norm if specified
        if 'spectral' in opt.norm_G:
            if opt.eqlr_sn:
                self.conv_0 = equal_lr(self.conv_0)
                self.conv_1 = equal_lr(self.conv_1)
                if self.learned_shortcut:
                    self.conv_s = equal_lr(self.conv_s)
            else:
                self.conv_0 = spectral_norm(self.conv_0)
                self.conv_1 = spectral_norm(self.conv_1)
                if self.learned_shortcut:
                    self.conv_s = spectral_norm(self.conv_s)

        # define normalization layers
        spade_config_str = opt.norm_G.replace('spectral', '')

        # channels of the conditioning input fed to SPADE: given explicitly via
        # spade_ic, or derived from the CBN input type as in the generator above
        if 'spade_ic' in opt:
            ic = opt.spade_ic
        else:
            ic = ((3 if 'warp' in opt.CBN_intype else 0)
                  + (opt.semantic_nc if 'mask' in opt.CBN_intype else 0))
        self.norm_0 = SPADE(spade_config_str,
                            fin,
                            ic,
                            PONO=opt.PONO,
                            use_apex=opt.apex)
        self.norm_1 = SPADE(spade_config_str,
                            fmiddle,
                            ic,
                            PONO=opt.PONO,
                            use_apex=opt.apex)
        if self.learned_shortcut:
            self.norm_s = SPADE(spade_config_str,
                                fin,
                                ic,
                                PONO=opt.PONO,
                                use_apex=opt.apex)

        if use_se:
            self.se_layar = SELayer(fout)
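
For reference, a hedged sketch of the residual wiring such a block usually implements (shown standalone, not the class's real forward): an identity or learned 1x1 shortcut plus two SPADE-normalized, leaky-ReLU-activated convolutions, assuming the zero-padding configuration fixed above. The function name is illustrative.

import torch.nn.functional as F

def spade_resblock_forward_sketch(block, x, seg):
    # shortcut branch: identity, or SPADE + 1x1 conv when the channel count changes
    if block.learned_shortcut:
        x_s = block.conv_s(block.norm_s(x, seg))
    else:
        x_s = x
    # main branch: SPADE -> LeakyReLU -> conv, applied twice
    dx = block.conv_0(F.leaky_relu(block.norm_0(x, seg), 2e-1))
    dx = block.conv_1(F.leaky_relu(block.norm_1(dx, seg), 2e-1))
    # optional squeeze-and-excitation on the residual (attribute name as spelled in __init__)
    if block.use_se:
        dx = block.se_layar(dx)
    return x_s + dx
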
Example No. 3
    def __init__(self, opt, stage1=False):
        super().__init__()
        self.opt = opt
        self.stage1 = stage1

        kw = 4
        #padw = int(np.ceil((kw - 1.0) / 2))
        padw = int((kw - 1.0) / 2)
        nf = opt.ndf
        input_nc = self.compute_D_input_nc(opt)

        norm_layer = get_nonspade_norm_layer(opt, opt.norm_D)
        # first block: conv + LeakyReLU, no normalization applied to the raw input
        sequence = [[
            nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, False)
        ]]

        # remaining blocks: norm_layer(conv) + LeakyReLU; channels double (capped at 512)
        # and the spatial size halves, except for the last block which keeps stride 1
        for n in range(1, opt.n_layers_D):
            nf_prev = nf
            nf = min(nf * 2, 512)
            stride = 1 if n == opt.n_layers_D - 1 else 2
            if (((not stage1) and opt.use_attention) or
                (stage1
                 and opt.use_attention_st1)) and n == opt.n_layers_D - 1:
                self.attn = Attention(nf_prev, 'spectral' in opt.norm_D)
            # auxiliary decoder head (stage-2 discriminator only): upsamples the
            # penultimate features back up and outputs opt.semantic_nc channels
            if n == opt.n_layers_D - 1 and (not stage1):
                dec = []
                nc_dec = nf_prev
                for _ in range(opt.n_layers_D - 1):
                    dec += [
                        nn.Upsample(scale_factor=2),
                        norm_layer(
                            nn.Conv2d(nc_dec,
                                      int(nc_dec // 2),
                                      kernel_size=3,
                                      stride=1,
                                      padding=1)),
                        nn.LeakyReLU(0.2, False)
                    ]
                    nc_dec = int(nc_dec // 2)
                dec += [
                    nn.Conv2d(nc_dec,
                              opt.semantic_nc,
                              kernel_size=3,
                              stride=1,
                              padding=1)
                ]
                self.dec = nn.Sequential(*dec)
            sequence += [[
                norm_layer(
                    nn.Conv2d(nf_prev,
                              nf,
                              kernel_size=kw,
                              stride=stride,
                              padding=padw)),
                nn.LeakyReLU(0.2, False)
            ]]
        sequence += [[
            nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)
        ]]

        # CAM branch (when D_cam > 0): linear scoring heads on globally average- and
        # max-pooled features, plus a 1x1 conv that fuses the two branches
        if opt.D_cam > 0:
            mult = min(2**(opt.n_layers_D - 1), 8)
            if opt.eqlr_sn:
                self.gap_fc = equal_lr(nn.Linear(opt.ndf * mult, 1,
                                                 bias=False))
                self.gmp_fc = equal_lr(nn.Linear(opt.ndf * mult, 1,
                                                 bias=False))
            else:
                self.gap_fc = nn.utils.spectral_norm(
                    nn.Linear(opt.ndf * mult, 1, bias=False))
                self.gmp_fc = nn.utils.spectral_norm(
                    nn.Linear(opt.ndf * mult, 1, bias=False))
            self.conv1x1 = nn.Conv2d(opt.ndf * mult * 2,
                                     opt.ndf * mult,
                                     kernel_size=1,
                                     stride=1,
                                     bias=True)
            self.leaky_relu = nn.LeakyReLU(0.2, True)

        # We divide the layers into groups to extract intermediate layer outputs
        for n in range(len(sequence)):
            self.add_module('model' + str(n), nn.Sequential(*sequence[n]))
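
Because the conv groups are registered as model0, model1, ..., a caller typically walks them in order and keeps every intermediate activation, e.g. for a feature-matching loss. The sketch below only illustrates that grouping idea; it omits the attention layer, the dec head, and the CAM branch, and the function name is illustrative rather than the repository's forward.

def discriminator_features_sketch(disc, x):
    # run each registered group in sequence, keeping every intermediate output
    results = [x]
    n = 0
    while hasattr(disc, 'model' + str(n)):
        layer = getattr(disc, 'model' + str(n))
        results.append(layer(results[-1]))
        n += 1
    # results[1:] holds one feature map per group; the last entry is the patch prediction map
    return results[1:]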