Example #1
    def __init__(self, fin, fout, opt):
        super().__init__()
        # Attributes
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)

        # create conv layers
        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)

        # apply spectral norm if specified
        if 'spectral' in opt.norm_G:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)

        # define normalization layers
        spade_config_str = opt.norm_G.replace('spectral', '')
        self.norm_0 = SPADE(spade_config_str, fin, opt.semantic_nc)
        self.norm_1 = SPADE(spade_config_str, fmiddle, opt.semantic_nc)
        if self.learned_shortcut:
            self.norm_s = SPADE(spade_config_str, fin, opt.semantic_nc)
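The constructor above only builds the layers; the listing does not show how they are wired together. A minimal forward pass in the usual SPADE residual-block pattern (SPADE norm, LeakyReLU, conv on the residual branch, plus the learned shortcut) could look like the sketch below; the method names forward, shortcut and actvn and the use of F.leaky_relu are assumptions for illustration, not part of the excerpt above.

    # Sketch only (assumes `import torch.nn.functional as F` at module level).
    def forward(self, x, seg):
        # residual branch: SPADE norm -> LeakyReLU -> 3x3 conv, applied twice
        dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
        dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
        return self.shortcut(x, seg) + dx

    def shortcut(self, x, seg):
        # 1x1 projection only when the channel count changes (fin != fout)
        if self.learned_shortcut:
            return self.conv_s(self.norm_s(x, seg))
        return x

    def actvn(self, x):
        return F.leaky_relu(x, 2e-1)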
Example #2
    def __init__(self, fin, fout, opt):
        super().__init__()
        # Attributes
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)

        # create conv layers
        add_channels = 1 if (opt.norm_mode == 'clade'
                             and not opt.no_instance) else 0
        self.conv_0 = nn.Conv2d(fin + add_channels,
                                fmiddle,
                                kernel_size=3,
                                padding=1)
        self.conv_1 = nn.Conv2d(fmiddle + add_channels,
                                fout,
                                kernel_size=3,
                                padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin + add_channels,
                                    fout,
                                    kernel_size=1,
                                    bias=False)

        # apply spectral norm if specified
        if 'spectral' in opt.norm_G:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)

        # define normalization layers
        spade_config_str = opt.norm_G.replace('spectral', '')
        if opt.norm_mode == 'spade':
            self.norm_0 = SPADE(spade_config_str, fin, opt.semantic_nc)
            self.norm_1 = SPADE(spade_config_str, fmiddle, opt.semantic_nc)
            if self.learned_shortcut:
                self.norm_s = SPADE(spade_config_str, fin, opt.semantic_nc)
        elif opt.norm_mode == 'clade':
            input_nc = opt.label_nc + (1 if opt.contain_dontcare_label else 0)
            self.norm_0 = SPADELight(spade_config_str, fin, input_nc,
                                     opt.no_instance, opt.add_dist)
            self.norm_1 = SPADELight(spade_config_str, fmiddle, input_nc,
                                     opt.no_instance, opt.add_dist)
            if self.learned_shortcut:
                self.norm_s = SPADELight(spade_config_str, fin, input_nc,
                                         opt.no_instance, opt.add_dist)
        else:
            raise ValueError('%s is not a defined normalization method' %
                             opt.norm_mode)
Example #3
    def __init__(self, dim, semantic_nc, kernel_size=3):
        super().__init__()
        norm_G = 'spectralspadesyncbatch3x3'
        pw = (kernel_size - 1) // 2
        self.conv_0 = nn.Conv2d(dim, dim, kernel_size=kernel_size)
        self.conv_1 = nn.Conv2d(dim, dim, kernel_size=kernel_size)
        self.padding = nn.ReflectionPad2d(pw)
        if 'spectral' in norm_G:
            self.add_module('conv_block1', spectral_norm(self.conv_0))
            self.add_module('conv_block4', spectral_norm(self.conv_1))

        # define normalization layers
        spade_config_str = norm_G.replace('spectral', '')
        self.norm_0 = SPADE(spade_config_str, dim, semantic_nc)
        self.norm_1 = SPADE(spade_config_str, dim, semantic_nc)
Example #4
    def __init__(self, fin, fout, opt):
        super().__init__()
        # Attributes
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)

        # create conv layers
        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)

        if not opt.weight_norm_G:
            # apply spectral norm if specified
            if 'spectral' in opt.norm_G:
                self.conv_0 = spectral_norm(self.conv_0)
                self.conv_1 = spectral_norm(self.conv_1)
                if self.learned_shortcut:
                    self.conv_s = spectral_norm(self.conv_s)
        else:
            if opt.weight_norm_g == 0:
                # g is learnable
                self.conv_0 = weight_norm_0(self.conv_0)
                self.conv_1 = weight_norm_0(self.conv_1)
                if self.learned_shortcut:
                    self.conv_s = weight_norm_0(self.conv_s)
            elif opt.weight_norm_g == 1:
                # g == 1
                self.conv_0 = weight_norm_1(self.conv_0)
                self.conv_1 = weight_norm_1(self.conv_1)
                if self.learned_shortcut:
                    self.conv_s = weight_norm_1(self.conv_s)

        # define normalization layers
        norm_nc = opt.label_nc + (opt.orient_nc if not opt.no_orientation else
                                  0) + (opt.feat_num if opt.use_instance_feat
                                        else 0) + (3 if 'spadebase' in opt.netG
                                                   else 0)
        spade_config_str = opt.norm_G.replace('spectral', '')
        self.norm_0 = SPADE(spade_config_str, fin, norm_nc, opt.weight_norm_G)
        self.norm_1 = SPADE(spade_config_str, fmiddle, norm_nc,
                            opt.weight_norm_G)
        if self.learned_shortcut:
            self.norm_s = SPADE(spade_config_str, fin, norm_nc,
                                opt.weight_norm_G)
Example #5
    def __init__(self, fin, fout, opt):
        super().__init__()
        # Attributes
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)
        # create conv layers
        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)

        self.conv_0 = spectral_norm(self.conv_0)
        self.conv_1 = spectral_norm(self.conv_1)
        if self.learned_shortcut:
            self.conv_s = spectral_norm(self.conv_s)
        spade_config_str = 'batchnorm3x3'
        self.norm_0 = SPADE(spade_config_str, fin, opt.edge_nc, opt)
        self.norm_1 = SPADE(spade_config_str, fmiddle, opt.edge_nc, opt)
        if self.learned_shortcut:
            self.norm_s = SPADE(spade_config_str, fin, opt.edge_nc, opt)
Example #6
    def __init__(self, fin, fout, opt):
        super().__init__()
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)

        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)

        if 'spectral' in opt.norm_G:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)

        spade_config_str = opt.norm_G.replace('spectral', '')
        self.norm_0 = SPADE(spade_config_str, fin, opt.semantic_nc)
        self.norm_1 = SPADE(spade_config_str, fmiddle, opt.semantic_nc)
        if self.learned_shortcut:
            self.norm_s = SPADE(spade_config_str, fin, opt.semantic_nc)
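For context, a hypothetical way to instantiate and exercise such a block; the class name SPADEResnetBlock, the option values, and the forward(x, seg) signature are assumptions for illustration only.

    # Sketch only: build one block and push a dummy tensor through it.
    import types
    import torch

    opt = types.SimpleNamespace(norm_G='spectralspadebatch3x3', semantic_nc=36)
    block = SPADEResnetBlock(1024, 512, opt)      # fin=1024, fout=512
    x = torch.randn(1, 1024, 8, 8)                # upsampled feature map
    seg = torch.randn(1, opt.semantic_nc, 8, 8)   # dummy label map at x's resolution
    out = block(x, seg)                           # -> torch.Size([1, 512, 8, 8])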
Example #7
    def __init__(self, fin, fout, opt):
        super().__init__()
        # Attributes
        self.opt = opt
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)

        # create conv layers
        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)

        # apply spectral norm if specified
        if 'spectral' in opt.norm_G:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)

        # define normalization layers
        spade_config_str = opt.norm_G.replace('spectral', '')

        if opt.embed_attributes:
            attr_nc = opt.attr_nc
        else:
            attr_nc = 0

        if opt.embed_captions:
            class_inst = SPADEAttention
            extra_args = [opt.attr_emb_dim, opt.attention_heads]
        else:
            class_inst = SPADE
            extra_args = [attr_nc, opt.attr_emb_dim]  # Can be zero

        self.norm_0 = class_inst(spade_config_str, fin, opt.label_nc,
                                 opt.mask_emb_dim, *extra_args)
        self.norm_1 = class_inst(spade_config_str, fmiddle, opt.label_nc,
                                 opt.mask_emb_dim, *extra_args)
        if self.learned_shortcut:
            self.norm_s = SPADE(spade_config_str, fin, opt.label_nc,
                                opt.mask_emb_dim, attr_nc, opt.attr_emb_dim)
Example #8
    def __init__(self, fin, fout, opt, semantic_nc):
        super().__init__()
        # Attributes
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)

        # layers to generate conditional convolution weights
        nhidden = 128
        self.weight_channels = fmiddle * 9
        self.gen_weights1 = nn.Sequential(
            nn.Conv2d(semantic_nc, nhidden, kernel_size=3, padding=1),
            nn.ReLU(), nn.Conv2d(nhidden, fin * 9, kernel_size=3, padding=1))
        self.gen_weights2 = nn.Sequential(
            nn.Conv2d(semantic_nc, nhidden, kernel_size=3, padding=1),
            nn.ReLU(), nn.Conv2d(nhidden, fout * 9, kernel_size=3, padding=1))

        self.gen_se_weights1 = nn.Sequential(
            nn.Conv2d(semantic_nc, nhidden, kernel_size=3, padding=1),
            nn.ReLU(), nn.Conv2d(nhidden, fmiddle, kernel_size=3, padding=1),
            nn.Sigmoid())
        self.gen_se_weights2 = nn.Sequential(
            nn.Conv2d(semantic_nc, nhidden, kernel_size=3, padding=1),
            nn.ReLU(), nn.Conv2d(nhidden, fout, kernel_size=3, padding=1),
            nn.Sigmoid())

        # create conv layers
        if opt.mpdist:
            BNFunc = nn.SyncBatchNorm
        else:
            BNFunc = nn.BatchNorm2d
        self.conv_0 = DepthConv(fin, opt)
        self.norm_0 = BNFunc(fmiddle, affine=True)
        self.conv_1 = nn.Conv2d(fin, fmiddle, kernel_size=1)
        self.norm_1 = BNFunc(fin, affine=True)
        self.conv_2 = DepthConv(fmiddle, opt)
        self.norm_2 = BNFunc(fmiddle, affine=True)
        self.conv_3 = nn.Conv2d(fmiddle, fout, kernel_size=1)
        if self.learned_shortcut:
            self.conv_s = spectral_norm(
                nn.Conv2d(fin, fout, kernel_size=1, bias=False))
            self.norm_s = SPADE(fin, semantic_nc, opt)
Example #9
    def __init__(self, fin, fout, opt, use_se=False, dilation=1):
        super().__init__()
        # Attributes
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)
        self.opt = opt
        self.pad_type = 'zero'
        self.use_se = use_se

        # create conv layers
        if self.pad_type != 'zero':
            self.pad = nn.ReflectionPad2d(dilation)
            self.conv_0 = nn.Conv2d(fin,
                                    fmiddle,
                                    kernel_size=3,
                                    padding=0,
                                    dilation=dilation)
            self.conv_1 = nn.Conv2d(fmiddle,
                                    fout,
                                    kernel_size=3,
                                    padding=0,
                                    dilation=dilation)
        else:
            self.conv_0 = nn.Conv2d(fin,
                                    fmiddle,
                                    kernel_size=3,
                                    padding=dilation,
                                    dilation=dilation)
            self.conv_1 = nn.Conv2d(fmiddle,
                                    fout,
                                    kernel_size=3,
                                    padding=dilation,
                                    dilation=dilation)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)

        # apply spectral norm if specified
        if 'spectral' in opt.norm_G:
            if opt.eqlr_sn:
                self.conv_0 = equal_lr(self.conv_0)
                self.conv_1 = equal_lr(self.conv_1)
                if self.learned_shortcut:
                    self.conv_s = equal_lr(self.conv_s)
            else:
                self.conv_0 = spectral_norm(self.conv_0)
                self.conv_1 = spectral_norm(self.conv_1)
                if self.learned_shortcut:
                    self.conv_s = spectral_norm(self.conv_s)

        # define normalization layers
        spade_config_str = opt.norm_G.replace('spectral', '')

        if 'spade_ic' in opt:
            ic = opt.spade_ic
        else:
            ic = 0 + (3 if 'warp' in opt.CBN_intype else
                      0) + (opt.semantic_nc if 'mask' in opt.CBN_intype else 0)
        self.norm_0 = SPADE(spade_config_str,
                            fin,
                            ic,
                            PONO=opt.PONO,
                            use_apex=opt.apex)
        self.norm_1 = SPADE(spade_config_str,
                            fmiddle,
                            ic,
                            PONO=opt.PONO,
                            use_apex=opt.apex)
        if self.learned_shortcut:
            self.norm_s = SPADE(spade_config_str,
                                fin,
                                ic,
                                PONO=opt.PONO,
                                use_apex=opt.apex)

        if use_se:
            self.se_layar = SELayer(fout)
Example #10
    def __init__(self, fin, fout, opt, Block_Name=None, use_rgb=True):
        super().__init__()

        # SEAN switch
        self.use_sean = (opt.norm_mode == 'sean')

        # SEAN/ ACE-block specific attributes
        self.use_rgb = use_rgb

        self.Block_Name = Block_Name
        self.status = opt.status

        # Attributes
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)

        # create conv layers
        # Note: the commented-out line below is only relevant when instance
        # maps are used (CLADE mode):
        #
        # add_channels = 1 if (opt.norm_mode == 'clade' and not opt.no_instance) else 0

        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)

        # apply spectral norm if specified
        if 'spectral' in opt.norm_G:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)

        # define normalization layers
        #
        # Mike: added the if/else branches from CLADE's architecture.py,
        # plus the option to choose SEAN blocks.
        spade_config_str = opt.norm_G.replace('spectral', '')
        if opt.norm_mode == 'spade':
            self.norm_0 = SPADE(spade_config_str, fin, opt.semantic_nc)
            self.norm_1 = SPADE(spade_config_str, fmiddle, opt.semantic_nc)
            if self.learned_shortcut:
                self.norm_s = SPADE(spade_config_str, fin, opt.semantic_nc)
        elif opt.norm_mode == 'clade':
            input_nc = opt.label_nc + (1 if opt.contain_dontcare_label else 0)
            self.norm_0 = SPADELight(spade_config_str, fin, input_nc,
                                     opt.no_instance, opt.add_dist)
            self.norm_1 = SPADELight(spade_config_str, fmiddle, input_nc,
                                     opt.no_instance, opt.add_dist)
            if self.learned_shortcut:
                self.norm_s = SPADELight(spade_config_str, fin, input_nc,
                                         opt.no_instance, opt.add_dist)
        elif opt.norm_mode == 'sean':
            self.use_sean = True
            our_norm_type = 'spadesyncbatch3x3'
            self.ace_0 = ACE(our_norm_type, fin, 3, ACE_Name=Block_Name + '_ACE_0', status=self.status,
                             spade_params=[spade_config_str, fin, opt.semantic_nc], use_rgb=use_rgb)
            self.ace_1 = ACE(our_norm_type, fmiddle, 3, ACE_Name=Block_Name + '_ACE_1', status=self.status,
                             spade_params=[spade_config_str, fmiddle, opt.semantic_nc], use_rgb=use_rgb)
            if self.learned_shortcut:
                self.ace_s = ACE(our_norm_type, fin, 3, ACE_Name=Block_Name + '_ACE_s', status=self.status,
                                 spade_params=[spade_config_str, fin, opt.semantic_nc], use_rgb=use_rgb)
        else:
            raise ValueError('%s is not a defined normalization method' % opt.norm_mode)