Example #1
    def __init__(self,
                 input_nc,
                 ndf=64,
                 n_layers=3,
                 norm_layer=nn.BatchNorm2d,
                 use_sigmoid=False,
                 getIntermFeat=False,
                 n_self_attention=1):
        super(NLayerDiscriminator, self).__init__()
        self.getIntermFeat = getIntermFeat
        self.n_layers = n_layers
        self.n_self_attention = n_self_attention

        kw = 4
        padw = int(np.floor((kw - 1.0) / 2))
        # first conv block: no normalization after the initial convolution
        sequence = [[
            SNConv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]]

        # strided conv blocks: double the channels (capped at 512) while halving resolution
        nf = ndf
        for n in range(1, n_layers):
            nf_prev = nf
            nf = min(nf * 2, 512)
            sequence += [[
                SNConv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
                norm_layer(nf),
                nn.LeakyReLU(0.2, True)
            ]]

        nf_prev = nf
        nf = min(nf * 2, 512)

        # TODO: use n_self_attention and increase number of self attention layers

        sequence += [[
            SelfAttention2d(nf_prev),
            SNConv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
            norm_layer(nf),
            nn.LeakyReLU(0.2, True)
        ]]

        # final conv producing a 1-channel patch prediction map
        sequence += [[SNConv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]

        if use_sigmoid:
            sequence += [[nn.Sigmoid()]]

        if getIntermFeat:
            # keep each block as a separate submodule so intermediate features can be returned
            for n in range(len(sequence)):
                setattr(self, 'model' + str(n), nn.Sequential(*sequence[n]))
        else:
            # otherwise fuse all blocks into a single sequential model
            sequence_stream = []
            for n in range(len(sequence)):
                sequence_stream += sequence[n]
            self.model = nn.Sequential(*sequence_stream)
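
All of these examples build on an SNConv2d layer. Below is a minimal sketch of such a layer, assuming it is simply a Conv2d wrapped in PyTorch's built-in torch.nn.utils.spectral_norm; the actual implementation in the source repositories may differ.

import torch.nn as nn
from torch.nn.utils import spectral_norm

def SNConv2d(*args, **kwargs):
    # Conv2d whose weight is spectrally normalized, which bounds the layer's
    # Lipschitz constant and stabilizes GAN discriminator training
    return spectral_norm(nn.Conv2d(*args, **kwargs))

def SNLinear(*args, **kwargs):
    # same idea for fully connected layers (used in Example #6 below)
    return spectral_norm(nn.Linear(*args, **kwargs))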
Example #2
    def __init__(self, in_ch, out_ch, downsample, bottleneck_ratio=4):
        super().__init__()
        # bottleneck conv blocks
        hidden_ch = in_ch // bottleneck_ratio
        self.conv1 = SNConv2d(in_ch, hidden_ch, kernel_size=1)
        self.conv2 = SNConv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1)
        self.conv3 = SNConv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1)
        self.conv4 = SNConv2d(hidden_ch, out_ch, kernel_size=1)
        # shortcut conv for increasing the channel count
        self.downsample = downsample
        if in_ch < out_ch:
            self.conv_short = SNConv2d(in_ch, out_ch - in_ch, kernel_size=1)
        else:
            self.conv_short = None
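
The constructor above only builds the layers. A plausible forward pass for this bottleneck block is sketched below as an assumption, since the original forward is not shown; it presumes torch and torch.nn.functional as F are imported, and that downsample is a pooling factor (1 or 2) as in Example #6.

    def forward(self, x):
        # assumed forward: bottleneck residual path plus a shortcut branch
        h = self.conv1(F.relu(x))
        h = self.conv2(F.relu(h))
        h = self.conv3(F.relu(h))
        h = self.conv4(F.relu(h))
        shortcut = x
        # widen the shortcut by concatenating extra channels from conv_short
        if self.conv_short is not None:
            shortcut = torch.cat([shortcut, self.conv_short(shortcut)], dim=1)
        # reduce spatial size on both branches when downsampling
        if self.downsample > 1:
            h = F.avg_pool2d(h, self.downsample)
            shortcut = F.avg_pool2d(shortcut, self.downsample)
        return h + shortcut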
Example #3
    def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
        conv_block = []
        # first conv: explicit padding layer (reflect/replicate) or implicit zero padding
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError(
                'padding [%s] is not implemented' % padding_type)

        conv_block += [SNConv2d(dim, dim, kernel_size=3, padding=p),
                       norm_layer(dim),
                       activation]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        # second conv: same padding logic, but no activation after the norm
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError(
                'padding [%s] is not implemented' % padding_type)
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
                       norm_layer(dim)]

        return nn.Sequential(*conv_block)
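
A minimal sketch of the residual block this helper typically sits in; the wrapper class below is an assumption based on the call signature, and only build_conv_block itself comes from the source.

class ResnetBlock(nn.Module):
    def __init__(self, dim, padding_type='reflect', norm_layer=nn.BatchNorm2d,
                 activation=nn.ReLU(True), use_dropout=False):
        super().__init__()
        # build_conv_block is the method shown above
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer,
                                                activation, use_dropout)

    def forward(self, x):
        # residual connection around the pad -> conv -> norm -> activation block
        return x + self.conv_block(x)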
Example #4
    def __init__(self, in_ch, out_ch, upsample, embedding_dims, bottleneck_ratio=4):
        super().__init__()
        # conv layers
        hidden_ch = out_ch // bottleneck_ratio
        self.conv1 = SNConv2d(in_ch, hidden_ch, kernel_size=1)
        self.conv2 = SNConv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1)
        self.conv3 = SNConv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1)
        self.conv4 = SNConv2d(hidden_ch, out_ch, kernel_size=1)
        self.out_ch = out_ch
        # bn layers
        self.bn1 = ConditionalBatchNorm(in_ch, embedding_dims)
        self.bn2 = ConditionalBatchNorm(hidden_ch, embedding_dims)
        self.bn3 = ConditionalBatchNorm(hidden_ch, embedding_dims)
        self.bn4 = ConditionalBatchNorm(hidden_ch, embedding_dims)
        # upsample
        self.upsample = upsample
Example #5
    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
                 padding_type='reflect', cond=False, n_self_attention=0, acm_dim=64, img_size=512, vocab_size=512):
        assert(n_blocks >= 0)
        super(GlobalGenerator, self).__init__()
        # Mish is used as the activation throughout (overriding the more common ReLU)
        activation = Mish()

        self.cond = cond

        model = []
        if self.cond:
            model = [ACM(acm_dim, img_size=img_size, vocab_size=vocab_size)]

        model += [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf,
                                                   kernel_size=7, padding=0), norm_layer(ngf), activation]
        # downsample
        for i in range(n_downsampling):
            mult = 2**i
            model += [SNConv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
                      norm_layer(ngf * mult * 2), activation]

        # resnet blocks
        mult = 2**n_downsampling
        for i in range(n_blocks):
            model += [ResnetBlock(ngf * mult, padding_type=padding_type,
                                  activation=activation, norm_layer=norm_layer)]

        # self attention
        for i in range(n_self_attention):
            model += [SelfAttention2d(ngf * mult),
                      norm_layer(int(ngf * mult)), activation]

        # upsample
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
                      norm_layer(int(ngf * mult / 2)), activation]
        model += [nn.ReflectionPad2d(3), SNConv2d(ngf,
                                                  output_nc, kernel_size=7, padding=0), nn.Tanh()]
        if self.cond:
            self.model = MultiSequential(*model)
        else:
            self.model = nn.Sequential(*model)
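
Given the constructor signature above, instantiating the generator could look like the following; the concrete values are illustrative, and calling self.model directly assumes the usual forward of self.model(input).

import torch
import torch.nn as nn

netG = GlobalGenerator(input_nc=3, output_nc=3, ngf=64,
                       n_downsampling=3, n_blocks=9,
                       norm_layer=nn.InstanceNorm2d)
out = netG.model(torch.randn(1, 3, 512, 512))  # Tanh output in [-1, 1]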
Example #6
    def __init__(self, base_ch, resolution, n_classes):
        super().__init__()
        assert resolution in [32, 64, 128, 256]
        arch = D_arch(base_ch)[resolution]
        # initial conv
        self.initial_conv = SNConv2d(3, arch["in_channels"][0], kernel_size=3, padding=1)
        # main conv blocks
        blocks = []
        # zip the per-block fields of the architecture table together
        # (each field is either a list or a dict whose values are iterated)
        for in_ch, out_ch, downsample, _, attention in zip(*
                (v.values() if type(v) is dict else v for v in arch.values())):
            # D block with downsampling
            blocks.append(DBlock(in_ch, out_ch, 2 if downsample else 1))
            # D block without downsampling
            blocks.append(DBlock(out_ch, out_ch, 1))
            # non-local (self-attention) block if needed
            if attention:
                blocks.append(SelfAttention(out_ch))
        self.main = nn.Sequential(*blocks)
        # output linear layer producing the real/fake logit (out_ch is the last block's width)
        self.linear_out = SNLinear(out_ch, 1)
        # class embedding for the projection discriminator
        self.proj_embedding = SNEmbedding(n_classes, out_ch)
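
For the zip in the loop above to yield five values per block, D_arch(base_ch)[resolution] must hold five parallel fields, some as lists and some as dicts. A hypothetical layout, modeled on BigGAN-style architecture tables rather than copied from the source:

def D_arch(base_ch=64):
    # hypothetical sketch of the architecture table for a single resolution
    return {
        64: {
            "in_channels":  [base_ch, base_ch * 2, base_ch * 4, base_ch * 8],
            "out_channels": [base_ch * 2, base_ch * 4, base_ch * 8, base_ch * 16],
            "downsample":   [True, True, True, False],
            "resolution":   [32, 16, 8, 4],
            "attention":    {32: True, 16: False, 8: False, 4: False},
        },
    }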