Example #1
    def build_discriminator(self):
        filter_sizes = self.config['d_filter_sizes']
        kernel_sizes = self.config['d_kernel_sizes']
        dropout_rates = self.config['d_dropout_rates']

        input_img = tf.keras.layers.Input(self.config['target_shape'],
                                          name='input-img')

        x = input_img
        for filters, kernels, dropout in zip(filter_sizes, kernel_sizes,
                                             dropout_rates):
            x = downsampling_module(x,
                                    filters,
                                    kernels,
                                    strides=2,
                                    dropout=dropout,
                                    spectral_norm=self.config['spectral_norm'],
                                    initializer=self.config['initializer'])

        x = tf.keras.layers.Flatten()(x)

        src = tf.keras.layers.Dense(
            1,
            kernel_initializer=self.config['initializer'],
            kernel_constraint=SpectralNorm()
            if self.config['spectral_norm'] else None)(x)
        label = tf.keras.layers.Dense(
            self.config['n_classes'],
            kernel_initializer=self.config['initializer'],
            kernel_constraint=SpectralNorm()
            if self.config['spectral_norm'] else None)(x)

        return tf.keras.Model(inputs=[input_img],
                              outputs=[src, label],
                              name='discriminator')
Example #2
    def __init__(self,
                 image_size=64,
                 z_dim=100,
                 conv_dim=64,
                 norm_layer=nn.InstanceNorm2d):
        super(Decoder, self).__init__()
        # Upsampling
        self.imsize = image_size
        layer1 = []
        layer2 = []
        layer3 = []
        last = []

        repeat_num = int(np.log2(self.imsize)) - 3
        mult = 2**repeat_num  # 8
        layer1.append(
            SpectralNorm(nn.ConvTranspose2d(z_dim, conv_dim * mult, 4)))
        layer1.append(norm_layer(conv_dim * mult))
        layer1.append(nn.ReLU(inplace=False))

        curr_dim = conv_dim * mult

        layer2.append(
            SpectralNorm(
                nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
        layer2.append(norm_layer(int(curr_dim / 2)))
        layer2.append(nn.ReLU(inplace=False))

        curr_dim = int(curr_dim / 2)

        layer3.append(
            SpectralNorm(
                nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
        layer3.append(norm_layer(int(curr_dim / 2)))
        layer3.append(nn.ReLU(inplace=False))

        if self.imsize == 64:
            layer4 = []
            curr_dim = int(curr_dim / 2)
            layer4.append(
                SpectralNorm(
                    nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
            layer4.append(norm_layer(int(curr_dim / 2)))
            layer4.append(nn.ReLU(inplace=False))
            self.l4 = nn.Sequential(*layer4)
            curr_dim = int(curr_dim / 2)

        self.l1 = nn.Sequential(*layer1)
        self.l2 = nn.Sequential(*layer2)
        self.l3 = nn.Sequential(*layer3)

        last.append(nn.ConvTranspose2d(curr_dim, 3, 4, 2, 1))
        last.append(nn.Tanh())
        self.last = nn.Sequential(*last)

        self.up_attn1 = Self_Attention(128)
        self.up_attn2 = Self_Attention(64)
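Only the constructor is listed; the forward pass is not shown. Below is a minimal sketch of how the blocks might be chained, assuming the latent code is reshaped to a 1x1 feature map before the first transposed convolution, that Self_Attention returns a single tensor (some variants also return the attention map), and that the l4/up_attn2 path exists only for image_size == 64.

    def forward(self, z):
        # Hypothetical forward pass for the Decoder above (not part of the original listing).
        out = z.view(z.size(0), z.size(1), 1, 1)    # (N, z_dim) -> (N, z_dim, 1, 1)
        out = self.l1(out)                          # 4x4, conv_dim*8 channels
        out = self.l2(out)                          # 8x8
        out = self.l3(out)                          # 16x16, 128 channels
        out = self.up_attn1(out)                    # Self_Attention(128)
        out = self.l4(out)                          # 32x32, 64 channels (image_size == 64 only)
        out = self.up_attn2(out)                    # Self_Attention(64)
        return self.last(out)                       # 64x64x3 output in [-1, 1]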
Example #3
    def __init__(self, input_nc, hidden_nc, output_nc):
        super(ResBlockEncoder, self).__init__()
        self.nonlinearity = nn.LeakyReLU()
        self.conv1 = SpectralNorm(nn.Conv2d(input_nc, hidden_nc, 3, 1, 1))
        self.conv2 = SpectralNorm(nn.Conv2d(hidden_nc, output_nc, 3, 1, 1))
        self.conv3 = SpectralNorm(nn.Conv2d(input_nc, output_nc, 1, 1, 0))

        self.mainpass = nn.Sequential(
            self.conv1, self.nonlinearity, self.conv2, self.nonlinearity, nn.AvgPool2d(2, 2)
        )

        self.bypass = nn.Sequential(
            nn.AvgPool2d(2, 2), self.conv3
        )
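ResBlockEncoder only builds its two branches here; a plausible forward pass, sketched below as an assumption, simply sums the downsampling main path and the pooled 1x1 shortcut (both halve the spatial resolution and output output_nc channels):

    def forward(self, x):
        # Hypothetical residual combination (not shown in the original snippet).
        return self.mainpass(x) + self.bypass(x)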
Example #4
    def __init__(self, input_nc, hidden_nc, output_nc):
        super(ResBlockDecoder, self).__init__()
        self.nonlinearity = nn.LeakyReLU()
        self.conv1 = SpectralNorm(nn.Conv2d(input_nc, hidden_nc, 3, 1, 1))
        self.conv2 = SpectralNorm(nn.ConvTranspose2d(hidden_nc, output_nc, kernel_size=3, stride=2, padding=1, output_padding=1))
        self.conv3 = SpectralNorm(nn.ConvTranspose2d(input_nc, output_nc, kernel_size=3, stride=2, padding=1, output_padding=1))

        self.mainpass = nn.Sequential(
            self.nonlinearity, self.conv1, self.nonlinearity, self.conv2
        )

        self.bypass = nn.Sequential(
            self.conv3
        )
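As with the encoder block, the forward pass is not included; here is a sketch under the assumption of a plain residual sum (both branches upsample by 2 via transposed convolutions, so the shapes match):

    def forward(self, x):
        # Hypothetical residual combination of the upsampling branches.
        return self.mainpass(x) + self.bypass(x)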
Example #5
    def __init__(self, input_nc, hidden_nc, output_nc, sample_type='none', sample_size=2):
        super(ResBlock, self).__init__()
        self.nonlinearity = nn.LeakyReLU()
        self.conv1 = SpectralNorm(nn.Conv2d(input_nc, hidden_nc, 3, 1, 1))
        self.conv2 = SpectralNorm(nn.Conv2d(hidden_nc, output_nc, 3, 1, 1))
        self.conv3 = SpectralNorm(nn.Conv2d(input_nc, output_nc, 1, 1, 0))
        self.pooling = nn.AvgPool2d(2, 2) if sample_type == 'down' else None

        self.mainpass = nn.Sequential(
            self.nonlinearity, self.conv1, self.nonlinearity, self.conv2,
        )

        self.bypass = nn.Sequential(
            self.conv3,
        )
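ResBlock keeps an optional AvgPool2d for sample_type='down'. A hypothetical forward consistent with that flag, applying the pooling to the residual sum (the placement is an assumption, since the original forward is not shown):

    def forward(self, x):
        out = self.mainpass(x) + self.bypass(x)
        if self.pooling is not None:
            out = self.pooling(out)   # downsample only when built with sample_type='down'
        return out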
Example #6
    def __init__(self,
                 in_size,
                 out_size,
                 normalize=None,
                 kernel_size=4,
                 stride=2,
                 padding=1,
                 bias=True,
                 dropout=0,
                 activation_fn=nn.LeakyReLU(0.2)):
        super(ConvBlock, self).__init__()
        conv = nn.Conv2d(in_size,
                         out_size,
                         kernel_size=kernel_size,
                         stride=stride,
                         padding=padding,
                         bias=bias)
        torch.nn.init.xavier_uniform_(conv.weight)
        model = [conv]
        if normalize == "batch":
            model.append(nn.BatchNorm2d(out_size))
        elif normalize == "spectral":
            model = [SpectralNorm(conv)]

        model.append(activation_fn)
        self.model = nn.Sequential(*model)
Example #7
def block(in_feat, out_feat, spectral_norm=True):
    if spectral_norm:
        layers = [SpectralNorm(nn.Linear(in_feat, out_feat))]
    else:
        layers = [nn.Linear(in_feat, out_feat)]
    layers.append(nn.LeakyReLU())
    return layers
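Since block returns a plain list, several calls can be unpacked into one nn.Sequential. A hypothetical use with illustrative layer sizes (784/256/128/1 are not from the original code):

# Hypothetical: stack spectrally normalized linear blocks into a small critic.
model = nn.Sequential(
    *block(784, 256),        # SpectralNorm(Linear) + LeakyReLU
    *block(256, 128),
    nn.Linear(128, 1)        # plain output layer
)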
Example #8
    def __init__(self):
        super(Generator, self).__init__()
        self.layer = 6
        self.bcn = 64  # basic channel number
        self.mcn = 1024  # max channel number
        self.zcn = 128  # latent variable channel number
        self.nonlinearity = nn.LeakyReLU()

        mult = min(2**(self.layer-1), self.mcn // self.bcn)

        self.baseDecoder = nn.Sequential(
            ResBlock(self.zcn, self.bcn*mult, self.bcn*mult, 'none'),
            ResBlock(self.bcn*mult, self.bcn*mult, self.bcn*mult, 'none')
        )

        for i in range(self.layer):
            pre_mult = mult
            mult = min(2**(self.layer-i-1), self.mcn // self.bcn)
            block = ResBlockDecoder(self.bcn*pre_mult, self.bcn*mult, self.bcn*mult)
            setattr(self, "decoder" + str(i), block)

        self.output = nn.Sequential(
            self.nonlinearity,
            nn.ReflectionPad2d(1),
            SpectralNorm(nn.Conv2d(self.bcn*mult, 3, 3)),
            nn.Tanh()
        )
Example #9
def linear_unit(indim, outdim, sn_layer=False):
    lin = nn.Linear(indim, outdim, bias=False)
    weights_init_normal(lin)
    if sn_layer:
        return SpectralNorm(lin)
    else:
        return lin
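A hypothetical call site: with sn_layer=True the helper returns the spectrally normalized wrapper, which drops into an nn.Sequential exactly like a plain nn.Linear (the 512/128/1 sizes are illustrative):

critic_head = nn.Sequential(
    linear_unit(512, 128, sn_layer=True),
    nn.LeakyReLU(0.2),
    linear_unit(128, 1, sn_layer=True)
)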
Example #10
    def __init__(self):
        super(DiscriminatorConditional, self).__init__()
        self.res_block1 = ResidualBlock('D', 3, 64, resample='down', spectral_norm=True)
        self.res_block2 = ResidualBlock('D', 64, 128, resample='down', spectral_norm=True)
        self.res_block3 = ResidualBlock('D', 128, 256, resample='down', spectral_norm=True)
        self.res_block4 = ResidualBlock('D', 256, 512, resample='down', spectral_norm=True)
        self.res_block5 = ResidualBlock('D', 512 + NUM_CLASSES, 1024, resample='down', spectral_norm=True)
        self.res_block6 = ResidualBlock('D', 1024, 1024, spectral_norm=True)
        self.relu = nn.ReLU()
        self.fc1 = SpectralNorm(nn.Linear(1024, 1))
Example #11
    def __init__(self):
        super(DiscriminatorImage, self).__init__()
        self.nonlinearity = nn.LeakyReLU()
        self.mcn = 1024

        self.encoder = Encoder()
        self.output = nn.Sequential(
            self.nonlinearity,
            SpectralNorm(nn.Conv2d(self.mcn, 1, 3))
        )
Example #12
    def __init__(self, image_size=128, conv_dim=64, repeat_num=3):
        super(discriminator, self).__init__()
        layers = []

        layers.append(
            SpectralNorm(
                nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1)))
        layers.append(nn.LeakyReLU(0.01, inplace=True))

        curr_dim = conv_dim
        for _ in range(1, repeat_num):

            layers.append(
                SpectralNorm(
                    nn.Conv2d(curr_dim,
                              curr_dim * 2,
                              kernel_size=4,
                              stride=2,
                              padding=1)))
            layers.append(nn.LeakyReLU(0.01, inplace=True))
            curr_dim = curr_dim * 2

        #k_size = int(image_size / np.power(2, repeat_num))

        layers.append(
            SpectralNorm(
                nn.Conv2d(curr_dim,
                          curr_dim * 2,
                          kernel_size=4,
                          stride=1,
                          padding=1)))
        layers.append(nn.LeakyReLU(0.01, inplace=True))
        curr_dim = curr_dim * 2

        self.main = nn.Sequential(*layers)
        self.conv1 = SpectralNorm(
            nn.Conv2d(curr_dim,
                      1,
                      kernel_size=4,
                      stride=1,
                      padding=1,
                      bias=False))
Example #13
    def __init__(self, ndf, nc, num_classes=10):
        super(discriminator_source, self).__init__()
        self.ndf = ndf
        self.lrelu = nn.ReLU()
        self.conv1 = nn.Conv2d(nc, ndf, 4, 2, 1)

        self.conv3 = nn.Conv2d(ndf, ndf * 4, 4, 2, 1)
        self.bn3 = nn.BatchNorm2d(ndf * 4)
        self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1)
        self.bn4 = nn.BatchNorm2d(ndf * 8)
        self.conv5 = nn.Conv2d(ndf * 8, ndf * 1, 4, 1, 0)
        self.gan_linear = nn.Linear(ndf * 1, 1)
        self.aux_linear = nn.Linear(ndf * 1, num_classes)

        self.sigmoid = nn.Sigmoid()

        self.fc1 = SpectralNorm(nn.Linear(32 * 32, 512))
        self.fc2 = SpectralNorm(nn.Linear(512, 128))
        self.fc3 = SpectralNorm(nn.Linear(128, 32))
        # self.fc4 = (nn.Linear(64,32))

        self.c = SpectralNorm(nn.Linear(32, 10))
        self.mi = SpectralNorm(nn.Linear(32, 10))
        self.fc4 = SpectralNorm(nn.Linear(32, 1))
        self.batch_norm1 = (nn.BatchNorm1d(512))
        self.batch_norm2 = (nn.BatchNorm1d(128))
        self.batch_norm3 = (nn.BatchNorm1d(32))
        self.__initialize_weights()
Example #14
def downsampling_module(x, filters, kernels, strides=2, dropout=None, batch_norm=True, spectral_norm=False, initializer='glorot_uniform'):
    x = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernels, strides=strides, padding='same', use_bias=not batch_norm,
                                   kernel_initializer=initializer, kernel_constraint=SpectralNorm() if spectral_norm else None)(x)

    if batch_norm:
        x = tf.keras.layers.BatchNormalization()(x)

    x = tf.keras.layers.LeakyReLU()(x)

    if dropout:
        x = tf.keras.layers.Dropout(rate=dropout)(x)

    return x
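This is the Keras helper called by build_discriminator in Example #1; note that SpectralNorm is applied here as a kernel_constraint rather than as a module wrapper. A minimal standalone sketch of using it (the 64x64x3 input shape and filter counts are assumptions):

# Hypothetical: two spectrally constrained downsampling stages on a 64x64 RGB input.
inputs = tf.keras.layers.Input((64, 64, 3))
x = downsampling_module(inputs, filters=64, kernels=4, strides=2,
                        dropout=0.3, spectral_norm=True)
x = downsampling_module(x, filters=128, kernels=4, strides=2, spectral_norm=True)
features = tf.keras.layers.Flatten()(x)
model = tf.keras.Model(inputs, features)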
Example #15
    def __init__(self, batch_size=64, image_size=64, conv_dim=64):
        super(Discriminator_SA, self).__init__()
        self.imsize = image_size
        layer1 = []
        layer2 = []
        layer3 = []
        last = []

        layer1.append(SpectralNorm(nn.Conv2d(3, conv_dim, 4, 2, 1)))
        layer1.append(nn.LeakyReLU(0.1))

        curr_dim = conv_dim

        layer2.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))
        layer2.append(nn.LeakyReLU(0.1))
        curr_dim = curr_dim * 2

        layer3.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))
        layer3.append(nn.LeakyReLU(0.1))
        curr_dim = curr_dim * 2

        if self.imsize == 64:
            layer4 = []
            layer4.append(
                SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))
            layer4.append(nn.LeakyReLU(0.1))
            self.l4 = nn.Sequential(*layer4)
            curr_dim = curr_dim * 2
        self.l1 = nn.Sequential(*layer1)
        self.l2 = nn.Sequential(*layer2)
        self.l3 = nn.Sequential(*layer3)

        last.append(nn.Conv2d(curr_dim, 1, 4))
        self.last = nn.Sequential(*last)

        self.attn1 = Self_Attention(256)
        self.attn2 = Self_Attention(512)
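Only the constructor of Discriminator_SA is shown. A forward sketch consistent with the channel counts (attn1 expects 256 channels, the output of l3; attn2 expects 512, the output of l4) is given below; it assumes Self_Attention returns a single tensor and that image_size == 64 so l4 exists.

    def forward(self, x):
        # Hypothetical forward pass (not part of the original listing).
        out = self.l1(x)
        out = self.l2(out)
        out = self.l3(out)
        out = self.attn1(out)   # 256-channel feature map
        out = self.l4(out)
        out = self.attn2(out)   # 512-channel feature map
        return self.last(out)   # (N, 1, 1, 1) realness score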
Example #16
    def __init__(self):
        super(DiscriminatorLatent, self).__init__()
        self.layer = 5
        self.mcn = 1024
        self.nonlinearity = nn.LeakyReLU()  # used by self.output below

        mult = 1
        for i in range(self.layer):
            pre_mult = mult
            mult = min(4**(i+1), self.mcn // 1)
            block = ResBlock(self.mcn // pre_mult, self.mcn // mult, self.mcn // mult, 'none')
            setattr(self, "disc" + str(i), block)

        self.output = nn.Sequential(
            self.nonlinearity,
            SpectralNorm(nn.Conv2d(self.mcn, 1, 3))
        )
Example #17
def conv3x3(in_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            sn_layer=False):
    conv = nn.Conv2d(in_channels,
                     out_channels,
                     kernel_size=kernel_size,
                     stride=stride,
                     padding=padding,
                     bias=False)
    weights_init_normal(conv)
    if sn_layer:
        return SpectralNorm(conv)
    else:
        return conv
Example #18
    def __init__(self, ndf, nc, num_classes=10):
        super(discriminator_source_fm, self).__init__()
        self.ndf = ndf
        self.lrelu = nn.ReLU()
        self.conv1 = SpectralNorm(nn.Conv2d(nc, ndf, 4, 2, 1))

        self.conv3 = SpectralNorm(nn.Conv2d(ndf, ndf * 4, 4, 2, 1))
        self.bn3 = nn.BatchNorm2d(ndf * 4)
        self.conv4 = SpectralNorm(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1))
        self.bn4 = nn.BatchNorm2d(ndf * 8)
        self.conv5 = SpectralNorm(nn.Conv2d(ndf * 8, ndf * 1, 4, 1, 0))
        self.gan_linear = SpectralNorm(nn.Linear(ndf * 1, 1))
        self.aux_linear = SpectralNorm(nn.Linear(ndf * 1, num_classes))
        self.mi_linear = SpectralNorm(nn.Linear(ndf * 1, num_classes))

        self.sigmoid = nn.Sigmoid()
Example #19
    def __init__(self,
                 in_size,
                 out_size,
                 normalize=None,
                 kernel_size=4,
                 stride=2,
                 padding=1,
                 dropout=0,
                 activation_fn=nn.LeakyReLU(0.2)):
        super(ConvBlock, self).__init__()
        model = [
            nn.Conv2d(in_size,
                      out_size,
                      kernel_size=kernel_size,
                      stride=stride,
                      padding=padding)
        ]

        if normalize == 'batch':
            # + batchnorm
            model.append(nn.BatchNorm2d(out_size))
        elif normalize == 'instance':
            # + instancenorm
            model.append(nn.InstanceNorm2d(out_size))
        elif normalize == 'spectral':
            # conv + spectralnorm
            model = [
                SpectralNorm(
                    nn.Conv2d(in_size,
                              out_size,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=padding))
            ]

        model.append(activation_fn)

        if dropout > 0:
            model.append(nn.Dropout(dropout))

        self.model = nn.Sequential(*model)
Example #20
    def __init__(self, lambd):
        super(Discriminator, self).__init__()

        self.grl = GradReverse(lambd)
        self.image_conv = nn.Sequential(
            SpectralNorm(nn.Conv2d(4, 32, 3, stride=2)), nn.LeakyReLU())
        self.viewpoint_fc = nn.Sequential(SpectralNorm(nn.Linear(3, 32)),
                                          nn.LeakyReLU())
        self.conv1 = nn.Sequential(
            SpectralNorm(nn.Conv2d(64, 64, 3, stride=2)), nn.LeakyReLU())
        self.conv2 = nn.Sequential(
            SpectralNorm(nn.Conv2d(64, 128, 3, stride=2)), nn.LeakyReLU())
        self.conv3 = nn.Sequential(
            SpectralNorm(nn.Conv2d(128, 256, 3, stride=2)), nn.LeakyReLU())
        # self.conv4 = nn.Sequential(
        #     SpectralNorm(nn.Conv2d(256, 256, 2, stride=2)),
        #     nn.LeakyReLU()
        # )
        # self.conv5 = SpectralNorm(nn.Conv2d(256, 1, 2, stride=2))
        self.fc = nn.Sequential(SpectralNorm(nn.Linear(4 * 4 * 256, 1)),
                                nn.Sigmoid())
Example #21
    def __init__(self,
                 batch_size,
                 image_size=64,
                 z_dim=100,
                 conv_dim=64,
                 norm_layer=nn.InstanceNorm2d):
        super(Generator_SA, self).__init__()
        # Downsampling
        self.up_sample = nn.Sequential(
            nn.Conv2d(3, conv_dim, kernel_size=4, padding=1, stride=2),
            norm_layer(conv_dim),
            nn.ReLU(inplace=False),
            # Self_Attention(conv_dim),
            nn.Conv2d(conv_dim,
                      conv_dim * 2,
                      kernel_size=4,
                      padding=1,
                      stride=2),
            norm_layer(conv_dim * 2),
            nn.ReLU(inplace=False),
            # Self_Attention(conv_dim*2),
            nn.Conv2d(conv_dim * 2,
                      conv_dim * 4,
                      kernel_size=4,
                      padding=1,
                      stride=2),
            norm_layer(conv_dim * 4),
            nn.ReLU(inplace=False),
            nn.Conv2d(conv_dim * 4,
                      conv_dim * 8,
                      kernel_size=4,
                      padding=1,
                      stride=2),
            norm_layer(conv_dim * 8),
            nn.ReLU(inplace=False),
            nn.Conv2d(conv_dim * 8, 100, kernel_size=4, padding=0, stride=1),
            nn.ReLU(inplace=True))
        # Upsampling
        self.imsize = image_size
        layer1 = []
        layer2 = []
        layer3 = []
        last = []

        repeat_num = int(np.log2(self.imsize)) - 3
        mult = 2**repeat_num  # 8
        layer1.append(
            SpectralNorm(nn.ConvTranspose2d(z_dim, conv_dim * mult, 4)))
        layer1.append(norm_layer(conv_dim * mult))
        layer1.append(nn.ReLU(inplace=False))

        curr_dim = conv_dim * mult

        layer2.append(
            SpectralNorm(
                nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
        layer2.append(norm_layer(int(curr_dim / 2)))
        layer2.append(nn.ReLU(inplace=False))

        curr_dim = int(curr_dim / 2)

        layer3.append(
            SpectralNorm(
                nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
        layer3.append(norm_layer(int(curr_dim / 2)))
        layer3.append(nn.ReLU(inplace=False))

        if self.imsize == 64:
            layer4 = []
            curr_dim = int(curr_dim / 2)
            layer4.append(
                SpectralNorm(
                    nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
            layer4.append(norm_layer(int(curr_dim / 2)))
            layer4.append(nn.ReLU(inplace=False))
            self.l4 = nn.Sequential(*layer4)
            curr_dim = int(curr_dim / 2)

        self.l1 = nn.Sequential(*layer1)
        self.l2 = nn.Sequential(*layer2)
        self.l3 = nn.Sequential(*layer3)

        last.append(nn.ConvTranspose2d(curr_dim, 3, 4, 2, 1))
        last.append(nn.Tanh())
        self.last = nn.Sequential(*last)

        self.up_attn1 = Self_Attention(128)
        self.up_attn2 = Self_Attention(64)
Example #22
    def spectral_norm(self, module, use_spect=True):
        """Wrap a module in spectral normalization to stabilize training."""
        if use_spect:
            return SpectralNorm(module)
        else:
            return module
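A hypothetical call site inside a model constructor; use_spect here stands for whatever flag the surrounding class exposes (assumed for illustration):

        # Hypothetical: toggle spectral normalization of individual layers from one flag.
        self.conv = self.spectral_norm(nn.Conv2d(64, 128, 3, 1, 1), use_spect=use_spect)
        self.fc = self.spectral_norm(nn.Linear(128, 1), use_spect=use_spect)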
Example #23
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(DownsampleConv, self).__init__()
        self.downsample = nn.AvgPool2d(2, stride=2)
        self.conv = SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding))
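The forward pass is not included; one plausible ordering, sketched as an assumption, applies the spectrally normalized convolution first and then the 2x average pooling:

    def forward(self, x):
        # Hypothetical: conv then downsample; the original ordering is not shown.
        return self.downsample(self.conv(x))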
Example #24
def Conv2dSN(in_channels, out_channels, kernel_size=3, stride=1, padding=1, spectral_norm=False):
    if spectral_norm:
        return SpectralNorm(nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding))
    else:
        return nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
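A hypothetical call site, enabling spectral normalization for a discriminator layer and leaving a generator layer unconstrained (channel counts are illustrative):

# Hypothetical usage of Conv2dSN.
d_conv = Conv2dSN(64, 128, kernel_size=4, stride=2, padding=1, spectral_norm=True)
g_conv = Conv2dSN(64, 128, kernel_size=3, stride=1, padding=1, spectral_norm=False)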