# Example 1
def ConditionalLinearGenerator(n_samples,
                               labels,
                               n_features,
                               embedding_dim=100,
                               noise=None):
    """Generate `n_samples` feature vectors conditioned on integer labels.

    A label embedding is concatenated to the noise vector and pushed
    through three ReLU-activated linear layers.
    """
    assert labels.shape[0] == n_samples
    if noise is None:
        noise = tf.random.normal([n_samples, 128])
    else:
        assert labels.shape[0] == noise.shape[0]

    # Embed labels (vocabulary of 11 ids) and join with the noise.
    embedded = Embedding('ConditionalLinearGenerator.Embedding', 11,
                         embedding_dim, labels)
    joined = tf.concat([noise, embedded], 1)

    hidden = Linear('ConditionalLinearGenerator.Input', 128 + embedding_dim,
                    n_features * 2, joined)
    hidden = tf.nn.relu(hidden)
    hidden = Linear('ConditionalLinearGenerator.2', n_features * 2,
                    round(1.5 * n_features), hidden)
    hidden = tf.nn.relu(hidden)
    hidden = Linear('ConditionalLinearGenerator.Output',
                    round(1.5 * n_features), n_features, hidden)
    return tf.nn.relu(hidden)
# Example 2
def DCifarResnet(inputs, labels, dim, conditional=True, acgan=True):
    """CIFAR ResNet discriminator.

    Returns ``(wgan_score, acgan_logits)``; the second element is None
    unless both `conditional` and `acgan` are set.
    """
    x = tf.reshape(inputs, [-1, 3, 32, 32])
    x = OptimizedResBlockDisc1('Discriminator.1', x, dim)
    # One down-sampling residual block followed by two at constant size.
    for name, resample in (('Discriminator.2', 'down'),
                           ('Discriminator.3', None),
                           ('Discriminator.4', None)):
        x = ResidualBlock(name, dim, dim, 3, x, resample=resample,
                          labels=labels)
    x = nonlinearity(x)
    x = tf.reduce_mean(x, axis=[2, 3])  # global average pool over H, W
    wgan_out = tf.reshape(Linear('Discriminator.Output', dim, 1, x), [-1])
    if not (conditional and acgan):
        return wgan_out, None
    return wgan_out, Linear('Discriminator.ACGANOutput', dim, 10, x)
# Example 3
def LinearGenerator(n_samples, n_features, noise=None):
    """Map a 128-d noise vector to `n_features` via two hidden ReLU layers."""
    z_dim = 128
    if noise is None:
        noise = tf.random.normal([n_samples, z_dim])

    h = tf.nn.relu(Linear('LinearGenerator.Input', z_dim, z_dim * 2, noise))
    h = tf.nn.relu(Linear('LinearGenerator.2', z_dim * 2, z_dim * 4, h))
    return Linear('LinearGenerator.Output', z_dim * 4, n_features, h)
# Example 4
def LinearDiscriminator(inputs, DIM=64):
    """Score `inputs` with a four-layer MLP discriminator.

    Hidden widths shrink DIM -> DIM // 2 -> DIM // 4 with leaky-ReLU
    activations; returns unactivated scores of shape [batch, 1].
    """
    n_features = int(inputs.shape[1])

    # DIM // 2 / DIM // 4: integer floor division replaces int(DIM / 2),
    # which took a needless float round-trip (same result for positive DIM).
    output = Linear('LinearDiscriminator.1', n_features, DIM, inputs)
    output = tf.nn.leaky_relu(output)
    output = Linear('LinearDiscriminator.2', DIM, DIM // 2, output)
    output = tf.nn.leaky_relu(output)
    output = Linear('LinearDiscriminator.3', DIM // 2, DIM // 4, output)
    output = tf.nn.leaky_relu(output)
    output = Linear('LinearDiscriminator.Output', DIM // 4, 1, output)

    return output
# Example 5
def GResnet(n_samples, dim, dim_out, noise=None):
    """Deep bottleneck-ResNet generator: 128-d noise -> flat `dim_out` output.

    Upsamples 4x4 -> 64x64 through four 'up' blocks, with stacks of
    constant-size bottleneck blocks between them.
    """
    if noise is None:
        # tf.random.normal: TF2 spelling, consistent with the other
        # generators in this file (tf.random_normal was removed in TF2).
        noise = tf.random.normal([n_samples, 128])

    output = Linear('Generator.Input', 128, 4 * 4 * 8 * dim, noise)
    output = tf.reshape(output, [-1, 8 * dim, 4, 4])

    # range() replaces Python-2-only xrange() (NameError on Python 3).
    for i in range(6):
        output = BottleneckResidualBlock('Generator.4x4_{}'.format(i),
                                         8 * dim, 8 * dim, 3, output,
                                         resample=None)
    output = BottleneckResidualBlock('Generator.Up1', 8 * dim, 4 * dim, 3,
                                     output, resample='up')
    for i in range(6):
        output = BottleneckResidualBlock('Generator.8x8_{}'.format(i),
                                         4 * dim, 4 * dim, 3, output,
                                         resample=None)
    output = BottleneckResidualBlock('Generator.Up2', 4 * dim, 2 * dim, 3,
                                     output, resample='up')
    for i in range(6):
        output = BottleneckResidualBlock('Generator.16x16_{}'.format(i),
                                         2 * dim, 2 * dim, 3, output,
                                         resample=None)
    output = BottleneckResidualBlock('Generator.Up3', 2 * dim, 1 * dim, 3,
                                     output, resample='up')
    for i in range(6):
        output = BottleneckResidualBlock('Generator.32x32_{}'.format(i),
                                         1 * dim, 1 * dim, 3, output,
                                         resample=None)
    # dim // 2: channel counts must be ints; `dim / 2` is a float on Py3.
    output = BottleneckResidualBlock('Generator.Up4', 1 * dim, dim // 2, 3,
                                     output, resample='up')
    for i in range(5):
        output = BottleneckResidualBlock('Generator.64x64_{}'.format(i),
                                         dim // 2, dim // 2, 3, output,
                                         resample=None)

    output = Conv2D('Generator.Out', dim // 2, 3, 1, output, he_init=False)
    output = tf.tanh(output / 5.)

    return tf.reshape(output, [-1, dim_out])
# Example 6
def Generator(n_samples,
              DIM=64,
              OUTPUT_DIM=28 * 28,
              MODE='wgan-gp',
              noise=None):
    """DCGAN-style generator; returns [n_samples, OUTPUT_DIM] in (0, 1).

    Batch norm is only applied for plain 'wgan' mode; all other modes
    skip it entirely.
    """
    if noise is None:
        noise = tf.random.normal([n_samples, 128])

    use_bn = (MODE == 'wgan')

    h = Linear('Generator.Input', 128, 4 * 4 * 4 * DIM, noise)
    if use_bn:
        h = Batchnorm('Generator.BN1', [0], h)
    h = tf.reshape(tf.nn.relu(h), [-1, 4 * DIM, 4, 4])

    h = Deconv2D('Generator.2', 4 * DIM, 2 * DIM, 5, h)
    if use_bn:
        h = Batchnorm('Generator.BN2', [0, 2, 3], h)
    h = tf.nn.relu(h)

    # Crop the 8x8 map down to 7x7 so the remaining deconvs reach 28x28.
    h = h[:, :, :7, :7]

    h = Deconv2D('Generator.3', 2 * DIM, DIM, 5, h)
    if use_bn:
        h = Batchnorm('Generator.BN3', [0, 2, 3], h)

    h = Deconv2D('Generator.5', DIM, 1, 5, h)
    h = tf.nn.sigmoid(h)

    return tf.reshape(h, [-1, OUTPUT_DIM])
# Example 7
def ConditionalDiscriminator(inputs,
                             labels,
                             embedding_dim=100,
                             INPUT_WIDTH=28,
                             INPUT_HEIGHT=28,
                             DIM=64,
                             MODE='wgan-gp'):
    """Conditional DCGAN discriminator over an image plus a label plane.

    Labels are embedded, projected to an INPUT_WIDTH x INPUT_HEIGHT map
    and stacked as a second input channel next to the image.
    Returns a flat [batch] score vector.
    """
    assert labels.shape[0] == inputs.shape[0]
    labels_in = Embedding('ConditionalDiscriminator.Embedding', 11,
                          embedding_dim, labels)
    labels_in = Linear('ConditionalDiscriminator.Labels', embedding_dim,
                       INPUT_WIDTH * INPUT_HEIGHT, labels_in)
    labels_in = tf.reshape(labels_in, [-1, 1, INPUT_WIDTH, INPUT_HEIGHT])

    images_in = tf.reshape(inputs, [-1, 1, INPUT_WIDTH, INPUT_HEIGHT])

    # Concatenate along the channel axis (NCHW): image + label plane.
    output = tf.concat([images_in, labels_in], axis=1)

    # BUG FIX: the concatenated tensor has 2 channels, but the first conv
    # previously declared an input depth of 1.
    output = Conv2D('ConditionalDiscriminator.1', 2, DIM, 5, output, stride=2)
    output = LeakyReLU(output)

    output = Conv2D('ConditionalDiscriminator.2',
                    DIM,
                    2 * DIM,
                    5,
                    output,
                    stride=2)
    if MODE == 'wgan':
        output = Batchnorm('ConditionalDiscriminator.BN2', [0, 2, 3], output)
    output = LeakyReLU(output)

    output = Conv2D('ConditionalDiscriminator.3',
                    2 * DIM,
                    4 * DIM,
                    5,
                    output,
                    stride=2)
    if MODE == 'wgan':
        output = Batchnorm('ConditionalDiscriminator.BN3', [0, 2, 3], output)
    output = LeakyReLU(output)

    output = tf.reshape(output, [-1, 4 * 4 * 4 * DIM])
    output = Linear('ConditionalDiscriminator.Output', 4 * 4 * 4 * DIM, 1,
                    output)

    return tf.reshape(output, [-1])
# Example 8
def DResnetOptim(inputs, dim, labels=None, conditional=True, acgan=True):
    """64x64 ResNet discriminator with four down-sampling blocks.

    Returns ``(wgan_score, acgan_logits)``; the second element is None
    when the AC-GAN head is disabled.
    """
    x = tf.reshape(inputs, [-1, 3, 64, 64])
    x = Conv2D('Discriminator.Input', 3, dim, 3, x, he_init=False)

    # (name, in_mult, out_mult): channel widths as multiples of `dim`.
    for name, c_in, c_out in (('Discriminator.Res1', 1, 2),
                              ('Discriminator.Res2', 2, 4),
                              ('Discriminator.Res3', 4, 8),
                              ('Discriminator.Res4', 8, 8)):
        x = ResidualBlock(name, c_in * dim, c_out * dim, 3, x,
                          resample='down', labels=labels)

    flat = tf.reshape(x, [-1, 4 * 4 * 8 * dim])
    wgan_out = tf.reshape(
        Linear('Discriminator.Output', 4 * 4 * 8 * dim, 1, flat), [-1])

    if conditional and acgan:
        acgan_out = Linear('Discriminator.ACGANOutput', 4 * 4 * 8 * dim,
                           1000, flat)
        return wgan_out, acgan_out

    return wgan_out, None
# Example 9
def DCifar(inputs, dim):
    """Plain convolutional CIFAR discriminator; returns [batch] scores."""
    x = tf.reshape(inputs, [-1, 3, 32, 32])
    for name, c_in, c_out in (('D1.1', 3, dim),
                              ('D1.2', dim, 2 * dim),
                              ('D1.3', 2 * dim, 4 * dim)):
        x = LeakyReLU(Conv2D(name, c_in, c_out, 5, x, stride=2))
    # Dimensionality constraint deliberately relaxed: flatten everything.
    x = tf.reshape(x, [-1, 4 * 4 * 4 * dim])
    return tf.reshape(Linear('D1.Output', 4 * 4 * 4 * dim, 1, x), [-1])
# Example 10
def ConditionalLinearDiscriminator(inputs, labels, embedding_dim=100, DIM=64):
    """MLP discriminator conditioned by concatenating a label embedding."""
    assert labels.shape[0] == inputs.shape[0]
    n_features = int(inputs.shape[1])
    embedded = Embedding('ConditionalLinearDiscriminator.Embedding', 11,
                         embedding_dim, labels)

    h = tf.concat([inputs, embedded], axis=1)

    h = tf.nn.leaky_relu(
        Linear('ConditionalLinearDiscriminator.1',
               n_features + embedding_dim, DIM, h))
    h = tf.nn.leaky_relu(
        Linear('ConditionalLinearDiscriminator.2', DIM, DIM * 2, h))
    h = tf.nn.leaky_relu(
        Linear('ConditionalLinearDiscriminator.3', DIM * 2, DIM * 4, h))
    return Linear('ConditionalLinearDiscriminator.Output', DIM * 4, 1, h)
# Example 11
def GCifarResnet(n_samples, dim, dim_out, labels, noise=None):
    """Conditional CIFAR ResNet generator: noise -> flat [-1, dim_out]."""
    if noise is None:
        noise = tf.random_normal([n_samples, 128])
    x = Linear('Generator.Input', 128, 4 * 4 * dim, noise)
    x = tf.reshape(x, [-1, dim, 4, 4])
    # Three up-sampling residual blocks: 4x4 -> 32x32.
    for name in ('Generator.1', 'Generator.2', 'Generator.3'):
        x = ResidualBlock(name, dim, dim, 3, x, resample='up', labels=labels)
    x = Normalize('Generator.OutputN', x)
    x = nonlinearity(x)
    x = Conv2D('Generator.Output', dim, 3, 3, x, he_init=False)
    return tf.reshape(tf.tanh(x), [-1, dim_out])
# Example 12
def DMnist(inputs, dim):
    """Convolutional MNIST discriminator; returns [batch] scores."""
    x = tf.reshape(inputs, [-1, 1, 28, 28])
    for name, c_in, c_out in (('Discriminator.1', 1, dim),
                              ('Discriminator.2', dim, 2 * dim),
                              ('Discriminator.3', 2 * dim, 4 * dim)):
        x = LeakyReLU(Conv2D(name, c_in, c_out, 5, x, stride=2))
    x = tf.reshape(x, [-1, 4 * 4 * 4 * dim])
    x = Linear('Discriminator.Output', 4 * 4 * 4 * dim, 1, x)
    return tf.reshape(x, [-1])
# Example 13
def GCifar(n_samples, dim, dim_out, n, noise=None):
    """DCGAN CIFAR generator from an n-dimensional noise vector."""
    if noise is None:
        noise = tf.random_normal([n_samples, n])
    x = Linear('G.Input', n, 4 * 4 * 4 * dim, noise)
    x = tf.nn.relu(Batchnorm('G.BN1', [0], x))
    x = tf.reshape(x, [-1, 4 * dim, 4, 4])
    x = Deconv2D('G.2', 4 * dim, 2 * dim, 5, x)
    x = tf.nn.relu(Batchnorm('G.BN2', [0, 2, 3], x))
    x = Deconv2D('G.3', 2 * dim, dim, 5, x)
    x = tf.nn.relu(Batchnorm('G.BN3', [0, 2, 3], x))
    x = tf.tanh(Deconv2D('G.5', dim, 3, 5, x))
    return tf.reshape(x, [-1, dim_out])
# Example 14
def GResnetOptim(n_samples, dim, dim_out, labels, noise=None, nonlinearity=tf.nn.relu):
    """Conditional 64x64 ResNet generator.

    `nonlinearity` is the activation applied before the output conv
    (default tf.nn.relu, which preserves the original behavior).
    """
    if noise is None:
        noise = tf.random_normal([n_samples, 128])

    output = Linear('Generator.Input', 128, 4 * 4 * 8 * dim, noise)
    output = tf.reshape(output, [-1, 8 * dim, 4, 4])

    output = ResidualBlock('Generator.Res1', 8 * dim, 8 * dim, 3, output,
                           resample='up', labels=labels)
    output = ResidualBlock('Generator.Res2', 8 * dim, 4 * dim, 3, output,
                           resample='up', labels=labels)
    output = ResidualBlock('Generator.Res3', 4 * dim, 2 * dim, 3, output,
                           resample='up', labels=labels)
    output = ResidualBlock('Generator.Res4', 2 * dim, 1 * dim, 3, output,
                           resample='up', labels=labels)

    output = NormalizeD('Generator.OutputN', [0, 2, 3], output)
    # BUG FIX: the `nonlinearity` parameter was accepted but never used
    # (the code hard-coded tf.nn.relu); honor it here.
    output = nonlinearity(output)
    output = Conv2D('Generator.Output', 1 * dim, 3, 3, output)
    output = tf.tanh(output)
    return tf.reshape(output, [-1, dim_out])
# Example 15
def ConditionalGenerator(n_samples,
                         labels,
                         embedding_dim=100,
                         DIM=64,
                         OUTPUT_DIM=28 * 28,
                         MODE='wgan-gp',
                         noise=None):
    """Label-conditioned DCGAN generator; output values lie in (0, 1).

    The label embedding is concatenated to the noise before the first
    linear layer; batch norm is applied only in plain 'wgan' mode.
    """
    assert labels.shape[0] == n_samples
    if noise is None:
        noise = tf.random.normal([n_samples, 128])
    else:
        assert labels.shape[0] == noise.shape[0]

    embedded = Embedding('ConditionalGenerator.Embedding', 11,
                         embedding_dim, labels)
    gen_input = tf.concat([noise, embedded], 1)

    use_bn = (MODE == 'wgan')

    h = Linear('ConditionalGenerator.Input', 128 + embedding_dim,
               4 * 4 * 4 * DIM, gen_input)
    if use_bn:
        h = Batchnorm('ConditionalGenerator.BN1', [0], h)
    h = tf.reshape(tf.nn.relu(h), [-1, 4 * DIM, 4, 4])

    h = Deconv2D('ConditionalGenerator.2', 4 * DIM, 2 * DIM, 5, h)
    if use_bn:
        h = Batchnorm('ConditionalGenerator.BN2', [0, 2, 3], h)
    h = tf.nn.relu(h)

    # Crop the 8x8 map to 7x7 so subsequent deconvs land on 28x28.
    h = h[:, :, :7, :7]

    h = Deconv2D('ConditionalGenerator.3', 2 * DIM, DIM, 5, h)
    if use_bn:
        h = Batchnorm('ConditionalGenerator.BN3', [0, 2, 3], h)

    h = tf.nn.sigmoid(Deconv2D('ConditionalGenerator.5', DIM, 1, 5, h))
    return tf.reshape(h, [-1, OUTPUT_DIM])
# Example 16
def GMnist(n_samples, dim, dim_out, noise=None):
    """MNIST generator without batch norm; returns flat [-1, dim_out]."""
    if noise is None:
        noise = tf.random_normal([n_samples, 128])

    x = tf.nn.relu(Linear('Generator.Input', 128, 4 * 4 * 4 * dim, noise))
    x = tf.reshape(x, [-1, 4 * dim, 4, 4])

    x = tf.nn.relu(Deconv2D('Generator.2', 4 * dim, 2 * dim, 5, x))

    # Crop 8x8 -> 7x7 so the deconv chain reaches 28x28.
    x = x[:, :, :7, :7]

    x = tf.nn.relu(Deconv2D('Generator.3', 2 * dim, dim, 5, x))
    x = tf.nn.sigmoid(Deconv2D('Generator.5', dim, 1, 5, x))

    return tf.reshape(x, [-1, dim_out])
# Example 17
def Discriminator(inputs,
                  INPUT_WIDTH=28,
                  INPUT_HEIGHT=28,
                  DIM=64,
                  MODE='wgan-gp'):
    """DCGAN discriminator; batch norm only in plain 'wgan' mode."""
    use_bn = (MODE == 'wgan')
    x = tf.reshape(inputs, [-1, 1, INPUT_WIDTH, INPUT_HEIGHT])

    x = LeakyReLU(Conv2D('Discriminator.1', 1, DIM, 5, x, stride=2))

    x = Conv2D('Discriminator.2', DIM, 2 * DIM, 5, x, stride=2)
    if use_bn:
        x = Batchnorm('Discriminator.BN2', [0, 2, 3], x)
    x = LeakyReLU(x)

    x = Conv2D('Discriminator.3', 2 * DIM, 4 * DIM, 5, x, stride=2)
    if use_bn:
        x = Batchnorm('Discriminator.BN3', [0, 2, 3], x)
    x = LeakyReLU(x)

    x = tf.reshape(x, [-1, 4 * 4 * 4 * DIM])
    x = Linear('Discriminator.Output', 4 * 4 * 4 * DIM, 1, x)

    return tf.reshape(x, [-1])
# Example 18
def discriminator(x, z):
    '''Three Spatial Neighbourhood Sizes (WINDOW_SIZE): 7, 9, 11.

    Joint discriminator over an image patch `x` and a latent code `z`.
    Each WINDOW_SIZE branch reduces the patch to a 3x3 x (8*DIM) feature
    map (larger windows get deeper conv stacks); the flattened features
    are then combined with a projection of `z` ("Scheme 2").

    Returns a 3-tuple:
      - gan score, shape [batch]
      - class logits, shape [batch, N_CLS]
      - extracted latent code, shape [batch, LATENT_DIM]
    '''
    if opt.WINDOW_SIZE == 7:
        # 7x7 patch: two VALID 3x3 convs (presumably 7 -> 5 -> 3 spatial),
        # then one SAME conv to stay at 3x3 — TODO confirm Conv2D padding
        # semantics match TF's.
        output = tf.reshape(x, [-1, opt.CHANNEL, 7, 7])

        output = Conv2D('Discriminator.1',
                        opt.CHANNEL,
                        2 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        output = leakyrelu(output)

        output = Conv2D('Discriminator.2',
                        2 * opt.DIM,
                        4 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        output = leakyrelu(output)

        output = Conv2D('Discriminator.3',
                        4 * opt.DIM,
                        8 * opt.DIM,
                        3,
                        output,
                        padding='SAME',
                        stride=1)
        output = leakyrelu(output)

    elif opt.WINDOW_SIZE == 9:
        # 9x9 patch: three VALID 3x3 convs (presumably 9 -> 7 -> 5 -> 3).
        output = tf.reshape(x, [-1, opt.CHANNEL, 9, 9])

        output = Conv2D('Discriminator.1',
                        opt.CHANNEL,
                        2 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        output = leakyrelu(output)

        output = Conv2D('Discriminator.2',
                        2 * opt.DIM,
                        4 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        output = leakyrelu(output)

        output = Conv2D('Discriminator.3',
                        4 * opt.DIM,
                        8 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        output = leakyrelu(output)

    elif opt.WINDOW_SIZE == 11:
        # 11x11 patch: four VALID 3x3 convs (presumably 11 -> 9 -> 7 -> 5
        # -> 3), starting from a narrower first layer (DIM, not 2*DIM).
        output = tf.reshape(x, [-1, opt.CHANNEL, 11, 11])

        output = Conv2D('Discriminator.1',
                        opt.CHANNEL,
                        opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        output = leakyrelu(output)

        output = Conv2D('Discriminator.2',
                        opt.DIM,
                        2 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        output = leakyrelu(output)

        output = Conv2D('Discriminator.3',
                        2 * opt.DIM,
                        4 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        output = leakyrelu(output)

        output = Conv2D('Discriminator.4',
                        4 * opt.DIM,
                        8 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        output = leakyrelu(output)
    else:
        raise NotImplementedError

    # All branches are expected to end at a 3x3 x (8*DIM) map; flatten it.
    output = tf.reshape(output, [-1, 3 * 3 * 8 * opt.DIM])
    '''treat D as an extractor, this is the output'''
    # Auxiliary head: project the conv features to LATENT_DIM so the
    # discriminator also acts as a feature extractor.
    de_output = Linear('Discriminator.de1', 3 * 3 * 8 * opt.DIM,
                       opt.LATENT_DIM, output)

    # """Scheme 1."""
    # '''for classification'''
    # cls_output = Linear('Discriminator.c1', 3 * 3 * 8 * opt.DIM, opt.N_CLS, output)
    # # cls_output = Linear('Discriminator.c1', 3 * 3 * 4 * opt.DIM, 256, output)
    # # cls_output = Linear('Discriminator.c2', 256, opt.N_CLS, output)
    # # cls_output = tf.nn.softmax(cls_output)
    #
    # z_output = Linear('Discriminator.z1', opt.LATENT_DIM, 512, z)
    # # z_output = Linear('Discriminator.z1', opt.LATENT_DIM, 512, z)
    # z_output = leakyrelu(z_output)
    #
    # output = tf.concat([output, z_output], 1)
    # output = Linear('Discriminator.zx1', 3 * 3 * 8 * opt.DIM + 512, 128, output)
    # # output = Linear('Discriminator.zx1', 3 * 3 * 4 * opt.DIM + 512, 512, output)
    # output = leakyrelu(output)
    #
    # output = Linear('Discriminator.Output', 128, 1, output)
    # # output = Linear('Discriminator.Output', 512, 1, output)
    """Scheme 2."""
    # Project z to 512 units and fuse it with the conv features before
    # both the classification head and the GAN score head.
    z_output = Linear('Discriminator.z1', opt.LATENT_DIM, 512, z)
    z_output = leakyrelu(z_output)

    output = tf.concat([output, z_output], 1)
    output = Linear('Discriminator.zx1', 3 * 3 * 8 * opt.DIM + 512, 128,
                    output)
    # output = Linear('Discriminator.zx1', 3 * 3 * 8 * opt.DIM + 512, 512, output)
    output = leakyrelu(output)

    # Classification head: raw logits (no softmax — presumably applied by
    # the loss; confirm against the training code).
    cls_output = Linear('Discriminator.c1', 128, opt.N_CLS, output)
    # cls_output = Linear('Discriminator.c1', 3 * 3 * 8 * opt.DIM, 256, output)
    # cls_output = Linear('Discriminator.c2', 256, opt.N_CLS, output)
    # cls_output = tf.nn.softmax(cls_output)

    # GAN score head: a single unactivated unit per sample.
    output = Linear('Discriminator.Output', 128, 1, output)
    # output = Linear('Discriminator.Output', 512, 1, output)

    return tf.reshape(output, [-1]), tf.reshape(cls_output,
                                                [-1, opt.N_CLS]), tf.reshape(
                                                    de_output,
                                                    [-1, opt.LATENT_DIM])
# Example 19
def extractor(inputs):
    '''Three Spatial Neighbourhood Sizes (WINDOW_SIZE): 7, 9, 11.

    Standalone feature extractor mirroring the discriminator's conv
    stacks, with optional batch norm (opt.BN_FLAG) after every conv
    except the first. Maps an input patch to a [batch, LATENT_DIM] code.
    '''
    if opt.WINDOW_SIZE == 7:
        # 7x7 patch: two VALID 3x3 convs then one SAME conv (presumably
        # 7 -> 5 -> 3 -> 3 spatial — TODO confirm Conv2D padding semantics).
        output = tf.reshape(inputs, [-1, opt.CHANNEL, 7, 7])

        output = Conv2D('Extractor.1',
                        opt.CHANNEL,
                        2 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        output = leakyrelu(output)

        output = Conv2D('Extractor.2',
                        2 * opt.DIM,
                        4 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Extractor.BN1', [0, 2, 3], output)
        output = leakyrelu(output)

        output = Conv2D('Extractor.3',
                        4 * opt.DIM,
                        8 * opt.DIM,
                        3,
                        output,
                        padding='SAME',
                        stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Extractor.BN2', [0, 2, 3], output)
        output = leakyrelu(output)

    elif opt.WINDOW_SIZE == 9:
        # 9x9 patch: three VALID 3x3 convs (presumably 9 -> 7 -> 5 -> 3).
        output = tf.reshape(inputs, [-1, opt.CHANNEL, 9, 9])

        output = Conv2D('Extractor.1',
                        opt.CHANNEL,
                        2 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        output = leakyrelu(output)

        output = Conv2D('Extractor.2',
                        2 * opt.DIM,
                        4 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Extractor.BN1', [0, 2, 3], output)
        output = leakyrelu(output)

        output = Conv2D('Extractor.3',
                        4 * opt.DIM,
                        8 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Extractor.BN2', [0, 2, 3], output)
        output = leakyrelu(output)

    elif opt.WINDOW_SIZE == 11:
        # 11x11 patch: four VALID 3x3 convs (presumably 11 -> 9 -> 7 ->
        # 5 -> 3), starting from a narrower first layer (DIM, not 2*DIM).
        output = tf.reshape(inputs, [-1, opt.CHANNEL, 11, 11])

        output = Conv2D('Extractor.1',
                        opt.CHANNEL,
                        opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        output = leakyrelu(output)

        output = Conv2D('Extractor.2',
                        opt.DIM,
                        2 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Extractor.BN1', [0, 2, 3], output)
        output = leakyrelu(output)

        output = Conv2D('Extractor.3',
                        2 * opt.DIM,
                        4 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Extractor.BN2', [0, 2, 3], output)
        output = leakyrelu(output)

        output = Conv2D('Extractor.4',
                        4 * opt.DIM,
                        8 * opt.DIM,
                        3,
                        output,
                        padding='VALID',
                        stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Extractor.BN3', [0, 2, 3], output)
        output = leakyrelu(output)

    else:
        raise NotImplementedError

    # All branches are expected to end at a 3x3 x (8*DIM) map; flatten it
    # and project down to the latent dimension.
    output = tf.reshape(output, [-1, 3 * 3 * 8 * opt.DIM])
    output = Linear('Extractor.Output', 3 * 3 * 8 * opt.DIM, opt.LATENT_DIM,
                    output)

    return tf.reshape(output, [-1, opt.LATENT_DIM])
# Example 20
def generator(noise, label):
    # We have implemented three types of combination for noise and label. We used the first finally.
    # Label/noise fusion (opt.EMBEDDING_TYPE):
    #   'LATENT' — embed the label to LATENT_DIM and multiply it into the noise;
    #   '10'     — embed to 10 dims and concatenate with the noise;
    #   'NONE'   — concatenate a one-hot label with the noise.
    # The fused vector is projected to a 3x3 x (8*DIM) map, then deconvolved
    # up to the configured WINDOW_SIZE patch; returns [-1, opt.OUTPUT_DIM].
    if opt.EMBEDDING_TYPE == 'LATENT':
        label_emb = Embedding('Generator.Embedding', opt.N_CLS, opt.LATENT_DIM,
                              label)
        gen_input = tf.multiply(label_emb, noise)
        output = Linear('Generator.Input', opt.LATENT_DIM, 3 * 3 * 8 * opt.DIM,
                        gen_input)
    elif opt.EMBEDDING_TYPE == '10':
        label_emb = Embedding('Generator.Embedding', opt.N_CLS, 10, label)
        gen_input = tf.concat([label_emb, noise], 1)
        output = Linear('Generator.Input', opt.LATENT_DIM + 10,
                        3 * 3 * 8 * opt.DIM, gen_input)
    elif opt.EMBEDDING_TYPE == 'NONE':
        gen_input = tf.concat(
            [tf.one_hot(indices=label, depth=opt.N_CLS), noise], 1)
        output = Linear('Generator.Input', opt.LATENT_DIM + opt.N_CLS,
                        3 * 3 * 8 * opt.DIM, gen_input)
    else:
        raise NotImplementedError

    if opt.BN_FLAG:
        output = Batchnorm('Generator.BN1', [0], output)
    output = tf.nn.relu(output)
    # Start from a 3x3 spatial map with 8*DIM channels (NCHW).
    output = tf.reshape(output, [-1, 8 * opt.DIM, 3, 3])
    '''Three Spatial Neighbourhood Sizes (WINDOW_SIZE): 7, 9, 11.'''
    if opt.WINDOW_SIZE == 7:
        # 7x7 target: one SAME deconv then two VALID deconvs (presumably
        # 3 -> 3 -> 5 -> 7 spatial — TODO confirm Deconv2D padding
        # semantics).
        output = Deconv2D('Generator.2',
                          8 * opt.DIM,
                          4 * opt.DIM,
                          3,
                          output,
                          padding='SAME',
                          stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Generator.BN2', [0, 2, 3], output)
        output = tf.nn.relu(output)

        output = Deconv2D('Generator.3',
                          4 * opt.DIM,
                          2 * opt.DIM,
                          3,
                          output,
                          padding='VALID',
                          stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Generator.BN3', [0, 2, 3], output)
        output = tf.nn.relu(output)

        output = Deconv2D('Generator.4',
                          2 * opt.DIM,
                          opt.CHANNEL,
                          3,
                          output,
                          padding='VALID',
                          stride=1)
        output = tf.tanh(output)

    elif opt.WINDOW_SIZE == 9:
        # 9x9 target: three VALID deconvs (presumably 3 -> 5 -> 7 -> 9).
        output = Deconv2D('Generator.2',
                          8 * opt.DIM,
                          4 * opt.DIM,
                          3,
                          output,
                          padding='VALID',
                          stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Generator.BN2', [0, 2, 3], output)
        output = tf.nn.relu(output)

        output = Deconv2D('Generator.3',
                          4 * opt.DIM,
                          2 * opt.DIM,
                          3,
                          output,
                          padding='VALID',
                          stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Generator.BN3', [0, 2, 3], output)
        output = tf.nn.relu(output)

        output = Deconv2D('Generator.4',
                          2 * opt.DIM,
                          opt.CHANNEL,
                          3,
                          output,
                          padding='VALID',
                          stride=1)
        output = tf.tanh(output)

    elif opt.WINDOW_SIZE == 11:
        # 11x11 target: four VALID deconvs (presumably 3 -> 5 -> 7 -> 9
        # -> 11), with an extra DIM-channel stage before the output.
        output = Deconv2D('Generator.2',
                          8 * opt.DIM,
                          4 * opt.DIM,
                          3,
                          output,
                          padding='VALID',
                          stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Generator.BN2', [0, 2, 3], output)
        output = tf.nn.relu(output)

        output = Deconv2D('Generator.3',
                          4 * opt.DIM,
                          2 * opt.DIM,
                          3,
                          output,
                          padding='VALID',
                          stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Generator.BN3', [0, 2, 3], output)
        output = tf.nn.relu(output)

        output = Deconv2D('Generator.4',
                          2 * opt.DIM,
                          opt.DIM,
                          3,
                          output,
                          padding='VALID',
                          stride=1)
        if opt.BN_FLAG:
            output = Batchnorm('Generator.BN4', [0, 2, 3], output)
        output = tf.nn.relu(output)

        output = Deconv2D('Generator.5',
                          opt.DIM,
                          opt.CHANNEL,
                          3,
                          output,
                          padding='VALID',
                          stride=1)
        output = tf.tanh(output)
    else:
        raise NotImplementedError

    # tanh output in [-1, 1], flattened to the configured output size.
    return tf.reshape(output, [-1, opt.OUTPUT_DIM])
# Example 21
def DResnet(inputs, dim, dim_out):
    """Deep 64x64 ResNet discriminator; returns scaled [batch] scores.

    Four down-sampling stages (64 -> 4 spatial) separated by stacks of
    constant-size residual blocks.  `dim_out` is accepted for signature
    parity with the generators but is not used here.
    """
    output = tf.reshape(inputs, [-1, 3, 64, 64])
    # dim // 2: channel counts must be ints (`dim / 2` is a float on Py3).
    output = Conv2D('Discriminator.In', 3, dim // 2, 1, output, he_init=False)

    # range() replaces Python-2-only xrange() (NameError on Python 3).
    for i in range(5):
        output = ResidualBlock('Discriminator.64x64_{}'.format(i),
                               dim // 2,
                               dim // 2,
                               3,
                               output,
                               resample=None)
    output = ResidualBlock('Discriminator.Down1',
                           dim // 2,
                           dim * 1,
                           3,
                           output,
                           resample='down')
    for i in range(6):
        output = ResidualBlock('Discriminator.32x32_{}'.format(i),
                               dim * 1,
                               dim * 1,
                               3,
                               output,
                               resample=None)
    output = ResidualBlock('Discriminator.Down2',
                           dim * 1,
                           dim * 2,
                           3,
                           output,
                           resample='down')
    for i in range(6):
        output = ResidualBlock('Discriminator.16x16_{}'.format(i),
                               dim * 2,
                               dim * 2,
                               3,
                               output,
                               resample=None)
    output = ResidualBlock('Discriminator.Down3',
                           dim * 2,
                           dim * 4,
                           3,
                           output,
                           resample='down')
    for i in range(6):
        output = ResidualBlock('Discriminator.8x8_{}'.format(i),
                               dim * 4,
                               dim * 4,
                               3,
                               output,
                               resample=None)
    output = ResidualBlock('Discriminator.Down4',
                           dim * 4,
                           dim * 8,
                           3,
                           output,
                           resample='down')
    for i in range(6):
        output = ResidualBlock('Discriminator.4x4_{}'.format(i),
                               dim * 8,
                               dim * 8,
                               3,
                               output,
                               resample=None)

    output = tf.reshape(output, [-1, 4 * 4 * 8 * dim])
    output = Linear('Discriminator.Output', 4 * 4 * 8 * dim, 1, output)

    # Scores are divided by 5 — mirrors the `tanh(output / 5.)` scaling in
    # the matching generator.
    return tf.reshape(output / 5., [-1])
# Example 22
def LeakyReLULayer(name, n_in, n_out, inputs):
    """He-initialized linear layer followed by a leaky ReLU."""
    pre_activation = Linear(name + '.Linear', n_in, n_out, inputs,
                            initialization='he')
    return LeakyReLU(pre_activation)
# Example 23
def ReLULayer(name, n_in, n_out, inputs):
    """He-initialized linear layer followed by a ReLU."""
    pre_activation = Linear(name + '.Linear', n_in, n_out, inputs,
                            initialization='he')
    return tf.nn.relu(pre_activation)