Example #1
0
def Res_Block(inputs, output_channels):
    """Upsampling residual block with spectrally normalized transposed convs.

    Main path: BN -> LeakyReLU -> SN-ConvTranspose(stride 2) -> BN ->
    LeakyReLU -> SN-ConvTranspose(stride 1). Shortcut: BN -> LeakyReLU ->
    SN-ConvTranspose(stride 2), so both paths double the spatial size and
    can be summed.
    """
    def _pre_act(t):
        # Pre-activation: BatchNorm followed by LeakyReLU(0.1).
        return layers.LeakyReLU(alpha=0.1)(layers.BatchNormalization()(t))

    def _sn_convtr(t, stride):
        # Spectrally normalized 3x3 transposed conv, ReLU-activated, no bias.
        convtr = layers.Conv2DTranspose(output_channels,
                                        3,
                                        stride,
                                        padding='same',
                                        activation='relu',
                                        use_bias=False)
        return SpectralNormalization(convtr)(t)

    main = _sn_convtr(_pre_act(inputs), 2)      # 2x upsample
    main = _sn_convtr(_pre_act(main), 1)        # refine at new resolution

    shortcut = _sn_convtr(_pre_act(inputs), 2)  # match main-path shape

    return layers.add([shortcut, main])
def Optimized_Block(inputs, output_channels, training=False):
    """First discriminator block (ReLU variant); halves spatial resolution.

    Main path: SN-Conv(stride 1) -> ReLU -> SN-Conv(stride 2).
    Shortcut: SN-Conv(stride 2) straight from the input.
    All convolutions are spectrally normalized 3x3.
    """
    def _sn_conv(t, stride):
        conv = layers.Conv2D(output_channels, 3, stride, padding='same')
        return SpectralNormalization(conv)(t, training=training)

    main = _sn_conv(inputs, 1)
    main = layers.ReLU()(main)
    main = _sn_conv(main, 2)        # downsample

    shortcut = _sn_conv(inputs, 2)  # downsample to match the main path

    return layers.add([shortcut, main])
def Optimized_Block(inputs, output_channels):
    """First discriminator block (LeakyReLU variant); halves resolution.

    Main path: SN-Conv(stride 1) -> LeakyReLU(0.1) -> SN-Conv(stride 2).
    Shortcut: SN-Conv(stride 2) straight from the input.
    """
    def _sn_conv(t, stride):
        conv = layers.Conv2D(output_channels, 3, stride, padding='same')
        return SpectralNormalization(conv)(t)

    main = _sn_conv(inputs, 1)
    main = layers.LeakyReLU(alpha=0.1)(main)
    main = _sn_conv(main, 2)        # downsample

    shortcut = _sn_conv(inputs, 2)  # downsample to match the main path

    return layers.add([shortcut, main])
Example #4
0
def Block(inputs, output_channels, training=False):
    """Generator residual block: pre-activation, 2x upsampling, SN convs.

    Main path: BN -> ReLU -> SN-ConvTranspose(stride 2) -> BN -> ReLU ->
    SN-Conv(stride 1). Shortcut: SN-ConvTranspose(stride 2) on the raw
    input. Upsampling is done with a transposed conv rather than
    nearest-neighbor resize.
    """
    main = layers.BatchNormalization()(inputs, training=training)
    main = layers.ReLU()(main)
    up = layers.Conv2DTranspose(output_channels, 3, 2, padding='same')
    main = SpectralNormalization(up)(main, training=training)

    main = layers.BatchNormalization()(main, training=training)
    main = layers.ReLU()(main)
    refine = layers.Conv2D(output_channels, 3, 1, padding='same')
    main = SpectralNormalization(refine)(main, training=training)

    up_sc = layers.Conv2DTranspose(output_channels, 3, 2, padding='same')
    shortcut = SpectralNormalization(up_sc)(inputs, training=training)

    return layers.add([shortcut, main])
def Block(inputs, output_channels, downsample=True, training=False):
    """Discriminator residual block with spectrally normalized 3x3 convs.

    Main path: ReLU -> SN-Conv(stride 1) -> ReLU -> SN-Conv(stride).
    Shortcut: ReLU -> SN-Conv(stride). `stride` is 2 when downsampling,
    otherwise 1, so both paths always agree in shape.
    """
    stride = 2 if downsample else 1

    def _relu_sn_conv(t, s):
        # ReLU pre-activation followed by a spectrally normalized conv.
        activated = layers.ReLU()(t)
        conv = layers.Conv2D(output_channels, 3, s, padding='same')
        return SpectralNormalization(conv)(activated, training=training)

    main = _relu_sn_conv(inputs, 1)
    main = _relu_sn_conv(main, stride)

    shortcut = _relu_sn_conv(inputs, stride)

    return layers.add([shortcut, main])
Example #6
0
def get_res_generator(config):
    """Build the residual, conditional generator.

    Projects noise (optionally concatenated with a one-hot class label)
    to a 4x4 feature map, then repeatedly applies upsampling Res_Blocks
    (with optional self-attention) until `config['img_size']` is reached,
    and maps to RGB with a tanh conv.

    Returns a Model taking [z, condition_label] and producing an image.
    """
    gf_dim = config['gf_dim']
    z = Input(shape=(config['z_dim'], ), name='noisy')
    condition_label = Input(shape=(), dtype=tf.int32, name='condition_label')

    # BUG FIX: `power` was computed *after* it was first used below,
    # raising a NameError. It must be known before sizing the Dense layer.
    # power = number of 2x upsampling stages from 4x4 up to img_size.
    power = np.log2(config['img_size'] / 4).astype('int')

    if config['use_label']:
        # BUG FIX: `num_classes` was an undefined free variable; read it
        # from the config dict like the other builders in this file do.
        one_hot_label = tf.one_hot(condition_label,
                                   depth=config['num_classes'])
        x = layers.Concatenate()([z, one_hot_label])
    else:
        x = z

    x = SpectralNormalization(layers.Dense(4 * 4 * gf_dim * 2**(power - 1)))(x)
    x = tf.reshape(x, [-1, 4, 4, gf_dim * 2**(power - 1)])

    for p in reversed(range(power)):
        x = Res_Block(x, gf_dim * 2**p)
        if config['use_attention'] and int(x.shape[1]) in config['attn_dim_G']:
            x = AttentionLayer()(x)

    outputs = layers.Conv2D(3, 1, 1, padding='same', activation='tanh')(x)

    return Model(inputs=[z, condition_label], outputs=outputs)
Example #7
0
def get_generator(config):
    """Build the conditional generator with a fixed batch size.

    Projects noise (optionally concatenated with a one-hot class label)
    to a 4x4 feature map, applies upsampling Blocks (with optional
    self-attention) up to `config['img_size']`, and outputs a tanh image.

    Returns a Model taking [z, condition_label] and producing an image.
    """
    gf_dim = config['gf_dim']
    z = Input(shape=(config['z_dim'], ),
              batch_size=config['batch_size'],
              name='noisy')
    condition_label = Input(shape=(),
                            batch_size=config['batch_size'],
                            dtype=tf.int32,
                            name='condition_label')

    if config['use_label']:
        one_hot_label = tf.one_hot(condition_label,
                                   depth=config['num_classes'])
        # BUG FIX: the original concatenated `x`, which is unbound on this
        # branch; the noise vector `z` is the intended input.
        x = layers.Concatenate()([z, one_hot_label])
    else:
        x = z

    x = SpectralNormalization(layers.Dense(4 * 4 * gf_dim * 16))(x)
    x = tf.reshape(x, [-1, 4, 4, gf_dim * 16])

    # Number of 2x upsampling stages from 4x4 to img_size (64->4; 128->5).
    power = np.log2(config['img_size'] / 4).astype('int')

    for p in reversed(range(power)):
        x = Block(x, gf_dim * (2**p))
        if config['use_attention'] and int(x.shape[1]) in config['attn_dim_G']:
            x = AttentionLayer()(x)

    outputs = layers.Conv2D(3,
                            4,
                            1,
                            padding='same',
                            use_bias=False,
                            activation='tanh')(x)
    return Model(inputs=[z, condition_label], outputs=outputs)
def get_res_discriminator(config):
    """Build the residual (projection) discriminator.

    Stacks downsampling residual blocks from img_size down to 4x4. When
    labels are used, applies global sum pooling and adds the projection
    term <features, label_embedding> to a spectrally normalized linear
    score; otherwise emits a spatial patch score via a 4x4 conv.

    Returns a Model taking [img, condition_label].
    """
    df_dim = config['df_dim']
    power = np.log2(config['img_size'] / 4).astype('int')

    img = Input(shape=(config['img_size'], config['img_size'], 3),
                name='image')
    condition_label = Input(shape=(), dtype=tf.int32, name='condition_label')

    feat = Optimized_Block(img, df_dim * 1)  # first 2x downsample
    for p in range(1, power):
        feat = Res_Block(feat, df_dim * 2**p)  # successive 2x downsamples
        # NOTE(review): this reads the generator attention key
        # 'attn_dim_G' — possibly a discriminator key was intended;
        # confirm against the config schema.
        if config['use_attention'] and int(feat.shape[1]) in config['attn_dim_G']:
            feat = AttentionLayer()(feat)

    feat = Res_Block(feat, df_dim * 2**power, downsample=False)  # stays 4x4

    if not config['use_label']:
        # Unconditional: per-patch scores from a plain 4x4 conv.
        outputs = layers.Conv2D(1, 4, 1, padding='same')(feat)
        return Model(inputs=[img, condition_label], outputs=outputs)

    feat = layers.ReLU()(feat)
    pooled = tf.reduce_sum(feat, axis=[1, 2])  # global sum pooling
    outputs = SpectralNormalization(layers.Dense(1))(pooled)

    label_feature = layers.Embedding(config['num_classes'],
                                     df_dim * 16)(condition_label)

    # Projection term: inner product of pooled features and the label
    # embedding, added to the linear score.
    outputs += tf.reduce_sum(pooled * label_feature, axis=1, keepdims=True)
    return Model(inputs=[img, condition_label], outputs=outputs)
def Res_Block(inputs, output_channels, downsample=True):
    """Discriminator residual block with spectrally normalized 3x3 convs.

    Main path: LeakyReLU -> SN-Conv(stride 1) -> LeakyReLU ->
    SN-Conv(stride). Shortcut: LeakyReLU -> SN-Conv(stride). `stride` is
    2 when downsampling, otherwise 1, so both paths agree in shape.
    """
    stride = 2 if downsample else 1

    x = layers.LeakyReLU(alpha=0.1)(inputs)
    conv = layers.Conv2D(output_channels, 3, 1, padding='same')
    x = SpectralNormalization(conv)(x)

    x = layers.LeakyReLU(alpha=0.1)(x)
    conv = layers.Conv2D(output_channels, 3, stride, padding='same')
    x = SpectralNormalization(conv)(x)

    x_ = layers.LeakyReLU(alpha=0.1)(inputs)
    # BUG FIX: the original called the Conv2D layer on x_ immediately, so
    # SpectralNormalization wrapped a *tensor* instead of a layer, and the
    # result overwrote the main-path `x` — leaving `x_` un-strided and the
    # final add summing mismatched shapes when downsampling. Wrap the
    # layer, apply it to the shortcut, and keep the result in `x_`.
    conv_ = layers.Conv2D(output_channels, 3, stride, padding='same')
    x_ = SpectralNormalization(conv_)(x_)

    return layers.add([x_, x])
Example #10
0
def Block(inputs, output_channels):
    """Plain upsampling block: SN-ConvTranspose(4x4, stride 2, no bias)
    followed by BatchNorm and LeakyReLU(0.1). Doubles spatial size."""
    up = layers.Conv2DTranspose(output_channels,
                                4,
                                2,
                                padding='same',
                                use_bias=False)
    out = SpectralNormalization(up)(inputs)
    out = layers.BatchNormalization()(out)
    return layers.LeakyReLU(alpha=0.1)(out)
Example #11
0
def get_generator(num_classes, gf_dim=16, training=False):
    """Build a 128x128 conditional generator.

    Concatenates a 128-dim noise vector with a one-hot class label,
    projects to a 4x4 map, upsamples through five Blocks (self-attention
    after the 32x32 stage), and maps to RGB with a spectrally normalized
    tanh conv.

    Returns a Model taking [z, condition_label].
    """
    z = Input(shape=(128, ), name='noisy')
    condition_label = Input(shape=(), dtype=tf.int32, name='condition_label')

    one_hot_label = tf.one_hot(condition_label, depth=num_classes)
    h = layers.Concatenate()([z, one_hot_label])
    h = SpectralNormalization(layers.Dense(4 * 4 * gf_dim * 16))(h)
    h = tf.reshape(h, [-1, 4, 4, gf_dim * 16])

    # Channel widths per upsampling stage: 4x4 -> 128x128.
    h = Block(h, gf_dim * 16, training=training)  # 8x8
    h = Block(h, gf_dim * 8, training=training)  # 16x16
    h = Block(h, gf_dim * 4, training=training)  # 32x32
    h = Attention_Layer()(h)
    h = Block(h, gf_dim * 2, training=training)  # 64x64
    h = Block(h, gf_dim * 1, training=training)  # 128x128

    h = layers.BatchNormalization()(h, training=training)
    h = layers.ReLU()(h)
    to_rgb = layers.Conv2D(3, 3, 1, padding='same', activation='tanh')
    outputs = SpectralNormalization(to_rgb)(h)

    return Model(inputs=[z, condition_label], outputs=outputs)
def get_discriminator(num_classes, df_dim=16, training=False):
    """Build a 128x128 conditional (projection) discriminator.

    Downsamples through residual Blocks (self-attention after the 32x32
    stage) to a 4x4 map, global-sum-pools, and combines a spectrally
    normalized linear score with the projection term
    <features, label_embedding>.

    Returns a Model taking [img, condition_label].
    """
    img = Input(shape=(128, 128, 3), name='image')
    condition_label = Input(shape=(), dtype=tf.int32, name='condition_label')

    h = Optimized_Block(img, df_dim * 1, training=training)  # 64x64
    h = Block(h, df_dim * 2, training=training)  # 32x32
    h = Attention_Layer()(h)

    h = Block(h, df_dim * 4, training=training)  # 16x16
    h = Block(h, df_dim * 8, training=training)  # 8x8
    h = Block(h, df_dim * 16, training=training)  # 4x4
    h = Block(h, df_dim * 16, downsample=False, training=training)  # 4x4

    h = layers.ReLU()(h)
    pooled = tf.reduce_sum(h, axis=[1, 2])  # global sum pooling

    outputs = SpectralNormalization(layers.Dense(1))(pooled)
    embedding = layers.Embedding(num_classes, df_dim * 16)
    label_feature = SpectralNormalization(embedding)(condition_label)
    # Projection term added to the linear score.
    outputs += tf.reduce_sum(pooled * label_feature, axis=1, keepdims=True)

    return Model(inputs=[img, condition_label], outputs=outputs)
def Block(inputs, output_channels):
    """Plain downsampling block: SN-Conv(4x4, stride 2) followed by
    LeakyReLU(0.1). Halves spatial size."""
    down = layers.Conv2D(output_channels, 4, 2, padding='same')
    out = SpectralNormalization(down)(inputs)
    return layers.LeakyReLU(alpha=0.1)(out)