Example #1
def discriminator_block(x, res, n_f0, n_f1):
    gain = np.sqrt(2)
    lrmul = 1.0
    with tf.variable_scope('{:d}x{:d}'.format(res, res)):
        # 3x3 conv with equalized learning rate, resolution unchanged
        with tf.variable_scope('Conv0'):
            x = equalized_conv2d(x, n_f0, kernel=3, gain=gain, lrmul=lrmul)
            x = apply_bias(x, lrmul=lrmul)
            x = tf.nn.leaky_relu(x)

        # blur, then 3x3 conv fused with a 2x downscale
        with tf.variable_scope('Conv1_down'):
            x = blur2d(x, [1, 2, 1])
            x = conv2d_downscale2d(x, n_f1, kernel=3, gain=gain, lrmul=lrmul)
            x = apply_bias(x, lrmul=lrmul)
            x = tf.nn.leaky_relu(x)
    return x
Example #2
def discriminator_last_block(x, res, n_f0, n_f1):
    gain = np.sqrt(2)
    lrmul = 1.0
    with tf.variable_scope('{:d}x{:d}'.format(res, res)):
        # append the minibatch standard deviation as one extra feature map
        x = minibatch_stddev_layer(x, group_size=4, num_new_features=1)
        with tf.variable_scope('Conv0'):
            x = equalized_conv2d(x, n_f0, kernel=3, gain=gain, lrmul=lrmul)
            x = apply_bias(x, lrmul=lrmul)
            x = tf.nn.leaky_relu(x)
        with tf.variable_scope('Dense0'):
            x = equalized_dense(x, n_f1, gain=gain, lrmul=lrmul)
            x = apply_bias(x, lrmul=lrmul)
            x = tf.nn.leaky_relu(x)
        # final linear layer producing a single score per sample
        with tf.variable_scope('Dense1'):
            x = equalized_dense(x, 1, gain=1.0, lrmul=lrmul)
            x = apply_bias(x, lrmul=lrmul)
    return x
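
A minimal sketch of how these two blocks could be chained into a full discriminator. The resolution list, featuremaps schedule, scope name, and the omitted from_rgb input conv are assumptions for illustration, not part of the examples above:

def discriminator_sketch(images, resolutions, featuremaps):
    # resolutions, e.g. [256, 128, 64, 32, 16, 8, 4]; featuremaps gives the
    # filter count per resolution (both assumed inputs, not shown above)
    with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):
        x = images  # a from_rgb conv would normally map RGB to featuremaps[0] first
        for res, n_f0, n_f1 in zip(resolutions[:-1], featuremaps[:-1], featuremaps[1:]):
            x = discriminator_block(x, res, n_f0, n_f1)   # each block halves the resolution
        # final 4x4 block: minibatch stddev + conv + dense head -> one score per sample
        score = discriminator_last_block(x, resolutions[-1], featuremaps[-1], featuremaps[-1])
    return score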
Example #3
def synthesis_block(x, res, w0, w1, n_f):
    gain = np.sqrt(2)
    lrmul = 1.0
    with tf.variable_scope('{:d}x{:d}'.format(res, res)):
        # 2x upscale fused with a 3x3 conv, then blur, noise, bias and AdaIN styled by w0
        with tf.variable_scope('Conv0_up'):
            x = upscale2d_conv2d(x, n_f, kernel=3, gain=gain, lrmul=lrmul)
            x = blur2d(x, [1, 2, 1])
            x = apply_noise(x)
            x = apply_bias(x, lrmul=lrmul)
            x = tf.nn.leaky_relu(x)
            x = adaptive_instance_norm(x, w0)

        # second 3x3 conv at the new resolution, styled by w1
        with tf.variable_scope('Conv1'):
            x = equalized_conv2d(x, n_f, kernel=3, gain=gain, lrmul=lrmul)
            x = apply_noise(x)
            x = apply_bias(x, lrmul=lrmul)
            x = tf.nn.leaky_relu(x)
            x = adaptive_instance_norm(x, w1)
    return x
Example #4
def synthesis_const_block(res, w0, w1, n_f):
    gain = np.sqrt(2)
    lrmul = 1.0
    batch_size = tf.shape(w0)[0]

    with tf.variable_scope('{:d}x{:d}'.format(res, res)):
        # learned 4x4 constant input, tiled over the batch, then noise, bias and AdaIN
        with tf.variable_scope('Const'):
            x = tf.get_variable('const', shape=[1, n_f, 4, 4], dtype=tf.float32, initializer=tf.initializers.ones())
            x = tf.tile(x, [batch_size, 1, 1, 1])
            x = apply_noise(x)
            x = apply_bias(x, lrmul=lrmul)
            x = tf.nn.leaky_relu(x)
            x = adaptive_instance_norm(x, w0)

        # 3x3 conv at 4x4 resolution, styled by w1
        with tf.variable_scope('Conv'):
            x = equalized_conv2d(x, n_f, kernel=3, gain=gain, lrmul=lrmul)
            x = apply_noise(x)
            x = apply_bias(x, lrmul=lrmul)
            x = tf.nn.leaky_relu(x)
            x = adaptive_instance_norm(x, w1)
    return x
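
A hedged sketch of how the const block and the upsampling block could be chained into a synthesis network. It assumes a broadcasted latent w_broadcasted of shape [batch, n_layers, w_dim] with two style slices per resolution (as produced by the mapping network in Example #5), plus a resolutions/featuremaps schedule and scope name that are not part of the examples:

def synthesis_sketch(w_broadcasted, resolutions, featuremaps):
    # resolutions ascending, e.g. [4, 8, 16, ..., 256]; featuremaps matches it
    with tf.variable_scope('g_synthesis', reuse=tf.AUTO_REUSE):
        # start from the learned 4x4 constant, styled by the first two w slices
        layer_idx = 0
        x = synthesis_const_block(resolutions[0],
                                  w_broadcasted[:, layer_idx],
                                  w_broadcasted[:, layer_idx + 1],
                                  featuremaps[0])
        layer_idx += 2
        # each further block doubles the resolution and consumes two more styles
        for res, n_f in zip(resolutions[1:], featuremaps[1:]):
            x = synthesis_block(x, res,
                                w_broadcasted[:, layer_idx],
                                w_broadcasted[:, layer_idx + 1],
                                n_f)
            layer_idx += 2
    # a to_rgb conv (not shown in the examples) would map x to an image
    return x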
Example #5
def g_mapping(z, w_dim, n_mapping, n_broadcast):
    with tf.variable_scope('g_mapping', reuse=tf.AUTO_REUSE):
        gain = np.sqrt(2)
        lrmul = 0.01

        # normalize input first
        x = pixel_norm(z)

        # run through mapping network
        for ii in range(n_mapping):
            with tf.variable_scope('Dense{:d}'.format(ii)):
                x = equalized_dense(x, w_dim, gain=gain, lrmul=lrmul)
                x = apply_bias(x, lrmul=lrmul)
                x = tf.nn.leaky_relu(x)

        # broadcast to n_layers
        with tf.variable_scope('Broadcast'):
            x = tf.tile(x[:, np.newaxis], [1, n_broadcast, 1])
    return tf.identity(x, name='w_broadcasted')
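
Usage sketch for the mapping network. The concrete sizes below (512-dim z and w, 8 mapping layers, 14 broadcast copies for a 256x256 generator with two styles per resolution) are illustrative assumptions, not values fixed by the example:

z_dim, w_dim = 512, 512
n_mapping = 8        # depth of the mapping network
n_broadcast = 14     # 2 styles per resolution for 4x4 ... 256x256

z = tf.placeholder(tf.float32, [None, z_dim], name='z')
w_broadcasted = g_mapping(z, w_dim, n_mapping, n_broadcast)   # shape [batch, 14, 512]

# per-layer styles, e.g. for the 4x4 const block in Example #4
w0 = w_broadcasted[:, 0]
w1 = w_broadcasted[:, 1]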