def grow(res_increase, res_decrease):
    # Recursively builds the progressive-growing discriminator graph.
    # res_increase: resolution level currently being wired up.
    # res_decrease: remaining number of 2x downscale steps from the
    #   full-resolution input down to this level.
    # NOTE(review): relies on closure variables (`inputs`, `res_training`,
    # `from_rgb`, `block`) defined in the enclosing discriminator — confirm.
    #
    # Lazy thunk: convert the input, downscaled to this level, from RGB.
    x = lambda: from_rgb(layers.downscale2d(inputs, 2**res_decrease), res_increase)
    if res_decrease > 0:
        # utils.cset presumably selects between the two thunks based on the
        # (res_training > res_increase) condition — TODO confirm semantics.
        # When training has grown past this level, recurse to build the
        # higher-resolution path instead of reading RGB directly here.
        x = utils.cset(
            x,
            (res_training > res_increase),
            lambda: grow(res_increase+1, res_decrease-1))
    # Run this level's block on whichever input path was chosen.
    x = block(x(), res_increase)
    y = lambda: x
    if res_increase > 2:
        # Fade-in: while training resolution is still below this level,
        # linearly blend the block output with the from_rgb shortcut of the
        # next-coarser level (standard progressive-growing lerp).
        y = utils.cset(
            y,
            (res_training < res_increase),
            lambda: utils.lerp(x, from_rgb(layers.downscale2d(inputs, 2**(res_decrease+1)), res_increase-1), res_increase-res_training))
    return y()
def from_rgb(x, number):
    """Project an RGB input into feature space for resolution level *number*.

    Downscales *x* to the level's resolution, then applies a 1x1 conv
    followed by the bias/activation helper ``BA``.
    """
    scope_name = 'Input_{}'.format(number)
    with tf.compat.v1.variable_scope(scope_name, reuse=tf.compat.v1.AUTO_REUSE):
        shrink_factor = 2**(res_building - number)
        features = layers.downscale2d(x, factor=shrink_factor)
        features = layers.conv2d(features, fmaps=latent_size, kernel=1)
        return BA(features)
def dummy(inputs, regularizer_rate=0):
    """Small convolutional scoring network.

    Stacks a 1x1 input conv, three double-conv stages (64/128/256 feature
    maps) with downscaling between stages, then a single-unit dense head,
    alpha-dropout, and L2 normalization.

    Args:
        inputs: input image tensor (shape assumed NCHW/NHWC per `layers`
            conventions — TODO confirm against `layers.conv2d`).
        regularizer_rate: regularization strength forwarded to every
            conv/dense/bias layer.

    Returns:
        L2-normalized single-unit output tensor.
    """
    def BAN(x):
        # Bias then SELU activation (paired with alpha_dropout below,
        # the self-normalizing-network combination).
        x = layers.bias(x, regularizer_rate=regularizer_rate)
        x = tf.nn.selu(x)
        return x

    def conv_layer(name, x, fmaps, kernel=3, strides=1, padding='SAME'):
        # Conv2D followed by bias+activation, in its own variable scope.
        with tf.compat.v1.variable_scope('Conv2D_{}'.format(name)):
            x = layers.conv2d(x, fmaps=fmaps, kernel=kernel, strides=strides,
                              padding=padding, regularizer_rate=regularizer_rate)
            x = BAN(x)
            return x

    def dense_layer(x, fmaps, name=0, use_bias=True):
        # Fully-connected layer with optional bias, in its own scope.
        with tf.compat.v1.variable_scope('Dense_{}'.format(name)):
            x = layers.dense(x, fmaps=fmaps, regularizer_rate=regularizer_rate)
            if use_bias:
                x = layers.bias(x, regularizer_rate=regularizer_rate)
            return x

    x = conv_layer('inputs', x=inputs, fmaps=64, kernel=1)
    fmaps = [64, 128, 256]
    # Idiom fix: enumerate instead of range(len(...)) + indexing.
    for i, stage_fmaps in enumerate(fmaps):
        x = conv_layer(name=i * 2, x=x, fmaps=stage_fmaps)
        x = conv_layer(name=i * 2 + 1, x=x, fmaps=stage_fmaps)
        if i < (len(fmaps) - 1):
            # Halve spatial resolution between stages (not after the last).
            x = layers.downscale2d(x)
    x = dense_layer(x, fmaps=1, name='0', use_bias=False)
    x = layers.alpha_dropout(x, rate=0.2)
    x = tf.compat.v1.nn.l2_normalize(x, axis=1)
    return x
def msg_gan(image_inputs, noise_inputs, latent_size, res_building, minibatch_size):
    """Wire up an MSG-GAN training graph.

    Builds the multi-scale real-image pyramid, instantiates the generator
    and discriminator networks, and assembles the relativistic-average BCE
    losses plus the multi-scale gradient penalty.

    Returns:
        (gen_loss, disc_loss, fake_images)
    """
    # Multi-scale real inputs: downscale then upscale back, so each entry
    # keeps the original spatial size but carries coarser detail.
    real_inputs = []
    for factor in [2**res for res in reversed(range(4))]:
        blurred = layers.downscale2d(image_inputs, factor=factor)
        real_inputs.append(layers.upscale2d(blurred, factor=factor))

    # Instantiate both networks.
    generator = network.Network('generator', msg_generator, noise_inputs,
                                res_building=res_building, latent_size=latent_size)
    discriminator = network.Network('discriminator', msg_discriminator, real_inputs,
                                    res_building=res_building, latent_size=latent_size)

    # Forward passes.
    fake_images = generator(noise_inputs)
    fake_outputs = discriminator(fake_images)
    real_outputs = discriminator(real_inputs)

    # Losses: relativistic-average BCE plus MSG gradient penalty on D.
    gen_loss, disc_loss = losses.RelativisticAverageBCE(real_outputs, fake_outputs)
    disc_loss += losses.GradientPenaltyMSG(discriminator, real_inputs,
                                           fake_images, minibatch_size)
    # disc_loss += losses.EpsilonPenalty(real_outputs)
    return gen_loss, disc_loss, fake_images
def block(x, res):
    """One discriminator block for resolution level *res*.

    Level 2 is the terminal block (minibatch stddev + conv + dense head);
    every higher level is two convs followed by a 2x downscale.
    """
    with tf.compat.v1.variable_scope('Block_{}'.format(res)):
        if res == 2:
            # Terminal block: stddev statistics, conv, then two dense layers
            # ending in a single-unit score.
            out = layers.minibatch_stddev_layer(x)
            out = conv_layer(out, number=0, fmaps=latent_size)
            out = dense_layer(out, fmaps=latent_size, number=1)
            return dense_layer(out, fmaps=1, number=0)
        # Intermediate block: two convs, then halve the spatial resolution.
        out = conv_layer(x, number='{}_0'.format(res), fmaps=nbof_fmaps(res))
        out = conv_layer(out, number='{}_1'.format(res), fmaps=nbof_fmaps(res-1))
        return layers.downscale2d(out)