def test_scalar_concat_4d_images_and_scalar(self):
  x = tf.constant(
      [[[[1, 2], [4, 5]], [[7, 8], [10, 11]]],
       [[[0, 0], [-1, -2]], [[1, -2], [2, 5]]]],
      dtype=tf.float32)
  with self.test_session(use_gpu=True) as sess:
    output_np = sess.run(layers.scalar_concat(x, 7))
  expected_np = [[[[1, 2, 7], [4, 5, 7]], [[7, 8, 7], [10, 11, 7]]],
                 [[[0, 0, 7], [-1, -2, 7]], [[1, -2, 7], [2, 5, 7]]]]
  self.assertNDArrayNear(output_np, expected_np, 1.0e-5)
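# The test above exercises `layers.scalar_concat`. For reference, a minimal
# sketch of what it presumably does: broadcast a scalar to an N x H x W x 1
# plane and append it as an extra channel. The name and body below are an
# illustrative assumption, not the repository's `layers.py` implementation.
def _scalar_concat_sketch(tensor, scalar):
  if tensor.shape.ndims != 4:
    # Matches the ValueError the invalid-input test below expects.
    raise ValueError('`tensor` must be 4D, got shape %s.' % tensor.shape)
  shape = tf.shape(tensor)
  # Broadcast the scalar to a single extra channel and concatenate on axis 3.
  scalar_plane = scalar * tf.ones(
      [shape[0], shape[1], shape[2], 1], dtype=tensor.dtype)
  return tf.concat([tensor, scalar_plane], axis=3)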
def discriminator(x,
                  progress,
                  num_filters_fn,
                  resolution_schedule,
                  num_blocks=None,
                  kernel_size=3,
                  scope='progressive_gan_discriminator',
                  reuse=None):
  """Discriminator network for the progressive GAN model.

  Args:
    x: A `Tensor` of NHWC format representing images of size `resolution`.
    progress: A scalar float `Tensor` of training progress.
    num_filters_fn: A function that maps `block_id` to the number of filters
      for the block.
    resolution_schedule: An object of `ResolutionSchedule`.
    num_blocks: An integer number of blocks. None means the maximum number of
      blocks, i.e. `resolution_schedule.num_resolutions`. Defaults to None.
    kernel_size: An integer of convolution kernel size.
    scope: A string or variable scope.
    reuse: Whether to reuse `scope`. Defaults to None which means to inherit
      the reuse option of the parent scope.

  Returns:
    A `Tensor` of model output and a dictionary of model end points.
  """
  if num_blocks is None:
    num_blocks = resolution_schedule.num_resolutions

  def _conv2d(scope, x, kernel_size, filters, padding='SAME'):
    return layers.custom_conv2d(
        x=x,
        filters=filters,
        kernel_size=kernel_size,
        padding=padding,
        activation=tf.nn.leaky_relu,
        he_initializer_slope=0.0,
        scope=scope)

  def _from_rgb(x, block_id):
    return _conv2d('from_rgb', x, 1, num_filters_fn(block_id))

  end_points = {}

  with tf.variable_scope(scope, reuse=reuse):
    x0 = x
    end_points['rgb'] = x0

    lods = []
    for block_id in range(num_blocks, 0, -1):
      with tf.variable_scope(block_name(block_id)):
        scale = resolution_schedule.scale_factor(block_id)
        lod = layers.downscale(x0, scale)
        end_points['downscaled_rgb_{}'.format(block_id)] = lod
        lod = _from_rgb(lod, block_id)
        # alpha_i is used to replace lod_select.
        alpha = _discriminator_alpha(block_id, progress)
        end_points['alpha_{}'.format(block_id)] = alpha
        lods.append((lod, alpha))

    lods_iter = iter(lods)
    x, _ = next(lods_iter)
    for block_id in range(num_blocks, 1, -1):
      with tf.variable_scope(block_name(block_id)):
        x = _conv2d('conv0', x, kernel_size, num_filters_fn(block_id))
        x = _conv2d('conv1', x, kernel_size, num_filters_fn(block_id - 1))
        x = layers.downscale(x, resolution_schedule.scale_base)
        lod, alpha = next(lods_iter)
        x = alpha * lod + (1.0 - alpha) * x

    with tf.variable_scope(block_name(1)):
      x = layers.scalar_concat(x, layers.minibatch_mean_stddev(x))
      x = _conv2d('conv0', x, kernel_size, num_filters_fn(1))
      x = _conv2d('conv1', x, resolution_schedule.start_resolutions,
                  num_filters_fn(0), 'VALID')
      end_points['last_conv'] = x
      logits = layers.custom_dense(x=x, units=1, scope='logits')
      end_points['logits'] = logits

  return logits, end_points
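# `discriminator` relies on two helpers defined elsewhere in this file:
# `block_name` and `_discriminator_alpha`. A hedged sketch of both follows.
# The scope-name format is an assumption, and the alpha ramp shown is a
# generic linear fade-in clipped to [0, 1]; the actual schedule in the
# original code may differ.
def block_name(block_id):
  """Returns the variable scope name for block `block_id` (assumed format)."""
  return 'progressive_gan_block_{}'.format(block_id)


def _discriminator_alpha(block_id, progress):
  """Mixing weight between the from-rgb lod and the deeper-block output.

  Illustrative: alpha stays at 1.0 while `progress` is below this block,
  then decays linearly to 0.0, so a newly grown block fades in smoothly
  via `alpha * lod + (1 - alpha) * x` in the discriminator above.
  """
  return tf.clip_by_value(block_id + 1.0 - progress, 0.0, 1.0)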
def minibatch_stats(x):
  """Appends the mean minibatch stddev of `x` as an extra feature channel."""
  return layers.scalar_concat(x, layers.minibatch_mean_stddev(x))
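# `minibatch_stats` is a form of batch discrimination: the discriminator sees
# one batch-level statistic as an extra channel. A minimal sketch of what
# `layers.minibatch_mean_stddev` presumably computes -- the per-position
# standard deviation across the batch, averaged to a scalar. Illustrative
# only; the epsilon and reduction order are assumptions.
def _minibatch_mean_stddev_sketch(x):
  # Per-position variance across the batch dimension (axis 0).
  _, var = tf.nn.moments(x, axes=[0])
  # Average the stddev over all spatial positions and channels; the small
  # epsilon guards against NaN gradients from sqrt at zero variance.
  return tf.reduce_mean(tf.sqrt(var + 1e-8))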
def test_scalar_concat_invalid_input_throws_exception(self):
  with self.assertRaises(ValueError):
    layers.scalar_concat(tf.constant(1.2), 2.0)