Example #1
    def test_generator_run(self):
        # Smoke test: build the generator graph from random noise and run it.
        tf.set_random_seed(1234)
        noise = tf.random_normal([100, 64])
        image, _ = dcgan.generator(noise)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            image.eval()
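
All of the snippets on this page assume TF 1.x-style imports roughly like the following; the exact module path for `dcgan` is an assumption and varies between TF-Slim and the TF-GAN example code.

# Assumed imports; the dcgan path below is one possibility, not confirmed by
# these snippets (the TF-GAN examples ship their own networks module).
import tensorflow as tf

from tensorflow.contrib.slim.python.slim.nets import dcgan

tfgan = tf.contrib.gan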
Example #2
def _decoder(codes, final_size, is_training, depth=64):
    """Compression decoder."""
    decoded_img, _ = dcgan.generator(codes,
                                     depth=depth,
                                     final_size=final_size,
                                     num_outputs=3,
                                     is_training=is_training,
                                     scope='Decoder')

    # Map output to [-1, 1].
    # Use softsign instead of tanh, as per empirical results of
    # http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf.
    return tf.nn.softsign(decoded_img)
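
A minimal usage sketch: the `codes` tensor below is a hypothetical stand-in for an encoder's output, and the shapes are assumptions, not part of the original example.

# Hypothetical usage of _decoder; `codes` stands in for an encoder output.
codes = tf.random_normal([8, 64])            # [batch size, code dim]
decoded = _decoder(codes, final_size=32, is_training=True)
# decoded: [8, 32, 32, 3], values in (-1, 1) thanks to softsign.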
Example #3
    def test_generator_invalid_input(self):
        # Generator inputs must be rank-2 [batch size, noise dim] Tensors.
        wrong_dim_input = tf.zeros([5, 32, 32])
        with self.assertRaises(ValueError):
            dcgan.generator(wrong_dim_input)

        # `final_size` must be a power of 2 and large enough.
        correct_input = tf.zeros([3, 2])
        with self.assertRaisesRegexp(ValueError, 'must be a power of 2'):
            dcgan.generator(correct_input, final_size=30)

        with self.assertRaisesRegexp(ValueError, 'must be greater than 8'):
            dcgan.generator(correct_input, final_size=4)
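
Reading the error messages above the other way around: a call with a rank-2 input and a sufficiently large power-of-2 `final_size` should build. A sketch, with 16 chosen only as an example value:

# Sketch of a call the checks above should accept (16 is a power of 2 > 8).
image, _ = dcgan.generator(tf.zeros([3, 2]), final_size=16)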
Example #4
def generator(noise, is_training=True):
    """Generator to produce CIFAR images.

    Args:
      noise: A 2D Tensor of shape [batch size, noise dim]. Since this example
        does not use conditioning, this Tensor represents a noise vector of
        some kind that will be reshaped by the generator into CIFAR examples.
      is_training: If `True`, batch norm uses batch statistics. If `False`,
        batch norm uses the exponential moving average collected from
        population statistics.

    Returns:
      A single Tensor with a batch of generated CIFAR images.
    """
    images, _ = dcgan.generator(noise,
                                is_training=is_training,
                                fused_batch_norm=True)

    # Make sure output lies between [-1, 1].
    return tf.tanh(images)
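
For context, a hedged sketch of how a generator function like this is typically handed to TF-GAN; `discriminator_fn` and `real_images` below are assumed placeholders standing in for the rest of the training setup, not part of the original example.

# Hypothetical wiring into tfgan.gan_model; discriminator_fn and real_images
# are assumptions standing in for the rest of the pipeline.
noise = tf.random_normal([32, 64])
gan_model = tfgan.gan_model(
    generator_fn=generator,
    discriminator_fn=discriminator_fn,
    real_data=real_images,
    generator_inputs=noise)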
Example #5
def conditional_generator(inputs, is_training=True):
    """Generator to produce CIFAR images.

    Args:
      inputs: A 2-tuple of Tensors (noise, one_hot_labels) used to condition
        the generator on class labels.
      is_training: If `True`, batch norm uses batch statistics. If `False`,
        batch norm uses the exponential moving average collected from
        population statistics.

    Returns:
      A single Tensor with a batch of generated CIFAR images.
    """
    noise, one_hot_labels = inputs
    noise = tfgan.features.condition_tensor_from_onehot(noise, one_hot_labels)

    images, _ = dcgan.generator(noise,
                                is_training=is_training,
                                fused_batch_norm=True)

    # Make sure output lies between [-1, 1].
    return tf.tanh(images)
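
A hedged usage sketch: the function expects its arguments packed as a single 2-tuple. The batch size, noise dimension, and class count below are assumptions (10 classes, as in CIFAR-10).

# Hypothetical call; shapes and the 10-class assumption are illustrative only.
noise = tf.random_normal([32, 64])
labels = tf.one_hot(tf.random_uniform([32], maxval=10, dtype=tf.int32), 10)
images = conditional_generator((noise, labels))
# images: [32, 32, 32, 3], values in (-1, 1).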
Example #6
    def test_generator_graph(self):
        tf.set_random_seed(1234)
        # Check graph construction for a range of image sizes, depths, and
        # batch sizes.
        for i, batch_size in zip(xrange(3, 7), xrange(3, 8)):
            tf.reset_default_graph()
            final_size = 2**i
            noise = tf.random_normal([batch_size, 64])
            image, end_points = dcgan.generator(noise,
                                                depth=32,
                                                final_size=final_size)

            self.assertAllEqual([batch_size, final_size, final_size, 3],
                                image.shape.as_list())

            expected_names = ['deconv%i' % j
                              for j in xrange(1, i)] + ['logits']
            self.assertSetEqual(set(expected_names), set(end_points.keys()))

            # Check layer depths.
            for j in range(1, i):
                layer = end_points['deconv%i' % j]
                self.assertEqual(32 * 2**(i - j - 1),
                                 layer.get_shape().as_list()[-1])
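
As a worked instance of what the loop checks, derived from the code above with depth=32 and final_size=32 (i.e. i=5):

# For final_size = 32 (i = 5) and depth = 32, the expected end points are:
#   deconv1: 32 * 2**3 = 256 channels
#   deconv2: 32 * 2**2 = 128 channels
#   deconv3: 32 * 2**1 =  64 channels
#   deconv4: 32 * 2**0 =  32 channels
#   logits : 3 channels, i.e. the [batch_size, 32, 32, 3] generated image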