Example #1
 def test_single_example_correct(self, mock_tfhub_load):
     mock_tfhub_load.return_value = fake_logit_fn
     real_score = util.mnist_score(real_digit())
     fake_score = util.mnist_score(fake_digit())
     with self.cached_session() as sess:
         self.assertNear(1.0, sess.run(real_score), 1e-6)
         self.assertNear(1.0, sess.run(fake_score), 1e-6)
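The `mock_tfhub_load` argument here (and in Examples #10 and #13) implies the test class patches the TF-Hub loader so that `mnist_score` never downloads the real MNIST classifier module. A minimal sketch of such a fixture, assuming `util` resolves the classifier via `tfhub.load`; the patch target and the logit shape are assumptions, not taken from the source:

from unittest import mock

import tensorflow as tf

def fake_logit_fn(tensor):
    # Stand-in classifier: every image maps to identical 10-class logits.
    batch_size = tf.shape(tensor)[0]
    return tf.zeros([batch_size, 10])

# Hypothetical patch wiring for the test above:
#
#   @mock.patch.object(util.tfhub, 'load', autospec=True)
#   def test_single_example_correct(self, mock_tfhub_load):
#       mock_tfhub_load.return_value = fake_logit_fn
#       ...

A constant logit function gives every image the same predicted label distribution, so the conditional and marginal distributions coincide and the classifier score is exp(0) = 1.0, which is exactly what both assertions expect.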
Example #2
 def test_single_example_correct(self):
   if tf.executing_eagerly():
     # `run_image_classifier` doesn't work in eager.
     return
   real_score = util.mnist_score(real_digit())
   fake_score = util.mnist_score(fake_digit())
   with self.cached_session() as sess:
     self.assertNear(1.0, sess.run(real_score), 1e-6)
     self.assertNear(1.0, sess.run(fake_score), 1e-6)
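The early return under `tf.executing_eagerly()` recurs in Examples #7, #11, and #14: as the comment notes, the classifier plumbing behind `mnist_score` is graph-mode only, so these tests silently become no-ops when run eagerly.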
Example #3
def get_metrics(gan_model):
    """Return metrics for MNIST experiment."""
    real_mnist_score = util.mnist_score(gan_model.real_data)
    generated_mnist_score = util.mnist_score(gan_model.generated_data)
    frechet_distance = util.mnist_frechet_distance(gan_model.real_data,
                                                   gan_model.generated_data)
    return {
        'real_mnist_score': tf.compat.v1.metrics.mean(real_mnist_score),
        'mnist_score': tf.compat.v1.metrics.mean(generated_mnist_score),
        'frechet_distance': tf.compat.v1.metrics.mean(frechet_distance),
    }
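Each scalar metric is wrapped in `tf.compat.v1.metrics.mean`, which returns the `(value_tensor, update_op)` pair that `tf.estimator` expects in `eval_metric_ops` and accumulates a running mean across eval batches. A self-contained illustration of that contract (not from the source):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

# Pretend these are per-batch MNIST classifier scores.
scores = tf.constant([1.6, 1.7, 1.5])
mean_value, update_op = tf.compat.v1.metrics.mean(scores)

with tf.compat.v1.Session() as sess:
    # metrics.mean keeps its running totals in local variables.
    sess.run(tf.compat.v1.local_variables_initializer())
    sess.run(update_op)          # folds this batch into the running mean
    print(sess.run(mean_value))  # ~1.6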
Example #4
def evaluate(hparams, run_eval_loop=True):
    """Runs an evaluation loop.

  Args:
    hparams: An HParams instance containing the eval hyperparameters.
    run_eval_loop: Whether to run the full eval loop. Set to False for testing.
  """
    # Fetch real images.
    with tf.compat.v1.name_scope('inputs'):
        real_images, _ = data_provider.provide_data(
            'train', hparams.num_images_generated, hparams.dataset_dir)

    image_write_ops = None
    if hparams.eval_real_images:
        tf.compat.v1.summary.scalar(
            'MNIST_Classifier_score',
            util.mnist_score(real_images, hparams.classifier_filename))
    else:
        # In order for variables to load, use the same variable scope as in the
        # train job.
        with tf.compat.v1.variable_scope('Generator'):
            noise = tf.random.normal(
                [hparams.num_images_generated, hparams.noise_dims])
            images = networks.unconditional_generator(noise, is_training=False)
        tf.compat.v1.summary.scalar(
            'MNIST_Frechet_distance',
            util.mnist_frechet_distance(real_images, images,
                                        hparams.classifier_filename))
        tf.compat.v1.summary.scalar(
            'MNIST_Classifier_score',
            util.mnist_score(images, hparams.classifier_filename))
        if hparams.num_images_generated >= 100 and hparams.write_to_disk:
            reshaped_images = tfgan.eval.image_reshaper(images[:100, ...],
                                                        num_cols=10)
            uint8_images = data_provider.float_image_to_uint8(reshaped_images)
            image_write_ops = tf.io.write_file(
                '%s/%s' % (hparams.eval_dir, 'unconditional_gan.png'),
                tf.image.encode_png(uint8_images[0]))

    # For unit testing, use `run_eval_loop=False`.
    if not run_eval_loop:
        return
    evaluation.evaluate_repeatedly(
        hparams.checkpoint_dir,
        hooks=[
            evaluation.SummaryAtEndHook(hparams.eval_dir),
            evaluation.StopAfterNEvalsHook(1)
        ],
        eval_ops=image_write_ops,
        max_number_of_evaluations=hparams.max_number_of_evaluations)
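Because `run_eval_loop=False` returns before `evaluation.evaluate_repeatedly`, the whole graph (summaries and the optional image-write op) can be smoke-tested without a checkpoint. A hedged calling sketch; the field names are read off the body above, but this `HParams` namedtuple and its values are illustrative stand-ins:

import collections

HParams = collections.namedtuple('HParams', [
    'num_images_generated', 'dataset_dir', 'eval_real_images',
    'classifier_filename', 'noise_dims', 'write_to_disk', 'eval_dir',
    'checkpoint_dir', 'max_number_of_evaluations'
])

hparams = HParams(
    num_images_generated=100,
    dataset_dir='/tmp/mnist-data',
    eval_real_images=False,
    classifier_filename=None,
    noise_dims=64,
    write_to_disk=False,
    eval_dir='/tmp/mnist-eval',
    checkpoint_dir='/tmp/mnist-train',
    max_number_of_evaluations=1)

evaluate(hparams, run_eval_loop=False)  # builds the graph, skips the loop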
Example #5
 def _disabled_test_minibatch_correct(self):
   """Tests the correctness of the mnist_score function."""
   # Disabled since it requires loading the tfhub MNIST module.
   mscore = util.mnist_score(
       tf.concat([real_digit(), real_digit(), fake_digit()], 0))
   with self.cached_session() as sess:
     self.assertNear(1.612828, sess.run(mscore), 1e-6)
Example #6
 def test_batch_splitting_doesnt_change_value(self):
     for num_batches in [1, 2, 4, 8]:
         mscore = util.mnist_score(tf.concat([real_digit()] * 4 +
                                             [fake_digit()] * 4, 0),
                                   num_batches=num_batches)
         with self.cached_session() as sess:
             self.assertNear(1.649209, sess.run(mscore), 1e-6)
Example #7
 def test_minibatch_correct(self):
   if tf.executing_eagerly():
     # `run_image_classifier` doesn't work in eager.
     return
   mscore = util.mnist_score(
       tf.concat([real_digit(), real_digit(), fake_digit()], 0))
   with self.cached_session() as sess:
     self.assertNear(1.612828, sess.run(mscore), 1e-6)
Example #8
 def _disabled_test_batch_splitting_doesnt_change_value(self):
     """Tests the correctness of mnist_score function over different batches."""
     # Disabled since it requires loading the tfhub MNIST module.
     for num_batches in [1, 2, 4, 8]:
         mscore = util.mnist_score(tf.concat([real_digit()] * 4 +
                                             [fake_digit()] * 4, 0),
                                   num_batches=num_batches)
         with self.cached_session() as sess:
             self.assertNear(1.649209, sess.run(mscore), 1e-6)
Example #9
 def test_any_batch_size(self):
   if tf.executing_eagerly():
     # Placeholders don't work in eager execution mode.
     return
   inputs = tf.compat.v1.placeholder(tf.float32, shape=[None, 28, 28, 1])
   mscore = util.mnist_score(inputs)
   for batch_size in [4, 16, 30]:
     with self.cached_session() as sess:
       sess.run(mscore, feed_dict={inputs: np.zeros([batch_size, 28, 28, 1])})
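Declaring the placeholder with a `None` batch dimension lets a single `mnist_score` op serve any batch size chosen at `sess.run` time; note the test asserts nothing about the value and only checks that each feed executes.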
Example #10
 def test_any_batch_size(self, mock_tfhub_load):
   mock_tfhub_load.return_value = fake_logit_fn
   # Create a graph since placeholders don't work in eager execution mode.
   with tf.Graph().as_default():
     inputs = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
     mscore = util.mnist_score(inputs)
     for batch_size in [4, 16, 30]:
       with self.cached_session() as sess:
         sess.run(
             mscore, feed_dict={inputs: np.zeros([batch_size, 28, 28, 1])})
Example #11
 def test_batch_splitting_doesnt_change_value(self):
   if tf.executing_eagerly():
     # `run_image_classifier` doesn't work in eager.
     return
   for num_batches in [1, 2, 4, 8]:
     mscore = util.mnist_score(
         tf.concat([real_digit()] * 4 + [fake_digit()] * 4, 0),
         num_batches=num_batches)
     with self.cached_session() as sess:
       self.assertNear(1.649209, sess.run(mscore), 1e-6)
Example #12
    def test_deterministic(self):
        m_score = util.mnist_score(real_digit())
        with self.cached_session() as sess:
            m_score1 = sess.run(m_score)
            m_score2 = sess.run(m_score)
        self.assertEqual(m_score1, m_score2)

        with self.cached_session() as sess:
            m_score3 = sess.run(m_score)
        self.assertEqual(m_score1, m_score3)
Example #13
    def test_deterministic(self, mock_tfhub_load):
        mock_tfhub_load.return_value = fake_logit_fn
        m_score = util.mnist_score(real_digit())
        with self.cached_session() as sess:
            m_score1 = sess.run(m_score)
            m_score2 = sess.run(m_score)
        self.assertEqual(m_score1, m_score2)

        with self.cached_session() as sess:
            m_score3 = sess.run(m_score)
        self.assertEqual(m_score1, m_score3)
Example #14
  def test_deterministic(self):
    if tf.executing_eagerly():
      # `run_image_classifier` doesn't work in eager.
      return
    m_score = util.mnist_score(real_digit())
    with self.cached_session() as sess:
      m_score1 = sess.run(m_score)
      m_score2 = sess.run(m_score)
    self.assertEqual(m_score1, m_score2)

    with self.cached_session() as sess:
      m_score3 = sess.run(m_score)
    self.assertEqual(m_score1, m_score3)
Example #15
def evaluate(hparams, run_eval_loop=True):
    """Runs an evaluation loop.

  Args:
    hparams: An HParams instance containing the eval hyperparameters.
    run_eval_loop: Whether to run the full eval loop. Set to False for testing.
  """
    with tf.compat.v1.name_scope('inputs'):
        noise, one_hot_labels = _get_generator_inputs(
            hparams.num_images_per_class, NUM_CLASSES, hparams.noise_dims)

    # Generate images. Use the same variable scope as in the train job.
    with tf.compat.v1.variable_scope('Generator'):
        images = networks.conditional_generator((noise, one_hot_labels),
                                                is_training=False)

    # Visualize images.
    reshaped_img = tfgan.eval.image_reshaper(
        images, num_cols=hparams.num_images_per_class)
    tf.compat.v1.summary.image('generated_images', reshaped_img, max_outputs=1)

    # Calculate evaluation metrics.
    tf.compat.v1.summary.scalar(
        'MNIST_Classifier_score',
        util.mnist_score(images, hparams.classifier_filename))
    tf.compat.v1.summary.scalar(
        'MNIST_Cross_entropy',
        util.mnist_cross_entropy(images, one_hot_labels,
                                 hparams.classifier_filename))

    # Write images to disk.
    image_write_ops = None
    if hparams.write_to_disk:
        image_write_ops = tf.io.write_file(
            '%s/%s' % (hparams.eval_dir, 'conditional_gan.png'),
            tf.image.encode_png(
                data_provider.float_image_to_uint8(reshaped_img[0])))

    # For unit testing, use `run_eval_loop=False`.
    if not run_eval_loop:
        return
    evaluation.evaluate_repeatedly(
        hparams.checkpoint_dir,
        hooks=[
            evaluation.SummaryAtEndHook(hparams.eval_dir),
            evaluation.StopAfterNEvalsHook(1)
        ],
        eval_ops=image_write_ops,
        max_number_of_evaluations=hparams.max_number_of_evaluations)
Example #16
def evaluate(hparams, run_eval_loop=True):
    """Runs an evaluation loop.

  Args:
    hparams: An HParams instance containing the eval hyperparameters.
    run_eval_loop: Whether to run the full eval loop. Set to False for testing.
  """
    with tf.name_scope('inputs'):
        noise_args = (hparams.noise_samples, CAT_SAMPLE_POINTS,
                      CONT_SAMPLE_POINTS, hparams.unstructured_noise_dims,
                      hparams.continuous_noise_dims)
        # Use fixed noise vectors to illustrate the effect of each dimension.
        display_noise1 = util.get_eval_noise_categorical(*noise_args)
        display_noise2 = util.get_eval_noise_continuous_dim1(*noise_args)
        display_noise3 = util.get_eval_noise_continuous_dim2(*noise_args)
        _validate_noises([display_noise1, display_noise2, display_noise3])

    # Visualize the effect of each structured noise dimension on the generated
    # image.
    def generator_fn(inputs):
        return networks.infogan_generator(inputs,
                                          len(CAT_SAMPLE_POINTS),
                                          is_training=False)

    # Use the same variable scope as in training.
    with tf.variable_scope('Generator') as genscope:
        categorical_images = generator_fn(display_noise1)
    reshaped_categorical_img = tfgan.eval.image_reshaper(
        categorical_images, num_cols=len(CAT_SAMPLE_POINTS))
    tf.summary.image('categorical', reshaped_categorical_img, max_outputs=1)

    with tf.variable_scope(genscope, reuse=True):
        continuous1_images = generator_fn(display_noise2)
    reshaped_continuous1_img = tfgan.eval.image_reshaper(
        continuous1_images, num_cols=len(CONT_SAMPLE_POINTS))
    tf.summary.image('continuous1', reshaped_continuous1_img, max_outputs=1)

    with tf.variable_scope(genscope, reuse=True):
        continuous2_images = generator_fn(display_noise3)
    reshaped_continuous2_img = tfgan.eval.image_reshaper(
        continuous2_images, num_cols=len(CONT_SAMPLE_POINTS))
    tf.summary.image('continuous2', reshaped_continuous2_img, max_outputs=1)

    # Evaluate image quality.
    all_images = tf.concat(
        [categorical_images, continuous1_images, continuous2_images], 0)
    tf.summary.scalar('MNIST_Classifier_score', util.mnist_score(all_images))

    # Write images to disk.
    image_write_ops = []
    if hparams.write_to_disk:
        image_write_ops.append(
            _get_write_image_ops(hparams.eval_dir, 'categorical_infogan.png',
                                 reshaped_categorical_img[0]))
        image_write_ops.append(
            _get_write_image_ops(hparams.eval_dir, 'continuous1_infogan.png',
                                 reshaped_continuous1_img[0]))
        image_write_ops.append(
            _get_write_image_ops(hparams.eval_dir, 'continuous2_infogan.png',
                                 reshaped_continuous2_img[0]))

    # For unit testing, use `run_eval_loop=False`.
    if not run_eval_loop:
        return
    evaluation.evaluate_repeatedly(
        hparams.checkpoint_dir,
        hooks=[
            evaluation.SummaryAtEndHook(hparams.eval_dir),
            evaluation.StopAfterNEvalsHook(1)
        ],
        eval_ops=image_write_ops,
        max_number_of_evaluations=hparams.max_number_of_evaluations)
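Note the scope handling above: the first generator call creates the variables under 'Generator', and the two later calls reuse `genscope` with `reuse=True`, so all three image grids are produced by the same weights that the training checkpoint restores.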
Example #17
 def test_single_example_correct(self):
     real_score = util.mnist_score(real_digit())
     fake_score = util.mnist_score(fake_digit())
     with self.cached_session() as sess:
         self.assertNear(1.0, sess.run(real_score), 1e-6)
         self.assertNear(1.0, sess.run(fake_score), 1e-6)
Example #18
 def test_minibatch_correct(self):
     mscore = util.mnist_score(
         tf.concat([real_digit(), real_digit(),
                    fake_digit()], 0))
     with self.cached_session() as sess:
         self.assertNear(1.612828, sess.run(mscore), 1e-6)
Example #19
def get_inception_score(images, splits=10):
    """Returns the MNIST classifier score for `images` as a one-element list.

    Note: `splits` is accepted for interface compatibility but is unused.
    """
    images = tf.convert_to_tensor(images)
    generated_mnist_score = eval_util.mnist_score(images)
    print(generated_mnist_score)  # In graph mode this prints the symbolic tensor.
    return [generated_mnist_score]
Example #20
 def get_mnist_eval_metrics(real, fake):
     """Returns the Frechet distance and classifier score as one [2] tensor."""
     frechet = tfgan_mnist.mnist_frechet_distance(real, fake, 1)
     score = tfgan_mnist.mnist_score(fake, 1)
     # stop_gradient keeps the eval metrics out of any backward pass.
     return tf.stack(list(map(tf.stop_gradient, (frechet, score))))
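Applying `tf.stop_gradient` to both scalars before `tf.stack` detaches the metrics from gradient computation, so they can be evaluated alongside training ops without contributing gradients, and the stacked shape-[2] tensor fetches both values in a single `session.run` call.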