Example #1
def evaluate(hparams, run_eval_loop=True):
    """Runs an evaluation loop.

    Args:
      hparams: An HParams instance containing the eval hyperparameters.
      run_eval_loop: Whether to run the full eval loop. Set to False for testing.
    """
    # Fetch and generate images to run through Inception.
    with tf.name_scope('inputs'):
        real_data, _ = data_provider.provide_data('test',
                                                  hparams.num_images_generated,
                                                  shuffle=False)
        generated_data = _get_generated_data(hparams.num_images_generated)

    # Compute Frechet Inception Distance.
    if hparams.eval_frechet_inception_distance:
        fid = util.get_frechet_inception_distance(real_data, generated_data,
                                                  hparams.num_images_generated,
                                                  hparams.num_inception_images)
        tf.summary.scalar('frechet_inception_distance', fid)

    # Compute normal Inception scores.
    if hparams.eval_real_images:
        inc_score = util.get_inception_scores(real_data,
                                              hparams.num_images_generated,
                                              hparams.num_inception_images)
    else:
        inc_score = util.get_inception_scores(generated_data,
                                              hparams.num_images_generated,
                                              hparams.num_inception_images)
    tf.summary.scalar('inception_score', inc_score)

    # Create ops that write images to disk.
    image_write_ops = None
    if hparams.num_images_generated >= 100 and hparams.write_to_disk:
        reshaped_imgs = tfgan.eval.image_reshaper(generated_data[:100],
                                                  num_cols=10)
        uint8_images = data_provider.float_image_to_uint8(reshaped_imgs)
        image_write_ops = tf.io.write_file(
            '%s/%s' % (hparams.eval_dir, 'unconditional_cifar10.png'),
            tf.image.encode_png(uint8_images[0]))

    # For unit testing, use `run_eval_loop=False`.
    if not run_eval_loop: return
    evaluation.evaluate_repeatedly(
        hparams.checkpoint_dir,
        master=hparams.master,
        hooks=[
            evaluation.SummaryAtEndHook(hparams.eval_dir),
            evaluation.StopAfterNEvalsHook(1)
        ],
        eval_ops=image_write_ops,
        max_number_of_evaluations=hparams.max_number_of_evaluations)
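
A minimal sketch (not part of the original example) of driving evaluate. EvalParams below is a hypothetical stand-in for the HParams object: its fields are simply the attributes the function reads above, and the directory paths are placeholders.

import collections

EvalParams = collections.namedtuple('EvalParams', [
    'num_images_generated', 'num_inception_images',
    'eval_frechet_inception_distance', 'eval_real_images', 'write_to_disk',
    'eval_dir', 'checkpoint_dir', 'master', 'max_number_of_evaluations'
])

hparams = EvalParams(
    num_images_generated=100,
    num_inception_images=10,
    eval_frechet_inception_distance=True,
    eval_real_images=False,
    write_to_disk=True,
    eval_dir='/tmp/cifar10/eval',
    checkpoint_dir='/tmp/cifar10/train',
    master='',
    max_number_of_evaluations=None)

evaluate(hparams, run_eval_loop=True)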
Example #2
    def test_provide_data_can_be_reinitialized(self, mock_tfds):
        """Test that the iterator created in `provide_data` can be reused."""
        batch_size = 5
        mock_tfds.load.return_value = self.mock_ds

        images, labels = data_provider.provide_data('test', batch_size)

        with self.session() as sess:
            sess.run([images, labels])
            sess.run([images, labels])
        # Open a second session and run the same ops again; this only works if
        # the dataset iterator created inside `provide_data` can be
        # reinitialized.
        with self.session() as sess:
            sess.run([images, labels])
            sess.run([images, labels])
Example #3
  def test_provide_data(self, mock_tfds, one_hot):
    batch_size = 5
    mock_tfds.load.return_value = self.mock_ds

    images, labels = data_provider.provide_data(
        'test', batch_size, one_hot=one_hot)

    with self.cached_session() as sess:
      images, labels = sess.run([images, labels])
    # CIFAR10 frames: 32x32 RGB, scaled so every pixel value lies in [-1, 1].
    self.assertTupleEqual(images.shape, (batch_size, 32, 32, 3))
    self.assertTrue(np.all(np.abs(images) <= 1))
    if one_hot:
      expected_lbls_shape = (batch_size, 10)
    else:
      expected_lbls_shape = (batch_size,)
    self.assertTupleEqual(labels.shape, expected_lbls_shape)
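
The test above receives mock_tfds and one_hot through decorators that the snippet does not show. A plausible wiring, assuming absl's parameterized test support and unittest.mock patching of the tensorflow_datasets handle used by data_provider (both assumptions, not confirmed by the snippet):

from unittest import mock

import tensorflow as tf
from absl.testing import parameterized


class DataProviderTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.parameters({'one_hot': True}, {'one_hot': False})
  @mock.patch.object(data_provider, 'tfds', autospec=True)
  def test_provide_data(self, mock_tfds, one_hot):
    ...  # body as in Example #3 above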
Example #4
def train(hparams):
    """Trains a CIFAR10 GAN.

    Args:
      hparams: An HParams instance containing the hyperparameters for training.
    """
    if not tf.io.gfile.exists(hparams.train_log_dir):
        tf.io.gfile.makedirs(hparams.train_log_dir)

    with tf.device(
            tf.compat.v1.train.replica_device_setter(hparams.ps_replicas)):
        # Force all input processing onto CPU in order to reserve the GPU for
        # the forward inference and back-propagation.
        with tf.compat.v1.name_scope('inputs'):
            with tf.device('/cpu:0'):
                images, _ = data_provider.provide_data('train',
                                                       hparams.batch_size,
                                                       num_parallel_calls=4)

        # Define the GANModel tuple.
        generator_fn = networks.generator
        discriminator_fn = networks.discriminator
        generator_inputs = tf.random.normal([hparams.batch_size, 64])
        gan_model = tfgan.gan_model(generator_fn,
                                    discriminator_fn,
                                    real_data=images,
                                    generator_inputs=generator_inputs)
        tfgan.eval.add_gan_model_image_summaries(gan_model)

        # Get the GANLoss tuple. Use the selected GAN loss functions.
        with tf.compat.v1.name_scope('loss'):
            gan_loss = tfgan.gan_loss(gan_model,
                                      gradient_penalty_weight=1.0,
                                      add_summaries=True)

        # Get the GANTrain ops using the custom optimizers and optional
        # discriminator weight clipping.
        with tf.compat.v1.name_scope('train'):
            gen_opt, dis_opt = _get_optimizers(hparams)
            train_ops = tfgan.gan_train_ops(gan_model,
                                            gan_loss,
                                            generator_optimizer=gen_opt,
                                            discriminator_optimizer=dis_opt,
                                            summarize_gradients=True)

        # Run the alternating training loop. Skip it if no steps should be taken
        # (used for graph construction tests).
        status_message = tf.strings.join([
            'Starting train step: ',
            tf.as_string(tf.compat.v1.train.get_or_create_global_step())
        ],
                                         name='status_message')
        if hparams.max_number_of_steps == 0:
            return
        tfgan.gan_train(train_ops,
                        hooks=([
                            tf.estimator.StopAtStepHook(
                                num_steps=hparams.max_number_of_steps),
                            tf.estimator.LoggingTensorHook([status_message],
                                                           every_n_iter=10)
                        ]),
                        logdir=hparams.train_log_dir,
                        master=hparams.master,
                        is_chief=hparams.task == 0)
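
A minimal sketch (an assumption, not part of the original example) of driving train. TrainParams is a hypothetical container listing only the attributes the function reads directly above; _get_optimizers may require further fields (for example, learning rates) that are not visible in this snippet, and the paths are placeholders.

import collections

TrainParams = collections.namedtuple('TrainParams', [
    'train_log_dir', 'ps_replicas', 'batch_size', 'max_number_of_steps',
    'master', 'task'
])

hparams = TrainParams(
    train_log_dir='/tmp/cifar10/train',
    ps_replicas=0,
    batch_size=32,
    max_number_of_steps=1000000,
    master='',
    task=0)

train(hparams)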