Example #1
  def testEvaluateWithEvalFeedDict(self):
    if tf.executing_eagerly():
      # tf.placeholder() is not compatible with eager execution.
      return
    # Create a checkpoint.
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluate_with_eval_feed_dict')
    self._train_model(checkpoint_dir, num_steps=1)

    # We need a variable that the saver will try to restore.
    tf.compat.v1.train.get_or_create_global_step()

    # Create a variable and an eval op that increments it with a placeholder.
    try:
      my_var = _local_variable(0.0, name='my_var')
    except TypeError:  # `collections` doesn't exist in TF2.
      return
    increment = tf.compat.v1.placeholder(dtype=tf.float32)
    eval_ops = tf.compat.v1.assign_add(my_var, increment)

    increment_value = 3
    num_evals = 5
    expected_value = increment_value * num_evals
    final_values = evaluation.evaluate_repeatedly(
        checkpoint_dir=checkpoint_dir,
        eval_ops=eval_ops,
        feed_dict={increment: increment_value},
        final_ops={'my_var': tf.identity(my_var)},
        hooks=[
            evaluation.StopAfterNEvalsHook(num_evals),
        ],
        max_number_of_evaluations=1)
    self.assertEqual(final_values['my_var'], expected_value)
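
Note: the `_local_variable` helper used above is defined elsewhere in the test file and is not shown in this listing. A plausible reconstruction, consistent with the `try/except TypeError` guard (the `collections` argument was removed from the variable constructor in TF2), is the minimal sketch below; the exact definition in the original file may differ.

def _local_variable(init_value, name=None):
  # Hypothetical reconstruction: a non-trainable variable registered only in
  # the LOCAL_VARIABLES collection, so the saver does not checkpoint it.
  # The `collections` keyword exists only in TF1; under TF2 it raises the
  # TypeError that the calling test catches.
  return tf.Variable(
      init_value,
      name=name,
      trainable=False,
      collections=[tf.compat.v1.GraphKeys.LOCAL_VARIABLES])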
Example #2
  def testEvaluatePerfectModel(self):
    if tf.executing_eagerly():
      return
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluate_perfect_model_repeated')

    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)

    # Build the evaluation graph.
    inputs = tf.constant(self._inputs, dtype=tf.float32)
    labels = tf.constant(self._labels, dtype=tf.float32)
    logits = logistic_classifier(inputs)
    predictions = tf.round(logits)

    accuracy, update_op = tf.compat.v1.metrics.accuracy(
        predictions=predictions, labels=labels)

    final_values = evaluation.evaluate_repeatedly(
        checkpoint_dir=checkpoint_dir,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy},
        hooks=[
            evaluation.StopAfterNEvalsHook(1),
        ],
        max_number_of_evaluations=1)
    self.assertTrue(final_values['accuracy'] > .99)
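
Note: `logistic_classifier` and `_train_model` are also helpers from the surrounding test file and are not shown in this listing. Assuming the classifier is a single-unit dense layer with a sigmoid output (consistent with `tf.round(logits)` being used as the prediction), a minimal sketch might be:

def logistic_classifier(inputs):
  # Hypothetical helper: one-unit logistic regression; the sigmoid output is a
  # probability in [0, 1], so rounding it gives a hard class prediction.
  return tf.compat.v1.layers.dense(inputs, 1, activation=tf.sigmoid)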
Example #3
  def testEvaluationLoopTimeout(self):
    if tf.executing_eagerly():
      # This test uses `tf.placeholder`, which doesn't work in eager execution.
      return
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluation_loop_timeout')
    if not tf.io.gfile.exists(checkpoint_dir):
      tf.io.gfile.makedirs(checkpoint_dir)

    # We need a variable that the saver will try to restore.
    tf.compat.v1.train.get_or_create_global_step()

    # Run with placeholders. If we actually try to evaluate this, we'd fail
    # since we're not using a feed_dict.
    cant_run_op = tf.compat.v1.placeholder(dtype=tf.float32)

    start = time.time()
    final_values = evaluation.evaluate_repeatedly(
        checkpoint_dir=checkpoint_dir,
        eval_ops=cant_run_op,
        hooks=[evaluation.StopAfterNEvalsHook(10)],
        timeout=6)
    end = time.time()
    self.assertFalse(final_values)

    # Assert that we've waited for the duration of the timeout (minus the sleep
    # time).
    self.assertGreater(end - start, 5.0)

    # And that the timeout then kicked in and stopped the loop.
    self.assertLess(end - start, 7)
Example #4
  def testEvalOpAndFinalOp(self):
    if tf.executing_eagerly():
      return
    checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')

    # Train a model for a single step to get a checkpoint.
    self._train_model(checkpoint_dir, num_steps=1)
    checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)

    # Create the model so we have something to restore.
    inputs = tf.constant(self._inputs, dtype=tf.float32)
    logistic_classifier(inputs)

    num_evals = 5
    final_increment = 9.0

    try:
      my_var = _local_variable(0.0, name='MyVar')
    except TypeError:  # `collections` doesn't exist in TF2.
      return
    eval_ops = tf.compat.v1.assign_add(my_var, 1.0)
    final_ops = tf.identity(my_var) + final_increment

    final_ops_values = evaluation.evaluate_once(
        checkpoint_path=checkpoint_path,
        eval_ops=eval_ops,
        final_ops={'value': final_ops},
        hooks=[
            evaluation.StopAfterNEvalsHook(num_evals),
        ])
    self.assertEqual(final_ops_values['value'], num_evals + final_increment)
Example #5
  def testEvaluatePerfectModel(self):
    if tf.executing_eagerly():
      # tf.metrics.accuracy is not supported when eager execution is enabled.
      return
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluate_perfect_model_once')

    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)

    # Build the evaluation graph.
    inputs = tf.constant(self._inputs, dtype=tf.float32)
    labels = tf.constant(self._labels, dtype=tf.float32)
    logits = logistic_classifier(inputs)
    predictions = tf.round(logits)

    accuracy, update_op = tf.compat.v1.metrics.accuracy(
        predictions=predictions, labels=labels)

    checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)

    final_ops_values = evaluation.evaluate_once(
        checkpoint_path=checkpoint_path,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy},
        hooks=[
            evaluation.StopAfterNEvalsHook(1),
        ])
    self.assertTrue(final_ops_values['accuracy'] > .99)
Example #6
def evaluate(hparams, run_eval_loop=True):
    """Runs an evaluation loop.

  Args:
    hparams: An HParams instance containing the eval hyperparameters.
    run_eval_loop: Whether to run the full eval loop. Set to False for testing.
  """
    # Fetch and generate images to run through Inception.
    with tf.name_scope('inputs'):
        real_data, _ = data_provider.provide_data('test',
                                                  hparams.num_images_generated,
                                                  shuffle=False)
        generated_data = _get_generated_data(hparams.num_images_generated)

    # Compute Frechet Inception Distance.
    if hparams.eval_frechet_inception_distance:
        fid = util.get_frechet_inception_distance(real_data, generated_data,
                                                  hparams.num_images_generated,
                                                  hparams.num_inception_images)
        tf.summary.scalar('frechet_inception_distance', fid)

    # Compute normal Inception scores.
    if hparams.eval_real_images:
        inc_score = util.get_inception_scores(real_data,
                                              hparams.num_images_generated,
                                              hparams.num_inception_images)
    else:
        inc_score = util.get_inception_scores(generated_data,
                                              hparams.num_images_generated,
                                              hparams.num_inception_images)
    tf.summary.scalar('inception_score', inc_score)

    # Create ops that write images to disk.
    image_write_ops = None
    if hparams.num_images_generated >= 100 and hparams.write_to_disk:
        reshaped_imgs = tfgan.eval.image_reshaper(generated_data[:100],
                                                  num_cols=10)
        uint8_images = data_provider.float_image_to_uint8(reshaped_imgs)
        image_write_ops = tf.io.write_file(
            '%s/%s' % (hparams.eval_dir, 'unconditional_cifar10.png'),
            tf.image.encode_png(uint8_images[0]))

    # For unit testing, use `run_eval_loop=False`.
    if not run_eval_loop: return
    evaluation.evaluate_repeatedly(
        hparams.checkpoint_dir,
        master=hparams.master,
        hooks=[
            evaluation.SummaryAtEndHook(hparams.eval_dir),
            evaluation.StopAfterNEvalsHook(1)
        ],
        eval_ops=image_write_ops,
        max_number_of_evaluations=hparams.max_number_of_evaluations)
Example #7
def evaluate(hparams, run_eval_loop=True):
    """Runs an evaluation loop.

  Args:
    hparams: An HParams instance containing the eval hyperparameters.
    run_eval_loop: Whether to run the full eval loop. Set to False for testing.
  """
    with tf.compat.v1.name_scope('inputs'):
        noise, one_hot_labels = _get_generator_inputs(
            hparams.num_images_per_class, NUM_CLASSES, hparams.noise_dims)

    # Generate images.
    with tf.compat.v1.variable_scope(
            'Generator'):  # Same scope as in train job.
        images = networks.conditional_generator((noise, one_hot_labels),
                                                is_training=False)

    # Visualize images.
    reshaped_img = tfgan.eval.image_reshaper(
        images, num_cols=hparams.num_images_per_class)
    tf.compat.v1.summary.image('generated_images', reshaped_img, max_outputs=1)

    # Calculate evaluation metrics.
    tf.compat.v1.summary.scalar(
        'MNIST_Classifier_score',
        util.mnist_score(images, hparams.classifier_filename))
    tf.compat.v1.summary.scalar(
        'MNIST_Cross_entropy',
        util.mnist_cross_entropy(images, one_hot_labels,
                                 hparams.classifier_filename))

    # Write images to disk.
    image_write_ops = None
    if hparams.write_to_disk:
        image_write_ops = tf.io.write_file(
            '%s/%s' % (hparams.eval_dir, 'conditional_gan.png'),
            tf.image.encode_png(
                data_provider.float_image_to_uint8(reshaped_img[0])))

    # For unit testing, use `run_eval_loop=False`.
    if not run_eval_loop:
        return
    evaluation.evaluate_repeatedly(
        hparams.checkpoint_dir,
        hooks=[
            evaluation.SummaryAtEndHook(hparams.eval_dir),
            evaluation.StopAfterNEvalsHook(1)
        ],
        eval_ops=image_write_ops,
        max_number_of_evaluations=hparams.max_number_of_evaluations)
Example #8
def evaluate(hparams, run_eval_loop=True):
    """Runs an evaluation loop.

  Args:
    hparams: An HParams instance containing the eval hyperparameters.
    run_eval_loop: Whether to run the full eval loop. Set to False for testing.
  """
    # Fetch real images.
    with tf.compat.v1.name_scope('inputs'):
        real_images, _ = data_provider.provide_data(
            'train', hparams.num_images_generated, hparams.dataset_dir)

    image_write_ops = None
    if hparams.eval_real_images:
        tf.compat.v1.summary.scalar(
            'MNIST_Classifier_score',
            util.mnist_score(real_images, hparams.classifier_filename))
    else:
        # In order for variables to load, use the same variable scope as in the
        # train job.
        with tf.compat.v1.variable_scope('Generator'):
            images = networks.unconditional_generator(tf.random.normal(
                [hparams.num_images_generated, hparams.noise_dims]),
                                                      is_training=False)
        tf.compat.v1.summary.scalar(
            'MNIST_Frechet_distance',
            util.mnist_frechet_distance(real_images, images,
                                        hparams.classifier_filename))
        tf.compat.v1.summary.scalar(
            'MNIST_Classifier_score',
            util.mnist_score(images, hparams.classifier_filename))
        if hparams.num_images_generated >= 100 and hparams.write_to_disk:
            reshaped_images = tfgan.eval.image_reshaper(images[:100, ...],
                                                        num_cols=10)
            uint8_images = data_provider.float_image_to_uint8(reshaped_images)
            image_write_ops = tf.io.write_file(
                '%s/%s' % (hparams.eval_dir, 'unconditional_gan.png'),
                tf.image.encode_png(uint8_images[0]))

    # For unit testing, use `run_eval_loop=False`.
    if not run_eval_loop:
        return
    evaluation.evaluate_repeatedly(
        hparams.checkpoint_dir,
        hooks=[
            evaluation.SummaryAtEndHook(hparams.eval_dir),
            evaluation.StopAfterNEvalsHook(1)
        ],
        eval_ops=image_write_ops,
        max_number_of_evaluations=hparams.max_number_of_evaluations)
Example #9
    def testEvaluationLoopTimeoutWithTimeoutFn(self):
        if tf.executing_eagerly():
            # tf.metrics.accuracy is not supported when eager execution is enabled.
            return
        checkpoint_dir = os.path.join(
            self.get_temp_dir(), 'evaluation_loop_timeout_with_timeout_fn')

        # Train a Model to completion:
        self._train_model(checkpoint_dir, num_steps=300)

        # Build the evaluation graph.
        inputs = tf.constant(self._inputs, dtype=tf.float32)
        labels = tf.constant(self._labels, dtype=tf.float32)
        logits = logistic_classifier(inputs)
        predictions = tf.round(logits)

        accuracy, update_op = tf.compat.v1.metrics.accuracy(
            predictions=predictions, labels=labels)

        timeout_fn_calls = [0]

        def timeout_fn():
            timeout_fn_calls[0] += 1
            return timeout_fn_calls[0] > 3

        final_values = evaluation.evaluate_repeatedly(
            checkpoint_dir=checkpoint_dir,
            eval_ops=update_op,
            final_ops={'accuracy': accuracy},
            hooks=[
                evaluation.StopAfterNEvalsHook(1),
            ],
            eval_interval_secs=1,
            max_number_of_evaluations=2,
            timeout=0.1,
            timeout_fn=timeout_fn)
        # We should have evaluated once.
        self.assertTrue(final_values['accuracy'] > .99)
        # And the timeout function should have been called 4 times.
        self.assertEqual(4, timeout_fn_calls[0])
Example #10
def evaluate(hparams, run_eval_loop=True):
    """Runs an evaluation loop.

  Args:
    hparams: An HParams instance containing the eval hyperparameters.
    run_eval_loop: Whether to run the full eval loop. Set to False for testing.
  """
    with tf.name_scope('inputs'):
        noise_args = (hparams.noise_samples, CAT_SAMPLE_POINTS,
                      CONT_SAMPLE_POINTS, hparams.unstructured_noise_dims,
                      hparams.continuous_noise_dims)
        # Use fixed noise vectors to illustrate the effect of each dimension.
        display_noise1 = util.get_eval_noise_categorical(*noise_args)
        display_noise2 = util.get_eval_noise_continuous_dim1(*noise_args)
        display_noise3 = util.get_eval_noise_continuous_dim2(*noise_args)
        _validate_noises([display_noise1, display_noise2, display_noise3])

    # Visualize the effect of each structured noise dimension on the generated
    # image.
    def generator_fn(inputs):
        return networks.infogan_generator(inputs,
                                          len(CAT_SAMPLE_POINTS),
                                          is_training=False)

    with tf.variable_scope(
            'Generator') as genscope:  # Same scope as in training.
        categorical_images = generator_fn(display_noise1)
    reshaped_categorical_img = tfgan.eval.image_reshaper(
        categorical_images, num_cols=len(CAT_SAMPLE_POINTS))
    tf.summary.image('categorical', reshaped_categorical_img, max_outputs=1)

    with tf.variable_scope(genscope, reuse=True):
        continuous1_images = generator_fn(display_noise2)
    reshaped_continuous1_img = tfgan.eval.image_reshaper(
        continuous1_images, num_cols=len(CONT_SAMPLE_POINTS))
    tf.summary.image('continuous1', reshaped_continuous1_img, max_outputs=1)

    with tf.variable_scope(genscope, reuse=True):
        continuous2_images = generator_fn(display_noise3)
    reshaped_continuous2_img = tfgan.eval.image_reshaper(
        continuous2_images, num_cols=len(CONT_SAMPLE_POINTS))
    tf.summary.image('continuous2', reshaped_continuous2_img, max_outputs=1)

    # Evaluate image quality.
    all_images = tf.concat(
        [categorical_images, continuous1_images, continuous2_images], 0)
    tf.summary.scalar('MNIST_Classifier_score', util.mnist_score(all_images))

    # Write images to disk.
    image_write_ops = []
    if hparams.write_to_disk:
        image_write_ops.append(
            _get_write_image_ops(hparams.eval_dir, 'categorical_infogan.png',
                                 reshaped_categorical_img[0]))
        image_write_ops.append(
            _get_write_image_ops(hparams.eval_dir, 'continuous1_infogan.png',
                                 reshaped_continuous1_img[0]))
        image_write_ops.append(
            _get_write_image_ops(hparams.eval_dir, 'continuous2_infogan.png',
                                 reshaped_continuous2_img[0]))

    # For unit testing, use `run_eval_loop=False`.
    if not run_eval_loop:
        return
    evaluation.evaluate_repeatedly(
        hparams.checkpoint_dir,
        hooks=[
            evaluation.SummaryAtEndHook(hparams.eval_dir),
            evaluation.StopAfterNEvalsHook(1)
        ],
        eval_ops=image_write_ops,
        max_number_of_evaluations=hparams.max_number_of_evaluations)
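
Note: Examples #6-#10 all share one structure: build the eval graph, attach summaries, then hand a single op to `evaluation.evaluate_repeatedly` together with `SummaryAtEndHook` and `StopAfterNEvalsHook`. The sketch below distills that shared pattern; the directory paths and the `eval_metric` summary are illustrative placeholders, not taken from any of the projects above.

# Minimal sketch of the shared evaluation-loop pattern (placeholder paths).
eval_op = tf.compat.v1.summary.scalar('eval_metric', tf.constant(0.0))
evaluation.evaluate_repeatedly(
    '/tmp/train_logs',  # checkpoint_dir written by the training job
    hooks=[
        evaluation.SummaryAtEndHook('/tmp/eval_logs'),
        evaluation.StopAfterNEvalsHook(1),
    ],
    eval_ops=eval_op,
    max_number_of_evaluations=1)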