Example #1
    def testEvalOpAndFinalOp(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(),
                                      'eval_ops_and_final_ops')

        # Train a model for a single step to get a checkpoint.
        self._train_model(checkpoint_dir, num_steps=1)
        checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)

        # Create the model so we have something to restore.
        inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        logistic_classifier(inputs)

        num_evals = 5
        final_increment = 9.0

        my_var = variables.local_variable(0.0, name='MyVar')
        eval_ops = state_ops.assign_add(my_var, 1.0)
        final_ops = array_ops.identity(my_var) + final_increment

        final_ops_values = evaluation.evaluate_once(
            checkpoint_path=checkpoint_path,
            eval_ops=eval_ops,
            final_ops={'value': final_ops},
            hooks=[
                evaluation.StopAfterNEvalsHook(num_evals),
            ])
        self.assertEqual(final_ops_values['value'],
                         num_evals + final_increment)
Example #2
    def testEvaluatePerfectModel(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(),
                                      'evaluate_perfect_model_once')

        # Train a model to completion:
        self._train_model(checkpoint_dir, num_steps=300)

        # Build the evaluation graph:
        inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        logits = logistic_classifier(inputs)
        predictions = math_ops.round(logits)

        accuracy, update_op = metric_ops.streaming_accuracy(
            predictions, labels)

        checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)

        final_ops_values = evaluation.evaluate_once(
            checkpoint_path=checkpoint_path,
            eval_ops=update_op,
            final_ops={'accuracy': accuracy},
            hooks=[
                evaluation.StopAfterNEvalsHook(1),
            ])
        self.assertTrue(final_ops_values['accuracy'] > .99)
Example #3
  def testOnlyFinalOp(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(), 'only_final_ops')

    # Train a model for a single step to get a checkpoint.
    self._train_model(checkpoint_dir, num_steps=1)
    checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)

    # Create the model so we have something to restore.
    inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    logistic_classifier(inputs)

    final_increment = 9.0

    my_var = variables.local_variable(0.0, name='MyVar')
    final_ops = array_ops.identity(my_var) + final_increment

    final_ops_values = evaluation.evaluate_once(
        checkpoint_path=checkpoint_path, final_ops={'value': final_ops})
    self.assertEqual(final_ops_values['value'], final_increment)
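
The test methods in Examples #1 through #3 rely on helpers defined elsewhere in the test module: logistic_classifier, _train_model, and the self._inputs / self._labels fixture arrays. The sketch below is only a guess at their shape, written against the public tf.compat.v1 API for illustration; it is not the original source.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # the examples on this page are graph-mode (TF1) code

# Hypothetical fixture data; the real arrays come from the test class.
_inputs = np.random.rand(16, 4).astype(np.float32)
_labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)


def logistic_classifier(inputs):
    # A single sigmoid unit; the tests only need a few variables to
    # checkpoint and later restore.
    return tf.layers.dense(inputs, 1, activation=tf.sigmoid)


def _train_model(checkpoint_dir, num_steps):
    # In the real tests this is a method on the test class; here it is a
    # plain function. It fits the classifier on the fixture data and leaves
    # a checkpoint in checkpoint_dir.
    with tf.Graph().as_default():
        inputs = tf.constant(_inputs, dtype=tf.float32)
        labels = tf.constant(_labels, dtype=tf.float32)
        predictions = logistic_classifier(inputs)
        loss = tf.losses.log_loss(labels, predictions)
        global_step = tf.train.get_or_create_global_step()
        train_op = tf.train.GradientDescentOptimizer(1.0).minimize(
            loss, global_step=global_step)
        with tf.train.MonitoredTrainingSession(
                checkpoint_dir=checkpoint_dir,
                hooks=[tf.train.StopAtStepHook(last_step=num_steps)]) as sess:
            while not sess.should_stop():
                sess.run(train_op)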
Example #4
def evaluate_once(master,
                  checkpoint_path,
                  logdir,
                  num_evals=1,
                  initial_op=None,
                  initial_op_feed_dict=None,
                  eval_op=None,
                  eval_op_feed_dict=None,
                  final_op=None,
                  final_op_feed_dict=None,
                  summary_op=_USE_DEFAULT,
                  summary_op_feed_dict=None,
                  variables_to_restore=None,
                  session_config=None,
                  hooks=None):
    """Evaluates the model at the given checkpoint path.

  Args:
    master: The BNS address of the TensorFlow master.
    checkpoint_path: The path to a checkpoint to use for evaluation.
    logdir: The directory where the TensorFlow summaries are written to.
    num_evals: The number of times to run `eval_op`.
    initial_op: An operation run at the beginning of evaluation.
    initial_op_feed_dict: A feed dictionary to use when executing `initial_op`.
    eval_op: An operation run `num_evals` times.
    eval_op_feed_dict: The feed dictionary to use when executing the `eval_op`.
    final_op: An operation to execute after all of the `eval_op` executions. The
      value of `final_op` is returned.
    final_op_feed_dict: A feed dictionary to use when executing `final_op`.
    summary_op: The summary_op to evaluate after running TF-Slim's metric ops. By
      default the summary_op is set to tf.compat.v1.summary.merge_all().
    summary_op_feed_dict: An optional feed dictionary to use when running the
      `summary_op`.
    variables_to_restore: A list of TensorFlow variables to restore during
      evaluation. If the argument is left as `None` then
      slim.variables.GetVariablesToRestore() is used.
    session_config: An instance of `tf.compat.v1.ConfigProto` that will be used
      to configure the `Session`. If left as `None`, the default will be used.
    hooks: A list of additional `SessionRunHook` objects to pass during the
      evaluation.

  Returns:
    The value of `final_op` or `None` if `final_op` is `None`.
  """
    if summary_op == _USE_DEFAULT:
        summary_op = summary.merge_all()

    all_hooks = [
        evaluation.StopAfterNEvalsHook(num_evals),
    ]

    if summary_op is not None:
        all_hooks.append(
            evaluation.SummaryAtEndHook(log_dir=logdir,
                                        summary_op=summary_op,
                                        feed_dict=summary_op_feed_dict))
    if hooks is not None:
        all_hooks.extend(hooks)

    saver = None
    if variables_to_restore is not None:
        saver = tf_saver.Saver(variables_to_restore)

    return evaluation.evaluate_once(checkpoint_path,
                                    master=master,
                                    scaffold=monitored_session.Scaffold(
                                        init_op=initial_op,
                                        init_feed_dict=initial_op_feed_dict,
                                        saver=saver),
                                    eval_ops=eval_op,
                                    feed_dict=eval_op_feed_dict,
                                    final_ops=final_op,
                                    final_ops_feed_dict=final_op_feed_dict,
                                    hooks=all_hooks,
                                    config=session_config)
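
A possible way to call a wrapper like the one above, sketched with the public tf.compat.v1 API. The checkpoint path, log directory, data arrays, and metric choice are all placeholders introduced for illustration and do not appear in the original code; tf.metrics.accuracy stands in for the streaming_accuracy metric used in the tests and returns the same (value, update_op) pair.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

# Hypothetical evaluation data.
eval_inputs = np.random.rand(16, 4).astype(np.float32)
eval_labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)

inputs = tf.constant(eval_inputs, dtype=tf.float32)
labels = tf.constant(eval_labels, dtype=tf.float32)
logits = tf.layers.dense(inputs, 1, activation=tf.sigmoid)
predictions = tf.round(logits)

# Streaming accuracy: the update op runs num_evals times, and the value op is
# read once at the end via final_op.
accuracy, update_op = tf.metrics.accuracy(labels, predictions)

accuracy_value = evaluate_once(
    master='',                                          # local in-process session
    checkpoint_path='/tmp/train_logs/model.ckpt-1000',  # placeholder checkpoint
    logdir='/tmp/eval_logs',                            # placeholder summary dir
    num_evals=1,                                        # constant inputs: one pass
    eval_op=update_op,
    final_op=accuracy)

For the restore to succeed, the variables built in this graph must match the ones stored in the checkpoint being evaluated.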
Example #5
def evaluate_once(master,
                  checkpoint_path,
                  logdir,
                  num_evals=1,
                  initial_op=None,
                  initial_op_feed_dict=None,
                  eval_op=None,
                  eval_op_feed_dict=None,
                  final_op=None,
                  final_op_feed_dict=None,
                  summary_op=_USE_DEFAULT,
                  summary_op_feed_dict=None,
                  variables_to_restore=None,
                  session_config=None):
  """Evaluates the model at the given checkpoint path.

  Args:
    master: The BNS address of the TensorFlow master.
    checkpoint_path: The path to a checkpoint to use for evaluation.
    logdir: The directory where the TensorFlow summaries are written to.
    num_evals: The number of times to run `eval_op`.
    initial_op: An operation run at the beginning of evaluation.
    initial_op_feed_dict: A feed dictionary to use when executing `initial_op`.
    eval_op: An operation run `num_evals` times.
    eval_op_feed_dict: The feed dictionary to use when executing the `eval_op`.
    final_op: An operation to execute after all of the `eval_op` executions. The
      value of `final_op` is returned.
    final_op_feed_dict: A feed dictionary to use when executing `final_op`.
    summary_op: The summary_op to evaluate after running TF-Slim's metric ops. By
      default the summary_op is set to tf.summary.merge_all().
    summary_op_feed_dict: An optional feed dictionary to use when running the
      `summary_op`.
    variables_to_restore: A list of TensorFlow variables to restore during
      evaluation. If the argument is left as `None` then
      slim.variables.GetVariablesToRestore() is used.
    session_config: An instance of `tf.ConfigProto` that will be used to
      configure the `Session`. If left as `None`, the default will be used.

  Returns:
    The value of `final_op` or `None` if `final_op` is `None`.
  """
  if summary_op == _USE_DEFAULT:
    summary_op = summary.merge_all()

  hooks = [
      evaluation.StopAfterNEvalsHook(num_evals),
  ]

  if summary_op is not None:
    hooks.append(
        evaluation.SummaryAtEndHook(logdir, summary_op, summary_op_feed_dict))

  saver = None
  if variables_to_restore is not None:
    saver = tf_saver.Saver(variables_to_restore)

  return evaluation.evaluate_once(
      checkpoint_path,
      master=master,
      scaffold=monitored_session.Scaffold(
          init_op=initial_op, init_feed_dict=initial_op_feed_dict, saver=saver),
      eval_ops=eval_op,
      feed_dict=eval_op_feed_dict,
      final_ops=final_op,
      final_ops_feed_dict=final_op_feed_dict,
      hooks=hooks,
      config=session_config)