Code Example #1
File: evaluation_test.py  Project: Immexxx/tensorflow
  def _create_names_to_metrics(self, predictions, labels):
    accuracy0, update_op0 = metric_ops.streaming_accuracy(predictions, labels)
    accuracy1, update_op1 = metric_ops.streaming_accuracy(predictions + 1,
                                                          labels)

    names_to_values = {'Accuracy': accuracy0, 'Another_accuracy': accuracy1}
    names_to_updates = {'Accuracy': update_op0, 'Another_accuracy': update_op1}
    return names_to_values, names_to_updates
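Below is a minimal, self-contained sketch (assumptions: TensorFlow 1.x with tf.contrib available; the placeholder inputs and batch values are illustrative, not from the original test) of how dictionaries like names_to_values / names_to_updates are typically driven: run every update op once per batch, then read the value ops after all batches have been processed.

import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import metric_ops

predictions = tf.placeholder(tf.int64, shape=[None])
labels = tf.placeholder(tf.int64, shape=[None])

accuracy0, update_op0 = metric_ops.streaming_accuracy(predictions, labels)
accuracy1, update_op1 = metric_ops.streaming_accuracy(predictions + 1, labels)
names_to_values = {'Accuracy': accuracy0, 'Another_accuracy': accuracy1}
names_to_updates = {'Accuracy': update_op0, 'Another_accuracy': update_op1}

with tf.Session() as sess:
  # Streaming metrics accumulate their state in local variables.
  sess.run(tf.local_variables_initializer())
  for batch_pred, batch_label in [([1, 0, 1], [1, 1, 1]), ([0, 1], [0, 1])]:
    sess.run(names_to_updates,
             feed_dict={predictions: batch_pred, labels: batch_label})
  # Accuracy: 4/5 correct; Another_accuracy: 1/5 correct.
  print(sess.run(names_to_values))  # {'Accuracy': 0.8, 'Another_accuracy': 0.2}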
Code Example #2
  def testWithEpochLimit(self):
    predictions_limited = input.limit_epochs(self._predictions, num_epochs=1)
    labels_limited = input.limit_epochs(self._labels, num_epochs=1)

    value_op, update_op = metric_ops.streaming_accuracy(
        predictions_limited, labels_limited)

    init_op = control_flow_ops.group(variables.global_variables_initializer(),
                                     variables.local_variables_initializer())
    # Create checkpoint and log directories:
    chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
    gfile.MakeDirs(chkpt_dir)
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
    gfile.MakeDirs(logdir)

    # Save initialized variables to a checkpoint directory:
    saver = saver_lib.Saver()
    with self.test_session() as sess:
      init_op.run()
      saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

    # Now, run the evaluation loop:
    accuracy_value = evaluation.evaluation_loop(
        '', chkpt_dir, logdir, eval_op=update_op, final_op=value_op,
        max_number_of_evaluations=1, num_evals=10000)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
Code Example #3
    def testWithEpochLimit(self):
        predictions_limited = input.limit_epochs(self._predictions,
                                                 num_epochs=1)
        labels_limited = input.limit_epochs(self._labels, num_epochs=1)

        value_op, update_op = metric_ops.streaming_accuracy(
            predictions_limited, labels_limited)

        init_op = control_flow_ops.group(
            variables.global_variables_initializer(),
            variables.local_variables_initializer())
        # Create checkpoint and log directories:
        chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
        gfile.MakeDirs(chkpt_dir)
        logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
        gfile.MakeDirs(logdir)

        # Save initialized variables to a checkpoint directory:
        saver = saver_lib.Saver()
        with self.test_session() as sess:
            init_op.run()
            saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

        # Now, run the evaluation loop:
        accuracy_value = evaluation.evaluation_loop(
            '',
            chkpt_dir,
            logdir,
            eval_op=update_op,
            final_op=value_op,
            max_number_of_evaluations=1,
            num_evals=10000)
        self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
Code Example #4
  def testAdditionalHooks(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
    log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')

    # First, save out the current model to a checkpoint:
    self._prepareCheckpoint(checkpoint_path)

    # Next, determine the metric to evaluate:
    value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
                                                        self._labels)

    dumping_root = os.path.join(self.get_temp_dir(), 'tfdbg_dump_dir')
    dumping_hook = hooks.DumpingDebugHook(dumping_root, log_usage=False)
    try:
      # Run the evaluation and verify the results:
      accuracy_value = evaluation.evaluate_once(
          '', checkpoint_path, log_dir, eval_op=update_op, final_op=value_op,
          hooks=[dumping_hook])
      self.assertAlmostEqual(accuracy_value, self._expected_accuracy)

      dump = debug_data.DebugDumpDir(
          glob.glob(os.path.join(dumping_root, 'run_*'))[0])
      # Here we simply assert that the dumped data has been loaded and is
      # non-empty. We do not care about the detailed model-internal tensors or
      # their values.
      self.assertTrue(dump.dumped_tensor_data)
    finally:
      if os.path.isdir(dumping_root):
        shutil.rmtree(dumping_root)
Code Example #5
    def testEvaluatePerfectModel(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(),
                                      'evaluate_perfect_model_repeated')

        # Train a Model to completion:
        self._train_model(checkpoint_dir, num_steps=300)

        # Build the evaluation graph:
        inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        logits = logistic_classifier(inputs)
        predictions = math_ops.round(logits)

        accuracy, update_op = metric_ops.streaming_accuracy(
            predictions, labels)

        final_values = evaluation.evaluate_repeatedly(
            checkpoint_dir=checkpoint_dir,
            eval_ops=update_op,
            final_ops={'accuracy': accuracy},
            hooks=[
                evaluation.StopAfterNEvalsHook(1),
            ],
            max_number_of_evaluations=1)
        self.assertTrue(final_values['accuracy'] > .99)
Code Example #6
  def testEvaluationLoopTimeoutWithTimeoutFn(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluation_loop_timeout_with_timeout_fn')

    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)

    # Build the evaluation graph:
    inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    logits = logistic_classifier(inputs)
    predictions = math_ops.round(logits)

    accuracy, update_op = metric_ops.streaming_accuracy(predictions, labels)

    timeout_fn_calls = [0]
    def timeout_fn():
      timeout_fn_calls[0] += 1
      return timeout_fn_calls[0] > 3

    final_values = evaluation.evaluate_repeatedly(
        checkpoint_dir=checkpoint_dir,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy},
        hooks=[
            evaluation.StopAfterNEvalsHook(1),
        ],
        eval_interval_secs=1,
        max_number_of_evaluations=2,
        timeout=0.1,
        timeout_fn=timeout_fn)
    # We should have evaluated once.
    self.assertTrue(final_values['accuracy'] > .99)
    # And the timeout function should have been called 4 times.
    self.assertEqual(4, timeout_fn_calls[0])
Code Example #7
  def testEvaluatePerfectModel(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluate_perfect_model_once')

    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)

    # Build the evaluation graph:
    inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    logits = logistic_classifier(inputs)
    predictions = math_ops.round(logits)

    accuracy, update_op = metric_ops.streaming_accuracy(predictions, labels)

    checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)

    final_ops_values = evaluation.evaluate_once(
        checkpoint_path=checkpoint_path,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy},
        hooks=[
            evaluation.StopAfterNEvalsHook(1),
        ])
    self.assertTrue(final_ops_values['accuracy'] > .99)
Code Example #8
    def testAdditionalHooks(self):
        checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
        log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')

        # First, save out the current model to a checkpoint:
        self._prepareCheckpoint(checkpoint_path)

        # Next, determine the metric to evaluate:
        value_op, update_op = metric_ops.streaming_accuracy(
            self._predictions, self._labels)

        dumping_root = os.path.join(self.get_temp_dir(), 'tfdbg_dump_dir')
        dumping_hook = hooks.DumpingDebugHook(dumping_root, log_usage=False)
        try:
            # Run the evaluation and verify the results:
            accuracy_value = evaluation.evaluate_once('',
                                                      checkpoint_path,
                                                      log_dir,
                                                      eval_op=update_op,
                                                      final_op=value_op,
                                                      hooks=[dumping_hook])
            self.assertAlmostEqual(accuracy_value, self._expected_accuracy)

            dump = debug_data.DebugDumpDir(
                glob.glob(os.path.join(dumping_root, 'run_*'))[0])
            # Here we simply assert that the dumped data has been loaded and is
            # non-empty. We do not care about the detailed model-internal tensors or
            # their values.
            self.assertTrue(dump.dumped_tensor_data)
        finally:
            if os.path.isdir(dumping_root):
                shutil.rmtree(dumping_root)
Code Example #9
    def testFinalOpsOnEvaluationLoop(self):
        value_op, update_op = metric_ops.streaming_accuracy(
            self._predictions, self._labels)
        init_op = control_flow_ops.group(
            variables.global_variables_initializer(),
            variables.local_variables_initializer())
        # Create Checkpoint and log directories
        chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
        gfile.MakeDirs(chkpt_dir)
        logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
        gfile.MakeDirs(logdir)

        # Save initialized variables to checkpoint directory
        saver = saver_lib.Saver()
        with self.test_session() as sess:
            init_op.run()
            saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

        # Now, run the evaluation loop:
        accuracy_value = evaluation.evaluation_loop(
            '',
            chkpt_dir,
            logdir,
            eval_op=update_op,
            final_op=value_op,
            max_number_of_evaluations=1)
        self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
Code Example #10
  def testFinalOpsOnEvaluationLoop(self):
    value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
                                                        self._labels)
    init_op = control_flow_ops.group(variables.global_variables_initializer(),
                                     variables.local_variables_initializer())
    # Create Checkpoint and log directories
    chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
    gfile.MakeDirs(chkpt_dir)
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
    gfile.MakeDirs(logdir)

    # Save initialized variables to checkpoint directory
    saver = saver_lib.Saver()
    with self.test_session() as sess:
      init_op.run()
      saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

    # Now, run the evaluation loop:
    accuracy_value = evaluation.evaluation_loop(
        '',
        chkpt_dir,
        logdir,
        eval_op=update_op,
        final_op=value_op,
        max_number_of_evaluations=1)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
Code Example #11
  def testRestoredModelPerformance(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
    log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')

    # First, save out the current model to a checkpoint:
    self._prepareCheckpoint(checkpoint_path)

    # Next, determine the metric to evaluate:
    value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
                                                        self._labels)

    # Run the evaluation and verify the results:
    accuracy_value = evaluation.evaluate_once(
        '', checkpoint_path, log_dir, eval_op=update_op, final_op=value_op)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
Code Example #12
  def testFinalOpsOnEvaluationLoop(self):
    value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
                                                        self._labels)
    init_op = control_flow_ops.group(variables.global_variables_initializer(),
                                     variables.local_variables_initializer())
    # Create checkpoint and log directories:
    chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
    gfile.MakeDirs(chkpt_dir)
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
    gfile.MakeDirs(logdir)

    # Save initialized variables to a checkpoint directory:
    saver = saver_lib.Saver()
    with self.test_session() as sess:
      init_op.run()
      saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

    class Object(object):

      def __init__(self):
        self.hook_was_run = False

    obj = Object()

    # Create a custom session run hook.
    class CustomHook(session_run_hook.SessionRunHook):

      def __init__(self, obj):
        self.obj = obj

      def end(self, session):
        self.obj.hook_was_run = True

    # Now, run the evaluation loop:
    accuracy_value = evaluation.evaluation_loop(
        '',
        chkpt_dir,
        logdir,
        eval_op=update_op,
        final_op=value_op,
        hooks=[CustomHook(obj)],
        max_number_of_evaluations=1)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)

    # Validate that custom hook ran.
    self.assertTrue(obj.hook_was_run)
Code Example #13
  def testRestoredModelPerformance(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
    log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')

    # First, save out the current model to a checkpoint:
    init_op = control_flow_ops.group(variables.global_variables_initializer(),
                                     variables.local_variables_initializer())
    saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
    with self.test_session() as sess:
      sess.run(init_op)
      saver.save(sess, checkpoint_path)

    # Next, determine the metric to evaluate:
    value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
                                                        self._labels)

    # Run the evaluation and verify the results:
    accuracy_value = evaluation.evaluate_once(
        '', checkpoint_path, log_dir, eval_op=update_op, final_op=value_op)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
Code Example #14
    def create_metrics(self):
        """Creates the discriminator accuracy metric.

        For a real image the ground-truth label is 1; for a fake image the
        ground-truth label is 0. A threshold of .5 turns the discriminator
        probabilities into predictions.
        """
        probs = tf.concat([
            self.prob_real_a_is_real, self.prob_real_b_is_real,
            self.prob_fake_pool_a_is_real, self.prob_fake_pool_b_is_real
        ], axis=0)
        predictions = math_ops.to_float(math_ops.greater_equal(probs, .5))
        labels = tf.concat([
            tf.ones([1, 35, 35, 1]),
            tf.ones([1, 35, 35, 1]),
            tf.zeros([1, 35, 35, 1]),
            tf.zeros([1, 35, 35, 1])
        ], axis=0)
        return metric_ops.streaming_accuracy(predictions=predictions,
                                             labels=labels)
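A hedged, self-contained illustration of the metric built above (TensorFlow 1.x assumed; the prob_* tensors are random stand-ins for the discriminator outputs, not the real model): streaming_accuracy averages over every element of the stacked [4, 35, 35, 1] tensors, so the result is a per-pixel accuracy of the discriminator.

import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.contrib.metrics.python.ops import metric_ops

# Random stand-ins for the four discriminator probability maps.
prob_real_a_is_real = tf.random_uniform([1, 35, 35, 1])
prob_real_b_is_real = tf.random_uniform([1, 35, 35, 1])
prob_fake_pool_a_is_real = tf.random_uniform([1, 35, 35, 1])
prob_fake_pool_b_is_real = tf.random_uniform([1, 35, 35, 1])

probs = tf.concat([prob_real_a_is_real, prob_real_b_is_real,
                   prob_fake_pool_a_is_real, prob_fake_pool_b_is_real], axis=0)
predictions = math_ops.to_float(math_ops.greater_equal(probs, .5))
labels = tf.concat([tf.ones([2, 35, 35, 1]), tf.zeros([2, 35, 35, 1])], axis=0)

accuracy, update_op = metric_ops.streaming_accuracy(predictions, labels)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(accuracy))  # fraction of the 4 * 35 * 35 pixels labeled correctly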
Code Example #15
    def testEvaluationLoopTimeoutWithTimeoutFn(self):
        checkpoint_dir = os.path.join(
            self.get_temp_dir(), 'evaluation_loop_timeout_with_timeout_fn')

        # Train a Model to completion:
        self._train_model(checkpoint_dir, num_steps=300)

        # Build the evaluation graph:
        inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        logits = logistic_classifier(inputs)
        predictions = math_ops.round(logits)

        accuracy, update_op = metric_ops.streaming_accuracy(
            predictions, labels)

        timeout_fn_calls = [0]

        def timeout_fn():
            timeout_fn_calls[0] += 1
            return timeout_fn_calls[0] > 3

        final_values = evaluation.evaluate_repeatedly(
            checkpoint_dir=checkpoint_dir,
            eval_ops=update_op,
            final_ops={'accuracy': accuracy},
            hooks=[
                evaluation.StopAfterNEvalsHook(1),
            ],
            eval_interval_secs=1,
            max_number_of_evaluations=2,
            timeout=0.1,
            timeout_fn=timeout_fn)
        # We should have evaluated once.
        self.assertTrue(final_values['accuracy'] > .99)
        # And the timeout function should have been called 4 times.
        self.assertEqual(4, timeout_fn_calls[0])
Code Example #16
    def testRestoredModelPerformance(self):
        checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
        log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')

        # First, save out the current model to a checkpoint:
        init_op = control_flow_ops.group(
            variables.global_variables_initializer(),
            variables.local_variables_initializer())
        saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
        with self.test_session() as sess:
            sess.run(init_op)
            saver.save(sess, checkpoint_path)

        # Next, determine the metric to evaluate:
        value_op, update_op = metric_ops.streaming_accuracy(
            self._predictions, self._labels)

        # Run the evaluation and verify the results:
        accuracy_value = evaluation.evaluate_once('',
                                                  checkpoint_path,
                                                  log_dir,
                                                  eval_op=update_op,
                                                  final_op=value_op)
        self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
Code Example #17
def _accuracy(predictions, targets, weights=None):
    return metric_ops.streaming_accuracy(predictions, targets, weights=weights)
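A small, hedged usage sketch of the weighted variant above (TensorFlow 1.x assumed; the constants are illustrative): examples with weight 0 are excluded from the streamed accuracy.

import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import metric_ops

predictions = tf.constant([1., 0., 1., 1.])
targets = tf.constant([1., 1., 1., 0.])
weights = tf.constant([1., 1., 1., 0.])  # the last example is ignored

accuracy, update_op = metric_ops.streaming_accuracy(
    predictions, targets, weights=weights)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(accuracy))  # 2 of the 3 weighted examples match: ~0.667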
Code Example #18
File: eval_metrics.py  Project: 363158858/tensorflow
def _accuracy(probabilities, targets):
  predictions = math_ops.argmax(probabilities, 1)
  # undo one-hot
  labels = math_ops.argmax(targets, 1)
  return metric_ops.streaming_accuracy(predictions, labels)
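A hedged, self-contained sketch of how a helper like this is exercised (TensorFlow 1.x assumed; the constants are illustrative): the one-hot targets are collapsed with argmax before being compared to the predicted class.

import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.contrib.metrics.python.ops import metric_ops

probabilities = tf.constant([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
targets = tf.constant([[0., 1.], [1., 0.], [1., 0.]])  # one-hot labels

predictions = math_ops.argmax(probabilities, 1)
labels = math_ops.argmax(targets, 1)  # undo one-hot
accuracy, update_op = metric_ops.streaming_accuracy(predictions, labels)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(accuracy))  # 2 of 3 predicted classes match: ~0.667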
Code Example #19
def _accuracy(predictions, targets, weights=None):
  return metric_ops.streaming_accuracy(predictions, targets, weights=weights)
Code Example #20
def _accuracy_metric(predictions, labels, weights=None):
  threshold_predictions = math_ops.cast(
      math_ops.greater_equal(predictions, threshold), dtypes.float32)
  return metric_ops.streaming_accuracy(
      predictions=threshold_predictions, labels=labels, weights=weights)
Code Example #21
def _accuracy(probabilities, targets):
    predictions = math_ops.argmax(probabilities, 1)
    # undo one-hot
    labels = math_ops.argmax(targets, 1)
    return metric_ops.streaming_accuracy(predictions, labels)
Code Example #22
def _accuracy_metric(predictions, labels, weights=None):
    threshold_predictions = math_ops.to_float(
        math_ops.greater_equal(predictions, threshold))
    return metric_ops.streaming_accuracy(predictions=threshold_predictions,
                                         labels=labels,
                                         weights=weights)
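A minimal, hedged usage sketch (TensorFlow 1.x assumed; the constants are illustrative). The two snippets above reference a threshold captured from their enclosing scope; here it is made explicit so the pattern runs on its own.

import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.contrib.metrics.python.ops import metric_ops

threshold = 0.5
predictions = tf.constant([0.2, 0.7, 0.55, 0.4])  # e.g. sigmoid outputs
labels = tf.constant([0., 1., 0., 0.])

threshold_predictions = math_ops.to_float(
    math_ops.greater_equal(predictions, threshold))
accuracy, update_op = metric_ops.streaming_accuracy(
    predictions=threshold_predictions, labels=labels)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(accuracy))  # 3 of 4 thresholded predictions match: 0.75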