Example #1
 def metrics(self, regularization_losses=None):
   """Creates metrics. See `base_head.Head` for details."""
   keys = metric_keys.MetricKeys
   with ops.name_scope('metrics', values=(regularization_losses,)):
     # Mean metric.
     eval_metrics = {}
     eval_metrics[self._loss_mean_key] = metrics.Mean(name=keys.LOSS_MEAN)
     eval_metrics[self._accuracy_key] = metrics.Accuracy(name=keys.ACCURACY)
     eval_metrics[self._precision_key] = metrics.Precision(name=keys.PRECISION)
     eval_metrics[self._recall_key] = metrics.Recall(name=keys.RECALL)
     eval_metrics[self._prediction_mean_key] = metrics.Mean(
         name=keys.PREDICTION_MEAN)
     eval_metrics[self._label_mean_key] = metrics.Mean(name=keys.LABEL_MEAN)
     eval_metrics[self._accuracy_baseline_key] = (
         metrics.Mean(name=keys.ACCURACY_BASELINE))
     # The default summation_method is "interpolation" in the AUC metric.
     eval_metrics[self._auc_key] = metrics.AUC(name=keys.AUC)
     eval_metrics[self._auc_pr_key] = metrics.AUC(curve='PR', name=keys.AUC_PR)
     if regularization_losses is not None:
       eval_metrics[self._loss_regularization_key] = metrics.Mean(
           name=keys.LOSS_REGULARIZATION)
     for i, threshold in enumerate(self._thresholds):
       eval_metrics[self._accuracy_keys[i]] = metrics.BinaryAccuracy(
           name=self._accuracy_keys[i], threshold=threshold)
       eval_metrics[self._precision_keys[i]] = metrics.Precision(
           name=self._precision_keys[i], thresholds=threshold)
       eval_metrics[self._recall_keys[i]] = metrics.Recall(
           name=self._recall_keys[i], thresholds=threshold)
   return eval_metrics
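For reference, the same metric family can be assembled directly against the public tf.keras.metrics API. A minimal sketch, assuming TF 2.x; the dictionary keys and threshold values below are illustrative placeholders, not the MetricKeys constants used above:

import tensorflow as tf

thresholds = [0.25, 0.5, 0.75]  # placeholder values
eval_metrics = {
    'loss_mean': tf.keras.metrics.Mean(name='loss_mean'),
    'auc': tf.keras.metrics.AUC(name='auc'),
    # curve='PR' switches from the default ROC curve to precision-recall.
    'auc_pr': tf.keras.metrics.AUC(curve='PR', name='auc_pr'),
}
for i, t in enumerate(thresholds):
  # BinaryAccuracy takes a scalar `threshold`; Precision/Recall take `thresholds`.
  eval_metrics['accuracy_%d' % i] = tf.keras.metrics.BinaryAccuracy(
      name='accuracy_%d' % i, threshold=t)
  eval_metrics['precision_%d' % i] = tf.keras.metrics.Precision(
      name='precision_%d' % i, thresholds=t)
  eval_metrics['recall_%d' % i] = tf.keras.metrics.Recall(
      name='recall_%d' % i, thresholds=t)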
Example #2
 def test_unweighted(self):
   p_obj = metrics.Precision()
   y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
   y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
   self.evaluate(variables.variables_initializer(p_obj.variables))
   result = p_obj(y_true, y_pred)
   self.assertAlmostEqual(0.5, self.evaluate(result))
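Precision is TP / (TP + FP): indices 0 and 2 are predicted positive and only index 2 is actually positive, hence 0.5. A minimal eager-mode sketch reproducing the same number, assuming TF 2.x:

import tensorflow as tf

m = tf.keras.metrics.Precision()
m.update_state([0, 1, 1, 0], [1, 0, 1, 0])  # (y_true, y_pred)
print(m.result().numpy())  # 0.5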
Example #3
 def test_config(self):
   p_obj = metrics.Precision(name='my_precision', thresholds=[0.4, 0.9])
   self.assertEqual(p_obj.name, 'my_precision')
   self.assertEqual(len(p_obj.variables), 2)
   self.assertEqual([v.name for v in p_obj.variables],
                    ['true_positives:0', 'false_positives:0'])
   self.assertEqual(p_obj.thresholds, [0.4, 0.9])
Example #4
 def test_unweighted_top_k(self):
   p_obj = metrics.Precision(top_k=3)
   y_pred = constant_op.constant([0.2, 0.1, 0.5, 0, 0.2], shape=(1, 5))
   y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
   self.evaluate(variables.variables_initializer(p_obj.variables))
   result = p_obj(y_true, y_pred)
   self.assertAlmostEqual(1. / 3, self.evaluate(result))
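With top_k=3, only the three highest scores (indices 0, 2, and 4) count as predicted positives; of those, only index 2 is a true positive, giving 1/3.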
Example #5
 def test_unweighted_with_threshold(self):
   p_obj = metrics.Precision(thresholds=[0.5, 0.7])
   y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
   y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
   self.evaluate(variables.variables_initializer(p_obj.variables))
   result = p_obj(y_true, y_pred)
   self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
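When thresholds is a list, the metric returns one precision per threshold: at 0.5 the predicted positives are indices 0 and 2 (scores 1 and 0.6) with one true positive, giving 0.5; at 0.7 only index 0, a false positive, survives, giving 0. A minimal eager-mode sketch of the same vector result, assuming TF 2.x:

import tensorflow as tf

m = tf.keras.metrics.Precision(thresholds=[0.5, 0.7])
m.update_state([0, 1, 1, 0], [1, 0, 0.6, 0])  # (y_true, y_pred)
print(m.result().numpy())  # [0.5, 0.0]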
Example #6
 def test_div_by_zero(self):
   p_obj = metrics.Precision()
   y_pred = constant_op.constant([0, 0, 0, 0])
   y_true = constant_op.constant([0, 0, 0, 0])
   self.evaluate(variables.variables_initializer(p_obj.variables))
   result = p_obj(y_true, y_pred)
   self.assertEqual(0, self.evaluate(result))
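With no predicted positives, TP + FP is zero; the metric reports 0 rather than dividing by zero, which is exactly the behavior this test pins down.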
Example #7
 def metrics(self, regularization_losses=None):
     """Creates metrics. See `base_head.Head` for details."""
     keys = metric_keys.MetricKeys
     with ops.name_scope(None, 'metrics', (regularization_losses, )):
         # Mean metric.
         eval_metrics = {}
         eval_metrics[self._loss_mean_key] = metrics.Mean(
             name=keys.LOSS_MEAN)
         # The default summation_method is "interpolation" in the AUC metric.
         eval_metrics[self._auc_key] = metrics.AUC(name=keys.AUC)
         eval_metrics[self._auc_pr_key] = metrics.AUC(curve='PR',
                                                      name=keys.AUC_PR)
         if regularization_losses is not None:
             eval_metrics[self._loss_regularization_key] = metrics.Mean(
                 name=keys.LOSS_REGULARIZATION)
         for i, threshold in enumerate(self._thresholds):
             eval_metrics[self._accuracy_keys[i]] = metrics.BinaryAccuracy(
                 name=self._accuracy_keys[i], threshold=threshold)
             eval_metrics[self._precision_keys[i]] = (metrics.Precision(
                 name=self._precision_keys[i], thresholds=threshold))
             eval_metrics[self._recall_keys[i]] = metrics.Recall(
                 name=self._recall_keys[i], thresholds=threshold)
         for i in range(len(self._classes_for_class_based_metrics)):
             eval_metrics[self._prob_keys[i]] = metrics.Mean(
                 name=self._prob_keys[i])
             eval_metrics[self._auc_keys[i]] = metrics.AUC(
                 name=self._auc_keys[i])
             eval_metrics[self._auc_pr_keys[i]] = metrics.AUC(
                 curve='PR', name=self._auc_pr_keys[i])
     return eval_metrics
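This variant extends Example #1 with per-class metrics: one probability mean plus one ROC-AUC and one PR-AUC for each entry in _classes_for_class_based_metrics, while the per-threshold loop stays the same.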
Example #8
 def test_unweighted_all_incorrect(self):
   p_obj = metrics.Precision(thresholds=[0.5])
   inputs = np.random.randint(0, 2, size=(100, 1))
   y_pred = constant_op.constant(inputs)
   y_true = constant_op.constant(1 - inputs)
   self.evaluate(variables.variables_initializer(p_obj.variables))
   result = p_obj(y_true, y_pred)
   self.assertAlmostEqual(0, self.evaluate(result))
Example #9
  def test_unweighted_top_k_and_threshold(self):
    p_obj = metrics.Precision(thresholds=.7, top_k=2)
    self.evaluate(variables.variables_initializer(p_obj.variables))

    y_pred = constant_op.constant([0.2, 0.8, 0.6, 0, 0.2], shape=(1, 5))
    y_true = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
    result = p_obj(y_true, y_pred)
    self.assertAlmostEqual(1, self.evaluate(result))
    self.assertAlmostEqual(1, self.evaluate(p_obj.true_positives))
    self.assertAlmostEqual(0, self.evaluate(p_obj.false_positives))
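top_k and thresholds compose here: top_k=2 first restricts predicted positives to the two highest scores (indices 1 and 2), then the 0.7 threshold drops index 2 (score 0.6). Only index 1 remains, and it is a true positive, so precision is 1 with TP=1 and FP=0.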
Example #10
 def test_reset_states_precision(self):
     p_obj = metrics.Precision()
     model = _get_model([p_obj])
     x = np.concatenate((np.ones((50, 4)), np.ones((50, 4))))
     y = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))))
     model.evaluate(x, y)
     self.assertEqual(self.evaluate(p_obj.true_positives), 50.)
     self.assertEqual(self.evaluate(p_obj.false_positives), 50.)
     model.evaluate(x, y)
     self.assertEqual(self.evaluate(p_obj.true_positives), 50.)
     self.assertEqual(self.evaluate(p_obj.false_positives), 50.)
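The point of this test is that model.evaluate() resets metric state before each run: the true- and false-positive counts read 50 after both calls instead of doubling to 100.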
Example #11
 def test_reset_states(self):
   p_obj = metrics.Precision()
   model = _get_simple_sequential_model([p_obj])
   x = np.concatenate((np.ones((50, 4)), np.ones((50, 4))))
   y = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))))
   model.evaluate(x, y)
   self.assertEqual(self.evaluate(p_obj.tp), 50.)
   self.assertEqual(self.evaluate(p_obj.fp), 50.)
   model.evaluate(x, y)
   self.assertEqual(self.evaluate(p_obj.tp), 50.)
   self.assertEqual(self.evaluate(p_obj.fp), 50.)
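This is the same reset check written against an older revision of the metric, in which the accumulators were exposed as tp and fp; later releases renamed them to true_positives and false_positives, as in Example #10.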
Example #12
 def test_extreme_thresholds(self):
      p_obj = metrics.Precision(
          thresholds=[-1.0, 2.0])  # thresholds outside the [0, 1] score range
     y_pred = math_ops.cast(constant_op.constant([1, 0, 1, 0],
                                                 shape=(1, 4)),
                            dtype=dtypes.float32)
     y_true = math_ops.cast(constant_op.constant([0, 1, 1, 1],
                                                 shape=(1, 4)),
                            dtype=dtypes.float32)
     self.evaluate(variables.variables_initializer(p_obj.variables))
     result = p_obj(y_true, y_pred)
     self.assertArrayNear([0.75, 0.], self.evaluate(result), 0)
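The thresholds deliberately sit outside the [0, 1] score range: at -1.0 every example is predicted positive (three of the four labels are 1, hence 0.75), while at 2.0 none are, so the no-predicted-positives convention from Example #6 yields 0.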
Example #13
 def test_weighted(self):
   p_obj = metrics.Precision()
   y_pred = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
   y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
   self.evaluate(variables.variables_initializer(p_obj.variables))
   result = p_obj(
       y_true,
       y_pred,
       sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
   weighted_tp = 3.0 + 4.0
   weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
   expected_precision = weighted_tp / weighted_positives
   self.assertAlmostEqual(expected_precision, self.evaluate(result))
Example #14
    def test_config(self):
        p_obj = metrics.Precision(name='my_precision', thresholds=[0.4, 0.9])
        self.assertEqual(p_obj.name, 'my_precision')
        self.assertEqual(len(p_obj.variables), 2)
        self.assertEqual([v.name for v in p_obj.variables],
                         ['true_positives:0', 'false_positives:0'])
        self.assertEqual(p_obj.thresholds, [0.4, 0.9])

        # Check save and restore config
        p_obj2 = metrics.Precision.from_config(p_obj.get_config())
        self.assertEqual(p_obj2.name, 'my_precision')
        self.assertEqual(len(p_obj2.variables), 2)
        self.assertEqual(p_obj2.thresholds, [0.4, 0.9])
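get_config()/from_config() round-trips the constructor arguments (here name and thresholds), so the restored metric is configured identically; its accumulator variables, however, are created fresh at zero.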
Example #15
 def test_weighted_with_threshold(self):
   p_obj = metrics.Precision(thresholds=[0.5, 1.])
   y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
   y_pred = constant_op.constant([[1, 0], [0.6, 0]],
                                 shape=(2, 2),
                                 dtype=dtypes.float32)
   weights = constant_op.constant([[4, 0], [3, 1]],
                                  shape=(2, 2),
                                  dtype=dtypes.float32)
   self.evaluate(variables.variables_initializer(p_obj.variables))
   result = p_obj(y_true, y_pred, sample_weight=weights)
   weighted_tp = 0 + 3.
   weighted_positives = (0 + 3.) + (4. + 0.)
   expected_precision = weighted_tp / weighted_positives
   self.assertArrayNear([expected_precision, 0], self.evaluate(result), 1e-3)
Example #16
  def test_value_is_idempotent(self):
    p_obj = metrics.Precision(thresholds=[0.3, 0.72])
    y_pred = random_ops.random_uniform(shape=(10, 3))
    y_true = random_ops.random_uniform(shape=(10, 3))
    update_op = p_obj.update_state(y_true, y_pred)
    self.evaluate(variables.variables_initializer(p_obj.variables))

    # Run several updates.
    for _ in range(10):
      self.evaluate(update_op)

    # Then verify idempotency.
    initial_precision = self.evaluate(p_obj.result())
    for _ in range(10):
      self.assertArrayNear(initial_precision, self.evaluate(p_obj.result()),
                           1e-3)
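Idempotency means result() is a pure read of the accumulated state: only update_state (the update_op above) mutates the tp/fp variables, so repeated result() calls return the same value.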
Example #17
    def test_unweighted_top_k_and_class_id(self):
        p_obj = metrics.Precision(class_id=2, top_k=2)
        self.evaluate(variables.variables_initializer(p_obj.variables))

        y_pred = constant_op.constant([0.2, 0.6, 0.3, 0, 0.2], shape=(1, 5))
        y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
        result = p_obj(y_true, y_pred)
        self.assertAlmostEqual(1, self.evaluate(result))
        self.assertAlmostEqual(1, self.evaluate(p_obj.tp))
        self.assertAlmostEqual(0, self.evaluate(p_obj.fp))

        y_pred = constant_op.constant([1, 1, 0.9, 1, 1], shape=(1, 5))
        y_true = constant_op.constant([0, 1, 1, 0, 0], shape=(1, 5))
        result = p_obj(y_true, y_pred)
        self.assertAlmostEqual(1, self.evaluate(result))
        self.assertAlmostEqual(1, self.evaluate(p_obj.tp))
        self.assertAlmostEqual(0, self.evaluate(p_obj.fp))
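With class_id=2, only column 2 of the predictions is scored, and only when it ranks among the top 2. In the first batch, 0.3 is in the top 2 and its label is 1, giving TP=1. In the second batch, 0.9 is crowded out of the top 2 by the 1.0 scores, so nothing updates and the counts are unchanged.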
Example #18
    def test_weighted_top_k(self):
        p_obj = metrics.Precision(top_k=3)
        y_pred1 = constant_op.constant([0.2, 0.1, 0.4, 0, 0.2], shape=(1, 5))
        y_true1 = constant_op.constant([0, 1, 1, 0, 1], shape=(1, 5))
        self.evaluate(variables.variables_initializer(p_obj.variables))
        self.evaluate(
            p_obj(y_true1,
                  y_pred1,
                  sample_weight=constant_op.constant([[1, 4, 2, 3, 5]])))

        y_pred2 = constant_op.constant([0.2, 0.6, 0.4, 0.2, 0.2], shape=(1, 5))
        y_true2 = constant_op.constant([1, 0, 1, 1, 1], shape=(1, 5))
        result = p_obj(y_true2, y_pred2, sample_weight=constant_op.constant(3))

        tp = (2 + 5) + (3 + 3)
        predicted_positives = (1 + 2 + 5) + (3 + 3 + 3)
        expected_precision = tp / predicted_positives
        self.assertAlmostEqual(expected_precision, self.evaluate(result))
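Two details make this arithmetic work: sample weights scale both the true-positive numerator and the predicted-positive denominator, and the three-way tie at 0.2 in y_pred2 is broken by index order, so index 0 (a true positive) takes the last top-3 slot. Note that tp / predicted_positives relies on true division; under Python 2 this line would need from __future__ import division.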
Example #19
  def test_multiple_updates(self):
    p_obj = metrics.Precision(thresholds=[0.5, 1.])
    y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
    y_pred = constant_op.constant([[1, 0], [0.6, 0]],
                                  shape=(2, 2),
                                  dtype=dtypes.float32)
    weights = constant_op.constant([[4, 0], [3, 1]],
                                   shape=(2, 2),
                                   dtype=dtypes.float32)
    self.evaluate(variables.variables_initializer(p_obj.variables))
    update_op = p_obj.update_state(y_true, y_pred, sample_weight=weights)
    for _ in range(2):
      self.evaluate(update_op)

    weighted_tp = (0 + 3.) + (0 + 3.)
    weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
    expected_precision = weighted_tp / weighted_positives
    self.assertArrayNear([expected_precision, 0], self.evaluate(p_obj.result()),
                         1e-3)
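Running the same weighted update twice doubles both the numerator and the denominator, so the precision at the 0.5 threshold is unchanged (6/14 = 3/7); the 1.0 threshold still sees no predicted positives, so its precision stays 0.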