Example 1
 def testAccuracyDtype(self):
   """The dtype argument overrides the default float64 accumulators."""
   acc = metrics.Accuracy(dtype=dtypes.float32)
   acc([0, 0], [0, 1])  # one of two predictions matches -> 0.5
   self.assertEqual(0.5, acc.result().numpy())
   # Both the metric itself and its result honor the requested dtype.
   self.assertEqual(dtypes.float32, acc.dtype)
   self.assertEqual(dtypes.float32, acc.result().dtype)
Example 2
 def testWeightedAccuracy(self):
   """Weighted accuracy is weighted-correct count over total weight."""
   acc = metrics.Accuracy()
   # Only the first two examples carry weight: 1 weighted correct of weight 2.
   acc([0, 1, 2, 3], [0, 0, 0, 0], weights=[1, 1, 0, 0])
   acc([4], [4], weights=[0.5])  # correct, weight 0.5
   acc([5], [0], weights=[0.5])  # wrong, weight 0.5
   acc([6], [6])  # correct, default weight 1
   acc([7], [2])  # wrong, default weight 1
   # (1 + 0.5 + 0 + 1 + 0) / (2 + 0.5 + 0.5 + 1 + 1) = 2.5 / 5
   self.assertEqual(2.5/5, acc.result().numpy())
Example 3
 def testAccuracy(self):
   """Accuracy accumulates correct/total across multiple update calls."""
   acc = metrics.Accuracy()
   # (labels, predictions) batches: 3 correct out of 8 total examples.
   batches = [
       ([0, 1, 2, 3], [0, 0, 0, 0]),  # 1 of 4 correct
       ([4], [4]),  # correct
       ([5], [0]),  # wrong
       ([6], [6]),  # correct
       ([7], [2]),  # wrong
   ]
   for labels, predictions in batches:
     acc(labels, predictions)
   self.assertEqual(3.0/8, acc.result().numpy())
   # Default accumulator dtype is float64.
   self.assertEqual(dtypes.float64, acc.dtype)
   self.assertEqual(dtypes.float64, acc.result().dtype)
 def __init__(self,
              model,
              loss_key="loss",
              label_key="label",
              predicted_class_key="predicted_class",
              weights_key="weights"):
     """Creates the evaluator and registers its tracked metrics.

     Registers an average-loss `Mean` metric and an `Accuracy` metric via
     `track_metric`, and stores the lookup keys for later use.

     Args:
       model: Model forwarded to the base evaluator's constructor.
       loss_key: Key for the per-example loss value. Presumably used to read
         from the eval dictionary — confirm against the eval loop.
       label_key: Key for the ground-truth label (same assumption).
       predicted_class_key: Key for the predicted class (same assumption).
       weights_key: Key for optional example weights (same assumption).
     """
     super(SparseSoftmaxEvaluator, self).__init__(model)
     # TODO(josh11b): Expand this to include everything from the standard
     # SparseSoftmax Head.
     self.avg_loss = self.track_metric(metrics.Mean("Avg Loss"))
     self.accuracy = self.track_metric(metrics.Accuracy())
     self.loss_key = loss_key
     self.label_key = label_key
     self.predicted_class_key = predicted_class_key
     self.weights_key = weights_key
Example 5
 def testAccuracyDifferentShapes(self):
   """Labels and predictions with mismatched shapes raise an error."""
   acc = metrics.Accuracy()
   # [[0], [0]] is rank-2 while [0, 1] is rank-1; the update must reject it.
   with self.assertRaises(errors.InvalidArgumentError):
     acc([[0], [0]], [0, 1])