Example #1
 def testTwoMeans(self):
   # Verify two metrics with the same class and name don't
   # accidentally share state.
   m1 = metrics.Mean()
   m1(0)
   m2 = metrics.Mean()
   m2(2)
   self.assertAllEqual(0.0, m1.result())
   self.assertAllEqual(2.0, m2.result())
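These snippets are method bodies lifted from TensorFlow's eager-metrics test suite, so their imports are not shown. A minimal, assumed set of imports that would resolve the names used in the examples (module paths vary across TF 1.x versions, so treat this as a sketch rather than the test file's actual header) is:

from tensorflow.contrib.eager.python import metrics  # metrics.Mean, metrics.Accuracy
from tensorflow.python.eager import context          # context.graph_mode(), context.executing_eagerly()
from tensorflow.python.framework import dtypes       # dtypes.float32, dtypes.float64
from tensorflow.python.framework import ops          # ops.convert_to_tensor, graph collections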
Example #2
 def testTwoMeansGraph(self):
   # Verify that using two metrics with the same name in the same graph
   # raises a ValueError.
   with context.graph_mode():
     m1 = metrics.Mean()
     m1(0)
     with self.assertRaises(ValueError):
       m2 = metrics.Mean()
       m2(2)
Example #3
 def testMetricsChain(self):
   with context.graph_mode(), self.cached_session():
     m1 = metrics.Mean()
     m2 = metrics.Mean(name="m2")
     update_m2 = m2(3.0)
     update_m2_2 = m2(m1(1.0))
     m1.init_variables().run()
     m2.init_variables().run()
     update_m2.eval()
     update_m2_2.eval()
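     # m2 averaged 3.0 and the 1.0 forwarded from m1, so m2 -> 2.0; m1 only saw 1.0.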
     self.assertAllEqual(m2.result().eval(), 2.0)
     self.assertAllEqual(m1.result().eval(), 1.0)
Example #4
 def testMeanDtype(self):
   # Can override default dtype of float64.
   m = metrics.Mean(dtype=dtypes.float32)
   m([0, 2])
   self.assertEqual(1, m.result().numpy())
   self.assertEqual(dtypes.float32, m.dtype)
   self.assertEqual(dtypes.float32, m.result().dtype)
Example #5
 def testInitVariables(self):
   m = metrics.Mean()
   m([1, 10, 100, 1000])
   m([10000.0, 100000.0])
   self.assertEqual(111111.0/6, m.result().numpy())
   m.init_variables()
   m(7)
   self.assertEqual(7.0, m.result().numpy())
Example #6
 def testMean(self):
   m = metrics.Mean()
   m([1, 10, 100])
   m(1000)
   m([10000.0, 100000.0])
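   # Six values totaling 111111.0 have been accumulated, so the mean is 111111.0/6.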
   self.assertEqual(111111.0/6, m.result().numpy())
   self.assertEqual(dtypes.float64, m.dtype)
   self.assertEqual(dtypes.float64, m.result().dtype)
Example #7
 def testBuildMean(self):
   # Verify that calling build() on Mean and then calling the metric won't
   # recreate its variables.
   m = metrics.Mean()
   m.build()
   old_numer = m.numer
   m(0.0)
   self.assertTrue(old_numer is m.numer)
Example #8
 def testUseGlobalVariablesCollections(self):
   with context.graph_mode(), ops.Graph().as_default():
     m = metrics.Mean(use_global_variables=True)
     m(1000)
     self.assertEqual(
         set(m.variables),
         set(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
     self.assertEqual(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES), [])
     self.assertEqual(
         set(m.variables),
         set(ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
Example #9
 def testGraphAndEagerTensorGlobalVariables(self):
   m = metrics.Mean(use_global_variables=True)
   inputs = ops.convert_to_tensor([1.0, 2.0])
   accumulate = m(inputs)
   result = m.result()
   self.evaluate(m.init_variables())
   self.evaluate(accumulate)
   self.assertEqual(self.evaluate(result), 1.5)
   # Second init resets all the variables.
   self.evaluate(m.init_variables())
   inputs = ops.convert_to_tensor([2.0, 3.0])
   self.evaluate(m(inputs))
   value = m.value()
   self.assertEqual(self.evaluate(value), 2.5)
Example #10
  def testSaveRestore(self):
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    mean = metrics.Mean()
    checkpoint = trackable_utils.Checkpoint(mean=mean)
    mean.build()
    mean._built = True
    self.evaluate(mean.init_variables())
    self.evaluate(mean(100.))
    self.evaluate(mean(200.))
    save_path = checkpoint.save(checkpoint_prefix)
    self.evaluate(mean(1000.))
    checkpoint.restore(save_path).assert_consumed().run_restore_ops()
    self.evaluate(mean(300.))
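    # The restore discarded the 1000. update, so the mean is over 100., 200. and 300.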
    self.assertAllEqual(200., self.evaluate(mean.value()))

    restore_mean = metrics.Mean()
    restore_checkpoint = trackable_utils.Checkpoint(mean=restore_mean)
    status = restore_checkpoint.restore(save_path)
    restore_update = restore_mean(300.)
    status.assert_consumed().run_restore_ops()
    self.evaluate(restore_update)
    self.assertAllEqual(200., self.evaluate(restore_mean.value()))
    self.assertEqual(3, self.evaluate(restore_mean.denom))
Example #11
 def testGraphWithPlaceholder(self):
   with context.graph_mode(), self.cached_session() as sess:
     m = metrics.Mean()
     p = array_ops.placeholder(dtypes.float32)
     accumulate = m(p)
     init_op = m.init_variables()
     init_op.run()
     sess.run(accumulate, feed_dict={p: [1, 10, 100]})
     sess.run(accumulate, feed_dict={p: 1000})
     sess.run(accumulate, feed_dict={p: [10000, 100000]})
     self.assertAllEqual(m.result().eval(), 111111.0/6)
     # Second init resets all the variables.
     init_op.run()
     sess.run(accumulate, feed_dict={p: 7})
     self.assertAllEqual(m.result().eval(), 7)
Example #12
 def __init__(self,
              model,
              loss_key="loss",
              label_key="label",
              predicted_class_key="predicted_class",
              weights_key="weights"):
     super(SparseSoftmaxEvaluator, self).__init__(model)
     # TODO(josh11b): Expand this to include everything from the standard
     # SparseSoftmax Head.
     self.avg_loss = self.track_metric(metrics.Mean("Avg Loss"))
     self.accuracy = self.track_metric(metrics.Accuracy())
     self.loss_key = loss_key
     self.label_key = label_key
     self.predicted_class_key = predicted_class_key
     self.weights_key = weights_key
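The *_key arguments presumably name the entries this evaluator looks up in the dictionary of per-batch model outputs it is given, routing the loss into avg_loss and the labels/predicted classes into accuracy. A purely illustrative batch (the values are made up; only the default key names come from the snippet above) might look like:

eval_data = {
    "loss": [0.31, 0.78],        # averaged by self.avg_loss
    "label": [2, 5],             # ground-truth class ids
    "predicted_class": [2, 3],   # compared against labels by self.accuracy
    "weights": [1.0, 0.5],       # optional per-example weights
}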
Example #13
  def testWriteSummaries(self):
    m = metrics.Mean()
    m([1, 10, 100])
    training_util.get_or_create_global_step()
    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(
        logdir, max_queue=0,
        name="t0").as_default(), summary_ops.always_record_summaries():
      m.result()  # As a side-effect will write summaries.

    events = summary_test_util.events_from_logdir(logdir)
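    # Two events are expected: the writer's file-version record plus the scalar
    # written by result(); 37.0 is the mean of the inputs, (1 + 10 + 100) / 3.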
    self.assertEqual(len(events), 2)
    self.assertEqual(events[1].summary.value[0].simple_value, 37.0)

    # Get result without saving the summary.
    logdir = tempfile.mkdtemp()
    with summary_ops.create_file_writer(
        logdir, max_queue=0,
        name="t0").as_default(), summary_ops.always_record_summaries():
      m.result(write_summary=False)  # Does not write a summary this time.
      # events_from_logdir(_) asserts the directory exists.
    events = summary_test_util.events_from_logdir(logdir)
    self.assertEqual(len(events), 1)
Example #14
  def testGraphAndEagerTensorWhileLoopDoubleCall(self):
    m = metrics.Mean()
    init_value = constant_op.constant(1)
    cond = lambda i: math_ops.less(i, 3)
    def body(x):
      with ops.control_dependencies([m(x)]):
        return math_ops.add(x, 1)
    accumulate = control_flow_ops.while_loop(cond, body, [init_value])

    result = m.result()
    self.evaluate(m.init_variables())
    self.evaluate(accumulate)
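    # The loop body runs for x = 1 and 2 (cond stops at 3), so the mean is 1.5.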
    self.assertEqual(self.evaluate(result), 1.5)
    # Second init resets all the variables.
    self.evaluate(m.init_variables())
    inputs = ops.convert_to_tensor([2.0, 3.0])
    self.evaluate(m(inputs))
    if ops.context.executing_eagerly():
      self.evaluate(control_flow_ops.while_loop(cond, body, [init_value]))
    else:
      # Reuse the loop operators in graph mode
      self.evaluate(accumulate)
    value = m.value()
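    # After the reset m saw [2.0, 3.0] plus the loop's 1 and 2 again: (2+3+1+2)/4 = 2.0.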
    self.assertEqual(self.evaluate(value), 2.0)
Example #15
 def testNamesWithSpaces(self):
   m1 = metrics.Mean("has space")
   m1(0)
   self.assertEqual(m1.name, "has space")
   self.assertEqual(m1.numer.name, "has_space/numer:0")
Example #16
 def testWeightedMean(self):
   m = metrics.Mean()
   m([1, 100, 100000], weights=[1, 0.2, 0.3])
   m([500000, 5000, 500])  # weights of 1 each
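   # Weighted sum: 1*1 + 100*0.2 + 100000*0.3 + 500000 + 5000 + 500 = 535521; total weight: 4.5.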
   self.assertNear(535521/4.5, m.result().numpy(), 0.001)