예제 #1
0
 def testTwoMeans(self):
     """Creating a second Mean after using the first should raise."""
     first = metrics.Mean()
     first(0)
     # A second metric with the same default name must be rejected.
     with self.assertRaises(ValueError):
         second = metrics.Mean()
         second(2)
예제 #2
0
 def testTwoMeans(self):
     """Two Mean instances must keep fully independent state."""
     mean_a = metrics.Mean()
     mean_a(0)
     mean_b = metrics.Mean()
     mean_b(2)
     # Each metric reports only the value it was fed.
     self.assertAllEqual(0.0, mean_a.result())
     self.assertAllEqual(2.0, mean_b.result())
예제 #3
0
 def testTwoMeansGraph(self):
     """In graph mode, a duplicate metric name raises ValueError."""
     with context.graph_mode():
         first = metrics.Mean()
         first(0)
         # The second identically named metric must fail when called.
         with self.assertRaises(ValueError):
             duplicate = metrics.Mean()
             duplicate(2)
예제 #4
0
 def testTwoMeans(self):
   """Same-class metrics get unique names and don't share state."""
   mean_a = metrics.Mean()
   mean_b = metrics.Mean()
   mean_a(0)
   mean_b(2)
   self.assertEqual(0, mean_a.result().numpy())
   self.assertEqual(2, mean_b.result().numpy())
   # Auto-generated names must differ between the two instances.
   self.assertNotEqual(mean_a.name, mean_b.name)
예제 #5
0
 def testNamesWithSpaces(self):
   """Spaces survive in metric names; variable names sanitize and uniquify."""
   mean_a = metrics.Mean("has space")
   mean_b = metrics.Mean("has space")
   mean_b(2)
   mean_a(0)
   self.assertEqual(mean_a.name, "has space")
   self.assertEqual(mean_a.numer.name, "has_space/numer:0")
   # The duplicate name gets a "_1" suffix.
   self.assertEqual(mean_b.name, "has space_1")
   self.assertEqual(mean_b.numer.name, "has_space_1/numer:0")
예제 #6
0
 def testTwoMeansGraph(self):
   """Graph mode: identically classed Means keep independent accumulators."""
   with context.graph_mode(), self.test_session() as sess:
     mean_a = metrics.Mean()
     mean_b = metrics.Mean()
     update_a = mean_a(0)
     update_b = mean_b(2)
     variables.global_variables_initializer().run()
     # Run both update ops in a single session step.
     sess.run([update_a, update_b])
     self.assertEqual(0, mean_a.result().eval())
     self.assertEqual(2, mean_b.result().eval())
예제 #7
0
 def testMetricsChain(self):
     """One metric's update op can consume another metric's update output."""
     with context.graph_mode(), self.test_session():
         inner = metrics.Mean()
         outer = metrics.Mean(name="m2")
         update_outer = outer(3.0)
         # inner(1.0) yields 1.0, which is then fed into outer.
         update_chained = outer(inner(1.0))
         inner.init_variables().run()
         outer.init_variables().run()
         update_outer.eval()
         update_chained.eval()
         # outer saw 3.0 and 1.0 -> mean 2.0; inner saw only 1.0.
         self.assertAllEqual(outer.result().eval(), 2.0)
         self.assertAllEqual(inner.result().eval(), 1.0)
예제 #8
0
 def testMeanDtype(self):
     """The float64 default dtype can be overridden at construction."""
     mean = metrics.Mean(dtype=dtypes.float32)
     mean([0, 2])
     self.assertEqual(1, mean.result().numpy())
     # Both the metric and its result must carry the requested dtype.
     self.assertEqual(dtypes.float32, mean.dtype)
     self.assertEqual(dtypes.float32, mean.result().dtype)
예제 #9
0
    def testGraphAndEagerTensorWhileLoopDoubleCall(self):
        """Updates a Mean from inside a while_loop body, twice, across a reset."""
        m = metrics.Mean()
        init_value = constant_op.constant(1)
        cond = lambda i: math_ops.less(i, 3)

        def body(x):
            # The control dependency forces the metric update m(x) to run
            # before the loop counter is advanced.
            with ops.control_dependencies([m(x)]):
                return math_ops.add(x, 1)

        accumulate = control_flow_ops.while_loop(cond, body, [init_value])

        result = m.result()
        self.evaluate(m.init_variables())
        self.evaluate(accumulate)
        # The loop fed 1 and 2 into the metric: mean == 1.5.
        self.assertEqual(self.evaluate(result), 1.5)
        # Second init resets all the variables.
        self.evaluate(m.init_variables())
        inputs = ops.convert_to_tensor([2.0, 3.0])
        self.evaluate(m(inputs))
        # NOTE(review): other tests in this file call
        # context.executing_eagerly() directly; confirm `ops` really exposes
        # a `context` attribute here.
        if ops.context.executing_eagerly():
            self.evaluate(control_flow_ops.while_loop(cond, body,
                                                      [init_value]))
        else:
            # Reuse the loop operators in graph mode
            self.evaluate(accumulate)
        value = m.value()
        # After the reset: batch values 2, 3 plus loop values 1, 2 => mean 2.0.
        self.assertEqual(self.evaluate(value), 2.0)
예제 #10
0
 def testInitVariables(self):
     """init_variables() discards all previously accumulated state."""
     mean = metrics.Mean()
     mean([1, 10, 100, 1000])
     mean([10000.0, 100000.0])
     self.assertEqual(111111.0 / 6, mean.result().numpy())
     # After re-initialization only the new value contributes.
     mean.init_variables()
     mean(7)
     self.assertEqual(7.0, mean.result().numpy())
예제 #11
0
 def testBuildMean(self):
     """build() creates the variables once; a later call must reuse them."""
     mean = metrics.Mean()
     mean.build()
     numer_before_call = mean.numer
     mean(0.0)
     # Identity check: the numerator variable object must be unchanged.
     self.assertTrue(numer_before_call is mean.numer)
예제 #12
0
 def testMean(self):
     """Mean averages across scalar and list inputs over multiple calls."""
     mean = metrics.Mean()
     mean([1, 10, 100])
     mean(1000)
     mean([10000.0, 100000.0])
     # Six values totalling 111111 were accumulated.
     self.assertEqual(111111.0 / 6, mean.result().numpy())
     # Default dtype is float64.
     self.assertEqual(dtypes.float64, mean.dtype)
     self.assertEqual(dtypes.float64, mean.result().dtype)
예제 #13
0
 def testVariableCollections(self):
   """Metric variables register in LOCAL_VARIABLES and METRIC_VARIABLES."""
   with context.graph_mode(), ops.Graph().as_default():
     mean = metrics.Mean()
     mean(1000)
     metric_vars = set(mean.variables)
     self.assertEqual(
         metric_vars,
         set(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)))
     self.assertEqual(
         metric_vars,
         set(ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
예제 #14
0
 def testGraph(self):
   """Graph mode: accumulate through a placeholder, then check the mean."""
   with context.graph_mode(), self.test_session() as sess:
     mean = metrics.Mean()
     value_feed = array_ops.placeholder(dtypes.float32)
     accumulate_op = mean(value_feed)
     variables.global_variables_initializer().run()
     # Feed the same six values the eager test uses.
     for fed in ([1, 10, 100], 1000, [10000, 100000]):
       sess.run(accumulate_op, feed_dict={value_feed: fed})
     self.assertAllEqual(mean.result().eval(), 111111.0/6)
예제 #15
0
 def testUseGlobalVariablesCollections(self):
   """use_global_variables=True places variables in GLOBAL_VARIABLES."""
   with context.graph_mode(), ops.Graph().as_default():
     mean = metrics.Mean(use_global_variables=True)
     mean(1000)
     metric_vars = set(mean.variables)
     self.assertEqual(
         metric_vars,
         set(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
     # Nothing should have been registered as a local variable.
     self.assertEqual(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES), [])
     self.assertEqual(
         metric_vars,
         set(ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
예제 #16
0
 def testSummaryArg(self):
     """result() accepts only a boolean for write_summary."""
     mean = metrics.Mean()
     mean([1, 10, 100])
     mean(1000)
     mean([10000.0, 100000.0])
     expected = 111111.0 / 6
     # Both boolean settings return the same value.
     self.assertEqual(expected, mean.result(write_summary=True).numpy())
     self.assertEqual(expected, mean.result(write_summary=False).numpy())
     # Non-boolean arguments are rejected.
     with self.assertRaises(ValueError):
         mean.result(write_summary=5)
     with self.assertRaises(ValueError):
         mean.result(write_summary=[True])
예제 #17
0
    def testSharedMetric(self):
        """Tracking the same metric object in two evaluators must fail."""

        class MetricArgEvaluator(evaluator.Evaluator):

            def __init__(self, model, m):
                super(MetricArgEvaluator, self).__init__(model)
                self.m = self.track_metric(m)

        shared_mean = metrics.Mean("mean")
        model = IdentityModel()
        first_evaluator = MetricArgEvaluator(model, shared_mean)
        # A second evaluator may not adopt an already-tracked metric.
        with self.assertRaisesRegexp(ValueError, "already added"):
            MetricArgEvaluator(model, shared_mean)
        del first_evaluator
예제 #18
0
    def testWriteSummaries(self):
        """result() writes a summary event while recording is enabled."""
        mean = metrics.Mean()
        mean([1, 10, 100])
        training_util.get_or_create_global_step()
        logdir = tempfile.mkdtemp()
        writer = summary_ops.create_file_writer(logdir, max_queue=0, name="t0")
        with writer.as_default(), summary_ops.always_record_summaries():
            mean.result()  # As a side-effect will write summaries.

        events = summary_test_util.events_from_logdir(logdir)
        self.assertEqual(len(events), 2)
        # (1 + 10 + 100) / 3 == 37.
        self.assertEqual(events[1].summary.value[0].simple_value, 37.0)
예제 #19
0
 def testGraphAndEagerTensor(self):
     """Accumulation works via self.evaluate in graph and eager modes."""
     mean = metrics.Mean()
     first_batch = ops.convert_to_tensor([1.0, 2.0])
     accumulate = mean(first_batch)
     result = mean.result()
     self.evaluate(mean.init_variables())
     self.evaluate(accumulate)
     self.assertEqual(self.evaluate(result), 1.5)
     # Second init resets all the variables.
     self.evaluate(mean.init_variables())
     second_batch = ops.convert_to_tensor([2.0, 3.0])
     self.evaluate(mean(second_batch))
     value = mean.value()
     self.assertEqual(self.evaluate(value), 2.5)
예제 #20
0
 def testGraphWithPlaceholder(self):
     """Placeholder-fed accumulation with an init-op reset in between."""
     with context.graph_mode(), self.test_session() as sess:
         mean = metrics.Mean()
         value_feed = array_ops.placeholder(dtypes.float32)
         accumulate_op = mean(value_feed)
         init_op = mean.init_variables()
         init_op.run()
         for fed in ([1, 10, 100], 1000, [10000, 100000]):
             sess.run(accumulate_op, feed_dict={value_feed: fed})
         self.assertAllEqual(mean.result().eval(), 111111.0 / 6)
         # Second init resets all the variables.
         init_op.run()
         sess.run(accumulate_op, feed_dict={value_feed: 7})
         self.assertAllEqual(mean.result().eval(), 7)
예제 #21
0
    def testSaveRestore(self):
        """Checkpoint save/restore round-trips a Mean's accumulator state."""
        checkpoint_directory = self.get_temp_dir()
        checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
        mean = metrics.Mean()
        checkpoint = checkpointable_utils.Checkpoint(mean=mean)
        mean.build()
        # NOTE(review): pokes the private _built flag directly — presumably
        # build() alone does not set it; confirm against the Metric class.
        mean._built = True
        self.evaluate(mean.init_variables())
        self.evaluate(mean(100.))
        self.evaluate(mean(200.))
        # State at save time: numer == 300, denom == 2.
        save_path = checkpoint.save(checkpoint_prefix)
        self.evaluate(mean(1000.))
        # Restoring discards the post-save 1000.
        checkpoint.restore(save_path).assert_consumed().run_restore_ops()
        self.evaluate(mean(300.))
        # (300 + 300) / (2 + 1) == 200.
        self.assertAllEqual(200., self.evaluate(mean.value()))

        # A fresh metric restored from the same checkpoint must behave the
        # same, even when its update op is created before restore finishes.
        restore_mean = metrics.Mean()
        restore_checkpoint = checkpointable_utils.Checkpoint(mean=restore_mean)
        status = restore_checkpoint.restore(save_path)
        restore_update = restore_mean(300.)
        status.assert_consumed().run_restore_ops()
        self.evaluate(restore_update)
        self.assertAllEqual(200., self.evaluate(restore_mean.value()))
        self.assertEqual(3, self.evaluate(restore_mean.denom))
예제 #22
0
 def __init__(self,
              model,
              loss_key="loss",
              label_key="label",
              predicted_class_key="predicted_class",
              weights_key="weights"):
     """Builds the evaluator's metrics and records the result-dict keys.

     Args:
       model: The model to evaluate.
       loss_key: Key under which the model reports its loss.
       label_key: Key under which the model reports labels.
       predicted_class_key: Key under which the model reports predictions.
       weights_key: Key under which the model reports example weights.
     """
     super(SparseSoftmaxEvaluator, self).__init__(model)
     # TODO(josh11b): Expand this to include everything from the standard
     # SparseSoftmax Head.
     self.loss_key = loss_key
     self.label_key = label_key
     self.predicted_class_key = predicted_class_key
     self.weights_key = weights_key
     self.avg_loss = self.track_metric(metrics.Mean("Avg Loss"))
     self.accuracy = self.track_metric(metrics.Accuracy())
예제 #23
0
    def testWriteSummaries(self):
        """result() emits a summary record into the writer's log directory."""
        mean = metrics.Mean()
        mean([1, 10, 100])
        training_util.get_or_create_global_step()
        logdir = tempfile.mkdtemp()
        writer = summary_ops.create_summary_file_writer(
            logdir, max_queue=0, name="t0")
        with writer.as_default(), summary_ops.always_record_summaries():
            mean.result()  # As a side-effect will write summaries.

        self.assertTrue(gfile.Exists(logdir))
        event_files = gfile.ListDirectory(logdir)
        self.assertEqual(len(event_files), 1)
        records = list(
            tf_record.tf_record_iterator(os.path.join(logdir, event_files[0])))
        self.assertEqual(len(records), 2)
        event = event_pb2.Event()
        event.ParseFromString(records[1])
        # (1 + 10 + 100) / 3 == 37.
        self.assertEqual(event.summary.value[0].simple_value, 37.0)
예제 #24
0
 def __init__(self, model):
     """Composes a nested evaluator plus a directly owned mean metric."""
     super(DelegatingEvaluator, self).__init__(model)
     inner_evaluator = SimpleEvaluator(model)
     self.sub = self.add_evaluator("inner", inner_evaluator)
     self.mean = self.add_metric(metrics.Mean("outer-mean"))
예제 #25
0
 def __init__(self, model):
     """Registers a single mean metric on this evaluator."""
     super(SimpleEvaluator, self).__init__(model)
     mean_metric = metrics.Mean("mean")
     self.mean = self.add_metric(mean_metric)
예제 #26
0
 def testNamesWithSpaces(self):
     """A space in the metric name is kept; variable names use underscores."""
     spaced = metrics.Mean("has space")
     spaced(0)
     self.assertEqual(spaced.name, "has space")
     self.assertEqual(spaced.numer.name, "has_space/numer:0")
예제 #27
0
 def testWeightedMean(self):
     """Weights scale both the numerator and denominator of the mean."""
     mean = metrics.Mean()
     mean([1, 100, 100000], weights=[1, 0.2, 0.3])
     mean([500000, 5000, 500])  # weights of 1 each
     # numer = 1 + 20 + 30000 + 505500 = 535521; denom = 1.5 + 3 = 4.5.
     self.assertNear(535521 / 4.5, mean.result().numpy(), 0.001)
예제 #28
0
 def __init__(self, model):
     """Tracks a metric, then tracks it again to show re-tracking is allowed."""
     super(MetricTwiceEvaluator, self).__init__(model)
     self.m = self.track_metric(metrics.Mean("mean"))
     self.track_metric(self.m)  # Re-tracking the same metric is permitted.