def testTwoMeans(self):
  # Verify two metrics with the same class and name don't accidentally
  # share state: constructing and using a second one raises a ValueError.
  m1 = metrics.Mean()
  m1(0)
  with self.assertRaises(ValueError):
    m2 = metrics.Mean()
    m2(2)
def testTwoMeans(self):
  # Verify two metrics with the same class and name don't
  # accidentally share state.
  m1 = metrics.Mean()
  m1(0)
  m2 = metrics.Mean()
  m2(2)
  self.assertAllEqual(0.0, m1.result())
  self.assertAllEqual(2.0, m2.result())
def testTwoMeansGraph(self):
  # Verify that two metrics with the same name in the same graph raise a
  # ValueError.
  with context.graph_mode():
    m1 = metrics.Mean()
    m1(0)
    with self.assertRaises(ValueError):
      m2 = metrics.Mean()
      m2(2)
def testTwoMeans(self):
  # Verify two metrics with the same class and name don't
  # accidentally share state.
  m1 = metrics.Mean()
  m2 = metrics.Mean()
  m1(0)
  m2(2)
  self.assertEqual(0, m1.result().numpy())
  self.assertEqual(2, m2.result().numpy())
  self.assertNotEqual(m1.name, m2.name)
def testNamesWithSpaces(self):
  # Verify that names with spaces are legal: spaces are replaced with
  # underscores in the underlying variable names, and a duplicate name is
  # uniquified with a numeric suffix.
  m1 = metrics.Mean("has space")
  m2 = metrics.Mean("has space")
  m2(2)
  m1(0)
  self.assertEqual(m1.name, "has space")
  self.assertEqual(m1.numer.name, "has_space/numer:0")
  self.assertEqual(m2.name, "has space_1")
  self.assertEqual(m2.numer.name, "has_space_1/numer:0")
def testTwoMeansGraph(self):
  # Verify two metrics with the same class and name don't
  # accidentally share state.
  with context.graph_mode(), self.test_session() as sess:
    m1 = metrics.Mean()
    m2 = metrics.Mean()
    accumulate1 = m1(0)
    accumulate2 = m2(2)
    variables.global_variables_initializer().run()
    sess.run([accumulate1, accumulate2])
    self.assertEqual(0, m1.result().eval())
    self.assertEqual(2, m2.result().eval())
def testMetricsChain(self):
  with context.graph_mode(), self.test_session():
    m1 = metrics.Mean()
    m2 = metrics.Mean(name="m2")
    update_m2 = m2(3.0)
    update_m2_2 = m2(m1(1.0))
    m1.init_variables().run()
    m2.init_variables().run()
    update_m2.eval()
    update_m2_2.eval()
    # m2 accumulated 3.0 and m1's result (1.0), so its mean is 2.0.
    self.assertAllEqual(m2.result().eval(), 2.0)
    self.assertAllEqual(m1.result().eval(), 1.0)
def testMeanDtype(self):
  # Can override the default dtype of float64.
  m = metrics.Mean(dtype=dtypes.float32)
  m([0, 2])
  self.assertEqual(1, m.result().numpy())
  self.assertEqual(dtypes.float32, m.dtype)
  self.assertEqual(dtypes.float32, m.result().dtype)
def testGraphAndEagerTensorWhileLoopDoubleCall(self):
  m = metrics.Mean()
  init_value = constant_op.constant(1)
  cond = lambda i: math_ops.less(i, 3)

  def body(x):
    with ops.control_dependencies([m(x)]):
      return math_ops.add(x, 1)

  accumulate = control_flow_ops.while_loop(cond, body, [init_value])
  result = m.result()
  self.evaluate(m.init_variables())
  self.evaluate(accumulate)
  self.assertEqual(self.evaluate(result), 1.5)
  # Second init resets all the variables.
  self.evaluate(m.init_variables())
  inputs = ops.convert_to_tensor([2.0, 3.0])
  self.evaluate(m(inputs))
  if ops.context.executing_eagerly():
    self.evaluate(control_flow_ops.while_loop(cond, body, [init_value]))
  else:
    # Reuse the loop operators in graph mode.
    self.evaluate(accumulate)
  value = m.value()
  self.assertEqual(self.evaluate(value), 2.0)
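# A note on the expected values in the test above: the first while_loop
# feeds x == 1 and x == 2 into the metric, so result == (1 + 2) / 2 == 1.5.
# After the reset, the metric sees [2.0, 3.0] plus 1 and 2 from a second
# pass over the loop, so value == (2 + 3 + 1 + 2) / 4 == 2.0.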
def testInitVariables(self):
  m = metrics.Mean()
  m([1, 10, 100, 1000])
  m([10000.0, 100000.0])
  self.assertEqual(111111.0 / 6, m.result().numpy())
  m.init_variables()
  m(7)
  self.assertEqual(7.0, m.result().numpy())
def testBuildMean(self):
  # Verify that calling build() on a Mean and then calling the metric
  # doesn't recreate its variables.
  m = metrics.Mean()
  m.build()
  old_numer = m.numer
  m(0.0)
  self.assertTrue(old_numer is m.numer)
def testMean(self):
  m = metrics.Mean()
  m([1, 10, 100])
  m(1000)
  m([10000.0, 100000.0])
  self.assertEqual(111111.0 / 6, m.result().numpy())
  self.assertEqual(dtypes.float64, m.dtype)
  self.assertEqual(dtypes.float64, m.result().dtype)
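# For orientation, a minimal sketch of the accumulator model these tests
# exercise: a running weighted sum (numer) and a running weight total
# (denom), with the result being numer / denom. This is an illustrative
# assumption, not the library implementation; SketchMean and its helpers
# are hypothetical names.
class SketchMean(object):

  def __init__(self):
    self.numer = 0.0  # running sum of weighted values
    self.denom = 0.0  # running sum of weights

  def __call__(self, values, weights=None):
    if not isinstance(values, (list, tuple)):
      values = [values]
    if weights is None:
      weights = [1.0] * len(values)
    self.numer += sum(float(v) * w for v, w in zip(values, weights))
    self.denom += float(sum(weights))

  def result(self):
    return self.numer / self.denom


# Reproduces the 111111.0 / 6 expectation from testMean above:
#   sketch = SketchMean()
#   sketch([1, 10, 100])
#   sketch(1000)
#   sketch([10000.0, 100000.0])
#   assert sketch.result() == 111111.0 / 6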
def testVariableCollections(self):
  with context.graph_mode(), ops.Graph().as_default():
    m = metrics.Mean()
    m(1000)
    self.assertEqual(
        set(m.variables),
        set(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)))
    self.assertEqual(
        set(m.variables),
        set(ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
def testGraph(self):
  with context.graph_mode(), self.test_session() as sess:
    m = metrics.Mean()
    p = array_ops.placeholder(dtypes.float32)
    accumulate = m(p)
    variables.global_variables_initializer().run()
    sess.run(accumulate, feed_dict={p: [1, 10, 100]})
    sess.run(accumulate, feed_dict={p: 1000})
    sess.run(accumulate, feed_dict={p: [10000, 100000]})
    self.assertAllEqual(m.result().eval(), 111111.0 / 6)
def testUseGlobalVariablesCollections(self):
  with context.graph_mode(), ops.Graph().as_default():
    m = metrics.Mean(use_global_variables=True)
    m(1000)
    self.assertEqual(
        set(m.variables),
        set(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
    self.assertEqual(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES), [])
    self.assertEqual(
        set(m.variables),
        set(ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
def testSummaryArg(self):
  m = metrics.Mean()
  m([1, 10, 100])
  m(1000)
  m([10000.0, 100000.0])
  self.assertEqual(111111.0 / 6, m.result(write_summary=True).numpy())
  self.assertEqual(111111.0 / 6, m.result(write_summary=False).numpy())
  with self.assertRaises(ValueError):
    m.result(write_summary=5)
  with self.assertRaises(ValueError):
    m.result(write_summary=[True])
def testSharedMetric(self):

  class MetricArgEvaluator(evaluator.Evaluator):

    def __init__(self, model, m):
      super(MetricArgEvaluator, self).__init__(model)
      self.m = self.track_metric(m)

  metric = metrics.Mean("mean")
  model = IdentityModel()
  e = MetricArgEvaluator(model, metric)
  with self.assertRaisesRegexp(ValueError, "already added"):
    MetricArgEvaluator(model, metric)
  del e
def testWriteSummaries(self):
  m = metrics.Mean()
  m([1, 10, 100])
  training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name="t0").as_default(), summary_ops.always_record_summaries():
    m.result()  # As a side-effect will write summaries.
  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(len(events), 2)
  # mean([1, 10, 100]) == 111 / 3 == 37.
  self.assertEqual(events[1].summary.value[0].simple_value, 37.0)
def testGraphAndEagerTensor(self):
  m = metrics.Mean()
  inputs = ops.convert_to_tensor([1.0, 2.0])
  accumulate = m(inputs)
  result = m.result()
  self.evaluate(m.init_variables())
  self.evaluate(accumulate)
  self.assertEqual(self.evaluate(result), 1.5)
  # Second init resets all the variables.
  self.evaluate(m.init_variables())
  inputs = ops.convert_to_tensor([2.0, 3.0])
  self.evaluate(m(inputs))
  value = m.value()
  self.assertEqual(self.evaluate(value), 2.5)
def testGraphWithPlaceholder(self):
  with context.graph_mode(), self.test_session() as sess:
    m = metrics.Mean()
    p = array_ops.placeholder(dtypes.float32)
    accumulate = m(p)
    init_op = m.init_variables()
    init_op.run()
    sess.run(accumulate, feed_dict={p: [1, 10, 100]})
    sess.run(accumulate, feed_dict={p: 1000})
    sess.run(accumulate, feed_dict={p: [10000, 100000]})
    self.assertAllEqual(m.result().eval(), 111111.0 / 6)
    # Second init resets all the variables.
    init_op.run()
    sess.run(accumulate, feed_dict={p: 7})
    self.assertAllEqual(m.result().eval(), 7)
def testSaveRestore(self):
  checkpoint_directory = self.get_temp_dir()
  checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
  mean = metrics.Mean()
  checkpoint = checkpointable_utils.Checkpoint(mean=mean)
  mean.build()
  mean._built = True
  self.evaluate(mean.init_variables())
  self.evaluate(mean(100.))
  self.evaluate(mean(200.))
  save_path = checkpoint.save(checkpoint_prefix)
  self.evaluate(mean(1000.))
  checkpoint.restore(save_path).assert_consumed().run_restore_ops()
  self.evaluate(mean(300.))
  self.assertAllEqual(200., self.evaluate(mean.value()))

  restore_mean = metrics.Mean()
  restore_checkpoint = checkpointable_utils.Checkpoint(mean=restore_mean)
  status = restore_checkpoint.restore(save_path)
  restore_update = restore_mean(300.)
  status.assert_consumed().run_restore_ops()
  self.evaluate(restore_update)
  self.assertAllEqual(200., self.evaluate(restore_mean.value()))
  self.assertEqual(3, self.evaluate(restore_mean.denom))
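# A note on the expected values in testSaveRestore: at save time the
# accumulated state is numer == 300 (100 + 200) and denom == 2. Restoring
# discards the later 1000-update, so a further update of 300 gives
# numer == 600 and denom == 3, and both means come out to 600 / 3 == 200.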
def __init__(self, model, loss_key="loss", label_key="label",
             predicted_class_key="predicted_class", weights_key="weights"):
  super(SparseSoftmaxEvaluator, self).__init__(model)
  # TODO(josh11b): Expand this to include everything from the standard
  # SparseSoftmax Head.
  self.avg_loss = self.track_metric(metrics.Mean("Avg Loss"))
  self.accuracy = self.track_metric(metrics.Accuracy())
  self.loss_key = loss_key
  self.label_key = label_key
  self.predicted_class_key = predicted_class_key
  self.weights_key = weights_key
def testWriteSummaries(self):
  m = metrics.Mean()
  m([1, 10, 100])
  training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with summary_ops.create_summary_file_writer(
      logdir, max_queue=0,
      name="t0").as_default(), summary_ops.always_record_summaries():
    m.result()  # As a side-effect will write summaries.
  self.assertTrue(gfile.Exists(logdir))
  files = gfile.ListDirectory(logdir)
  self.assertEqual(len(files), 1)
  records = list(
      tf_record.tf_record_iterator(os.path.join(logdir, files[0])))
  self.assertEqual(len(records), 2)
  event = event_pb2.Event()
  event.ParseFromString(records[1])
  self.assertEqual(event.summary.value[0].simple_value, 37.0)
def __init__(self, model):
  super(DelegatingEvaluator, self).__init__(model)
  self.sub = self.add_evaluator("inner", SimpleEvaluator(model))
  self.mean = self.add_metric(metrics.Mean("outer-mean"))
def __init__(self, model):
  super(SimpleEvaluator, self).__init__(model)
  self.mean = self.add_metric(metrics.Mean("mean"))
def testNamesWithSpaces(self):
  m1 = metrics.Mean("has space")
  m1(0)
  self.assertEqual(m1.name, "has space")
  self.assertEqual(m1.numer.name, "has_space/numer:0")
def testWeightedMean(self):
  m = metrics.Mean()
  m([1, 100, 100000], weights=[1, 0.2, 0.3])
  m([500000, 5000, 500])  # weights of 1 each
  self.assertNear(535521 / 4.5, m.result().numpy(), 0.001)
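# The expected value above works out as follows (an unweighted update
# behaves as if each value had weight 1):
#   numer == 1 * 1 + 100 * 0.2 + 100000 * 0.3 + 500000 + 5000 + 500 == 535521
#   denom == 1 + 0.2 + 0.3 + 1 + 1 + 1 == 4.5
# so the weighted mean is 535521 / 4.5.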
def __init__(self, model):
  super(MetricTwiceEvaluator, self).__init__(model)
  self.m = self.track_metric(metrics.Mean("mean"))
  self.track_metric(self.m)  # okay to track same metric again