def testAccuracyTopKMetric(self):
  # Random predictions/targets in [1, 5); no zeros, so no positions count as padding.
  predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))
  targets = np.random.randint(1, 5, size=(12, 12, 12, 1))
  expected = np.mean((predictions == targets).astype(float))
  with self.test_session() as session:
    predicted = tf.one_hot(predictions, depth=5, dtype=tf.float32)
    # Top-1 accuracy should match the plain element-wise accuracy.
    scores1, _ = metrics.padded_accuracy_topk(
        predicted, tf.constant(targets, dtype=tf.int32), k=1)
    # With k=7 > depth=5, every class is in the top-k, so accuracy is 1.0.
    scores2, _ = metrics.padded_accuracy_topk(
        predicted, tf.constant(targets, dtype=tf.int32), k=7)
    a1 = tf.reduce_mean(scores1)
    a2 = tf.reduce_mean(scores2)
    session.run(tf.global_variables_initializer())
    actual1, actual2 = session.run([a1, a2])
  self.assertAlmostEqual(actual1, expected)
  self.assertAlmostEqual(actual2, 1.0)