def setUp(self):
  """Builds a _VRDDetectionEvaluation fixture and verifies its metrics.

  Registers ground truth for three images (each entry is a pair of
  subject/object boxes typed as `vrd_evaluation.vrd_box_data_type`, with a
  (subject, relation, object) label triple typed as
  `vrd_evaluation.label_data_type`), adds two scored detections for 'img1',
  runs `evaluate()`, and compares the resulting metrics against
  hand-computed expected values.
  """
  self.vrd_eval = vrd_evaluation._VRDDetectionEvaluation(
      matching_iou_threshold=0.5)

  # Ground truth: 'img1' has two relation instances.
  image_key1 = 'img1'
  groundtruth_box_tuples1 = np.array(
      [([0, 0, 1, 1], [1, 1, 2, 2]), ([0, 0, 1, 1], [1, 2, 2, 3])],
      dtype=vrd_evaluation.vrd_box_data_type)
  groundtruth_class_tuples1 = np.array(
      [(1, 2, 3), (1, 4, 3)], dtype=vrd_evaluation.label_data_type)
  self.vrd_eval.add_single_ground_truth_image_info(
      image_key1, groundtruth_box_tuples1, groundtruth_class_tuples1)

  # Ground truth: 'img2' has one relation instance.
  image_key2 = 'img2'
  groundtruth_box_tuples2 = np.array(
      [([0, 0, 1, 1], [1, 1, 2, 2])],
      dtype=vrd_evaluation.vrd_box_data_type)
  groundtruth_class_tuples2 = np.array(
      [(1, 4, 3)], dtype=vrd_evaluation.label_data_type)
  self.vrd_eval.add_single_ground_truth_image_info(
      image_key2, groundtruth_box_tuples2, groundtruth_class_tuples2)

  # Ground truth: 'img3' has one relation instance.
  image_key3 = 'img3'
  groundtruth_box_tuples3 = np.array(
      [([0, 0, 1, 1], [1, 1, 2, 2])],
      dtype=vrd_evaluation.vrd_box_data_type)
  groundtruth_class_tuples3 = np.array(
      [(1, 2, 4)], dtype=vrd_evaluation.label_data_type)
  self.vrd_eval.add_single_ground_truth_image_info(
      image_key3, groundtruth_box_tuples3, groundtruth_class_tuples3)

  # Detections: two candidates on 'img1' with the same label triple; the
  # second (score 0.8) matches groundtruth exactly, the first (score 0.7)
  # is slightly offset.
  image_key = 'img1'
  detected_box_tuples = np.array(
      [([0, 0.3, 1, 1], [1.1, 1, 2, 2]), ([0, 0, 1, 1], [1, 1, 2, 2])],
      dtype=vrd_evaluation.vrd_box_data_type)
  detected_class_tuples = np.array(
      [(1, 2, 3), (1, 2, 3)], dtype=vrd_evaluation.label_data_type)
  detected_scores = np.array([0.7, 0.8], dtype=float)
  self.vrd_eval.add_single_detected_image_info(
      image_key, detected_box_tuples, detected_scores, detected_class_tuples)

  metrics = self.vrd_eval.evaluate()

  # Hand-computed expectations: one of four groundtruth instances is
  # recovered, hence recall 0.25 at both cutoffs.
  expected_weighted_average_precision = 0.25
  expected_mean_average_precision = 0.16666666666666
  expected_precision = np.array([1., 0.5], dtype=float)
  expected_recall = np.array([0.25, 0.25], dtype=float)
  expected_recall_50 = 0.25
  expected_recall_100 = 0.25
  expected_median_rank_50 = 0
  expected_median_rank_100 = 0

  self.assertAlmostEqual(expected_weighted_average_precision,
                         metrics.weighted_average_precision)
  # NOTE(review): the original asserted mean_average_precision twice
  # (verbatim copy-paste); the duplicate has been removed.
  self.assertAlmostEqual(expected_mean_average_precision,
                         metrics.mean_average_precision)
  self.assertAllClose(expected_precision, metrics.precisions)
  self.assertAllClose(expected_recall, metrics.recalls)
  self.assertAlmostEqual(expected_recall_50, metrics.recall_50)
  self.assertAlmostEqual(expected_recall_100, metrics.recall_100)
  self.assertAlmostEqual(expected_median_rank_50, metrics.median_rank_50)
  self.assertAlmostEqual(expected_median_rank_100, metrics.median_rank_100)