Example no. 1
0
 def test_trivial_solutions_non_unitary(self):
     """In 'non-unitary' mode a single box covering many ground truths (and the
     symmetric case) must score perfect precision and recall for class 0."""
     metric = MeanAveragePrecisionMetric(0.0, 'non-unitary')
     small_boxes = [[5, 2, 15, 9, 0],
                    [18, 10, 26, 15, 0],
                    [18, 10, 26, 15, 0],
                    [18, 10, 26, 15, 0],
                    [18, 10, 26, 15, 0]]
     # One detection spanning the whole image: every ground truth is found.
     metric.update([[0, 0, 26, 26, 1, 0]], small_boxes)
     metric.compute()
     assert np.allclose(metric.precision_per_class[0], 1.0)
     assert np.allclose(metric.recall_per_class[0], 1.0)
     assert metric.number_true_detection_per_class[0] == 1
     assert metric.number_false_detection_per_class[0] == 0
     assert metric.number_found_ground_truth_per_class[0] == 5
     assert metric.number_missed_ground_truth_per_class[0] == 0
     metric.reset()
     # Symmetric case: the five small boxes become detections (confidence 1,
     # class 0) matched against one big ground truth.
     small_detections = [box[:4] + [1, 0] for box in small_boxes]
     metric.update(small_detections, [[0, 0, 26, 26, 0]])
     metric.compute()
     assert np.allclose(metric.precision_per_class[0], 1.0)
     assert np.allclose(metric.recall_per_class[0], 1.0)
     assert metric.number_true_detection_per_class[0] == 5
     assert metric.number_false_detection_per_class[0] == 0
     assert metric.number_found_ground_truth_per_class[0] == 1
     assert metric.number_missed_ground_truth_per_class[0] == 0
Example no. 2
0
 def test_accumulate_pg_coco(self):
     """Feeding the identical frame twice must not change the COCO mAP value."""
     single = MeanAveragePrecisionMetric(0.01, 'coco')
     single.update(detections[0], gt)
     double = MeanAveragePrecisionMetric(0.01, 'coco')
     for _ in range(2):
         double.update(detections[0], gt)
     assert np.isclose(single.compute(), double.compute())
Example no. 3
0
 def test_empty_equals(self):
     """An update with empty arrays must leave the computed mAP unchanged."""
     dets = np.array([[2, 6, 11, 16, 0.9, 0], [20, 11, 45, 25, 0.8, 0]])
     gts = np.array([[2, 6, 11, 16, 0], [20, 11, 45, 25, 0]])
     metric = MeanAveragePrecisionMetric(0.5, 'coco')
     metric.update(dets, gts)
     before = metric.compute()
     metric.update(np.array([]), np.array([]))
     assert metric.compute() == before
Example no. 4
0
 def test_empty(self):
     """With neither detections nor ground truths the mAP is NaN and numpy warns."""
     metric = MeanAveragePrecisionMetric(0.5, 'coco')
     metric.update(np.array([]), np.array([]))
     assert np.isnan(metric.compute())
     # Averaging over zero classes triggers numpy's empty-slice RuntimeWarning.
     with pytest.warns(RuntimeWarning, match='Mean of empty slice'):
         metric.compute()
Example no. 5
0
 def _ground_truth_label_test(detections, ground_truths, ground_truth_labels_list):
     """Assert every per-class result dict is keyed by exactly the expected labels."""
     expected = sorted(ground_truth_labels_list)
     metric = MeanAveragePrecisionMetric(0.5, 'coco')
     metric.update(detections, ground_truths)
     assert sorted(metric.ground_truth_labels) == expected
     metric.compute()
     # Every per-class output must share the same key set.
     per_class_outputs = (metric.precision_per_class,
                          metric.recall_per_class,
                          metric.average_precision_per_class,
                          metric.number_false_detection_per_class,
                          metric.number_found_ground_truth_per_class,
                          metric.number_missed_ground_truth_per_class,
                          metric.number_true_detection_per_class)
     for output in per_class_outputs:
         assert sorted(output.keys()) == expected
Example no. 6
0
 def test_empty_detection(self):
     """No detections: mAP is zero and every ground truth counts as missed."""
     gts = np.array([[2, 6, 11, 16, 0], [20, 11, 45, 25, 0]])
     metric = MeanAveragePrecisionMetric(0.5, 'coco')
     metric.update(np.array([]), gts)
     assert metric.compute() == 0.0
     assert metric.number_true_detection_per_class[0] == 0
     assert metric.number_false_detection_per_class[0] == 0
     assert metric.number_found_ground_truth_per_class[0] == 0
     assert metric.number_missed_ground_truth_per_class[0] == 2
Example no. 7
0
    def test_empty_ground_truth(self):
        """No ground truths: mAP is zero, all detections are false positives,
        and recall is undefined (NaN) for the detected class."""
        dets = np.array([[2, 6, 11, 16, 0.9, 0], [20, 11, 45, 25, 0.8, 0]])
        metric = MeanAveragePrecisionMetric(0.5, 'coco')
        metric.update(dets, np.array([]))
        assert metric.compute() == 0.0
        assert metric.number_true_detection_per_class[0] == 0
        assert metric.number_false_detection_per_class[0] == 2
        assert metric.number_found_ground_truth_per_class[0] == 0
        assert metric.number_missed_ground_truth_per_class[0] == 0

        # Class 0 must still appear in every per-class output even though it
        # never occurred in the ground truth.
        assert 0 in metric.precision_per_class
        assert metric.precision_per_class[0] == 0
        assert 0 in metric.recall_per_class
        assert np.isnan(metric.recall_per_class[0])
        assert 0 in metric.average_precision_per_class

        assert 0 in metric.ground_truth_labels
Example no. 8
0
 def test_asymmetrical_empty_ground_truth(self):
     """One perfectly matched class plus one class with only a detection
     averages out to a mAP of 0.5."""
     dets = np.array([[2, 6, 11, 16, 0.9, 0], [20, 11, 45, 25, 0.8, 1]])
     gts = np.array([[2, 6, 11, 16, 0]])
     metric = MeanAveragePrecisionMetric(0.5, 'coco')
     metric.update(dets, gts)
     assert np.isclose(metric.compute(), 0.5)