Example #1
    def test_trivial_solutions_non_unitary(self):
        # 'non-unitary' matching: a single detection may be credited with
        # several ground truths, and several detections with one ground truth.
        map_computer = MeanAveragePrecisionMetric(0.0, 'non-unitary')
        # One large detection box covering five ground-truth boxes.
        map_computer.update(
            [[0, 0, 26, 26, 1, 0]],
            [[5, 2, 15, 9, 0],
             [18, 10, 26, 15, 0],
             [18, 10, 26, 15, 0],
             [18, 10, 26, 15, 0],
             [18, 10, 26, 15, 0]])
        _ = map_computer.compute()
        assert np.allclose(map_computer.precision_per_class[0], 1.0)
        assert np.allclose(map_computer.recall_per_class[0], 1.0)
        assert map_computer.number_true_detection_per_class[0] == 1
        assert map_computer.number_false_detection_per_class[0] == 0
        assert map_computer.number_found_ground_truth_per_class[0] == 5
        assert map_computer.number_missed_ground_truth_per_class[0] == 0
        map_computer.reset()
        # The symmetric case: five detections against one large ground truth.
        map_computer.update(
            [[5, 2, 15, 9, 1, 0],
             [18, 10, 26, 15, 1, 0],
             [18, 10, 26, 15, 1, 0],
             [18, 10, 26, 15, 1, 0],
             [18, 10, 26, 15, 1, 0]],
            [[0, 0, 26, 26, 0]])
        _ = map_computer.compute()
        assert np.allclose(map_computer.precision_per_class[0], 1.0)
        assert np.allclose(map_computer.recall_per_class[0], 1.0)
        assert map_computer.number_true_detection_per_class[0] == 5
        assert map_computer.number_false_detection_per_class[0] == 0
        assert map_computer.number_found_ground_truth_per_class[0] == 1
        assert map_computer.number_missed_ground_truth_per_class[0] == 0
Example #2
    def test_accumulate_pg_coco(self):
        # `detections` and `gt` are fixtures defined elsewhere in the test
        # module. Accumulating the same image twice must leave the mAP
        # unchanged, since all counts scale together.
        map1 = MeanAveragePrecisionMetric(0.01, 'coco')
        map1.update(detections[0], gt)
        map2 = MeanAveragePrecisionMetric(0.01, 'coco')
        map2.update(detections[0], gt)
        map2.update(detections[0], gt)
        assert np.isclose(map1.compute(), map2.compute())
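Why the duplication cannot change the result: precision and recall are ratios, and an identical second update doubles numerators and denominators together. A toy check of that arithmetic (a simplification; mAP additionally averages precision over the recall curve, which is likewise preserved):

    tp, fp, n_gt = 3, 1, 4  # invented counts for illustration
    assert tp / (tp + fp) == (2 * tp) / (2 * tp + 2 * fp)  # precision
    assert tp / n_gt == (2 * tp) / (2 * n_gt)              # recall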
Example #3
    def test_empty_detection(self):
        detections = np.array([])
        ground_truths = np.array([[2, 6, 11, 16, 0], [20, 11, 45, 25, 0]])
        map_computer = MeanAveragePrecisionMetric(0.5, 'coco')
        map_computer.update(detections, ground_truths)
        # With no detections, nothing can match: both ground truths are
        # missed and the mAP for the annotated class is 0.
        assert map_computer.compute() == 0.0
        assert map_computer.number_true_detection_per_class[0] == 0
        assert map_computer.number_false_detection_per_class[0] == 0
        assert map_computer.number_found_ground_truth_per_class[0] == 0
        assert map_computer.number_missed_ground_truth_per_class[0] == 2
Example #4
    def test_threshold_property(self):
        class MatchEngine(MatchEngineBase):
            # Minimal stub: only the two abstract methods need to exist, and
            # neither is exercised by this test.
            def compute_similarity_matrix(self, detections, ground_truths, label_mean_area=None):
                pass

            def trim_similarity_matrix(self, similarity_matrix, detections, ground_truths, label_mean_area=None):
                pass

        map_with_threshold = MeanAveragePrecisionMetric(0.5, 'coco', match_engine=MatchEngineIoU(0.5, 'coco'))
        map_without_threshold = MeanAveragePrecisionMetric(0.5, 'coco', match_engine=MatchEngine('coco'))

        assert map_with_threshold.threshold == 0.5
        assert map_without_threshold.threshold is None
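A minimal sketch of the property pattern these assertions imply: the metric mirrors its match engine's threshold and falls back to None when the engine has no such notion. The names are hypothetical, not the library's implementation:

    class MetricSketch:
        def __init__(self, match_engine):
            self._match_engine = match_engine

        @property
        def threshold(self):
            # Engines without a similarity threshold simply lack the attribute.
            return getattr(self._match_engine, 'threshold', None)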
Example #5
    def test_empty_equals(self):
        detections = np.array([[2, 6, 11, 16, 0.9, 0], [20, 11, 45, 25, 0.8, 0]])
        ground_truths = np.array([[2, 6, 11, 16, 0], [20, 11, 45, 25, 0]])
        map_computer = MeanAveragePrecisionMetric(0.5, 'coco')
        map_computer.update(detections, ground_truths)
        temp = map_computer.compute()
        # An update with empty detections and ground truths is a no-op.
        map_computer.update(np.array([]), np.array([]))
        assert map_computer.compute() == temp
Example #6
    def test_threshold_warning(self):
        import warnings
        warnings.filterwarnings("always", category=RuntimeWarning)

        class MatchEngine(MatchEngineBase):
            def compute_similarity_matrix(self, detections, ground_truths, label_mean_area=None):
                pass

            def trim_similarity_matrix(self, similarity_matrix, detections, ground_truths, label_mean_area=None):
                pass

        with pytest.warns(RuntimeWarning, match='Discrepancy between user provided threshold'):
            _ = MeanAveragePrecisionMetric(0.1, 'coco', match_engine=MatchEngineIoU(0.5, 'coco'))

        with pytest.warns(RuntimeWarning, match='Discrepancy between user provided threshold'):
            _ = MeanAveragePrecisionMetric(0.1, 'coco', match_engine=MatchEngine('coco'))
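One way such a check could look, assuming the constructor compares its threshold argument against the engine's; example #4 suggests the engine's value ultimately wins. A hypothetical helper, not the library's code:

    import warnings

    def warn_on_threshold_mismatch(user_threshold, engine):
        engine_threshold = getattr(engine, 'threshold', None)
        if engine_threshold != user_threshold:
            warnings.warn('Discrepancy between user provided threshold ({}) '
                          'and match engine threshold ({}).'
                          .format(user_threshold, engine_threshold),
                          RuntimeWarning)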
Example #7
    def test_empty_ground_truth(self):
        detections = np.array([[2, 6, 11, 16, 0.9, 0], [20, 11, 45, 25, 0.8, 0]])
        ground_truths = np.array([])
        map_computer = MeanAveragePrecisionMetric(0.5, 'coco')
        map_computer.update(detections, ground_truths)
        assert map_computer.compute() == 0.0
        # Both detections are false positives; with no ground truths there is
        # nothing to find or miss.
        assert map_computer.number_true_detection_per_class[0] == 0
        assert map_computer.number_false_detection_per_class[0] == 2
        assert map_computer.number_found_ground_truth_per_class[0] == 0
        assert map_computer.number_missed_ground_truth_per_class[0] == 0

        assert 0 in map_computer.precision_per_class
        assert map_computer.precision_per_class[0] == 0
        assert 0 in map_computer.recall_per_class
        # Recall is 0/0 here, which surfaces as NaN rather than 0.
        assert np.isnan(map_computer.recall_per_class[0])
        assert 0 in map_computer.average_precision_per_class

        assert 0 in map_computer.ground_truth_labels
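The arithmetic behind those edge cases: two false positives give precision 0/2 == 0, while recall is 0/0 with no ground truths, which numpy represents as NaN:

    import numpy as np

    tp, fp, n_gt = 0, 2, 0
    precision = tp / (tp + fp)                      # 0.0
    with np.errstate(invalid='ignore'):             # silence the 0/0 warning
        recall = np.float64(tp) / np.float64(n_gt)  # nan
    assert precision == 0.0 and np.isnan(recall)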
Example #8
    def test_empty(self):
        detections = np.array([])
        ground_truths = np.array([])
        map_computer = MeanAveragePrecisionMetric(0.5, 'coco')
        map_computer.update(detections, ground_truths)
        # No class was ever seen, so the mean over classes is NaN; a second
        # compute() under pytest.warns captures numpy's warning.
        assert np.isnan(map_computer.compute())
        with pytest.warns(RuntimeWarning, match='Mean of empty slice'):
            map_computer.compute()
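Both the NaN and the warning text match what numpy itself produces when reducing an empty array, which is presumably where the mean over zero classes ends up:

    import numpy as np
    import pytest

    with pytest.warns(RuntimeWarning, match='Mean of empty slice'):
        assert np.isnan(np.mean([]))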
Example #9
    def test_match_engine_override(self):
        class MatchEngine(MatchEngineBase):
            def compute_similarity_matrix(self, detections, ground_truths, label_mean_area=None):
                pass

            def trim_similarity_matrix(self, similarity_matrix, detections, ground_truths, label_mean_area=None):
                pass

        # A user-supplied engine replaces the default matcher wholesale.
        # (Renamed from `map` to avoid shadowing the builtin.)
        metric = MeanAveragePrecisionMetric(0.5, 'coco', match_engine=MatchEngine('coco'))

        assert isinstance(metric.match_engine, MatchEngine)
Example #10
    def __init__(self, beta, thresholds, **kwargs):
        """Initialize instance."""
        if not isinstance(thresholds, (tuple, list)):
            raise TypeError(
                "Argument thresholds should be list or tuple, but given {}".
                format(type(thresholds)))
        if not (isinstance(beta, (int, float)) and beta > 0):
            raise ValueError("Argument beta should be positive float")

        # One underlying mAP computer per requested matching threshold.
        self._map_computers = [
            MeanAveragePrecisionMetric(threshold=t, **kwargs)
            for t in thresholds
        ]
        self.beta = beta
        self.score = self.score_per_class = self._internal_score_per_class = self._counter = None
        self.reset()
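For reference, the textbook F-beta score that a beta parameter like this one usually feeds into; whether this class applies exactly this formula is an assumption:

    def f_beta(precision, recall, beta):
        """Standard F-beta: weighs recall beta times as heavily as precision."""
        if precision == 0 and recall == 0:
            return 0.0
        return (1 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall)

    assert f_beta(0.5, 0.5, beta=1) == 0.5  # beta=1 reduces to F1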
Example #11
def _ground_truth_label_test(detections, ground_truths, ground_truth_labels_list):
    map_computer = MeanAveragePrecisionMetric(0.5, 'coco')
    map_computer.update(detections, ground_truths)
    assert sorted(map_computer.ground_truth_labels) == sorted(ground_truth_labels_list)
    map_computer.compute()
    # Every per-class mapping must be keyed by exactly the expected labels.
    for per_class in (map_computer.precision_per_class,
                      map_computer.recall_per_class,
                      map_computer.average_precision_per_class,
                      map_computer.number_false_detection_per_class,
                      map_computer.number_found_ground_truth_per_class,
                      map_computer.number_missed_ground_truth_per_class,
                      map_computer.number_true_detection_per_class):
        assert sorted(per_class.keys()) == sorted(ground_truth_labels_list)
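Hypothetical usage of the helper (the data is invented for illustration; per example #7, labels seen only in detections also show up, so the expected list is the union of detection and ground-truth labels):

    import numpy as np

    _ground_truth_label_test(
        detections=np.array([[2, 6, 11, 16, 0.9, 1]]),
        ground_truths=np.array([[2, 6, 11, 16, 1], [20, 11, 45, 25, 3]]),
        ground_truth_labels_list=[1, 3])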
Example #12
    def test_match_algorithm_warning(self):
        # 'xview' passed to the metric vs. 'coco' in the engine triggers the
        # discrepancy warning.
        with pytest.warns(RuntimeWarning, match='Discrepancy between user provided match_algorithm'):
            _ = MeanAveragePrecisionMetric(0.5, 'xview', match_engine=MatchEngineIoU(0.5, 'coco'))
Example #13
    def test_asymmetrical_empty_ground_truth(self):
        # Class 0: one detection that exactly matches its ground truth (AP 1.0).
        # Class 1: one detection with no ground truth at all (AP 0.0).
        detections = np.array([[2, 6, 11, 16, 0.9, 0], [20, 11, 45, 25, 0.8, 1]])
        ground_truths = np.array([[2, 6, 11, 16, 0]])
        map_computer = MeanAveragePrecisionMetric(0.5, 'coco')
        map_computer.update(detections, ground_truths)
        # Mean over the two classes: (1.0 + 0.0) / 2 == 0.5.
        assert np.isclose(map_computer.compute(), 0.5)