Example #1
0
    def test_filter_predictions_by_min_confidence_with_remove(self):
        """Boxes scoring below min_confidence are physically removed when remove_filtered is set."""
        filter_config = [{'type': 'filter', 'apply_to': 'prediction', 'min_confidence': 0.5, 'remove_filtered': True}]
        actual = [DetectionPrediction(scores=[0.3, 0.8]), DetectionPrediction(scores=[0.4, 0.5])]
        # Only the entries with score >= 0.5 survive in each prediction.
        expected = [DetectionPrediction(scores=[0.8]), DetectionPrediction(scores=[0.5])]

        postprocess_data(PostprocessingExecutor(filter_config), [None, None], actual)

        assert np.array_equal(actual, expected)
Example #2
0
    def test_filter_predictions_by_labels_with_remove(self):
        """A box carrying a listed label is removed from the prediction in place."""
        filter_config = [{'type': 'filter', 'apply_to': 'prediction', 'labels': ['to_be_filtered'], 'remove_filtered': True}]
        actual = DetectionPrediction(labels=['some_label', 'to_be_filtered'])
        expected = DetectionPrediction(labels=['some_label'])

        postprocess_data(PostprocessingExecutor(filter_config), [None], [actual])

        assert actual == expected
Example #3
0
    def test_filter_predictions_by_labels_with_ignore(self):
        """With remove_filtered off, matching boxes are only marked as difficult, not removed."""
        filter_config = [{'type': 'filter', 'apply_to': 'prediction', 'labels': ['to_be_filtered'], 'remove_filtered': False}]
        actual = DetectionPrediction(labels=['some_label', 'to_be_filtered'])
        # Box at index 1 is flagged in metadata while the labels stay intact.
        expected = DetectionPrediction(labels=['some_label', 'to_be_filtered'], metadata={'difficult_boxes': [1]})

        postprocess_data(PostprocessingExecutor(filter_config), [None], [actual])

        assert actual == expected
Example #4
0
    def test_filter_container_annotations_and_container_predictions_by_labels_with_remove_using_apply_to(self):
        """apply_to=all removes matching boxes from both container annotations and predictions."""
        filter_config = [{'type': 'filter', 'apply_to': 'all', 'labels': ['to_be_filtered'], 'remove_filtered': True}]
        actual_prediction = ContainerPrediction({'prediction': DetectionPrediction(labels=['some_label', 'to_be_filtered'])})
        actual_annotation = ContainerAnnotation({'annotation': DetectionAnnotation(labels=['some_label', 'to_be_filtered'])})
        expected_prediction = ContainerPrediction({'prediction': DetectionPrediction(labels=['some_label'])})
        expected_annotation = ContainerAnnotation({'annotation': DetectionAnnotation(labels=['some_label'])})

        postprocess_data(PostprocessingExecutor(filter_config), [actual_annotation], [actual_prediction])

        assert actual_prediction == expected_prediction
        assert actual_annotation == expected_annotation
Example #5
0
    def test_filter_predictions_by_labels_with_remove_on_container(self):
        """prediction_source selects the named entry inside a container prediction for filtering."""
        filter_config = [{'type': 'filter', 'prediction_source': 'detection_out',
                          'labels': ['to_be_filtered'], 'remove_filtered': True}]
        actual = ContainerPrediction(
            {'detection_out': DetectionPrediction(labels=['some_label', 'to_be_filtered'])}
        )
        expected = ContainerPrediction({'detection_out': DetectionPrediction(labels=['some_label'])})

        postprocess_data(PostprocessingExecutor(filter_config), [None], [actual])

        assert actual == expected
Example #6
0
    def test_filter_predictions_by_min_confidence_with_ignore(self):
        """Low-confidence boxes are flagged as difficult instead of removed when remove_filtered is off."""
        filter_config = [{'type': 'filter', 'apply_to': 'prediction', 'min_confidence': 0.5, 'remove_filtered': False}]
        actual = [DetectionPrediction(scores=[0.3, 0.8]), DetectionPrediction(scores=[0.5, 0.4])]
        # Each prediction keeps its scores; the index of every score < 0.5 lands in metadata.
        expected = [
            DetectionPrediction(scores=[0.3, 0.8], metadata={'difficult_boxes': [0]}),
            DetectionPrediction(scores=[0.5, 0.4], metadata={'difficult_boxes': [1]})
        ]

        postprocess_data(PostprocessingExecutor(filter_config), [None, None], actual)

        assert np.array_equal(actual, expected)
Example #7
0
    def test_filter_regular_annotations_and_regular_predictions_by_labels_with_ignore_using_apply_to(self):
        """apply_to=all with ignore marks matching boxes difficult on both plain annotation and prediction."""
        filter_config = [{'type': 'filter', 'apply_to': 'all', 'labels': ['to_be_filtered'], 'remove_filtered': False}]
        actual_prediction = DetectionPrediction(labels=['some_label', 'to_be_filtered'])
        actual_annotation = DetectionAnnotation(labels=['some_label', 'to_be_filtered'])
        expected_prediction = DetectionPrediction(
            labels=['some_label', 'to_be_filtered'], metadata={'difficult_boxes': [1]})
        expected_annotation = DetectionAnnotation(
            labels=['some_label', 'to_be_filtered'], metadata={'difficult_boxes': [1]})

        postprocess_data(PostprocessingExecutor(filter_config), [actual_annotation], [actual_prediction])

        assert actual_prediction == expected_prediction
        assert actual_annotation == expected_annotation
Example #8
0
    def test_filter_predictions_by_height_range_with_remove(self):
        """Boxes whose height falls outside the configured range are removed."""
        filter_config = [{'type': 'filter', 'apply_to': 'prediction', 'height_range': '(10.0, 20.0)', 'remove_filtered': True}]
        actual = [
            DetectionPrediction(y_mins=[5.0, 10.0], y_maxs=[15.0, 40.0]),
            DetectionPrediction(y_mins=[5.0, 10.0], y_maxs=[35.0, 50.0])
        ]
        # First prediction keeps only its height-10 box; the second loses both boxes.
        expected = [
            DetectionPrediction(y_mins=[5.0], y_maxs=[15.0]),
            DetectionPrediction(y_mins=[], y_maxs=[])
        ]

        postprocess_data(PostprocessingExecutor(filter_config), [None, None], actual)

        assert np.array_equal(actual, expected)
Example #9
0
    def test_filter_by_unknown_visibility_does_nothing_with_predictions(self):
        """An unrecognized min_visibility value filters nothing — difficult_boxes stays empty."""
        filter_config = [{'type': 'filter', 'apply_to': 'prediction', 'min_visibility': 'unknown'}]
        actual = [
            DetectionPrediction(y_mins=[5.0, 10.0], y_maxs=[15.0, 40.0]),
            DetectionPrediction(y_mins=[5.0, 10.0], y_maxs=[35.0, 50.0])
        ]
        expected = [
            DetectionPrediction(y_mins=[5.0, 10.0], y_maxs=[15.0, 40.0], metadata={'difficult_boxes': []}),
            DetectionPrediction(y_mins=[5.0, 10.0], y_maxs=[35.0, 50.0], metadata={'difficult_boxes': []})
        ]

        postprocess_data(PostprocessingExecutor(filter_config), [None, None], actual)

        assert np.array_equal(actual, expected)
Example #10
0
    def test_resize_prediction_boxes(self):
        """Prediction box coordinates are scaled by the annotation's image size (100x100)."""
        resize_config = [{'type': 'resize_prediction_boxes'}]
        annotation = DetectionAnnotation(metadata={'image_size': (100, 100, 3)})
        actual = DetectionPrediction(x_mins=[0, 7], y_mins=[0, 7], x_maxs=[5, 8], y_maxs=[5, 8])
        # Every coordinate is multiplied by 100; approx guards float arithmetic.
        expected = DetectionPrediction(
            x_mins=[pytest.approx(0), pytest.approx(700)],
            y_mins=[pytest.approx(0), pytest.approx(700)],
            x_maxs=[pytest.approx(500), pytest.approx(800)],
            y_maxs=[pytest.approx(500), pytest.approx(800)]
        )

        postprocess_data(PostprocessingExecutor(resize_config), [annotation], [actual])

        assert actual == expected
Example #11
0
    def test_clip_predictions_normalized_boxes_with_size_as_normalized(self):
        """With boxes_normalized, clipping bounds coordinates to [0, 1] regardless of 'size'."""
        clip_config = [{'type': 'clip_boxes', 'apply_to': 'prediction', 'boxes_normalized': True, 'size': 10}]
        annotation = DetectionAnnotation(metadata={'image_size': (10, 10, 3)})
        actual = DetectionPrediction(x_mins=[-1, 9], y_mins=[0, 11], x_maxs=[5, 10], y_maxs=[5, 10])
        # Negative values clamp to 0, everything >= 1 clamps to 1.
        expected = DetectionPrediction(
            x_mins=[pytest.approx(0), pytest.approx(1)],
            y_mins=[pytest.approx(0), pytest.approx(1)],
            x_maxs=[pytest.approx(1), pytest.approx(1)],
            y_maxs=[pytest.approx(1), pytest.approx(1)]
        )

        postprocess_data(PostprocessingExecutor(clip_config), [annotation], [actual])

        assert actual == expected
Example #12
0
    def test_cast_to_int_to_greater(self):
        """cast_to_int with the 'greater' policy ceils prediction coordinates; annotations are untouched."""
        cast_config = [{'type': 'cast_to_int', 'round_policy': 'greater'}]
        annotation = DetectionAnnotation(x_mins=[-1, 9], y_mins=[0, 11], x_maxs=[5, 10], y_maxs=[5, 10])
        actual = DetectionPrediction(
            x_mins=[-1.1, -9.9],
            y_mins=[0.5, 11.5],
            x_maxs=[5.9, 10.9],
            y_maxs=[5.1, 10.1]
        )
        # Annotation values are already integral and must stay identical.
        expected_annotation = DetectionAnnotation(x_mins=[-1, 9], y_mins=[0, 11], x_maxs=[5, 10], y_maxs=[5, 10])
        # Each prediction coordinate rounds up to the next integer.
        expected_prediction = DetectionPrediction(x_mins=[-1, -9], y_mins=[1, 12], x_maxs=[6, 11], y_maxs=[6, 11])

        postprocess_data(PostprocessingExecutor(cast_config), [annotation], [actual])

        assert actual == expected_prediction
        assert annotation == expected_annotation
Example #13
0
def make_representation(bounding_boxes, is_ground_truth=False, score=None):
    """Build detection representations from semicolon/space-encoded box strings.

    Args:
        bounding_boxes: string or list of strings `score label x0 y0 x1 y1; label score x0 y0 x1 y1; ...`
        is_ground_truth: True if bbs are annotation boxes
        score: value in [0, 1], if not None, all prediction boxes are considered with the given score

    Returns:
        list of DetectionAnnotation (ground truth) or DetectionPrediction
        objects, one per input string.
    """
    if not isinstance(bounding_boxes, list):
        bounding_boxes = [bounding_boxes]

    res = []
    for i, bb in enumerate(bounding_boxes):
        # Handle the empty case before parsing: the original fed "" through
        # np.mat first and only then replaced the result.
        if bb == "":
            arr = np.array([]).reshape((0, 5))
        else:
            # Parse 'a b c; d e f' into a 2D float array (rows split on ';').
            # np.mat is deprecated and removed in NumPy 2.0, so parse tokens
            # explicitly instead of relying on matrix-string syntax.
            arr = np.array([[float(token) for token in row.split()]
                            for row in bb.split(';')])

        # Ground-truth rows and externally-scored prediction rows carry
        # 5 fields (label x0 y0 x1 y1); raw prediction rows carry 6
        # (score label x0 y0 x1 y1).
        if is_ground_truth or score is not None:
            assert arr.shape[1] == 5
        else:
            assert arr.shape[1] == 6

        if not is_ground_truth and score is not None:
            # Prepend a uniform score column so predictions are 6 columns wide.
            arr = np.c_[np.full(arr.shape[0], score), arr]

        if is_ground_truth:
            r = DetectionAnnotation(str(i), arr[:, 0], arr[:, 1], arr[:, 2],
                                    arr[:, 3], arr[:, 4])
        else:
            # Prediction column order is (score, label, ...): label is passed
            # before score to the constructor.
            r = DetectionPrediction(str(i), arr[:, 1], arr[:, 0], arr[:, 2],
                                    arr[:, 3], arr[:, 4], arr[:, 5])
        res.append(r)
    return res
    def test_accuracy_with_wrong_prediction_type_raise_config_error_exception(self):
        """Accuracy metric rejects a detection prediction paired with a classification annotation."""
        annotation_batch = [ClassificationAnnotation('identifier', 3)]
        prediction_batch = [DetectionPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)

        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotation_batch, prediction_batch)
    def test_accuracy_with_unsupported_prediction_type_as_prediction_source_for_container_raises_config_error(self):
        """Accuracy metric rejects an unsupported type selected via prediction_source from a container."""
        annotation_batch = [ClassificationAnnotation('identifier', 3)]
        prediction_batch = [ContainerPrediction({'prediction': DetectionPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
        dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1, 'prediction_source': 'prediction'}], None)

        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotation_batch, prediction_batch)
    def test_accuracy_with_unsupported_prediction_in_container_raise_config_error_exception(self):
        """Accuracy metric rejects an unsupported prediction type found inside a container."""
        annotation_batch = [ClassificationAnnotation('identifier', 3)]
        prediction_batch = [ContainerPrediction({'prediction': DetectionPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
        metrics_config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}
        dispatcher = MetricsExecutor(metrics_config, None)

        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotation_batch, prediction_batch)
Example #17
0
    def test_filter_predictions_source_not_found_raise_config_error_exception(self):
        """A prediction_source that is absent from the container triggers ConfigError."""
        filter_config = [{'type': 'filter', 'prediction_source': 'undefined', 'labels': ['to_be_filtered']}]
        container = ContainerPrediction(
            {'detection_out': DetectionPrediction(labels=['some_label', 'to_be_filtered'])}
        )

        with pytest.raises(ConfigError):
            postprocess_data(PostprocessingExecutor(filter_config), [None], [container])
Example #18
0
    def test_filter_multi_source_predictions_by_labels_with_ignore_using_apply_to(self):
        """apply_to=prediction flags matching boxes in every entry of a multi-source container."""
        filter_config = [
            {'type': 'filter', 'apply_to': 'prediction', 'labels': ['to_be_filtered'],
             'remove_filtered': False}]
        actual = ContainerPrediction({
            'detection_out1': DetectionPrediction(labels=['some_label', 'to_be_filtered']),
            'detection_out2': DetectionPrediction(labels=['some_label', 'to_be_filtered'])
        })
        # Both sources keep their labels and get box 1 marked difficult.
        flagged = DetectionPrediction(labels=['some_label', 'to_be_filtered'],
                                      metadata={'difficult_boxes': [1]})
        expected = ContainerPrediction({
            'detection_out1': flagged,
            'detection_out2': DetectionPrediction(labels=['some_label', 'to_be_filtered'],
                                                  metadata={'difficult_boxes': [1]})
        })

        postprocess_data(PostprocessingExecutor(filter_config), [None], [actual])

        assert actual == expected
Example #19
0
def make_representation(bounding_boxes,
                        is_ground_truth=False,
                        score=None,
                        meta=None):
    """Build detection representations from semicolon/space-encoded box strings.

    Args:
        bounding_boxes: string or list of strings `score label x0 y0 x1 y1; label score x0 y0 x1 y1; ...`.
        is_ground_truth: True if bbs are annotation boxes.
        score: value in [0, 1], if not None, all prediction boxes are considered with the given score.
        meta: metadata for representation (one entry per input string).

    Returns:
        list of DetectionAnnotation (ground truth) or DetectionPrediction
        objects, one per input string.
    """

    if not isinstance(bounding_boxes, list):
        bounding_boxes = [bounding_boxes]

    result = []
    for idx, box in enumerate(bounding_boxes):
        if box == "":
            arr = np.array([]).reshape((0, 5))
        else:
            # np.fromstring with a text separator is deprecated; parse the
            # whitespace-separated tokens of each ';'-delimited row explicitly.
            arr = np.array([[float(token) for token in row.split()]
                            for row in box.split(';')])

        # Compare against None rather than truthiness: score == 0.0 is a legal
        # value from the documented [0, 1] range and must still be treated as
        # "score provided" (the truthiness check silently dropped it before).
        if is_ground_truth or score is not None:
            assert arr.shape[1] == 5
        else:
            assert arr.shape[1] == 6

        if not is_ground_truth and score is not None:
            score_ = score
            if np.isscalar(score_) or len(score_) == 1:
                # Broadcast a single score over every box of this representation.
                score_ = np.full(arr.shape[0], score_)
            arr = np.c_[score_, arr]

        if is_ground_truth:
            detection = DetectionAnnotation(str(idx), arr[:, 0], arr[:, 1],
                                            arr[:, 2], arr[:, 3], arr[:, 4])
        else:
            # Prediction column order is (score, label, ...): label is passed
            # before score to the constructor.
            detection = DetectionPrediction(str(idx), arr[:, 1], arr[:, 0],
                                            arr[:, 2], arr[:, 3], arr[:, 4],
                                            arr[:, 5])

        if meta:
            detection.metadata = meta[idx]

        result.append(detection)

    return result