Example #1
    def test_classification_per_class_accuracy_prediction_top3(self):
        """Top-3 per-class accuracy over two samples labelled 1: only one of
        them counts as a hit, so class 1 scores 0.5 and all others 0."""
        gt = [
            ClassificationAnnotation('identifier_1', 1),
            ClassificationAnnotation('identifier_2', 1),
        ]
        scores = [
            ClassificationPrediction('identifier_1', [1.0, 2.0, 3.0, 4.0]),
            ClassificationPrediction('identifier_2', [2.0, 1.0, 3.0, 4.0]),
        ]
        dataset = DummyDataset(label_map={0: '0', 1: '1', 2: '2', 3: '3'})
        executor = MetricsExecutor(
            [{'type': 'accuracy_per_class', 'top_k': 3}], dataset)

        executor.update_metrics_on_batch(range(len(gt)), gt, scores)

        expected_per_class = [0.0, 0.5, 0.0, 0.0]
        for _, result in executor.iterate_metrics(gt, scores):
            assert result.name == 'accuracy_per_class'
            assert len(result.evaluated_value) == 4
            for label, expected in enumerate(expected_per_class):
                assert result.evaluated_value[label] == pytest.approx(expected)
            assert result.reference_value is None
            assert result.abs_threshold is None
            assert result.rel_threshold is None
Example #2
    def test_classification_accuracy_result_for_batch_2_with_not_ordered_ids(
            self):
        """Per-image results are keyed by the (arbitrary, unordered) dataset
        ids passed to update_metrics_on_batch, one result per metric."""
        gt = [
            ClassificationAnnotation('identifier', 3),
            ClassificationAnnotation('identifier1', 1),
        ]
        scores = [
            ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0]),
            ClassificationPrediction('identifier2', [1.0, 1.0, 1.0, 4.0]),
        ]

        executor = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)
        per_image, _ = executor.update_metrics_on_batch([42, 17], gt, scores)

        expected = {
            42: PerImageMetricResult('accuracy', 'accuracy', 1.0,
                                     'higher-better'),
            17: PerImageMetricResult('accuracy', 'accuracy', 0.0,
                                     'higher-better'),
        }
        assert len(per_image) == 2
        for image_id, wanted in expected.items():
            assert image_id in per_image
            assert len(per_image[image_id]) == 1
            assert per_image[image_id][0] == wanted
Example #3
    def test_classification_accuracy_result_for_batch_1_with_2_metrics(self):
        """Two configured accuracy metrics each contribute one per-image
        result for the single dataset item."""
        gt = [ClassificationAnnotation('identifier', 3)]
        scores = [
            ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])
        ]

        config = [
            {'name': 'top1', 'type': 'accuracy', 'top_k': 1},
            {'name': 'top3', 'type': 'accuracy', 'top_k': 3},
        ]
        executor = MetricsExecutor(config, None)
        per_image, _ = executor.update_metrics_on_batch(
            range(len(gt)), gt, scores)

        expected = [
            PerImageMetricResult('top1', 'accuracy', 1.0, 'higher-better'),
            PerImageMetricResult('top3', 'accuracy', 1.0, 'higher-better'),
        ]
        assert len(per_image) == 1
        assert 0 in per_image
        assert len(per_image[0]) == 2
        for actual, wanted in zip(per_image[0], expected):
            assert actual == wanted
Example #4
    def test_complete_accuracy_with_container_sources(self):
        """Accuracy extracts its inputs from container representations using
        the annotation_source / prediction_source keys in the config."""
        gt = [
            ContainerAnnotation(
                {'a': ClassificationAnnotation('identifier', 3)})
        ]
        scores = [
            ContainerPrediction({
                'p':
                ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])
            })
        ]
        config = [{
            'type': 'accuracy',
            'top_k': 1,
            'annotation_source': 'a',
            'prediction_source': 'p',
        }]

        executor = MetricsExecutor(config, None)
        executor.update_metrics_on_batch(range(len(gt)), gt, scores)

        for _, result in executor.iterate_metrics(gt, scores):
            assert result.name == 'accuracy'
            assert result.evaluated_value == pytest.approx(1.0)
            assert result.reference_value is None
            assert result.abs_threshold is None
            assert result.rel_threshold is None
Example #5
    def test_accuracy_on_prediction_container_with_several_suitable_representations_raise_config_error_exception(
            self):
        """A container holding two equally suitable classification predictions
        is ambiguous without prediction_source, so evaluation must raise."""
        gt = [ClassificationAnnotation('identifier', 3)]
        scores = [
            ContainerPrediction({
                'prediction1':
                ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0]),
                'prediction2':
                ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0]),
            })
        ]

        executor = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)
        with pytest.raises(ConfigError):
            executor.update_metrics_on_batch(range(len(gt)), gt, scores)
Example #6
    def test_accuracy_with_wrong_annotation_type_raise_config_error_exception(
            self):
        """A detection annotation is not a valid input for the accuracy
        metric, so evaluation must raise ConfigError."""
        gt = [DetectionAnnotation('identifier', 3)]
        scores = [
            ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])
        ]

        executor = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)
        with pytest.raises(ConfigError):
            executor.update_metrics_on_batch(range(len(gt)), gt, scores)
    def test_config_default_presenter(self):
        """When the metric config names no presenter, the executor falls back
        to ScalarPrintPresenter."""
        gt = [ClassificationAnnotation('identifier', 3)]
        scores = [
            ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])
        ]
        executor = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)
        executor.update_metrics_on_batch(range(len(gt)), gt, scores)

        for presenter, _ in executor.iterate_metrics(gt, scores):
            assert isinstance(presenter, ScalarPrintPresenter)
Example #8
    def test_zero_accuracy_top_3(self):
        """Top-3 accuracy is reported as 0.0.

        NOTE(review): unlike the sibling tests, update_metrics_on_batch is
        deliberately not called before iterating — the metric's initial
        value is exercised here; confirm against the executor's contract.
        """
        gt = [ClassificationAnnotation('identifier', 3)]
        scores = [
            ClassificationPrediction('identifier', [5.0, 3.0, 4.0, 1.0])
        ]

        executor = MetricsExecutor([{'type': 'accuracy', 'top_k': 3}], None)

        for _, result in executor.iterate_metrics(gt, scores):
            assert result.name == 'accuracy'
            assert result.evaluated_value == 0.0
            assert result.reference_value is None
            assert result.abs_threshold is None
            assert result.rel_threshold is None
Example #9
    def test_complete_accuracy_top_3(self):
        """Label 3 sits inside the three highest scores, so top-3 accuracy on
        the single sample is exactly 1.0."""
        gt = [ClassificationAnnotation('identifier', 3)]
        scores = [
            ClassificationPrediction('identifier', [1.0, 3.0, 4.0, 2.0])
        ]

        executor = MetricsExecutor([{'type': 'accuracy', 'top_k': 3}], None)
        executor.update_metrics_on_batch(range(len(gt)), gt, scores)

        for _, result in executor.iterate_metrics(gt, scores):
            assert result.name == 'accuracy'
            assert result.evaluated_value == pytest.approx(1.0)
            assert result.reference_value is None
            assert result.abs_threshold is None
            assert result.rel_threshold is None
Example #10
    def test_threshold_is_10_by_config(self):
        """abs_threshold given in the metric config is carried through to the
        evaluation result unchanged."""
        gt = [ClassificationAnnotation('identifier', 3)]
        scores = [
            ClassificationPrediction('identifier', [5.0, 3.0, 4.0, 1.0])
        ]

        executor = MetricsExecutor(
            [{'type': 'accuracy', 'top_k': 3, 'abs_threshold': 10}], None)

        # NOTE(review): the extra list nesting around gt/scores mirrors the
        # original call exactly — presumably iterate_metrics ignores the
        # payload shape here; confirm before "simplifying".
        for _, result in executor.iterate_metrics([gt], [scores]):
            assert result.name == 'accuracy'
            assert result.evaluated_value == 0.0
            assert result.reference_value is None
            assert result.abs_threshold == 10
Example #11
    def test_accuracy_with_unsupported_annotation_type_as_annotation_source_for_container_raises_config_error(
            self):
        """Pointing annotation_source at a detection annotation inside a
        container must raise ConfigError for the accuracy metric."""
        gt = [
            ContainerAnnotation(
                {'annotation': DetectionAnnotation('identifier', 3)})
        ]
        scores = [
            ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])
        ]

        config = [{
            'type': 'accuracy',
            'top_k': 1,
            'annotation_source': 'annotation',
        }]
        executor = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            executor.update_metrics_on_batch(range(len(gt)), gt, scores)
Example #12
 def test_classification_per_class_accuracy_fully_zero_prediction(self):
     """A wrong top-1 prediction (argmax 1, label 0) yields zero per-class
     accuracy for every class in the label map."""
     gt = ClassificationAnnotation('identifier', 0)
     score = ClassificationPrediction('identifier', [1.0, 2.0])
     dataset = DummyDataset(label_map={0: '0', 1: '1'})
     executor = MetricsExecutor(
         [{'type': 'accuracy_per_class', 'top_k': 1}], dataset)
     executor.update_metrics_on_batch(range(1), [gt], [score])
     for _, result in executor.iterate_metrics([gt], [score]):
         assert result.name == 'accuracy_per_class'
         assert len(result.evaluated_value) == 2
         for per_class_value in result.evaluated_value:
             assert per_class_value == pytest.approx(0.0)
         assert result.reference_value is None
         assert result.abs_threshold is None
         assert result.rel_threshold is None
Example #13
 def test_predictions_loading_with_adapter(self, mocker):
     """DummyLauncher runs stored raw predictions through the configured
     adapter, producing ClassificationPrediction objects on predict()."""
     launcher_config = {
         'framework': 'dummy',
         'loader': 'pickle',
         'data_path': '/path',
     }
     stored_batch = StoredPredictionBatch(
         {'prediction': np.array([[0, 1]])}, [1], [{}])
     expected = ClassificationPrediction(1, np.array([0, 1]))
     # Stub out pickle loading so the launcher sees exactly one raw batch.
     mocker.patch(
         'openvino.tools.accuracy_checker.launcher.loaders.pickle_loader.PickleLoader.read_pickle',
         return_value=[stored_batch])
     launcher = DummyLauncher(
         launcher_config,
         adapter=ClassificationAdapter({'type': 'classification'}))
     assert len(launcher._loader.data) == 1
     result = launcher.predict([1])
     assert len(result) == 1
     assert isinstance(result[0], ClassificationPrediction)
     assert result[0].identifier == expected.identifier
     assert np.array_equal(result[0].scores, expected.scores)