Example #1
0
    def test_complete_accuracy_with_container_sources(self):
        """Accuracy resolves container entries through explicit source keys.

        The config names the annotation key ('a') and prediction key ('p'),
        so the metric must pick those representations out of the containers
        and score a perfect top-1 accuracy.
        """
        annotation_batch = [
            ContainerAnnotation(
                {'a': ClassificationAnnotation('identifier', 3)}),
        ]
        prediction_batch = [
            ContainerPrediction(
                {'p': ClassificationPrediction('identifier',
                                               [1.0, 1.0, 1.0, 4.0])}),
        ]
        metric_config = [{
            'type': 'accuracy',
            'top_k': 1,
            'annotation_source': 'a',
            'prediction_source': 'p',
        }]

        executor = MetricsExecutor(metric_config, None)
        executor.update_metrics_on_batch(
            range(len(annotation_batch)), annotation_batch, prediction_batch)

        for _, result in executor.iterate_metrics(annotation_batch,
                                                  prediction_batch):
            assert result.name == 'accuracy'
            # Class 3 has the highest score, matching the annotation label.
            assert result.evaluated_value == pytest.approx(1.0)
            assert result.reference_value is None
            assert result.abs_threshold is None
            assert result.rel_threshold is None
Example #2
0
    def test_accuracy_with_unsupported_prediction_in_container_raise_config_error_exception(
            self):
        """A container whose only prediction is non-classification is rejected.

        The accuracy metric needs a ClassificationPrediction; a container
        holding just a DetectionPrediction leaves nothing suitable, so batch
        evaluation must raise ConfigError.
        """
        annotation_batch = [ClassificationAnnotation('identifier', 3)]
        prediction_batch = [
            ContainerPrediction(
                {'prediction': DetectionPrediction('identifier',
                                                   [1.0, 1.0, 1.0, 4.0])}),
        ]

        executor = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)
        with pytest.raises(ConfigError):
            executor.update_metrics_on_batch(
                range(len(annotation_batch)), annotation_batch,
                prediction_batch)
Example #3
0
    def test_accuracy_on_prediction_container_with_several_suitable_representations_raise_config_error_exception(
            self):
        """Ambiguous containers must be rejected when no source is configured.

        Two classification predictions both match the metric's expected
        representation; without a 'prediction_source' hint the executor
        cannot choose between them and must raise ConfigError.
        """
        annotation_batch = [ClassificationAnnotation('identifier', 3)]
        ambiguous_container = ContainerPrediction({
            'prediction1': ClassificationPrediction('identifier',
                                                    [1.0, 1.0, 1.0, 4.0]),
            'prediction2': ClassificationPrediction('identifier',
                                                    [1.0, 1.0, 1.0, 4.0]),
        })
        prediction_batch = [ambiguous_container]

        executor = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)
        with pytest.raises(ConfigError):
            executor.update_metrics_on_batch(
                range(len(annotation_batch)), annotation_batch,
                prediction_batch)