# Example 1
    def test_complete_accuracy_with_container_sources(self):
        """Accuracy is computed when both sides are containers and the metric
        config names the annotation/prediction source keys explicitly."""
        annotation = ContainerAnnotation(
            {'a': ClassificationAnnotation('identifier', 3)})
        prediction = ContainerPrediction(
            {'p': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})
        annotations, predictions = [annotation], [prediction]
        # Point the metric at the container keys defined above.
        metric_config = [{
            'type': 'accuracy',
            'top_k': 1,
            'annotation_source': 'a',
            'prediction_source': 'p'
        }]

        dispatcher = MetricsExecutor(metric_config, None)
        dispatcher.update_metrics_on_batch(
            range(len(annotations)), annotations, predictions)

        # Top-1 class (index 3) matches the label, so accuracy is exactly 1.0;
        # no reference value or thresholds were configured.
        for _, result in dispatcher.iterate_metrics(annotations, predictions):
            assert result.name == 'accuracy'
            assert result.evaluated_value == pytest.approx(1.0)
            assert result.reference_value is None
            assert result.abs_threshold is None
            assert result.rel_threshold is None
# Example 2
    def test_accuracy_with_unsupported_annotations_in_container_raise_config_error_exception(
            self):
        """A ConfigError is raised when the annotation container holds only a
        representation type the accuracy metric cannot consume."""
        # DetectionAnnotation is not a classification representation.
        annotations = [ContainerAnnotation(
            {'annotation': DetectionAnnotation('identifier', 3)})]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]

        dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(
                range(len(annotations)), annotations, predictions)
# Example 3
    def test_accuracy_on_annotation_container_with_several_suitable_representations_config_value_error_exception(
            self):
        """A ConfigError is raised when the container offers more than one
        suitable annotation and no annotation_source disambiguates them."""
        # Two equally valid classification annotations -> ambiguous choice.
        ambiguous_container = ContainerAnnotation({
            'annotation1':
            ClassificationAnnotation('identifier', 3),
            'annotation2':
            ClassificationAnnotation('identifier', 3)
        })
        annotations = [ambiguous_container]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]

        dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(
                range(len(annotations)), annotations, predictions)