def test_filter_annotations_by_labels_with_remove_on_container_using_apply_to(self):
    """Filter with apply_to='annotation' and remove_filtered=True: the matching
    detection inside a container annotation is removed outright."""
    filter_config = [{
        'type': 'filter',
        'apply_to': 'annotation',
        'labels': ['to_be_filtered'],
        'remove_filtered': True,
    }]
    container = ContainerAnnotation(
        {'annotation': DetectionAnnotation(labels=['some_label', 'to_be_filtered'])}
    )
    # After filtering only the non-matching label remains.
    expected_container = ContainerAnnotation(
        {'annotation': DetectionAnnotation(labels=['some_label'])}
    )

    postprocess_data(PostprocessingExecutor(filter_config), [container], [None])

    assert container == expected_container
def test_filter_container_annotations_by_labels_with_ignore_using_source(self):
    """Filter addressed via annotation_source with remove_filtered=False: the
    matching box is kept but marked as difficult instead of being removed."""
    filter_config = [{
        'type': 'filter',
        'annotation_source': 'annotation',
        'labels': ['to_be_filtered'],
        'remove_filtered': False,
    }]
    container = ContainerAnnotation(
        {'annotation': DetectionAnnotation(labels=['some_label', 'to_be_filtered'])}
    )
    # Labels are untouched; index 1 is recorded in difficult_boxes metadata.
    expected_container = ContainerAnnotation({
        'annotation': DetectionAnnotation(
            labels=['some_label', 'to_be_filtered'],
            metadata={'difficult_boxes': [1]},
        )
    })

    postprocess_data(PostprocessingExecutor(filter_config), [container], [None])

    assert container == expected_container
def test_filter_container_annotations_and_regular_predictions_by_labels_with_remove_using_apply_to(self):
    """apply_to='all' with remove_filtered=True filters both a container
    annotation and a plain (non-container) prediction in one pass."""
    filter_config = [{
        'type': 'filter',
        'apply_to': 'all',
        'labels': ['to_be_filtered'],
        'remove_filtered': True,
    }]
    prediction = DetectionPrediction(labels=['some_label', 'to_be_filtered'])
    container = ContainerAnnotation(
        {'annotation': DetectionAnnotation(labels=['some_label', 'to_be_filtered'])}
    )
    # Both sides should end up with only the non-matching label.
    expected_prediction = DetectionPrediction(labels=['some_label'])
    expected_container = ContainerAnnotation(
        {'annotation': DetectionAnnotation(labels=['some_label'])}
    )

    postprocess_data(PostprocessingExecutor(filter_config), [container], [prediction])

    assert prediction == expected_prediction
    assert container == expected_container
def test_filter_multi_source_annotations_by_labels_with_remove(self):
    """annotation_source may be a list: every named representation inside the
    container is filtered, with matching labels removed from each."""
    filter_config = [{
        'type': 'filter',
        'annotation_source': ['annotation1', 'annotation2'],
        'labels': ['to_be_filtered'],
        'remove_filtered': True,
    }]
    source_labels = ['some_label', 'to_be_filtered']
    container = ContainerAnnotation({
        'annotation1': DetectionAnnotation(labels=list(source_labels)),
        'annotation2': DetectionAnnotation(labels=list(source_labels)),
    })
    expected_container = ContainerAnnotation({
        'annotation1': DetectionAnnotation(labels=['some_label']),
        'annotation2': DetectionAnnotation(labels=['some_label']),
    })

    postprocess_data(PostprocessingExecutor(filter_config), [container], [None])

    assert container == expected_container
def test_complete_accuracy_with_container_sources(self):
    """Accuracy metric resolves annotation/prediction representations from
    containers via explicit annotation_source / prediction_source keys."""
    annotations = [ContainerAnnotation({'a': ClassificationAnnotation('identifier', 3)})]
    predictions = [ContainerPrediction({'p': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
    metric_config = [{
        'type': 'accuracy',
        'top_k': 1,
        'annotation_source': 'a',
        'prediction_source': 'p',
    }]
    dispatcher = MetricsExecutor(metric_config, None)

    dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

    # Top prediction (index 3) matches the annotated label, so accuracy is 1.0.
    for _, result in dispatcher.iterate_metrics(annotations, predictions):
        assert result.name == 'accuracy'
        assert result.evaluated_value == pytest.approx(1.0)
        assert result.reference_value is None
        assert result.threshold is None
def test_filter_annotations_source_not_found_raise_config_error_exception(self):
    """Naming an annotation_source that does not exist in the container must
    raise ConfigError during postprocessing."""
    filter_config = [{
        'type': 'filter',
        'annotation_source': 'ann',  # container only holds the key 'annotation'
        'labels': ['to_be_filtered'],
    }]
    container = ContainerAnnotation(
        {'annotation': DetectionAnnotation(labels=['some_label', 'to_be_filtered'])}
    )
    executor = PostprocessingExecutor(filter_config)

    with pytest.raises(ConfigError):
        postprocess_data(executor, [container], [None])
def test_filter_annotations_unsupported_source_type_in_container_raise_type_error_exception(self):
    """Pointing the filter at a representation type it cannot handle
    (classification instead of detection) must raise TypeError."""
    filter_config = [{
        'type': 'filter',
        'annotation_source': 'annotation',
        'labels': ['to_be_filtered'],
    }]
    container = ContainerAnnotation({'annotation': ClassificationAnnotation()})
    executor = PostprocessingExecutor(filter_config)

    with pytest.raises(TypeError):
        postprocess_data(executor, [container], [None])
def test_accuracy_with_unsupported_annotation_type_as_annotation_source_for_container_raises_config_error(self):
    """annotation_source resolving to a representation type the accuracy
    metric does not support must raise ConfigError.

    Fix: `update_metrics_on_batch` was called without the leading batch-ids
    argument, unlike the 3-argument call in
    test_complete_accuracy_with_container_sources; with that signature the
    2-argument call would fail with TypeError rather than exercising the
    expected ConfigError path.
    """
    annotations = [ContainerAnnotation({'annotation': DetectionAnnotation('identifier', 3)})]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    dispatcher = MetricsExecutor(
        [{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'annotation'}], None
    )

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)
def test_accuracy_on_container_with_wrong_annotation_source_name_raise_config_error_exception(self):
    """An annotation_source key absent from the container must raise
    ConfigError when metrics are updated.

    Fix: added the missing batch-ids argument to `update_metrics_on_batch`,
    matching the 3-argument signature used by
    test_complete_accuracy_with_container_sources; the previous 2-argument
    call would raise TypeError instead of the ConfigError under test.
    """
    annotations = [ContainerAnnotation({'annotation': ClassificationAnnotation('identifier', 3)})]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    dispatcher = MetricsExecutor(
        [{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'a'}], None
    )

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)
def test_filter_container_annotations_and_container_predictions_by_labels_with_ignore_using_apply_to(self):
    """apply_to='all' with remove_filtered=False: both container annotation and
    container prediction keep their boxes but mark the match as difficult."""
    filter_config = [{
        'type': 'filter',
        'apply_to': 'all',
        'labels': ['to_be_filtered'],
        'remove_filtered': False,
    }]
    prediction = ContainerPrediction(
        {'detection_out': DetectionPrediction(labels=['some_label', 'to_be_filtered'])}
    )
    container = ContainerAnnotation(
        {'annotation': DetectionAnnotation(labels=['some_label', 'to_be_filtered'])}
    )
    # Labels survive on both sides; index 1 lands in difficult_boxes metadata.
    expected_prediction = ContainerPrediction({
        'detection_out': DetectionPrediction(
            labels=['some_label', 'to_be_filtered'],
            metadata={'difficult_boxes': [1]},
        )
    })
    expected_container = ContainerAnnotation({
        'annotation': DetectionAnnotation(
            labels=['some_label', 'to_be_filtered'],
            metadata={'difficult_boxes': [1]},
        )
    })

    postprocess_data(PostprocessingExecutor(filter_config), [container], [prediction])

    assert prediction == expected_prediction
    assert container == expected_container
def test_accuracy_on_annotation_container_with_several_suitable_representations_config_value_error_exception(self):
    """A container holding several representations suitable for the metric,
    with no annotation_source to disambiguate, must raise ConfigError.

    Fix: `update_metrics_on_batch` was invoked without the leading batch-ids
    argument; the 3-argument form demonstrated by
    test_complete_accuracy_with_container_sources is required, otherwise the
    call fails with TypeError before the ConfigError path is reached.
    """
    annotations = [ContainerAnnotation({
        'annotation1': ClassificationAnnotation('identifier', 3),
        'annotation2': ClassificationAnnotation('identifier', 3),
    })]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}
    dispatcher = MetricsExecutor(config, None)

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)
def test_accuracy_with_unsupported_annotations_in_container_raise_config_error_exception(self):
    """A container whose only representation is unsupported by the accuracy
    metric must raise ConfigError.

    Fix: added the batch-ids argument missing from the
    `update_metrics_on_batch` call, aligning it with the 3-argument signature
    used in test_complete_accuracy_with_container_sources; without it the call
    raises TypeError instead of the expected ConfigError.
    """
    annotations = [ContainerAnnotation({'annotation': DetectionAnnotation('identifier', 3)})]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}
    dispatcher = MetricsExecutor(config, None)

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)