def test_filter_container_annotations_and_container_predictions_by_labels_with_remove_using_apply_to(self):
    """With apply_to=all and remove_filtered=True, the filter postprocessor drops
    matching labels from representations inside both the annotation container and
    the prediction container (in place)."""
    filter_config = [
        {'type': 'filter', 'apply_to': 'all', 'labels': ['to_be_filtered'], 'remove_filtered': True}
    ]
    annotation = ContainerAnnotation({'annotation': DetectionAnnotation(labels=['some_label', 'to_be_filtered'])})
    prediction = ContainerPrediction({'prediction': DetectionPrediction(labels=['some_label', 'to_be_filtered'])})
    # Expected state after postprocessing: the flagged label is removed outright.
    expected_annotation = ContainerAnnotation({'annotation': DetectionAnnotation(labels=['some_label'])})
    expected_prediction = ContainerPrediction({'prediction': DetectionPrediction(labels=['some_label'])})

    postprocess_data(PostprocessingExecutor(filter_config), [annotation], [prediction])

    assert annotation == expected_annotation
    assert prediction == expected_prediction
def test_filter_predictions_by_labels_with_remove_on_container_using_apply_to(self):
    """With apply_to=prediction and remove_filtered=True, the filter removes the
    matching label from the detection representation held inside the prediction
    container; annotations (None here) are untouched."""
    filter_config = [
        {'type': 'filter', 'apply_to': 'prediction', 'labels': ['to_be_filtered'], 'remove_filtered': True}
    ]
    prediction = ContainerPrediction({'detection_out': DetectionPrediction(labels=['some_label', 'to_be_filtered'])})
    expected = ContainerPrediction({'detection_out': DetectionPrediction(labels=['some_label'])})

    postprocess_data(PostprocessingExecutor(filter_config), [None], [prediction])

    assert prediction == expected
def test_filter_regular_annotations_and_container_predictions_by_labels_with_ignore_using_apply_to(self):
    """With apply_to=all and remove_filtered=False, the filter keeps all labels
    but marks the filtered entry's index in metadata['difficult_boxes'] — for a
    plain annotation and a container prediction alike."""
    filter_config = [
        {'type': 'filter', 'apply_to': 'all', 'labels': ['to_be_filtered'], 'remove_filtered': False}
    ]
    annotation = DetectionAnnotation(labels=['some_label', 'to_be_filtered'])
    prediction = ContainerPrediction({'detection_out': DetectionPrediction(labels=['some_label', 'to_be_filtered'])})
    # Index 1 ('to_be_filtered') is flagged as difficult instead of being removed.
    expected_annotation = DetectionAnnotation(
        labels=['some_label', 'to_be_filtered'], metadata={'difficult_boxes': [1]}
    )
    expected_prediction = ContainerPrediction({
        'detection_out': DetectionPrediction(
            labels=['some_label', 'to_be_filtered'], metadata={'difficult_boxes': [1]}
        )
    })

    postprocess_data(PostprocessingExecutor(filter_config), [annotation], [prediction])

    assert annotation == expected_annotation
    assert prediction == expected_prediction
def test_filter_predictions_by_labels_with_ignore_on_container(self):
    """Addressing a container entry explicitly via prediction_source with
    remove_filtered=False keeps the label and records its index in
    metadata['difficult_boxes']."""
    filter_config = [
        {'type': 'filter', 'prediction_source': 'detection_out', 'labels': ['to_be_filtered'],
         'remove_filtered': False}
    ]
    prediction = ContainerPrediction({'detection_out': DetectionPrediction(labels=['some_label', 'to_be_filtered'])})
    expected = ContainerPrediction({
        'detection_out': DetectionPrediction(
            labels=['some_label', 'to_be_filtered'], metadata={'difficult_boxes': [1]}
        )
    })

    postprocess_data(PostprocessingExecutor(filter_config), [None], [prediction])

    assert prediction == expected
def test_filter_multi_source_predictions_by_labels_with_remove(self):
    """prediction_source may be a list of container keys: the filter is applied
    to every listed entry, removing the matching label from each."""
    filter_config = [
        {'type': 'filter', 'prediction_source': ['detection_out1', 'detection_out2'],
         'labels': ['to_be_filtered'], 'remove_filtered': True}
    ]
    prediction = ContainerPrediction({
        'detection_out1': DetectionPrediction(labels=['some_label', 'to_be_filtered']),
        'detection_out2': DetectionPrediction(labels=['some_label', 'to_be_filtered'])
    })
    expected = ContainerPrediction({
        'detection_out1': DetectionPrediction(labels=['some_label']),
        'detection_out2': DetectionPrediction(labels=['some_label'])
    })

    postprocess_data(PostprocessingExecutor(filter_config), [None], [prediction])

    assert prediction == expected
def test_complete_accuracy_with_container_sources(self):
    """End-to-end accuracy metric over container representations: the metric
    picks the right entries via annotation_source/prediction_source keys and
    evaluates to 1.0 (argmax of scores == ground-truth label 3)."""
    annotations = [ContainerAnnotation({'a': ClassificationAnnotation('identifier', 3)})]
    predictions = [ContainerPrediction({'p': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
    metric_config = [{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'a', 'prediction_source': 'p'}]
    dispatcher = MetricsExecutor(metric_config, None)

    dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

    for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
        assert evaluation_result.name == 'accuracy'
        assert evaluation_result.evaluated_value == pytest.approx(1.0)
        assert evaluation_result.reference_value is None
        assert evaluation_result.threshold is None
def test_accuracy_with_unsupported_prediction_type_as_prediction_source_for_container_raises_config_error(self):
    """Pointing prediction_source at a container entry whose representation type
    the accuracy metric does not support must raise ConfigError.

    Fix: update_metrics_on_batch was called with only (annotations, predictions),
    unlike the 3-argument form (batch_ids, annotations, predictions) used by the
    successful-path test in this suite — the mismatched arity would surface as a
    TypeError and make pytest.raises(ConfigError) fail for the wrong reason.
    """
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ContainerPrediction({'prediction': DetectionPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1, 'prediction_source': 'prediction'}], None)

    with pytest.raises(ConfigError):
        # Same 3-arg call shape as the passing accuracy test: (batch ids, annotations, predictions).
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)
def test_filter_predictions_unsupported_source_type_raise_type_error_exception(self):
    """A container entry holding a representation type the filter cannot handle
    (a ClassificationAnnotation under 'detection_out') must raise TypeError
    during postprocessing."""
    filter_config = [
        {'type': 'filter', 'prediction_source': 'detection_out', 'labels': ['to_be_filtered'],
         'remove_filtered': False}
    ]
    bad_prediction = ContainerPrediction({'detection_out': ClassificationAnnotation()})
    executor = PostprocessingExecutor(filter_config)

    with pytest.raises(TypeError):
        postprocess_data(executor, [None], [bad_prediction])
def test_accuracy_with_unsupported_prediction_in_container_raise_config_error_exception(self):
    """A container whose only prediction is of a type the accuracy metric cannot
    consume must raise ConfigError when the batch is evaluated.

    Fixes for consistency with the sibling accuracy tests in this suite:
    - MetricsExecutor received a dict ({'annotation': ..., 'metrics': [...]})
      while every other test passes the bare metric-config list; normalized to
      the list form used elsewhere.
    - update_metrics_on_batch was missing its leading batch-ids argument
      (the passing test calls it as (batch_ids, annotations, predictions));
      the arity mismatch would raise TypeError instead of ConfigError.
    """
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ContainerPrediction({'prediction': DetectionPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)
def test_filter_predictions_source_not_found_raise_config_error_exception(self):
    """Referencing a prediction_source key that does not exist in the container
    must raise ConfigError during postprocessing."""
    filter_config = [{'type': 'filter', 'prediction_source': 'undefined', 'labels': ['to_be_filtered']}]
    prediction = ContainerPrediction({'detection_out': DetectionPrediction(labels=['some_label', 'to_be_filtered'])})
    executor = PostprocessingExecutor(filter_config)

    with pytest.raises(ConfigError):
        postprocess_data(executor, [None], [prediction])
def test_accuracy_on_prediction_container_with_several_suitable_representations_raise_config_error_exception(self):
    """When a container holds several representations suitable for the metric
    and no prediction_source disambiguates between them, evaluation must raise
    ConfigError.

    Fix: update_metrics_on_batch was called with only (annotations, predictions);
    the passing accuracy test in this suite uses the 3-argument form
    (batch_ids, annotations, predictions), so the missing first argument would
    raise TypeError and fail the pytest.raises(ConfigError) check spuriously.
    """
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ContainerPrediction({
        'prediction1': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0]),
        'prediction2': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])
    })]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)