def test_classification_per_class_accuracy_particular_prediction(self):
    annotation = [
        ClassificationAnnotation('identifier_1', 1),
        ClassificationAnnotation('identifier_2', 0),
        ClassificationAnnotation('identifier_3', 0)
    ]
    prediction = [
        ClassificationPrediction('identifier_1', [1.0, 2.0]),
        ClassificationPrediction('identifier_2', [2.0, 1.0]),
        ClassificationPrediction('identifier_3', [1.0, 5.0])
    ]
    config = {
        'annotation': 'mocked',
        'metrics': [{
            'type': 'accuracy_per_class',
            'top_k': 1
        }]
    }
    dataset = DummyDataset(label_map={0: '0', 1: '1'})
    dispatcher = MetricsExecutor(config, dataset)

    dispatcher.update_metrics_on_batch(range(len(annotation)), annotation, prediction)

    for _, evaluation_result in dispatcher.iterate_metrics(annotation, prediction):
        assert evaluation_result.name == 'accuracy_per_class'
        assert len(evaluation_result.evaluated_value) == 2
        assert evaluation_result.evaluated_value[0] == pytest.approx(0.5)
        assert evaluation_result.evaluated_value[1] == pytest.approx(1.0)
        assert evaluation_result.reference_value is None
        assert evaluation_result.threshold is None

def test_classification_per_class_accuracy_prediction_top3(self):
    annotation = [
        ClassificationAnnotation('identifier_1', 1),
        ClassificationAnnotation('identifier_2', 1)
    ]
    prediction = [
        ClassificationPrediction('identifier_1', [1.0, 2.0, 3.0, 4.0]),
        ClassificationPrediction('identifier_2', [2.0, 1.0, 3.0, 4.0])
    ]
    dataset = DummyDataset(label_map={0: '0', 1: '1', 2: '2', 3: '3'})
    dispatcher = MetricsExecutor([{'type': 'accuracy_per_class', 'top_k': 3}], dataset)

    dispatcher.update_metrics_on_batch(range(len(annotation)), annotation, prediction)

    for _, evaluation_result in dispatcher.iterate_metrics(annotation, prediction):
        assert evaluation_result.name == 'accuracy_per_class'
        assert len(evaluation_result.evaluated_value) == 4
        assert evaluation_result.evaluated_value[0] == pytest.approx(0.0)
        assert evaluation_result.evaluated_value[1] == pytest.approx(0.5)
        assert evaluation_result.evaluated_value[2] == pytest.approx(0.0)
        assert evaluation_result.evaluated_value[3] == pytest.approx(0.0)
        assert evaluation_result.reference_value is None
        assert evaluation_result.threshold is None

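# A minimal sketch of what the DummyDataset stub used by the per-class accuracy tests above
# is assumed to provide: the accuracy_per_class metric only needs a label map to size its
# result vector (one entry per label, as the assertions above check). This is an illustrative
# assumption, not the actual fixture from this test suite, and the attribute names below are
# guesses; it is deliberately named differently so it does not shadow the real helper.
class DummyDatasetSketch:
    def __init__(self, label_map):
        self.label_map = label_map

    @property
    def metadata(self):
        # per-class metrics read the label map from dataset metadata
        return {'label_map': self.label_map}

    @property
    def labels(self):
        return self.label_map

    def __len__(self):
        # no dataset items are needed: annotations and predictions are fed explicitly
        return 0
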
def test_accuracy_on_prediction_container_with_several_suitable_representations_raise_config_error_exception(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ContainerPrediction({
        'prediction1': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0]),
        'prediction2': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])
    })]
    config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}
    dispatcher = MetricsExecutor(config, None)

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

def test_accuracy_with_wrong_annotation_type_raise_config_error_exception(self):
    annotations = [DetectionAnnotation('identifier', 3)]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

def test_accuracy_with_unsupported_annotation_type_as_annotation_source_for_container_raises_config_error(self):
    annotations = [ContainerAnnotation({'annotation': DetectionAnnotation('identifier', 3)})]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'annotation'}], None)

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

def test_classification_accuracy_result_for_batch_1_with_2_metrics(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    dispatcher = MetricsExecutor([
        {'name': 'top1', 'type': 'accuracy', 'top_k': 1},
        {'name': 'top3', 'type': 'accuracy', 'top_k': 3}
    ], None)

    metric_result = dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)
    expected_metric_result = [
        PerImageMetricResult('top1', 'accuracy', 1.0, 'higher-better'),
        PerImageMetricResult('top3', 'accuracy', 1.0, 'higher-better')
    ]

    assert len(metric_result) == 1
    assert 0 in metric_result
    assert len(metric_result[0]) == 2
    assert metric_result[0][0] == expected_metric_result[0]
    assert metric_result[0][1] == expected_metric_result[1]

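# For reference, the assertions above imply that update_metrics_on_batch returns per-image
# results keyed by batch position, roughly:
#   {0: [PerImageMetricResult('top1', 'accuracy', 1.0, 'higher-better'),
#        PerImageMetricResult('top3', 'accuracy', 1.0, 'higher-better')]}
# (structure inferred from this test, not from library documentation).
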
def test_complete_accuracy_with_container_sources(self):
    annotations = [ContainerAnnotation({'a': ClassificationAnnotation('identifier', 3)})]
    predictions = [ContainerPrediction({'p': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
    config = [{
        'type': 'accuracy',
        'top_k': 1,
        'annotation_source': 'a',
        'prediction_source': 'p'
    }]
    dispatcher = MetricsExecutor(config, None)

    dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

    for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
        assert evaluation_result.name == 'accuracy'
        assert evaluation_result.evaluated_value == pytest.approx(1.0)
        assert evaluation_result.reference_value is None
        assert evaluation_result.threshold is None

def test_accuracy_on_container_with_wrong_annotation_source_name_raise_config_error_exception(self):
    annotations = [ContainerAnnotation({'annotation': ClassificationAnnotation('identifier', 3)})]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'a'}]}
    dispatcher = MetricsExecutor(config, None)

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

def test_config_vector_presenter(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    config = [{'type': 'accuracy', 'top_k': 1, 'presenter': 'print_vector'}]
    dispatcher = MetricsExecutor(config, None)

    dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

    for presenter, _ in dispatcher.iterate_metrics(annotations, predictions):
        assert isinstance(presenter, VectorPrintPresenter)

def test_threshold_is_10_by_config(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ClassificationPrediction('identifier', [5.0, 3.0, 4.0, 1.0])]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 3, 'threshold': 10}], None)

    for _, evaluation_result in dispatcher.iterate_metrics([annotations], [predictions]):
        assert evaluation_result.name == 'accuracy'
        assert evaluation_result.evaluated_value == 0.0
        assert evaluation_result.reference_value is None
        assert evaluation_result.threshold == 10

def test_zero_accuracy_top_3(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ClassificationPrediction('identifier', [5.0, 3.0, 4.0, 1.0])]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 3}], None)

    for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
        assert evaluation_result.name == 'accuracy'
        assert evaluation_result.evaluated_value == 0.0
        assert evaluation_result.reference_value is None
        assert evaluation_result.threshold is None

def test_complete_accuracy_top_3(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ClassificationPrediction('identifier', [1.0, 3.0, 4.0, 2.0])]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 3}], None)

    dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

    for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
        assert evaluation_result.name == 'accuracy'
        assert evaluation_result.evaluated_value == pytest.approx(1.0)
        assert evaluation_result.reference_value is None
        assert evaluation_result.threshold is None

def test_reference_is_10_by_config(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ClassificationPrediction('identifier', [5.0, 3.0, 4.0, 1.0])]
    config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 3, 'reference': 10}]}
    dispatcher = MetricsExecutor(config, None)

    for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
        assert evaluation_result.name == 'accuracy'
        assert evaluation_result.evaluated_value == 0.0
        assert evaluation_result.reference_value == 10
        assert evaluation_result.threshold is None

def test_zero_accuracy(self):
    annotation = [ClassificationAnnotation('identifier', 2)]
    prediction = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}
    dispatcher = MetricsExecutor(config, None)

    for _, evaluation_result in dispatcher.iterate_metrics([annotation], [prediction]):
        assert evaluation_result.name == 'accuracy'
        assert evaluation_result.evaluated_value == 0.0
        assert evaluation_result.reference_value is None
        assert evaluation_result.threshold is None

def test_classification_per_class_accuracy_fully_zero_prediction(self):
    annotation = ClassificationAnnotation('identifier', 0)
    prediction = ClassificationPrediction('identifier', [1.0, 2.0])
    dataset = DummyDataset(label_map={0: '0', 1: '1'})
    dispatcher = MetricsExecutor([{'type': 'accuracy_per_class', 'top_k': 1}], dataset)

    dispatcher.update_metrics_on_batch(range(1), [annotation], [prediction])

    for _, evaluation_result in dispatcher.iterate_metrics([annotation], [prediction]):
        assert evaluation_result.name == 'accuracy_per_class'
        assert len(evaluation_result.evaluated_value) == 2
        assert evaluation_result.evaluated_value[0] == pytest.approx(0.0)
        assert evaluation_result.evaluated_value[1] == pytest.approx(0.0)
        assert evaluation_result.reference_value is None
        assert evaluation_result.threshold is None

def test_accuracy_on_annotation_container_with_several_suitable_representations_config_value_error_exception(self):
    annotations = [ContainerAnnotation({
        'annotation1': ClassificationAnnotation('identifier', 3),
        'annotation2': ClassificationAnnotation('identifier', 3)
    })]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

def test_config_default_presenter(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}
    dispatcher = MetricsExecutor(config, None)

    dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

    for presenter, _ in dispatcher.iterate_metrics(annotations, predictions):
        assert isinstance(presenter, ScalarPrintPresenter)

def test_predictions_loading_with_adapter(self, mocker):
    launcher_config = {'framework': 'dummy', 'loader': 'pickle', 'data_path': '/path'}
    raw_prediction_batch = StoredPredictionBatch({'prediction': np.array([[0, 1]])}, [1], [{}])
    expected_prediction = ClassificationPrediction(1, np.array([0, 1]))
    adapter = ClassificationAdapter({'type': 'classification'})
    mocker.patch(
        'accuracy_checker.launcher.loaders.pickle_loader.PickleLoader.read_pickle',
        return_value=[raw_prediction_batch]
    )

    launcher = DummyLauncher(launcher_config, adapter=adapter)

    assert len(launcher._loader.data) == 1

    prediction = launcher.predict([1])

    assert len(prediction) == 1
    assert isinstance(prediction[0], ClassificationPrediction)
    assert prediction[0].identifier == expected_prediction.identifier
    assert np.array_equal(prediction[0].scores, expected_prediction.scores)
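
# The test above covers the stored-prediction path: DummyLauncher loads pickled
# StoredPredictionBatch entries through PickleLoader (patched here), and ClassificationAdapter
# turns the raw {'prediction': scores} output into ClassificationPrediction objects carrying
# the original identifier and scores. (Flow reconstructed from the assertions above;
# DummyLauncher is assumed to be the test double defined elsewhere in this test suite.)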