def test_classification_per_class_accuracy_prediction_top3(self):
    annotation = [
        ClassificationAnnotation('identifier_1', 1),
        ClassificationAnnotation('identifier_2', 1)
    ]
    prediction = [
        ClassificationPrediction('identifier_1', [1.0, 2.0, 3.0, 4.0]),
        ClassificationPrediction('identifier_2', [2.0, 1.0, 3.0, 4.0])
    ]
    dataset = DummyDataset(label_map={0: '0', 1: '1', 2: '2', 3: '3'})
    dispatcher = MetricsExecutor([{'type': 'accuracy_per_class', 'top_k': 3}], dataset)
    dispatcher.update_metrics_on_batch(range(len(annotation)), annotation, prediction)

    for _, evaluation_result in dispatcher.iterate_metrics(annotation, prediction):
        assert evaluation_result.name == 'accuracy_per_class'
        assert len(evaluation_result.evaluated_value) == 4
        assert evaluation_result.evaluated_value[0] == pytest.approx(0.0)
        assert evaluation_result.evaluated_value[1] == pytest.approx(0.5)
        assert evaluation_result.evaluated_value[2] == pytest.approx(0.0)
        assert evaluation_result.evaluated_value[3] == pytest.approx(0.0)
        assert evaluation_result.reference_value is None
        assert evaluation_result.abs_threshold is None
        assert evaluation_result.rel_threshold is None

def test_classification_accuracy_result_for_batch_2_with_not_ordered_ids(self):
    annotations = [
        ClassificationAnnotation('identifier', 3),
        ClassificationAnnotation('identifier1', 1)
    ]
    predictions = [
        ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0]),
        ClassificationPrediction('identifier2', [1.0, 1.0, 1.0, 4.0])
    ]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)
    metric_result, _ = dispatcher.update_metrics_on_batch([42, 17], annotations, predictions)
    expected_metric_result = [
        PerImageMetricResult('accuracy', 'accuracy', 1.0, 'higher-better'),
        PerImageMetricResult('accuracy', 'accuracy', 0.0, 'higher-better')
    ]

    assert len(metric_result) == 2
    assert 42 in metric_result
    assert len(metric_result[42]) == 1
    assert metric_result[42][0] == expected_metric_result[0]
    assert 17 in metric_result
    assert len(metric_result[17]) == 1
    assert metric_result[17][0] == expected_metric_result[1]

def test_classification_accuracy_result_for_batch_1_with_2_metrics(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    dispatcher = MetricsExecutor([
        {'name': 'top1', 'type': 'accuracy', 'top_k': 1},
        {'name': 'top3', 'type': 'accuracy', 'top_k': 3}
    ], None)
    metric_result, _ = dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)
    expected_metric_result = [
        PerImageMetricResult('top1', 'accuracy', 1.0, 'higher-better'),
        PerImageMetricResult('top3', 'accuracy', 1.0, 'higher-better')
    ]

    assert len(metric_result) == 1
    assert 0 in metric_result
    assert len(metric_result[0]) == 2
    assert metric_result[0][0] == expected_metric_result[0]
    assert metric_result[0][1] == expected_metric_result[1]

def test_complete_accuracy_with_container_sources(self):
    annotations = [ContainerAnnotation({'a': ClassificationAnnotation('identifier', 3)})]
    predictions = [ContainerPrediction({'p': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
    config = [{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'a', 'prediction_source': 'p'}]
    dispatcher = MetricsExecutor(config, None)
    dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

    for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
        assert evaluation_result.name == 'accuracy'
        assert evaluation_result.evaluated_value == pytest.approx(1.0)
        assert evaluation_result.reference_value is None
        assert evaluation_result.abs_threshold is None
        assert evaluation_result.rel_threshold is None

def test_accuracy_with_wrong_prediction_type_raise_config_error_exception(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [DetectionPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

def test_accuracy_on_annotation_container_with_several_suitable_representations_config_value_error_exception(self):
    annotations = [ContainerAnnotation({
        'annotation1': ClassificationAnnotation('identifier', 3),
        'annotation2': ClassificationAnnotation('identifier', 3)
    })]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

def test_config_default_presenter(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    config = [{'type': 'accuracy', 'top_k': 1}]
    dispatcher = MetricsExecutor(config, None)
    dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

    for presenter, _ in dispatcher.iterate_metrics(annotations, predictions):
        assert isinstance(presenter, ScalarPrintPresenter)

def test_zero_accuracy_top_3(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ClassificationPrediction('identifier', [5.0, 3.0, 4.0, 1.0])]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 3}], None)

    for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
        assert evaluation_result.name == 'accuracy'
        assert evaluation_result.evaluated_value == 0.0
        assert evaluation_result.reference_value is None
        assert evaluation_result.abs_threshold is None
        assert evaluation_result.rel_threshold is None

def test_complete_accuracy_top_3(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ClassificationPrediction('identifier', [1.0, 3.0, 4.0, 2.0])]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 3}], None)
    dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

    for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
        assert evaluation_result.name == 'accuracy'
        assert evaluation_result.evaluated_value == pytest.approx(1.0)
        assert evaluation_result.reference_value is None
        assert evaluation_result.abs_threshold is None
        assert evaluation_result.rel_threshold is None

def test_accuracy_on_container_with_wrong_annotation_source_name_raise_config_error_exception(self):
    annotations = [ContainerAnnotation({'annotation': ClassificationAnnotation('identifier', 3)})]
    predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'a'}], None)

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

def test_threshold_is_10_by_config(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ClassificationPrediction('identifier', [5.0, 3.0, 4.0, 1.0])]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 3, 'abs_threshold': 10}], None)

    for _, evaluation_result in dispatcher.iterate_metrics([annotations], [predictions]):
        assert evaluation_result.name == 'accuracy'
        assert evaluation_result.evaluated_value == 0.0
        assert evaluation_result.reference_value is None
        assert evaluation_result.abs_threshold == 10

def test_accuracy_with_unsupported_prediction_type_as_prediction_source_for_container_raises_config_error(self):
    annotations = [ClassificationAnnotation('identifier', 3)]
    predictions = [ContainerPrediction({'prediction': DetectionPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
    dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1, 'prediction_source': 'prediction'}], None)

    with pytest.raises(ConfigError):
        dispatcher.update_metrics_on_batch(range(len(annotations)), annotations, predictions)

def test_classification_per_class_accuracy_fully_zero_prediction(self):
    annotation = ClassificationAnnotation('identifier', 0)
    prediction = ClassificationPrediction('identifier', [1.0, 2.0])
    dataset = DummyDataset(label_map={0: '0', 1: '1'})
    dispatcher = MetricsExecutor([{'type': 'accuracy_per_class', 'top_k': 1}], dataset)
    dispatcher.update_metrics_on_batch(range(1), [annotation], [prediction])

    for _, evaluation_result in dispatcher.iterate_metrics([annotation], [prediction]):
        assert evaluation_result.name == 'accuracy_per_class'
        assert len(evaluation_result.evaluated_value) == 2
        assert evaluation_result.evaluated_value[0] == pytest.approx(0.0)
        assert evaluation_result.evaluated_value[1] == pytest.approx(0.0)
        assert evaluation_result.reference_value is None
        assert evaluation_result.abs_threshold is None
        assert evaluation_result.rel_threshold is None
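

# DummyDataset is used by the per-class accuracy tests above but is not defined in
# this excerpt. If it is not already provided elsewhere in the test module, a minimal
# stand-in along the following lines would be sufficient for these tests. This is an
# assumed sketch, not the project's actual helper: it only exposes label_map-based
# metadata, which is what the accuracy_per_class metric is expected to read here.
class DummyDataset:
    def __init__(self, label_map, background=-1):
        # label_map maps label id to label name; background is an assumed default.
        self.label_map = label_map
        self.background = background

    @property
    def metadata(self):
        # Metadata dictionary for metrics that need dataset label information.
        return {'label_map': self.label_map, 'background_label': self.background}

    @property
    def labels(self):
        # Convenience accessor mirroring the label map stored in metadata.
        return self.metadata.get('label_map', {})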