# Example #1
    def test_classification_accuracy_result_for_batch_2_with_not_ordered_ids(
            self):
        """Per-image accuracy results are keyed by the supplied batch ids,
        even when those ids are arbitrary and not in ascending order."""
        annotations = [
            ClassificationAnnotation('identifier', 3),
            ClassificationAnnotation('identifier1', 1),
        ]
        predictions = [
            ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0]),
            ClassificationPrediction('identifier2', [1.0, 1.0, 1.0, 4.0]),
        ]

        dispatcher = MetricsExecutor([{'type': 'accuracy', 'top_k': 1}], None)
        # Deliberately non-ordered, non-contiguous ids.
        batch_ids = [42, 17]
        metric_result, _ = dispatcher.update_metrics_on_batch(
            batch_ids, annotations, predictions)

        # First image matches its label (argmax == 3), second does not.
        expected_per_id = {
            42: PerImageMetricResult('accuracy', 'accuracy', 1.0,
                                     'higher-better'),
            17: PerImageMetricResult('accuracy', 'accuracy', 0.0,
                                     'higher-better'),
        }
        assert len(metric_result) == 2
        for batch_id, expected in expected_per_id.items():
            assert batch_id in metric_result
            assert len(metric_result[batch_id]) == 1
            assert metric_result[batch_id][0] == expected
# Example #2
    def test_classification_accuracy_result_for_batch_1_with_2_metrics(self):
        """A single image evaluated against two configured accuracy metrics
        yields one result entry per metric, in configuration order."""
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [
            ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0]),
        ]

        metric_configs = [
            {'name': 'top1', 'type': 'accuracy', 'top_k': 1},
            {'name': 'top3', 'type': 'accuracy', 'top_k': 3},
        ]
        dispatcher = MetricsExecutor(metric_configs, None)
        metric_result, _ = dispatcher.update_metrics_on_batch(
            range(len(annotations)), annotations, predictions)

        # Label 3 is the argmax, so both top-1 and top-3 accuracy hit.
        expected_metric_result = [
            PerImageMetricResult('top1', 'accuracy', 1.0, 'higher-better'),
            PerImageMetricResult('top3', 'accuracy', 1.0, 'higher-better'),
        ]
        assert len(metric_result) == 1
        assert 0 in metric_result
        assert len(metric_result[0]) == 2
        for actual, expected in zip(metric_result[0], expected_metric_result):
            assert actual == expected