Example #1
import numpy as np

from tests.helpers import seed_all
from tests.retrieval.helpers import (
    RetrievalMetricTester,
    _concat_tests,
    _default_metric_class_input_arguments,
    _default_metric_class_input_arguments_ignore_index,
    _default_metric_functional_input_arguments,
    _errors_test_class_metric_parameters_default,
    _errors_test_class_metric_parameters_no_pos_target,
    _errors_test_functional_metric_parameters_default,
)
from torchmetrics.functional.retrieval.r_precision import retrieval_r_precision
from torchmetrics.retrieval.r_precision import RetrievalRPrecision

seed_all(42)


def _r_precision(target: np.ndarray, preds: np.ndarray):
    """Reimplementation of R-Precision, as no reliable reference implementation was found.

    A good explanation can be found
    `here <https://web.stanford.edu/class/cs276/handouts/EvaluationNew-handout-1-per.pdf>`_.
    """
    assert target.shape == preds.shape
    assert len(target.shape) == 1  # works only with single-dimension inputs

    if target.sum() > 0:
        # rank documents by descending prediction score
        order_indexes = np.argsort(preds, axis=0)[::-1]
        # count relevant documents among the top-R results,
        # where R is the total number of relevant documents for the query
        relevant = np.sum(target[order_indexes][:target.sum()])
        return relevant * 1.0 / target.sum()
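
As a quick sanity check (not part of the original test file), the helper above can be compared against the functional torchmetrics version on a single made-up query; the data below is purely illustrative:

import numpy as np
import torch

from torchmetrics.functional.retrieval.r_precision import retrieval_r_precision

# toy query with R = 2 relevant documents: R-Precision inspects the top-2
# ranked documents and counts how many of them are relevant
target = np.array([0, 1, 0, 1])
preds = np.array([0.1, 0.9, 0.4, 0.7])

# top-2 by score are indexes 1 and 3, both relevant -> R-Precision = 2 / 2
print(_r_precision(target, preds))  # 1.0

# the functional torchmetrics implementation should agree on the same inputs
print(retrieval_r_precision(torch.tensor(preds), torch.tensor(target)))  # tensor(1.)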
Example #2
from typing import Callable, List

import numpy as np
import pytest
import torch
from torch import Tensor

from tests.helpers import seed_all

seed_all(1337)


def _compute_sklearn_metric(metric: Callable, target: List[np.ndarray],
                            preds: List[np.ndarray], behaviour: str) -> float:
    """Compute the metric separately over every query's predictions and average the results."""
    sk_results = []

    for tgt, prd in zip(target, preds):
        if tgt.sum() == 0:
            # query without relevant documents: apply the requested behaviour
            if behaviour == 'skip':
                pass
            elif behaviour == 'pos':
                sk_results.append(1.0)
            else:
                sk_results.append(0.0)
        else:
            res = metric(tgt, prd)
            sk_results.append(res)

    if len(sk_results) > 0:
        return np.mean(sk_results)
    # every query was skipped: fall back to 0.0
    return 0.0
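
As an illustration (again, not part of the original file), the helper can be exercised with the `_r_precision` function from Example #1 to show how each `behaviour` value handles a query with no relevant documents; the inputs are made up:

import numpy as np

# two toy queries: the first has two relevant documents, the second has none,
# so the second query's contribution depends entirely on `behaviour`
target = [np.array([0, 1, 1, 0]), np.array([0, 0, 0])]
preds = [np.array([0.8, 0.9, 0.1, 0.2]), np.array([0.5, 0.3, 0.9])]

print(_compute_sklearn_metric(_r_precision, target, preds, 'skip'))  # 0.5  (empty query dropped)
print(_compute_sklearn_metric(_r_precision, target, preds, 'pos'))   # 0.75 (empty query scored as 1.0)
print(_compute_sklearn_metric(_r_precision, target, preds, 'neg'))   # 0.25 (empty query scored as 0.0)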