import warnings
from typing import Callable, Optional, Sequence, Union

import pytest
import torch
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import precision_score

from ignite.metrics import Metric, Precision, Recall
from ignite.metrics.metric import reinit__is_reduced


def _test(average):
    pr = Precision(average=average)

    y_pred = torch.rand(10, 5, 18, 16)
    y = torch.randint(0, 4, size=(10, 18, 16)).type(torch.LongTensor)
    pr.update((y_pred, y))
    np_y_pred = y_pred.numpy().argmax(axis=1).ravel()
    np_y = y.numpy().ravel()
    assert pr._type == 'multiclass'
    assert isinstance(pr.compute(), float if average else torch.Tensor)
    pr_compute = pr.compute() if average else pr.compute().numpy()
    sklearn_average_parameter = 'macro' if average else None
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UndefinedMetricWarning)
        assert precision_score(np_y, np_y_pred,
                               average=sklearn_average_parameter) == pytest.approx(pr_compute)

    pr.reset()
    y_pred = torch.rand(10, 7, 20, 12)
    y = torch.randint(0, 6, size=(10, 20, 12)).type(torch.LongTensor)
    pr.update((y_pred, y))
    np_y_pred = y_pred.numpy().argmax(axis=1).ravel()
    np_y = y.numpy().ravel()
    assert pr._type == 'multiclass'
    assert isinstance(pr.compute(), float if average else torch.Tensor)
    pr_compute = pr.compute() if average else pr.compute().numpy()
    sklearn_average_parameter = 'macro' if average else None
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UndefinedMetricWarning)
        assert precision_score(np_y, np_y_pred,
                               average=sklearn_average_parameter) == pytest.approx(pr_compute)
def _test(average):
    pr = Precision(average=average)

    # TODO: y_pred should be binary after 0.1.2 release
    # y_pred = torch.randint(0, 2, size=(10, 12, 10)).type(torch.LongTensor)
    y_pred = torch.rand(10, 12, 10)
    y = torch.randint(0, 2, size=(10, 12, 10)).type(torch.LongTensor)
    pr.update((y_pred, y))
    np_y = y.numpy().ravel()
    # np_y_pred = y_pred.numpy().ravel()
    np_y_pred = (y_pred.numpy().ravel() > 0.5).astype('int')
    assert pr._type == 'binary'
    assert isinstance(pr.compute(), float if average else torch.Tensor)
    pr_compute = pr.compute() if average else pr.compute().numpy()
    assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute)

    pr.reset()
    # TODO: y_pred should be binary after 0.1.2 release
    # y_pred = torch.randint(0, 2, size=(10, 1, 12, 10)).type(torch.LongTensor)
    y_pred = torch.rand(10, 1, 12, 10)
    y = torch.randint(0, 2, size=(10, 1, 12, 10)).type(torch.LongTensor)
    pr.update((y_pred, y))
    np_y = y.numpy().ravel()
    # np_y_pred = y_pred.numpy().ravel()
    np_y_pred = (y_pred.numpy().ravel() > 0.5).astype('int')
    assert pr._type == 'binary'
    assert isinstance(pr.compute(), float if average else torch.Tensor)
    pr_compute = pr.compute() if average else pr.compute().numpy()
    assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute)
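# The TODO comments above note that probabilistic y_pred is accepted only
# until the 0.1.2 release, after which binary inputs are expected. A minimal
# sketch of how a user could binarize probabilities before they reach the
# metric (thresholded_output_transform is a hypothetical helper name, not
# part of the test suite; the transform is applied when the metric is
# attached to an ignite Engine):
def thresholded_output_transform(output):
    # Binarize probabilistic predictions at 0.5.
    y_pred, y = output
    return (y_pred > 0.5).long(), y


precision_binarized = Precision(output_transform=thresholded_output_transform, average=True)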
def _test(average):
    pr = Precision(average=average)

    y_pred = torch.rand(10, 5, 18, 16)
    y = torch.randint(0, 5, size=(10, 18, 16)).long()
    pr.update((y_pred, y))
    num_classes = y_pred.shape[1]
    np_y_pred = y_pred.argmax(dim=1).numpy().ravel()
    np_y = y.numpy().ravel()
    assert pr._type == "multiclass"
    assert isinstance(pr.compute(), float if average else torch.Tensor)
    pr_compute = pr.compute() if average else pr.compute().numpy()
    sk_average_parameter = "macro" if average else None
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UndefinedMetricWarning)
        sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter)
        assert sk_compute == pytest.approx(pr_compute)

    pr.reset()
    y_pred = torch.rand(10, 7, 20, 12)
    y = torch.randint(0, 7, size=(10, 20, 12)).long()
    pr.update((y_pred, y))
    num_classes = y_pred.shape[1]
    np_y_pred = y_pred.argmax(dim=1).numpy().ravel()
    np_y = y.numpy().ravel()
    assert pr._type == "multiclass"
    assert isinstance(pr.compute(), float if average else torch.Tensor)
    pr_compute = pr.compute() if average else pr.compute().numpy()
    sk_average_parameter = "macro" if average else None
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UndefinedMetricWarning)
        sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter)
        assert sk_compute == pytest.approx(pr_compute)

    # Batched Updates
    pr.reset()
    y_pred = torch.rand(100, 8, 12, 14)
    y = torch.randint(0, 8, size=(100, 12, 14)).long()

    batch_size = 16
    n_iters = y.shape[0] // batch_size + 1
    for i in range(n_iters):
        idx = i * batch_size
        pr.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))

    num_classes = y_pred.shape[1]
    np_y = y.numpy().ravel()
    np_y_pred = y_pred.argmax(dim=1).numpy().ravel()
    assert pr._type == "multiclass"
    assert isinstance(pr.compute(), float if average else torch.Tensor)
    pr_compute = pr.compute() if average else pr.compute().numpy()
    sk_average_parameter = "macro" if average else None
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UndefinedMetricWarning)
        sk_compute = precision_score(np_y, np_y_pred, labels=range(0, num_classes), average=sk_average_parameter)
        assert sk_compute == pytest.approx(pr_compute)
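# The multilabel tests below rely on a to_numpy_multilabel helper that is not
# defined in this section. A plausible sketch, assuming it reshapes a
# (N, C, ...) tensor into the (num_samples, C) indicator layout expected by
# precision_score(average="samples"):
def to_numpy_multilabel(y):
    # (N, C, ...) -> (C, N, ...) -> (C, N * ...) -> (N * ..., C)
    y = y.transpose(1, 0).cpu().numpy()
    num_classes = y.shape[0]
    return y.reshape((num_classes, -1)).transpose(1, 0)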
def _test(average):
    pr = Precision(average=average, is_multilabel=True)

    y_pred = torch.randint(0, 2, size=(10, 5, 18, 16))
    y = torch.randint(0, 2, size=(10, 5, 18, 16)).long()
    pr.update((y_pred, y))
    np_y_pred = to_numpy_multilabel(y_pred)
    np_y = to_numpy_multilabel(y)
    assert pr._type == "multilabel"
    pr_compute = pr.compute() if average else pr.compute().mean().item()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UndefinedMetricWarning)
        assert precision_score(np_y, np_y_pred, average="samples") == pytest.approx(pr_compute)

    pr.reset()
    y_pred = torch.randint(0, 2, size=(10, 4, 20, 23))
    y = torch.randint(0, 2, size=(10, 4, 20, 23)).long()
    pr.update((y_pred, y))
    np_y_pred = to_numpy_multilabel(y_pred)
    np_y = to_numpy_multilabel(y)
    assert pr._type == "multilabel"
    pr_compute = pr.compute() if average else pr.compute().mean().item()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UndefinedMetricWarning)
        assert precision_score(np_y, np_y_pred, average="samples") == pytest.approx(pr_compute)

    # Batched Updates
    pr.reset()
    y_pred = torch.randint(0, 2, size=(100, 5, 12, 14))
    y = torch.randint(0, 2, size=(100, 5, 12, 14)).long()

    batch_size = 16
    n_iters = y.shape[0] // batch_size + 1
    for i in range(n_iters):
        idx = i * batch_size
        pr.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size]))

    np_y = to_numpy_multilabel(y)
    np_y_pred = to_numpy_multilabel(y_pred)
    assert pr._type == "multilabel"
    pr_compute = pr.compute() if average else pr.compute().mean().item()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UndefinedMetricWarning)
        assert precision_score(np_y, np_y_pred, average="samples") == pytest.approx(pr_compute)
def _test(average):
    pr = Precision(average=average, is_multilabel=True)

    y_pred = torch.randint(0, 2, size=(20, 5))
    y = torch.randint(0, 2, size=(20, 5)).type(torch.LongTensor)
    pr.update((y_pred, y))
    np_y_pred = y_pred.numpy()
    np_y = y.numpy()
    assert pr._type == 'multilabel'
    pr_compute = pr.compute() if average else pr.compute().mean().item()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UndefinedMetricWarning)
        assert precision_score(np_y, np_y_pred, average='samples') == pytest.approx(pr_compute)

    pr.reset()
    y_pred = torch.randint(0, 2, size=(10, 4))
    y = torch.randint(0, 2, size=(10, 4)).type(torch.LongTensor)
    pr.update((y_pred, y))
    np_y_pred = y_pred.numpy()
    np_y = y.numpy()
    assert pr._type == 'multilabel'
    pr_compute = pr.compute() if average else pr.compute().mean().item()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UndefinedMetricWarning)
        assert precision_score(np_y, np_y_pred, average='samples') == pytest.approx(pr_compute)

    # Batched Updates
    pr.reset()
    y_pred = torch.randint(0, 2, size=(100, 4))
    y = torch.randint(0, 2, size=(100, 4)).type(torch.LongTensor)

    batch_size = 16
    n_iters = y.shape[0] // batch_size + 1
    for i in range(n_iters):
        idx = i * batch_size
        pr.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size]))

    np_y = y.numpy()
    np_y_pred = y_pred.numpy()
    assert pr._type == 'multilabel'
    pr_compute = pr.compute() if average else pr.compute().mean().item()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UndefinedMetricWarning)
        assert precision_score(np_y, np_y_pred, average='samples') == pytest.approx(pr_compute)
class FbetaScore(Metric):
    def __init__(
        self,
        beta: int = 1,
        output_transform: Callable = lambda x: x,
        average: str = "macro",
        is_multilabel: bool = False,
        device: Optional[Union[str, torch.device]] = None,
    ):
        self._beta = beta
        self._average = average
        _average_flag = self._average != "macro"
        self._precision = Precision(
            output_transform=output_transform,
            average=_average_flag,
            is_multilabel=is_multilabel,
            device=device,
        )
        self._recall = Recall(
            output_transform=output_transform,
            average=_average_flag,
            is_multilabel=is_multilabel,
            device=device,
        )
        super(FbetaScore, self).__init__(output_transform=output_transform, device=device)

    @reinit__is_reduced
    def reset(self) -> None:
        self._precision.reset()
        self._recall.reset()

    def compute(self) -> torch.Tensor:
        precision_val = self._precision.compute()
        recall_val = self._recall.compute()
        fbeta_val = (
            (1.0 + self._beta ** 2)
            * precision_val
            * recall_val
            / (self._beta ** 2 * precision_val + recall_val + 1e-15)
        )
        if self._average == "macro":
            fbeta_val = torch.mean(fbeta_val).item()
        return fbeta_val

    @reinit__is_reduced
    def update(self, output: Sequence[torch.Tensor]) -> None:
        self._precision.update(output)
        self._recall.update(output)
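# A minimal usage sketch for FbetaScore (shapes and values are illustrative,
# not taken from the test suite; assumes the imports at the top of this
# section): feed (y_pred, y) pairs through update() and read the aggregated
# score from compute().
fbeta = FbetaScore(beta=2)
y_pred = torch.rand(8, 4)                   # probabilities over 4 classes
y = torch.randint(0, 4, size=(8,)).long()   # integer class targets
fbeta.update((y_pred, y))
fbeta_value = fbeta.compute()               # Python float under average="macro"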
def _test(average):
    pr = Precision(average=average)

    y_pred = torch.randint(0, 2, size=(10, 12, 10))
    y = torch.randint(0, 2, size=(10, 12, 10)).type(torch.LongTensor)
    pr.update((y_pred, y))
    np_y = y.numpy().ravel()
    np_y_pred = y_pred.numpy().ravel()
    assert pr._type == 'binary'
    assert isinstance(pr.compute(), float if average else torch.Tensor)
    pr_compute = pr.compute() if average else pr.compute().numpy()
    assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute)

    pr.reset()
    y_pred = torch.randint(0, 2, size=(10, 1, 12, 10))
    y = torch.randint(0, 2, size=(10, 1, 12, 10)).type(torch.LongTensor)
    pr.update((y_pred, y))
    np_y = y.numpy().ravel()
    np_y_pred = y_pred.numpy().ravel()
    assert pr._type == 'binary'
    assert isinstance(pr.compute(), float if average else torch.Tensor)
    pr_compute = pr.compute() if average else pr.compute().numpy()
    assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute)

    # Batched Updates
    pr.reset()
    y_pred = torch.randint(0, 2, size=(100, 12, 10))
    y = torch.randint(0, 2, size=(100, 12, 10)).type(torch.LongTensor)

    batch_size = 16
    n_iters = y.shape[0] // batch_size + 1
    for i in range(n_iters):
        idx = i * batch_size
        pr.update((y_pred[idx:idx + batch_size], y[idx:idx + batch_size]))

    np_y = y.numpy().ravel()
    np_y_pred = y_pred.numpy().ravel()
    assert pr._type == 'binary'
    assert isinstance(pr.compute(), float if average else torch.Tensor)
    pr_compute = pr.compute() if average else pr.compute().numpy()
    assert precision_score(np_y, np_y_pred, average='binary') == pytest.approx(pr_compute)
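# The _test helpers above are presumably invoked from pytest wrappers that
# exercise both averaging modes over several random draws; a minimal driver
# sketch (the wrapper name is ours):
def test_precision_on_random_inputs():
    for _ in range(5):
        _test(average=True)
        _test(average=False)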
def test_binary_shapes():
    precision = Precision(average=True)

    y = torch.LongTensor([1, 0])
    y_pred = torch.FloatTensor([0.9, 0.2])
    y_pred = y_pred.unsqueeze(1)
    indices = torch.max(torch.cat([1.0 - y_pred, y_pred], dim=1), dim=1)[1]
    precision.update((y_pred, y))
    assert precision.compute() == pytest.approx(
        precision_score(y.data.numpy(), indices.data.numpy(), average='macro'))
    assert precision.compute() == 1.0

    y = torch.LongTensor([[1], [0]])
    y_pred = torch.FloatTensor([[0.9], [0.2]])
    indices = torch.max(torch.cat([1.0 - y_pred, y_pred], dim=1), dim=1)[1]
    precision.reset()
    precision.update((y_pred, y))
    assert precision.compute() == pytest.approx(
        precision_score(y.data.numpy(), indices.data.numpy(), average='macro'))
    assert precision.compute() == 1.0
def test_compute():
    precision = Precision()

    # eye(4) predicts class i for sample i; with all targets equal to 1,
    # only class 1's single prediction is correct.
    y_pred = torch.eye(4)
    y = torch.ones(4).type(torch.LongTensor)
    precision.update((y_pred, y))

    results = list(precision.compute())
    assert results[0] == 0.0
    assert results[1] == 1.0
    assert results[2] == 0.0
    assert results[3] == 0.0

    # Two updates with the same predictions but opposite targets: each class
    # is predicted twice and correct exactly once, so per-class precision is 0.5.
    precision.reset()
    y_pred = torch.eye(2)
    y = torch.ones(2).type(torch.LongTensor)
    precision.update((y_pred, y))
    y = torch.zeros(2).type(torch.LongTensor)
    precision.update((y_pred, y))

    results = list(precision.compute())
    assert results[0] == 0.5
    assert results[1] == 0.5