from typing import Optional

import torch

# `_confusion_matrix_compute` is assumed to come from the surrounding
# confusion-matrix module of the same library.


def _cohen_kappa_compute(confmat: torch.Tensor,
                         weights: Optional[str] = None) -> torch.Tensor:
    confmat = _confusion_matrix_compute(confmat)
    n_classes = confmat.shape[0]
    # marginal sums and the expected counts under chance agreement
    sum0 = confmat.sum(dim=0, keepdim=True)
    sum1 = confmat.sum(dim=1, keepdim=True)
    expected = sum1 @ sum0 / sum0.sum()  # outer product of the marginals

    if weights is None:
        # unweighted kappa: every off-diagonal (disagreement) cell gets weight 1
        w_mat = torch.ones_like(confmat).flatten()
        w_mat[::n_classes + 1] = 0  # zero out the diagonal entries
        w_mat = w_mat.reshape(n_classes, n_classes)
    elif weights in ("linear", "quadratic"):
        # weight each cell by the (absolute or squared) distance between class indices
        w_mat = torch.zeros_like(confmat)
        w_mat += torch.arange(n_classes,
                              dtype=w_mat.dtype,
                              device=w_mat.device)
        if weights == "linear":
            w_mat = torch.abs(w_mat - w_mat.T)
        else:
            w_mat = torch.pow(w_mat - w_mat.T, 2.0)
    else:
        raise ValueError(
            f"Received {weights} for argument ``weights`` but should be either"
            " None, 'linear' or 'quadratic'")

    # kappa = 1 - weighted observed disagreement / weighted expected disagreement
    k = torch.sum(w_mat * confmat) / torch.sum(w_mat * expected)
    return 1 - k
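For orientation, here is a minimal usage sketch (not part of the original snippet): the bincount-based confusion matrix below is only an illustrative stand-in for the library's own update helper, and `_cohen_kappa_compute` is called exactly as defined above.

# Hedged usage sketch: the bincount construction stands in for the
# library's own confusion-matrix update helper.
preds = torch.tensor([0, 1, 0, 0])
target = torch.tensor([1, 1, 0, 0])
num_classes = 2
confmat = torch.bincount(target * num_classes + preds,
                         minlength=num_classes ** 2).reshape(num_classes, num_classes)
print(_cohen_kappa_compute(confmat))             # tensor(0.5000), unweighted
print(_cohen_kappa_compute(confmat, "linear"))   # linear weighting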
Example #2
    def compute(self) -> Tensor:
        """Computes confusion matrix.

        Returns:
            If ``multilabel=False`` this will be a ``[n_classes, n_classes]`` tensor and if ``multilabel=True``
            this will be a ``[n_classes, 2, 2]`` tensor.
        """
        return _confusion_matrix_compute(self.confmat, self.normalize)
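The method above belongs to the library's confusion-matrix metric class; a rough usage sketch follows. The class name, constructor arguments and import path are assumptions that depend on the torchmetrics / pytorch-lightning version in use.

# Hedged sketch; import path and constructor signature vary across versions.
import torch
from torchmetrics import ConfusionMatrix  # assumed import path

metric = ConfusionMatrix(num_classes=3)
metric.update(torch.tensor([0, 1, 2, 2]),   # predictions
              torch.tensor([0, 2, 2, 1]))   # targets
print(metric.compute())  # [3, 3] tensor of counts (rows: target, cols: prediction)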
Example #3
from typing import Optional

import torch
from torch import Tensor

# `_cohen_kappa_update` and `_confusion_matrix_compute` are assumed to come
# from the same module of the library.


def _cohen_kappa_compute(confmat: Tensor,
                         weights: Optional[str] = None) -> Tensor:
    """Computes Cohen's kappa based on the weighting type.

    Args:
        confmat: Confusion matrix without normalization
        weights: Weighting type to calculate the score. Choose from:

            - ``None``: no weighting
            - ``'linear'``: linear weighting
            - ``'quadratic'``: quadratic weighting

    Example:
        >>> target = torch.tensor([1, 1, 0, 0])
        >>> preds = torch.tensor([0, 1, 0, 0])
        >>> confmat = _cohen_kappa_update(preds, target, num_classes=2)
        >>> _cohen_kappa_compute(confmat)
        tensor(0.5000)
    """

    confmat = _confusion_matrix_compute(confmat)
    confmat = confmat.float() if not confmat.is_floating_point() else confmat
    n_classes = confmat.shape[0]
    sum0 = confmat.sum(dim=0, keepdim=True)
    sum1 = confmat.sum(dim=1, keepdim=True)
    expected = sum1 @ sum0 / sum0.sum()  # outer product

    if weights is None:
        w_mat = torch.ones_like(confmat).flatten()
        w_mat[::n_classes + 1] = 0
        w_mat = w_mat.reshape(n_classes, n_classes)
    elif weights in ("linear", "quadratic"):
        w_mat = torch.zeros_like(confmat)
        w_mat += torch.arange(n_classes,
                              dtype=w_mat.dtype,
                              device=w_mat.device)
        if weights == "linear":
            w_mat = torch.abs(w_mat - w_mat.T)
        else:
            w_mat = torch.pow(w_mat - w_mat.T, 2.0)
    else:
        raise ValueError(
            f"Received {weights} for argument ``weights`` but should be either"
            " None, 'linear' or 'quadratic'")

    k = torch.sum(w_mat * confmat) / torch.sum(w_mat * expected)
    return 1 - k
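To make the weighting schemes concrete, here is a small illustrative sketch (not from the original source) that builds the weight matrices the same way the function does, for three classes.

# Illustrative weight matrices for n_classes = 3, mirroring the logic above.
import torch

n_classes = 3
base = torch.zeros(n_classes, n_classes) + torch.arange(n_classes, dtype=torch.float)
linear = torch.abs(base - base.T)        # tensor([[0., 1., 2.],
                                         #         [1., 0., 1.],
                                         #         [2., 1., 0.]])
quadratic = torch.pow(base - base.T, 2)  # tensor([[0., 1., 4.],
                                         #         [1., 0., 1.],
                                         #         [4., 1., 0.]])
# With weights=None the matrix is simply 1 everywhere except a zero diagonal.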
    def compute(self) -> torch.Tensor:
        """Computes the confusion matrix."""
        return _confusion_matrix_compute(self.confmat, self.normalize)