from typing import Optional, Tuple

import torch


def classwise_accuracy(
    output: torch.Tensor,
    target: torch.Tensor,
    num_classes: Optional[int] = None,
    topk: int = 1,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Computes the precision@k for each class respectively

    Args:
        output (:obj:`torch.Tensor`): The logits or probs as the classifier output
        target (:obj:`torch.Tensor`): The ground truth labels
        num_classes (`int`): The number of classes
        topk (`int`): The k to compute precision@k for

    Returns:
        accuracy (:obj:`torch.Tensor`): The accuracy, i.e. the precision@k for
            each class
        counts (:obj:`torch.Tensor`): The number of data points of each class
    """
    if num_classes is None:
        num_classes = 0  # let bincount infer the number of classes from the data
    _, pred = output.topk(topk, dim=1, largest=True, sorted=True)
    # 1.0 where any of the sample's top-k predictions matches the target, else 0.0.
    correct = pred.T.eq(target.view(-1)).sum(dim=0).bool().float()
    counts = target.bincount(minlength=num_classes)
    # Weighted bincount sums correct hits per class; classes with no samples yield NaN.
    accuracy = target.bincount(weights=correct, minlength=num_classes) / counts
    # classes = torch.arange(counts.size(0))
    return accuracy, counts  # , classes
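A quick way to exercise the helper above (the tensor values are made up for illustration; four samples, three classes):

logits = torch.tensor([[2.0, 0.1, 0.3],
                       [0.2, 1.5, 0.1],
                       [0.1, 0.2, 2.2],
                       [1.1, 0.9, 0.0]])
labels = torch.tensor([0, 1, 2, 1])
acc, counts = classwise_accuracy(logits, labels, num_classes=3)
# counts: tensor([1, 2, 1]); per-class accuracy: 1.0, 0.5, 1.0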
Example #2
    def analyze_by_size(self, cluster_class: torch.Tensor, **kwargs) -> list[int]:
        r"""The smallest cluster.

        Args:
            cluster_class (torch.Tensor): Clustering result tensor
                with shape ``(N)``.

        Returns:
            list[int]: Predicted poison cluster classes list with shape ``(1)``
        """
        return [cluster_class.bincount(minlength=self.nb_clusters).argmin().item()]
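The bincount/argmin idea can be sketched in isolation (three clusters assumed here in place of self.nb_clusters):

import torch

cluster_class = torch.tensor([0, 0, 0, 1, 2, 2])
sizes = cluster_class.bincount(minlength=3)  # tensor([3, 1, 2])
poison = [sizes.argmin().item()]             # [1], the smallest cluster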
Example #3
    def forward(  # type: ignore
            self, inputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
        """Compute the mean IoU between logits ``inputs`` and integer label ``targets``."""
        # Hard predictions from the logits, then flatten both tensors to 1-D label vectors.
        inputs = torch.argmax(inputs, dim=1)
        inputs = inputs.byte().flatten()
        targets = targets.byte().flatten()

        if self.ignore_index is not None:
            is_not_ignore = targets != self.ignore_index
            inputs = inputs[is_not_ignore]
            targets = targets[is_not_ignore]

        intersection = inputs[inputs == targets]
        area_intersection = intersection.bincount(minlength=self.num_classes)

        bincount_pred = inputs.bincount(minlength=self.num_classes)
        bincount_true = targets.bincount(minlength=self.num_classes)

        area_union = bincount_pred + bincount_true - area_intersection

        mean_iou = torch.mean(area_intersection / (area_union + self.eps))
        return mean_iou
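A minimal sketch of the same bincount-based IoU bookkeeping on toy data (two classes assumed, eps omitted):

import torch

preds   = torch.tensor([0, 0, 1, 1])
targets = torch.tensor([0, 1, 1, 1])
inter = preds[preds == targets].bincount(minlength=2)                        # tensor([1, 2])
union = preds.bincount(minlength=2) + targets.bincount(minlength=2) - inter  # tensor([2, 3])
# per-class IoU: 0.5 and 0.667; mean IoU is roughly 0.583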
Example #4
    def fit(self, X: torch.Tensor) -> None:
        """
        Update the probs based on the observed counts. Without a prior this is the
        maximum likelihood estimate, i.e. counts / total; the prior term below adds
        Dirichlet (additive) smoothing to the raw counts.

        :param X: a 1-D tensor of emissions.

        >>> CategoricalModel(3).fit(torch.tensor([0, 0, 1, 1, 2, 0]))
        """
        counts = X.bincount(minlength=self.probs.shape[0]).float()
        self.probs = (counts + self.prior) / (counts.sum() + self.prior.sum())
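With a uniform prior of ones (assumed here; the real self.prior may differ), the update behaves like additive smoothing over the raw counts:

import torch

X = torch.tensor([0, 0, 1, 1, 2, 0])
prior = torch.ones(3)
counts = X.bincount(minlength=3).float()                 # tensor([3., 2., 1.])
probs = (counts + prior) / (counts.sum() + prior.sum())  # tensor([0.4444, 0.3333, 0.2222])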
Example #5
def intersection_and_union(preds: torch.Tensor, labels: torch.Tensor,
                           ignore_index=255, n_classes=19):

    assert ignore_index > n_classes, 'ignore_index should be greater than n_classes'

    preds = preds.byte().flatten()
    labels = labels.byte().flatten()

    is_not_ignore = labels != ignore_index
    preds = preds[is_not_ignore]
    labels = labels[is_not_ignore]

    intersection = preds[preds == labels]
    area_intersection = intersection.bincount(minlength=n_classes)

    bincount_preds = preds.bincount(minlength=n_classes)
    bincount_labels = labels.bincount(minlength=n_classes)
    area_union = bincount_preds + bincount_labels - area_intersection

    area_intersection = area_intersection.float().cpu().numpy()
    area_union = area_union.float().cpu().numpy()

    return area_intersection, area_union
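Possible usage on a tiny 2x2 prediction/label pair (n_classes reduced to 3 for illustration):

preds  = torch.tensor([[0, 1], [2, 2]])
labels = torch.tensor([[0, 1], [2, 255]])  # 255 marks an ignored pixel
inter, union = intersection_and_union(preds, labels, ignore_index=255, n_classes=3)
iou = inter / union  # per-class IoU as a NumPy array, here all 1.0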
Example #6
    def analyze_by_relative_size(self, cluster_class: torch.Tensor,
                                 size_threshold: float = 0.35,
                                 **kwargs) -> list[int]:
        r"""Small clusters whose proportion is smaller than :attr:`size_threshold`.

        Args:
            cluster_class (torch.Tensor): Clustering result tensor
                with shape ``(N)``.
            size_threshold (float): Defaults to ``0.35``.

        Returns:
            list[int]: Predicted poison cluster classes list with shape ``(K)``
        """
        relative_size = cluster_class.bincount(minlength=self.nb_clusters) / len(cluster_class)
        return torch.arange(self.nb_clusters)[relative_size < size_threshold].tolist()
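The relative-size filter in isolation, with three clusters and the default 0.35 threshold assumed:

import torch

cluster_class = torch.tensor([0, 0, 0, 0, 0, 0, 0, 1, 2, 2])
relative_size = cluster_class.bincount(minlength=3) / len(cluster_class)  # tensor([0.7, 0.1, 0.2])
poison = torch.arange(3)[relative_size < 0.35].tolist()                   # [1, 2]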