def test_class_reduce():
    num = torch.randint(1, 10, (100,)).float()
    denom = torch.randint(10, 20, (100,)).float()
    weights = torch.randint(1, 100, (100,)).float()

    assert torch.allclose(class_reduce(num, denom, weights, 'micro'), torch.sum(num) / torch.sum(denom))
    assert torch.allclose(class_reduce(num, denom, weights, 'macro'), torch.mean(num / denom))
    assert torch.allclose(
        class_reduce(num, denom, weights, 'weighted'), torch.sum(num / denom * (weights / torch.sum(weights)))
    )
    assert torch.allclose(class_reduce(num, denom, weights, 'none'), num / denom)
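
# A small worked illustration of the reductions checked above, assuming ``class_reduce`` is
# importable as in the test. The values are hypothetical and shown only to make the four
# reduction modes concrete; they are not part of the test itself:
#
#     num, denom, weights = torch.tensor([1., 3.]), torch.tensor([2., 4.]), torch.tensor([1., 3.])
#     class_reduce(num, denom, weights, 'micro')     # (1 + 3) / (2 + 4)         -> 0.6667
#     class_reduce(num, denom, weights, 'macro')     # mean(0.5, 0.75)           -> 0.6250
#     class_reduce(num, denom, weights, 'weighted')  # 0.5 * 1/4 + 0.75 * 3/4    -> 0.6875
#     class_reduce(num, denom, weights, 'none')      # per-class ratio           -> [0.5, 0.75]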
def precision_recall(
        pred: torch.Tensor,
        target: torch.Tensor,
        num_classes: Optional[int] = None,
        class_reduction: str = 'micro',
        return_support: bool = False,
        return_state: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Computes the precision and recall scores

    Args:
        pred: predicted labels
        target: ground-truth labels
        num_classes: number of classes
        class_reduction: method to reduce metric score over labels

            - ``'micro'``: calculate metrics globally (default)
            - ``'macro'``: calculate metrics for each label, and find their unweighted mean.
            - ``'weighted'``: calculate metrics for each label, and find their weighted mean.
            - ``'none'``: returns calculated metric per class

        return_support: returns the support for each class, needed for fbeta/f1 calculations
        return_state: returns an internal state that can be ddp reduced
            before doing the final calculation

    Return:
        Tensor with precision and recall. If ``return_support=True``, the per-class
        support is returned as well; if ``return_state=True``, a dict of the raw
        per-class counts is returned instead.

    Example:

        >>> x = torch.tensor([0, 1, 2, 3])
        >>> y = torch.tensor([0, 2, 2, 2])
        >>> precision_recall(x, y, class_reduction='macro')
        (tensor(0.5000), tensor(0.3333))

    """
    tps, fps, tns, fns, sups = stat_scores_multiple_classes(pred=pred, target=target, num_classes=num_classes)

    precision = class_reduce(tps, tps + fps, sups, class_reduction=class_reduction)
    recall = class_reduce(tps, tps + fns, sups, class_reduction=class_reduction)
    if return_state:
        return {'tps': tps, 'fps': fps, 'fns': fns, 'sups': sups}
    if return_support:
        return precision, recall, sups
    return precision, recall
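
# Sketch of the ``return_support`` / ``return_state`` paths, reusing the inputs from the
# docstring example above (illustrative only, not part of the public docs):
#
#     pred, target = torch.tensor([0, 1, 2, 3]), torch.tensor([0, 2, 2, 2])
#     precision, recall, support = precision_recall(pred, target, class_reduction='macro', return_support=True)
#     # ``support`` holds the number of ground-truth samples per class: 1, 0, 3, 0
#
#     state = precision_recall(pred, target, return_state=True)
#     # ``state`` is a dict of per-class counts ('tps', 'fps', 'fns', 'sups') that can be
#     # summed (ddp reduced) across processes before the final reduction is applied.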
def _fbeta_compute(
        true_positives: torch.Tensor,
        predicted_positives: torch.Tensor,
        actual_positives: torch.Tensor,
        beta: float = 1.0,
        average: str = "micro"
) -> torch.Tensor:
    if average == "micro":
        # global precision/recall: aggregate the counts over all classes first
        precision = true_positives.sum().float() / predicted_positives.sum()
        recall = true_positives.sum().float() / actual_positives.sum()
    else:
        # per-class precision/recall, reduced afterwards by ``class_reduce``
        precision = true_positives.float() / predicted_positives
        recall = true_positives.float() / actual_positives

    # F_beta = (1 + beta^2) * precision * recall / (beta^2 * precision + recall)
    num = (1 + beta ** 2) * precision * recall
    denom = beta ** 2 * precision + recall
    return class_reduce(num, denom, weights=actual_positives, class_reduction=average)
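
# Worked sketch of the computation above for beta=1 (micro F1), using the per-class counts
# from the ``precision_recall`` example. The inputs are assumed values for illustration:
#
#     tps = torch.tensor([1., 0., 1., 0.])                   # true positives per class
#     predicted_positives = torch.tensor([1., 1., 1., 1.])   # tps + fps
#     actual_positives = torch.tensor([1., 0., 3., 0.])      # tps + fns (the support)
#     _fbeta_compute(tps, predicted_positives, actual_positives, beta=1.0, average='micro')
#     # micro precision = 2/4, micro recall = 2/4, so F1 = 2 * 0.5 * 0.5 / (0.5 + 0.5) = 0.5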
def accuracy(
        pred: torch.Tensor,
        target: torch.Tensor,
        num_classes: Optional[int] = None,
        class_reduction: str = 'micro',
        return_state: bool = False
) -> torch.Tensor:
    """
    Computes the accuracy classification score

    Args:
        pred: predicted labels
        target: ground truth labels
        num_classes: number of classes
        class_reduction: method to reduce metric score over labels

            - ``'micro'``: calculate metrics globally (default)
            - ``'macro'``: calculate metrics for each label, and find their unweighted mean.
            - ``'weighted'``: calculate metrics for each label, and find their weighted mean.
            - ``'none'``: returns calculated metric per class

        return_state: returns an internal state that can be ddp reduced
            before doing the final calculation

    Return:
        A Tensor with the accuracy score. If ``return_state=True``, a dict of the raw
        per-class counts is returned instead.

    Example:

        >>> x = torch.tensor([0, 1, 2, 3])
        >>> y = torch.tensor([0, 1, 2, 2])
        >>> accuracy(x, y)
        tensor(0.7500)

    """
    tps, fps, tns, fns, sups = stat_scores_multiple_classes(
        pred=pred, target=target, num_classes=num_classes)

    if return_state:
        return {'tps': tps, 'sups': sups}
    return class_reduce(tps, sups, sups, class_reduction=class_reduction)
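
# Sketch of the ``return_state`` path, assuming the states from several ddp processes are
# summed element-wise before the final reduction (a hypothetical workflow for illustration):
#
#     state = accuracy(torch.tensor([0, 1, 2, 3]), torch.tensor([0, 1, 2, 2]), return_state=True)
#     # state['tps'] holds the correct predictions per class and state['sups'] the class counts;
#     # after reducing them across processes, micro accuracy is sum(tps) / sum(sups) = 3 / 4 = 0.75.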