Example #1
    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        # logits: (N, n_classes), labels: (N,)
        assert logits.ndim == 2
        assert labels.ndim == 1
        assert len(logits) == len(labels)

        with torch.no_grad():
            # Predicted class is the index of the largest logit in each row.
            preds = logits.argmax(dim=1)
            correct = torch.eq(preds, labels)
            # Accuracy is the fraction of correct predictions.
            return torch.mean(correct.float())
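
For context, a minimal sketch of how a forward() like this might be wrapped and called. The Accuracy class name and the toy inputs are assumptions for illustration, not part of the original:

import torch
import torch.nn as nn

class Accuracy(nn.Module):
    """Hypothetical wrapper around a forward() like the one above."""

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        assert logits.ndim == 2
        assert labels.ndim == 1
        assert len(logits) == len(labels)
        with torch.no_grad():
            preds = logits.argmax(dim=1)
            return torch.eq(preds, labels).float().mean()

logits = torch.randn(8, 3)          # batch of 8 examples, 3 classes
labels = torch.randint(0, 3, (8,))  # ground-truth class indices
print(Accuracy()(logits, labels))   # scalar tensor in [0, 1]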
Example #2
    def _update_metrics(self, output: torch.FloatTensor,
                        target: torch.IntTensor) -> None:
        """Adds this batch's number of correct predictions to the running metric.

        Args:
            output (torch.FloatTensor): A tensor of predictions, in the format
                (N, n_classes).
            target (torch.IntTensor): Ground truth, in the format (N).
        """
        pred = output.argmax(dim=1, keepdim=True)
        self._metric += pred.eq(target.view_as(pred)).sum().item()
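
A minimal sketch of how a running metric like this might be accumulated over batches and read out at the end of an epoch. The RunningAccuracy class, the _num_samples counter, and the compute() method are illustrative assumptions, not shown in the original:

import torch

class RunningAccuracy:
    def __init__(self) -> None:
        self._metric = 0       # running count of correct predictions
        self._num_samples = 0  # running count of examples seen

    def _update_metrics(self, output: torch.FloatTensor,
                        target: torch.IntTensor) -> None:
        pred = output.argmax(dim=1, keepdim=True)
        self._metric += pred.eq(target.view_as(pred)).sum().item()
        self._num_samples += target.numel()

    def compute(self) -> float:
        return self._metric / max(self._num_samples, 1)

acc = RunningAccuracy()
for _ in range(3):  # pretend these are batches from a DataLoader
    acc._update_metrics(torch.randn(4, 5), torch.randint(0, 5, (4,)))
print(acc.compute())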
Example #3
    def __call__(self, losses: torch.Tensor, logits: torch.FloatTensor,
                 gold_labels: torch.LongTensor) -> None:
        self._all_losses = torch.cat(
            [self._all_losses, losses.to(self._all_losses.device)], dim=0)
        predictions = logits.argmax(-1).to(self._all_predictions.device)
        self._all_predictions = torch.cat(
            [self._all_predictions, predictions], dim=0)
        self._all_gold_labels = torch.cat(
            [self._all_gold_labels,
             gold_labels.to(self._all_gold_labels.device)], dim=0)
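
For context, a minimal sketch of how such an accumulator might be initialized and consumed, assuming the _all_* buffers start as empty tensors. The EvalAccumulator class name, the __init__ shown here, and the accuracy() helper are assumptions for illustration:

import torch

class EvalAccumulator:
    def __init__(self, device: str = 'cpu') -> None:
        # Empty 1-D buffers that grow with every __call__.
        self._all_losses = torch.empty(0, device=device)
        self._all_predictions = torch.empty(0, dtype=torch.long, device=device)
        self._all_gold_labels = torch.empty(0, dtype=torch.long, device=device)

    def __call__(self, losses: torch.Tensor, logits: torch.FloatTensor,
                 gold_labels: torch.LongTensor) -> None:
        self._all_losses = torch.cat(
            [self._all_losses, losses.to(self._all_losses.device)], dim=0)
        predictions = logits.argmax(-1).to(self._all_predictions.device)
        self._all_predictions = torch.cat(
            [self._all_predictions, predictions], dim=0)
        self._all_gold_labels = torch.cat(
            [self._all_gold_labels,
             gold_labels.to(self._all_gold_labels.device)], dim=0)

    def accuracy(self) -> float:
        return self._all_predictions.eq(
            self._all_gold_labels).float().mean().item()

acc = EvalAccumulator()
acc(torch.rand(4), torch.randn(4, 3), torch.randint(0, 3, (4,)))
print(acc.accuracy())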
Example #4
def calculate_accuracy(predictions: torch.FloatTensor,
                       labels: torch.LongTensor) -> torch.FloatTensor:
    assert predictions.ndim == 2, \
        f'predictions tensor must be 2-D, got {predictions.shape}'
    assert labels.ndim == 1, \
        f'labels tensor must be 1-D, got {labels.shape}'
    batch_size, n_classes = predictions.shape
    assert batch_size == labels.shape[0], \
        f'predictions {predictions.shape} and labels {labels.shape} shape mismatch'
    # Take the highest-scoring class per row and count exact matches.
    top_predictions = predictions.argmax(dim=1)
    correct = top_predictions.eq(labels).sum()
    accuracy = correct / batch_size
    return accuracy
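
A quick usage sketch for calculate_accuracy; the tensors below are made-up inputs, not from the original:

import torch

logits = torch.tensor([[2.0, 0.1, -1.0],
                       [0.3, 1.5, 0.2],
                       [0.0, 0.2, 0.9]])
labels = torch.tensor([0, 1, 2])
print(calculate_accuracy(logits, labels))  # tensor(1.) -- all three rows correct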
Example #5
    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor):
        assert logits.ndim == 2
        assert labels.ndim == 1

        with torch.no_grad():
            if self.average == 'macro':
                # Macro-F1: compute a binary F1 for each class, then average.
                preds = logits.argmax(dim=1)
                f1_scores = torch.zeros(self.num_classes, device=logits.device)
                for c in range(self.num_classes):
                    pred = preds == c
                    true = labels == c
                    f1_scores[c] = BinaryFBetaScore.macro_f_beta_score(
                        pred, true, beta=1)
                return torch.mean(f1_scores)
            elif self.average == 'micro':
                raise NotImplementedError
            elif self.average == 'weighted':
                raise NotImplementedError
            else:
                raise ValueError(f'unknown average: {self.average}')
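
The helper BinaryFBetaScore.macro_f_beta_score is not shown above. A minimal sketch of what a binary F-beta score over boolean prediction/target masks typically computes; the binary_f_beta name and the eps smoothing are assumptions, not the original helper:

import torch

def binary_f_beta(pred: torch.BoolTensor, true: torch.BoolTensor,
                  beta: float = 1.0, eps: float = 1e-8) -> torch.Tensor:
    # Standard F-beta from true positives, false positives, false negatives.
    tp = (pred & true).sum().float()
    fp = (pred & ~true).sum().float()
    fn = (~pred & true).sum().float()
    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    beta2 = beta ** 2
    return (1 + beta2) * precision * recall / (beta2 * precision + recall + eps)

pred = torch.tensor([True, False, True, True])
true = torch.tensor([True, True, False, True])
print(binary_f_beta(pred, true))  # ~0.6667 (precision 2/3, recall 2/3)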