def test_sce_equals_ce(self):
    # Ground-truth labels (1-indexed); pred_to_prob converts them to one-hot
    Y = torch.tensor([1, 2, 3], dtype=torch.long)
    Y_s = pred_to_prob(Y, k=4).float()

    sce = SoftCrossEntropyLoss(reduction="none")
    ce = nn.CrossEntropyLoss(reduction="none")
    for _ in range(10):
        Y_ps = torch.rand_like(Y_s)
        Y_ps = Y_ps / Y_ps.sum(dim=1).reshape(-1, 1)
        self.assertTrue((sce(Y_ps, Y_s) == ce(Y_ps, Y - 1)).all())

    sce = SoftCrossEntropyLoss(reduction="sum")
    ce = nn.CrossEntropyLoss(reduction="sum")
    for _ in range(10):
        Y_ps = torch.rand_like(Y_s)
        Y_ps = Y_ps / Y_ps.sum(dim=1).reshape(-1, 1)
        self.assertAlmostEqual(float(sce(Y_ps, Y_s)),
                               float(ce(Y_ps, Y - 1)),
                               places=5)

    sce = SoftCrossEntropyLoss(reduction="mean")
    ce = nn.CrossEntropyLoss(reduction="mean")
    for _ in range(10):
        Y_ps = torch.rand_like(Y_s)
        Y_ps = Y_ps / Y_ps.sum(dim=1).reshape(-1, 1)
        self.assertAlmostEqual(float(sce(Y_ps, Y_s)),
                               float(ce(Y_ps, Y - 1)),
                               places=5)
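
The test above exercises a SoftCrossEntropyLoss module that is not shown on this page. A minimal sketch that is consistent with these tests (inferred from the call sites, not copied from the library; the actual implementation may differ in details):

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftCrossEntropyLoss(nn.Module):
    """Sketch: cross-entropy between (n, k) logits and soft (n, k) targets."""

    def __init__(self, weight=None, reduction="mean"):
        super().__init__()
        self.weight = weight          # optional (k,) tensor of class weights
        self.reduction = reduction    # "none" | "mean" | "sum"

    def forward(self, input, target):
        # input: (n, k) logits; target: (n, k) probabilities summing to 1
        log_probs = F.log_softmax(input, dim=1)
        losses = -(target * log_probs)
        if self.weight is not None:
            losses = losses * self.weight.unsqueeze(0)
        losses = losses.sum(dim=1)
        if self.reduction == "none":
            return losses
        if self.reduction == "sum":
            return losses.sum()
        return losses.mean()

With one-hot targets this reduces exactly to nn.CrossEntropyLoss on the corresponding 0-indexed hard labels, which is what the equality test checks.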
Example #2
def roc_auc_score(gold, probs, ignore_in_gold=[], ignore_in_pred=[]):
    """Compute the ROC AUC score, given the gold labels and predicted probs.

    Args:
        gold: A 1d array-like of gold labels
        probs: A 2d array-like of predicted probabilities
        ignore_in_gold: A list of gold labels; elements with one of these
            gold labels will be ignored.
        ignore_in_pred: A list of predicted labels to ignore; not supported
            for this metric, so it must be empty.

    Returns:
        roc_auc_score: The (float) roc_auc score
    """
    gold = arraylike_to_numpy(gold)

    # Filter out the ignore_in_gold (but not ignore_in_pred)
    # Note the current sub-functions (below) do not handle this...
    if len(ignore_in_pred) > 0:
        raise ValueError("ignore_in_pred not defined for ROC-AUC score.")
    keep = [x not in ignore_in_gold for x in gold]
    gold = gold[keep]
    probs = probs[keep, :]

    # Convert gold to one-hot indicator format, using the k inferred from probs
    gold_s = pred_to_prob(torch.from_numpy(gold), k=probs.shape[1]).numpy()
    return skm.roc_auc_score(gold_s, probs)
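
A quick usage sketch for roc_auc_score (the label values and probabilities here are made up for illustration; assumes numpy is imported and the helpers above are available):

import numpy as np

gold = np.array([1, 2, 1, 2])  # 1-indexed gold labels
probs = np.array([
    [0.9, 0.1],
    [0.2, 0.8],
    [0.7, 0.3],
    [0.4, 0.6],
])  # each row sums to 1
print(roc_auc_score(gold, probs))  # -> 1.0: the probs rank every example correctly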
Example #3
    def _preprocess_Y(self, Y, k):
        """Convert Y to prob labels if necessary"""
        Y = Y.clone()

        # If preds, convert to probs
        if Y.dim() == 1 or Y.shape[1] == 1:
            Y = pred_to_prob(Y.long(), k=k)
        return Y

    def test_perfect_predictions(self):
        Y = torch.tensor([1, 2, 3], dtype=torch.long)
        Y_s = pred_to_prob(Y, k=4)

        sce = SoftCrossEntropyLoss()
        # Guess nearly perfectly
        Y_ps = Y_s.clone().float()
        Y_ps[Y_ps == 1] = 100
        Y_ps[Y_ps == 0] = -100
        self.assertAlmostEqual(float(sce(Y_ps, Y_s)), 0)
Example #5
    def score(self, probs, target_probs):
        """
        """
        metrics = defaultdict(dict)
        for task_idx, _ in enumerate(probs):
            probs_t = torch.tensor(probs[task_idx]).double()
            preds_t = soft_to_hard(probs_t, break_ties='random')

            target_probs_t = torch.tensor(target_probs[task_idx]).double()
            targets = soft_to_hard(target_probs_t, break_ties='random')

            for metric in METRICS_LIST:
                metrics[self.idx_to_task[task_idx]][metric] = metric_score(
                    targets + 1, preds_t + 1, metric, probs=probs_t)

        return metrics
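
The soft_to_hard helper used above is also not shown on this page. A sketch consistent with its use here, assuming it returns 0-indexed hard labels (note that score adds 1 before computing metrics) and that break_ties='random' samples uniformly among tied maxima:

import torch

def soft_to_hard(probs, break_ties="random"):
    """Sketch: map an (n, k) probability tensor to 0-indexed hard predictions."""
    n, k = probs.shape
    preds = torch.empty(n, dtype=torch.long)
    for i in range(n):
        row = probs[i]
        maxima = (row == row.max()).nonzero().flatten()
        if break_ties == "random":
            # Pick uniformly at random among tied maxima
            choice = torch.randint(len(maxima), (1,)).item()
            preds[i] = maxima[choice]
        else:
            # Fallback: take the first (lowest-index) maximum
            preds[i] = maxima[0]
    return preds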
    def test_loss_weights(self):
        # Every row predicts label 2; gold is [1, 1, 2], so two of three are wrong
        Y = torch.tensor([1, 1, 2], dtype=torch.long)
        Y_s = pred_to_prob(Y, k=3)
        Y_ps = torch.tensor([[-100.0, 100.0, -100.0], [-100.0, 100.0, -100.0],
                             [-100.0, 100.0, -100.0]])
        weight1 = torch.tensor([1, 2, 1], dtype=torch.float)
        weight2 = torch.tensor([10, 20, 10], dtype=torch.float)
        ce1 = nn.CrossEntropyLoss(weight=weight1, reduction="none")
        sce1 = SoftCrossEntropyLoss(weight=weight1)
        sce2 = SoftCrossEntropyLoss(weight=weight2)

        self.assertAlmostEqual(float(ce1(Y_ps, Y - 1).mean()),
                               float(sce1(Y_ps, Y_s)),
                               places=3)
        self.assertAlmostEqual(float(sce1(Y_ps, Y_s)) * 10,
                               float(sce2(Y_ps, Y_s)),
                               places=3)
    def test_pred_to_prob(self):
        x = torch.tensor([1, 2, 2, 1])
        target = torch.tensor([[1, 0], [0, 1], [0, 1], [1, 0]])
        self.assertTrue(
            (pred_to_prob(x, 2).float() == target.float()).sum()
            == torch.prod(torch.tensor(target.shape))
        )
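
Finally, pred_to_prob itself, as every example on this page uses it: it maps an (n,) tensor of 1-indexed labels in {1, ..., k} to an (n, k) one-hot probability matrix. A minimal sketch inferred from the call sites above (the library version may differ in dtype handling and validation):

import torch

def pred_to_prob(Y, k):
    """Sketch: one-hot encode 1-indexed hard labels into an (n, k) float tensor."""
    Y_s = torch.zeros((Y.shape[0], k), dtype=torch.float)
    # Label c in {1, ..., k} maps to column c - 1
    Y_s.scatter_(1, (Y.long() - 1).unsqueeze(1), 1.0)
    return Y_s

For example, pred_to_prob(torch.tensor([1, 2, 2, 1]), 2) produces [[1, 0], [0, 1], [0, 1], [1, 0]], matching the expectation in test_pred_to_prob.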