def test_stats_step_batch_reduce(test_pred_batch, test_mask_batch):
    """Reduced statistics should come back as four scalar tensors."""
    predictions = test_pred_batch.argmax(dim=1)
    assert predictions.shape == (4, 4, 4)
    reduced = func.statistics_step(predictions,
                                   test_mask_batch,
                                   num_classes=6,
                                   ignore_index=255,
                                   reduction=True)
    # with reduction enabled each of tp/fp/tn/fn collapses to a 0-dim tensor
    assert len(reduced) == 4
    for stat in reduced:
        assert stat.size() == ()
def test_fscore_empty(test_mask_batch):
    """F1 must be exactly zero when every prediction misses its class."""
    n_classes = 6
    # shift every label by one class so no prediction ever matches the target
    wrong_preds = (test_mask_batch - 1) % n_classes
    assert wrong_preds.shape == (4, 4, 4)
    tp, fp, tn, fn = func.statistics_step(wrong_preds,
                                          test_mask_batch,
                                          num_classes=n_classes,
                                          ignore_index=255,
                                          reduction=False)
    score = func.f1_score(tp, fp, fn, reduce=True)
    assert score == 0.0
    LOG.debug(score)
def test_fscore_best():
    """A prediction identical to its target should score (near) perfect F1."""
    n_classes = 6
    targets = torch.randint(0, 6, size=(4, 4, 4))
    # perfect prediction: an exact copy of the ground truth
    perfect = targets.clone()
    assert perfect.shape == (4, 4, 4)
    tp, fp, tn, fn = func.statistics_step(perfect,
                                          targets,
                                          num_classes=n_classes,
                                          ignore_index=255,
                                          reduction=False)
    per_class = func.f1_score(tp, fp, fn, reduce=False)
    LOG.debug(per_class)
    assert torch.all(per_class[:4] >= 0.99)
def test_fscore_batch_macro(test_pred_batch, test_mask_batch):
    """Macro F1 should agree with sklearn's implementation within EPS."""
    indices = test_pred_batch.argmax(dim=1)
    assert indices.shape == (4, 4, 4)
    tp, fp, tn, fn = func.statistics_step(indices,
                                          test_mask_batch,
                                          num_classes=6,
                                          ignore_index=255,
                                          reduction=False)
    custom = func.f1_score(tp, fp, fn, reduce=True)
    # drop ignored pixels before handing the flat arrays to sklearn
    y_true, y_pred = func.valid_samples(255, test_mask_batch, indices)
    reference = f1_score(y_true.cpu().numpy(),
                         y_pred.cpu().numpy(),
                         average="macro")
    LOG.debug("sklearn: %s - custom: %s", str(reference), str(custom))
    # sklearn does not account for empty classes
    assert abs(reference - custom.item()) <= EPS
def test_recall_batch(test_pred_batch, test_mask_batch):
    """Per-class recall should match sklearn's per-class recall within EPS."""
    indices = test_pred_batch.argmax(dim=1)
    assert indices.shape == (4, 4, 4)
    tp, fp, tn, fn = func.statistics_step(indices,
                                          test_mask_batch,
                                          num_classes=6,
                                          ignore_index=255,
                                          reduction=False)
    custom = func.recall_score(tp, fn, reduce=False)
    # drop ignored pixels before handing the flat arrays to sklearn
    y_true, y_pred = func.valid_samples(255, test_mask_batch, indices)
    reference = recall_score(y_true.cpu().numpy(),
                             y_pred.cpu().numpy(),
                             average=None)
    LOG.debug("sklearn: %s - custom: %s", str(reference), str(custom))
    # sklearn does not account for empty classes
    for expected, actual in zip(reference, custom):
        assert abs(expected - actual.item()) <= EPS
# Example #6
    def update(self, pred: torch.Tensor, target: torch.Tensor) -> None:
        """Accumulate confusion statistics from a new batch of predictions.

        :param pred: prediction batch, yet to be transformed into indices, with size [B, C, H, W]
        :type pred: torch.Tensor
        :param target: true targets, provided as indices, size [B, H, W]
        :type target: torch.Tensor
        """
        # dims assumed: 0=batch, 1=classes, 2/3=spatial.
        # Collapse logits to class indices only when a channel dim is present.
        if len(pred.shape) > 3:
            pred = pred.argmax(dim=1)
        batch_tp, batch_fp, batch_tn, batch_fn = func.statistics_step(
            pred=pred,
            target=target,
            num_classes=self.num_classes,
            ignore_index=self.ignore_index,
            reduction=self.is_micro)
        # running totals across all batches seen so far
        self.tp += batch_tp
        self.fp += batch_fp
        self.tn += batch_tn
        self.fn += batch_fn
def test_miou_batch_macro(big_rand_batch):
    """Macro mIoU should match sklearn's jaccard_score within EPS.

    The prediction is built by corrupting roughly 10% of the ground-truth
    batch with uniform class noise, then the custom mIoU is compared with
    sklearn's macro-averaged Jaccard index on the valid pixels.
    """
    num_classes = 6
    noise_data = torch.randint(0, num_classes, size=big_rand_batch.size())
    # generate a random mask in 0-1, use it with a threshold to substitute a percent of values
    # from the batch with noise data
    change_mask = torch.rand_like(big_rand_batch, dtype=torch.float)
    pred_batch = torch.where(change_mask >= 0.1, big_rand_batch, noise_data)
    tp, fp, tn, fn = func.statistics_step(pred_batch,
                                          big_rand_batch,
                                          # use the variable instead of a hard-coded 6
                                          num_classes=num_classes,
                                          ignore_index=255,
                                          reduction=False)
    miou = func.iou_from_statistics(tp=tp, fp=fp, fn=fn, reduce=True)
    y_true, y_pred = func.valid_samples(255, big_rand_batch, pred_batch)
    skl_iou = jaccard_score(y_true.cpu().numpy(),
                            y_pred.cpu().numpy(),
                            average="macro")
    LOG.debug("sklearn: %s - custom: %s", str(skl_iou), str(miou))
    assert miou.size() == ()
    diff = abs(skl_iou - miou.item())
    assert diff <= EPS