def test_miou_empty(test_mask_batch):
    """mIoU must collapse to a zero scalar when prediction and target never agree."""
    # Shifting every label by one (mod 6) guarantees no pixel matches its target.
    test_pred = (test_mask_batch - 1) % 6
    iou = func.intersection_over_union(
        test_pred, test_mask_batch, num_classes=6, ignore_index=255, reduce=True
    )
    y_true, y_pred = func.valid_samples(255, test_mask_batch, test_pred)
    skl_iou = jaccard_score(y_true.cpu().numpy(), y_pred.cpu().numpy(), average="micro")
    LOG.debug("sklearn: %s - custom: %s", str(skl_iou), str(iou))
    # reduce=True yields a 0-dim tensor
    assert iou.size() == ()
    assert iou == 0
    assert abs(skl_iou - iou.item()) <= EPS
def test_iou_empty(test_mask):
    """Per-class IoU must be all zeros when no prediction matches the target."""
    # Labels shifted by one (mod 6) so every pixel is misclassified.
    test_pred = (test_mask - 1) % 6
    iou = func.intersection_over_union(
        test_pred, test_mask, num_classes=6, ignore_index=255, reduce=False
    )
    y_true, y_pred = func.valid_samples(255, test_mask, test_pred)
    skl_iou = jaccard_score(y_true.cpu().numpy(), y_pred.cpu().numpy(), average=None)
    LOG.debug("sklearn: %s - custom: %s", str(skl_iou), str(iou))
    # reduce=False keeps one score per class
    assert iou.size() == (6,)
    assert torch.all(iou == 0)
    for expected, actual in zip(skl_iou, iou):
        assert abs(expected - actual.item()) <= EPS
def test_fscore_batch_macro(test_pred_batch, test_mask_batch):
    """Macro-averaged F1 on a batch must match sklearn's macro F1."""
    pred_classes = test_pred_batch.argmax(dim=1)
    assert pred_classes.shape == (4, 4, 4)
    tp, fp, tn, fn = func.statistics_step(
        pred_classes, test_mask_batch, num_classes=6, ignore_index=255, reduction=False
    )
    fscore = func.f1_score(tp, fp, fn, reduce=True)
    y_true, y_pred = func.valid_samples(255, test_mask_batch, pred_classes)
    skl_f1 = f1_score(y_true.cpu().numpy(), y_pred.cpu().numpy(), average="macro")
    LOG.debug("sklearn: %s - custom: %s", str(skl_f1), str(fscore))
    # NOTE: sklearn does not account for empty classes
    assert abs(skl_f1 - fscore.item()) <= EPS
def test_miou_perfect(test_mask_batch):
    """A perfect prediction must yield an mIoU of ~1 and agree with sklearn.

    Fix: the original asserted ``iou.size() == ()`` twice; the redundant
    duplicate assertion is removed, all other checks are kept.
    """
    # Cloning the mask produces an exact (perfect) prediction.
    test_pred = test_mask_batch.clone()
    iou = func.intersection_over_union(
        test_pred, test_mask_batch, num_classes=6, ignore_index=255, reduce=True
    )
    y_true, y_pred = func.valid_samples(255, test_mask_batch, test_pred)
    skl_iou = jaccard_score(y_true.cpu().numpy(), y_pred.cpu().numpy(), average="macro")
    LOG.debug("sklearn: %s - custom: %s", str(skl_iou), str(iou))
    # reduce=True collapses the per-class scores to a 0-dim tensor
    assert iou.size() == ()
    assert abs(skl_iou - iou.item()) <= EPS
    # allow for the epsilon used internally to avoid division by zero
    assert torch.all(iou >= (1.0 - EPS))
def test_iou_batch(test_pred_batch, test_mask_batch):
    """Per-class IoU on a batch must match sklearn's unreduced Jaccard scores."""
    pred_classes = test_pred_batch.argmax(dim=1)
    assert pred_classes.shape == (4, 4, 4)
    iou = func.intersection_over_union(
        pred_classes, test_mask_batch, num_classes=6, ignore_index=255, reduce=False
    )
    y_true, y_pred = func.valid_samples(255, test_mask_batch, pred_classes)
    skl_iou = jaccard_score(y_true.cpu().numpy(), y_pred.cpu().numpy(), average=None)
    LOG.debug("sklearn: %s - custom: %s", str(skl_iou), str(iou))
    assert iou[:4].mean() > 0.5
    for expected, actual in zip(skl_iou, iou):
        assert abs(expected - actual.item()) <= EPS
def test_iou_single_image(test_pred, test_mask):
    """Per-class IoU on a single image must match sklearn's per-class Jaccard."""
    # Single image: class dimension is dim 0 (no batch axis).
    pred_classes = test_pred.argmax(dim=0)
    assert pred_classes.shape == (4, 4)
    iou = func.intersection_over_union(
        pred_classes, test_mask, num_classes=6, ignore_index=255, reduce=False
    )
    y_true, y_pred = func.valid_samples(255, test_mask, pred_classes)
    skl_iou = jaccard_score(y_true.cpu().numpy(), y_pred.cpu().numpy(), average=None)
    LOG.debug("sklearn: %s - custom: %s", str(skl_iou), str(iou))
    assert torch.all(iou[1:3] > 0)
    assert iou[0] == 0
    for expected, actual in zip(skl_iou, iou):
        assert abs(expected - actual.item()) <= EPS
def test_iou_perfect(test_mask):
    """A perfect prediction must yield per-class IoU of ~1 for non-empty classes."""
    test_pred = test_mask.clone()
    iou = func.intersection_over_union(
        test_pred, test_mask, num_classes=6, ignore_index=255, reduce=False
    )
    # excluding the last one since empty and accounting for epsilon
    y_true, y_pred = func.valid_samples(255, test_mask, test_pred)
    skl_iou = jaccard_score(y_true.cpu().numpy(), y_pred.cpu().numpy(), average=None)
    LOG.debug("sklearn: %s - custom: %s", str(skl_iou), str(iou))
    assert iou.size() == (6,)
    assert torch.all(iou[:-1] >= 0.999)
    for expected, actual in zip(skl_iou, iou):
        assert abs(expected - actual.item()) <= EPS
def test_recall_batch(test_pred_batch, test_mask_batch):
    """Per-class recall on a batch must match sklearn's unreduced recall."""
    pred_classes = test_pred_batch.argmax(dim=1)
    assert pred_classes.shape == (4, 4, 4)
    tp, fp, tn, fn = func.statistics_step(
        pred_classes, test_mask_batch, num_classes=6, ignore_index=255, reduction=False
    )
    recall = func.recall_score(tp, fn, reduce=False)
    y_true, y_pred = func.valid_samples(255, test_mask_batch, pred_classes)
    skl_recall = recall_score(y_true.cpu().numpy(), y_pred.cpu().numpy(), average=None)
    LOG.debug("sklearn: %s - custom: %s", str(skl_recall), str(recall))
    # NOTE: sklearn does not account for empty classes
    for expected, actual in zip(skl_recall, recall):
        assert abs(expected - actual.item()) <= EPS
def test_miou_batch_macro(big_rand_batch):
    """mIoU computed from accumulated statistics must match sklearn's macro Jaccard."""
    num_classes = 6
    noise_data = torch.randint(0, num_classes, size=big_rand_batch.size())
    # Build a random 0-1 mask and threshold it so that roughly 10% of the
    # batch values get replaced by noise labels.
    change_mask = torch.rand_like(big_rand_batch, dtype=torch.float)
    pred_batch = torch.where(change_mask >= 0.1, big_rand_batch, noise_data)
    tp, fp, tn, fn = func.statistics_step(
        pred_batch, big_rand_batch, num_classes=6, ignore_index=255, reduction=False
    )
    miou = func.iou_from_statistics(tp=tp, fp=fp, fn=fn, reduce=True)
    y_true, y_pred = func.valid_samples(255, big_rand_batch, pred_batch)
    skl_iou = jaccard_score(y_true.cpu().numpy(), y_pred.cpu().numpy(), average="macro")
    LOG.debug("sklearn: %s - custom: %s", str(skl_iou), str(miou))
    # reduce=True yields a 0-dim tensor
    assert miou.size() == ()
    assert abs(skl_iou - miou.item()) <= EPS