Example #1
import pytest
import torch


def test_v1_4_0_deprecated_metrics():
    from pytorch_lightning.metrics.functional.classification import stat_scores_multiple_classes
    with pytest.deprecated_call(match='will be removed in v1.4'):
        stat_scores_multiple_classes(pred=torch.tensor([0, 1]),
                                     target=torch.tensor([0, 1]))

    from pytorch_lightning.metrics.functional.classification import iou
    with pytest.deprecated_call(match='will be removed in v1.4'):
        iou(torch.randint(0, 2, (10, 3, 3)), torch.randint(0, 2, (10, 3, 3)))

    from pytorch_lightning.metrics.functional.classification import recall
    with pytest.deprecated_call(match='will be removed in v1.4'):
        recall(torch.randint(0, 2, (10, 3, 3)),
               torch.randint(0, 2, (10, 3, 3)))

    from pytorch_lightning.metrics.functional.classification import precision
    with pytest.deprecated_call(match='will be removed in v1.4'):
        precision(torch.randint(0, 2, (10, 3, 3)),
                  torch.randint(0, 2, (10, 3, 3)))

    from pytorch_lightning.metrics.functional.classification import precision_recall
    with pytest.deprecated_call(match='will be removed in v1.4'):
        precision_recall(torch.randint(0, 2, (10, 3, 3)),
                         torch.randint(0, 2, (10, 3, 3)))

    # Testing deprecation of class_reduction arg in the *new* precision
    from pytorch_lightning.metrics.functional import precision
    with pytest.deprecated_call(match='will be removed in v1.4'):
        precision(torch.randint(0, 2, (10, )),
                  torch.randint(0, 2, (10, )),
                  class_reduction='micro')

    # Testing deprecation of class_reduction arg in the *new* recall
    from pytorch_lightning.metrics.functional import recall
    with pytest.deprecated_call(match='will be removed in v1.4'):
        recall(torch.randint(0, 2, (10, )),
               torch.randint(0, 2, (10, )),
               class_reduction='micro')

    from pytorch_lightning.metrics.functional.classification import auc
    with pytest.deprecated_call(match='will be removed in v1.4'):
        auc(torch.rand(10, ).sort().values, torch.rand(10, ))

    from pytorch_lightning.metrics.functional.classification import auroc
    with pytest.deprecated_call(match='will be removed in v1.4'):
        auroc(torch.rand(10, ), torch.randint(0, 2, (10, )))

    from pytorch_lightning.metrics.functional.classification import multiclass_auroc
    with pytest.deprecated_call(match='will be removed in v1.4'):
        multiclass_auroc(torch.rand(20, 5).softmax(dim=-1),
                         torch.randint(0, 5, (20, )),
                         num_classes=5)

    from pytorch_lightning.metrics.functional.classification import auc_decorator
    with pytest.deprecated_call(match='will be removed in v1.4'):
        auc_decorator()

    from pytorch_lightning.metrics.functional.classification import multiclass_auc_decorator
    with pytest.deprecated_call(match='will be removed in v1.4'):
        multiclass_auc_decorator()
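
For reference, pytest.deprecated_call passes whenever the body raises a DeprecationWarning (or PendingDeprecationWarning) whose message matches the regex. A minimal self-contained illustration; old_api is a stand-in here, not a real pytorch_lightning function:

import warnings

import pytest


def old_api():
    warnings.warn("old_api will be removed in v1.4", DeprecationWarning)


def test_old_api_warns():
    with pytest.deprecated_call(match='will be removed in v1.4'):
        old_api()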
Example #2
    def update(self, pred: torch.Tensor, target: torch.Tensor):
        # executed individually, non-blocking, on each process/GPU
        BS = pred.shape[0]
        # add 1 so the index ranges from 0 to NUM_CLASSES
        pred = pred.type(torch.int) + 1
        target = target.type(torch.int) + 1
        # NOW class=0 should not induce a loss

        # Set pixels that are predicted but have no label to 0. These pixels don't induce a loss:
        # neither does the IoU of class 0, nor do they count towards the union of the other classes when predicted wrong.
        # pred = pred * (target > 0).type(pred.dtype)
        iou = 0.0

        # we have to do this calculation for each image.
        for b in range(BS):
            with warnings.catch_warnings():
                # suppress deprecation warnings raised by the deprecated metric call
                warnings.simplefilter("ignore")
                TPS, FPS, TNS, FNS, _ = stat_scores_multiple_classes(
                    pred[b], target[b], self._num_classes + 1)
            IoU = TPS[1:] / (TPS[1:] + FPS[1:] + FNS[1:])
            mIoU = IoU[~torch.isnan(IoU)].mean()
            iou += mIoU

        self.iou += iou
        self.total_batches += int(BS)
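
This update() presumably belongs to a Metric subclass. A minimal sketch of plausible scaffolding around it; the class name and state defaults are assumptions, while the state names are taken from the snippet:

import torch
from pytorch_lightning.metrics import Metric  # torchmetrics.Metric in later releases


class MeanIoU(Metric):
    def __init__(self, num_classes):
        super().__init__()
        self._num_classes = num_classes
        # states are synced across processes; per-GPU partial sums are combined with "sum"
        self.add_state("iou", default=torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total_batches", default=torch.tensor(0), dist_reduce_fx="sum")

    def compute(self):
        # average the accumulated per-image mIoU over all images seen
        return self.iou / self.total_batches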
Example #3
def IoU_SCNN(pred, target, num_classes, verbose=False):
	"""[summary]
	IoU = TP ⁄ (TP+FP+FN) simply summed up over the full batch. 
	This is the fastest metric but also the worst one.

	Parameters
	----------
	pred : [torch.tensor]
			BSxD1xD2xD3 , predict class for each pixel. No need to predict the -1 class! element of 0-(num_classes-1)
	target : [torch.tensor]
			BSxD1xD2xD3	, -1 for the invalid pixels that should not induce an error! element of -1-(num_classes-1)
	num_classes : [int]
			invalid class does not count as a class. So lets say targets takes values -1 - 19 then you have 20 classes
	"""

	BS = pred.shape[0]
	# add 1 so the index ranges from 0 to NUM_CLASSES
	pred = pred.type(torch.int) + 1
	target = target.type(torch.int) + 1
	# NOW class=0 should not induce a loss

	# Set pixels that are predicted but have no label to 0. These pixels don't induce a loss:
	# neither does the IoU of class 0, nor do they count towards the union of the other classes when predicted wrong.
	pred = pred * (target > 0).type(pred.dtype) 
	# here the stats can be computed over the whole batch at once
	TPS, FPS, TNS, FNS, _ = stat_scores_multiple_classes(pred, target, num_classes+1)
	IoU = (TPS[1:]).sum() / (TPS[1:] + FPS[1:] + FNS[1:]).sum()
	if verbose:
		print(f'TPS: {TPS}, \nFPS: {FPS}, \nFNS: {FNS}, \nTNS: {TNS}')
		print(f'Inter: {TPS},\nUnion: {TPS + FPS + FNS}')

	return IoU
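
A hypothetical call, with shapes and class count invented; it assumes the deprecated pytorch_lightning import is in scope where IoU_SCNN is defined:

import torch
from pytorch_lightning.metrics.functional.classification import stat_scores_multiple_classes

pred = torch.randint(0, 20, (4, 8, 32, 32))     # BS x D1 x D2 x D3
target = torch.randint(-1, 20, (4, 8, 32, 32))  # -1 marks invalid pixels
print(IoU_SCNN(pred, target, num_classes=20))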
Example #4
def test_stat_scores_multiclass(pred, target, expected_tp, expected_fp,
                                expected_tn, expected_fn):
    tp, fp, tn, fn = stat_scores_multiple_classes(pred, target)

    assert torch.allclose(torch.tensor(expected_tp).to(tp), tp)
    assert torch.allclose(torch.tensor(expected_fp).to(fp), fp)
    assert torch.allclose(torch.tensor(expected_tn).to(tn), tn)
    assert torch.allclose(torch.tensor(expected_fn).to(fn), fn)
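
The fixtures for this test come from a pytest.mark.parametrize decorator that is not shown on this page. A hedged sketch of one possible row, with the stat scores worked out by hand for two classes:

import pytest
import torch

# pred = [1, 0, 1, 1] vs. target = [1, 1, 0, 1]:
#   class 0: 0 TP, 1 FP (index 1), 2 TN, 1 FN (index 2)
#   class 1: 2 TP (indices 0 and 3), 1 FP (index 2), 0 TN, 1 FN (index 1)
@pytest.mark.parametrize(
    ["pred", "target", "expected_tp", "expected_fp", "expected_tn", "expected_fn"],
    [(torch.tensor([1, 0, 1, 1]), torch.tensor([1, 1, 0, 1]),
      [0., 2.], [1., 1.], [2., 0.], [1., 1.])],
)
def test_stat_scores_multiclass(pred, target, expected_tp, expected_fp,
                                expected_tn, expected_fn):
    ...  # body as in Example #4 above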
Example #5
def test_stat_scores_multiclass(pred, target, reduction, expected_tp, expected_fp, expected_tn, expected_fn, expected_support):
    tp, fp, tn, fn, sup = stat_scores_multiple_classes(pred, target, reduction=reduction)

    assert torch.allclose(torch.tensor(expected_tp).to(tp), tp)
    assert torch.allclose(torch.tensor(expected_fp).to(fp), fp)
    assert torch.allclose(torch.tensor(expected_tn).to(tn), tn)
    assert torch.allclose(torch.tensor(expected_fn).to(fn), fn)
    assert torch.allclose(torch.tensor(expected_support).to(sup), sup)
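
Relative to Example #4, this variant also checks sup, the support: the number of ground-truth occurrences of each class. For target = [1, 1, 0, 1] with two classes, the expected support would be [1., 3.] (one sample of class 0, three of class 1).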
Example #6
    def update(self, prediction: torch.Tensor, target: torch.Tensor):
        tps, fps, _, fns, sups = stat_scores_multiple_classes(
            prediction, target, self.n_classes)

        self.true_positive += tps
        self.false_positive += fps
        self.false_negative += fns
        self.support += sups
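
A plausible compute() counterpart for this accumulator (an assumption, not part of the source), deriving per-class precision and recall from the running counts:

    def compute(self):
        # hypothetical: per-class precision/recall from the accumulated stat scores
        eps = 1e-12  # guard against classes that never occur
        precision = self.true_positive / (self.true_positive + self.false_positive + eps)
        recall = self.true_positive / (self.true_positive + self.false_negative + eps)
        return precision, recall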
Example #7
def iIoU_class(pred, target, num_classes, verbose=False):
	"""[summary]
	It is well-known that the global IoU measure is biased toward object instances that cover a large image area. 
	In street scenes with their strong scale variation this can be problematic. 
	Specifically for traffic participants, which are the key classes in our scenario, 
	we aim to evaluate how well the individual instances in the scene are represented in the labeling. 
	To address this, we additionally evaluate the semantic labeling using an 
	
	instance-level intersection-over-union metric iIoU = iTP ⁄ (iTP+FP+iFN). 
	Again iTP, FP, and iFN denote the numbers of true positive, false positive, and false negative pixels, respectively. 
	However, in contrast to the standard IoU measure, iTP and iFN are computed by weighting the contribution of each pixel by the ratio of the class’ 
	average instance size to the size of the respective ground truth instance.
	It is important to note here that unlike the instance-level task below, 
	we assume that the methods only yield a standard per-pixel semantic class labeling as output. 
	Therefore, the false positive pixels are not associated with any instance and thus do not require normalization. 
	The final scores, iIoUcategory and iIoUclass, are obtained as the means for the two semantic granularities.


	Parameters
	----------
	pred : [torch.tensor]
			BSxD1xD2xD3 , predict class for each pixel. No need to predict the -1 class! element of 0-(num_classes-1)
	target : [torch.tensor]
			BSxD1xD2xD3	, -1 for the VOID pixels that should not induce an error! element of -1-(num_classes-1)
	num_classes : [int]
			invalid class does not count as a class. So lets say targets takes values -1 - 19 then you have 20 classes
	"""
	
	BS = pred.shape[0]
	# add 1 so the index ranges from 0 to NUM_CLASSES
	pred = pred.type(torch.int) + 1
	target = target.type(torch.int) + 1
	# NOW class=0 should not induce a loss

	# Set pixels that are predicted but have no label to 0. These pixels don't induce a loss:
	# neither does the IoU of class 0, nor do they count towards the union of the other classes when predicted wrong.
	pred = pred * (target > 0).type(pred.dtype) 
	iou_per_image = torch.zeros(BS, device=pred.device)
	
	# we have to do this calculation for each image. 
	for b in range(BS):
		weight = torch.bincount(target[b].flatten())[1:]
		weight = weight / weight.sum()
		w = torch.zeros(num_classes, device=target.device, dtype=weight.dtype)
		w[:weight.shape[0]] = weight
		TPS, FPS, TNS, FNS, _ = stat_scores_multiple_classes(pred[b], target[b], num_classes+1)
		if verbose:
			print(f'TPS: {TPS}, \nFPS: {FPS}, \nFNS: {FNS}, \nTNS: {TNS}')
			print(f'Inter: {TPS},\nUnion: {TPS + FPS + FNS}')
		IoU = (TPS[1:] * w) / ((TPS[1:] * w) + FPS[1:] + (FNS[1:] * w))

		mIoU = IoU[~torch.isnan(IoU)].mean()
		iou_per_image[b] = mIoU
		
	return torch.mean(iou_per_image)  # mean over the batch
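
For intuition, the per-image weight here is just the normalized pixel count of each real class. A toy computation with invented values:

import torch

target_b = torch.tensor([0, 0, 0, 1, 1, 2])      # already shifted by +1, so 0 = void
weight = torch.bincount(target_b.flatten())[1:]  # pixels per real class: tensor([2, 1])
weight = weight / weight.sum()                   # normalized: tensor([0.6667, 0.3333])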
Example #8
    def update(self, pred: torch.Tensor, target: torch.Tensor):
        # executed individually, non-blocking, on each process/GPU
        assert pred.shape == target.shape

        # add 1 so the index ranges from 0 to NUM_CLASSES
        pred = pred.type(torch.int) + 1
        target = target.type(torch.int) + 1
        # NOW class=0 should not induce a loss

        # Set pixels that are predicted but have no label to 0. These pixels don't induce a loss:
        # neither does the IoU of class 0, nor do they count towards the union of the other classes when predicted wrong.
        pred = pred * (target > 0).type(pred.dtype)
        # here the stats can be computed over the whole batch at once
        with warnings.catch_warnings():
            # suppress deprecation warnings raised by the deprecated metric call
            warnings.simplefilter("ignore")
            TPS, FPS, TNS, FNS, _ = stat_scores_multiple_classes(
                pred, target, self._num_classes + 1)

        self.total_inter += (TPS[1:]).sum().type(torch.int)
        self.total_union += (TPS[1:] + FPS[1:] + FNS[1:]).sum().type(torch.int)
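
A plausible compute() counterpart (assumed, not shown in the source) for this intersection/union accumulator:

    def compute(self):
        # hypothetical: overall IoU from the accumulated intersection/union sums
        return self.total_inter.float() / self.total_union.float()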
Example #9
def IoU_class(pred, target, num_classes, verbose=False):
	"""[summary]
	To assess performance, we rely on the standard Jaccard Index, commonly known as the 
	PASCAL VOC intersection-over-union metric IoU = TP ⁄ (TP+FP+FN) [1], 
	where TP, FP, and FN are the numbers of true positive, false positive, and false negative pixels, respectively, 
	determined over the whole test set. 
	Owing to the two semantic granularities, i.e. classes and categories, we report two separate mean performance scores: 
	IoUcategory and IoUclass. 
	In either case, pixels labeled as void do not contribute to the score.

	Parameters
	----------
	pred : [torch.tensor]
			BSxD1xD2xD3 , predict class for each pixel. No need to predict the -1 class! element of 0-(num_classes-1)
	target : [torch.tensor]
			BSxD1xD2xD3	, -1 for the invalid pixels that should not induce an error! element of -1-(num_classes-1)
	num_classes : [int]
			invalid class does not count as a class. So lets say targets takes values -1 - 19 then you have 20 classes
	"""

	BS = pred.shape[0]
	# add 1 so the index ranges from 0 to NUM_CLASSES
	pred = pred.type(torch.int) + 1
	target = target.type(torch.int) + 1
	# NOW class=0 should not induce a loss

	# Set pixels that are predicted but have no label to 0. These pixels don't induce a loss:
	# neither does the IoU of class 0, nor do they count towards the union of the other classes when predicted wrong.
	pred = pred * (target > 0).type(pred.dtype) 
	iou_per_image = torch.zeros(BS, device=pred.device)

	# we have to do this calculation for each image. 
	for b in range(BS):
		TPS, FPS, TNS, FNS, _ = stat_scores_multiple_classes(pred[b], target[b], num_classes+1)
		if verbose:
			print(f'TPS: {TPS}, \nFPS: {FPS}, \nFNS: {FNS}, \nTNS: {TNS}')
			print(f'Inter: {TPS},\nUnion: {TPS + FPS + FNS}')
		IoU = TPS[1:] / (TPS[1:] + FPS[1:] + FNS[1:])
		mIoU = IoU[~torch.isnan(IoU)].mean()
		iou_per_image[b] = mIoU

	return torch.mean(iou_per_image)  # mean over the batch
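
The ~torch.isnan mask is what keeps classes absent from both pred and target (0 / 0 = NaN) out of the per-image mean rather than dragging it down. A toy illustration with invented stat scores:

import torch

TPS = torch.tensor([0., 3., 0.])       # the third class never occurs
FPS = torch.tensor([1., 1., 0.])
FNS = torch.tensor([0., 2., 0.])
IoU = TPS / (TPS + FPS + FNS)          # tensor([0.0000, 0.5000, nan])
print(IoU[~torch.isnan(IoU)].mean())   # tensor(0.2500); the NaN class is ignored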