def test_empty_class_2d(self):
    num_classes = 2
    target = torch.tensor([[0, 0, 0, 0],
                           [0, 0, 0, 0],
                           [0, 0, 0, 0],
                           [0, 0, 0, 0]])
    # add another dimension corresponding to the batch (batch size = 1 here)
    target = target.unsqueeze(0)
    pred_very_good = 1000 * F.one_hot(
        target, num_classes=num_classes).permute(0, 3, 1, 2).float()
    pred_very_poor = 1000 * F.one_hot(
        1 - target, num_classes=num_classes).permute(0, 3, 1, 2).float()

    for w_mode in ["default", "GDL"]:
        # initialize the loss
        loss = GeneralizedWassersteinDiceLoss(
            dist_matrix=np.array([[0.0, 1.0], [1.0, 0.0]]),
            weighting_mode=w_mode)

        # loss for pred_very_good should be close to 0
        loss_good = float(loss.forward(pred_very_good, target))
        self.assertAlmostEqual(loss_good, 0.0, places=3)

        # loss for pred_very_poor should be close to 1
        loss_poor = float(loss.forward(pred_very_poor, target))
        self.assertAlmostEqual(loss_poor, 1.0, places=3)
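# A hedged addition, not part of the original suite: every test here uses a
# batch size of 1, so a minimal batched variant can guard against reduction
# bugs across the batch dimension. The method name and the batch size of 2
# are illustrative assumptions; the GeneralizedWassersteinDiceLoss API is
# used exactly as in the tests above.
def test_batched_input_sketch(self):
    target = torch.tensor([[0, 0, 0, 0],
                           [0, 1, 1, 0],
                           [0, 1, 1, 0],
                           [0, 0, 0, 0]])
    # stack the same example twice to get a batch of size 2
    target = target.unsqueeze(0).repeat(2, 1, 1)
    pred_very_good = 1000 * F.one_hot(
        target, num_classes=2).permute(0, 3, 1, 2).float()

    loss = GeneralizedWassersteinDiceLoss(
        dist_matrix=np.array([[0.0, 1.0], [1.0, 0.0]]))

    # a perfect batched prediction should still give a loss close to 0
    loss_good = float(loss.forward(pred_very_good, target))
    self.assertAlmostEqual(loss_good, 0.0, places=3)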
def test_bin_seg_3d(self):
    # define 3d examples
    target = torch.tensor([
        # slice 0
        [[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
        # slice 1
        [[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
        # slice 2
        [[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
    ])
    # add another dimension corresponding to the batch (batch size = 1 here)
    target = target.unsqueeze(0)  # shape (1, D, H, W)
    pred_very_good = 1000 * F.one_hot(
        target, num_classes=2).permute(0, 4, 1, 2, 3).float()
    pred_very_poor = 1000 * F.one_hot(
        1 - target, num_classes=2).permute(0, 4, 1, 2, 3).float()

    for w_mode in ["default", "GDL"]:
        # initialize the loss
        loss = GeneralizedWassersteinDiceLoss(
            dist_matrix=np.array([[0.0, 1.0], [1.0, 0.0]]),
            weighting_mode=w_mode)

        # mean dice loss for pred_very_good should be close to 0
        loss_good = float(loss.forward(pred_very_good, target))
        self.assertAlmostEqual(loss_good, 0.0, places=3)

        # mean dice loss for pred_very_poor should be close to 1
        loss_poor = float(loss.forward(pred_very_poor, target))
        self.assertAlmostEqual(loss_poor, 1.0, places=3)
def test_bin_seg_2d(self):
    target = torch.tensor([[0, 0, 0, 0],
                           [0, 1, 1, 0],
                           [0, 1, 1, 0],
                           [0, 0, 0, 0]])
    # add another dimension corresponding to the batch (batch size = 1 here)
    target = target.unsqueeze(0)
    pred_very_good = 1000 * F.one_hot(
        target, num_classes=2).permute(0, 3, 1, 2).float()
    pred_very_poor = 1000 * F.one_hot(
        1 - target, num_classes=2).permute(0, 3, 1, 2).float()

    # initialize the loss
    loss = GeneralizedWassersteinDiceLoss(
        dist_matrix=np.array([[0.0, 1.0], [1.0, 0.0]]))

    # the loss for pred_very_good should be close to 0
    loss_good = float(loss.forward(pred_very_good, target))
    self.assertAlmostEqual(loss_good, 0.0, places=3)

    # same test, but with a target that carries an explicit class dimension
    target_4dim = target.unsqueeze(1)  # shape (1, 1, H, W)
    loss_good = float(loss.forward(pred_very_good, target_4dim))
    self.assertAlmostEqual(loss_good, 0.0, places=3)

    # the loss for pred_very_poor should be close to 1
    loss_poor = float(loss.forward(pred_very_poor, target))
    self.assertAlmostEqual(loss_poor, 1.0, places=3)
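# A hedged addition, not part of the original suite: a segmentation loss is
# only usable for training if gradients flow through it, so a minimal
# backward-pass check can complement the value checks above. The method name
# and the random logits are illustrative assumptions.
def test_grad_flow_sketch(self):
    target = torch.tensor([[0, 1], [1, 0]]).unsqueeze(0)  # shape (1, H, W)
    pred = torch.randn(1, 2, 2, 2, requires_grad=True)  # (B, C, H, W) logits

    loss = GeneralizedWassersteinDiceLoss(
        dist_matrix=np.array([[0.0, 1.0], [1.0, 0.0]]))

    value = loss.forward(pred, target)
    value.backward()

    # the gradient wrt the logits should exist and be finite everywhere
    self.assertIsNotNone(pred.grad)
    self.assertTrue(torch.isfinite(pred.grad).all())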
def test_different_target_data_type(self):
    """
    Test that the loss is compatible with all the integer types
    for the target segmentation.
    """
    # define 2d examples
    target = torch.tensor([[0, 0, 0, 0],
                           [0, 1, 1, 0],
                           [0, 1, 1, 0],
                           [0, 0, 0, 0]])
    # add another dimension corresponding to the batch (batch size = 1 here)
    target = target.unsqueeze(0)  # shape (1, H, W)
    pred_very_good = 1000 * F.one_hot(
        target, num_classes=2).permute(0, 3, 1, 2).float()

    target_uint8 = target.to(torch.uint8)
    target_int8 = target.to(torch.int8)
    target_short = target.short()
    target_int = target.int()
    target_long = target.long()
    target_list = [
        target_uint8, target_int8, target_short, target_int, target_long
    ]

    for w_mode in ["default", "GDL"]:
        # initialize the loss
        loss = GeneralizedWassersteinDiceLoss(
            dist_matrix=np.array([[0.0, 1.0], [1.0, 0.0]]),
            weighting_mode=w_mode)

        # the test should pass irrespective of the integer type used
        for t in target_list:
            # the loss for pred_very_good should be close to 0
            loss_good = float(loss.forward(pred_very_good, t))
            self.assertAlmostEqual(loss_good, 0.0, places=3)
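# A hedged addition, not part of the original suite: all the tests above use
# the binary distance matrix [[0, 1], [1, 0]], for which the Generalized
# Wasserstein Dice Loss behaves like a plain Dice-style loss. The sketch
# below assumes the same API and illustrates what a non-trivial distance
# matrix buys: mislabeling a class as a "close" class (inter-class distance
# 0.5) should cost less than mislabeling it as a "distant" one (distance
# 1.0). The method name, class layout, and the 0.5 distance are illustrative
# assumptions.
def test_multi_class_distance_matrix_sketch(self):
    target = torch.tensor([[0, 0, 1, 1],
                           [0, 0, 1, 1],
                           [2, 2, 1, 1],
                           [2, 2, 1, 1]]).unsqueeze(0)  # shape (1, H, W)

    # classes 1 and 2 are close to each other, both far from class 0
    dist = np.array([[0.0, 1.0, 1.0],
                     [1.0, 0.0, 0.5],
                     [1.0, 0.5, 0.0]])
    loss = GeneralizedWassersteinDiceLoss(dist_matrix=dist)

    # prediction that mislabels class 2 as the close class 1
    close_confusion = target.clone()
    close_confusion[close_confusion == 2] = 1
    pred_close = 1000 * F.one_hot(
        close_confusion, num_classes=3).permute(0, 3, 1, 2).float()

    # prediction that mislabels class 2 as the distant class 0
    far_confusion = target.clone()
    far_confusion[far_confusion == 2] = 0
    pred_far = 1000 * F.one_hot(
        far_confusion, num_classes=3).permute(0, 3, 1, 2).float()

    # the closer confusion should incur the smaller loss
    loss_close = float(loss.forward(pred_close, target))
    loss_far = float(loss.forward(pred_far, target))
    self.assertLess(loss_close, loss_far)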