def loss_fx(self):
    """Build the Dice loss used for training.

    Configured for foreground-only Dice: the background channel is excluded,
    targets are converted to one-hot, and softmax is applied to the network
    output. The reduction mode is taken from ``self.reduction``.
    """
    loss_kwargs = {
        "include_background": False,
        "to_onehot_y": True,
        "softmax": True,
        "reduction": self.reduction,
    }
    return DiceLoss(**loss_kwargs)
def run_test(batch_size=64, train_steps=100, device=torch.device("cuda:0")):
    """Train a small 2D UNet on synthetic images for one epoch and return the final loss.

    Args:
        batch_size: number of samples per training batch.
        train_steps: dataset length, i.e. number of synthetic samples per epoch.
        device: torch device to run training on.

    Returns:
        The loss value recorded for the last training step.
    """

    class _TestBatch(Dataset):
        # Synthetic dataset: every item is a freshly generated noisy
        # 2D image / segmentation pair of size 128x128.
        def __getitem__(self, _unused_id):
            im, seg = create_test_image_2d(128, 128, noise_max=1, num_objs=4, num_seg_classes=1)
            # Add a channel axis; the segmentation is cast to float32 for the
            # sigmoid-based Dice loss.
            return im[None], seg[None].astype(np.float32)

        def __len__(self):
            return train_steps

    net = UNet(
        dimensions=2,
        in_channels=1,
        out_channels=1,
        channels=(4, 8, 16, 32),
        strides=(2, 2, 2),
        num_res_units=2,
    )
    # Use the current `sigmoid=True` keyword (the legacy `do_sigmoid` keyword
    # was removed from DiceLoss); this also matches the DiceLoss usage in the
    # other tests of this file.
    loss = DiceLoss(sigmoid=True)
    opt = torch.optim.Adam(net.parameters(), 1e-4)
    src = DataLoader(_TestBatch(), batch_size=batch_size)

    trainer = create_supervised_trainer(net, opt, loss, device, False)
    trainer.run(src, 1)

    loss = trainer.state.output
    print('Loss:', loss)
    if loss >= 1:
        print('Loss value is wrong, expect to be < 1.')
    return loss
def test_ill_opts(self):
    """MaskedLoss should raise ValueError for a non-callable loss and for
    inconsistent input/target/mask shapes."""
    # A plain list is not a valid loss callable.
    with self.assertRaisesRegex(ValueError, ""):
        MaskedLoss(loss=[])

    base_loss = DiceLoss(include_background=True, sigmoid=True, smooth_nr=1e-5, smooth_dr=1e-5)

    # Mask shape disagrees with the single-channel input/target.
    with self.assertRaisesRegex(ValueError, ""):
        MaskedLoss(loss=base_loss)(
            input=torch.zeros((3, 1, 2, 2)),
            target=torch.zeros((3, 1, 2, 2)),
            mask=torch.zeros((3, 3, 2, 2)),
        )

    # Target channel count disagrees with the input.
    with self.assertRaisesRegex(ValueError, ""):
        MaskedLoss(loss=base_loss)(
            input=torch.zeros((3, 3, 2, 2)),
            target=torch.zeros((3, 2, 2, 2)),
            mask=torch.zeros((3, 3, 2, 2)),
        )
def test_shape(self, input_param, input_data, expected_val):
    """Parameterized check: DiceLoss built from ``input_param`` applied to
    ``input_data`` matches ``expected_val`` to five decimal places."""
    loss_fn = DiceLoss(**input_param)
    computed = loss_fn.forward(**input_data)
    self.assertAlmostEqual(computed.item(), expected_val, places=5)