Example No. 1
    def test_construct_with_pre_applied_transforms(self):
        key = "im"
        _, im = self.get_im()
        tr = Compose([BorderPadd(key, 1), DivisiblePadd(key, 16)])
        data = tr({key: im})
        # the operations recorded on the padded output can be handed to a new MetaTensor
        m = MetaTensor(im, applied_operations=data[key].applied_operations)
        self.assertEqual(len(m.applied_operations), len(tr.transforms))
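
For orientation, a minimal standalone sketch (assuming the usual monai.transforms import path; not part of the test above) of what DivisiblePadd does: each spatial dimension is padded up to the nearest multiple of k, while the channel dimension is left untouched.

import numpy as np
from monai.transforms import DivisiblePadd

data = {"im": np.zeros((1, 10, 11, 12))}       # channel-first 3D image
padded = DivisiblePadd(keys="im", k=16)(data)
print(padded["im"].shape)                      # spatial dims padded to multiples of 16 -> (1, 16, 16, 16)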
Example No. 2
    def test_epistemic_scoring(self):
        input_size = (20, 20, 20)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        keys = ["image", "label"]
        num_training_ims = 10
        train_data = self.get_data(num_training_ims, input_size)
        test_data = self.get_data(1, input_size)

        transforms = Compose([
            AddChanneld(keys),
            CropForegroundd(keys, source_key="image"),
            DivisiblePadd(keys, 4),
        ])

        infer_transforms = Compose([
            AddChannel(),
            CropForeground(),
            DivisiblePad(4),
        ])

        train_ds = CacheDataset(train_data, transforms)
        # outputs might be different sizes, so pad them so that they match
        train_loader = DataLoader(train_ds,
                                  batch_size=2,
                                  collate_fn=pad_list_data_collate)

        model = UNet(3, 1, 1, channels=(6, 6), strides=(2, 2)).to(device)
        loss_function = DiceLoss(sigmoid=True)
        optimizer = torch.optim.Adam(model.parameters(), 1e-3)

        num_epochs = 10
        for _ in trange(num_epochs):
            epoch_loss = 0

            for batch_data in train_loader:
                inputs = batch_data["image"].to(device)
                labels = batch_data["label"].to(device)
                optimizer.zero_grad()
                outputs = model(inputs)
                loss = loss_function(outputs, labels)
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()

            epoch_loss /= len(train_loader)

        entropy_score = EpistemicScoring(model=model,
                                         transforms=infer_transforms,
                                         roi_size=[20, 20, 20],
                                         num_samples=10)
        # call the entropy metric from EpistemicScoring directly on a stacked input
        ip_stack = [test_data["image"], test_data["image"], test_data["image"]]
        ip_stack = np.array(ip_stack)
        score_3d = entropy_score.entropy_3d_volume(ip_stack)
        score_3d_sum = np.sum(score_3d)
        # the entropy map should match the input size and its sum should be a sensible positive value
        self.assertEqual(score_3d.shape, input_size)
        self.assertIsInstance(score_3d_sum, np.float32)
        self.assertGreater(score_3d_sum, 3.0)
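
The shape and dtype assertions above only pin down the contract of entropy_3d_volume: it maps a stack of N repeated predictions of shape (N, X, Y, Z) to a voxel-wise uncertainty map of shape (X, Y, Z). A rough numpy sketch of that idea (an illustration of voxel-wise binary entropy, not the MONAI Label implementation):

import numpy as np

def entropy_3d_volume_sketch(stack, eps=1e-8):
    # stack: (N, X, Y, Z) per-sample foreground probabilities
    p = np.clip(stack.mean(axis=0), eps, 1.0 - eps)         # mean probability per voxel
    return -(p * np.log(p) + (1.0 - p) * np.log(1.0 - p))   # binary entropy, shape (X, Y, Z)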
Example No. 3
    def test_transforms(self):
        key = "im"
        _, im = self.get_im()
        tr = Compose([ToMetaTensord(key), BorderPadd(key, 1), DivisiblePadd(key, 16), FromMetaTensord(key)])
        num_tr = len(tr.transforms)
        data = {key: im, PostFix.meta(key): {"affine": torch.eye(4)}}

        # apply one at a time
        for i, _tr in enumerate(tr.transforms):
            data = _tr(data)
            is_meta = isinstance(_tr, (ToMetaTensord, BorderPadd, DivisiblePadd))
            if is_meta:
                self.assertEqual(len(data), 1 if not config.USE_META_DICT else 2)  # im, im_transforms, compatibility
                self.assertIsInstance(data[key], MetaTensor)
                n_applied = len(data[key].applied_operations)
            else:
                self.assertEqual(len(data), 3)  # im, im_meta_dict, im_transforms
                self.assertIsInstance(data[key], torch.Tensor)
                self.assertNotIsInstance(data[key], MetaTensor)
                n_applied = len(data[PostFix.transforms(key)])

            self.assertEqual(n_applied, i + 1)

        # inverse one at a time
        for i, _tr in enumerate(tr.transforms[::-1]):
            data = _tr.inverse(data)
            is_meta = isinstance(_tr, (FromMetaTensord, BorderPadd, DivisiblePadd))
            if is_meta:
                self.assertEqual(len(data), 1)  # im
                self.assertIsInstance(data[key], MetaTensor)
                n_applied = len(data[key].applied_operations)
            else:
                self.assertEqual(len(data), 3)  # im, im_meta_dict, im_transforms
                self.assertIsInstance(data[key], torch.Tensor)
                self.assertNotIsInstance(data[key], MetaTensor)
                n_applied = len(data[PostFix.transforms(key)])

            self.assertEqual(n_applied, num_tr - i - 1)

        # apply all in one go
        data = tr({key: im, PostFix.meta(key): {"affine": torch.eye(4)}})
        self.assertEqual(len(data), 3)  # im, im_meta_dict, im_transforms
        self.assertIsInstance(data[key], torch.Tensor)
        self.assertNotIsInstance(data[key], MetaTensor)
        n_applied = len(data[PostFix.transforms(key)])
        self.assertEqual(n_applied, num_tr)

        # inverse all in one go
        data = tr.inverse(data)
        self.assertEqual(len(data), 3)  # im, im_meta_dict, im_transforms
        self.assertIsInstance(data[key], torch.Tensor)
        self.assertNotIsInstance(data[key], MetaTensor)
        n_applied = len(data[PostFix.transforms(key)])
        self.assertEqual(n_applied, 0)
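
As a smaller illustration of the invertibility this test exercises (assuming a MONAI version where dictionary transforms record applied_operations on MetaTensor), padding and then calling inverse() restores the original spatial shape:

import torch
from monai.data import MetaTensor
from monai.transforms import DivisiblePadd

pad = DivisiblePadd(keys="im", k=16)
data = pad({"im": MetaTensor(torch.zeros(1, 10, 11, 12))})
print(data["im"].shape)              # padded to multiples of 16 -> (1, 16, 16, 16)
restored = pad.inverse(data)
print(restored["im"].shape)          # padding removed -> (1, 10, 11, 12)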
Example No. 4
    def test_deep_copy(self):
        data = {"img": np.ones((1, 10, 11, 12))}
        num_samples = 3
        sampler = RandSpatialCropSamplesd(
            keys=["img"], roi_size=(3, 3, 3), num_samples=num_samples, random_center=True, random_size=False
        )
        transform = Compose([DivisiblePadd(keys="img", k=5), sampler])
        samples = transform(data)
        self.assertEqual(len(samples), num_samples)
        for sample in samples:
            self.assertEqual(len(sample["img"].applied_operations), len(transform))
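
The point of the deep copy is that each cropped sample owns an independent metadata trace; mutating one sample's applied_operations must not affect its siblings. A short standalone sketch of that expectation (assuming the same MONAI transforms as the test):

import numpy as np
from monai.transforms import Compose, DivisiblePadd, RandSpatialCropSamplesd

transform = Compose([
    DivisiblePadd(keys="img", k=5),
    RandSpatialCropSamplesd(keys="img", roi_size=(3, 3, 3), num_samples=3),
])
samples = transform({"img": np.ones((1, 10, 11, 12))})
# the traces are equal in length but are distinct (deep-copied) objects
assert samples[0]["img"].applied_operations is not samples[1]["img"].applied_operations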
Example No. 5
TESTS.append((
    "BorderPadd 2d",
    "2D",
    0,
    BorderPadd(KEYS, [3, 7]),
))

TESTS.append((
    "BorderPadd 3d",
    "3D",
    0,
    BorderPadd(KEYS, [4]),
))

TESTS.append((
    "DivisiblePadd 2d",
    "2D",
    0,
    DivisiblePadd(KEYS, k=4),
))

TESTS.append((
    "DivisiblePadd 3d",
    "3D",
    0,
    DivisiblePadd(KEYS, k=[4, 8, 11]),
))

TESTS.append((
    "CenterSpatialCropd 2d",
    "2D",
    0,
    CenterSpatialCropd(KEYS, roi_size=95),
))
Example No. 6
    def test_test_time_augmentation(self):
        input_size = (20, 40)  # test different input data shape to pad list collate
        keys = ["image", "label"]
        num_training_ims = 10

        train_data = self.get_data(num_training_ims, input_size)
        test_data = self.get_data(1, input_size)
        device = "cuda" if torch.cuda.is_available() else "cpu"

        transforms = Compose(
            [
                AddChanneld(keys),
                RandAffined(
                    keys,
                    prob=1.0,
                    spatial_size=(30, 30),
                    rotate_range=(np.pi / 3, np.pi / 3),
                    translate_range=(3, 3),
                    scale_range=((0.8, 1), (0.8, 1)),
                    padding_mode="zeros",
                    mode=("bilinear", "nearest"),
                    as_tensor_output=False,
                ),
                CropForegroundd(keys, source_key="image"),
                DivisiblePadd(keys, 4),
            ]
        )

        train_ds = CacheDataset(train_data, transforms)
        # outputs might be different sizes, so pad them so that they match
        train_loader = DataLoader(train_ds, batch_size=2, collate_fn=pad_list_data_collate)

        model = UNet(2, 1, 1, channels=(6, 6), strides=(2, 2)).to(device)
        loss_function = DiceLoss(sigmoid=True)
        optimizer = torch.optim.Adam(model.parameters(), 1e-3)

        num_epochs = 10
        for _ in trange(num_epochs):
            epoch_loss = 0

            for batch_data in train_loader:
                inputs, labels = batch_data["image"].to(device), batch_data["label"].to(device)
                optimizer.zero_grad()
                outputs = model(inputs)
                loss = loss_function(outputs, labels)
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()

            epoch_loss /= len(train_loader)

        post_trans = Compose([Activations(sigmoid=True), AsDiscrete(threshold=0.5)])

        tt_aug = TestTimeAugmentation(
            transform=transforms,
            batch_size=5,
            num_workers=0,
            inferrer_fn=model,
            device=device,
            to_tensor=True,
            output_device="cpu",
            post_func=post_trans,
        )
        mode, mean, std, vvc = tt_aug(test_data)
        self.assertEqual(mode.shape, (1,) + input_size)
        self.assertEqual(mean.shape, (1,) + input_size)
        self.assertTrue(all(np.unique(mode) == (0, 1)))
        self.assertGreaterEqual(mean.min(), 0.0)
        self.assertLessEqual(mean.max(), 1.0)
        self.assertEqual(std.shape, (1,) + input_size)
        self.assertIsInstance(vvc, float)
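
The four outputs checked above summarize the stack of augmented-and-inverted predictions. A hedged numpy sketch of what those summaries mean (illustrative only; the vvc formula here, volume standard deviation over volume mean, is an assumed reading of "volume variation coefficient", not MONAI's code):

import numpy as np

preds = (np.random.rand(5, 1, 20, 40) > 0.5).astype(np.float32)  # 5 augmented binary predictions
mean = preds.mean(axis=0)                     # voxel-wise mean, shape (1, 20, 40)
std = preds.std(axis=0)                       # voxel-wise standard deviation
mode = (mean > 0.5).astype(np.float32)        # majority vote stands in for the voxel-wise mode
volumes = preds.reshape(len(preds), -1).sum(axis=1)
vvc = float(volumes.std() / volumes.mean())   # volume variation coefficient (assumed definition)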
Example No. 7
    def test_pad_shape(self, input_param, input_data, expected_val):
        padder = DivisiblePadd(**input_param)
        result = padder(input_data)
        np.testing.assert_allclose(result["img"], expected_val)
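
This parameterized test receives its cases from elsewhere in the file; a hypothetical case (names and values are illustrative, not the project's actual constants) could look like:

import numpy as np

EXAMPLE_CASE = [
    {"keys": "img", "k": 4, "mode": "constant"},  # input_param for DivisiblePadd
    {"img": np.zeros((1, 6, 7))},                 # input_data passed to the padder
    np.zeros((1, 8, 8)),                          # expected_val: spatial dims padded to multiples of 4
]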
Example No. 8
TESTS.append(("SpatialCropd 3d", "3D", 0, True,
              SpatialCropd(KEYS, [49, 51, 44], [90, 89, 93])))

TESTS.append(("RandSpatialCropd 2d", "2D", 0, True,
              RandSpatialCropd(KEYS, [96, 93], None, True, False)))

TESTS.append(("RandSpatialCropd 3d", "3D", 0, True,
              RandSpatialCropd(KEYS, [96, 93, 92], None, False, False)))

TESTS.append(("BorderPadd 2d", "2D", 0, True, BorderPadd(KEYS, [3, 7, 2, 5])))

TESTS.append(("BorderPadd 2d", "2D", 0, True, BorderPadd(KEYS, [3, 7])))

TESTS.append(("BorderPadd 3d", "3D", 0, True, BorderPadd(KEYS, [4])))

TESTS.append(("DivisiblePadd 2d", "2D", 0, True, DivisiblePadd(KEYS, k=4)))

TESTS.append(
    ("DivisiblePadd 3d", "3D", 0, True, DivisiblePadd(KEYS, k=[4, 8, 11])))

TESTS.append(("CenterSpatialCropd 2d", "2D", 0, True,
              CenterSpatialCropd(KEYS, roi_size=95)))

TESTS.append(("CenterSpatialCropd 3d", "3D", 0, True,
              CenterSpatialCropd(KEYS, roi_size=[95, 97, 98])))

TESTS.append(("CropForegroundd 2d", "2D", 0, True,
              CropForegroundd(KEYS, source_key="label", margin=2)))

TESTS.append(("CropForegroundd 3d", "3D", 0, True,
              CropForegroundd(KEYS,