Example #1
    def test_pad_collation(self, t_type, collate_method, transform):

        if t_type == dict:
            dataset = CacheDataset(self.dict_data, transform, progress=False)
        else:
            dataset = _Dataset(self.list_data, self.list_labels, transform)

        # Default collation should raise an error
        loader_fail = DataLoader(dataset, batch_size=10)
        with self.assertRaises(RuntimeError):
            for _ in loader_fail:
                pass

        # Padded collation shouldn't
        loader = DataLoader(dataset, batch_size=10, collate_fn=collate_method)
        # check collation in forward direction
        for data in loader:
            if t_type == dict:
                shapes = []
                decollated_data = decollate_batch(data)
                for d in decollated_data:
                    output = PadListDataCollate.inverse(d)
                    shapes.append(output["image"].shape)
                self.assertTrue(len(set(shapes)) > 1)  # inverted shapes must differ because of random xforms
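As a standalone illustration of the padding behaviour these tests rely on, here is a minimal sketch (toy tensor shapes of my own choosing, assuming a recent MONAI install) showing that `pad_list_data_collate` pads variable-size samples to a common shape where the default collation would fail:

import torch
from torch.utils.data import DataLoader
from monai.data import pad_list_data_collate

# Two samples whose spatial sizes differ; torch's default collation
# would raise a RuntimeError on this dataset.
samples = [torch.zeros(1, 10, 10), torch.zeros(1, 12, 8)]

loader = DataLoader(samples, batch_size=2, collate_fn=pad_list_data_collate)
batch = next(iter(loader))
print(batch.shape)  # expected: torch.Size([2, 1, 12, 10]), padded per spatial dim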
Example #2
 def test_loading_dict(self):
     set_determinism(seed=1234)
     # test sequence input data with dict
     data = [
         {
             "image": np.arange(16, dtype=float).reshape(1, 4, 4),
             "label": np.arange(16, dtype=float).reshape(1, 4, 4),
             "metadata": "test string",
         },
         {
             "image": np.arange(16, dtype=float).reshape(1, 4, 4),
             "label": np.arange(16, dtype=float).reshape(1, 4, 4),
             "metadata": "test string",
         },
     ]
     # image level
     patch_intensity = RandShiftIntensityd(keys="image",
                                           offsets=1.0,
                                           prob=1.0)
     patch_iter = PatchIterd(keys=["image", "label"],
                             patch_size=(2, 2),
                             start_pos=(0, 0))
     ds = GridPatchDataset(data=data,
                           patch_iter=patch_iter,
                           transform=patch_intensity,
                           with_coordinates=True)
     # use the grid patch dataset
     for item in DataLoader(ds, batch_size=2, shuffle=False, num_workers=0):
         np.testing.assert_equal(item[0]["image"].shape, (2, 1, 2, 2))
         np.testing.assert_equal(item[0]["label"].shape, (2, 1, 2, 2))
         self.assertListEqual(item[0]["metadata"],
                              ["test string", "test string"])
     np.testing.assert_allclose(
         item[0]["image"],
         np.array([[[[1.4965, 2.4965], [5.4965, 6.4965]]],
                   [[[11.3584, 12.3584], [15.3584, 16.3584]]]]),
         rtol=1e-4,
     )
     np.testing.assert_allclose(item[1],
                                np.array([[[0, 1], [0, 2], [2, 4]],
                                          [[0, 1], [2, 4], [2, 4]]]),
                                rtol=1e-5)
     if sys.platform != "win32":
         for item in DataLoader(ds,
                                batch_size=2,
                                shuffle=False,
                                num_workers=2):
             np.testing.assert_equal(item[0]["image"].shape, (2, 1, 2, 2))
         np.testing.assert_allclose(
             item[0]["image"],
             np.array([[[[1.2548, 2.2548], [5.2548, 6.2548]]],
                       [[[9.1106, 10.1106], [13.1106, 14.1106]]]]),
             rtol=1e-3,
         )
         np.testing.assert_allclose(item[1],
                                    np.array([[[0, 1], [0, 2], [2, 4]],
                                              [[0, 1], [2, 4], [2, 4]]]),
                                    rtol=1e-5)
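The second element yielded by the loader above (`item[1]`) is the patch coordinate array that the tests compare against. A minimal sketch of where those coordinates come from (toy image, no intensity transform; the `data=` keyword assumes a recent MONAI version, as used above):

import numpy as np
from monai.data import DataLoader, GridPatchDataset, PatchIter

image = np.arange(16, dtype=float).reshape(1, 4, 4)
patch_iter = PatchIter(patch_size=(2, 2), start_pos=(0, 0))
ds = GridPatchDataset(data=[image], patch_iter=patch_iter, transform=None)

for patch, coords in DataLoader(ds, batch_size=2, num_workers=0):
    # each coords row is a (start, stop) slice per dimension:
    # channel first, then the two spatial dims
    print(patch.shape, coords)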
Example #3
    def test_loading_array(self):
        set_determinism(seed=1234)
        # image dataset
        images = [
            np.arange(16, dtype=float).reshape(1, 4, 4),
            np.arange(16, dtype=float).reshape(1, 4, 4)
        ]
        # image patch sampler
        n_samples = 8
        sampler = RandSpatialCropSamples(roi_size=(3, 3),
                                         num_samples=n_samples,
                                         random_center=True,
                                         random_size=False)

        # image level
        patch_intensity = RandShiftIntensity(offsets=1.0, prob=1.0)
        image_ds = Dataset(images, transform=patch_intensity)
        # patch level
        ds = PatchDataset(dataset=image_ds,
                          patch_func=sampler,
                          samples_per_image=n_samples,
                          transform=patch_intensity)

        np.testing.assert_equal(len(ds), n_samples * len(images))
        # use the patch dataset, length: len(images) x samplers_per_image
        for item in DataLoader(ds, batch_size=2, shuffle=False, num_workers=0):
            np.testing.assert_equal(tuple(item.shape), (2, 1, 3, 3))
        np.testing.assert_allclose(
            item[0],
            np.array([[[1.338681, 2.338681, 3.338681],
                       [5.338681, 6.338681, 7.338681],
                       [9.338681, 10.338681, 11.338681]]]),
            rtol=1e-5,
        )
        if sys.platform != "win32":
            for item in DataLoader(ds,
                                   batch_size=2,
                                   shuffle=False,
                                   num_workers=2):
                np.testing.assert_equal(tuple(item.shape), (2, 1, 3, 3))
            np.testing.assert_allclose(
                item[0],
                np.array([[
                    [4.957847, 5.957847, 6.957847],
                    [8.957847, 9.957847, 10.957847],
                    [12.957847, 13.957847, 14.957847],
                ]]),
                rtol=1e-5,
            )
        set_determinism(seed=None)
Example #4
    def test_dataloader(self):
        dataset = Dataset(
            data=[{
                "img": np.array([[[0.0, 1.0], [2.0, 3.0]]])
            }, {
                "img": np.array([[[0.0, 1.0], [2.0, 3.0]]])
            }],
            transform=IntensityStatsd(keys="img",
                                      ops=["max", "mean"],
                                      key_prefix="orig"),
        )
        # set num workers = 0 for mac / win
        num_workers = 2 if sys.platform == "linux" else 0
        dataloader = DataLoader(dataset=dataset,
                                num_workers=num_workers,
                                batch_size=2)
        orig_method = mp.get_start_method()
        mp.set_start_method("spawn", force=True)

        for d in dataloader:
            meta = d["img_meta_dict"]
            np.testing.assert_allclose(meta["orig_max"], [3.0, 3.0], atol=1e-3)
            np.testing.assert_allclose(meta["orig_mean"], [1.5, 1.5],
                                       atol=1e-3)
        # restore the mp method
        mp.set_start_method(orig_method, force=True)
Example #5
    def test_collation(self, _, transform, collate_fn, ndim):
        data = self.data_3d if ndim == 3 else self.data_2d
        if collate_fn:
            modified_transform = transform
        else:
            modified_transform = Compose(
                [transform,
                 ResizeWithPadOrCropd(KEYS, 100),
                 ToTensord(KEYS)])

        # num_workers = 0 on non-Linux platforms or when GPU transforms are used
        num_workers = 0 if sys.platform != "linux" or torch.cuda.is_available() else 2

        dataset = CacheDataset(data,
                               transform=modified_transform,
                               progress=False)
        loader = DataLoader(dataset,
                            num_workers=num_workers,
                            batch_size=self.batch_size,
                            collate_fn=collate_fn)

        for item in loader:
            np.testing.assert_array_equal(
                item["image_transforms"][0]["do_transforms"],
                item["label_transforms"][0]["do_transforms"])
Example #6
    def test_set_data(self):
        data_list1 = list(range(10))

        transform = Compose([
            Lambda(func=lambda x: np.array([x * 10])),
            RandLambda(func=lambda x: x + 1)
        ])

        dataset = CacheDataset(
            data=data_list1,
            transform=transform,
            cache_rate=1.0,
            num_workers=4,
            progress=True,
            copy_cache=sys.platform != "linux",
        )

        num_workers = 2 if sys.platform == "linux" else 0
        dataloader = DataLoader(dataset=dataset,
                                num_workers=num_workers,
                                batch_size=1)
        for i, d in enumerate(dataloader):
            np.testing.assert_allclose([[data_list1[i] * 10 + 1]], d)
        # simulate another epoch, the cache content should not be modified
        for i, d in enumerate(dataloader):
            np.testing.assert_allclose([[data_list1[i] * 10 + 1]], d)

        # update the datalist and fill the cache content
        data_list2 = list(range(-10, 0))
        dataset.set_data(data=data_list2)
        # rerun with updated cache content
        for i, d in enumerate(dataloader):
            np.testing.assert_allclose([[data_list2[i] * 10 + 1]], d)
Example #7
 def test_exception(self, datalist):
     dataset = Dataset(data=datalist, transform=None)
     dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)
     with self.assertRaisesRegex((TypeError, RuntimeError),
                                 "Collate error on the key"):
         for _ in dataloader:
             pass
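For context on the error being asserted, here is a minimal sketch (hypothetical toy dict data, assuming a recent MONAI version) that triggers the same "Collate error on the key" message from MONAI's default collation:

import numpy as np
from monai.data import DataLoader, Dataset

# Two dict samples whose arrays disagree in shape under the same key.
datalist = [{"img": np.zeros((1, 4, 4))}, {"img": np.zeros((1, 5, 5))}]
loader = DataLoader(Dataset(data=datalist), batch_size=2, num_workers=0)
try:
    next(iter(loader))
except RuntimeError as e:
    print(e)  # message should point at the offending key, e.g. "Collate error on the key 'img'"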
Example #8
def show_element(dataset, number_of_examples=1):
    check_loader = DataLoader(dataset)
    for i, batch_data in enumerate(check_loader):
        image, label = (batch_data["image"][0][0], batch_data["label"][0][0])
        show3(np.append(image, label * torch.max(image), axis=1))
        if i + 1 == number_of_examples:
            break
Example #9
    def test_decollation(self, *transforms):

        batch_size = 2
        num_workers = 2

        t_compose = Compose(
            [AddChanneld(KEYS),
             Compose(transforms),
             ToTensord(KEYS)])
        # If nibabel present, read from disk
        if has_nib:
            t_compose = Compose([LoadImaged("image"), t_compose])

        dataset = CacheDataset(self.data, t_compose, progress=False)
        loader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=num_workers)

        for b, batch_data in enumerate(loader):
            decollated_1 = decollate_batch(batch_data)
            decollated_2 = Decollated()(batch_data)

            for decollated in [decollated_1, decollated_2]:
                for i, d in enumerate(decollated):
                    self.check_match(dataset[b * batch_size + i], d)
Example #10
 def test_values(self):
     datalist = [
         {
             "image": "spleen_19.nii.gz",
             "label": "spleen_label_19.nii.gz"
         },
         {
             "image": "spleen_31.nii.gz",
             "label": "spleen_label_31.nii.gz"
         },
     ]
     transform = Compose([
         DataStatsd(keys=["image", "label"],
                    data_shape=False,
                    value_range=False,
                    data_value=True),
         SimulateDelayd(keys=["image", "label"], delay_time=0.1),
     ])
     dataset = CacheDataset(data=datalist,
                            transform=transform,
                            cache_rate=0.5,
                            cache_num=1)
     dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=2)
     for d in dataloader:
         self.assertEqual(d["image"][0], "spleen_19.nii.gz")
         self.assertEqual(d["image"][1], "spleen_31.nii.gz")
         self.assertEqual(d["label"][0], "spleen_label_19.nii.gz")
         self.assertEqual(d["label"][1], "spleen_label_31.nii.gz")
Example #11
    def test_decollation(self, batch_size=2, num_workers=2):

        im = create_test_image_2d(100, 101)[0]
        data = [{
            "image": make_nifti_image(im) if has_nib else im
        } for _ in range(6)]

        transforms = Compose([
            AddChanneld("image"),
            SpatialPadd("image", 150),
            RandFlipd("image", prob=1.0, spatial_axis=1),
            ToTensord("image"),
        ])
        # If nibabel present, read from disk
        if has_nib:
            transforms = Compose([LoadImaged("image"), transforms])

        dataset = CacheDataset(data, transforms, progress=False)
        loader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=num_workers)

        for b, batch_data in enumerate(loader):
            decollated_1 = decollate_batch(batch_data)
            decollated_2 = Decollated()(batch_data)

            for decollated in [decollated_1, decollated_2]:
                for i, d in enumerate(decollated):
                    self.check_match(dataset[b * batch_size + i], d)
Example #12
    def test_shape(self):
        expected_shape = (128, 128, 128)
        test_image = nib.Nifti1Image(
            np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4))
        test_data = []
        with tempfile.TemporaryDirectory() as tempdir:
            for i in range(6):
                nib.save(test_image,
                         os.path.join(tempdir, f"test_image{i}.nii.gz"))
                test_data.append(
                    {"image": os.path.join(tempdir, f"test_image{i}.nii.gz")})

            test_transform = Compose([
                LoadImaged(keys="image"),
                SimulateDelayd(keys="image", delay_time=1e-7)
            ])

            data_iterator = _Stream(test_data)
            with self.assertRaises(TypeError):  # Dataset doesn't work
                dataset = Dataset(data=data_iterator, transform=test_transform)
                for _ in dataset:
                    pass
            dataset = IterableDataset(data=data_iterator,
                                      transform=test_transform)
            for d in dataset:
                self.assertTupleEqual(d["image"].shape, expected_shape)

            num_workers = 2 if sys.platform == "linux" else 0
            dataloader = DataLoader(dataset=dataset,
                                    batch_size=3,
                                    num_workers=num_workers)
            for d in dataloader:
                self.assertTupleEqual(d["image"].shape[1:], expected_shape)
Example #13
    def test_shape(self):
        expected_shape = (128, 128, 128)
        test_image = nib.Nifti1Image(
            np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4))
        test_data = list()
        with tempfile.TemporaryDirectory() as tempdir:
            for i in range(6):
                nib.save(test_image,
                         os.path.join(tempdir, f"test_image{i}.nii.gz"))
                test_data.append(
                    {"image": os.path.join(tempdir, f"test_image{i}.nii.gz")})

            test_transform = Compose([
                LoadImaged(keys="image"),
                SimulateDelayd(keys="image", delay_time=1e-7),
            ])

            test_stream = _Stream(data=test_data,
                                  dbpath=os.path.join(tempdir, "countDB"))

            dataset = IterableDataset(data=test_stream,
                                      transform=test_transform)
            for d in dataset:
                self.assertTupleEqual(d["image"].shape, expected_shape)

            test_stream.reset()
            dataloader = DataLoader(dataset=dataset,
                                    batch_size=3,
                                    num_workers=2)
            for d in dataloader:
                self.assertTupleEqual(d["image"].shape[1:], expected_shape)
Example #14
def main():
    root_path = Path(
        "../CoronaryVesselGeneration/AngioGenAppNew/output/testing/")

    keys = range(100)
    values = [str(root_path / f"{(i+1):04}" / "mesh.stl") for i in keys]

    meshes = dict(zip(keys, values))
    input()  # pause so baseline memory use can be checked externally
    dataset = MeshDataset(meshes)
    input()  # pause again after dataset construction

    dataloader = DataLoader(dataset, batch_size=100, num_workers=0)

    # Evaluate how long this all takes
    import time
    from PIL import Image

    for batch in dataloader:
        print(batch.shape)
        for i in range(3):
            im = Image.fromarray(batch.numpy()[i, :, :, 0].astype(np.uint8),
                                 'L')
            file_name = f'{i:03d}.png'

            im.save(file_name)

    t1 = time.time()
    for batch in dataloader:
        t2 = time.time()
        input(t2 - t1)  # Allows memory to be checked externally
        t1 = time.time()
Example #15
    def test_time(self):
        dataset = Dataset(
            data=self.datalist * 2,
            transform=self.transform)  # contains data for 2 batches
        dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)

        tbuffer = ThreadBuffer(dataloader)

        with PerfContext() as pc:
            for _ in dataloader:
                time.sleep(0.5)  # each batch takes 0.8 s to generate on top of this time

        unbuffered_time = pc.total_time

        with PerfContext() as pc:
            for _ in tbuffer:
                time.sleep(0.5)  # while "computation" happens, the next batch is generated, saving 0.4 s

        buffered_time = pc.total_time
        if sys.platform == "darwin":  # skip macOS measure
            print(
                f"darwin: Buffered time {buffered_time} vs unbuffered time {unbuffered_time}"
            )
        else:
            self.assertTrue(
                buffered_time < unbuffered_time,
                f"Buffered time {buffered_time} should be less than unbuffered time {unbuffered_time}",
            )
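To make the buffering pattern concrete, a minimal sketch (toy dataset and sleep time of my choosing) of ThreadBuffer wrapping an ordinary loader so that the next batch is produced while the current one is consumed:

import time
from monai.data import DataLoader, Dataset, ThreadBuffer

ds = Dataset(data=[{"x": float(i)} for i in range(4)])
loader = DataLoader(ds, batch_size=2, num_workers=0)

for batch in ThreadBuffer(loader):
    time.sleep(0.1)  # stand-in for computation; batch generation overlaps with this
    print(batch["x"])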
Example #16
    def test_container(self):
        net = torch.nn.Conv2d(1, 1, 3, padding=1)

        opt = torch.optim.Adam(net.parameters())

        img = torch.rand(1, 16, 16)
        data = {CommonKeys.IMAGE: img, CommonKeys.LABEL: img}
        loader = DataLoader([data for _ in range(10)])

        trainer = SupervisedTrainer(
            device=torch.device("cpu"),
            max_epochs=1,
            train_data_loader=loader,
            network=net,
            optimizer=opt,
            loss_function=torch.nn.L1Loss(),
        )

        con = ThreadContainer(trainer)
        con.start()
        time.sleep(1)  # wait for trainer to start

        self.assertTrue(con.is_alive)
        self.assertIsNotNone(con.status())
        self.assertTrue(len(con.status_dict) > 0)

        con.join()
Example #17
    def test_epistemic_scoring(self):
        input_size = (20, 20, 20)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        keys = ["image", "label"]
        num_training_ims = 10
        train_data = self.get_data(num_training_ims, input_size)
        test_data = self.get_data(1, input_size)

        transforms = Compose([
            AddChanneld(keys),
            CropForegroundd(keys, source_key="image"),
            DivisiblePadd(keys, 4),
        ])

        infer_transforms = Compose([
            AddChannel(),
            CropForeground(),
            DivisiblePad(4),
        ])

        train_ds = CacheDataset(train_data, transforms)
        # output might be different size, so pad so that they match
        train_loader = DataLoader(train_ds,
                                  batch_size=2,
                                  collate_fn=pad_list_data_collate)

        model = UNet(3, 1, 1, channels=(6, 6), strides=(2, 2)).to(device)
        loss_function = DiceLoss(sigmoid=True)
        optimizer = torch.optim.Adam(model.parameters(), 1e-3)

        num_epochs = 10
        for _ in trange(num_epochs):
            epoch_loss = 0

            for batch_data in train_loader:
                inputs = batch_data["image"].to(device)
                labels = batch_data["label"].to(device)
                optimizer.zero_grad()
                outputs = model(inputs)
                loss = loss_function(outputs, labels)
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()

            epoch_loss /= len(train_loader)

        entropy_score = EpistemicScoring(model=model,
                                         transforms=infer_transforms,
                                         roi_size=[20, 20, 20],
                                         num_samples=10)
        # Call Individual Infer from Epistemic Scoring
        ip_stack = [test_data["image"], test_data["image"], test_data["image"]]
        ip_stack = np.array(ip_stack)
        score_3d = entropy_score.entropy_3d_volume(ip_stack)
        score_3d_sum = np.sum(score_3d)
        # Call Entropy Metric from Epistemic Scoring
        self.assertEqual(score_3d.shape, input_size)
        self.assertIsInstance(score_3d_sum, np.float32)
        self.assertGreater(score_3d_sum, 3.0)
Example #18
 def test_randomize(self):
     dataset = _RandomDataset()
     dataloader = DataLoader(dataset, batch_size=2, num_workers=3)
     output = []
     for _ in range(2):
         for batch in dataloader:
             output.extend(batch.data.numpy().flatten().tolist())
     self.assertListEqual(output, [594, 170, 524, 778, 370, 906, 292, 589, 762, 763, 156, 886, 42, 405, 221, 166])
Example #19
 def test_loading_array(self):
     set_determinism(seed=1234)
     # image dataset
     images = [
         np.arange(16, dtype=float).reshape(1, 4, 4),
         np.arange(16, dtype=float).reshape(1, 4, 4)
     ]
     # image level
     patch_intensity = RandShiftIntensity(offsets=1.0, prob=1.0)
     patch_iter = PatchIter(patch_size=(2, 2), start_pos=(0, 0))
     ds = GridPatchDataset(dataset=images,
                           patch_iter=patch_iter,
                           transform=patch_intensity)
     # use the grid patch dataset
     for item in DataLoader(ds, batch_size=2, shuffle=False, num_workers=0):
         np.testing.assert_equal(tuple(item[0].shape), (2, 1, 2, 2))
     np.testing.assert_allclose(
         item[0],
         np.array([[[[1.7413, 2.7413], [5.7413, 6.7413]]],
                   [[[9.1419, 10.1419], [13.1419, 14.1419]]]]),
         rtol=1e-5,
     )
     np.testing.assert_allclose(
         item[1],
         np.array([[[0, 1], [0, 2], [2, 4]], [[0, 1], [2, 4], [2, 4]]]),
         rtol=1e-5,
     )
     if sys.platform != "win32":
         for item in DataLoader(ds,
                                batch_size=2,
                                shuffle=False,
                                num_workers=2):
             np.testing.assert_equal(tuple(item[0].shape), (2, 1, 2, 2))
         np.testing.assert_allclose(
             item[0],
             np.array([[[[2.3944, 3.3944], [6.3944, 7.3944]]],
                       [[[10.6551, 11.6551], [14.6551, 15.6551]]]]),
             rtol=1e-3,
         )
         np.testing.assert_allclose(
             item[1],
             np.array([[[0, 1], [0, 2], [2, 4]], [[0, 1], [2, 4], [2, 4]]]),
             rtol=1e-5,
         )
Example #20
    def test_inverse_inferred_seg(self):

        test_data = []
        for _ in range(20):
            image, label = create_test_image_2d(100, 101)
            test_data.append({
                "image": image,
                "label": label.astype(np.float32)
            })

        batch_size = 10
        # num workers = 0 for mac
        num_workers = 2 if sys.platform != "darwin" else 0
        transforms = Compose([
            AddChanneld(KEYS),
            SpatialPadd(KEYS, (150, 153)),
            CenterSpatialCropd(KEYS, (110, 99))
        ])
        num_invertible_transforms = sum(1 for i in transforms.transforms
                                        if isinstance(i, InvertibleTransform))

        dataset = CacheDataset(test_data, transform=transforms, progress=False)
        loader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=num_workers)

        device = "cuda" if torch.cuda.is_available() else "cpu"
        model = UNet(
            dimensions=2,
            in_channels=1,
            out_channels=1,
            channels=(2, 4),
            strides=(2, ),
        ).to(device)

        data = first(loader)
        labels = data["label"].to(device)
        segs = model(labels).detach().cpu()
        label_transform_key = "label" + InverseKeys.KEY_SUFFIX.value
        segs_dict = {
            "label": segs,
            label_transform_key: data[label_transform_key]
        }

        segs_dict_decollated = decollate_batch(segs_dict)

        # inverse of individual segmentation
        seg_dict = first(segs_dict_decollated)
        with allow_missing_keys_mode(transforms):
            inv_seg = transforms.inverse(seg_dict)["label"]
        self.assertEqual(len(data["label_transforms"]),
                         num_invertible_transforms)
        self.assertEqual(len(seg_dict["label_transforms"]),
                         num_invertible_transforms)
        self.assertEqual(inv_seg.shape[1:], test_data[0]["label"].shape)
Example #21
    def test_inverse_inferred_seg(self, extra_transform):

        test_data = []
        for _ in range(20):
            image, label = create_test_image_2d(100, 101)
            test_data.append({
                "image": image,
                "label": label.astype(np.float32)
            })

        batch_size = 10
        # num workers = 0 for mac
        num_workers = 2 if sys.platform == "linux" else 0
        transforms = Compose([
            AddChanneld(KEYS),
            SpatialPadd(KEYS, (150, 153)), extra_transform
        ])

        dataset = CacheDataset(test_data, transform=transforms, progress=False)
        loader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=num_workers)

        device = "cuda" if torch.cuda.is_available() else "cpu"
        model = UNet(spatial_dims=2,
                     in_channels=1,
                     out_channels=1,
                     channels=(2, 4),
                     strides=(1, )).to(device)

        data = first(loader)
        self.assertEqual(data["image"].shape[0], batch_size * NUM_SAMPLES)

        labels = data["label"].to(device)
        self.assertIsInstance(labels, MetaTensor)
        segs = model(labels).detach().cpu()
        segs_decollated = decollate_batch(segs)
        self.assertIsInstance(segs_decollated[0], MetaTensor)
        # inverse of individual segmentation
        seg_metatensor = first(segs_decollated)
        # test to convert interpolation mode for 1 data of model output batch
        convert_applied_interp_mode(seg_metatensor.applied_operations,
                                    mode="nearest",
                                    align_corners=None)

        # manually invert the last crop samples
        xform = seg_metatensor.applied_operations.pop(-1)
        shape_before_extra_xform = xform["orig_size"]
        resizer = ResizeWithPadOrCrop(spatial_size=shape_before_extra_xform)
        with resizer.trace_transform(False):
            seg_metatensor = resizer(seg_metatensor)

        with allow_missing_keys_mode(transforms):
            inv_seg = transforms.inverse({"label": seg_metatensor})["label"]
        self.assertEqual(inv_seg.shape[1:], test_data[0]["label"].shape)
Example #22
    def test_pad_collation(self, t_type, transform):

        if t_type == dict:
            dataset = CacheDataset(self.dict_data, transform, progress=False)
        else:
            dataset = _Dataset(self.list_data, self.list_labels, transform)

        # Default collation should raise an error
        loader_fail = DataLoader(dataset, batch_size=10)
        with self.assertRaises(RuntimeError):
            for _ in loader_fail:
                pass

        # Padded collation shouldn't
        loader = DataLoader(dataset,
                            batch_size=2,
                            collate_fn=pad_list_data_collate)
        for _ in loader:
            pass
Example #23
 def test_loading_array(self):
     set_determinism(seed=1234)
     # test sequence input data with images
     images = [
         np.arange(16, dtype=float).reshape(1, 4, 4),
         np.arange(16, dtype=float).reshape(1, 4, 4)
     ]
     # image level
     patch_intensity = RandShiftIntensity(offsets=1.0, prob=1.0)
     patch_iter = PatchIter(patch_size=(2, 2), start_pos=(0, 0))
     ds = GridPatchDataset(data=images,
                           patch_iter=patch_iter,
                           transform=patch_intensity)
     # use the grid patch dataset
     for item in DataLoader(ds, batch_size=2, shuffle=False, num_workers=0):
         np.testing.assert_equal(tuple(item[0].shape), (2, 1, 2, 2))
     np.testing.assert_allclose(
         item[0],
         np.array([[[[1.4965, 2.4965], [5.4965, 6.4965]]],
                   [[[11.3584, 12.3584], [15.3584, 16.3584]]]]),
         rtol=1e-4,
     )
     np.testing.assert_allclose(item[1],
                                np.array([[[0, 1], [0, 2], [2, 4]],
                                          [[0, 1], [2, 4], [2, 4]]]),
                                rtol=1e-5)
     if sys.platform != "win32":
         for item in DataLoader(ds,
                                batch_size=2,
                                shuffle=False,
                                num_workers=2):
             np.testing.assert_equal(tuple(item[0].shape), (2, 1, 2, 2))
         np.testing.assert_allclose(
             item[0],
             np.array([[[[1.2548, 2.2548], [5.2548, 6.2548]]],
                       [[[9.1106, 10.1106], [13.1106, 14.1106]]]]),
             rtol=1e-3,
         )
         np.testing.assert_allclose(item[1],
                                    np.array([[[0, 1], [0, 2], [2, 4]],
                                              [[0, 1], [2, 4], [2, 4]]]),
                                    rtol=1e-5)
Example #24
    def test_values(self):
        dataset = Dataset(data=self.datalist, transform=self.transform)
        dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)

        tbuffer = ThreadBuffer(dataloader)

        for d in tbuffer:
            self.assertEqual(d["image"][0], "spleen_19.nii.gz")
            self.assertEqual(d["image"][1], "spleen_31.nii.gz")
            self.assertEqual(d["label"][0], "spleen_label_19.nii.gz")
            self.assertEqual(d["label"][1], "spleen_label_31.nii.gz")
Example #25
    def _define_prediction_data_loaders(
            self,
            prediction_folder_path: Union[Path, str]
    ) -> bool:
        """Initialize prediction datasets and data loaders.

        @Note: on Windows, it is essential to set `persistent_workers=True` in the data loaders!

        @return True if datasets and data loaders could be instantiated, False otherwise.
        """

        # Check that the path exists
        prediction_folder_path = Path(prediction_folder_path)
        if not prediction_folder_path.is_dir():
            return False

        # Scan for images
        self._prediction_image_names = natsorted(
            glob(str(Path(prediction_folder_path) / "*.tif"))
        )

        # Optimize arguments
        if sys.platform == 'win32':
            persistent_workers = True
            pin_memory = False
        else:
            persistent_workers = False
            pin_memory = torch.cuda.is_available()

        if len(self._prediction_image_names) == 0:

            self._prediction_dataset = None
            self._prediction_dataloader = None

            return False

        # Define the transforms
        self._define_prediction_transforms()

        # Prediction
        self._prediction_dataset = Dataset(
            self._prediction_image_names,
            self._prediction_image_transforms
        )
        self._prediction_dataloader = DataLoader(
            self._prediction_dataset,
            batch_size=self._test_batch_size,
            shuffle=False,
            num_workers=self._test_num_workers,
            persistent_workers=persistent_workers,
            pin_memory=pin_memory
        )

        return True
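A stripped-down sketch of the platform-dependent loader settings used above (toy dataset; the values mirror this method rather than being a general recommendation):

import sys
import torch
from torch.utils.data import DataLoader, TensorDataset

ds = TensorDataset(torch.zeros(8, 1, 16, 16))

if sys.platform == "win32":
    # keep worker processes alive between epochs; process startup is costly on Windows
    persistent_workers, pin_memory = True, False
else:
    persistent_workers, pin_memory = False, torch.cuda.is_available()

loader = DataLoader(ds, batch_size=4, shuffle=False,
                    num_workers=2 if persistent_workers else 0,
                    persistent_workers=persistent_workers,
                    pin_memory=pin_memory)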
Example #26
    def test_data_loader_2(self):
        set_determinism(seed=123)
        xform_2 = Compose([_RandXform(), _RandXform()])
        train_ds = Dataset([1], transform=xform_2)

        out_2 = train_ds[0]
        self.assertAlmostEqual(out_2, 0.4092510)

        train_loader = DataLoader(train_ds, num_workers=0)
        out_2 = next(iter(train_loader))
        self.assertAlmostEqual(out_2.cpu().item(), 0.7858843729)

        if sys.platform != "win32":  # skip multi-worker tests on win32
            train_loader = DataLoader(train_ds, num_workers=1)
            out_2 = next(iter(train_loader))
            self.assertAlmostEqual(out_2.cpu().item(), 0.305763411)

            train_loader = DataLoader(train_ds, num_workers=2)
            out_1 = next(iter(train_loader))
            self.assertAlmostEqual(out_1.cpu().item(), 0.131966779)
        set_determinism(None)
Example #27
    def test_pad_collation(self, t_type, collate_method, transform):

        if t_type == dict:
            dataset = CacheDataset(self.dict_data, transform, progress=False)
        else:
            dataset = _Dataset(self.list_data, self.list_labels, transform)

        # Default collation should raise an error
        loader_fail = DataLoader(dataset, batch_size=10)
        with self.assertRaises(RuntimeError):
            for _ in loader_fail:
                pass

        # Padded collation shouldn't
        loader = DataLoader(dataset, batch_size=10, collate_fn=collate_method)
        # check collation in forward direction
        for data in loader:
            if t_type == dict:
                decollated_data = decollate_batch(data)
                for d in decollated_data:
                    PadListDataCollate.inverse(d)
Example #28
    def test_shape(self):
        test_dataset = ["vwxyz", "hello", "world"]
        n_per_image = len(test_dataset[0])

        result = PatchDataset(dataset=test_dataset, patch_func=identity, samples_per_image=n_per_image)

        output = []
        n_workers = 0 if sys.platform == "win32" else 2
        for item in DataLoader(result, batch_size=3, num_workers=n_workers):
            output.append("".join(item))
        expected = ["vwx", "yzh", "ell", "owo", "rld"]
        self.assertEqual(output, expected)
Example #29
 def test_decollate(self, dtype):
     batch_size = 3
     ims = [self.get_im(dtype=dtype)[0] for _ in range(batch_size * 2)]
     ds = Dataset(ims)
     dl = DataLoader(ds, num_workers=batch_size, batch_size=batch_size)
     batch = next(iter(dl))
     decollated = decollate_batch(batch)
     self.assertIsInstance(decollated, list)
     self.assertEqual(len(decollated), batch_size)
     for elem, im in zip(decollated, ims):
         self.assertIsInstance(elem, MetaTensor)
         self.check(elem, im, ids=False)
Example #30
    def test_endianness(self, endianness, use_array, image_only):

        hdr = nib.Nifti1Header(endianness=endianness)
        nii = nib.Nifti1Image(self.im, np.eye(4), header=hdr)
        nib.save(nii, self.fname)

        data = [self.fname] if use_array else [{"image": self.fname}]
        if use_array:
            tr = LoadImage(image_only=image_only)
        else:
            tr = LoadImaged("image", image_only=image_only)
        check_ds = Dataset(data, tr)
        check_loader = DataLoader(check_ds, batch_size=1)
        _ = next(iter(check_loader))
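Finally, a minimal end-to-end sketch of the load-and-batch pattern these endianness tests build on (temporary file, toy volume; assumes nibabel and a recent MONAI are installed):

import os
import tempfile

import nibabel as nib
import numpy as np
from monai.data import DataLoader, Dataset
from monai.transforms import LoadImage

with tempfile.TemporaryDirectory() as tempdir:
    fname = os.path.join(tempdir, "img.nii.gz")
    nib.save(nib.Nifti1Image(np.zeros((8, 8, 8), dtype=np.float32), np.eye(4)), fname)

    ds = Dataset(data=[fname], transform=LoadImage(image_only=True))
    batch = next(iter(DataLoader(ds, batch_size=1)))
    print(batch.shape)  # expected: (1, 8, 8, 8) after batch collation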