Example #1
    def test_shape(self, expected_shape):
        test_image = nib.Nifti1Image(
            np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4))
        tempdir = tempfile.mkdtemp()
        nib.save(test_image, os.path.join(tempdir, "test_image1.nii.gz"))
        nib.save(test_image, os.path.join(tempdir, "test_label1.nii.gz"))
        nib.save(test_image, os.path.join(tempdir, "test_extra1.nii.gz"))
        nib.save(test_image, os.path.join(tempdir, "test_image2.nii.gz"))
        nib.save(test_image, os.path.join(tempdir, "test_label2.nii.gz"))
        nib.save(test_image, os.path.join(tempdir, "test_extra2.nii.gz"))
        test_data = [
            {
                "image": os.path.join(tempdir, "test_image1.nii.gz"),
                "label": os.path.join(tempdir, "test_label1.nii.gz"),
                "extra": os.path.join(tempdir, "test_extra1.nii.gz"),
            },
            {
                "image": os.path.join(tempdir, "test_image2.nii.gz"),
                "label": os.path.join(tempdir, "test_label2.nii.gz"),
                "extra": os.path.join(tempdir, "test_extra2.nii.gz"),
            },
        ]
        test_transform = Compose([
            LoadNiftid(keys=["image", "label", "extra"]),
            SimulateDelayd(keys=["image", "label", "extra"],
                           delay_time=[1e-7, 1e-6, 1e-5]),
        ])
        dataset = Dataset(data=test_data, transform=test_transform)
        data1 = dataset[0]
        data2 = dataset[1]

        self.assertTupleEqual(data1["image"].shape, expected_shape)
        self.assertTupleEqual(data1["label"].shape, expected_shape)
        self.assertTupleEqual(data1["extra"].shape, expected_shape)
        self.assertTupleEqual(data2["image"].shape, expected_shape)
        self.assertTupleEqual(data2["label"].shape, expected_shape)
        self.assertTupleEqual(data2["extra"].shape, expected_shape)

        dataset = Dataset(
            data=test_data,
            transform=LoadNiftid(keys=["image", "label", "extra"]))
        data1_simple = dataset[0]
        data2_simple = dataset[1]

        self.assertTupleEqual(data1_simple["image"].shape, expected_shape)
        self.assertTupleEqual(data1_simple["label"].shape, expected_shape)
        self.assertTupleEqual(data1_simple["extra"].shape, expected_shape)
        self.assertTupleEqual(data2_simple["image"].shape, expected_shape)
        self.assertTupleEqual(data2_simple["label"].shape, expected_shape)
        self.assertTupleEqual(data2_simple["extra"].shape, expected_shape)
        shutil.rmtree(tempdir)
Example #2
    def run_interaction(self, train, compose):
        data = [{
            "image": np.ones((1, 2, 2, 2)).astype(np.float32),
            "label": np.ones((1, 2, 2, 2))
        } for _ in range(5)]
        network = torch.nn.Linear(2, 2)
        lr = 1e-3
        opt = torch.optim.SGD(network.parameters(), lr)
        loss = torch.nn.L1Loss()
        train_transforms = Compose([
            FindAllValidSlicesd(label="label", sids="sids"),
            AddInitialSeedPointd(label="label",
                                 guidance="guidance",
                                 sids="sids"),
            AddGuidanceSignald(image="image", guidance="guidance"),
            ToTensord(keys=("image", "label")),
        ])
        dataset = Dataset(data, transform=train_transforms)
        data_loader = torch.utils.data.DataLoader(dataset, batch_size=5)

        iteration_transforms = [
            Activationsd(keys="pred", sigmoid=True),
            ToNumpyd(keys=["image", "label", "pred"]),
            FindDiscrepancyRegionsd(label="label",
                                    pred="pred",
                                    discrepancy="discrepancy"),
            AddRandomGuidanced(guidance="guidance",
                               discrepancy="discrepancy",
                               probability="probability"),
            AddGuidanceSignald(image="image", guidance="guidance"),
            ToTensord(keys=("image", "label")),
        ]
        iteration_transforms = Compose(
            iteration_transforms) if compose else iteration_transforms

        i = Interaction(transforms=iteration_transforms,
                        train=train,
                        max_interactions=5)
        self.assertEqual(len(i.transforms.transforms), 6,
                         "Mismatch in expected transforms")

        # set up engine
        engine = SupervisedTrainer(
            device=torch.device("cpu"),
            max_epochs=1,
            train_data_loader=data_loader,
            network=network,
            optimizer=opt,
            loss_function=loss,
            iteration_update=i,
        )
        engine.add_event_handler(IterationEvents.INNER_ITERATION_STARTED,
                                 add_one)
        engine.add_event_handler(IterationEvents.INNER_ITERATION_COMPLETED,
                                 add_one)

        engine.run()
        self.assertIsNotNone(engine.state.batch[0].get("guidance"),
                             "guidance is missing")
        self.assertEqual(engine.state.best_metric, 9)
Example #3
    def test_shape(self):
        expected_shape = (128, 128, 128)
        test_image = nib.Nifti1Image(
            np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4))
        test_data = []
        with tempfile.TemporaryDirectory() as tempdir:
            for i in range(6):
                nib.save(test_image,
                         os.path.join(tempdir, f"test_image{i}.nii.gz"))
                test_data.append(
                    {"image": os.path.join(tempdir, f"test_image{i}.nii.gz")})

            test_transform = Compose([
                LoadImaged(keys="image"),
                SimulateDelayd(keys="image", delay_time=1e-7)
            ])

            data_iterator = _Stream(test_data)
            with self.assertRaises(TypeError):  # Dataset doesn't work
                dataset = Dataset(data=data_iterator, transform=test_transform)
                for _ in dataset:
                    pass
            dataset = IterableDataset(data=data_iterator,
                                      transform=test_transform)
            for d in dataset:
                self.assertTupleEqual(d["image"].shape, expected_shape)

            num_workers = 2 if sys.platform == "linux" else 0
            dataloader = DataLoader(dataset=dataset,
                                    batch_size=3,
                                    num_workers=num_workers)
            for d in dataloader:
                self.assertTupleEqual(d["image"].shape[1:], expected_shape)
Example #4
    def query_by_case(self, patient_id: str) -> Dataset:
        """Return nodule volumes for one specific case.

        Args:
            patient_id (str): Patient ID of desired case.

        Returns:
            Dataset: Dataset containing case nodules.
        """
        train_cases, valid_cases = self.splits
        train_pids = [case["pid"] for case in train_cases]
        valid_pids = [case["pid"] for case in valid_cases]
        if patient_id in train_pids:
            data_dict = [{
                "image": nod["image"],
                "label": nod[self.target]
            } for case in train_cases if case["pid"] == patient_id
                         for nod in case["nodules"]]
        elif patient_id in valid_pids:
            data_dict = [{
                "image": nod["image"],
                "label": nod[self.target]
            } for case in valid_cases if case["pid"] == patient_id
                         for nod in case["nodules"]]
        else:
            raise ValueError("Case with given ID could not be found.")

        return Dataset(data_dict, transform=self.val_transforms)
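The nested comprehension above flattens the nodules of every matching case into MONAI-style dictionaries. A self-contained sketch of that lookup with toy data (the field name `malignancy` stands in for `self.target`, and the file names are placeholders):

from monai.data import Dataset

cases = [
    {"pid": "LIDC-0001", "nodules": [{"image": "nodule_0.nii.gz", "malignancy": 1}]},
    {"pid": "LIDC-0002", "nodules": [{"image": "nodule_1.nii.gz", "malignancy": 0}]},
]
target = "malignancy"
patient_id = "LIDC-0001"

data_dict = [
    {"image": nod["image"], "label": nod[target]}
    for case in cases if case["pid"] == patient_id
    for nod in case["nodules"]
]
dataset = Dataset(data_dict, transform=None)
print(len(dataset), dataset[0])  # 1 {'image': 'nodule_0.nii.gz', 'label': 1}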
Example #5
    def test_anisotropic_spacing(self):
        with tempfile.TemporaryDirectory() as tempdir:

            pixdims = [[1.0, 1.0, 5.0], [1.0, 1.0, 4.0], [1.0, 1.0, 4.5], [1.0, 1.0, 2.0], [1.0, 1.0, 1.0]]
            for i in range(5):
                im, seg = create_test_image_3d(32, 32, 32, num_seg_classes=1, num_objs=3, rad_max=6, channel_dim=0)
                n = nib.Nifti1Image(im, np.eye(4))
                n.header["pixdim"][1:4] = pixdims[i]
                nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))
                n = nib.Nifti1Image(seg, np.eye(4))
                n.header["pixdim"][1:4] = pixdims[i]
                nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

            train_images = sorted(glob.glob(os.path.join(tempdir, "img*.nii.gz")))
            train_labels = sorted(glob.glob(os.path.join(tempdir, "seg*.nii.gz")))
            data_dicts = [
                {"image": image_name, "label": label_name} for image_name, label_name in zip(train_images, train_labels)
            ]

            t = Compose([LoadImaged(keys=["image", "label"]), FromMetaTensord(keys=["image", "label"])])
            dataset = Dataset(data=data_dicts, transform=t)

            calculator = DatasetSummary(dataset, num_workers=4, meta_key_postfix=PostFix.meta())

            target_spacing = calculator.get_target_spacing(anisotropic_threshold=4.0, percentile=20.0)
            np.testing.assert_allclose(target_spacing, (1.0, 1.0, 1.8))
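The expected (1.0, 1.0, 1.8) can be reproduced by hand: the per-axis median spacing is (1.0, 1.0, 4.0), the 4.0/1.0 ratio reaches `anisotropic_threshold=4.0`, so `get_target_spacing` replaces the anisotropic axis with the requested 20th percentile of its spacings. A quick check of that arithmetic:

import numpy as np

z_spacings = [5.0, 4.0, 4.5, 2.0, 1.0]   # last pixdim of each test volume
print(np.median(z_spacings))             # 4.0 -> the 4.0/1.0 ratio hits the threshold
print(np.percentile(z_spacings, 20.0))   # 1.8 -> the expected target z spacing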
Example #6
    def run_interaction(self, train, compose):
        data = []
        for i in range(5):
            data.append({"image": torch.tensor([float(i)]), "label": torch.tensor([float(i)])})
        network = torch.nn.Linear(1, 1)
        lr = 1e-3
        opt = torch.optim.SGD(network.parameters(), lr)
        loss = torch.nn.L1Loss()
        dataset = Dataset(data, transform=None)
        data_loader = torch.utils.data.DataLoader(dataset, batch_size=5)

        iteration_transforms = [Activationsd(keys="pred", sigmoid=True), ToNumpyd(keys="pred")]
        iteration_transforms = Compose(iteration_transforms) if compose else iteration_transforms

        i = Interaction(transforms=iteration_transforms, train=train, max_interactions=5)
        self.assertEqual(len(i.transforms.transforms), 2, "Mismatch in expected transforms")

        # set up engine
        engine = SupervisedTrainer(
            device=torch.device("cpu"),
            max_epochs=1,
            train_data_loader=data_loader,
            network=network,
            optimizer=opt,
            loss_function=loss,
            iteration_update=i,
        )
        engine.add_event_handler(IterationEvents.INNER_ITERATION_STARTED, add_one)
        engine.add_event_handler(IterationEvents.INNER_ITERATION_COMPLETED, add_one)

        engine.run()
        self.assertIsNotNone(engine.state.batch.get("probability"), "Probability is missing")
        self.assertEqual(engine.state.best_metric, 9)
Example #7
    def test_time(self):
        dataset = Dataset(
            data=self.datalist * 2,
            transform=self.transform)  # contains data for 2 batches
        dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)

        tbuffer = ThreadBuffer(dataloader)

        with PerfContext() as pc:
            for _ in dataloader:
                time.sleep(
                    0.5
                )  # each batch takes 0.8 s to generate on top of this time

        unbuffered_time = pc.total_time

        with PerfContext() as pc:
            for _ in tbuffer:
                time.sleep(
                    0.5
                )  # while "computation" is happening the next batch is being generated, saving 0.4 s

        buffered_time = pc.total_time
        if sys.platform == "darwin":  # skip macOS measure
            print(
                f"darwin: Buffered time {buffered_time} vs unbuffered time {unbuffered_time}"
            )
        else:
            self.assertTrue(
                buffered_time < unbuffered_time,
                f"Buffered time {buffered_time} should be less than unbuffered time {unbuffered_time}",
            )
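The timing comparison above works because `ThreadBuffer` fetches the next batch on a background thread while the current one is being consumed. A minimal standalone sketch of that usage (toy data, no transforms):

from monai.data import DataLoader, Dataset, ThreadBuffer

ds = Dataset(data=[{"value": float(i)} for i in range(8)], transform=None)
loader = DataLoader(ds, batch_size=2, num_workers=0)

for batch in ThreadBuffer(loader):
    # while this batch is processed, the buffer is already generating the next one
    print(batch["value"])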
    def test_dataloader(self):
        dataset = Dataset(
            data=[{
                "img": np.array([[[0.0, 1.0], [2.0, 3.0]]])
            }, {
                "img": np.array([[[0.0, 1.0], [2.0, 3.0]]])
            }],
            transform=IntensityStatsd(keys="img",
                                      ops=["max", "mean"],
                                      key_prefix="orig"),
        )
        # set num workers = 0 for mac / win
        num_workers = 2 if sys.platform == "linux" else 0
        dataloader = DataLoader(dataset=dataset,
                                num_workers=num_workers,
                                batch_size=2)
        orig_method = mp.get_start_method()
        mp.set_start_method("spawn", force=True)

        for d in dataloader:
            meta = d["img_meta_dict"]
            np.testing.assert_allclose(meta["orig_max"], [3.0, 3.0], atol=1e-3)
            np.testing.assert_allclose(meta["orig_mean"], [1.5, 1.5],
                                       atol=1e-3)
        # restore the mp method
        mp.set_start_method(orig_method, force=True)
    def test_content(self, data, trigger_event):
        # set up engine
        gb_count_dict = {}

        def _train_func(engine, batch):
            # store garbage collection counts
            if trigger_event == Events.EPOCH_COMPLETED or trigger_event.lower() == "epoch":
                if engine.state.iteration % engine.state.epoch_length == 1:
                    gb_count_dict[engine.state.epoch] = gc.get_count()
            elif trigger_event.lower() == "iteration":
                gb_count_dict[engine.state.iteration] = gc.get_count()

        engine = Engine(_train_func)

        # set up testing handler
        dataset = Dataset(data, transform=None)
        data_loader = torch.utils.data.DataLoader(dataset, batch_size=1)
        GarbageCollector(trigger_event=trigger_event,
                         log_level=30).attach(engine)

        engine.run(data_loader, max_epochs=5)

        first_count = 0
        for iteration, gb_count in gb_count_dict.items():
            # At least one zero-generation object is collected
            # self.assertGreaterEqual(gb_count[0], 0)
            if iteration > 1:
                # Since we are collecting all objects from all generations manually at each call,
                # starting from the second call, there shouldn't be any 1st and 2nd
                # generation objects available to collect.
                self.assertEqual(gb_count[1], first_count)
                self.assertEqual(gb_count[2], first_count)
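The assertion relies on the fact that a full, manual collection (which, per the comment above, the `GarbageCollector` handler performs on every triggered event) clears the counters of the older generations. A quick standalone illustration of the counters the test inspects:

import gc

print(gc.get_count())  # per-generation allocation counters, e.g. (599, 8, 2)
gc.collect()           # full collection over all generations
print(gc.get_count())  # generation 1 and 2 counters are back to 0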
Example #10
 def test_exception(self, datalist):
     dataset = Dataset(data=datalist, transform=None)
     dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)
     with self.assertRaisesRegex((TypeError, RuntimeError),
                                 "Collate error on the key"):
         for _ in dataloader:
             pass
Example #11
 def test_shape(self, expected_shape):
     test_image = nib.Nifti1Image(
         np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4))
     tempdir = tempfile.mkdtemp()
     nib.save(test_image, os.path.join(tempdir, 'test_image1.nii.gz'))
     nib.save(test_image, os.path.join(tempdir, 'test_label1.nii.gz'))
     nib.save(test_image, os.path.join(tempdir, 'test_extra1.nii.gz'))
     nib.save(test_image, os.path.join(tempdir, 'test_image2.nii.gz'))
     nib.save(test_image, os.path.join(tempdir, 'test_label2.nii.gz'))
     nib.save(test_image, os.path.join(tempdir, 'test_extra2.nii.gz'))
     test_data = [{
         'image': os.path.join(tempdir, 'test_image1.nii.gz'),
         'label': os.path.join(tempdir, 'test_label1.nii.gz'),
         'extra': os.path.join(tempdir, 'test_extra1.nii.gz')
     }, {
         'image': os.path.join(tempdir, 'test_image2.nii.gz'),
         'label': os.path.join(tempdir, 'test_label2.nii.gz'),
         'extra': os.path.join(tempdir, 'test_extra2.nii.gz')
     }]
     dataset = Dataset(
         data=test_data,
         transform=LoadNiftid(keys=['image', 'label', 'extra']))
     data1 = dataset[0]
     data2 = dataset[1]
     shutil.rmtree(tempdir)
     self.assertTupleEqual(data1['image'].shape, expected_shape)
     self.assertTupleEqual(data1['label'].shape, expected_shape)
     self.assertTupleEqual(data1['extra'].shape, expected_shape)
     self.assertTupleEqual(data2['image'].shape, expected_shape)
     self.assertTupleEqual(data2['label'].shape, expected_shape)
     self.assertTupleEqual(data2['extra'].shape, expected_shape)
Example #12
    def test_shape(self, expected_shape):
        test_image = nib.Nifti1Image(
            np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4))
        tempdir = tempfile.mkdtemp()
        nib.save(test_image, os.path.join(tempdir, 'test_image1.nii.gz'))
        nib.save(test_image, os.path.join(tempdir, 'test_label1.nii.gz'))
        nib.save(test_image, os.path.join(tempdir, 'test_extra1.nii.gz'))
        nib.save(test_image, os.path.join(tempdir, 'test_image2.nii.gz'))
        nib.save(test_image, os.path.join(tempdir, 'test_label2.nii.gz'))
        nib.save(test_image, os.path.join(tempdir, 'test_extra2.nii.gz'))
        test_data = [{
            'image': os.path.join(tempdir, 'test_image1.nii.gz'),
            'label': os.path.join(tempdir, 'test_label1.nii.gz'),
            'extra': os.path.join(tempdir, 'test_extra1.nii.gz')
        }, {
            'image': os.path.join(tempdir, 'test_image2.nii.gz'),
            'label': os.path.join(tempdir, 'test_label2.nii.gz'),
            'extra': os.path.join(tempdir, 'test_extra2.nii.gz')
        }]
        test_transform = Compose([
            LoadNiftid(keys=['image', 'label', 'extra']),
            SimulateDelayd(keys=['image', 'label', 'extra'],
                           delay_time=[1e-7, 1e-6, 1e-5])
        ])
        dataset = Dataset(data=test_data, transform=test_transform)
        data1 = dataset[0]
        data2 = dataset[1]

        self.assertTupleEqual(data1['image'].shape, expected_shape)
        self.assertTupleEqual(data1['label'].shape, expected_shape)
        self.assertTupleEqual(data1['extra'].shape, expected_shape)
        self.assertTupleEqual(data2['image'].shape, expected_shape)
        self.assertTupleEqual(data2['label'].shape, expected_shape)
        self.assertTupleEqual(data2['extra'].shape, expected_shape)

        dataset = Dataset(
            data=test_data,
            transform=LoadNiftid(keys=['image', 'label', 'extra']))
        data1_simple = dataset[0]
        data2_simple = dataset[1]

        self.assertTupleEqual(data1_simple['image'].shape, expected_shape)
        self.assertTupleEqual(data1_simple['label'].shape, expected_shape)
        self.assertTupleEqual(data1_simple['extra'].shape, expected_shape)
        self.assertTupleEqual(data2_simple['image'].shape, expected_shape)
        self.assertTupleEqual(data2_simple['label'].shape, expected_shape)
        self.assertTupleEqual(data2_simple['extra'].shape, expected_shape)
        shutil.rmtree(tempdir)
Example #13
    def test_decollation_list(self, *transforms):
        t_compose = Compose([AddChannel(), Compose(transforms), ToTensor()])
        # If nibabel present, read from disk
        if has_nib:
            t_compose = _ListCompose([LoadImage(image_only=False), t_compose])

        dataset = Dataset(self.data_list, t_compose)
        self.check_decollate(dataset=dataset)
Example #14
    def test_spacing_intensity(self):
        set_determinism(seed=0)
        with tempfile.TemporaryDirectory() as tempdir:

            for i in range(5):
                im, seg = create_test_image_3d(32,
                                               32,
                                               32,
                                               num_seg_classes=1,
                                               num_objs=3,
                                               rad_max=6,
                                               channel_dim=0)
                n = nib.Nifti1Image(im, np.eye(4))
                nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))
                n = nib.Nifti1Image(seg, np.eye(4))
                nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

            train_images = sorted(
                glob.glob(os.path.join(tempdir, "img*.nii.gz")))
            train_labels = sorted(
                glob.glob(os.path.join(tempdir, "seg*.nii.gz")))
            data_dicts = [{
                "image": image_name,
                "label": label_name
            } for image_name, label_name in zip(train_images, train_labels)]

            t = Compose([
                LoadImaged(keys=["image", "label"]),
                ToNumpyd(keys=[
                    "image", "label", "image_meta_dict", "label_meta_dict"
                ]),
            ])
            dataset = Dataset(data=data_dicts, transform=t)

            # test **kwargs of `DatasetSummary` for `DataLoader`
            calculator = DatasetSummary(dataset,
                                        num_workers=4,
                                        meta_key="image_meta_dict",
                                        collate_fn=test_collate)

            target_spacing = calculator.get_target_spacing(
                spacing_key="pixdim")
            self.assertEqual(target_spacing, (1.0, 1.0, 1.0))
            calculator.calculate_statistics()
            np.testing.assert_allclose(calculator.data_mean,
                                       0.892599,
                                       rtol=1e-5,
                                       atol=1e-5)
            np.testing.assert_allclose(calculator.data_std,
                                       0.131731,
                                       rtol=1e-5,
                                       atol=1e-5)
            calculator.calculate_percentiles(sampling_flag=True, interval=2)
            self.assertEqual(calculator.data_max_percentile, 1.0)
            np.testing.assert_allclose(calculator.data_min_percentile,
                                       0.556411,
                                       rtol=1e-5,
                                       atol=1e-5)
Example #15
    def test_lazy_instantiation(self):
        config = {"_target_": "DataLoader", "dataset": Dataset(data=[1, 2]), "batch_size": 2}
        configer = ConfigComponent(config=config, locator=None)
        init_config = configer.get_config()
        # modify config content at runtime
        init_config["batch_size"] = 4
        configer.update_config(config=init_config)

        ret = configer.instantiate()
        self.assertIsInstance(ret, DataLoader)
        self.assertEqual(ret.batch_size, 4)
Example #16
    def _define_prediction_data_loaders(
            self,
            prediction_folder_path: Union[Path, str]
    ) -> bool:
        """Initialize prediction datasets and data loaders.

        @Note: in Windows, it is essential to set `persistent_workers=True` in the data loaders!

        @return True if datasets and data loaders could be instantiated, False otherwise.
        """

        # Check that the path exists
        prediction_folder_path = Path(prediction_folder_path)
        if not prediction_folder_path.is_dir():
            return False

        # Scan for images
        self._prediction_image_names = natsorted(
            glob(str(Path(prediction_folder_path) / "*.tif"))
        )

        # Optimize arguments
        if sys.platform == 'win32':
            persistent_workers = True
            pin_memory = False
        else:
            persistent_workers = False
            pin_memory = torch.cuda.is_available()

        if len(self._prediction_image_names) == 0:

            self._prediction_dataset = None
            self._prediction_dataloader = None

            return False

        # Define the transforms
        self._define_prediction_transforms()

        # Prediction
        self._prediction_dataset = Dataset(
            self._prediction_image_names,
            self._prediction_image_transforms
        )
        self._prediction_dataloader = DataLoader(
            self._prediction_dataset,
            batch_size=self._test_batch_size,
            shuffle=False,
            num_workers=self._test_num_workers,
            persistent_workers=persistent_workers,
            pin_memory=pin_memory
        )

        return True
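The platform-specific branch above (persistent workers on Windows, pinned memory where CUDA is available) can be factored into a small helper. This is a sketch under the same assumptions, not part of the original class; note that PyTorch only allows `persistent_workers=True` when `num_workers > 0`:

import sys

import torch

def prediction_loader_kwargs(num_workers: int) -> dict:
    """Return platform-dependent DataLoader keyword arguments (sketch)."""
    if sys.platform == "win32":
        # on Windows, keeping worker processes alive avoids costly re-spawning
        return {"persistent_workers": num_workers > 0, "pin_memory": False}
    return {"persistent_workers": False, "pin_memory": torch.cuda.is_available()}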
Example #17
    def test_values(self):
        dataset = Dataset(data=self.datalist, transform=self.transform)
        dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)

        tbuffer = ThreadBuffer(dataloader)

        for d in tbuffer:
            self.assertEqual(d["image"][0], "spleen_19.nii.gz")
            self.assertEqual(d["image"][1], "spleen_31.nii.gz")
            self.assertEqual(d["label"][0], "spleen_label_19.nii.gz")
            self.assertEqual(d["label"][1], "spleen_label_31.nii.gz")
Example #18
 def test_decollate(self, dtype):
     batch_size = 3
     ims = [self.get_im(dtype=dtype)[0] for _ in range(batch_size * 2)]
     ds = Dataset(ims)
     dl = DataLoader(ds, num_workers=batch_size, batch_size=batch_size)
     batch = next(iter(dl))
     decollated = decollate_batch(batch)
     self.assertIsInstance(decollated, list)
     self.assertEqual(len(decollated), batch_size)
     for elem, im in zip(decollated, ims):
         self.assertIsInstance(elem, MetaTensor)
         self.check(elem, im, ids=False)
Example #19
    def test_thread_workers(self):
        dataset = Dataset(data=self.datalist, transform=self.transform)
        dataloader = ThreadDataLoader(dataset=dataset,
                                      batch_size=2,
                                      num_workers=2,
                                      use_thread_workers=True)

        for d in dataloader:
            self.assertEqual(d["image"][0], "spleen_19.nii.gz")
            self.assertEqual(d["image"][1], "spleen_31.nii.gz")
            self.assertEqual(d["label"][0], "spleen_label_19.nii.gz")
            self.assertEqual(d["label"][1], "spleen_label_31.nii.gz")
Example #20
    def test_endianness(self, endianness, use_array, image_only):

        hdr = nib.Nifti1Header(endianness=endianness)
        nii = nib.Nifti1Image(self.im, np.eye(4), header=hdr)
        nib.save(nii, self.fname)

        data = [self.fname] if use_array else [{"image": self.fname}]
        tr = LoadImage(image_only=image_only) if use_array else LoadImaged(
            "image", image_only=image_only)
        check_ds = Dataset(data, tr)
        check_loader = DataLoader(check_ds, batch_size=1)
        _ = next(iter(check_loader))
Example #21
    def test_endianness(self, endianness, use_array, image_only):

        hdr = nib.Nifti1Header(endianness=endianness)
        nii = nib.Nifti1Image(self.im, np.eye(4), header=hdr)
        nib.save(nii, self.fname)

        data = [self.fname] if use_array else [{"image": self.fname}]
        tr = LoadImage(image_only=image_only) if use_array else LoadImaged("image", image_only=image_only)
        check_ds = Dataset(data, tr)
        check_loader = DataLoader(check_ds, batch_size=1)
        ret = next(iter(check_loader))
        if isinstance(ret, dict) and "image_meta_dict" in ret:
            np.testing.assert_allclose(ret["image_meta_dict"]["spatial_shape"], [[100, 100]])
Example #22
 def test_dataloader(self, dtype):
     batch_size = 5
     ims = [self.get_im(dtype=dtype)[0] for _ in range(batch_size * 2)]
     ds = Dataset(ims)
     im_shape = tuple(ims[0].shape)
     affine_shape = tuple(ims[0].affine.shape)
     expected_im_shape = (batch_size,) + im_shape
     expected_affine_shape = (batch_size,) + affine_shape
     dl = DataLoader(ds, num_workers=batch_size, batch_size=batch_size)
     for batch in dl:
         self.assertIsInstance(batch, MetaTensor)
         self.assertTupleEqual(tuple(batch.shape), expected_im_shape)
         self.assertTupleEqual(tuple(batch.affine.shape), expected_affine_shape)
Example #23
 def test_with_dataloader(self, file_path, level, expected_spatial_shape, expected_shape):
     train_transform = Compose(
         [
             LoadImaged(keys=["image"], reader=WSIReader, backend=self.backend, level=level),
             ToTensord(keys=["image"]),
         ]
     )
     dataset = Dataset([{"image": file_path}], transform=train_transform)
     data_loader = DataLoader(dataset)
     data: dict = first(data_loader)
     for s in data[PostFix.meta("image")]["spatial_shape"]:
         torch.testing.assert_allclose(s, expected_spatial_shape)
     self.assertTupleEqual(data["image"].shape, expected_shape)
    def test_loading_array(self):
        set_determinism(seed=1234)
        # image dataset
        images = [
            np.arange(16, dtype=float).reshape(1, 4, 4),
            np.arange(16, dtype=float).reshape(1, 4, 4)
        ]
        # image patch sampler
        n_samples = 8
        sampler = RandSpatialCropSamples(roi_size=(3, 3),
                                         num_samples=n_samples,
                                         random_center=True,
                                         random_size=False)

        # image level
        patch_intensity = RandShiftIntensity(offsets=1.0, prob=1.0)
        image_ds = Dataset(images, transform=patch_intensity)
        # patch level
        ds = PatchDataset(dataset=image_ds,
                          patch_func=sampler,
                          samples_per_image=n_samples,
                          transform=patch_intensity)

        np.testing.assert_equal(len(ds), n_samples * len(images))
        # use the patch dataset, length: len(images) x samples_per_image
        for item in DataLoader(ds, batch_size=2, shuffle=False, num_workers=0):
            np.testing.assert_equal(tuple(item.shape), (2, 1, 3, 3))
        np.testing.assert_allclose(
            item[0],
            np.array([[[1.338681, 2.338681, 3.338681],
                       [5.338681, 6.338681, 7.338681],
                       [9.338681, 10.338681, 11.338681]]]),
            rtol=1e-5,
        )
        if sys.platform != "win32":
            for item in DataLoader(ds,
                                   batch_size=2,
                                   shuffle=False,
                                   num_workers=2):
                np.testing.assert_equal(tuple(item.shape), (2, 1, 3, 3))
            np.testing.assert_allclose(
                item[0],
                np.array([[
                    [4.957847, 5.957847, 6.957847],
                    [8.957847, 9.957847, 10.957847],
                    [12.957847, 13.957847, 14.957847],
                ]]),
                rtol=1e-5,
            )
        set_determinism(seed=None)
Example #25
 def test_with_dataloader_batch(self, file_path, level, expected_spatial_shape, expected_shape):
     train_transform = Compose(
         [
             LoadImaged(keys=["image"], reader=WSIReader, backend=self.backend, level=level),
             FromMetaTensord(keys=["image"]),
             ToTensord(keys=["image"]),
         ]
     )
     dataset = Dataset([{"image": file_path}, {"image": file_path}], transform=train_transform)
     batch_size = 2
     data_loader = DataLoader(dataset, batch_size=batch_size)
     data: dict = first(data_loader)
     for s in data[PostFix.meta("image")]["spatial_shape"]:
         assert_allclose(s, expected_spatial_shape, type_test=False)
     self.assertTupleEqual(data["image"].shape, (batch_size, *expected_shape[1:]))
Example #26
def get_task_params(args):
    """
    This function is used to achieve the spacings of decathlon dataset.
    In addition, for CT images (task 03, 06, 07, 08, 09 and 10), this function
    also prints the mean and std values (used for normalization), and the min (0.5 percentile)
    and max(99.5 percentile) values (used for clip).

    """
    task_id = args.task_id
    root_dir = args.root_dir
    datalist_path = args.datalist_path
    dataset_path = os.path.join(root_dir, task_name[task_id])
    datalist_name = "dataset_task{}.json".format(task_id)

    # get all training data
    datalist = load_decathlon_datalist(
        os.path.join(datalist_path, datalist_name), True, "training", dataset_path
    )

    # get modality info.
    properties = load_decathlon_properties(
        os.path.join(datalist_path, datalist_name), "modality"
    )

    dataset = Dataset(
        data=datalist,
        transform=LoadImaged(keys=["image", "label"]),
    )

    calculator = DatasetSummary(dataset, num_workers=4)
    target_spacing = calculator.get_target_spacing()
    print("spacing: ", target_spacing)
    if properties["modality"]["0"] == "CT":
        print("CT input, calculate statistics:")
        calculator.calculate_statistics()
        print("mean: ", calculator.data_mean, " std: ", calculator.data_std)
        calculator.calculate_percentiles(
            sampling_flag=True, interval=10, min_percentile=0.5, max_percentile=99.5
        )
        print(
            "min: ",
            calculator.data_min_percentile,
            " max: ",
            calculator.data_max_percentile,
        )
    else:
        print("non CT input, skip calculating.")
Example #27
    def test_invert(self):
        set_determinism(seed=0)
        im_fname = make_nifti_image(create_test_image_3d(101, 100, 107, noise_max=100)[1])  # label image, discrete
        data = [im_fname for _ in range(12)]
        transform = Compose(
            [
                LoadImage(image_only=True),
                EnsureChannelFirst(),
                Orientation("RPS"),
                Spacing(pixdim=(1.2, 1.01, 0.9), mode="bilinear", dtype=np.float32),
                RandFlip(prob=0.5, spatial_axis=[1, 2]),
                RandAxisFlip(prob=0.5),
                RandRotate90(prob=0, spatial_axes=(1, 2)),
                RandZoom(prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True),
                RandRotate(prob=0.5, range_x=np.pi, mode="bilinear", align_corners=True, dtype=np.float64),
                RandAffine(prob=0.5, rotate_range=np.pi, mode="nearest"),
                ResizeWithPadOrCrop(100),
                CastToType(dtype=torch.uint8),
            ]
        )

        # num workers = 0 for mac or gpu transforms
        num_workers = 0 if sys.platform != "linux" or torch.cuda.is_available() else 2
        dataset = Dataset(data, transform=transform)
        self.assertIsInstance(transform.inverse(dataset[0]), MetaTensor)
        loader = DataLoader(dataset, num_workers=num_workers, batch_size=1)
        inverter = Invert(transform=transform, nearest_interp=True, device="cpu")

        for d in loader:
            d = decollate_batch(d)
            for item in d:
                orig = deepcopy(item)
                i = inverter(item)
                self.assertTupleEqual(orig.shape[1:], (100, 100, 100))
                # check the nearest interpolation mode
                torch.testing.assert_allclose(i.to(torch.uint8).to(torch.float), i.to(torch.float))
                self.assertTupleEqual(i.shape[1:], (100, 101, 107))
        # check labels match
        reverted = i.detach().cpu().numpy().astype(np.int32)
        original = LoadImage(image_only=True)(data[-1])
        n_good = np.sum(np.isclose(reverted, original.numpy(), atol=1e-3))
        reverted_name = i.meta["filename_or_obj"]
        original_name = original.meta["filename_or_obj"]
        self.assertEqual(reverted_name, original_name)
        print("invert diff", reverted.size - n_good)
        self.assertTrue((reverted.size - n_good) < 300000, f"diff. {reverted.size - n_good}")
        set_determinism(seed=None)
Example #28
    def test_content(self):
        data = [0] * 8

        # set up engine
        def _train_func(engine, batch):
            pass

        engine = Engine(_train_func)

        # set up testing handler
        val_data_loader = torch.utils.data.DataLoader(Dataset(data))
        evaluator = TestEvaluator(torch.device("cpu:0"), val_data_loader)
        saver = ValidationHandler(interval=2, validator=evaluator)
        saver.attach(engine)

        engine.run(data, max_epochs=5)
        self.assertEqual(evaluator.state.max_epochs, 4)
        self.assertEqual(evaluator.state.epoch_length, 8)
Example #29
    def test_seg_values(self):
        with tempfile.TemporaryDirectory() as tempdir:
            # prepare test datalist file
            test_data = {
                "name": "Spleen",
                "description": "Spleen Segmentation",
                "labels": {"0": "background", "1": "spleen"},
                "training": [
                    {"image": "spleen_19.nii.gz", "label": "spleen_19.nii.gz"},
                    {"image": "spleen_31.nii.gz", "label": "spleen_31.nii.gz"},
                ],
                "test": ["spleen_15.nii.gz", "spleen_23.nii.gz"],
            }
            json_str = json.dumps(test_data)
            file_path = os.path.join(tempdir, "test_data.json")
            with open(file_path, "w") as json_file:
                json_file.write(json_str)

            data_list = DatasetFunc(data=file_path,
                                    func=load_decathlon_datalist,
                                    data_list_key="training",
                                    base_dir=tempdir)
            # partition dataset for train / validation
            data_partition = DatasetFunc(
                data=data_list,
                func=lambda x, **kwargs: partition_dataset(x, **kwargs)[0],
                num_partitions=2)
            dataset = Dataset(data=data_partition, transform=None)
            self.assertEqual(dataset[0]["image"],
                             os.path.join(tempdir, "spleen_19.nii.gz"))
            self.assertEqual(dataset[0]["label"],
                             os.path.join(tempdir, "spleen_19.nii.gz"))
Example #30
    def test_dataloader_repeats(self):
        dataset = Dataset(data=self.datalist, transform=self.transform)
        dataloader = ThreadDataLoader(dataset=dataset,
                                      batch_size=2,
                                      num_workers=0,
                                      repeats=2)

        previous_batch = None

        for d in dataloader:
            self.assertEqual(d["image"][0], "spleen_19.nii.gz")
            self.assertEqual(d["image"][1], "spleen_31.nii.gz")

            if previous_batch is None:
                previous_batch = d
            else:
                self.assertIs(previous_batch, d,
                              "Batch object was not repeated")
                previous_batch = None