Example #1
    def _cmp(self, filename, shape, ch_shape, reader_1, reader_2, outname,
             ext):
        data_dict = {"img": filename}
        keys = data_dict.keys()
        xforms = Compose(
            [LoadImaged(keys, reader=reader_1, ensure_channel_first=True)])
        img_dict = xforms(data_dict)  # load the input with reader_1
        self.assertTupleEqual(img_dict["img"].shape, ch_shape)
        self.assertTupleEqual(
            tuple(img_dict[PostFix.meta("img")]["spatial_shape"]), shape)

        with tempfile.TemporaryDirectory() as tempdir:
            save_xform = SaveImageD(keys,
                                    meta_keys=PostFix.meta("img"),
                                    output_dir=tempdir,
                                    squeeze_end_dims=False,
                                    output_ext=ext)
            save_xform(img_dict)  # save with the given output_ext

            new_xforms = Compose(
                [LoadImaged(keys, reader=reader_2),
                 EnsureChannelFirstD(keys)])
            out = new_xforms(
                {"img": os.path.join(tempdir, outname)})  # reload with reader_2
            self.assertTupleEqual(out["img"].shape, ch_shape)
            self.assertTupleEqual(
                tuple(out[PostFix.meta("img")]["spatial_shape"]), shape)
            if "affine" in img_dict[PostFix.meta("img")] and "affine" in out[
                    PostFix.meta("img")]:
                np.testing.assert_allclose(
                    img_dict[PostFix.meta("img")]["affine"],
                    out[PostFix.meta("img")]["affine"],
                    rtol=1e-3)
            np.testing.assert_allclose(out["img"], img_dict["img"], rtol=1e-3)
Example #2
    def setUp(self):
        if not has_nib:
            self.skipTest("nibabel required for test_inverse")

        set_determinism(seed=0)

        b_size = 11
        im_fname, seg_fname = (make_nifti_image(i)
                               for i in create_test_image_3d(101, 100, 107))
        load_ims = Compose([LoadImaged(KEYS), AddChanneld(KEYS)])
        self.data_3d = [
            load_ims({
                "image": im_fname,
                "label": seg_fname
            }) for _ in range(b_size)
        ]

        b_size = 8
        im_fname, seg_fname = (
            make_nifti_image(i)
            for i in create_test_image_2d(62, 37, rad_max=10))
        load_ims = Compose([LoadImaged(KEYS), AddChanneld(KEYS)])
        self.data_2d = [
            load_ims({
                "image": im_fname,
                "label": seg_fname
            }) for _ in range(b_size)
        ]

        self.batch_size = 7
Example #3
 def test_no_file(self):
     with self.assertRaises(RuntimeError):
         LoadImaged(keys="img", image_only=True)({"img": "unknown"})
     with self.assertRaises(RuntimeError):
         LoadImaged(keys="img", reader="nibabelreader", image_only=True)({
             "img":
             "unknown"
         })
Example #4
    def test_values(self):
        testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                   "testing_data")
        train_transform = Compose([
            LoadImaged(keys=["image", "label"]),
            AddChanneld(keys=["image", "label"]),
            ScaleIntensityd(keys="image"),
            ToTensord(keys=["image", "label"]),
        ])
        val_transform = LoadImaged(keys=["image", "label"])

        def _test_dataset(dataset):
            self.assertEqual(len(dataset), 52)
            self.assertTrue("image" in dataset[0])
            self.assertTrue("label" in dataset[0])
            self.assertTrue("image_meta_dict" in dataset[0])
            self.assertTupleEqual(dataset[0]["image"].shape, (1, 34, 49, 41))

        cvdataset = CrossValidation(
            dataset_cls=DecathlonDataset,
            nfolds=5,
            seed=12345,
            root_dir=testing_dir,
            task="Task04_Hippocampus",
            section="validation",
            transform=train_transform,
            download=True,
        )

        try:  # will start downloading if testing_dir doesn't have the Decathlon files
            data = cvdataset.get_dataset(folds=0)
        except (ContentTooShortError, HTTPError, RuntimeError) as e:
            print(str(e))
            if isinstance(e, RuntimeError):
                # FIXME: skip MD5 check as current downloading method may fail
                self.assertTrue(str(e).startswith("md5 check"))
            return  # skip this test due to network connection errors

        _test_dataset(data)

        # test training data for fold [1, 2, 3, 4] of 5 splits
        data = cvdataset.get_dataset(folds=[1, 2, 3, 4])
        self.assertTupleEqual(data[0]["image"].shape, (1, 35, 52, 33))
        self.assertEqual(len(data), 208)
        # test train / validation for fold 4 of 5 splits
        data = cvdataset.get_dataset(folds=[4],
                                     transform=val_transform,
                                     download=False)
        # val_transform doesn't add the channel dim to shape
        self.assertTupleEqual(data[0]["image"].shape, (38, 53, 30))
        self.assertEqual(len(data), 52)
        data = cvdataset.get_dataset(folds=[0, 1, 2, 3])
        self.assertTupleEqual(data[0]["image"].shape, (1, 34, 49, 41))
        self.assertEqual(len(data), 208)
Example #5
    def test_channel_dim(self):
        spatial_size = (32, 64, 3, 128)
        test_image = np.random.rand(*spatial_size)
        with tempfile.TemporaryDirectory() as tempdir:
            filename = os.path.join(tempdir, "test_image.nii.gz")
            nib.save(nib.Nifti1Image(test_image, affine=np.eye(4)), filename)

            loader = LoadImaged(keys="img")
            loader.register(ITKReader(channel_dim=2))
            result = EnsureChannelFirstD("img")(loader({"img": filename}))
            self.assertTupleEqual(tuple(result["img_meta_dict"]["spatial_shape"]), (32, 64, 128))
            self.assertTupleEqual(result["img"].shape, (3, 32, 64, 128))
Example #6
def transformations():
    train_transforms = Compose([
        LoadImaged(keys=['image', 'label']),
        AddChanneld(keys=['image', 'label']),
        ToTensord(keys=['image', 'label'])
    ])
    val_transforms = Compose([
        LoadImaged(keys=['image', 'label']),
        AddChanneld(keys=['image', 'label']),
        ToTensord(keys=['image', 'label'])
    ])
    return train_transforms, val_transforms
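
A minimal usage sketch (not part of the original example; the file paths below are placeholders): the returned pipelines are meant to be passed as the transform argument of a MONAI dataset.

from monai.data import DataLoader, Dataset

train_transforms, val_transforms = transformations()
train_files = [{'image': 'img0.nii.gz', 'label': 'seg0.nii.gz'}]  # placeholder paths
train_ds = Dataset(data=train_files, transform=train_transforms)
train_loader = DataLoader(train_ds, batch_size=2, shuffle=True)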
Example #7
    def test_channel_dim(self):
        spatial_size = (32, 64, 3, 128)
        test_image = np.random.rand(*spatial_size)
        with tempfile.TemporaryDirectory() as tempdir:
            filename = os.path.join(tempdir, "test_image.nii.gz")
            nib.save(nib.Nifti1Image(test_image, affine=np.eye(4)), filename)

            loader = LoadImaged(keys="img", image_only=True)
            loader.register(ITKReader(channel_dim=2))
            t = Compose([EnsureChannelFirstD("img"), FromMetaTensord("img")])
            result = t(loader({"img": filename}))
            self.assertTupleEqual(result["img"].shape, (3, 32, 64, 128))
Example #8
    def test_register(self):
        spatial_size = (32, 64, 128)
        test_image = np.random.rand(*spatial_size)
        with tempfile.TemporaryDirectory() as tempdir:
            filename = os.path.join(tempdir, "test_image.nii.gz")
            itk_np_view = itk.image_view_from_array(test_image)
            itk.imwrite(itk_np_view, filename)

            loader = LoadImaged(keys="img", image_only=True)
            loader.register(ITKReader())
            result = loader({"img": Path(filename)})
            self.assertTupleEqual(result["img"].shape, spatial_size[::-1])
Example #9
def load_img_and_sample_ddf():
    # load image
    img = LoadImaged(keys="img")({"img": FILE_PATH})["img"]
    # W, H, D -> D, H, W
    img = img.transpose((2, 1, 0))

    # randomly sample a ddf such that the maximum displacement in each
    # direction equals one-tenth of the image dimension in that direction
    ddf = np.random.random((3, *img.shape)).astype(np.float32)  # (3, D, H, W)
    ddf[0] = ddf[0] * img.shape[0] * 0.1
    ddf[1] = ddf[1] * img.shape[1] * 0.1
    ddf[2] = ddf[2] * img.shape[2] * 0.1
    return img, ddf
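
A hedged follow-up sketch (an assumption, not part of the original example): the sampled displacement field can be applied to the image with monai.networks.blocks.Warp, which expects a batched (1, 1, D, H, W) image and a (1, 3, D, H, W) ddf.

import numpy as np
import torch
from monai.networks.blocks import Warp

img, ddf = load_img_and_sample_ddf()
# add batch and channel dims: image -> (1, 1, D, H, W), ddf -> (1, 3, D, H, W)
img_t = torch.as_tensor(np.asarray(img), dtype=torch.float32)[None, None]
ddf_t = torch.as_tensor(ddf)[None]
warped = Warp()(img_t, ddf_t)  # warped image, shape (1, 1, D, H, W)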
Example #10
    def test_shape(self, expected_shape):
        test_image = nib.Nifti1Image(
            np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4))
        with tempfile.TemporaryDirectory() as tempdir:
            nib.save(test_image, os.path.join(tempdir, "test_image1.nii.gz"))
            nib.save(test_image, os.path.join(tempdir, "test_label1.nii.gz"))
            nib.save(test_image, os.path.join(tempdir, "test_extra1.nii.gz"))
            nib.save(test_image, os.path.join(tempdir, "test_image2.nii.gz"))
            nib.save(test_image, os.path.join(tempdir, "test_label2.nii.gz"))
            nib.save(test_image, os.path.join(tempdir, "test_extra2.nii.gz"))
            test_data = [
                {
                    "image": os.path.join(tempdir, "test_image1.nii.gz"),
                    "label": os.path.join(tempdir, "test_label1.nii.gz"),
                    "extra": os.path.join(tempdir, "test_extra1.nii.gz"),
                },
                {
                    "image": os.path.join(tempdir, "test_image2.nii.gz"),
                    "label": os.path.join(tempdir, "test_label2.nii.gz"),
                    "extra": os.path.join(tempdir, "test_extra2.nii.gz"),
                },
            ]
            test_transform = Compose([
                LoadImaged(keys=["image", "label", "extra"]),
                SimulateDelayd(keys=["image", "label", "extra"],
                               delay_time=[1e-7, 1e-6, 1e-5]),
            ])
            dataset = Dataset(data=test_data, transform=test_transform)
            data1 = dataset[0]
            data2 = dataset[1]

            self.assertTupleEqual(data1["image"].shape, expected_shape)
            self.assertTupleEqual(data1["label"].shape, expected_shape)
            self.assertTupleEqual(data1["extra"].shape, expected_shape)
            self.assertTupleEqual(data2["image"].shape, expected_shape)
            self.assertTupleEqual(data2["label"].shape, expected_shape)
            self.assertTupleEqual(data2["extra"].shape, expected_shape)

            dataset = Dataset(
                data=test_data,
                transform=LoadImaged(keys=["image", "label", "extra"]))
            data1_simple = dataset[0]
            data2_simple = dataset[1]

            self.assertTupleEqual(data1_simple["image"].shape, expected_shape)
            self.assertTupleEqual(data1_simple["label"].shape, expected_shape)
            self.assertTupleEqual(data1_simple["extra"].shape, expected_shape)
            self.assertTupleEqual(data2_simple["image"].shape, expected_shape)
            self.assertTupleEqual(data2_simple["label"].shape, expected_shape)
            self.assertTupleEqual(data2_simple["extra"].shape, expected_shape)
Example #11
    def test_values(self):
        testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                   "testing_data")
        train_transform = Compose([
            LoadImaged(keys=["image", "label"]),
            AddChanneld(keys=["image", "label"]),
            ScaleIntensityd(keys="image"),
            ToTensord(keys=["image", "label"]),
        ])
        val_transform = LoadImaged(keys=["image", "label"])

        def _test_dataset(dataset):
            self.assertEqual(len(dataset), 52)
            self.assertTrue("image" in dataset[0])
            self.assertTrue("label" in dataset[0])
            self.assertTrue(PostFix.meta("image") in dataset[0])
            self.assertTupleEqual(dataset[0]["image"].shape, (1, 34, 49, 41))

        cvdataset = CrossValidation(
            dataset_cls=DecathlonDataset,
            nfolds=5,
            seed=12345,
            root_dir=testing_dir,
            task="Task04_Hippocampus",
            section="validation",
            transform=train_transform,
            download=True,
        )

        with skip_if_downloading_fails():
            data = cvdataset.get_dataset(folds=0)

        _test_dataset(data)

        # test training data for fold [1, 2, 3, 4] of 5 splits
        data = cvdataset.get_dataset(folds=[1, 2, 3, 4])
        self.assertTupleEqual(data[0]["image"].shape, (1, 35, 52, 33))
        self.assertEqual(len(data), 208)
        # test train / validation for fold 4 of 5 splits
        data = cvdataset.get_dataset(folds=[4],
                                     transform=val_transform,
                                     download=False)
        # val_transform doesn't add the channel dim to shape
        self.assertTupleEqual(data[0]["image"].shape, (38, 53, 30))
        self.assertEqual(len(data), 52)
        data = cvdataset.get_dataset(folds=[0, 1, 2, 3])
        self.assertTupleEqual(data[0]["image"].shape, (1, 34, 49, 41))
        self.assertEqual(len(data), 208)
Example #12
 def test_linear_consistent_dict(self, xform_cls, input_dict, atol):
     """xform cls testing itk consistency"""
     img = LoadImaged(keys, image_only=True, simple_keys=True)(
         {keys[0]: FILE_PATH, keys[1]: FILE_PATH_1})
     img = EnsureChannelFirstd(keys)(img)
     ref_1 = {k: _create_itk_obj(img[k][0], img[k].affine) for k in keys}
     output = self.run_transform(img, xform_cls, input_dict)
     ref_2 = {k: _create_itk_obj(output[k][0], output[k].affine) for k in keys}
     expected = {k: _resample_to_affine(ref_1[k], ref_2[k]) for k in keys}
     # compare ref_2 and expected results from itk
     diff = {
         k: np.abs(
             itk.GetArrayFromImage(ref_2[k]) -
             itk.GetArrayFromImage(expected[k]))
         for k in keys
     }
     avg_diff = {k: np.mean(diff[k]) for k in keys}
     for k in keys:
         self.assertTrue(avg_diff[k] < atol,
                         f"{xform_cls} avg_diff: {avg_diff}, tol: {atol}")
Example #13
 def pre_transforms(self, data):
     return [
         LoadImaged(keys=["image", "label"]),
         EnsureChannelFirstd(keys=["image", "label"]),
         AddBackgroundScribblesFromROId(
             scribbles="label",
             scribbles_bg_label=self.scribbles_bg_label,
             scribbles_fg_label=self.scribbles_fg_label,
         ),
         # at the moment the optimisers are the bottleneck and take a long time,
         # therefore scale non-isotropically with a large spacing
         Spacingd(keys=["image", "label"], pixdim=self.pix_dim, mode=["bilinear", "nearest"]),
         Orientationd(keys=["image", "label"], axcodes="RAS"),
         ScaleIntensityRanged(
             keys="image",
             a_min=self.intensity_range[0],
             a_max=self.intensity_range[1],
             b_min=self.intensity_range[2],
             b_max=self.intensity_range[3],
             clip=self.intensity_range[4],
         ),
         MakeLikelihoodFromScribblesHistogramd(
             image="image",
             scribbles="label",
             post_proc_label="prob",
             scribbles_bg_label=self.scribbles_bg_label,
             scribbles_fg_label=self.scribbles_fg_label,
             normalise=True,
         ),
     ]
Example #14
    def __init__(
        self,
        root_dir,
        section,
        transform=LoadImaged(["image", "label"]),
        cache_rate=1.0,
        num_workers=0,
        shuffle=False,
    ) -> None:

        if not os.path.isdir(root_dir):
            raise ValueError("Root directory root_dir must be a directory.")
        self.section = section
        self.shuffle = shuffle
        self.val_frac = 0.2
        self.set_random_state(seed=0)
        dataset_dir = os.path.join(root_dir, "Task01_BrainTumour")
        if not os.path.exists(dataset_dir):
            raise RuntimeError(
                f"Cannot find dataset directory: {dataset_dir}, please download it from Decathlon challenge."
            )
        data = self._generate_data_list(dataset_dir)
        super(DecathlonDataset, self).__init__(data,
                                               transform,
                                               cache_rate=cache_rate,
                                               num_workers=num_workers)
Example #15
    def __init__(self, transforms):
        self.transform_list = []
        for transform in transforms:
            if 'LoadImaged' == transform:
                self.transform_list.append(LoadImaged(keys=["image", "label"]))
            elif 'AsChannelFirstd' == transform:
                self.transform_list.append(AsChannelFirstd(keys="image"))
            elif 'ConvertToMultiChannelBasedOnBratsClassesd' == transform:
                self.transform_list.append(ConvertToMultiChannelBasedOnBratsClassesd(keys="label"))
            elif 'Spacingd' == transform:
                self.transform_list.append(Spacingd(keys=["image", "label"], pixdim=(1.5, 1.5, 2.0), mode=("bilinear", "nearest")))
            elif 'Orientationd' == transform:
                self.transform_list.append(Orientationd(keys=["image", "label"], axcodes="RAS"))
            elif 'CenterSpatialCropd' == transform:
                self.transform_list.append(CenterSpatialCropd(keys=["image", "label"], roi_size=[128, 128, 64]))
            elif 'NormalizeIntensityd' == transform:
                self.transform_list.append(NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True))
            elif 'ToTensord' == transform:
                self.transform_list.append(ToTensord(keys=["image", "label"]))
            elif 'Activations' == transform:
                self.transform_list.append(Activations(sigmoid=True))
            elif 'AsDiscrete' == transform:
                self.transform_list.append(AsDiscrete(threshold_values=True))
            else:
                raise ValueError(
                    f"Unsupported transform: {transform}. Please add it to the supported list."
                )

        super().__init__(self.transform_list)
Example #16
    def test_samples(self):
        testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data")
        keys = "image"
        xforms = Compose(
            [
                LoadImaged(keys=keys),
                AddChanneld(keys=keys),
                ScaleIntensityd(keys=keys),
                RandSpatialCropSamplesd(keys=keys, roi_size=(8, 8, 5), random_size=True, num_samples=10),
            ]
        )
        image_path = os.path.join(testing_dir, "anatomical.nii")
        xforms.set_random_state(0)
        ims = xforms({keys: image_path})
        fig, mat = matshow3d(
            [im[keys] for im in ims], title=f"testing {keys}", figsize=(2, 2), frames_per_row=5, every_n=2, show=False
        )
        self.assertTrue(mat.dtype == np.float32)

        with tempfile.TemporaryDirectory() as tempdir:
            tempimg = f"{tempdir}/matshow3d_patch_test.png"
            fig.savefig(tempimg)
            comp = compare_images(f"{testing_dir}/matshow3d_patch_test.png", tempimg, 5e-2, in_decorator=True)
            if comp:
                print("not none comp: ", comp)  # matplotlib 3.2.2
                np.testing.assert_allclose(comp["rms"], 30.786983, atol=1e-3, rtol=1e-3)
            else:
                self.assertIsNone(comp, f"value of comp={comp}")  # None indicates test passed
Example #17
    def __init__(
        self,
        root_dir: str,
        section: str,
        transform: Union[Sequence[Callable], Callable] = (),
        download: bool = False,
        seed: int = 0,
        val_frac: float = 0.1,
        test_frac: float = 0.1,
        cache_num: int = sys.maxsize,
        cache_rate: float = 1.0,
        num_workers: int = 0,
    ) -> None:
        if not os.path.isdir(root_dir):
            raise ValueError("Root directory root_dir must be a directory.")
        self.section = section
        self.val_frac = val_frac
        self.test_frac = test_frac
        self.set_random_state(seed=seed)
        tarfile_name = os.path.join(root_dir, self.compressed_file_name)
        dataset_dir = os.path.join(root_dir, self.dataset_folder_name)
        if download:
            download_and_extract(self.resource, tarfile_name, root_dir, self.md5)

        if not os.path.exists(dataset_dir):
            raise RuntimeError(
                f"Cannot find dataset directory: {dataset_dir}, please use download=True to download it."
            )
        data = self._generate_data_list(dataset_dir)
        if transform == ():
            transform = LoadImaged("image")
        super().__init__(data, transform, cache_num=cache_num, cache_rate=cache_rate, num_workers=num_workers)
Example #18
    def test_shape(self):
        expected_shape = (128, 128, 128)
        test_image = nib.Nifti1Image(
            np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4))
        test_data = []
        with tempfile.TemporaryDirectory() as tempdir:
            for i in range(6):
                nib.save(test_image,
                         os.path.join(tempdir, f"test_image{str(i)}.nii.gz"))
                test_data.append(
                    {"image": os.path.join(tempdir, f"test_image{i}.nii.gz")})

            test_transform = Compose([
                LoadImaged(keys="image"),
                SimulateDelayd(keys="image", delay_time=1e-7)
            ])

            data_iterator = _Stream(test_data)
            with self.assertRaises(TypeError):  # plain Dataset requires a sequence, not an iterator
                dataset = Dataset(data=data_iterator, transform=test_transform)
                for _ in dataset:
                    pass
            dataset = IterableDataset(data=data_iterator,
                                      transform=test_transform)
            for d in dataset:
                self.assertTupleEqual(d["image"].shape, expected_shape)

            num_workers = 2 if sys.platform == "linux" else 0
            dataloader = DataLoader(dataset=dataset,
                                    batch_size=3,
                                    num_workers=num_workers)
            for d in dataloader:
                self.assertTupleEqual(d["image"].shape[1:], expected_shape)
Example #19
def get_data(keys):
    """Get the example data to be used.

    Use MarsAtlas, as it contains only one image (which is parcellated),
    making for a quick download.
    """
    cache_dir = os.environ.get("MONAI_DATA_DIRECTORY") or tempfile.mkdtemp()
    fname = "MarsAtlas-MNI-Colin27.zip"
    url = "https://www.dropbox.com/s/ndz8qtqblkciole/" + fname + "?dl=1"
    out_path = os.path.join(cache_dir, "MarsAtlas-MNI-Colin27")
    zip_path = os.path.join(cache_dir, fname)

    download_and_extract(url, zip_path, out_path)

    image, label = sorted(glob(os.path.join(out_path, "*.nii")))

    data = {CommonKeys.IMAGE: image, CommonKeys.LABEL: label}

    transforms = Compose([
        LoadImaged(keys),
        AddChanneld(keys),
        ScaleIntensityd(CommonKeys.IMAGE),
        Rotate90d(keys, spatial_axes=[0, 2])
    ])
    data = transforms(data)
    max_size = max(data[keys[0]].shape)
    padder = SpatialPadd(keys, (max_size, max_size, max_size))
    return padder(data)
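
A brief usage note (hedged): the helper returns the fully transformed data dict keyed by CommonKeys, with each volume padded to a cube.

data = get_data((CommonKeys.IMAGE, CommonKeys.LABEL))
print(data[CommonKeys.IMAGE].shape)  # (1, N, N, N) after AddChanneld + SpatialPadd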
Example #20
 def train_pre_transforms(self, context: Context):
     return [
         LoadImaged(keys=("image", "label")),
         AddChanneld(keys=("image", "label")),
         Spacingd(
             keys=("image", "label"),
             pixdim=(1.0, 1.0, 1.0),
             mode=("bilinear", "nearest"),
         ),
         ScaleIntensityRanged(keys="image",
                              a_min=-57,
                              a_max=164,
                              b_min=0.0,
                              b_max=1.0,
                              clip=True),
         CropForegroundd(keys=("image", "label"), source_key="image"),
         EnsureTyped(keys=("image", "label"), device=context.device),
         RandCropByPosNegLabeld(
             keys=("image", "label"),
             label_key="label",
             spatial_size=(96, 96, 96),
             pos=1,
             neg=1,
             num_samples=4,
             image_key="image",
             image_threshold=0,
         ),
         RandShiftIntensityd(keys="image", offsets=0.1, prob=0.5),
         SelectItemsd(keys=("image", "label")),
     ]
Example #21
    def setUp(self):
        if not has_nib:
            self.skipTest("nibabel required for test_inverse")

        set_determinism(seed=0)

        self.all_data = {}

        affine = make_rand_affine()
        affine[0] *= 2

        for size in [10, 11]:
            # pad 5 onto both ends so that cropping can be lossless
            im_1d = np.pad(np.arange(size), 5)[None]
            name = "1D even" if size % 2 == 0 else "1D odd"
            self.all_data[name] = {
                "image": np.array(im_1d, copy=True),
                "label": np.array(im_1d, copy=True),
                "other": np.array(im_1d, copy=True),
            }

        im_2d_fname, seg_2d_fname = [make_nifti_image(i) for i in create_test_image_2d(101, 100)]
        im_3d_fname, seg_3d_fname = [make_nifti_image(i, affine) for i in create_test_image_3d(100, 101, 107)]

        load_ims = Compose([LoadImaged(KEYS), AddChanneld(KEYS)])
        self.all_data["2D"] = load_ims({"image": im_2d_fname, "label": seg_2d_fname})
        self.all_data["3D"] = load_ims({"image": im_3d_fname, "label": seg_3d_fname})
Example #22
 def train_pre_transforms(self, context: Context):
     # Dataset preparation
     t: List[Any] = [
         LoadImaged(keys=("image", "label")),
         AddChanneld(keys=("image", "label")),
         SpatialCropForegroundd(keys=("image", "label"),
                                source_key="label",
                                spatial_size=self.roi_size),
         Resized(keys=("image", "label"),
                 spatial_size=self.model_size,
                 mode=("area", "nearest")),
         NormalizeIntensityd(keys="image", subtrahend=208.0,
                             divisor=388.0),  # type: ignore
     ]
     if self.dimension == 3:
         t.append(FindAllValidSlicesd(label="label", sids="sids"))
     t.extend([
         AddInitialSeedPointd(label="label",
                              guidance="guidance",
                              sids="sids"),
         AddGuidanceSignald(image="image", guidance="guidance"),
         EnsureTyped(keys=("image", "label"), device=context.device),
         SelectItemsd(keys=("image", "label", "guidance")),
     ])
     return t
Example #23
    def test_decollation(self, batch_size=2, num_workers=2):

        im = create_test_image_2d(100, 101)[0]
        data = [{
            "image": make_nifti_image(im) if has_nib else im
        } for _ in range(6)]

        transforms = Compose([
            AddChanneld("image"),
            SpatialPadd("image", 150),
            RandFlipd("image", prob=1.0, spatial_axis=1),
            ToTensord("image"),
        ])
        # If nibabel present, read from disk
        if has_nib:
            transforms = Compose([LoadImaged("image"), transforms])

        dataset = CacheDataset(data, transforms, progress=False)
        loader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=num_workers)

        for b, batch_data in enumerate(loader):
            decollated_1 = decollate_batch(batch_data)
            decollated_2 = Decollated()(batch_data)

            for decollated in [decollated_1, decollated_2]:
                for i, d in enumerate(decollated):
                    self.check_match(dataset[b * batch_size + i], d)
Example #24
    def test_register(self):
        spatial_size = (32, 64, 128)
        expected_shape = (128, 64, 32)
        test_image = np.random.rand(*spatial_size)
        with tempfile.TemporaryDirectory() as tempdir:
            filename = os.path.join(tempdir, "test_image.nii.gz")
            itk_np_view = itk.image_view_from_array(test_image)
            itk.imwrite(itk_np_view, filename)

            loader = LoadImaged(keys="img")
            loader.register(ITKReader(c_order_axis_indexing=True))
            result = loader({"img": filename})
            self.assertTupleEqual(
                tuple(result["img_meta_dict"]["spatial_shape"]),
                expected_shape)
            self.assertTupleEqual(result["img"].shape, spatial_size)
Example #25
    def test_3d_rgb(self):
        testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data")
        keys = "image"
        xforms = Compose(
            [
                LoadImaged(keys=keys),
                AddChanneld(keys=keys),
                ScaleIntensityd(keys=keys),
                # change to RGB color image
                RepeatChanneld(keys=keys, repeats=3),
            ]
        )
        image_path = os.path.join(testing_dir, "anatomical.nii")
        ims = xforms({keys: image_path})

        fig = pyplot.figure()  # external figure
        fig, _ = matshow3d(
            volume=ims[keys],
            fig=fig,
            figsize=(2, 2),
            frames_per_row=5,
            every_n=2,
            frame_dim=-1,
            channel_dim=0,
            show=False,
        )

        with tempfile.TemporaryDirectory() as tempdir:
            tempimg = f"{tempdir}/matshow3d_rgb_test.png"
            fig.savefig(tempimg)
            comp = compare_images(f"{testing_dir}/matshow3d_rgb_test.png", tempimg, 5e-2)
            self.assertIsNone(comp, f"value of comp={comp}")  # None indicates test passed
Example #26
    def test_anisotropic_spacing(self):
        with tempfile.TemporaryDirectory() as tempdir:

            pixdims = [[1.0, 1.0, 5.0], [1.0, 1.0, 4.0], [1.0, 1.0, 4.5], [1.0, 1.0, 2.0], [1.0, 1.0, 1.0]]
            for i in range(5):
                im, seg = create_test_image_3d(32, 32, 32, num_seg_classes=1, num_objs=3, rad_max=6, channel_dim=0)
                n = nib.Nifti1Image(im, np.eye(4))
                n.header["pixdim"][1:4] = pixdims[i]
                nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))
                n = nib.Nifti1Image(seg, np.eye(4))
                n.header["pixdim"][1:4] = pixdims[i]
                nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

            train_images = sorted(glob.glob(os.path.join(tempdir, "img*.nii.gz")))
            train_labels = sorted(glob.glob(os.path.join(tempdir, "seg*.nii.gz")))
            data_dicts = [
                {"image": image_name, "label": label_name} for image_name, label_name in zip(train_images, train_labels)
            ]

            t = Compose([LoadImaged(keys=["image", "label"]), FromMetaTensord(keys=["image", "label"])])
            dataset = Dataset(data=data_dicts, transform=t)

            calculator = DatasetSummary(dataset, num_workers=4, meta_key_postfix=PostFix.meta())

            target_spacing = calculator.get_target_spacing(anisotropic_threshold=4.0, percentile=20.0)
            np.testing.assert_allclose(target_spacing, (1.0, 1.0, 1.8))
Example #27
 def train_pre_transforms(self, context: Context):
     return [
         LoadImaged(keys=("image", "label"), dtype=np.uint8),
         FilterImaged(keys="image", min_size=5),
         AsChannelFirstd(keys="image"),
         AddChanneld(keys="label"),
         ToTensord(keys="image"),
         TorchVisiond(keys="image",
                      name="ColorJitter",
                      brightness=64.0 / 255.0,
                      contrast=0.75,
                      saturation=0.25,
                      hue=0.04),
         ToNumpyd(keys="image"),
         RandRotate90d(keys=("image", "label"),
                       prob=0.5,
                       spatial_axes=(0, 1)),
         ScaleIntensityRangeD(keys="image",
                              a_min=0.0,
                              a_max=255.0,
                              b_min=-1.0,
                              b_max=1.0),
         AddInitialSeedPointExd(label="label", guidance="guidance"),
         AddGuidanceSignald(image="image",
                            guidance="guidance",
                            number_intensity_ch=3),
         EnsureTyped(keys=("image", "label")),
     ]
Example #28
    def test_shape(self):
        expected_shape = (128, 128, 128)
        test_image = nib.Nifti1Image(
            np.random.randint(0, 2, size=[128, 128, 128]), np.eye(4))
        test_data = list()
        with tempfile.TemporaryDirectory() as tempdir:
            for i in range(6):
                nib.save(test_image,
                         os.path.join(tempdir, f"test_image{str(i)}.nii.gz"))
                test_data.append(
                    {"image": os.path.join(tempdir, f"test_image{i}.nii.gz")})

            test_transform = Compose([
                LoadImaged(keys="image"),
                SimulateDelayd(keys="image", delay_time=1e-7),
            ])

            test_stream = _Stream(data=test_data,
                                  dbpath=os.path.join(tempdir, "countDB"))

            dataset = IterableDataset(data=test_stream,
                                      transform=test_transform)
            for d in dataset:
                self.assertTupleEqual(d["image"].shape, expected_shape)

            test_stream.reset()
            dataloader = DataLoader(dataset=dataset,
                                    batch_size=3,
                                    num_workers=2)
            for d in dataloader:
                self.assertTupleEqual(d["image"].shape[1:], expected_shape)
Example #29
 def pre_transforms(self):
     t = [
         LoadImaged(keys="image", reader="nibabelreader"),
         AddChanneld(keys="image"),
         # Spacing might not be needed as resize transform is used later.
         # Spacingd(keys="image", pixdim=self.spacing),
         RandAffined(
             keys="image",
             prob=1,
             rotate_range=(np.pi / 4, np.pi / 4, np.pi / 4),
             padding_mode="zeros",
             as_tensor_output=False,
         ),
         RandFlipd(keys="image", prob=0.5, spatial_axis=0),
         RandRotated(keys="image",
                     range_x=(-5, 5),
                     range_y=(-5, 5),
                     range_z=(-5, 5)),
         Resized(keys="image", spatial_size=self.spatial_size),
     ]
     # If using TTA for deepedit
     if self.deepedit:
         t.append(DiscardAddGuidanced(keys="image"))
     t.append(ToTensord(keys="image"))
     return Compose(t)
Example #30
    def test_decollation(self, *transforms):

        batch_size = 2
        num_workers = 2

        t_compose = Compose(
            [AddChanneld(KEYS),
             Compose(transforms),
             ToTensord(KEYS)])
        # If nibabel present, read from disk
        if has_nib:
            t_compose = Compose([LoadImaged("image"), t_compose])

        dataset = CacheDataset(self.data, t_compose, progress=False)
        loader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=num_workers)

        for b, batch_data in enumerate(loader):
            decollated_1 = decollate_batch(batch_data)
            decollated_2 = Decollated()(batch_data)

            for decollated in [decollated_1, decollated_2]:
                for i, d in enumerate(decollated):
                    self.check_match(dataset[b * batch_size + i], d)