def test_rand_weighted_crop_patch_index(self):
     """RandWeightedCropd: crops follow the weight map and each sample's meta records its patch_index."""
     for p in TEST_NDARRAYS:
         for q in TEST_NDARRAYS:
             img = self.imt[0]
             n_samples = 3
             # a crop size of -1 keeps the full extent of that spatial dimension
             crop = RandWeightedCropd(("img", "seg"), "w", (10, -1, -1),
                                      n_samples)
             # weight map with three hot spots; sampling is biased toward them
             weight = np.zeros_like(img)
             weight[0, 7, 17] = 1.1
             weight[0, 13, 31] = 1.1
             weight[0, 24, 21] = 1
             # fixed seed makes the chosen centers deterministic
             crop.set_random_state(10)
             result = crop({
                 "img": p(img),
                 "seg": p(self.segn[0]),
                 "w": q(weight),
                 PostFix.meta("img"): {
                     "affine": None
                 }
             })
             self.assertTrue(len(result) == n_samples)
             # centers are reproducible for random state 10
             for c, e in zip(crop.centers,
                             [[14, 32, 40], [41, 32, 40], [20, 32, 40]]):
                 assert_allclose(c, e, type_test=False)
             for i in range(n_samples):
                 np.testing.assert_allclose(result[i]["img"].shape,
                                            (1, 10, 64, 80))
                 np.testing.assert_allclose(result[i]["seg"].shape,
                                            (1, 10, 64, 80))
                 # each cropped sample's meta stores its index within the batch
                 np.testing.assert_allclose(
                     result[i][PostFix.meta("img")]["patch_index"], i)
                 np.testing.assert_allclose(
                     result[i][PostFix.meta("seg")]["patch_index"], i)
# Example 2
    def test_dict_examples(self):
        """decollate_batch on dicts, lists and tensors: splitting, detach, and padding behavior."""
        # nested dict with per-key batched values decollates into per-sample dicts
        test_case = {
            "meta": {
                "out": ["test", "test"]
            },
            PostFix.meta("image"): {
                "scl_slope": torch.Tensor((0.0, 0.0))
            }
        }
        out = decollate_batch(test_case)
        self.assertEqual(out[0]["meta"]["out"], "test")
        self.assertEqual(out[0][PostFix.meta("image")]["scl_slope"], 0.0)

        # list of batched tensors: each output sample holds one slice of each tensor
        test_case = [torch.ones((2, 1, 10, 10)), torch.ones((2, 3, 5, 5))]
        out = decollate_batch(test_case)
        self.assertTupleEqual(out[0][0].shape, (1, 10, 10))
        self.assertTupleEqual(out[0][1].shape, (3, 5, 5))

        # plain batched tensor splits along the first (batch) dimension
        test_case = torch.rand((2, 1, 10, 10))
        out = decollate_batch(test_case)
        self.assertTupleEqual(out[0].shape, (1, 10, 10))

        # detach=True converts 0-d tensors into plain Python numbers
        test_case = [torch.tensor(0), torch.tensor(0)]
        out = decollate_batch(test_case, detach=True)
        self.assertListEqual([0, 0], out)
        self.assertFalse(isinstance(out[0], torch.Tensor))

        # detach=False keeps 0-d entries as tensors
        test_case = {"a": [torch.tensor(0), torch.tensor(0)]}
        out = decollate_batch(test_case, detach=False)
        self.assertListEqual([{
            "a": torch.tensor(0)
        }, {
            "a": torch.tensor(0)
        }], out)
        self.assertTrue(isinstance(out[0]["a"], torch.Tensor))

        test_case = [torch.tensor(0), torch.tensor(0)]
        out = decollate_batch(test_case, detach=False)
        self.assertListEqual(test_case, out)

        # non-tensor entries shorter than the batch still yield a value per sample
        test_case = {
            "image": torch.tensor([[[1, 2]], [[3, 4]]]),
            "label": torch.tensor([[[5, 6]], [[7, 8]]]),
            "pred": torch.tensor([[[9, 10]], [[11, 12]]]),
            "out": ["test"],
        }
        out = decollate_batch(test_case, detach=False)
        self.assertEqual(out[0]["out"], "test")

        # mismatched per-key batch sizes: pad=False truncates to the shortest,
        # pad=True extends short entries with fill_value
        test_case = {
            "image": torch.tensor([[[1, 2, 3]], [[3, 4, 5]]]),
            "label": torch.tensor([[[5]], [[7]]]),
            "out": ["test"],
        }
        out = decollate_batch(test_case, detach=False, pad=False)
        self.assertEqual(len(out), 1)  # no padding
        out = decollate_batch(test_case, detach=False, pad=True, fill_value=0)
        self.assertEqual(out[1]["out"], 0)  # verify padding fill_value
 def test_exceptions(self):
     """EnsureChannelFirstd raises ValueError on missing/insufficient meta unless strict_check=False."""
     with self.assertRaises(ValueError):  # no meta
         EnsureChannelFirstd("img")({"img": np.zeros((1, 2, 3)), PostFix.meta("img"): None})
     with self.assertRaises(ValueError):  # no meta channel
         EnsureChannelFirstd("img")(
             {"img": np.zeros((1, 2, 3)), PostFix.meta("img"): {"original_channel_dim": None}}
         )
     # the same inputs are tolerated when strict_check is disabled
     EnsureChannelFirstd("img", strict_check=False)({"img": np.zeros((1, 2, 3)), PostFix.meta("img"): None})
     EnsureChannelFirstd("img", strict_check=False)(
         {"img": np.zeros((1, 2, 3)), PostFix.meta("img"): {"original_channel_dim": None}}
     )
 def test_shape(self, input_param, input_data, expected_shape, expected_last):
     """RandSpatialCropSamplesd: verify per-sample shapes, patch indices, and the final sample."""
     xform = RandSpatialCropSamplesd(**input_param)
     xform.set_random_state(1234)
     samples = xform(input_data)
     # every sample must match its expected spatial shape for both keys
     for sample, shape in zip(samples, expected_shape):
         self.assertTupleEqual(sample["img"].shape, shape)
         self.assertTupleEqual(sample["seg"].shape, shape)
     # each sample's meta records its position within the batch
     for idx, sample in enumerate(samples):
         self.assertEqual(sample[PostFix.meta("img")]["patch_index"], idx)
         self.assertEqual(sample[PostFix.meta("seg")]["patch_index"], idx)
     # `sample` leaks from the loop: compare the last sample against the reference
     assert_allclose(sample["img"], expected_last["img"], type_test=True)
     assert_allclose(sample["seg"], expected_last["seg"], type_test=True)
# Example 5
 def __call__(
     self, data: Mapping[Hashable, NdarrayOrTensor]
 ) -> Dict[Hashable, NdarrayOrTensor]:
     """Fold each key's array together with its meta/transforms dicts into a MetaTensor."""
     d = dict(data)
     for key in self.key_iterator(d):
         self.push_transform(d, key)
         # remove the side-car dictionaries; they become MetaTensor attributes
         meta_dict = d.pop(PostFix.meta(key), None)
         applied = d.pop(PostFix.transforms(key), None)
         d[key] = MetaTensor(d[key], meta=meta_dict, applied_operations=applied)  # type: ignore
     return d
# Example 6
 def test_orntd(self):
     """Orientationd to RAS keeps the shape (identity affine) and reports RAS axis codes."""
     sample = {
         "seg": np.ones((2, 1, 2, 3)),
         PostFix.meta("seg"): {"affine": np.eye(4)},
     }
     transform = Orientationd(keys="seg", axcodes="RAS")
     output = transform(sample)
     # an identity affine is already RAS, so the spatial shape is unchanged
     np.testing.assert_allclose(output["seg"].shape, (2, 1, 2, 3))
     axcodes = nib.aff2axcodes(output[PostFix.meta("seg")]["affine"],
                               transform.ornt_transform.labels)
     self.assertEqual(axcodes, ("R", "A", "S"))
 def get_data(num_examples, input_size, data_type=np.asarray, include_label=True):
     """Build `num_examples` synthetic 2D image (and optional label) dicts with identity affines."""
     make_image = partial(
         create_test_image_2d, *input_size, rad_max=7, num_seg_classes=1, num_objs=1
     )
     examples = []
     for idx in range(num_examples):
         image, seg = make_image()
         # shift the column-crop start per example so samples differ in size
         sample = {"image": data_type(image[:, idx:])}
         sample[PostFix.meta("image")] = {"affine": np.eye(4)}
         if include_label:
             sample["label"] = data_type(seg[:, idx:])
             sample[PostFix.meta("label")] = {"affine": np.eye(4)}
         examples.append(sample)
     # a single example is returned unwrapped for convenience
     return examples[0] if num_examples == 1 else examples
# Example 8
    def test_endianness(self, endianness, use_array, image_only):
        """Loading succeeds for both byte orders, via array- and dict-based loaders."""

        # write a NIfTI file with the requested endianness in its header
        hdr = nib.Nifti1Header(endianness=endianness)
        nii = nib.Nifti1Image(self.im, np.eye(4), header=hdr)
        nib.save(nii, self.fname)

        data = [self.fname] if use_array else [{"image": self.fname}]
        tr = LoadImage(image_only=image_only) if use_array else LoadImaged(
            "image", image_only=image_only)
        check_ds = Dataset(data, tr)
        check_loader = DataLoader(check_ds, batch_size=1)
        ret = next(iter(check_loader))
        # meta is only present for the dict-based loader output
        if isinstance(ret, dict) and PostFix.meta("image") in ret:
            np.testing.assert_allclose(
                ret[PostFix.meta("image")]["spatial_shape"], [[100, 100]])
# Example 9
 def test_as_dict(self):
     """Round-trip a MetaTensor through as_dict and reconstruction."""
     original, _ = self.get_im()
     payload = original.as_dict("im")
     data, meta = payload["im"], payload[PostFix.meta("im")]
     # the affine is passed positionally when rebuilding the MetaTensor
     aff = meta.pop("affine")
     rebuilt = MetaTensor(data, aff, meta)
     self.check(rebuilt, original, check_ids=False)
# Example 10
    def test_anisotropic_spacing(self):
        """DatasetSummary.get_target_spacing applies the anisotropic-threshold percentile rule."""
        with tempfile.TemporaryDirectory() as tempdir:

            # five volumes with varying z-spacing (pixdim index 3)
            pixdims = [[1.0, 1.0, 5.0], [1.0, 1.0, 4.0], [1.0, 1.0, 4.5], [1.0, 1.0, 2.0], [1.0, 1.0, 1.0]]
            for i in range(5):
                im, seg = create_test_image_3d(32, 32, 32, num_seg_classes=1, num_objs=3, rad_max=6, channel_dim=0)
                n = nib.Nifti1Image(im, np.eye(4))
                n.header["pixdim"][1:4] = pixdims[i]
                nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))
                n = nib.Nifti1Image(seg, np.eye(4))
                n.header["pixdim"][1:4] = pixdims[i]
                nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

            train_images = sorted(glob.glob(os.path.join(tempdir, "img*.nii.gz")))
            train_labels = sorted(glob.glob(os.path.join(tempdir, "seg*.nii.gz")))
            data_dicts = [
                {"image": image_name, "label": label_name} for image_name, label_name in zip(train_images, train_labels)
            ]

            t = Compose([LoadImaged(keys=["image", "label"]), FromMetaTensord(keys=["image", "label"])])
            dataset = Dataset(data=data_dicts, transform=t)

            calculator = DatasetSummary(dataset, num_workers=4, meta_key_postfix=PostFix.meta())

            # z spacings span 1.0-5.0, exceeding anisotropic_threshold=4.0, so
            # the z target is the 20th percentile of [1, 2, 4, 4.5, 5] = 1.8
            target_spacing = calculator.get_target_spacing(anisotropic_threshold=4.0, percentile=20.0)
            np.testing.assert_allclose(target_spacing, (1.0, 1.0, 1.8))
# Example 11
 def test_spacingd(self, _, data, kw_args, expected_keys, expected_shape,
                   expected_affine):
     """Spacingd produces the expected keys, shape and affine, preserving the device."""
     res = Spacingd(**kw_args)(data)
     image = data["image"]
     # torch inputs must come back on their original device
     if isinstance(image, torch.Tensor):
         self.assertEqual(image.device, res["image"].device)
     self.assertEqual(expected_keys, tuple(sorted(res)))
     np.testing.assert_allclose(res["image"].shape, expected_shape)
     assert_allclose(res[PostFix.meta("image")]["affine"], expected_affine)
# Example 12
 def inverse(
     self, data: Mapping[Hashable, NdarrayOrTensor]
 ) -> Dict[Hashable, NdarrayOrTensor]:
     """Undo the forward pass: rebuild MetaTensors from the split meta/transforms dicts."""
     d = dict(data)
     for key in self.key_iterator(d):
         # confirm a forward transform was recorded for this key
         _ = self.get_most_recent_transform(d, key)
         # fold the side-car dictionaries back into a MetaTensor
         meta_dict = d.pop(PostFix.meta(key), None)
         applied = d.pop(PostFix.transforms(key), None)
         d[key] = MetaTensor(d[key], meta=meta_dict, applied_operations=applied)  # type: ignore
         # drop the record of the applied transform
         self.pop_transform(d, key)
     return d
# Example 13
 def _test_dataset(dataset):
     """Closure (uses `self` from the enclosing test): check the MedNIST split length and sample keys."""
     # length equals the full dataset scaled by the configured test fraction
     self.assertEqual(
         len(dataset),
         int(MEDNIST_FULL_DATASET_LENGTH * dataset.test_frac))
     self.assertTrue("image" in dataset[0])
     self.assertTrue("label" in dataset[0])
     self.assertTrue(PostFix.meta("image") in dataset[0])
     self.assertTupleEqual(dataset[0]["image"].shape, (1, 64, 64))
def run_inference_test(root_dir, device="cuda:0"):
    """Run sliding-window UNet inference over saved NIfTI pairs; save outputs and return mean Dice.

    Args:
        root_dir: directory containing ``im*.nii.gz``/``seg*.nii.gz`` pairs and the
            trained ``best_metric_model.pth`` weights; segmentations are written to
            ``root_dir/output``.
        device: torch device string used for the model and data.

    Returns:
        The aggregated mean Dice metric as a Python float.
    """
    images = sorted(glob(os.path.join(root_dir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
    val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadImaged(keys=["img", "seg"]),
            EnsureChannelFirstd(keys=["img", "seg"]),
            # resampling with align_corners=True or dtype=float64 will generate
            # slight different results between PyTorch 1.5 an 1.6
            Spacingd(keys=["img", "seg"], pixdim=[1.2, 0.8, 0.7], mode=["bilinear", "nearest"], dtype=np.float32),
            ScaleIntensityd(keys="img"),
            ToTensord(keys=["img", "seg"]),
        ]
    )
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    # sliding window inference need to input 1 image in every iteration
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)
    # post-processing: sigmoid then binarize predictions at 0.5
    val_post_tran = Compose([ToTensor(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
    dice_metric = DiceMetric(include_background=True, reduction="mean", get_not_nans=False)

    model = UNet(
        spatial_dims=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    model_filename = os.path.join(root_dir, "best_metric_model.pth")
    model.load_state_dict(torch.load(model_filename))
    with eval_mode(model):
        # resampling with align_corners=True or dtype=float64 will generate
        # slight different results between PyTorch 1.5 an 1.6
        saver = SaveImage(
            output_dir=os.path.join(root_dir, "output"),
            dtype=np.float32,
            output_ext=".nii.gz",
            output_postfix="seg",
            mode="bilinear",
        )
        for val_data in val_loader:
            val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device)
            # define sliding window size and batch size for windows inference
            sw_batch_size, roi_size = 4, (96, 96, 96)
            val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
            # decollate prediction into a list
            val_outputs = [val_post_tran(i) for i in decollate_batch(val_outputs)]
            val_meta = decollate_batch(val_data[PostFix.meta("img")])
            # compute metrics
            dice_metric(y_pred=val_outputs, y=val_labels)
            for img, meta in zip(val_outputs, val_meta):  # save a decollated batch of files
                saver(img, meta)

    return dice_metric.aggregate().item()
# Example 15
    def _cmp(self, filename, shape, ch_shape, reader_1, reader_2, outname,
             ext):
        """Load with reader_1, save, reload with reader_2; shapes, affines and voxels must agree."""
        data_dict = {"img": filename}
        keys = data_dict.keys()
        xforms = Compose(
            [LoadImaged(keys, reader=reader_1, ensure_channel_first=True)])
        img_dict = xforms(data_dict)  # load dicom with itk
        self.assertTupleEqual(img_dict["img"].shape, ch_shape)
        self.assertTupleEqual(
            tuple(img_dict[PostFix.meta("img")]["spatial_shape"]), shape)

        with tempfile.TemporaryDirectory() as tempdir:
            # squeeze_end_dims=False keeps trailing singleton dims on save
            save_xform = SaveImageD(keys,
                                    meta_keys=PostFix.meta("img"),
                                    output_dir=tempdir,
                                    squeeze_end_dims=False,
                                    output_ext=ext)
            save_xform(img_dict)  # save to nifti

            new_xforms = Compose(
                [LoadImaged(keys, reader=reader_2),
                 EnsureChannelFirstD(keys)])
            out = new_xforms({"img":
                              os.path.join(tempdir,
                                           outname)})  # load nifti with itk
            self.assertTupleEqual(out["img"].shape, ch_shape)
            self.assertTupleEqual(
                tuple(out[PostFix.meta("img")]["spatial_shape"]), shape)
            # affines are only compared when both readers provide one
            if "affine" in img_dict[PostFix.meta("img")] and "affine" in out[
                    PostFix.meta("img")]:
                np.testing.assert_allclose(
                    img_dict[PostFix.meta("img")]["affine"],
                    out[PostFix.meta("img")]["affine"],
                    rtol=1e-3)
            np.testing.assert_allclose(out["img"], img_dict["img"], rtol=1e-3)
# Example 16
    def as_dict(self, key: str) -> dict:
        """
        Get the object as a dictionary for backwards compatibility.
        This method makes a copy of the objects.

        Args:
            key: Base key to store main data. The keys for the metadata and the
                applied operations will be determined using `PostFix.meta` and
                `PostFix.transforms`, respectively.

        Return:
            A dictionary consisting of three keys: the main data (stored under
                `key`), the metadata, and the applied operations.
        """
        # clone().detach() and deepcopy ensure the caller cannot mutate this object
        return {
            key: self.as_tensor().clone().detach(),
            PostFix.meta(key): deepcopy(self.meta),
            PostFix.transforms(key): deepcopy(self.applied_operations),
        }
# Example 17
    def test_foreground_position(self, arguments, input_data, _):
        """SpatialCropForegroundd writes foreground bounds to meta under default and custom keys."""
        # default meta keys record the cropped bounding box
        result = SpatialCropForegroundd(**arguments)(input_data)
        np.testing.assert_allclose(
            result[PostFix.meta("image")]["foreground_start_coord"],
            np.array([0, 1, 1]))
        np.testing.assert_allclose(
            result[PostFix.meta("image")]["foreground_end_coord"],
            np.array([1, 4, 4]))

        # custom coordinate key names override the defaults
        arguments["start_coord_key"] = "test_start_coord"
        arguments["end_coord_key"] = "test_end_coord"
        result = SpatialCropForegroundd(**arguments)(input_data)
        np.testing.assert_allclose(
            result[PostFix.meta("image")]["test_start_coord"],
            np.array([0, 1, 1]))
        np.testing.assert_allclose(
            result[PostFix.meta("image")]["test_end_coord"],
            np.array([1, 4, 4]))
# Example 18
    def test_factor(self):
        """ShiftIntensityd with factor_key scales its offset by the stats-computed maximum."""
        key = "img"
        sample = {key: self.imt, PostFix.meta(key): {"affine": None}}
        stats_op = IntensityStatsd(keys=key, ops="max", key_prefix="orig")
        shift_op = ShiftIntensityd(keys=[key], offset=1.0, factor_key=["orig_max"])

        shifted = shift_op(stats_op(sample))
        # the applied shift is offset (1.0) times the recorded "orig_max" statistic
        expected = self.imt + 1.0 * np.nanmax(self.imt)
        np.testing.assert_allclose(shifted[key], expected)
# Example 19
def _default_transforms(image_key, label_key, pixdim):
    """Build the default load/orient/resample pipeline for an image and optional label."""
    keys = [image_key] if label_key is None else [image_key, label_key]
    # labels are resampled with nearest-neighbour to keep them discrete
    if len(keys) == 2:
        mode = [GridSampleMode.BILINEAR, GridSampleMode.NEAREST]
    else:
        mode = [GridSampleMode.BILINEAR]
    meta_keys = [PostFix.meta(k) for k in keys]
    return Compose([
        LoadImaged(keys=keys),
        AsChannelFirstd(keys=keys),
        Orientationd(keys=keys, axcodes="RAS"),
        Spacingd(keys=keys, pixdim=pixdim, mode=mode),
        FromMetaTensord(keys=keys),
        ToNumpyd(keys=keys + meta_keys),
    ])
# Example 20
 def test_with_dataloader(self, file_path, level, expected_spatial_shape, expected_shape):
     """WSI image loaded through a DataLoader keeps the expected meta spatial shape and tensor shape."""
     train_transform = Compose(
         [
             LoadImaged(keys=["image"], reader=WSIReader, backend=self.backend, level=level),
             ToTensord(keys=["image"]),
         ]
     )
     dataset = Dataset([{"image": file_path}], transform=train_transform)
     data_loader = DataLoader(dataset)
     data: dict = first(data_loader)
     # every per-axis entry of the collated spatial shape must match
     for s in data[PostFix.meta("image")]["spatial_shape"]:
         torch.testing.assert_allclose(s, expected_spatial_shape)
     self.assertTupleEqual(data["image"].shape, expected_shape)
# Example 21
 def test_orntd_3d(self):
     """Orientationd reorients both keys to PLI, transposing shapes and rewriting affines."""
     for p in TEST_NDARRAYS:
         data = {
             "seg": p(np.ones((2, 1, 2, 3))),
             "img": p(np.ones((2, 1, 2, 3))),
             PostFix.meta("seg"): {
                 "affine": np.eye(4)
             },
             PostFix.meta("img"): {
                 "affine": np.eye(4)
             },
         }
         ornt = Orientationd(keys=("img", "seg"), axcodes="PLI")
         res = ornt(data)
         # RAS -> PLI swaps the first two spatial axes: (1, 2, 3) -> (2, 1, 3)
         np.testing.assert_allclose(res["img"].shape, (2, 2, 1, 3))
         np.testing.assert_allclose(res["seg"].shape, (2, 2, 1, 3))
         # both keys' affines must now report PLI axis codes
         code = nib.aff2axcodes(res[PostFix.meta("seg")]["affine"],
                                ornt.ornt_transform.labels)
         self.assertEqual(code, ("P", "L", "I"))
         code = nib.aff2axcodes(res[PostFix.meta("img")]["affine"],
                                ornt.ornt_transform.labels)
         self.assertEqual(code, ("P", "L", "I"))
 def test_load_spacingd_non_diag(self):
     """Spacingd (diagonal=False) on a rotated affine yields the expected non-diagonal output affine."""
     data = {"image": FILES[1]}
     data_dict = LoadImaged(keys="image")(data)
     data_dict = AddChanneld(keys="image")(data_dict)
     affine = data_dict[PostFix.meta("image")]["affine"]
     # inject a rotated (non-diagonal) affine as both current and original affine
     data_dict[PostFix.meta("image")]["original_affine"] = data_dict[
         PostFix.meta("image")]["affine"] = (
             np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0],
                       [0, 0, 0, 1]]) @ affine)
     res_dict = Spacingd(keys="image",
                         pixdim=(1, 2, 3),
                         diagonal=False,
                         padding_mode="zeros")(data_dict)
     # diagonal=False preserves the rotation while applying the requested pixdim
     np.testing.assert_allclose(
         res_dict[PostFix.meta("image")]["affine"],
         np.array([
             [0.0, 0.0, 3.0, -27.599409],
             [0.0, 2.0, 0.0, -47.977585],
             [-1.0, 0.0, 0.0, 35.297897],
             [0.0, 0.0, 0.0, 1.0],
         ]),
     )
 def test_load_spacingd_rotate_non_diag(self):
     """Spacingd (diagonal=False, border padding) keeps the original orientation in the output affine."""
     data = {"image": FILES[0]}
     data_dict = LoadImaged(keys="image")(data)
     data_dict = AddChanneld(keys="image")(data_dict)
     res_dict = Spacingd(keys="image",
                         pixdim=(1, 2, 3),
                         diagonal=False,
                         padding_mode="border")(data_dict)
     # expected affine retains the source orientation with the new pixdim applied
     np.testing.assert_allclose(
         res_dict[PostFix.meta("image")]["affine"],
         np.array([[-1.0, 0.0, 0.0, 32.0], [0.0, 2.0, 0.0, -40.0],
                   [0.0, 0.0, 3.0, -16.0], [0.0, 0.0, 0.0, 1.0]]),
     )
# Example 24
    def as_dict(self, key: str) -> dict:
        """
        Return the object as a dictionary for backwards compatibility.

        Args:
            key: Base key under which the main data is stored; the metadata key
                is derived from it via `PostFix.meta`.

        Return:
            A dictionary with two entries: the main data (under `key`) and the
                metadata (under the derived meta key).
        """
        out = {key: self.as_tensor()}
        out[PostFix.meta(key)] = self.meta
        return out
# Example 25
    def __call__(self, data):
        """Scale fore/background clicks from original to current image space; store them as guidance."""
        d = dict(data)
        current_shape = d[self.ref_image].shape[1:]

        # per-axis ratio between the current shape and the original image dims
        # (assumes meta "dim"[1:4] holds the original spatial dims — NIfTI convention)
        scale = np.divide(current_shape, d[PostFix.meta("image")]["dim"][1:4])

        def _rescale(clicks):
            # empty click lists stay empty; otherwise scale and truncate to ints
            if not len(clicks):
                return []
            return np.multiply(clicks, scale).astype(int, copy=False).tolist()

        d[self.guidance] = [_rescale(d["foreground"]), _rescale(d["background"])]
        return d
 def test_load_spacingd_non_diag_ornt(self):
     """Spacingd (diagonal=False) followed by Orientationd to LPI produces the expected affine."""
     data = {"image": FILES[1]}
     data_dict = LoadImaged(keys="image")(data)
     data_dict = AddChanneld(keys="image")(data_dict)
     affine = data_dict[PostFix.meta("image")]["affine"]
     # inject a rotated (non-diagonal) affine as both current and original affine
     data_dict[PostFix.meta("image")]["original_affine"] = data_dict[
         PostFix.meta("image")]["affine"] = (
             np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0],
                       [0, 0, 0, 1]]) @ affine)
     res_dict = Spacingd(keys="image",
                         pixdim=(1, 2, 3),
                         diagonal=False,
                         padding_mode="border")(data_dict)
     # reorienting to LPI negates all axes relative to RAS
     res_dict = Orientationd(keys="image", axcodes="LPI")(res_dict)
     np.testing.assert_allclose(
         res_dict[PostFix.meta("image")]["affine"],
         np.array([
             [-3.0, 0.0, 0.0, 56.4005909],
             [0.0, -2.0, 0.0, 52.02241516],
             [0.0, 0.0, -1.0, 35.29789734],
             [0.0, 0.0, 0.0, 1.0],
         ]),
     )
# Example 27
    def test_channel_dim(self):
        """ITKReader(channel_dim=2): that axis becomes channels and is excluded from spatial_shape."""
        full_shape = (32, 64, 3, 128)
        volume = np.random.rand(*full_shape)
        with tempfile.TemporaryDirectory() as tmp_dir:
            path = os.path.join(tmp_dir, "test_image.nii.gz")
            nib.save(nib.Nifti1Image(volume, affine=np.eye(4)), path)

            reader = LoadImaged(keys="img")
            reader.register(ITKReader(channel_dim=2))
            loaded = EnsureChannelFirstD("img")(reader({"img": path}))
            # the size-3 axis is treated as channels and moved to the front
            meta_shape = tuple(loaded[PostFix.meta("img")]["spatial_shape"])
            self.assertTupleEqual(meta_shape, (32, 64, 128))
            self.assertTupleEqual(loaded["img"].shape, (3, 32, 64, 128))
 def test_load_spacingd_rotate(self, filename):
     """Compare Spacingd (diagonal=True) against nibabel's resample_to_output reference."""
     data = {"image": filename}
     data_dict = LoadImaged(keys="image")(data)
     data_dict = AddChanneld(keys="image")(data_dict)
     affine = data_dict[PostFix.meta("image")]["affine"]
     # inject a rotated (non-diagonal) affine as both current and original affine
     data_dict[PostFix.meta("image")]["original_affine"] = data_dict[
         PostFix.meta("image")]["affine"] = (
             np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0],
                       [0, 0, 0, 1]]) @ affine)
     t = time.time()
     res_dict = Spacingd(keys="image",
                         pixdim=(1, 2, 3),
                         diagonal=True,
                         padding_mode="zeros")(data_dict)
     t1 = time.time()
     print(f"time monai: {t1 - t}")
     # reference: resample the original volume with nibabel/scipy
     anat = nibabel.Nifti1Image(
         data_dict["image"][0],
         data_dict[PostFix.meta("image")]["original_affine"])
     ref = resample_to_output(anat, (1, 2, 3), order=1)
     t2 = time.time()
     print(f"time scipy: {t2 - t1}")
     self.assertTrue(t2 >= t1)
     np.testing.assert_allclose(res_dict[PostFix.meta("image")]["affine"],
                                ref.affine)
     if "anatomical" not in filename:
         np.testing.assert_allclose(res_dict["image"].shape[1:], ref.shape)
         np.testing.assert_allclose(ref.get_fdata(),
                                    res_dict["image"][0],
                                    atol=0.05)
     else:
         # different from the ref implementation (shape computed by round
         # instead of ceil)
         np.testing.assert_allclose(ref.get_fdata()[..., :-1],
                                    res_dict["image"][0],
                                    atol=0.05)
 def test_load_spacingd(self, filename):
     """Spacingd (diagonal=True) matches nibabel's resample_to_output in affine, shape and values."""
     data = {"image": filename}
     data_dict = LoadImaged(keys="image")(data)
     data_dict = AddChanneld(keys="image")(data_dict)
     t = time.time()
     res_dict = Spacingd(keys="image",
                         pixdim=(1, 0.2, 1),
                         diagonal=True,
                         padding_mode="zeros")(data_dict)
     t1 = time.time()
     print(f"time monai: {t1 - t}")
     # reference: resample the original volume with nibabel/scipy (trilinear, order=1)
     anat = nibabel.Nifti1Image(
         data_dict["image"][0],
         data_dict[PostFix.meta("image")]["original_affine"])
     ref = resample_to_output(anat, (1, 0.2, 1), order=1)
     t2 = time.time()
     print(f"time scipy: {t2 - t1}")
     self.assertTrue(t2 >= t1)
     np.testing.assert_allclose(res_dict[PostFix.meta("image")]["affine"],
                                ref.affine)
     np.testing.assert_allclose(res_dict["image"].shape[1:], ref.shape)
     # voxel values agree within interpolation tolerance
     np.testing.assert_allclose(ref.get_fdata(),
                                res_dict["image"][0],
                                atol=0.05)
# Example 30
 def test_with_dataloader_batch(self, file_path, level, expected_spatial_shape, expected_shape):
     """Batch-size-2 WSI loading: per-sample meta spatial shapes and the batched tensor shape."""
     train_transform = Compose(
         [
             LoadImaged(keys=["image"], reader=WSIReader, backend=self.backend, level=level),
             FromMetaTensord(keys=["image"]),
             ToTensord(keys=["image"]),
         ]
     )
     # the same file twice so the collated batch has two identical samples
     dataset = Dataset([{"image": file_path}, {"image": file_path}], transform=train_transform)
     batch_size = 2
     data_loader = DataLoader(dataset, batch_size=batch_size)
     data: dict = first(data_loader)
     # each sample in the batch reports the same expected spatial shape
     for s in data[PostFix.meta("image")]["spatial_shape"]:
         assert_allclose(s, expected_spatial_shape, type_test=False)
     self.assertTupleEqual(data["image"].shape, (batch_size, *expected_shape[1:]))