Example #1
 def pre_transforms(self, data):
     return [
         LoadImaged(keys=["image", "label"]),
         EnsureChannelFirstd(keys=["image", "label"]),
         AddBackgroundScribblesFromROId(
             scribbles="label",
             scribbles_bg_label=self.scribbles_bg_label,
             scribbles_fg_label=self.scribbles_fg_label,
         ),
         # the optimisers are currently the bottleneck and take a long time,
         # so we resample non-isotropically with a large spacing
         Spacingd(keys=["image", "label"], pixdim=self.pix_dim, mode=["bilinear", "nearest"]),
         Orientationd(keys=["image", "label"], axcodes="RAS"),
         ScaleIntensityRanged(
             keys="image",
             a_min=self.intensity_range[0],
             a_max=self.intensity_range[1],
             b_min=self.intensity_range[2],
             b_max=self.intensity_range[3],
             clip=self.intensity_range[4],
         ),
         MakeLikelihoodFromScribblesHistogramd(
             image="image",
             scribbles="label",
             post_proc_label="prob",
             scribbles_bg_label=self.scribbles_bg_label,
             scribbles_fg_label=self.scribbles_fg_label,
             normalise=True,
         ),
     ]
Example #2
 def pre_transforms(self, data=None) -> Sequence[Callable]:
     t = [
         LoadImaged(keys="image"),
         AsChannelFirstd(keys="image"),
         Spacingd(keys="image",
                  pixdim=[1.0] * self.dimension,
                  mode="bilinear"),
         AddGuidanceFromPointsd(ref_image="image",
                                guidance="guidance",
                                dimensions=self.dimension),
     ]
     if self.dimension == 2:
         t.append(Fetch2DSliced(keys="image", guidance="guidance"))
     t.extend([
         AddChanneld(keys="image"),
         SpatialCropGuidanced(keys="image",
                              guidance="guidance",
                              spatial_size=self.spatial_size),
         Resized(keys="image", spatial_size=self.model_size, mode="area"),
         ResizeGuidanced(guidance="guidance", ref_image="image"),
         NormalizeIntensityd(keys="image", subtrahend=208,
                             divisor=388),  # type: ignore
         AddGuidanceSignald(image="image", guidance="guidance"),
         EnsureTyped(keys="image",
                     device=data.get("device") if data else None),
     ])
     return t
Example #3
 def test_spacingd_2d(self):
     data = {"image": np.ones((2, 10, 20)), "image_meta_dict": {"affine": np.eye(3)}}
     spacing = Spacingd(keys="image", pixdim=(1, 2, 1.4))
     res = spacing(data)
     self.assertEqual(("image", "image_meta_dict"), tuple(sorted(res)))
     np.testing.assert_allclose(res["image"].shape, (2, 10, 10))
     np.testing.assert_allclose(res["image_meta_dict"]["affine"], np.diag((1, 2, 1)))
Example #4
 def train_pre_transforms(self, context: Context):
     return [
         LoadImaged(keys=("image", "label")),
         AddChanneld(keys=("image", "label")),
         Spacingd(
             keys=("image", "label"),
             pixdim=(1.0, 1.0, 1.0),
             mode=("bilinear", "nearest"),
         ),
         ScaleIntensityRanged(keys="image",
                              a_min=-57,
                              a_max=164,
                              b_min=0.0,
                              b_max=1.0,
                              clip=True),
         CropForegroundd(keys=("image", "label"), source_key="image"),
         EnsureTyped(keys=("image", "label"), device=context.device),
         RandCropByPosNegLabeld(
             keys=("image", "label"),
             label_key="label",
             spatial_size=(96, 96, 96),
             pos=1,
             neg=1,
             num_samples=4,
             image_key="image",
             image_threshold=0,
         ),
         RandShiftIntensityd(keys="image", offsets=0.1, prob=0.5),
         SelectItemsd(keys=("image", "label")),
     ]
Example #5
 def test_load_spacingd_rotate(self, filename):
     data_dict = self.load_image(filename)
     affine = data_dict["image"].affine
     data_dict["image"].meta["original_affine"] = data_dict[
         "image"].affine = (torch.tensor(
             [[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 0, 1]],
             dtype=torch.float64) @ affine)
     t = time.time()
     res_dict = Spacingd(keys="image",
                         pixdim=(1, 2, 3),
                         diagonal=True,
                         padding_mode="zeros")(data_dict)
     t1 = time.time()
     print(f"time monai: {t1 - t}")
     anat = nibabel.Nifti1Image(np.asarray(data_dict["image"][0]),
                                data_dict["image"].meta["original_affine"])
     ref = resample_to_output(anat, (1, 2, 3), order=1)
     t2 = time.time()
     print(f"time scipy: {t2 - t1}")
     self.assertTrue(t2 >= t1)
     np.testing.assert_allclose(res_dict["image"].affine, ref.affine)
     if "anatomical" not in filename:
         np.testing.assert_allclose(res_dict["image"].shape[1:], ref.shape)
         np.testing.assert_allclose(ref.get_fdata(),
                                    res_dict["image"][0],
                                    atol=0.05)
     else:
         # different from the ref implementation (shape computed by round
         # instead of ceil)
         np.testing.assert_allclose(ref.get_fdata()[..., :-1],
                                    res_dict["image"][0],
                                    atol=0.05)
Example #6
 def test_spacingd_2d(self):
     data = {'image': np.ones((2, 10, 20)), 'image.affine': np.eye(3)}
     spacing = Spacingd(keys='image', pixdim=(1, 2, 1.4))
     res = spacing(data)
     self.assertEqual(('image', 'image.affine'), tuple(sorted(res)))
     np.testing.assert_allclose(res['image'].shape, (2, 10, 10))
     np.testing.assert_allclose(res['image.affine'], np.diag((1, 2, 1)))
Example #7
def run_inference_test(root_dir, device="cuda:0"):
    images = sorted(glob(os.path.join(root_dir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
    val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose([
        LoadNiftid(keys=["img", "seg"]),
        AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
        # resampling with align_corners=True or dtype=float64 will generate
        # slightly different results between PyTorch 1.5 and 1.6
        Spacingd(keys=["img", "seg"],
                 pixdim=[1.2, 0.8, 0.7],
                 mode=["bilinear", "nearest"],
                 dtype=np.float32),
        ScaleIntensityd(keys="img"),
        ToTensord(keys=["img", "seg"]),
    ])
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    # sliding window inference needs one image per iteration
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)
    val_post_tran = Compose(
        [Activations(sigmoid=True),
         AsDiscrete(threshold_values=True)])
    dice_metric = DiceMetric(include_background=True, reduction="mean")

    model = UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    model_filename = os.path.join(root_dir, "best_metric_model.pth")
    model.load_state_dict(torch.load(model_filename))
    model.eval()
    with torch.no_grad():
        metric_sum = 0.0
        metric_count = 0
        # resampling with align_corners=True or dtype=float64 will generate
        # slightly different results between PyTorch 1.5 and 1.6
        saver = NiftiSaver(output_dir=os.path.join(root_dir, "output"),
                           dtype=np.float32)
        for val_data in val_loader:
            val_images, val_labels = val_data["img"].to(
                device), val_data["seg"].to(device)
            # define window size and batch size for sliding window inference
            sw_batch_size, roi_size = 4, (96, 96, 96)
            val_outputs = val_post_tran(
                sliding_window_inference(val_images, roi_size, sw_batch_size,
                                         model))
            value, not_nans = dice_metric(y_pred=val_outputs, y=val_labels)
            metric_count += not_nans.item()
            metric_sum += value.item() * not_nans.item()
            saver.save_batch(val_outputs, val_data["img_meta_dict"])
        metric = metric_sum / metric_count
    return metric
Example #8
 def test_spacingd_3d(self):
     data = {"image": np.ones((2, 10, 15, 20)), "image.affine": np.eye(4)}
     spacing = Spacingd(keys="image", pixdim=(1, 2, 1.4))
     res = spacing(data)
     self.assertEqual(("image", "image.affine"), tuple(sorted(res)))
     np.testing.assert_allclose(res["image"].shape, (2, 10, 8, 15))
     np.testing.assert_allclose(res["image.affine"],
                                np.diag([1, 2, 1.4, 1.0]))
Example #9
 def test_spacingd(self, _, data, kw_args, expected_keys, expected_shape,
                   expected_affine):
     res = Spacingd(**kw_args)(data)
     if isinstance(data["image"], torch.Tensor):
         self.assertEqual(data["image"].device, res["image"].device)
     self.assertEqual(expected_keys, tuple(sorted(res)))
     np.testing.assert_allclose(res["image"].shape, expected_shape)
     assert_allclose(res[PostFix.meta("image")]["affine"], expected_affine)
Example #10
def run_inference_test(root_dir, device="cuda:0"):
    images = sorted(glob(os.path.join(root_dir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
    val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadImaged(keys=["img", "seg"]),
            EnsureChannelFirstd(keys=["img", "seg"]),
            # resampling with align_corners=True or dtype=float64 will generate
            # slightly different results between PyTorch 1.5 and 1.6
            Spacingd(keys=["img", "seg"], pixdim=[1.2, 0.8, 0.7], mode=["bilinear", "nearest"], dtype=np.float32),
            ScaleIntensityd(keys="img"),
            ToTensord(keys=["img", "seg"]),
        ]
    )
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    # sliding window inference needs one image per iteration
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)
    val_post_tran = Compose([ToTensor(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
    dice_metric = DiceMetric(include_background=True, reduction="mean", get_not_nans=False)

    model = UNet(
        spatial_dims=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    model_filename = os.path.join(root_dir, "best_metric_model.pth")
    model.load_state_dict(torch.load(model_filename))
    with eval_mode(model):
        # resampling with align_corners=True or dtype=float64 will generate
        # slightly different results between PyTorch 1.5 and 1.6
        saver = SaveImage(
            output_dir=os.path.join(root_dir, "output"),
            dtype=np.float32,
            output_ext=".nii.gz",
            output_postfix="seg",
            mode="bilinear",
        )
        for val_data in val_loader:
            val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device)
            # define window size and batch size for sliding window inference
            sw_batch_size, roi_size = 4, (96, 96, 96)
            val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
            # decollate prediction into a list
            val_outputs = [val_post_tran(i) for i in decollate_batch(val_outputs)]
            val_meta = decollate_batch(val_data[PostFix.meta("img")])
            # compute metrics
            dice_metric(y_pred=val_outputs, y=val_labels)
            for img, meta in zip(val_outputs, val_meta):  # save a decollated batch of files
                saver(img, meta)

    return dice_metric.aggregate().item()
Example #11
def run_inference_test(root_dir, device=torch.device("cuda:0")):
    images = sorted(glob(os.path.join(root_dir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
    val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose([
        LoadNiftid(keys=["img", "seg"]),
        AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
        Spacingd(keys=["img", "seg"],
                 pixdim=[1.2, 0.8, 0.7],
                 interp_order=["bilinear", "nearest"]),
        ScaleIntensityd(keys=["img", "seg"]),
        ToTensord(keys=["img", "seg"]),
    ])
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    # sliding window inference needs one image per iteration
    val_loader = DataLoader(val_ds,
                            batch_size=1,
                            num_workers=4,
                            collate_fn=list_data_collate,
                            pin_memory=torch.cuda.is_available())

    model = UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    model_filename = os.path.join(root_dir, "best_metric_model.pth")
    model.load_state_dict(torch.load(model_filename))
    model.eval()
    with torch.no_grad():
        metric_sum = 0.0
        metric_count = 0
        saver = NiftiSaver(output_dir=os.path.join(root_dir, "output"),
                           dtype=int)
        for val_data in val_loader:
            val_images, val_labels = val_data["img"].to(
                device), val_data["seg"].to(device)
            # define window size and batch size for sliding window inference
            sw_batch_size, roi_size = 4, (96, 96, 96)
            val_outputs = sliding_window_inference(val_images, roi_size,
                                                   sw_batch_size, model)
            value = compute_meandice(y_pred=val_outputs,
                                     y=val_labels,
                                     include_background=True,
                                     to_onehot_y=False,
                                     sigmoid=True)
            metric_count += len(value)
            metric_sum += value.sum().item()
            val_outputs = (val_outputs.sigmoid() >= 0.5).float()
            saver.save_batch(val_outputs, val_data["img_meta_dict"])
        metric = metric_sum / metric_count
    return metric
Example #12
 def test_load_spacingd_rotate_non_diag(self):
     data = {'image': FILES[0]}
     data_dict = LoadNiftid(keys='image')(data)
     data_dict = AddChanneld(keys='image')(data_dict)
     res_dict = Spacingd(keys='image', pixdim=(1, 2, 3), diagonal=False, mode='nearest')(data_dict)
     np.testing.assert_allclose(data_dict['image.affine'], res_dict['image.original_affine'])
     np.testing.assert_allclose(
         res_dict['image.affine'],
         np.array([[-1., 0., 0., 32.], [0., 2., 0., -40.], [0., 0., 3., -16.], [0., 0., 0., 1.]]))
Example #13
def _default_transforms(image_key, label_key, pixdim):
    keys = [image_key] if label_key is None else [image_key, label_key]
    mode = ([GridSampleMode.BILINEAR, GridSampleMode.NEAREST]
            if len(keys) == 2 else [GridSampleMode.BILINEAR])
    return Compose([
        LoadImaged(keys=keys),
        AsChannelFirstd(keys=keys),
        Spacingd(keys=keys, pixdim=pixdim, mode=mode),
        Orientationd(keys=keys, axcodes="RAS"),
    ])
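A hypothetical call to the helper above; the file names are placeholders and assume readable NIfTI volumes:

xform = _default_transforms(image_key="image", label_key="label", pixdim=(1.0, 1.0, 1.0))
sample = xform({"image": "ct.nii.gz", "label": "seg.nii.gz"})  # placeholder paths
print(sample["image"].shape, sample["label"].shape)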
Example #14
 def test_load_spacingd(self, filename):
     data = {'image': filename}
     data_dict = LoadNiftid(keys='image')(data)
     data_dict = AddChanneld(keys='image')(data_dict)
     res_dict = Spacingd(keys='image', pixdim=(1, 2, 3), diagonal=True, mode='constant')(data_dict)
     np.testing.assert_allclose(data_dict['image.affine'], res_dict['image.original_affine'])
     anat = nibabel.Nifti1Image(data_dict['image'][0], data_dict['image.affine'])
     ref = resample_to_output(anat, (1, 2, 3))
     np.testing.assert_allclose(res_dict['image.affine'], ref.affine)
     np.testing.assert_allclose(res_dict['image'].shape[1:], ref.shape)
     np.testing.assert_allclose(ref.get_fdata(), res_dict['image'][0])
Example #15
 def test_load_spacingd_rotate_non_diag(self):
     data_dict = self.load_image(FILES[0])
     res_dict = Spacingd(keys="image",
                         pixdim=(1, 2, 3),
                         diagonal=False,
                         padding_mode="border")(data_dict)
     np.testing.assert_allclose(
         res_dict["image"].affine,
         np.array([[-1.0, 0.0, 0.0, 32.0], [0.0, 2.0, 0.0, -40.0],
                   [0.0, 0.0, 3.0, -16.0], [0.0, 0.0, 0.0, 1.0]]),
     )
Example #16
def _get_transforms(keys, pixdim):
    mode = ([GridSampleMode.BILINEAR, GridSampleMode.NEAREST]
            if len(keys) == 2 else [GridSampleMode.BILINEAR])
    transforms = [
        LoadImaged(keys=keys),
        AsChannelFirstd(keys=keys),
        Spacingd(keys=keys, pixdim=pixdim, mode=mode),
        Orientationd(keys=keys, axcodes="RAS"),
    ]

    return Compose(transforms)
Example #17
 def test_spacingd(self, _, data, kw_args, expected_shape, expected_affine,
                   device):
     data = {k: v.to(device) for k, v in data.items()}
     res = Spacingd(**kw_args)(data)
     in_img = data["image"]
     out_img = res["image"]
     self.assertEqual(in_img.device, out_img.device)
     # no change in number of keys
     self.assertEqual(tuple(sorted(data)), tuple(sorted(res)))
     np.testing.assert_allclose(out_img.shape, expected_shape)
     assert_allclose(out_img.affine, expected_affine)
Example #18
 def test_interp_sep(self):
     data = {
         "image": np.ones((2, 1, 10)),
         "seg": np.ones((2, 1, 10)),
         "image.affine": np.eye(4),
         "seg.affine": np.eye(4),
     }
     spacing = Spacingd(keys=("image", "seg"), interp_order=("bilinear", "nearest"), pixdim=(1, 0.2,))
     res = spacing(data)
     self.assertEqual(("image", "image.affine", "seg", "seg.affine"), tuple(sorted(res)))
     np.testing.assert_allclose(res["image"].shape, (2, 1, 46))
     np.testing.assert_allclose(res["image.affine"], np.diag((1, 0.2, 1, 1)))
Example #19
 def test_interp_all(self):
     data = {
         "image": np.arange(20).reshape((2, 1, 10)),
         "seg": np.ones((2, 1, 10)),
         "image_meta_dict": {"affine": np.eye(4)},
         "seg_meta_dict": {"affine": np.eye(4)},
     }
     spacing = Spacingd(keys=("image", "seg"), interp_order="nearest", pixdim=(1, 0.2))
     res = spacing(data)
     self.assertEqual(("image", "image_meta_dict", "seg", "seg_meta_dict"), tuple(sorted(res)))
     np.testing.assert_allclose(res["image"].shape, (2, 1, 46))
     np.testing.assert_allclose(res["image_meta_dict"]["affine"], np.diag((1, 0.2, 1, 1)))
Example #20
 def test_load_spacingd_non_diag(self):
     data = {'image': FILES[1]}
     data_dict = LoadNiftid(keys='image')(data)
     data_dict = AddChanneld(keys='image')(data_dict)
     affine = data_dict['image.affine']
     data_dict['image.original_affine'] = data_dict['image.affine'] = \
         np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 0, 1]]) @ affine
     res_dict = Spacingd(keys='image', pixdim=(1, 2, 3), diagonal=False, mode='constant')(data_dict)
     np.testing.assert_allclose(data_dict['image.affine'], res_dict['image.original_affine'])
     np.testing.assert_allclose(
         res_dict['image.affine'],
         np.array([[0., 0., 3., -27.599409], [0., 2., 0., -47.977585], [-1., 0., 0., 35.297897], [0., 0., 0., 1.]]))
Example #21
 def pre_transforms(self, data=None) -> Sequence[Callable]:
     return [
         LoadImaged(keys="image", reader="ITKReader"),
         EnsureChannelFirstd(keys="image"),
         Spacingd(keys="image", pixdim=self.target_spacing),
         ScaleIntensityRanged(keys="image",
                              a_min=-175,
                              a_max=250,
                              b_min=0.0,
                              b_max=1.0,
                              clip=True),
         EnsureTyped(keys="image"),
     ]
Example #22
 def pre_transforms(self, data=None) -> Sequence[Callable]:
     return [
         LoadImaged(keys="image"),
         AddChanneld(keys="image"),
         Spacingd(keys="image", pixdim=[1.0, 1.0, 1.0]),
         ScaleIntensityRanged(keys="image",
                              a_min=-57,
                              a_max=164,
                              b_min=0.0,
                              b_max=1.0,
                              clip=True),
         EnsureTyped(keys="image"),
     ]
Example #23
 def test_load_spacingd_rotate_non_diag(self):
     data = {"image": FILES[0]}
     data_dict = LoadImaged(keys="image")(data)
     data_dict = AddChanneld(keys="image")(data_dict)
     res_dict = Spacingd(keys="image",
                         pixdim=(1, 2, 3),
                         diagonal=False,
                         padding_mode="border")(data_dict)
     np.testing.assert_allclose(
         res_dict[PostFix.meta("image")]["affine"],
         np.array([[-1.0, 0.0, 0.0, 32.0], [0.0, 2.0, 0.0, -40.0],
                   [0.0, 0.0, 3.0, -16.0], [0.0, 0.0, 0.0, 1.0]]),
     )
Example #24
 def pre_transforms(self, data):
     return [
         LoadImaged(keys="image"),
         AsChannelFirstd(keys="image"),
         Spacingd(keys="image", pixdim=[1.0, 1.0, 1.0], mode="bilinear"),
         AddGuidanceFromPointsd(ref_image="image", guidance="guidance", dimensions=3),
         AddChanneld(keys="image"),
         SpatialCropGuidanced(keys="image", guidance="guidance", spatial_size=self.spatial_size),
         Resized(keys="image", spatial_size=self.model_size, mode="area"),
         ResizeGuidanced(guidance="guidance", ref_image="image"),
         NormalizeIntensityd(keys="image", subtrahend=208, divisor=388),
         AddGuidanceSignald(image="image", guidance="guidance"),
     ]
Example #25
 def test_load_spacingd_rotate_non_diag_ornt(self):
     data = {"image": FILES[0]}
     data_dict = LoadNiftid(keys="image")(data)
     data_dict = AddChanneld(keys="image")(data_dict)
     res_dict = Spacingd(keys="image",
                         pixdim=(1, 2, 3),
                         diagonal=False,
                         mode="border")(data_dict)
     res_dict = Orientationd(keys="image", axcodes="LPI")(res_dict)
     np.testing.assert_allclose(
         res_dict["image_meta_dict"]["affine"],
         np.array([[-1.0, 0.0, 0.0, 32.0], [0.0, -2.0, 0.0, 40.0],
                   [0.0, 0.0, -3.0, 32.0], [0.0, 0.0, 0.0, 1.0]]),
     )
Example #26
 def test_load_spacingd_non_diag_ornt(self):
     data = {'image': FILES[1]}
     data_dict = LoadNiftid(keys='image')(data)
     data_dict = AddChanneld(keys='image')(data_dict)
     affine = data_dict['image.affine']
     data_dict['image.original_affine'] = data_dict['image.affine'] = \
         np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 0, 1]]) @ affine
     res_dict = Spacingd(keys='image', pixdim=(1, 2, 3), diagonal=False, mode='constant')(data_dict)
     res_dict = Orientationd(keys='image', axcodes='LPI')(res_dict)
     np.testing.assert_allclose(data_dict['image.affine'], res_dict['image.original_affine'])
     np.testing.assert_allclose(
         res_dict['image.affine'],
         np.array([[-3., 0., 0., 56.4005909], [0., -2., 0., 52.02241516], [0., 0., -1., 35.29789734],
                   [0., 0., 0., 1.]]))
Example #27
def get_xforms_load(mode="load", keys=("image", "label")):
    """returns a composed transform for train/val/infer."""

    xforms = [
        LoadImaged(keys),
        AddChanneld(keys),
        Orientationd(keys, axcodes="LPS"),
        Spacingd(keys, pixdim=(1.25, 1.25, 5.0), mode=("bilinear", "nearest")[: len(keys)]),
        # ScaleIntensityRanged(keys[0], a_min=-1000.0, a_max=500.0, b_min=0.0, b_max=1.0, clip=True),
    ]
    if mode == "load":
        dtype = (np.int16, np.uint8)
    xforms.extend([CastToTyped(keys, dtype=dtype), ToTensord(keys)])
    return monai.transforms.Compose(xforms)
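A hypothetical usage of get_xforms_load; the file paths are placeholders:

load_xform = get_xforms_load(mode="load", keys=("image", "label"))
case = load_xform({"image": "case0_ct.nii.gz", "label": "case0_seg.nii.gz"})  # placeholder paths
print(case["image"].shape, case["image"].dtype)  # image cast to int16 by CastToTyped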
Example #28
 def test_duplicate_transforms(self):
     im, _ = create_test_image_2d(128, 128, num_seg_classes=1, channel_dim=0)
     data = [{"img": im} for _ in range(2)]

     # at least 1 deterministic transform followed by at least 1 random one
     transform = Compose([Spacingd("img", pixdim=(1, 1)), RandAffined("img", prob=1.0)])

     # CacheDataset and DataLoader with persistent_workers
     train_ds = CacheDataset(data, transform, cache_num=1)
     train_loader = DataLoader(train_ds, num_workers=2, persistent_workers=True)

     b1 = next(iter(train_loader))
     b2 = next(iter(train_loader))

     self.assertEqual(len(b1["img_transforms"]), len(b2["img_transforms"]))
Example #29
 def test_load_spacingd_rotate_non_diag(self):
     data = {"image": FILES[0]}
     data_dict = LoadNiftid(keys="image")(data)
     data_dict = AddChanneld(keys="image")(data_dict)
     res_dict = Spacingd(keys="image",
                         pixdim=(1, 2, 3),
                         diagonal=False,
                         mode="border")(data_dict)
     np.testing.assert_allclose(data_dict["image.affine"],
                                res_dict["image.original_affine"])
     np.testing.assert_allclose(
         res_dict["image.affine"],
         np.array([[-1.0, 0.0, 0.0, 32.0], [0.0, 2.0, 0.0, -40.0],
                   [0.0, 0.0, 3.0, -16.0], [0.0, 0.0, 0.0, 1.0]]),
     )
Example #30
 def test_spacingd_1d(self):
     data = {
         "image": np.arange(20).reshape((2, 10)),
         "image.original_affine": np.diag((3, 2, 1, 1))
     }
     data["image.affine"] = data["image.original_affine"]
     spacing = Spacingd(keys="image", pixdim=(0.2, ))
     res = spacing(data)
     self.assertEqual(("image", "image.affine", "image.original_affine"),
                      tuple(sorted(res)))
     np.testing.assert_allclose(res["image"].shape, (2, 136))
     np.testing.assert_allclose(res["image.affine"], np.diag(
         (0.2, 2, 1, 1)))
     np.testing.assert_allclose(res["image.original_affine"],
                                np.diag((3, 2, 1, 1)))
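Pulling the pattern together, here is a minimal self-contained sketch of Spacingd on synthetic data, assuming a MONAI version that reads the affine from the image_meta_dict entry as in Examples #3 and #19:

import numpy as np
from monai.transforms import Spacingd

# one channel, 10 x 15 x 20 voxels, identity affine (1 mm isotropic spacing)
data = {"image": np.ones((1, 10, 15, 20)), "image_meta_dict": {"affine": np.eye(4)}}
res = Spacingd(keys="image", pixdim=(1.0, 2.0, 1.4), mode="bilinear")(data)
print(res["image"].shape)  # (1, 10, 8, 15), cf. Example #8
print(res["image_meta_dict"]["affine"])  # diag(1.0, 2.0, 1.4, 1.0)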