def setUp(self):
        """Create deterministic 3D and 2D NIfTI-backed image/label test data."""
        if not has_nib:
            self.skipTest("nibabel required for test_inverse")

        # Fixed seed so the synthetic images are reproducible across runs.
        set_determinism(seed=0)

        # 3D: one synthetic image/label pair on disk, loaded b_size times.
        b_size = 11
        im_fname, seg_fname = (make_nifti_image(i)
                               for i in create_test_image_3d(101, 100, 107))
        load_ims = Compose([LoadImaged(KEYS), AddChanneld(KEYS)])
        self.data_3d = [
            load_ims({
                "image": im_fname,
                "label": seg_fname
            }) for _ in range(b_size)
        ]

        # 2D: same pattern with a smaller batch and a 2D synthetic pair.
        b_size = 8
        im_fname, seg_fname = (
            make_nifti_image(i)
            for i in create_test_image_2d(62, 37, rad_max=10))
        load_ims = Compose([LoadImaged(KEYS), AddChanneld(KEYS)])
        self.data_2d = [
            load_ims({
                "image": im_fname,
                "label": seg_fname
            }) for _ in range(b_size)
        ]

        self.batch_size = 7
# Esempio n. 2 (Example 2)
# 0
def transformations():
    """Build the training and validation pre-processing pipelines.

    Both pipelines are identical: load image/label from disk, prepend a
    channel dimension, and convert to torch tensors.

    Returns:
        tuple: ``(train_transforms, val_transforms)``.
    """
    def _pipeline():
        keys = ['image', 'label']
        return Compose([
            LoadImaged(keys=keys),
            AddChanneld(keys=keys),
            ToTensord(keys=keys),
        ])

    return _pipeline(), _pipeline()
# Esempio n. 3 (Example 3)
# 0
 def train_pre_transforms(self, context: Context):
     """Pre-transforms for interactive-segmentation training.

     Loads image/label, crops to the label foreground, resizes to the model
     size, normalizes intensity, then adds the initial guidance seed point
     and guidance signal channels. Returns the transform list (not Composed).
     """
     # Dataset preparation
     t: List[Any] = [
         LoadImaged(keys=("image", "label")),
         AddChanneld(keys=("image", "label")),
         SpatialCropForegroundd(keys=("image", "label"),
                                source_key="label",
                                spatial_size=self.roi_size),
         Resized(keys=("image", "label"),
                 spatial_size=self.model_size,
                 mode=("area", "nearest")),
         # Fixed normalization constants; presumably dataset-wide
         # statistics — TODO confirm against the training data.
         NormalizeIntensityd(keys="image", subtrahend=208.0,
                             divisor=388.0),  # type: ignore
     ]
     if self.dimension == 3:
         # Valid slice ids are only needed to place seed points in 3D.
         t.append(FindAllValidSlicesd(label="label", sids="sids"))
     t.extend([
         AddInitialSeedPointd(label="label",
                              guidance="guidance",
                              sids="sids"),
         AddGuidanceSignald(image="image", guidance="guidance"),
         EnsureTyped(keys=("image", "label"), device=context.device),
         # Drop everything except the keys the trainer consumes.
         SelectItemsd(keys=("image", "label", "guidance")),
     ])
     return t
# Esempio n. 4 (Example 4)
# 0
    def test_decollation(self, batch_size=2, num_workers=2):
        """Decollating a collated batch must reproduce the original samples."""

        im = create_test_image_2d(100, 101)[0]
        # Use on-disk NIfTI files when nibabel is available, else raw arrays.
        data = [{
            "image": make_nifti_image(im) if has_nib else im
        } for _ in range(6)]

        transforms = Compose([
            AddChanneld("image"),
            SpatialPadd("image", 150),
            RandFlipd("image", prob=1.0, spatial_axis=1),
            ToTensord("image"),
        ])
        # If nibabel present, read from disk
        if has_nib:
            transforms = Compose([LoadImaged("image"), transforms])

        dataset = CacheDataset(data, transforms, progress=False)
        loader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=num_workers)

        for b, batch_data in enumerate(loader):
            # Exercise both the functional and the transform-based APIs.
            decollated_1 = decollate_batch(batch_data)
            decollated_2 = Decollated()(batch_data)

            for decollated in [decollated_1, decollated_2]:
                for i, d in enumerate(decollated):
                    # Each decollated item must match the dataset item it came from.
                    self.check_match(dataset[b * batch_size + i], d)
 def test_load_spacingd_rotate(self, filename):
     """Spacingd on a rotated affine must agree with nibabel's resampler."""
     data = {"image": filename}
     data_dict = LoadNiftid(keys="image")(data)
     data_dict = AddChanneld(keys="image")(data_dict)
     affine = data_dict["image.affine"]
     # Pre-rotate the affine by 90 degrees in the x-z plane.
     data_dict["image.original_affine"] = data_dict["image.affine"] = (
         np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 0, 1]
                   ]) @ affine)
     t = time.time()
     res_dict = Spacingd(keys="image",
                         pixdim=(1, 2, 3),
                         diagonal=True,
                         mode="zeros")(data_dict)
     t1 = time.time()
     print(f"time monai: {t1 - t}")
     # Reference result: nibabel/scipy resampling of the same volume.
     anat = nibabel.Nifti1Image(data_dict["image"][0],
                                data_dict["image.affine"])
     ref = resample_to_output(anat, (1, 2, 3), order=1)
     t2 = time.time()
     print(f"time scipy: {t2 - t1}")
     self.assertTrue(t2 >= t1)
     # The pre-rotation affine must be preserved as "original_affine".
     np.testing.assert_allclose(data_dict["image.affine"],
                                res_dict["image.original_affine"])
     np.testing.assert_allclose(res_dict["image.affine"], ref.affine)
     if "anatomical" not in filename:
         np.testing.assert_allclose(res_dict["image"].shape[1:], ref.shape)
         np.testing.assert_allclose(ref.get_fdata(),
                                    res_dict["image"][0],
                                    atol=0.05)
     else:
         # different from the ref implementation (shape computed by round
         # instead of ceil)
         np.testing.assert_allclose(ref.get_fdata()[..., :-1],
                                    res_dict["image"][0],
                                    atol=0.05)
# Esempio n. 6 (Example 6)
# 0
 def train_pre_transforms(self, context: Context):
     """Training pre-transforms for 2D RGB images with click guidance."""
     return [
         LoadImaged(keys=("image", "label"), dtype=np.uint8),
         FilterImaged(keys="image", min_size=5),
         AsChannelFirstd(keys="image"),
         AddChanneld(keys="label"),
         # ColorJitter is a torchvision op, so convert to tensor and back.
         ToTensord(keys="image"),
         TorchVisiond(keys="image",
                      name="ColorJitter",
                      brightness=64.0 / 255.0,
                      contrast=0.75,
                      saturation=0.25,
                      hue=0.04),
         ToNumpyd(keys="image"),
         RandRotate90d(keys=("image", "label"),
                       prob=0.5,
                       spatial_axes=(0, 1)),
         # Map 8-bit intensities [0, 255] to [-1, 1].
         ScaleIntensityRangeD(keys="image",
                              a_min=0.0,
                              a_max=255.0,
                              b_min=-1.0,
                              b_max=1.0),
         AddInitialSeedPointExd(label="label", guidance="guidance"),
         # 3 intensity channels (RGB) precede the guidance channels.
         AddGuidanceSignald(image="image",
                            guidance="guidance",
                            number_intensity_ch=3),
         EnsureTyped(keys=("image", "label")),
     ]
 def test_warn_random_but_has_no_invertible(self):
     """TTA over random but non-invertible transforms must raise a UserWarning."""
     pipeline = Compose([
         AddChanneld("image"),
         RandFlipd("image", prob=1.0),
         RandScaleIntensityd("image", 0.1, prob=1.0),
     ])
     with self.assertWarns(UserWarning):
         tta = TestTimeAugmentation(pipeline, 5, 0, orig_key="image")
         tta(self.get_data(1, (20, 20), data_type=np.float32))
# Esempio n. 8 (Example 8)
# 0
    def test_3d_rgb(self):
        """matshow3d must render a channel-first RGB volume into a given figure."""
        testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data")
        keys = "image"
        xforms = Compose(
            [
                LoadImaged(keys=keys),
                AddChanneld(keys=keys),
                ScaleIntensityd(keys=keys),
                # change to RGB color image
                RepeatChanneld(keys=keys, repeats=3),
            ]
        )
        image_path = os.path.join(testing_dir, "anatomical.nii")
        ims = xforms({keys: image_path})

        fig = pyplot.figure()  # external figure
        fig, _ = matshow3d(
            volume=ims[keys],
            fig=fig,
            figsize=(2, 2),
            frames_per_row=5,
            every_n=2,
            frame_dim=-1,
            channel_dim=0,
            show=False,
        )

        # Compare the rendered figure against the stored reference PNG.
        with tempfile.TemporaryDirectory() as tempdir:
            tempimg = f"{tempdir}/matshow3d_rgb_test.png"
            fig.savefig(tempimg)
            comp = compare_images(f"{testing_dir}/matshow3d_rgb_test.png", tempimg, 5e-2)
            self.assertIsNone(comp, f"value of comp={comp}")  # None indicates test passed
# Esempio n. 9 (Example 9)
# 0
def get_data(keys):
    """Get the example data to be used.

    Use MarsAtlas as it only contains 1 image for quick download and
    that image is parcellated.
    """
    cache_dir = os.environ.get("MONAI_DATA_DIRECTORY") or tempfile.mkdtemp()
    fname = "MarsAtlas-MNI-Colin27.zip"
    url = "https://www.dropbox.com/s/ndz8qtqblkciole/" + fname + "?dl=1"
    extract_dir = os.path.join(cache_dir, "MarsAtlas-MNI-Colin27")
    archive_path = os.path.join(cache_dir, fname)

    download_and_extract(url, archive_path, extract_dir)

    # Sorted glob yields the (image, label) pair deterministically.
    image, label = sorted(glob(os.path.join(extract_dir, "*.nii")))
    sample = {CommonKeys.IMAGE: image, CommonKeys.LABEL: label}

    preprocess = Compose(
        [
            LoadImaged(keys),
            AddChanneld(keys),
            ScaleIntensityd(CommonKeys.IMAGE),
            Rotate90d(keys, spatial_axes=[0, 2]),
        ]
    )
    sample = preprocess(sample)

    # Pad every spatial dim up to the largest one so the volume is cubic.
    edge = max(sample[keys[0]].shape)
    return SpatialPadd(keys, (edge, edge, edge))(sample)
# Esempio n. 10 (Example 10)
# 0
 def prepare_data(self):
     """Read train image/mask path lists, split 80/20, and build cached datasets.

     Populates ``self.train_dataset`` (random pos/neg patch sampling) and
     ``self.val_dataset`` (deterministic center crop). Paths in the ``*.txt``
     files are joined to ``self.hparams.data_dir`` by plain string
     concatenation, so ``data_dir`` is expected to end with a separator.
     """
     data_dir = self.hparams.data_dir

     # Train imgs/masks: one path per line, relative to data_dir.
     # (Dead pre-initializations of the lists removed; the with-blocks
     # always rebind them.)
     with open(data_dir + 'train_imgs.txt', 'r') as f:
         train_imgs = [data_dir + image.rstrip() for image in f.readlines()]
     with open(data_dir + 'train_masks.txt', 'r') as f:
         train_masks = [data_dir + mask.rstrip() for mask in f.readlines()]
     train_dicts = [{'image': image, 'mask': mask} for (image, mask) in zip(train_imgs, train_masks)]
     train_dicts, val_dicts = train_test_split(train_dicts, test_size=0.2)

     # Basic transforms shared by the train and validation pipelines.
     data_keys = ["image", "mask"]
     data_transforms = Compose(
         [
             LoadNiftid(keys=data_keys),
             AddChanneld(keys=data_keys),
             # Robust scaling: map the 25th..75th percentile window
             # of each image to [-0.5, 0.5].
             ScaleIntensityRangePercentilesd(
                 keys='image',
                 lower=25,
                 upper=75,
                 b_min=-0.5,
                 b_max=0.5
             )
         ]
     )

     self.train_dataset = monai.data.CacheDataset(
         data=train_dicts,
         transform=Compose(
             [
                 data_transforms,
                 # 4 patches per volume, 80% centered on mask foreground.
                 RandCropByPosNegLabeld(
                     keys=data_keys,
                     label_key="mask",
                     spatial_size=self.hparams.patch_size,
                     num_samples=4,
                     image_key="image",
                     pos=0.8,
                     neg=0.2
                 ),
                 ToTensord(keys=data_keys)
             ]
         ),
         cache_rate=1.0
     )

     self.val_dataset = monai.data.CacheDataset(
         data=val_dicts,
         transform=Compose(
             [
                 data_transforms,
                 # Deterministic crop for validation — no randomness.
                 CenterSpatialCropd(keys=data_keys, roi_size=self.hparams.patch_size),
                 ToTensord(keys=data_keys)
             ]
         ),
         cache_rate=1.0
     )
# Esempio n. 11 (Example 11)
# 0
    def test_samples(self):
        """matshow3d must tile multiple random crops into one float32 mosaic."""
        testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data")
        keys = "image"
        xforms = Compose(
            [
                LoadImaged(keys=keys),
                AddChanneld(keys=keys),
                ScaleIntensityd(keys=keys),
                RandSpatialCropSamplesd(keys=keys, roi_size=(8, 8, 5), random_size=True, num_samples=10),
            ]
        )
        image_path = os.path.join(testing_dir, "anatomical.nii")
        # Fixed RNG so the crops (and hence the reference image) are stable.
        xforms.set_random_state(0)
        ims = xforms({keys: image_path})
        fig, mat = matshow3d(
            [im[keys] for im in ims], title=f"testing {keys}", figsize=(2, 2), frames_per_row=5, every_n=2, show=False
        )
        self.assertTrue(mat.dtype == np.float32)

        # Compare the rendered figure against the stored reference PNG.
        with tempfile.TemporaryDirectory() as tempdir:
            tempimg = f"{tempdir}/matshow3d_patch_test.png"
            fig.savefig(tempimg)
            comp = compare_images(f"{testing_dir}/matshow3d_patch_test.png", tempimg, 5e-2, in_decorator=True)
            if comp:
                # Older matplotlib renders slightly differently; accept a known RMS.
                print("not none comp: ", comp)  # matplotlib 3.2.2
                np.testing.assert_allclose(comp["rms"], 30.786983, atol=1e-3, rtol=1e-3)
            else:
                self.assertIsNone(comp, f"value of comp={comp}")  # None indicates test passed
# Esempio n. 12 (Example 12)
# 0
 def pre_transforms(self, data=None) -> Sequence[Callable]:
     """Inference pre-transforms for guidance-based segmentation.

     Builds the 2D or 3D pipeline depending on ``self.dimension`` and moves
     data to the device given in ``data`` (if any). Returns the transform list.
     """
     t = [
         LoadImaged(keys="image"),
         AsChannelFirstd(keys="image"),
         Spacingd(keys="image",
                  pixdim=[1.0] * self.dimension,
                  mode="bilinear"),
         # Convert user click points in the original space into guidance.
         AddGuidanceFromPointsd(ref_image="image",
                                guidance="guidance",
                                dimensions=self.dimension),
     ]
     if self.dimension == 2:
         # 2D models operate on the single slice indicated by the guidance.
         t.append(Fetch2DSliced(keys="image", guidance="guidance"))
     t.extend([
         AddChanneld(keys="image"),
         SpatialCropGuidanced(keys="image",
                              guidance="guidance",
                              spatial_size=self.spatial_size),
         Resized(keys="image", spatial_size=self.model_size, mode="area"),
         # Keep the guidance coordinates aligned with the resized image.
         ResizeGuidanced(guidance="guidance", ref_image="image"),
         # Fixed normalization constants; presumably dataset-wide
         # statistics — TODO confirm.
         NormalizeIntensityd(keys="image", subtrahend=208,
                             divisor=388),  # type: ignore
         AddGuidanceSignald(image="image", guidance="guidance"),
         EnsureTyped(keys="image",
                     device=data.get("device") if data else None),
     ])
     return t
# Esempio n. 13 (Example 13)
# 0
    def setUp(self):
        """Prepare 1D (in-memory), 2D and 3D (NIfTI-backed) inverse-test data."""
        if not has_nib:
            self.skipTest("nibabel required for test_inverse")

        set_determinism(seed=0)

        self.all_data = {}

        affine = make_rand_affine()
        # Scale the first axis to make the affine anisotropic.
        affine[0] *= 2

        for size in [10, 11]:
            # pad 5 onto both ends so that cropping can be lossless
            im_1d = np.pad(np.arange(size), 5)[None]
            name = "1D even" if size % 2 == 0 else "1D odd"
            # Independent copies so transforms on one key cannot alias another.
            self.all_data[name] = {
                "image": np.array(im_1d, copy=True),
                "label": np.array(im_1d, copy=True),
                "other": np.array(im_1d, copy=True),
            }

        im_2d_fname, seg_2d_fname = [make_nifti_image(i) for i in create_test_image_2d(101, 100)]
        im_3d_fname, seg_3d_fname = [make_nifti_image(i, affine) for i in create_test_image_3d(100, 101, 107)]

        load_ims = Compose([LoadImaged(KEYS), AddChanneld(KEYS)])
        self.all_data["2D"] = load_ims({"image": im_2d_fname, "label": seg_2d_fname})
        self.all_data["3D"] = load_ims({"image": im_3d_fname, "label": seg_3d_fname})
# Esempio n. 14 (Example 14)
# 0
 def pre_transforms(self):
     """Random augmentation pipeline used for test-time augmentation (TTA)."""
     t = [
         LoadImaged(keys="image", reader="nibabelreader"),
         AddChanneld(keys="image"),
         # Spacing might not be needed as resize transform is used later.
         # Spacingd(keys="image", pixdim=self.spacing),
         RandAffined(
             keys="image",
             prob=1,
             rotate_range=(np.pi / 4, np.pi / 4, np.pi / 4),
             padding_mode="zeros",
             as_tensor_output=False,
         ),
         RandFlipd(keys="image", prob=0.5, spatial_axis=0),
         # NOTE(review): RandRotated ranges are interpreted in radians;
         # (-5, 5) rad is nearly three full turns — confirm that degrees
         # were not intended here.
         RandRotated(keys="image",
                     range_x=(-5, 5),
                     range_y=(-5, 5),
                     range_z=(-5, 5)),
         Resized(keys="image", spatial_size=self.spatial_size),
     ]
     # If using TTA for deepedit
     if self.deepedit:
         t.append(DiscardAddGuidanced(keys="image"))
     t.append(ToTensord(keys="image"))
     return Compose(t)
# Esempio n. 15 (Example 15)
# 0
    def test_decollation(self, *transforms):
        """Decollating batches must reproduce the per-sample dataset items."""
        batch_size, num_workers = 2, 2

        pipeline = Compose([AddChanneld(KEYS), Compose(transforms), ToTensord(KEYS)])
        if has_nib:
            # Read from disk when nibabel is available.
            pipeline = Compose([LoadImaged("image"), pipeline])

        dataset = CacheDataset(self.data, pipeline, progress=False)
        loader = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
        )

        for b, batch_data in enumerate(loader):
            # Exercise both the functional and the transform-based APIs.
            for decollated in (decollate_batch(batch_data), Decollated()(batch_data)):
                for i, d in enumerate(decollated):
                    self.check_match(dataset[b * batch_size + i], d)
# Esempio n. 16 (Example 16)
# 0
 def train_pre_transforms(self, context: Context):
     """Training pre-transforms for 3D CT segmentation with patch sampling."""
     return [
         LoadImaged(keys=("image", "label")),
         AddChanneld(keys=("image", "label")),
         # Resample both image and label to isotropic 1.0 spacing.
         Spacingd(
             keys=("image", "label"),
             pixdim=(1.0, 1.0, 1.0),
             mode=("bilinear", "nearest"),
         ),
         # Map intensities [-57, 164] to [0, 1], clipping outside values.
         ScaleIntensityRanged(keys="image",
                              a_min=-57,
                              a_max=164,
                              b_min=0.0,
                              b_max=1.0,
                              clip=True),
         CropForegroundd(keys=("image", "label"), source_key="image"),
         EnsureTyped(keys=("image", "label"), device=context.device),
         # Balanced positive/negative 96^3 patch sampling, 4 per volume.
         RandCropByPosNegLabeld(
             keys=("image", "label"),
             label_key="label",
             spatial_size=(96, 96, 96),
             pos=1,
             neg=1,
             num_samples=4,
             image_key="image",
             image_threshold=0,
         ),
         RandShiftIntensityd(keys="image", offsets=0.1, prob=0.5),
         SelectItemsd(keys=("image", "label")),
     ]
# Esempio n. 17 (Example 17)
# 0
 def test_load_spacingd_rotate(self, filename):
     """Spacingd on a rotated affine must match nibabel's resample_to_output."""
     data = {"image": filename}
     data_dict = LoadNiftid(keys="image")(data)
     data_dict = AddChanneld(keys="image")(data_dict)
     affine = data_dict["image.affine"]
     # Pre-rotate the affine by 90 degrees in the x-z plane.
     data_dict["image.original_affine"] = data_dict["image.affine"] = (
         np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 0, 1]
                   ]) @ affine)
     res_dict = Spacingd(keys="image",
                         pixdim=(1, 2, 3),
                         diagonal=True,
                         mode="constant")(data_dict)
     # The pre-rotation affine must be preserved as "original_affine".
     np.testing.assert_allclose(data_dict["image.affine"],
                                res_dict["image.original_affine"])
     # Reference result: nibabel resampling of the same volume.
     anat = nibabel.Nifti1Image(data_dict["image"][0],
                                data_dict["image.affine"])
     ref = resample_to_output(anat, (1, 2, 3))
     np.testing.assert_allclose(res_dict["image.affine"], ref.affine)
     if "anatomical" not in filename:
         np.testing.assert_allclose(res_dict["image"].shape[1:], ref.shape)
         np.testing.assert_allclose(ref.get_fdata(), res_dict["image"][0])
     else:
         # different from the ref implementation (shape computed by round
         # instead of ceil)
         np.testing.assert_allclose(ref.get_fdata()[..., :-1],
                                    res_dict["image"][0])
def get_seg_transforms(end_seg_axcodes):
    """Pipeline: load a NIfTI image, add a channel dim, reorient to ``end_seg_axcodes``."""
    steps = [
        LoadNiftid(keys=["image"], as_closest_canonical=False),
        AddChanneld(keys=["image"]),
        Orientationd(keys=["image"], axcodes=end_seg_axcodes),
    ]
    return Compose(steps)
# Esempio n. 19 (Example 19)
# 0
    def test_epistemic_scoring(self):
        """Train a tiny UNet, then validate EpistemicScoring's 3D entropy output."""
        input_size = (20, 20, 20)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        keys = ["image", "label"]
        num_training_ims = 10
        train_data = self.get_data(num_training_ims, input_size)
        test_data = self.get_data(1, input_size)

        transforms = Compose([
            AddChanneld(keys),
            CropForegroundd(keys, source_key="image"),
            DivisiblePadd(keys, 4),
        ])

        # Array (non-dict) versions of the same pipeline, used at inference.
        infer_transforms = Compose([
            AddChannel(),
            CropForeground(),
            DivisiblePad(4),
        ])

        train_ds = CacheDataset(train_data, transforms)
        # output might be different size, so pad so that they match
        train_loader = DataLoader(train_ds,
                                  batch_size=2,
                                  collate_fn=pad_list_data_collate)

        # Deliberately small UNet: the test needs speed, not quality.
        model = UNet(3, 1, 1, channels=(6, 6), strides=(2, 2)).to(device)
        loss_function = DiceLoss(sigmoid=True)
        optimizer = torch.optim.Adam(model.parameters(), 1e-3)

        # Short Dice-loss training loop.
        num_epochs = 10
        for _ in trange(num_epochs):
            epoch_loss = 0

            for batch_data in train_loader:
                inputs, labels = batch_data["image"].to(
                    device), batch_data["label"].to(device)
                optimizer.zero_grad()
                outputs = model(inputs)
                loss = loss_function(outputs, labels)
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()

            epoch_loss /= len(train_loader)

        entropy_score = EpistemicScoring(model=model,
                                         transforms=infer_transforms,
                                         roi_size=[20, 20, 20],
                                         num_samples=10)
        # Call Individual Infer from Epistemic Scoring
        ip_stack = [test_data["image"], test_data["image"], test_data["image"]]
        ip_stack = np.array(ip_stack)
        score_3d = entropy_score.entropy_3d_volume(ip_stack)
        score_3d_sum = np.sum(score_3d)
        # Call Entropy Metric from Epistemic Scoring
        self.assertEqual(score_3d.shape, input_size)
        self.assertIsInstance(score_3d_sum, np.float32)
        self.assertGreater(score_3d_sum, 3.0)
# Esempio n. 20 (Example 20)
# 0
    def test_decollation_dict(self, *transforms):
        """Decollation round-trip for dict data, optionally loading from disk."""
        pipeline = Compose([AddChanneld(KEYS), Compose(transforms), ToTensord(KEYS)])
        if has_nib:
            # If nibabel present, read from disk
            pipeline = Compose([LoadImaged("image"), pipeline])

        self.check_decollate(dataset=CacheDataset(self.data_dict, pipeline, progress=False))
# Esempio n. 21 (Example 21)
# 0
    def transformations(self, H, L):
        """Build training and validation transform pipelines for CT volumes.

        Args:
            H: CT window width.
            L: CT window level (center).

        Returns:
            tuple: ``(train_transforms, valid_transforms)``, both flattened
            ``Compose`` pipelines; only the training one contains random
            augmentations.
        """
        # NOTE: the previous revision computed unused `lower`/`upper`
        # bounds here; the dead code has been removed.
        basic_transforms = Compose([
            # Load image
            LoadImaged(keys=["image"]),

            # Segmentation
            CTSegmentation(keys=["image"]),
            AddChanneld(keys=["image"]),

            # Crop foreground based on seg image.
            CropForegroundd(keys=["image"],
                            source_key="image",
                            margin=(30, 30, 0)),

            # Crop the image along Z; relative_z_roi = (% from below, % from above)
            RelativeAsymmetricZCropd(keys=["image"],
                                     relative_z_roi=(0.15, 0.25)),
        ])

        train_transforms = Compose([
            basic_transforms,

            # Normalization to a randomly jittered CT window
            # https://radiopaedia.org/articles/windowing-ct
            RandCTWindowd(keys=["image"],
                          prob=1.0,
                          width=(H - 50, H + 50),
                          level=(L - 25, L + 25)),

            # Light spatial augmentation
            RandAxisFlipd(keys=["image"], prob=0.1),
            RandAffined(
                keys=["image"],
                prob=0.25,
                rotate_range=(0, 0, np.pi / 16),
                shear_range=(0.05, 0.05, 0.0),
                translate_range=(10, 10, 0),
                scale_range=(0.05, 0.05, 0.0),
                spatial_size=(-1, -1, -1),
                padding_mode="zeros",
            ),
            ToTensord(keys=["image"]),
        ]).flatten()

        # NOTE: No random transforms in the validation data
        valid_transforms = Compose([
            basic_transforms,

            # Normalization to the fixed CT window
            # https://radiopaedia.org/articles/windowing-ct
            CTWindowd(keys=["image"], width=H, level=L),
            ToTensord(keys=["image"]),
        ]).flatten()

        return train_transforms, valid_transforms
# Esempio n. 22 (Example 22)
# 0
def main(tempdir):
    """Evaluate a pretrained 2D UNet on synthetic data with sliding-window inference.

    Generates 5 synthetic image/segmentation PNG pairs in ``tempdir``, loads
    ``best_metric_model_segmentation2d_dict.pth``, computes a mean Dice
    metric, and saves thresholded predictions under ``./output``.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(5):
        im, seg = create_test_image_2d(128, 128, num_seg_classes=1)
        Image.fromarray(im.astype("uint8")).save(os.path.join(tempdir, f"img{i:d}.png"))
        Image.fromarray(seg.astype("uint8")).save(os.path.join(tempdir, f"seg{i:d}.png"))

    # Sorted globs keep img/seg files paired by index.
    images = sorted(glob(os.path.join(tempdir, "img*.png")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.png")))
    val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadImaged(keys=["img", "seg"]),
            AddChanneld(keys=["img", "seg"]),
            ScaleIntensityd(keys="img"),
            ToTensord(keys=["img", "seg"]),
        ]
    )
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    # sliding window inference need to input 1 image in every iteration
    val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate)
    dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction="mean")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet(
        dimensions=2,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    model.load_state_dict(torch.load("best_metric_model_segmentation2d_dict.pth"))

    model.eval()
    with torch.no_grad():
        metric_sum = 0.0
        metric_count = 0
        saver = PNGSaver(output_dir="./output")
        for val_data in val_loader:
            val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device)
            # define sliding window size and batch size for windows inference
            roi_size = (96, 96)
            sw_batch_size = 4
            val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
            value = dice_metric(y_pred=val_outputs, y=val_labels)
            # Accumulate a length-weighted mean of the per-batch metric values.
            metric_count += len(value)
            metric_sum += value.item() * len(value)
            # Threshold sigmoid outputs at 0.5 to obtain binary masks.
            val_outputs = val_outputs.sigmoid() >= 0.5
            saver.save_batch(val_outputs)
        metric = metric_sum / metric_count
        print("evaluation metric:", metric)
 def test_load_spacingd_rotate_non_diag(self):
     """Non-diagonal Spacingd must keep the input orientation in the output affine."""
     data = {'image': FILES[0]}
     data_dict = LoadNiftid(keys='image')(data)
     data_dict = AddChanneld(keys='image')(data_dict)
     res_dict = Spacingd(keys='image', pixdim=(1, 2, 3), diagonal=False, mode='nearest')(data_dict)
     # The input affine is preserved as "original_affine" ...
     np.testing.assert_allclose(data_dict['image.affine'], res_dict['image.original_affine'])
     # ... while the output affine carries pixdim (1, 2, 3) on the original axes.
     np.testing.assert_allclose(
         res_dict['image.affine'],
         np.array([[-1., 0., 0., 32.], [0., 2., 0., -40.], [0., 0., 3., -16.], [0., 0., 0., 1.]]))
# Esempio n. 24 (Example 24)
# 0
    def test_values(self):
        """Exercise MedNISTDataset download, sectioning, seeding, and error paths."""
        testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                   "testing_data")
        transform = Compose([
            LoadImaged(keys="image"),
            AddChanneld(keys="image"),
            ScaleIntensityd(keys="image"),
            ToTensord(keys=["image", "label"]),
        ])

        def _test_dataset(dataset):
            # Shared sanity checks for any "test" section instance.
            self.assertEqual(
                len(dataset),
                int(MEDNIST_FULL_DATASET_LENGTH * dataset.test_frac))
            self.assertTrue("image" in dataset[0])
            self.assertTrue("label" in dataset[0])
            self.assertTrue(PostFix.meta("image") in dataset[0])
            self.assertTupleEqual(dataset[0]["image"].shape, (1, 64, 64))

        with skip_if_downloading_fails():
            data = MedNISTDataset(root_dir=testing_dir,
                                  transform=transform,
                                  section="test",
                                  download=True,
                                  copy_cache=False)

        _test_dataset(data)

        # testing from
        data = MedNISTDataset(root_dir=Path(testing_dir),
                              transform=transform,
                              section="test",
                              download=False)
        self.assertEqual(data.get_num_classes(), 6)
        _test_dataset(data)
        # Without a transform the raw 2D image (no channel dim) is returned.
        data = MedNISTDataset(root_dir=testing_dir,
                              section="test",
                              download=False)
        self.assertTupleEqual(data[0]["image"].shape, (64, 64))
        # test same dataset length with different random seed
        data = MedNISTDataset(root_dir=testing_dir,
                              transform=transform,
                              section="test",
                              download=False,
                              seed=42)
        _test_dataset(data)
        self.assertEqual(data[0]["class_name"], "AbdomenCT")
        self.assertEqual(data[0]["label"].cpu().item(), 0)
        # Removing the data must make construction without download fail.
        shutil.rmtree(os.path.join(testing_dir, "MedNIST"))
        try:
            MedNISTDataset(root_dir=testing_dir,
                           transform=transform,
                           section="test",
                           download=False)
        except RuntimeError as e:
            print(str(e))
            self.assertTrue(str(e).startswith("Cannot find dataset directory"))
# Esempio n. 25 (Example 25)
# 0
def main():
    """Run gender-classification inference on a small IXI demo set and save predictions to CSV.

    Loads ten T1 volumes, applies intensity/size preprocessing, evaluates a pretrained
    DenseNet121 checkpoint (``best_metric_model.pth``) and writes per-sample predictions
    via ``CSVSaver`` to ``./output``, printing the overall accuracy.
    """
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    images = [
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI607-Guys-1097-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI175-HH-1570-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI385-HH-2078-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI344-Guys-0905-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI409-Guys-0960-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI584-Guys-1129-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI253-HH-1694-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI092-HH-1436-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI574-IOP-1156-T1.nii.gz"]),
        os.sep.join(["workspace", "data", "medical", "ixi", "IXI-T1", "IXI585-Guys-1130-T1.nii.gz"]),
    ]

    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)
    val_files = [{"img": img, "label": label} for img, label in zip(images, labels)]

    # Define transforms for image
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img"]),
            AddChanneld(keys=["img"]),
            ScaleIntensityd(keys=["img"]),
            Resized(keys=["img"], spatial_size=(96, 96, 96)),
            ToTensord(keys=["img"]),
        ]
    )

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    # Create DenseNet121; fall back to CPU so the demo also runs on machines without a GPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)

    # map_location keeps loading working when the checkpoint was saved on a different device
    model.load_state_dict(torch.load("best_metric_model.pth", map_location=device))
    model.eval()
    with torch.no_grad():
        num_correct = 0.0
        metric_count = 0
        saver = CSVSaver(output_dir="./output")
        for val_data in val_loader:
            val_images, val_labels = val_data["img"].to(device), val_data["label"].to(device)
            val_outputs = model(val_images).argmax(dim=1)
            value = torch.eq(val_outputs, val_labels)
            metric_count += len(value)
            num_correct += value.sum().item()
            saver.save_batch(val_outputs, val_data["img_meta_dict"])
        metric = num_correct / metric_count
        print("evaluation metric:", metric)
        saver.finalize()
# Example 26
    def test_inverse_inferred_seg(self):
        """Apply invertible transforms to labels, run them through a model, and invert the output."""
        test_data = [
            {"image": im, "label": seg.astype(np.float32)}
            for im, seg in (create_test_image_2d(100, 101) for _ in range(20))
        ]

        batch_size = 10
        # num workers = 0 for mac
        num_workers = 0 if sys.platform == "darwin" else 2
        transforms = Compose(
            [AddChanneld(KEYS), SpatialPadd(KEYS, (150, 153)), CenterSpatialCropd(KEYS, (110, 99))]
        )
        num_invertible_transforms = len(
            [t for t in transforms.transforms if isinstance(t, InvertibleTransform)]
        )

        dataset = CacheDataset(test_data, transform=transforms, progress=False)
        loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

        device = "cuda" if torch.cuda.is_available() else "cpu"
        model = UNet(
            dimensions=2, in_channels=1, out_channels=1, channels=(2, 4), strides=(2,)
        ).to(device)

        data = first(loader)
        labels = data["label"].to(device)
        segs = model(labels).detach().cpu()
        label_transform_key = "label" + InverseKeys.KEY_SUFFIX.value
        # carry the recorded transform trace alongside the model output
        segs_dict = {"label": segs, label_transform_key: data[label_transform_key]}

        segs_dict_decollated = decollate_batch(segs_dict)

        # inverse of individual segmentation
        seg_dict = first(segs_dict_decollated)
        with allow_missing_keys_mode(transforms):
            inv_seg = transforms.inverse(seg_dict)["label"]
        self.assertEqual(len(data["label_transforms"]), num_invertible_transforms)
        self.assertEqual(len(seg_dict["label_transforms"]), num_invertible_transforms)
        self.assertEqual(inv_seg.shape[1:], test_data[0]["label"].shape)
# Example 27
    def test_inverse_inferred_seg(self, extra_transform):
        """Invert inferred segmentations when an extra (cropping) transform is appended."""
        test_data = [
            {"image": im, "label": seg.astype(np.float32)}
            for im, seg in (create_test_image_2d(100, 101) for _ in range(20))
        ]

        batch_size = 10
        # num workers = 0 for mac
        num_workers = 0 if sys.platform != "linux" else 2
        transforms = Compose([AddChanneld(KEYS), SpatialPadd(KEYS, (150, 153)), extra_transform])

        dataset = CacheDataset(test_data, transform=transforms, progress=False)
        loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

        device = "cuda" if torch.cuda.is_available() else "cpu"
        model = UNet(
            spatial_dims=2, in_channels=1, out_channels=1, channels=(2, 4), strides=(1,)
        ).to(device)

        data = first(loader)
        self.assertEqual(data["image"].shape[0], batch_size * NUM_SAMPLES)

        labels = data["label"].to(device)
        self.assertIsInstance(labels, MetaTensor)
        segs = model(labels).detach().cpu()
        segs_decollated = decollate_batch(segs)
        self.assertIsInstance(segs_decollated[0], MetaTensor)
        # inverse of individual segmentation
        seg_metatensor = first(segs_decollated)
        # test to convert interpolation mode for 1 data of model output batch
        convert_applied_interp_mode(
            seg_metatensor.applied_operations, mode="nearest", align_corners=None
        )

        # manually invert the last crop samples
        last_xform = seg_metatensor.applied_operations.pop(-1)
        shape_before_extra_xform = last_xform["orig_size"]
        resizer = ResizeWithPadOrCrop(spatial_size=shape_before_extra_xform)
        with resizer.trace_transform(False):
            seg_metatensor = resizer(seg_metatensor)

        with allow_missing_keys_mode(transforms):
            inv_seg = transforms.inverse({"label": seg_metatensor})["label"]
        self.assertEqual(inv_seg.shape[1:], test_data[0]["label"].shape)
# Example 28
    def test_values(self):
        """Exercise DecathlonDataset download/caching paths and verify sample shapes.

        The final block previously used a bare try/except that silently passed when
        no exception was raised after the dataset directory was removed; it now uses
        assertRaises so a missing RuntimeError fails the test.
        """
        testing_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                   "testing_data")
        transform = Compose([
            LoadNiftid(keys=["image", "label"]),
            AddChanneld(keys=["image", "label"]),
            ScaleIntensityd(keys="image"),
            ToTensord(keys=["image", "label"]),
        ])

        def _test_dataset(dataset):
            # validation split of Task04_Hippocampus has 52 samples of shape (1, 33, 47, 34)
            self.assertEqual(len(dataset), 52)
            self.assertTrue("image" in dataset[0])
            self.assertTrue("label" in dataset[0])
            self.assertTrue("image_meta_dict" in dataset[0])
            self.assertTupleEqual(dataset[0]["image"].shape, (1, 33, 47, 34))

        try:  # will start downloading if testing_dir doesn't have the Decathlon files
            data = DecathlonDataset(
                root_dir=testing_dir,
                task="Task04_Hippocampus",
                transform=transform,
                section="validation",
                download=True,
            )
        except (ContentTooShortError, HTTPError, RuntimeError) as e:
            print(str(e))
            if isinstance(e, RuntimeError):
                # FIXME: skip MD5 check as current downloading method may fail
                self.assertTrue(str(e).startswith("MD5 check"))
            return  # skipping this test due the network connection errors

        _test_dataset(data)
        # reload from the local cache (no download)
        data = DecathlonDataset(root_dir=testing_dir,
                                task="Task04_Hippocampus",
                                transform=transform,
                                section="validation",
                                download=False)
        _test_dataset(data)
        # without a transform the raw sample keeps its unchannelled shape
        data = DecathlonDataset(root_dir=testing_dir,
                                task="Task04_Hippocampus",
                                section="validation",
                                download=False)
        self.assertTupleEqual(data[0]["image"].shape, (33, 47, 34))
        shutil.rmtree(os.path.join(testing_dir, "Task04_Hippocampus"))
        # after removing the data, constructing without download must raise
        with self.assertRaises(RuntimeError) as cm:
            DecathlonDataset(
                root_dir=testing_dir,
                task="Task04_Hippocampus",
                transform=transform,
                section="validation",
                download=False,
            )
        print(str(cm.exception))
        self.assertTrue(str(cm.exception).startswith("Cannot find dataset directory"))
# Example 29
    def setUp(self):
        """Create a deterministic batch of 3-D image/label pairs loaded from temp NIfTI files."""
        if not has_nib:
            self.skipTest("nibabel required for test_inverse")

        set_determinism(seed=0)

        # write the synthetic image and its segmentation to disk as NIfTI
        im_fname, seg_fname = (make_nifti_image(i) for i in create_test_image_3d(101, 100, 107))
        load_ims = Compose([LoadImaged(KEYS), AddChanneld(KEYS)])
        self.batch_size = 10
        self.data = [
            load_ims({"image": im_fname, "label": seg_fname})
            for _ in range(self.batch_size)
        ]
# Example 30
    def get_transforms(self):
        """Build the preprocessing pipelines.

        Returns:
            tuple: ``(train_transforms, val_transforms, test_transforms)``.
        """
        self.logger.info("Getting transforms...")
        keys = ["image", "label"]

        # each call returns FRESH transform instances so every pipeline keeps
        # its own random state, exactly as with separate inline construction
        def _common():
            # deterministic loading/orientation/normalization shared by all pipelines
            return [
                LoadNiftid(keys=keys),
                AddChanneld(keys=keys),
                Orientationd(keys=keys, axcodes="RAS"),
                NormalizeIntensityd(keys=["image"]),
            ]

        def _pad():
            return SpatialPadd(keys=keys, spatial_size=self.pad_crop_shape)

        def _crop():
            return RandSpatialCropd(keys=keys,
                                    roi_size=self.pad_crop_shape,
                                    random_center=True,
                                    random_size=False)

        train_transforms = Compose(_common() + [
            _pad(),
            RandFlipd(keys=keys, prob=0.5, spatial_axis=0),
            _crop(),
            ToTensord(keys=keys),
        ])

        val_transforms = Compose(_common() + [_pad(), _crop(), ToTensord(keys=keys)])

        test_transforms = Compose(_common() + [ToTensord(keys=keys)])

        return train_transforms, val_transforms, test_transforms