Example #1
 def test_dicom_reader_consistency(self, filenames):
     itk_param = {"reader": "ITKReader"}
     pydicom_param = {"reader": "PydicomReader"}
     for affine_flag in [True, False]:
         itk_param["affine_lps_to_ras"] = affine_flag
         pydicom_param["affine_lps_to_ras"] = affine_flag
         itk_result = LoadImage(image_only=True, **itk_param)(filenames)
         pydicom_result = LoadImage(image_only=True, **pydicom_param)(filenames)
         np.testing.assert_allclose(pydicom_result, itk_result)
         np.testing.assert_allclose(pydicom_result.affine, itk_result.affine)
Example #2
    def test_load_image(self):
        instance1 = LoadImage(image_only=False, dtype=None)
        instance2 = LoadImage(image_only=True, dtype=None)
        self.assertIsInstance(instance1, LoadImage)
        self.assertIsInstance(instance2, LoadImage)

        for r in [
                "NibabelReader", "PILReader", "ITKReader", "NumpyReader", None
        ]:
            inst = LoadImaged("image", reader=r)
            self.assertIsInstance(inst, LoadImaged)
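
Example #2 only constructs the transform objects; a minimal usage sketch of actually loading a file (assuming MONAI is installed and "img.nii.gz" is a placeholder path) looks like this:

    from monai.transforms import LoadImage

    # build the loader once, then call it with a filename (or a list of filenames)
    loader = LoadImage(image_only=True, reader="NibabelReader")
    img = loader("img.nii.gz")  # image array (a MetaTensor with .meta in recent MONAI versions)
    print(img.shape)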
Example #3
 def test_my_reader(self):
     """test customised readers"""
     out = LoadImage(reader=_MiniReader, is_compatible=True)("test")
     self.assertEqual(out[1]["name"], "my test")
     out = LoadImage(reader=_MiniReader, is_compatible=False)("test")
     self.assertEqual(out[1]["name"], "my test")
     for item in (_MiniReader, _MiniReader(is_compatible=False)):
         out = LoadImage(reader=item)("test")
         self.assertEqual(out[1]["name"], "my test")
     out = LoadImage()("test", reader=_MiniReader(is_compatible=False))
     self.assertEqual(out[1]["name"], "my test")
Example #4
    def test_register(self):
        spatial_size = (32, 64, 128)
        test_image = np.random.rand(*spatial_size)
        with tempfile.TemporaryDirectory() as tempdir:
            filename = os.path.join(tempdir, "test_image.nii.gz")
            itk_np_view = itk.image_view_from_array(test_image)
            itk.imwrite(itk_np_view, filename)

            loader = LoadImage(image_only=True)
            loader.register(ITKReader())
            result = loader(filename)
            self.assertTupleEqual(result.shape, spatial_size[::-1])
Example #5
def main(tempdir):
    config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(5):
        im, seg = create_test_image_2d(128, 128, num_seg_classes=1)
        Image.fromarray((im * 255).astype("uint8")).save(os.path.join(tempdir, f"img{i:d}.png"))
        Image.fromarray((seg * 255).astype("uint8")).save(os.path.join(tempdir, f"seg{i:d}.png"))

    images = sorted(glob(os.path.join(tempdir, "img*.png")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.png")))

    # define transforms for image and segmentation
    imtrans = Compose([LoadImage(image_only=True), AddChannel(), ScaleIntensity(), EnsureType()])
    segtrans = Compose([LoadImage(image_only=True), AddChannel(), ScaleIntensity(), EnsureType()])
    val_ds = ArrayDataset(images, imtrans, segs, segtrans)
    # sliding window inference for one image at every iteration
    val_loader = DataLoader(val_ds, batch_size=1, num_workers=1, pin_memory=torch.cuda.is_available())
    dice_metric = DiceMetric(include_background=True, reduction="mean", get_not_nans=False)
    post_trans = Compose([EnsureType(), Activations(sigmoid=True), AsDiscrete(threshold=0.5)])
    saver = SaveImage(output_dir="./output", output_ext=".png", output_postfix="seg")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet(
        spatial_dims=2,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    model.load_state_dict(torch.load("best_metric_model_segmentation2d_array.pth"))
    model.eval()
    with torch.no_grad():
        for val_data in val_loader:
            val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
            # define sliding window size and batch size for window-based inference
            roi_size = (96, 96)
            sw_batch_size = 4
            val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
            val_outputs = [post_trans(i) for i in decollate_batch(val_outputs)]
            val_labels = decollate_batch(val_labels)
            # compute metric for current iteration
            dice_metric(y_pred=val_outputs, y=val_labels)
            for val_output in val_outputs:
                saver(val_output)
        # aggregate the final mean dice result
        print("evaluation metric:", dice_metric.aggregate().item())
        # reset the status
        dice_metric.reset()
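
Tutorial scripts like the one above are usually driven by a small entry point that creates the temporary working directory; a sketch, assuming the same imports as above plus tempfile:

    import tempfile

    if __name__ == "__main__":
        with tempfile.TemporaryDirectory() as tempdir:
            main(tempdir)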
Example #6
def main(tempdir):
    config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(5):
        im, seg = create_test_image_2d(128, 128, num_seg_classes=1)
        Image.fromarray(im.astype("uint8")).save(os.path.join(tempdir, f"img{i:d}.png"))
        Image.fromarray(seg.astype("uint8")).save(os.path.join(tempdir, f"seg{i:d}.png"))

    images = sorted(glob(os.path.join(tempdir, "img*.png")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.png")))

    # define transforms for image and segmentation
    imtrans = Compose([LoadImage(image_only=True), ScaleIntensity(), AddChannel(), ToTensor()])
    segtrans = Compose([LoadImage(image_only=True), AddChannel(), ToTensor()])
    val_ds = ArrayDataset(images, imtrans, segs, segtrans)
    # sliding window inference for one image at every iteration
    val_loader = DataLoader(val_ds, batch_size=1, num_workers=1, pin_memory=torch.cuda.is_available())
    dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction="mean")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet(
        dimensions=2,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    model.load_state_dict(torch.load("best_metric_model_segmentation2d_array.pth"))
    model.eval()
    with torch.no_grad():
        metric_sum = 0.0
        metric_count = 0
        saver = PNGSaver(output_dir="./output")
        for val_data in val_loader:
            val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
            # define sliding window size and batch size for window-based inference
            roi_size = (96, 96)
            sw_batch_size = 4
            val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
            value = dice_metric(y_pred=val_outputs, y=val_labels)
            metric_count += len(value)
            metric_sum += value.item() * len(value)
            val_outputs = val_outputs.sigmoid() >= 0.5
            saver.save_batch(val_outputs)
        metric = metric_sum / metric_count
        print("evaluation metric:", metric)
Example #7
    def test_load_nifti_multichannel(self):
        test_image = np.random.randint(0, 256, size=(31, 64, 16, 2)).astype(np.float32)
        with tempfile.TemporaryDirectory() as tempdir:
            filename = os.path.join(tempdir, "test_image.nii.gz")
            itk_np_view = itk.image_view_from_array(test_image, is_vector=True)
            itk.imwrite(itk_np_view, filename)

            itk_img = LoadImage(image_only=True, reader=ITKReader())(Path(filename))
            self.assertTupleEqual(tuple(itk_img.shape), (16, 64, 31, 2))

            nib_image = LoadImage(image_only=True, reader=NibabelReader(squeeze_non_spatial_dims=True))(Path(filename))
            self.assertTupleEqual(tuple(nib_image.shape), (16, 64, 31, 2))

            np.testing.assert_allclose(itk_img, nib_image, atol=1e-3, rtol=1e-3)
Example #8
    def test_register(self):
        spatial_size = (32, 64, 128)
        expected_shape = (128, 64, 32)
        test_image = np.random.rand(*spatial_size)
        with tempfile.TemporaryDirectory() as tempdir:
            filename = os.path.join(tempdir, "test_image.nii.gz")
            itk_np_view = itk.image_view_from_array(test_image)
            itk.imwrite(itk_np_view, filename)

            loader = LoadImage(image_only=False)
            loader.register(ITKReader(c_order_axis_indexing=True))
            result, header = loader(filename)
            self.assertTupleEqual(tuple(header["spatial_shape"]),
                                  expected_shape)
            self.assertTupleEqual(result.shape, spatial_size)
Example #9
 def test_consistency(self):
     np.set_printoptions(suppress=True, precision=3)
     test_image = make_nifti_image(np.arange(64).reshape(1, 8, 8), np.diag([1.5, 1.5, 1.5, 1]))
     data, header = LoadImage(reader="NibabelReader", as_closest_canonical=False)(test_image)
     data, original_affine, new_affine = Spacing([0.8, 0.8, 0.8])(data[None], header["affine"], mode="nearest")
     data, _, new_affine = Orientation("ILP")(data, new_affine)
     if os.path.exists(test_image):
         os.remove(test_image)
     write_nifti(data[0], test_image, new_affine, original_affine, mode="nearest", padding_mode="border")
     saved = nib.load(test_image)
     saved_data = saved.get_fdata()
     np.testing.assert_allclose(saved_data, np.arange(64).reshape(1, 8, 8), atol=1e-7)
     if os.path.exists(test_image):
         os.remove(test_image)
     write_nifti(
         data[0],
         test_image,
         new_affine,
         original_affine,
         mode="nearest",
         padding_mode="border",
         output_spatial_shape=(1, 8, 8),
     )
     saved = nib.load(test_image)
     saved_data = saved.get_fdata()
     np.testing.assert_allclose(saved_data, np.arange(64).reshape(1, 8, 8), atol=1e-7)
     if os.path.exists(test_image):
         os.remove(test_image)
Example #10
def run_inference_test(root_dir,
                       test_x,
                       test_y,
                       device="cuda:0",
                       num_workers=10):
    # define transforms for image and classification
    val_transforms = Compose(
        [LoadImage(image_only=True),
         AddChannel(),
         ScaleIntensity()])
    val_ds = MedNISTDataset(test_x, test_y, val_transforms)
    val_loader = DataLoader(val_ds, batch_size=300, num_workers=num_workers)

    model = DenseNet121(spatial_dims=2,
                        in_channels=1,
                        out_channels=len(np.unique(test_y))).to(device)

    model_filename = os.path.join(root_dir, "best_metric_model.pth")
    model.load_state_dict(torch.load(model_filename))
    y_true = []
    y_pred = []
    with eval_mode(model):
        for test_data in val_loader:
            test_images, test_labels = test_data[0].to(
                device), test_data[1].to(device)
            pred = model(test_images).argmax(dim=1)
            for i in range(len(pred)):
                y_true.append(test_labels[i].item())
                y_pred.append(pred[i].item())
    tps = [
        np.sum((np.asarray(y_true) == idx) & (np.asarray(y_pred) == idx))
        for idx in np.unique(test_y)
    ]
    return tps
Example #11
 def nifti_rw(self, test_data, reader, writer, dtype, resample=True):
     test_data = test_data.astype(dtype)
     ndim = len(test_data.shape) - 1
     for p in TEST_NDARRAYS:
         output_ext = ".nii.gz"
         filepath = f"testfile_{ndim}d"
         saver = SaveImage(output_dir=self.test_dir,
                           output_ext=output_ext,
                           resample=resample,
                           separate_folder=False,
                           writer=writer)
         saver(
             p(test_data),
             {
                 "filename_or_obj": f"{filepath}.png",
                 "affine": np.eye(4),
                 "original_affine": np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]),
             },
         )
         saved_path = os.path.join(self.test_dir,
                                   filepath + "_trans" + output_ext)
         self.assertTrue(os.path.exists(saved_path))
         loader = LoadImage(reader=reader, squeeze_non_spatial_dims=True)
         data, meta = loader(saved_path)
         if meta["original_channel_dim"] == -1:
             _test_data = moveaxis(test_data, 0, -1)
         else:
             _test_data = test_data[0]
         if resample:
             _test_data = moveaxis(_test_data, 0, 1)
         assert_allclose(data, _test_data)
Example #12
    def test_orientation(self, array, affine, reader_param, expected):
        test_image = make_nifti_image(array, affine)

        # read test cases
        loader = LoadImage(**reader_param)
        load_result = loader(test_image)
        if isinstance(load_result, tuple):
            data_array, header = load_result
        else:
            data_array = load_result
            header = None
        if os.path.exists(test_image):
            os.remove(test_image)

        # write test cases
        if header is not None:
            write_nifti(data_array, test_image, header["affine"],
                        header.get("original_affine", None))
        elif affine is not None:
            write_nifti(data_array, test_image, affine)
        saved = nib.load(test_image)
        saved_affine = saved.affine
        saved_data = saved.get_fdata()
        if os.path.exists(test_image):
            os.remove(test_image)

        if affine is not None:
            np.testing.assert_allclose(saved_affine, affine)
        np.testing.assert_allclose(saved_data, expected)
Example #13
    def test_orientation(self, array, affine, reader_param, expected):
        test_image = make_nifti_image(array, affine)

        # read test cases
        loader = LoadImage(**reader_param)
        load_result = loader(test_image)
        if isinstance(load_result, tuple):
            data_array, header = load_result
        else:
            data_array = load_result
            header = None
        if os.path.exists(test_image):
            os.remove(test_image)

        # write test cases
        writer_obj = NibabelWriter()
        writer_obj.set_data_array(data_array, channel_dim=None)
        if header is not None:
            writer_obj.set_metadata(header)
        elif affine is not None:
            writer_obj.set_metadata({"affine": affine})
        writer_obj.write(test_image, verbose=True)
        saved = nib.load(test_image)
        saved_affine = saved.affine
        saved_data = saved.get_fdata()
        if os.path.exists(test_image):
            os.remove(test_image)

        if affine is not None:
            assert_allclose(saved_affine, affine, type_test=False)
        assert_allclose(saved_data, expected, type_test=False)
Example #14
 def test_consistency(self):
     np.set_printoptions(suppress=True, precision=3)
     test_image = make_nifti_image(
         np.arange(64).reshape(1, 8, 8), np.diag([1.5, 1.5, 1.5, 1]))
     data, header = LoadImage(reader="NibabelReader",
                              as_closest_canonical=False)(test_image)
     data, original_affine, new_affine = Spacing([0.8, 0.8,
                                                  0.8])(data[None],
                                                        header["affine"],
                                                        mode="nearest")
     data, _, new_affine = Orientation("ILP")(data, new_affine)
     if os.path.exists(test_image):
         os.remove(test_image)
     write_nifti(data[0],
                 test_image,
                 new_affine,
                 original_affine,
                 mode="nearest",
                 padding_mode="border")
     saved = nib.load(test_image)
     saved_data = saved.get_fdata()
     np.testing.assert_allclose(saved_data,
                                np.arange(64).reshape(1, 8, 8),
                                atol=1e-7)
     if os.path.exists(test_image):
         os.remove(test_image)
     write_nifti(
         data[0],
         test_image,
         new_affine,
         original_affine,
         mode="nearest",
         padding_mode="border",
         output_spatial_shape=(1, 8, 8),
     )
     saved = nib.load(test_image)
     saved_data = saved.get_fdata()
     np.testing.assert_allclose(saved_data,
                                np.arange(64).reshape(1, 8, 8),
                                atol=1e-7)
     if os.path.exists(test_image):
         os.remove(test_image)
     # test the case that only corrects the orientation but does not resample
     write_nifti(data[0],
                 test_image,
                 new_affine,
                 original_affine,
                 resample=False)
     saved = nib.load(test_image)
     # compute expected affine
     start_ornt = nib.orientations.io_orientation(new_affine)
     target_ornt = nib.orientations.io_orientation(original_affine)
     ornt_transform = nib.orientations.ornt_transform(
         start_ornt, target_ornt)
     data_shape = data[0].shape
     expected_affine = new_affine @ nib.orientations.inv_ornt_aff(
         ornt_transform, data_shape)
     np.testing.assert_allclose(saved.affine, expected_affine)
     if os.path.exists(test_image):
         os.remove(test_image)
Example #15
def binary_to_image(reference_image,
                    label,
                    dtype=np.uint16,
                    file_ext=".nii.gz",
                    use_itk=True):
    start = time.time()

    image_np, meta_dict = LoadImage()(reference_image)
    label_np = np.fromfile(label, dtype=dtype)

    logger.info(f"Image: {image_np.shape}")
    logger.info(f"Label: {label_np.shape}")

    label_np = label_np.reshape(image_np.shape, order="F")
    logger.info(f"Label (reshape): {label_np.shape}")

    output_file = tempfile.NamedTemporaryFile(suffix=file_ext).name
    affine = meta_dict.get("affine")
    if use_itk:
        write_itk(label_np,
                  output_file,
                  affine=affine,
                  dtype=None,
                  compress=True)
    else:
        write_nifti(label_np, output_file, affine=affine)

    logger.info(f"binary_to_image latency : {time.time() - start} (sec)")
    return output_file
Example #16
 def png_rw(self, test_data, reader, writer, dtype, resample=True):
     test_data = test_data.astype(dtype)
     ndim = len(test_data.shape) - 1
     for p in TEST_NDARRAYS:
         output_ext = ".png"
         filepath = f"testfile_{ndim}d"
         saver = SaveImage(output_dir=self.test_dir,
                           output_ext=output_ext,
                           resample=resample,
                           separate_folder=False,
                           writer=writer)
         saver(p(test_data), {
             "filename_or_obj": f"{filepath}.png",
             "spatial_shape": (6, 8)
         })
         saved_path = os.path.join(self.test_dir,
                                   filepath + "_trans" + output_ext)
         self.assertTrue(os.path.exists(saved_path))
         loader = LoadImage(reader=reader)
         data, meta = loader(saved_path)
         if meta["original_channel_dim"] == -1:
             _test_data = moveaxis(test_data, 0, -1)
         else:
             _test_data = test_data[0]
         assert_allclose(data, _test_data)
Example #17
    def _define_prediction_transforms(self):
        """Define and initialize all prediction data transforms.

          * prediction set images transform
          * prediction set images post-transform

        @return True if data transforms could be instantiated, False otherwise.
        """

        # Define transforms for prediction
        self._prediction_image_transforms = Compose(
            [
                LoadImage(image_only=True),
                ScaleIntensity(),
                AddChannel(),
                ToTensor(),
            ]
        )

        self._prediction_post_transforms = Compose(
            [
                Activations(softmax=True),
                AsDiscrete(threshold_values=True),
            ]
        )
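
A sketch of how such pre- and post-transforms could be applied end to end (the path "sample.png" and the random logits are placeholders; class and argument names such as AddChannel and threshold_values follow the older MONAI API used in the example above):

    import torch
    from monai.transforms import Activations, AddChannel, AsDiscrete, Compose, LoadImage, ScaleIntensity, ToTensor

    pre = Compose([LoadImage(image_only=True), ScaleIntensity(), AddChannel(), ToTensor()])
    post = Compose([Activations(softmax=True), AsDiscrete(threshold_values=True)])

    img = pre("sample.png")                  # channel-first tensor of shape (1, H, W)
    logits = torch.randn(2, *img.shape[1:])  # stand-in for a two-class network output
    pred = post(logits)                      # softmax over channels, then threshold at 0.5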
Example #18
    def test_invert(self):
        set_determinism(seed=0)
        im_fname = make_nifti_image(create_test_image_3d(101, 100, 107, noise_max=100)[1])  # label image, discrete
        data = [im_fname for _ in range(12)]
        transform = Compose(
            [
                LoadImage(image_only=True),
                EnsureChannelFirst(),
                Orientation("RPS"),
                Spacing(pixdim=(1.2, 1.01, 0.9), mode="bilinear", dtype=np.float32),
                RandFlip(prob=0.5, spatial_axis=[1, 2]),
                RandAxisFlip(prob=0.5),
                RandRotate90(prob=0, spatial_axes=(1, 2)),
                RandZoom(prob=0.5, min_zoom=0.5, max_zoom=1.1, keep_size=True),
                RandRotate(prob=0.5, range_x=np.pi, mode="bilinear", align_corners=True, dtype=np.float64),
                RandAffine(prob=0.5, rotate_range=np.pi, mode="nearest"),
                ResizeWithPadOrCrop(100),
                CastToType(dtype=torch.uint8),
            ]
        )

        # num workers = 0 for mac or gpu transforms
        num_workers = 0 if sys.platform != "linux" or torch.cuda.is_available() else 2
        dataset = Dataset(data, transform=transform)
        self.assertIsInstance(transform.inverse(dataset[0]), MetaTensor)
        loader = DataLoader(dataset, num_workers=num_workers, batch_size=1)
        inverter = Invert(transform=transform, nearest_interp=True, device="cpu")

        for d in loader:
            d = decollate_batch(d)
            for item in d:
                orig = deepcopy(item)
                i = inverter(item)
                self.assertTupleEqual(orig.shape[1:], (100, 100, 100))
                # check the nearest interpolation mode
                torch.testing.assert_allclose(i.to(torch.uint8).to(torch.float), i.to(torch.float))
                self.assertTupleEqual(i.shape[1:], (100, 101, 107))
        # check labels match
        reverted = i.detach().cpu().numpy().astype(np.int32)
        original = LoadImage(image_only=True)(data[-1])
        n_good = np.sum(np.isclose(reverted, original.numpy(), atol=1e-3))
        reverted_name = i.meta["filename_or_obj"]
        original_name = original.meta["filename_or_obj"]
        self.assertEqual(reverted_name, original_name)
        print("invert diff", reverted.size - n_good)
        self.assertTrue((reverted.size - n_good) < 300000, f"diff. {reverted.size - n_good}")
        set_determinism(seed=None)
Example #19
    def __init__(
        self,
        image_files: Sequence[str],
        seg_files: Optional[Sequence[str]] = None,
        labels: Optional[Sequence[float]] = None,
        transform: Optional[Callable] = None,
        seg_transform: Optional[Callable] = None,
        label_transform: Optional[Callable] = None,
        image_only: bool = True,
        transform_with_metadata: bool = False,
        dtype: DtypeLike = np.float32,
        reader: Optional[Union[ImageReader, str]] = None,
        *args,
        **kwargs,
    ) -> None:
        """
        Initializes the dataset with the image and segmentation filename lists. The transform `transform` is applied
        to the images and `seg_transform` to the segmentations.

        Args:
            image_files: list of image filenames.
            seg_files: if in segmentation task, list of segmentation filenames.
            labels: if in classification task, list of classification labels.
            transform: transform to apply to image arrays.
            seg_transform: transform to apply to segmentation arrays.
            label_transform: transform to apply to the label data.
            image_only: if True, return only the image volume; otherwise, return the image volume and its metadata.
            transform_with_metadata: if True, the metadata will be passed to the transforms whenever possible.
            dtype: if not None, convert the loaded image to this data type.
            reader: reader to load the image files and metadata; if None, the default readers are used.
                If a reader name string is provided, a reader object is constructed with the `*args` and `**kwargs`
                parameters; supported reader names: "NibabelReader", "PILReader", "ITKReader", "NumpyReader".
            args: additional parameters for the reader if a reader name is provided.
            kwargs: additional parameters for the reader if a reader name is provided.

        Raises:
            ValueError: When ``seg_files`` length differs from ``image_files``.

        """

        if seg_files is not None and len(image_files) != len(seg_files):
            raise ValueError(
                "Must have same the number of segmentation as image files: "
                f"images={len(image_files)}, segmentations={len(seg_files)}.")

        self.image_files = image_files
        self.seg_files = seg_files
        self.labels = labels
        self.transform = transform
        self.seg_transform = seg_transform
        self.label_transform = label_transform
        if image_only and transform_with_metadata:
            raise ValueError(
                "transform_with_metadata=True requires image_only=False.")
        self.image_only = image_only
        self.transform_with_metadata = transform_with_metadata
        self.loader = LoadImage(reader, image_only, dtype, *args, **kwargs)
        self.set_random_state(seed=get_seed())
        self._seed = 0  # transform synchronization seed
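
The constructor above appears to be monai.data.ImageDataset; a hypothetical instantiation sketch (the file names are placeholders):

    from monai.data import ImageDataset
    from monai.transforms import ScaleIntensity

    ds = ImageDataset(
        image_files=["img0.nii.gz", "img1.nii.gz"],  # placeholder paths
        seg_files=["seg0.nii.gz", "seg1.nii.gz"],
        transform=ScaleIntensity(),
        seg_transform=ScaleIntensity(),
        image_only=True,
    )
    img, seg = ds[0]  # image and segmentation after loading and the respective transforms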
Example #20
 def test_consistency(self):
     np.set_printoptions(suppress=True, precision=3)
     test_image = make_nifti_image(
         np.arange(64).reshape(1, 8, 8), np.diag([1.5, 1.5, 1.5, 1]))
     data, header = LoadImage(reader="NibabelReader",
                              as_closest_canonical=False)(test_image)
     data, original_affine, new_affine = Spacing([0.8, 0.8,
                                                  0.8])(data[None],
                                                        header["affine"],
                                                        mode="nearest")
     data, _, new_affine = Orientation("ILP")(data, new_affine)
     if os.path.exists(test_image):
         os.remove(test_image)
     writer_obj = NibabelWriter()
     writer_obj.set_data_array(data[0], channel_dim=None)
     writer_obj.set_metadata(meta_dict={
         "affine": new_affine,
         "original_affine": original_affine
     },
                             mode="nearest",
                             padding_mode="border")
     writer_obj.write(test_image, verbose=True)
     saved = nib.load(test_image)
     saved_data = saved.get_fdata()
     np.testing.assert_allclose(saved_data,
                                np.arange(64).reshape(1, 8, 8),
                                atol=1e-7)
     if os.path.exists(test_image):
         os.remove(test_image)
     writer_obj.set_data_array(data[0], channel_dim=None)
     writer_obj.set_metadata(
         meta_dict={
             "affine": new_affine,
             "original_affine": original_affine,
             "spatial_shape": (1, 8, 8)
         },
         mode="nearest",
         padding_mode="border",
     )
     writer_obj.write(test_image, verbose=True)
     saved = nib.load(test_image)
     saved_data = saved.get_fdata()
     np.testing.assert_allclose(saved_data,
                                np.arange(64).reshape(1, 8, 8),
                                atol=1e-7)
     if os.path.exists(test_image):
         os.remove(test_image)
     # test the case with no resampling
     writer_obj.set_data_array(data[0], channel_dim=None)
     writer_obj.set_metadata(meta_dict={
         "affine": new_affine,
         "original_affine": original_affine
     },
                             resample=False)
     writer_obj.write(test_image, verbose=True)
     saved = nib.load(test_image)
     np.testing.assert_allclose(saved.affine, new_affine)
     if os.path.exists(test_image):
         os.remove(test_image)
Example #21
    def test_decollation_list(self, *transforms):
        t_compose = Compose([AddChannel(), Compose(transforms), ToTensor()])
        # If nibabel present, read from disk
        if has_nib:
            t_compose = _ListCompose([LoadImage(image_only=False), t_compose])

        dataset = Dataset(self.data_list, t_compose)
        self.check_decollate(dataset=dataset)
Example #22
    def test_pil(self):
        tempdir = tempfile.mkdtemp()
        test_image = np.random.randint(0, 256, size=[128, 256])
        filename = os.path.join(tempdir, "test_image.png")
        PILImage.fromarray(test_image.astype("uint8")).save(filename)

        loader = LoadImage(PILReader(converter=lambda image: image.convert("LA")))
        _ = loader(filename)
Example #23
    def initialize(self, args):
        """
        `initialize` is called only once when the model is being loaded.
        Implementing the `initialize` function is optional. This function allows
        the model to initialize any state associated with this model.
        """

        # Pull model from google drive
        extract_dir = "/models/mednist_class/1"
        tar_save_path = os.path.join(extract_dir, model_filename)
        download_and_extract(gdrive_url,
                             tar_save_path,
                             output_dir=extract_dir,
                             hash_val=md5_check,
                             hash_type="md5")
        # load model configuration
        self.model_config = json.loads(args['model_config'])

        # create inferer engine and load PyTorch model
        inference_device_kind = args.get('model_instance_kind', None)
        logger.info(f"Inference device: {inference_device_kind}")

        self.inference_device = torch.device('cpu')
        if inference_device_kind is None or inference_device_kind == 'CPU':
            self.inference_device = torch.device('cpu')
        elif inference_device_kind == 'GPU':
            inference_device_id = args.get('model_instance_device_id', '0')
            logger.info(f"Inference device id: {inference_device_id}")

            if torch.cuda.is_available():
                self.inference_device = torch.device(
                    f'cuda:{inference_device_id}')
                cudnn.enabled = True
            else:
                logger.error(
                    f"No CUDA device detected. Using device: {inference_device_kind}"
                )

        # create pre-transforms for MedNIST
        self.pre_transforms = Compose([
            LoadImage(reader="PILReader", image_only=True, dtype=np.float32),
            ScaleIntensity(),
            AddChannel(),
            AddChannel(),
            ToTensor(),
            Lambda(func=lambda x: x.to(device=self.inference_device)),
        ])

        # create post-transforms
        self.post_transforms = Compose([
            Lambda(func=lambda x: x.to(device="cpu")),
        ])

        self.inferer = SimpleInferer()

        self.model = torch.jit.load(
            f'{pathlib.Path(os.path.realpath(__file__)).parent}{os.path.sep}model.pt',
            map_location=self.inference_device)
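
The docstring above describes the Triton Python backend's `initialize` callback; for context, a minimal skeleton of that model interface (a sketch of the documented callback contract, with placeholder bodies):

    class TritonPythonModel:
        """Minimal Triton Python-backend model skeleton (sketch only)."""

        def initialize(self, args):
            # called once when the model is loaded; args["model_config"] holds the config JSON
            pass

        def execute(self, requests):
            # called for each batch of inference requests; must return one response per request
            raise NotImplementedError

        def finalize(self):
            # optional; called once when the model is unloaded
            pass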
Example #24
 def test_load_png(self):
     spatial_size = (256, 256, 3)
     test_image = np.random.randint(0, 256, size=spatial_size)
     with tempfile.TemporaryDirectory() as tempdir:
         filename = os.path.join(tempdir, "test_image.png")
         Image.fromarray(test_image.astype("uint8")).save(filename)
         result, header = LoadImage(image_only=False)(filename)
         result = EnsureChannelFirst()(result, header)
         self.assertEqual(result.shape[0], 3)
Example #25
 def test_load_png(self):
     spatial_size = (256, 224)
     test_image = np.random.randint(0, 256, size=spatial_size)
     with tempfile.TemporaryDirectory() as tempdir:
         filename = os.path.join(tempdir, "test_image.png")
         Image.fromarray(test_image.astype("uint8")).save(filename)
         result = LoadImage(image_only=True)(filename)
         self.assertTupleEqual(result.shape, spatial_size[::-1])
         np.testing.assert_allclose(result.T, test_image)
Example #26
    def test_kwargs(self):
        spatial_size = (32, 64, 128)
        test_image = np.random.rand(*spatial_size)
        with tempfile.TemporaryDirectory() as tempdir:
            filename = os.path.join(tempdir, "test_image.nii.gz")
            itk_np_view = itk.image_view_from_array(test_image)
            itk.imwrite(itk_np_view, filename)

            loader = LoadImage(image_only=True)
            reader = ITKReader(fallback_only=False)
            loader.register(reader)
            result = loader(filename)

            reader = ITKReader()
            img = reader.read(filename, fallback_only=False)
            result_raw = reader.get_data(img)
            result_raw = MetaTensor.ensure_torch_and_prune_meta(*result_raw)
            self.assertTupleEqual(result.shape, result_raw.shape)
Example #27
    def run(self, datastore):
        logger.info("Reading datastore metadata for heuristic planner...")
        if len(datastore.list_images()) == 0:
            logger.warning(
                "Currently no images are available in datastore for sampling")
            return

        # Sampling max_samples images from the datastore
        datastore_check = (datastore.list_images()
                           if len(datastore.list_images()) < self.max_samples
                           else random.sample(datastore.list_images(),
                                              self.max_samples))

        spacings = []
        img_sizes = []
        pix_img_max = []
        pix_img_min = []
        pix_img_mean = []
        pix_img_std = []
        loader = LoadImage()
        for n in tqdm(datastore_check):
            img, mtdt = loader(datastore.get_image_uri(n))

            # Check if images have more than one modality
            if mtdt["pixdim"][4] > 0:
                logger.info(
                    f"Image {mtdt['filename_or_obj'].split('/')[-1]} has more than one modality ..."
                )
            spacings.append(mtdt["pixdim"][1:4])
            img_sizes.append(mtdt["spatial_shape"])

            pix_img_max.append(img.max())
            pix_img_min.append(img.min())
            pix_img_mean.append(img.mean())
            pix_img_std.append(img.std())

        spacings = np.array(spacings)
        img_sizes = np.array(img_sizes)

        logger.info(f"Available GPU memory: {gpu_memory_map()} in MB")

        self.target_spacing = self._get_target_spacing(np.mean(spacings, 0))
        self.spatial_size = self._get_target_img_size(
            np.mean(img_sizes, 0, np.int64))
        logger.info(
            f"Spacing: {self.target_spacing}; Spatial Size: {self.spatial_size}"
        )

        # Image stats for intensity normalization
        self.max_pix = np.max(np.array(pix_img_max))
        self.min_pix = np.min(np.array(pix_img_min))
        self.mean_pix = np.mean(np.array(pix_img_mean))
        self.std_pix = np.mean(np.array(pix_img_std))
        logger.info(
            f"Pix Max: {self.max_pix}; Min: {self.min_pix}; Mean: {self.mean_pix}; Std: {self.std_pix}"
        )
Example #28
 def test_itk_meta(self):
     """test metadata from a directory"""
     out = LoadImage(image_only=True, reader="ITKReader", pixel_type=itk.UC, series_meta=True)(
         "tests/testing_data/CT_DICOM"
     )
     idx = "0008|103e"
     label = itk.GDCMImageIO.GetLabelFromTag(idx, "")[1]
     val = out.meta[idx]
     expected = "Series Description=Routine Brain "
     self.assertEqual(f"{label}={val}", expected)
Example #29
    def test_kwargs(self):
        spatial_size = (32, 64, 128)
        test_image = np.random.rand(*spatial_size)
        with tempfile.TemporaryDirectory() as tempdir:
            filename = os.path.join(tempdir, "test_image.nii.gz")
            itk_np_view = itk.image_view_from_array(test_image)
            itk.imwrite(itk_np_view, filename)

            loader = LoadImage(image_only=False)
            reader = ITKReader(fallback_only=False)
            loader.register(reader)
            result, header = loader(filename)

            reader = ITKReader()
            img = reader.read(filename, fallback_only=False)
            result_raw, header_raw = reader.get_data(img)
            np.testing.assert_allclose(header["spatial_shape"],
                                       header_raw["spatial_shape"])
            self.assertTupleEqual(result.shape, result_raw.shape)
Example #30
    def test_channel_dim(self, input_param, filename, expected_shape):
        test_image = np.random.rand(*expected_shape)
        with tempfile.TemporaryDirectory() as tempdir:
            filename = os.path.join(tempdir, filename)
            nib.save(nib.Nifti1Image(test_image, np.eye(4)), filename)
            result = LoadImage(image_only=True, **input_param)(filename)

        self.assertTupleEqual(
            result.shape, (3, 128, 128, 128) if input_param.get("ensure_channel_first", False) else expected_shape
        )
        self.assertEqual(result.meta["original_channel_dim"], input_param["channel_dim"])