def test_orientation(self, array, affine, reader_param, expected):
    test_image = make_nifti_image(array, affine)

    # read test cases
    loader = LoadNifti(**reader_param)
    load_result = loader(test_image)
    if isinstance(load_result, tuple):
        data_array, header = load_result
    else:
        data_array = load_result
        header = None
    if os.path.exists(test_image):
        os.remove(test_image)

    # write test cases
    if header is not None:
        write_nifti(data_array, test_image, header["affine"], header.get("original_affine", None))
    elif affine is not None:
        write_nifti(data_array, test_image, affine)
    saved = nib.load(test_image)
    saved_affine = saved.affine
    saved_data = saved.get_fdata()
    if os.path.exists(test_image):
        os.remove(test_image)

    if affine is not None:
        np.testing.assert_allclose(saved_affine, affine)
    np.testing.assert_allclose(saved_data, expected)
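# A hedged sketch of one (array, affine, reader_param, expected) case that could feed the
# parameterized test above; the values are illustrative, not taken from the original test file.
# With an identity affine the volume is already in RAS orientation, so reading with
# as_closest_canonical=True and writing it back should leave the data unchanged.
_EXAMPLE_ORIENTATION_CASE = [
    np.arange(24).reshape(2, 3, 4),                        # array written via make_nifti_image
    np.eye(4),                                             # affine
    {"as_closest_canonical": True, "image_only": False},   # reader_param for LoadNifti
    np.arange(24).reshape(2, 3, 4),                        # expected data after the round trip
]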
def test_consistency(self):
    np.set_printoptions(suppress=True, precision=3)
    test_image = make_nifti_image(np.arange(64).reshape(1, 8, 8), np.diag([1.5, 1.5, 1.5, 1]))
    data, header = LoadNifti(as_closest_canonical=False)(test_image)
    data, original_affine, new_affine = Spacing([0.8, 0.8, 0.8])(
        data[None], header["affine"], interp_order="nearest"
    )
    data, _, new_affine = Orientation("ILP")(data, new_affine)
    if os.path.exists(test_image):
        os.remove(test_image)
    write_nifti(data[0], test_image, new_affine, original_affine, interp_order="nearest", mode="border")
    saved = nib.load(test_image)
    saved_data = saved.get_fdata()
    np.testing.assert_allclose(saved_data, np.arange(64).reshape(1, 8, 8), atol=1e-7)
    if os.path.exists(test_image):
        os.remove(test_image)
    write_nifti(
        data[0],
        test_image,
        new_affine,
        original_affine,
        interp_order="nearest",
        mode="border",
        output_shape=(1, 8, 8),
    )
    saved = nib.load(test_image)
    saved_data = saved.get_fdata()
    np.testing.assert_allclose(saved_data, np.arange(64).reshape(1, 8, 8), atol=1e-7)
    if os.path.exists(test_image):
        os.remove(test_image)
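# Note on the round trip above (a hedged reading of the test): Spacing and Orientation move the
# data onto a 0.8 mm, "ILP"-oriented grid described by `new_affine`, while write_nifti is handed
# both `new_affine` and `original_affine`; the writer resamples the volume back onto the original
# grid before saving, which is why the saved data is expected to match the initial
# np.arange(64).reshape(1, 8, 8) array within tolerance.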
def __getitem__(self, i):
    self.randomize()
    loadnifti = LoadNifti()
    X_img, compatible_meta = loadnifti(self.X_path[i])
    if len(self.X_path) < 1000 and i < 5:
        # only print the first few paths of small (e.g. validation) sets
        print(f"No. {i} file, path: {self.X_path[i]}")
    y_img, compatible_meta = loadnifti(self.y_path[i])
    if isinstance(self.X_transform, Randomizable):
        self.X_transform.set_random_state(seed=self._seed)
        self.y_transform.set_random_state(seed=self._seed)
    X_img = apply_transform(self.X_transform, X_img)
    y_img = apply_transform(self.y_transform, y_img)
    if self.using_flair:
        X_path_str = str(self.X_path[i])
        if "t1" in X_path_str:
            X_flair_path = X_path_str.replace("t1", "flair")
        else:
            X_flair_path = X_path_str.replace("t2", "flair")
        X_flair, compatible_meta = loadnifti(Path(X_flair_path))
        X_flair_img = apply_transform(self.X_transform, X_flair)
        # stack the FLAIR volume as an extra channel on top of the T1/T2 volume
        X_img = torch.cat((X_img, X_flair_img), 0)
    return X_img, y_img
def open_nii_image(fn):
    x = None
    if str(fn).split(".")[-1] == "nrrd":
        _nrrd = nrrd.read(str(fn))
        x = _nrrd[0]  # nrrd.read returns (data, header); keep the data array
    else:
        load_data = LoadNifti(image_only=True)
        x = load_data(fn)
    if x is None:
        raise TypeError(f"unsupported or unreadable image file: {fn}")
    return fvision.Image(torch.Tensor(x[None]))
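# Hedged usage sketch for open_nii_image, assuming module-level imports along the lines of
# `import nrrd`, `import torch`, `from fastai import vision as fvision`, and
# `from monai.transforms import LoadNifti` (the file paths below are illustrative only):
# img = open_nii_image("subject01_t1.nii.gz")  # NIfTI branch, read via LoadNifti(image_only=True)
# img = open_nii_image("subject01_ct.nrrd")    # NRRD branch, read via nrrd.read
# Either way the array gains a leading channel axis and is wrapped as a fastai Image.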
def __getitem__(self, index: int):
    self.randomize()
    meta_data = None
    img_loader = LoadNifti(
        as_closest_canonical=self.as_closest_canonical, image_only=self.image_only, dtype=self.dtype
    )
    if self.image_only:
        img = img_loader(self.image_files[index])
    else:
        img, meta_data = img_loader(self.image_files[index])
    seg = None
    if self.seg_files is not None:
        seg_loader = LoadNifti(image_only=True)
        seg = seg_loader(self.seg_files[index])
    label = None
    if self.labels is not None:
        label = self.labels[index]

    if self.transform is not None:
        if isinstance(self.transform, Randomizable):
            self.transform.set_random_state(seed=self._seed)
        img = apply_transform(self.transform, img)

    data = [img]

    if self.seg_transform is not None:
        if isinstance(self.seg_transform, Randomizable):
            self.seg_transform.set_random_state(seed=self._seed)
        seg = apply_transform(self.seg_transform, seg)

    if seg is not None:
        data.append(seg)
    if label is not None:
        data.append(label)
    if not self.image_only and meta_data is not None:
        data.append(meta_data)

    if len(data) == 1:
        return data[0]
    # use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists
    return tuple(data)
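# Hedged usage sketch: this __getitem__ closely resembles MONAI's NiftiDataset, and the attribute
# names above suggest a constructor mirroring them; the class name, file lists and transforms
# below are illustrative, not taken from the snippet itself.
# ds = NiftiDataset(image_files=images, seg_files=segs,
#                   transform=Compose([AddChannel(), RandGaussianNoise(prob=1.0)]),
#                   seg_transform=AddChannel(), image_only=False)
# img, seg, meta = ds[0]  # image, segmentation and metadata when image_only is False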
def __getitem__(self, index: int):
    self.randomize()
    meta_data = None
    img_loader = LoadNifti(
        as_closest_canonical=self.as_closest_canonical, image_only=self.image_only, dtype=self.dtype
    )
    if self.image_only:
        img = img_loader(self.image_files[index])
    else:
        img, meta_data = img_loader(self.image_files[index])
    seg = None
    if self.seg_files is not None:
        seg_loader = LoadNifti(image_only=True)
        seg = seg_loader(self.seg_files[index])
    label = None
    if self.labels is not None:
        label = self.labels[index]

    if self.transform is not None:
        if isinstance(self.transform, Randomizable):
            self.transform.set_random_state(seed=self.seed)
        img = self.transform(img)

    data = [img]

    if self.seg_transform is not None:
        if isinstance(self.seg_transform, Randomizable):
            self.seg_transform.set_random_state(seed=self.seed)
        seg = self.seg_transform(seg)

    if seg is not None:
        data.append(seg)
    if label is not None:
        data.append(label)
    if not self.image_only and meta_data is not None:
        data.append(meta_data)

    return data
def test_shape(self, input_param, filenames, expected_shape):
    test_image = np.random.randint(0, 2, size=[128, 128, 128])
    with tempfile.TemporaryDirectory() as tempdir:
        for i, name in enumerate(filenames):
            filenames[i] = os.path.join(tempdir, name)
            nib.save(nib.Nifti1Image(test_image, np.eye(4)), filenames[i])
        result = LoadNifti(**input_param)(filenames)

    if isinstance(result, tuple):
        result, header = result
        self.assertTrue("affine" in header)
        np.testing.assert_allclose(header["affine"], np.eye(4))
        if input_param["as_closest_canonical"]:
            np.testing.assert_allclose(header["original_affine"], np.eye(4))
    self.assertTupleEqual(result.shape, expected_shape)
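# A hedged example of one (input_param, filenames, expected_shape) case this test could be
# parameterized with (the file name is illustrative): a single 128^3 volume loaded with
# metadata comes back with its original shape and an identity affine in the header.
_EXAMPLE_SHAPE_CASE = [
    {"as_closest_canonical": False, "image_only": False},
    ["test_image.nii.gz"],
    (128, 128, 128),
]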
def __getitem__(self, i):
    self.randomize()
    loadnifti = LoadNifti()
    y_img, compatible_meta = loadnifti(self.y_path[i])
    y_img = apply_transform(self.y_transform, y_img)
    if isinstance(self.X_transform, Randomizable):
        self.X_transform.set_random_state(seed=self._seed)
        self.y_transform.set_random_state(seed=self._seed)
    X_img = []
    if self.num_scan_training > 1:
        for scan in self.X_path[i]:
            img = MGHImage.load(scan).get_fdata()
            img[img < 3] = 0.0
            img = apply_transform(self.X_transform, img)
            X_img.append(img)
        X_img = torch.cat(X_img, dim=0)
    else:
        img = MGHImage.load(self.X_path[i]).get_fdata()
        img[img < 3] = 0.0
        X_img = apply_transform(self.X_transform, img)
    return X_img, y_img
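# Note on the snippet above: MGHImage is nibabel's FreeSurfer MGH/MGZ image class, assumed to be
# imported at module level, e.g. `from nibabel.freesurfer.mghformat import MGHImage`; the
# `img[img < 3] = 0.0` step zeroes low-intensity background voxels before the transform is applied.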
import os
import shutil
import tempfile
import unittest

import nibabel as nib
import numpy as np
from parameterized import parameterized

from monai.data import ArrayDataset
from monai.transforms import AddChannel, Compose, LoadNifti, RandAdjustContrast, Spacing, RandGaussianNoise

TEST_CASE_1 = [
    Compose([
        LoadNifti(image_only=True),
        AddChannel(),
        RandGaussianNoise(prob=1.0),
    ]),
    Compose([
        LoadNifti(image_only=True),
        AddChannel(),
        RandGaussianNoise(prob=1.0),
    ]),
    (0, 1),
    (1, 128, 128, 128),
]

TEST_CASE_2 = [
    Compose([
        LoadNifti(image_only=True),
import os
import shutil
import tempfile
import unittest

import nibabel as nib
import numpy as np
from parameterized import parameterized
from torch.utils.data import DataLoader

from monai.data import ArrayDataset
from monai.transforms import AddChannel, Compose, LoadNifti, RandAdjustContrast, RandGaussianNoise, Spacing

TEST_CASE_1 = [
    Compose([LoadNifti(image_only=True), AddChannel(), RandGaussianNoise(prob=1.0)]),
    Compose([LoadNifti(image_only=True), AddChannel(), RandGaussianNoise(prob=1.0)]),
    (0, 1),
    (1, 128, 128, 128),
]

TEST_CASE_2 = [
    Compose([LoadNifti(image_only=True), AddChannel(), RandAdjustContrast(prob=1.0)]),
    Compose([LoadNifti(image_only=True), AddChannel(), RandAdjustContrast(prob=1.0)]),
    (0, 1),
    (1, 128, 128, 128),
]


class TestCompose(Compose):
    def __call__(self, input_):
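# Hedged sketch of how a case like TEST_CASE_1 is typically exercised: matching image and
# segmentation NIfTI files (written to a temp dir as in the shape test earlier) are paired with
# the two Compose pipelines in an ArrayDataset, which then yields transformed (image, seg) pairs
# of the expected (1, 128, 128, 128) shape. The file lists below are illustrative only.
# dataset = ArrayDataset(img_files, TEST_CASE_1[0], seg_files, TEST_CASE_1[1])
# img, seg = dataset[0]
# assert img.shape == (1, 128, 128, 128) and seg.shape == (1, 128, 128, 128)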
tconfig.set_print_info(True)

train_imtrans = Compose([
    ToTensor(),
    AddChannel(),
    RandSpatialCrop((96, 96), random_size=False),
])
# RandRotate90(prob=0.5, spatial_axes=(0, 1)),
# AddChannel(),
# ToTensor(),
# ScaleIntensity(),
# AddChannel(),
# RandSpatialCrop((96, 96), random_size=False),
# LoadNifti(),

train_segtrans = Compose([
    LoadNifti(),
    AddChannel(),
    RandRotate90(prob=0.5, spatial_axes=(0, 1)),
    ToTensor(),
])

# For testing
# datadir1 = "/home1/quanquan/datasets/lsw/benign_65/fpAML_55/slices/"
# image_files = np.array([x.path for x in os.scandir(datadir1+"image") if x.name.endswith(".npy")])
# label_files = np.array([x.path for x in os.scandir(datadir1+"label") if x.name.endswith(".npy")])

### Data Collection for Kits19
datadir_kits = "/home1/quanquan/datasets/kits19/resampled_data"
image_files = []
for subdir in os.scandir(datadir_kits):
    if subdir.name.startswith("case_"):
        image_name = os.path.join(subdir.path, "imaging.nii.gz")
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import shutil
import tempfile
import unittest

import nibabel as nib
import numpy as np
from parameterized import parameterized

from monai.data import ArrayDataset
from monai.transforms import Compose, LoadNifti, AddChannel, RandAdjustContrast, Spacing

TEST_CASE_1 = [
    Compose([LoadNifti(image_only=True), AddChannel(), RandAdjustContrast()]),
    Compose([LoadNifti(image_only=True), AddChannel(), RandAdjustContrast()]),
    (0, 1),
    (1, 128, 128, 128),
]


class TestCompose(Compose):
    def __call__(self, input_):
        img, metadata = self.transforms[0](input_)
        img = self.transforms[1](img)
        img, _, _ = self.transforms[2](img, metadata["affine"])