def test_converter(self, data_shape, filenames, expected_shape, meta_shape):
    test_image = np.random.randint(0, 256, size=data_shape)
    with tempfile.TemporaryDirectory() as tempdir:
        for i, name in enumerate(filenames):
            filenames[i] = os.path.join(tempdir, name)
            Image.fromarray(test_image.astype("uint8")).save(filenames[i])
        reader = PILReader(converter=lambda image: image.convert("LA"))
        result = reader.get_data(reader.read(filenames, mode="r"))
        # load image by PIL and compare the result
        test_image = np.asarray(Image.open(filenames[0]).convert("LA"))

    self.assertTupleEqual(tuple(result[1]["spatial_shape"]), meta_shape)
    self.assertTupleEqual(result[0].shape, expected_shape)
    np.testing.assert_allclose(result[0], test_image)
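# Note on test_converter above: the `converter` callable given to PILReader is
# applied to each PIL image after it is opened, so converting to mode "LA"
# (luminance + alpha) yields a two-channel array; this is why expected_shape
# and meta_shape may differ from the shape of the random data written to disk.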
def test_shape_value(self, data_shape, filenames, expected_shape, meta_shape):
    test_image = np.random.randint(0, 256, size=data_shape)
    with tempfile.TemporaryDirectory() as tempdir:
        for i, name in enumerate(filenames):
            filenames[i] = os.path.join(tempdir, name)
            Image.fromarray(test_image.astype("uint8")).save(filenames[i])
        reader = PILReader(mode="r")
        result = reader.get_data(reader.read(filenames))
        # load image by PIL and compare the result
        test_image = np.asarray(Image.open(filenames[0]))

    self.assertTupleEqual(tuple(result[1]["spatial_shape"]), meta_shape)
    self.assertTupleEqual(result[0].shape, expected_shape)
    test_image = np.moveaxis(test_image, 0, 1)
    if result[0].shape == test_image.shape:
        np.testing.assert_allclose(result[0], test_image)
    else:
        np.testing.assert_allclose(result[0], np.tile(test_image, [result[0].shape[0], 1, 1]))
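# In both tests the parameterized arguments are: the shape of the random image
# written to disk, the list of file names it is saved under, the array shape
# expected from reader.get_data(), and the "spatial_shape" expected in the
# returned metadata. The np.moveaxis/np.tile branch above accounts for the
# reader returning the image transposed relative to np.asarray(Image.open(...))
# and for several files being stacked along a new leading axis.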
def test_readers(self):
    inst = ITKReader()
    self.assertIsInstance(inst, ITKReader)
    inst = NibabelReader()
    self.assertIsInstance(inst, NibabelReader)
    inst = NibabelReader(as_closest_canonical=True)
    self.assertIsInstance(inst, NibabelReader)
    inst = NumpyReader()
    self.assertIsInstance(inst, NumpyReader)
    inst = NumpyReader(npz_keys="test")
    self.assertIsInstance(inst, NumpyReader)
    inst = PILReader()
    self.assertIsInstance(inst, PILReader)
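# Illustrative usage sketch (not part of the tests): any of the readers
# constructed above can be handed to LoadImage, mirroring how PILReader is
# used in the training transforms below. The file name is a hypothetical path.
#
#     from monai.transforms import LoadImage
#     loader = LoadImage(PILReader(), image_only=True)
#     img = loader("example.png")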
inputZ01, inputZ01_val = split_train_val(inputZ01_path)
inputZ02, inputZ02_val = split_train_val(inputZ02_path)
inputZ03, inputZ03_val = split_train_val(inputZ03_path)
inputZ04, inputZ04_val = split_train_val(inputZ04_path)
inputZ05, inputZ05_val = split_train_val(inputZ05_path)
inputZ06, inputZ06_val = split_train_val(inputZ06_path)
inputZ07, inputZ07_val = split_train_val(inputZ07_path)
targetC01, targetC01_val = split_train_val(targetC01_path)
targetC02, targetC02_val = split_train_val(targetC02_path)
targetC03, targetC03_val = split_train_val(targetC03_path)

# data preprocessing/augmentation
trans_train = Compose([
    # LoadPNG(image_only=True),
    LoadImage(PILReader(), image_only=True),
    AddChannel(),
    CenterSpatialCrop(roi_size=2154),  # 2154
    # ScaleIntensity(),
    # RandRotate(range_x=15, prob=aug_prob, keep_size=True),
    # RandRotate90(prob=aug_prob, spatial_axes=(0, 1)),
    # RandFlip(spatial_axis=0, prob=aug_prob),
    # RandScaleIntensity(factors=0.5, prob=aug_prob),
    ToTensor()
])

trans_val = Compose([
    # LoadPNG(image_only=True),
    LoadImage(PILReader(), image_only=True),
    AddChannel(),
    # CenterSpatialCrop(roi_size=2154),
    RandFlip(spatial_axis=(1, 2), prob=aug_prob),
    ToTensor()
])

trans_val = MozartTheComposer([
    # LoadImage(PILReader(), image_only=True),
    # ScaleIntensity(),
    # AddChannel(),
    # RandSpatialCrop(roi_size=256, random_size=False),
    # CenterSpatialCrop(roi_size=2154),
    ToTensor()
])

# create dataset class
train_dataset = OurDataset(data=train_split, data_reader=PILReader(), transform=trans_train, roi_size=256, samples_per_image=8)
val_dataset = OurGridyDataset(data=val_split, data_reader=PILReader(), patch_size=256)

# now create data loader (MONAI DataLoader)
training_loader = DataLoader(
    train_dataset,
    batch_size=batch_size,
    shuffle=False,
    num_workers=8,  # multiprocessing.cpu_count(),