def test_DATAAUGMENTATION_parameter_classification(self):
    # Disabling segmentation augmentation must leave the mask untouched,
    # while the image (p_per_sample = 1) is still transformed.
    aug = Data_Augmentation()
    aug.config_p_per_sample = 1
    aug.seg_augmentation = False
    img_aug, seg_aug = aug.run(self.img3D, self.seg3D)
    self.assertFalse(np.array_equal(img_aug, self.img3D))
    self.assertTrue(np.array_equal(seg_aug, self.seg3D))
def test_DATAAUGMENTATION_BASE_create(self):
    # Data_Augmentation must be constructible with default, cycle-only,
    # and fully-specified configurations.
    aug = Data_Augmentation()
    self.assertIsInstance(aug, Data_Augmentation)
    Data_Augmentation(cycles=5)
    Data_Augmentation(cycles=1, scaling=True, rotations=True,
                      elastic_deform=False, mirror=False,
                      brightness=True, contrast=True, gamma=True,
                      gaussian_noise=True)
def test_DATAAUGMENTATION_parameter_gaussiannoise(self):
    # With only gaussian noise enabled, the image must change while the
    # segmentation mask stays identical.
    aug = Data_Augmentation(cycles=1, scaling=False, rotations=False,
                            elastic_deform=False, mirror=False,
                            brightness=False, contrast=False, gamma=False,
                            gaussian_noise=True)
    aug.config_p_per_sample = 1
    img_aug, seg_aug = aug.run(self.img3D, self.seg3D)
    self.assertFalse(np.array_equal(img_aug, self.img3D))
    self.assertTrue(np.array_equal(seg_aug, self.seg3D))
def test_DATAAUGMENTATION_parameter_cropping(self):
    # With every augmentation technique disabled, cropping alone should
    # produce patches of the configured shape for image and segmentation
    # (segmentation carries 3 one-hot class channels).
    aug = Data_Augmentation(cycles=1, scaling=False, rotations=False,
                            elastic_deform=False, mirror=False,
                            brightness=False, contrast=False, gamma=False,
                            gaussian_noise=False)
    aug.cropping = True
    aug.cropping_patch_shape = (4, 4, 4)
    img_aug, seg_aug = aug.run(self.img3D, self.seg3D)
    self.assertEqual(img_aug.shape, (1, 4, 4, 4, 1))
    self.assertEqual(seg_aug.shape, (1, 4, 4, 4, 3))
def setUpClass(self):
    # Fixed seed keeps the generated fixtures deterministic across runs.
    np.random.seed(1234)
    # Create an imaging and segmentation data set of 10 synthetic samples
    self.dataset = dict()
    for index in range(0, 10):
        img = np.random.rand(16, 16, 16) * 255
        self.img = img.astype(int)
        seg = np.random.rand(16, 16, 16) * 3
        self.seg = seg.astype(int)
        self.dataset["TEST.sample_" + str(index)] = (self.img, self.seg)
    # Initialize a Dictionary IO interface over the in-memory data set
    io_interface = Dictionary_interface(self.dataset, classes=3,
                                        three_dim=True)
    # Initialize a temporary directory to hold the batch files
    self.tmp_dir = tempfile.TemporaryDirectory(prefix="tmp.miscnn.")
    tmp_batches = os.path.join(self.tmp_dir.name, "batches")
    # Initialize Data IO backed by the temporary batch directory
    self.data_io = Data_IO(io_interface, input_path="", output_path="",
                           batch_path=tmp_batches, delete_batchDir=False)
    # Initialize a default Data Augmentation instance for the tests
    self.data_aug = Data_Augmentation()
    # Cache the list of sample indices
    self.sample_list = self.data_io.get_indiceslist()
def test_DATAAUGMENTATION_parameter_percentage(self):
    # With p_per_sample = 0.3, roughly 30% of the 100 cycles should be
    # augmented, leaving ~70% identical to the input; assert the ratio
    # of unchanged cycles with a generous tolerance band.
    aug = Data_Augmentation(cycles=100, scaling=True, rotations=False,
                            elastic_deform=False, mirror=False,
                            brightness=False, contrast=False, gamma=False,
                            gaussian_noise=False)
    aug.config_p_per_sample = 0.3
    img_aug, seg_aug = aug.run(self.img3D, self.seg3D)
    counter_equal = sum(np.array_equal(img_aug[i], self.img3D[0])
                        for i in range(0, 100))
    ratio = counter_equal / 100
    self.assertTrue(0.5 <= ratio <= 0.9)
def test_MODEL_predictionAugmentated_2D(self):
    # Augmentated prediction on 2D full images should return one
    # prediction per inference-augmentation variant with full class maps.
    aug = Data_Augmentation()
    preproc = Preprocessor(self.data_io2D, batch_size=2, data_aug=aug,
                           analysis="fullimage")
    model = Neural_Network(preprocessor=preproc)
    for sample in self.sample_list2D:
        predictions = model.predict_augmentated(sample)
        self.assertEqual(len(predictions), 2)
        for pred in predictions:
            self.assertEqual(pred.shape, (16, 16, 3))
def test_DATAAUGMENTATION_BASE_cycles(self):
    # Zero cycles is an invalid configuration and must raise
    with self.assertRaises(Exception):
        aug = Data_Augmentation(cycles=0)
        img_aug, seg_aug = aug.run(self.img2D, self.seg2D)
    # The number of produced augmentations must equal the cycle count
    for cycle_count in range(1, 50, 5):
        aug = Data_Augmentation(cycles=cycle_count)
        img_aug, seg_aug = aug.run(self.img2D, self.seg2D)
        self.assertEqual(img_aug.shape[0], cycle_count)
def test_DATAGENERATOR_augcyling(self):
    # 20 augmentation cycles over the sample list, batched by 4, should
    # yield exactly 50 batches from the generator.
    aug = Data_Augmentation(cycles=20)
    preproc = Preprocessor(self.data_io, batch_size=4, data_aug=aug,
                           prepare_subfunctions=False,
                           prepare_batches=False, analysis="fullimage")
    data_gen = DataGenerator(self.sample_list, preproc, training=True,
                             shuffle=False, iterations=None)
    self.assertEqual(50, len(data_gen))
def test_MODEL_predictionAugmentated_3D(self):
    # Augmentated prediction on 3D patchwise-crop analysis should return
    # three prediction variants, each covering the full 16^3 volume with
    # 3 class channels.
    aug = Data_Augmentation()
    preproc = Preprocessor(self.data_io3D, batch_size=1,
                           patch_shape=(8, 8, 8), data_aug=aug,
                           analysis="patchwise-crop")
    model = Neural_Network(preprocessor=preproc,
                           architecture=UNet_standard(depth=2))
    for sample in self.sample_list3D:
        predictions = model.predict_augmentated(sample)
        self.assertEqual(len(predictions), 3)
        for pred in predictions:
            self.assertEqual(pred.shape, (16, 16, 16, 3))
def test_DATAAUGMENTATION_BASE_infauf(self):
    # Inference augmentation (axis flipping) must change the image, and
    # applying the same flip twice must restore the original.
    aug = Data_Augmentation()
    aug.infaug = True
    for axis in aug.infaug_flip_list:
        aug.infaug_flip_current = axis
        flipped = aug.run_infaug(self.img3D)
        self.assertFalse(np.array_equal(flipped, self.img3D))
        restored = aug.run_infaug(flipped)
        self.assertTrue(np.array_equal(restored, self.img3D))
def test_DATAGENERATOR_inferenceAug(self):
    # Batches produced with inference augmentation enabled must differ
    # from the batches produced without it for the same sample.
    aug = Data_Augmentation()
    preproc = Preprocessor(self.data_io, batch_size=4, data_aug=aug,
                           prepare_subfunctions=False,
                           prepare_batches=False, analysis="fullimage")
    data_gen = DataGenerator([self.sample_list[0]], preproc,
                             training=False, shuffle=False,
                             iterations=None)
    # First pass: inference augmentation inactive
    batches_inactive = [batch for batch in data_gen]
    # Second pass: inference augmentation active
    aug.infaug = True
    batches_active = [batch for batch in data_gen]
    for i, batch_active in enumerate(batches_active):
        self.assertFalse(np.array_equal(batch_active,
                                        batches_inactive[i]))
def test_DATAAUGMENTATION_BASE_run2D(self):
    # With augmentation always applied, image and segmentation must both
    # change while keeping their original shapes.
    aug = Data_Augmentation()
    aug.config_p_per_sample = 1
    img_aug, seg_aug = aug.run(self.img2D, self.seg2D)
    self.assertEqual(img_aug.shape, self.img2D.shape)
    self.assertFalse(np.array_equal(img_aug, self.img2D))
    self.assertEqual(seg_aug.shape, self.seg2D.shape)
    self.assertFalse(np.array_equal(seg_aug, self.seg2D))
    # With every technique disabled, the image passes through unchanged
    aug = Data_Augmentation(cycles=1, scaling=False, rotations=False,
                            elastic_deform=False, mirror=False,
                            brightness=False, contrast=False, gamma=False,
                            gaussian_noise=False)
    img_aug, seg_aug = aug.run(self.img2D, self.seg2D)
    self.assertTrue(np.array_equal(img_aug, self.img2D))
# Setup of MIScnn Pipeline # #-----------------------------------------------------# # Initialize Data IO Interface for NIfTI data ## We are using 4 classes due to [background, lung_left, lung_right, covid-19] interface = NIFTI_interface(channels=1, classes=4) # Create Data IO object to load and write samples in the file structure data_io = Data_IO(interface, input_path="data", delete_batchDir=False) # Access all available samples in our file structure sample_list = data_io.get_indiceslist() sample_list.sort() # Create and configure the Data Augmentation class data_aug = Data_Augmentation(cycles=1, scaling=True, rotations=True, elastic_deform=True, mirror=True, brightness=True, contrast=True, gamma=True, gaussian_noise=True) # Create a clipping Subfunction to the lung window of CTs (-1250 and 250) sf_clipping = Clipping(min=-1250, max=250) # Create a pixel value normalization Subfunction to scale between 0-255 sf_normalize = Normalization(mode="grayscale") # Create a resampling Subfunction to voxel spacing 1.58 x 1.58 x 2.70 sf_resample = Resampling((1.58, 1.58, 2.70)) # Create a pixel value normalization Subfunction for z-score scaling sf_zscore = Normalization(mode="z-score") # Assemble Subfunction classes into a list sf = [sf_clipping, sf_normalize, sf_resample, sf_zscore] # Create and configure the Preprocessor class