@classmethod
def setUpClass(self):
    np.random.seed(1234)
    # Create 2D imaging and segmentation data set
    self.dataset = dict()
    for i in range(0, 2):
        img = np.random.rand(16, 16) * 255
        self.img = img.astype(int)
        seg = np.random.rand(16, 16) * 2
        self.seg = seg.astype(int)
        self.dataset["TEST.sample_" + str(i)] = (self.img, self.seg)
    # Initialize Dictionary IO Interface
    io_interface = Dictionary_interface(self.dataset, classes=3,
                                        three_dim=False)
    # Initialize temporary directory
    self.tmp_dir = tempfile.TemporaryDirectory(prefix="tmp.CESP.")
    tmp_batches = os.path.join(self.tmp_dir.name, "batches")
    # Initialize Data IO
    self.data_io = Data_IO(io_interface,
                           input_path=os.path.join(self.tmp_dir.name),
                           output_path=os.path.join(self.tmp_dir.name),
                           batch_path=tmp_batches, delete_batchDir=False)
    # Initialize Preprocessor
    self.pp = Preprocessor(self.data_io, batch_size=1, data_aug=None,
                           analysis="fullimage")
    # Initialize Neural Network
    self.model = Neural_Network(self.pp)
    # Get sample list
    self.sample_list = self.data_io.get_indiceslist()
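# The fixture above allocates a TemporaryDirectory but never releases it; a
# matching tearDownClass (a minimal sketch, mirroring the cleanup hook used
# by metricTEST further below) would remove the temporary files once the
# class finishes:
@classmethod
def tearDownClass(self):
    self.tmp_dir.cleanup()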
def test_MODEL_prediction3D(self):
    nn = Neural_Network(preprocessor=self.pp3D)
    nn.predict(self.sample_list3D)
    for index in self.sample_list3D:
        sample = self.data_io3D.sample_loader(index, load_seg=True,
                                              load_pred=True)
        self.assertIsNotNone(sample.pred_data)
def test_MODEL_prediction_activationOutput(self):
    nn = Neural_Network(preprocessor=self.pp2D)
    pred_list = nn.predict(self.sample_list2D, return_output=True,
                           activation_output=True)
    for pred in pred_list:
        self.assertIsNotNone(pred)
        self.assertEqual(pred.shape, (16, 16, 3))
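# For contrast, a minimal sketch of the same prediction without softmax
# outputs: with activation_output omitted (assumed to default to False),
# predict() is expected to return argmax-encoded class maps, so each 2D
# prediction should collapse from (16, 16, 3) to (16, 16). This behavior is
# an assumption, not confirmed by the tests in this file.
def test_MODEL_prediction_classOutput(self):
    nn = Neural_Network(preprocessor=self.pp2D)
    pred_list = nn.predict(self.sample_list2D, return_output=True)
    for pred in pred_list:
        self.assertEqual(pred.shape, (16, 16))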
def test_MODEL_create(self):
    nn2D = Neural_Network(preprocessor=self.pp2D)
    self.assertIsInstance(nn2D, Neural_Network)
    self.assertFalse(nn2D.three_dim)
    self.assertIsNotNone(nn2D.model)
    nn3D = Neural_Network(preprocessor=self.pp3D)
    self.assertIsInstance(nn3D, Neural_Network)
    self.assertTrue(nn3D.three_dim)
    self.assertIsNotNone(nn3D.model)
def test_EVALUATION_leaveOneOut(self):
    # Create 3D imaging and segmentation data set
    self.dataset3D = dict()
    for i in range(0, 6):
        img = np.random.rand(16, 16, 16) * 255
        self.img = img.astype(int)
        seg = np.random.rand(16, 16, 16) * 3
        self.seg = seg.astype(int)
        self.dataset3D["TEST.sample_" + str(i)] = (self.img, self.seg)
    # Initialize Dictionary IO Interface
    io_interface3D = Dictionary_interface(self.dataset3D, classes=3,
                                          three_dim=True)
    # Initialize temporary directory
    self.tmp_dir3D = tempfile.TemporaryDirectory(prefix="tmp.CESP.")
    tmp_batches = os.path.join(self.tmp_dir3D.name, "batches")
    # Initialize Data IO
    self.data_io3D = Data_IO(io_interface3D,
                             input_path=os.path.join(self.tmp_dir3D.name),
                             output_path=os.path.join(self.tmp_dir3D.name),
                             batch_path=tmp_batches, delete_batchDir=False)
    # Initialize Preprocessor
    self.pp3D = Preprocessor(self.data_io3D, batch_size=2, data_aug=None,
                             analysis="fullimage")
    # Initialize Neural Network
    model = Neural_Network(self.pp3D)
    # Get sample list
    self.sample_list3D = self.data_io3D.get_indiceslist()
    # Run Leave-One-Out validation and verify the evaluation directory
    eval_path = os.path.join(self.tmp_dir3D.name, "evaluation")
    leave_one_out(self.sample_list3D, model, epochs=3, iterations=None,
                  evaluation_path=eval_path, callbacks=[])
    self.assertTrue(os.path.exists(eval_path))
    # Cleanup stuff
    self.tmp_dir3D.cleanup()
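# leave_one_out() above wraps a simple hold-one-out loop; a hand-rolled
# equivalent, built only from the Neural_Network API already exercised in
# this file (reset_weights/train/predict), would look roughly like this
# sketch (the helper name is hypothetical, evaluation bookkeeping omitted):
def manual_leave_one_out(sample_list, model, epochs=3):
    for i, holdout in enumerate(sample_list):
        training = sample_list[:i] + sample_list[i + 1:]
        model.reset_weights()                 # fresh weights per fold
        model.train(training, epochs=epochs)  # fit on all but one sample
        model.predict([holdout])              # predict the held-out sample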
class metricTEST(unittest.TestCase):
    # Create random imaging and segmentation data
    @classmethod
    def setUpClass(self):
        np.random.seed(1234)
        # Create 2D imaging and segmentation data set
        self.dataset = dict()
        for i in range(0, 2):
            img = np.random.rand(16, 16) * 255
            self.img = img.astype(int)
            seg = np.random.rand(16, 16) * 2
            self.seg = seg.astype(int)
            self.dataset["TEST.sample_" + str(i)] = (self.img, self.seg)
        # Initialize Dictionary IO Interface
        io_interface = Dictionary_interface(self.dataset, classes=3,
                                            three_dim=False)
        # Initialize temporary directory
        self.tmp_dir = tempfile.TemporaryDirectory(prefix="tmp.CESP.")
        tmp_batches = os.path.join(self.tmp_dir.name, "batches")
        # Initialize Data IO
        self.data_io = Data_IO(io_interface,
                               input_path=os.path.join(self.tmp_dir.name),
                               output_path=os.path.join(self.tmp_dir.name),
                               batch_path=tmp_batches, delete_batchDir=False)
        # Initialize Preprocessor
        self.pp = Preprocessor(self.data_io, batch_size=1, data_aug=None,
                               analysis="fullimage")
        # Initialize Neural Network
        self.model = Neural_Network(self.pp)
        # Get sample list
        self.sample_list = self.data_io.get_indiceslist()

    # Delete all temporary files
    @classmethod
    def tearDownClass(self):
        self.tmp_dir.cleanup()

    #-------------------------------------------------#
    #               Standard DSC Metric               #
    #-------------------------------------------------#
    def test_METRICS_DSC_standard(self):
        self.model.loss = dice_coefficient
        self.model.metrics = [dice_coefficient]
        self.model.train(self.sample_list, epochs=1)

    #-------------------------------------------------#
    #                Standard DSC Loss                #
    #-------------------------------------------------#
    def test_METRICS_DSC_standardLOSS(self):
        self.model.loss = dice_coefficient_loss
        self.model.metrics = [dice_coefficient_loss]
        self.model.train(self.sample_list, epochs=1)

    #-------------------------------------------------#
    #                 Soft DSC Metric                 #
    #-------------------------------------------------#
    def test_METRICS_DSC_soft(self):
        self.model.loss = dice_soft
        self.model.metrics = [dice_soft]
        self.model.train(self.sample_list, epochs=1)

    #-------------------------------------------------#
    #                  Soft DSC Loss                  #
    #-------------------------------------------------#
    def test_METRICS_DSC_softLOSS(self):
        self.model.loss = dice_soft_loss
        self.model.metrics = [dice_soft_loss]
        self.model.train(self.sample_list, epochs=1)

    #-------------------------------------------------#
    #                   Weighted DSC                  #
    #-------------------------------------------------#
    def test_METRICS_DSC_weighted(self):
        self.model.loss = dice_weighted([1, 1, 4])
        self.model.metrics = [dice_weighted([1, 1, 4])]
        self.model.train(self.sample_list, epochs=1)

    #-------------------------------------------------#
    #             Dice & Crossentropy loss            #
    #-------------------------------------------------#
    def test_METRICS_DSC_CrossEntropy(self):
        self.model.loss = dice_crossentropy
        self.model.metrics = [dice_crossentropy]
        self.model.train(self.sample_list, epochs=1)

    #-------------------------------------------------#
    #                   Tversky loss                  #
    #-------------------------------------------------#
    def test_METRICS_Tversky(self):
        self.model.loss = tversky_loss
        self.model.metrics = [tversky_loss]
        self.model.train(self.sample_list, epochs=1)

    #-------------------------------------------------#
    #           Tversky & Crossentropy loss           #
    #-------------------------------------------------#
    def test_METRICS_Tversky_CrossEntropy(self):
        self.model.loss = tversky_crossentropy
        self.model.metrics = [tversky_crossentropy]
        self.model.train(self.sample_list, epochs=1)
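# The loss/metric hooks exercised above accept any callable with the Keras
# (y_true, y_pred) signature, so a hand-written metric can be dropped in the
# same way. A minimal sketch follows; the helper name is hypothetical, and
# it assumes the tensorflow.keras backend used by the built-in metrics.
from tensorflow.keras import backend as K

def foreground_fraction(y_true, y_pred):
    # Hypothetical example metric: mean predicted probability mass on
    # non-background classes (channel 0 is assumed to be background).
    return K.mean(1.0 - y_pred[..., 0])

# Used exactly like the built-ins above, e.g. inside metricTEST:
#     self.model.metrics = [foreground_fraction]
#     self.model.train(self.sample_list, epochs=1)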
def test_MODEL_validation3D(self):
    nn = Neural_Network(preprocessor=self.pp3D)
    history = nn.evaluate(self.sample_list3D[0:4], self.sample_list3D[4:6],
                          epochs=3)
    self.assertIsNotNone(history)
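# A stricter variant could inspect the returned object's contents; a minimal
# sketch, assuming evaluate() passes the Keras History through unchanged
# (which this file does not itself confirm):
def test_MODEL_validation3D_history(self):
    nn = Neural_Network(preprocessor=self.pp3D)
    history = nn.evaluate(self.sample_list3D[0:4], self.sample_list3D[4:6],
                          epochs=3)
    self.assertIn("loss", history.history)
    self.assertIn("val_loss", history.history)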
def test_MODEL_training3D(self):
    nn = Neural_Network(preprocessor=self.pp3D)
    nn.train(self.sample_list3D, epochs=3)
def test_MODEL_resetWeights(self):
    nn = Neural_Network(preprocessor=self.pp3D)
    nn.reset_weights()
def test_MODEL_loading(self):
    nn = Neural_Network(preprocessor=self.pp3D)
    model_path = os.path.join(self.tmp_dir3D.name, "my_model.hdf5")
    nn.dump(model_path)
    nn_new = Neural_Network(preprocessor=self.pp3D)
    nn_new.load(model_path)
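# The load test above only checks that load() does not raise; a stricter
# round-trip check could compare weights before and after reloading. A
# minimal sketch, assuming the underlying Keras model stays reachable via
# the .model attribute asserted in test_MODEL_create:
def test_MODEL_loading_roundtrip(self):
    nn = Neural_Network(preprocessor=self.pp3D)
    model_path = os.path.join(self.tmp_dir3D.name, "my_model.hdf5")
    nn.dump(model_path)
    nn_new = Neural_Network(preprocessor=self.pp3D)
    nn_new.load(model_path)
    for w_old, w_new in zip(nn.model.get_weights(),
                            nn_new.model.get_weights()):
        np.testing.assert_array_almost_equal(w_old, w_new)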
def test_MODEL_storage(self):
    nn = Neural_Network(preprocessor=self.pp3D)
    model_path = os.path.join(self.tmp_dir3D.name, "my_model.hdf5")
    nn.dump(model_path)
    self.assertTrue(os.path.exists(model_path))
def test_ARCHITECTURES_UNET_standard(self):
    model2D = Neural_Network(self.pp2D, architecture=UNet_standard())
    model2D.predict(self.sample_list2D)
    model3D = Neural_Network(self.pp3D, architecture=UNet_standard())
    model3D.predict(self.sample_list3D)
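# The architecture test above only exercises predict(); a slightly stronger
# check, reusing the sample_loader pattern from test_MODEL_prediction3D,
# would assert that predictions were actually written back for each sample:
def test_ARCHITECTURES_UNET_standard_predictions(self):
    model3D = Neural_Network(self.pp3D, architecture=UNet_standard())
    model3D.predict(self.sample_list3D)
    for index in self.sample_list3D:
        sample = self.data_io3D.sample_loader(index, load_seg=True,
                                              load_pred=True)
        self.assertIsNotNone(sample.pred_data)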