Example #1
 def test_DATAGENERATOR_runTraining(self):
     pp_fi = Preprocessor(self.data_io,
                          batch_size=4,
                          data_aug=self.data_aug,
                          prepare_subfunctions=False,
                          prepare_batches=False,
                          analysis="fullimage")
     data_gen = DataGenerator(self.sample_list,
                              pp_fi,
                              training=True,
                              shuffle=False,
                              iterations=None)
     self.assertEqual(len(data_gen), 3)
     for batch in data_gen:
         self.assertIsInstance(batch, tuple)
         self.assertEqual(batch[0].shape, (4, 16, 16, 16, 1))
         self.assertEqual(batch[1].shape, (4, 16, 16, 16, 3))
     pp_pc = Preprocessor(self.data_io,
                          batch_size=3,
                          data_aug=self.data_aug,
                          prepare_subfunctions=False,
                          prepare_batches=False,
                          patch_shape=(5, 5, 5),
                          analysis="patchwise-crop")
     data_gen = DataGenerator(self.sample_list,
                              pp_pc,
                              training=True,
                              shuffle=False,
                              iterations=None)
     self.assertEqual(len(data_gen), 4)
     for batch in data_gen:
         self.assertIsInstance(batch, tuple)
         self.assertEqual(batch[0].shape, (3, 5, 5, 5, 1))
         self.assertEqual(batch[1].shape, (3, 5, 5, 5, 3))
Example #2
 def test_SUBFUNCTIONS_preprocessing(self):
     ds = dict()
     for i in range(0, 10):
         img = np.random.rand(16, 16, 16) * 255
         img = img.astype(int)
         seg = np.random.rand(16, 16, 16) * 3
         seg = seg.astype(int)
         sample = (img, seg)
         ds["TEST.sample_" + str(i)] = sample
     io_interface = Dictionary_interface(ds, classes=3, three_dim=True)
     self.tmp_dir = tempfile.TemporaryDirectory(prefix="tmp.miscnn.")
     tmp_batches = os.path.join(self.tmp_dir.name, "batches")
     dataio = Data_IO(io_interface, input_path="", output_path="",
                      batch_path=tmp_batches, delete_batchDir=False)
     sf = [Resize((8,8,8)), Normalization(), Clipping(min=-1.0, max=0.0)]
     pp = Preprocessor(dataio, data_aug=None, batch_size=1,
                       prepare_subfunctions=False, analysis="fullimage",
                       subfunctions=sf)
     sample_list = dataio.get_indiceslist()
     batches = pp.run(sample_list, training=True, validation=False)
     for i in range(0, 10):
         img = batches[i][0]
         seg = batches[i][1]
         self.assertEqual(img.shape, (1,8,8,8,1))
         self.assertEqual(seg.shape, (1,8,8,8,3))
         self.assertTrue(np.min(img) >= -1.0 and np.max(img) <= 0.0)
     self.tmp_dir.cleanup()
Example #3
 def setUpClass(self):
     np.random.seed(1234)
     # Create 2D imaging and segmentation data set
     self.dataset2D = dict()
     for i in range(0, 6):
         img = np.random.rand(16, 16) * 255
         self.img = img.astype(int)
         seg = np.random.rand(16, 16) * 3
         self.seg = seg.astype(int)
         self.dataset2D["TEST.sample_" + str(i)] = (self.img, self.seg)
     # Initialize Dictionary IO Interface
     io_interface2D = Dictionary_interface(self.dataset2D,
                                           classes=3,
                                           three_dim=False)
     # Initialize temporary directory
     self.tmp_dir2D = tempfile.TemporaryDirectory(prefix="tmp.miscnn.")
     tmp_batches = os.path.join(self.tmp_dir2D.name, "batches")
     # Initialize Data IO
     self.data_io2D = Data_IO(io_interface2D,
                              input_path=os.path.join(self.tmp_dir2D.name),
                              output_path=os.path.join(self.tmp_dir2D.name),
                              batch_path=tmp_batches,
                              delete_batchDir=False)
     # Initialize Preprocessor
     self.pp2D = Preprocessor(self.data_io2D,
                              batch_size=2,
                              data_aug=None,
                              analysis="fullimage")
     # Get sample list
     self.sample_list2D = self.data_io2D.get_indiceslist()
     # Create 3D imaging and segmentation data set
     self.dataset3D = dict()
     for i in range(0, 6):
         img = np.random.rand(16, 16, 16) * 255
         self.img = img.astype(int)
         seg = np.random.rand(16, 16, 16) * 3
         self.seg = seg.astype(int)
         self.dataset3D["TEST.sample_" + str(i)] = (self.img, self.seg)
     # Initialize Dictionary IO Interface
     io_interface3D = Dictionary_interface(self.dataset3D,
                                           classes=3,
                                           three_dim=True)
     # Initialize temporary directory
     self.tmp_dir3D = tempfile.TemporaryDirectory(prefix="tmp.miscnn.")
     tmp_batches = os.path.join(self.tmp_dir3D.name, "batches")
     # Initialize Data IO
     self.data_io3D = Data_IO(io_interface3D,
                              input_path=os.path.join(self.tmp_dir3D.name),
                              output_path=os.path.join(self.tmp_dir3D.name),
                              batch_path=tmp_batches,
                              delete_batchDir=False)
     # Initialize Preprocessor
     self.pp3D = Preprocessor(self.data_io3D,
                              batch_size=2,
                              data_aug=None,
                              analysis="fullimage")
     # Get sample list
     self.sample_list3D = self.data_io3D.get_indiceslist()
Example #4
 def test_PREPROCESSOR_BASE_dataaugmentation(self):
     sample_list = self.data_io3D.get_indiceslist()
     pp = Preprocessor(self.data_io3D, batch_size=1, analysis="fullimage")
     batches = pp.run(sample_list[8:10], training=False, validation=False)
     self.assertEqual(len(batches), 2)
     self.assertEqual(batches[0][0].shape, (1, 16, 16, 16, 1))
     self.assertIsNone(batches[0][1])
     sample = self.data_io3D.sample_loader(sample_list[8], load_seg=False)
     self.assertFalse(np.array_equal(batches[0][0], sample.img_data))
Example #5
 def test_PREPROCESSOR_BASE_prepareSubfunctions(self):
     sample_list = self.data_io3D.get_indiceslist()
     pp = Preprocessor(self.data_io3D,
                       batch_size=1,
                       analysis="fullimage",
                       prepare_subfunctions=True)
     pp.run_subfunctions(sample_list[0:8], training=True)
     pp.run(sample_list[0:8], training=True, validation=False)
     pp.run_subfunctions(sample_list[8:10], training=False)
     pp.run(sample_list[8:10], training=False, validation=False)
Example #6
 def test_PREPROCESSOR_BASE_create(self):
     with self.assertRaises(Exception):
         Preprocessor()
     Preprocessor(self.data_io3D, batch_size=1, analysis="fullimage")
     Preprocessor(self.data_io3D,
                  batch_size=1,
                  analysis="patchwise-crop",
                  patch_shape=(16, 16, 16))
     Preprocessor(self.data_io3D,
                  batch_size=1,
                  analysis="patchwise-grid",
                  patch_shape=(16, 16, 16),
                  data_aug=None)
Example #7
 def test_PREPROCESSOR_postprocessing_(self):
     sample_list = self.data_io3D.get_indiceslist()
     pp = Preprocessor(self.data_io3D,
                       batch_size=1,
                       analysis="fullimage",
                       data_aug=None)
     batches = pp.run(sample_list[0:3], training=True, validation=False)
     for i in range(0, 3):
         pred_postprec = pp.postprocessing(sample_list[i], batches[i][1])
         self.assertEqual(pred_postprec.shape, (16, 16, 16))
         sam = self.data_io3D.sample_loader(sample_list[i], load_seg=True)
         self.assertTrue(
             np.array_equal(pred_postprec,
                            np.reshape(sam.seg_data, (16, 16, 16))))
Example #8
 def test_PREPROCESSOR_patchwisecrop_skipBlanks(self):
     sample_list = self.data_io3D.get_indiceslist()
     pp = Preprocessor(self.data_io3D,
                       data_aug=None,
                       batch_size=1,
                       analysis="patchwise-crop",
                       patch_shape=(4, 4, 4))
     pp.patchwise_skip_blanks = True
     batches = pp.run(sample_list[0:3], training=True, validation=False)
     sample = self.data_io3D.sample_loader(sample_list[0], load_seg=True)
     sample.seg_data = to_categorical(sample.seg_data,
                                      num_classes=sample.classes)
     ready_data = pp.analysis_patchwise_crop(sample, data_aug=False)
     self.assertEqual(len(ready_data), 1)
     self.assertEqual(ready_data[0][0].shape, (4, 4, 4, 1))
     self.assertEqual(ready_data[0][1].shape, (4, 4, 4, 3))
Example #9
 def test_PREPROCESSOR_BASE_prepareBatches(self):
     sample_list = self.data_io3D.get_indiceslist()
     pp = Preprocessor(self.data_io3D,
                       batch_size=1,
                       analysis="fullimage",
                       prepare_batches=True)
     batch_pointer = pp.run(sample_list[0:8],
                            training=True,
                            validation=False)
     self.assertEqual(batch_pointer, 7)
     tmp_batches = os.path.join(self.tmp_dir3D.name, "batches")
     batch_list = []
     for batch_file in os.listdir(tmp_batches):
         if batch_file.startswith(str(pp.data_io.seed)):
             batch_list.append(batch_file)
     self.assertEqual(len(batch_list), 16)
Example #10
 def test_PREPROCESSOR_BASE_run(self):
     sample_list = self.data_io3D.get_indiceslist()
     pp = Preprocessor(self.data_io3D,
                       data_aug=None,
                       batch_size=1,
                       analysis="fullimage")
     batches = pp.run(sample_list[8:10], training=False, validation=False)
     self.assertEqual(len(batches), 2)
     self.assertEqual(batches[0][0].shape, (1, 16, 16, 16, 1))
     self.assertIsNone(batches[0][1])
     batches = pp.run(sample_list[0:8], training=True, validation=False)
     self.assertEqual(batches[0][0].shape, (1, 16, 16, 16, 1))
     self.assertEqual(batches[0][1].shape, (1, 16, 16, 16, 3))
     batches = pp.run(sample_list[0:8], training=True, validation=True)
     self.assertEqual(batches[0][0].shape, (1, 16, 16, 16, 1))
     self.assertEqual(batches[0][1].shape, (1, 16, 16, 16, 3))
Example #11
 def setUpClass(self):
     np.random.seed(1234)
     # Create 2D imaging and segmentation data set
     self.dataset = dict()
     for i in range(0, 2):
         img = np.random.rand(16, 16) * 255
         self.img = img.astype(int)
         seg = np.random.rand(16, 16) * 2
         self.seg = seg.astype(int)
         self.dataset["TEST.sample_" + str(i)] = (self.img, self.seg)
     # Initialize Dictionary IO Interface
     io_interface = Dictionary_interface(self.dataset, classes=3,
                                         three_dim=False)
     # Initialize temporary directory
     self.tmp_dir = tempfile.TemporaryDirectory(prefix="tmp.miscnn.")
     tmp_batches = os.path.join(self.tmp_dir.name, "batches")
     # Initialize Data IO
     self.data_io = Data_IO(io_interface,
                            input_path=os.path.join(self.tmp_dir.name),
                            output_path=os.path.join(self.tmp_dir.name),
                            batch_path=tmp_batches, delete_batchDir=False)
     # Initialize Preprocessor
     self.pp = Preprocessor(self.data_io, batch_size=1,
                            data_aug=None, analysis="fullimage")
     # Initialize Neural Network
     self.model = Neural_Network(self.pp)
     # Get sample list
     self.sample_list = self.data_io.get_indiceslist()
Example #12
 def test_DATAGENERATOR_iterations(self):
     pp_fi = Preprocessor(self.data_io,
                          batch_size=1,
                          data_aug=None,
                          prepare_subfunctions=False,
                          prepare_batches=False,
                          analysis="fullimage")
     data_gen = DataGenerator(self.sample_list,
                              pp_fi,
                              training=True,
                              shuffle=False,
                              iterations=None)
     self.assertEqual(10, len(data_gen))
     data_gen = DataGenerator(self.sample_list,
                              pp_fi,
                              training=True,
                              shuffle=False,
                              iterations=5)
     self.assertEqual(5, len(data_gen))
     data_gen = DataGenerator(self.sample_list,
                              pp_fi,
                              training=True,
                              shuffle=False,
                              iterations=50)
     self.assertEqual(50, len(data_gen))
     data_gen = DataGenerator(self.sample_list,
                              pp_fi,
                              training=True,
                              shuffle=False,
                              iterations=100)
     self.assertEqual(100, len(data_gen))
Example #13
    def test_EVALUATION_leaveOneOut(self):
        # Create 3D imaging and segmentation data set
        self.dataset3D = dict()
        for i in range(0, 6):
            img = np.random.rand(16, 16, 16) * 255
            self.img = img.astype(int)
            seg = np.random.rand(16, 16, 16) * 3
            self.seg = seg.astype(int)
            self.dataset3D["TEST.sample_" + str(i)] = (self.img, self.seg)
        # Initialize Dictionary IO Interface
        io_interface3D = Dictionary_interface(self.dataset3D, classes=3,
                                              three_dim=True)
        # Initialize temporary directory
        self.tmp_dir3D = tempfile.TemporaryDirectory(prefix="tmp.miscnn.")
        tmp_batches = os.path.join(self.tmp_dir3D.name, "batches")
        # Initialize Data IO
        self.data_io3D = Data_IO(io_interface3D,
                                 input_path=os.path.join(self.tmp_dir3D.name),
                                 output_path=os.path.join(self.tmp_dir3D.name),
                                 batch_path=tmp_batches, delete_batchDir=False)
        # Initialize Preprocessor
        self.pp3D = Preprocessor(self.data_io3D, batch_size=2,
                                 data_aug=None, analysis="fullimage")
        # Initialize Neural Network
        model = Neural_Network(self.pp3D)
        # Get sample list
        self.sample_list3D = self.data_io3D.get_indiceslist()

        eval_path = os.path.join(self.tmp_dir3D.name, "evaluation")
        leave_one_out(self.sample_list3D, model, epochs=3, iterations=None,
                      evaluation_path=eval_path, callbacks=[])
        self.assertTrue(os.path.exists(eval_path))
        # Cleanup stuff
        self.tmp_dir3D.cleanup()
Example #14
 def test_SUBFUNCTIONS_postprocessing(self):
     ds = dict()
     for i in range(0, 10):
         img = np.random.rand(16, 16, 16) * 255
         img = img.astype(int)
         seg = np.random.rand(16, 16, 16) * 3
         seg = seg.astype(int)
         sample = (img, seg)
         ds["TEST.sample_" + str(i)] = sample
     io_interface = Dictionary_interface(ds, classes=3, three_dim=True)
     self.tmp_dir = tempfile.TemporaryDirectory(prefix="tmp.miscnn.")
     tmp_batches = os.path.join(self.tmp_dir.name, "batches")
     dataio = Data_IO(io_interface,
                      input_path="",
                      output_path="",
                      batch_path=tmp_batches,
                      delete_batchDir=False)
     sf = [Resize((9, 9, 9)), Normalization(), Clipping(min=-1.0, max=0.0)]
     pp = Preprocessor(dataio,
                       batch_size=1,
                       prepare_subfunctions=False,
                       analysis="patchwise-grid",
                       subfunctions=sf,
                       patch_shape=(4, 4, 4))
     sample_list = dataio.get_indiceslist()
     for index in sample_list:
         sample = dataio.sample_loader(index)
         for func in pp.subfunctions:
             func.preprocessing(sample, training=False)
         pp.cache["shape_" + str(index)] = sample.img_data.shape
         sample.seg_data = np.random.rand(9, 9, 9) * 3
         sample.seg_data = sample.seg_data.astype(int)
         sample.seg_data = to_categorical(sample.seg_data, num_classes=3)
         data_patches = pp.analysis_patchwise_grid(sample,
                                                   training=True,
                                                   data_aug=False)
         seg_list = []
         for i in range(0, len(data_patches)):
             seg_list.append(data_patches[i][1])
         seg = np.stack(seg_list, axis=0)
         self.assertEqual(seg.shape, (27, 4, 4, 4, 3))
         pred = pp.postprocessing(sample, seg)
         self.assertEqual(pred.shape, (16, 16, 16))
     self.tmp_dir.cleanup()
Example #15
 def test_PREPROCESSOR_fullimage_3D(self):
     sample_list = self.data_io3D.get_indiceslist()
     pp = Preprocessor(self.data_io3D,
                       data_aug=None,
                       batch_size=2,
                       analysis="fullimage")
     batches = pp.run(sample_list[0:3], training=True, validation=False)
     self.assertEqual(len(batches), 2)
     batches = pp.run(sample_list[0:1], training=False, validation=False)
     self.assertEqual(len(batches), 1)
     sample = self.data_io3D.sample_loader(sample_list[0], load_seg=True)
     sample.seg_data = to_categorical(sample.seg_data,
                                      num_classes=sample.classes)
     ready_data = pp.analysis_fullimage(sample,
                                        data_aug=False,
                                        training=True)
     self.assertEqual(len(ready_data), 1)
     self.assertEqual(ready_data[0][0].shape, (16, 16, 16, 1))
     self.assertEqual(ready_data[0][1].shape, (16, 16, 16, 3))
Example #16
 def test_MODEL_predictionAugmentated_2D(self):
     data_aug = Data_Augmentation()
     pp = Preprocessor(self.data_io2D,
                       batch_size=2,
                       data_aug=data_aug,
                       analysis="fullimage")
     nn = Neural_Network(preprocessor=pp)
     for sample in self.sample_list2D:
         predictions = nn.predict_augmentated(sample)
         self.assertEqual(len(predictions), 2)
         for pred in predictions:
             self.assertEqual(pred.shape, (16, 16, 3))
Example #17
 def test_SUBFUNCTIONS_prepare_MULTIPROCESSING(self):
     ds = dict()
     for i in range(0, 5):
         img = np.random.rand(16, 16, 16) * 255
         img = img.astype(int)
         seg = np.random.rand(16, 16, 16) * 3
         seg = seg.astype(int)
         sample = (img, seg)
         ds["TEST.sample_" + str(i)] = sample
     io_interface = Dictionary_interface(ds, classes=3, three_dim=True)
     self.tmp_dir = tempfile.TemporaryDirectory(prefix="tmp.miscnn.")
     tmp_batches = os.path.join(self.tmp_dir.name, "batches")
     dataio = Data_IO(io_interface, input_path="", output_path="",
                      batch_path=tmp_batches, delete_batchDir=False)
     sf = [Resize((8,8,8)), Normalization(), Clipping(min=-1.0, max=0.0)]
     pp = Preprocessor(dataio, batch_size=1, prepare_subfunctions=True,
                       analysis="fullimage", subfunctions=sf,
                       use_multiprocessing=True)
     pp.mp_threads = 4
     sample_list = dataio.get_indiceslist()
     pp.run_subfunctions(sample_list, training=True)
     batches = pp.run(sample_list, training=True, validation=False)
     self.assertEqual(len(os.listdir(tmp_batches)), 5)
     for i in range(0, 5):
         file_prepared_subfunctions = os.path.join(tmp_batches,
                 str(pp.data_io.seed) + ".TEST.sample_" + str(i) + ".pickle")
         self.assertTrue(os.path.exists(file_prepared_subfunctions))
         img = batches[i][0]
         seg = batches[i][1]
         self.assertIsNotNone(img)
         self.assertIsNotNone(seg)
         self.assertEqual(img.shape, (1,8,8,8,1))
         self.assertEqual(seg.shape, (1,8,8,8,3))
Example #18
 def test_MODEL_predictionAugmentated_3D(self):
     data_aug = Data_Augmentation()
     pp = Preprocessor(self.data_io3D,
                       batch_size=1,
                       patch_shape=(8, 8, 8),
                       data_aug=data_aug,
                       analysis="patchwise-crop")
     nn = Neural_Network(preprocessor=pp,
                         architecture=UNet_standard(depth=2))
     for sample in self.sample_list3D:
         predictions = nn.predict_augmentated(sample)
         self.assertEqual(len(predictions), 3)
         for pred in predictions:
             self.assertEqual(pred.shape, (16, 16, 16, 3))
Example #19
 def test_DATAGENERATOR_create(self):
     pp_fi = Preprocessor(self.data_io,
                          batch_size=4,
                          data_aug=self.data_aug,
                          prepare_subfunctions=False,
                          prepare_batches=False,
                          analysis="fullimage")
     data_gen = DataGenerator(self.sample_list,
                              pp_fi,
                              training=False,
                              validation=False,
                              shuffle=False,
                              iterations=None)
     self.assertIsInstance(data_gen, DataGenerator)
Example #20
 def test_DATAGENERATOR_shuffle(self):
     pp_fi = Preprocessor(self.data_io,
                          batch_size=1,
                          data_aug=None,
                          prepare_subfunctions=False,
                          prepare_batches=False,
                          analysis="fullimage")
     data_gen = DataGenerator(self.sample_list,
                              pp_fi,
                              training=True,
                              shuffle=False,
                              iterations=None)
     list_ordered = []
     for batch in data_gen:
         list_ordered.append(batch)
     for batch in data_gen:
         list_ordered.append(batch)
     data_gen = DataGenerator(self.sample_list,
                              pp_fi,
                              training=True,
                              shuffle=True,
                              iterations=None)
     list_shuffled = []
     for batch in data_gen:
         list_shuffled.append(batch)
     data_gen.on_epoch_end()
     for batch in data_gen:
         list_shuffled.append(batch)
     size = len(data_gen)
     o_counter = 0
     s_counter = 0
     for i in range(0, size):
         oa_img = list_ordered[i][0]
         oa_seg = list_ordered[i][1]
         ob_img = list_ordered[i + size][0]
         ob_seg = list_ordered[i + size][1]
         sa_img = list_shuffled[i][0]
         sa_seg = list_shuffled[i][1]
         sb_img = list_shuffled[i + size][0]
         sb_seg = list_shuffled[i + size][1]
         if np.array_equal(oa_img, ob_img) and \
             np.array_equal(oa_seg, ob_seg):
             o_counter += 1
         if not np.array_equal(sa_img, sb_img) and \
             not np.array_equal(sa_seg, sb_seg):
             s_counter += 1
     o_ratio = o_counter / size
     self.assertTrue(o_ratio == 1.0)
     s_ratio = s_counter / size
     self.assertTrue(1.0 >= s_ratio and s_ratio >= 0.5)
Example #21
 def test_DATAGENERATOR_augcyling(self):
     data_aug = Data_Augmentation(cycles=20)
     pp_fi = Preprocessor(self.data_io,
                          batch_size=4,
                          data_aug=data_aug,
                          prepare_subfunctions=False,
                          prepare_batches=False,
                          analysis="fullimage")
     data_gen = DataGenerator(self.sample_list,
                              pp_fi,
                              training=True,
                              shuffle=False,
                              iterations=None)
     self.assertEqual(50, len(data_gen))
Example #22
 def test_DATAGENERATOR_prepareData(self):
     pp_fi = Preprocessor(self.data_io,
                          batch_size=4,
                          data_aug=None,
                          prepare_subfunctions=True,
                          prepare_batches=True,
                          analysis="fullimage")
     data_gen = DataGenerator(self.sample_list,
                              pp_fi,
                              training=True,
                              shuffle=True,
                              iterations=None)
     self.assertEqual(len(data_gen), 3)
     for batch in data_gen:
         self.assertIsInstance(batch, tuple)
         self.assertEqual(batch[0].shape[1:], (16, 16, 16, 1))
         self.assertEqual(batch[1].shape[1:], (16, 16, 16, 3))
         self.assertIn(batch[0].shape[0], [2, 4])
Example #23
 def test_SUBFUNCTIONS_fullrun(self):
     ds = dict()
     for i in range(0, 10):
         img = np.random.rand(16, 16, 16) * 255
         img = img.astype(int)
         seg = np.random.rand(16, 16, 16) * 3
         seg = seg.astype(int)
         sample = (img, seg)
         ds["TEST.sample_" + str(i)] = sample
     io_interface = Dictionary_interface(ds, classes=3, three_dim=True)
     self.tmp_dir = tempfile.TemporaryDirectory(prefix="tmp.miscnn.")
     tmp_batches = os.path.join(self.tmp_dir.name, "batches")
     dataio = Data_IO(io_interface, input_path="", output_path="",
                      batch_path=tmp_batches, delete_batchDir=False)
     sf = [Resize((16,16,16)), Normalization(), Clipping(min=-1.0, max=0.0)]
     pp = Preprocessor(dataio, batch_size=1, prepare_subfunctions=True,
                       analysis="fullimage", subfunctions=sf)
     nn = Neural_Network(preprocessor=pp)
     sample_list = dataio.get_indiceslist()
     nn.predict(sample_list, return_output=True)
Example #24
 def test_DATAGENERATOR_consistency(self):
     pp_fi = Preprocessor(self.data_io,
                          batch_size=1,
                          data_aug=None,
                          prepare_subfunctions=False,
                          prepare_batches=False,
                          analysis="fullimage")
     data_gen = DataGenerator(self.sample_list,
                              pp_fi,
                              training=True,
                              shuffle=False,
                              iterations=None)
     i = 0
     for batch in data_gen:
         sample = self.data_io.sample_loader(self.sample_list[i],
                                             load_seg=True)
         self.assertTrue(np.array_equal(batch[0][0], sample.img_data))
         seg = to_categorical(sample.seg_data, num_classes=3)
         self.assertTrue(np.array_equal(batch[1][0], seg))
         i += 1
Example #25
 def test_DATAGENERATOR_inferenceAug(self):
     data_aug = Data_Augmentation()
     pp_fi = Preprocessor(self.data_io,
                          batch_size=4,
                          data_aug=data_aug,
                          prepare_subfunctions=False,
                          prepare_batches=False,
                          analysis="fullimage")
     data_gen = DataGenerator([self.sample_list[0]],
                              pp_fi,
                              training=False,
                              shuffle=False,
                              iterations=None)
     pred_list_inactive = []
     for batch in data_gen:
         pred_list_inactive.append(batch)
     data_aug.infaug = True
     pred_list_active = []
     for batch in data_gen:
         pred_list_active.append(batch)
     for i in range(0, len(pred_list_active)):
         ba = pred_list_active[i]
         bi = pred_list_inactive[i]
         self.assertFalse(np.array_equal(ba, bi))
Example #26
 def test_PREPROCESSOR_patchwisegrid_3D(self):
     sample_list = self.data_io3D.get_indiceslist()
     pp = Preprocessor(self.data_io3D,
                       data_aug=None,
                       batch_size=1,
                       analysis="patchwise-grid",
                       patch_shape=(4, 4, 4))
     batches = pp.run(sample_list[0:1], training=False, validation=False)
     self.assertEqual(len(batches), 64)
     sample = self.data_io3D.sample_loader(sample_list[0], load_seg=True)
     sample.seg_data = to_categorical(sample.seg_data,
                                      num_classes=sample.classes)
     pp = Preprocessor(self.data_io3D,
                       data_aug=None,
                       batch_size=1,
                       analysis="patchwise-grid",
                       patch_shape=(5, 5, 5))
     ready_data = pp.analysis_patchwise_grid(sample,
                                             data_aug=False,
                                             training=True)
     self.assertEqual(len(ready_data), 64)
     self.assertEqual(ready_data[0][0].shape, (5, 5, 5, 1))
     self.assertEqual(ready_data[0][1].shape, (5, 5, 5, 3))
Example #27
# Let's test whether the NIfTI slicer interface works as we want
# and output the image and segmentation shape of a random slice
sample = data_io.sample_loader("case_00002:#:42", load_seg=True)
print(sample.img_data.shape, sample.seg_data.shape)

## As you hopefully noted, the index of a slice is defined
## as the volume file name and the slice number separated by ":#:"
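
## To make the index convention concrete, here is a minimal sketch
## (assuming the loaded "case_00002" volume actually provides these
## slice numbers) that builds such indices by hand and loads a few
## neighboring slices:
for slice_nr in range(40, 45):
    index = "case_00002" + ":#:" + str(slice_nr)
    sliced = data_io.sample_loader(index, load_seg=True)
    print(index, "->", sliced.img_data.shape)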

# Specify subfunctions for preprocessing
## Here we are using the Resize subfunction because many 2D models
## expect a specific input shape (e.g. DenseNet for classification)
sf = [Resize(new_shape=(224, 224))]

# Initialize the Preprocessor class
pp = Preprocessor(data_io, data_aug=None, batch_size=1, subfunctions=sf,
                  prepare_subfunctions=True, prepare_batches=False,
                  analysis="fullimage")
## We are using fullimage analysis because a 2D image easily fits
## completely into GPU memory

# Initialize the neural network model
model = Neural_Network(preprocessor=pp)

# Start the fitting on some slices
model.train(samples_list[30:50], epochs=3, iterations=10, callbacks=[])

# Predict a generic slice with direct output
pred = model.predict(["case_00002:#:42"], return_output=True)
print(np.asarray(pred).shape)
## Be aware that the direct prediction output has an additional batch axis
Example #28
    def run(self):
        # Create sample list for miscnn
        util.create_sample_list(self.input_dir)

        # Initialize Data IO Interface for NIfTI data
        interface = NIFTI_interface(channels=1, classes=2)

        # Create Data IO object to load and write samples in the file structure
        data_io = Data_IO(interface,
                          input_path=self.input_dir,
                          delete_batchDir=False)

        # Access all available samples in our file structure
        sample_list = data_io.get_indiceslist()
        sample_list.sort()

        # Create a resampling Subfunction to voxel spacing 1.58 x 1.58 x 2.70
        sf_resample = Resampling((1.58, 1.58, 2.70))

        # Create a pixel value normalization Subfunction for z-score scaling
        sf_zscore = Normalization(mode="z-score")

        # Create a pixel value normalization Subfunction to scale between 0-255
        sf_normalize = Normalization(mode="grayscale")

        # Assemble Subfunction classes into a list
        sf = [sf_normalize, sf_resample, sf_zscore]

        # Create and configure the Preprocessor class
        pp = Preprocessor(data_io,
                          batch_size=2,
                          subfunctions=sf,
                          prepare_subfunctions=True,
                          prepare_batches=False,
                          analysis="patchwise-crop",
                          patch_shape=(160, 160, 80))

        # Adjust the patch overlap for predictions
        pp.patchwise_overlap = (80, 80, 30)
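
        ## For intuition: the grid slicer used for prediction advances by
        ## patch_shape minus overlap per axis, so these settings step
        ## (80, 80, 50) voxels between neighboring patches. A minimal
        ## sketch of that arithmetic (pp.patch_shape is assumed to hold
        ## the tuple passed above):
        step = [p - o for p, o in zip(pp.patch_shape, pp.patchwise_overlap)]
        print("Patch step per axis:", step)  # -> [80, 80, 50]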

        # Initialize the Architecture
        unet_standard = Architecture(depth=4,
                                     activation="softmax",
                                     batch_normalization=True)

        # Create the Neural Network model
        model = Neural_Network(
            preprocessor=pp,
            architecture=unet_standard,
            loss=tversky_crossentropy,
            metrics=[tversky_loss, dice_soft, dice_crossentropy],
            batch_queue_size=3,
            workers=1,
            learninig_rate=0.001)

        # Load best model weights during fitting
        model.load(f'{self.model_dir}{self.model_name}.hdf5')

        # Obtain training and validation data set ----- CHANGE BASED ON PRED/TRAIN
        images, _ = load_disk2fold(f'{self.input_dir}sample_list.json')

        print('\n\nRunning automatic segmentation on samples...\n')
        print(f'Segmenting images: {images}')

        # Compute predictions
        self.predictions = model.predict(images)

        # Delete folder created by miscnn
        shutil.rmtree('batches/')
Example #29
# Create a clipping Subfunction to the lung window of CTs (-1250 and 250)
sf_clipping = Clipping(min=-1250, max=250)
# Create a pixel value normalization Subfunction to scale between 0-255
sf_normalize = Normalization(mode="grayscale")
# Create a resampling Subfunction to voxel spacing 1.58 x 1.58 x 2.70
sf_resample = Resampling((1.58, 1.58, 2.70))
# Create a pixel value normalization Subfunction for z-score scaling
sf_zscore = Normalization(mode="z-score")

# Assemble Subfunction classes into a list
sf = [sf_clipping, sf_normalize, sf_resample, sf_zscore]

# Create and configure the Preprocessor class
pp = Preprocessor(data_io, data_aug=data_aug, batch_size=2, subfunctions=sf,
                  prepare_subfunctions=True, prepare_batches=False,
                  analysis="fullimage", patch_shape=(160, 160, 80))
# Adjust the patch overlap for predictions
pp.patchwise_overlap = (80, 80, 40)


# Initialize Keras Data Generator for generating batches
from miscnn.neural_network.data_generator import DataGenerator
dataGen = DataGenerator(sample_list, pp, training=False, validation=False,
                        shuffle=False)

x = []
y = []
z = []
for batch in dataGen:
    print("Batch:", batch.shape)
    x.append(batch.shape[1])
Example #30
# Create a clipping Subfunction to the lung window of CTs (-1250 and 250)
sf_clipping = Clipping(min=-1250, max=250)
# Create a pixel value normalization Subfunction to scale between 0-255
sf_normalize = Normalization(mode="grayscale")
# Create a resampling Subfunction to voxel spacing 1.58 x 1.58 x 2.70
sf_resample = Resampling((1.58, 1.58, 2.70))
# Create a pixel value normalization Subfunction for z-score scaling
sf_zscore = Normalization(mode="z-score")

# Assemble Subfunction classes into a list
sf = [sf_clipping, sf_normalize, sf_resample, sf_zscore]

# Create and configure the Preprocessor class
pp = Preprocessor(data_io,
                  data_aug=data_aug,
                  batch_size=2,
                  subfunctions=sf,
                  prepare_subfunctions=True,
                  prepare_batches=False,
                  analysis="patchwise-crop",
                  patch_shape=(160, 160, 80),
                  use_multiprocessing=True)
# Adjust the patch overlap for predictions
pp.patchwise_overlap = (80, 80, 30)

# Initialize the Architecture
unet_standard = Architecture(depth=4,
                             activation="softmax",
                             batch_normalization=True)

# Create the Neural Network model
model = Neural_Network(preprocessor=pp,
                       architecture=unet_standard,