Example #1
def test_MODEL_prediction3D(self):
    nn = Neural_Network(preprocessor=self.pp3D)
    nn.predict(self.sample_list3D)
    for index in self.sample_list3D:
        sample = self.data_io3D.sample_loader(index, load_seg=True,
                                              load_pred=True)
        self.assertIsNotNone(sample.pred_data)
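These test methods rely on fixtures (self.pp2D/self.pp3D, self.data_io3D,
self.sample_list3D) built elsewhere in the test class. A minimal sketch of
what such a 3D setup might look like, modeled on the Dictionary_interface
pattern from Example #3 below; the fixture shapes and import paths are
assumptions, not MIScnn's actual test code:

import numpy as np
from miscnn import Data_IO, Preprocessor
from miscnn.data_loading.interfaces import Dictionary_interface

# Build a tiny random 3D dataset in memory (shapes are illustrative)
ds = {"TEST.sample_0": (np.random.rand(16, 16, 16) * 255,
                        np.random.randint(0, 3, (16, 16, 16)))}
io_interface = Dictionary_interface(ds, classes=3, three_dim=True)
data_io3D = Data_IO(io_interface, input_path="", output_path="",
                    batch_path="tmp.batches", delete_batchDir=False)
pp3D = Preprocessor(data_io3D, batch_size=1, analysis="fullimage")
sample_list3D = data_io3D.get_indiceslist()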
Example #2
def test_MODEL_prediction_activationOutput(self):
    nn = Neural_Network(preprocessor=self.pp2D)
    pred_list = nn.predict(self.sample_list2D, return_output=True,
                           activation_output=True)
    for pred in pred_list:
        self.assertIsNotNone(pred)
        self.assertEqual(pred.shape, (16,16,3))
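With activation_output=True, predict returns the raw activation (probability)
maps, so each 2D prediction keeps a class axis: (x, y, classes) = (16, 16, 3)
here, instead of an argmax'ed segmentation map. A short NumPy illustration of
the difference (pure NumPy; the random array is a stand-in for one map):

import numpy as np

pred_probs = np.random.rand(16, 16, 3)                 # fake activation map
pred_probs /= pred_probs.sum(axis=-1, keepdims=True)   # normalize per pixel
class_map = np.argmax(pred_probs, axis=-1)             # collapse to label map
assert class_map.shape == (16, 16)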
Example #3
def test_SUBFUNCTIONS_fullrun(self):
    ds = dict()
    for i in range(0, 10):
        img = np.random.rand(16, 16, 16) * 255
        img = img.astype(int)
        seg = np.random.rand(16, 16, 16) * 3
        seg = seg.astype(int)
        sample = (img, seg)
        ds["TEST.sample_" + str(i)] = sample
    io_interface = Dictionary_interface(ds, classes=3, three_dim=True)
    self.tmp_dir = tempfile.TemporaryDirectory(prefix="tmp.miscnn.")
    tmp_batches = os.path.join(self.tmp_dir.name, "batches")
    dataio = Data_IO(io_interface, input_path="", output_path="",
                     batch_path=tmp_batches, delete_batchDir=False)
    sf = [Resize((16,16,16)), Normalization(), Clipping(min=-1.0, max=0.0)]
    pp = Preprocessor(dataio, batch_size=1, prepare_subfunctions=True,
                      analysis="fullimage", subfunctions=sf)
    nn = Neural_Network(preprocessor=pp)
    sample_list = dataio.get_indiceslist()
    nn.predict(sample_list, return_output=True)
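The test creates its own TemporaryDirectory for the batch files; a matching
cleanup step (e.g. in tearDown) would look like the following, using the
standard tempfile API:

    self.tmp_dir.cleanup()   # remove the temporary batch directory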
Example #4
# Initialize the Preprocessor class
pp = Preprocessor(data_io, data_aug=None, batch_size=1, subfunctions=sf,
                  prepare_subfunctions=True, prepare_batches=False,
                  analysis="fullimage")
## We are using fullimage analysis because a 2D image can easily fit
## completely into GPU memory

# Initialize the neural network model
model = Neural_Network(preprocessor=pp)

# Start the fitting on some slices
model.train(samples_list[30:50], epochs=3, iterations=10, callbacks=[])

# Predict a generic slice with direct output
pred = model.predict(["case_00002:#:42"], return_output=True)
print(np.asarray(pred).shape)
## Be aware that the direct prediction output has an additional batch axis

# Predict a generic slice and save it as a NumPy pickle on disk
model.predict(["case_00002:#:42"], return_output=False)

# Load the slice via the sample loader, now also including the new prediction
sample = data_io.sample_loader("case_00002:#:42", load_seg=True, load_pred=True)
print(sample.img_data.shape, sample.seg_data.shape, sample.pred_data.shape)


## Final words
# I hope that this usage example / tutorial on the new NIfTI slicer IO
# interface helps you understand how it works and how you can use it
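The sample index "case_00002:#:42" appears to encode the volume ID and the
slice number, joined by the ":#:" separator used throughout this example;
the variable names below are assumptions:

volume_id, slice_no = "case_00002:#:42".split(":#:")
print(volume_id, slice_no)   # case_00002 42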
Example #5
def test_ARCHITECTURES_UNET_standard(self):
    model2D = Neural_Network(self.pp2D, architecture=UNet_standard())
    model2D.predict(self.sample_list2D)
    model3D = Neural_Network(self.pp3D, architecture=UNet_standard())
    model3D.predict(self.sample_list3D)
Example #6
                        mode="min")

#-----------------------------------------------------#
#          Run Pipeline for provided CV Fold          #
#-----------------------------------------------------#
# Run pipeline for cross-validation fold
run_fold(fold,
         model,
         epochs=1000,
         iterations=150,
         evaluation_path=path_eval,
         draw_figures=True,
         callbacks=[cb_lr, cb_es, cb_tb, cb_cl, cb_mc],
         save_models=False)

# Dump latest model to disk
model.dump(os.path.join(fold_subdir, "model.latest.hdf5"))

#-----------------------------------------------------#
#           Inference for provided CV Fold            #
#-----------------------------------------------------#
# Load best model weights during fitting
model.load(os.path.join(fold_subdir, "model.best.hdf5"))

# Obtain training and validation data set
training, validation = load_disk2fold(
    os.path.join(fold_subdir, "sample_list.json"))

# Compute predictions
model.predict(validation, return_output=False)
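Both this example and Example #8 begin mid-script, so the definitions of the
callbacks cb_lr, cb_es, cb_tb, cb_cl and cb_mc are missing. A hedged sketch of
what such definitions commonly look like with standard Keras callbacks (the
monitored metric, patience values and paths are assumptions):

import os
from tensorflow.keras.callbacks import (ReduceLROnPlateau, EarlyStopping,
                                        TensorBoard, CSVLogger, ModelCheckpoint)

cb_lr = ReduceLROnPlateau(monitor="loss", factor=0.1, patience=20,
                          mode="min", min_lr=1e-7)
cb_es = EarlyStopping(monitor="loss", patience=100, mode="min")
cb_tb = TensorBoard(log_dir=os.path.join(fold_subdir, "tensorboard"))
cb_cl = CSVLogger(os.path.join(fold_subdir, "logs.csv"), separator=",")
cb_mc = ModelCheckpoint(os.path.join(fold_subdir, "model.best.hdf5"),
                        monitor="loss", save_best_only=True, mode="min")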
Example #7
    def run(self):
        # Create sample list for miscnn
        util.create_sample_list(self.input_dir)

        # Initialize Data IO Interface for NIfTI data
        interface = NIFTI_interface(channels=1, classes=2)

        # Create Data IO object to load and write samples in the file structure
        data_io = Data_IO(interface,
                          input_path=self.input_dir,
                          delete_batchDir=False)

        # Access all available samples in our file structure
        sample_list = data_io.get_indiceslist()
        sample_list.sort()

        # Create a resampling Subfunction to a voxel spacing of 1.58 x 1.58 x 2.70
        sf_resample = Resampling((1.58, 1.58, 2.70))

        # Create a pixel value normalization Subfunction for z-score scaling
        sf_zscore = Normalization(mode="z-score")

        # Create a pixel value normalization Subfunction to scale between 0-255
        sf_normalize = Normalization(mode="grayscale")

        # Assemble Subfunction classes into a list
        sf = [sf_normalize, sf_resample, sf_zscore]

        # Create and configure the Preprocessor class
        pp = Preprocessor(data_io,
                          batch_size=2,
                          subfunctions=sf,
                          prepare_subfunctions=True,
                          prepare_batches=False,
                          analysis="patchwise-crop",
                          patch_shape=(160, 160, 80))

        # Adjust the patch overlap for predictions
        pp.patchwise_overlap = (80, 80, 30)
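        ## Note: an overlap of half the patch shape means neighboring patches
        ## share 50% of their voxels at inference time; the overlapping regions
        ## are merged when the patches are reassembled, which should reduce
        ## patch-border artifacts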

        # Initialize the Architecture
        unet_standard = Architecture(depth=4,
                                     activation="softmax",
                                     batch_normalization=True)

        # Create the Neural Network model
        model = Neural_Network(
            preprocessor=pp,
            architecture=unet_standard,
            loss=tversky_crossentropy,
            metrics=[tversky_loss, dice_soft, dice_crossentropy],
            batch_queue_size=3,
            workers=1,
            learninig_rate=0.001)  # sic: parameter name as spelled in the MIScnn API

        # Load best model weights during fitting
        model.load(f'{self.model_dir}{self.model_name}.hdf5')

        # Obtain training and validation data set ----- CHANGE BASED ON PRED/TRAIN
        images, _ = load_disk2fold(f'{self.input_dir}sample_list.json')

        print('\n\nRunning automatic segmentation on samples...\n')
        print(f'Segmenting images: {images}')

        # Compute predictions; return_output=True makes predict() return the
        # result list instead of only writing the predictions to disk
        self.predictions = model.predict(images, return_output=True)

        # Delete folder created by miscnn
        shutil.rmtree('batches/')
Example #8
                        mode="min")

#-----------------------------------------------------#
#          Run Pipeline for provided CV Fold          #
#-----------------------------------------------------#
# Run pipeline for cross-validation fold
run_fold(fold,
         model,
         epochs=1000,
         iterations=150,
         evaluation_path="evaluation",
         draw_figures=True,
         callbacks=[cb_lr, cb_es, cb_tb, cb_cl, cb_mc],
         save_models=False)

# Dump latest model to disk
model.dump(os.path.join(fold_subdir, "model.latest.hdf5"))

#-----------------------------------------------------#
#           Inference for provided CV Fold            #
#-----------------------------------------------------#
# Load best model weights during fitting
model.load(os.path.join(fold_subdir, "model.best.hdf5"))

# Obtain training and validation data set
training, validation = load_csv2fold(
    os.path.join(fold_subdir, "sample_list.csv"))

# Compute predictions
model.predict(validation, direct_output=False)
Example #9
def test_ARCHITECTURES_UNET_attention_residual(self):
    model2D = Neural_Network(self.pp2D, architecture=UNet_attention_residual())
    model2D.predict(self.sample_list2D)
    model3D = Neural_Network(self.pp3D, architecture=UNet_attention_residual())
    model3D.predict(self.sample_list3D)
Example #10
# Initialize the Preprocessor class
pp = Preprocessor(data_io,
                  batch_size=1,
                  subfunctions=sf,
                  prepare_subfunctions=True,
                  prepare_batches=False,
                  analysis="fullimage")
## We are using fullimage analysis because a 2D image can easily fit
## completely into GPU memory

# Initialize the neural network model
model = Neural_Network(preprocessor=pp)

# Start the fitting on some slices
model.train(samples_list[30:50], epochs=3, iterations=10, callbacks=[])

# Predict a generic slice with direct output
pred = model.predict(["case_00002:#:42"], direct_output=True)
print(np.asarray(pred).shape)
## Be aware that the direct prediction output has an additional batch axis

# Predict a generic slice and save it as a NumPy pickle on disk
model.predict(["case_00002:#:42"], direct_output=False)

# Load the slice via the sample loader, now also including the new prediction
sample = data_io.sample_loader("case_00002:#:42",
                               load_seg=True,
                               load_pred=True)
print(sample.img_data.shape, sample.seg_data.shape, sample.pred_data.shape)

## Final words
# I hope that this usage example / tutorial on the new NIfTI slicer IO
# interface helps you understand how it works and how you can use it
Example #11
# Create a pixel value normalization Subfunction for z-score scaling
sf_zscore = Normalization(mode="z-score")

# Assemble Subfunction classes into a list
sf = [sf_clipping, sf_normalize, sf_resample, sf_zscore]

# Create and configure the Preprocessor class
pp = Preprocessor(data_io, data_aug=None, batch_size=2, subfunctions=sf,
                  prepare_subfunctions=True, prepare_batches=False,
                  analysis="patchwise-crop", patch_shape=(160, 160, 80),
                  use_multiprocessing=True)
# Adjust the patch overlap for predictions
pp.patchwise_overlap = (80, 80, 30)
pp.mp_threads = 16
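## With use_multiprocessing enabled, the subfunction preparation is presumably
## distributed across mp_threads parallel workers (16 here)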

# Initialize the Architecture
unet_standard = Architecture(depth=4, activation="softmax",
                             batch_normalization=True)

# Create the Neural Network model
model = Neural_Network(preprocessor=pp, architecture=unet_standard,
                       loss=tversky_crossentropy,
                       metrics=[tversky_loss, dice_soft, dice_crossentropy],
                       batch_queue_size=3, workers=3,
                       learninig_rate=0.001)  # sic: MIScnn API spelling

# Load best model weights during fitting
model.load(path_model)

# Compute predictions
model.predict(sample_list, return_output=False)
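Since return_output=False writes every prediction to disk instead of returning
it, the results can afterwards be read back through the sample loader, as in
Example #1 (data_io and sample_list reused from the surrounding script):

sample = data_io.sample_loader(sample_list[0], load_seg=False, load_pred=True)
print(sample.pred_data.shape)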