mode="min") #-----------------------------------------------------# # Run Pipeline for provided CV Fold # #-----------------------------------------------------# # Run pipeline for cross-validation fold run_fold(fold, model, epochs=1000, iterations=150, evaluation_path=path_eval, draw_figures=True, callbacks=[cb_lr, cb_es, cb_tb, cb_cl, cb_mc], save_models=False) # Dump latest model to disk model.dump(os.path.join(fold_subdir, "model.latest.hdf5")) #-----------------------------------------------------# # Inference for provided CV Fold # #-----------------------------------------------------# # Load best model weights during fitting model.load(os.path.join(fold_subdir, "model.best.hdf5")) # Obtain training and validation data set training, validation = load_disk2fold( os.path.join(fold_subdir, "sample_list.json")) # Compute predictions model.predict(validation, return_output=False)
def test_MODEL_loading(self):
    # Round-trip check for model persistence: dump a freshly built 3D
    # network to disk, then restore it into a second, independent
    # instance. Passing without an exception is the success criterion.
    model = Neural_Network(preprocessor=self.pp3D)
    path = os.path.join(self.tmp_dir3D.name, "my_model.hdf5")
    model.dump(path)
    restored = Neural_Network(preprocessor=self.pp3D)
    restored.load(path)
def run(self):
    """Run the full MIScnn segmentation inference pipeline.

    Builds the preprocessing stack (grayscale scaling, resampling to
    1.58 x 1.58 x 2.70 voxel spacing, z-score normalization), assembles a
    standard 3D U-Net, loads pretrained weights from
    ``model_dir/model_name.hdf5``, predicts a segmentation for every
    sample found under ``self.input_dir`` and stores the result in
    ``self.predictions``. The temporary ``batches/`` folder created by
    MIScnn is removed afterwards.
    """
    import os  # function-scope: needed only for path joining below

    # Create sample list for miscnn
    util.create_sample_list(self.input_dir)
    # Initialize Data IO Interface for NIfTI data (1 channel, 2 classes)
    interface = NIFTI_interface(channels=1, classes=2)
    # Create Data IO object to load and write samples in the file structure
    data_io = Data_IO(interface, input_path=self.input_dir,
                      delete_batchDir=False)
    # Access all available samples in our file structure
    sample_list = data_io.get_indiceslist()
    sample_list.sort()
    # Create a resampling Subfunction to voxel spacing 1.58 x 1.58 x 2.70
    sf_resample = Resampling((1.58, 1.58, 2.70))
    # Create a pixel value normalization Subfunction for z-score scaling
    sf_zscore = Normalization(mode="z-score")
    # Create a pixel value normalization Subfunction to scale between 0-255
    sf_normalize = Normalization(mode="grayscale")
    # Assemble Subfunction classes into a list (applied in this order)
    sf = [sf_normalize, sf_resample, sf_zscore]
    # Create and configure the Preprocessor class
    pp = Preprocessor(data_io, batch_size=2, subfunctions=sf,
                      prepare_subfunctions=True, prepare_batches=False,
                      analysis="patchwise-crop", patch_shape=(160, 160, 80))
    # Adjust the patch overlap for predictions
    pp.patchwise_overlap = (80, 80, 30)
    # Initialize the Architecture
    unet_standard = Architecture(depth=4, activation="softmax",
                                 batch_normalization=True)
    # Create the Neural Network model.
    # NOTE: "learninig_rate" is the parameter's actual (misspelled) name
    # in the MIScnn API -- do not correct the spelling here.
    model = Neural_Network(
        preprocessor=pp, architecture=unet_standard,
        loss=tversky_crossentropy,
        metrics=[tversky_loss, dice_soft, dice_crossentropy],
        batch_queue_size=3, workers=1, learninig_rate=0.001)
    # Load best model weights during fitting.
    # os.path.join instead of f-string concatenation: correct whether or
    # not model_dir / input_dir carry a trailing separator.
    model.load(os.path.join(self.model_dir, f'{self.model_name}.hdf5'))
    # Obtain training and validation data set ----- CHANGE BASED ON PRED/TRAIN
    images, _ = load_disk2fold(os.path.join(self.input_dir,
                                            'sample_list.json'))
    print('\n\nRunning automatic segmentation on samples...\n')
    print(f'Segmenting images: {images}')
    # Compute predictions
    self.predictions = model.predict(images)
    # Delete temporary batch folder created by miscnn
    shutil.rmtree('batches/')
# Create a pixel value normalization Subfunction for z-score scaling sf_zscore = Normalization(mode="z-score") # Assemble Subfunction classes into a list sf = [sf_clipping, sf_normalize, sf_resample, sf_zscore] # Create and configure the Preprocessor class pp = Preprocessor(data_io, data_aug=None, batch_size=2, subfunctions=sf, prepare_subfunctions=True, prepare_batches=False, analysis="patchwise-crop", patch_shape=(160, 160, 80), use_multiprocessing=True) # Adjust the patch overlap for predictions pp.patchwise_overlap = (80, 80, 30) pp.mp_threads = 16 # Initialize the Architecture unet_standard = Architecture(depth=4, activation="softmax", batch_normalization=True) # Create the Neural Network model model = Neural_Network(preprocessor=pp, architecture=unet_standard, loss=tversky_crossentropy, metrics=[tversky_loss, dice_soft, dice_crossentropy], batch_queue_size=3, workers=3, learninig_rate=0.001) # Load best model weights during fitting model.load(path_model) # Compute predictions model.predict(sample_list, return_output=False)