def save_checkpoint(state, is_best, output_path):
    """Save checkpoint if a new best is achieved"""
    if is_best:
        save_path = os.path.join(output_path,
                                 f"checkpoint_epoch_{state['epoch']}_val_loss_{state['val_loss']}_dice_{state['val_dice_score']}.pth")
        logger.info(f"Saving a new best to {save_path}")
        torch.save(state, save_path)
    else:
        logger.info("Validation loss did not improve")
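# A minimal, hypothetical usage sketch for save_checkpoint (an assumed calling
# convention, not taken from this repo): the training loop tracks the best
# validation loss and persists the state only when it improves. The
# "model_state_dict" / "optimizer_state_dict" key names are assumptions; the
# "epoch", "val_loss" and "val_dice_score" keys follow the f-string above.
def _example_checkpoint_usage(model, optimizer, model_path, epoch, val_loss, val_dice_score, best_val_loss):
    state = {
        "epoch": epoch,
        "model_state_dict": model.state_dict(),          # assumed key name
        "optimizer_state_dict": optimizer.state_dict(),  # assumed key name
        "val_loss": round(val_loss, 4),
        "val_dice_score": round(val_dice_score, 4),
    }
    save_checkpoint(state, is_best=val_loss < best_val_loss, output_path=model_path)
    return min(val_loss, best_val_loss)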
def _epoch_summary(self, epoch, train_loss, val_loss, train_dice_score, val_dice_score,
                   train_combined_loss, train_ce_loss, val_combined_loss, val_ce_loss):
    if self.args.loss in ("dice", "both_dice"):
        logger.info(
            f'epoch: {epoch}\n'
            f'** Dice Loss **  : train_loss: {train_loss:.2f} | val_loss: {val_loss:.2f}\n'
            f'** Dice Score ** : train_dice_score: {train_dice_score:.2f} | val_dice_score: {val_dice_score:.2f}'
        )
    else:
        logger.info(
            f'epoch: {epoch}\n'
            f'** Combined Loss ** : train_loss: {train_combined_loss:.2f} | val_loss: {val_combined_loss:.2f}\n'
            f'** CE Loss **       : train_loss: {train_ce_loss:.2f} | val_loss: {val_ce_loss:.2f}\n'
            f'** Dice Loss **     : train_loss: {train_loss:.2f} | val_loss: {val_loss:.2f}\n'
            f'** Dice Score **    : train_dice_score: {train_dice_score:.2f} | val_dice_score: {val_dice_score:.2f}'
        )
def save_predictions(patient: Patient, results: dict, model_path: str, task: str):
    output_dir = os.path.join(model_path, task)
    output_dir_entropy = os.path.join(output_dir, "entropy")

    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(output_dir_entropy, exist_ok=True)

    for name, volume in results.items():
        # Segmentation maps go to the task directory; uncertainty maps go to its "entropy" subdirectory
        file_name = f"{patient.patch_name}.nii.gz" if name == "prediction" else f"{patient.patch_name}_unc_{name}.nii.gz"
        directory = output_dir_entropy if "entropy" in file_name else output_dir
        output_path = os.path.join(directory, file_name)

        affine = patient.get_affine()
        logger.info(f"Saving to: {output_path}")
        save_segmask_as_nifi_volume(volume, affine, output_path)
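# save_segmask_as_nifi_volume is imported from elsewhere in the repo. A
# plausible minimal implementation with nibabel follows; this is an assumption
# for illustration, not the repository's actual code.
import nibabel as nib
import numpy as np

def _save_segmask_sketch(volume, affine, output_path):
    img = nib.Nifti1Image(volume.astype(np.uint8), affine)  # BraTS labels fit in uint8
    nib.save(img, output_path)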
post_result = {"prediction": segmentation_post} predict.save_predictions(data[idx], post_result, model_path, f"{task}_post_processed") results["prediction"] = prediction_map predict.save_predictions(data[idx], results, model_path, task) if compute_metrics: patient_path = os.path.join(data[idx].data_path, data[idx].patch_name, data[idx].seg) data_path = os.path.join(data[idx].data_path, data[idx].patch_name, data[idx].flair) if os.path.exists(patient_path): volume_gt = data[idx].load_gt_mask() volume = nifi_volume.load_nifi_volume(data_path) metrics = compute_wt_tc_et(prediction_map, volume_gt, volume) logger.info(f"{data[idx].patient} | {metrics}") print("Normalize uncertainty for brats!") if uncertainty_flag: input_dir = os.path.join(model_path, task) output_dir = os.path.join(model_path, task, "normalized") gt_path = data[0].data_path compute_normalization(input_dir=input_dir, output_dir=output_dir, ground_truth_path=gt_path) print("All done!!!! Be happy!")
models = [os.path.join(check_path, model, task) for model in models]
output_dir = os.path.join(check_path, task, "ensemble_predictions")
os.makedirs(output_dir, exist_ok=True)

data, _ = dataset.read_brats(dataset_csv_path, lgg_only=False)

for patient in tqdm(data, total=len(data), desc="Ensemble prediction"):
    patient_name = patient.patient
    seg_maps = read_preds_from_models(models, f"{patient_name}.nii.gz")
    ensemble_map = majority_voting(seg_maps, patient.get_brain_mask())

    output_path_with_name = os.path.join(output_dir, f"{patient_name}.nii.gz")
    save_segmask_as_nifi_volume(ensemble_map, patient.get_affine(), output_path_with_name)

    if compute_metrics:
        patient_path = os.path.join(patient.data_path, patient.patch_name, patient.seg)
        data_path = os.path.join(patient.data_path, patient.patch_name, patient.flair)

        if os.path.exists(patient_path):
            volume_gt = patient.load_gt_mask()
            volume = nifi_volume.load_nifi_volume(data_path)
            metrics = compute_wt_tc_et(ensemble_map, volume_gt, volume)
            logger.info(f"{patient.patient} | {metrics}")
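# majority_voting is called above but not defined in this section. A minimal
# numpy sketch of voxel-wise majority voting under assumed shapes
# (seg_maps: list of HxWxD integer label volumes, brain_mask: boolean HxWxD);
# the repository's real implementation may differ.
import numpy as np

def _majority_voting_sketch(seg_maps, brain_mask):
    stacked = np.stack(seg_maps, axis=0).astype(np.int64)  # (n_models, H, W, D)
    # For each voxel, count the votes per label and keep the most frequent one
    votes = np.apply_along_axis(lambda v: np.bincount(v).argmax(), 0, stacked)
    return votes * brain_mask  # zero out non-brain voxels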
def prepare_parameters(self):
    self.config["model"]["model_path"] = get_correct_path(self.config.get("model", "model_path_local"),
                                                          self.config.get("model", "model_path_server"))
    train = self.config.getboolean("basics", "train_flag")
    resume = self.config.getboolean("basics", "resume")

    if train:
        if resume:
            # Reuse the directory of the checkpoint we resume from
            model_name, _ = os.path.split(self.config.get("model", "checkpoint"))
        else:
            model_name = f"model_{round(time.time())}"

        logger.info("Create model directory and save configuration")
        self.config["basics"]["tensorboard_logs"] = os.path.join(self.config.get("basics", "tensorboard_logs"),
                                                                 model_name)
        self.config["model"]["checkpoint"] = os.path.join(self.config.get("model", "model_path"),
                                                          self.config.get("model", "checkpoint"))
        create_directory(self.config.get("basics", "tensorboard_logs"))

        self.config["model"]["model_path"] = os.path.join(self.config.get("model", "model_path"), model_name)
        create_directory(self.config["model"]["model_path"])

        # Save the current configuration alongside the model
        with open(os.path.join(self.config["model"]["model_path"], "config.ini"), "w") as configfile:
            self.config.write(configfile)

    if train:
        sampling_method = self.config["dataset"]["source_sampling"].split(".")[-1]
    else:
        sampling_method = self.config["dataset"]["sampling_method"].split(".")[-1]

    # Resolve dataset paths
    self.config["dataset"]["root_path"] = get_correct_path(self.config.get("dataset", "dataset_root_path_local"),
                                                           self.config.get("dataset", "dataset_root_path_server"))
    self.config["dataset"]["path_train"] = os.path.join(self.config["dataset"]["root_path"],
                                                        self.config["dataset"]["dataset_train_folder"], sampling_method)
    self.config["dataset"]["path_val"] = os.path.join(self.config["dataset"]["root_path"],
                                                      self.config["dataset"]["dataset_val_folder"], sampling_method)
    self.config["dataset"]["path_test"] = os.path.join(self.config["dataset"]["root_path"],
                                                       self.config["dataset"]["dataset_test_folder"], sampling_method)

    self.config["dataset"]["train_csv"] = os.path.join(self.config["dataset"]["path_train"],
                                                       self.config.get("dataset", "train_csv"))
    self.config["dataset"]["val_csv"] = os.path.join(self.config["dataset"]["path_val"],
                                                     self.config.get("dataset", "val_csv"))
    self.config["dataset"]["test_csv"] = os.path.join(self.config["dataset"]["path_test"],
                                                      self.config.get("dataset", "test_csv"))

    if "batch_size" not in self.config["dataset"]:
        self.config["dataset"]["batch_size"] = str(self.config.getint("dataset", "n_patients_per_batch") *
                                                   self.config.getint("dataset", "n_patches"))

    self.patch_size = tuple(int(item) for item in self.config.get("dataset", "patch_size").split("\n"))
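# An illustrative sketch of the config sections and keys prepare_parameters
# reads, built with configparser. The key names are taken from the code above;
# every value here is a made-up placeholder, not a real path from this repo.
import configparser

_cfg = configparser.ConfigParser()
_cfg["basics"] = {"train_flag": "true", "resume": "false", "tensorboard_logs": "runs"}
_cfg["model"] = {"model_path_local": "/tmp/models", "model_path_server": "/data/models",
                 "checkpoint": "checkpoint.pth", "loss": "dice"}
_cfg["dataset"] = {"dataset_root_path_local": "/tmp/brats", "dataset_root_path_server": "/data/brats",
                   "dataset_train_folder": "train", "dataset_val_folder": "val", "dataset_test_folder": "test",
                   "source_sampling": "sampling.binary", "sampling_method": "sampling.binary",
                   "train_csv": "train.csv", "val_csv": "val.csv", "test_csv": "test.csv",
                   "n_patients_per_batch": "2", "n_patches": "4", "classes": "4",
                   "patch_size": "64\n64\n64"}  # newline-separated, as the split("\n") above expects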
def num_params(net_params):
    n_params = sum(p.data.nelement() for p in net_params)
    logger.info(f"Number of params: {n_params}")
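# Hypothetical usage sketch: a toy two-layer network, purely illustrative and
# not part of this repo.
import torch.nn as nn
_toy_net = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
num_params(_toy_net.parameters())  # logs "Number of params: 58" (4*8+8 + 8*2+2)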
import sys

import torch

from src.config import BratsConfiguration
from src.dataset.augmentations import color_augmentations, spatial_augmentations
from src.dataset.utils import dataset, visualization
from src.models.vnet import vnet, asymm_vnet
from src.logging_conf import logger
from src.dataset.loaders.brats_dataset import BratsDataset


######## PARAMS
logger.info("Processing Parameters...")

config = BratsConfiguration(sys.argv[1])
model_config = config.get_model_config()
dataset_config = config.get_dataset_config()
basic_config = config.get_basic_config()
patch_size = config.patch_size

tensorboard_logdir = basic_config.get("tensorboard_logs")
checkpoint_path = model_config.get("checkpoint")
batch_size = dataset_config.getint("batch_size")
n_patches = dataset_config.getint("n_patches")
n_classes = dataset_config.getint("classes")
loss = model_config.get("loss")

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def save_model(state, output_path):
    save_path = os.path.join(output_path,
                             f"final_epoch_{state['epoch']}_val_loss_{state['val_loss']}_dice_{state['val_dice_score']}.pth")
    logger.info(f"Saving last checkpoint to {save_path}")
    torch.save(state, save_path)