# NOTE: the helpers used throughout this section (utils, edit_volumes, evaluate,
# dice_evaluation, dilate_lesions, ...) come from the surrounding project; the
# 'ext.lab2im' import path below follows the SynthSeg repository layout and is
# an assumption, adjust it to your checkout.
import os
import csv

import numpy as np

from ext.lab2im import utils
from ext.lab2im import edit_volumes


def validation_on_dilated_lesions(normal_validation_dir,
                                  dilated_validation_dir,
                                  gt_dir,
                                  evaluation_labels,
                                  recompute=True):

    utils.mkdir(dilated_validation_dir)

    list_validation_subdir = utils.list_subfolders(normal_validation_dir)
    loop_info = utils.LoopInfo(len(list_validation_subdir), 5, 'validating', True)
    for val_idx, validation_subdir in enumerate(list_validation_subdir):
        loop_info.update(val_idx)

        # dilate lesion
        dilated_validation_subdir = os.path.join(dilated_validation_dir, os.path.basename(validation_subdir))
        dilate_lesions(validation_subdir, dilated_validation_subdir, recompute=recompute)

        # compute new dice scores
        path_dice = os.path.join(dilated_validation_subdir, 'dice.npy')
        dice_evaluation(gt_dir, dilated_validation_subdir, evaluation_labels, path_dice=path_dice,
                        recompute=recompute)
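# Usage sketch for validation_on_dilated_lesions (all paths below are hypothetical
# placeholders; uncomment and adapt to run):
# validation_on_dilated_lesions(normal_validation_dir='/data/model/validation',
#                               dilated_validation_dir='/data/model/validation_dilated',
#                               gt_dir='/data/gt_labels',
#                               evaluation_labels='/data/labels_classes_stats/evaluation_labels.npy',
#                               recompute=False)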
def run_validation_on_aseg_gt(list_supervised_model_dir, list_aseg_gt_dir, path_label_list, recompute=False):

    list_main_samseg_validation_dir = [os.path.join(p, 'validation_samseg') for p in list_supervised_model_dir]

    # loop over architectures
    for (main_samseg_validation_dir, gt_dir) in zip(list_main_samseg_validation_dir, list_aseg_gt_dir):

        # list model subdirs
        main_aseg_validation_dir = os.path.join(os.path.dirname(main_samseg_validation_dir), 'validation')
        utils.mkdir(main_aseg_validation_dir)
        list_samseg_validation_subdir = utils.list_subfolders(main_samseg_validation_dir)

        # loop over models
        for samseg_validation_subdir in list_samseg_validation_subdir:

            # create equivalent aseg subdir
            aseg_validation_subdir = os.path.join(main_aseg_validation_dir,
                                                  os.path.basename(samseg_validation_subdir))
            utils.mkdir(aseg_validation_subdir)
            path_aseg_dice = os.path.join(aseg_validation_subdir, 'dice.npy')

            # compute dice with aseg gt
            if (not os.path.isfile(path_aseg_dice)) | recompute:
                dice_evaluation(gt_dir, samseg_validation_subdir, path_label_list, path_aseg_dice)
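# Usage sketch for run_validation_on_aseg_gt (hypothetical paths): each entry of
# the two lists pairs a supervised model folder with its aseg ground-truth folder.
# run_validation_on_aseg_gt(list_supervised_model_dir=['/data/models/t1', '/data/models/t1_flair'],
#                           list_aseg_gt_dir=['/data/asegs/t1', '/data/asegs/t1_flair'],
#                           path_label_list='/data/labels_classes_stats/segmentation_labels.npy',
#                           recompute=False)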
def validation_on_dilated_lesions(normal_validation_dir,
                                  dilated_validation_dir,
                                  gt_dir,
                                  evaluation_labels,
                                  recompute=True):

    utils.mkdir(dilated_validation_dir)

    list_validation_subdir = utils.list_subfolders(normal_validation_dir)
    for val_idx, validation_subdir in enumerate(list_validation_subdir):
        utils.print_loop_info(val_idx, len(list_validation_subdir), 5)

        # dilate lesion
        dilated_validation_subdir = os.path.join(dilated_validation_dir, os.path.basename(validation_subdir))
        dilate_lesions(validation_subdir, dilated_validation_subdir, recompute=recompute)

        # compute new dice scores
        path_dice = os.path.join(dilated_validation_subdir, 'dice.npy')
        if (not os.path.isfile(path_dice)) | recompute:
            dice_evaluation(gt_dir, dilated_validation_subdir, evaluation_labels, path_dice)
def postprocess_samseg(list_samseg_dir,
                       list_gt_dir,
                       path_segmentation_labels,
                       incorrect_labels,
                       correct_labels,
                       list_posteriors_dir=None,
                       list_thresholds=None,
                       recompute=False):
    """This function processes the samseg segmentations: it corrects the labels (right/left and 99 to 77), resamples
    them to the space of gt_dir, and computes the Dice scores for 1) all subjects vs. testing subjects only, and
    2) all ROIs vs. lesions only.
    It requires that all segmentations are sorted in three subfolders inside samseg_main_dir: t1, flair, and t1_flair.
    IMPORTANT: images are expected to have the following naming convention: <subject_id>.samseg.<contrast>.lesion.mgz,
    where <contrast> must be t1, flair, or t1_flair.
    :param list_samseg_dir: main samseg dir containing the three subfolders t1, flair, t1_flair
    :param list_gt_dir: folder with the gt label maps for all subjects
    :param path_segmentation_labels: list of segmentation labels
    :param incorrect_labels: list of samseg incorrect labels
    :param correct_labels: list of labels to correct the wrong ones with
    :param list_posteriors_dir: (optional) list of folders with lesion posteriors, one per samseg dir
    :param list_thresholds: (optional) list of thresholds used to binarise the posteriors, one per posteriors dir
    :param recompute: whether to recompute files
    """

    if list_posteriors_dir is None:
        list_posteriors_dir = [None] * len(list_samseg_dir)
    if list_thresholds is None:
        list_thresholds = [None] * len(list_samseg_dir)

    for samseg_dir, gt_dir, posteriors_dir, threshold in zip(list_samseg_dir, list_gt_dir,
                                                             list_posteriors_dir, list_thresholds):

        # define result directories
        samseg_corrected_dir = samseg_dir + '_corrected'
        samseg_preprocessed_dir = samseg_dir + '_preprocessed'
        if (not os.path.isdir(samseg_preprocessed_dir)) | recompute:

            # regroup right/left labels and change 99 to 77
            edit_volumes.correct_labels_in_dir(samseg_dir, incorrect_labels, correct_labels,
                                               samseg_corrected_dir, recompute=recompute)

            # resample to gt format
            edit_volumes.mri_convert_images_in_dir(samseg_corrected_dir, samseg_preprocessed_dir,
                                                   interpolation='nearest', reference_dir=gt_dir,
                                                   recompute=recompute)

        # replace lesions by thresholded lesion posteriors
        if posteriors_dir is not None:

            # resample posteriors to gt format
            posteriors_preprocessed_dir = posteriors_dir + '_preprocessed'
            edit_volumes.mri_convert_images_in_dir(posteriors_dir, posteriors_preprocessed_dir,
                                                   reference_dir=gt_dir, recompute=recompute)

            # list hard segmentations and posteriors
            samseg_postprocessed_dir = samseg_dir + '_postprocessed'
            utils.mkdir(samseg_postprocessed_dir)
            path_segs = utils.list_images_in_folder(samseg_preprocessed_dir)
            path_posteriors = utils.list_images_in_folder(posteriors_preprocessed_dir)

            for path_seg, path_post in zip(path_segs, path_posteriors):
                path_result = os.path.join(samseg_postprocessed_dir, os.path.basename(path_seg))
                if (not os.path.isfile(path_result)) | recompute:

                    # replace segmented lesions by thresholded posteriors
                    seg, aff, h = utils.load_volume(path_seg, im_only=False)
                    posteriors = utils.load_volume(path_post)
                    seg[seg == 77] = 2
                    seg[posteriors > threshold] = 77
                    utils.save_volume(seg, aff, h, path_result)

        else:
            samseg_postprocessed_dir = samseg_preprocessed_dir

        # compute dice scores
        path_dice_testing = os.path.join(samseg_postprocessed_dir, 'dice.npy')
        path_dice_lesions_testing = os.path.join(samseg_postprocessed_dir, 'dice_lesions.npy')
        if (not os.path.isfile(path_dice_testing)) | recompute:
            dice_evaluation(gt_dir, samseg_postprocessed_dir, path_segmentation_labels, path_dice_testing)

        # keep only the lesion scores (lesions assumed to sit at row index 4 of the Dice array)
        if (not os.path.isfile(path_dice_lesions_testing)) | recompute:
            dice = np.load(path_dice_testing)
            np.save(path_dice_lesions_testing, dice[4, :])
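# Usage sketch for postprocess_samseg (hypothetical paths; the 0.5 threshold is
# an illustrative value, not one prescribed by this codebase):
# postprocess_samseg(list_samseg_dir=['/data/samseg/t1', '/data/samseg/flair'],
#                    list_gt_dir=['/data/gt', '/data/gt'],
#                    path_segmentation_labels='/data/labels_classes_stats/segmentation_labels.npy',
#                    incorrect_labels='/data/labels_classes_stats/samseg_incorrect_labels.npy',
#                    correct_labels='/data/labels_classes_stats/samseg_correct_labels.npy',
#                    list_posteriors_dir=['/data/posteriors/t1', '/data/posteriors/flair'],
#                    list_thresholds=[0.5, 0.5],
#                    recompute=False)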
labels_folder_1 = '/home/benjamin/data/lesions/ISBI_longitudinal/labels/lesion_masks/lesion_mask1'
labels_folder_2 = '/home/benjamin/data/lesions/ISBI_longitudinal/labels/lesion_masks/lesion_mask2'
result_folder = '/home/benjamin/data/lesions/ISBI_longitudinal/labels/lesion_consensus/lesion_consensus'
build_longitudinal_consensus(labels_folder_1, labels_folder_2, result_folder, recompute=True)

# ---------------------------------- inter-rater reproducibility ISBI longitudinal ---------------------------------

gt_folder = '/home/benjamin/data/lesions/ISBI_longitudinal/labels/lesion_masks/lesion_mask1_2.2_only'
seg_dir = '/home/benjamin/data/lesions/ISBI_longitudinal/labels/lesion_masks/lesion_mask2_2.2_only'
path_result_dice = '/home/benjamin/data/lesions/ISBI_longitudinal/labels/lesion_masks/dice_1_vs_2_2.2_only.npy'
dice_evaluation(gt_dir=gt_folder,
                seg_dir=seg_dir,
                path_label_list=[1],
                path_result_dice_array=path_result_dice,
                cropping_margin_around_gt=None)

# ---------------------------------------------- preprocess MSSeg asegs --------------------------------------------

aseg_folder = '/home/benjamin/data/lesions/MSSeg/labels/resample_1_1_1/asegs'
cropped_labels_dir = '/home/benjamin/data/lesions/MSSeg/labels/resample_1_1_1/SAMSEG_generation/samseg_lesions'
main_result_dir = '/home/benjamin/data/lesions/MSSeg/labels/resample_1_1_1/aseg_lesions'
incorrect = '/home/benjamin/data/lesions/MSSeg/labels_classes_stats/aseg_incorrect_labels.npy'
correct = '/home/benjamin/data/lesions/MSSeg/labels_classes_stats/aseg_correct_labels.npy'
preprocess_asegs(aseg_folder, cropped_labels_dir, incorrect, correct)

# --------------------------------------- preprocess ISBI longitudinal asegs ---------------------------------------

aseg_folder = '/home/benjamin/data/lesions/ISBI_longitudinal/labels/asegs/asegs_original'
def predict(path_images,
            path_model,
            segmentation_label_list,
            dist_map=False,
            path_segmentations=None,
            path_posteriors=None,
            path_volumes=None,
            segmentation_names_list=None,
            padding=None,
            cropping=None,
            resample=None,
            aff_ref='FS',
            sigma_smoothing=0,
            keep_biggest_component=False,
            conv_size=3,
            n_levels=5,
            nb_conv_per_level=2,
            unet_feat_count=24,
            feat_multiplier=2,
            activation='elu',
            gt_folder=None,
            evaluation_label_list=None,
            compute_distances=False,
            recompute=True,
            verbose=True):
    """
    This function uses trained models to segment images.
    It is crucial that the inputs match the architecture parameters of the trained model.
    :param path_images: path of the images to segment. Can be the path to a directory or the path to a single image.
    :param path_model: path to the trained model.
    :param segmentation_label_list: List of labels for which to compute Dice scores. It should contain the same values
    as the segmentation label list used for training the network.
    Can be a sequence, a 1d numpy array, or the path to a numpy 1d array.
    :param dist_map: (optional) whether the input will contain distance map channels (between each intensity channel).
    Default is False.
    :param path_segmentations: (optional) path where segmentations will be written.
    Should be a dir, if path_images is a dir, and a file if path_images is a file.
    Should not be None, if path_posteriors is None.
    :param path_posteriors: (optional) path where posteriors will be written.
    Should be a dir, if path_images is a dir, and a file if path_images is a file.
    Should not be None, if path_segmentations is None.
    :param path_volumes: (optional) path of a csv file where the soft volumes of all segmented regions will be written.
    The rows of the csv file correspond to subjects, and the columns correspond to segmentation labels.
    The soft volume of a structure corresponds to the sum of its predicted probability map.
    :param segmentation_names_list: (optional) List of names corresponding to the names of the segmentation labels.
    Only used when path_volumes is provided. Must be of the same size as segmentation_label_list. Can be given as a
    list, a numpy array of strings, or the path to such a numpy array. Default is None.
    :param padding: (optional) pad the images to the specified shape before predicting the segmentation maps.
    Can be an int, a sequence or a 1d numpy array.
    :param cropping: (optional) crop the images to the specified shape before predicting the segmentation maps.
    If padding and cropping are specified, images are padded before being cropped.
    Can be an int, a sequence or a 1d numpy array.
    :param resample: (optional) resample the images to the specified resolution before predicting the segmentation
    maps. Can be an int, a sequence or a 1d numpy array.
    :param aff_ref: (optional) type of affine matrix of the images used for training. By default this is set to the
    FreeSurfer orientation ('FS'), as it was the configuration in which SynthSeg was trained. However, the new models
    are now trained on data aligned with an identity vox2ras matrix, in which case aff_ref must be set to 'identity'.
    :param sigma_smoothing: (optional) If not None, the posteriors are smoothed with a gaussian kernel of the
    specified standard deviation.
    :param keep_biggest_component: (optional) whether to only keep the biggest component in the predicted segmentation.
    :param conv_size: (optional) size of the unet's convolution masks. Default is 3.
    :param n_levels: (optional) number of levels for the unet. Default is 5.
    :param nb_conv_per_level: (optional) number of convolution layers per level. Default is 2.
    :param unet_feat_count: (optional) number of features for the first layer of the unet. Default is 24.
    :param feat_multiplier: (optional) multiplicative factor for the number of features at each new level.
    Default is 2.
    :param activation: (optional) activation function. Can be 'elu' or 'relu'.
    :param gt_folder: (optional) folder containing ground truth files for evaluation.
    A numpy array containing all dice scores (labels in rows, subjects in columns) will be written either at
    segmentations_dir (if not None), or posteriors_dir.
    :param evaluation_label_list: (optional) if gt_folder is provided, you can evaluate the Dice scores on a subset of
    the segmentation labels, by providing another label list here.
    Can be a sequence, a 1d numpy array, or the path to a numpy 1d array. Default is the same as
    segmentation_label_list.
    :param compute_distances: (optional) whether to also compute Hausdorff and mean surface distances during
    evaluation. Default is False.
    :param recompute: (optional) whether to recompute segmentations that were already computed. This also applies to
    Dice scores, if gt_folder is not None. Default is True.
    :param verbose: (optional) whether to print out info about the remaining number of cases.
    """

    # prepare output filepaths
    images_to_segment, path_segmentations, path_posteriors, path_volumes, compute = \
        prepare_output_files(path_images, path_segmentations, path_posteriors, path_volumes, recompute)

    # get label and classes lists
    label_list, n_neutral_labels = utils.get_list_labels(label_list=segmentation_label_list, FS_sort=True)
    if evaluation_label_list is None:
        evaluation_label_list = segmentation_label_list

    # prepare volume file if needed
    if path_volumes is not None:
        if segmentation_names_list is not None:
            csv_header = [[''] + utils.reformat_to_list(segmentation_names_list, load_as_numpy=True)]
            csv_header += [[''] + [str(lab) for lab in label_list[1:]]]
        else:
            csv_header = [['subjects'] + [str(lab) for lab in label_list[1:]]]
        with open(path_volumes, 'w') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerows(csv_header)

    # perform segmentation
    net = None
    previous_model_input_shape = None
    loop_info = utils.LoopInfo(len(images_to_segment), 10, 'predicting', True)
    for idx, (path_image, path_segmentation, path_posterior, tmp_compute) in \
            enumerate(zip(images_to_segment, path_segmentations, path_posteriors, compute)):

        # compute segmentation only if needed
        if tmp_compute:

            # preprocess image and get information
            image, aff, h, im_res, n_channels, n_dims, shape, pad_shape, crop_idx = \
                preprocess_image(path_image, n_levels, cropping, padding, aff_ref=aff_ref, dist_map=dist_map)
            model_input_shape = list(image.shape[1:])

            # prepare net for first image, or if the input size has changed
            if (net is None) | (previous_model_input_shape != model_input_shape):

                # check for image size compatibility
                if (net is not None) & (previous_model_input_shape != model_input_shape) & verbose:
                    print('image of different shape than previous ones, redefining network')
                previous_model_input_shape = model_input_shape

                # build network
                net = build_model(path_model, model_input_shape, resample, im_res, n_levels, len(label_list),
                                  conv_size, nb_conv_per_level, unet_feat_count, feat_multiplier, activation,
                                  sigma_smoothing)

            if verbose:
                loop_info.update(idx)

            # predict posteriors
            prediction_patch = net.predict(image)

            # get posteriors and segmentation
            seg, posteriors = postprocess(prediction_patch, pad_shape, shape, crop_idx, n_dims, label_list,
                                          keep_biggest_component, aff, aff_ref=aff_ref,
                                          keep_biggest_of_each_group=keep_biggest_component,
                                          n_neutral_labels=n_neutral_labels)

            # write results to disk
            if path_segmentation is not None:
                utils.save_volume(seg.astype('int'), aff, h, path_segmentation)
            if path_posterior is not None:
                if n_channels > 1:
                    posteriors = utils.add_axis(posteriors, axis=[0, -1])
                utils.save_volume(posteriors.astype('float'), aff, h, path_posterior)

        else:
            if path_volumes is not None:
                posteriors, _, _, _, _, _, im_res = utils.get_volume_info(path_posterior, True, aff_ref=np.eye(4))
            else:
                posteriors = im_res = None

        # compute volumes
        if path_volumes is not None:
            volumes = np.sum(posteriors[..., 1:], axis=tuple(range(0, len(posteriors.shape) - 1)))
            volumes = np.around(volumes * np.prod(im_res), 3)
            row = [os.path.basename(path_image).replace('.nii.gz', '')] + [str(vol) for vol in volumes]
            with open(path_volumes, 'a') as csvFile:
                writer = csv.writer(csvFile)
                writer.writerow(row)

    # evaluate
    if gt_folder is not None:

        # find path of evaluation folder
        path_first_result = path_segmentations[0] if (path_segmentations[0] is not None) else path_posteriors[0]
        eval_folder = os.path.dirname(path_first_result)

        # compute evaluation metrics
        evaluate.dice_evaluation(gt_folder,
                                 eval_folder,
                                 evaluation_label_list,
                                 compute_distances=compute_distances,
                                 compute_score_whole_structure=False,
                                 path_dice=os.path.join(eval_folder, 'dice.npy'),
                                 path_hausdorff=os.path.join(eval_folder, 'hausdorff.npy'),
                                 path_mean_distance=os.path.join(eval_folder, 'mean_distance.npy'),
                                 recompute=recompute,
                                 verbose=verbose)
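# Usage sketch for predict (hypothetical paths; evaluation is only triggered
# when gt_folder is given; uncomment and adapt to run):
# predict(path_images='/data/images',
#         path_model='/data/models/synthseg_model.h5',
#         segmentation_label_list='/data/labels_classes_stats/segmentation_labels.npy',
#         path_segmentations='/data/results/segmentations',
#         path_posteriors='/data/results/posteriors',
#         path_volumes='/data/results/volumes.csv',
#         gt_folder='/data/gt_labels',
#         recompute=False)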
def predict(path_images,
            path_model,
            segmentation_label_list,
            path_segmentations=None,
            path_posteriors=None,
            path_volumes=None,
            voxel_volume=1.,
            skip_background_volume=True,
            padding=None,
            cropping=None,
            resample=None,
            sigma_smoothing=0,
            keep_biggest_component=False,
            conv_size=3,
            n_levels=5,
            nb_conv_per_level=2,
            unet_feat_count=24,
            feat_multiplier=2,
            no_batch_norm=False,
            gt_folder=None):
    """
    This function uses trained models to segment images.
    It is crucial that the inputs match the architecture parameters of the trained model.
    :param path_images: path of the images to segment. Can be the path to a directory or the path to a single image.
    :param path_model: path to the trained model.
    :param segmentation_label_list: List of labels for which to compute Dice scores. It should contain the same values
    as the segmentation label list used for training the network.
    Can be a sequence, a 1d numpy array, or the path to a numpy 1d array.
    :param path_segmentations: (optional) path where segmentations will be written.
    Should be a dir, if path_images is a dir, and a file if path_images is a file.
    Should not be None, if path_posteriors is None.
    :param path_posteriors: (optional) path where posteriors will be written.
    Should be a dir, if path_images is a dir, and a file if path_images is a file.
    Should not be None, if path_segmentations is None.
    :param path_volumes: (optional) path of a csv file where the soft volumes of all segmented regions will be written.
    The rows of the csv file correspond to subjects, and the columns correspond to segmentation labels.
    The soft volume of a structure corresponds to the sum of its predicted probability map.
    :param voxel_volume: (optional) volume of a voxel. Default is 1 (i.e. returned volumes are voxel counts).
    :param skip_background_volume: (optional) whether to skip computing the volume of the background.
    This assumes the background corresponds to the first value in the label list.
    :param padding: (optional) pad the images to the specified shape before predicting the segmentation maps.
    If padding and cropping are specified, images are padded before being cropped.
    Can be an int, a sequence or a 1d numpy array.
    :param cropping: (optional) crop the images to the specified shape before predicting the segmentation maps.
    If padding and cropping are specified, images are padded before being cropped.
    Can be an int, a sequence or a 1d numpy array.
    :param resample: (optional) resample the images to the specified resolution before predicting the segmentation
    maps. Can be an int, a sequence or a 1d numpy array.
    :param sigma_smoothing: (optional) If not None, the posteriors are smoothed with a gaussian kernel of the
    specified standard deviation.
    :param keep_biggest_component: (optional) whether to only keep the biggest component in the predicted segmentation.
    :param conv_size: (optional) size of the unet's convolution masks. Default is 3.
    :param n_levels: (optional) number of levels for the unet. Default is 5.
    :param nb_conv_per_level: (optional) number of convolution layers per level. Default is 2.
    :param unet_feat_count: (optional) number of features for the first layer of the unet. Default is 24.
    :param feat_multiplier: (optional) multiplicative factor for the number of features at each new level.
    Default is 2.
    :param no_batch_norm: (optional) whether to deactivate batch norm. Default is False.
    :param gt_folder: (optional) folder containing ground truth files for evaluation.
    A numpy array containing all dice scores (labels in rows, subjects in columns) will be written either at
    segmentations_dir (if not None), or posteriors_dir.
    """

    assert path_model, 'A model file is necessary'
    assert path_segmentations or path_posteriors, 'output segmentations (or posteriors) are required'

    # prepare output filepaths
    images_to_segment, path_segmentations, path_posteriors, path_volumes = \
        prepare_output_files(path_images, path_segmentations, path_posteriors, path_volumes)

    # get label and classes lists
    label_list, _ = utils.get_list_labels(label_list=segmentation_label_list, FS_sort=True)

    # prepare volume file if needed
    if path_volumes is not None:
        if skip_background_volume:
            csv_header = [['subject'] + [str(lab) for lab in label_list[1:]]]
        else:
            csv_header = [['subject'] + [str(lab) for lab in label_list]]
        with open(path_volumes, 'w') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerows(csv_header)

    # perform segmentation
    net = None
    previous_model_input_shape = None
    for idx, (im_path, seg_path, posteriors_path) in enumerate(zip(images_to_segment,
                                                                   path_segmentations,
                                                                   path_posteriors)):
        utils.print_loop_info(idx, len(images_to_segment), 10)

        # preprocess image and get information
        image, aff, h, n_channels, n_dims, shape, pad_shape, cropping, crop_idx = \
            preprocess_image(im_path, n_levels, cropping, padding)
        model_input_shape = image.shape[1:]

        # prepare net for first image, or if the input size has changed
        if (idx == 0) | (previous_model_input_shape != model_input_shape):

            # check for image size compatibility
            if (idx != 0) & (previous_model_input_shape != model_input_shape):
                print('image of different shape than previous ones, redefining network')
            previous_model_input_shape = model_input_shape
            net = None

            if resample is not None:
                net, resample_shape = preprocessing_model(resample, model_input_shape, h, n_channels,
                                                          n_dims, n_levels)
            else:
                resample_shape = previous_model_input_shape
            net = prepare_unet(resample_shape, len(label_list), conv_size, n_levels, nb_conv_per_level,
                               unet_feat_count, feat_multiplier, no_batch_norm, path_model, input_model=net)
            if (resample is not None) | (sigma_smoothing != 0):
                net = postprocessing_model(net, model_input_shape, resample, sigma_smoothing, n_dims)

        # predict posteriors
        prediction_patch = net.predict(image)

        # get posteriors and segmentation
        seg, posteriors = postprocess(prediction_patch, cropping, pad_shape, shape, crop_idx, n_dims, label_list,
                                      keep_biggest_component)

        # compute volumes
        if path_volumes is not None:
            if skip_background_volume:
                volumes = np.around(np.sum(posteriors[..., 1:], axis=tuple(range(0, len(posteriors.shape) - 1))), 3)
            else:
                volumes = np.around(np.sum(posteriors, axis=tuple(range(0, len(posteriors.shape) - 1))), 3)
            volumes = voxel_volume * volumes
            row = [os.path.basename(im_path)] + [str(vol) for vol in volumes]
            with open(path_volumes, 'a') as csvFile:
                writer = csv.writer(csvFile)
                writer.writerow(row)

        # write results to disk
        if seg_path is not None:
            utils.save_volume(seg.astype('int'), aff, h, seg_path)
        if posteriors_path is not None:
            if n_channels > 1:
                new_shape = list(posteriors.shape)
                new_shape.insert(-1, 1)
                posteriors = np.reshape(posteriors, tuple(new_shape))
            utils.save_volume(posteriors.astype('float'), aff, h, posteriors_path)

    # evaluate
    if gt_folder is not None:
        if path_segmentations[0] is not None:
            eval_folder = os.path.dirname(path_segmentations[0])
        else:
            eval_folder = os.path.dirname(path_posteriors[0])
        path_result_dice = os.path.join(eval_folder, 'dice.npy')
        evaluate.dice_evaluation(gt_folder, eval_folder, segmentation_label_list, path_result_dice)
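# The Dice array written above stores labels in rows and subjects in columns,
# so a per-label summary is a mean over axis 1 (path hypothetical):
# dice = np.load('/data/results/segmentations/dice.npy')
# print('mean Dice per label:', dice.mean(axis=1))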
def predict(path_images,
            path_model,
            segmentation_label_list,
            path_segmentations=None,
            path_posteriors=None,
            path_volumes=None,
            skip_background_volume=True,
            padding=None,
            cropping=None,
            resample=None,
            aff_ref='FS',
            sigma_smoothing=0,
            keep_biggest_component=False,
            conv_size=3,
            n_levels=5,
            nb_conv_per_level=2,
            unet_feat_count=24,
            feat_multiplier=2,
            no_batch_norm=False,
            activation='elu',
            gt_folder=None,
            evaluation_label_list=None,
            verbose=True):
    """
    This function uses trained models to segment images.
    It is crucial that the inputs match the architecture parameters of the trained model.
    :param path_images: path of the images to segment. Can be the path to a directory or the path to a single image.
    :param path_model: path to the trained model.
    :param segmentation_label_list: List of labels for which to compute Dice scores. It should contain the same values
    as the segmentation label list used for training the network.
    Can be a sequence, a 1d numpy array, or the path to a numpy 1d array.
    :param path_segmentations: (optional) path where segmentations will be written.
    Should be a dir, if path_images is a dir, and a file if path_images is a file.
    Should not be None, if path_posteriors is None.
    :param path_posteriors: (optional) path where posteriors will be written.
    Should be a dir, if path_images is a dir, and a file if path_images is a file.
    Should not be None, if path_segmentations is None.
    :param path_volumes: (optional) path of a csv file where the soft volumes of all segmented regions will be written.
    The rows of the csv file correspond to subjects, and the columns correspond to segmentation labels.
    The soft volume of a structure corresponds to the sum of its predicted probability map.
    :param skip_background_volume: (optional) whether to skip computing the volume of the background.
    This assumes the background corresponds to the first value in the label list.
    :param padding: (optional) pad the images to the specified shape before predicting the segmentation maps.
    If padding and cropping are specified, images are padded before being cropped.
    Can be an int, a sequence or a 1d numpy array.
    :param cropping: (optional) crop the images to the specified shape before predicting the segmentation maps.
    If padding and cropping are specified, images are padded before being cropped.
    Can be an int, a sequence or a 1d numpy array.
    :param resample: (optional) resample the images to the specified resolution before predicting the segmentation
    maps. Can be an int, a sequence or a 1d numpy array.
    :param aff_ref: (optional) type of affine matrix of the images used for training. By default this is set to the
    FreeSurfer orientation ('FS'), as it was the configuration in which SynthSeg was trained. However, the new models
    are now trained on data aligned with an identity vox2ras matrix, in which case aff_ref must be set to 'identity'.
    :param sigma_smoothing: (optional) If not None, the posteriors are smoothed with a gaussian kernel of the
    specified standard deviation.
    :param keep_biggest_component: (optional) whether to only keep the biggest component in the predicted segmentation.
    :param conv_size: (optional) size of the unet's convolution masks. Default is 3.
    :param n_levels: (optional) number of levels for the unet. Default is 5.
    :param nb_conv_per_level: (optional) number of convolution layers per level. Default is 2.
    :param unet_feat_count: (optional) number of features for the first layer of the unet. Default is 24.
    :param feat_multiplier: (optional) multiplicative factor for the number of features at each new level.
    Default is 2.
    :param no_batch_norm: (optional) whether to deactivate batch norm. Default is False.
    :param activation: (optional) activation function. Can be 'elu' or 'relu'.
    :param gt_folder: (optional) folder containing ground truth files for evaluation.
    A numpy array containing all dice scores (labels in rows, subjects in columns) will be written either at
    segmentations_dir (if not None), or posteriors_dir.
    :param evaluation_label_list: (optional) if gt_folder is provided, you can evaluate the Dice scores on a subset of
    the segmentation labels, by providing another label list here.
    Can be a sequence, a 1d numpy array, or the path to a numpy 1d array. Default is the same as
    segmentation_label_list.
    :param verbose: (optional) whether to print out info about the remaining number of cases.
    """

    assert path_model, 'A model file is necessary'
    assert path_segmentations or path_posteriors, 'output segmentations (or posteriors) are required'

    # prepare output filepaths
    images_to_segment, path_segmentations, path_posteriors, path_volumes = \
        prepare_output_files(path_images, path_segmentations, path_posteriors, path_volumes)

    # get label and classes lists
    label_list, _ = utils.get_list_labels(label_list=segmentation_label_list, FS_sort=True)
    if evaluation_label_list is None:
        evaluation_label_list = segmentation_label_list

    # prepare volume file if needed
    if path_volumes is not None:
        if skip_background_volume:
            csv_header = [['subject'] + [str(lab) for lab in label_list[1:]]]
        else:
            csv_header = [['subject'] + [str(lab) for lab in label_list]]
        with open(path_volumes, 'w') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerows(csv_header)

    # perform segmentation
    net = None
    previous_model_input_shape = None
    for idx, (path_image, path_segmentation, path_posterior) in enumerate(zip(images_to_segment,
                                                                              path_segmentations,
                                                                              path_posteriors)):
        if verbose:
            utils.print_loop_info(idx, len(images_to_segment), 10)

        # preprocess image and get information
        image, aff, h, im_res, n_channels, n_dims, shape, pad_shape, cropping, crop_idx = \
            preprocess_image(path_image, n_levels, cropping, padding, aff_ref=aff_ref)
        model_input_shape = list(image.shape[1:])

        # prepare net for first image, or if the input size has changed
        if (idx == 0) | (previous_model_input_shape != model_input_shape):

            # check for image size compatibility
            if (idx != 0) & (previous_model_input_shape != model_input_shape):
                print('image of different shape than previous ones, redefining network')
            previous_model_input_shape = model_input_shape

            # build network
            net = build_model(path_model, model_input_shape, resample, im_res, n_levels, len(label_list), conv_size,
                              nb_conv_per_level, unet_feat_count, feat_multiplier, no_batch_norm, activation,
                              sigma_smoothing)

        # predict posteriors
        prediction_patch = net.predict(image)

        # get posteriors and segmentation
        seg, posteriors = postprocess(prediction_patch, cropping, pad_shape, shape, crop_idx, n_dims, label_list,
                                      keep_biggest_component, aff, aff_ref=aff_ref)

        # compute volumes
        if path_volumes is not None:
            if skip_background_volume:
                volumes = np.sum(posteriors[..., 1:], axis=tuple(range(0, len(posteriors.shape) - 1)))
            else:
                volumes = np.sum(posteriors, axis=tuple(range(0, len(posteriors.shape) - 1)))
            volumes = np.around(volumes * np.prod(im_res), 3)
            row = [os.path.basename(path_image).replace('.nii.gz', '')] + [str(vol) for vol in volumes]
            # also append the summed volumes of the first and second halves of the label list
            row += [np.sum(volumes[:int(len(volumes) / 2)]), np.sum(volumes[int(len(volumes) / 2):])]
            with open(path_volumes, 'a') as csvFile:
                writer = csv.writer(csvFile)
                writer.writerow(row)

        # write results to disk
        if path_segmentation is not None:
            utils.save_volume(seg.astype('int'), aff, h, path_segmentation)
        if path_posterior is not None:
            if n_channels > 1:
                new_shape = list(posteriors.shape)
                new_shape.insert(-1, 1)
                posteriors = np.reshape(posteriors, tuple(new_shape))
            utils.save_volume(posteriors.astype('float'), aff, h, path_posterior)

    # evaluate
    if gt_folder is not None:
        if path_segmentations[0] is not None:
            eval_folder = os.path.dirname(path_segmentations[0])
        else:
            eval_folder = os.path.dirname(path_posteriors[0])
        path_result_dice = os.path.join(eval_folder, 'dice.npy')
        evaluate.dice_evaluation(gt_folder, eval_folder, evaluation_label_list, path_result_dice, verbose=verbose)
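# Sketch for reading back the volumes csv written by predict (path hypothetical):
# one row per subject, one column per label, as described in the docstring above.
# with open('/data/results/volumes.csv', 'r') as f:
#     rows = list(csv.reader(f))
# header, data = rows[0], rows[1:]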