def init_evaluator(directory: str, result_file_name: str = 'results.csv') -> eval_.Evaluator:
    """Initializes an evaluator.

    Args:
        directory (str): The directory for the results file.
        result_file_name (str): The result file name (CSV file).

    Returns:
        eval.Evaluator: An evaluator.
    """
    os.makedirs(directory, exist_ok=True)  # generate result directory, if it does not exist

    evaluator = eval_.Evaluator(eval_.ConsoleEvaluatorWriter(5))
    evaluator.add_writer(eval_.CSVEvaluatorWriter(os.path.join(directory, result_file_name)))
    evaluator.add_label(1, 'WhiteMatter')
    evaluator.add_label(2, 'GreyMatter')
    evaluator.add_label(3, 'Hippocampus')
    evaluator.add_label(4, 'Amygdala')
    evaluator.add_label(5, 'Thalamus')
    evaluator.metrics = [metric.DiceCoefficient(), metric.HausdorffDistance(95)]  # Solutions
    # todo: add hausdorff distance, 95th percentile (see metric.HausdorffDistance)
    # evaluator.add_metric(metric.HausdorffDistance(95))
    # warnings.warn('Initialized evaluation with the Dice coefficient. Do you know other suitable metrics?')
    return evaluator

def init_evaluator(directory: str, result_file_name: str = 'results.csv') -> eval_.Evaluator:
    """Initializes an evaluator.

    Args:
        directory (str): The directory for the results file.
        result_file_name (str): The result file name (CSV file).

    Returns:
        eval.Evaluator: An evaluator.
    """
    os.makedirs(directory, exist_ok=True)  # generate result directory, if it does not exist

    evaluator = eval_.Evaluator(eval_.ConsoleEvaluatorWriter(5))
    evaluator.add_writer(eval_.CSVEvaluatorWriter(os.path.join(directory, result_file_name)))
    evaluator.add_label(1, "WhiteMatter")
    evaluator.add_label(2, "GreyMatter")
    evaluator.add_label(3, "Hippocampus")
    evaluator.add_label(4, "Amygdala")
    evaluator.add_label(5, "Thalamus")
    evaluator.metrics = [metric.DiceCoefficient()]
    return evaluator

def init_evaluator(directory: str, result_file_name: str = 'results.csv') -> eval_.Evaluator:
    """Initializes an evaluator.

    Args:
        directory (str): The directory for the results file.
        result_file_name (str): The result file name (CSV file).

    Returns:
        eval.Evaluator: An evaluator.
    """
    os.makedirs(directory, exist_ok=True)  # generate result directory, if it does not exist

    evaluator = eval_.Evaluator(eval_.ConsoleEvaluatorWriter(5))
    evaluator.add_writer(eval_.CSVEvaluatorWriter(os.path.join(directory, result_file_name)))
    evaluator.add_label(1, "WhiteMatter")
    evaluator.add_label(2, "GreyMatter")
    evaluator.add_label(3, "Hippocampus")
    evaluator.add_label(4, "Amygdala")
    evaluator.add_label(5, "Thalamus")
    evaluator.metrics = [metric.DiceCoefficient(),
                         metric.AreaUnderCurve(),
                         metric.VolumeSimilarity(),
                         metric.Accuracy(),
                         metric.AverageDistance(),
                         metric.CohenKappaMetric(),
                         metric.FalseNegative(),
                         metric.FalsePositive(),
                         metric.Fallout(),
                         metric.GroundTruthArea(),
                         metric.GroundTruthVolume(),
                         metric.Specificity(),
                         metric.Sensitivity()]
    return evaluator

def init_evaluator() -> pymia_eval.Evaluator:
    evaluator = pymia_eval.Evaluator(pymia_eval.ConsoleEvaluatorWriter(5))
    evaluator.add_label(1, 'Structure 1')
    evaluator.add_label(2, 'Structure 2')
    evaluator.add_label(3, 'Structure 3')
    evaluator.add_label(4, 'Structure 4')
    evaluator.metrics = [pymia_metric.DiceCoefficient()]
    return evaluator

def init_evaluator(csv_file: str = None):
    evaluator = pymia_eval.Evaluator(pymia_eval.ConsoleEvaluatorWriter(5))
    if csv_file is not None:
        evaluator.add_writer(pymia_eval.CSVEvaluatorWriter(csv_file))
    evaluator.add_writer(EvaluatorAggregator())
    evaluator.metrics = [pymia_metric.DiceCoefficient()]
    evaluator.add_label(1, "WhiteMatter")
    evaluator.add_label(2, "GreyMatter")
    evaluator.add_label(3, "Hippocampus")
    evaluator.add_label(4, "Amygdala")
    evaluator.add_label(5, "Thalamus")
    return evaluator

def init_evaluator(write_to_console: bool = True, csv_file: str = None,
                   calculate_distance_metrics: bool = False):
    evaluator = eval.Evaluator(EvaluatorAggregator())
    if write_to_console:
        evaluator.add_writer(eval.ConsoleEvaluatorWriter(5))
    if csv_file is not None:
        evaluator.add_writer(eval.CSVEvaluatorWriter(csv_file))

    if calculate_distance_metrics:
        evaluator.metrics = [pymia_metric.DiceCoefficient(),
                             pymia_metric.HausdorffDistance(),
                             pymia_metric.HausdorffDistance(percentile=95, metric='HDRFDST95'),
                             pymia_metric.VolumeSimilarity()]
    else:
        evaluator.metrics = [pymia_metric.DiceCoefficient(),
                             pymia_metric.VolumeSimilarity()]

    evaluator.add_label(1, cfg.FOREGROUND_NAME)
    return evaluator

def init_evaluator(directory: str, result_file_name: str = 'results.csv') -> eval_.Evaluator:
    """Initializes an evaluator.

    Args:
        directory (str): The directory for the results file.
        result_file_name (str): The result file name (CSV file).

    Returns:
        eval.Evaluator: An evaluator.
    """
    os.makedirs(directory, exist_ok=True)  # generate result directory, if it does not exist

    evaluator = eval_.Evaluator(eval_.ConsoleEvaluatorWriter(5))
    evaluator.add_writer(eval_.CSVEvaluatorWriter(os.path.join(directory, result_file_name)))
    evaluator.add_label(1, 'WhiteMatter')
    evaluator.add_label(2, 'GreyMatter')
    evaluator.add_label(3, 'Hippocampus')
    evaluator.add_label(4, 'Amygdala')
    evaluator.add_label(5, 'Thalamus')
    evaluator.metrics = [metric.DiceCoefficient(), metric.HausdorffDistance()]
    # warnings.warn('Initialized evaluation with the Dice coefficient. Do you know other suitable metrics?')
    # you should add more metrics than just the Hausdorff distance!
    return evaluator

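# Usage sketch (not from the original sources): how one of the init_evaluator variants
# above could be driven. The file paths and the './mia-result' directory are assumptions;
# evaluator.evaluate(image, ground_truth, id_) follows the call pattern used in the
# snippets further below, and the labels/writers are those configured in init_evaluator.
import SimpleITK as sitk


def run_evaluation_example():
    evaluator = init_evaluator('./mia-result')  # writes ./mia-result/results.csv and console output

    prediction = sitk.ReadImage('./mia-result/Subject_1_SEG.nii.gz')                 # hypothetical prediction
    ground_truth = sitk.ReadImage('./data/test/Subject_1/labels_native.nii.gz')      # hypothetical reference

    # each call computes the configured metrics per label and passes them to the writers
    evaluator.evaluate(prediction, ground_truth, 'Subject_1')
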
def main(inputdir: str, csvoutputdir: str, segname: str):
    # ! THIS PARAMETER FOR THE DEFORMATION SIGMA HAS TO BE TUNED PER LABEL TO MATCH INTERRATER-VARIABILITY !
    sigmaarr = np.linspace(2, 8, 31)

    subjroot = '/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/MANAGE/data/robustness/preprocessed_segmented'
    csvoutputdir = os.path.join('/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/MANAGE/data/robustness/segdeform/interrateroutput',
                                segname)

    # make output directory if it does not already exist
    if not os.path.isdir(csvoutputdir):
        os.makedirs(csvoutputdir)

    patlist = os.listdir(subjroot)

    evaluator = pymia_eval.Evaluator(pymia_eval.ConsoleEvaluatorWriter(5))
    evaluator.add_label(1, segname)
    evaluator.add_metric(pymia_metric.DiceCoefficient())

    # for sigmaidx, sigma in enumerate(deformation_sigma):
    for sigmaval in sigmaarr:
        evaluator.add_writer(pymia_eval.CSVEvaluatorWriter(
            os.path.join(csvoutputdir, 'results_' + str(sigmaval) + '.csv')))

        for patidx, pat in enumerate(patlist):
            # read CET image
            img_orig = sitk.ReadImage(os.path.join(subjroot, pat, pat + '_' + segname + '.nii.gz'))

            for runidx_cet in range(0, 100):
                deformed = elasticdeform(img_orig, sigmaval)
                evaluator.evaluate(img_orig, deformed, pat + '_' + str(sigmaval) + '_' + str(runidx_cet))

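# elasticdeform() is called above but not defined in this snippet. The following is a
# hypothetical sketch of such a helper, assuming a random B-spline deformation whose
# control-point displacements have standard deviation `sigma`; the name, signature, and
# grid size are assumptions, not the original implementation.
import numpy as np
import SimpleITK as sitk


def elasticdeform(image: sitk.Image, sigma: float, grid_points: int = 4) -> sitk.Image:
    # initialize a B-spline transform spanning the image domain
    mesh_size = [grid_points] * image.GetDimension()
    transform = sitk.BSplineTransformInitializer(image, mesh_size)

    # perturb the control points with Gaussian noise of standard deviation sigma
    params = np.asarray(transform.GetParameters(), dtype=float)
    params += np.random.randn(params.size) * sigma
    transform.SetParameters(tuple(params))

    # resample with nearest-neighbor interpolation so the label values stay discrete
    return sitk.Resample(image, image, transform, sitk.sitkNearestNeighbor, 0,
                         image.GetPixelIDValue())
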
def atlas_creation():
    # Load the train labels_native with their transform
    wdpath = 'C:/Users/Admin/PycharmProjects/MyMIALab/data/train'
    results_labels_nii = []
    results_affine = []
    resample_labels = []
    for dirpath, subdirs, files in os.walk(wdpath):
        for x in files:
            if x.endswith("labels_native.nii.gz"):
                results_labels_nii.append(os.path.join(dirpath, x))
            if x.endswith("affine.txt"):
                results_affine.append(os.path.join(dirpath, x))

    # Resample the train labels_native with the transform
    for i in range(0, len(results_affine)):
        transform = sitk.ReadTransform(results_affine[i])
        labels_image = sitk.ReadImage(results_labels_nii[i])
        resample_image = sitk.Resample(labels_image, transform, sitk.sitkNearestNeighbor, 0,
                                       labels_image.GetPixelIDValue())
        resample_labels.append(resample_image)
        # without resample
        # resample_labels.append(labels_image)

    # Threshold the images to sort them into 5 categories
    white_matter_list = []
    grey_matter_list = []
    hippocampus_list = []
    amygdala_list = []
    thalamus_list = []
    for i in range(0, len(resample_labels)):
        white_matter_list.append(sitk.Threshold(resample_labels[i], 1, 1, 0))
        grey_matter_list.append(sitk.Threshold(resample_labels[i], 2, 2, 0))
        hippocampus_list.append(sitk.Threshold(resample_labels[i], 3, 3, 0))
        amygdala_list.append(sitk.Threshold(resample_labels[i], 4, 4, 0))
        thalamus_list.append(sitk.Threshold(resample_labels[i], 5, 5, 0))

    # Sum them up and divide by the number of images to make a probability map
    white_matter_map = 0
    grey_matter_map = 0
    hippocampus_map = 0
    amygdala_map = 0
    thalamus_map = 0
    for i in range(0, len(resample_labels)):  # include every subject in the sum
        white_matter_map = sitk.Add(white_matter_map, white_matter_list[i])
        grey_matter_map = sitk.Add(grey_matter_map, grey_matter_list[i])
        hippocampus_map = sitk.Add(hippocampus_map, hippocampus_list[i])
        amygdala_map = sitk.Add(amygdala_map, amygdala_list[i])
        thalamus_map = sitk.Add(thalamus_map, thalamus_list[i])
    white_matter_map = sitk.Divide(white_matter_map, len(white_matter_list))
    grey_matter_map = sitk.Divide(grey_matter_map, len(grey_matter_list))
    hippocampus_map = sitk.Divide(hippocampus_map, len(hippocampus_list))
    amygdala_map = sitk.Divide(amygdala_map, len(amygdala_list))
    thalamus_map = sitk.Divide(thalamus_map, len(thalamus_list))
    # atlas = sitk.Divide(sum_images, len(test_resample))
    # slice = sitk.GetArrayFromImage(atlas)[90, :, :]
    # plt.imshow(slice)

    # Save the probability maps without threshold
    sitk.WriteImage(hippocampus_map,
                    'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/hippocampus_map_no_threshold.nii',
                    False)
    sitk.WriteImage(white_matter_map,
                    'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/white_matter_map_no_threshold.nii',
                    False)
    sitk.WriteImage(grey_matter_map,
                    'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/grey_matter_map_no_threshold.nii',
                    False)
    sitk.WriteImage(amygdala_map,
                    'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/amygdala_map_no_threshold.nii',
                    False)
    sitk.WriteImage(thalamus_map,
                    'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/thalamus_map_no_threshold.nii',
                    False)

    # Threshold the 5 different maps to get a binary map
    white_matter_map = sitk.BinaryThreshold(white_matter_map, 0, 1, 1, 0)
    grey_matter_map = sitk.BinaryThreshold(grey_matter_map, 0, 2, 2, 0)
    hippocampus_map = sitk.BinaryThreshold(hippocampus_map, 0, 3, 3, 0)
    amygdala_map = sitk.BinaryThreshold(amygdala_map, 0, 4, 4, 0)
    thalamus_map = sitk.BinaryThreshold(thalamus_map, 0, 5, 5, 0)

    # Save the images
    sitk.WriteImage(grey_matter_map,
                    'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/grey_matter_map.nii', False)
    sitk.WriteImage(white_matter_map,
                    'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/white_matter_map.nii', False)
    sitk.WriteImage(hippocampus_map,
                    'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/hippocampus_map.nii', False)
    sitk.WriteImage(amygdala_map,
                    'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/amygdala_map.nii', False)
    sitk.WriteImage(thalamus_map,
                    'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/thalamus_map.nii', False)

    # Load the test labels_native and their transform
    wdpath_test = 'C:/Users/Admin/PycharmProjects/MyMIALab/data/test'
    test_results_nii = []
    test_results_affine = []
    test_resample = []
    for dirpath, subdirs, files in os.walk(wdpath_test):
        for x in files:
            if x.endswith("labels_native.nii.gz"):
                test_results_nii.append(os.path.join(dirpath, x))
            if x.endswith("affine.txt"):
                test_results_affine.append(os.path.join(dirpath, x))

    # Resample the labels_native with the transform
    for i in range(0, len(test_results_affine)):
        test_transform = sitk.ReadTransform(test_results_affine[i])
        test_image = sitk.ReadImage(test_results_nii[i])
        test_resample_image = sitk.Resample(test_image, test_transform, sitk.sitkNearestNeighbor)
        test_resample.append(test_resample_image)
        # Without resample
        # test_resample.append(test_image)

    # Save the first test patient labels
    sitk.WriteImage(test_resample[0],
                    'C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result/test.nii', False)

    # Compute the Dice coefficient (and further metrics)
    label_list = ['White Matter', 'Grey Matter', 'Hippocampus', 'Amygdala', 'Thalamus']
    map_list = [white_matter_map, grey_matter_map, hippocampus_map, amygdala_map, thalamus_map]
    dice_list = []
    for i in range(0, 5):
        evaluator = eval_.Evaluator(eval_.ConsoleEvaluatorWriter(5))
        evaluator.metrics = [metric.DiceCoefficient(), metric.Sensitivity(), metric.Precision(),
                             metric.Fallout()]
        evaluator.add_writer(eval_.CSVEvaluatorWriter(
            os.path.join('C:/Users/Admin/PycharmProjects/MyMIALab/bin/mia-result',
                         'Results_' + label_list[i] + '.csv')))
        evaluator.add_label(i + 1, label_list[i])
        for j in range(0, len(test_resample)):
            evaluator.evaluate(test_resample[j], map_list[i], 'Patient ' + str(j))

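# Optional cross-check (not part of the original snippet): the per-structure Dice
# coefficient reported by the evaluator above can also be obtained directly with
# SimpleITK's LabelOverlapMeasuresImageFilter. Variable roles follow the function above;
# treating the thresholded atlas map as the "prediction" is an assumption.
import SimpleITK as sitk


def dice_against_atlas(ground_truth: sitk.Image, atlas_map: sitk.Image, label_value: int) -> float:
    # reduce both images to the single label of interest
    gt_binary = sitk.BinaryThreshold(ground_truth, label_value, label_value, 1, 0)
    map_binary = sitk.BinaryThreshold(atlas_map, label_value, label_value, 1, 0)

    # compute overlap measures between the two binary masks
    overlap = sitk.LabelOverlapMeasuresImageFilter()
    overlap.Execute(gt_binary, map_binary)
    return overlap.GetDiceCoefficient()
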
def validate_on_subject(self: train.Trainer, subject_assembler: pymia_asmbl.SubjectAssembler,
                        config: cfg.Configuration, is_training: bool) -> float:
    # prepare filesystem and evaluator
    if self.current_epoch % self.save_validation_nth_epoch == 0:
        epoch_result_dir = fs.prepare_epoch_result_directory(config.result_dir, self.current_epoch)
        epoch_csv_file = os.path.join(epoch_result_dir,
                                      '{}_{}{}.csv'.format(os.path.basename(config.result_dir),
                                                           self.current_epoch,
                                                           '_train' if is_training else ''))
        epoch_csv_roi_file = os.path.join(epoch_result_dir,
                                          '{}_ROI_{}{}.csv'.format(os.path.basename(config.result_dir),
                                                                   self.current_epoch,
                                                                   '_train' if is_training else ''))
        epoch_csv_roi_summary_file = os.path.join(epoch_result_dir,
                                                  '{}_ROI_SUMMARY_{}{}.csv'.format(
                                                      os.path.basename(config.result_dir),
                                                      self.current_epoch,
                                                      '_train' if is_training else ''))
        epoch_txt_file = os.path.join(epoch_result_dir,
                                      '{}_{}{}.txt'.format(os.path.basename(config.result_dir),
                                                           self.current_epoch,
                                                           '_train' if is_training else ''))

        if is_training:
            writers = [pymia_eval.CSVEvaluatorWriter(epoch_csv_file)]
        else:
            writers = [pymia_eval.ConsoleEvaluatorWriter(5), pymia_eval.CSVEvaluatorWriter(epoch_csv_file)]

        evaluator = eval.Evaluator(writers, metric.get_metrics(), config.maps)
        evaluator_roi = eval.ROIEvaluator([pymia_eval.CSVEvaluatorWriter(epoch_csv_roi_file)],
                                          config.maps, config.label_file_dir, config.label_files)
    elif is_training:
        return float(-np.inf)
    else:
        epoch_result_dir = None
        epoch_csv_file = None
        epoch_csv_roi_summary_file = None
        epoch_txt_file = None
        evaluator = eval.Evaluator([pymia_eval.ConsoleEvaluatorWriter(5)], metric.get_metrics(), config.maps)
        evaluator_roi = eval.ROIEvaluator([], config.maps, config.label_file_dir, config.label_files)

    if not is_training:
        print('Epoch {}, {} s:'.format(self._get_current_epoch_formatted(), self.epoch_duration))

    # loop over all subjects
    for subject_idx in list(subject_assembler.predictions.keys()):
        subject_data = self.data_handler.dataset.direct_extract(self.data_handler.extractor_test, subject_idx)
        subject_name = subject_data['subject']

        # for voxel-wise dataset, we need to reshape the voxel-wise data to the original shape
        for k, v in subject_data.items():
            if isinstance(v, np.ndarray):
                subject_data[k] = np.reshape(v, subject_data[pymia_def.KEY_SHAPE] + (v.shape[-1],))

        # rescale and mask reference maps (clipping will have no influence)
        maps = norm.process(subject_data[pymia_def.KEY_LABELS], subject_data[defs.ID_MASK_FG],
                            subject_data[defs.KEY_NORM], config.maps)

        # rescale, clip, and mask prediction
        prediction = subject_assembler.get_assembled_subject(subject_idx)
        prediction = np.reshape(prediction, subject_data[pymia_def.KEY_SHAPE] + (prediction.shape[-1],))
        prediction = norm.process(prediction, subject_data[defs.ID_MASK_FG], subject_data[defs.KEY_NORM],
                                  config.maps)

        # evaluate
        evaluator.evaluate(prediction, maps,
                           {'FG': subject_data[defs.ID_MASK_FG], 'T1H2O': subject_data[defs.ID_MASK_T1H2O]},
                           subject_name)
        roi_masks = {'FG': subject_data[defs.ID_MASK_ROI], 'T1H2O': subject_data[defs.ID_MASK_ROI_T1H2O]}
        evaluator_roi.evaluate(prediction, roi_masks,
                               {'FG': subject_data[defs.ID_MASK_FG], 'T1H2O': subject_data[defs.ID_MASK_FG]},
                               subject_name)

        # Save predictions as SimpleITK images and plot slice images
        if not is_training and (self.current_epoch % self.save_validation_nth_epoch == 0):
            subject_results = os.path.join(epoch_result_dir, subject_name)
            os.makedirs(subject_results, exist_ok=True)

            plotter = plt.QualitativePlotter(subject_results, 2, 'png')

            for map_idx, map_name in enumerate(config.maps):
                map_name_short = map_name.replace('map', '')

                # save predicted maps
                prediction_image = pymia_conv.NumpySimpleITKImageBridge.convert(
                    prediction[..., map_idx], subject_data[pymia_def.KEY_PROPERTIES])
                sitk.WriteImage(prediction_image,
                                os.path.join(subject_results,
                                             '{}_{}.mha'.format(subject_name, map_name_short)),
                                True)

                plotter.plot(subject_name, map_name, prediction[..., map_idx], maps[..., map_idx],
                             subject_data[defs.ID_MASK_T1H2O] if map_name == defs.ID_MAP_T1H2O
                             else subject_data[defs.ID_MASK_FG])

    evaluator.write()
    evaluator_roi.write()

    # log to TensorBoard
    summaries = evaluator.get_summaries()
    for result in summaries:
        self.logger.log_scalar('{}/{}-MEAN'.format(result.map_, result.metric), result.mean,
                               self.current_epoch, is_training)
        self.logger.log_scalar('{}/{}-STD'.format(result.map_, result.metric), result.std,
                               self.current_epoch, is_training)

    roi_calculator = eval.ROICalculator(config.maps)
    roi_results = roi_calculator.calculate(evaluator_roi.results, config.roi_reference_file)
    scores = []
    for roi_result in roi_results:
        self.logger.log_scalar('{}/{}'.format(roi_result.map_, roi_result.metric), roi_result.mean,
                               self.current_epoch, is_training)
        scores.append(roi_result.mean)
        summaries.append(roi_result)

    print('Aggregated {} results (epoch {}):'.format('training' if is_training else 'validation',
                                                     self._get_current_epoch_formatted()))

    if self.current_epoch % self.save_validation_nth_epoch == 0:
        eval.SummaryResultWriter(epoch_txt_file).write(summaries)
        stat.QuantitativePlotter(epoch_result_dir).plot(epoch_csv_file,
                                                        'summary_train' if is_training else 'summary',
                                                        False if is_training else True)
        stat.QuantitativeROIPlotter(epoch_result_dir, config.maps).plot(epoch_csv_roi_file,
                                                                        config.roi_reference_file,
                                                                        'train' if is_training else '')
        roi_calculator.save_summary(evaluator_roi.results, config.roi_reference_file,
                                    epoch_csv_roi_summary_file)
    else:
        eval.SummaryResultWriter().write(summaries)

    return float(np.mean(scores)) if not is_training else -math.inf

def load_atlas_custom_images(wdpath):
    # params_list = list(data_batch.items())
    # print(params_list[0])

    t1w_list = []
    t2w_list = []
    gt_label_list = []
    brain_mask_list = []
    transform_list = []

    # Load the train labels_native with their transform
    for dirpath, subdirs, files in os.walk(wdpath):
        # print("dirpath", dirpath)
        # print("subdirs", subdirs)
        # print("files", files)
        for x in files:
            if x.endswith("T1native.nii.gz"):
                t1w_list.append(sitk.ReadImage(os.path.join(dirpath, x)))
            elif x.endswith("T2native.nii.gz"):
                t2w_list.append(sitk.ReadImage(os.path.join(dirpath, x)))
            elif x.endswith("labels_native.nii.gz"):
                gt_label_list.append(sitk.ReadImage(os.path.join(dirpath, x)))
            elif x.endswith("Brainmasknative.nii.gz"):
                brain_mask_list.append(sitk.ReadImage(os.path.join(dirpath, x)))
            elif x.endswith("affine.txt"):
                transform_list.append(sitk.ReadTransform(os.path.join(dirpath, x)))
            # else:
            #     print("Problem in CustomAtlas in folder", dirpath)

    # Resample and threshold to get the labels
    # note: atlas_t1 is expected to be a module-level reference image loaded elsewhere
    white_matter_list = []
    grey_matter_list = []
    hippocampus_list = []
    amygdala_list = []
    thalamus_list = []
    for i in range(0, len(gt_label_list)):
        resample_img = sitk.Resample(gt_label_list[i], atlas_t1, transform_list[i],
                                     sitk.sitkNearestNeighbor, 0, gt_label_list[i].GetPixelIDValue())
        white_matter_list.append(sitk.Threshold(resample_img, 1, 1, 0))
        grey_matter_list.append(sitk.Threshold(resample_img, 2, 2, 0))
        hippocampus_list.append(sitk.Threshold(resample_img, 3, 3, 0))
        amygdala_list.append(sitk.Threshold(resample_img, 4, 4, 0))
        thalamus_list.append(sitk.Threshold(resample_img, 5, 5, 0))

    # Save each label from the first data set
    path_to_save = '../bin/custom_atlas_result/'
    if not os.path.exists(path_to_save):
        os.makedirs(path_to_save)
    sitk.WriteImage(hippocampus_list[0], os.path.join(path_to_save, 'Hippocampus_label.nii'), True)
    sitk.WriteImage(white_matter_list[0], os.path.join(path_to_save, 'White_matter_label.nii'), True)
    sitk.WriteImage(grey_matter_list[0], os.path.join(path_to_save, 'Grey_matter_label.nii'), True)
    sitk.WriteImage(amygdala_list[0], os.path.join(path_to_save, 'Amygdala_label.nii'), True)
    sitk.WriteImage(thalamus_list[0], os.path.join(path_to_save, 'Thalamus_label.nii'), True)

    # Save an image resampled to show segmentation
    sitk.WriteImage(gt_label_list[0], os.path.join(path_to_save, 'Train_image_1_resampled.nii'), True)

    # Sum them up and divide by the number of images to make a probability map
    white_matter_map = 0
    grey_matter_map = 0
    hippocampus_map = 0
    amygdala_map = 0
    thalamus_map = 0
    for i in range(0, len(gt_label_list)):  # include every subject in the sum
        white_matter_map = sitk.Add(white_matter_map, white_matter_list[i])
        grey_matter_map = sitk.Add(grey_matter_map, grey_matter_list[i])
        hippocampus_map = sitk.Add(hippocampus_map, hippocampus_list[i])
        amygdala_map = sitk.Add(amygdala_map, amygdala_list[i])
        thalamus_map = sitk.Add(thalamus_map, thalamus_list[i])
    white_matter_map = sitk.Divide(white_matter_map, len(white_matter_list))
    grey_matter_map = sitk.Divide(grey_matter_map, len(grey_matter_list))
    hippocampus_map = sitk.Divide(hippocampus_map, len(hippocampus_list))
    amygdala_map = sitk.Divide(amygdala_map, len(amygdala_list))
    thalamus_map = sitk.Divide(thalamus_map, len(thalamus_list))
    # atlas = sitk.Divide(sum_images, len(test_resample))
    # slice = sitk.GetArrayFromImage(atlas)[90, :, :]
    # plt.imshow(slice)

    # Save the probability maps without threshold
    path_to_save = '../bin/custom_atlas_result/'
    if not os.path.exists(path_to_save):
        os.makedirs(path_to_save)
    sitk.WriteImage(grey_matter_map, os.path.join(path_to_save, 'grey_matter_map_no_threshold.nii'), True)
    sitk.WriteImage(white_matter_map, os.path.join(path_to_save, 'white_matter_map_no_threshold.nii'), True)
    sitk.WriteImage(hippocampus_map, os.path.join(path_to_save, 'hippocampus_map_no_threshold.nii'), True)
    sitk.WriteImage(amygdala_map, os.path.join(path_to_save, 'amygdala_map_no_threshold.nii'), True)
    sitk.WriteImage(thalamus_map, os.path.join(path_to_save, 'thalamus_map_no_threshold.nii'), True)

    # Threshold the 5 different maps to get a binary map
    white_matter_map = sitk.BinaryThreshold(white_matter_map, 0.3, 1, 1, 0)
    grey_matter_map = sitk.BinaryThreshold(grey_matter_map, 0.6, 2, 2, 0)
    hippocampus_map = sitk.BinaryThreshold(hippocampus_map, 0.9, 3, 3, 0)
    amygdala_map = sitk.BinaryThreshold(amygdala_map, 1.2, 4, 4, 0)
    thalamus_map = sitk.BinaryThreshold(thalamus_map, 1.5, 5, 5, 0)

    # Save the images
    path_to_save = '../bin/custom_atlas_result/'
    if not os.path.exists(path_to_save):
        os.makedirs(path_to_save)
    sitk.WriteImage(grey_matter_map, os.path.join(path_to_save, 'grey_matter_map.nii'), True)
    sitk.WriteImage(white_matter_map, os.path.join(path_to_save, 'white_matter_map.nii'), True)
    sitk.WriteImage(hippocampus_map, os.path.join(path_to_save, 'hippocampus_map.nii'), True)
    sitk.WriteImage(amygdala_map, os.path.join(path_to_save, 'amygdala_map.nii'), True)
    sitk.WriteImage(thalamus_map, os.path.join(path_to_save, 'thalamus_map.nii'), True)

    # Load the test labels_native and their transform
    path_to_test = '../data/test'
    test_gt_label_list = []
    test_transform_list = []
    for dirpath, subdirs, files in os.walk(path_to_test):
        for x in files:
            if x.endswith("labels_native.nii.gz"):
                test_gt_label_list.append(sitk.ReadImage(os.path.join(dirpath, x)))
            if x.endswith("affine.txt"):
                test_transform_list.append(sitk.ReadTransform(os.path.join(dirpath, x)))

    # Resample the labels_native with the transform
    test_resample_img = []
    for i in range(0, len(test_gt_label_list)):
        resample_img = sitk.Resample(test_gt_label_list[i], atlas_t1, test_transform_list[i],
                                     sitk.sitkNearestNeighbor, 0, test_gt_label_list[i].GetPixelIDValue())
        test_resample_img.append(resample_img)
    sitk.WriteImage(test_resample_img[0], os.path.join(path_to_save, 'Test_data_1_resampled.nii'), True)

    # Save the first test patient labels
    # path_to_save = '../bin/temp_test_result/'
    # if not os.path.exists(path_to_save):
    #     os.makedirs(path_to_save)
    # sitk.WriteImage(test_resample_img[0], os.path.join(path_to_save, 'FirstPatienFromTestList.nii'), False)

    # Compute the Dice coefficient (and the Hausdorff distance)
    label_list = ['White Matter', 'Grey Matter', 'Hippocampus', 'Amygdala', 'Thalamus']
    map_list = [white_matter_map, grey_matter_map, hippocampus_map, amygdala_map, thalamus_map]
    dice_list = []
    path_to_save = '../bin/DiceTestResult/'
    if not os.path.exists(path_to_save):
        os.makedirs(path_to_save)
    for i in range(0, 5):
        evaluator = eval_.Evaluator(eval_.ConsoleEvaluatorWriter(5))
        evaluator.metrics = [metric.DiceCoefficient(), metric.HausdorffDistance()]
        evaluator.add_writer(eval_.CSVEvaluatorWriter(
            os.path.join(path_to_save, 'DiceResults_' + label_list[i] + '.csv')))
        evaluator.add_label(i + 1, label_list[i])
        for j in range(0, len(test_resample_img)):
            evaluator.evaluate(test_resample_img[j], map_list[i], 'Patient ' + str(j))

    print("END Custom loadAtlas")