def main(data_dir: str, result_file: str, result_summary_file: str):
    # initialize metrics
    metrics = [metric.DiceCoefficient(),
               metric.HausdorffDistance(percentile=95, metric='HDRFDST95'),
               metric.VolumeSimilarity()]

    # define the labels to evaluate
    labels = {1: 'WHITEMATTER',
              2: 'GREYMATTER',
              5: 'THALAMUS'}

    evaluator = eval_.SegmentationEvaluator(metrics, labels)

    # get subjects to evaluate
    subject_dirs = [subject for subject in glob.glob(os.path.join(data_dir, '*'))
                    if os.path.isdir(subject) and os.path.basename(subject).startswith('Subject')]

    for subject_dir in subject_dirs:
        subject_id = os.path.basename(subject_dir)
        print(f'Evaluating {subject_id}...')

        # load ground truth image and create artificial prediction by erosion
        ground_truth = sitk.ReadImage(os.path.join(subject_dir, f'{subject_id}_GT.mha'))
        prediction = ground_truth
        for label_val in labels.keys():
            # erode each label we are going to evaluate
            prediction = sitk.BinaryErode(prediction, 1, sitk.sitkBall, 0, label_val)

        # evaluate the "prediction" against the ground truth
        evaluator.evaluate(prediction, ground_truth, subject_id)

    # use two writers to report the results
    writer.CSVWriter(result_file).write(evaluator.results)

    print('\nSubject-wise results...')
    writer.ConsoleWriter().write(evaluator.results)

    # report also mean and standard deviation among all subjects
    functions = {'MEAN': np.mean, 'STD': np.std}
    writer.CSVStatisticsWriter(result_summary_file, functions=functions).write(evaluator.results)
    print('\nAggregated statistic results...')
    writer.ConsoleStatisticsWriter(functions=functions).write(evaluator.results)

    # clear results such that the evaluator is ready for the next evaluation
    evaluator.clear()
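
# --- Illustrative usage -----------------------------------------------------
# A minimal sketch of a command-line entry point for the evaluation routine
# above (it would sit at the bottom of that script). The argument names and
# default paths are assumptions for illustration, not part of the original.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Segmentation evaluation example.')
    parser.add_argument('--data_dir', type=str, default='./data',
                        help='directory containing the Subject* folders (placeholder default)')
    parser.add_argument('--result_file', type=str, default='./results.csv',
                        help='CSV file for the subject-wise results')
    parser.add_argument('--result_summary_file', type=str, default='./results_summary.csv',
                        help='CSV file for the aggregated statistics')
    args = parser.parse_args()

    main(args.data_dir, args.result_file, args.result_summary_file)
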
def main(result_dir: str, data_atlas_dir: str, data_train_dir: str, data_test_dir: str):
    """Brain tissue segmentation using decision forests.

    The main routine executes the medical image analysis pipeline:

        - Image loading
        - Registration
        - Pre-processing
        - Feature extraction
        - Decision forest classifier model building
        - Segmentation using the decision forest classifier model on unseen images
        - Post-processing of the segmentation
        - Evaluation of the segmentation
    """

    # load atlas images
    putil.load_atlas_images(data_atlas_dir)

    print('-' * 5, 'Training...')

    # crawl the training image directories
    crawler = futil.FileSystemDataCrawler(data_train_dir,
                                          LOADING_KEYS,
                                          futil.BrainImageFilePathGenerator(),
                                          futil.DataDirectoryFilter())

    pre_process_params = {'skullstrip_pre': True,
                          'normalization_pre': True,
                          'registration_pre': True,
                          'coordinates_feature': True,
                          'intensity_feature': True,
                          'gradient_intensity_feature': True}

    # load images for training and pre-process
    images = putil.pre_process_batch(crawler.data, pre_process_params, multi_process=False)

    # generate feature matrix and label vector
    data_train = np.concatenate([img.feature_matrix[0] for img in images])
    labels_train = np.concatenate([img.feature_matrix[1] for img in images]).squeeze()

    # warnings.warn('Random forest parameters not properly set.')
    forest = sk_ensemble.RandomForestClassifier(max_features=images[0].feature_matrix[0].shape[1],
                                                n_estimators=10,
                                                max_depth=10)

    start_time = timeit.default_timer()
    forest.fit(data_train, labels_train)
    print(' Time elapsed:', timeit.default_timer() - start_time, 's')

    # create a result directory with timestamp
    t = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    result_dir = os.path.join(result_dir, t)
    os.makedirs(result_dir, exist_ok=True)

    print('-' * 5, 'Testing...')

    # initialize evaluator
    evaluator = putil.init_evaluator()

    # crawl the test image directories
    crawler = futil.FileSystemDataCrawler(data_test_dir,
                                          LOADING_KEYS,
                                          futil.BrainImageFilePathGenerator(),
                                          futil.DataDirectoryFilter())

    # load images for testing and pre-process
    pre_process_params['training'] = False
    images_test = putil.pre_process_batch(crawler.data, pre_process_params, multi_process=False)

    images_prediction = []
    images_probabilities = []

    for img in images_test:
        print('-' * 10, 'Testing', img.id_)

        start_time = timeit.default_timer()
        predictions = forest.predict(img.feature_matrix[0])
        probabilities = forest.predict_proba(img.feature_matrix[0])
        print(' Time elapsed:', timeit.default_timer() - start_time, 's')

        # convert prediction and probabilities back to SimpleITK images
        image_prediction = conversion.NumpySimpleITKImageBridge.convert(predictions.astype(np.uint8),
                                                                        img.image_properties)
        image_probabilities = conversion.NumpySimpleITKImageBridge.convert(probabilities, img.image_properties)

        # evaluate segmentation without post-processing
        evaluator.evaluate(image_prediction, img.images[structure.BrainImageTypes.GroundTruth], img.id_)

        images_prediction.append(image_prediction)
        images_probabilities.append(image_probabilities)

    # post-process segmentation and evaluate with post-processing
    post_process_params = {'simple_post': True}
    images_post_processed = putil.post_process_batch(images_test, images_prediction, images_probabilities,
                                                     post_process_params, multi_process=False)

    for i, img in enumerate(images_test):
        evaluator.evaluate(images_post_processed[i], img.images[structure.BrainImageTypes.GroundTruth],
                           img.id_ + '-PP')

        # save results
        sitk.WriteImage(images_prediction[i], os.path.join(result_dir, images_test[i].id_ + '_SEG.mha'), True)
        sitk.WriteImage(images_post_processed[i], os.path.join(result_dir, images_test[i].id_ + '_SEG-PP.mha'), True)

    # use two writers to report the results
    os.makedirs(result_dir, exist_ok=True)  # generate result directory, if it does not exist
    result_file = os.path.join(result_dir, 'results.csv')
    writer.CSVWriter(result_file).write(evaluator.results)

    print('\nSubject-wise results...')
    writer.ConsoleWriter().write(evaluator.results)

    # report also mean and standard deviation among all subjects
    result_summary_file = os.path.join(result_dir, 'results_summary.csv')
    functions = {'MEAN': np.mean, 'STD': np.std}
    writer.CSVStatisticsWriter(result_summary_file, functions=functions).write(evaluator.results)
    print('\nAggregated statistic results...')
    writer.ConsoleStatisticsWriter(functions=functions).write(evaluator.results)

    # clear results such that the evaluator is ready for the next evaluation
    evaluator.clear()
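
# --- Illustrative note ------------------------------------------------------
# The grid-search and post-processing-only variants below re-use previously
# computed segmentations instead of re-training the forest. A minimal sketch
# (hypothetical helpers, not part of the original pipeline) of how the trained
# model could be persisted between such runs with the standard library:
import pickle


def save_forest(forest, path: str) -> None:
    """Persist a trained random forest to disk (illustrative helper)."""
    with open(path, 'wb') as f:
        pickle.dump(forest, f)


def load_forest(path: str):
    """Load a previously persisted random forest (illustrative helper)."""
    with open(path, 'rb') as f:
        return pickle.load(f)
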
def main(result_dir: str, data_atlas_dir: str, data_train_dir: str, data_test_dir: str):
    """Brain tissue segmentation using decision forests.

    Section of the original main routine. Executes a grid search over the
    probabilistic keyhole filling (PKF) method parameters.

    Must be done separately in advance:

        - Image loading
        - Registration
        - Pre-processing
        - Feature extraction
        - Decision forest classifier model building
        - Segmentation using the decision forest classifier model on unseen images

    Is carried out in this section of the pipeline:

        - Loading of temporary data
        - Grid search over the PKF parameters of the segmentation
        - Evaluation of the segmentation
    """

    # load atlas images
    putil.load_atlas_images(data_atlas_dir)

    print('-' * 5, 'Training...')

    # crawl the training image directories
    crawler = futil.FileSystemDataCrawler(data_train_dir,
                                          LOADING_KEYS,
                                          futil.BrainImageFilePathGenerator(),
                                          futil.DataDirectoryFilter())

    pre_process_params = {'skullstrip_pre': True,
                          'normalization_pre': True,
                          'registration_pre': True,
                          'coordinates_feature': True,
                          'intensity_feature': True,
                          'gradient_intensity_feature': True}

    # load images for training and pre-process
    images = putil.pre_process_batch(crawler.data, pre_process_params, multi_process=False)

    # generate feature matrix and label vector
    data_train = np.concatenate([img.feature_matrix[0] for img in images])
    labels_train = np.concatenate([img.feature_matrix[1] for img in images]).squeeze()

    # warnings.warn('Random forest parameters not properly set.')
    forest = sk_ensemble.RandomForestClassifier(max_features=images[0].feature_matrix[0].shape[1],
                                                n_estimators=20,
                                                max_depth=85)

    start_time = timeit.default_timer()
    forest.fit(data_train, labels_train)
    print(' Time elapsed:', timeit.default_timer() - start_time, 's')

    # create a result directory with timestamp
    t = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    result_dir = os.path.join(result_dir, t)
    os.makedirs(result_dir, exist_ok=True)

    print('-' * 5, 'Testing...')

    # initialize evaluator
    evaluator = putil.init_evaluator()

    # crawl the test image directories
    crawler = futil.FileSystemDataCrawler(data_test_dir,
                                          LOADING_KEYS,
                                          futil.BrainImageFilePathGenerator(),
                                          futil.DataDirectoryFilter())

    # load images for testing and pre-process
    pre_process_params['training'] = False
    images_test = putil.pre_process_batch(crawler.data, pre_process_params, multi_process=False)

    images_prediction = []
    images_probabilities = []

    for img in images_test:
        print('-' * 10, 'Testing', img.id_)

        start_time = timeit.default_timer()
        predictions = forest.predict(img.feature_matrix[0])
        probabilities = forest.predict_proba(img.feature_matrix[0])
        print(' Time elapsed:', timeit.default_timer() - start_time, 's')

        # convert prediction and probabilities back to SimpleITK images
        image_prediction = conversion.NumpySimpleITKImageBridge.convert(predictions.astype(np.uint8),
                                                                        img.image_properties)
        image_probabilities = conversion.NumpySimpleITKImageBridge.convert(probabilities, img.image_properties)

        # evaluate segmentation without post-processing
        evaluator.evaluate(image_prediction, img.images[structure.BrainImageTypes.GroundTruth], img.id_)

        images_prediction.append(image_prediction)
        images_probabilities.append(image_probabilities)

    # save results without post-processing
    name = 'no_PP'
    sub_dir = os.path.join(result_dir, name)
    os.makedirs(sub_dir, exist_ok=True)

    for i, img in enumerate(images_test):
        sitk.WriteImage(images_prediction[i], os.path.join(sub_dir, images_test[i].id_ + '_SEG.mha'), True)

    result_file = os.path.join(sub_dir, 'results.csv')
    writer.CSVWriter(result_file).write(evaluator.results)

    # report also mean and standard deviation among all subjects
    result_summary_file = os.path.join(sub_dir, 'results_summary.csv')
    functions = {'MEAN': np.mean, 'STD': np.std}
    writer.CSVStatisticsWriter(result_summary_file, functions=functions).write(evaluator.results)

    # clear results such that the evaluator is ready for the next evaluation
    evaluator.clear()

    # define parameters for the grid search
    post_process_param_list = []
    variance = np.arange(1, 2)
    preserve_background = np.asarray([False])

    # # define parameters for the full grid search
    # post_process_param_list = []
    # variance = np.arange(0.5, 4.0, 0.5)
    # preserve_background = np.asarray([False, True])

    for bg in preserve_background:
        for var in variance:
            post_process_param_list.append({'simple_post': True,
                                            'variance': float(var),
                                            'preserve_background': bool(bg)})

    # execute post-processing with the defined parameters
    for post_process_params in post_process_param_list:
        # create a sub-directory for the results
        name = 'PP-V-' + str(post_process_params.get('variance')).replace('.', '_') + \
               '-BG-' + str(post_process_params.get('preserve_background'))
        sub_dir = os.path.join(result_dir, name)
        os.makedirs(sub_dir, exist_ok=True)

        # write the used parameters into a text file and store it in the result folder
        parameter_file = os.path.join(sub_dir, 'parameter.txt')
        with open(parameter_file, 'w+') as file:
            json.dump(post_process_params, file)

        # post-process segmentation and evaluate with post-processing
        images_post_processed = putil.post_process_batch(images_test, images_prediction, images_probabilities,
                                                         post_process_params, multi_process=False)

        for i, img in enumerate(images_test):
            evaluator.evaluate(images_post_processed[i], img.images[structure.BrainImageTypes.GroundTruth],
                               img.id_ + '-PP')

            # save results
            sitk.WriteImage(images_post_processed[i], os.path.join(sub_dir, images_test[i].id_ + '_SEG-PP.mha'),
                            True)

        # save all results in a CSV file
        result_file = os.path.join(sub_dir, 'results.csv')
        writer.CSVWriter(result_file).write(evaluator.results)

        print('\nSubject-wise results...')
        writer.ConsoleWriter().write(evaluator.results)

        # report also mean and standard deviation among all subjects
        result_summary_file = os.path.join(sub_dir, 'results_summary.csv')
        functions = {'MEAN': np.mean, 'STD': np.std}
        writer.CSVStatisticsWriter(result_summary_file, functions=functions).write(evaluator.results)
        print('\nAggregated statistic results...')
        writer.ConsoleStatisticsWriter(functions=functions).write(evaluator.results)

        # clear results such that the evaluator is ready for the next evaluation
        evaluator.clear()
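
# --- Illustrative note ------------------------------------------------------
# The nested loops above enumerate the full Cartesian product of the grid
# search parameters. An equivalent construction with itertools.product
# (a sketch; build_param_grid is a hypothetical helper, not in the original):
import itertools


def build_param_grid(variances, preserve_background_options):
    """Return the list of post-processing parameter dictionaries for the grid search."""
    return [{'simple_post': True, 'variance': float(var), 'preserve_background': bool(bg)}
            for bg, var in itertools.product(preserve_background_options, variances)]

# e.g. the full grid from the commented-out block above:
# build_param_grid(np.arange(0.5, 4.0, 0.5), [False, True])
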
def main(result_dir: str, data_atlas_dir: str, data_train_dir: str, data_test_dir: str, tmp_result_dir: str):
    """Brain tissue segmentation using decision forests.

    Section of the original main routine. Executes the post-processing part of the
    medical image analysis pipeline.

    Must be done separately in advance:

        - Image loading
        - Registration
        - Pre-processing
        - Feature extraction
        - Decision forest classifier model building
        - Segmentation using the decision forest classifier model on unseen images

    Is carried out in this section of the pipeline:

        - Loading of temporary data
        - Post-processing of the segmentation
        - Evaluation of the segmentation
    """

    # load atlas images
    putil.load_atlas_images(data_atlas_dir)

    # print('-' * 5, 'Training...')
    #
    # # crawl the training image directories
    # crawler = futil.FileSystemDataCrawler(data_train_dir,
    #                                       LOADING_KEYS,
    #                                       futil.BrainImageFilePathGenerator(),
    #                                       futil.DataDirectoryFilter())

    pre_process_params = {'skullstrip_pre': True,
                          'normalization_pre': True,
                          'registration_pre': True,
                          'coordinates_feature': True,
                          'intensity_feature': True,
                          'gradient_intensity_feature': True}

    # create a result directory with timestamp
    t = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    result_dir = os.path.join(result_dir, t)
    os.makedirs(result_dir, exist_ok=True)

    # initialize evaluator
    evaluator = putil.init_evaluator()

    # crawl the test image directories
    crawler = futil.FileSystemDataCrawler(data_test_dir,
                                          LOADING_KEYS,
                                          futil.BrainImageFilePathGenerator(),
                                          futil.DataDirectoryFilter())

    # load necessary data to perform post-processing
    pre_process_params['training'] = False
    images_test = putil.pre_process_batch(crawler.data, pre_process_params, multi_process=False)

    # load the predictions of the test images (segmented images and probabilities)
    images_prediction, images_probabilities = putil.load_prediction_images(images_test,
                                                                           tmp_result_dir,
                                                                           '2020-10-30-18-31-15')

    # evaluate images without post-processing
    for i, img in enumerate(images_test):
        evaluator.evaluate(images_prediction[i], img.images[structure.BrainImageTypes.GroundTruth], img.id_)

    # post-process segmentation and evaluate with post-processing
    post_process_params = {'simple_post': True,
                           'variance': 1.0,
                           'preserve_background': False}
    images_post_processed = putil.post_process_batch(images_test, images_prediction, images_probabilities,
                                                     post_process_params, multi_process=False)

    for i, img in enumerate(images_test):
        evaluator.evaluate(images_post_processed[i], img.images[structure.BrainImageTypes.GroundTruth],
                           img.id_ + '-PP')

        # save results
        sitk.WriteImage(images_prediction[i], os.path.join(result_dir, images_test[i].id_ + '_SEG.mha'), True)
        sitk.WriteImage(images_post_processed[i], os.path.join(result_dir, images_test[i].id_ + '_SEG-PP.mha'), True)

    # use two writers to report the results
    os.makedirs(result_dir, exist_ok=True)  # generate result directory, if it does not exist
    result_file = os.path.join(result_dir, 'results.csv')
    writer.CSVWriter(result_file).write(evaluator.results)

    print('\nSubject-wise results...')
    writer.ConsoleWriter().write(evaluator.results)

    # report also mean and standard deviation among all subjects
    result_summary_file = os.path.join(result_dir, 'results_summary.csv')
    functions = {'MEAN': np.mean, 'STD': np.std}
    writer.CSVStatisticsWriter(result_summary_file, functions=functions).write(evaluator.results)
    print('\nAggregated statistic results...')
    writer.ConsoleStatisticsWriter(functions=functions).write(evaluator.results)

    # clear results such that the evaluator is ready for the next evaluation
    evaluator.clear()
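
# --- Illustrative note ------------------------------------------------------
# The routine above loads temporary predictions from a hard-coded timestamp
# ('2020-10-30-18-31-15'). A small sketch (hypothetical helper, not in the
# original) that selects the most recent run instead, exploiting that the
# '%Y-%m-%d-%H-%M-%S' format sorts chronologically:
def latest_run_dir(tmp_result_dir: str) -> str:
    """Return the name of the newest timestamped run directory."""
    runs = sorted(d for d in os.listdir(tmp_result_dir)
                  if os.path.isdir(os.path.join(tmp_result_dir, d)))
    if not runs:
        raise FileNotFoundError(f'no run directories found in {tmp_result_dir}')
    return runs[-1]  # lexicographic order equals chronological order for this format
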
def main(result_dir: str, data_atlas_dir: str, data_train_dir: str, data_test_dir: str, parameters_file: str):
    """Brain tissue segmentation using decision forests.

    The main routine executes the medical image analysis pipeline:

        - Image loading
        - Registration
        - Pre-processing
        - Feature extraction
        - Decision forest classifier model building
        - Segmentation using the decision forest classifier model on unseen images
        - Post-processing of the segmentation
        - Evaluation of the segmentation
    """
    start_main = timeit.default_timer()

    # load atlas images
    putil.load_atlas_images(data_atlas_dir)

    print('-' * 5, 'Training...')

    # crawl the training image directories
    crawler = futil.FileSystemDataCrawler(data_train_dir,
                                          LOADING_KEYS,
                                          futil.BrainImageFilePathGenerator(),
                                          futil.DataDirectoryFilter())

    fof_parameters = {'10Percentile': True,
                      '90Percentile': True,
                      'Energy': True,
                      'Entropy': True,
                      'InterquartileRange': True,
                      'Kurtosis': True,
                      'Maximum': True,
                      'MeanAbsoluteDeviation': True,
                      'Mean': True,
                      'Median': True,
                      'Minimum': True,
                      'Range': True,
                      'RobustMeanAbsoluteDeviation': True,
                      'RootMeanSquared': True,
                      'Skewness': True,
                      'TotalEnergy': True,
                      'Uniformity': True,
                      'Variance': True}

    glcm_parameters = {'Autocorrelation': True,
                       'ClusterProminence': True,
                       'ClusterShade': True,
                       'ClusterTendency': True,
                       'Contrast': True,
                       'Correlation': True,
                       'DifferenceAverage': True,
                       'DifferenceEntropy': True,
                       'DifferenceVariance': True,
                       'Id': True,
                       'Idm': True,
                       'Idmn': True,
                       'Idn': True,
                       'Imc1': True,
                       'Imc2': True,
                       'InverseVariance': True,
                       'JointAverage': True,
                       'JointEnergy': True,
                       'JointEntropy': True,
                       'MCC': True,
                       'MaximumProbability': True,
                       'SumAverage': True,
                       'SumEntropy': True,
                       'SumSquares': True}

    pre_process_params = {'skullstrip_pre': True,
                          'normalization_pre': True,
                          'registration_pre': True,
                          'save_features': False,
                          'coordinates_feature': True,
                          'intensity_feature': False,
                          'gradient_intensity_feature': False,
                          'first_order_feature': False,
                          'first_order_feature_parameters': fof_parameters,
                          'HOG_feature': False,
                          'GLCM_features': False,
                          'GLCM_features_parameters': glcm_parameters,
                          'n_estimators': 50,
                          'max_depth': 60,
                          'experiment_name': 'default'}

    # a non-empty parameters file replaces the default parameters
    with open(parameters_file, 'r') as f:
        parameters = json.load(f)
    if parameters:
        pre_process_params = parameters

    # load images for training and pre-process
    images = putil.pre_process_batch(crawler.data, pre_process_params, multi_process=False)

    # generate feature matrix and label vector
    data_train = np.concatenate([img.feature_matrix[0] for img in images])
    labels_train = np.concatenate([img.feature_matrix[1] for img in images]).squeeze()
    np.nan_to_num(data_train, copy=False)

    # warnings.warn('Random forest parameters not properly set.')
    forest = sk_ensemble.RandomForestClassifier(max_features=images[0].feature_matrix[0].shape[1],
                                                n_estimators=pre_process_params['n_estimators'],  # 100
                                                max_depth=pre_process_params['max_depth'])  # 10

    # debugging: store the training data and the indices of NaN entries
    nan_data_idx = np.argwhere(np.isnan(data_train))
    np.savez('data_train.npz', data_train)
    np.save('data_nan.npy', nan_data_idx)

    start_time = timeit.default_timer()
    forest.fit(data_train, labels_train)
    print(' Time elapsed:', timeit.default_timer() - start_time, 's')

    # create a result directory named after the experiment
    result_dir = os.path.join(result_dir, pre_process_params['experiment_name'])
    os.makedirs(result_dir, exist_ok=True)

    print('-' * 5, 'Testing...')

    # initialize evaluator
    evaluator = putil.init_evaluator()

    # crawl the test image directories
    crawler = futil.FileSystemDataCrawler(data_test_dir,
                                          LOADING_KEYS,
                                          futil.BrainImageFilePathGenerator(),
                                          futil.DataDirectoryFilter())

    # load images for testing and pre-process
    pre_process_params['training'] = False
    images_test = putil.pre_process_batch(crawler.data, pre_process_params, multi_process=False)

    images_prediction = []
    images_probabilities = []

    for img in images_test:
        print('-' * 10, 'Testing', img.id_)

        start_time = timeit.default_timer()
        predictions = forest.predict(np.nan_to_num(img.feature_matrix[0], copy=False))
        probabilities = forest.predict_proba(np.nan_to_num(img.feature_matrix[0], copy=False))
        print(' Time elapsed:', timeit.default_timer() - start_time, 's')

        # convert prediction and probabilities back to SimpleITK images
        image_prediction = conversion.NumpySimpleITKImageBridge.convert(predictions.astype(np.uint8),
                                                                        img.image_properties)
        image_probabilities = conversion.NumpySimpleITKImageBridge.convert(probabilities, img.image_properties)

        # evaluate segmentation without post-processing
        evaluator.evaluate(image_prediction, img.images[structure.BrainImageTypes.GroundTruth], img.id_)

        images_prediction.append(image_prediction)
        images_probabilities.append(image_probabilities)

    # post-process segmentation and evaluate with post-processing
    post_process_params = {'simple_post': True}
    images_post_processed = putil.post_process_batch(images_test, images_prediction, images_probabilities,
                                                     post_process_params, multi_process=False)

    for i, img in enumerate(images_test):
        evaluator.evaluate(images_post_processed[i], img.images[structure.BrainImageTypes.GroundTruth],
                           img.id_ + '-PP')

        # save results
        sitk.WriteImage(images_prediction[i], os.path.join(result_dir, images_test[i].id_ + '_SEG.mha'), True)
        sitk.WriteImage(images_post_processed[i], os.path.join(result_dir, images_test[i].id_ + '_SEG-PP.mha'), True)

    # use two writers to report the results
    os.makedirs(result_dir, exist_ok=True)  # generate result directory, if it does not exist
    result_file = os.path.join(result_dir, 'results.csv')
    writer.CSVWriter(result_file).write(evaluator.results)

    print('\nSubject-wise results...')
    writer.ConsoleWriter().write(evaluator.results)

    # report also mean and standard deviation among all subjects
    result_summary_file = os.path.join(result_dir, 'results_summary.csv')
    functions = {'MEAN': np.mean, 'STD': np.std}
    writer.CSVStatisticsWriter(result_summary_file, functions=functions).write(evaluator.results)
    print('\nAggregated statistic results...')
    writer.ConsoleStatisticsWriter(functions=functions).write(evaluator.results)

    # clear results such that the evaluator is ready for the next evaluation
    evaluator.clear()

    end_main = timeit.default_timer()
    main_time = end_main - start_main

    # write information about the run into a text file
    reporter.feature_writer(result_dir, pre_process_params, main_time, 'feature_report')
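
# --- Illustrative note ------------------------------------------------------
# A non-empty parameters file replaces pre_process_params wholesale, so a
# partial JSON file silently drops defaults such as 'experiment_name'. A
# sketch of a more forgiving merge (an assumption about the intended behavior,
# not the original code; merge_parameters is a hypothetical helper):
def merge_parameters(defaults: dict, parameters_file: str) -> dict:
    """Merge a (possibly partial) JSON parameters file into the defaults."""
    with open(parameters_file, 'r') as f:
        overrides = json.load(f)
    merged = dict(defaults)
    merged.update(overrides)  # keep defaults for keys that are not overridden
    return merged
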