def main():
    """Sum a set of atlas tracts and save the result as a NIfTI volume.

    Relies on module-level globals defined elsewhere in the file:
    folder_atlas, tracts_to_sum_index, tracts_sum_img.

    NOTE(review): calls save_3D_nparray_niftii() with 2 arguments, whereas
    validate_atlas() below calls save_3D_nparray_nifti() with 3 arguments —
    confirm which helper actually exists; this may be stale code.
    """
    # Extract the tracts from the atlas' folder
    tracts = get_tracts(folder_atlas)
    # Get the sum of the tracts
    tracts_sum = add_tracts(tracts, tracts_to_sum_index)
    # Save sum of the tracts to niftii
    save_3D_nparray_niftii(tracts_sum, tracts_sum_img)
def validate_atlas(folder_cropped_atlas, nb_bootstraps, std_noise, range_tract, val_csf, results_folder, results_file, mask_folder, list_methods, test_map=0, param_map='20,20', list_tracts=[]):
    """Validate atlas-based metric extraction on synthetic WM phantoms.

    For each of nb_bootstraps iterations: generate a white-matter phantom
    with noise from the cropped atlas, run each extraction method in
    list_methods, and accumulate percent absolute errors against the known
    synthetic values. Methods whose name contains 'man' are treated as
    manual-mask extractions (sct_average_data_within_mask); all others are
    automatic (sct_extract_metric). Results — mean(std) per tract/method and
    MSE/median/min/max across all tracts for automatic methods — are written
    to text files under results_folder and echoed to stdout.

    Parameters
    ----------
    folder_cropped_atlas : str
        Path to the cropped atlas folder containing WMtract__XX.nii.gz files.
    nb_bootstraps : int
        Number of bootstrap iterations.
    std_noise : numeric
        Noise sigma, expressed as a percentage of true_value.
    range_tract : numeric
        Tract value variation range, as a percentage of true_value.
    val_csf : numeric
        CSF value, as a percentage of true_value.
    results_folder, results_file : str
        Output folder and base file name for the result text files.
    mask_folder : sequence of str
        Folders containing manual masks; indexed by the digit following
        'man' in the method name.
    list_methods : sequence of str
        Method identifiers to evaluate.
    test_map : int, optional
        If nonzero, skip the per-tract (single-tract) evaluation loop.
    param_map : str, optional
        Value passed via '-p' to sct_extract_metric for the all-tract run.
    list_tracts : list of str, optional
        Tract label strings, e.g. ['2', '17', '0,1,15,16'].
        NOTE(review): mutable default argument; harmless here because the
        list is never mutated, but 'None' + in-body default would be safer.

    Side effects: creates a timestamped temporary folder ('tmp.<timestamp>'),
    the results folder(s) and result files; shells out to sct_* commands.
    NOTE(review): Python 2 syntax (print statement, 'print >>' chevron).
    """
    # Parameters
    file_phantom = "WM_phantom.nii.gz"
    file_phantom_noise = "WM_phantom_noise.nii.gz"
    file_tract_sum = "tracts_sum.nii.gz"
    true_value = 40  # ground-truth metric value used for phantom generation
    file_extract_metrics = "metric_label.txt"
    # list_tracts = ['2', '17', '0,1,15,16']
    list_tracts_txt = ['csl', 'csr', 'dc']  # text labels matching list_tracts entries
    index_dorsalcolumn = 2  # index of dorsal column in list_tracts
    nb_tracts_all = 32  # total number of tracts in atlas (do not include CSF tracts)
    # dorsal_column_labels = '0,1,15,16'
    # nb_tracts_dorsalcolumn = 4
    value_gm = 35  # value in gray matter
    #value_csf = 5  # value in csf
    nb_digits_results = 2  # number of digits to display for result file
    mask_prefix = 'manual_'
    mask_ext = '.nii.gz'

    # initialization
    start_time = time.time()  # save start time for duration
    folder_tmp = 'tmp.'+datetime.datetime.now().strftime("%y%m%d%H%M%S%f/")
    nb_methods = len(list_methods)
    nb_tracts = len(list_tracts)
    # percent error within single tract (for comparison with manual labeling)
    perc_error = np.zeros(shape=(nb_tracts, nb_methods, nb_bootstraps))
    # percent error for all tracts (for comparing automatic methods)
    perc_error_all = np.zeros(shape=(nb_tracts_all, nb_methods, nb_bootstraps))
    stat_perc_error_all = np.zeros(shape=(nb_methods, nb_bootstraps, 4))  # statistics
    list_stat = ['MSE', 'median', 'min', 'max']
    x_true_i = np.zeros(shape=(nb_tracts))  # true values per evaluated tract
    fname_phantom = folder_tmp+file_phantom
    fname_phantom_noise = folder_tmp+file_phantom_noise
    fname_tract_sum = folder_tmp+file_tract_sum

    # create output folder
    create_folder(results_folder, 0)

    # Extract the tracts from the atlas' folder
    tracts = get_tracts(folder_cropped_atlas)

    # get file name of the first atlas file (used as geometry reference when saving)
    fname_atlas = folder_cropped_atlas+'WMtract__00.nii.gz'

    # Get ponderation of each tract for dorsal column
    # average ponderation of each tract of the dorsal column
    if nb_tracts:
        list_tract_dorsalcolumn = list_tracts[index_dorsalcolumn].split(',')
        nb_tracts_dorsalcolumn = len(list_tract_dorsalcolumn)
        pond_dc = np.zeros(nb_tracts_dorsalcolumn)
        # sum of each
        pond_sum = 0
        for i in range(nb_tracts_dorsalcolumn):
            # i = int(i)
            # Sum tracts values which are higher than 0 in the tracts
            pond_dc[i] = sum(tracts[int(list_tract_dorsalcolumn[i]), 0][tracts[int(list_tract_dorsalcolumn[i]), 0] > 0])
            pond_sum = pond_sum + pond_dc[i]
        # Normalize the sum of ponderations to 1
        pond_dc = pond_dc / pond_sum

    # create temporary folder
    sct.run('mkdir '+folder_tmp)

    # loop across bootstrap
    for i_bootstrap in range(0, nb_bootstraps):
        sct.printv('Iteration: ' + str(i_bootstrap+1) + '/' + str(nb_bootstraps), 1, 'warning')

        # Generate phantom
        # NOTE(review): true_value*val_csf/100 is Python 2 integer division
        # when val_csf is an int — confirm this truncation is intended.
        [WM_phantom, WM_phantom_noise, values_synthetic_data, tracts_sum] = phantom_generation(tracts, std_noise, range_tract, true_value, folder_tmp, value_gm, true_value*val_csf/100)
        # Save generated phantoms as nifti image (.nii.gz)
        save_3D_nparray_nifti(WM_phantom, fname_phantom, fname_atlas)
        save_3D_nparray_nifti(WM_phantom_noise, fname_phantom_noise, fname_atlas)
        save_3D_nparray_nifti(tracts_sum, fname_tract_sum, fname_atlas)

        # Get the np.mean of all values in dorsal column in the generated phantom
        if nb_tracts:
            dc_val_avg = 0
            for j in range(nb_tracts_dorsalcolumn):
                dc_val_avg = dc_val_avg + values_synthetic_data[int(list_tract_dorsalcolumn[j])] * pond_dc[j]
            dc_val_avg = float(dc_val_avg)
            # build variable with true values (WARNING: HARD-CODED INDICES)
            x_true_i[0] = values_synthetic_data[int(list_tracts[0])]
            x_true_i[1] = values_synthetic_data[int(list_tracts[1])]
            x_true_i[2] = dc_val_avg

        fname_extract_metrics = folder_tmp + file_extract_metrics

        if nb_tracts:
            if not test_map:
                # loop across tracts
                for i_tract in range(len(list_tracts)):
                    # loop across methods
                    for i_method in range(len(list_methods)):
                        # display stuff
                        print 'Tract: '+list_tracts[i_tract]+', Method: '+list_methods[i_method]
                        # check if method is manual
                        if not list_methods[i_method].find('man') == -1:
                            # find index of manual mask (digit right after 'man' in the method name)
                            index_manual = int(list_methods[i_method][list_methods[i_method].find('man')+3])
                            fname_mask = mask_folder[index_manual] + mask_prefix + list_tracts_txt[i_tract] + mask_ext
                            # manual extraction
                            status, output = sct.run('sct_average_data_within_mask -i ' + fname_phantom_noise + ' -m ' + fname_mask + ' -v 0')
                            x_estim_i = float(output)
                        else:
                            # automatic extraction
                            sct.run('sct_extract_metric -i ' + fname_phantom_noise + ' -f ' + folder_cropped_atlas + ' -m '+list_methods[i_method]+' -l '+list_tracts[i_tract]+' -a -o '+fname_extract_metrics)
                            # read in txt file
                            x_estim_i = read_results(fname_extract_metrics)
                        # Get the percent absolute deviation with the true value
                        #perc_error[i_tract, i_method, i_bootstrap] = 100 * (x_true_i[i_tract] - x_estim_i) / float(x_true_i[i_tract])
                        perc_error[i_tract, i_method, i_bootstrap] = 100 * abs(x_estim_i - x_true_i[i_tract]) / float(x_true_i[i_tract])

        # calculate percentage error for all tracts (only for automatic methods)
        # loop across methods
        for i_method in range(len(list_methods)):
            # check if method is automatic
            if list_methods[i_method].find('man') == -1:
                # display stuff
                print 'Tract: ALL, Method: '+list_methods[i_method]
                # automatic extraction in all tracts
                sct.run('sct_extract_metric -i ' + fname_phantom_noise + ' -f ' + folder_cropped_atlas + ' -m '+list_methods[i_method] + ' -o '+fname_extract_metrics + ' -p '+param_map)
                # read results in txt file
                x_estim_i_all = read_results(fname_extract_metrics)
                # get nonzero values
                index_nonzero = np.nonzero(values_synthetic_data)
                perc_error_all[0:nb_tracts_all, i_method, i_bootstrap] = 100 * abs(x_estim_i_all[index_nonzero] - values_synthetic_data[index_nonzero]) / values_synthetic_data[index_nonzero]  # will be used to display boxcar
                # perc_error_all[0:nb_tracts_all, i_method, i_bootstrap] = 100 * (x_estim_i_all[index_nonzero] - values_synthetic_data[index_nonzero]) / values_synthetic_data[index_nonzero]  # will be used to display boxcar
                # compute mean squared error
                stat_perc_error_all[i_method, i_bootstrap, 0] = (perc_error_all[:, i_method, i_bootstrap] ** 2).mean()  # mean squared error
                stat_perc_error_all[i_method, i_bootstrap, 1] = np.median(perc_error_all[:, i_method, i_bootstrap])  # median
                stat_perc_error_all[i_method, i_bootstrap, 2] = min(perc_error_all[:, i_method, i_bootstrap])
                stat_perc_error_all[i_method, i_bootstrap, 3] = max(perc_error_all[:, i_method, i_bootstrap])

    # Calculate elapsed time
    elapsed_time = int(round(time.time() - start_time))
    # Extract time in minutes and seconds
    sec = elapsed_time % 60
    mte = (elapsed_time - sec) / 60

    # PRINT RESULTS FOR SINGLE TRACTS
    # ===============================
    if nb_tracts:
        # create output folder
        create_folder(results_folder+'sub/', 0)
        # Open text file where results are printed
        fname_results = results_folder+'sub/'+results_file+'.txt'
        results_text = open(fname_results, 'w+')
        # print header
        print >>results_text, '# Mean(std) percentage of absolute error within single tracts.'
        print >>results_text, '# Generated on: ' + time.strftime('%Y-%m-%d %H:%M:%S')
        print >>results_text, '# true_value: ' + str(true_value)
        print >>results_text, '# sigma noise (in percentage of true value): ' + str(std_noise) + '%'
        print >>results_text, '# range tracts (in percentage of true value): (-' + str(range_tract) + '%:+' + str(range_tract) + '%)'
        print >>results_text, '# value CSF (in percentage of true value): ' + str(val_csf) + '%'
        print >>results_text, '# number of iterations: ' + str(nb_bootstraps)
        print >>results_text, '# elapsed time: ' + str(mte) + 'min' + str(sec) + 's'
        text_methods = 'Label'
        # loop across methods
        for i_method in range(len(list_methods)):
            text_methods = text_methods + ', ' + list_methods[i_method]
        print >>results_text, text_methods
        # print results
        # loop across tracts
        for i_tract in range(len(list_tracts)):
            text_results = list_tracts_txt[i_tract]
            # loop across methods
            for i_method in range(len(list_methods)):
                text_results = text_results + ', ' + str(round(np.mean(perc_error[i_tract, i_method, :]), ndigits=nb_digits_results))+'('+str(round(np.std(perc_error[i_tract, i_method, :]), ndigits=nb_digits_results))+')'
            print >>results_text, text_results
        # close file
        results_text.close()
        # display results
        status, output = sct.run('cat ' + fname_results)
        print output

    # PRINT RESULTS FOR ALL TRACTS
    # ============================
    # Open text file where results are printed
    fname_results = results_folder+results_file+'_all.txt'
    results_text = open(fname_results, 'w+')
    # print header
    print >>results_text, '# Mean(std) percentage of absolute error within all tracts (only for automatic methods).'
    print >>results_text, '# Generated on: ' + time.strftime('%Y-%m-%d %H:%M:%S')
    print >>results_text, '# true_value: ' + str(true_value)
    print >>results_text, '# sigma noise (in percentage of true value): ' + str(std_noise) + '%'
    print >>results_text, '# range tracts (in percentage of true value): (-' + str(range_tract) + '%:+' + str(range_tract) + '%)'
    print >>results_text, '# value CSF (in percentage of true value): ' + str(val_csf) + '%'
    print >>results_text, '# number of iterations: ' + str(nb_bootstraps)
    print >>results_text, '# elapsed time: ' + str(mte) + 'min' + str(sec) + 's'
    text_methods = 'Label'
    # loop across methods
    for i_method in range(len(list_methods)):
        # check if method is automatic
        if list_methods[i_method].find('man') == -1:
            text_methods = text_methods + ', ' + list_methods[i_method]
    print >>results_text, text_methods
    # print results
    # loop across tracts
    for i_tract in range(nb_tracts_all):
        text_results = str(i_tract)
        # loop across methods
        for i_method in range(len(list_methods)):
            # check if method is automatic
            if list_methods[i_method].find('man') == -1:
                text_results = text_results + ', ' + str(round(np.mean(perc_error_all[i_tract, i_method, :]), ndigits=nb_digits_results))+'('+str(round(np.std(perc_error_all[i_tract, i_method, :]), ndigits=nb_digits_results))+')'
        print >>results_text, text_results
    # loop across statistics
    nb_stats = len(list_stat)
    for i_stat in range(nb_stats):
        text_results = list_stat[i_stat]
        # loop across methods
        for i_method in range(len(list_methods)):
            # check if method is automatic
            if list_methods[i_method].find('man') == -1:
                text_results = text_results + ', ' + str(round(np.mean(stat_perc_error_all[i_method, :, i_stat]), ndigits=nb_digits_results))+'('+str(round(np.std(stat_perc_error_all[i_method, :, i_stat]), ndigits=nb_digits_results))+')'
        print >>results_text, text_results
    # close file
    results_text.close()
    # display results
    status, output = sct.run('cat ' + fname_results)
    print output
def validate_atlas(folder_cropped_atlas, nb_bootstraps, std_noise, range_tract, val_csf, results_folder, results_file, mask_folder, list_methods, test_map=0, param_map='20,20', list_tracts=[]):
    """Validate atlas-based metric extraction on synthetic WM phantoms.

    NOTE(review): this is a SECOND definition of validate_atlas in the same
    module (a reformatted variant of the earlier one that builds paths with
    os.path.join instead of string concatenation). At import time it silently
    overrides the earlier definition — confirm the duplication is intentional
    and delete whichever copy is stale.

    For each of nb_bootstraps iterations: generate a white-matter phantom
    with noise from the cropped atlas, run each extraction method in
    list_methods, and accumulate percent absolute errors against the known
    synthetic values. Methods whose name contains 'man' are treated as
    manual-mask extractions (sct_average_data_within_mask); all others are
    automatic (sct_extract_metric). Results — mean(std) per tract/method and
    MSE/median/min/max across all tracts for automatic methods — are written
    to text files under results_folder and echoed to stdout.

    Parameters
    ----------
    folder_cropped_atlas : str
        Path to the cropped atlas folder containing WMtract__XX.nii.gz files.
    nb_bootstraps : int
        Number of bootstrap iterations.
    std_noise : numeric
        Noise sigma, expressed as a percentage of true_value.
    range_tract : numeric
        Tract value variation range, as a percentage of true_value.
    val_csf : numeric
        CSF value, as a percentage of true_value.
    results_folder, results_file : str
        Output folder and base file name for the result text files.
    mask_folder : sequence of str
        Folders containing manual masks; indexed by the digit following
        'man' in the method name.
    list_methods : sequence of str
        Method identifiers to evaluate.
    test_map : int, optional
        If nonzero, skip the per-tract (single-tract) evaluation loop.
    param_map : str, optional
        Value passed via '-p' to sct_extract_metric for the all-tract run.
    list_tracts : list of str, optional
        Tract label strings, e.g. ['2', '17', '0,1,15,16'].
        NOTE(review): mutable default argument; harmless here because the
        list is never mutated, but 'None' + in-body default would be safer.

    Side effects: creates a timestamped temporary folder ('tmp.<timestamp>'),
    the results folder(s) and result files; shells out to sct_* commands.
    NOTE(review): Python 2 syntax (print statement, 'print >>' chevron).
    """
    # Parameters
    file_phantom = "WM_phantom.nii.gz"
    file_phantom_noise = "WM_phantom_noise.nii.gz"
    file_tract_sum = "tracts_sum.nii.gz"
    true_value = 40  # ground-truth metric value used for phantom generation
    file_extract_metrics = "metric_label.txt"
    # list_tracts = ['2', '17', '0,1,15,16']
    list_tracts_txt = ['csl', 'csr', 'dc']  # text labels matching list_tracts entries
    index_dorsalcolumn = 2  # index of dorsal column in list_tracts
    nb_tracts_all = 32  # total number of tracts in atlas (do not include CSF tracts)
    # dorsal_column_labels = '0,1,15,16'
    # nb_tracts_dorsalcolumn = 4
    value_gm = 35  # value in gray matter
    #value_csf = 5  # value in csf
    nb_digits_results = 2  # number of digits to display for result file
    mask_prefix = 'manual_'
    mask_ext = '.nii.gz'

    # initialization
    start_time = time.time()  # save start time for duration
    folder_tmp = 'tmp.' + datetime.datetime.now().strftime("%y%m%d%H%M%S%f/")
    nb_methods = len(list_methods)
    nb_tracts = len(list_tracts)
    # percent error within single tract (for comparison with manual labeling)
    perc_error = np.zeros(
        shape=(nb_tracts, nb_methods, nb_bootstraps)
    )
    # percent error for all tracts (for comparing automatic methods)
    perc_error_all = np.zeros(shape=(
        nb_tracts_all, nb_methods, nb_bootstraps
    ))
    stat_perc_error_all = np.zeros(shape=(nb_methods, nb_bootstraps, 4))  # statistics
    list_stat = ['MSE', 'median', 'min', 'max']
    x_true_i = np.zeros(shape=(nb_tracts))  # true values per evaluated tract
    fname_phantom = os.path.join(folder_tmp, file_phantom)
    fname_phantom_noise = os.path.join(folder_tmp, file_phantom_noise)
    fname_tract_sum = os.path.join(folder_tmp, file_tract_sum)

    # create output folder
    create_folder(results_folder, 0)

    # Extract the tracts from the atlas' folder
    tracts = get_tracts(folder_cropped_atlas)

    # get file name of the first atlas file (used as geometry reference when saving)
    fname_atlas = os.path.join(folder_cropped_atlas, 'WMtract__00.nii.gz')

    # Get ponderation of each tract for dorsal column
    # average ponderation of each tract of the dorsal column
    if nb_tracts:
        list_tract_dorsalcolumn = list_tracts[index_dorsalcolumn].split(',')
        nb_tracts_dorsalcolumn = len(list_tract_dorsalcolumn)
        pond_dc = np.zeros(nb_tracts_dorsalcolumn)
        # sum of each
        pond_sum = 0
        for i in range(nb_tracts_dorsalcolumn):
            # i = int(i)
            # Sum tracts values which are higher than 0 in the tracts
            pond_dc[i] = sum(
                tracts[int(list_tract_dorsalcolumn[i]), 0][tracts[int(list_tract_dorsalcolumn[i]), 0] > 0])
            pond_sum = pond_sum + pond_dc[i]
        # Normalize the sum of ponderations to 1
        pond_dc = pond_dc / pond_sum

    # create temporary folder
    sct.run('mkdir ' + folder_tmp)

    # loop across bootstrap
    for i_bootstrap in range(0, nb_bootstraps):
        sct.printv(
            'Iteration: ' + str(i_bootstrap + 1) + '/' + str(nb_bootstraps), 1,
            'warning')

        # Generate phantom
        # NOTE(review): true_value * val_csf / 100 is Python 2 integer division
        # when val_csf is an int — confirm this truncation is intended.
        [WM_phantom, WM_phantom_noise, values_synthetic_data,
         tracts_sum] = phantom_generation(tracts, std_noise, range_tract,
                                          true_value, folder_tmp, value_gm,
                                          true_value * val_csf / 100)
        # Save generated phantoms as nifti image (.nii.gz)
        save_3D_nparray_nifti(WM_phantom, fname_phantom, fname_atlas)
        save_3D_nparray_nifti(WM_phantom_noise, fname_phantom_noise,
                              fname_atlas)
        save_3D_nparray_nifti(tracts_sum, fname_tract_sum, fname_atlas)

        # Get the np.mean of all values in dorsal column in the generated phantom
        if nb_tracts:
            dc_val_avg = 0
            for j in range(nb_tracts_dorsalcolumn):
                dc_val_avg = dc_val_avg + values_synthetic_data[int(
                    list_tract_dorsalcolumn[j])] * pond_dc[j]
            dc_val_avg = float(dc_val_avg)
            # build variable with true values (WARNING: HARD-CODED INDICES)
            x_true_i[0] = values_synthetic_data[int(list_tracts[0])]
            x_true_i[1] = values_synthetic_data[int(list_tracts[1])]
            x_true_i[2] = dc_val_avg

        fname_extract_metrics = os.path.join(folder_tmp, file_extract_metrics)

        if nb_tracts:
            if not test_map:
                # loop across tracts
                for i_tract in range(len(list_tracts)):
                    # loop across methods
                    for i_method in range(len(list_methods)):
                        # display stuff
                        print 'Tract: ' + list_tracts[
                            i_tract] + ', Method: ' + list_methods[i_method]
                        # check if method is manual
                        if not list_methods[i_method].find('man') == -1:
                            # find index of manual mask (digit right after 'man' in the method name)
                            index_manual = int(list_methods[i_method][
                                list_methods[i_method].find('man') + 3])
                            fname_mask = mask_folder[
                                index_manual] + mask_prefix + list_tracts_txt[
                                    i_tract] + mask_ext
                            # manual extraction
                            status, output = sct.run(
                                'sct_average_data_within_mask -i ' +
                                fname_phantom_noise + ' -m ' + fname_mask +
                                ' -v 0')
                            x_estim_i = float(output)
                        else:
                            # automatic extraction
                            sct.run('sct_extract_metric -i ' +
                                    fname_phantom_noise + ' -f ' +
                                    folder_cropped_atlas + ' -m ' +
                                    list_methods[i_method] + ' -l ' +
                                    list_tracts[i_tract] + ' -a -o ' +
                                    fname_extract_metrics)
                            # read in txt file
                            x_estim_i = read_results(fname_extract_metrics)
                        # Get the percent absolute deviation with the true value
                        #perc_error[i_tract, i_method, i_bootstrap] = 100 * (x_true_i[i_tract] - x_estim_i) / float(x_true_i[i_tract])
                        perc_error[i_tract, i_method, i_bootstrap] = 100 * abs(
                            x_estim_i - x_true_i[i_tract]) / float(
                                x_true_i[i_tract])

        # calculate percentage error for all tracts (only for automatic methods)
        # loop across methods
        for i_method in range(len(list_methods)):
            # check if method is automatic
            if list_methods[i_method].find('man') == -1:
                # display stuff
                print 'Tract: ALL, Method: ' + list_methods[i_method]
                # automatic extraction in all tracts
                sct.run('sct_extract_metric -i ' + fname_phantom_noise +
                        ' -f ' + folder_cropped_atlas + ' -m ' +
                        list_methods[i_method] + ' -o ' +
                        fname_extract_metrics + ' -p ' + param_map)
                # read results in txt file
                x_estim_i_all = read_results(fname_extract_metrics)
                # get nonzero values
                index_nonzero = np.nonzero(values_synthetic_data)
                perc_error_all[
                    0:nb_tracts_all, i_method, i_bootstrap] = 100 * abs(
                        x_estim_i_all[index_nonzero] -
                        values_synthetic_data[index_nonzero]
                    ) / values_synthetic_data[
                        index_nonzero]  # will be used to display boxcar
                # perc_error_all[0:nb_tracts_all, i_method, i_bootstrap] = 100 * (x_estim_i_all[index_nonzero] - values_synthetic_data[index_nonzero]) / values_synthetic_data[index_nonzero]  # will be used to display boxcar
                # compute mean squared error
                stat_perc_error_all[i_method, i_bootstrap, 0] = (
                    perc_error_all[:, i_method, i_bootstrap]**2
                ).mean()  # mean squared error
                stat_perc_error_all[i_method, i_bootstrap, 1] = np.median(
                    perc_error_all[:, i_method, i_bootstrap])  # median
                stat_perc_error_all[i_method, i_bootstrap, 2] = min(
                    perc_error_all[:, i_method, i_bootstrap])
                stat_perc_error_all[i_method, i_bootstrap, 3] = max(
                    perc_error_all[:, i_method, i_bootstrap])

    # Calculate elapsed time
    elapsed_time = int(round(time.time() - start_time))
    # Extract time in minutes and seconds
    sec = elapsed_time % 60
    mte = (elapsed_time - sec) / 60

    # PRINT RESULTS FOR SINGLE TRACTS
    # ===============================
    if nb_tracts:
        # create output folder
        create_folder(results_folder + 'sub/', 0)
        # Open text file where results are printed
        fname_results = results_folder + 'sub/' + results_file + '.txt'
        results_text = open(fname_results, 'w+')
        # print header
        print >> results_text, '# Mean(std) percentage of absolute error within single tracts.'
        print >> results_text, '# Generated on: ' + time.strftime(
            '%Y-%m-%d %H:%M:%S')
        print >> results_text, '# true_value: ' + str(true_value)
        print >> results_text, '# sigma noise (in percentage of true value): ' + str(
            std_noise) + '%'
        print >> results_text, '# range tracts (in percentage of true value): (-' + str(
            range_tract) + '%:+' + str(range_tract) + '%)'
        print >> results_text, '# value CSF (in percentage of true value): ' + str(
            val_csf) + '%'
        print >> results_text, '# number of iterations: ' + str(nb_bootstraps)
        print >> results_text, '# elapsed time: ' + str(mte) + 'min' + str(
            sec) + 's'
        text_methods = 'Label'
        # loop across methods
        for i_method in range(len(list_methods)):
            text_methods = text_methods + ', ' + list_methods[i_method]
        print >> results_text, text_methods
        # print results
        # loop across tracts
        for i_tract in range(len(list_tracts)):
            text_results = list_tracts_txt[i_tract]
            # loop across methods
            for i_method in range(len(list_methods)):
                text_results = text_results + ', ' + str(
                    round(np.mean(perc_error[i_tract, i_method, :]),
                          ndigits=nb_digits_results)) + '(' + str(
                              round(np.std(perc_error[i_tract, i_method, :]),
                                    ndigits=nb_digits_results)) + ')'
            print >> results_text, text_results
        # close file
        results_text.close()
        # display results
        status, output = sct.run('cat ' + fname_results)
        print output

    # PRINT RESULTS FOR ALL TRACTS
    # ============================
    # Open text file where results are printed
    fname_results = results_folder + results_file + '_all.txt'
    results_text = open(fname_results, 'w+')
    # print header
    print >> results_text, '# Mean(std) percentage of absolute error within all tracts (only for automatic methods).'
    print >> results_text, '# Generated on: ' + time.strftime(
        '%Y-%m-%d %H:%M:%S')
    print >> results_text, '# true_value: ' + str(true_value)
    print >> results_text, '# sigma noise (in percentage of true value): ' + str(
        std_noise) + '%'
    print >> results_text, '# range tracts (in percentage of true value): (-' + str(
        range_tract) + '%:+' + str(range_tract) + '%)'
    print >> results_text, '# value CSF (in percentage of true value): ' + str(
        val_csf) + '%'
    print >> results_text, '# number of iterations: ' + str(nb_bootstraps)
    print >> results_text, '# elapsed time: ' + str(mte) + 'min' + str(
        sec) + 's'
    text_methods = 'Label'
    # loop across methods
    for i_method in range(len(list_methods)):
        # check if method is automatic
        if list_methods[i_method].find('man') == -1:
            text_methods = text_methods + ', ' + list_methods[i_method]
    print >> results_text, text_methods
    # print results
    # loop across tracts
    for i_tract in range(nb_tracts_all):
        text_results = str(i_tract)
        # loop across methods
        for i_method in range(len(list_methods)):
            # check if method is automatic
            if list_methods[i_method].find('man') == -1:
                text_results = text_results + ', ' + str(
                    round(np.mean(perc_error_all[i_tract, i_method, :]),
                          ndigits=nb_digits_results)) + '(' + str(
                              round(np.std(perc_error_all[i_tract, i_method, :]),
                                    ndigits=nb_digits_results)) + ')'
        print >> results_text, text_results
    # loop across statistics
    nb_stats = len(list_stat)
    for i_stat in range(nb_stats):
        text_results = list_stat[i_stat]
        # loop across methods
        for i_method in range(len(list_methods)):
            # check if method is automatic
            if list_methods[i_method].find('man') == -1:
                text_results = text_results + ', ' + str(
                    round(np.mean(stat_perc_error_all[i_method, :, i_stat]),
                          ndigits=nb_digits_results)) + '(' + str(
                              round(np.std(stat_perc_error_all[i_method, :, i_stat]),
                                    ndigits=nb_digits_results)) + ')'
        print >> results_text, text_results
    # close file
    results_text.close()
    # display results
    status, output = sct.run('cat ' + fname_results)
    print output