def main(): # Initialization path_data = '' xmin = '50' xsize = '100' ymin = '0' ysize = '-1' zmin = '0' zsize = '-1' fsloutput = 'export FSLOUTPUTTYPE=NIFTI; ' # for faster processing, all outputs are in NIFTI # Parameters for debug mode if param.debug: print '\n*** WARNING: DEBUG MODE ON ***\n' path_data = '/Volumes/folder_shared/template/t2' path_out = '/Volumes/folder_shared/template/t2_crop' else: # Check input parameters try: opts, args = getopt.getopt(sys.argv[1:], 'hi:o:') except getopt.GetoptError: usage() if not opts: usage() for opt, arg in opts: if opt == '-h': usage() elif opt in ("-i"): path_data = arg elif opt in ("-o"): path_out = arg # check input folder sct.check_folder_exist(path_data) # add slash path_data = sct.slash_at_the_end(path_data, 1) path_out = sct.slash_at_the_end(path_out, 1) # create output folder if os.path.exists(path_out): sct.printv('WARNING: Output folder exists. Deleting it.', 1, 'warning') # remove dir shutil.rmtree(path_out) # create dir os.makedirs(path_out) # list all files in folder files = [f for f in glob.glob(path_data + '*.nii.gz')] # for files in glob.glob(path_data+'*.nii.gz'): # print files # crop files one by one (to inform user) for f in files: path_f, file_f, ext_f = sct.extract_fname(f) sct.run('fslroi ' + f + ' ' + path_out + file_f + ' ' + xmin + ' ' + xsize + ' ' + ymin + ' ' + ysize + ' ' + zmin + ' ' + zsize) # to view results print '\nDone!'
def main():
    """Crop every *.nii.gz volume of an input folder with fslroi.

    NOTE(review): this is an exact duplicate of the ``main`` defined just
    above in this file; as the later definition it silently shadows the
    earlier one. Consider deleting one of the two.
    """
    # Initialization
    path_data = ''
    xmin = '50'
    xsize = '100'
    ymin = '0'
    ysize = '-1'  # -1 is fslroi's "full extent along this axis"
    zmin = '0'
    zsize = '-1'
    fsloutput = 'export FSLOUTPUTTYPE=NIFTI; '  # for faster processing, all outputs are in NIFTI (NOTE: unused below)

    # Parameters for debug mode
    if param.debug:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        path_data = '/Volumes/folder_shared/template/t2'
        path_out = '/Volumes/folder_shared/template/t2_crop'
    else:
        # Check input parameters
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'hi:o:')
        except getopt.GetoptError:
            usage()
        if not opts:
            usage()
        for opt, arg in opts:
            if opt == '-h':
                usage()
            # NOTE(review): ("-i") is just the string '-i', so these are
            # substring tests, not tuple membership; works for these exact
            # flags but is fragile. path_out stays unbound if -o is omitted.
            elif opt in ("-i"):
                path_data = arg
            elif opt in ("-o"):
                path_out = arg

    # check input folder
    sct.check_folder_exist(path_data)

    # add slash
    path_data = sct.slash_at_the_end(path_data, 1)
    path_out = sct.slash_at_the_end(path_out, 1)

    # create output folder
    if os.path.exists(path_out):
        sct.printv('WARNING: Output folder exists. Deleting it.', 1, 'warning')
        # remove dir
        shutil.rmtree(path_out)
    # create dir
    os.makedirs(path_out)

    # list all files in folder
    files = [f for f in glob.glob(path_data+'*.nii.gz')]
    # for files in glob.glob(path_data+'*.nii.gz'):
    #     print files

    # crop files one by one (to inform user)
    for f in files:
        path_f, file_f, ext_f = sct.extract_fname(f)
        sct.run('fslroi '+f+' '+path_out+file_f+' '+xmin+' '+xsize+' '+ymin+' '+ysize+' '+zmin+' '+zsize)

    # to view results
    print '\nDone!'
def test_function(script_name):
    """Run the test module "test_<script_name>" and return its status code.

    The test runs at most twice: if the first run fails, the testing data is
    re-downloaded and the test retried once. Results go to a dedicated
    "results_<script_name>" folder and a log file.

    NOTE(review): this function changes the current working directory several
    times with relative paths; it appears to rely on being started from inside
    param.path_tmp -- confirm against the caller before moving it.
    """
    if script_name == 'test_debug':
        return test_debug()  # JULIEN
    else:
        # Using the retest variable to recheck if we can perform tests after we downloaded the data
        retest = 1
        # while condition values are arbitrary and are present to prevent infinite loop
        while 0 < retest < 3:
            # build script name
            fname_log = script_name + ".log"
            tmp_script_name = script_name
            result_folder = "results_"+script_name
            script_name = "test_"+script_name

            if retest == 1:
                # create folder and go in it (only on the first attempt; on
                # retry we chdir back into it at the bottom of the else branch)
                sct.create_folder(result_folder)
                os.chdir(result_folder)

            # display script name
            print_line('Checking '+script_name)
            # import function as a module
            script_tested = importlib.import_module(script_name)
            # test function
            status, output = script_tested.test(param.path_data)
            # returning script_name to its original name
            script_name = tmp_script_name
            # manage status
            if status == 0:
                print_ok()
                retest = 0  # success: leave the while loop
            else:
                print_fail()
                print output
                print "\nTest files missing, downloading them now \n"

                # climb out of results_<name>/ and the tmp folder, re-fetch data
                os.chdir('../..')
                downloaddata()

                param.path_data = sct.slash_at_the_end(os.path.abspath(param.path_data), 1)

                # check existence of testing data folder
                sct.check_folder_exist(param.path_data)
                os.chdir(param.path_tmp + result_folder)
                retest += 1

            # log file
            write_to_log_file(fname_log, output, 'w')

        # go back to parent folder
        os.chdir('..')

        # return
        return status
def checkFolder(self, param):
    """Verify that the folder given in ``param`` exists, then return it unchanged."""
    folder_path = param
    sct.printv("Check folder existence...")
    sct.check_folder_exist(folder_path, 0)
    return folder_path
def main(): path_data = param.path_data function_to_test = param.function_to_test # function_to_avoid = param.function_to_avoid remove_tmp_file = param.remove_tmp_file # Check input parameters try: opts, args = getopt.getopt(sys.argv[1:],'h:d:p:f:r:a:') except getopt.GetoptError: usage() for opt, arg in opts: if opt == '-h': usage() sys.exit(0) if opt == '-d': param.download = int(arg) if opt == '-p': param.path_data = arg if opt == '-f': function_to_test = arg # if opt == '-a': # function_to_avoid = arg if opt == '-r': remove_tmp_file = int(arg) start_time = time.time() # if function_to_avoid: # try: # functions.remove(function_to_avoid) # except ValueError: # print 'The function you want to avoid does not figure in the functions to test list' # download data if param.download: downloaddata() param.path_data = 'sct_testing_data/data' # get absolute path and add slash at the end param.path_data = sct.slash_at_the_end(os.path.abspath(param.path_data), 1) # check existence of testing data folder if not sct.check_folder_exist(param.path_data, 0): downloaddata() # display path to data sct.printv('\nPath to testing data:\n.. '+param.path_data, param.verbose) # create temp folder that will have all results and go in it param.path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1) sct.create_folder(param.path_tmp) os.chdir(param.path_tmp) # get list of all scripts to test functions = fill_functions() # loop across all functions and test them status = [] [status.append(test_function(f)) for f in functions if function_to_test == f] if not status: for f in functions: status.append(test_function(f)) print 'status: '+str(status) # display elapsed time elapsed_time = time.time() - start_time print 'Finished! Elapsed time: '+str(int(round(elapsed_time)))+'s\n' # remove temp files if param.remove_tmp_file: sct.printv('\nRemove temporary files...', param.verbose) sct.run('rm -rf '+param.path_tmp, param.verbose) e = 0 if sum(status) != 0: e = 1 print e sys.exit(e)
def main():
    """Build a 'template_landmarks.nii.gz' volume from individual landmark images.

    For every *.nii.gz landmark image of the input folder, the z-positions of
    the non-zero voxels are collected; the per-landmark average z across
    subjects is computed, and one voxel per landmark (value = landmark index,
    starting at 1) is written at the template's mid-plane (nx/2, ny/2).

    Flags: -i <folder of landmark images>, -t <template file>,
    -n <number of landmarks>, -v <verbose>.
    """
    #Initialization
    directory = ""
    fname_template = ''
    n_l = 0
    verbose = param.verbose

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hi:t:n:v:')
    except getopt.GetoptError:
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        # NOTE(review): ("-i") is just the string '-i' -- substring test, not
        # tuple membership; works for these single flags but fragile.
        elif opt in ("-i"):
            directory = arg
        elif opt in ("-t"):
            fname_template = arg
        elif opt in ('-n'):
            n_l = int(arg)
        elif opt in ('-v'):
            verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_template == '' or directory == '':
        usage()

    # check existence of input files
    print '\nCheck if file exists ...\n'
    sct.check_file_exist(fname_template)
    sct.check_folder_exist(directory)

    path_template, file_template, ext_template = sct.extract_fname(fname_template)
    template_absolute_path = sct.get_absolute_path(fname_template)

    os.chdir(directory)

    n_i = len([name for name in os.listdir('.') if (os.path.isfile(name) and name.endswith(".nii.gz") and name != 'template_landmarks.nii.gz')])  # number of landmark images

    # average[subject, landmark] = z-coordinate of that landmark (0 if absent)
    average = zeros((n_i, n_l))
    compteur = 0  # subject counter ("compteur" = French for counter)

    for file in os.listdir('.'):
        if file.endswith(".nii.gz") and file != 'template_landmarks.nii.gz':
            print file
            img = nibabel.load(file)
            data = img.get_data()  # assumes a 3-D volume -- TODO confirm
            X, Y, Z = (data > 0).nonzero()
            # sort the z-coordinates ascending, then reverse -> descending
            Z = [Z[i] for i in Z.argsort()]
            Z.reverse()

            for i in xrange(n_l):
                if i < len(Z):
                    average[compteur][i] = Z[i]

            compteur = compteur + 1

    # per-landmark mean z over the subjects where the landmark was found (>0)
    average = array([int(round(mean([average[average[:, i] > 0, i]]))) for i in xrange(n_l)])

    #print average

    print template_absolute_path
    print '\nGet dimensions of template...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(template_absolute_path)
    print '.. matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz)
    print '.. voxel size: ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(pz) + 'mm'

    # write one labeled voxel per landmark in a zeroed copy of the template
    img = nibabel.load(template_absolute_path)
    data = img.get_data()
    hdr = img.get_header()
    data[:, :, :] = 0
    compteur = 1  # landmark label value, starting at 1
    for i in average:
        print int(round(nx / 2.0)), int(round(ny / 2.0)), int(round(i)), int(round(compteur))
        data[int(round(nx / 2.0)), int(round(ny / 2.0)), int(round(i))] = int(round(compteur))
        compteur = compteur + 1

    print '\nSave volume ...'
    #hdr.set_data_dtype('float32') # set imagetype to uint8
    # save volume
    #data = data.astype(float32, copy =False)
    img = nibabel.Nifti1Image(data, None, hdr)
    file_name = 'template_landmarks.nii.gz'
    nibabel.save(img, file_name)
    print '\nFile created : ' + file_name
def main():
    """Parse atlas-validation result files and box-plot per-method absolute errors.

    Reads every *.txt file under <results_folder>/noise and
    <results_folder>/tracts, extracts per-file noise std, tract std, CSF
    value, method names, median/min/max statistics and per-label errors, then
    draws two box-plot figures (error vs noise std at tract std 10, and error
    vs tract std at SNR 10) saved as PDF in
    param_default.fname_folder_to_save_fig.

    Flags: -i <results folder>, -m <comma-separated methods to display>.
    """
    results_folder = param_default.results_folder
    methods_to_display = param_default.methods_to_display

    # Parameters for debug mode
    if param_default.debug:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        results_folder = "/Users/slevy_local/spinalcordtoolbox/dev/atlas/validate_atlas/results_20150210_200iter"  #"C:/cygwin64/home/Simon_2/data_methods_comparison"
        path_sct = '/Users/slevy_local/spinalcordtoolbox'  #'C:/cygwin64/home/Simon_2/spinalcordtoolbox'
        methods_to_display = 'bin,wa,wath,ml,map'
    else:
        status, path_sct = commands.getstatusoutput('echo $SCT_DIR')  # NOTE: status is unused
        # Check input parameters
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'i:m:')  # define flags
        except getopt.GetoptError as err:  # check if the arguments are defined
            print str(err)  # error
            # usage()  # display usage
        # if not opts:
        #     print 'Please enter the path to the result folder. Exit program.'
        #     sys.exit(1)
        #     # usage()
        for opt, arg in opts:  # explore flags
            # NOTE(review): "opt in '-i'" is a substring test; works for these
            # exact single-letter flags but is fragile.
            if opt in '-i':
                results_folder = arg
            if opt in '-m':
                methods_to_display = arg

    # Append path that contains scripts, to be able to load modules
    sys.path.append(path_sct + '/scripts')
    import sct_utils as sct

    sct.printv("Working directory: " + os.getcwd())

    results_folder_noise = results_folder + '/noise'
    results_folder_tracts = results_folder + '/tracts'

    sct.printv('\n\nData will be extracted from folder ' + results_folder_noise + ' and ' + results_folder_tracts + '.', 'warning')
    sct.printv('\t\tCheck existence...')
    sct.check_folder_exist(results_folder_noise)
    sct.check_folder_exist(results_folder_tracts)

    # Extract methods to display
    methods_to_display = methods_to_display.strip().split(',')

    # Extract file names of the results files
    fname_results_noise = glob.glob(results_folder_noise + '/*.txt')
    fname_results_tracts = glob.glob(results_folder_tracts + '/*.txt')
    fname_results = fname_results_noise + fname_results_tracts
    # Remove doublons (due to the two folders)
    # NOTE(review): a commented-out O(n^2) pairwise-comparison version of the
    # de-duplication was here; condensed to this note.
    file_results = []
    for fname in fname_results:
        file_results.append(os.path.basename(fname))
    for file in file_results:
        if file_results.count(file) > 1:
            ind = file_results.index(file)
            fname_results.remove(fname_results[ind])
            file_results.remove(file)

    nb_results_file = len(fname_results)

    # 1st dim: SNR, 2nd dim: tract std, 3rd dim: mean abs error, 4th dim: std abs error
    # result_array = numpy.empty((nb_results_file, nb_results_file, 3), dtype=object)
    # SNR
    snr = numpy.zeros((nb_results_file))
    # Tracts std
    tracts_std = numpy.zeros((nb_results_file))
    # CSF value
    csf_values = numpy.zeros((nb_results_file))
    # methods' name
    methods_name = []  #numpy.empty((nb_results_file, nb_method), dtype=object)
    # labels
    error_per_label = []
    std_per_label = []
    labels_id = []
    # median
    median_results = numpy.zeros((nb_results_file, 5))
    # median std across bootstraps
    median_std = numpy.zeros((nb_results_file, 5))
    # min
    min_results = numpy.zeros((nb_results_file, 5))
    # max
    max_results = numpy.zeros((nb_results_file, 5))

    # Parse each result file. The files are assumed to contain marker lines
    # ("sigma noise", "range tracts", "# value CSF", "Label", "median", "min,",
    # "max") followed by comma-separated values -- format inferred from the
    # parsing below, confirm against the generator of these files.
    for i_file in range(0, nb_results_file):

        # Open file
        f = open(fname_results[i_file])  # open file
        # Extract all lines in .txt file
        lines = [line for line in f.readlines() if line.strip()]

        # extract SNR
        # find all index of lines containing the string "sigma noise"
        ind_line_noise = [lines.index(line_noise) for line_noise in lines if "sigma noise" in line_noise]
        if len(ind_line_noise) != 1:
            sct.printv("ERROR: number of lines including \"sigma noise\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # keep only the digits of the marker line
            snr[i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))

        # extract tract std
        ind_line_tract_std = [lines.index(line_tract_std) for line_tract_std in lines if "range tracts" in line_tract_std]
        if len(ind_line_tract_std) != 1:
            sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            tracts_std[i_file] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))

        # extract CSF value
        ind_line_csf_value = [lines.index(line_csf_value) for line_csf_value in lines if "# value CSF" in line_csf_value]
        if len(ind_line_csf_value) != 1:
            # NOTE(review): this message mentions "range tracts" although the
            # search above is for "# value CSF" -- likely a copy/paste slip.
            sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            csf_values[i_file] = int(''.join(c for c in lines[ind_line_csf_value[0]].split(':')[1] if c.isdigit()))

        # extract method name
        ind_line_label = [lines.index(line_label) for line_label in lines if "Label" in line_label]
        if len(ind_line_label) != 1:
            sct.printv("ERROR: number of lines including \"Label\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # methods_name[i_file, :] = numpy.array(lines[ind_line_label[0]].strip().split(',')[1:])
            methods_name.append(lines[ind_line_label[0]].strip().replace(' ', '').split(',')[1:])

        # extract median
        ind_line_median = [lines.index(line_median) for line_median in lines if "median" in line_median]
        if len(ind_line_median) != 1:
            sct.printv("WARNING: number of lines including \"median\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            median = lines[ind_line_median[0]].strip().split(',')[1:]
            # values look like "<value>(<std>)"
            median_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in median])
            median_std[i_file, :] = numpy.array([float(m.split('(')[1][:-1]) for m in median])

        # extract min
        ind_line_min = [lines.index(line_min) for line_min in lines if "min," in line_min]
        if len(ind_line_min) != 1:
            sct.printv("WARNING: number of lines including \"min\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            min = lines[ind_line_min[0]].strip().split(',')[1:]  # NOTE: shadows builtin min
            min_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in min])

        # extract max
        ind_line_max = [lines.index(line_max) for line_max in lines if "max" in line_max]
        if len(ind_line_max) != 1:
            sct.printv("WARNING: number of lines including \"max\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            max = lines[ind_line_max[0]].strip().split(',')[1:]  # NOTE: shadows builtin max
            max_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in max])

        # extract error for each label
        error_per_label_for_file_i = []
        std_per_label_for_file_i = []
        labels_id_for_file_i = []
        # Due to 2 different kind of file structure, the number of the last label line must be adapted
        if not ind_line_median:
            ind_line_median = [len(lines) + 1]
        for i_line in range(ind_line_label[0] + 1, ind_line_median[0] - 1):
            line_label_i = lines[i_line].strip().split(',')
            error_per_label_for_file_i.append([float(error.strip().split('(')[0]) for error in line_label_i[1:]])
            std_per_label_for_file_i.append([float(error.strip().split('(')[1][:-1]) for error in line_label_i[1:]])
            labels_id_for_file_i.append(line_label_i[0])
        error_per_label.append(error_per_label_for_file_i)
        std_per_label.append(std_per_label_for_file_i)
        labels_id.append(labels_id_for_file_i)

        # close file
        f.close()

    # check if all the files in the result folder were generated with the same number of methods
    if not all(x == methods_name[0] for x in methods_name):
        sct.printv('ERROR: All the generated files in folder ' + results_folder + ' have not been generated with the same number of methods. Exit program.', 'error')
        sys.exit(1)

    # check if all the files in the result folder were generated with the same labels
    if not all(x == labels_id[0] for x in labels_id):
        sct.printv('ERROR: All the generated files in folder ' + results_folder + ' have not been generated with the same labels. Exit program.', 'error')
        sys.exit(1)

    # convert the list "error_per_label" into a numpy array to ease further manipulations
    error_per_label = numpy.array(error_per_label)
    std_per_label = numpy.array(std_per_label)

    # compute different stats (axis=1 aggregates across labels)
    abs_error_per_labels = numpy.absolute(error_per_label)
    max_abs_error_per_meth = numpy.amax(abs_error_per_labels, axis=1)
    min_abs_error_per_meth = numpy.amin(abs_error_per_labels, axis=1)
    mean_abs_error_per_meth = numpy.mean(abs_error_per_labels, axis=1)
    std_abs_error_per_meth = numpy.std(abs_error_per_labels, axis=1)

    nb_method = len(methods_to_display)

    # dump everything that was parsed, one section per quantity
    sct.printv('Noise std of the ' + str(nb_results_file) + ' generated files:')
    print snr
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Tracts std of the ' + str(nb_results_file) + ' generated files:')
    print tracts_std
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('CSF value of the ' + str(nb_results_file) + ' generated files:')
    print csf_values
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Methods used to generate results for the ' + str(nb_results_file) + ' generated files:')
    print methods_name
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Median obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print median_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Minimum obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print min_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Maximum obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print max_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Labels\' ID (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print labels_id
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Errors obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print error_per_label

    # *********************************** START PLOTTING HERE **********************************
    # NOTE(review): two large commented-out prototype figures were here
    # (fig0/fig1, median/min/max scatter vs noise std and vs tract std, both
    # flagged "NOT GOOD"); condensed to this note.

    # Plot A: indexes of files with tract std == 10, sorted by ascending SNR
    ind_tracts_std_10 = numpy.where((tracts_std == 10) & (snr != 50))  # indexes where TRACTS STD=10
    ind_ind_snr_sort_tracts_std_10 = numpy.argsort(snr[ind_tracts_std_10])  # indexes of indexes where TRACTS STD=10 sorted according to SNR values (in ascending order)
    ind_snr_sort_tracts_std_10 = ind_tracts_std_10[0][ind_ind_snr_sort_tracts_std_10]  # indexes where TRACTS STD=10 sorted according to SNR values (in ascending order)

    # NOTE(review): a commented-out bar-chart version of Plot A (fig2) was
    # here; condensed to this note.

    # Plot B: indexes of files with SNR == 10, sorted by ascending tract std
    ind_snr_10 = numpy.where((snr == 10) & (tracts_std != 50))  # indexes where SNR=10
    ind_ind_tracts_std_sort_snr_10 = numpy.argsort(tracts_std[ind_snr_10])  # indexes of indexes where SNR=10 sorted according to tracts_std values (in ascending order)
    ind_tracts_std_sort_snr_10 = ind_snr_10[0][ind_ind_tracts_std_sort_snr_10]  # indexes where SNR=10 sorted according to tracts_std values (in ascending order)

    # NOTE(review): commented-out bar-chart versions of Plot B (fig3) and of
    # Plots A/B "v2" with absolute error min/max/mean error bars (fig4, fig5)
    # were here; condensed to this note.

    matplotlib.rcParams.update({'font.size': 45, 'font.family': 'trebuchet'})
    plt.rcParams['xtick.major.pad'] = '9'
    plt.rcParams['ytick.major.pad'] = '15'

    # Plot A -- v3: Box plots absolute error as a function of noise std
    fig6 = plt.figure(6, figsize=(30, 16))
    width = 1.0 / (nb_method + 1)
    ind_fig6 = numpy.arange(len(snr[ind_snr_sort_tracts_std_10])) * (1.0 + width)
    plt.ylabel('Absolute error (%)\n', fontsize=55)
    plt.xlabel('Noise STD (% of true WM value)', fontsize=55)
    plt.title('Absolute error within all tracts as a function of noise std\n', fontsize=65)

    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    box_plots = []
    for meth, color in zip(methods_to_display, colors):
        i_meth = methods_name[0].index(meth)
        i_meth_to_display = methods_to_display.index(meth)

        # one single-colored box plot per method, offset so methods sit side by side
        boxprops = dict(linewidth=4, color=color)
        flierprops = dict(color=color, markeredgewidth=0.7, markersize=15, marker='.')
        whiskerprops = dict(color=color, linewidth=3)
        capprops = dict(color=color, linewidth=3)
        medianprops = dict(linewidth=4, color=color)
        meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick')  # NOTE: unused
        meanlineprops = dict(linestyle='--', linewidth=2.5, color='purple')  # NOTE: unused
        plot_i = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_snr_sort_tracts_std_10, :, i_meth]),
                             positions=ind_fig6 + i_meth_to_display * width + (float(i_meth_to_display) * width) / (nb_method + 1),
                             widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops,
                             whiskerprops=whiskerprops, capprops=capprops)
        # NOTE(review): a commented-out plt.errorbar overlay was here.
        box_plots.append(plot_i['boxes'][0])

    # add alternated vertical background colored bars
    for i_xtick in range(0, len(ind_fig6), 2):
        plt.axvspan(ind_fig6[i_xtick] - width - width / 4, ind_fig6[i_xtick] + (nb_method + 1) * width - width / 4, facecolor='grey', alpha=0.1)

    # plt.legend(box_plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
    # plt.legend(box_plots, methods_to_display, loc='best')
    # convert xtick labels into integers
    xtick_labels = [int(xtick) for xtick in snr[ind_snr_sort_tracts_std_10]]
    plt.xticks(ind_fig6 + (numpy.floor(nb_method / 2)) * width * (1.0 + 1.0 / (nb_method + 1)), xtick_labels)
    plt.gca().set_xlim([-width, numpy.max(ind_fig6) + (nb_method + 0.5) * width])
    plt.gca().set_ylim([0, 18])
    plt.gca().yaxis.set_major_locator(plt.MultipleLocator(2))
    plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.5))
    plt.grid(b=True, axis='y', which='both', alpha=0.5)
    plt.subplots_adjust(left=0.1)
    plt.savefig(param_default.fname_folder_to_save_fig+'/absolute_error_vs_noise_std_Tracts_std_fixed_to_10.pdf', format='PDF')

    # Plot B -- v3: Box plots absolute error as a function of tracts std
    fig7 = plt.figure(7, figsize=(30, 16))
    width = 1.0 / (nb_method + 1)
    ind_fig7 = numpy.arange(len(tracts_std[ind_tracts_std_sort_snr_10])) * (1.0 + width)
    plt.ylabel('Absolute error (%)\n', fontsize=55)
    plt.xlabel('Tracts STD (% of true WM value)', fontsize=55)
    plt.title('Absolute error within all tracts as a function of tracts std\n', fontsize=65)

    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    box_plots = []
    for meth, color in zip(methods_to_display, colors):
        i_meth = methods_name[0].index(meth)
        i_meth_to_display = methods_to_display.index(meth)

        boxprops = dict(linewidth=4, color=color)
        flierprops = dict(color=color, markeredgewidth=0.7, markersize=15, marker='.')
        whiskerprops = dict(color=color, linewidth=3)
        capprops = dict(color=color, linewidth=3)
        medianprops = dict(linewidth=4, color=color)
        meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick')  # NOTE: unused
        meanlineprops = dict(linestyle='--', linewidth=2.5, color='purple')  # NOTE: unused
        plot_i = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_tracts_std_sort_snr_10, :, i_meth]),
                             positions=ind_fig7 + i_meth_to_display * width + (float(i_meth_to_display) * width) / (nb_method + 1),
                             widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops,
                             whiskerprops=whiskerprops, capprops=capprops)
        # NOTE(review): a commented-out plt.errorbar overlay was here.
        box_plots.append(plot_i['boxes'][0])

    # add alternated vertical background colored bars
    for i_xtick in range(0, len(ind_fig7), 2):
        plt.axvspan(ind_fig7[i_xtick] - width - width / 4, ind_fig7[i_xtick] + (nb_method + 1) * width - width / 4, facecolor='grey', alpha=0.1)

    # plt.legend(box_plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
    # plt.legend(box_plots, methods_to_display, loc='best')
    # convert xtick labels into integers
    xtick_labels = [int(xtick) for xtick in tracts_std[ind_tracts_std_sort_snr_10]]
    plt.xticks(ind_fig7 + (numpy.floor(nb_method / 2)) * width * (1.0 + 1.0 / (nb_method + 1)), xtick_labels)
    plt.gca().set_xlim([-width, numpy.max(ind_fig7) + (nb_method + 0.5) * width])
    plt.gca().set_ylim([0, 18])
    plt.gca().yaxis.set_major_locator(plt.MultipleLocator(2))
    plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.5))
    plt.grid(b=True, axis='y', which='both', alpha=0.5)
    plt.subplots_adjust(left=0.1)
    plt.savefig(param_default.fname_folder_to_save_fig+'/absolute_error_vs_tracts_std_Noise_std_fixed_to_10.pdf', format='PDF')

    plt.show(block=False)
def main(): # Initialization to defaults parameters fname_data = '' # data is empty by default path_label = '' # empty by default method = param.method # extraction mode by default labels_of_interest = param.labels_of_interest slices_of_interest = param.slices_of_interest vertebral_levels = param.vertebral_levels average_all_labels = param.average_all_labels fname_output = param.fname_output fname_vertebral_labeling = param.fname_vertebral_labeling fname_normalizing_label = '' # optional then default is empty normalization_method = '' # optional then default is empty actual_vert_levels = None # variable used in case the vertebral levels asked by the user don't correspond exactly to the vertebral levels available in the metric data warning_vert_levels = None # variable used to warn the user in case the vertebral levels he asked don't correspond exactly to the vertebral levels available in the metric data verbose = param.verbose flag_h = 0 ml_clusters = param.ml_clusters adv_param = param.adv_param adv_param_user = '' # Parameters for debug mode if param.debug: print '\n*** WARNING: DEBUG MODE ON ***\n' status, path_sct_data = commands.getstatusoutput( 'echo $SCT_TESTING_DATA_DIR') fname_data = '/Users/julien/data/temp/sct_example_data/mt/mtr.nii.gz' path_label = '/Users/julien/data/temp/sct_example_data/mt/label/atlas/' method = 'map' ml_clusters = '0:29,30,31' labels_of_interest = '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29' slices_of_interest = '' vertebral_levels = '' average_all_labels = 1 fname_normalizing_label = '' #path_sct+'/testing/data/errsm_23/mt/label/template/MNI-Poly-AMU_CSF.nii.gz' normalization_method = '' #'whole' else: # Check input parameters try: opts, args = getopt.getopt( sys.argv[1:], 'haf:i:l:m:n:o:p:v:w:z:') # define flags except getopt.GetoptError as err: # check if the arguments are defined print str(err) # error usage() # display usage if not opts: usage() for opt, arg in opts: # explore flags if opt 
in '-a': average_all_labels = 1 elif opt in '-f': path_label = os.path.abspath(arg) # save path of labels folder elif opt == '-h': # help option flag_h = 1 elif opt in '-i': fname_data = arg elif opt in '-l': labels_of_interest = arg elif opt in '-m': # method for metric extraction method = arg elif opt in '-n': # filename of the label by which the user wants to normalize fname_normalizing_label = arg elif opt in '-o': # output option fname_output = arg # fname of output file elif opt in '-p': adv_param_user = arg elif opt in '-v': # vertebral levels option, if the user wants to average the metric across specific vertebral levels vertebral_levels = arg elif opt in '-w': # method used for the normalization by the metric estimation into the normalizing label (see flag -n): 'sbs' for slice-by-slice or 'whole' for normalization after estimation in the whole labels normalization_method = arg elif opt in '-z': # slices numbers option slices_of_interest = arg # save labels numbers # Display usage with tract parameters by default in case files aren't chosen in arguments inputs if fname_data == '' or path_label == '' or flag_h: param.path_label = path_label usage() # Check existence of data file sct.printv('\ncheck existence of input files...', verbose) sct.check_file_exist(fname_data) sct.check_folder_exist(path_label) if fname_normalizing_label: sct.check_folder_exist(fname_normalizing_label) # add slash at the end path_label = sct.slash_at_the_end(path_label, 1) # Find path to the vertebral labeling file if vertebral levels were specified by the user if vertebral_levels: if slices_of_interest: # impossible to select BOTH specific slices and specific vertebral levels print '\nERROR: You cannot select BOTH vertebral levels AND slice numbers.' 
usage() else: fname_vertebral_labeling_list = sct.find_file_within_folder( fname_vertebral_labeling, path_label + '..') if len(fname_vertebral_labeling_list) > 1: print color.red + 'ERROR: More than one file named \'' + fname_vertebral_labeling + ' were found in ' + path_label + '. Exit program.' + color.end sys.exit(2) elif len(fname_vertebral_labeling_list) == 0: print color.red + 'ERROR: No file named \'' + fname_vertebral_labeling + ' were found in ' + path_label + '. Exit program.' + color.end sys.exit(2) else: fname_vertebral_labeling = os.path.abspath( fname_vertebral_labeling_list[0]) # Check input parameters check_method(method, fname_normalizing_label, normalization_method) # parse argument for param if not adv_param_user == '': adv_param = adv_param_user.replace(' ', '').split( ',') # remove spaces and parse with comma del adv_param_user # clean variable # TODO: check integrity of input # Extract label info label_id, label_name, label_file = read_label_file(path_label, param.file_info_label) nb_labels_total = len(label_id) # check consistency of label input parameter. label_id_user, average_all_labels = check_labels( labels_of_interest, nb_labels_total, average_all_labels, method) # If 'labels_of_interest' is empty, then # 'label_id_user' contains the index of all labels in the file info_label.txt # print parameters print '\nChecked parameters:' print ' data ...................... ' + fname_data print ' folder label .............. ' + path_label print ' selected labels ........... ' + str(label_id_user) print ' estimation method ......... ' + method print ' slices of interest ........ ' + slices_of_interest print ' vertebral levels .......... ' + vertebral_levels print ' vertebral labeling file.... ' + fname_vertebral_labeling print ' advanced parameters ....... 
' + str(adv_param) # Check if the orientation of the data is RPI orientation_data = get_orientation(fname_data) # If orientation is not RPI, change to RPI if orientation_data != 'RPI': sct.printv( '\nCreate temporary folder to change the orientation of the NIFTI files into RPI...', verbose) path_tmp = sct.slash_at_the_end('tmp.' + time.strftime("%y%m%d%H%M%S"), 1) sct.create_folder(path_tmp) # change orientation and load data sct.printv('\nChange image orientation and load it...', verbose) data = nib.load( set_orientation(fname_data, 'RPI', path_tmp + 'orient_data.nii')).get_data() # Do the same for labels sct.printv('\nChange labels orientation and load them...', verbose) labels = np.empty([nb_labels_total], dtype=object) # labels(nb_labels_total, x, y, z) for i_label in range(0, nb_labels_total): labels[i_label] = nib.load( set_orientation(path_label + label_file[i_label], 'RPI', path_tmp + 'orient_' + label_file[i_label])).get_data() if fname_normalizing_label: # if the "normalization" option is wanted, normalizing_label = np.empty( [1], dtype=object ) # choose this kind of structure so as to keep easily the # compatibility with the rest of the code (dimensions: (1, x, y, z)) normalizing_label[0] = nib.load( set_orientation(fname_normalizing_label, 'RPI', path_tmp + 'orient_normalizing_volume.nii')).get_data() if vertebral_levels: # if vertebral levels were selected, data_vertebral_labeling = nib.load( set_orientation( fname_vertebral_labeling, 'RPI', path_tmp + 'orient_vertebral_labeling.nii.gz')).get_data() # Remove the temporary folder used to change the NIFTI files orientation into RPI sct.printv('\nRemove the temporary folder...', verbose) status, output = commands.getstatusoutput('rm -rf ' + path_tmp) else: # Load image sct.printv('\nLoad image...', verbose) data = nib.load(fname_data).get_data() # Load labels sct.printv('\nLoad labels...', verbose) labels = np.empty([nb_labels_total], dtype=object) # labels(nb_labels_total, x, y, z) for i_label in 
range(0, nb_labels_total): labels[i_label] = nib.load(path_label + label_file[i_label]).get_data() if fname_normalizing_label: # if the "normalization" option is wanted, normalizing_label = np.empty( [1], dtype=object ) # choose this kind of structure so as to keep easily the # compatibility with the rest of the code (dimensions: (1, x, y, z)) normalizing_label[0] = nib.load(fname_normalizing_label).get_data( ) # load the data of the normalizing label if vertebral_levels: # if vertebral levels were selected, data_vertebral_labeling = nib.load( fname_vertebral_labeling).get_data() # Change metric data type into floats for future manipulations (normalization) data = np.float64(data) # Get dimensions of data sct.printv('\nGet dimensions of data...', verbose) nx, ny, nz = data.shape sct.printv(' ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose) # Get dimensions of labels sct.printv('\nGet dimensions of label...', verbose) nx_atlas, ny_atlas, nz_atlas = labels[0].shape sct.printv( '.. ' + str(nx_atlas) + ' x ' + str(ny_atlas) + ' x ' + str(nz_atlas) + ' x ' + str(nb_labels_total), verbose) # Check dimensions consistency between atlas and data if (nx, ny, nz) != (nx_atlas, ny_atlas, nz_atlas): print '\nERROR: Metric data and labels DO NOT HAVE SAME DIMENSIONS.' 
sys.exit(2) # Update the flag "slices_of_interest" according to the vertebral levels selected by user (if it's the case) if vertebral_levels: slices_of_interest, actual_vert_levels, warning_vert_levels = \ get_slices_matching_with_vertebral_levels(data, vertebral_levels, data_vertebral_labeling) # select slice of interest by cropping data and labels if slices_of_interest: data = remove_slices(data, slices_of_interest) for i_label in range(0, nb_labels_total): labels[i_label] = remove_slices(labels[i_label], slices_of_interest) if fname_normalizing_label: # if the "normalization" option was selected, normalizing_label[0] = remove_slices(normalizing_label[0], slices_of_interest) # if user wants to get unique value across labels, then combine all labels together if average_all_labels == 1: sum_labels_user = np.sum( labels[label_id_user]) # sum the labels selected by user if method == 'ml' or method == 'map': # in case the maximum likelihood and the average across different labels are wanted labels_tmp = np.empty([nb_labels_total - len(label_id_user) + 1], dtype=object) labels = np.delete( labels, label_id_user) # remove the labels selected by user labels_tmp[ 0] = sum_labels_user # put the sum of the labels selected by user in first position of the tmp # variable for i_label in range(1, len(labels_tmp)): labels_tmp[i_label] = labels[ i_label - 1] # fill the temporary array with the values of the non-selected labels labels = labels_tmp # replace the initial labels value by the updated ones (with the summed labels) del labels_tmp # delete the temporary labels else: # in other cases than the maximum likelihood, we can remove other labels (not needed for estimation) labels = np.empty(1, dtype=object) labels[ 0] = sum_labels_user # we create a new label array that includes only the summed labels if fname_normalizing_label: # if the "normalization" option is wanted sct.printv('\nExtract normalization values...', verbose) if normalization_method == 'sbs': # case: the user 
wants to normalize slice-by-slice for z in range(0, data.shape[-1]): normalizing_label_slice = np.empty( [1], dtype=object ) # in order to keep compatibility with the function # 'extract_metric_within_tract', define a new array for the slice z of the normalizing labels normalizing_label_slice[0] = normalizing_label[0][..., z] metric_normalizing_label = extract_metric_within_tract( data[..., z], normalizing_label_slice, method, 0) # estimate the metric mean in the normalizing label for the slice z if metric_normalizing_label[0][0] != 0: data[..., z] = data[..., z] / metric_normalizing_label[0][ 0] # divide all the slice z by this value elif normalization_method == 'whole': # case: the user wants to normalize after estimations in the whole labels metric_mean_norm_label, metric_std_norm_label = extract_metric_within_tract( data, normalizing_label, method, param.verbose) # mean and std are lists # identify cluster for each tract (for use with robust ML) ml_clusters_array = get_clusters(ml_clusters, labels) # extract metrics within labels sct.printv('\nExtract metric within labels...', verbose) metric_mean, metric_std = extract_metric_within_tract( data, labels, method, verbose, ml_clusters_array, adv_param) # mean and std are lists if fname_normalizing_label and normalization_method == 'whole': # case: user wants to normalize after estimations in the whole labels metric_mean, metric_std = np.divide(metric_mean, metric_mean_norm_label), np.divide( metric_std, metric_std_norm_label) # update label name if average if average_all_labels == 1: label_name[0] = 'AVERAGED' + ' -'.join( label_name[i] for i in label_id_user) # concatenate the names of the # labels selected by the user if the average tag was asked label_id_user = [ 0 ] # update "label_id_user" to select the "averaged" label (which is in first position) metric_mean = metric_mean[label_id_user] metric_std = metric_std[label_id_user] # display metrics sct.printv('\nEstimation results:', 1) for i in range(0, 
metric_mean.size): sct.printv( str(label_id_user[i]) + ', ' + str(label_name[label_id_user[i]]) + ': ' + str(metric_mean[i]) + ' +/- ' + str(metric_std[i]), 1, 'info') # save and display metrics save_metrics(label_id_user, label_name, slices_of_interest, metric_mean, metric_std, fname_output, fname_data, method, fname_normalizing_label, actual_vert_levels, warning_vert_levels)
def main(): results_folder = param_default.results_folder methods_to_display = param_default.methods_to_display # Parameters for debug mode if param_default.debug: print '\n*** WARNING: DEBUG MODE ON ***\n' results_folder = "/Users/slevy_local/spinalcordtoolbox/dev/atlas/validate_atlas/results_20150210_200iter" #"C:/cygwin64/home/Simon_2/data_methods_comparison" path_sct = '/Users/slevy_local/spinalcordtoolbox' #'C:/cygwin64/home/Simon_2/spinalcordtoolbox' methods_to_display = 'bin,wa,wath,ml,map' else: path_sct = os.environ.get("SCT_DIR", os.path.dirname(os.path.dirname(__file__))) # Check input parameters try: opts, args = getopt.getopt(sys.argv[1:], 'i:m:') # define flags except getopt.GetoptError as err: # check if the arguments are defined print str(err) # error # usage() # display usage # if not opts: # print 'Please enter the path to the result folder. Exit program.' # sys.exit(1) # # usage() for opt, arg in opts: # explore flags if opt in '-i': results_folder = arg if opt in '-m': methods_to_display = arg # Append path that contains scripts, to be able to load modules sys.path.append(os.path.join(path_sct, "scripts")) import sct_utils as sct sct.printv("Working directory: " + os.getcwd()) results_folder_noise = results_folder + '/noise' results_folder_tracts = results_folder + '/tracts' sct.printv( '\n\nData will be extracted from folder ' + results_folder_noise + ' and ' + results_folder_tracts, 'warning') sct.printv('\t\tCheck existence...') sct.check_folder_exist(results_folder_noise) sct.check_folder_exist(results_folder_tracts) # Extract methods to display methods_to_display = methods_to_display.strip().split(',') # Extract file names of the results files fname_results_noise = glob.glob(os.path.join(results_folder_noise, "*.txt")) fname_results_tracts = glob.glob( os.path.join(results_folder_tracts, "*.txt")) fname_results = fname_results_noise + fname_results_tracts # Remove doublons (due to the two folders) # for i_fname in range(0, 
len(fname_results)): # for j_fname in range(0, len(fname_results)): # if (i_fname != j_fname) & (os.path.basename(fname_results[i_fname]) == os.path.basename(fname_results[j_fname])): # fname_results.remove(fname_results[j_fname]) file_results = [] for fname in fname_results: file_results.append(os.path.basename(fname)) for file in file_results: if file_results.count(file) > 1: ind = file_results.index(file) fname_results.remove(fname_results[ind]) file_results.remove(file) nb_results_file = len(fname_results) # 1st dim: SNR, 2nd dim: tract std, 3rd dim: mean abs error, 4th dim: std abs error # result_array = numpy.empty((nb_results_file, nb_results_file, 3), dtype=object) # SNR snr = numpy.zeros((nb_results_file)) # Tracts std tracts_std = numpy.zeros((nb_results_file)) # CSF value csf_values = numpy.zeros((nb_results_file)) # methods' name methods_name = [] #numpy.empty((nb_results_file, nb_method), dtype=object) # labels error_per_label = [] std_per_label = [] labels_id = [] # median median_results = numpy.zeros((nb_results_file, 5)) # median std across bootstraps median_std = numpy.zeros((nb_results_file, 5)) # min min_results = numpy.zeros((nb_results_file, 5)) # max max_results = numpy.zeros((nb_results_file, 5)) # for i_file in range(0, nb_results_file): # Open file f = open(fname_results[i_file]) # open file # Extract all lines in .txt file lines = [line for line in f.readlines() if line.strip()] # extract SNR # find all index of lines containing the string "sigma noise" ind_line_noise = [ lines.index(line_noise) for line_noise in lines if "sigma noise" in line_noise ] if len(ind_line_noise) != 1: sct.printv( "ERROR: number of lines including \"sigma noise\" is different from 1. 
Exit program.", 'error') sys.exit(1) else: # result_array[:, i_file, i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit())) snr[i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit())) # extract tract std ind_line_tract_std = [ lines.index(line_tract_std) for line_tract_std in lines if "range tracts" in line_tract_std ] if len(ind_line_tract_std) != 1: sct.printv( "ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error') sys.exit(1) else: # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit())) # regex = re.compile(''('(.*)':) # re.I permet d'ignorer la case (majuscule/minuscule) # match = regex.search(lines[ind_line_tract_std[0]]) # result_array[:, i_file, :, :] = match.group(1) # le groupe 1 correspond a '.*' tracts_std[i_file] = int(''.join( c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit())) # extract CSF value ind_line_csf_value = [ lines.index(line_csf_value) for line_csf_value in lines if "# value CSF" in line_csf_value ] if len(ind_line_csf_value) != 1: sct.printv( "ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error') sys.exit(1) else: # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit())) # regex = re.compile(''('(.*)':) # re.I permet d'ignorer la case (majuscule/minuscule) # match = regex.search(lines[ind_line_tract_std[0]]) # result_array[:, i_file, :, :] = match.group(1) # le groupe 1 correspond a '.*' csf_values[i_file] = int(''.join( c for c in lines[ind_line_csf_value[0]].split(':')[1] if c.isdigit())) # extract method name ind_line_label = [ lines.index(line_label) for line_label in lines if "Label" in line_label ] if len(ind_line_label) != 1: sct.printv( "ERROR: number of lines including \"Label\" is different from 1. 
Exit program.", 'error') sys.exit(1) else: # methods_name[i_file, :] = numpy.array(lines[ind_line_label[0]].strip().split(',')[1:]) methods_name.append(lines[ind_line_label[0]].strip().replace( ' ', '').split(',')[1:]) # extract median ind_line_median = [ lines.index(line_median) for line_median in lines if "median" in line_median ] if len(ind_line_median) != 1: sct.printv( "WARNING: number of lines including \"median\" is different from 1. Exit program.", 'warning') # sys.exit(1) else: median = lines[ind_line_median[0]].strip().split(',')[1:] # result_array[i_file, i_file, 0] = [float(m.split('(')[0]) for m in median] median_results[i_file, :] = numpy.array( [float(m.split('(')[0]) for m in median]) median_std[i_file, :] = numpy.array( [float(m.split('(')[1][:-1]) for m in median]) # extract min ind_line_min = [ lines.index(line_min) for line_min in lines if "min," in line_min ] if len(ind_line_min) != 1: sct.printv( "WARNING: number of lines including \"min\" is different from 1. Exit program.", 'warning') # sys.exit(1) else: min = lines[ind_line_min[0]].strip().split(',')[1:] # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in min] min_results[i_file, :] = numpy.array( [float(m.split('(')[0]) for m in min]) # extract max ind_line_max = [ lines.index(line_max) for line_max in lines if "max" in line_max ] if len(ind_line_max) != 1: sct.printv( "WARNING: number of lines including \"max\" is different from 1. 
Exit program.", 'warning') # sys.exit(1) else: max = lines[ind_line_max[0]].strip().split(',')[1:] # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in max] max_results[i_file, :] = numpy.array( [float(m.split('(')[0]) for m in max]) # extract error for each label error_per_label_for_file_i = [] std_per_label_for_file_i = [] labels_id_for_file_i = [] # Due to 2 different kind of file structure, the number of the last label line must be adapted if not ind_line_median: ind_line_median = [len(lines) + 1] for i_line in range(ind_line_label[0] + 1, ind_line_median[0] - 1): line_label_i = lines[i_line].strip().split(',') error_per_label_for_file_i.append([ float(error.strip().split('(')[0]) for error in line_label_i[1:] ]) std_per_label_for_file_i.append([ float(error.strip().split('(')[1][:-1]) for error in line_label_i[1:] ]) labels_id_for_file_i.append(line_label_i[0]) error_per_label.append(error_per_label_for_file_i) std_per_label.append(std_per_label_for_file_i) labels_id.append(labels_id_for_file_i) # close file f.close() # check if all the files in the result folder were generated with the same number of methods if not all(x == methods_name[0] for x in methods_name): sct.printv( 'ERROR: All the generated files in folder ' + results_folder + ' have not been generated with the same number of methods. Exit program.', 'error') sys.exit(1) # check if all the files in the result folder were generated with the same labels if not all(x == labels_id[0] for x in labels_id): sct.printv( 'ERROR: All the generated files in folder ' + results_folder + ' have not been generated with the same labels. 
Exit program.', 'error') sys.exit(1) # convert the list "error_per_label" into a numpy array to ease further manipulations error_per_label = numpy.array(error_per_label) std_per_label = numpy.array(std_per_label) # compute different stats abs_error_per_labels = numpy.absolute(error_per_label) max_abs_error_per_meth = numpy.amax(abs_error_per_labels, axis=1) min_abs_error_per_meth = numpy.amin(abs_error_per_labels, axis=1) mean_abs_error_per_meth = numpy.mean(abs_error_per_labels, axis=1) std_abs_error_per_meth = numpy.std(abs_error_per_labels, axis=1) nb_method = len(methods_to_display) sct.printv('Noise std of the ' + str(nb_results_file) + ' generated files:') print snr print '----------------------------------------------------------------------------------------------------------------' sct.printv('Tracts std of the ' + str(nb_results_file) + ' generated files:') print tracts_std print '----------------------------------------------------------------------------------------------------------------' sct.printv('CSF value of the ' + str(nb_results_file) + ' generated files:') print csf_values print '----------------------------------------------------------------------------------------------------------------' sct.printv('Methods used to generate results for the ' + str(nb_results_file) + ' generated files:') print methods_name print '----------------------------------------------------------------------------------------------------------------' sct.printv('Median obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):') print median_results print '----------------------------------------------------------------------------------------------------------------' sct.printv('Minimum obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):') print min_results print 
'----------------------------------------------------------------------------------------------------------------' sct.printv('Maximum obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):') print max_results print '----------------------------------------------------------------------------------------------------------------' sct.printv('Labels\' ID (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):') print labels_id print '----------------------------------------------------------------------------------------------------------------' sct.printv('Errors obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):') print error_per_label # *********************************************** START PLOTTING HERE ********************************************** # # plot A (NOT GOOD) # fig0 = plt.figure(0) # fig0.suptitle('Absolute error within all tracts as a function of noise std') # # fig0_ax = fig0.add_subplot(111) # fig0_ax.grid(True) # fig0_ax.set_xlabel('Noise std') # fig0_ax.set_ylabel('Absolute error') # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method)) # # for meth, color in zip(methods_name[0], colors): # if meth != 'mlwa': # i_meth = methods_name[0].index(meth) # # # median # plt.plot(snr[ind_tracts_std_10], median_results[ind_tracts_std_10, i_meth][0], label='median '+meth, color=color, marker='o', linestyle='None', markersize=5.0, linewidth=2.0) # # min # plt.plot(snr[ind_tracts_std_10], min_results[ind_tracts_std_10, i_meth][0], label='min '+meth, color=color, marker='_', linestyle='None', markersize=10.0, linewidth=20.0) # # max # plt.plot(snr[ind_tracts_std_10], max_results[ind_tracts_std_10, i_meth][0], label='max '+meth, color=color, marker='+', linestyle='None', markersize=10.0, linewidth=20.0) # # handles, labels = fig0_ax.get_legend_handles_labels() # fig0_ax.legend(handles, labels, loc='best', handler_map={Line2D: 
HandlerLine2D(numpoints=1)}, fontsize=16) # # # # plot B (NOT GOOD) # fig1 = plt.figure(1) # fig1.suptitle('Absolute error within all tracts as a function of tract std') # # fig1_ax = fig1.add_subplot(111) # fig1_ax.grid(True) # fig1_ax.set_xlabel('Tract std (percentage of the true value in tract)') # fig1_ax.set_ylabel('Absolute error') # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method)) # # for meth, color in zip(methods_name[0], colors): # if meth != 'mlwa': # i_meth = methods_name[0].index(meth) # # # median # plt.plot(tracts_std[ind_snr_10], median_results[ind_snr_10, i_meth][0], label='median '+meth, color=color, marker='o', linestyle='None', markersize=5.0, linewidth=2.0) # # min # plt.plot(tracts_std[ind_snr_10], min_results[ind_snr_10, i_meth][0], label='min '+meth, color=color, marker='_', linestyle='None', markersize=10.0, linewidth=20.0) # # max # plt.plot(tracts_std[ind_snr_10], max_results[ind_snr_10, i_meth][0], label='max '+meth, color=color, marker='+', linestyle='None', markersize=10.0, linewidth=20.0) # # handles, labels = fig1_ax.get_legend_handles_labels() # fig1_ax.legend(handles, labels, loc='best', handler_map={Line2D: HandlerLine2D(numpoints=1)}, fontsize=16) # Plot A ind_tracts_std_10 = numpy.where( (tracts_std == 10) & (snr != 50)) # indexes where TRACTS STD=10 ind_ind_snr_sort_tracts_std_10 = numpy.argsort( snr[ind_tracts_std_10] ) # indexes of indexes where TRACTS STD=10 sorted according to SNR values (in ascending order) ind_snr_sort_tracts_std_10 = ind_tracts_std_10[0][ ind_ind_snr_sort_tracts_std_10] # indexes where TRACTS STD=10 sorted according to SNR values (in ascending order) # fig2 = plt.figure(2) # ind_fig2 = numpy.arange(len(snr[ind_snr_sort_tracts_std_10])) * 1.2 # width = 1.0 / (nb_method + 1) # plt.ylabel('Error (%)') # plt.xlabel('Noise std') # plt.title('Error within all tracts as a function of noise std') # plt.xticks(ind_fig2 + 0.5, snr[ind_snr_sort_tracts_std_10]) # plt.gca().set_xlim([-width / (nb_method + 
1), numpy.max(ind_fig2) + 1]) # plt.gca().yaxis.grid(True) # # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method)) # bar_plots = [] # for meth, color in zip(methods_name[0], colors): # i_meth = methods_name[0].index(meth) # # plot_i = plt.bar(ind_fig2 + i_meth * width + (float(i_meth) * width) / (nb_method + 1), # max_results[ind_snr_sort_tracts_std_10, i_meth] - min_results[ # ind_snr_sort_tracts_std_10, i_meth], width, # min_results[ind_snr_sort_tracts_std_10, i_meth], edgecolor=color, color='white', linewidth=3) # plt.plot(ind_fig2 + i_meth * width + width / 2 + (float(i_meth) * width) / (nb_method + 1), # median_results[ind_snr_sort_tracts_std_10, i_meth], color=color, marker='_', linestyle='None', # markersize=200 * width, markeredgewidth=3) # bar_plots.append(plot_i[0]) # # plt.legend(bar_plots, methods_name[0], bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.) # Plot B ind_snr_10 = numpy.where( (snr == 10) & (tracts_std != 50)) # indexes where SNR=10 ind_ind_tracts_std_sort_snr_10 = numpy.argsort( tracts_std[ind_snr_10] ) # indexes of indexes where SNR=10 sorted according to tracts_std values (in ascending order) ind_tracts_std_sort_snr_10 = ind_snr_10[0][ ind_ind_tracts_std_sort_snr_10] # indexes where SNR=10 sorted according to tracts_std values (in ascending order) # fig3 = plt.figure(3) # ind_fig3 = numpy.arange(len(tracts_std[ind_tracts_std_sort_snr_10])) * 1.2 # width = 1.0 / (nb_method + 1) # plt.ylabel('Error (%)') # plt.xlabel('Tracts std (in percentage of the mean value of the tracts)') # plt.title('Error within all tracts as a function of tracts std') # plt.xticks(ind_fig3 + 0.5, tracts_std[ind_tracts_std_sort_snr_10]) # plt.gca().set_xlim([-width / (nb_method + 1), numpy.max(ind_fig3) + 1]) # plt.gca().yaxis.grid(True) # # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method)) # bar_plots = [] # for meth, color in zip(methods_name[0], colors): # i_meth = methods_name[0].index(meth) # # plot_i = plt.bar(ind_fig3 + i_meth * width + 
(float(i_meth) * width) / (nb_method + 1), # max_results[ind_tracts_std_sort_snr_10, i_meth] - min_results[ # ind_tracts_std_sort_snr_10, i_meth], width, # min_results[ind_tracts_std_sort_snr_10, i_meth], edgecolor=color, color='white', linewidth=3) # plt.plot(ind_fig2 + i_meth * width + width / 2 + (float(i_meth) * width) / (nb_method + 1), # median_results[ind_tracts_std_sort_snr_10, i_meth], color=color, marker='_', linestyle='None', # markersize=200 * width, markeredgewidth=3) # bar_plots.append(plot_i[0]) # # plt.legend(bar_plots, methods_name[0], bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.) # # Plot A -- v2: Absolute error (min, max, mean) # # fig4 = plt.figure(4) # ind_fig4 = numpy.arange(len(snr[ind_snr_sort_tracts_std_10])) * 1.2 # width = 1.0 / (nb_method + 1) # plt.ylabel('Absolute error (%)') # plt.xlabel('Noise std') # plt.title('Absolute error within all tracts as a function of noise std') # plt.xticks(ind_fig4 + 0.5, snr[ind_snr_sort_tracts_std_10]) # plt.gca().set_xlim([-width / (nb_method + 1), numpy.max(ind_fig4) + 1]) # plt.gca().yaxis.grid(True) # # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method)) # bar_plots = [] # for meth, color in zip(methods_name[0], colors): # i_meth = methods_name[0].index(meth) # # plot_i = plt.bar(ind_fig4 + i_meth * width + (float(i_meth) * width) / (nb_method + 1), # max_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth] - min_abs_error_per_meth[ # ind_snr_sort_tracts_std_10, i_meth], width, # min_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth], edgecolor=color, color='white', # linewidth=3) # plt.errorbar(ind_fig2 + i_meth * width + width / 2 + (float(i_meth) * width) / (nb_method + 1), # mean_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth], # std_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth], color=color, marker='_', # linestyle='None', markersize=200 * width, markeredgewidth=3) # bar_plots.append(plot_i[0]) # # plt.legend(bar_plots, methods_name[0], bbox_to_anchor=(1.01, 
1), loc=2, borderaxespad=0.) # # # Plot B -- v2: Absolute error (min, max, mean) # fig5 = plt.figure(5) # ind_fig5 = numpy.arange(len(tracts_std[ind_tracts_std_sort_snr_10])) * 1.2 # width = 1.0 / (nb_method + 1) # plt.ylabel('Absolute error (%)') # plt.xlabel('Tracts std (in percentage of the mean value of the tracts)') # plt.title('Absolute error within all tracts as a function of tracts std') # plt.xticks(ind_fig5 + 0.5, tracts_std[ind_tracts_std_sort_snr_10]) # plt.gca().set_xlim([-width / (nb_method + 1), numpy.max(ind_fig5) + 1]) # plt.gca().yaxis.grid(True) # # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method)) # bar_plots = [] # for meth, color in zip(methods_name[0], colors): # i_meth = methods_name[0].index(meth) # # plot_i = plt.bar(ind_fig5 + i_meth * width + (float(i_meth) * width) / (nb_method + 1), # max_abs_error_per_meth[ind_tracts_std_sort_snr_10, i_meth] - min_abs_error_per_meth[ # ind_tracts_std_sort_snr_10, i_meth], width, # min_abs_error_per_meth[ind_tracts_std_sort_snr_10, i_meth], edgecolor=color, color='white', # linewidth=3) # plt.errorbar(ind_fig2 + i_meth * width + width / 2 + (float(i_meth) * width) / (nb_method + 1), # mean_abs_error_per_meth[ind_tracts_std_sort_snr_10, i_meth], # std_abs_error_per_meth[ind_tracts_std_sort_snr_10, i_meth], color=color, marker='_', # linestyle='None', markersize=200 * width, markeredgewidth=3) # bar_plots.append(plot_i[0]) # # plt.legend(bar_plots, methods_name[0], bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.) 
matplotlib.rcParams.update({'font.size': 45, 'font.family': 'trebuchet'}) plt.rcParams['xtick.major.pad'] = '9' plt.rcParams['ytick.major.pad'] = '15' # Plot A -- v3: Box plots absolute error fig6 = plt.figure(6, figsize=(30, 16)) width = 1.0 / (nb_method + 1) ind_fig6 = numpy.arange(len( snr[ind_snr_sort_tracts_std_10])) * (1.0 + width) plt.ylabel('Absolute error (%)\n', fontsize=55) plt.xlabel('Noise STD (% of true WM value)', fontsize=55) plt.title('Absolute error within all tracts as a function of noise std\n', fontsize=65) # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method)) colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k'] box_plots = [] for meth, color in zip(methods_to_display, colors): i_meth = methods_name[0].index(meth) i_meth_to_display = methods_to_display.index(meth) boxprops = dict(linewidth=4, color=color) flierprops = dict(color=color, markeredgewidth=0.7, markersize=15, marker='.') whiskerprops = dict(color=color, linewidth=3) capprops = dict(color=color, linewidth=3) medianprops = dict(linewidth=4, color=color) meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick') meanlineprops = dict(linestyle='--', linewidth=2.5, color='purple') plot_i = plt.boxplot( numpy.transpose(abs_error_per_labels[ind_snr_sort_tracts_std_10, :, i_meth]), positions=ind_fig6 + i_meth_to_display * width + (float(i_meth_to_display) * width) / (nb_method + 1), widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops) # plt.errorbar(ind_fig2+i_meth*width+width/2+(float(i_meth)*width)/(nb_method+1), mean_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth], std_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth], color=color, marker='_', linestyle='None', markersize=200*width, markeredgewidth=3) box_plots.append(plot_i['boxes'][0]) # add alternated vertical background colored bars for i_xtick in range(0, len(ind_fig6), 2): plt.axvspan(ind_fig6[i_xtick] - width - width 
/ 4, ind_fig6[i_xtick] + (nb_method + 1) * width - width / 4, facecolor='grey', alpha=0.1) # plt.legend(box_plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.) # plt.legend(box_plots, methods_to_display, loc='best') # convert xtick labels into integers xtick_labels = [int(xtick) for xtick in snr[ind_snr_sort_tracts_std_10]] plt.xticks( ind_fig6 + (numpy.floor(nb_method / 2)) * width * (1.0 + 1.0 / (nb_method + 1)), xtick_labels) plt.gca().set_xlim( [-width, numpy.max(ind_fig6) + (nb_method + 0.5) * width]) plt.gca().set_ylim([0, 18]) plt.gca().yaxis.set_major_locator(plt.MultipleLocator(2)) plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.5)) plt.grid(b=True, axis='y', which='both', alpha=0.5) plt.subplots_adjust(left=0.1) plt.savefig(os.path.join( param_default.fname_folder_to_save_fig, 'absolute_error_vs_noise_std_Tracts_std_fixed_to_10.pdf'), format='PDF') # Plot B -- v3: Box plots absolute error fig7 = plt.figure(7, figsize=(30, 16)) width = 1.0 / (nb_method + 1) ind_fig7 = numpy.arange(len( tracts_std[ind_tracts_std_sort_snr_10])) * (1.0 + width) plt.ylabel('Absolute error (%)\n', fontsize=55) plt.xlabel('Tracts STD (% of true WM value)', fontsize=55) plt.title('Absolute error within all tracts as a function of tracts std\n', fontsize=65) # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method)) colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k'] box_plots = [] for meth, color in zip(methods_to_display, colors): i_meth = methods_name[0].index(meth) i_meth_to_display = methods_to_display.index(meth) boxprops = dict(linewidth=4, color=color) flierprops = dict(color=color, markeredgewidth=0.7, markersize=15, marker='.') whiskerprops = dict(color=color, linewidth=3) capprops = dict(color=color, linewidth=3) medianprops = dict(linewidth=4, color=color) meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick') meanlineprops = dict(linestyle='--', linewidth=2.5, color='purple') plot_i = plt.boxplot( 
numpy.transpose(abs_error_per_labels[ind_tracts_std_sort_snr_10, :, i_meth]), positions=ind_fig7 + i_meth_to_display * width + (float(i_meth_to_display) * width) / (nb_method + 1), widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops) # plt.errorbar(ind_fig2+i_meth*width+width/2+(float(i_meth)*width)/(nb_method+1), mean_abs_error_per_meth[ind_tracts_std_sort_snr_10, i_meth], std_abs_error_per_meth[ind_tracts_std_sort_snr_10, i_meth], color=color, marker='_', linestyle='None', markersize=200*width, markeredgewidth=3) box_plots.append(plot_i['boxes'][0]) # add alternated vertical background colored bars for i_xtick in range(0, len(ind_fig7), 2): plt.axvspan(ind_fig7[i_xtick] - width - width / 4, ind_fig7[i_xtick] + (nb_method + 1) * width - width / 4, facecolor='grey', alpha=0.1) # plt.legend(box_plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.) # plt.legend(box_plots, methods_to_display, loc='best') # convert xtick labels into integers xtick_labels = [ int(xtick) for xtick in tracts_std[ind_tracts_std_sort_snr_10] ] plt.xticks( ind_fig7 + (numpy.floor(nb_method / 2)) * width * (1.0 + 1.0 / (nb_method + 1)), xtick_labels) plt.gca().set_xlim( [-width, numpy.max(ind_fig7) + (nb_method + 0.5) * width]) plt.gca().set_ylim([0, 18]) plt.gca().yaxis.set_major_locator(plt.MultipleLocator(2)) plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.5)) plt.grid(b=True, axis='y', which='both', alpha=0.5) plt.subplots_adjust(left=0.1) plt.savefig(os.path.join( param_default.fname_folder_to_save_fig, 'absolute_error_vs_tracts_std_Noise_std_fixed_to_10.pdf'), format='PDF') plt.show(block=False)
def main(): # Initialization path_atlas = '' # Parameters for debug mode if param.debug: print '\n*** WARNING: DEBUG MODE ON ***\n' else: # Check input parameters try: opts, args = getopt.getopt(sys.argv[1:], 'hi:s:m:g:t:v:') except getopt.GetoptError: usage() if not opts: usage() for opt, arg in opts: if opt == '-h': usage() elif opt in ('-i'): path_atlas = arg elif opt in ('-s'): param.fname_seg = arg elif opt in ('-m'): param.fname_GM = arg elif opt in ('-t'): param.threshold_atlas = float(arg) elif opt in ('-g'): param.threshold_GM = float(arg) elif opt in ('-v'): param.verbose = int(arg) # display usage if a mandatory argument is not provided if path_atlas == '' and not param.debug: # get path of the toolbox status, path_sct = commands.getstatusoutput('echo $SCT_DIR') path_atlas = path_sct+'/data/atlas/' param.fname_seg = path_sct+'/data/template/MNI-Poly-AMU_cord.nii.gz' param.fname_GM = path_sct+'/data/template/MNI-Poly-AMU_GM.nii.gz' # print arguments if param.verbose: print 'Check input parameters...' print '.. Atlas folder path: '+path_atlas print '.. Spinal cord segmentation path: '+param.fname_seg print '.. Spinal cord gray matter path: '+param.fname_GM print '.. Atlas threshold= '+str(param.threshold_atlas) # Check for end-caracter of folder path if path_atlas[-1] != "/": path_atlas=path_atlas+"/"; # Check folder existence sct.printv('\nCheck atlas existence...', param.verbose) sct.check_folder_exist(path_atlas) # Extract atlas info atlas_id, atlas_name, atlas_file = read_label_file(path_atlas) nb_tracts_total = len(atlas_id) # Load atlas sct.printv('\nLoad atlas...', param.verbose) atlas = np.empty([nb_tracts_total], dtype=object) # labels(nb_labels_total, x, y, z) for i_atlas in range(0, nb_tracts_total): atlas[i_atlas] = nib.load(path_atlas+atlas_file[i_atlas]).get_data() # Check integrity sct.printv('\nCheck atlas integrity...', param.verbose) check_integrity(atlas, atlas_id, atlas_name)
example=["0", "1", "2"], default_value="1", ) return parser if __name__ == "__main__": parser = get_parser() arguments = parser.parse(sys.argv[1:]) ml_param = Param() fname_gm = arguments["-gm"] fname_wm = arguments["-wm"] path_template = arguments["-t"] if not sct.check_folder_exist(path_template): sct.printv( parser.usage.generate( error="ERROR: label/ folder does not exist. Please specify the path to the template using flag -t" ) ) fname_warp_template = arguments["-w"] fname_warp_target2template = None fname_manual_gmseg = None fname_sc_seg = None if "-param" in arguments: ml_param.param_reg = arguments["-param"] if "-manual-gm" in arguments: fname_manual_gmseg = arguments["-manual-gm"]
def main():
    """Aggregate atlas-validation result files and plot absolute error per tract.

    Reads every ``*.txt`` result file found in the ``noise``, ``tracts`` and
    ``csf`` sub-folders of the results folder, extracts per-file metadata
    (noise std, tract std, CSF value, method names) and per-label errors,
    checks that all files were generated with the same methods and labels,
    averages errors across left/right sides, prints all intermediate arrays,
    then draws one errorbar plot per method (error vs. fractional volume of
    each tract) and saves it as a PDF.

    Inputs come from ``sys.argv`` (``-i`` results folder, ``-m``
    comma-separated method names) unless ``param_default.debug`` is set.
    """
    results_folder = param_default.results_folder
    methods_to_display = param_default.methods_to_display
    noise_std_to_display = param_default.noise_std_to_display
    tracts_std_to_display = param_default.tracts_std_to_display
    csf_value_to_display = param_default.csf_value_to_display
    nb_RL_labels = param_default.nb_RL_labels

    # Parameters for debug mode
    if param_default.debug:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        results_folder = "/Users/slevy_local/spinalcordtoolbox/dev/atlas/validate_atlas/results_20150210_200iter"  #"C:/cygwin64/home/Simon_2/data_methods_comparison"
        path_sct = '/Users/slevy_local/spinalcordtoolbox'  #'C:/cygwin64/home/Simon_2/spinalcordtoolbox'
    else:
        status, path_sct = commands.getstatusoutput('echo $SCT_DIR')

        # Check input parameters
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'i:m:')  # define flags
        except getopt.GetoptError as err:  # check if the arguments are defined
            print str(err)  # error
            # usage()  # display usage
        # if not opts:
        #     print 'Please enter the path to the result folder. Exit program.'
        #     sys.exit(1)
        #     # usage()
        for opt, arg in opts:  # explore flags
            if opt in '-i':
                results_folder = arg
            if opt in '-m':
                methods_to_display = arg

    # Append path that contains scripts, to be able to load modules
    sys.path.append(path_sct + '/scripts')
    import sct_utils as sct
    import isct_get_fractional_volume

    sct.printv("Working directory: " + os.getcwd())

    results_folder_noise = results_folder + '/noise'
    results_folder_tracts = results_folder + '/tracts'
    results_folder_csf = results_folder + '/csf'

    sct.printv('\n\nData will be extracted from folder ' + results_folder_noise + ' , ' + results_folder_tracts + ' and ' + results_folder_csf + '.', 'warning')
    sct.printv('\t\tCheck existence...')
    sct.check_folder_exist(results_folder_noise)
    sct.check_folder_exist(results_folder_tracts)
    sct.check_folder_exist(results_folder_csf)

    # Extract methods to display (comma-separated string -> list of names)
    methods_to_display = methods_to_display.strip().split(',')

    # Extract file names of the results files
    fname_results_noise = glob.glob(results_folder_noise + '/*.txt')
    fname_results_tracts = glob.glob(results_folder_tracts + '/*.txt')
    fname_results_csf = glob.glob(results_folder_csf + '/*.txt')
    fname_results = fname_results_noise + fname_results_tracts + fname_results_csf
    # Remove doublons (due to the two folders)
    # for i_fname in range(0, len(fname_results)):
    #     for j_fname in range(0, len(fname_results)):
    #         if (i_fname != j_fname) & (os.path.basename(fname_results[i_fname]) == os.path.basename(fname_results[j_fname])):
    #             fname_results.remove(fname_results[j_fname])
    # Drop files whose basename appears more than once across the three folders
    # (only the first duplicate occurrence is removed per repeated name).
    file_results = []
    for fname in fname_results:
        file_results.append(os.path.basename(fname))
    for file in file_results:
        if file_results.count(file) > 1:
            ind = file_results.index(file)
            fname_results.remove(fname_results[ind])
            file_results.remove(file)

    nb_results_file = len(fname_results)

    # 1st dim: SNR, 2nd dim: tract std, 3rd dim: mean abs error, 4th dim: std abs error
    # result_array = numpy.empty((nb_results_file, nb_results_file, 3), dtype=object)
    # SNR
    snr = numpy.zeros((nb_results_file))
    # Tracts std
    tracts_std = numpy.zeros((nb_results_file))
    # CSF value
    csf_values = numpy.zeros((nb_results_file))
    # methods' name
    methods_name = []  #numpy.empty((nb_results_file, nb_method), dtype=object)
    # labels
    error_per_label = []
    std_per_label = []
    labels_id = []
    # median
    median_results = numpy.zeros((nb_results_file, 5))
    # median std across bootstraps
    median_std = numpy.zeros((nb_results_file, 5))
    # min
    min_results = numpy.zeros((nb_results_file, 5))
    # max
    max_results = numpy.zeros((nb_results_file, 5))

    # Parse each result file: metadata lines are located by searching for
    # marker substrings; lines.index() returns the FIRST matching line.
    for i_file in range(0, nb_results_file):

        # Open file
        f = open(fname_results[i_file])  # open file
        # Extract all lines in .txt file
        lines = [line for line in f.readlines() if line.strip()]

        # extract SNR
        # find all index of lines containing the string "sigma noise"
        ind_line_noise = [lines.index(line_noise) for line_noise in lines if "sigma noise" in line_noise]
        if len(ind_line_noise) != 1:
            sct.printv("ERROR: number of lines including \"sigma noise\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[:, i_file, i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))
            snr[i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))

        # extract tract std
        ind_line_tract_std = [lines.index(line_tract_std) for line_tract_std in lines if "range tracts" in line_tract_std]
        if len(ind_line_tract_std) != 1:
            sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))
            # regex = re.compile(''('(.*)':)  # re.I ignores case (upper/lower)
            # match = regex.search(lines[ind_line_tract_std[0]])
            # result_array[:, i_file, :, :] = match.group(1)  # group 1 corresponds to '.*'
            tracts_std[i_file] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))

        # extract CSF value
        ind_line_csf_value = [lines.index(line_csf_value) for line_csf_value in lines if "# value CSF" in line_csf_value]
        if len(ind_line_csf_value) != 1:
            # NOTE(review): message says "range tracts" but this branch checks
            # "# value CSF" -- looks like a copy-paste slip in the original.
            sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))
            # regex = re.compile(''('(.*)':)  # re.I ignores case (upper/lower)
            # match = regex.search(lines[ind_line_tract_std[0]])
            # result_array[:, i_file, :, :] = match.group(1)  # group 1 corresponds to '.*'
            csf_values[i_file] = int(''.join(c for c in lines[ind_line_csf_value[0]].split(':')[1] if c.isdigit()))

        # extract method name
        ind_line_label = [lines.index(line_label) for line_label in lines if "Label" in line_label]
        if len(ind_line_label) != 1:
            sct.printv("ERROR: number of lines including \"Label\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # methods_name[i_file, :] = numpy.array(lines[ind_line_label[0]].strip().split(',')[1:])
            methods_name.append(lines[ind_line_label[0]].strip().replace(' ', '').split(',')[1:])

        # extract median
        ind_line_median = [lines.index(line_median) for line_median in lines if "median" in line_median]
        if len(ind_line_median) != 1:
            sct.printv("WARNING: number of lines including \"median\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            median = lines[ind_line_median[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 0] = [float(m.split('(')[0]) for m in median]
            # entries look like "value(std)": value before '(', std inside parentheses
            median_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in median])
            median_std[i_file, :] = numpy.array([float(m.split('(')[1][:-1]) for m in median])

        # extract min
        # NOTE(review): `min`/`max` below shadow the Python builtins; kept as-is.
        ind_line_min = [lines.index(line_min) for line_min in lines if "min," in line_min]
        if len(ind_line_min) != 1:
            sct.printv("WARNING: number of lines including \"min\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            min = lines[ind_line_min[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in min]
            min_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in min])

        # extract max
        ind_line_max = [lines.index(line_max) for line_max in lines if "max" in line_max]
        if len(ind_line_max) != 1:
            sct.printv("WARNING: number of lines including \"max\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            max = lines[ind_line_max[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in max]
            max_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in max])

        # extract error for each label
        error_per_label_for_file_i = []
        std_per_label_for_file_i = []
        labels_id_for_file_i = []
        # Due to 2 different kind of file structure, the number of the last label line must be adapted
        # (if no "median" line was found, label lines run to the end of the file)
        if not ind_line_median:
            ind_line_median = [len(lines) + 1]
        for i_line in range(ind_line_label[0] + 1, ind_line_median[0] - 1):
            line_label_i = lines[i_line].strip().split(',')
            error_per_label_for_file_i.append([float(error.strip().split('(')[0]) for error in line_label_i[1:]])
            std_per_label_for_file_i.append([float(error.strip().split('(')[1][:-1]) for error in line_label_i[1:]])
            labels_id_for_file_i.append(int(line_label_i[0]))
        error_per_label.append(error_per_label_for_file_i)
        std_per_label.append(std_per_label_for_file_i)
        labels_id.append(labels_id_for_file_i)

        # close file
        f.close()

    # check if all the files in the result folder were generated with the same number of methods
    if not all(x == methods_name[0] for x in methods_name):
        sct.printv('ERROR: All the generated files in folder ' + results_folder + ' have not been generated with the same number of methods. Exit program.', 'error')
        sys.exit(1)
    # check if all the files in the result folder were generated with the same labels
    if not all(x == labels_id[0] for x in labels_id):
        sct.printv('ERROR: All the generated files in folder ' + results_folder + ' have not been generated with the same labels. Exit program.', 'error')
        sys.exit(1)

    # convert the list "error_per_label" into a numpy array to ease further manipulations
    error_per_label = numpy.array(error_per_label)
    std_per_label = numpy.array(std_per_label)
    # compute different stats (axis=1 collapses the per-label dimension)
    abs_error_per_labels = numpy.absolute(error_per_label)
    max_abs_error_per_meth = numpy.amax(abs_error_per_labels, axis=1)
    min_abs_error_per_meth = numpy.amin(abs_error_per_labels, axis=1)
    mean_abs_error_per_meth = numpy.mean(abs_error_per_labels, axis=1)
    std_abs_error_per_meth = numpy.std(abs_error_per_labels, axis=1)

    # average error and std across sides: label i and label i+nb_RL_labels are
    # assumed to be the left/right pair of the same tract
    meanRL_abs_error_per_labels = numpy.zeros((error_per_label.shape[0], nb_RL_labels, error_per_label.shape[2]))
    meanRL_std_abs_error_per_labels = numpy.zeros((std_per_label.shape[0], nb_RL_labels, std_per_label.shape[2]))
    for i_file in range(0, nb_results_file):
        for i_meth in range(0, len(methods_name[i_file])):
            for i_label in range(0, nb_RL_labels):
                # find indexes of corresponding labels
                ind_ID_first_side = labels_id[i_file].index(i_label)
                ind_ID_other_side = labels_id[i_file].index(i_label + nb_RL_labels)
                # compute mean across 2 sides
                meanRL_abs_error_per_labels[i_file, i_label, i_meth] = float(error_per_label[i_file, ind_ID_first_side, i_meth] + error_per_label[i_file, ind_ID_other_side, i_meth]) / 2
                meanRL_std_abs_error_per_labels[i_file, i_label, i_meth] = float(std_per_label[i_file, ind_ID_first_side, i_meth] + std_per_label[i_file, ind_ID_other_side, i_meth]) / 2

    nb_method = len(methods_to_display)

    # Dump every intermediate array for inspection
    sct.printv('Noise std of the ' + str(nb_results_file) + ' generated files:')
    print snr
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Tracts std of the ' + str(nb_results_file) + ' generated files:')
    print tracts_std
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('CSF value of the ' + str(nb_results_file) + ' generated files:')
    print csf_values
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Methods used to generate results for the ' + str(nb_results_file) + ' generated files:')
    print methods_name
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Median obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print median_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Minimum obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print min_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Maximum obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print max_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Labels\' ID (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print labels_id
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Errors obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print error_per_label
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Mean errors across both sides obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print meanRL_abs_error_per_labels

    # Compute fractional volume per label
    # NOTE(review): reads the atlas from the hard-coded relative path
    # './cropped_atlas/' -- depends on the current working directory.
    labels_id_FV, labels_name_FV, fract_vol_per_lab, labels_name_FV_RL_gathered, fract_vol_per_lab_RL_gathered = isct_get_fractional_volume.get_fractional_volume_per_label('./cropped_atlas/', 'info_label.txt')
    # # Get the number of voxels including at least one tract
    # nb_voxels_in_WM = isct_get_fractional_volume.get_nb_voxel_in_WM('./cropped_atlas/', 'info_label.txt')
    # normalize by the number of voxels in WM and express it as a percentage
    fract_vol_norm = numpy.divide(fract_vol_per_lab_RL_gathered, numpy.sum(fract_vol_per_lab_RL_gathered) / 100)
    # NOT NECESSARY NOW WE AVERAGE ACROSS BOTH SIDES (which orders the labels)
    # # check if the order of the labels returned by the function computing the fractional volumes is the same (which should be the case)
    # if labels_id_FV != labels_id[0]:
    #     sct.printv('\n\nERROR: the labels IDs returned by the function \'i_sct_get_fractional_volume\' are different from the labels IDs of the results files\n\n', 'error')
    # # Remove labels #30 and #31
    # labels_id_FV_29, labels_name_FV_29, fract_vol_per_lab_29 = labels_id_FV[:-2], labels_name_FV[:-2], fract_vol_per_lab[:-2]

    # indexes of labels sort according to the fractional volume
    ind_labels_sort = numpy.argsort(fract_vol_norm)

    # Find index of the file generated with noise variance = 10 and tracts std = 10
    ind_file_to_display = numpy.where((snr == noise_std_to_display) & (tracts_std == tracts_std_to_display) & (csf_values == csf_value_to_display))
    # sort arrays in this order
    meanRL_abs_error_per_labels_sort = meanRL_abs_error_per_labels[ind_file_to_display[0], ind_labels_sort, :]
    meanRL_std_abs_error_per_labels_sort = meanRL_std_abs_error_per_labels[ind_file_to_display[0], ind_labels_sort, :]
    labels_name_sort = numpy.array(labels_name_FV_RL_gathered)[ind_labels_sort]

    # *********************************************** START PLOTTING HERE **********************************************
    # stringColor = Color()
    matplotlib.rcParams.update({'font.size': 50, 'font.family': 'trebuchet'})
    # plt.rcParams['xtick.major.pad'] = '11'
    plt.rcParams['ytick.major.pad'] = '15'

    fig = plt.figure(figsize=(60, 37))
    width = 1.0 / (nb_method + 1)
    ind_fig = numpy.arange(len(labels_name_sort)) * (1.0 + width)
    plt.ylabel('Absolute error (%)\n', fontsize=65)
    plt.xlabel('Fractional volume (% of the total number of voxels in WM)', fontsize=65)
    plt.title('Absolute error per tract as a function of their fractional volume\n\n', fontsize=30)
    plt.suptitle('(Noise std=' + str(snr[ind_file_to_display[0]][0]) + ', Tracts std=' + str(tracts_std[ind_file_to_display[0]][0]) + ', CSF value=' + str(csf_values[ind_file_to_display[0]][0]) + ')', fontsize=30)

    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    markers = ['o', 's', '^', 'D']
    errorbar_plots = []
    # one errorbar series per method to display (zip also bounds this loop by
    # the 4 markers available)
    for meth, color, marker in zip(methods_to_display, colors, markers):
        i_meth = methods_name[0].index(meth)
        i_meth_to_display = methods_to_display.index(meth)

        plot_i = plt.errorbar(ind_fig + i_meth_to_display * width, meanRL_abs_error_per_labels_sort[:, i_meth], meanRL_std_abs_error_per_labels_sort[:, i_meth], color=color, marker=marker, markersize=35, lw=7, elinewidth=1, capthick=5, capsize=10)
        # plot_i = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_files_csf_sort, :, i_meth]), positions=ind_fig + i_meth_to_display * width + (float(i_meth_to_display) * width) / (nb_method + 1), widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops)
        errorbar_plots.append(plot_i)

    # add alternated vertical background colored bars
    for i_xtick in range(0, len(ind_fig), 2):
        plt.axvspan(ind_fig[i_xtick] - width - width / 2, ind_fig[i_xtick] + (nb_method + 1) * width - width / 2, facecolor='grey', alpha=0.1)

    # concatenate value of fractional volume to labels'name
    xtick_labels = [labels_name_sort[i_lab] + '\n' + r'$\bf{[' + str(round(fract_vol_norm[ind_labels_sort][i_lab], 2)) + ']}$' for i_lab in range(0, len(labels_name_sort))]
    # shorten the one very long tract name so the tick label fits
    ind_lemniscus = numpy.where(labels_name_sort == 'spinal lemniscus (spinothalamic and spinoreticular tracts)')[0][0]
    xtick_labels[ind_lemniscus] = 'spinal lemniscus\n' + r'$\bf{[' + str(round(fract_vol_norm[ind_labels_sort][ind_lemniscus], 2)) + ']}$'

    # plt.legend(box_plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
    plt.legend(errorbar_plots, methods_to_display, loc=1, fontsize=50, numpoints=1)
    plt.xticks(ind_fig + (numpy.floor(float(nb_method - 1) / 2)) * width, xtick_labels, fontsize=45)
    # Tweak spacing to prevent clipping of tick-labels
    plt.subplots_adjust(bottom=0, top=0.95, right=0.96)
    plt.gca().set_xlim([-width, numpy.max(ind_fig) + (nb_method + 0.5) * width])
    plt.gca().set_ylim([0, 17])
    plt.gca().yaxis.set_major_locator(plt.MultipleLocator(1.0))
    plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.5))
    plt.grid(b=True, axis='y', which='both')
    fig.autofmt_xdate()

    plt.savefig(param_default.fname_folder_to_save_fig + '/absolute_error_vs_fractional_volume.pdf', format='PDF')

    plt.show(block=False)
def main(): # Initialization to defaults parameters fname_data = '' # data is empty by default path_label = '' # empty by default method = param.method # extraction mode by default labels_of_interest = param.labels_of_interest slices_of_interest = param.slices_of_interest vertebral_levels = param.vertebral_levels average_all_labels = param.average_all_labels fname_output = param.fname_output fname_vertebral_labeling = param.fname_vertebral_labeling fname_normalizing_label = '' # optional then default is empty normalization_method = '' # optional then default is empty actual_vert_levels = None # variable used in case the vertebral levels asked by the user don't correspond exactly to the vertebral levels available in the metric data warning_vert_levels = None # variable used to warn the user in case the vertebral levels he asked don't correspond exactly to the vertebral levels available in the metric data verbose = param.verbose flag_h = 0 ml_clusters = param.ml_clusters adv_param = param.adv_param adv_param_user = '' # Parameters for debug mode if param.debug: print '\n*** WARNING: DEBUG MODE ON ***\n' status, path_sct_data = commands.getstatusoutput('echo $SCT_TESTING_DATA_DIR') fname_data = '/Users/julien/data/temp/sct_example_data/mt/mtr.nii.gz' path_label = '/Users/julien/data/temp/sct_example_data/mt/label/atlas/' method = 'map' ml_clusters = '0:29,30,31' labels_of_interest = '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29' slices_of_interest = '' vertebral_levels = '' average_all_labels = 1 fname_normalizing_label = '' #path_sct+'/testing/data/errsm_23/mt/label/template/MNI-Poly-AMU_CSF.nii.gz' normalization_method = '' #'whole' else: # Check input parameters try: opts, args = getopt.getopt(sys.argv[1:], 'haf:i:l:m:n:o:p:v:w:z:') # define flags except getopt.GetoptError as err: # check if the arguments are defined print str(err) # error usage() # display usage if not opts: usage() for opt, arg in opts: # explore flags if opt in 
'-a': average_all_labels = 1 elif opt in '-f': path_label = os.path.abspath(arg) # save path of labels folder elif opt == '-h': # help option flag_h = 1 elif opt in '-i': fname_data = arg elif opt in '-l': labels_of_interest = arg elif opt in '-m': # method for metric extraction method = arg elif opt in '-n': # filename of the label by which the user wants to normalize fname_normalizing_label = arg elif opt in '-o': # output option fname_output = arg # fname of output file elif opt in '-p': adv_param_user = arg elif opt in '-v': # vertebral levels option, if the user wants to average the metric across specific vertebral levels vertebral_levels = arg elif opt in '-w': # method used for the normalization by the metric estimation into the normalizing label (see flag -n): 'sbs' for slice-by-slice or 'whole' for normalization after estimation in the whole labels normalization_method = arg elif opt in '-z': # slices numbers option slices_of_interest = arg # save labels numbers # Display usage with tract parameters by default in case files aren't chosen in arguments inputs if fname_data == '' or path_label == '' or flag_h: param.path_label = path_label usage() # Check existence of data file sct.printv('\ncheck existence of input files...', verbose) sct.check_file_exist(fname_data) sct.check_folder_exist(path_label) if fname_normalizing_label: sct.check_folder_exist(fname_normalizing_label) # add slash at the end path_label = sct.slash_at_the_end(path_label, 1) # Find path to the vertebral labeling file if vertebral levels were specified by the user if vertebral_levels: if slices_of_interest: # impossible to select BOTH specific slices and specific vertebral levels print '\nERROR: You cannot select BOTH vertebral levels AND slice numbers.' 
usage() else: fname_vertebral_labeling_list = sct.find_file_within_folder(fname_vertebral_labeling, path_label + '..') if len(fname_vertebral_labeling_list) > 1: print color.red + 'ERROR: More than one file named \'' + fname_vertebral_labeling + ' were found in ' + path_label + '. Exit program.' + color.end sys.exit(2) elif len(fname_vertebral_labeling_list) == 0: print color.red + 'ERROR: No file named \'' + fname_vertebral_labeling + ' were found in ' + path_label + '. Exit program.' + color.end sys.exit(2) else: fname_vertebral_labeling = os.path.abspath(fname_vertebral_labeling_list[0]) # Check input parameters check_method(method, fname_normalizing_label, normalization_method) # parse argument for param if not adv_param_user == '': adv_param = adv_param_user.replace(' ', '').split(',') # remove spaces and parse with comma del adv_param_user # clean variable # TODO: check integrity of input # Extract label info label_id, label_name, label_file = read_label_file(path_label, param.file_info_label) nb_labels_total = len(label_id) # check consistency of label input parameter. label_id_user, average_all_labels = check_labels(labels_of_interest, nb_labels_total, average_all_labels, method) # If 'labels_of_interest' is empty, then # 'label_id_user' contains the index of all labels in the file info_label.txt # print parameters print '\nChecked parameters:' print ' data ...................... '+fname_data print ' folder label .............. '+path_label print ' selected labels ........... '+str(label_id_user) print ' estimation method ......... '+method print ' slices of interest ........ '+slices_of_interest print ' vertebral levels .......... '+vertebral_levels print ' vertebral labeling file.... '+fname_vertebral_labeling print ' advanced parameters ....... 
'+str(adv_param) # Check if the orientation of the data is RPI orientation_data = get_orientation(fname_data) # If orientation is not RPI, change to RPI if orientation_data != 'RPI': sct.printv('\nCreate temporary folder to change the orientation of the NIFTI files into RPI...', verbose) path_tmp = sct.slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1) sct.create_folder(path_tmp) # change orientation and load data sct.printv('\nChange image orientation and load it...', verbose) data = nib.load(set_orientation(fname_data, 'RPI', path_tmp+'orient_data.nii')).get_data() # Do the same for labels sct.printv('\nChange labels orientation and load them...', verbose) labels = np.empty([nb_labels_total], dtype=object) # labels(nb_labels_total, x, y, z) for i_label in range(0, nb_labels_total): labels[i_label] = nib.load(set_orientation(path_label+label_file[i_label], 'RPI', path_tmp+'orient_'+label_file[i_label])).get_data() if fname_normalizing_label: # if the "normalization" option is wanted, normalizing_label = np.empty([1], dtype=object) # choose this kind of structure so as to keep easily the # compatibility with the rest of the code (dimensions: (1, x, y, z)) normalizing_label[0] = nib.load(set_orientation(fname_normalizing_label, 'RPI', path_tmp+'orient_normalizing_volume.nii')).get_data() if vertebral_levels: # if vertebral levels were selected, data_vertebral_labeling = nib.load(set_orientation(fname_vertebral_labeling, 'RPI', path_tmp+'orient_vertebral_labeling.nii.gz')).get_data() # Remove the temporary folder used to change the NIFTI files orientation into RPI sct.printv('\nRemove the temporary folder...', verbose) status, output = commands.getstatusoutput('rm -rf ' + path_tmp) else: # Load image sct.printv('\nLoad image...', verbose) data = nib.load(fname_data).get_data() # Load labels sct.printv('\nLoad labels...', verbose) labels = np.empty([nb_labels_total], dtype=object) # labels(nb_labels_total, x, y, z) for i_label in range(0, nb_labels_total): 
labels[i_label] = nib.load(path_label+label_file[i_label]).get_data() if fname_normalizing_label: # if the "normalization" option is wanted, normalizing_label = np.empty([1], dtype=object) # choose this kind of structure so as to keep easily the # compatibility with the rest of the code (dimensions: (1, x, y, z)) normalizing_label[0] = nib.load(fname_normalizing_label).get_data() # load the data of the normalizing label if vertebral_levels: # if vertebral levels were selected, data_vertebral_labeling = nib.load(fname_vertebral_labeling).get_data() # Change metric data type into floats for future manipulations (normalization) data = np.float64(data) # Get dimensions of data sct.printv('\nGet dimensions of data...', verbose) nx, ny, nz = data.shape sct.printv(' ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose) # Get dimensions of labels sct.printv('\nGet dimensions of label...', verbose) nx_atlas, ny_atlas, nz_atlas = labels[0].shape sct.printv('.. '+str(nx_atlas)+' x '+str(ny_atlas)+' x '+str(nz_atlas)+' x '+str(nb_labels_total), verbose) # Check dimensions consistency between atlas and data if (nx, ny, nz) != (nx_atlas, ny_atlas, nz_atlas): print '\nERROR: Metric data and labels DO NOT HAVE SAME DIMENSIONS.' 
sys.exit(2) # Update the flag "slices_of_interest" according to the vertebral levels selected by user (if it's the case) if vertebral_levels: slices_of_interest, actual_vert_levels, warning_vert_levels = \ get_slices_matching_with_vertebral_levels(data, vertebral_levels, data_vertebral_labeling) # select slice of interest by cropping data and labels if slices_of_interest: data = remove_slices(data, slices_of_interest) for i_label in range(0, nb_labels_total): labels[i_label] = remove_slices(labels[i_label], slices_of_interest) if fname_normalizing_label: # if the "normalization" option was selected, normalizing_label[0] = remove_slices(normalizing_label[0], slices_of_interest) # if user wants to get unique value across labels, then combine all labels together if average_all_labels == 1: sum_labels_user = np.sum(labels[label_id_user]) # sum the labels selected by user if method == 'ml' or method == 'map': # in case the maximum likelihood and the average across different labels are wanted labels_tmp = np.empty([nb_labels_total - len(label_id_user) + 1], dtype=object) labels = np.delete(labels, label_id_user) # remove the labels selected by user labels_tmp[0] = sum_labels_user # put the sum of the labels selected by user in first position of the tmp # variable for i_label in range(1, len(labels_tmp)): labels_tmp[i_label] = labels[i_label-1] # fill the temporary array with the values of the non-selected labels labels = labels_tmp # replace the initial labels value by the updated ones (with the summed labels) del labels_tmp # delete the temporary labels else: # in other cases than the maximum likelihood, we can remove other labels (not needed for estimation) labels = np.empty(1, dtype=object) labels[0] = sum_labels_user # we create a new label array that includes only the summed labels if fname_normalizing_label: # if the "normalization" option is wanted sct.printv('\nExtract normalization values...', verbose) if normalization_method == 'sbs': # case: the user wants to 
normalize slice-by-slice for z in range(0, data.shape[-1]): normalizing_label_slice = np.empty([1], dtype=object) # in order to keep compatibility with the function # 'extract_metric_within_tract', define a new array for the slice z of the normalizing labels normalizing_label_slice[0] = normalizing_label[0][..., z] metric_normalizing_label = extract_metric_within_tract(data[..., z], normalizing_label_slice, method, 0) # estimate the metric mean in the normalizing label for the slice z if metric_normalizing_label[0][0] != 0: data[..., z] = data[..., z]/metric_normalizing_label[0][0] # divide all the slice z by this value elif normalization_method == 'whole': # case: the user wants to normalize after estimations in the whole labels metric_mean_norm_label, metric_std_norm_label = extract_metric_within_tract(data, normalizing_label, method, param.verbose) # mean and std are lists # identify cluster for each tract (for use with robust ML) ml_clusters_array = get_clusters(ml_clusters, labels) # extract metrics within labels sct.printv('\nExtract metric within labels...', verbose) metric_mean, metric_std = extract_metric_within_tract(data, labels, method, verbose, ml_clusters_array, adv_param) # mean and std are lists if fname_normalizing_label and normalization_method == 'whole': # case: user wants to normalize after estimations in the whole labels metric_mean, metric_std = np.divide(metric_mean, metric_mean_norm_label), np.divide(metric_std, metric_std_norm_label) # update label name if average if average_all_labels == 1: label_name[0] = 'AVERAGED'+' -'.join(label_name[i] for i in label_id_user) # concatenate the names of the # labels selected by the user if the average tag was asked label_id_user = [0] # update "label_id_user" to select the "averaged" label (which is in first position) metric_mean = metric_mean[label_id_user] metric_std = metric_std[label_id_user] # display metrics sct.printv('\nEstimation results:', 1) for i in range(0, metric_mean.size): 
sct.printv(str(label_id_user[i])+', '+str(label_name[label_id_user[i]])+': '+str(metric_mean[i])+' +/- '+str(metric_std[i]), 1, 'info') # save and display metrics save_metrics(label_id_user, label_name, slices_of_interest, metric_mean, metric_std, fname_output, fname_data, method, fname_normalizing_label, actual_vert_levels, warning_vert_levels)
def main(): path_data = param.path_data function_to_test = param.function_to_test # function_to_avoid = param.function_to_avoid remove_tmp_file = param.remove_tmp_file # Check input parameters try: opts, args = getopt.getopt(sys.argv[1:], 'h:d:p:f:r:a:') except getopt.GetoptError: usage() for opt, arg in opts: if opt == '-h': usage() sys.exit(0) if opt == '-d': param.download = int(arg) if opt == '-p': param.path_data = arg if opt == '-f': function_to_test = arg # if opt == '-a': # function_to_avoid = arg if opt == '-r': remove_tmp_file = int(arg) start_time = time.time() # if function_to_avoid: # try: # functions.remove(function_to_avoid) # except ValueError: # print 'The function you want to avoid does not figure in the functions to test list' # download data if param.download: downloaddata() param.path_data = 'sct_testing_data/data' # get absolute path and add slash at the end param.path_data = sct.slash_at_the_end(os.path.abspath(param.path_data), 1) # check existence of testing data folder if not sct.check_folder_exist(param.path_data, 0): downloaddata() # display path to data sct.printv('\nPath to testing data:\n.. ' + param.path_data, param.verbose) # create temp folder that will have all results and go in it param.path_tmp = sct.slash_at_the_end( 'tmp.' + time.strftime("%y%m%d%H%M%S"), 1) sct.create_folder(param.path_tmp) os.chdir(param.path_tmp) # get list of all scripts to test functions = fill_functions() # loop across all functions and test them status = [] [ status.append(test_function(f)) for f in functions if function_to_test == f ] if not status: for f in functions: status.append(test_function(f)) print 'status: ' + str(status) # display elapsed time elapsed_time = time.time() - start_time print 'Finished! 
Elapsed time: ' + str(int(round(elapsed_time))) + 's\n' # remove temp files if param.remove_tmp_file: sct.printv('\nRemove temporary files...', param.verbose) sct.run('rm -rf ' + param.path_tmp, param.verbose) e = 0 if sum(status) != 0: e = 1 print e sys.exit(e)
example=['0', '1', '2'], default_value='1') return parser if __name__ == "__main__": sct.start_stream_logger() parser = get_parser() arguments = parser.parse(sys.argv[1:]) ml_param = Param() fname_gm = arguments['-gm'] fname_wm = arguments['-wm'] path_template = arguments['-t'] if not sct.check_folder_exist(path_template): sct.printv( parser.usage.generate( error= 'ERROR: label/ folder does not exist. Please specify the path to the template using flag -t' )) fname_warp_template = arguments['-w'] fname_warp_target2template = None fname_manual_gmseg = None fname_sc_seg = None fname_template_dest = None if '-param' in arguments: ml_param.param_reg = arguments['-param'] if '-manual-gm' in arguments:
def checkFolder(self, param): # check if the folder exist. If not, create it. if self.parser.check_file_exist: sct.check_folder_exist(param, 0) return param
def main():
    """Build 'template_landmarks.nii.gz' inside the input directory.

    For every landmark volume (*.nii.gz) found in the directory, collect the
    z-coordinates of its non-zero voxels, average them across volumes for each
    of the n_l landmarks, and write single-voxel landmarks (values 1..n_l) at
    those averaged z-positions, centred in the template's axial plane.
    """
    #Initialization
    directory = ""
    fname_template = ''
    n_l = 0  # number of landmarks expected per volume (from '-n')
    verbose = param.verbose

    try:
        opts, args = getopt.getopt(sys.argv[1:],'hi:t:n:v:')
    except getopt.GetoptError:
        usage()
    for opt, arg in opts :
        if opt == '-h':
            usage()
        elif opt in ("-i"):
            directory = arg
        elif opt in ("-t"):
            fname_template = arg
        elif opt in ('-n'):
            n_l = int(arg)
        elif opt in ('-v'):
            verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_template == '' or directory == '':
        usage()

    # check existence of input files
    print'\nCheck if file exists ...\n'
    sct.check_file_exist(fname_template)
    sct.check_folder_exist(directory)

    path_template, file_template, ext_template = sct.extract_fname(fname_template)
    # resolve the template path before chdir so it stays valid afterwards
    template_absolute_path = sct.get_absolute_path(fname_template)

    os.chdir(directory)

    n_i = len([name for name in os.listdir('.') if (os.path.isfile(name) and name.endswith(".nii.gz") and name!='template_landmarks.nii.gz')])  # number of landmark images

    # average[i_volume][i_landmark] = z-coordinate of landmark i in volume i
    average = zeros((n_i,n_l))
    compteur = 0  # row index into 'average' (French: counter)

    for file in os.listdir('.'):  # NOTE(review): 'file' shadows the Python 2 builtin
        if file.endswith(".nii.gz") and file != 'template_landmarks.nii.gz':
            print file
            img = nibabel.load(file)
            data = img.get_data()
            # coordinates of all non-zero (landmark) voxels
            X,Y,Z = (data>0).nonzero()
            # sort z-coordinates ascending, then reverse -> descending order
            Z = [Z[i] for i in Z.argsort()]
            Z.reverse()
            # keep the n_l largest z-coordinates — assumes one voxel per
            # landmark per volume; TODO confirm
            for i in xrange(n_l):
                if i < len(Z):
                    average[compteur][i] = Z[i]
            compteur = compteur + 1

    # per-landmark mean z across volumes, ignoring rows where the landmark was
    # missing (z == 0), rounded to the nearest slice
    average = array([int(round(mean([average[average[:,i]>0,i]]))) for i in xrange(n_l)])

    #print average

    print template_absolute_path
    print '\nGet dimensions of template...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(template_absolute_path)
    print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
    print '.. voxel size: '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'

    # reuse the template as geometry/header reference, but zero out its data
    img = nibabel.load(template_absolute_path)
    data = img.get_data()
    hdr = img.get_header()
    data[:,:,:] = 0
    compteur = 1  # landmark label value, starting at 1
    for i in average:
        print int(round(nx/2.0)),int(round(ny/2.0)),int(round(i)),int(round(compteur))
        # one voxel per landmark, centred in the axial plane at averaged z
        data[int(round(nx/2.0)),int(round(ny/2.0)),int(round(i))] = int(round(compteur))
        compteur = compteur + 1

    print '\nSave volume ...'
    #hdr.set_data_dtype('float32') # set imagetype to uint8
    # save volume
    #data = data.astype(float32, copy =False)
    img = nibabel.Nifti1Image(data, None, hdr)
    file_name = 'template_landmarks.nii.gz'
    nibabel.save(img,file_name)
    print '\nFile created : ' + file_name
def main():
    """Compare automatic vs manual metric-estimation results and plot them.

    Reads every result .txt file found under '<results_folder>/manual_mask/sub',
    extracts per-file noise std, tract std, CSF value, method names, per-label
    errors and summary statistics (median/min/max), checks that all files share
    the same methods and labels, then draws an error-bar plot of the absolute
    error per label for each method and saves it as a PDF.
    """
    results_folder = param_default.results_folder
    methods_to_display = param_default.methods_to_display

    # Parameters for debug mode
    # NOTE(review): debug mode hard-codes developer-machine paths.
    if param_default.debug:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        results_folder = "/Users/slevy_local/spinalcordtoolbox/dev/atlas/validate_atlas/results_20150210_200iter" #"C:/cygwin64/home/Simon_2/data_auto_vs_manual"
        path_sct = '/Users/slevy_local/spinalcordtoolbox' #'C:/cygwin64/home/Simon_2/spinalcordtoolbox'
        methods_to_display = 'bin,man0,man1,man2,man3'
    else:
        status, path_sct = commands.getstatusoutput('echo $SCT_DIR')

    # Check input parameters
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'i:m:')  # define flags
    except getopt.GetoptError as err:  # check if the arguments are defined
        print str(err)  # error
        # usage()  # display usage
    # if not opts:
    #     print 'Please enter the path to the result folder. Exit program.'
    #     sys.exit(1)
    #     # usage()
    for opt, arg in opts:  # explore flags
        if opt in '-i':
            results_folder = arg
        if opt in '-m':
            methods_to_display = arg

    # Append path that contains scripts, to be able to load modules
    sys.path.append(path_sct + '/scripts')
    import sct_utils as sct

    sct.printv("Working directory: " + os.getcwd())

    # Folder including data "automatic vs manual"
    folder_auto_vs_manual = results_folder + '/manual_mask/sub'

    sct.printv(
        '\n\nData will be extracted from folder ' + folder_auto_vs_manual +
        ' .', 'warning')
    sct.printv('\t\tCheck existence...')
    sct.check_folder_exist(folder_auto_vs_manual)

    # Extract methods to display
    methods_to_display = methods_to_display.strip().split(',')

    fname_results = glob.glob(folder_auto_vs_manual + '/*.txt')
    nb_results_file = len(fname_results)

    # 1st dim: SNR, 2nd dim: tract std, 3rd dim: mean abs error, 4th dim: std abs error
    # result_array = numpy.empty((nb_results_file, nb_results_file, 3), dtype=object)
    # SNR
    snr = numpy.zeros((nb_results_file))
    # Tracts std
    tracts_std = numpy.zeros((nb_results_file))
    # CSF value
    csf_values = numpy.zeros((nb_results_file))
    # methods' name
    methods_name = []  #numpy.empty((nb_results_file, nb_method), dtype=object)
    # labels
    error_per_label = []
    std_per_label = []
    labels_id = []
    # median
    # NOTE(review): the 6 columns assume 6 methods per results file — confirm
    # against the generating script.
    median_results = numpy.zeros((nb_results_file, 6))
    # median std across bootstraps
    median_std = numpy.zeros((nb_results_file, 6))
    # min
    min_results = numpy.zeros((nb_results_file, 6))
    # max
    max_results = numpy.zeros((nb_results_file, 6))

    # Parse each results file in turn
    for i_file in range(0, nb_results_file):

        # Open file
        f = open(fname_results[i_file])  # open file
        # Extract all lines in .txt file (blank lines dropped)
        lines = [line for line in f.readlines() if line.strip()]

        # extract SNR
        # find all index of lines containing the string "sigma noise"
        ind_line_noise = [
            lines.index(line_noise) for line_noise in lines
            if "sigma noise" in line_noise
        ]
        if len(ind_line_noise) != 1:
            sct.printv(
                "ERROR: number of lines including \"sigma noise\" is different from 1. Exit program.",
                'error')
            sys.exit(1)
        else:
            # result_array[:, i_file, i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))
            # keep only the digits of the line to recover the numeric value
            snr[i_file] = int(''.join(c for c in lines[ind_line_noise[0]]
                                      if c.isdigit()))

        # extract tract std
        ind_line_tract_std = [
            lines.index(line_tract_std) for line_tract_std in lines
            if "range tracts" in line_tract_std
        ]
        if len(ind_line_tract_std) != 1:
            sct.printv(
                "ERROR: number of lines including \"range tracts\" is different from 1. Exit program.",
                'error')
            sys.exit(1)
        else:
            # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))
            # regex = re.compile(''('(.*)':)  # re.I permet d'ignorer la case (majuscule/minuscule)
            # match = regex.search(lines[ind_line_tract_std[0]])
            # result_array[:, i_file, :, :] = match.group(1)  # group 1 corresponds to '.*'
            tracts_std[i_file] = int(''.join(
                c for c in lines[ind_line_tract_std[0]].split(':')[1]
                if c.isdigit()))

        # extract CSF value
        ind_line_csf_value = [
            lines.index(line_csf_value) for line_csf_value in lines
            if "# value CSF" in line_csf_value
        ]
        if len(ind_line_csf_value) != 1:
            # NOTE(review): message says "range tracts" but this block checks
            # "# value CSF" — copy-paste error in the error text.
            sct.printv(
                "ERROR: number of lines including \"range tracts\" is different from 1. Exit program.",
                'error')
            sys.exit(1)
        else:
            csf_values[i_file] = int(''.join(
                c for c in lines[ind_line_csf_value[0]].split(':')[1]
                if c.isdigit()))

        # extract method name
        ind_line_label = [
            lines.index(line_label) for line_label in lines
            if "Label" in line_label
        ]
        if len(ind_line_label) != 1:
            sct.printv(
                "ERROR: number of lines including \"Label\" is different from 1. Exit program.",
                'error')
            sys.exit(1)
        else:
            # methods_name[i_file, :] = numpy.array(lines[ind_line_label[0]].strip().split(',')[1:])
            methods_name.append(lines[ind_line_label[0]].strip().replace(
                ' ', '').split(',')[1:])

        # extract median
        ind_line_median = [
            lines.index(line_median) for line_median in lines
            if "median" in line_median
        ]
        if len(ind_line_median) != 1:
            sct.printv(
                "WARNING: number of lines including \"median\" is different from 1. Exit program.",
                'warning')
            # sys.exit(1)
        else:
            median = lines[ind_line_median[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 0] = [float(m.split('(')[0]) for m in median]
            # each cell is formatted 'value(std)': split on '(' for the value,
            # strip the trailing ')' for the std
            median_results[i_file, :] = numpy.array(
                [float(m.split('(')[0]) for m in median])
            median_std[i_file, :] = numpy.array(
                [float(m.split('(')[1][:-1]) for m in median])

        # extract min
        ind_line_min = [
            lines.index(line_min) for line_min in lines if "min," in line_min
        ]
        if len(ind_line_min) != 1:
            sct.printv(
                "WARNING: number of lines including \"min\" is different from 1. Exit program.",
                'warning')
            # sys.exit(1)
        else:
            # NOTE(review): 'min' shadows the Python builtin within this scope.
            min = lines[ind_line_min[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in min]
            min_results[i_file, :] = numpy.array(
                [float(m.split('(')[0]) for m in min])

        # extract max
        ind_line_max = [
            lines.index(line_max) for line_max in lines if "max" in line_max
        ]
        if len(ind_line_max) != 1:
            sct.printv(
                "WARNING: number of lines including \"max\" is different from 1. Exit program.",
                'warning')
            # sys.exit(1)
        else:
            # NOTE(review): 'max' shadows the Python builtin within this scope.
            max = lines[ind_line_max[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in max]
            max_results[i_file, :] = numpy.array(
                [float(m.split('(')[0]) for m in max])

        # extract error for each label
        error_per_label_for_file_i = []
        std_per_label_for_file_i = []
        labels_id_for_file_i = []
        # Due to 2 different kind of file structure, the number of the last label line must be adapted
        if not ind_line_median:
            ind_line_median = [len(lines) + 1]
        # label lines sit between the "Label" header and the "median" line
        for i_line in range(ind_line_label[0] + 1, ind_line_median[0] - 1):
            line_label_i = lines[i_line].strip().split(',')
            error_per_label_for_file_i.append([
                float(error.strip().split('(')[0])
                for error in line_label_i[1:]
            ])
            std_per_label_for_file_i.append([
                float(error.strip().split('(')[1][:-1])
                for error in line_label_i[1:]
            ])
            labels_id_for_file_i.append(line_label_i[0])
        error_per_label.append(error_per_label_for_file_i)
        std_per_label.append(std_per_label_for_file_i)
        labels_id.append(labels_id_for_file_i)

        # close file
        f.close()

    # check if all the files in the result folder were generated with the same number of methods
    if not all(x == methods_name[0] for x in methods_name):
        sct.printv(
            'ERROR: All the generated files in folder ' + results_folder +
            ' have not been generated with the same number of methods. Exit program.',
            'error')
        sys.exit(1)
    # check if all the files in the result folder were generated with the same labels
    if not all(x == labels_id[0] for x in labels_id):
        sct.printv(
            'ERROR: All the generated files in folder ' + results_folder +
            ' have not been generated with the same labels. Exit program.',
            'error')
        sys.exit(1)

    # convert the list "error_per_label" into a numpy array to ease further manipulations
    error_per_label = numpy.array(error_per_label)
    std_per_label = numpy.array(std_per_label)
    # compute different stats
    abs_error_per_labels = numpy.absolute(error_per_label)
    max_abs_error_per_meth = numpy.amax(abs_error_per_labels, axis=1)
    min_abs_error_per_meth = numpy.amin(abs_error_per_labels, axis=1)
    mean_abs_error_per_meth = numpy.mean(abs_error_per_labels, axis=1)
    std_abs_error_per_meth = numpy.std(abs_error_per_labels, axis=1)

    nb_method = len(methods_to_display)

    # dump everything that was parsed, for visual inspection
    sct.printv('Noise std of the ' + str(nb_results_file) +
               ' generated files:')
    print snr
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Tracts std of the ' + str(nb_results_file) +
               ' generated files:')
    print tracts_std
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Methods used to generate results for the ' +
               str(nb_results_file) + ' generated files:')
    print methods_name
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Median obtained with each method (in colons) for the ' +
               str(nb_results_file) + ' generated files (in lines):')
    print median_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Minimum obtained with each method (in colons) for the ' +
               str(nb_results_file) + ' generated files (in lines):')
    print min_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Maximum obtained with each method (in colons) for the ' +
               str(nb_results_file) + ' generated files (in lines):')
    print max_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Labels\' ID (in colons) for the ' + str(nb_results_file) +
               ' generated files (in lines):')
    print labels_id
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv(
        'Mean errors (across bootstraps) obtained with each method (in colons) for the '
        + str(nb_results_file) + ' generated files (in lines):')
    print error_per_label
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv(
        'Errors std (across bootstraps) obtained with each method (in colons) for the '
        + str(nb_results_file) + ' generated files (in lines):')
    print std_per_label

    # ********************************** START PLOTTING HERE ***********************************************************
    # find index of the file generated with sigma noise = 10 and range tracts = -10:+10
    ind_file_noise10_tracts_std10 = numpy.where((snr == 10)
                                                & (tracts_std == 10))[0][0]

    matplotlib.rcParams.update({'font.size': 45, 'font.family': 'trebuchet'})
    plt.rcParams['xtick.major.pad'] = '9'
    plt.rcParams['ytick.major.pad'] = '15'

    fig0 = plt.figure(0, figsize=(34, 17))
    # bar/marker horizontal spacing derived from the number of methods shown
    width = 0.5 / (nb_method + 1)
    ind_fig0 = numpy.arange(len(labels_id[0]))
    plt.ylabel('Absolute error (%)', fontsize=55)
    plt.xlabel('Labels', fontsize=55)
    plt.suptitle('Automatic estimation vs. manual estimation', fontsize=65)
    plt.title('(Noise std=' + str(snr[0]) + ', Tracts std=' +
              str(tracts_std[0]) + ', CSF value=' + str(csf_values[0]) +
              ')\n',
              fontsize=65)

    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    # NOTE(review): zip() silently truncates if more than 7 methods are shown.
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    plots = []
    marker_size = 30
    line_width = 3.0
    cap_size = 15
    eline_width = 7.0
    cap_thick = 5.0
    for meth, color in zip(methods_to_display, colors):
        # column index of this method in the parsed results
        i_meth = methods_name[0].index(meth)
        # position of this method within the displayed group
        i_meth_to_display = methods_to_display.index(meth)

        plot = plt.errorbar(
            ind_fig0 + i_meth_to_display * width +
            (float(i_meth_to_display) * width) / (nb_method + 1),
            error_per_label[ind_file_noise10_tracts_std10, :, i_meth],
            std_per_label[ind_file_noise10_tracts_std10, :, i_meth],
            color=color,
            linestyle='None',
            marker='o',
            ms=marker_size,
            lw=line_width,
            capsize=cap_size,
            capthick=cap_thick,
            elinewidth=eline_width)
        plots.append(plot[0])

    # add alternated vertical background colored bars
    for i_xtick in range(0, len(ind_fig0), 2):
        plt.axvspan(ind_fig0[i_xtick] - 3 * width,
                    ind_fig0[i_xtick] + (nb_method + 1) * width + 2 * width,
                    facecolor='grey',
                    alpha=0.1)

    # plt.legend(plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0., handler_map={Line2D: HandlerLine2D(numpoints=1)})
    plt.legend(plots,
               methods_to_display,
               loc='best',
               handler_map={Line2D: HandlerLine2D(numpoints=1)})
    # plt.xticks(ind_fig0 + (numpy.floor(nb_method/2))*width*(1.0+1.0/(nb_method+1)), labels_id[0], fontsize=45)
    # NOTE(review): tick labels are hard-coded to 3 tracts ('l-cst', 'r-cst',
    # 'dc') — assumes exactly 3 labels per file; confirm.
    plt.xticks(ind_fig0 + (numpy.floor(nb_method / 2)) * width *
               (1.0 + 1.0 / (nb_method + 1)), ['l-cst', 'r-cst', 'dc'],
               fontsize=45)
    plt.gca().set_xlim(
        [-3 * width, numpy.max(ind_fig0) + (nb_method + 3) * width])
    # plt.gca().set_ylim([0, 2])
    plt.gca().yaxis.set_major_locator(plt.MultipleLocator(2.5))
    plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.5))
    plt.grid(b=True, axis='y', which='both')
    # adjust the size of the frame
    # plt.subplots_adjust(bottom=0.15, top=0.86, right=0.7, left=0.2)

    plt.savefig(param_default.fname_folder_to_save_fig +
                '/automatic_method_vs_manual_methods.pdf',
                format='PDF')

    plt.show(block=False)
def main(): results_folder = param_default.results_folder methods_to_display = param_default.methods_to_display noise_std_to_display = param_default.noise_std_to_display tracts_std_to_display = param_default.tracts_std_to_display csf_value_to_display = param_default.csf_value_to_display nb_RL_labels = param_default.nb_RL_labels # Parameters for debug mode if param_default.debug: print '\n*** WARNING: DEBUG MODE ON ***\n' results_folder = "/Users/slevy_local/spinalcordtoolbox/dev/atlas/validate_atlas/results_20150210_200iter"#"C:/cygwin64/home/Simon_2/data_methods_comparison" path_sct = '/Users/slevy_local/spinalcordtoolbox' #'C:/cygwin64/home/Simon_2/spinalcordtoolbox' else: status, path_sct = commands.getstatusoutput('echo $SCT_DIR') # Check input parameters try: opts, args = getopt.getopt(sys.argv[1:], 'i:m:') # define flags except getopt.GetoptError as err: # check if the arguments are defined print str(err) # error # usage() # display usage # if not opts: # print 'Please enter the path to the result folder. Exit program.' 
# sys.exit(1) # # usage() for opt, arg in opts: # explore flags if opt in '-i': results_folder = arg if opt in '-m': methods_to_display = arg # Append path that contains scripts, to be able to load modules sys.path.append(path_sct + '/scripts') import sct_utils as sct import isct_get_fractional_volume sct.printv("Working directory: " + os.getcwd()) results_folder_noise = results_folder + '/noise' results_folder_tracts = results_folder + '/tracts' results_folder_csf = results_folder + '/csf' sct.printv('\n\nData will be extracted from folder ' + results_folder_noise + ' , ' + results_folder_tracts + ' and ' + results_folder_csf + '.', 'warning') sct.printv('\t\tCheck existence...') sct.check_folder_exist(results_folder_noise) sct.check_folder_exist(results_folder_tracts) sct.check_folder_exist(results_folder_csf) # Extract methods to display methods_to_display = methods_to_display.strip().split(',') # Extract file names of the results files fname_results_noise = glob.glob(results_folder_noise + '/*.txt') fname_results_tracts = glob.glob(results_folder_tracts + '/*.txt') fname_results_csf = glob.glob(results_folder_csf + '/*.txt') fname_results = fname_results_noise + fname_results_tracts + fname_results_csf # Remove doublons (due to the two folders) # for i_fname in range(0, len(fname_results)): # for j_fname in range(0, len(fname_results)): # if (i_fname != j_fname) & (os.path.basename(fname_results[i_fname]) == os.path.basename(fname_results[j_fname])): # fname_results.remove(fname_results[j_fname]) file_results = [] for fname in fname_results: file_results.append(os.path.basename(fname)) for file in file_results: if file_results.count(file) > 1: ind = file_results.index(file) fname_results.remove(fname_results[ind]) file_results.remove(file) nb_results_file = len(fname_results) # 1st dim: SNR, 2nd dim: tract std, 3rd dim: mean abs error, 4th dim: std abs error # result_array = numpy.empty((nb_results_file, nb_results_file, 3), dtype=object) # SNR snr = 
numpy.zeros((nb_results_file)) # Tracts std tracts_std = numpy.zeros((nb_results_file)) # CSF value csf_values = numpy.zeros((nb_results_file)) # methods' name methods_name = [] #numpy.empty((nb_results_file, nb_method), dtype=object) # labels error_per_label = [] std_per_label = [] labels_id = [] # median median_results = numpy.zeros((nb_results_file, 5)) # median std across bootstraps median_std = numpy.zeros((nb_results_file, 5)) # min min_results = numpy.zeros((nb_results_file, 5)) # max max_results = numpy.zeros((nb_results_file, 5)) # for i_file in range(0, nb_results_file): # Open file f = open(fname_results[i_file]) # open file # Extract all lines in .txt file lines = [line for line in f.readlines() if line.strip()] # extract SNR # find all index of lines containing the string "sigma noise" ind_line_noise = [lines.index(line_noise) for line_noise in lines if "sigma noise" in line_noise] if len(ind_line_noise) != 1: sct.printv("ERROR: number of lines including \"sigma noise\" is different from 1. Exit program.", 'error') sys.exit(1) else: # result_array[:, i_file, i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit())) snr[i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit())) # extract tract std ind_line_tract_std = [lines.index(line_tract_std) for line_tract_std in lines if "range tracts" in line_tract_std] if len(ind_line_tract_std) != 1: sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. 
Exit program.", 'error') sys.exit(1) else: # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit())) # regex = re.compile(''('(.*)':) # re.I permet d'ignorer la case (majuscule/minuscule) # match = regex.search(lines[ind_line_tract_std[0]]) # result_array[:, i_file, :, :] = match.group(1) # le groupe 1 correspond a '.*' tracts_std[i_file] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit())) # extract CSF value ind_line_csf_value = [lines.index(line_csf_value) for line_csf_value in lines if "# value CSF" in line_csf_value] if len(ind_line_csf_value) != 1: sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error') sys.exit(1) else: # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit())) # regex = re.compile(''('(.*)':) # re.I permet d'ignorer la case (majuscule/minuscule) # match = regex.search(lines[ind_line_tract_std[0]]) # result_array[:, i_file, :, :] = match.group(1) # le groupe 1 correspond a '.*' csf_values[i_file] = int(''.join(c for c in lines[ind_line_csf_value[0]].split(':')[1] if c.isdigit())) # extract method name ind_line_label = [lines.index(line_label) for line_label in lines if "Label" in line_label] if len(ind_line_label) != 1: sct.printv("ERROR: number of lines including \"Label\" is different from 1. Exit program.", 'error') sys.exit(1) else: # methods_name[i_file, :] = numpy.array(lines[ind_line_label[0]].strip().split(',')[1:]) methods_name.append(lines[ind_line_label[0]].strip().replace(' ', '').split(',')[1:]) # extract median ind_line_median = [lines.index(line_median) for line_median in lines if "median" in line_median] if len(ind_line_median) != 1: sct.printv("WARNING: number of lines including \"median\" is different from 1. 
Exit program.", 'warning') # sys.exit(1) else: median = lines[ind_line_median[0]].strip().split(',')[1:] # result_array[i_file, i_file, 0] = [float(m.split('(')[0]) for m in median] median_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in median]) median_std[i_file, :] = numpy.array([float(m.split('(')[1][:-1]) for m in median]) # extract min ind_line_min = [lines.index(line_min) for line_min in lines if "min," in line_min] if len(ind_line_min) != 1: sct.printv("WARNING: number of lines including \"min\" is different from 1. Exit program.", 'warning') # sys.exit(1) else: min = lines[ind_line_min[0]].strip().split(',')[1:] # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in min] min_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in min]) # extract max ind_line_max = [lines.index(line_max) for line_max in lines if "max" in line_max] if len(ind_line_max) != 1: sct.printv("WARNING: number of lines including \"max\" is different from 1. Exit program.", 'warning') # sys.exit(1) else: max = lines[ind_line_max[0]].strip().split(',')[1:] # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in max] max_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in max]) # extract error for each label error_per_label_for_file_i = [] std_per_label_for_file_i = [] labels_id_for_file_i = [] # Due to 2 different kind of file structure, the number of the last label line must be adapted if not ind_line_median: ind_line_median = [len(lines) + 1] for i_line in range(ind_line_label[0] + 1, ind_line_median[0] - 1): line_label_i = lines[i_line].strip().split(',') error_per_label_for_file_i.append([float(error.strip().split('(')[0]) for error in line_label_i[1:]]) std_per_label_for_file_i.append([float(error.strip().split('(')[1][:-1]) for error in line_label_i[1:]]) labels_id_for_file_i.append(int(line_label_i[0])) error_per_label.append(error_per_label_for_file_i) std_per_label.append(std_per_label_for_file_i) 
labels_id.append(labels_id_for_file_i) # close file f.close() # check if all the files in the result folder were generated with the same number of methods if not all(x == methods_name[0] for x in methods_name): sct.printv( 'ERROR: All the generated files in folder ' + results_folder + ' have not been generated with the same number of methods. Exit program.', 'error') sys.exit(1) # check if all the files in the result folder were generated with the same labels if not all(x == labels_id[0] for x in labels_id): sct.printv( 'ERROR: All the generated files in folder ' + results_folder + ' have not been generated with the same labels. Exit program.', 'error') sys.exit(1) # convert the list "error_per_label" into a numpy array to ease further manipulations error_per_label = numpy.array(error_per_label) std_per_label = numpy.array(std_per_label) # compute different stats abs_error_per_labels = numpy.absolute(error_per_label) max_abs_error_per_meth = numpy.amax(abs_error_per_labels, axis=1) min_abs_error_per_meth = numpy.amin(abs_error_per_labels, axis=1) mean_abs_error_per_meth = numpy.mean(abs_error_per_labels, axis=1) std_abs_error_per_meth = numpy.std(abs_error_per_labels, axis=1) # average error and std across sides meanRL_abs_error_per_labels = numpy.zeros((error_per_label.shape[0], nb_RL_labels, error_per_label.shape[2])) meanRL_std_abs_error_per_labels = numpy.zeros((std_per_label.shape[0], nb_RL_labels, std_per_label.shape[2])) for i_file in range(0, nb_results_file): for i_meth in range(0, len(methods_name[i_file])): for i_label in range(0, nb_RL_labels): # find indexes of corresponding labels ind_ID_first_side = labels_id[i_file].index(i_label) ind_ID_other_side = labels_id[i_file].index(i_label + nb_RL_labels) # compute mean across 2 sides meanRL_abs_error_per_labels[i_file, i_label, i_meth] = float(error_per_label[i_file, ind_ID_first_side, i_meth] + error_per_label[i_file, ind_ID_other_side, i_meth]) / 2 meanRL_std_abs_error_per_labels[i_file, i_label, i_meth] 
= float(std_per_label[i_file, ind_ID_first_side, i_meth] + std_per_label[i_file, ind_ID_other_side, i_meth]) / 2 nb_method = len(methods_to_display) sct.printv('Noise std of the ' + str(nb_results_file) + ' generated files:') print snr print '----------------------------------------------------------------------------------------------------------------' sct.printv('Tracts std of the ' + str(nb_results_file) + ' generated files:') print tracts_std print '----------------------------------------------------------------------------------------------------------------' sct.printv('CSF value of the ' + str(nb_results_file) + ' generated files:') print csf_values print '----------------------------------------------------------------------------------------------------------------' sct.printv('Methods used to generate results for the ' + str(nb_results_file) + ' generated files:') print methods_name print '----------------------------------------------------------------------------------------------------------------' sct.printv('Median obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):') print median_results print '----------------------------------------------------------------------------------------------------------------' sct.printv('Minimum obtained with each method (in colons) for the ' + str( nb_results_file) + ' generated files (in lines):') print min_results print '----------------------------------------------------------------------------------------------------------------' sct.printv('Maximum obtained with each method (in colons) for the ' + str( nb_results_file) + ' generated files (in lines):') print max_results print '----------------------------------------------------------------------------------------------------------------' sct.printv('Labels\' ID (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):') print labels_id print 
'----------------------------------------------------------------------------------------------------------------' sct.printv('Errors obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):') print error_per_label print '----------------------------------------------------------------------------------------------------------------' sct.printv('Mean errors across both sides obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):') print meanRL_abs_error_per_labels # Compute fractional volume per label labels_id_FV, labels_name_FV, fract_vol_per_lab, labels_name_FV_RL_gathered, fract_vol_per_lab_RL_gathered = isct_get_fractional_volume.get_fractional_volume_per_label('./cropped_atlas/', 'info_label.txt') # # Get the number of voxels including at least one tract # nb_voxels_in_WM = isct_get_fractional_volume.get_nb_voxel_in_WM('./cropped_atlas/', 'info_label.txt') # normalize by the number of voxels in WM and express it as a percentage fract_vol_norm = numpy.divide(fract_vol_per_lab_RL_gathered, numpy.sum(fract_vol_per_lab_RL_gathered)/100) # NOT NECESSARY NOW WE AVERAGE ACROSS BOTH SIDES (which orders the labels) # # check if the order of the labels returned by the function computing the fractional volumes is the same (which should be the case) # if labels_id_FV != labels_id[0]: # sct.printv('\n\nERROR: the labels IDs returned by the function \'i_sct_get_fractional_volume\' are different from the labels IDs of the results files\n\n', 'error') # # Remove labels #30 and #31 # labels_id_FV_29, labels_name_FV_29, fract_vol_per_lab_29 = labels_id_FV[:-2], labels_name_FV[:-2], fract_vol_per_lab[:-2] # indexes of labels sort according to the fractional volume ind_labels_sort = numpy.argsort(fract_vol_norm) # Find index of the file generated with noise variance = 10 and tracts std = 10 ind_file_to_display = numpy.where((snr == noise_std_to_display) & (tracts_std == 
tracts_std_to_display) & (csf_values == csf_value_to_display)) # sort arrays in this order meanRL_abs_error_per_labels_sort = meanRL_abs_error_per_labels[ind_file_to_display[0], ind_labels_sort, :] meanRL_std_abs_error_per_labels_sort = meanRL_std_abs_error_per_labels[ind_file_to_display[0], ind_labels_sort, :] labels_name_sort = numpy.array(labels_name_FV_RL_gathered)[ind_labels_sort] # *********************************************** START PLOTTING HERE ********************************************** # stringColor = Color() matplotlib.rcParams.update({'font.size': 50, 'font.family': 'trebuchet'}) # plt.rcParams['xtick.major.pad'] = '11' plt.rcParams['ytick.major.pad'] = '15' fig = plt.figure(figsize=(60, 37)) width = 1.0 / (nb_method + 1) ind_fig = numpy.arange(len(labels_name_sort)) * (1.0 + width) plt.ylabel('Absolute error (%)\n', fontsize=65) plt.xlabel('Fractional volume (% of the total number of voxels in WM)', fontsize=65) plt.title('Absolute error per tract as a function of their fractional volume\n\n', fontsize=30) plt.suptitle('(Noise std='+str(snr[ind_file_to_display[0]][0])+', Tracts std='+str(tracts_std[ind_file_to_display[0]][0])+', CSF value='+str(csf_values[ind_file_to_display[0]][0])+')', fontsize=30) # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method)) colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k'] markers = ['o', 's', '^', 'D'] errorbar_plots = [] for meth, color, marker in zip(methods_to_display, colors, markers): i_meth = methods_name[0].index(meth) i_meth_to_display = methods_to_display.index(meth) plot_i = plt.errorbar(ind_fig + i_meth_to_display * width, meanRL_abs_error_per_labels_sort[:, i_meth], meanRL_std_abs_error_per_labels_sort[:, i_meth], color=color, marker=marker, markersize=35, lw=7, elinewidth=1, capthick=5, capsize=10) # plot_i = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_files_csf_sort, :, i_meth]), positions=ind_fig + i_meth_to_display * width + (float(i_meth_to_display) * width) / (nb_method + 1), 
widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops) errorbar_plots.append(plot_i) # add alternated vertical background colored bars for i_xtick in range(0, len(ind_fig), 2): plt.axvspan(ind_fig[i_xtick] - width - width / 2, ind_fig[i_xtick] + (nb_method + 1) * width - width / 2, facecolor='grey', alpha=0.1) # concatenate value of fractional volume to labels'name xtick_labels = [labels_name_sort[i_lab]+'\n'+r'$\bf{['+str(round(fract_vol_norm[ind_labels_sort][i_lab], 2))+']}$' for i_lab in range(0, len(labels_name_sort))] ind_lemniscus = numpy.where(labels_name_sort == 'spinal lemniscus (spinothalamic and spinoreticular tracts)')[0][0] xtick_labels[ind_lemniscus] = 'spinal lemniscus\n'+r'$\bf{['+str(round(fract_vol_norm[ind_labels_sort][ind_lemniscus], 2))+']}$' # plt.legend(box_plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.) plt.legend(errorbar_plots, methods_to_display, loc=1, fontsize=50, numpoints=1) plt.xticks(ind_fig + (numpy.floor(float(nb_method-1)/2)) * width, xtick_labels, fontsize=45) # Tweak spacing to prevent clipping of tick-labels plt.subplots_adjust(bottom=0, top=0.95, right=0.96) plt.gca().set_xlim([-width, numpy.max(ind_fig) + (nb_method + 0.5) * width]) plt.gca().set_ylim([0, 17]) plt.gca().yaxis.set_major_locator(plt.MultipleLocator(1.0)) plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.5)) plt.grid(b=True, axis='y', which='both') fig.autofmt_xdate() plt.savefig(param_default.fname_folder_to_save_fig+'/absolute_error_vs_fractional_volume.pdf', format='PDF') plt.show(block=False)
def main():
    """Assess the sensitivity of the MAP estimation method to its two variance parameters.

    Reads every ``*.txt`` result file in ``results_folder`` (file names are expected to
    embed the two parameters as ``results_map<var_label>,<var_noise>_all.txt`` -- see the
    index-based parsing below), extracts per-file metrics (noise sigma, tract std,
    median/min/max, per-label errors), prints them, then plots:
      * an errorbar curve of mean absolute error vs. variance-within-labels (at noise
        variance 20) and vs. noise variance (at label variance 20);
      * box-and-whisker plots of the absolute error distribution for the same two slices.

    Command-line flags (non-debug mode): ``-i`` result folder, ``-m`` comma-separated
    list of methods to display.  Exits with status 1 on malformed result files.
    NOTE: Python 2 script (print statements).
    """
    results_folder = param_default.results_folder
    methods_to_display = param_default.methods_to_display

    # Parameters for debug mode: hard-coded developer paths bypass CLI parsing.
    if param_default.debug:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        results_folder = "/Users/slevy_local/spinalcordtoolbox/dev/atlas/validate_atlas/results_20150210_200iter/map"  #"C:/cygwin64/home/Simon_2/data_map"
        methods_to_display = 'map'
    else:
        # Check input parameters
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'i:m:')  # define flags
        except getopt.GetoptError as err:  # check if the arguments are defined
            print str(err)  # error
            # usage() # display usage
        # if not opts:
        #     print 'Please enter the path to the result folder. Exit program.'
        #     sys.exit(1)
        #     # usage()
        for opt, arg in opts:  # explore flags
            # NOTE(review): `opt in '-i'` is a substring test, not equality; works here
            # because both flags are single-letter, but `opt == '-i'` would be safer.
            if opt in '-i':
                results_folder = arg
            if opt in '-m':
                methods_to_display = arg

    sct.printv("Working directory: "+os.getcwd())

    sct.printv('\n\nData will be extracted from folder '+results_folder+' .', 'warning')
    sct.printv('\t\tCheck existence...')
    sct.check_folder_exist(results_folder)

    # Extract methods to display
    methods_to_display = methods_to_display.strip().split(',')

    fname_results = glob.glob(results_folder + '/*.txt')
    nb_results_file = len(fname_results)

    # 1st dim: SNR, 2nd dim: tract std, 3rd dim: mean abs error, 4th dim: std abs error
    # result_array = numpy.empty((nb_results_file, nb_results_file, 3), dtype=object)
    # SNR (one value per result file)
    snr = numpy.zeros((nb_results_file))
    # Tracts std
    tracts_std = numpy.zeros((nb_results_file))
    # methods' name
    methods_name = []  #numpy.empty((nb_results_file, nb_method), dtype=object)
    # per-label data (one entry per result file)
    error_per_label = []
    std_per_label = []
    labels_id = []
    # median (6 columns: presumably one per estimation method -- TODO confirm)
    median_results = numpy.zeros((nb_results_file, 6))
    # median std across bootstraps
    median_std = numpy.zeros((nb_results_file, 6))
    # min
    min_results = numpy.zeros((nb_results_file, 6))
    # max
    max_results = numpy.zeros((nb_results_file, 6))

    # Extract variance within labels (col 0) and variance of noise (col 1) from each
    # file name; the substring between 'results_map' and '_all.txt' must be
    # '<var_label>,<var_noise>' or the float() conversions below will raise.
    map_var_params = numpy.zeros((nb_results_file, 2))
    for i_file in range(0, nb_results_file):
        fname = fname_results[i_file].strip()
        ind_start, ind_end = fname.index('results_map')+11, fname.index('_all.txt')
        var = fname[ind_start:ind_end]
        map_var_params[i_file, 0] = float(var.split(",")[0])
        map_var_params[i_file, 1] = float(var.split(",")[1])

    # Read each file and extract data
    for i_file in range(0, nb_results_file):
        # Open file
        f = open(fname_results[i_file])  # open file
        # Extract all non-blank lines in .txt file
        lines = [line for line in f.readlines() if line.strip()]

        # extract SNR
        # find all index of lines containing the string "sigma noise"
        ind_line_noise = [lines.index(line_noise) for line_noise in lines if "sigma noise" in line_noise]
        if len(ind_line_noise) != 1:
            sct.printv("ERROR: number of lines including \"sigma noise\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # keep only the digits of the matching line
            # result_array[:, i_file, i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))
            snr[i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))

        # extract tract std
        ind_line_tract_std = [lines.index(line_tract_std) for line_tract_std in lines if "range tracts" in line_tract_std]
        if len(ind_line_tract_std) != 1:
            sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))
            # regex = re.compile(''('(.*)':)  # re.I makes the match case-insensitive
            # match = regex.search(lines[ind_line_tract_std[0]])
            # result_array[:, i_file, :, :] = match.group(1)  # group 1 corresponds to '.*'
            tracts_std[i_file] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))

        # extract method name (header line containing "Label"; cells after the first
        # column are the method names, whitespace stripped)
        ind_line_label = [lines.index(line_label) for line_label in lines if "Label" in line_label]
        if len(ind_line_label) != 1:
            sct.printv("ERROR: number of lines including \"Label\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # methods_name[i_file, :] = numpy.array(lines[ind_line_label[0]].strip().split(',')[1:])
            methods_name.append(lines[ind_line_label[0]].strip().replace(' ', '').split(',')[1:])

        # extract median (cells are formatted like "value(std)" -- split on '(')
        ind_line_median = [lines.index(line_median) for line_median in lines if "median" in line_median]
        if len(ind_line_median) != 1:
            sct.printv("WARNING: number of lines including \"median\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            median = lines[ind_line_median[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 0] = [float(m.split('(')[0]) for m in median]
            median_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in median])
            median_std[i_file, :] = numpy.array([float(m.split('(')[1][:-1]) for m in median])

        # extract min
        ind_line_min = [lines.index(line_min) for line_min in lines if "min," in line_min]
        if len(ind_line_min) != 1:
            sct.printv("WARNING: number of lines including \"min\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            # NOTE(review): `min` shadows the builtin for the rest of this scope.
            min = lines[ind_line_min[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in min]
            min_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in min])

        # extract max
        ind_line_max = [lines.index(line_max) for line_max in lines if "max" in line_max]
        if len(ind_line_max) != 1:
            sct.printv("WARNING: number of lines including \"max\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            # NOTE(review): `max` shadows the builtin for the rest of this scope.
            max = lines[ind_line_max[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in max]
            max_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in max])

        # extract error for each label (the lines between the "Label" header and the
        # "median" line; each cell is "error(std)")
        error_per_label_for_file_i = []
        std_per_label_for_file_i = []
        labels_id_for_file_i = []
        # Due to 2 different kind of file structure, the number of the last label line must be adapted
        if not ind_line_median:
            ind_line_median = [len(lines)+1]
        for i_line in range(ind_line_label[0]+1, ind_line_median[0]-1):
            line_label_i = lines[i_line].strip().split(',')
            error_per_label_for_file_i.append([float(error.strip().split('(')[0]) for error in line_label_i[1:]])
            std_per_label_for_file_i.append([float(error.strip().split('(')[1][:-1]) for error in line_label_i[1:]])
            # NOTE(review): label IDs are kept as strings here (other variants of this
            # script convert with int()) -- consistency across files is checked below.
            labels_id_for_file_i.append(line_label_i[0])
        error_per_label.append(error_per_label_for_file_i)
        std_per_label.append(std_per_label_for_file_i)
        labels_id.append(labels_id_for_file_i)

        # close file
        f.close()

    # check if all the files in the result folder were generated with the same number of methods
    if not all(x == methods_name[0] for x in methods_name):
        sct.printv('ERROR: All the generated files in folder '+results_folder+' have not been generated with the same number of methods. Exit program.', 'error')
        sys.exit(1)
    # check if all the files in the result folder were generated with the same labels
    if not all(x == labels_id[0] for x in labels_id):
        sct.printv('ERROR: All the generated files in folder '+results_folder+' have not been generated with the same labels. Exit program.', 'error')
        sys.exit(1)

    # convert the list "error_per_label" into a numpy array to ease further manipulations
    error_per_label = numpy.array(error_per_label)
    std_per_label = numpy.array(std_per_label)
    # compute different stats across labels (axis=1)
    abs_error_per_labels = numpy.absolute(error_per_label)
    max_abs_error_per_meth = numpy.amax(abs_error_per_labels, axis=1)
    min_abs_error_per_meth = numpy.amin(abs_error_per_labels, axis=1)
    mean_abs_error_per_meth = numpy.mean(abs_error_per_labels, axis=1)
    std_abs_error_per_meth = numpy.std(abs_error_per_labels, axis=1)

    # print everything that was extracted, one section per metric
    sct.printv('Noise std of the '+str(nb_results_file)+' generated files:')
    print snr
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Tracts std of the '+str(nb_results_file)+' generated files:')
    print tracts_std
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Methods used to generate results for the '+str(nb_results_file)+' generated files:')
    print methods_name
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Median obtained with each method (in colons) for the '+str(nb_results_file)+' generated files (in lines):')
    print median_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Minimum obtained with each method (in colons) for the '+str(nb_results_file)+' generated files (in lines):')
    print min_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Maximum obtained with each method (in colons) for the '+str(nb_results_file)+' generated files (in lines):')
    print max_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Labels\' ID (in colons) for the '+str(nb_results_file)+' generated files (in lines):')
    print labels_id
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Errors obtained with each method (in colons) for the '+str(nb_results_file)+' generated files (in lines):')
    print error_per_label

    # ********************************** START PLOTTING HERE ***********************************************************
    matplotlib.rcParams.update({'font.size': 45, 'font.family': 'Trebuchet'})
    plt.rcParams['xtick.major.pad'] = '9'
    plt.rcParams['ytick.major.pad'] = '15'
    # matplotlib.rcParams['legend.handlelength'] = 0

    # find indexes of files to be plotted
    ind_var_noise20 = numpy.where(map_var_params[:, 1] == 20)  # indexes where noise variance = 20
    ind_ind_var_label_sort_var_noise20 = numpy.argsort(map_var_params[ind_var_noise20, 0])  # indexes of indexes where noise variance=20 sorted according to values of variance within labels (in ascending order)
    ind_var_label_sort_var_noise20 = ind_var_noise20[0][ind_ind_var_label_sort_var_noise20][0]  # indexes where noise variance=20 sorted according to values of variance within labels (in ascending order)

    ind_var_label20 = numpy.where(map_var_params[:, 0] == 20)  # indexes where variance within labels = 20
    ind_ind_var_noise_sort_var_label20 = numpy.argsort(map_var_params[ind_var_label20, 1])  # indexes of indexes where label variance=20 sorted according to values of noise variance (in ascending order)
    ind_var_noise_sort_var_label20 = ind_var_label20[0][ind_ind_var_noise_sort_var_label20][0]  # indexes where noise variance=20 sorted according to values of variance within labels (in ascending order)

    plt.close('all')

    # Errorbar plot: mean abs error as a function of each variance parameter
    plt.figure()
    plt.ylabel('Mean absolute error (%)', fontsize=55)
    plt.xlabel('Variance within labels (in percentage of the mean)', fontsize=55)
    plt.title('Sensitivity of the method \"MAP\" to the variance within labels and to the SNR\n', fontsize=65)

    # blue curve: noise variance fixed at 20; red curve: label variance fixed at 20
    plt.errorbar(map_var_params[ind_var_label_sort_var_noise20, 0], mean_abs_error_per_meth[ind_var_label_sort_var_noise20, 0], std_abs_error_per_meth[ind_var_label_sort_var_noise20, 0], color='blue', marker='o', linestyle='--', markersize=8, elinewidth=2, capthick=2, capsize=10)
    plt.errorbar(map_var_params[ind_var_noise_sort_var_label20, 1], mean_abs_error_per_meth[ind_var_noise_sort_var_label20, 0], std_abs_error_per_meth[ind_var_noise_sort_var_label20, 0], color='red', marker='o', linestyle='--', markersize=8, elinewidth=2, capthick=2, capsize=10)

    # plt.legend(plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0., handler_map={Line2D: HandlerLine2D(numpoints=1)})
    plt.legend(['noise variance = 20', 'variance within labels = 20% of the mean'], loc='best', handler_map={Line2D: HandlerLine2D(numpoints=1)})
    plt.gca().set_xlim([numpy.min(map_var_params[ind_var_label_sort_var_noise20, 0]) - 1, numpy.max(map_var_params[ind_var_label_sort_var_noise20, 0]) + 1])
    plt.grid(b=True, axis='both')
    # plt.gca().yaxis.set_major_locator(plt.MultipleLocator(2.5))

    # Box-and-whisker plots: distribution of abs error for the same two parameter slices
    nb_box = 2
    plt.figure(figsize=(30, 15))
    width = 1.0 / (nb_box + 1)
    ind_fig = numpy.arange(len(map_var_params[ind_var_label_sort_var_noise20, 0])) * (1.0 + width)
    plt.ylabel('Absolute error (%)\n', fontsize=55)
    plt.xlabel('Variance', fontsize=55)
    plt.title('Sensitivity of the method \"MAP\" to the variance within labels and to the SNR\n', fontsize=65)

    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_box))
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    box_plots = []

    # blue boxes: noise variance fixed at 20
    boxprops = dict(linewidth=6, color='b')
    flierprops = dict(markeredgewidth=0.7, markersize=15, marker='.', color='b')
    whiskerprops = dict(linewidth=5, color='b')
    capprops = dict(linewidth=5, color='b')
    medianprops = dict(linewidth=6, color='b')
    meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick')
    meanlineprops = dict(linestyle='--', linewidth=2.5)

    plot_constant_noise_var = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_var_label_sort_var_noise20, :, 0]), positions=ind_fig, widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops)
    box_plots.append(plot_constant_noise_var['boxes'][0])

    # red boxes: variance within labels fixed at 20
    boxprops = dict(linewidth=6, color='r')
    flierprops = dict(markeredgewidth=0.7, markersize=15, marker='.', color='r')
    whiskerprops = dict(linewidth=5, color='r')
    capprops = dict(linewidth=5, color='r')
    medianprops = dict(linewidth=6, color='r')
    meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick')
    meanlineprops = dict(linestyle='--', linewidth=2.5)

    plot_constant_label_var = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_var_noise_sort_var_label20, :, 0]), positions=ind_fig + width + width / (nb_box + 1), widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops)
    box_plots.append(plot_constant_label_var['boxes'][0])

    # add alternated vertical background colored bars
    for i_xtick in range(0, len(ind_fig), 2):
        plt.axvspan(ind_fig[i_xtick] - width - width / 4, ind_fig[i_xtick] + (nb_box+1) * width - width / 4, facecolor='grey', alpha=0.1)

    # plt.legend(box_plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
    # leg = plt.legend(box_plots, [r'$\mathrm{\mathsf{noise\ variance\ =\ 20\ voxels^2}}$', r'$\mathrm{\mathsf{variance\ within\ labels\ =\ 20\%\ of\ the\ mean\ value}}$'], loc=1, handletextpad=-2)
    # color_legend_texts(leg)
    # convert xtick labels into int
    xtick_labels = [int(xtick) for xtick in map_var_params[ind_var_label_sort_var_noise20, 0]]
    plt.xticks(ind_fig + (numpy.floor(nb_box / 2)) * (width/2) * (1.0 + 1.0 / (nb_box + 1)), xtick_labels)
    plt.gca().set_xlim([-width, numpy.max(ind_fig) + (nb_box + 0.5) * width])
    plt.gca().yaxis.set_major_locator(plt.MultipleLocator(1.0))
    plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.25))
    plt.grid(b=True, axis='y', which='both')

    plt.savefig(os.path.join(param_default.fname_folder_to_save_fig, 'absolute_error_as_a_function_of_MAP_parameters.pdf'), format='PDF')

    plt.show(block=False)
def main(): results_folder = param_default.results_folder methods_to_display = param_default.methods_to_display # Parameters for debug mode if param_default.debug: print '\n*** WARNING: DEBUG MODE ON ***\n' results_folder = "/Users/slevy_local/spinalcordtoolbox/dev/atlas/validate_atlas/results_20150210_200iter"#"C:/cygwin64/home/Simon_2/data_auto_vs_manual" path_sct = '/Users/slevy_local/spinalcordtoolbox' #'C:/cygwin64/home/Simon_2/spinalcordtoolbox' methods_to_display = 'bin,man0,man1,man2,man3' else: status, path_sct = commands.getstatusoutput('echo $SCT_DIR') # Check input parameters try: opts, args = getopt.getopt(sys.argv[1:], 'i:m:') # define flags except getopt.GetoptError as err: # check if the arguments are defined print str(err) # error # usage() # display usage # if not opts: # print 'Please enter the path to the result folder. Exit program.' # sys.exit(1) # # usage() for opt, arg in opts: # explore flags if opt in '-i': results_folder = arg if opt in '-m': methods_to_display = arg # Append path that contains scripts, to be able to load modules sys.path.append(path_sct + '/scripts') import sct_utils as sct sct.printv("Working directory: "+os.getcwd()) # Folder including data "automatic vs manual" folder_auto_vs_manual = results_folder+'/manual_mask/sub' sct.printv('\n\nData will be extracted from folder '+folder_auto_vs_manual+' .', 'warning') sct.printv('\t\tCheck existence...') sct.check_folder_exist(folder_auto_vs_manual) # Extract methods to display methods_to_display = methods_to_display.strip().split(',') fname_results = glob.glob(folder_auto_vs_manual + '/*.txt') nb_results_file = len(fname_results) # 1st dim: SNR, 2nd dim: tract std, 3rd dim: mean abs error, 4th dim: std abs error # result_array = numpy.empty((nb_results_file, nb_results_file, 3), dtype=object) # SNR snr = numpy.zeros((nb_results_file)) # Tracts std tracts_std = numpy.zeros((nb_results_file)) # CSF value csf_values = numpy.zeros((nb_results_file)) # methods' name methods_name = 
[] #numpy.empty((nb_results_file, nb_method), dtype=object) # labels error_per_label = [] std_per_label = [] labels_id = [] # median median_results = numpy.zeros((nb_results_file, 6)) # median std across bootstraps median_std = numpy.zeros((nb_results_file, 6)) # min min_results = numpy.zeros((nb_results_file, 6)) # max max_results = numpy.zeros((nb_results_file, 6)) # for i_file in range(0, nb_results_file): # Open file f = open(fname_results[i_file]) # open file # Extract all lines in .txt file lines = [line for line in f.readlines() if line.strip()] # extract SNR # find all index of lines containing the string "sigma noise" ind_line_noise = [lines.index(line_noise) for line_noise in lines if "sigma noise" in line_noise] if len(ind_line_noise) != 1: sct.printv("ERROR: number of lines including \"sigma noise\" is different from 1. Exit program.", 'error') sys.exit(1) else: # result_array[:, i_file, i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit())) snr[i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit())) # extract tract std ind_line_tract_std = [lines.index(line_tract_std) for line_tract_std in lines if "range tracts" in line_tract_std] if len(ind_line_tract_std) != 1: sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. 
Exit program.", 'error') sys.exit(1) else: # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit())) # regex = re.compile(''('(.*)':) # re.I permet d'ignorer la case (majuscule/minuscule) # match = regex.search(lines[ind_line_tract_std[0]]) # result_array[:, i_file, :, :] = match.group(1) # le groupe 1 correspond a '.*' tracts_std[i_file] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit())) # extract CSF value ind_line_csf_value = [lines.index(line_csf_value) for line_csf_value in lines if "# value CSF" in line_csf_value] if len(ind_line_csf_value) != 1: sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error') sys.exit(1) else: csf_values[i_file] = int(''.join(c for c in lines[ind_line_csf_value[0]].split(':')[1] if c.isdigit())) # extract method name ind_line_label = [lines.index(line_label) for line_label in lines if "Label" in line_label] if len(ind_line_label) != 1: sct.printv("ERROR: number of lines including \"Label\" is different from 1. Exit program.", 'error') sys.exit(1) else: # methods_name[i_file, :] = numpy.array(lines[ind_line_label[0]].strip().split(',')[1:]) methods_name.append(lines[ind_line_label[0]].strip().replace(' ', '').split(',')[1:]) # extract median ind_line_median = [lines.index(line_median) for line_median in lines if "median" in line_median] if len(ind_line_median) != 1: sct.printv("WARNING: number of lines including \"median\" is different from 1. 
Exit program.", 'warning') # sys.exit(1) else: median = lines[ind_line_median[0]].strip().split(',')[1:] # result_array[i_file, i_file, 0] = [float(m.split('(')[0]) for m in median] median_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in median]) median_std[i_file, :] = numpy.array([float(m.split('(')[1][:-1]) for m in median]) # extract min ind_line_min = [lines.index(line_min) for line_min in lines if "min," in line_min] if len(ind_line_min) != 1: sct.printv("WARNING: number of lines including \"min\" is different from 1. Exit program.", 'warning') # sys.exit(1) else: min = lines[ind_line_min[0]].strip().split(',')[1:] # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in min] min_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in min]) # extract max ind_line_max = [lines.index(line_max) for line_max in lines if "max" in line_max] if len(ind_line_max) != 1: sct.printv("WARNING: number of lines including \"max\" is different from 1. Exit program.", 'warning') # sys.exit(1) else: max = lines[ind_line_max[0]].strip().split(',')[1:] # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in max] max_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in max]) # extract error for each label error_per_label_for_file_i = [] std_per_label_for_file_i = [] labels_id_for_file_i = [] # Due to 2 different kind of file structure, the number of the last label line must be adapted if not ind_line_median: ind_line_median = [len(lines)+1] for i_line in range(ind_line_label[0]+1, ind_line_median[0]-1): line_label_i = lines[i_line].strip().split(',') error_per_label_for_file_i.append([float(error.strip().split('(')[0]) for error in line_label_i[1:]]) std_per_label_for_file_i.append([float(error.strip().split('(')[1][:-1]) for error in line_label_i[1:]]) labels_id_for_file_i.append(line_label_i[0]) error_per_label.append(error_per_label_for_file_i) std_per_label.append(std_per_label_for_file_i) 
# NOTE(review): this span is the interior of a larger function whose header is
# outside this view. Names such as labels_id, labels_id_for_file_i, f, snr,
# tracts_std, methods_name, error_per_label, std_per_label, median_results,
# min_results, max_results, csf_values, methods_to_display, results_folder,
# nb_results_file and param_default are all defined earlier, before this span.
# Record the label IDs parsed from the current results file.
labels_id.append(labels_id_for_file_i)
# close file
f.close()

# --- Sanity checks across all parsed result files ---------------------------
# check if all the files in the result folder were generated with the same number of methods
if not all(x == methods_name[0] for x in methods_name):
    # NOTE(review): sys.exit(1) below is redundant if sct.printv with 'error'
    # already aborts — TODO confirm sct.printv's behavior on type 'error'.
    sct.printv('ERROR: All the generated files in folder '+results_folder+' have not been generated with the same number of methods. Exit program.', 'error')
    sys.exit(1)
# check if all the files in the result folder were generated with the same labels
if not all(x == labels_id[0] for x in labels_id):
    sct.printv('ERROR: All the generated files in folder '+results_folder+' have not been generated with the same labels. Exit program.', 'error')
    sys.exit(1)

# --- Convert accumulated lists to numpy arrays and derive summary stats ------
# convert the list "error_per_label" into a numpy array to ease further manipulations
error_per_label = numpy.array(error_per_label)
std_per_label = numpy.array(std_per_label)
# compute different stats
# Per-method aggregates of the absolute error, reduced along axis=1
# (presumably the label axis, i.e. one value per file/method — TODO confirm
# the array layout (file, label, method) against the code that fills the lists).
abs_error_per_labels = numpy.absolute(error_per_label)
max_abs_error_per_meth = numpy.amax(abs_error_per_labels, axis=1)
min_abs_error_per_meth = numpy.amin(abs_error_per_labels, axis=1)
mean_abs_error_per_meth = numpy.mean(abs_error_per_labels, axis=1)
std_abs_error_per_meth = numpy.std(abs_error_per_labels, axis=1)

nb_method = len(methods_to_display)

# --- Console report of everything parsed from the results folder -------------
# NOTE(review): the recurring "(in colons)" in the messages below looks like a
# typo for "(in columns)"; left untouched since these are runtime strings.
sct.printv('Noise std of the '+str(nb_results_file)+' generated files:')
print snr
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Tracts std of the '+str(nb_results_file)+' generated files:')
print tracts_std
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Methods used to generate results for the '+str(nb_results_file)+' generated files:')
print methods_name
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Median obtained with each method (in colons) for the '+str(nb_results_file)+' generated files (in lines):')
print median_results
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Minimum obtained with each method (in colons) for the '+str(nb_results_file)+' generated files (in lines):')
print min_results
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Maximum obtained with each method (in colons) for the '+str(nb_results_file)+' generated files (in lines):')
print max_results
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Labels\' ID (in colons) for the '+str(nb_results_file)+' generated files (in lines):')
print labels_id
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Mean errors (across bootstraps) obtained with each method (in colons) for the '+str(nb_results_file)+' generated files (in lines):')
print error_per_label
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Errors std (across bootstraps) obtained with each method (in colons) for the '+str(nb_results_file)+' generated files (in lines):')
print std_per_label

# ********************************** START PLOTTING HERE ***********************************************************
# find index of the file generated with sigma noise = 10 and range tracts = -10:+10
# NOTE(review): the elementwise (snr == 10) & (tracts_std == 10) only works if
# snr and tracts_std are numpy arrays (not plain lists) — TODO confirm how they
# are built earlier in the function. Raises IndexError if no file matches.
ind_file_noise10_tracts_std10 = numpy.where((snr == 10) & (tracts_std == 10))[0][0]

# Global matplotlib styling (large fonts/padding: the figure is sized for print).
matplotlib.rcParams.update({'font.size': 45, 'font.family': 'trebuchet'})
plt.rcParams['xtick.major.pad'] = '9'
plt.rcParams['ytick.major.pad'] = '15'

# Figure 0: grouped errorbars — absolute error per label, one group per method.
fig0 = plt.figure(0, figsize=(34, 17))
width = 0.5/(nb_method+1)  # horizontal offset unit between methods within a label group
ind_fig0 = numpy.arange(len(labels_id[0]))  # one x position per label
plt.ylabel('Absolute error (%)', fontsize=55)
plt.xlabel('Labels', fontsize=55)
plt.suptitle('Automatic estimation vs. manual estimation', fontsize=65)
plt.title('(Noise std='+str(snr[0])+', Tracts std='+str(tracts_std[0])+', CSF value='+str(csf_values[0])+')\n', fontsize=65)

# colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
# Fixed color cycle: supports up to 7 methods (one color each).
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
plots = []  # errorbar line handles, collected for the legend
marker_size = 30
line_width = 3.0
cap_size = 15
eline_width = 7.0
cap_thick = 5.0
for meth, color in zip(methods_to_display, colors):
    # i_meth: column index of this method in the result files;
    # i_meth_to_display: its position in the displayed ordering (drives the x offset).
    i_meth = methods_name[0].index(meth)
    i_meth_to_display = methods_to_display.index(meth)
    # y = mean error, yerr = bootstrap std, for the noise=10/tracts-std=10 file only.
    plot = plt.errorbar(ind_fig0+i_meth_to_display*width+(float(i_meth_to_display)*width)/(nb_method+1), error_per_label[ind_file_noise10_tracts_std10, :, i_meth], std_per_label[ind_file_noise10_tracts_std10, :, i_meth], color=color, linestyle='None', marker='o', ms=marker_size, lw=line_width, capsize=cap_size, capthick=cap_thick, elinewidth=eline_width)
    # plot[0] is the Line2D of the markers (errorbar returns a tuple).
    plots.append(plot[0])

# add alternated vertical background colored bars
for i_xtick in range(0, len(ind_fig0), 2):
    plt.axvspan(ind_fig0[i_xtick] - 3*width, ind_fig0[i_xtick] + (nb_method + 1) * width + 2*width, facecolor='grey', alpha=0.1)

# plt.legend(plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0., handler_map={Line2D: HandlerLine2D(numpoints=1)})
# Legend shows a single marker per method instead of the default pair.
plt.legend(plots, methods_to_display, loc='best', handler_map={Line2D: HandlerLine2D(numpoints=1)})
# plt.xticks(ind_fig0 + (numpy.floor(nb_method/2))*width*(1.0+1.0/(nb_method+1)), labels_id[0], fontsize=45)
# NOTE(review): tick labels are hard-coded to exactly 3 labels ('l-cst',
# 'r-cst', 'dc') while positions come from len(labels_id[0]) — mismatched if
# the files contain a different number of labels; the commented-out line above
# is the generic variant.
plt.xticks(ind_fig0 + (numpy.floor(nb_method/2))*width*(1.0+1.0/(nb_method+1)), ['l-cst', 'r-cst', 'dc'], fontsize=45)
plt.gca().set_xlim([-3*width, numpy.max(ind_fig0)+(nb_method+3)*width])
# plt.gca().set_ylim([0, 2])
# y grid every 0.5, labeled every 2.5
plt.gca().yaxis.set_major_locator(plt.MultipleLocator(2.5))
plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.5))
plt.grid(b=True, axis='y', which='both')
# adjust the size of the frame
# plt.subplots_adjust(bottom=0.15, top=0.86, right=0.7, left=0.2)
# Save the figure as PDF into the configured output folder, then display it
# without blocking (the function presumably continues past this view).
plt.savefig(param_default.fname_folder_to_save_fig+'/automatic_method_vs_manual_methods.pdf', format='PDF')
plt.show(block=False)