def plot_diadem_score(neuron_distance_csv, outputDir, algorithms=None):
    """Plot the mean Diadem score per algorithm as a horizontal bar chart.

    Reads per-reconstruction scores from *neuron_distance_csv* and writes the
    figure to <outputDir>/Diadem_score.png (the directory is created if
    missing).

    Parameters
    ----------
    neuron_distance_csv : str
        Path to a CSV with at least 'algorithm' and 'diadem_score' columns.
    outputDir : str
        Directory that receives the output PNG.
    algorithms : sequence or None
        Plotting order of algorithms; when None it is derived via
        order_algorithms_by_size().
    """
    df_nd = pd.read_csv(neuron_distance_csv)
    if not path.exists(outputDir):
        os.mkdir(outputDir)
    if algorithms is None:
        algorithms = order_algorithms_by_size(df_nd)

    # Number of reconstructions per algorithm, in plotting order; used to
    # annotate the y-tick labels with the sample size.
    grouped = df_nd.groupby('algorithm')
    sample_sizes = [grouped.get_group(alg).shape[0] for alg in algorithms]

    plt.figure()
    sb.set_context("talk", font_scale=0.7)
    ax = sb.barplot(y='algorithm', x='diadem_score', data=df_nd,
                    order=algorithms)

    # Relabel ticks as "<display name> (n=<sample size>)".
    name_map = rp.get_algorithm_name_dict()
    display_names = [name_map[alg] for alg in algorithms]
    ax.set_yticklabels(['%s ($n$=%d )' % (display_names[i], sample_sizes[i])
                        for i in range(algorithms.size)])

    plt.xlabel('Diadem Scores')
    plt.subplots_adjust(left=0.5, right=0.95, bottom=0.1, top=0.9)
    plt.savefig(outputDir + '/Diadem_score.png', format='png')
    plt.close()
    return
def plot_all_score(neuron_distance_csv, outputDir, algorithms=None):
    """Plot Diadem and NBLAST (bi-directional) scores per algorithm.

    Reads per-reconstruction scores from *neuron_distance_csv* and writes the
    figure to <outputDir>/All_scores.png (the directory is created if
    missing). The figure is also shown interactively via plt.show().

    Parameters
    ----------
    neuron_distance_csv : str
        Path to a CSV with 'algorithm', 'diadem_score' and 'nblast_bi_score'
        columns.
    outputDir : str
        Directory that receives the output PNG.
    algorithms : sequence or None
        Plotting order of algorithms; when None it is derived via
        order_algorithms_by_size().
    """
    df_nd = pd.read_csv(neuron_distance_csv)
    if not path.exists(outputDir):
        os.mkdir(outputDir)
    if algorithms is None:
        algorithms = order_algorithms_by_size(df_nd)

    # Sample size per algorithm, in plotting order, for the tick labels.
    dfg = df_nd.groupby('algorithm')
    sample_size_per_algorithm = [dfg.get_group(alg).shape[0]
                                 for alg in algorithms]

    plt.figure()
    sb.set_style("white")
    # BUG FIX: seaborn's barplot takes a single column name for x, not a list
    # of columns — the original x=['diadem_score', 'nblast_bi_score'] raises.
    # Melt the two score columns into long form and separate them with hue.
    df_long = pd.melt(df_nd, id_vars=['algorithm'],
                      value_vars=['diadem_score', 'nblast_bi_score'],
                      var_name='score_type', value_name='score')
    a = sb.barplot(y='algorithm', x='score', hue='score_type',
                   data=df_long, order=algorithms)

    algorithm_name_mapping = rp.get_algorithm_name_dict()
    algorithm_names = [algorithm_name_mapping[x] for x in algorithms]
    a.set_yticklabels(['%s ($n$=%d )' % (algorithm_names[i],
                                         sample_size_per_algorithm[i])
                       for i in range(algorithms.size)])

    sb.set_context("talk", font_scale=3.0)
    plt.xticks(rotation="90")
    plt.xlabel('All Scores')
    plt.subplots_adjust(left=0.5, right=0.95, bottom=0.1, top=0.9)
    plt.savefig(outputDir + '/All_scores.png', format='png')
    plt.show()
    # Release the figure after the interactive window closes so repeated calls
    # do not accumulate open figures.
    plt.close()
    return
def plot_similarities(neuron_distance_csv, outputDir, algorithms=None,
                      metric='neuron_distance', CASE_BY_CASE_PLOT=0,
                      value_label=None):
    """Plot per-algorithm similarity scores derived from *metric*.

    Similarities are computed by calculate_similarities() (which also writes
    "<neuron_distance_csv>.similarity.csv"). A summary bar chart is saved to
    <outputDir>/<value_label>.png; when CASE_BY_CASE_PLOT is truthy, one bar
    chart per image is additionally written under <outputDir>/sorted/figs/.

    Parameters
    ----------
    neuron_distance_csv : str
        Path to a CSV with 'algorithm', 'image_id' and 'swc_file' columns
        plus the metric column.
    outputDir : str
        Directory that receives the output PNGs (created if missing).
        NOTE(review): the per-image plots assume <outputDir>/sorted/figs/
        already exists — it is not created here; confirm with callers.
    algorithms : sequence or None
        Plotting order; when None it is derived from the raw CSV via
        order_algorithms_by_size().
    metric : str
        Column of the CSV to convert into a similarity.
    CASE_BY_CASE_PLOT : int/bool
        When truthy, also emit one figure per image.
    value_label : str or None
        Axis label and output file stem; defaults to
        ' Similarity (0~1) by <metric>'.
    """
    df_nd_ori = pd.read_csv(neuron_distance_csv)
    algorithm_name_mapping = rp.get_algorithm_name_dict()
    df_nd = calculate_similarities(
        neuron_distance_csv, metric,
        output_similarity_csv=neuron_distance_csv + ".similarity.csv")
    all_images = np.unique(df_nd.image_id)
    if not path.exists(outputDir):
        os.mkdir(outputDir)
    if algorithms is None:
        algorithms = order_algorithms_by_size(df_nd_ori)

    if CASE_BY_CASE_PLOT:
        dfg = df_nd.groupby('image_id')
        for image in all_images:
            df_image_cur = dfg.get_group(image)
            if df_image_cur.shape[0] > 0:
                plt.figure()
                plt.bar(range(df_image_cur.swc_file.size),
                        df_image_cur['similarity'])
                algorithm_names = [algorithm_name_mapping[x]
                                   for x in df_image_cur['algorithm']]
                plt.xticks(range(df_image_cur.swc_file.size),
                           algorithm_names, rotation="90")
                plt.ylabel(' Similarity (0~1) by ' + metric)
                plt.savefig(outputDir + '/sorted/figs/' +
                            image.split('/')[-1] + '_' + metric +
                            '_similarity.png', format='png')
                plt.close()
            else:
                print(image + " has no valid reconstructions")

    # Sample sizes come from the raw CSV (before similarity filtering) so the
    # n in each label reflects all attempted reconstructions.
    dfg = df_nd_ori.groupby('algorithm')
    sample_size_per_algorithm = [dfg.get_group(alg).shape[0]
                                 for alg in algorithms]

    plt.figure()
    sb.set_context("talk", font_scale=0.7)
    a = sb.barplot(y='algorithm', x='similarity', data=df_nd,
                   order=algorithms)
    algorithm_names = [algorithm_name_mapping[x] for x in algorithms]
    a.set_yticklabels(['%s ($n$=%d )' % (algorithm_names[i],
                                         sample_size_per_algorithm[i])
                       for i in range(algorithms.size)])

    # Idiom fix: 'is None' instead of '== None'.
    if value_label is None:
        value_label = ' Similarity (0~1) by ' + metric
    plt.ylabel('algorithm (n = # recons)')
    plt.xlabel(value_label)
    plt.subplots_adjust(left=0.4, right=0.9, bottom=0.1, top=0.9)
    plt.savefig(outputDir + '/' + value_label + '.png', format='png')
    plt.close()
    return
# NOTE(review): this is a second, functionally identical definition of
# plot_similarities; at import time it shadows the earlier one. Consider
# deleting one of the two copies.
def plot_similarities(neuron_distance_csv, outputDir, algorithms=None,
                      metric='neuron_distance', CASE_BY_CASE_PLOT=0,
                      value_label=None):
    """Plot per-algorithm similarity scores derived from *metric*.

    Similarities are computed by calculate_similarities() (which also writes
    "<neuron_distance_csv>.similarity.csv"). A summary bar chart is saved to
    <outputDir>/<value_label>.png; when CASE_BY_CASE_PLOT is truthy, one bar
    chart per image is additionally written under <outputDir>/sorted/figs/
    (that subdirectory is assumed to exist — it is not created here).

    Parameters
    ----------
    neuron_distance_csv : str
        Path to a CSV with 'algorithm', 'image_id' and 'swc_file' columns
        plus the metric column.
    outputDir : str
        Directory that receives the output PNGs (created if missing).
    algorithms : sequence or None
        Plotting order; when None it is derived from the raw CSV via
        order_algorithms_by_size().
    metric : str
        Column of the CSV to convert into a similarity.
    CASE_BY_CASE_PLOT : int/bool
        When truthy, also emit one figure per image.
    value_label : str or None
        Axis label and output file stem; defaults to
        ' Similarity (0~1) by <metric>'.
    """
    df_nd_ori = pd.read_csv(neuron_distance_csv)
    algorithm_name_mapping = rp.get_algorithm_name_dict()
    df_nd = calculate_similarities(
        neuron_distance_csv, metric,
        output_similarity_csv=neuron_distance_csv + ".similarity.csv")
    all_images = np.unique(df_nd.image_id)
    if not path.exists(outputDir):
        os.mkdir(outputDir)
    if algorithms is None:
        algorithms = order_algorithms_by_size(df_nd_ori)

    if CASE_BY_CASE_PLOT:
        dfg = df_nd.groupby('image_id')
        for image in all_images:
            df_image_cur = dfg.get_group(image)
            if df_image_cur.shape[0] > 0:
                plt.figure()
                plt.bar(range(df_image_cur.swc_file.size),
                        df_image_cur['similarity'])
                algorithm_names = [algorithm_name_mapping[x]
                                   for x in df_image_cur['algorithm']]
                plt.xticks(range(df_image_cur.swc_file.size),
                           algorithm_names, rotation="90")
                plt.ylabel(' Similarity (0~1) by ' + metric)
                plt.savefig(outputDir + '/sorted/figs/' +
                            image.split('/')[-1] + '_' + metric +
                            '_similarity.png', format='png')
                plt.close()
            else:
                print(image + " has no valid reconstructions")

    # Sample sizes come from the raw CSV (before similarity filtering) so the
    # n in each label reflects all attempted reconstructions.
    dfg = df_nd_ori.groupby('algorithm')
    sample_size_per_algorithm = [dfg.get_group(alg).shape[0]
                                 for alg in algorithms]

    plt.figure()
    sb.set_context("talk", font_scale=0.7)
    a = sb.barplot(y='algorithm', x='similarity', data=df_nd,
                   order=algorithms)
    algorithm_names = [algorithm_name_mapping[x] for x in algorithms]
    a.set_yticklabels(['%s ($n$=%d )' % (algorithm_names[i],
                                         sample_size_per_algorithm[i])
                       for i in range(algorithms.size)])

    # Idiom fix: 'is None' instead of '== None'.
    if value_label is None:
        value_label = ' Similarity (0~1) by ' + metric
    plt.ylabel('algorithm (n = # recons)')
    plt.xlabel(value_label)
    plt.subplots_adjust(left=0.4, right=0.9, bottom=0.1, top=0.9)
    plt.savefig(outputDir + '/' + value_label + '.png', format='png')
    plt.close()
    return