# remove algorithms that produce too few results (fewer than 100)
for i in range(algorithms.size):
    siz = dfg.get_group(algorithms[i]).shape[0]
    if siz < 100:
        df_merge = df_merge[df_merge['algorithm'] != algorithms[i]]
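# The loop above is equivalent to a single pandas groupby-filter; a sketch,
# kept as a comment so the explicit loop above stays in effect:
#   df_merge = df_merge.groupby('algorithm').filter(lambda g: len(g) >= 100)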

algorithms = np.unique(df_merge.algorithm)
# regroup after filtering so the groups reflect only the remaining rows
dfg = df_merge.groupby('algorithm')
ave_dis_algorithm = np.zeros(algorithms.size)
for i in range(algorithms.size):
    df_alg = dfg.get_group(algorithms[i])
    ave_dis_algorithm[i] = df_alg[REPORTING_METRIC].mean()

# rank algorithms by mean distance, largest first
order = ave_dis_algorithm.argsort()
algorithms_ordered = algorithms[order[::-1]]
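# Sanity check (a sketch): the same descending-by-mean ordering can be read
# straight off a pandas groupby; df_merge and REPORTING_METRIC are the
# variables defined above.
ranking = (df_merge.groupby('algorithm')[REPORTING_METRIC]
           .mean()
           .sort_values(ascending=False))
print(ranking)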




# error bar plot
plt_dist.plot_compare_consensus_distance(
    merged_csv,
    data_DIR,
    algorithms_ordered,
    metric=REPORTING_METRIC,
    CASE_BY_CASE_PLOT=1,
    value_label='Weighted Average Neuron Distance (w.r.t. Gold Standard)')
# plt_dist.plot_similarities(
#     merged_csv,
#     data_DIR,
#     algorithms_ordered,
#     metric='weighted_ave_neuron_distance',
#     CASE_BY_CASE_PLOT=0,
#     value_label='Similarities on Weighted Average Neuron Distance (to the Gold Standard)')
df_merge_m_c = df_merge[df_merge['algorithm'].isin(['median', 'consensus'])]
plt_dist.plot_compare_median_consensus(output_dir=data_DIR, df_order=df_merge_m_c,
                                       metric=REPORTING_METRIC, DISPLAY=1)
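# Numeric companion to the plot above (a sketch): pivot to one row per image
# and report the mean paired difference. Assumes the per-image column in
# df_merge is named 'image' alongside 'algorithm' and the metric column.
paired = df_merge_m_c.pivot_table(index='image', columns='algorithm',
                                  values=REPORTING_METRIC)
print('mean (median - consensus):',
      (paired['median'] - paired['consensus']).mean())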



merged_csv = data_DIR + '/consensus_compare_wnd.csv'
df_merge.to_csv(merged_csv, index=False)

# plot
# sort algorithms by sample size (descending), consensus listed first
algorithms = np.unique(df_nd.algorithm)

dfg = df_merge.groupby('algorithm')
sample_size_per_algorithm = np.zeros(algorithms.size)
for i in range(algorithms.size):
    sample_size_per_algorithm[i] = dfg.get_group(algorithms[i]).shape[0]

order = sample_size_per_algorithm.argsort()
algorithms_ordered = algorithms[order[::-1]]
algorithms_ordered = np.append('consensus', algorithms_ordered)
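# Equivalent in one call (a sketch): value_counts() returns the algorithms in
# df_merge already sorted by descending sample size (note: unlike
# algorithms_ordered, this includes every algorithm present in df_merge).
print(df_merge['algorithm'].value_counts())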





plt_dist.plot_compare_consensus_distance(
    merged_csv,
    data_DIR,
    algorithms_ordered,
    metric='weighted_ave_neuron_distance',
    CASE_BY_CASE_PLOT=0,
    value_label='Weighted Average Neuron Distance (bidirectional)')
plt_dist.plot_similarities(
    merged_csv,
    data_DIR,
    algorithms_ordered,
    metric='weighted_ave_neuron_distance',
    CASE_BY_CASE_PLOT=0,
    value_label='Similarities on Weighted Average Neuron Distance (bidirectional)')