def generate_bayesian_diagram(result_matrices, algo_names=("algo1", "algo2"),
                              rope=0.01, rho=1 / 5, show_diagram=True,
                              save_diagram=None):
    """Run the hierarchical Bayesian correlated t-test and plot the posterior.

    Parameters
    ----------
    result_matrices : array-like
        Per-dataset score matrices for the two compared algorithms
        (as expected by ``bt.hierarchical``).
    algo_names : sequence of two str
        Labels for the two compared algorithms.
        NOTE: default changed from a list to a tuple — a mutable default
        argument is shared across calls and can be silently corrupted.
    rope : float
        Region of practical equivalence: the two classifiers are considered
        equivalent when their accuracy difference is below this (0.01 = 1%).
    rho : float
        Correlation induced by overlapping cross-validation folds; 1/5
        corresponds to 10 runs of 10-fold cross-validation.
    show_diagram : bool
        If True, display the simplex plot interactively.
    save_diagram : str or None
        Path to save the figure to, or None to skip saving.

    Returns
    -------
    tuple of float
        ``(p_left, p_rope, p_right)`` posterior probabilities.
    """
    pl, pe, pr = bt.hierarchical(result_matrices, rope, rho, verbose=True,
                                 names=algo_names)
    samples = bt.hierarchical_MC(result_matrices, rope, rho, names=algo_names)
    fig = bt.plot_posterior(samples, algo_names)
    # BUG FIX: save BEFORE showing. plt.show() blocks and, once the window
    # is closed, the current figure is torn down, so the original code
    # (savefig after show) wrote a blank image whenever show_diagram=True.
    if save_diagram is not None:
        plt.savefig(save_diagram)
    if show_diagram:
        plt.show()
    return (pl, pe, pr)
# Algorithm names: one (left, right) label pair per comparison.
alg_names = (('ReliefSeq', 'TuRF'), )

# Saved file name stems used to build the output figure filename.
save_names = (('reliefseq', 'turf'), )

for save_names_nxt, file_names, names in zip(save_names, comp_pairs, alg_names):

    # Load scores matrices for the two algorithms being compared.
    scores_l = sio.loadmat('./raw_scores/' + file_names[0])['data']
    scores_r = sio.loadmat('./raw_scores/' + file_names[1])['data']

    # Compute per-fold score differences.
    scores = scores_l - scores_r

    # Drop all-zero difference rows (identical scores carry no information
    # for the test). Vectorized replacement for the original
    # apply_along_axis/logical_not construction — same mask, one C-level pass.
    msk = np.any(scores != 0, axis=1)
    scores = scores[msk, :]

    # Compute posterior probabilities (left better / practically
    # equivalent / right better); rope and rho are defined elsewhere
    # in this file — presumably the shared test parameters. TODO confirm.
    pleft, prope, pright = bt.hierarchical(scores, rope, rho)
    with open('results_bhctt.res', 'a') as f:
        f.write('{0}, {1}, {2}, {3}, {4}\n'.format(names[0], names[1],
                                                   pleft, prope, pright))

    # Sample the posterior and make the simplex plot.
    # BUG FIX: pass the current pair `names`, not the whole `alg_names`
    # tuple of pairs — the original mislabeled the posterior samples.
    samples = bt.hierarchical_MC(scores, rope, rho, names=names)
    fig = bt.plot_posterior(samples, names)
    plt.savefig(save_names_nxt[0] + '_' + save_names_nxt[1] + '_final.png')