def plot_information_flow(simulation_list):
    for ind_key in range(len(simulation_list)):
        print('ind_key = %d' % ind_key)
        simulation_key = simulation_list[ind_key]

        (dt, tSim, N, S, p, num_fact, p_fact, dzeta, a_pf, eps, f_russo, cm,
         a, U, w, tau_1, tau_2, tau_3_A, tau_3_B, g_A, beta, tau, t_0, g,
         random_seed, p_0, n_p, nSnap,
         russo2008_mode) = file_handling.load_parameters(simulation_key)
        retrieved_saved = file_handling.load_retrieved(simulation_key)

        m_saved, mi_saved, control, shuffled = get_mi(retrieved_saved,
                                                      retrieved_saved)
        # Subtract the shuffled estimate of the sampling bias from the raw MI
        corrected = np.array(mi_saved)[:, None] - np.array(shuffled)
        print(np.array(mi_saved).shape)
        print(np.array(shuffled).shape)
        print(corrected.shape)

        plt.title('Information flow, shuffled bias estimate')
        plt.plot(m_saved, corrected, '-o', color=color_s[ind_key],
                 label=r'$g_A$=%.1f, $w$=%.1f' % (g_A, w))
        plt.plot(m_saved, shuffled, ':', color=color_s[ind_key], label='bias')
        plt.yscale('log', basey=2)
        plt.ylim([ymin, ymax])
        plt.legend(loc='upper right')
def compare_mi_crossovers(simulation_list):
    for ind_sim in range(len(simulation_list)):
        simulation = simulation_list[ind_sim]
        retrieved_saved = file_handling.load_retrieved(simulation)
        lamb = file_handling.load_overlap(simulation)

        (dt, tSim, N, S, p, num_fact, p_fact, dzeta, a_pf, eps, f_russo, cm,
         a, U, w, tau_1, tau_2, tau_3_A, tau_3_B, g_A, beta, tau, t_0, g,
         random_seed, p_0, n_p, nSnap,
         russo2008_mode) = file_handling.load_parameters(simulation)

        thresholds = np.linspace(0.1, 0.9, 10)
        mi_high_s = thresholds.copy()
        mi_low_s = thresholds.copy()
        shuffled_high_s = thresholds.copy()
        shuffled_low_s = thresholds.copy()
        random_high_s = thresholds.copy()
        random_low_s = thresholds.copy()

        for ii in range(len(thresholds)):
            threshold = thresholds[ii]
            mi_high, mi_low, shuffled_high, shuffled_low, random_high, random_low = \
                get_mi_crossovers(retrieved_saved, lamb, threshold)
            mi_high_s[ii] = mi_high
            mi_low_s[ii] = mi_low
            shuffled_high_s[ii] = shuffled_high
            shuffled_low_s[ii] = shuffled_low
            random_high_s[ii] = random_high
            random_low_s[ii] = random_low

        plt.subplot(3, 2, ind_sim + 1)
        plt.plot(thresholds, mi_high_s - shuffled_high_s, 'b',
                 label='Corrected high')
        plt.plot(thresholds, mi_low_s - shuffled_low_s, 'g',
                 label='Corrected low')
        plt.plot(thresholds, mi_high_s, ':b', label='Original high')
        plt.plot(thresholds, mi_low_s, ':g', label='Original low')
        plt.plot(thresholds, shuffled_high_s, '--b', label='Bias high')
        plt.plot(thresholds, shuffled_low_s, '--g', label='Bias low')
        plt.ylabel('Mutual information in pairs (m=2)')
        plt.xlim(0, 1)
        plt.title('w=%.2f, g_A=%.2f' % (w, g_A))
        if ind_sim == 0:
            plt.legend()
        if ind_sim in [4, 5]:
            plt.xlabel('Overlap threshold')
def find_ind_max(sim_list):
    # For each cue, the shortest sequence length across all simulations in
    # sim_list, so that sequences can be compared over a common length
    ind_max = np.zeros(p, dtype=int)
    for ind_key in range(len(sim_list)):
        key = sim_list[ind_key]
        retrieved_saved = file_handling.load_retrieved(key)
        for cue_ind in range(p):
            duration = len(retrieved_saved[cue_ind])
            if cue_ind != retrieved_saved[cue_ind][0]:
                duration += 1
            if ind_max[cue_ind] != 0:
                ind_max[cue_ind] = min(ind_max[cue_ind], duration)
            else:
                ind_max[cue_ind] = duration
    return ind_max
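# Usage sketch (hypothetical; assumes a module-level `simulations` list of
# simulation keys, as used elsewhere in these scripts): the returned array
# plays the role of the global `ind_max` expected by find_neighbors() below.
# ind_max = find_ind_max(simulations)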
def test_shuffle_error(N, p):
    # Sampling error of the shuffled mutual-information estimate as a
    # function of the number of samples, compared with the first-order
    # analytical bias of Panzeri & Treves (1996), ~ (p-1)^2 / (2 n ln 2)
    mi = []
    xx = np.logspace(2, np.log10(N), 100, dtype=int)
    for n in xx:
        print(np.log(n) / np.log(2))
        deck = np.arange(0, n, 1, dtype=int)
        rd.shuffle(deck)
        test_list = rd.randint(0, p, n)
        shuffled = test_list[deck]
        mi.append(mutual_information(test_list, shuffled, p)[2])
    G = 1 / 2 / np.log(2) * (p - 1)**2
    A = G / 6.5
    plt.plot(xx, mi, '--', label='Shuffled')
    plt.plot(xx, G / (A + xx), '--', label='First order Pan+96a')
    plt.yscale('log', basey=2)
    plt.xscale('log', basex=2)
    plt.xlabel('Number of samples')
    plt.ylabel('Sampling error')


for ind_key in range(len(simulations)):
    print('ind_key = %d' % ind_key)
    simulation_key = simulations[ind_key]
    ryom_name = ryom_data[ind_key]

    (dt, tSim, N, S, p, num_fact, p_fact, dzeta, a_pf, eps, f_russo, cm,
     a, U, w, tau_1, tau_2, tau_3_A, tau_3_B, g_A, beta, tau, t_0, g,
     random_seed, p_0, n_p, nSnap,
     russo2008_mode) = file_handling.load_parameters(simulation_key)
    retrieved_saved = file_handling.load_retrieved(simulation_key)
    ryom_retrieved = file_handling.load_ryom_retrieved(ryom_name)

    plt.vlines(event_counter(retrieved_saved), 0, 2**3,
               colors=color_s[ind_key])
    plt.vlines(event_counter(ryom_retrieved), 0, 2**3,
               colors=color_s_ryom[ind_key])
plt.ylim([ymin, ymax])
plt.legend()
plt.show()
def find_neighbors(key):
    (dt, tSim, N, S, p, num_fact, p_fact, dzeta, a_pf, eps, f_russo, cm,
     a, U, w, tau_1, tau_2, tau_3_A, tau_3_B, g_A, beta, tau, t_0, g,
     random_seed, p_0, n_p, nSnap,
     russo2008_mode) = file_handling.load_parameters(key)

    graph_all = nx.Graph()
    graph_high = nx.Graph()
    graph_low = nx.Graph()
    num_trans_all = np.zeros((p, p), dtype=int)
    num_trans_high = np.zeros((p, p), dtype=int)
    num_trans_low = np.zeros((p, p), dtype=int)

    retrieved_saved = file_handling.load_retrieved(key)
    lamb = file_handling.load_overlap(key)
    # Median crossover overlap, used to split transitions into high and low
    threshold = median([lamb[ind_cue][trans] for ind_cue in range(p)
                        for trans in range(len(lamb[ind_cue]))])

    neighbors = [[] for pat in range(p)]
    for cue_ind in range(p):
        if len(retrieved_saved[cue_ind][:ind_max[cue_ind]]) >= 3:
            # print(len(retrieved_saved[cue_ind]))
            duration = len(retrieved_saved[cue_ind])
            if cue_ind != retrieved_saved[cue_ind][0]:
                duration += 1
            # ind_max[cue_ind] = duration
            sequence = []
            if cue_ind != retrieved_saved[cue_ind][0]:
                sequence.append(cue_ind)
            sequence += retrieved_saved[cue_ind]
            sequence = sequence[3:ind_max[cue_ind]]

            for ind_trans in range(len(sequence) - 1):
                patt1 = sequence[ind_trans]
                patt2 = sequence[ind_trans + 1]
                num_trans_all[patt1, patt2] += 1
                if lamb[cue_ind][ind_trans + 1] >= threshold:
                    num_trans_high[patt1, patt2] += 1
                else:
                    num_trans_low[patt1, patt2] += 1
                # if True:
                if patt2 not in neighbors[patt1]:
                    neighbors[patt1].append(patt2)

    for patt1 in range(p):
        for patt2 in range(p):
            if patt1 != patt2:
                if num_trans_all[patt1, patt2]:
                    graph_all.add_edge(patt1, patt2,
                                       weight=num_trans_all[patt1, patt2])
                if num_trans_high[patt1, patt2]:
                    graph_high.add_edge(patt1, patt2,
                                        weight=num_trans_high[patt1, patt2])
                if num_trans_low[patt1, patt2]:
                    graph_low.add_edge(patt1, patt2,
                                       weight=num_trans_low[patt1, patt2])

    return neighbors, graph_all, num_trans_all, graph_low, num_trans_low, \
        graph_high, num_trans_high, threshold
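# Usage sketch (hypothetical key): draw the weighted transition graph built
# by find_neighbors() with a networkx spring layout.
# neighbors, graph_all, num_trans_all, graph_low, num_trans_low, \
#     graph_high, num_trans_high, threshold = find_neighbors(simulations[0])
# pos = nx.spring_layout(graph_all, seed=0)
# nx.draw_networkx(graph_all, pos, node_size=30, with_labels=False)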
def compare_mi_crossover(simulation_list):
    mi_high_s = np.zeros(len(simulation_list))
    mi_low_s = mi_high_s.copy()
    shuffled_high_s = mi_high_s.copy()
    shuffled_low_s = mi_high_s.copy()
    random_low_s = mi_high_s.copy()
    random_high_s = mi_high_s.copy()
    similarity_s = mi_high_s.copy()
    similarity_shuf_s = mi_high_s.copy()
    w_s = mi_high_s.copy()
    g_A_s = mi_high_s.copy()
    threshold_s = mi_high_s.copy()

    for ind_sim in range(len(simulation_list)):
        simulation = simulation_list[ind_sim]
        retrieved_saved = file_handling.load_retrieved(simulation)
        lamb = file_handling.load_overlap(simulation)
        print('Events %d' % event_counter(lamb))

        (dt, tSim, N, S, p, num_fact, p_fact, dzeta, a_pf, eps, f_russo, cm,
         a, U, w, tau_1, tau_2, tau_3_A, tau_3_B, g_A, beta, tau, t_0, g,
         random_seed, p_0, n_p, nSnap,
         russo2008_mode) = file_handling.load_parameters(simulation)

        lamb_list = [lamb[ind_cue][ind_trans] for ind_cue in range(len(lamb))
                     for ind_trans in range(2, len(lamb[ind_cue]) - 1)]
        # print(lamb_list[:10])
        threshold = median(lamb_list)
        print(threshold)

        mi_high, mi_low, shuffled_high, shuffled_low, random_high, random_low = \
            get_mi_crossovers(retrieved_saved, lamb, threshold)
        print(mi_high)
        mi_high_s[ind_sim] = mi_high
        mi_low_s[ind_sim] = mi_low
        shuffled_high_s[ind_sim] = shuffled_high
        shuffled_low_s[ind_sim] = shuffled_low
        random_high_s[ind_sim] = random_high
        random_low_s[ind_sim] = random_low
        w_s[ind_sim] = w
        g_A_s[ind_sim] = g_A
        threshold_s[ind_sim] = threshold

    ax1 = plt.subplot2grid((3, 2), (0, 0), colspan=2, rowspan=2)
    ax1.plot(g_A_s, mi_high_s - shuffled_high_s, 'b-o', label='Corrected high')
    ax1.plot(g_A_s, mi_low_s - shuffled_low_s, 'g-o', label='Corrected low')
    ax1.plot(g_A_s, mi_high_s, ':b', label='Original high')
    ax1.plot(g_A_s, mi_low_s, ':g', label='Original low')
    ax1.plot(g_A_s, shuffled_high_s, '--b', label='Bias high')
    ax1.plot(g_A_s, shuffled_low_s, '--g', label='Bias low')
    ax1.set_ylabel('Mutual information in pairs (m=2)')
    ax1.set_xlim(-0.1, 1.1)
    ax1.set_title('High-and-low-crossover mutual information')
    ax1.legend()
    ax1.set_xlabel(r'$g_A$')

    ax2 = plt.subplot2grid((3, 2), (2, 0))
    ax2.plot(g_A_s, w_s, '-o')
    ax2.set_xlim(-0.1, 1.1)
    ax2.set_xlabel(r'$g_A$')
    ax2.set_ylabel(r'$w$')
    ax2.set_ylim(0.9, 1.5)
    ax2.set_title('Latching border')

    ax3 = plt.subplot2grid((3, 2), (2, 1))
    ax3.plot(g_A_s, threshold_s, '-o')
    ax3.set_xlim(-0.1, 1.1)
    ax3.set_xlabel(r'$g_A$')
    ax3.set_ylabel(r'$\lambda$')
    ax3.set_ylim(0, 1)
    ax3.set_title('Crossover threshold')

    plt.tight_layout()
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Local modules
import file_handling
import networkx as nx
from scipy.optimize import curve_fit
from scipy.special import erf
from statistics import median
import scipy.stats as st

plt.ion()
plt.close('all')

p = 200
simulations = ['7ca8d570d1dff01c0133de4031a05b46']

retrieved_saved = [[], [], []]
for kick_seed in range(3):
    retrieved_saved[kick_seed] = \
        file_handling.load_retrieved(kick_seed, simulations[0])

# Count cues whose first nmax retrieved patterns coincide for at least two
# kick seeds
cpt = 0
nmax = 6
for ind_cue in range(p):
    if retrieved_saved[0][ind_cue][0:nmax] == retrieved_saved[1][ind_cue][0:nmax] \
            or retrieved_saved[0][ind_cue][0:nmax] == retrieved_saved[2][ind_cue][0:nmax] \
            or retrieved_saved[1][ind_cue][0:nmax] == retrieved_saved[2][ind_cue][0:nmax]:
        cpt += 1
print(cpt)
print(len(retrieved_saved), len(retrieved_saved[0]), len(retrieved_saved[0][0]))
# norm_factor = file_handling.event_counter(retrieved_saved, p)
# num_ABA_plot[ii] = np.mean(num_ABA) / norm_factor
# num_AB_rand_plot[ii] = np.mean(num_ABA_rand) / norm_factor
# num_ABA_shuf_plot[ii] = np.mean(num_ABA_shuf) / norm_factor

# plt.title('Num_AB/Num_transitions')
# plt.plot(num_ABA_plot, label="Latching")
# plt.plot(num_AB_rand_plot, label='Random')
# plt.plot(num_ABA_shuf_plot, label='Shuffled')
# plt.legend()
# plt.title('Proba_ABA Knowing AB')

bins = np.arange(-0.1, 10.2, 0.2)
alpha = 1

key = simulations[1]
retrieved_saved = file_handling.load_retrieved(key)
(dt, tSim, N, S, p, num_fact, p_fact, dzeta, a_pf, eps, f_russo, cm, a, U,
 w, tau_1, tau_2, tau_3_A, tau_3_B, g_A, beta, tau, t_0, g, random_seed,
 p_0, n_p, nSnap, russo2008_mode) = \
    file_handling.load_parameters(key)

random_retrieved, shuffled_retrieved = random_eq(retrieved_saved)

p_B_ABA, p_AB_ABA, p_B, num_B, num_AB, num_ABA = \
    trio_prob_table(retrieved_saved, key)

p_ABA = num_ABA / np.sum(num_B)
p_AB = num_AB / np.sum(num_B)

# Squared deviation between the observed ABA statistics and the prediction
# from pairwise transition probabilities (Markov assumption)
metric = 0
metric_markhov = 0
for pattA in range(p):
    for pattB in range(p):
        metric += (p_ABA[pattA, pattB] * p_B[pattB]
                   - p_AB[pattA, pattB] * p_AB[pattB, pattA])**2
key = simulations_correlated[8]

ksi_i_mu, delta__ksi_i_mu__k, J_i_j_k_l, _ = file_handling.load_network(key)
(dt, tSim, N, S, p, num_fact, p_fact, dzeta, a_pf, eps, f_russo, cm, a, U,
 w, tau_1, tau_2, tau_3_A, tau_3_B, g_A, beta, tau, t_0, g, random_seed,
 p_0, n_p, nSnap, russo2008_mode, _) = \
    file_handling.load_parameters(key)

crossovers = file_handling.load_crossover(0, key)
lamb = []
for ind_cue in range(p):
    lamb += crossovers[ind_cue][1:]
lamb = np.array(lamb)

retrieved = file_handling.load_retrieved(0, key)
retrieved_saved = []
previously_saved = []
for ind_cue in range(p):
    retrieved_saved += retrieved[ind_cue][1:]
    previously_saved += retrieved[ind_cue][:-1]

tSnap = np.array(file_handling.load_time(0, key)[0])
m_mu_plot = file_handling.load_evolution(0, 0, key)

s = 2
shift = 1 / N / a / 5  # Small offset so the categories stay visible in the scatter plot

lamb = np.array(lamb)
low_cor = lamb < 0.2
l_low_cor = r'$\lambda < 0.2$'
import file_handling
import csv

simulations = ['f30d8a2438252005f6a9190c239c01c1']
n_seeds = [11]

for ind_key in range(len(simulations)):
    key = simulations[ind_key]
    (dt, tSim, N, S, p, num_fact, p_fact, dzeta, a_pf, eps, f_russo, cm, a,
     U, w, tau_1, tau_2, tau_3_A, tau_3_B, g_A, beta, tau, t_0, g,
     random_seed, p_0, n_p, nSnap, russo2008_mode, muted_prop) \
        = file_handling.load_parameters(key)

    with open('data_analysis/' + key + '/matlab_retrieved.csv', mode='w') as f:
        writer = csv.writer(f)
        for kick_seed in range(n_seeds[ind_key]):
            retrieved = file_handling.load_retrieved(kick_seed, key)
            for cue_ind in range(p):
                eq_string = ''
                for ind_trans in range(len(retrieved[cue_ind])):
                    # Encode each pattern index as a single Unicode character
                    # (offset 8704) so that a whole sequence becomes one string
                    eq_string += chr(retrieved[cue_ind][ind_trans] + 8704)
                writer.writerow([eq_string])
def event_counter(retrieved):
    # Total number of retrieved patterns summed over all cues
    res = 0
    for cue_ind in range(p):
        res += len(retrieved[cue_ind])
    return res


plt.figure('Information_flow')
for ind_key in range(len(simulations)):
    print('ind_key = %d' % ind_key)
    simulation_key = simulations[ind_key]
    ryom_name = ryom_data[ind_key]

    (dt, tSim, N, S, p, num_fact, p_fact, dzeta, a_pf, eps, f_russo, cm, a,
     U, w, tau_1, tau_2, tau_3_A, tau_3_B, g_A, beta, tau, t_0, g,
     random_seed, p_0, n_p, nSnap,
     russo2008_mode) = file_handling.load_parameters(simulation_key)
    retrieved_saved = file_handling.load_retrieved(simulation_key)
    ryom_retrieved = file_handling.load_ryom_retrieved(ryom_name)

    m_saved, mi_ryom, control_ryom, shuffled_ryom = get_mi(ryom_retrieved,
                                                           retrieved_saved)
    m_saved, mi_saved, control, shuffled = get_mi(retrieved_saved,
                                                  ryom_retrieved)

    plt.subplot(221)
    plt.title('Gsln and Ryom data with shuffled control')
    plt.plot(m_saved, mi_saved, '-o', color=color_s[ind_key],
             label='Gsln, g_A=' + str(g_A))
    plt.plot(m_saved, shuffled, ':', color=color_s[ind_key])
def get_retrieved_seeds(key, n_seeds):
    retrieved = [[] for ii in range(n_seeds)]
    for kick_seed in range(n_seeds):
        retrieved[kick_seed] = file_handling.load_retrieved(kick_seed, key)
    return retrieved
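# Usage sketch (hypothetical key and seed count, reusing the `simulations`
# and `n_seeds` lists defined above): gather the retrieved sequences of every
# kick seed for one simulation.
# retrieved_all = get_retrieved_seeds(simulations[0], n_seeds[0])
# print(len(retrieved_all), len(retrieved_all[0]))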