def compute_distr_ns(states, W, b_v, b_h, steps=10, T=t_sim * 1000):
    """Estimate the sampled state distribution and its KL/entropy convergence.

    Builds the Laplace-smoothed empirical distribution over the first ``int(T)``
    states, then evaluates the normalized KL divergence (KL / entropy) on
    ``steps`` logarithmically spaced prefixes of the state sequence.

    Parameters
    ----------
    states : array-like
        Sequence of sampled network states; passed to ``kl_tools.states2distr``.
    W, b_v, b_h :
        Model weights and biases forwarded to ``kl_divergence_pdf``.
    steps : int
        Number of prefix lengths at which convergence is evaluated.
    T : float
        Number of samples to use (cast to int); defaults to ``t_sim * 1000``.

    Returns
    -------
    (distr, nTi, res_distr_cut) : tuple
        Full smoothed distribution, the float array of prefix lengths, and the
        KL/entropy value at each prefix length.
    """
    nT = int(T)
    # Empirical distribution over all nT samples, Laplace-smoothed (+1)
    # so the KL divergence stays finite for unvisited states.
    distr = kl_tools.states2distr(states[:nT], N_v + N_c + N_h) + 1
    distr /= distr.sum()
    res_distr_cut = np.zeros(steps)
    # `steps` log-spaced prefix lengths from 100 to nT.  Was hard-coded to 10,
    # which broke (IndexError / stale zeros) whenever steps != 10.
    nTi = np.logspace(2, np.log10(nT), steps, base=10)
    for i in range(steps):
        # np.logspace yields floats; slice bounds must be ints (a float slice
        # index raises TypeError), so cast explicitly.
        d = kl_tools.states2distr(states[: int(nTi[i])], N_v + N_c + N_h) + 1
        d /= d.sum()
        res_distr_cut[i] = kl_divergence_pdf(d, W, b_v, b_h) / entropy(d)
    return distr, nTi, res_distr_cut
def compute_distr_ns(states, W, b_v, b_h, steps=10, T=t_sim * 1000):
    """Estimate the sampled state distribution and its KL/entropy convergence.

    Builds the Laplace-smoothed empirical distribution over the first ``int(T)``
    states, then evaluates the normalized KL divergence (KL / entropy) on
    ``steps`` logarithmically spaced prefixes of the state sequence.

    Parameters
    ----------
    states : array-like
        Sequence of sampled network states; passed to ``kl_tools.states2distr``.
    W, b_v, b_h :
        Model weights and biases forwarded to ``kl_divergence_pdf``.
    steps : int
        Number of prefix lengths at which convergence is evaluated.
    T : float
        Number of samples to use (cast to int); defaults to ``t_sim * 1000``.

    Returns
    -------
    (distr, nTi, res_distr_cut) : tuple
        Full smoothed distribution, the float array of prefix lengths, and the
        KL/entropy value at each prefix length.
    """
    nT = int(T)
    # Empirical distribution over all nT samples, Laplace-smoothed (+1)
    # so the KL divergence stays finite for unvisited states.
    distr = kl_tools.states2distr(states[:nT], N_v + N_c + N_h) + 1
    distr /= distr.sum()
    res_distr_cut = np.zeros(steps)
    # `steps` log-spaced prefix lengths from 100 to nT.  Was hard-coded to 10,
    # which broke (IndexError / stale zeros) whenever steps != 10.
    nTi = np.logspace(2, np.log10(nT), steps, base=10)
    for i in range(steps):
        # np.logspace yields floats; slice bounds must be ints (a float slice
        # index raises TypeError), so cast explicitly.
        d = kl_tools.states2distr(states[: int(nTi[i])], N_v + N_c + N_h) + 1
        d /= d.sum()
        res_distr_cut[i] = kl_divergence_pdf(d, W, b_v, b_h) / entropy(d)
    return distr, nTi, res_distr_cut
def wrap_run(runID=0):
    """Run the network-sampling simulation and collect per-run state sequences.

    Parameters
    ----------
    runID : int or dict
        With an int, ``run_NS()`` is called with no arguments (the int value
        itself is not forwarded — presumably a selector handled elsewhere;
        TODO confirm).  With a dict, it is passed through to ``run_NS``.

    Returns
    -------
    (states_ns, W, b_v, b_h) : tuple
        List of state sequences (one per run record in ``r``) plus the model
        weights and biases returned by ``run_NS``.

    Raises
    ------
    TypeError
        If ``runID`` is neither int nor dict (previously this fell through and
        crashed later with a confusing NameError on ``r``).
    """
    if isinstance(runID, int):
        r, W, b_v, b_h = run_NS()
    elif isinstance(runID, dict):
        r, W, b_v, b_h = run_NS(runID)
    else:
        raise TypeError(
            f"runID must be int or dict, got {type(runID).__name__}"
        )
    states_ns = []
    for rr in r:
        states = states_NS(rr['Mv'], rr['Mh'], t_conv=1.0)
        # NOTE: the original also computed kl_tools.states2distr(...) here and
        # discarded the result every iteration — dead work, removed.
        states_ns.append(states)
    return states_ns, W, b_v, b_h
def wrap_run(runID=0):
    """Run the network-sampling simulation and collect per-run state sequences.

    Parameters
    ----------
    runID : int or dict
        With an int, ``run_NS()`` is called with no arguments (the int value
        itself is not forwarded — presumably a selector handled elsewhere;
        TODO confirm).  With a dict, it is passed through to ``run_NS``.

    Returns
    -------
    (states_ns, W, b_v, b_h) : tuple
        List of state sequences (one per run record in ``r``) plus the model
        weights and biases returned by ``run_NS``.

    Raises
    ------
    TypeError
        If ``runID`` is neither int nor dict (previously this fell through and
        crashed later with a confusing NameError on ``r``).
    """
    if isinstance(runID, int):
        r, W, b_v, b_h = run_NS()
    elif isinstance(runID, dict):
        r, W, b_v, b_h = run_NS(runID)
    else:
        raise TypeError(
            f"runID must be int or dict, got {type(runID).__name__}"
        )
    states_ns = []
    for rr in r:
        states = states_NS(rr['Mv'], rr['Mh'], t_conv=1.0)
        # NOTE: the original also computed kl_tools.states2distr(...) here and
        # discarded the result every iteration — dead work, removed.
        states_ns.append(states)
    return states_ns, W, b_v, b_h