import numpy as np
from numpy import log10
import tacoma as tc

def compute_ccdfs(binned_temporal_network, max_group,
                  time_normalization_factor=1./3600.,
                  n_bins=50, logarithmic_bins=False):

    # CCDF of the mean degree
    t_fw, k_fw = tc.mean_degree(binned_temporal_network)

    if logarithmic_bins:
        bins = np.append([0.], np.logspace(log10(k_fw[k_fw>0.0].min())-0.1, log10(k_fw.max()), n_bins))
    else:
        bins = np.append([0.], np.linspace(k_fw[k_fw>0.0].min(), k_fw.max(), n_bins))

    x_k, y_k = get_ccdf(k_fw)
    y_k = tc.sample_a_function(x_k, y_k, bins)
    x_k = bins

    # group sizes and contact durations
    result = tc.measure_group_sizes_and_durations(binned_temporal_network)

    grp_sizes = np.array(result.aggregated_size_histogram[1:])
    m = np.arange(1, len(grp_sizes)+1)
    m, grp_sizes = get_ccdf_from_distribution(m, grp_sizes)

    durations = np.array(result.contact_durations) * time_normalization_factor

    if logarithmic_bins:
        bins = np.append([0.], np.logspace(log10(durations.min())-0.1, log10(durations.max()), n_bins))
    else:
        bins = np.append([0.], np.linspace(durations.min(), durations.max(), n_bins))

    x_contact, y_contact = get_ccdf(durations)
    y_contact = tc.sample_a_function(x_contact, y_contact, bins)
    x_contact = bins

    # group life time CCDFs, one per group size
    y_groups = []
    x_groups = []

    for group_size in range(1, max_group+1):

        durations = np.array(result.group_durations[group_size]) * time_normalization_factor

        if len(durations) <= 2:
            x = []
            y = []
        else:
            if logarithmic_bins:
                bins = np.append([0.], np.logspace(log10(durations.min())-0.1, log10(durations.max()), n_bins))
            else:
                bins = np.append([0.], np.linspace(durations.min(), durations.max(), n_bins))

            x, y = get_ccdf(durations)
            # sample this group's own CCDF (the original sampled
            # x_contact/y_contact here, a copy-paste bug)
            y = tc.sample_a_function(x, y, bins)
            x = bins

        x_groups.append(x)
        y_groups.append(y)

    xs = [x_k, [], x_contact] + x_groups
    ys = [y_k, grp_sizes, y_contact] + y_groups

    return xs, ys
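# `compute_ccdfs` relies on the helpers `get_ccdf` and
# `get_ccdf_from_distribution`, which are not shown in this section.
# A minimal sketch of what they might look like, assuming `get_ccdf`
# builds an empirical CCDF from raw samples and
# `get_ccdf_from_distribution` builds one from a histogram:

def get_ccdf(samples):
    """Empirical CCDF P(X >= x), evaluated at the sorted sample values."""
    x = np.sort(np.asarray(samples))
    y = 1.0 - np.arange(len(x)) / float(len(x))
    return x, y

def get_ccdf_from_distribution(x, p):
    """CCDF from a (possibly unnormalized) distribution p over values x."""
    p = np.asarray(p, dtype=float)
    p /= p.sum()
    # cumulative sum from the right gives P(X >= x)
    y = np.cumsum(p[::-1])[::-1]
    return x, y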
eta = R0 * rho / mean_k
i_sample = np.zeros_like(t_sample)
successful = 0

for meas in range(N_meas):

    # run one SIS realization on the temporal network
    sis = tc.SIS(N, t_simulation, eta, rho, number_of_initially_infected=10)
    tc.gillespie_SIS(tn, sis)

    t = np.array(sis.time)
    i = np.array(sis.I, dtype=float) / N

    this_sample = tc.sample_a_function(t, i, t_sample)

    # only average over runs in which the epidemic survived to the end
    if this_sample[-1] > 0.0:
        successful += 1
        i_sample += this_sample

    # faint line per realization
    ax[2].plot(t_sample, this_sample, c=line.get_color(), alpha=0.1)

ax[1].plot(t_sample, i_sample / successful)

pl.show()
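# The SIS snippet above depends on names defined earlier in its script
# (`tn`, `N`, `R0`, `rho`, `mean_k`, `t_simulation`, `N_meas`, `t_sample`,
# `ax`, `line`). A minimal sketch of such a preamble; the file name,
# parameter values, and figure layout are assumptions, not taken from
# the original source:

import numpy as np
import tacoma as tc
import matplotlib.pyplot as pl

tn = tc.load_json_taco('~/.tacoma/ht09.taco')        # temporal network (assumed)
N = tn.N
R0 = 3.0                                             # assumed basic reproduction number
rho = 1.0                                            # assumed recovery rate
t_k, k_t = tc.mean_degree(tn)
mean_k = np.trapz(k_t, x=t_k) / (t_k[-1] - t_k[0])   # time-averaged mean degree
t_simulation = tn.tmax
N_meas = 100
t_sample = np.linspace(0, t_simulation, 100)

fig, ax = pl.subplots(1, 3)
line, = ax[0].plot(t_k, k_t)                         # supplies line.get_color()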
edge_weight_tuples = [(u, v, 1.0) for u, v in G.edges()]

t_sample = np.linspace(0, 10, 100)
i_sample = np.zeros_like(t_sample)
i_sample_tc = np.zeros_like(t_sample)

N_measurements = 1000

for meas in range(N_measurements):

    # reference implementation: SIR on the weighted static network
    sir = SIR_weighted(N, edge_weight_tuples, infection_rate, recovery_rate, I0)
    t, I, R = sir.simulation(tmax)
    print("simulation time:", t[-1])
    i_sample += tc.sample_a_function(t, I / N, t_sample) / N_measurements

    #==============================

    # same system with tacoma's Gillespie SIR on the converted network
    _G = tc.convert_static_network(N, list(G.edges()))
    sir = tc.SIR(N, tmax, infection_rate, recovery_rate, I0)
    tc.gillespie_SIR(_G, sir)

    t = np.array(sir.time)
    I = np.array(sir.I)
    print("simulation time tacoma:", t[-1])
    i_sample_tc += tc.sample_a_function(t, I / N, t_sample) / N_measurements
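# A short sketch to compare the two averaged prevalence curves computed
# above; plotting only, no assumptions beyond matplotlib:

import matplotlib.pyplot as pl

pl.figure()
pl.plot(t_sample, i_sample, label='SIR_weighted')
pl.plot(t_sample, i_sample_tc, '--', label='tacoma Gillespie SIR')
pl.xlabel('time')
pl.ylabel('prevalence I/N')
pl.legend()
pl.show()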
def flockwork_P_contact_time_distributions_for_varying_alpha_beta(
        tau, N, k_initial, t, alpha, beta, tmax, sampling_points=10):
    r"""Compute the mean contact and inter-contact duration distributions
    for a Flockwork-P system with varying rates.

    Parameters
    ----------
    tau : numpy.ndarray of float
        durations for which to evaluate the probability density
    N : int
        number of nodes
    k_initial : float
        initial mean degree
    t : numpy.ndarray of float
        time points at which :math:`\alpha(t)` and :math:`\beta(t)` change
    alpha : numpy.ndarray of float
        active reconnection rate associated with the time points in ``t``
    beta : numpy.ndarray of float
        active disconnection rate associated with the time points in ``t``
    tmax : float
        final time
    sampling_points : int, default : 10
        how many points to sample in between two time points in ``t``

    Returns
    -------
    P_tau_c : numpy.ndarray
        mean probability density at ``tau`` (contact duration)
    P_tau_ic : numpy.ndarray
        mean probability density at ``tau`` (inter-contact duration)
    """

    # estimate the mean degree by integrating the ODE
    new_t, k = flockwork_P_mean_degree_for_varying_alpha_beta(
        N, k_initial, t, alpha, beta, tmax, sampling_points)

    # from the equilibrium assumption k = P/(1-P), compute the adjusted P
    new_P = k / (k + 1)

    new_alpha = tc.sample_a_function(t, alpha, new_t)

    distro_c = []
    distro_ic = []

    ks = np.arange(N)

    # for every time point and adjusted P, compute the equilibrium
    # duration distributions
    for a_, P_ in zip(new_alpha, new_P):

        if P_ > 0:
            g_ = a_ / P_
        else:
            g_ = 0

        p_k = degree_distribution(N, P_)
        _k_ = p_k.dot(ks)
        _k2_ = p_k.dot(ks**2)

        omega = 2 * g_ * (1 - P_ / (N - 1) * _k2_ / _k_)
        lambda_1 = 2 * a_

        distro_c.append(omega * np.exp(-omega * tau))
        distro_ic.append(lambda_1 * np.exp(-lambda_1 * tau))

    # compute the mean distributions as time integrals over the
    # per-time-point distributions
    distro_c = np.array(distro_c)
    distro_ic = np.array(distro_ic)

    mean_distro_c = np.trapz(distro_c, x=new_t, axis=0) / (new_t[-1] - new_t[0])
    mean_distro_ic = np.trapz(distro_ic, x=new_t, axis=0) / (new_t[-1] - new_t[0])

    return mean_distro_c, mean_distro_ic
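# Example usage (illustrative parameter values, not taken from the
# original source): rates that switch once at t = 50, with the densities
# evaluated on a logarithmic grid of durations.

tau = np.logspace(-3, 2, 200)
t = np.array([0.0, 50.0])
alpha = np.array([1.0, 0.5])     # active reconnection rate per phase
beta = np.array([1.0, 2.0])      # active disconnection rate per phase

P_c, P_ic = flockwork_P_contact_time_distributions_for_varying_alpha_beta(
        tau, N=100, k_initial=1.0, t=t, alpha=alpha, beta=beta, tmax=100.0)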
def flockwork_P_group_life_time_distributions_for_varying_alpha_beta(
        tau, max_group_size, N, k_initial, t, alpha, beta, tmax,
        min_group_size=2, sampling_points=10):
    r"""Compute the mean group life time distributions for a Flockwork-P
    system with varying rates.

    Parameters
    ----------
    tau : numpy.ndarray of float
        durations for which to evaluate the probability density
    max_group_size : int
        largest group size for which the life time distribution is computed
    N : int
        number of nodes
    k_initial : float
        initial mean degree
    t : numpy.ndarray of float
        time points at which :math:`\alpha(t)` and :math:`\beta(t)` change
    alpha : numpy.ndarray of float
        active reconnection rate associated with the time points in ``t``
    beta : numpy.ndarray of float
        active disconnection rate associated with the time points in ``t``
    tmax : float
        final time
    min_group_size : int, default : 2
        smallest group size for which the life time distribution is computed
    sampling_points : int, default : 10
        how many points to sample in between two time points in ``t``

    Returns
    -------
    P_taus : list of numpy.ndarray
        mean probability density at ``tau`` (group life time), one entry
        per group size from ``min_group_size`` to ``max_group_size``
    """

    # estimate the mean degree by integrating the ODE
    new_t, k = flockwork_P_mean_degree_for_varying_alpha_beta(
        N, k_initial, t, alpha, beta, tmax, sampling_points)

    # from the equilibrium assumption k = P/(1-P), compute the adjusted P
    new_P = k / (k + 1)

    new_alpha = tc.sample_a_function(t, alpha, new_t)

    distros = [[] for m in range(min_group_size, max_group_size + 1)]

    # for every time point and adjusted P, compute the equilibrium
    # life time distribution of each group size
    for a_, P_ in zip(new_alpha, new_P):

        if P_ > 0:
            g_ = a_ / P_
        else:
            g_ = 0

        for m in range(min_group_size, max_group_size + 1):
            lambda_m = m * g_ * (1 - P_) + 2 * a_ * m * (N - m) / (N - 1.0)
            distros[m - min_group_size].append(lambda_m * np.exp(-lambda_m * tau))

    # compute the mean distributions as time integrals over the
    # per-time-point distributions
    mean_distros = []
    for m in range(min_group_size, max_group_size + 1):
        dist = np.array(distros[m - min_group_size])
        mean_distros.append(
            np.trapz(dist, x=new_t, axis=0) / (new_t[-1] - new_t[0]))

    return mean_distros
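# Continuing the example above (illustrative values): mean life time
# densities for group sizes 2 through 5, plotted on a log-log scale.

import matplotlib.pyplot as pl

distros = flockwork_P_group_life_time_distributions_for_varying_alpha_beta(
        tau, max_group_size=5, N=100, k_initial=1.0,
        t=t, alpha=alpha, beta=beta, tmax=100.0)

for m, P_tau in enumerate(distros, start=2):
    pl.loglog(tau, P_tau, label='group size m = %d' % m)
pl.legend()
pl.show()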
if __name__ == "__main__":

    import matplotlib.pyplot as pl

    orig = tc.load_json_taco('~/.tacoma/ht09.taco')
    orig_binned = tc.bin(orig, 20.)

    result = tc.measure_group_sizes_and_durations(orig_binned)

    n_bins = 100

    durations = np.array(result.group_durations[1]) / 3600.

    bins = np.append([0.], np.logspace(log10(durations.min())-1, log10(durations.max()), n_bins))

    x, y = get_ccdf(durations)
    y_sampled = tc.sample_a_function(x, y, bins)

    print("====== HEAD ======")
    print("original", x[:4], y[:4])
    print("sampled", bins[:4], y_sampled[:4])
    print("====== TAIL ======")
    print("original", x[-4:], y[-4:])
    print("sampled", bins[-4:], y_sampled[-4:])

    fig, ax = pl.subplots(1, 2)

    ax[0].step(x, y, where='post')
    ax[0].plot(bins, y_sampled)

    pl.show()