Exemplo n.º 1
0
def compute_ccdfs(binned_temporal_network,max_group,time_normalization_factor=1./3600.,n_bins=50,logarithmic_bins=False):
    """Compute complementary CDFs (CCDFs) for a binned temporal network.

    Produces resampled CCDFs of (1) the mean degree, (2) the aggregated
    group-size distribution, (3) contact durations, and (4) group durations
    for every group size up to ``max_group``.

    Parameters
    ----------
    binned_temporal_network : tacoma temporal network
        The (binned) temporal network to analyze.
    max_group : int
        Largest group size for which a group-duration CCDF is computed.
    time_normalization_factor : float, default 1/3600
        Multiplied onto all durations (e.g. seconds -> hours).
    n_bins : int, default 50
        Number of points the CCDFs are resampled on.
    logarithmic_bins : bool, default False
        If True, resample on logarithmically spaced points.

    Returns
    -------
    xs, ys : (list, list)
        ``xs = [x_degree, [], x_contact, x_group_1, ..., x_group_max]`` and
        ``ys = [y_degree, grp_size_ccdf, y_contact, y_group_1, ...]``.
        The second x-entry is an empty placeholder (group sizes are implied
        by index); groups with too few samples get empty lists.
    """

    t_fw, k_fw = tc.mean_degree(binned_temporal_network)

    # Sampling points for the degree CCDF; prepend 0 so the CCDF starts at 1.
    if logarithmic_bins:
        bins = np.append([0.], np.logspace(log10(k_fw[k_fw>0.0].min())-0.1, log10(k_fw.max()), n_bins))
    else:
        bins = np.append([0.], np.linspace(k_fw[k_fw>0.0].min(), k_fw.max(), n_bins))

    x_k, y_k = get_ccdf(k_fw)
    y_k = tc.sample_a_function(x_k, y_k, bins)
    x_k = bins

    result = tc.measure_group_sizes_and_durations(binned_temporal_network)

    # Group-size CCDF from the aggregated histogram (sizes m = 1..len).
    grp_sizes = np.array(result.aggregated_size_histogram[1:])
    m = np.arange(1, len(grp_sizes)+1)
    m, grp_sizes = get_ccdf_from_distribution(m, grp_sizes)

    durations = np.array(result.contact_durations) * time_normalization_factor

    if logarithmic_bins:
        bins = np.append([0.], np.logspace(log10(durations.min())-0.1, log10(durations.max()), n_bins))
    else:
        bins = np.append([0.], np.linspace(durations.min(), durations.max(), n_bins))

    x_contact, y_contact = get_ccdf(durations)
    y_contact = tc.sample_a_function(x_contact, y_contact, bins)
    x_contact = bins

    y_groups = []
    x_groups = []

    for group_size in range(1, max_group+1):
        durations = np.array(result.group_durations[group_size]) * time_normalization_factor

        if len(durations) <= 2:
            # Too few samples to build a meaningful CCDF.
            x = []
            y = []
        else:
            if logarithmic_bins:
                bins = np.append([0.], np.logspace(log10(durations.min())-0.1, log10(durations.max()), n_bins))
            else:
                bins = np.append([0.], np.linspace(durations.min(), durations.max(), n_bins))

            x, y = get_ccdf(durations)
            # BUGFIX: resample THIS group's CCDF (x, y). The original code
            # resampled (x_contact, y_contact), so every group-duration curve
            # was just the contact-duration CCDF on different bins.
            y = tc.sample_a_function(x, y, bins)
            x = bins

        x_groups.append(x)
        y_groups.append(y)

    xs = [x_k, [], x_contact] + x_groups
    ys = [y_k, grp_sizes, y_contact] + y_groups

    return xs, ys
Exemplo n.º 2
0
def compute_all_bins(binned_temporal_network,max_group,time_normalization_factor=1./3600.,n_bins=50,logarithmic_bins=False):
    """Compute histograms (densities) for a binned temporal network.

    Produces normalized histograms of (1) the mean degree, (2) the aggregated
    group-size distribution, (3) contact durations, and (4) group durations
    for every group size up to ``max_group``.

    Parameters
    ----------
    binned_temporal_network : tacoma temporal network
        The (binned) temporal network to analyze.
    max_group : int
        Largest group size for which a group-duration histogram is computed.
    time_normalization_factor : float, default 1/3600
        Multiplied onto all durations (e.g. seconds -> hours).
    n_bins : int, default 50
        Number of histogram bins.
    logarithmic_bins : bool, default False
        If True, use logarithmically spaced bin edges.

    Returns
    -------
    xs, ys : (list, list)
        ``xs = [x_degree, [], x_contact, x_group_1, ..., x_group_max]`` and
        ``ys = [y_degree, grp_sizes, y_contact, y_group_1, ...]``.
        The second x-entry is an empty placeholder (group sizes are implied
        by index); groups with too few samples get empty lists.
    """

    t_fw, k_fw = tc.mean_degree(binned_temporal_network)

    if logarithmic_bins:
        # Drop zero degrees: log-spaced edges need strictly positive bounds.
        k_fw = k_fw[k_fw>0]
        bins = np.logspace(log10(k_fw.min()), log10(k_fw.max()), n_bins+1)
    else:
        bins = n_bins

    y_k, x_k = np.histogram(k_fw, bins=bins, density=True)
    x_k = get_bin_means(x_k, logarithmic_bins)

    result = tc.measure_group_sizes_and_durations(binned_temporal_network)

    # Trim trailing zero entries from the aggregated group-size histogram.
    grp_sizes = np.array(result.aggregated_size_histogram[1:])
    max_ndx = np.where(grp_sizes>0)[0][-1]
    grp_sizes = grp_sizes[:max_ndx+1]

    durations = np.array(result.contact_durations) * time_normalization_factor
    if logarithmic_bins:
        bins = np.logspace(log10(durations.min()), log10(durations.max()), n_bins+1)
    else:
        bins = n_bins
    # BUGFIX: pass the computed `bins` (was `bins=n_bins`, which silently
    # discarded the logarithmic edges built just above).
    y_contact, x_contact = np.histogram(durations, bins=bins, density=True)
    x_contact = get_bin_means(x_contact, logarithmic_bins)

    y_groups = []
    x_groups = []

    for group_size in range(1, max_group+1):
        durations = np.array(result.group_durations[group_size]) * time_normalization_factor

        # Fewer bins for sparsely sampled groups (~sqrt rule, capped at n_bins).
        n = int(min([np.sqrt(len(durations)), n_bins]))

        if len(durations) <= 6:
            # Too few samples for a meaningful histogram.
            x = []
            y = []
        else:
            if logarithmic_bins:
                bins = np.logspace(log10(durations.min()), log10(durations.max()), n)
            else:
                bins = n_bins
            y, x = np.histogram(durations, bins=bins, density=True)
            x = get_bin_means(x, logarithmic_bins)

        x_groups.append(x)
        y_groups.append(y)

    xs = [x_k, [], x_contact] + x_groups
    ys = [y_k, grp_sizes, y_contact] + y_groups

    return xs, ys
Exemplo n.º 3
0
def estimated_mean_group_size_distribution(temporal_network):
    """Estimate the time-averaged group size distribution of a temporal
    network, assuming it is reasonably described by a flockwork-P model.

    Parameters
    ----------
    temporal_network : :mod:`edge_changes` or :mod:`edge_lists`
        A temporal network.

    Returns
    -------
    mean_distribution : numpy.array
        The average group size distribution of the temporal network which is closer to
        to the _true_ group size distribution than measuring over the binned system.
        The result is an array of length `N` with its `i`-th entry containing the mean number of
        groups of size `m = i + 1`.
    """

    time_points, mean_degrees = tc.mean_degree(temporal_network)
    N = temporal_network.N

    # Invert the equilibrium relation k = P / (1 - P) to get P = k / (k + 1).
    adjusted_P = mean_degrees / (mean_degrees + 1)

    # Equilibrium group size distribution at every time point (drop the
    # zero-size entry so index i corresponds to group size m = i + 1).
    distro = np.array([
        flockwork_P_equilibrium_group_size_distribution(N, P_)[1:]
        for P_ in adjusted_P
    ])

    # Time-average: integrate over t and divide by the total duration.
    total_duration = time_points[-1] - time_points[0]
    return np.trapz(distro, x=time_points, axis=0) / total_duration
Exemplo n.º 4
0
# Set up parameters for epidemic sampling on three temporal networks
# (empirical `socio`, flockwork-P model `fwP`, and its binned version).
N = fwP.N

R0 = 2.0                          # basic reproduction number
rho = 1.0 / (3 * 24 * 3600.)      # rate with units 1/s; presumably recovery rate (1 / 3 days) -- verify
dt = 3600.                        # sampling resolution: one hour, in seconds
t_simulation = 4 * fwP.tmax       # simulate four times the network's duration
t_sample = np.arange(int(t_simulation / dt) + 1, dtype=float) * dt

N_meas = 30                       # number of repeated measurements per network

fig, ax = pl.subplots(1, 3, figsize=(12, 4))

for tn in [socio, fwP, fwP_binned]:
    # Time how long the mean-degree computation takes.
    start = time.time()
    t, k = np.array(tc.mean_degree(tn))
    end = time.time()

    print("took", end - start, "seconds")

    line, = ax[0].step(t, k, where='post', lw=1)

    mean_k = tc.time_average(t, k)
    print(mean_k)
    # Infection rate chosen so that R0 = eta * <k> / rho.
    eta = R0 * rho / mean_k

    # Accumulator for the sampled prevalence at each t_sample point.
    i_sample = np.zeros_like(t_sample)

    successful = 0

    # NOTE(review): loop body continues beyond this excerpt.
    for meas in range(N_meas):
Exemplo n.º 5
0
    k_scaling = tc.estimate_k_scaling_gradient_descent(
        orig,
        dt_for_inference=dt_for_inference,
        dt_for_binning=dt_binning,
        measurements_per_configuration=20,
        learning_rate=0.5,
        relative_error=1e-2,
    )
else:
    k_scaling = 5

from tacoma.model_conversions import estimate_flockwork_P_args
import matplotlib.pyplot as pl
import numpy as np

# Mean degree of the original network, binned at dt_binning.
t_orig, k_orig = tc.mean_degree(tc.bin(orig, dt_binning))

# Two panels with shared axes; plot the original curve in both so each
# model variant below can be compared against it.
fig, ax = pl.subplots(1, 2, sharex=True, sharey=True)
ax[0].plot(t_orig, k_orig, label='original')
ax[1].plot(t_orig, k_orig, label='original')

n_k_meas = 6  # number of repeated model estimations per scaling factor

for iscl, scaling in enumerate([k_scaling, 1.0]):

    these_ks = []

    for meas in range(n_k_meas):

        args = estimate_flockwork_P_args(
            orig,
Exemplo n.º 6
0
import tacoma as tc
import epipack as epk

# load DTU data as edge_changes
dtu = tc.load_json_taco('~/.tacoma/dtu_1_weeks.taco')

# convert to edge_lists
dtu = tc.convert(dtu)

# time-averaged mean degree over the whole observation window
k = tc.time_average(*tc.mean_degree(dtu), tmax=dtu.tmax)

# epidemic parameters: infection rate chosen so that R0 = beta * <k> / gamma
R0 = 3
recovery_rate = 1 / (24 * 3600)     # one recovery per day (time in seconds)
infection_rate = R0 / k * recovery_rate

tmax = 7 * 24 * 3600                # one week, in seconds (unused below; presumably for a later run -- verify)
SIS = tc.SIS(dtu.N, dtu.tmax, infection_rate, recovery_rate)