コード例 #1
0
ファイル: ccdf.py プロジェクト: rmhilman/tacoma
def compute_ccdfs(binned_temporal_network,max_group,time_normalization_factor=1./3600.,n_bins=50,logarithmic_bins=False):
    """Compute complementary cumulative distribution functions (CCDFs)
    for a binned temporal network.

    Produces resampled CCDFs of the mean degree, the aggregated
    group-size distribution, the contact durations, and the
    group-life durations for every group size from 1 to ``max_group``.

    Parameters
    ----------
    binned_temporal_network :
        A tacoma temporal network, already binned in time.
    max_group : int
        Largest group size for which a group-duration CCDF is computed.
    time_normalization_factor : float, default 1/3600
        Factor applied to all durations (e.g. seconds -> hours).
    n_bins : int, default 50
        Number of points used when resampling each CCDF.
    logarithmic_bins : bool, default False
        If True, resample on logarithmically spaced points.

    Returns
    -------
    xs, ys : list
        ``xs[0], ys[0]``: degree CCDF; ``ys[1]``: group-size
        distribution (``xs[1]`` is empty); ``xs[2], ys[2]``:
        contact-duration CCDF; the remaining ``max_group`` entries are
        group-duration CCDFs for sizes ``1 .. max_group``.
    """

    t_fw, k_fw = tc.mean_degree(binned_temporal_network)

    # sampling points for the degree CCDF; prepend 0 so the CCDF is
    # anchored at the origin (logspace needs strictly positive minima)
    if logarithmic_bins:
        bins = np.append([0.],np.logspace(log10(k_fw[k_fw>0.0].min())-0.1,log10(k_fw.max()),n_bins) )
    else:
        bins = np.append([0.],np.linspace(k_fw[k_fw>0.0].min(), k_fw.max(), n_bins) )

    x_k, y_k = get_ccdf(k_fw)
    y_k = tc.sample_a_function(x_k, y_k, bins)
    x_k = bins

    result = tc.measure_group_sizes_and_durations(binned_temporal_network)

    # drop the zero-size entry of the aggregated histogram
    grp_sizes = np.array(result.aggregated_size_histogram[1:])
    m = np.arange(1,len(grp_sizes)+1)
    m, grp_sizes = get_ccdf_from_distribution(m, grp_sizes)

    durations = np.array(result.contact_durations) * time_normalization_factor

    if logarithmic_bins:
        bins = np.append([0.],np.logspace(log10(durations.min())-0.1,log10(durations.max()),n_bins) )
    else:
        bins = np.append([0.],np.linspace(durations.min(), durations.max(), n_bins) )

    x_contact, y_contact = get_ccdf(durations)
    y_contact = tc.sample_a_function(x_contact, y_contact, bins)
    x_contact = bins

    y_groups = []
    x_groups = []

    for group_size in range(1,max_group+1):
        durations = np.array(result.group_durations[group_size]) * time_normalization_factor

        # too few samples to produce a meaningful CCDF
        if len(durations) <= 2:
            x = []
            y = []
        else:
            if logarithmic_bins:
                bins = np.append([0.],np.logspace(log10(durations.min())-0.1,log10(durations.max()),n_bins) )
            else:
                bins = np.append([0.],np.linspace(durations.min(), durations.max(), n_bins) )

            x, y = get_ccdf(durations)
            # BUGFIX: resample this group's own CCDF (x, y).  Previously
            # the contact-duration CCDF (x_contact, y_contact) was
            # resampled here by mistake, so every group-duration curve
            # was a copy of the contact-duration curve.
            y = tc.sample_a_function(x, y, bins)
            x = bins

        x_groups.append(x)
        y_groups.append(y)

    xs = [x_k, [], x_contact ] + x_groups
    ys = [y_k, grp_sizes, y_contact ] + y_groups

    return xs, ys
コード例 #2
0
def estimate_dynamic_RGG_args(sampled_or_binned_temporal_network,
                              dt = None,
                              periodic_boundary_conditions_for_link_building = True,
                              group_sizes_and_durations = None,
                              ):
    """Estimate arguments of a dynamic random geometric graph (RGG)
    model such that it reproduces basic statistics of the given
    temporal network.

    Parameters
    ----------
    sampled_or_binned_temporal_network :
        A tacoma temporal network sampled/binned at constant time steps.
    dt : float, optional
        Duration of one time step.  If None, it is inferred from the
        first two entries of the network's time array.
    periodic_boundary_conditions_for_link_building : bool, default True
        Only ``True`` has been implemented so far.
    group_sizes_and_durations : optional
        Precomputed result of ``tc.measure_group_sizes_and_durations``;
        computed here if None.

    Returns
    -------
    dict
        Keyword arguments ``N``, ``t_run_total``, ``mean_link_duration``
        and ``critical_density`` for the dynamic RGG model.

    Raises
    ------
    ValueError
        If ``periodic_boundary_conditions_for_link_building`` is False.
    """

    if not periodic_boundary_conditions_for_link_building:
        raise ValueError('So far, only parameter estimation for periodic_boundary_conditions_for_link_building = True has been implemented')

    if group_sizes_and_durations is not None:
        result = group_sizes_and_durations
    else:
        result = tc.measure_group_sizes_and_durations(sampled_or_binned_temporal_network)

    mean_m = tc.mean_group_size(result)
    log_y = np.log(mean_m)

    # quadratic fit a*x^2 + b*x + c of log(mean group size) vs. density;
    # shift the constant term so the polynomial's root corresponds to
    # the observed mean group size
    params = [0.7895411,  1.28318048, 0.0]
    params[-1] -= log_y

    def get_root(p):
        """Return the larger real root of a*x**2 + b*x + c = 0 for
        coefficients ``p = (a, b, c)``, or None if the discriminant is
        negative."""
        # BUGFIX: unpack the argument `p` instead of silently closing
        # over `params` (behavior is unchanged for the call below,
        # which passes `params` itself, but the helper is now honest
        # about its input).
        a, b, c = p
        r = b**2 - 4*a*c

        if r >= 0:
            r1 = (-b + np.sqrt(r)) / (2.0*a)
            r2 = (-b - np.sqrt(r)) / (2.0*a)
            return max(r1,r2)
        else:
            return None

    density = get_root(params)

    if dt is None:
        # infer the sampling interval from the first two time points
        dt = sampled_or_binned_temporal_network.t[1] - sampled_or_binned_temporal_network.t[0]

    # mean contact duration expressed in units of time steps
    mean_link_duration = np.array(result.contact_durations).mean() / dt

    kwargs = {}
    kwargs['N'] = sampled_or_binned_temporal_network.N
    kwargs['t_run_total'] = len(sampled_or_binned_temporal_network.t)
    kwargs['mean_link_duration'] = mean_link_duration
    kwargs['critical_density'] = density

    return kwargs
コード例 #3
0
ファイル: ccdf.py プロジェクト: rmhilman/tacoma
def compute_all_bins(binned_temporal_network,max_group,time_normalization_factor=1./3600.,n_bins=50,logarithmic_bins=False):
    """Compute binned probability densities for a binned temporal
    network: mean degree, aggregated group sizes, contact durations,
    and group-life durations for every group size from 1 to
    ``max_group``.

    Parameters
    ----------
    binned_temporal_network :
        A tacoma temporal network, already binned in time.
    max_group : int
        Largest group size for which a duration histogram is computed.
    time_normalization_factor : float, default 1/3600
        Factor applied to all durations (e.g. seconds -> hours).
    n_bins : int, default 50
        Number of bins per histogram.
    logarithmic_bins : bool, default False
        If True, use logarithmically spaced bin edges.

    Returns
    -------
    xs, ys : list
        ``xs[0], ys[0]``: degree density; ``ys[1]``: group-size
        distribution (``xs[1]`` is empty); ``xs[2], ys[2]``:
        contact-duration density; the remaining entries are
        group-duration densities for sizes ``1 .. max_group``.
    """

    t_fw, k_fw = tc.mean_degree(binned_temporal_network)

    if logarithmic_bins:
        # logspace needs strictly positive values
        k_fw = k_fw[k_fw>0]
        bins = np.logspace(log10(k_fw.min()),log10(k_fw.max()),n_bins+1)
    else:
        # an int lets np.histogram choose equal-width bins itself
        bins = n_bins

    y_k, x_k = np.histogram(k_fw,bins=bins,density=True)
    x_k = get_bin_means(x_k,logarithmic_bins)

    result = tc.measure_group_sizes_and_durations(binned_temporal_network)

    # drop the zero-size entry and the trailing all-zero tail
    grp_sizes = np.array(result.aggregated_size_histogram[1:])
    max_ndx = np.where(grp_sizes>0)[0][-1]
    grp_sizes = grp_sizes[:max_ndx+1]

    durations = np.array(result.contact_durations) * time_normalization_factor
    if logarithmic_bins:
        bins = np.logspace(log10(durations.min()),log10(durations.max()),n_bins+1)
    else:
        bins = n_bins
    # BUGFIX: pass the bin edges computed above.  Previously `bins=n_bins`
    # was passed here, silently discarding the logarithmic bin edges.
    y_contact, x_contact = np.histogram(durations,bins=bins,density=True)
    x_contact = get_bin_means(x_contact,logarithmic_bins)

    y_groups = []
    x_groups = []

    for group_size in range(1,max_group+1):
        durations = np.array(result.group_durations[group_size]) * time_normalization_factor

        # adapt the bin count to the sample size (sqrt rule), capped at n_bins
        n = int(min([np.sqrt(len(durations)),n_bins]))

        if len(durations) <= 6:
            # too few samples for a meaningful histogram
            x = []
            y = []
        else:
            if logarithmic_bins:
                bins = np.logspace(log10(durations.min()),log10(durations.max()),n)
            else:
                # NOTE(review): the sample-size-adapted `n` is only used
                # for logarithmic bins; linear binning keeps n_bins --
                # confirm whether that asymmetry is intentional.
                bins = n_bins
            y, x = np.histogram(durations,bins=bins,density=True)
            x = get_bin_means(x,logarithmic_bins)

        x_groups.append(x)
        y_groups.append(y)

    xs = [x_k, [], x_contact ] + x_groups
    ys = [y_k, grp_sizes, y_contact ] + y_groups

    return xs, ys
コード例 #4
0
ファイル: test_DTU_FW_P_SIS.py プロジェクト: rmhilman/tacoma
from tacoma.drawing import draw_edges
from tacoma.analysis import temporal_network_group_analysis
import matplotlib.pyplot as pl

from tacoma.model_conversions import estimate_ZSBB_args
from tacoma.model_conversions import estimate_flockwork_P_args
from tacoma.model_conversions import estimate_dynamic_RGG_args

import time
import numpy as np

# ======== get original network =============
# Load one week of DTU contact data and bin it into 300 s (5 min) slices.
socio = tc.load_json_taco("~/.tacoma/dtu_1_weeks.taco")
socio_binned = tc.bin(socio, dt=300)

# group-size/duration statistics of the (unbinned) original network
socio_result = tc.measure_group_sizes_and_durations(socio)

# ============== generate surrogate network from flockwork_P model ==============
# NOTE(review): this first estimate/simulation pair is immediately
# overwritten by the plain flockwork_P run below -- confirm whether the
# neighbor-affinity surrogate is still needed here.
fwP_params = estimate_flockwork_P_args(
    socio_binned, dt=3600., aggregated_network=socio_result.aggregated_network)
fwP = tc.flockwork_P_varying_rates_neighbor_affinity(**fwP_params)

fwP_params = estimate_flockwork_P_args(socio_binned, dt=3600.)
fwP = tc.flockwork_P_varying_rates(**fwP_params)

# bin the surrogate at the same 300 s resolution as the original
fwP_binned = tc.bin(fwP, dt=300)

N = fwP.N

# epidemic parameters: basic reproduction number and recovery rate
# (rho corresponds to one recovery per three days, in 1/s);
# presumably consumed by SIS-simulation code further down the file
R0 = 2.0
rho = 1.0 / (3 * 24 * 3600.)
コード例 #5
0
import tacoma as tc
from tacoma.drawing import draw_edges
from tacoma.analysis import temporal_network_group_analysis
import matplotlib.pyplot as pl

from tacoma.model_conversions import estimate_ZSBB_args
from tacoma.model_conversions import estimate_flockwork_P_args
from tacoma.model_conversions import estimate_dynamic_RGG_args

# ======== get original network =============
# Load the SocioPatterns "Hypertext 2009" data set and bin to 20 s slices.
socio = tc.load_sociopatterns_hypertext_2009()
socio_binned = tc.bin(socio, dt=20.)

# ========= plot properties ==============
socio_result = tc.measure_group_sizes_and_durations(socio)
fig, ax, data = temporal_network_group_analysis(socio_result,
                                                time_unit=socio.time_unit)
fig.tight_layout()
# overlay the edge activity trajectories on the fourth panel
traj = tc.get_edge_trajectories(socio)
draw_edges(traj.trajectories, ax=ax[3])

# ========== generate surrogate from ZSBB model ======
ZSBB_params = estimate_ZSBB_args(socio, group_sizes_and_durations=socio_result)
# manual overrides of the estimated model parameters
ZSBB_params['b0'] = 0.51
ZSBB_params['b1'] = 1.0
ZSBB_params['lambda'] = 0.9

zsbb = tc.ZSBB_model(**ZSBB_params)
# shift the simulation's time axis so that it starts at t = 0
this_t0 = zsbb.t0
zsbb.t0 = 0.
zsbb.tmax -= this_t0
コード例 #6
0
from tacoma.model_conversions import estimate_flockwork_P_args
from tacoma.model_conversions import estimate_dynamic_RGG_args

import numpy as np

# global matplotlib font configuration for the figures below
# NOTE(review): `matplotlib`, `pl`, `tc` and
# `temporal_network_group_analysis` are not imported in this chunk --
# they must be imported elsewhere in the file.
SIZE = 10 
matplotlib.rc('font', size=SIZE,family='Arial')
matplotlib.rc('axes', titlesize=SIZE)
matplotlib.rc('mathtext', default='regular') 

# ======== get original network =============
# Load one week of DTU contact data and bin it into 300 s slices.
socio = tc.load_json_taco("~/.tacoma/dtu_1_weeks.taco")
socio_binned = tc.bin(socio,dt=300)

# ========= plot properties ==============
socio_result = tc.measure_group_sizes_and_durations(socio)
# durations are converted from seconds to hours for plotting
fig, ax, data = temporal_network_group_analysis(socio_result,time_normalization_factor = 1/3600.,time_unit='h')

new_fig, new_ax = pl.subplots(1,1,figsize=(4.5,3))

# replot the group-size histogram on its own, log-scaled axes
x_group, y_group = data['size_histogram']
new_ax.plot(x_group,y_group,'o',
                ms=4,
                mew=1,
                mfc='None',
                label = 'DTU one week',
            )
new_ax.legend()
new_ax.set_xlim([0.8,200])
new_ax.set_ylim([1e-4,1e3])
new_ax.set_xscale("log")
コード例 #7
0
ファイル: ccdf.py プロジェクト: rmhilman/tacoma
        x_groups.append(x)
        y_groups.append(y)

    xs = [x_k, [], x_contact ] + x_groups
    ys = [y_k, grp_sizes, y_contact ] + y_groups

    return xs, ys


if __name__ == "__main__":

    import matplotlib.pyplot as pl

    orig = tc.load_json_taco('~/.tacoma/ht09.taco')
    orig_binned = tc.bin(orig,20.)
    result = tc.measure_group_sizes_and_durations(orig_binned)

    n_bins = 100

    durations = np.array(result.group_durations[1]) / 3600.

    bins = np.append([0.],np.logspace(log10(durations.min())-1,log10(durations.max()),n_bins) )

    x, y = get_ccdf(durations)
    y_sampled = tc.sample_a_function(x,y,bins)

    print("====== HEAD ======")

    print("original", x[:4], y[:4])
    print("sampled", bins[:4], y_sampled[:4])
コード例 #8
0
# Build a tiny hand-crafted test network with 5 nodes and 4 time frames.
# NOTE(review): `L` is created earlier in this file (outside this chunk),
# presumably as an edge_lists instance -- confirm.
L.N = 5
L.t = [0., 1., 2., 3. ]
L.tmax = 4.
L.edges = [ 
            [],
            [
                (0, 1), (2,3), (3, 4), (2, 4),
            ],
            [
                (2,3), (3, 4), (2, 4),
            ],
            [],
        ]

# convert to the complementary representation and verify that both
# representations yield identical group statistics
C = tc.convert(L)

L_result = tc.measure_group_sizes_and_durations(L)
C_result = tc.measure_group_sizes_and_durations(C)

for res in [L_result, C_result]:
    contact_durations = res.contact_durations
    pair_durations = res.group_durations[2]

    # compare printed values against the expected results noted below
    print("contact durations =", contact_durations)
    print("(should be [ 1.0, 2.0, 2.0, 2.0])")
    print("pair durations =", pair_durations)
    print("(should be [ 1.0 ])")
    print()

コード例 #9
0
ファイル: analysis.py プロジェクト: rmhilman/tacoma
            ax.plot(t * time_normalization_factor, [group, group], '-k')

    xlabel = 'time'
    if time_unit is not None:
        xlabel += '[' + time_unit + ']'
    ax.set_xlabel(xlabel)
    ax.set_ylabel('group id')


if __name__ == "__main__":

    import tacoma as tc

    # let's simulate a similar dynamic RGG
    RGG_edge_lists = _tacoma.dynamic_RGG(
        N=412,
        t_run_total=int(24 * 2 * 3600 / 300),  # 2 days
        mean_link_duration=5.,
        periodic_boundary_conditions_for_link_building=False,
        record_sizes_and_durations=False,
        # verbose = True)
        seed=2335)

    RGG_result = tc.measure_group_sizes_and_durations(RGG_edge_lists)

    fig, ax, data = temporal_network_group_analysis(
        RGG_result, time_normalization_factor=300. / 3600., time_unit='h')
    fig.tight_layout()

    pl.show()
コード例 #10
0
def estimate_ZSBB_args(temporal_network,
                       group_sizes_and_durations = None,
                       fit_discrete = False,
                       dt = None,
                       ):
    """Estimate arguments for the ZSBB model from a temporal network.

    Fits power-law exponents of the group-duration distributions to
    obtain ``b1`` (from pair durations) and ``b0``/``lambda`` (via a
    numerical minimization matching the mean coordination number and
    the inter-contact exponent).

    Parameters
    ----------
    temporal_network :
        A tacoma temporal network.
    group_sizes_and_durations : optional
        Precomputed result of ``tc.measure_group_sizes_and_durations``;
        computed here if None.
    fit_discrete : bool, default False
        If True, treat durations as integer multiples of ``dt`` and fit
        a discrete power law.
    dt : float, optional
        Time step used to normalize durations; required when
        ``fit_discrete`` is True.

    Returns
    -------
    dict
        Keyword arguments for ``tc.ZSBB_model`` (``b0``, ``b1``,
        ``lambda``, ``E``, ``N``, equilibration settings, and
        ``max_edge_events_to_end_simulation``).

    Raises
    ------
    ValueError
        If ``fit_discrete`` is True but no ``dt`` is given.
    """

    if fit_discrete and dt is None:
        raise ValueError('If the data is supposed to be treated as discrete, a value for `dt` must be provided in order to norm the group size durations.')
    elif not fit_discrete:
        # continuous fit: durations are used without normalization
        dt = 1.

    result = group_sizes_and_durations
    if result is None:
        result = tc.measure_group_sizes_and_durations(temporal_network)

    m_in, m_out, m = tc.edge_counts(temporal_network)

    # total number of edge (de)activation events; used below as the
    # stopping criterion for the surrogate simulation
    max_edge_events = sum(m_in) + sum(m_out)

    N = temporal_network.N

    # let's estimate b1 with the distribution of group_durations (pairs)
    #group_size = 2
    #b_ones = []
    #while group_size <= N: 
    #    values = result.group_durations[group_size] 
    #    alpha_1, err, xmin = fit_power_law_clauset(values)
    #    if (err/alpha_1)>0.05:
    #        break
    #    b_ones.append((alpha_1-1)/group_size)
    #    print b_ones
    #    group_size += 1
    #b1 = np.mean(b_ones)
    #print alpha_1

    values = result.group_durations[2] 
    alpha_1, err, xmin = fit_power_law_clauset(values)
    # invert the fitted pair-duration exponent: alpha_1 = 2*b1 + 1
    b1 = (alpha_1 - 1) / 2.0
    # clamp b1 into (0.5, 1]
    if b1 < 0.5:
        b1 = 0.51
    elif b1 > 1:
        b1 = 1.0

    # let's estimate b0 with the distribution of inter-contact durations

    if fit_discrete:
        # express durations in units of dt; shift by +1 so the discrete
        # fit's support starts at 1
        values = np.array(result.group_durations[1]) / dt
        values = np.array(values,dtype=int)
        alpha_0, err, xmin = fit_power_law_clauset(values+1,discrete=True)
    else:
        alpha_0, err, xmin = fit_power_law_clauset(result.group_durations[1])

    mean_n = mean_coordination_number(result)

    def equations(p):
        # residuals for a root-finding approach; currently unused (see
        # the commented-out fsolve call below)
        b0, lam = p
        n = ZSBB_mean_coordination_number(b0,lam,N,b1)
        al0 = ZSBB_b0_func(b0,lam)
        return (n - mean_n, al0 - alpha_0)

    def cost(p):
        # relative-error cost matching both the mean coordination number
        # and the inter-contact exponent simultaneously
        b0, lam = p
        n = ZSBB_mean_coordination_number(b0,lam,N,b1)
        al0 = ZSBB_b0_func(b0,lam)
        return np.abs((n - mean_n)/mean_n) + np.abs((al0 - alpha_0)/alpha_0)

    #b0, lam = fsolve(equations,(0.7,0.7))
    res = minimize(cost,(0.7,0.7))
    b0, lam = res.x

    # clamp both fitted parameters into (0.5, 1]
    if lam < 0.5:
        lam = 0.51
    elif lam > 1:
        lam = 1.0

    if b0 < 0.5:
        b0 = 0.51
    elif b0 > 1:
        b0 = 1.0

    # enforce b0 > (2*lam-1)/(3*lam-1); presumably a validity condition
    # of the ZSBB model -- confirm against the model's derivation
    if b0 <= (2*lam-1) / (3*lam-1.0):
        b0 = (2*lam-1) / (3*lam-1.0) + 0.01


    kwargs = {}
    kwargs['b0'] = b0
    kwargs['b1'] = b1
    kwargs['lambda'] = lam

    #temporal_network = _get_raw_temporal_network(temporal_network)

    # start from an empty edge list and return only the equilibrated part
    kwargs['E'] = []
    kwargs['N'] = N
    kwargs['return_after_equilibration_only'] = True
    kwargs['t_equilibration'] = float(10000*N)
    kwargs['max_edge_events_to_end_simulation'] = max_edge_events

    return kwargs
コード例 #11
0

if __name__ == "__main__":
    import tacoma as tc
    import matplotlib.pyplot as pl
    from tacoma.analysis import temporal_network_group_analysis
    
    # THESE TESTS ARE DEPRECATED

    test = tc.dtu_week()
    rewiring_rate = test.gamma
    P = test.P

    fw = tc.flockwork_P_varying_rates([],100,P,24*3600,rewiring_rate,tmax=24*3600*7)
    fw_binned = tc.sample(fw,dt=300)
    fw_binned_result = tc.measure_group_sizes_and_durations(fw_binned)

    kwargs = get_ZSBB_parameters(fw_binned,fw_binned_result,fit_discrete=True,dt=300.)
    print("lambda =", kwargs['lambda'])
    print("b0 =", kwargs['b0'])
    print("b1 =", kwargs['b1'])
    kwargs['t_run_total'] = (len(fw_binned.t) + 1)*kwargs['N']
    zsbb = tc.ZSBB_model(**kwargs)
    zsbb_binned = tc.sample(zsbb,dt=kwargs['N'])
    zsbb_binned_result = tc.measure_group_sizes_and_durations(zsbb_binned)


    fig, ax, data = temporal_network_group_analysis(fw_binned_result)
    temporal_network_group_analysis(zsbb_binned_result,
                                    time_normalization_factor = 300./kwargs['N'],
                                    ax=ax)
コード例 #12
0
    return tn_b


# ============ HT 09 ==============

# Load the Hypertext 2009 data set, bin it, and export edge trajectories
# for the web visualization.
# NOTE(review): `tc`, `get_prepared_network` and
# `estimate_flockwork_P_args` are defined/imported outside this chunk.
tn = tc.load_json_taco("~/.tacoma/ht09.taco")
tn_b = get_prepared_network(tn, dt=20)

tc.write_edge_trajectory_coordinates(
    tn_b,
    "~/Sites/tacoma/data/ht09_edge_trajectories.json",
    filter_for_duration=0)
tc.write_json_taco(tn_b, "~/Sites/tacoma/data/ht09_binned.taco")

# aggregated (weighted) network used for the neighbor-affinity surrogate
aggregated_network = tc.measure_group_sizes_and_durations(
    tn).aggregated_network
fw_params = estimate_flockwork_P_args(tn,
                                      dt=120,
                                      k_over_k_real_scaling=2.05,
                                      aggregated_network=aggregated_network,
                                      ensure_empty_network=True,
                                      adjust_last_bin_if_dt_does_not_fit=True)

# simulate the surrogate and export it the same way as the original
fw = tc.flockwork_P_varying_rates_neighbor_affinity(**fw_params)
fw_b = get_prepared_network(fw, 20)
tc.write_edge_trajectory_coordinates(
    fw_b,
    "~/Sites/tacoma/data/fw_ht09_edge_trajectories.json",
    filter_for_duration=0)
tc.write_json_taco(fw_b, "~/Sites/tacoma/data/fw_ht09_binned.taco")