Code example #1
0
def get_prepared_network(tn, dt, time_unit='h', time_normalization_factor=1. / 3600.):
    """Prepare the provided temporal network for plotting.

    Rebins the network in discrete steps of width ``dt``, rescales its
    time axis, and labels the time unit.

    Parameters
    ----------
    tn : temporal network (tacoma network object)
        The network to prepare.
    dt : float
        Bin width passed to ``tc.bin``.
    time_unit : str, default 'h'
        Label stored on the binned network's ``time_unit`` attribute.
    time_normalization_factor : float, default 1/3600
        Factor applied to every time stamp and to ``tmax``
        (default converts seconds to hours, matching the old behavior).

    Returns
    -------
    The rebinned, rescaled network.
    """
    tn_b = tc.bin(tn, dt=dt)  # rebin the network
    # rescale the network's time axis (seconds -> hours by default)
    tn_b.t = [t * time_normalization_factor for t in tn_b.t]
    tn_b.tmax *= time_normalization_factor
    tn_b.time_unit = time_unit  # set time unit

    return tn_b
Code example #2
0
File: interactive.py — Project: rmhilman/tacoma
def _get_prepared_network(tn, dt, time_unit, time_normalization_factor):
    """Rebin the given temporal network in steps of ``dt``, rescale its
    time axis by ``time_normalization_factor``, and attach ``time_unit``
    (an empty string when ``None`` is passed)."""

    binned = tc.bin(tn, dt=dt)

    # rescale every time stamp as well as the final time
    binned.t = [time_normalization_factor * stamp for stamp in binned.t]
    binned.tmax *= time_normalization_factor

    # a missing unit is stored as the empty string
    binned.time_unit = "" if time_unit is None else time_unit

    return binned
Code example #3
0
File: test_DTU_FW_P_SIS.py — Project: rmhilman/tacoma
import tacoma as tc
from tacoma.drawing import draw_edges
from tacoma.analysis import temporal_network_group_analysis
import matplotlib.pyplot as pl

from tacoma.model_conversions import estimate_ZSBB_args
from tacoma.model_conversions import estimate_flockwork_P_args
from tacoma.model_conversions import estimate_dynamic_RGG_args

import time
import numpy as np

# ======== get original network =============
# Load the one-week DTU contact network from the local tacoma cache.
# NOTE(review): path contains '~' — presumably load_json_taco expands it; confirm.
socio = tc.load_json_taco("~/.tacoma/dtu_1_weeks.taco")
socio_binned = tc.bin(socio, dt=300)  # rebin to 5-minute (300 s) steps

# group-size and group-duration statistics of the original network
socio_result = tc.measure_group_sizes_and_durations(socio)

# ============== generate surrogate network from flockwork_P model ==============
fwP_params = estimate_flockwork_P_args(
    socio_binned, dt=3600., aggregated_network=socio_result.aggregated_network)
fwP = tc.flockwork_P_varying_rates_neighbor_affinity(**fwP_params)

# NOTE(review): fwP_params and fwP are immediately overwritten here, so the
# neighbor-affinity surrogate above is discarded — confirm this is intended.
fwP_params = estimate_flockwork_P_args(socio_binned, dt=3600.)
fwP = tc.flockwork_P_varying_rates(**fwP_params)

fwP_binned = tc.bin(fwP, dt=300)  # rebin the surrogate to the same 5-minute steps

N = fwP.N  # number of nodes in the surrogate network

R0 = 2.0  # presumably the basic reproduction number for the SIS run (script truncated here)
Code example #4
0
import tacoma as tc
from tacoma.drawing import draw_edges
from tacoma.analysis import temporal_network_group_analysis
import matplotlib.pyplot as pl

from tacoma.model_conversions import estimate_ZSBB_args
from tacoma.model_conversions import estimate_flockwork_P_args
from tacoma.model_conversions import estimate_dynamic_RGG_args

# ======== get original network =============
socio = tc.load_sociopatterns_hypertext_2009()  # built-in SocioPatterns HT09 dataset
socio_binned = tc.bin(socio, dt=20.)  # rebin to 20-second steps

# ========= plot properties ==============
socio_result = tc.measure_group_sizes_and_durations(socio)
fig, ax, data = temporal_network_group_analysis(socio_result,
                                                time_unit=socio.time_unit)
fig.tight_layout()
traj = tc.get_edge_trajectories(socio)
draw_edges(traj.trajectories, ax=ax[3])  # overlay edge activity in the fourth panel

# ========== generate surrogate from ZSBB model ======
ZSBB_params = estimate_ZSBB_args(socio, group_sizes_and_durations=socio_result)
ZSBB_params['b0'] = 0.51  # manually override the estimated model parameters
ZSBB_params['b1'] = 1.0
ZSBB_params['lambda'] = 0.9

zsbb = tc.ZSBB_model(**ZSBB_params)
# shift the surrogate's time window so it starts at t = 0
this_t0 = zsbb.t0
zsbb.t0 = 0.
zsbb.tmax -= this_t0
# NOTE(review): only t0/tmax are shifted — if zsbb.t carries absolute time
# stamps they may also need the -this_t0 shift; confirm ZSBB_model's output.
Code example #5
0
import tacoma as tc
from tacoma.drawing import draw_edges
from tacoma.analysis import temporal_network_group_analysis
import matplotlib.pyplot as pl

from tacoma.model_conversions import estimate_ZSBB_args
from tacoma.model_conversions import estimate_flockwork_P_args
from tacoma.model_conversions import estimate_dynamic_RGG_args

# ======== get original network =============
# Load the one-week DTU contact network from the local tacoma cache.
socio = tc.load_json_taco("~/.tacoma/dtu_1_weeks.taco")
socio_binned = tc.bin(socio, dt=300)  # rebin to 5-minute (300 s) steps

# ========= plot properties ==============
socio_result = tc.measure_group_sizes_and_durations(socio)
# plot with time converted from seconds to hours (factor 1/3600, unit 'h')
fig, ax, data = temporal_network_group_analysis(socio_result,
                                                time_normalization_factor=1 /
                                                3600.,
                                                time_unit='h')
fig.tight_layout()
traj = tc.get_edge_trajectories(socio)
# overlay edge activity in the fourth panel, using the same hour scaling
draw_edges(
    traj.trajectories,
    ax=ax[3],
    time_unit='h',
    time_normalization_factor=1 / 3600.,
)

# ========== generate surrogate from ZSBB model ======
ZSBB_params = estimate_ZSBB_args(socio, group_sizes_and_durations=socio_result)
print(ZSBB_params)
Code example #6
0
File: ccdf.py — Project: rmhilman/tacoma
        #    print('\n',alpha,'\n')
        x_groups.append(x)
        y_groups.append(y)

    xs = [x_k, [], x_contact ] + x_groups
    ys = [y_k, grp_sizes, y_contact ] + y_groups

    return xs, ys


if __name__ == "__main__":

    import matplotlib.pyplot as pl

    # load the HT09 network from the local cache, rebin to 20 s steps,
    # then measure group-size/duration statistics
    orig = tc.load_json_taco('~/.tacoma/ht09.taco')
    orig_binned = tc.bin(orig,20.)
    result = tc.measure_group_sizes_and_durations(orig_binned)

    n_bins = 100

    # durations of groups of size 1 (singletons), converted from seconds to hours
    durations = np.array(result.group_durations[1]) / 3600.

    # logarithmic bins spanning the data, with an extra leading bin edge at 0
    # NOTE(review): 'log10' must come from an import above this chunk — confirm
    bins = np.append([0.],np.logspace(log10(durations.min())-1,log10(durations.max()),n_bins) )

    # empirical complementary CDF, then resampled on the logarithmic bins
    x, y = get_ccdf(durations)
    y_sampled = tc.sample_a_function(x,y,bins)

    print("====== HEAD ======")

    # print the first few points of the original and the resampled ccdf
    print("original", x[:4], y[:4])
    print("sampled", bins[:4], y_sampled[:4])
Code example #7
0
    k_scaling = tc.estimate_k_scaling_gradient_descent(
        orig,
        dt_for_inference=dt_for_inference,
        dt_for_binning=dt_binning,
        measurements_per_configuration=20,
        learning_rate=0.5,
        relative_error=1e-2,
    )
else:
    k_scaling = 5

from tacoma.model_conversions import estimate_flockwork_P_args
import matplotlib.pyplot as pl
import numpy as np

# mean degree of the rebinned original network over time
t_orig, k_orig = tc.mean_degree(tc.bin(orig, dt_binning))

# two side-by-side panels with shared axes; the original curve is drawn in
# both as the reference for the surrogate curves added below
fig, ax = pl.subplots(1, 2, sharex=True, sharey=True)
ax[0].plot(t_orig, k_orig, label='original')
ax[1].plot(t_orig, k_orig, label='original')

n_k_meas = 6  # number of surrogate realizations measured per scaling value

for iscl, scaling in enumerate([k_scaling, 1.0]):

    these_ks = []

    for meas in range(n_k_meas):

        args = estimate_flockwork_P_args(
            orig,
Code example #8
0
File: drawing.py — Project: rmhilman/tacoma
    # Minimal hand-built edge_lists network: 3 nodes, 3 time steps, tmax = 3
    L = tc.edge_lists()

    L.N = 3
    L.t = [0.0, 1.0, 2.0]
    L.tmax = 3.0
    L.edges = [
        [(0, 1)],
        [(1, 2), (0, 2)],
        [(0, 1)],
    ]

    # the hand-built network above is replaced by a dynamic-RGG realization
    L = _tacoma.dynamic_RGG(100, 100, mean_link_duration=10)
    #F = tc.flockwork_P_varying_rates([],100,[0.5],100,[(0.0,1.0)],tmax=100)
    F = L
    FBIN = tc.bin(F, dt=1)  # rebin to unit time steps
    # draw_rows(FBIN)

    # time the trajectory extraction (with edge similarities) on the binned network
    start = time.time()
    traj, similarities = tc.get_edge_trajectories(
        FBIN, return_edge_similarities=True)
    end = time.time()
    print(similarities)

    print("needed ", end - start, "seconds")
    # NOTE(review): other call sites pass traj.trajectories to draw_edges —
    # confirm draw_edges also accepts the raw result object as used here.
    draw_edges(traj, fit=True)

    # time the extraction again on the unbinned network, without similarities
    start = time.time()
    result = tc.get_edge_trajectories(F)
    end = time.time()
    print("needed ", end - start, "seconds")