Example No. 1
import argparse
import collections

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('filename')
    options = parser.parse_args()

    results = collections.defaultdict(list)

    with open(options.filename) as f:
        for line in f:
            for pattern in patterns:
                m = pattern.search(line)
                if m:
                    gd = m.groupdict()
                    iteration = gd['iter']
                    del gd['iter']
                    key, val = list(gd.items())[0]
                    results[key].append((float(iteration), float(val)))

    fig, ax = util.make_figure()

    for key, data in sorted(results.items()):
        x, y = list(zip(*data))
        ax.loglog(x, y, label=key)
        util.save_columns(options.filename + '-' + key + '.tsv', x, y)

    ax.set_xlabel('Conjugate Gradient Iteration')
    ax.set_ylabel('Spinor Norms')
    ax.set_title(options.filename)

    util.save_figure(fig, options.filename)
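
This example assumes a module-level `patterns` list plus a project-local `util` module. Each regex must provide an `iter` group and exactly one further named group, whose name becomes the series label. A hypothetical sketch of such a list:

import re

# Hypothetical: each pattern carries an `iter` group and one value group.
patterns = [
    re.compile(r'iter (?P<iter>\d+): residual norm = (?P<residual>[0-9.eE+-]+)'),
    re.compile(r'iter (?P<iter>\d+): spinor norm = (?P<spinor_norm>[0-9.eE+-]+)'),
]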
Example No. 2
import matplotlib.pyplot as plt
import numpy as np

def plot_num_cases_over_period(output_dir, fig_name, all_num_cases,
                               display_scenario_names, y_label):
    plt.violinplot(all_num_cases)
    plt.plot(range(1,
                   len(all_num_cases) + 1),
             [np.mean(scen) for scen in all_num_cases],
             marker="o",
             linestyle="None")
    plt.xticks(range(1,
                     len(all_num_cases) + 1),
               display_scenario_names,
               rotation=25)
    plt.ylabel(y_label)
    save_figure(output_dir, fig_name, extension="png")
Example No. 3
import numpy as np

# bootstrap, correlators, util and perform_fits are project-local.
def io_extract_mass(paths_in, path_out):
    twopts_orig = correlators.loader.folded_list_loader(paths_in)

    sample_count = 3 * len(twopts_orig)

    b_twopts = bootstrap.Boot(
        bootstrap.make_dist_draw(twopts_orig, sample_count))

    b_corr_matrix = bootstrap.Boot([
        correlators.corrfit.correlation_matrix(twopts)
        for twopts in b_twopts.dist
    ])

    omit_pre = 7

    b_inv_corr_mat = bootstrap.Boot([
        corr_matrix[omit_pre:, omit_pre:].getI()
        for corr_matrix in b_corr_matrix.dist
    ])

    time_extent = len(b_twopts.dist[0][0])
    time = np.arange(time_extent)

    fit_function = correlators.fit.cosh_fit_decorator(2 * (time_extent - 1))
    b_fit_param = bootstrap.Boot([
        perform_fits(time, bootstrap.average_arrays(twopts),
                     bootstrap.std_arrays(twopts), inv_corr_mat, fit_function,
                     (0.4, 1.0, 0.0), omit_pre)
        for twopts, inv_corr_mat in zip(b_twopts.dist, b_inv_corr_mat.dist)
    ])

    fig, ax = util.make_figure()
    ax.errorbar(time, bootstrap.average_arrays(b_twopts.cen),
                bootstrap.std_arrays(b_twopts.cen))
    ax.plot(time, fit_function(time, *b_fit_param.cen))
    ax.set_yscale('log')
    util.save_figure(fig, 'test-corr.pdf')

    print('cen', b_fit_param.cen[0])
    print('val', b_fit_param.val[0])
    print('err', b_fit_param.err[0])

    print('len', len(twopts_orig), len(b_fit_param.dist))

    np.savetxt(
        path_out,
        np.column_stack(
            [b_fit_param.cen[0], b_fit_param.val[0], b_fit_param.err[0]]))
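
`bootstrap.Boot` is project-local. Judging only from the attribute accesses above (`.dist`, `.cen`, `.val`, `.err`), a minimal sketch could look like the following; the exact estimator conventions (what `cen` is relative to `val`) are assumptions:

import numpy as np

class Boot:
    # Hypothetical bootstrap container: `dist` holds the resampled values,
    # `cen` a central estimate, `val`/`err` the bootstrap mean and spread.
    def __init__(self, dist):
        self.dist = list(dist)
        self.cen = np.mean(self.dist, axis=0)
        self.val = np.mean(self.dist, axis=0)
        self.err = np.std(self.dist, axis=0)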
Example No. 4
import numpy as np

# util and wflow are project-local modules; _parse_args is defined elsewhere.
def main():
    options = _parse_args()

    fig, ax = util.make_figure()

    t, e = util.load_columns(
        '/home/mu/Dokumente/Studium/Master_Science_Physik/Masterarbeit//Runs/0106-bmw-rho011/shard-wflow.config-100.out.xml.e.tsv'
    )
    t, w = util.load_columns(
        '/home/mu/Dokumente/Studium/Master_Science_Physik/Masterarbeit//Runs/0106-bmw-rho011/shard-wflow.config-100.out.xml.w.tsv'
    )

    data = np.loadtxt('gradflow.000100', skiprows=1)

    tm_traj = data[:, 0]
    tm_t = data[:, 1]
    tm_P = data[:, 2]
    tm_Eplaq = data[:, 3]
    tm_Esym = data[:, 4]
    tm_tsqEplaq = data[:, 5]
    tm_tsqEsym = data[:, 6]
    tm_Wsym = data[:, 7]

    for i, method in enumerate(
        ['gradient', 'chain-gradient', 'explicit-sym', 'explicit-asym']):
        tm_my_w = wflow.derive_w(tm_t, tm_Esym, method=method)
        ax.plot(tm_t + 0.1 * i, np.abs(tm_Wsym - tm_my_w), label=method)

    ax.set_yscale('log')
    ax.set_xlabel('$t/a^2$ (shifted)')
    ax.set_ylabel(
        r'$\left|(w/a)^\mathrm{tmLQCD} - (w/a)^\mathrm{Method}\right|$')
    util.save_figure(fig, 'plot-wflow-norm')

    x = np.linspace(0, 4, 1000)
    y = np.sin(x)
    z = x * (x**2 * np.cos(x) + 2 * x * np.sin(x))
    fig, ax = util.make_figure()
    for i, method in enumerate(
        ['gradient', 'chain-gradient', 'explicit-sym', 'explicit-asym']):
        w = wflow.derive_w(x, y, method=method)
        ax.plot(x + 0.1 * i, np.abs(z - w), label=method)
    ax.set_xlabel('$x$ (shifted)')
    ax.set_ylabel('absolute deviation from analytic $w(x)$')
    ax.set_yscale('log')
    util.save_figure(fig, 'plot-gradient-check')
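
The second consistency check encodes the gradient-flow definition W(t) = t d/dt [t^2 E(t)]: with E(x) = sin(x) the analytic answer is exactly the `z` above, x (x^2 cos x + 2x sin x). A minimal sketch of what the 'gradient' method of `wflow.derive_w` might do under that assumption:

import numpy as np

def derive_w_gradient(t, E):
    # Hypothetical: W(t) = t * d/dt [t^2 E(t)] via a finite-difference
    # derivative; the project's wflow.derive_w selects among several
    # discretizations through its `method` argument.
    return t * np.gradient(t**2 * E, t)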
Example No. 5
import matplotlib.pyplot as plt

def plot_resurgence_probabilities(output_dir, fig_name,
                                  all_num_case_after_release,
                                  display_scenario_names,
                                  extinction_threshold):
    resurgence_probs = []
    for scenario in all_num_case_after_release:
        resurgence_probs.append(
            len([run for run in scenario if run >= extinction_threshold]) /
            len(scenario))

    plt.bar(range(len(resurgence_probs)), resurgence_probs)
    plt.xticks(range(len(resurgence_probs)),
               display_scenario_names,
               rotation=25)
    plt.ylabel("Resurgence probability")
    plt.ylim((0, 1))
    save_figure(output_dir, fig_name)
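
A hedged usage example with toy data (two scenarios, five runs each), reusing the `save_figure` sketch from Example No. 2:

all_num_case_after_release = [
    [0, 2, 150, 300, 1],      # scenario A: 2 of 5 runs reach the threshold
    [400, 350, 0, 500, 600],  # scenario B: 4 of 5 runs reach the threshold
]
plot_resurgence_probabilities(".", "resurgence", all_num_case_after_release,
                              ["baseline", "lockdown"],
                              extinction_threshold=100)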
Example No. 6
import matplotlib.pyplot as plt

def plot_extinction_probabilities(output_dir,
                                  fig_name,
                                  display_scenario_names,
                                  all_total_cases,
                                  extinction_threshold=0):
    extinction_probabilities = []
    for scenario_total_cases in all_total_cases:
        extinction_probabilities.append(
            len([x
                 for x in scenario_total_cases if x < extinction_threshold]) /
            len(scenario_total_cases))

    plt.bar(range(len(all_total_cases)), extinction_probabilities)
    plt.xticks(range(len(display_scenario_names)),
               display_scenario_names,
               rotation=25)
    plt.ylabel("Extinction probability (threshold = {} cases)".format(
        extinction_threshold))
    plt.ylim(0, 1.1)

    save_figure(output_dir, fig_name)
Example No. 7
import matplotlib.pyplot as plt
import numpy as np

def plot_effective_r_by_day(output_dir,
                            fig_name,
                            scenario_name,
                            rt_by_day,
                            num_days,
                            fraction=False):
    mean = []
    lower = []
    upper = []

    for day in range(num_days):
        er_on_day = [run[day] for run in rt_by_day]
        er_on_day = [result for result in er_on_day if not np.isnan(result)]

        mean.append(np.mean(er_on_day) if len(er_on_day) > 0 else np.nan)
        lower.append(
            np.percentile(er_on_day, 2.5) if len(er_on_day) > 0 else np.nan)
        upper.append(
            np.percentile(er_on_day, 97.5) if len(er_on_day) > 0 else np.nan)

    plt.plot(range(num_days), mean)
    plt.fill_between(range(num_days), lower, upper, color="lightgrey")

    if not fraction:  # If looking at mean Rt per day, add reference line at Rt = 1
        plt.plot(range(num_days), [1] * num_days)

    plt.xlabel("Simulation day")
    plt.xlim(0, num_days)

    if fraction:
        plt.ylabel("Fraction of Rt >= 1")
        plt.ylim(-0.1, 1.1)
    else:
        plt.ylabel("Mean Rt")
        plt.ylim(-0.5, 22)

    save_figure(output_dir, fig_name + "_" + scenario_name)
Example No. 8
import matplotlib.pyplot as plt
import numpy as np

def plot_herd_immunity_threshold(output_dir,
                                 fig_name,
                                 display_scenario_names,
                                 all_herd_immunity_thresholds,
                                 show_day=False):
    means = [
        np.mean(scenario_results)
        for scenario_results in all_herd_immunity_thresholds
    ]

    plt.violinplot(all_herd_immunity_thresholds)
    plt.plot(range(1, len(means) + 1), means, linestyle="None", marker="o")
    plt.xticks(range(1,
                     len(display_scenario_names) + 1),
               display_scenario_names,
               rotation=25)

    if show_day:
        plt.ylabel("Day on which Rt >= 1 for the last time")
    else:
        plt.ylabel("Herd immunity threshold")

    save_figure(output_dir, fig_name, extension="png"
                )  # Use png extension to accurately save opaqueness in figure
Example No. 9
import collections
import glob
import itertools
import os
import re

import numpy as np

# util is a project-local plotting module; _parse_args is defined elsewhere.
def main():
    options = _parse_args()

    pattern = re.compile(
        r'0105-perf_nodes=(?P<A_nodes>\d+)_ntasks=(?P<B_ntasks>\d+)_cpus=(?P<C_cpus>\d+)_affinity=(?P<E_affinity>\w+?)/'
    )

    pattern_total_time = re.compile(r'HMC: total time = ([\d.]+) secs')

    rows = []

    for run in options.run:
        print(run)
        m = pattern.match(run)
        if not m:
            continue

        cols1 = m.groupdict()

        nodes = int(cols1['A_nodes'])
        tasks = int(cols1['B_ntasks'])
        cpus = int(cols1['C_cpus'])

        cols1['D_SMT'] = tasks * cpus // 24

        try:
            cols2 = {
                'QPhiX CG Perf':
                np.loadtxt(
                    os.path.join(
                        run,
                        'extract-solver-QPhiX_Clover_CG-gflops_per_node.tsv'))
                [1],
                'QPhiX M-Shift Perf':
                np.loadtxt(
                    os.path.join(
                        run,
                        'extract-solver-QPhiX_Clover_M-Shift_CG-gflops_per_node.tsv'
                    ))[1],
            }
        except FileNotFoundError as e:
            print(e)
            continue

        logfile = glob.glob(os.path.join(run, 'slurm-*.out'))[0]

        with open(logfile) as f:
            lines = f.readlines()

        m = pattern_total_time.match(lines[-1])
        if m:
            cols2['minutes'] = float(m.group(1)) / 60
        else:
            cols2['minutes'] = 0

        print(cols2.values())

        rows.append((cols1, cols2))

    print()
    print()

    for key in itertools.chain(sorted(cols1.keys()), sorted(cols2.keys())):
        print('{:15s}'.format(str(key)[:13]), end='')
    print()

    for cols1, cols2 in rows:
        for key, value in itertools.chain(sorted(cols1.items()),
                                          sorted(cols2.items())):
            print('{:15s}'.format(str(value)[:13]), end='')
        print()

    for x in cols1.keys():
        for y in cols2.keys():
            fig, ax = util.make_figure()
            data = collections.defaultdict(list)
            for c1, c2 in rows:
                data[c1[x]].append(c2[y])
            box_data = [value for key, value in sorted(data.items())]
            box_labels = [key for key, value in sorted(data.items())]
            ax.boxplot(box_data, labels=box_labels)
            ax.set_xlabel(x)
            ax.set_ylabel(y)

            util.save_figure(fig, 'boxplot-{}-{}'.format(x, y))
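
The directory-name regex only matches run directories of one specific shape; a quick sanity check with a constructed name (the directory itself is made up):

import re

pattern = re.compile(
    r'0105-perf_nodes=(?P<A_nodes>\d+)_ntasks=(?P<B_ntasks>\d+)_cpus=(?P<C_cpus>\d+)_affinity=(?P<E_affinity>\w+?)/'
)
m = pattern.match('0105-perf_nodes=4_ntasks=8_cpus=3_affinity=compact/')
print(m.groupdict())
# {'A_nodes': '4', 'B_ntasks': '8', 'C_cpus': '3', 'E_affinity': 'compact'}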
Example No. 10
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as pl  # assumption: `pl` in this example is pyplot

# bootstrap, util, siunitx, gmor_pion and linear are project-local.
def main():
    options = _parse_args()
    R = 300

    # Read in the data from the paper.
    a_inv_val = 1616
    a_inv_err = 20
    a_inv_dist = bootstrap.make_dist(a_inv_val, a_inv_err, n=R)
    (aml, ams, l, t, trajectories, ampi_val, ampi_err, amk_val, amk_err,
     f_k_f_pi_val, f_k_f_pi_err) = util.load_columns('physical_point/gmor.txt')
    ampi_dist = bootstrap.make_dist(ampi_val, ampi_err, n=R)
    amk_dist = bootstrap.make_dist(amk_val, amk_err, n=R)
    mpi_dist = [ampi * a_inv for ampi, a_inv in zip(ampi_dist, a_inv_dist)]
    mk_dist = [amk * a_inv for amk, a_inv in zip(amk_dist, a_inv_dist)]

    # Convert the data in lattice units into physical units.
    mpi_dist = [a_inv * ampi for ampi, a_inv in zip(ampi_dist, a_inv_dist)]
    mpi_val, mpi_avg, mpi_err = bootstrap.average_and_std_arrays(mpi_dist)
    mpi_sq_dist = [mpi**2 for mpi in mpi_dist]
    mpi_sq_val, mpi_sq_avg, mpi_sq_err = bootstrap.average_and_std_arrays(
        mpi_sq_dist)
    ampi_sq_dist = [ampi**2 for ampi in ampi_dist]
    ampi_sq_val, ampi_sq_avg, ampi_sq_err = bootstrap.average_and_std_arrays(
        ampi_sq_dist)

    # Do a GMOR fit in order to extract `a B` and `a m_cr`.
    popt_dist = [
        op.curve_fit(gmor_pion, aml, ampi_sq)[0] for ampi_sq in ampi_sq_dist
    ]
    aB_dist = [popt[0] for popt in popt_dist]
    amcr_dist = [popt[1] for popt in popt_dist]
    aB_val, aB_avg, aB_err = bootstrap.average_and_std_arrays(aB_dist)
    amcr_val, amcr_avg, amcr_err = bootstrap.average_and_std_arrays(amcr_dist)
    print('aB =', siunitx(aB_val, aB_err))
    print('am_cr =', siunitx(amcr_val, amcr_err))

    ams_paper = -0.057
    ams_phys = ams_paper - amcr_val
    ams_red = 0.9 * ams_phys
    ams_bare_red = ams_red + amcr_val

    print(ams_paper, ams_phys, ams_red, ams_bare_red)

    print()
    print('Mass preconditioning masses:')

    amlq = aml - amcr_val
    for i in range(3):
        amprec = amlq * 10**i + amcr_val
        kappa = 1 / (amprec * 2 + 8)
        print('a m_prec:', amprec)
        print('κ', kappa)

    exit()  # NOTE: early exit; the remainder of this function is skipped as-is.

    diff_dist = [
        np.sqrt(2) * np.sqrt(mk**2 - 0.5 * mpi**2)
        for mpi, mk in zip(mpi_dist, mk_dist)
    ]
    diff_val, diff_avg, diff_err = bootstrap.average_and_std_arrays(diff_dist)

    popt_dist = [
        op.curve_fit(linear, mpi, diff)[0]
        for mpi, diff in zip(mpi_dist, diff_dist)
    ]
    fit_x = np.linspace(np.min(mpi_dist), np.max(mpi_dist), 100)
    fit_y_dist = [linear(fit_x, *popt) for popt in popt_dist]
    fit_y_val, fit_y_avg, fit_y_err = bootstrap.average_and_std_arrays(
        fit_y_dist)

    # Physical meson masses from FLAG paper.
    mpi_phys_dist = bootstrap.make_dist(134.8, 0.3, R)
    mk_phys_dist = bootstrap.make_dist(494.2, 0.3, R)
    mpi_phys_val, mpi_phys_avg, mpi_phys_err = bootstrap.average_and_std_arrays(
        mpi_phys_dist)
    ampi_phys_dist = [
        mpi_phys / a_inv for a_inv, mpi_phys in zip(a_inv_dist, mpi_phys_dist)
    ]
    amk_phys_dist = [
        mk_phys / a_inv for a_inv, mk_phys in zip(a_inv_dist, mk_phys_dist)
    ]
    ampi_phys_val, ampi_phys_avg, ampi_phys_err = bootstrap.average_and_std_arrays(
        ampi_phys_dist)
    amk_phys_val, amk_phys_avg, amk_phys_err = bootstrap.average_and_std_arrays(
        amk_phys_dist)
    print('aM_pi phys =', siunitx(ampi_phys_val, ampi_phys_err))
    print('aM_k phys =', siunitx(amk_phys_val, amk_phys_err))

    new_b_dist = [
        np.sqrt(mk_phys**2 - 0.5 * mpi_phys**2) - popt[0] * mpi_phys for
        mpi_phys, mk_phys, popt in zip(mpi_phys_dist, mk_phys_dist, popt_dist)
    ]

    diff_sqrt_phys_dist = [
        np.sqrt(mk_phys**2 - 0.5 * mpi_phys**2)
        for mpi_phys, mk_phys in zip(mpi_phys_dist, mk_phys_dist)
    ]
    diff_sqrt_phys_val, diff_sqrt_phys_avg, diff_sqrt_phys_err = bootstrap.average_and_std_arrays(
        diff_sqrt_phys_dist)

    ex_x = np.linspace(120, 700, 100)
    ex_y_dist = [
        linear(ex_x, popt[0], b) for popt, b in zip(popt_dist, new_b_dist)
    ]
    ex_y_val, ex_y_avg, ex_y_err = bootstrap.average_and_std_arrays(ex_y_dist)

    ams_art_dist = [
        linear(mpi, popt[0], b)**2 / a_inv**2 / aB - amcr
        for mpi, popt, b, a_inv, aB, amcr in zip(
            mpi_dist, popt_dist, new_b_dist, a_inv_dist, aB_dist, amcr_dist)
    ]
    ams_art_val, ams_art_avg, ams_art_err = bootstrap.average_and_std_arrays(
        ams_art_dist)
    print('a m_s with artifacts', siunitx(ams_art_val, ams_art_err))

    fig, ax = util.make_figure()
    ax.fill_between(fit_x,
                    fit_y_val + fit_y_err,
                    fit_y_val - fit_y_err,
                    color='red',
                    alpha=0.2)
    ax.plot(fit_x, fit_y_val, label='Fit', color='red')
    ax.fill_between(ex_x,
                    ex_y_val + ex_y_err,
                    ex_y_val - ex_y_err,
                    color='orange',
                    alpha=0.2)
    ax.plot(ex_x, ex_y_val, label='Extrapolation', color='orange')
    ax.errorbar(mpi_val,
                diff_val,
                xerr=mpi_err,
                yerr=diff_err,
                linestyle='none',
                label='Data (Dürr 2010)')
    ax.errorbar([mpi_phys_val], [diff_sqrt_phys_val],
                xerr=[mpi_phys_err],
                yerr=[diff_sqrt_phys_err],
                label='Physical Point (Aoki)')
    util.save_figure(fig, 'test')

    np.savetxt('artifact-bmw-data.tsv',
               np.column_stack([mpi_val, diff_val, mpi_err, diff_err]))
    np.savetxt('artifact-bmw-fit.tsv', np.column_stack([fit_x, fit_y_val]))
    np.savetxt('artifact-bmw-band.tsv',
               bootstrap.pgfplots_error_band(fit_x, fit_y_val, fit_y_err))
    np.savetxt(
        'artifact-phys-data.tsv',
        np.column_stack([[mpi_phys_val], [diff_sqrt_phys_val], [mpi_phys_err],
                         [diff_sqrt_phys_err]]))
    np.savetxt('artifact-phys-fit.tsv', np.column_stack([ex_x, ex_y_val]))
    np.savetxt('artifact-phys-band.tsv',
               bootstrap.pgfplots_error_band(ex_x, ex_y_val, ex_y_err))
    np.savetxt('artifact-ms.tsv',
               np.column_stack([mpi_val, ams_art_val, mpi_err, ams_art_err]))

    # Compute the strange quark mass that is needed to obtain a physical meson
    # mass difference, ignoring lattice artifacts.
    ams_phys_dist = [(amk_phys**2 - 0.5 * ampi_phys**2) / aB - amcr
                     for ampi_phys, amk_phys, aB, amcr in zip(
                         ampi_phys_dist, amk_phys_dist, aB_dist, amcr_dist)]
    ams_phys_cen, ams_phys_val, ams_phys_err = bootstrap.average_and_std_arrays(
        ams_phys_dist)
    print('M_K = {} MeV <== am_s ='.format(siunitx(494.2, 0.3)),
          siunitx(ams_phys_cen, ams_phys_err))
    aml_phys_dist = [
        op.newton(lambda aml: gmor_pion(aml, *popt) - ampi_phys**2,
                  np.min(aml))
        for popt, ampi_phys in zip(popt_dist, ampi_phys_dist)
    ]

    fit_x = np.linspace(np.min(aml_phys_dist), np.max(aml), 100)
    fit_y_dist = [
        np.sqrt(gmor_pion(fit_x, *popt)) * a_inv
        for popt, a_inv in zip(popt_dist, a_inv_dist)
    ]
    fit_y_cen, fit_y_val, fit_y_err = bootstrap.average_and_std_arrays(
        fit_y_dist)

    np.savetxt('physical_point/mpi-vs-aml-data.tsv',
               np.column_stack([aml, mpi_val, mpi_err]))
    np.savetxt('physical_point/mpi-vs-aml-fit.tsv',
               np.column_stack([fit_x, fit_y_cen]))
    np.savetxt('physical_point/mpi-vs-aml-band.tsv',
               bootstrap.pgfplots_error_band(fit_x, fit_y_cen, fit_y_err))

    aml_phys_val, aml_phys_avg, aml_phys_err = bootstrap.average_and_std_arrays(
        aml_phys_dist)
    mpi_cen, mpi_val, mpi_err = bootstrap.average_and_std_arrays(mpi_dist)
    #aml_240_val, aml_240_avg, aml_240_err = bootstrap.average_and_std_arrays(aml_240_dist)

    print('M_pi = {} MeV <== am_l ='.format(siunitx(134.8, 0.3)),
          siunitx(aml_phys_val, aml_phys_err))
    #print('M_pi = 240 MeV <== am_l =', siunitx(aml_240_val, aml_240_err))

    fig = pl.figure()
    ax = fig.add_subplot(2, 1, 1)
    ax.fill_between(fit_x,
                    fit_y_val - fit_y_err,
                    fit_y_val + fit_y_err,
                    color='0.8')
    ax.plot(fit_x, fit_y_val, color='black', label='GMOR Fit')
    ax.errorbar(aml,
                mpi_val,
                yerr=mpi_err,
                color='blue',
                marker='+',
                linestyle='none',
                label='Data')
    ax.errorbar([aml_phys_val], [135],
                xerr=[aml_phys_err],
                marker='+',
                color='red',
                label='Extrapolation')
    #ax.errorbar([aml_240_val], [240], xerr=[aml_240_err], marker='+', color='red')
    ax.set_title('Extrapolation to the Physical Point')
    ax.set_xlabel(r'$a m_\mathrm{ud}$')
    ax.set_ylabel(r'$M_\pi / \mathrm{MeV}$')
    util.dandify_axes(ax)

    ax = fig.add_subplot(2, 1, 2)
    ax.hist(aml_phys_dist - aml_phys_val, bins=50)
    ax.locator_params(nbins=6)
    ax.set_title('Bootstrap Bias')
    ax.set_xlabel(
        r'$(a m_\mathrm{ud}^\mathrm{phys})^* - a m_\mathrm{ud}^\mathrm{phys}$')
    util.dandify_axes(ax)

    util.dandify_figure(fig)
    fig.savefig('physical_point/GMOR.pdf')

    np.savetxt('physical_point/ampi-sq-vs-aml.tsv',
               np.column_stack([aml, ampi_sq_val, ampi_sq_err]))
    np.savetxt('physical_point/mpi-sq-vs-aml.tsv',
               np.column_stack([aml, mpi_sq_val, mpi_sq_err]))
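
`gmor_pion`, `linear` and `bootstrap.make_dist` are defined elsewhere in the project. From how their parameters are used above (`popt[0] = aB`, `popt[1] = amcr`; a slope/intercept pair; Gaussian resampling of a published value), plausible definitions follow below. The factor of 2 in the GMOR relation is an assumption; only the linear dependence is fixed by the fitting code.

import numpy as np

def gmor_pion(aml, aB, amcr):
    # GMOR-inspired model: (a M_pi)^2 = 2 aB (a m_l - a m_cr).
    return 2 * aB * (aml - amcr)

def linear(x, slope, intercept):
    return slope * x + intercept

def make_dist(val, err, n=100):
    # Hypothetical: Gaussian pseudo-bootstrap samples around a published
    # central value; broadcasts over array-valued `val` and `err`.
    return [np.random.normal(val, err) for _ in range(n)]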
Example No. 11
import matplotlib.pyplot as plt

def plot_p80s(output_dir, fig_name, display_scenario_names, all_p80s):
    plt.boxplot(all_p80s, labels=display_scenario_names)
    plt.xticks(rotation=25)
    plt.ylabel("P80")

    save_figure(output_dir, fig_name)
Example No. 12
import multiprocessing

import matplotlib.pyplot as plt
import numpy as np

# save_figure, get_trans_prob_by_exp, get_secondary_cases_per_index_case,
# get_index_case_ids and estimate_effective_contacts are project-local.
def main(output_dir, scenario_names, overdispersion_params, population_file, contact_matrix_file):
    infectious_period_length = 7  # FIXME This probably should not be hard-coded here...

    for scenario_i in range(len(scenario_names)):
        print(scenario_names[scenario_i])

        # Get transmission probability per experiment
        experiments = get_trans_prob_by_exp(output_dir, scenario_names[scenario_i])

        secondary_cases = []
        index_case_ids = []
        with multiprocessing.Pool(processes=4) as pool:
            secondary_cases = pool.starmap(get_secondary_cases_per_index_case,
                                            [(output_dir, scenario_names[scenario_i], exp_id) for exp_id in experiments.keys()])
            index_case_ids = pool.starmap(get_index_case_ids,
                                            [(output_dir, scenario_names[scenario_i], exp_id) for exp_id in experiments.keys()])

        # Group by transmission probability
        secondary_cases_by_tp = {}
        for experiment_id, cases in secondary_cases:
            tp = experiments[experiment_id]
            if tp in secondary_cases_by_tp:
                secondary_cases_by_tp[tp].append(cases)
            else:
                secondary_cases_by_tp[tp] = [cases]

        tps_sorted = list(secondary_cases_by_tp.keys())
        tps_sorted.sort()

        # Estimate mean + variance number of effective contacts for index cases
        person_ids = list(set([x[1][0] for x in index_case_ids]))

        means_theoretical_by_tp, var_theoretical_by_tp = estimate_effective_contacts(population_file, contact_matrix_file,
                                                                tps_sorted, infectious_period_length,
                                                                overdispersion=overdispersion_params[scenario_i],
                                                                person_ids=person_ids)

        # Plot theoretical estimate of mean number of secondary cases per index case
        # VS mean and 95% interval of number of secondary cases per index case from simulations
        means = [np.mean(secondary_cases_by_tp[tp]) for tp in tps_sorted]
        lower = [np.percentile(secondary_cases_by_tp[tp], 2.5) for tp in tps_sorted]
        upper = [np.percentile(secondary_cases_by_tp[tp], 97.5) for tp in tps_sorted]

        plt.plot(tps_sorted, means, marker="o", label="Simulations")
        plt.fill_between(tps_sorted, lower, upper, color="lightgrey")

        plt.plot(tps_sorted, [means_theoretical_by_tp[tp] for tp in tps_sorted], linestyle="None", marker="^", label="Theoretical")

        plt.xlabel("Mean transmission probality")
        plt.ylabel("Secondary cases caused by index case")
        plt.ylim(-2, 100)
        plt.legend()
        save_figure(output_dir, "mean_ibrn_comparison_" + scenario_names[scenario_i], extension="png")

        # Plot theoretical estimate of variance for number of secondary cases per index case
        # VS variance for number of secondary cases per index case from simulations

        variances = [np.var(secondary_cases_by_tp[tp]) for tp in tps_sorted]

        plt.plot(tps_sorted, variances, linestyle="None", marker="o")
        plt.plot(tps_sorted, [var_theoretical_by_tp[tp] for tp in tps_sorted], linestyle="None", marker="^")
        plt.legend(["Simulations", "Theoretical"])
        plt.xlabel("Mean transmission probality")
        plt.ylabel("Variance of secondary cases caused by index case")
        save_figure(output_dir, "var_" + scenario_names[scenario_i], extension="png")
Example No. 13

import glob

import numpy as np

# util is a project-local plotting module.
def load_average_corr(paths):
    t = util.load_columns(paths[0])[0]
    reals = [util.load_columns(path)[1] for path in paths]
    a = np.vstack(reals)  # np.row_stack is a deprecated alias of vstack
    return t, np.mean(a, axis=0), np.std(a, axis=0) / np.sqrt(len(reals))


source_0 = load_average_corr(
    glob.glob(
        '/home/mu/Dokumente/Studium/Master_Science_Physik/Masterarbeit/Runs/0120-Mpi270-L24-T96/corr/T=0/extract/corr/*.tsv'
    ))
source_20 = load_average_corr(
    glob.glob(
        '/home/mu/Dokumente/Studium/Master_Science_Physik/Masterarbeit/Runs/0120-Mpi270-L24-T96/corr/extract/corr/*.tsv'
    ))

fig, ax = util.make_figure()

print([x.shape for x in source_0])

ax.errorbar(source_0[0], source_0[1], source_0[2], label='T = 0')
ax.errorbar(source_20[0], source_20[1], source_20[2], label='T = 20')
ax.set_title('Different Source time with Chroma')
ax.set_xlabel(r'$t$')
ax.set_ylabel(r'$C(t)$')
ax.set_yscale('log')

util.save_figure(fig, 'chroma-source_t20')
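
`util` is the project-local plotting module used throughout these examples. Minimal sketches of the helpers seen here and in Example No. 1 (signatures follow from the call sites; the bodies are guesses):

import numpy as np
import matplotlib.pyplot as plt

def make_figure():
    # Hypothetical: figure/axes pair in the project's house style.
    return plt.subplots()

def load_columns(path):
    # Hypothetical: the columns of a whitespace-separated text file.
    return np.loadtxt(path, unpack=True)

def save_columns(path, *columns):
    np.savetxt(path, np.column_stack(columns))

def save_figure(fig, name):
    # Hypothetical: finalize and write the figure as a PDF.
    fig.tight_layout()
    fig.savefig(name if name.endswith('.pdf') else name + '.pdf')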
Example No. 14
import multiprocessing

import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import nbinom, probplot

# save_figure and the get_* helpers are project-local (see Example No. 12).
def main(output_dir, scenario_names, overdispersion_params, display_scenario_names):
    if len(display_scenario_names) < len(scenario_names):
        display_scenario_names = scenario_names

    all_means = []
    all_means_exclude_extinction = []

    for scenario_i in range(len(scenario_names)):
        print(scenario_names[scenario_i])

        experiments = get_trans_prob_by_exp(output_dir, scenario_names[scenario_i])

        secondary_cases = []
        with multiprocessing.Pool(processes=4) as pool:
            secondary_cases = pool.starmap(get_secondary_cases_per_index_case,
                                    [(output_dir, scenario_names[scenario_i], exp_id) for exp_id in experiments.keys()])

        # Group by transmission probability
        secondary_cases_by_tp = {}
        for experiment_id, cases in secondary_cases:
            tp = experiments[experiment_id]
            if tp in secondary_cases_by_tp:
                secondary_cases_by_tp[tp].append(cases)
            else:
                secondary_cases_by_tp[tp] = [cases]

        tps_sorted = list(secondary_cases_by_tp.keys())
        tps_sorted.sort()

        # Mean secondary cases per index case
        mean_secondary_cases_by_tp = [np.mean(secondary_cases_by_tp[tp]) for tp in tps_sorted]
        all_means.append(mean_secondary_cases_by_tp)

        # Mean secondary cases per index case,
        # excluding runs where index case makes 0 secondary cases
        mean_secondary_cases_by_tp_exclude_extinction = []
        for tp in tps_sorted:
            if tp == 0:
                mean_secondary_cases_by_tp_exclude_extinction.append(np.nan)
            else:
                mean_secondary_cases_by_tp_exclude_extinction.append(np.mean([x for x in secondary_cases_by_tp[tp] if x > 0]))
        all_means_exclude_extinction.append(mean_secondary_cases_by_tp_exclude_extinction)

        # Create QQ-plots to compare offspring distribution
        # to negative binomial distribution
        for tp_i, tp in enumerate(tps_sorted):
            k = overdispersion_params[scenario_i]
            if k is None:
                k = np.inf
            res = probplot(secondary_cases_by_tp[tp],
                           dist=nbinom,
                           sparams=(k, k / (mean_secondary_cases_by_tp[tp_i] + k)),
                           fit=False,
                           plot=plt)
            save_figure(output_dir, "QQplot_" + scenario_names[scenario_i] + "_tp_" + str(tp))


    # Plot mean secondary cases per index case
    for scenario_i in range(len(scenario_names)):
        plt.plot([0.0, 0.025, 0.05, 0.075, 0.10], all_means[scenario_i])
    plt.xlabel("Mean individual transmission probability")
    plt.xticks([0.00, 0.02, 0.04, 0.06, 0.08, 0.10])
    plt.ylabel("Mean number of secondary cases per index case")
    plt.legend(display_scenario_names)
    save_figure(output_dir, "mean_secondary_cases_by_tp")

    # Plot mean secondary cases per index case, excluding runs where index case had 0 secondary cases
    for scenario_i in range(len(scenario_names)):
        plt.plot([0.0, 0.025, 0.05, 0.075, 0.10], all_means_exclude_extinction[scenario_i])
    plt.xlabel("Mean individual transmission probability")
    plt.xticks([0.00, 0.02, 0.04, 0.06, 0.08, 0.10])
    plt.ylabel("Mean number of secondary cases per index case")
    plt.legend(display_scenario_names)
    save_figure(output_dir, "mean_secondary_cases_by_tp_exclude_extinction")