Example #1
            alpha_par, alpha_perp = model.get_alphas(alpha, epsilon)
            df["$\\alpha_\\parallel$"] = alpha_par
            df["$\\alpha_\\perp$"] = alpha_perp

            c.add_chain(df, weights=weight, **extra)

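            # Best-fit point: take the maximum a posteriori sample and treat -2 * log-posterior as its effective chi^2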
            max_post = posterior.argmax()
            chi2 = -2 * posterior[max_post]

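            # Rough degrees of freedom: number of power spectrum bins minus one, minus the number of chain columns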
            dof = data[0]["pk"].shape[0] - 1 - len(df.columns)
            ps = chain[max_post, :]
            best_fit = {}
            for l, p in zip(model.get_labels(), ps):
                best_fit[l] = p

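            # Weighted mean and standard deviation of alpha_par and alpha_perp over the chain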
            mean_par, std_par = weighted_avg_and_std(alpha_par, weight)
            mean_per, std_per = weighted_avg_and_std(alpha_perp, weight)

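            # Separate ChainConsumer instance just to pull out the alpha_par-alpha_perp correlation coefficient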
            c2 = ChainConsumer()
            c2.add_chain(df[["$\\alpha_\\parallel$", "$\\alpha_\\perp$"]], weights=weight)
            _, corr = c2.analysis.get_correlations()
            corr = corr[1, 0]
            output.append(
                f"{data[0]['min_k']:5.2f}, {data[0]['max_k']:5.2f}, {mean_par:5.3f}, {mean_per:5.3f}, {std_par:5.3f}, {std_per:5.3f}, {corr:5.3f}, {r_s:6.3f}, {chi2:5.3f}, {dof:4d}, {chi2/dof:5.2f}"
            )

        np.savetxt(pfn + "_Pk_linear_CAMB.dat", np.c_[model.camb.ks, pks[0][0]], fmt="%g        %g", header="k     pk")
        np.savetxt(pfn + "_Pk_linear_CAMB_smooth.dat", np.c_[model.camb.ks, pks[0][1]], fmt="%g     %g", header="k     pk_smooth")
        np.savetxt(
            pfn + "_bestfit_model.dat",
            np.c_[bestfit_model[0][0], bestfit_model[0][1][: len(bestfit_model[0][0])], bestfit_model[0][1][len(bestfit_model[0][0]) :]],
Example #2
                    },
                    axis="columns",
                    inplace=True)
        else:

            logging.info("Didn't find alphameans.csv, reading chains")

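            # Walk through every loaded chain, grouping results by the first part of the chain name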
            doonce = True
            for posterior, weight, chain, evidence, model, data, extra in fitter.load():
                n = extra["name"].split(",")[0]
                if res.get(n) is None:
                    res[n] = []
                i = posterior.argmax()
                chi2 = -2 * posterior[i]
                m, s = weighted_avg_and_std(chain[:, 0], weight)

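                # Run once only: bootstrap-resample the alpha column to estimate the standard error on its std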
                if doonce:
                    doonce = False
                    import pandas as pd

                    df = pd.DataFrame(chain[:, 0], columns=["alpha"])
                    nsamp = int((weight / weight.max()).sum())
                    r = []
                    for ii in range(1000):
                        r.append(df.sample(weights=weight, replace=True, n=nsamp).std())
                    print(f"SE of std is {np.std(r)}")

                res[n].append([
Example #3
    # An alternative way to account for missing uncertainty is to simply integrate over the different results. So try this too.
    from barry.utils import weighted_avg_and_std

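    # Treat each realisation's (mean, std) as a Gaussian in alpha, stack the Gaussians on a grid,
    # normalise, then take the weighted mean and std of the combined posterior as the integrated result.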
    nalpha = 100
    alpha_grid = np.linspace(0.8, 1.2, nalpha)
    integrate_val = np.empty((3, len(means[0])))
    integrate_err = np.empty((3, len(means[0])))
    for i in range(3):
        for j, (std, val) in enumerate(zip(stds[i], means[i])):
            posteriors = 1.0 / (np.sqrt(2.0 * np.pi) * std) * np.exp(-0.5 * ((val - np.tile(alpha_grid, (len(val), 1)).T)**2 / std**2))
            combined_posterior = np.sum(posteriors, axis=1) / len(std)
            norm = np.sum(combined_posterior) * (alpha_grid[-1] - alpha_grid[0]) / nalpha
            combined_posterior /= norm
            integrate_val[i, j], integrate_err[i, j] = weighted_avg_and_std(alpha_grid, combined_posterior)

    # Now have a look at the distributions for each of the three methods
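    # Squared pull of each method's recovered alpha from the truth, in units of its quoted uncertainty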
    for i, (index, type) in enumerate(zip([1, 2, 0], ["Pk", "Xi", "All"])):
        best_chi = (best_val[index, 0:] - alpha)**2 / best_err[index, 0:]**2
        best_combined_chi = (best_combined_val[index, 0:] - alpha)**2 / best_combined_err[index, 0:]**2

        best_add_chi = (best_add_val[index, 0:] - alpha)**2 / best_add_err[index, 0:]**2
        best_add_combined_chi = (best_add_combined_val[index, 0:] - alpha)**2 / best_add_combined_err[index, 0:]**2

        integrate_chi = (integrate_val[index, 0:] - alpha)**2 / integrate_err[index, 0:]**2
    sampler = DynestySampler(temp_dir=dir_name)

    fitter.set_sampler(sampler)
    fitter.set_num_walkers(30)
    fitter.fit(file)

    if fitter.should_plot():
        from chainconsumer import ChainConsumer

        c = ChainConsumer()
        alphas = []
        aas = []
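        # For each chain, record the weighted mean of the evidence column and of the last parameter,
        # then print the scatter of those means across chains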
        for posterior, weight, chain, evidence, model, data, extra in fitter.load():
            print(extra["name"])
            c.add_chain(chain, weights=weight, parameters=model.get_labels(), **extra)
            m, s = weighted_avg_and_std(evidence, weights=weight)
            m2, s2 = weighted_avg_and_std(chain[:, -1], weights=weight)
            alphas.append(m)
            aas.append(m2)
        print(np.std(alphas), np.std(aas))
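        # LaTeX parameter table plus summary and contour plots for the selected parameters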
        c.configure(shade=True, bins=25, legend_artists=True, cmap="plasma", sigmas=[0, 1, 2])
        params = ["$\\alpha$", "$A$", "$b$"]
        truth = {"$\\Omega_m$": 0.31, "$\\alpha$": 0.9982}
        c.analysis.get_latex_table(filename=pfn + "_params.txt")
        c.plotter.plot_summary(filename=[pfn + "_summary.png", pfn + "_summary.pdf"], errorbar=True, truth=truth, parameters=params)
        c.plotter.plot(filename=[pfn + "_contour.png", pfn + "_contour.pdf"], truth=truth, parameters=params, figsize="COLUMN")

    # FINDINGS
    # Well, it looks like where you transition from alternating indices to only using the extractor
    # has a strong impact not only on where you fit alpha, b, gamma and A, but also on their uncertainties.
    # It also impacts the degeneracy direction between alpha and A.