def test_bic_fail_no_posterior():
    d = norm.rvs(size=1000)
    c = ChainConsumer()
    c.add_chain(d, num_eff_data_points=1000, num_free_params=1)
    bics = c.comparison.bic()
    assert len(bics) == 1
    assert bics[0] is None
Example #2
def plot_results(chain, param, chainf, chainf2, paramf, t0, x0, x1, c, temp_dir, seed, interped):
    cc = ChainConsumer()
    cc.add_chain(chain, parameters=param, name="Posterior")
    cc.add_chain(chainf, parameters=paramf, name="Minuit")
    cc.add_chain(chainf2, parameters=paramf, name="Emcee")
    truth = {"$t_0$": t0, "$x_0$": x0, "$x_1$": x1, "$c$": c, r"$\mu$": get_mu(interped, x0, x1, c)}
    cc.plot(filename=temp_dir + "/surfaces_%d.png" % seed, truth=truth)
Example #3
def plot_all_no_weight(folder, output):
    """ Plot all chains as one, with and without weights applied """
    print("Plotting all as one, with old and new weights")
    chain, posterior, t, p, f, l, w, ow = load_stan_from_folder(folder, merge=True)
    c = ChainConsumer()
    c.add_chain(chain, posterior=posterior, walkers=l)
    c.plot(filename=output, truth=t, figsize=0.75)
Example #4
def debug_plots(std):
    print(std)

    res = load_stan_from_folder(std, merge=True, cut=False)
    chain, posterior, t, p, f, l, w, ow = res
    print(w.mean(), np.std(w), np.mean(np.log(w)), np.std(np.log(w)))
    # import matplotlib.pyplot as plt
    # plt.hist(np.log(w), 100)
    # plt.show()
    # exit()

    logw = np.log(w)
    m = np.mean(logw)
    s = np.std(logw)
    print(m, s)
    logw -= (m + 3 * s)
    good = logw < 0
    logw *= good
    w = np.exp(logw)

    sorti = np.argsort(w)
    for key in chain.keys():
        chain[key] = chain[key][sorti]
    w = w[sorti]
    ow = ow[sorti]
    posterior = posterior[sorti]

    c = ChainConsumer()
    truth = [0.3, 0.14, 3.1, -19.365, 0, 0, 0.1, 1.0, 0.1, 0, 0, 0, 0, 0, 0]
    c.add_chain(chain, name="uncorrected", posterior=posterior)
    c.add_chain(chain, weights=w, name="corrected", posterior=posterior)
    c.plot(filename="output.png", parameters=9, truth=truth, figsize=1.3)
    # c = ChainConsumer()
    # c.add_chain(chain, weights=w, name="corrected")
    c.plot_walks(chains="corrected", filename="walks.png", truth=truth)
def test_bic_fail_no_num_params():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    c = ChainConsumer()
    c.add_chain(d, posterior=p, num_eff_data_points=1000)
    bics = c.comparison.bic()
    assert len(bics) == 1
    assert bics[0] is None
Example #6
def plot_separate(folder, output):
    """ Plot separate cosmologies """
    print("Plotting all cosmologies separately")
    res = load_stan_from_folder(folder, merge=False)
    c = ChainConsumer()
    for i, (chain, posterior, t, p, f, l, w, ow) in enumerate(res):
        c.add_chain(chain, weights=w, posterior=posterior, walkers=l, name="%d"%i)
    c.plot(filename=output, truth=t, figsize=0.75)
 def test_get_chain_via_object(self):
     c = ChainConsumer()
     c.add_chain(self.data, name="A")
     c.add_chain(self.data, name="B")
     assert c._get_chain(c.chains[0])[0] == 0
     assert c._get_chain(c.chains[1])[0] == 1
     assert len(c._get_chain(c.chains[0])) == 1
     assert len(c._get_chain(c.chains[1])) == 1
Example #8
def plot_single_cosmology_weight(folder, output, i=0):
    print("Plotting cosmology realisation %d" % i)
    res = load_stan_from_folder(folder, merge=False)
    c = ChainConsumer()
    chain, posterior, t, p, f, l, w, ow = res[i]
    c.add_chain(chain, posterior=posterior, walkers=l, name="Uncorrected %d"%i)
    c.add_chain(chain, weights=w, posterior=posterior, walkers=l, name="Corrected %d"%i)
    c.plot(filename=output, truth=t, figsize=0.75)
def test_dic_0():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    c = ChainConsumer()
    c.add_chain(d, posterior=p)
    dics = c.comparison.dic()
    assert len(dics) == 1
    assert dics[0] == 0
def test_aic_0():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    c = ChainConsumer()
    c.add_chain(d, posterior=p, num_free_params=1, num_eff_data_points=1000)
    aics = c.comparison.aic()
    assert len(aics) == 1
    assert aics[0] == 0
 def test_shade_alpha_algorithm2(self):
     consumer = ChainConsumer()
     consumer.add_chain(self.data)
     consumer.add_chain(self.data)
     consumer.configure()
     alpha0 = consumer.chains[0].config["shade_alpha"]
     alpha1 = consumer.chains[1].config["shade_alpha"]
     assert alpha0 == 1.0 / 2.0
     assert alpha1 == 1.0 / 2.0
Example #12
def plot_all(folder, output, output_walk=None):
    """ Plot all chains as one """
    print("Plotting all as one")
    chain, posterior, t, p, f, l, w, ow = load_stan_from_folder(folder, merge=True)
    c = ChainConsumer()
    c.add_chain(chain, weights=w, posterior=posterior, walkers=l)
    c.plot(filename=output, truth=t, figsize=0.75)
    if output_walk is not None:
        c.plot_walks(filename=output_walk)
Example #13
def is_unconstrained(chain, param):
    c = ChainConsumer()
    c.add_chain(chain, parameters=param)
    constraints = c.get_summary()[0]
    for key in constraints:
        val = constraints[key]
        if val[0] is None or val[2] is None:
            return True
    return False
def test_bic_data_dependence2():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    c = ChainConsumer()
    c.add_chain(d, posterior=p, num_free_params=2, num_eff_data_points=1000)
    c.add_chain(d, posterior=p, num_free_params=3, num_eff_data_points=500)
    bics = c.comparison.bic()
    assert len(bics) == 2
    assert bics[0] == 0
    expected = 3 * np.log(500) - 2 * np.log(1000)
    assert np.isclose(bics[1], expected, atol=1e-3)
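For clarity, the asserted `expected` value is just the difference of the k*ln(N) penalty terms; a sketch, assuming the BIC = k ln N - 2 ln P_max convention that the assertions above imply:

import numpy as np

# With identical posteriors, the -2 ln(P_max) terms cancel; BICs are reported
# relative to the smallest value, hence bics[0] == 0 and this offset for chain 2.
print(3 * np.log(500) - 2 * np.log(1000))  # the `expected` value asserted above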
 def test_shade_alpha_algorithm3(self):
     consumer = ChainConsumer()
     consumer.add_chain(self.data)
     consumer.add_chain(self.data)
     consumer.add_chain(self.data)
     consumer.configure()
     alphas = [c.config["shade_alpha"] for c in consumer.chains]
     assert len(alphas) == 3
     assert alphas[0] == 1.0 / 3.0
     assert alphas[1] == 1.0 / 3.0
     assert alphas[2] == 1.0 / 3.0
Example #16
def get_instance():
    np.random.seed(0)
    c = ChainConsumer()
    parameters = ["$x$", r"$\Omega_\epsilon$", "$r^2(x_0)$"]
    for name in ["Ref. model", "Test A", "Test B", "Test C"]:
        # Add some random data
        mean = np.random.normal(loc=0, scale=3, size=3)
        sigma = np.random.uniform(low=1, high=3, size=3)
        data = np.random.multivariate_normal(mean=mean, cov=np.diag(sigma**2), size=100000)
        c.add_chain(data, parameters=parameters, name=name)
    return c
def test_aic_data_dependence():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    c = ChainConsumer()
    c.add_chain(d, posterior=p, num_free_params=1, num_eff_data_points=1000)
    c.add_chain(d, posterior=p, num_free_params=1, num_eff_data_points=500)
    aics = c.comparison.aic()
    assert len(aics) == 2
    assert aics[0] == 0
    expected = (2.0 * 1 * 2 / (500 - 1 - 1)) - (2.0 * 1 * 2 / (1000 - 1 - 1))
    assert np.isclose(aics[1], expected, atol=1e-3)
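The asserted `expected` value matches the small-sample AICc correction term 2k(k+1)/(N - k - 1); with identical posteriors and identical k, only this term differs between the two chains. A sketch, assuming that convention:

def aicc_correction(n, k=1):
    # Small-sample AIC correction term, 2k(k+1)/(N - k - 1)
    return 2.0 * k * (k + 1) / (n - k - 1)

print(aicc_correction(500) - aicc_correction(1000))  # the `expected` value above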
Example #18
 def _get_consumer(self, results, chain_consumer=None, include_latent=False):
     if chain_consumer is None:
         from chainconsumer import ChainConsumer
         chain_consumer = ChainConsumer()
     n = len(self._theta_labels) if include_latent else self._num_actual
     chain_consumer.add_chain(results["chain"],
                              weights=results.get("weights"),
                              posterior=results.get("posterior"),
                              parameters=self._theta_labels[:n],
                              name=self.model_name)
     return chain_consumer
Example #19
def plot_separate_weight(folder, output):
    """ Plot separate cosmologies, with and without weights applied """
    print("Plotting all cosmologies separately, with old and new weights")
    res = load_stan_from_folder(folder, merge=False)
    c = ChainConsumer()
    ls = []
    for i, (chain, posterior, t, p, f, l, w, ow) in enumerate(res):
        c.add_chain(chain, posterior=posterior, walkers=l, name="Uncorrected %d"%i)
        c.add_chain(chain, weights=w, posterior=posterior, walkers=l, name="Corrected %d"%i)
        ls += ["-", "--"]
    c.configure_general(linestyles=ls)
    c.plot(filename=output, truth=t, figsize=0.75)
def test_aic_posterior_dependence():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    p2 = norm.logpdf(d, scale=2)
    c = ChainConsumer()
    c.add_chain(d, posterior=p, num_free_params=1, num_eff_data_points=1000)
    c.add_chain(d, posterior=p2, num_free_params=1, num_eff_data_points=1000)
    aics = c.comparison.aic()
    assert len(aics) == 2
    assert aics[0] == 0
    expected = 2 * np.log(2)
    assert np.isclose(aics[1], expected, atol=1e-3)
 def test_remove_chain_by_name(self):
     tolerance = 5e-2
     consumer = ChainConsumer()
     consumer.add_chain(self.data * 2, name="a")
     consumer.add_chain(self.data, name="b")
     consumer.remove_chain(chain="a")
     consumer.configure()
     summary = consumer.analysis.get_summary()
     assert isinstance(summary, dict)
     actual = np.array(list(summary.values())[0])
     expected = np.array([3.5, 5.0, 6.5])
     diff = np.abs(expected - actual)
     assert np.all(diff < tolerance)
def test_dic_posterior_dependence():
    d = norm.rvs(size=1000000)
    p = norm.logpdf(d)
    p2 = norm.logpdf(d, scale=2)
    c = ChainConsumer()
    c.add_chain(d, posterior=p)
    c.add_chain(d, posterior=p2)
    dics = c.comparison.dic()
    assert len(dics) == 2
    assert dics[1] == 0
    dic1 = 2 * np.mean(-2 * p) + 2 * norm.logpdf(0)
    dic2 = 2 * np.mean(-2 * p2) + 2 * norm.logpdf(0, scale=2)
    assert np.isclose(dics[0], dic1 - dic2, atol=1e-3)
Example #23
def debug_plots(std):
    print(std)

    do_weight = True
    do_walk = True
    load_second = True

    res = load_stan_from_folder(std, merge=False, num=0, cut=False)
    chain, posterior, t, p, f, l, w, ow = res[0]

    if do_walk:
        # print("owww ", ow.min(), ow.max())
        # print("www ", w.min(), w.max())
        chain2 = chain.copy()
        logw = np.log10(w)
        a = np.argsort(logw)
        logw = logw[a]
        for k in chain:
            chain2[k] = chain2[k][a]
        chain2["ow"] = np.log10(ow)
        chain2["ww"] = logw
        c = ChainConsumer()
        c.add_chain(chain2, weights=w, name="calib")
        c.plot_walks(truth=t, filename="walk_new.png")

    c = ChainConsumer()
    if do_weight:
        c.add_chain(chain, weights=w, name="calib")
    else:
        c.add_chain(chain, name="calib")

    if load_second:
        res2 = load_stan_from_folder(std + "_calib_data_no_calib_model_and_bias", num=0, merge=False, cut=False)
        chain, posterior, _, p, f, l, w, ow = res2[0]

        if do_weight:
            c.add_chain(chain, weights=w, name="nocalib")
        else:
            c.add_chain(chain, name="nocalib")
    c.plot(filename="output.png", truth=t, figsize=0.75)
Example #24
def debug_plots(std):
    print(std)

    res = load_stan_from_folder(std, merge=True, cut=False)
    chain, posterior, t, p, f, l, w, ow = res
    # print(w.mean())
    # import matplotlib.pyplot as plt
    # plt.hist(np.log(w), 100)
    # plt.show()
    # exit()
    logw = np.log(w)
    m = np.mean(logw)
    s = np.std(logw)
    print(m, s)
    logw -= (m + 3 * s)
    good = logw < 0
    logw *= good
    w = np.exp(logw)

    c = ChainConsumer()
    c.add_chain(chain, weights=w, name="corrected")
    c.configure(summary=True)
    c.plot(figsize=2.0, filename="output.png", parameters=9)

    c = ChainConsumer()
    c.add_chain(chain, name="uncorrected")
    c.add_chain(chain, weights=w, name="corrected")
    # c.add_chain(chain, name="calib")
    c.plot(filename="output_comparison.png", parameters=9, figsize=1.3)
    c.plot_walks(chains=1, filename="walks.png")
 def test_remove_multiple_chains3(self):
     tolerance = 5e-2
     consumer = ChainConsumer()
     consumer.add_chain(self.data * 2, parameters=["p1"], name="a")
     consumer.add_chain(self.data, parameters=["p2"], name="b")
     consumer.add_chain(self.data * 3, parameters=["p3"], name="c")
     consumer.remove_chain(chain=["a", 2])
     consumer.configure()
     summary = consumer.analysis.get_summary()
     assert isinstance(summary, dict)
     assert "p2" in summary
     assert "p1" not in summary
     assert "p3" not in summary
     actual = np.array(list(summary.values())[0])
     expected = np.array([3.5, 5.0, 6.5])
     diff = np.abs(expected - actual)
     assert np.all(diff < tolerance)
"""
==============
Shade Gradient
==============

Control contour contrast!

To help make your confidence levels more obvious, you can play with the gradient steepness and
resulting contrast in your contours.
"""

import numpy as np
from numpy.random import multivariate_normal
from chainconsumer import ChainConsumer

np.random.seed(0)
data1 = multivariate_normal([0, 0], [[1, 0], [0, 1]], size=1000000)
data2 = multivariate_normal([4, -4], [[1, 0], [0, 1]], size=1000000)

c = ChainConsumer()
c.add_chain(data1, parameters=["$x$", "$y$"])
c.add_chain(data2, parameters=["$x$", "$y$"])
c.configure(shade_gradient=[0.1, 3.0],
            colors=['o', 'k'],
            sigmas=[0, 1, 2, 3],
            shade_alpha=1.0)
fig = c.plotter.plot()

fig.set_size_inches(
    3 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
Example #27
# -*- coding: utf-8 -*-
"""
=========================
Plot Parameter Covariance
=========================

You can also get LaTeX tables for parameter covariance.

Turned into glorious LaTeX, we would get something like the following:

.. figure::     ../../examples/resources/covariance.png
    :align:     center
    :width:     60%

"""

###############################################################################
# The code to produce this, and the raw LaTeX, is given below:

import numpy as np
from chainconsumer import ChainConsumer

cov = [[1.0, 0.5, 0.2], [0.5, 2.0, 0.3], [0.2, 0.3, 3.0]]
data = np.random.multivariate_normal([0, 0, 1], cov, size=1000000)
parameters = ["x", "y", "z"]
c = ChainConsumer()
c.add_chain(data, parameters=parameters)
latex_table = c.analysis.get_covariance_table()
print(latex_table)
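###############################################################################
# The analysis module also provides a correlation counterpart; a minimal
# sketch, assuming the `get_correlation_table` method available in recent
# ChainConsumer releases.

latex_corr_table = c.analysis.get_correlation_table(caption="Parameter correlations")
print(latex_corr_table)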
Example #28
def run_grid():
	if util.does_grid_exist(model_name,root_dir):
		print('Grid already exists, using existing grid...')
		resolution, likelihoods, parameters, theta_max = util.read_grid(model_name,root_dir)
		a_par, k_par, s0_par, alpha_par = parameters
	else:
		print('Grid does not exist, computing grid...')
	
		resolution = 50
		#theta_pass = 4.2e-14, 0.272, 3.8e-11, 0.9
		a_min, a_max = 2e-14, 20e-14
		k_min, k_max = 0.05, 1.4
		s0_min, s0_max = 1e-11, 60e-11
		alpha_min, alpha_max = 0.3, 1.4

		#a_min, a_max = 2e-14, 13e-14
		#k_min, k_max = 0.05, 2
		#s0_min, s0_max = 1e-11, 20e-11
		#alpha_min, alpha_max = 0.55, 1.4

		# Reading in data
		ssfr, snr, snr_err = util.read_data()

		a_par = np.linspace(a_min,a_max,resolution)
		k_par = np.linspace(k_min,k_max,resolution)
		s0_par = np.linspace(s0_min,s0_max,resolution)
		alpha_par = np.linspace(alpha_min,alpha_max,resolution)

		likelihoods = np.ones((resolution,resolution,resolution,resolution))
		max_like = 0.

		for ii in np.arange(resolution):
			if ii%2 == 0:
				print(np.round((float(ii) / resolution) * 100., 2), "% Done")
			for jj in np.arange(resolution):
				for kk in np.arange(resolution):
					for ll in np.arange(resolution):
						theta = a_par[ii], k_par[jj], s0_par[kk], alpha_par[ll]
						likelihoods[ii,jj,kk,ll] = np.exp(lnlike(theta,ssfr,snr,snr_err))
						if likelihoods[ii,jj,kk,ll] > max_like:
							max_like = likelihoods[ii,jj,kk,ll]
							theta_max = a_par[ii], k_par[jj], s0_par[kk], alpha_par[ll]
							#print "New max like:", max_like
							#print theta_max, "\n"
		likelihoods /= np.sum(likelihoods)
		output = open(root_dir + 'Data/MCMC_nicelog_grid.pkl','wb')
		parameters = a_par, k_par, s0_par, alpha_par
		result = resolution, likelihoods, parameters, theta_max
		pick.dump(result,output)
		output.close()

	a_like = np.zeros(resolution)
	k_like = np.zeros(resolution)
	s0_like = np.zeros(resolution)
	alpha_like = np.zeros(resolution)
	for ii in np.arange(resolution):
		a_like[ii]    = np.sum(likelihoods[ii,:,:,:])
		k_like[ii]    = np.sum(likelihoods[:,ii,:,:])
		s0_like[ii]    = np.sum(likelihoods[:,:,ii,:])
		alpha_like[ii]    = np.sum(likelihoods[:,:,:,ii])
	
	yes_chainconsumer = False
	if yes_chainconsumer:
		print "Defining chainconsumer"
		c = ChainConsumer()
		print "Adding chain"
		c.add_chain([a_par, k_par, s0_par, alpha_par], parameters=["a","k","s0","alpha"],weights=likelihoods,grid=True)
		print "Doing plot"
		fig = c.plotter.plot()

	'''
	plt.figure()
	ax = plt.subplot()
	ax.set_xscale("log")
	plt.plot(a_par,a_like,'x')
	plt.xlabel('a')

	plt.figure()
	ax = plt.subplot()
	ax.set_xscale("log")
	plt.plot(k_par,k_like,'x')
	plt.xlabel('k')

	plt.figure()
	ax = plt.subplot()
	ax.set_xscale("log")
	plt.plot(s0_par,s0_like,'x')
	plt.xlabel('ssfr0')

	plt.figure()
	ax = plt.subplot()
	ax.set_xscale("log")
	plt.plot(alpha_par,alpha_like,'x')
	plt.xlabel('alpha')
	'''
	
	# These are the marginalised maximum likelihood parameters
	a_fit = a_par[np.argmax(a_like)]
	k_fit = k_par[np.argmax(k_like)]
	s0_fit = s0_par[np.argmax(s0_like)]
	alpha_fit = alpha_par[np.argmax(alpha_like)]

	print "ML parameters:"
	#theta_pass = a_fit, k_fit, s0_fit, alpha_fit
	theta_pass = theta_max
	print theta_pass
	return theta_pass
Example #29
    c2.samples['cosmological_parameters--omega_m'],
    c2.samples['cosmological_parameters--s8'],
    c2.samples['cosmological_parameters--sigma_8']
])

#import pdb ; pdb.set_trace()
print('Making cornerplot...')
cc = ChainConsumer()

#import pdb ; pdb.set_trace()

names = [r'$\Omega_{\rm m}$', '$S_8$', r'$\sigma_8$']

cc.add_chain(samp1.T,
             parameters=names,
             weights=c1.weight,
             kde=True,
             name=r'Baseline')
cc.add_chain(samp2.T,
             parameters=names,
             weights=c2.weight,
             kde=True,
             name=r'Contaminated')

cc.configure(colors=['#FA86C9', '#7223AD', '#DDA0DD'],
             shade=[True, True, True] * 3,
             shade_alpha=[0.65, 0.55, 0.1, 0.5],
             kde=[2] * 3,
             legend_kwargs={
                 "loc": "upper right",
                 "fontsize": 16
Example #30
#import pdb ; pdb.set_trace()
print('Making cornerplot...')
cc = ChainConsumer()

#import pdb ; pdb.set_trace()

names = [r'$\Omega_{\rm m}$', '$S_8$', '$A_1$', r'$\eta_1$']
names2 = [
    r'$\Omega_{\rm m}$', '$S_8$', '$A_1$', r'$\eta_1$', '$A_2$', r'$\eta_2$',
    r'$b_{\rm TA}$'
]

cc.add_chain(samp1.T,
             parameters=names,
             weights=c1.weight,
             kde=True,
             name=r'NLA (Simon)')
cc.add_chain(samp2.T,
             parameters=names,
             weights=c2.weight,
             kde=True,
             name=r'NLA (Alex)')
#cc.add_chain(samp3.T, parameters=names, weights=c3.weight, kde=True, name=r'model=NLA, data=NLA')

cc.configure(colors=[
    '#121F90',
    '#8b008b',
    '#FF94CB',
    '#FF1493',
],
Example #31
        fitter.add_model_and_dataset(model, d, name=f"Beutler 2017 Fixed $\\Sigma_{{nl}}$ {t}", linestyle=ls, color=cs[0])
        fitter.add_model_and_dataset(CorrSeo2016(recon=r), d, name=f"Seo 2016 {t}", linestyle=ls, color=cs[1])
        fitter.add_model_and_dataset(CorrDing2018(recon=r), d, name=f"Ding 2018 {t}", linestyle=ls, color=cs[2])

    fitter.set_sampler(sampler)
    fitter.set_num_walkers(30)
    fitter.fit(file)

    if fitter.should_plot():
        import logging

        logging.info("Creating plots")
        from chainconsumer import ChainConsumer

        c = ChainConsumer()
        for posterior, weight, chain, evidence, model, data, extra in fitter.load():
            c.add_chain(chain, weights=weight, parameters=model.get_labels(), **extra)
            print(extra["name"], chain.shape, weight.shape, posterior.shape)
        c.configure(shade=True, bins=30, legend_artists=True)
        c.analysis.get_latex_table(filename=pfn + "_params.txt", parameters=[r"$\alpha$"])
        c.plotter.plot_summary(filename=pfn + "_summary.png", extra_parameter_spacing=1.5, errorbar=True, truth={"$\\Omega_m$": 0.31, "$\\alpha$": 0.9982})
        c.plotter.plot_summary(
            filename=[pfn + "_summary2.png", pfn + "_summary2.pdf"],
            extra_parameter_spacing=1.5,
            parameters=1,
            errorbar=True,
            truth={"$\\Omega_m$": 0.31, "$\\alpha$": 0.9982},
        )
        # c.plotter.plot(filename=pfn + "_contour.png", truth={"$\\Omega_m$": 0.31, '$\\alpha$': 1.0})
        # c.plotter.plot_walks(filename=pfn + "_walks.png", truth={"$\\Omega_m$": 0.3121, '$\\alpha$': 1.0})
Example #32
"""

import numpy as np
from numpy.random import normal, multivariate_normal
from chainconsumer import ChainConsumer

np.random.seed(2)
cov = normal(size=(2, 2)) + np.identity(2)
d1 = multivariate_normal(normal(size=2), np.dot(cov, cov.T), size=100000)
cov = normal(size=(2, 2)) + np.identity(2)
d2 = multivariate_normal(normal(size=2), np.dot(cov, cov.T), size=100000)
cov = normal(size=(2, 2)) + np.identity(2)
d3 = multivariate_normal(normal(size=2), np.dot(cov, cov.T), size=1000000)

c = ChainConsumer()
c.add_chain(d1, parameters=["$x$", "$y$"])
c.add_chain(d2)
c.add_chain(d3)

c.configure(linestyles=["-", "--", "-"],
            linewidths=[1.0, 3.0, 1.0],
            bins=[3.0, 1.0, 1.0],
            colors=["#1E88E5", "#D32F2F", "#111111"],
            smooth=[0, 1, 2],
            shade=[True, True, False],
            shade_alpha=[0.2, 0.1, 0.0],
            bar_shade=[True, False, False])
fig = c.plotter.plot()

fig.set_size_inches(
    4.5 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
Example #33
x_dat = x.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
y_dat = y.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
err_dat = err.ctypes.data_as(ctypes.POINTER(ctypes.c_double))

# Run the HMC
results = test(num_data, x_dat, y_dat, err_dat, num_samps, num_steps, num_burn,
               epsilon)
print('Acceptance Rate: ', str(results.chain.accept_rate))

# Get the results.
chain = np.array([[results.chain.samples[i][j] for j in range(2)]
                  for i in range(num_samps)])
likelihoods = np.array(
    [results.chain.log_likelihoods[i] for i in range(num_samps)])

# Should see a nice converged chain.
plt.plot(range(len(likelihoods)), likelihoods)
plt.title('Log-Likelihood')
plt.xlabel(r'Step \#')
plt.ylabel(r'$\ln(P)$')
plt.show()

# Should match the distribution from emcee.
c = ChainConsumer()
c.add_chain(samples, ['m', 'b'], name='emcee')
c.add_chain(chain, ['m', 'b'], name='HMC')
c.configure(sigma2d=False)
fig = c.plotter.plot(figsize='column', truth=x_true)
plt.tight_layout()
plt.savefig('emcee_compare.png')
plt.show()
            #    continue

            print(extra["name"])
            model.set_data(data)
            r_s = model.camb.get_data()["r_s"]

            df = pd.DataFrame(chain, columns=model.get_labels())
            alpha = df["$\\alpha$"].to_numpy()
            epsilon = df["$\\epsilon$"].to_numpy()
            print(model, np.shape(alpha), np.shape(epsilon))
            alpha_par, alpha_perp = model.get_alphas(alpha, epsilon)
            df["$\\alpha_\\parallel$"] = alpha_par
            df["$\\alpha_\\perp$"] = alpha_perp

            extra.pop("realisation", None)
            c.add_chain(df, weights=weight, **extra)

            max_post = posterior.argmax()
            chi2 = -2 * posterior[max_post]

            params = model.get_param_dict(chain[max_post])
            for name, val in params.items():
                model.set_default(name, val)

            # Ensures we return the window convolved model
            icov_m_w = model.data[0]["icov_m_w"]
            model.data[0]["icov_m_w"][0] = None

            ks = model.data[0]["ks"]
            err = np.sqrt(np.diag(model.data[0]["cov"]))
            mod, mod_odd, polymod, polymod_odd, _ = model.get_model(params, model.data[0], data_name=data[0]["name"])
Example #35
best_fit_hybrid = [12.29868808, 10.48771974,  0.41364778,  0.41375455,  0.30506659,
       10.1679343 , 13.10135398,  0.81869216,  0.13844437]
#46
best_fit_halo = [12.37399415, 10.57683767,  0.42357192,  0.50163458,  0.31593679,
       11.86645536, 12.54502723,  1.42736618,  0.5261119 ]
# parameters=[r"${log_{10}\ M_{1}}$", 
#         r"${log_{10}\ M_{*}}$", r"${\beta}$",
#         r"${\delta}$", r"${\xi}$", 
#         r"${log_{10}\ M^{q}_{*}}$", r"${log_{10}\ M^{q}_{h}}$", 
#         r"${\mu}$", r"${\nu}$"]

c = ChainConsumer()
if quenching == 'hybrid':
    c.add_chain(samples, parameters=[r"$\mathbf{log_{10}\ M_{1}}$", 
        r"$\mathbf{log_{10}\ M_{*}}$", r"$\boldsymbol{\beta}$",
        r"$\boldsymbol{\delta}$", r"$\boldsymbol{\xi}$", 
        r"$\mathbf{log_{10}\ M^{q}_{*}}$", r"$\mathbf{log_{10}\ M^{q}_{h}}$", 
        r"$\boldsymbol{\mu}$", r"$\boldsymbol{\nu}$"],
        name=r"ECO hybrid (45)", color="#663399", zorder=10)

    # for i in range(len(best_fit_hybrid)):
    #     for j in range(len(best_fit_hybrid)):
    #         if i==j:
    #             continue
    #         else:
    #             c.add_marker([best_fit_hybrid[i],best_fit_hybrid[j]], 
    #             [parameters[i], parameters[j]], marker_style="*", 
    #             marker_size=100, color='#1f77b4')
        

    c.add_chain(samples_43,parameters=[r"$\mathbf{log_{10}\ M_{1}}$", 
        r"$\mathbf{log_{10}\ M_{*}}$", r"$\boldsymbol{\beta}$",
"""
==============
Shade Gradient
==============

Control contour contrast!

To help make your confidence levels more obvious, you can play with the gradient steepness and
resulting contrast in your contours.
"""

import numpy as np
from numpy.random import multivariate_normal
from chainconsumer import ChainConsumer

np.random.seed(0)
data1 = multivariate_normal([0, 0], [[1, 0], [0, 1]], size=1000000)
data2 = multivariate_normal([4, -4], [[1, 0], [0, 1]], size=1000000)

c = ChainConsumer()
c.add_chain(data1, parameters=["$x$", "$y$"])
c.add_chain(data2, parameters=["$x$", "$y$"])
c.configure(shade_gradient=[0.1, 3.0], colors=['o', 'k'], sigmas=[0, 1, 2, 3], shade_alpha=1.0)
fig = c.plotter.plot()

fig.set_size_inches(4.5 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
Example #37
import numpy as np
from numpy.random import normal
from chainconsumer import ChainConsumer

np.random.seed(0)
# Here we have some nice data, and then some bad data,
# where the last part of the chain has walked off, and the first part
# of the chain isn't agreeing with anything else!
data_good = normal(size=100000)
data_bad = data_good.copy()
data_bad += np.linspace(-0.5, 0.5, 100000)
data_bad[98000:] += 2

# Let's load it into ChainConsumer and pretend 10 walkers went into making the chain
c = ChainConsumer()
c.add_chain(data_good, walkers=10, name="good")
c.add_chain(data_bad, walkers=10, name="bad")

# Now, let's check our convergence using the Gelman-Rubin statistic
gelman_rubin_converged = c.diagnostic.gelman_rubin()
# And also using the Geweke metric
geweke_converged = c.diagnostic.geweke()

# Let's just output the results too
print(gelman_rubin_converged, geweke_converged)

###############################################################################
# We can see that both the Gelman-Rubin and Geweke statistics failed.
# Note that by not specifying a chain when calling the diagnostics,
# they are invoked on *all* chains. For example, to invoke the statistic
# on only the second chain, we can pass in either the chain index or the chain name.
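###############################################################################
# A minimal sketch of per-chain invocation, assuming the `chain` keyword of
# the diagnostic methods, which accepts either an index or a name.

print(c.diagnostic.gelman_rubin(chain="bad"))
print(c.diagnostic.geweke(chain=1))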
"""
===================
Change Font Options
===================

Control tick rotation and font sizes.

Here the tick rotation has been turned off, ticks made smaller,
more ticks added, and label size increased!
"""

import numpy as np
from numpy.random import multivariate_normal
from chainconsumer import ChainConsumer


np.random.seed(0)
data = multivariate_normal([0, 1, 2], np.eye(3) + 0.2, size=100000)

# If you pass in parameter labels and only one chain, you can also get parameter bounds
c = ChainConsumer()
c.add_chain(data, parameters=["$x$", "$y^2$", r"$\Omega_\beta$"], name="Example")
c.configure(diagonal_tick_labels=False, tick_font_size=8, label_font_size=25, max_ticks=8)
fig = c.plotter.plot(figsize="column", legend=True)

fig.set_size_inches(4.5 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
Example #39
        param_names = [
            r'$\Omega_m$', r'${\alpha}$', r'$\beta$', r'$M_B$', r'$\Delta_M$'
        ]
    ndim = len(startValues)

    # fitting happens here
    run_MCMC(chains, startValues, nsteps)

    pdata = np.genfromtxt(chains + '/params.txt', delimiter=',')
    post = -0.5 * np.genfromtxt(chains + '/chisq.txt', delimiter=',')
    pdata = pdata[burnin:]
    post = post[burnin:]

    # plotting and summary happens here - comment out any of the below
    c = ChainConsumer()
    c.add_chain(pdata, parameters=param_names, posterior=post)
    c.configure(statistics="max_symmetric", rainbow=True)
    summary = c.analysis.get_summary()

    # write parameter values
    write_summary(chains, summary, param_names)

    # plot contours
    fig = c.plotter.plot(figsize=(6, 6),
                         filename=chains + '/marginals-%sdim.png' %
                         (nparams))  #, blind = ['$\Omega_m$', '$w$'])
    fig.show()
    # plot walks
    fig2 = c.plotter.plot_walks(figsize=(6, 6),
                                filename=chains + '/walks-%s.png' % burnin)
    fig2.show()
We can turn off the default Gaussian filter on marginalised distributions.

This can be done by setting ``smooth`` to either ``0``, ``None`` or ``False``.
Note that the parameter summaries also have smoothing turned off, and
thus summaries may change.

Fun colour change! And thicker lines!

"""

import numpy as np
from chainconsumer import ChainConsumer

data = np.random.multivariate_normal([0.0, 4.0], [[1.0, 0.7], [0.7, 1.5]],
                                     size=100000)

c = ChainConsumer()
c.add_chain(data, parameters=["$x_1$", "$x_2$"])
c.configure(smooth=0, linewidths=2, colors="#673AB7")
fig = c.plotter.plot(figsize="column", truth=[0.0, 4.0])

# If we wanted to save to file, we would instead have written
# fig = c.plotter.plot(filename="location", figsize="column", truth=[0.0, 4.0])

# If we wanted to display the plot interactively...
# fig = c.plotter.plot(display=True, figsize="column", truth=[0.0, 4.0])

fig.set_size_inches(
    3 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
Example #41
import numpy as np
from chainconsumer import ChainConsumer

if __name__ == "__main__":
    ndim, nsamples = 4, 200000
    np.random.seed(0)

    data = np.random.randn(nsamples, ndim)
    data[:, 2] += data[:, 1] * data[:, 2]
    data[:, 1] = data[:, 1] * 3 + 5
    data[:, 3] /= (np.abs(data[:, 1]) + 1)

    data2 = np.random.randn(nsamples, ndim)
    data2[:, 0] -= 1
    data2[:, 2] += data2[:, 1]**2
    data2[:, 1] = data2[:, 1] * 2 - 5
    data2[:, 3] = data2[:, 3] * 1.5 + 2

    # If you pass in parameter labels and only one chain, you can also get parameter bounds
    c = ChainConsumer()
    c.add_chain(data,
                parameters=["$x$", "$y$", r"$\alpha$", r"$\beta$"],
                name="Model A")
    c.add_chain(data2,
                parameters=["$x$", "$y$", r"$\alpha$", r"$\gamma$"],
                name="Model B")
    table = c.analysis.get_latex_table(caption="Results for the tested models",
                                       label="tab:example")
    print(table)
===================
Change Font Options
===================

Control tick rotation and font sizes.

Here the tick rotation has been turned off, ticks made smaller,
more ticks added, and label size increased!
"""

import numpy as np
from numpy.random import multivariate_normal
from chainconsumer import ChainConsumer

np.random.seed(0)
data = multivariate_normal([0, 1, 2], np.eye(3) + 0.2, size=100000)

# If you pass in parameter labels and only one chain, you can also get parameter bounds
c = ChainConsumer()
c.add_chain(data,
            parameters=["$x$", "$y^2$", r"$\Omega_\beta$"],
            name="Example")
c.configure(diagonal_tick_labels=False,
            tick_font_size=8,
            label_font_size=25,
            max_ticks=8)
fig = c.plotter.plot(figsize="column", legend=True)

fig.set_size_inches(
    4.5 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
Example #43
                #    continue

                # Throw away the first 20% or so of samples so as to avoid considering initial burn-in period
                #chain = chain[int(nWalkers*nSteps*0.1):]
                chain = np.array(chains[int(chain_length*0.2):])

                # Name params for chainconsumer (i.e. axis labels)
                plot_params = ["T [K]", "log(n$_{H}$) [cm$^{-3}$]"] + [
                    "log(N$_{{{0}}}$)[cm$^ {{-2}}$]".format(spec) for spec in obs["species"]]

                # Chain consumer plots posterior distributions and calculates
                # maximum likelihood statistics as well as Geweke test
                file_out = "{0}/radex-plots/new/corner_{1}.pdf".format(DIREC, obs["source"])
                file_out_walk = "{0}/radex-plots/new/walks/walk_{1}.pdf".format(DIREC, obs["source"])
                c = ChainConsumer() 
                c.add_chain(chain, parameters=plot_params, walkers=nWalkers)

                # Convergence tests
                gelman_rubin = c.diagnostic.gelman_rubin(threshold=0.15)
                if gelman_rubin:
                    print("Chains have converged")
                else:
                    print("Chains have yet to converge")

                c.configure(color_params="posterior", usetex=True, summary=False, bins=0.3, cloud=False, spacing=2.0, sigmas=np.linspace(0, 2, 3), plot_contour=True, bar_shade=[False]*len(plot_params))
                fig = c.plotter.plot(filename=file_out, display=False,) #extents=[(60, 1000), (4, 7), (13, 15), (13, 15), (14, 17), (14, 15), (14, 15)])

                fig_walks = c.plotter.plot_walks(filename=file_out_walk, display=False, plot_posterior=True)
                plt.close()

                # Convert both pdf to jpeg
Example #44
            from matplotlib.colors import rgb2hex  # plt.colors has no rgb2hex attribute
            color = rgb2hex(cmap(float(i / len(datasets))))

            model.set_data(data)
            r_s = model.camb.get_data()["r_s"]

            df = pd.DataFrame(chain, columns=model.get_labels())
            alpha = df["$\\alpha$"].to_numpy()
            epsilon = df["$\\epsilon$"].to_numpy()
            alpha_par, alpha_perp = model.get_alphas(alpha, epsilon)
            df["$\\alpha_\\parallel$"] = alpha_par
            df["$\\alpha_\\perp$"] = alpha_perp

            extra.pop("realisation", None)
            c.add_chain(df,
                        weights=weight,
                        color=color,
                        posterior=posterior,
                        **extra)

            max_post = posterior.argmax()
            chi2 = -2 * posterior[max_post]

            params = model.get_param_dict(chain[max_post])
            for name, val in params.items():
                model.set_default(name, val)

            new_chi_squared, dof, bband, mods, smooths = model.plot(
                params, figname=pfn + fitname + "_bestfit.pdf", display=False)

        c.configure(shade=True,
                    bins=20,
Example #45
    c1.samples['cosmological_parameters--omega_m'],
    c1.samples['cosmological_parameters--s8'],
    c1.samples['cosmological_parameters--sigma_8'],
])

#import pdb ; pdb.set_trace()
print('Making cornerplot...')
cc = ChainConsumer()

#import pdb ; pdb.set_trace()

names = [r'$\Omega_{\rm m}$', '$S_8$', r'$\sigma_8$']

cc.add_chain(samp0.T,
             parameters=names,
             weights=c0.weight,
             kde=True,
             name=r'No IAs')
cc.add_chain(samp1.T,
             parameters=names,
             weights=c1.weight,
             kde=True,
             name=r'TATT')

cc.configure(colors=[
    '#121F90',
    '#8b008b',
    '#FF94CB',
    '#FF1493',
],
             shade=[False, True, True] * 3,
Example #46
            s=1,
            label="Female")
plt.legend(loc=2)
plt.xlabel("Height")
plt.ylabel("Weight")

# treating points with probability
# MCMC chains and posterior samples
# instructor produced this lib

params = ["height", "weight"]
male = df2.loc[m, params].values
female = df2.loc[~m, params].values

from chainconsumer import ChainConsumer
c = ChainConsumer()
c.add_chain(male, parameters=params, name="Male", kde=1.0, color="b")
c.add_chain(female, parameters=params, name="Female", kde=1.0, color="r")
c.configure(contour_labels='confidence', usetex=False, serif=False)
c.plotter.plot(figsize=2.0)
sns.despine(left=True, bottom=True)
# shows 68% and 95% confidence intervals
# good for hypothesis testing
# does a data point come from the distribution?
# probability surfaces

# instead of looking at contours we can look at 1d distributions
c.plotter.plot_summary(figsize=2.0)
sns.despine(left=True, bottom=True)
# similar to violin plot
Example #47
def run_emcee():
	if util.do_chains_exist(model_name,root_dir):
		print('Chains already exist, using existing chains...')
		samples = util.read_chains(model_name,root_dir)
		print(np.shape(samples))
	else:
		print('Chains do not exist, computing chains...')
		logssfr, ssfr, snr, snr_err = util.read_data_with_log()

		ndim = 4
		nwalkers = 300

		pos_min = np.array([0.05e-14, 0.001, 0.1e-11, 0.01])
		pos_max = np.array([120e-14, 5.5, 1000e-11, 1.6])
		psize = pos_max - pos_min
		pos = [pos_min + psize*np.random.rand(ndim) for ii in range(nwalkers)]

		sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(ssfr, snr, snr_err), threads=1)
		pos, prob, state = sampler.run_mcmc(pos, 500)
		sampler.reset()

		pos, prob, state = sampler.run_mcmc(pos, 4000)

		samples = sampler.flatchain
		output = open(root_dir + 'Data/MCMC_nicelog.pkl','wb')
		pick.dump(samples,output)
		output.close()
		print(np.shape(samples))

	plt.figure()
	plt.hist(samples[:,0],bins=300)
	plt.xlabel('A')

	plt.figure()
	plt.hist(samples[:,1],bins=300)
	plt.xlabel('k')

	plt.figure()
	plt.hist(samples[:,2],bins=300)
	plt.xlabel('sSFR_0')


	c = ChainConsumer()
	c.add_chain(samples, parameters=["$A$", "$k$", "$sSFR_0$", "$alpha$"])
	#figw = c.plotter.plot_walks()
	fig = c.plotter.plot(figsize=(8,6))
	fig.savefig(root_dir + 'Plots/marginals_nicelog.pdf')
	summary =  c.analysis.get_summary()

	a_fit = summary["$A$"][1]
	c_fit = summary["$k$"][1]
	d_fit = summary["$sSFR_0$"][1]
	alpha_fit = summary["$alpha$"][1]
	
	print('A', a_fit)
	print('k', c_fit)
	print('sSFR_0', d_fit)
	print('alpha', alpha_fit)
	
	theta_pass = a_fit, c_fit, d_fit, alpha_fit
	return theta_pass
samp3 = np.array([c3.samples['cosmological_parameters--omega_m'], c3.samples['cosmological_parameters--s8'], c3.samples['intrinsic_alignment_parameters--a1'], c3.samples['intrinsic_alignment_parameters--alpha1']])


#import pdb ; pdb.set_trace()
print('Making cornerplot...')
cc = ChainConsumer()

#import pdb ; pdb.set_trace()


names = [r'$\Omega_{\rm m}$','$S_8$','$A_1$', r'$\eta_1$']
names2 = [r'$\Omega_{\rm m}$','$S_8$','$A_1$', r'$\eta_1$', '$A_2$', r'$\eta_2$', r'$b_{\rm TA}$']


#cc.add_chain(samp1.T, parameters=names, weights=c1.weight, kde=True, name=r'model=NLA, data=TATT')
cc.add_chain(samp2.T, parameters=names2, weights=c2.weight, kde=True, name=r'model=TATT, data=TATT')
cc.add_chain(samp3.T, parameters=names, weights=c3.weight, kde=True, name=r'model=NLA, data=NLA')



cc.configure(colors=['#8b008b', '#FF94CB', '#FF1493'],
             shade=[False, True, True] * 3,
             shade_alpha=[0.25, 0.25, 0.1, 0.5],
             legend_kwargs={"loc": "upper right", "fontsize": 16},
             label_font_size=12,
             tick_font_size=14)
#cc.configure(colors=['#800080', '#800080', '#FF1493', '#000000' ],shade=[False,True,True,False], shade_alpha=[0.25,0.25,0.25], max_ticks=4, kde=[6]*5, linestyles=["-", "-", '-.', '--'], legend_kwargs={"loc": "upper right", "fontsize": 14},label_font_size=14,tick_font_size=14)
plt.close()
fig = cc.plotter.plot(extents={'$S_8$': (0.5, 0.95), r'$\Omega_{\rm m}$': (0.15, 0.45),
                               '$A_1$': (-5., 5), '$A_2$': (-5., 5), r'$\eta_1$': (-5, 5),
                               r'$\eta_2$': (-5, 2), r'$b_{\rm TA}$': (0, 2)})  #, truth=[0.3,0.82355,0.7,-1.7] )


#plt.suptitle(r'$\Lambda$CDM $1 \times 2\mathrm{pt}$, data=TATT', fontsize=16)
plt.subplots_adjust(bottom=0.155,left=0.155, hspace=0, wspace=0)

print('Saving...')
plt.savefig('/Users/hattifattener/Documents/y3cosmicshear/plots/sim_ia_1x2pt_fixedm.pdf')
def plot_mockaverage(dataflag, matterfile, datafile, datafile_recon, covfile,
                     covfile_recon, winfile, winmatfile, xmin, xmax, chainfile,
                     chainfile_recon):

    power = Hinton2017CAMB(redshift=0.11, mnu=0.0)

    # Set up the type of data and model we want. Can be one of "Polynomial" or "FullShape". We will add "LinearPoint" and "BAOExtractor" later.
    if (dataflag == 0):
        data = CorrelationFunction(nmocks=1000).read_data(datafile=datafile,
                                                          covfile=covfile,
                                                          xmin=xmin,
                                                          xmax=xmax)
        data_recon = CorrelationFunction(nmocks=1000).read_data(
            datafile=datafile_recon,
            covfile=covfile_recon,
            xmin=xmin,
            xmax=xmax)
        model = Polynomial("CorrelationFunction",
                           power,
                           free_sigma_nl=True,
                           prepare_model_flag=True)
        model_recon = Polynomial("CorrelationFunction",
                                 power,
                                 free_sigma_nl=True,
                                 prepare_model_flag=True)
    elif (dataflag == 1):
        data = PowerSpectrum(nmocks=1000,
                             verbose=True).read_data(datafile=datafile,
                                                     covfile=covfile,
                                                     xmin=xmin,
                                                     xmax=xmax,
                                                     winmatfile=winmatfile)
        data.read_data(winfile=winfile, xmin=xmin, xmax=xmax, nconcat=binwidth)
        data_recon = PowerSpectrum(nmocks=1000, verbose=True).read_data(
            datafile=datafile_recon,
            covfile=covfile_recon,
            xmin=xmin,
            xmax=xmax,
            winmatfile=winmatfile)
        data_recon.read_data(winfile=winfile,
                             xmin=xmin,
                             xmax=xmax,
                             nconcat=binwidth)
        model = FullShape(
            "PowerSpectrum",
            power,
            free_sigma_nl=True,
            nonlinearterms="./files/compute_pt_integrals_output.dat",
            verbose=True)
        model_recon = FullShape(
            "PowerSpectrum",
            power,
            free_sigma_nl=True,
            nonlinearterms="./files/compute_pt_integrals_output.dat",
            verbose=True)
    elif (dataflag == 2):
        data = BAOExtract(nmocks=1000,
                          verbose=True).read_data(datafile=datafile,
                                                  covfile=covfile,
                                                  xmin=xmin,
                                                  xmax=xmax,
                                                  winmatfile=winmatfile)
        data.read_data(winfile=winfile, xmin=xmin, xmax=xmax, nconcat=binwidth)
        model = BAOExtractor(
            power,
            free_sigma_nl=True,
            nonlinearterms="./files/compute_pt_integrals_output.dat",
            verbose=True)
    else:
        print "dataflag value not supported, ", dataflag
        exit()

    # Read in the chains
    params, max_params, max_loglike, samples, loglike = prepareforChainConsumer(
        model, outputfile=chainfile, burnin=1000)
    params_recon, max_params_recon, max_loglike_recon, samples_recon, loglike_recon = prepareforChainConsumer(
        model_recon, outputfile=chainfile_recon, burnin=1000)

    free_params = model.get_all_params()
    prepare_model(data, model)
    for counter, i in enumerate(free_params):
        model.params[i][0] = max_params[counter]
    set_model(model, x=data.x)
    p = Plotter(data=data, model=model)

    free_params = model_recon.get_all_params()
    prepare_model(data_recon, model_recon)
    for counter, i in enumerate(free_params):
        model_recon.params[i][0] = max_params_recon[counter]
    set_model(model_recon, x=data_recon.x)
    p.add_data_to_plot(data=data_recon, markerfacecolor='w')
    p.add_model_to_plot(data_recon,
                        model=model_recon,
                        markerfacecolor='w',
                        linecolor='r')
    p.display_plot()

    c = ChainConsumer().add_chain(
        samples,
        parameters=params,
        posterior=loglike,
        cloud=True,
        name="Pre-reconstruction").configure(summary=True)
    c.add_chain(samples_recon,
                parameters=params_recon,
                posterior=loglike_recon,
                cloud=True,
                name="Post-reconstruction").configure(summary=True)
    print(c.analysis.get_summary(), max_params, max_params_recon)
    #c.plotter.plot(display=True)

    return
Example #50
First let's mock some highly correlated data with colour scatter, and then throw a few more
data sets in to get some overlap.
"""

import numpy as np
from numpy.random import normal, multivariate_normal, uniform
from chainconsumer import ChainConsumer

np.random.seed(1)
n = 1000000
data = multivariate_normal([0.4, 1], [[0.01, -0.003], [-0.003, 0.001]], size=n)
data = np.hstack((data, (67 + 10 * data[:, 0] - data[:, 1]**2)[:, None]))
data2 = np.vstack((uniform(-0.1, 1.1, n), normal(1.2, 0.1, n))).T
data2[:, 1] -= (data2[:, 0]**2)
data3 = multivariate_normal([0.3, 0.7], [[0.02, 0.05], [0.05, 0.1]], size=n)

c = ChainConsumer()
c.add_chain(data2, parameters=[r"$\Omega_m$", "$-w$"], name="B")
c.add_chain(data3, name="S")
c.add_chain(data, parameters=[r"$\Omega_m$", "$-w$", "$H_0$"], name="P")

c.configure(color_params="$H_0$",
            shade=[True, True, False],
            shade_alpha=0.2,
            bar_shade=True,
            linestyles=["-", "--", "-"])
fig = c.plotter.plot(figsize=2.0, extents=[[0, 1], [0, 1.5]])

fig.set_size_inches(
    3 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
Example #51
#import pdb ; pdb.set_trace()
print('Making cornerplot...')
cc = ChainConsumer()

#import pdb ; pdb.set_trace()

names = [r'$\Omega_{\rm m}$', '$S_8$', '$A_1$', r'$\eta_1$']
names2 = [
    r'$\Omega_{\rm m}$', '$S_8$', '$A_1$', r'$\eta_1$', '$A_2$', r'$\eta_2$',
    r'$b_{\rm TA}$'
]

cc.add_chain(samp1.T,
             parameters=names2,
             weights=c1.weight,
             kde=True,
             name=r'$\Omega_\nu h^2$ varied')
cc.add_chain(samp2.T,
             parameters=names2,
             weights=c2.weight,
             kde=True,
             name=r'$\Omega_\nu h^2$ fixed')

cc.configure(colors=[
    '#121F90',
    '#8b008b',
    '#FF94CB',
    '#FF1493',
],
             shade=[False, True, True] * 3,
Example #52
Legends are hard.

Because of that, you can pass any keywords to the legend call you want via `legend_kwargs`.
"""

import numpy as np
from numpy.random import multivariate_normal
from chainconsumer import ChainConsumer

np.random.seed(0)
data1 = multivariate_normal([0, 0], [[1, 0], [0, 1]], size=1000000)
data2 = data1 + 2

c = ChainConsumer()
c.add_chain(data1, parameters=["$x$", "$y$"], name="Chain 1")
c.add_chain(data2, parameters=["$x$", "$y$"], name="Chain 2")
c.configure(colors=['lb', 'g'])
fig = c.plotter.plot()
fig.set_size_inches(2.5 + fig.get_size_inches())  # Resize fig for doco. You don't need this.

###############################################################################
# If the linestyles are different and the colours are the same, the artists
# will reappear.

c = ChainConsumer()
c.add_chain(data1, parameters=["$x$", "$y$"], name="Chain 1")
c.add_chain(data2, parameters=["$x$", "$y$"], name="Chain 2")
c.configure(colors=['lb', 'lb'], linestyles=["-", "--"])
fig = c.plotter.plot()
fig.set_size_inches(2.5 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
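###############################################################################
# The docstring above promises `legend_kwargs` pass-through, which the snippet
# itself never demonstrates; a minimal sketch reusing the chains from above.

c = ChainConsumer()
c.add_chain(data1, parameters=["$x$", "$y$"], name="Chain 1")
c.add_chain(data2, parameters=["$x$", "$y$"], name="Chain 2")
c.configure(legend_kwargs={"loc": "upper left", "fontsize": 10})  # forwarded to the legend call
fig = c.plotter.plot()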
Example #53
samp1 = np.array([c1.samples['cosmological_parameters--s8'], c1.samples['cosmological_parameters--omega_m'], c1.samples['intrinsic_alignment_parameters--a1'], c1.samples['intrinsic_alignment_parameters--a2'], c1.samples['intrinsic_alignment_parameters--alpha1'], c1.samples['intrinsic_alignment_parameters--alpha2'], c1.samples['intrinsic_alignment_parameters--bias_ta']])
samp2 = np.array([c2.samples['cosmological_parameters--s8'], c2.samples['cosmological_parameters--omega_m'], c2.samples['intrinsic_alignment_parameters--a1'], c2.samples['intrinsic_alignment_parameters--a2'], c2.samples['intrinsic_alignment_parameters--alpha1'], c2.samples['intrinsic_alignment_parameters--alpha2'], c2.samples['intrinsic_alignment_parameters--bias_ta']])
samp3 = np.array([c3.samples['cosmological_parameters--s8'], c3.samples['cosmological_parameters--omega_m'], c3.samples['intrinsic_alignment_parameters--a1'], c3.samples['intrinsic_alignment_parameters--a2'], c3.samples['intrinsic_alignment_parameters--alpha1'], c3.samples['intrinsic_alignment_parameters--alpha2'], c3.samples['intrinsic_alignment_parameters--bias_ta']])


#import pdb ; pdb.set_trace()
print('Making cornerplot...')
cc = ChainConsumer()

#import pdb ; pdb.set_trace()


names = ['$S_8$',r'$\Omega_{\rm m}$','$A_1$', '$A_2$', r'$\eta_1$', r'$\eta_2$', r'$b_{\rm TA}$']


cc.add_chain(samp3.T, parameters=names, weights=c3.weight, kde=True, name=r'Fiducial')
cc.add_chain(samp1.T, parameters=names, weights=c1.weight, kde=True, name=r'GAMA (symmetric $b_{\rm TA}$)')
cc.add_chain(samp2.T, parameters=names, weights=c2.weight, kde=True, name=r'GAMA (positive $b_{\rm TA}$)')



cc.configure(colors=['#000000', '#8b008b', '#ffd1df', '#FF1493'],
             shade=[False, True, True] * 3,
             shade_alpha=[0.25, 0.25, 0.5, 0.5],
             legend_kwargs={"loc": "upper right", "fontsize": 20},
             label_font_size=12,
             tick_font_size=14)
#cc.configure(colors=['#800080', '#800080', '#FF1493', '#000000' ],shade=[False,True,True,False], shade_alpha=[0.25,0.25,0.25], max_ticks=4, kde=[6]*5, linestyles=["-", "-", '-.', '--'], legend_kwargs={"loc": "upper right", "fontsize": 14},label_font_size=14,tick_font_size=14)
plt.close()
fig = cc.plotter.plot(extents={'$S_8$': (0.75, 0.95), r'$\Omega_{\rm m}$': (0.15, 0.45),
                               '$A_1$': (-0.9, 2), '$A_2$': (-3., 1), r'$\eta_1$': (-5, 5),
                               r'$\eta_2$': (-5, 2), r'$b_{\rm TA}$': (-2, 2)},
                      truth=[0.82355, 0.3, 1.7, 0, 0, 0, 0])

plt.suptitle(r'$\Lambda$CDM $1 \times 2\mathrm{pt}$', fontsize=20)
plt.subplots_adjust(bottom=0.155,left=0.155, hspace=0, wspace=0)

print('Saving...')
plt.savefig('/Users/hattifattener/Documents/y3cosmicshear/plots/robustness/fiducial_test_0.40_CLASS_C1_DEEP2.pdf')
The code for this is based on the preliminize GitHub repo at
https://github.com/cpadavis/preliminize, which will add watermarks to arbitrary
figures!

"""

import numpy as np
from numpy.random import multivariate_normal
from chainconsumer import ChainConsumer

np.random.seed(0)
data1 = multivariate_normal([3, 5], [[1, 0], [0, 1]], size=1000000)
data2 = multivariate_normal([5, 3], [[1, 0], [0, 1]], size=10000)

c = ChainConsumer()
c.add_chain(data1, parameters=["$x$", "$y$"], name="Good results")
c.add_chain(data2, name="Unfinished results")
fig = c.plotter.plot(watermark=r"\textbf{Preliminary}", figsize=2.0)

###############################################################################
# You can also control the text options sent to the matplotlib text call.

c = ChainConsumer()
c.add_chain(data1, parameters=["$x$", "$y$"], name="Good results")
c.add_chain(data2, name="Unfinished results")
kwargs = {
    "color": "purple",
    "alpha": 1.0,
    "family": "sanserif",
    "usetex": False,
    "weight": "bold"
Example #55
Note that you *cannot* use dictionary input with the grid method unless you specify the full
flattened array. This is because we cannot construct the meshgrid from a dictionary, as
the order of the parameters is not preserved in the dictionary.

"""
import numpy as np
from chainconsumer import ChainConsumer
from scipy.stats import multivariate_normal


x, y = np.linspace(-3, 3, 50), np.linspace(-7, 7, 100)
xx, yy = np.meshgrid(x, y, indexing='ij')
pdf = np.exp(-0.5 * (xx * xx + yy * yy / 4 + np.abs(xx * yy)))

c = ChainConsumer()
c.add_chain([x, y], parameters=["$x$", "$y$"], weights=pdf, grid=True)
fig = c.plotter.plot()
fig.set_size_inches(4.5 + fig.get_size_inches())  # Resize fig for doco. You don't need this.

###############################################################################
# If you have the flattened array already, you can also pass this

# Turning 2D data to flat data.
xs, ys = xx.flatten(), yy.flatten()
coords = np.vstack((xs, ys)).T
pdf_flat = multivariate_normal.pdf(coords, mean=[0.0, 0.0], cov=[[1.0, 0.7], [0.7, 3.5]])
c = ChainConsumer()
c.add_chain([xs, ys], parameters=["$x$", "$y$"], weights=pdf_flat, grid=True)
c.configure(smooth=1)  # Notice how smoothing changes the results!
fig = c.plotter.plot()
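###############################################################################
# Dictionary input also works once the arrays are flattened, since each key
# then carries its own full-length column; a minimal sketch reusing `xs`,
# `ys` and `pdf_flat` from above.

c = ChainConsumer()
c.add_chain({"$x$": xs, "$y$": ys}, weights=pdf_flat, grid=True)
fig = c.plotter.plot()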
Example #56
    def do_analysis(self):
        # run_id = "chains_1808/test1"

        # constants:
        c = 2.9979e10
        G = 6.67428e-8

        # -------------------------------------------------------------------------#
        # load in sampler:
        reader = emcee.backends.HDFBackend(filename=self.run_id + ".h5")
        #sampler = emcee.EnsembleSampler(self.nwalkers, self.ndim, self.lnprob, args=(self.x, self.y, self.yerr), backend=reader)
        #tau = 20
        tau = reader.get_autocorr_time(
            tol=0
        )  #using tol=0 means we'll always get an estimate even if it isn't trustworthy.
        burnin = int(2 * np.max(tau))
        thin = int(0.5 * np.min(tau))
        samples = reader.get_chain(flat=True, discard=burnin)
        sampler = reader.get_chain(flat=False)
        blobs = reader.get_blobs(flat=True)
        # samples = reader.get_chain(discard=burnin, flat=True, thin=thin)
        # log_prob_samples = reader.get_log_prob(discard=burnin, flat=True, thin=thin)
        # blobs = reader.get_blobs(discard=burnin, flat=True, thin=thin)

        data = []
        for i in range(len(blobs["model"])):
            data.append(eval(blobs["model"][i].decode('ASCII', 'replace')))

    # -------------------------------------------------------------------------#
    # get the acceptance fraction:
    #accept = reader.acceptance_fraction/nsteps #this will be an array with the acceptance fraction for each walker
    #print(f"The average acceptance fraction of the walkers is: {np.mean(accept)}")

    # get the autocorrelation times:
    # print("burn-in: {0}".format(burnin))
    # print("thin: {0}".format(thin))
    # print("flat chain shape: {0}".format(samples.shape))
    # print("flat log prob shape: {0}".format(log_prob_samples.shape))
    # print("flat log prior shape: {0}".format(log_prior_samples.shape))

    # alternate method of checking if the chains are converged:
    # This code is from https://dfm.io/posts/autocorr/

    # get autocorrelation time:

        def next_pow_two(n):
            i = 1
            while i < n:
                i = i << 1
            return i

        def autocorr_func_1d(x, norm=True):
            x = np.atleast_1d(x)
            if len(x.shape) != 1:
                raise ValueError(
                    "invalid dimensions for 1D autocorrelation function")
            n = next_pow_two(len(x))

            # Compute the FFT and then (from that) the auto-correlation function
            f = np.fft.fft(x - np.mean(x), n=2 * n)
            acf = np.fft.ifft(f * np.conjugate(f))[:len(x)].real
            acf /= 4 * n

            # Optionally normalize
            if norm:
                acf /= acf[0]

            return acf

        # Automated windowing procedure following Sokal (1989)
        def auto_window(taus, c):
            m = np.arange(len(taus)) < c * taus
            if np.any(m):
                return np.argmin(m)
            return len(taus) - 1

        # Following the suggestion from Goodman & Weare (2010)
        def autocorr_gw2010(y, c=5.0):
            f = autocorr_func_1d(np.mean(y, axis=0))
            taus = 2.0 * np.cumsum(f) - 1.0
            window = auto_window(taus, c)
            return taus[window]

        def autocorr_new(y, c=5.0):
            f = np.zeros(y.shape[1])
            for yy in y:
                f += autocorr_func_1d(yy)
            f /= len(y)
            taus = 2.0 * np.cumsum(f) - 1.0
            window = auto_window(taus, c)
            return taus[window]
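        # Note on the estimators above: the integrated autocorrelation time is
        # tau = 1 + 2*sum_{k>=1} rho(k); since rho(0) = 1, the running sum
        # 2*cumsum(rho) - 1 equals exactly that, truncated at Sokal's window
        # to keep the variance of the estimate bounded.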

        # Compute the estimators for a few different chain lengths

        # loop through the 10 parameters:
        plt.figure(figsize=(8, 5))

        param = [r"$X$", r"$Z$", r"$Q_{\mathrm{b}}$", r"$f_{\mathrm{a}}$", r"$f_{\mathrm{E}}$",
                 r"$r_{\mathrm{1}}$", r"$r_{\mathrm{2}}$", r"$r_{\mathrm{3}}$", r"$M$", r"$R$"]
        for j in range(10):
            chain = sampler[:, :, j].T

            N = np.exp(np.linspace(np.log(100), np.log(chain.shape[1]),
                                   10)).astype(int)
            gw2010 = np.empty(len(N))
            new = np.empty(len(N))
            for i, n in enumerate(N):
                gw2010[i] = autocorr_gw2010(chain[:, :n])
                new[i] = autocorr_new(chain[:, :n])

            # plot the two tau estimators for comparison
            plt.loglog(N, new, "o-", label=f"{param[j]}")
            plt.loglog(N, gw2010, "o-", label=None, color='grey')

        plt.xlabel("Number of samples, $N$", fontsize='xx-large')
        plt.ylabel(r"$\tau$ estimates", fontsize='xx-large')

        plt.plot(N, np.array(N) / 50.0, "--k")  # reference line: tau = N/50
        plt.legend(fontsize='large', loc='best', ncol=2)
        plt.xticks(fontsize=14)
        plt.yticks(fontsize=14)
        plt.savefig('{}_autocorrelationtimes.pdf'.format(self.run_id))
        plt.show()

        print(
            f"The autocorrelation time for each parameter as calculated by emcee is: {tau}"
        )
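        # Rule of thumb (the dashed N/50 line in the figure above): tau
        # estimates are only trustworthy once the chain is at least ~50
        # autocorrelation times long.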
        # -------------------------------------------------------------------------#
        # Get parameters for each model run from the blobs structure:

        # get each individual parameter:
        time = [d['time'] for d in data]
        e_b = [d['e_b'] for d in data]
        alpha = [d['alpha'] for d in data]
        X = [d['x_0'] for d in data]
        Z = [d['z'] for d in data]
        base = [d['base'] for d in data]
        mdot = [d['mdot'] for d in data]
        r1 = np.array([d['r1'] for d in data])
        r2 = np.array([d['r2'] for d in data])
        r3 = np.array([d['r3'] for d in data])
        mass = np.array([d['mass'] for d in data])
        radius = np.array([d['radius'] for d in data])

        # calculate redshift and gravity from mass and radius:
        R = radius * 1e5  # cm (cgs)
        M = mass * 1.989e33  # g (cgs)
        redshift = np.power((1 - (2 * G * M / (R * c**2))), -0.5)
        gravity = M * redshift * G / R**2  # cgs

        # sanity check: minima of the scaling factors and key parameters
        print(np.min(r1), np.min(r2), np.min(r3), np.min(mass), np.min(X))

        # calculate distance and inclination from the scaling factors:
        xip_sq = (r1 * r2 * r3 * 1e3) / (63.23 * 0.74816)
        xip = np.power(xip_sq, 0.5)
        xib = (0.74816 * xip) / r2
        distance = 10 * np.power((r1 / xip), 0.5)  # kpc
        cosi_2 = 1 / (2 * xip)
        cosi = 0.5 / (2 * (xip / xib) - 1)

        # to get each parameter's central value and uncertainties, use the
        # functions get_param_uncert_obs and get_param_uncert_pred, e.g.:
        times = get_param_uncert_obs(time, self.numburstssim * 2 + 1)
        timepred = [x[0] for x in times]
        timepred_errup = [x[1] for x in times]
        timepred_errlow = [x[2] for x in times]

        ebs = get_param_uncert_obs(e_b, self.numburstssim * 2)
        ebpred = [x[0] for x in ebs]
        ebpred_errup = [x[1] for x in ebs]
        ebpred_errlow = [x[2] for x in ebs]

        alphas = get_param_uncert_obs(alpha, self.numburstssim * 2)

        Xpred = np.array(list(get_param_uncert(X))[0])
        Zpred = np.array(list(get_param_uncert(Z))[0])
        basepred = np.array(list(get_param_uncert(base))[0])
        dpred = np.array(list(get_param_uncert(distance))[0])
        cosipred = np.array(list(get_param_uncert(cosi))[0])
        xippred = np.array(list(get_param_uncert(xip))[0])
        xibpred = np.array(list(get_param_uncert(xib))[0])
        masspred = np.array(list(get_param_uncert(mass))[0])
        radiuspred = np.array(list(get_param_uncert(radius))[0])
        gravitypred = np.array(list(get_param_uncert(gravity))[0])
        redshiftpred = np.array(list(get_param_uncert(redshift))[0])
        r1pred = np.array(list(get_param_uncert(r1))[0])
        r2pred = np.array(list(get_param_uncert(r2))[0])
        r3pred = np.array(list(get_param_uncert(r3))[0])

        # scale fluences by scaling factor:
        ebpred = np.array(ebpred) * np.array(r3pred[0])
        ebpred_errup = np.array(ebpred_errup) * np.array(r3pred[0])
        ebpred_errlow = np.array(ebpred_errlow) * np.array(r3pred[0])

        # save to text file, one row per parameter: value, upper uncertainty, lower uncertainty

        np.savetxt(
            f'{self.run_id}_parameterconstraints_pred.txt',
            (Xpred, Zpred, basepred, dpred, cosipred, xippred, xibpred,
             masspred, radiuspred, gravitypred, redshiftpred, r1pred, r2pred,
             r3pred),
            header='Xpred, Zpred, basepred, dpred, cosipred, xippred, xibpred, '
            'masspred, radiuspred, gravitypred, redshiftpred, r1pred, r2pred, r3pred'
            '\n value, upper uncertainty, lower uncertainty')

        # -------------------------------------------------------------------------#
        # PLOTS
        # -------------------------------------------------------------------------#

        # make plot of posterior distributions of your parameters:
        c = ChainConsumer()
        c.add_chain(samples,
                    parameters=[
                        "X", "Z", "Qb", "fa", "fE", "r1", "r2", "r3", "M", "R"
                    ])
        c.plotter.plot(filename=self.run_id + "_posteriors.pdf",
                       figsize="column")

        # make plot of posterior distributions of the mass, radius, surface gravity, and redshift:
        # stack data for input to chainconsumer:
        mass = mass.ravel()
        radius = radius.ravel()
        gravity = gravity.ravel()
        redshift = redshift.ravel()
        mrgr = np.column_stack((mass, radius, gravity, redshift))

        # plot with chainconsumer:
        c = ChainConsumer()
        c.add_chain(mrgr, parameters=["M", "R", "g", "1+z"])
        c.plotter.plot(filename=self.run_id + "_massradius.pdf",
                       figsize="column")

        # make plot of observed burst comparison with predicted bursts:
        # get the observed bursts for comparison:
        tobs = self.bstart
        ebobs = self.fluen

        plt.figure(figsize=(10, 7))

        plt.scatter(tobs,
                    ebobs,
                    color='black',
                    marker='.',
                    label='Observed',
                    s=200)
        plt.scatter(timepred[1:],
                    ebpred,
                    marker='*',
                    color='darkgrey',
                    s=100,
                    label='Predicted')

        plt.errorbar(timepred[1:],
                     ebpred,
                     yerr=[ebpred_errup, ebpred_errlow],
                     xerr=[timepred_errup[1:], timepred_errlow[1:]],
                     fmt='.',
                     color='darkgrey')
        plt.errorbar(tobs, ebobs, fmt='.', color='black')

        plt.xlabel("Time (days after start of outburst)")
        plt.ylabel("Fluence (1e-9 erg/cm$^2$)")
        plt.legend(loc=2)

        plt.savefig(f'{self.run_id}_predictedburstscomparison.pdf')
        plt.show()

        # plot the chains:
        ndim = 10

        labels = [r"$X$", r"$Z$", r"$Q_b$", r"$f_a$", r"$f_E$", r"$r_1$",
                  r"$r_2$", r"$r_3$", r"$M$", r"$R$"]
        plt.clf()
        fig, axes = plt.subplots(ndim, 1, sharex=True, figsize=(8, 9))

        for i in range(ndim):
            axes[i].plot(sampler[:, :, i].T, color="k", alpha=0.4)
            axes[i].yaxis.set_major_locator(MaxNLocator(5))
            axes[i].set_ylabel(labels[i])

        axes[ndim - 1].set_xlabel("step number")
        plt.tight_layout(h_pad=0.0)
        plt.savefig(self.run_id + 'chain-plot.pdf')
        plt.show()
Beispiel #57
0
            out_lines[ii] = GelmanRubin(chain_T[:ii, :, nd])

        #plt.ylim(0.95, 2.3)
        plt.plot(out_absc[20:], out_lines[20:], '-', color='k')
        plt.axhline(1.01)
        plt.savefig(dir_output + 'GRtrace_pam_' + repr(nd) + '.png', bbox_inches='tight')
        plt.close()

    print()
    print('*************************************************************')
    print()

if args.cc != 'False':
    cc = ChainConsumer()
    for nd in range(0, mc.ndim):  # (0, ndim):
        cc.add_chain(chain[:, :, nd].flatten(), walkers=mc.nwalkers)

    #print(cc.get_latex_table())
    print(cc.get_summary())

    print(cc.diagnostic_gelman_rubin(threshold=0.05))
    print(cc.diagnostic_geweke())
    print()
    print('*************************************************************')
    print()

x0 = 1. / 150

M_star1_rand = np.random.normal(M_star1, M_star1_err, n_kept)

if 'kepler' in mc.model_list:
Beispiel #58
0
samp0 = np.array([c0.samples['cosmological_parameters--omega_m'],
                  c0.samples['cosmological_parameters--s8'],
                  c0.samples['cosmological_parameters--w'],
                  c0.samples['cosmological_parameters--omega_b'] * c0.samples['cosmological_parameters--h0']**2])
samp1 = np.array([c1.samples['cosmological_parameters--omega_m'],
                  c1.samples['cosmological_parameters--s8'],
                  c1.samples['cosmological_parameters--w'],
                  c1.samples['cosmological_parameters--omega_b'] * c1.samples['cosmological_parameters--h0']**2])


print('Making cornerplot...')
cc = ChainConsumer()

names = [r'$\Omega_{\rm m}$','$S_8$','$w$',r'$\Omega_{\rm b}h^2$']


cc.add_chain(samp0.T, parameters=names, weights=c0.weight, kde=True, name=r'Fiducial')
cc.add_chain(samp1.T, parameters=names, weights=c1.weight, kde=True, name=r'$\Omega_b h^2$ prior')


cc.configure(colors=['#121F90', '#8b008b', '#FF94CB', '#FF1493'],
             shade=[False, True, True] * 3,
             shade_alpha=[0.25, 0.25, 0.1, 0.5],
             legend_kwargs={"loc": "upper right", "fontsize": 16},
             label_font_size=12, tick_font_size=14)
plt.close()
fig = cc.plotter.plot(extents={'$S_8$': (0.5, 0.95),
                               r'$\Omega_{\rm m}$': (0.15, 0.45),
                               '$A_1$': (-5., 5), '$A_2$': (-5., 5),
                               r'$\eta_1$': (-5, 5), r'$\eta_2$': (-5, 2),
                               r'$b_{\rm TA}$': (0, 2)})  # truth=[0.3, 0.82355, 0.7, -1.7]


plt.suptitle(r'$w$CDM $1 \times 2\mathrm{pt}$', fontsize=16)
plt.subplots_adjust(bottom=0.155,left=0.155, hspace=0, wspace=0, top=0.92)

print('Saving...')
plt.savefig('/Users/hattifattener/Documents/y3cosmicshear/plots/wcdm_omegabh2_1x2pt.pdf')
plt.savefig('/Users/hattifattener/Documents/y3cosmicshear/plots/wcdm_omegabh2_1x2pt.png')
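# The same derived-parameter pattern as a minimal self-contained sketch:
# build the derived column before handing arrays to ChainConsumer. The
# uniform stand-in samples below are hypothetical, not the chains above.
import numpy as np
from chainconsumer import ChainConsumer

omega_m = np.random.uniform(0.2, 0.4, 5000)
omega_b = np.random.uniform(0.04, 0.06, 5000)
h0 = np.random.uniform(0.6, 0.8, 5000)
ombh2 = omega_b * h0**2  # derived column, as for samp0/samp1 above

cc = ChainConsumer()
cc.add_chain(np.column_stack([omega_m, ombh2]),
             parameters=[r'$\Omega_{\rm m}$', r'$\Omega_{\rm b}h^2$'])
fig = cc.plotter.plot()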
Beispiel #59
0
    rs_fid = get_r_s([0.273])[0]
    
    daval = (alpha/(1+epsilon)) * da / rs_fid
    
    hrc = hs * rs_fid / (alpha * (1 + epsilon) * (1 + epsilon)) / c
    res = np.vstack((omch2, daval, z/hrc)).T
    return res
    
p1 = [r"$\Omega_c h^2$", r"$\alpha$", r"$\epsilon$"]
p2 = [r"$\Omega_c h^2$", r"$D_A(z)/r_s$", r"$cz/H(z)/r_s$"]


if False:
    consumer = ChainConsumer()
    consumer.configure_contour(sigmas=[0,1.3])
    consumer.add_chain(load_directory("../bWigMpBin/bWigMpBin_z0"), parameters=p1, name="$0.2<z<0.6$")
    consumer.add_chain(load_directory("../bWigMpBin/bWigMpBin_z1"), parameters=p1, name="$0.4<z<0.8$")
    consumer.add_chain(load_directory("../bWigMpBin/bWigMpBin_z2"), parameters=p1, name="$0.6<z<1.0$")
    consumer.plot(figsize="column", filename="wigglez_multipole_alphaepsilon.pdf", truth=[0.113, 1.0, 0.0])
    print(consumer.get_latex_table())

if True:
    c = ChainConsumer()
    c.configure_contour(sigmas=[0,1,2])
    c.add_chain(convert_directory("../bWigMpBin/bWigMpBin_z0", 0.44), parameters=p2, name="$0.2<z<0.6$")
    c.add_chain(convert_directory("../bWigMpBin/bWigMpBin_z1", 0.60), parameters=p2, name="$0.4<z<0.8$")
    c.add_chain(convert_directory("../bWigMpBin/bWigMpBin_z2", 0.73), parameters=p2, name="$0.6<z<1.0$")
    print(c.get_latex_table())
    #c.plot(figsize="column", filename="wigglez_multipole_dah.pdf")
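# Note: configure_contour is the older entry point for contour options; in
# more recent ChainConsumer releases the same sigma levels are passed through
# configure(). A minimal sketch, assuming such a release (stand-in Gaussian
# samples, not the chains loaded above):
import numpy as np
from chainconsumer import ChainConsumer

data = np.random.multivariate_normal([0.0, 0.0], [[1.0, 0.5], [0.5, 2.0]], size=10000)
c = ChainConsumer()
c.add_chain(data, parameters=['$x$', '$y$'])
c.configure(sigmas=[0, 1, 2])  # contours out to the 2-sigma level
fig = c.plotter.plot(figsize="column")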

if False:
Beispiel #60
0
                # chain is organized as chain[walker, step, parameter(s)]
                chain = np.array(sampler.chain)

                for i in range(0, nWalkers):
                    for j in range(0, nBreak):
                        db.insert_shock_chain_data(db_pool=db_pool, table=obs["source"], chain=chain[i][j])

                if counter == 0:
                    full_chain = np.vstack(chain)
                else:
                    full_chain = np.concatenate((full_chain, np.vstack(chain)))

                # Checks for convergence using ChainConsumer
                c = ChainConsumer()
                c.add_chain(full_chain, parameters=column_names,
                            walkers=nWalkers)

                # Convergence tests
                gelman_rubin = c.diagnostic.gelman_rubin(threshold=0.15)
                if gelman_rubin:
                    print("Chains have converged")
                    break
                else:
                    print("Chains have yet to converge")

                ''' 
                # Plot the UCLCHEM plots
                vs, initial_dens, b_field, crir, isrf = chain_results[0], chain_results[1], chain_results[2], chain_results[3], chain_results[4]
                uclchem_file = "{0}/UCLCHEM/output/data/v{1:.2}n1e{2:.2}z{3:.2}r{4:.2}b{5:.2}.dat".format(
                    DIREC, vs, initial_dens, crir, isrf, b_field)