Code example #1
def test_divide_chains_fail(self):
    # Passing a float as the chain identifier should raise a ValueError
    np.random.seed(0)
    data = np.concatenate((np.random.normal(loc=0.0, size=100000),
                           np.random.normal(loc=1.0, size=100000)))
    consumer = ChainConsumer()
    consumer.add_chain(data, walkers=2)
    with pytest.raises(ValueError):
        consumer.divide_chain(chain=2.0)
Code example #2
def test_divide_chains_name_fail(self):
    # Dividing by a chain name that does not exist should raise an AssertionError
    np.random.seed(0)
    data = np.concatenate((np.random.normal(loc=0.0, size=200000),
                           np.random.normal(loc=1.0, size=200000)))
    consumer = ChainConsumer()
    consumer.add_chain(data, walkers=2)
    with pytest.raises(AssertionError):
        consumer.divide_chain(chain="notexist")
Code example #3
def test_divide_chains_name(self):
    np.random.seed(0)
    # Two equal-sized walkers with means 0 and 1, concatenated walker by walker
    data = np.concatenate((np.random.normal(loc=0.0, size=100000),
                           np.random.normal(loc=1.0, size=100000)))
    consumer = ChainConsumer()
    num_walkers = 2
    consumer.add_chain(data, walkers=num_walkers, name="test")
    c = consumer.divide_chain(chain="test")
    c.configure(bins=0.7)
    means = [0.0, 1.0]
    for i in range(num_walkers):
        # Each divided chain should recover the mean of its own walker
        stats = list(c.analysis.get_summary()[i].values())[0]
        assert np.abs(stats[1] - means[i]) < 1e-1
        assert np.abs(c.chains[i].chain[:, 0].mean() - means[i]) < 1e-2
Code example #4
def test_divide_chains(self):
    np.random.seed(0)
    data = np.concatenate((np.random.normal(loc=0.0, size=100000),
                           np.random.normal(loc=1.0, size=100000)))
    consumer = ChainConsumer()
    consumer.add_chain(data)
    num_walkers = 2

    # Divide the single chain into two walker chains and check each recovered mean
    c = consumer.divide_chain(2)
    c.configure_general()
    means = [0.0, 1.0]
    for i in range(num_walkers):
        stats = list(c.get_summary()[i].values())[0]
        assert np.abs(stats[1] - means[i]) < 1e-1
        assert np.abs(c.chains[i][:, 0].mean() - means[i]) < 1e-2
Code example #5
"""
Dividing Chains
================

ChainConsumer can split one chain into many!

If you use a sampling algorithm with multiple walkers (which
is fairly common), it can be useful to plot each walker as a separate chain
so that you can verify that your walkers have all converged to the same place.

You can use the `plot_walks` method for this, or the convergence diagnostics,
but the more options the better!

In the example below, I assume the generated data was created using five walkers.
I introduce a fake shift in the data to crudely emulate walker drift.

"""

import numpy as np
from numpy.random import multivariate_normal
from chainconsumer import ChainConsumer

np.random.seed(0)
data = multivariate_normal([0.0, 4.0], [[1.0, 0.7], [0.7, 1.5]], size=1000000)
data[:, 0] += np.linspace(0, 1, data.shape[0])

c = ChainConsumer().add_chain(data, parameters=["$x$", "$y$"], walkers=5)
c2 = c.divide_chain()
fig = c2.plotter.plot()

fig.set_size_inches(
    4.5 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
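The docstring above also mentions `plot_walks` as a diagnostic. As a minimal sketch (assuming the same ChainConsumer version as this example, i.e. the one exposing the `plotter` namespace), the linear drift injected into `data[:, 0]` should show up as a trend in the walk plot:

# Sketch only, continuing from the example above. plot_walks draws each
# parameter against sample index, so the linear shift added to data[:, 0]
# should appear as a visible upward trend in the $x$ panel.
fig_walks = c.plotter.plot_walks(truth=[0.0, 4.0])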
Code example #6
File: efficiency_model_6.py  Project: dessn/sn-bhm
    colours = ["#4CAF50", "#D32F2F", "#1E88E5"] * n
    for i in range(n):
        mean, std, zeros, calibration, threshold, lall, zall, call, mask, num_obs = get_data()
        theta = [mean, std] + zeros.tolist()

        kwargs = {"num_steps": 40000, "num_burn": 30000, "save_interval": 60,
                  "plot_covariance": True}  # , "unify_latent": True # , "callback": v.callback
        sampler = BatchMetropolisHastings(num_walkers=w, kwargs=kwargs, temp_dir=t % i, num_cores=4)

        model_good = EfficiencyModelUncorrected(call, zall, calibration, zeros, name="Good%d" % i)
        model_good.fit(sampler, chain_consumer=c)  # , include_latent=True

        model_un = EfficiencyModelUncorrected(call[mask], zall[mask], calibration,
                                              zeros, name="Uncorrected%d" % i)
        model_un.fit(sampler, chain_consumer=c)

        model_cor = EfficiencyModelCorrected(call[mask], zall[mask], calibration, zeros,
                                             threshold, num_obs,
                                             dir_name + "/output", name="Corrected%d" % i)
        model_cor.fit(sampler, chain_consumer=c)

    c.configure_bar(shade=True)
    c.configure_general(bins=1.0, colours=colours)
    c.configure_contour(sigmas=[0, 0.01, 1, 2], contourf=True, contourf_alpha=0.2)
    c.plot(filename=plot_file, truth=theta, figsize=(5, 5), legend=False, parameters=2)
    for i in range(len(c.chains)):
        c.plot_walks(filename=walk_file % c.names[i], chain=i, truth=theta)
        c.divide_chain(i, w).configure_general(rainbow=True) \
            .plot(figsize=(5, 5), filename=plot_file.replace(".png", "_%s.png" % c.names[i]),
                  truth=theta)
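Example #6 divides each chain by index with an explicit walker count, `divide_chain(i, w)`, then chains the configuration and plotting calls. Below is a stripped-down sketch of that pattern, assuming the same API variant as example #6; the data and filename are hypothetical stand-ins:

# Minimal sketch of the divide-and-plot pattern from example #6, assuming
# the same ChainConsumer API variant (configure_general returns the consumer,
# plot accepts a filename). Data and filename are hypothetical stand-ins.
import numpy as np
from chainconsumer import ChainConsumer

np.random.seed(0)
walkers = 4
# Walker-major concatenation: each block of 25000 samples is one walker
data = np.concatenate([np.random.normal(loc=0.1 * i, size=25000)
                       for i in range(walkers)])

c = ChainConsumer()
c.add_chain(data, name="demo")
c.divide_chain(0, walkers).configure_general(rainbow=True) \
    .plot(figsize=(5, 5), filename="demo_divided.png")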