Example no. 1
0
def test_shapes(backend, moves, nwalkers=32, ndim=3, nsteps=10, seed=1234):
    """Run a short MCMC chain and verify the shape of every sampler output."""
    # Seed the global RNG so the walker initialization is reproducible.
    np.random.seed(seed)

    with backend() as be:
        # Random starting positions for the ensemble.
        initial = np.random.randn(nwalkers, ndim)
        sampler = EnsembleSampler(
            nwalkers, ndim, normal_log_prob, moves=moves, backend=be)

        # Advance the chain.
        sampler.run_mcmc(initial, nsteps)

        assert len(sampler.get_chain()) == nsteps, "wrong number of steps"

        # One autocorrelation-time estimate per parameter.
        tau = sampler.get_autocorr_time(quiet=True)
        assert tau.shape == (ndim,)

        # Attribute layout is (walkers, steps, dim); the accessor swaps the
        # first two axes to (steps, walkers, dim).
        assert sampler.chain.shape == (nwalkers, nsteps, ndim), \
            "incorrect coordinate dimensions"
        assert sampler.get_chain().shape == (nsteps, nwalkers, ndim), \
            "incorrect coordinate dimensions"
        assert sampler.lnprobability.shape == (nsteps, nwalkers), \
            "incorrect probability dimensions"

        assert sampler.acceptance_fraction.shape == (nwalkers,), \
            "incorrect acceptance fraction dimensions"

        # flat=True collapses the step and walker axes into one.
        flat = nsteps * nwalkers
        assert sampler.get_chain(flat=True).shape == (flat, ndim), \
            "incorrect coordinate dimensions"
        assert sampler.get_log_prob(flat=True).shape == (flat,), \
            "incorrect probability dimensions"
Example no. 2
0
def test_shapes(backend, moves, nwalkers=32, ndim=3, nsteps=10, seed=1234):
    """Run a short MCMC chain and verify the shape of every sampler output."""
    # Seed the global RNG so the walker initialization is reproducible.
    np.random.seed(seed)

    with backend() as be:
        # Random starting positions for the ensemble.
        start = np.random.randn(nwalkers, ndim)
        sampler = EnsembleSampler(
            nwalkers, ndim, normal_log_prob, moves=moves, backend=be)

        # Advance the chain.
        sampler.run_mcmc(start, nsteps)

        assert len(sampler.get_chain()) == nsteps, "wrong number of steps"

        # One autocorrelation-time estimate per parameter.
        tau = sampler.get_autocorr_time(quiet=True)
        assert tau.shape == (ndim,)

        # The legacy attributes are deprecated and use (walkers, steps)
        # ordering, so they must emit a DeprecationWarning when touched.
        with pytest.warns(DeprecationWarning):
            assert sampler.chain.shape == (nwalkers, nsteps, ndim), \
                "incorrect coordinate dimensions"
        with pytest.warns(DeprecationWarning):
            assert sampler.lnprobability.shape == (nwalkers, nsteps), \
                "incorrect probability dimensions"

        # The accessors use (steps, walkers) ordering.
        assert sampler.get_chain().shape == (nsteps, nwalkers, ndim), \
            "incorrect coordinate dimensions"
        assert sampler.get_log_prob().shape == (nsteps, nwalkers), \
            "incorrect probability dimensions"

        assert sampler.acceptance_fraction.shape == (nwalkers,), \
            "incorrect acceptance fraction dimensions"

        # flat=True collapses the step and walker axes into one.
        flat = nsteps * nwalkers
        assert sampler.get_chain(flat=True).shape == (flat, ndim), \
            "incorrect coordinate dimensions"
        assert sampler.get_log_prob(flat=True).shape == (flat,), \
            "incorrect probability dimensions"
Example no. 3
0
# Track how many convergence checks have been stored so far.
index = 0
# One mean autocorrelation-time estimate per check (over-allocated to max_n;
# only the first `index` entries are ever filled).
autocorr = np.empty(max_n)

# This will be useful to testing convergence
old_tau = np.inf

# Now we'll sample for up to max_n steps
for sample in s.sample(params0, iterations=max_n, progress=True):
    # Only check convergence every 10000 steps
    # (any nonzero remainder is truthy, so all other iterations skip).
    if s.iteration % 10000:
        continue

    # Compute the autocorrelation time so far
    # Using tol=0 means that we'll always get an estimate even
    # if it isn't trustworthy
    tau = s.get_autocorr_time(tol=0)
    autocorr[index] = np.mean(tau)
    index += 1

    # Check convergence
    # Two criteria: the chain is longer than 100 autocorrelation times for
    # every parameter, AND the estimate changed by less than 1% since the
    # previous check.
    converged = np.all(tau * 100 < s.iteration)
    converged &= np.all(np.abs(old_tau - tau) / tau < 0.01)
    if converged:
        break
    old_tau = tau
#
# graphical convergence check
#
# x-axis: chain length at each stored check (checks happen every 10000 steps).
n = 10000 * np.arange(1, index + 1)
y = autocorr[:index]
# Dashed reference line N/100: estimates below it satisfy the tau*100 < N rule.
pyplot.plot(n, n / 100.0, "--k")
Example no. 4
0
    pool.wait()
    sys.exit(0)

# Build the ensemble sampler, farming log-probability evaluations out to `pool`.
sampler = EnsembleSampler(nwalkers, ndim, lnprob4D, pool=pool)

# Burn-in run of `nburn` steps from the initial positions p0.
# NOTE(review): the 3-tuple return (pos, lnprob, random state) is the old
# emcee run_mcmc signature — confirm against the installed emcee version.
pos, lnprob, rand_state = sampler.run_mcmc(p0, nburn)

sampler.reset(
)  # Reset the chain to remove the burn-in samples; keep walker positions.

# Production run: continue from where burn-in ended, reusing its RNG state.
pos, lnprob, rand_stateR = sampler.run_mcmc(
    pos, nsteps, rstate0=rand_state)  # rstate0=rstate

# Only the master process reports diagnostics and gathers the samples
# (worker processes are presumably parked in pool.wait() — TODO confirm).
if pool.is_master():
    meanacceptance = np.mean(sampler.acceptance_fraction)
    autocorrelationtimes = sampler.get_autocorr_time()

    # Print out the mean acceptance fraction. In general, acceptance_fraction has an entry for each walker so, in this case, it is a
    # 50-dimensional vector.
    # NOTE(review): Python 2 print statement — this will not run under
    # Python 3; change to print(...) if the project targets Python 3.
    print "Mean acceptance fraction:" + str(meanacceptance)

    # Estimate the integrated autocorrelation time for the time series in each parameter.
    # NOTE(review): Python 2 print statement, same as above.
    print "Autocorrelation time:" + str(autocorrelationtimes)

    # Flattened (steps*walkers, ndim) samples and their log-probabilities.
    samples = sampler.flatchain
    lnprob = sampler.flatlnprobability

    # Number of flattened samples.
    length = len(samples[:, 0])

    # Accumulator for per-sample prior values (filled past this excerpt).
    lnpriors = []