Example #1
0
def test_back_compat(seed=1234):
    """Check that a State still unpacks like the legacy result tuples."""
    np.random.seed(seed)
    coords = np.random.randn(16, 3)
    log_prob = np.random.randn(len(coords))
    blobs = np.random.randn(len(coords))
    rstate = np.random.get_state()

    # With blobs, the state unpacks into four elements:
    # (coords, log_prob, random_state, blobs).
    state = State(coords, log_prob, blobs, rstate)
    got_coords, got_log_prob, got_rstate, got_blobs = state
    assert np.allclose(got_coords, coords)
    assert np.allclose(got_log_prob, log_prob)
    assert np.allclose(got_blobs, blobs)
    # Skip element 0 of the RNG state (the algorithm name string).
    assert all(
        np.allclose(expected, actual)
        for expected, actual in zip(rstate[1:], got_rstate[1:])
    )

    # Without blobs, the state unpacks into only three elements.
    state = State(coords, log_prob, None, rstate)
    got_coords, got_log_prob, got_rstate = state
    assert np.allclose(got_coords, coords)
    assert np.allclose(got_log_prob, log_prob)
    assert all(
        np.allclose(expected, actual)
        for expected, actual in zip(rstate[1:], got_rstate[1:])
    )
Example #2
0
def test_live_dangerously(nwalkers=32, nsteps=3000, seed=1234):
    """The stretch move must reject ndim >= nwalkers unless live_dangerously is set."""
    warnings.filterwarnings("error")

    # Seed the global RNG so walker coordinates are reproducible.
    np.random.seed(seed)
    # The dimension (2 * nwalkers) deliberately exceeds the walker count.
    ensemble = State(np.random.randn(nwalkers, 2 * nwalkers),
                     log_prob=np.random.randn(nwalkers))
    dummy_model = Model(None, lambda x: (np.zeros(len(x)), None), map, np.random)
    move = moves.StretchMove()

    # Too few walkers for this dimension: proposing must fail loudly.
    with pytest.raises(RuntimeError):
        move.propose(dummy_model, ensemble)

    # Opting in via live_dangerously disables the safety check.
    move.live_dangerously = True
    move.propose(dummy_model, ensemble)
Example #3
0
    def sample_ln_pdf(
        self,
        ln_pdf,
        size=None,
        sample_around=1.0,
        nwalkers=50,
        burn_in=20,
        oversampling_factor=10,
    ):
        """Sample from a distribution given by ln(pdf)

        This algorithm uses the :any:`emcee.EnsembleSampler`

        Parameters
        ----------
        ln_pdf : :any:`callable`
            The logarithm of the Probability density function
            of the given distribution, that takes a single argument
        size : :class:`int` or :any:`None`, optional
            sample size. Default: None
        sample_around : :class:`float`, optional
            Starting point for initial guess Default: 1.
        nwalkers : :class:`int`, optional
            The number of walkers in the mcmc sampler. Used for the
            emcee.EnsembleSampler class.
            Default: 50
        burn_in : :class:`int`, optional
            Number of burn-in runs in the mcmc algorithm.
            Default: 20
        oversampling_factor : :class:`int`, optional
            To guess the sample number needed for proper results, we use a
            factor for oversampling. The internally used sample size is
            calculated by

            ``sample_size = max(burn_in, ceil(size/nwalkers)*oversampling_factor)``

            So at least, as much as the burn-in runs.
            Default: 10
        """
        if size is None:
            sample_size = burn_in
        else:
            # run_mcmc needs an integer step count; ``size / nwalkers`` is a
            # float under true division, so use an integer ceiling division
            # (-(-a // b) == ceil(a / b) for positive integers).
            sample_size = max(
                burn_in, -(-(size * oversampling_factor) // nwalkers)
            )
        # initial guess: one column of random starting points scaled around
        # ``sample_around``
        init_guess = (self.random.rand(nwalkers).reshape(
            (nwalkers, 1)) * sample_around)
        # initialize the sampler (1-D problem)
        sampler = mc.EnsembleSampler(nwalkers, 1, ln_pdf)
        # burn in phase with saving of last position
        ##################### mc 2 and 3 compatibility
        if MC_VER < 3:  # pragma: no cover
            burn_in_state, __, __ = sampler.run_mcmc(
                pos0=init_guess, N=burn_in, rstate0=self.random.get_state())
        else:  # pragma: no cover
            from emcee.state import State

            initial_state = State(init_guess, copy=True)
            initial_state.random_state = self.random.get_state()
            burn_in_state = sampler.run_mcmc(initial_state=initial_state,
                                             nsteps=burn_in)
        ##################### mc 2 and 3 compatibility
        # reset after burn_in so only the production run is kept
        sampler.reset()
        # actual sampling
        ##################### mc 2 and 3 compatibility
        if MC_VER < 3:  # pragma: no cover
            sampler.run_mcmc(
                pos0=burn_in_state,
                N=sample_size,
                rstate0=self.random.get_state(),
            )
            samples = sampler.flatchain[:, 0]
        else:  # pragma: no cover
            from emcee.state import State

            initial_state = State(burn_in_state, copy=True)
            initial_state.random_state = self.random.get_state()
            sampler.run_mcmc(initial_state=initial_state, nsteps=sample_size)
            samples = sampler.get_chain(flat=True)[:, 0]
        ##################### mc 2 and 3 compatibility

        # choose samples according to size (None -> a single scalar draw)
        return self.random.choice(samples, size)