def make_sampler(self):
    """
    Create the MCMC sampler for the Objective.

    Call this method again whenever the number of varying parameters
    changes, so that the sampler dimensionality stays in sync with the
    Objective.
    """
    self._varying_parameters = self.objective.varying_parameters()
    # remember the object identities of the fitted parameters
    self.__var_id = [id(p) for p in self._varying_parameters]

    if not self.nvary:
        raise ValueError("No parameters are being fitted")

    if self._ntemps != -1:
        # Parallel Tempering was requested.
        pt_kwargs = dict(
            ntemps=self._ntemps,
            nwalkers=self._nwalkers,
            ndim=self.nvary,
            logl=self.objective.logl,
            logp=self.objective.logp,
        )
        # user-supplied keywords take precedence over the defaults
        pt_kwargs.update(self.mcmc_kws)
        self.sampler = PTSampler(**pt_kwargs)
    else:
        self.sampler = emcee.EnsembleSampler(
            self._nwalkers,
            self.nvary,
            self.objective.logpost,
            **self.mcmc_kws,
        )

    # discard any previously accumulated sampler state
    self._state = None
def make_sampler(self):
    """
    Construct the sampler used to explore the Objective's posterior.

    Re-invoke this method whenever the number of varying parameters
    changes, so the sampler dimensionality matches the Objective.
    """
    self._varying_parameters = self.objective.varying_parameters()
    # record object identities of the fitted parameters
    self.__var_id = [id(p) for p in self._varying_parameters]

    if not self.nvary:
        raise ValueError("No parameters are being fitted")

    if self._ntemps != -1:
        # Parallel Tempering was requested.
        if not _HAVE_PTSAMPLER:
            raise RuntimeError("You need to install the 'ptemcee' package"
                               " to use parallel tempering")

        kwargs = {'loglargs': (self.objective,),
                  'logpargs': (self.objective,),
                  'ntemps': self._ntemps,
                  'nwalkers': self._nwalkers,
                  'dim': self.nvary,
                  'logl': _objective_lnlike,
                  'logp': _objective_lnprior}
        # user-supplied keywords override the defaults assembled above
        kwargs.update(self.mcmc_kws)
        self.sampler = PTSampler(**kwargs)

        # construction of the PTSampler creates an ntemps attribute.
        # If it was constructed with ntemps = None, then ntemps will
        # be an integer.
        self._ntemps = self.sampler.ntemps
    else:
        self.mcmc_kws['args'] = (self.objective,)
        self.sampler = emcee.EnsembleSampler(self._nwalkers,
                                             self.nvary,
                                             _objective_lnprob,
                                             **self.mcmc_kws)

    self._state = None
def test_base_emcee(self):
    """Regression test: BaseObjective reproduces the canonical emcee
    line-fitting example (slope/intercept/log-f model)."""
    # check that the base objective works against the emcee example.
    def lnprior(theta, x, y, yerr):
        # flat (improper) prior inside a box, -inf outside
        m, b, lnf = theta
        if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0:
            return 0.0
        return -np.inf

    def lnlike(theta, x, y, yerr):
        # Gaussian log-likelihood with fractional underestimated
        # variance term exp(2*lnf)
        m, b, lnf = theta
        model = m * x + b
        inv_sigma2 = 1.0 / (yerr**2 + model**2 * np.exp(2 * lnf))
        return -0.5 * (np.sum((y - model)**2 * inv_sigma2
                              - np.log(inv_sigma2)))

    x, y, yerr, _ = self.data.data

    theta = [self.m_true, self.b_true, np.log(self.f_true)]
    bo = BaseObjective(theta, lnlike, lnprior=lnprior,
                       fcn_args=(x, y, yerr))

    # test that the wrapper gives the same lnlike as the direct function
    assert_almost_equal(bo.lnlike(theta), lnlike(theta, x, y, yerr))
    assert_almost_equal(bo.lnlike(theta), -bo.nll(theta))
    assert_almost_equal(bo.nll(theta), 12.8885352412)

    # Find the maximum likelihood value.
    result = minimize(bo.nll, theta)

    # for repeatable sampling
    np.random.seed(1)

    # initialise walkers in a tight Gaussian ball around the ML solution
    ndim, nwalkers = 3, 100
    pos = [
        result["x"] + 1e-4 * np.random.randn(ndim)
        for i in range(nwalkers)
    ]

    sampler = emcee.EnsembleSampler(nwalkers, ndim, bo.lnprob)
    sampler.run_mcmc(pos, 800, rstate0=np.random.get_state())

    # discard burn-in, flatten the chain, and convert lnf back to f
    burnin = 200
    samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
    samples[:, 2] = np.exp(samples[:, 2])
    # median plus upper/lower 1-sigma credible offsets for each parameter
    m_mc, b_mc, f_mc = map(
        lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
        zip(*np.percentile(samples, [16, 50, 84], axis=0)))
    assert_allclose(m_mc, (-1.0071664, 0.0809444, 0.0784894), rtol=0.04)
    assert_allclose(b_mc, (4.5428107, 0.3549174, 0.3673304), rtol=0.04)
    assert_allclose(f_mc, (0.4610898, 0.0823304, 0.0640812), rtol=0.06)

    # # smoke test for covariance matrix
    bo.parameters = np.array(result['x'])
    covar1 = bo.covar()
    uncertainties = np.sqrt(np.diag(covar1))

    # covariance from objective._covar should be almost equal to
    # the covariance matrix from sampling
    covar2 = np.cov(samples.T)
    assert_almost_equal(np.sqrt(np.diag(covar2))[:2],
                        uncertainties[:2], 2)

    # check covariance of self.objective
    # TODO
    var_arr = result['x'][:]
    var_arr[0], var_arr[1], var_arr[2] = var_arr[2], var_arr[1], var_arr[0]
def __init__(self, objective, nwalkers=200, ntemps=-1, **mcmc_kws):
    """
    Create a sampler for analysing an Objective by MCMC.

    Parameters
    ----------
    objective : refnx.analysis.Objective
        The :class:`refnx.analysis.Objective` to be analysed.
    nwalkers : int, optional
        How many walkers you would like the sampler to have. Must be an
        even number. The more walkers the better.
    ntemps : int or None, optional
        If `ntemps == -1`, then an :class:`emcee.EnsembleSampler` is used
        during the `sample` method.
        Otherwise, or if `ntemps is None` then parallel tempering is
        used with a :class:`ptemcee.sampler.Sampler` object during the
        `sample` method, with `ntemps` specifying the number of
        temperatures. Can be `None`, in which case the `Tmax` keyword
        argument sets the maximum temperature. Parallel Tempering is
        useful if you expect your posterior distribution to be multi-modal.
    mcmc_kws : dict
        Keywords used to create the :class:`emcee.EnsembleSampler` or
        :class:`ptemcee.sampler.PTSampler` objects.

    Raises
    ------
    ValueError
        If the Objective has no varying parameters.
    RuntimeError
        If parallel tempering is requested but `ptemcee` is not installed.

    Notes
    -----
    See the documentation at http://dan.iel.fm/emcee/current/api/ for
    further details on what keywords are permitted. The `pool` keyword
    is ignored here. Specification of parallel threading is done with
    the `pool` argument in the `sample` method.
    To use parallel tempering you will need to install the
    :package:`ptemcee` package.
    """
    self.objective = objective
    self._varying_parameters = objective.varying_parameters()
    self.nvary = len(self._varying_parameters)
    if not self.nvary:
        raise ValueError("No parameters are being fitted")

    # `mcmc_kws` arrives via ** so it is always a dict (never None);
    # copy it so the caller's mapping is never mutated.
    self.mcmc_kws = dict(mcmc_kws)

    # parallelisation is configured through the `pool` argument of the
    # `sample` method, not at sampler construction, so drop these here.
    self.mcmc_kws.pop('pool', None)
    self.mcmc_kws.pop('threads', None)

    self._nwalkers = nwalkers
    self._ntemps = ntemps

    if ntemps == -1:
        # plain ensemble sampling; the module-level _objective_lnprob
        # receives the objective via `args` and evaluates the
        # log-posterior for each walker.
        self.mcmc_kws['args'] = (objective,)
        self.sampler = emcee.EnsembleSampler(nwalkers,
                                             self.nvary,
                                             _objective_lnprob,
                                             **self.mcmc_kws)
    # Parallel Tempering was requested.
    else:
        if not _HAVE_PTSAMPLER:
            raise RuntimeError("You need to install the 'ptemcee' package"
                               " to use parallel tempering")

        sig = {'loglargs': (objective,),
               'logpargs': (objective,),
               'ntemps': ntemps,
               'nwalkers': nwalkers,
               'dim': self.nvary,
               'logl': _objective_lnlike,
               'logp': _objective_lnprior}
        # user-supplied keywords take precedence over the defaults
        sig.update(self.mcmc_kws)
        self.sampler = PTSampler(**sig)

        # construction of the PTSampler creates an ntemps attribute.
        # If it was constructed with ntemps = None, then ntemps will
        # be an integer.
        self._ntemps = self.sampler.ntemps

    # no accumulated sampler state yet
    self._state = None