def test_lnsigma(self):
    # Check that assigning an `lnsigma` Parameter to self.objective makes
    # its lnlike agree with a BaseObjective built from the equivalent
    # direct lnlike/lnprior functions (the classic emcee line-fit example).
    # Fix: removed a stray debug `print(inv_sigma2)` from lnlike.

    def lnprior(theta, x, y, yerr):
        # Flat (improper) prior: 0 inside the box, -inf outside.
        m, b, lnf = theta
        if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0:
            return 0.0
        return -np.inf

    def lnlike(theta, x, y, yerr):
        # Gaussian lnlikelihood for a straight line, with the error bars
        # assumed underestimated by a fractional amount exp(lnf) that is
        # folded into the variance.
        m, b, lnf = theta
        model = m * x + b
        inv_sigma2 = 1.0 / (yerr**2 + model**2 * np.exp(2 * lnf))
        return -0.5 * (np.sum((y - model)**2 * inv_sigma2
                              - np.log(inv_sigma2)))

    x, y, yerr, _ = self.data.data
    theta = [self.m_true, self.b_true, np.log(self.f_true)]
    bo = BaseObjective(theta, lnlike, lnprior=lnprior,
                       fcn_args=(x, y, yerr))

    # lnsigma plays the role of lnf in the reference implementation.
    lnsigma = Parameter(np.log(self.f_true), 'lnsigma',
                        bounds=(-10, 1), vary=True)
    self.objective.setp(np.array([self.b_true, self.m_true]))
    self.objective.lnsigma = lnsigma
    assert_allclose(self.objective.lnlike(), bo.lnlike())
def test_base_emcee(self): # check that the base objective works against the emcee example. def lnprior(theta, x, y, yerr): m, b, lnf = theta if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0: return 0.0 return -np.inf def lnlike(theta, x, y, yerr): m, b, lnf = theta model = m * x + b inv_sigma2 = 1.0 / (yerr**2 + model**2 * np.exp(2 * lnf)) return -0.5 * (np.sum((y - model)**2 * inv_sigma2 - np.log(inv_sigma2))) x, y, yerr, _ = self.data.data theta = [self.m_true, self.b_true, np.log(self.f_true)] bo = BaseObjective(theta, lnlike, lnprior=lnprior, fcn_args=(x, y, yerr)) # test that the wrapper gives the same lnlike as the direct function assert_almost_equal(bo.lnlike(theta), lnlike(theta, x, y, yerr)) assert_almost_equal(bo.lnlike(theta), -bo.nll(theta)) assert_almost_equal(bo.nll(theta), 12.8885352412) # Find the maximum likelihood value. result = minimize(bo.nll, theta) # for repeatable sampling np.random.seed(1) ndim, nwalkers = 3, 100 pos = [ result["x"] + 1e-4 * np.random.randn(ndim) for i in range(nwalkers) ] sampler = emcee.EnsembleSampler(nwalkers, ndim, bo.lnprob) sampler.run_mcmc(pos, 800, rstate0=np.random.get_state()) burnin = 200 samples = sampler.chain[:, burnin:, :].reshape((-1, ndim)) samples[:, 2] = np.exp(samples[:, 2]) m_mc, b_mc, f_mc = map( lambda v: (v[1], v[2] - v[1], v[1] - v[0]), zip(*np.percentile(samples, [16, 50, 84], axis=0))) assert_allclose(m_mc, (-1.0071664, 0.0809444, 0.0784894), rtol=0.04) assert_allclose(b_mc, (4.5428107, 0.3549174, 0.3673304), rtol=0.04) assert_allclose(f_mc, (0.4610898, 0.0823304, 0.0640812), rtol=0.06) # # smoke test for covariance matrix bo.parameters = np.array(result['x']) covar1 = bo.covar() uncertainties = np.sqrt(np.diag(covar1)) # covariance from objective._covar should be almost equal to # the covariance matrix from sampling covar2 = np.cov(samples.T) assert_almost_equal(np.sqrt(np.diag(covar2))[:2], uncertainties[:2], 2) # check covariance of self.objective # TODO var_arr = result['x'][:] 
var_arr[0], var_arr[1], var_arr[2] = var_arr[2], var_arr[1], var_arr[0]