def test_composed_prior_sampling(self):
    """Sampling from a composed prior returns arrays of shape (n, d)."""
    # Two 1-d Gaussian priors composed into a 2-d prior
    m1 = 10
    c1 = 2
    p1 = pints.GaussianLogPrior(m1, c1)
    m2 = -50
    c2 = 100
    p2 = pints.GaussianLogPrior(m2, c2)
    # Fixed: this constructor call was duplicated verbatim in the original
    p = pints.ComposedLogPrior(p1, p2)
    d = 2
    n = 1
    x = p.sample(n)
    self.assertEqual(x.shape, (n, d))
    n = 10
    x = p.sample(n)
    self.assertEqual(x.shape, (n, d))

    # Composition including a 3-d multivariate Gaussian component:
    # total dimension is 1 + 3 + 1 + 1 = 6
    p = pints.ComposedLogPrior(
        p1,
        pints.MultivariateGaussianLogPrior([0, 1, 2], np.diag([2, 4, 6])),
        p2,
        p2,
    )
    d = p.n_parameters()
    self.assertEqual(d, 6)
    n = 1
    x = p.sample(n)
    self.assertEqual(x.shape, (n, d))
    n = 10
    x = p.sample(n)
    self.assertEqual(x.shape, (n, d))
def test_composed_prior(self):
    """Composed prior density is the sum of its components' densities."""
    # Note: redundant function-local imports of pints/numpy removed; the
    # module-level imports are used throughout this file.
    m1 = 10
    c1 = 2
    p1 = pints.GaussianLogPrior(m1, c1)
    m2 = -50
    c2 = 100
    p2 = pints.GaussianLogPrior(m2, c2)
    p = pints.ComposedLogPrior(p1, p2)

    # Test at center: joint log-density is the sum of the marginal peaks
    peak1 = p1([m1])
    peak2 = p2([m2])
    self.assertEqual(p([m1, m2]), peak1 + peak2)

    # Test at random points
    np.random.seed(1)
    for i in range(100):
        x = np.random.normal(m1, c1)
        y = np.random.normal(m2, c2)
        self.assertAlmostEqual(p([x, y]), p1([x]) + p2([y]))

    # Test effect of increasing covariance: density at the mode must
    # decrease monotonically as the second component widens
    p = [
        pints.ComposedLogPrior(p1, pints.GaussianLogPrior(m2, c))
        for c in range(1, 10)
    ]
    # Fixed: use a numpy array so the comparison below is element-wise.
    # The original compared two Python lists, which is lexicographic and
    # effectively tested only the first differing pair.
    p = np.array([f([m1, m2]) for f in p])
    self.assertTrue(np.all(p[:-1] > p[1:]))

    # Test errors: at least two component priors are required
    self.assertRaises(ValueError, pints.ComposedLogPrior)
    self.assertRaises(ValueError, pints.ComposedLogPrior, 1)

    # Test derivatives: gradient concatenates the component gradients
    p = pints.ComposedLogPrior(p1, p2)
    x = [8, -40]
    y, dy = p.evaluateS1(x)
    self.assertEqual(y, p(x))
    self.assertEqual(dy.shape, (2, ))
    y1, dy1 = p1.evaluateS1(x[:1])
    y2, dy2 = p2.evaluateS1(x[1:])
    self.assertAlmostEqual(dy[0], dy1[0])
    self.assertAlmostEqual(dy[1], dy2[0])

    # Test means: mean of Uniform(-50, 50) is 0
    m1 = 10
    c1 = 2
    p1 = pints.GaussianLogPrior(m1, c1)
    m2 = -50
    c2 = 50
    p2 = pints.UniformLogPrior(m2, c2)
    p = pints.ComposedLogPrior(p1, p2)
    self.assertTrue(np.array_equal(p.mean(), [10, 0]))
def __call__(self, x):
    """
    Return the log-probability of ``x = [mu, tau, theta_1..theta_8]``
    under the eight-schools hierarchical model, in either the centered
    or the non-centered parameterisation.
    """
    if len(x) != 10:
        raise ValueError('Input parameters must be of length 10.')
    mu, tau = x[0], x[1]

    # Reject negative scales so samplers can propose freely without the
    # log-priors themselves having to be changed.
    if tau < 0:
        return -np.inf

    # Hyper-prior contributions for mu and tau
    total = self._mu_log_pdf([mu]) + self._tau_log_pdf([tau])

    # Group-level prior: N(mu, tau) on theta in the centered form,
    # standard normal on theta-tilde in the non-centered form.
    if self._centered:
        group_prior = pints.GaussianLogPrior(mu, tau)
    else:
        group_prior = pints.GaussianLogPrior(0, 1)

    for raw, y, sigma in zip(x[2:], self._y_j, self._sigma_j):
        total += group_prior([raw])
        # Recover the school effect theta from the raw parameter
        theta = raw if self._centered else mu + raw * tau
        # Likelihood of the observed effect y given theta
        total += pints.GaussianLogPrior(theta, sigma)([y])

    return total
def test_log_posterior(self):
    """LogPosterior combines likelihood and prior; checks S1 and errors."""
    # Build a toy logistic problem with a known-sigma Gaussian likelihood
    model = pints.toy.LogisticModel()
    true_params = [0.015, 500]
    x = [0.014, 501]
    sigma = 0.001
    times = np.linspace(0, 1000, 100)
    values = model.simulate(true_params, times)
    problem = pints.SingleOutputProblem(model, times, values)
    log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)

    # Uniform prior over a box containing the parameters
    log_prior = pints.UniformLogPrior([0, 0], [1, 1000])

    # Inside the support, posterior = likelihood + prior
    p = pints.LogPosterior(log_likelihood, log_prior)
    self.assertEqual(p(x), log_likelihood(x) + log_prior(x))

    # Outside the support, the prior short-circuits to -inf
    y = [-1, 500]
    self.assertEqual(log_prior(y), -float('inf'))
    self.assertEqual(p(y), -float('inf'))
    self.assertEqual(p(y), log_prior(y))

    # Derivatives: posterior gradient is the sum of the component gradients
    log_prior = pints.ComposedLogPrior(
        pints.GaussianLogPrior(0.015, 0.3),
        pints.GaussianLogPrior(500, 100))
    log_posterior = pints.LogPosterior(log_likelihood, log_prior)
    x = [0.013, 540]
    y, dy = log_posterior.evaluateS1(x)
    self.assertEqual(y, log_posterior(x))
    self.assertEqual(dy.shape, (2, ))
    y1, dy1 = log_prior.evaluateS1(x)
    y2, dy2 = log_likelihood.evaluateS1(x)
    self.assertTrue(np.all(dy == dy1 + dy2))

    # Accessors hand back the original component objects
    self.assertIs(log_posterior.log_prior(), log_prior)
    self.assertIs(log_posterior.log_likelihood(), log_likelihood)

    # First arg must be a LogPDF
    self.assertRaises(ValueError, pints.LogPosterior, 'hello', log_prior)

    # Second arg must be a log-prior
    self.assertRaises(
        ValueError, pints.LogPosterior, log_likelihood, log_likelihood)

    # Prior and likelihood must share a dimension
    self.assertRaises(
        ValueError, pints.LogPosterior, log_likelihood,
        pints.GaussianLogPrior(0.015, 0.3))
def __init__(self, centered=True):
    """
    Eight-schools toy posterior over ``[mu, tau, theta_1..theta_8]``.

    ``centered`` selects the centered (True) or non-centered (False)
    parameterisation of the school effects.
    """
    # Parameterisation flag, coerced to a plain bool
    self._centered = bool(centered)
    self._n_parameters = 10
    # Hyper-priors on the population mean mu and scale tau
    self._mu_log_pdf = pints.GaussianLogPrior(0, 5)
    self._tau_log_pdf = pints.HalfCauchyLogPrior(0, 5)
    # Observed school effects y_j and their standard errors sigma_j
    self._y_j = [28, 8, -3, 7, -1, 1, 18, 12]
    self._sigma_j = [15, 10, 16, 11, 9, 11, 10, 18]
def test_gaussian_prior(self):
    """Gaussian prior: unimodality, mean, derivatives, known values."""
    mean = 10
    std = 2
    p = pints.GaussianLogPrior(mean, std)
    n = 10000
    # NOTE(review): ``6 * std`` was probably intended here; kept as-is so
    # the tested range is unchanged.
    r = 6 * np.sqrt(std)

    # Test left half of distribution is non-decreasing towards the mean.
    # Fixed: use numpy arrays so the comparisons below are element-wise;
    # the original compared Python lists, which is lexicographic and
    # effectively checked only the first differing pair.
    x = np.linspace(mean - r, mean, n)
    px = np.array([p([i]) for i in x])
    self.assertTrue(np.all(px[1:] >= px[:-1]))

    # Test right half of distribution is non-increasing away from the mean
    y = np.linspace(mean, mean + std, n)
    py = np.array([p([i]) for i in y])
    self.assertTrue(np.all(py[1:] <= py[:-1]))

    # Test means
    self.assertAlmostEqual(p.mean(), mean)

    # Test derivatives: d/dx log N(x; mean, std) = (mean - x) / std^2
    x = [8]
    y, dy = p.evaluateS1(x)
    self.assertEqual(y, p(x))
    self.assertEqual(dy.shape, (1, ))
    self.assertEqual(dy[0], (mean - x[0]) / std**2)

    # Spot-check known log-density values
    p = pints.GaussianLogPrior(-1, 4.5)
    x = [3.75]
    self.assertAlmostEqual(p(x), -2.9801146954130457)

    p = pints.GaussianLogPrior(10.4, 0.5)
    x = [5.5]
    y, dy = p.evaluateS1(x)
    self.assertAlmostEqual(y, -48.245791352644737)
    self.assertEqual(dy, 19.6)

    # Test deprecated alias
    p = pints.NormalLogPrior(mean, std)
    self.assertIsInstance(p, pints.GaussianLogPrior)

    # Non-positive standard deviations are rejected
    self.assertRaises(ValueError, pints.GaussianLogPrior, 0, 0)
    self.assertRaises(ValueError, pints.GaussianLogPrior, 0, -1)
def evaluateS1(self, x):
    """ See :meth:`pints.LogPDF.evaluateS1()`. """
    if len(x) != 10:
        raise ValueError('Input parameters must be of length 10.')
    mu = x[0]
    tau = x[1]
    if tau < 0:
        # to handle proposals without having to change log-priors
        # (returns -inf density and an all--inf gradient of length 10)
        return -np.inf, np.full([1, 10], -np.inf)
    thetas = x[2:]
    # Hyper-prior contributions and their 1-d gradients; dL1/dL2 are
    # mutated in-place below to accumulate the cross terms w.r.t. mu, tau.
    log_prob1, dL1 = self._mu_log_pdf.evaluateS1([mu])
    log_prob2, dL2 = self._tau_log_pdf.evaluateS1([tau])
    log_prob = log_prob1 + log_prob2
    if self._centered:
        # Centered form: theta_j ~ N(mu, tau) directly.
        log_prior = pints.GaussianLogPrior(mu, tau)
        dL_theta = []
        for i, theta in enumerate(thetas):
            y_j = self._y_j[i]
            sigma_j = self._sigma_j[i]
            # Gradients of log N(theta; mu, tau) w.r.t. mu and tau
            dL1[0] += (theta - mu) / tau**2
            dL2[0] += ((theta - mu)**2 - tau**2) / tau**3
            # Gradient w.r.t. theta from its group prior ...
            log_prob_temp, dL_temp = log_prior.evaluateS1([theta])
            log_prob += log_prob_temp
            # ... plus the likelihood term N(y_j; theta, sigma_j)
            log_prob += pints.GaussianLogPrior(theta, sigma_j)([y_j])
            dL_temp[0] += (y_j - theta) / sigma_j**2
            dL_theta.append(dL_temp[0])
    else:
        # Non-centered form: theta_tilde_j ~ N(0, 1),
        # theta_j = mu + theta_tilde_j * tau.
        log_prior = pints.GaussianLogPrior(0, 1)
        dL_theta = []
        for i, theta_tilde in enumerate(thetas):
            y_j = self._y_j[i]
            sigma_j = self._sigma_j[i]
            theta = mu + theta_tilde * tau
            # Shared likelihood-gradient factor d/dtheta log N(y_j; theta, sigma_j)
            y_minus_theta = (y_j - theta) / sigma_j**2
            # Chain rule: dtheta/dmu = 1, dtheta/dtau = theta_tilde
            dL1[0] += y_minus_theta
            dL2[0] += theta_tilde * y_minus_theta
            log_prob_temp, dL_temp = log_prior.evaluateS1([theta_tilde])
            log_prob += log_prob_temp
            log_prob += pints.GaussianLogPrior(theta, sigma_j)([y_j])
            # Likelihood contribution to the theta_tilde gradient
            # (dtheta/dtheta_tilde = tau)
            dL_temp[0] += tau * y_minus_theta
            dL_theta.append(dL_temp[0])
    # Gradient is ordered to match x: [d/dmu, d/dtau, d/dtheta_1..8]
    return log_prob, ([dL1[0]] + [dL2[0]] + dL_theta)
def test_gaussian_prior_sampling(self):
    """Samples have shape (n, 1) and roughly the requested moments."""
    mean = 10
    std = 2
    prior = pints.GaussianLogPrior(mean, std)
    d = 1
    for n in (1, 10):
        samples = prior.sample(n)
        self.assertEqual(samples.shape, (n, d))

    # Very roughly check distribution (main checks are in numpy!)
    np.random.seed(1)
    prior = pints.GaussianLogPrior(mean, std)
    samples = prior.sample(10000)
    self.assertTrue(np.abs(mean - samples.mean(axis=0)) < 0.1)
    self.assertTrue(np.abs(std - samples.std(axis=0)) < 0.01)
def create_pints_prior(self):
    """
    Build the pints log-prior corresponding to this object's form.

    Returns a :class:`pints.UniformLogPrior` for ``Form.UNIFORM`` (params
    are [lower, upper]) or a :class:`pints.GaussianLogPrior` for
    ``Form.NORMAL`` (params are [mean, sd]).

    Raises ``ValueError`` for any other form. (Previously an unsupported
    form left ``pints_log_prior`` unbound, so the return statement raised
    an opaque ``UnboundLocalError``.)
    """
    noise_parameters = self.get_noise_params()
    if self.form == self.Form.UNIFORM:
        lower = noise_parameters[0]
        upper = noise_parameters[1]
        pints_log_prior = pints.UniformLogPrior(lower, upper)
    elif self.form == self.Form.NORMAL:
        mean = noise_parameters[0]
        sd = noise_parameters[1]
        pints_log_prior = pints.GaussianLogPrior(mean, sd)
    else:
        # Fail loudly on unsupported forms instead of falling through
        raise ValueError(
            'Cannot create pints prior for form {}'.format(self.form))
    return pints_log_prior
def test_composed_prior_cdf_icdf(self):
    """CDF/ICDF of a composed prior apply each component marginally."""
    p1 = pints.GaussianLogPrior(-3, 7)
    p2 = pints.UniformLogPrior(-4, -1)
    composed = pints.ComposedLogPrior(p1, p2)
    parts = [p1, p2]

    # cdf maps each coordinate through its own component's cdf
    xs = [-10, -3]
    cdfs = composed.cdf(xs)
    for part, value, cdf in zip(parts, xs, cdfs):
        self.assertEqual(cdf, part.cdf(value))

    # convert_to_unit_cube agrees with the marginal cdf transform
    unit = composed.convert_to_unit_cube(xs)
    self.assertEqual(cdfs[0], unit[0])
    self.assertEqual(cdfs[1], unit[1])

    # icdf maps each quantile through its own component's icdf
    qs = [0.3, 0.75]
    icdfs = composed.icdf(qs)
    for part, q, icdf in zip(parts, qs, icdfs):
        self.assertEqual(icdf, part.icdf(q))

    # convert_from_unit_cube agrees with the marginal icdf transform
    params = composed.convert_from_unit_cube(qs)
    self.assertEqual(icdfs[0], params[0])
    self.assertEqual(icdfs[1], params[1])
def test_gaussian_prior_cdf_icdf(self):
    """Spot-check Gaussian prior cdf/icdf against precomputed values."""
    prior = pints.GaussianLogPrior(-4, 7.5)
    self.assertAlmostEqual(prior.cdf(3.0), 0.8246760551477705)
    self.assertAlmostEqual(prior.icdf(0.01), -21.447609055306305)