Example #1
    def test_composed_prior_sampling(self):
        import pints
        import numpy as np

        m1 = 10
        c1 = 2
        p1 = pints.GaussianLogPrior(m1, c1)
        m2 = -50
        c2 = 100
        p2 = pints.GaussianLogPrior(m2, c2)
        p = pints.ComposedLogPrior(p1, p2)
        d = 2
        n = 1
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))
        n = 10
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))
        p = pints.ComposedLogPrior(
            p1,
            pints.MultivariateGaussianLogPrior([0, 1, 2], np.diag([2, 4, 6])),
            p2,
            p2,
        )
        d = p.n_parameters()
        self.assertEqual(d, 6)
        n = 1
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))
        n = 10
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))
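
A standalone sketch of the behaviour this test checks, assuming only that pints is installed: ComposedLogPrior.sample(n) draws from each sub-prior and stacks the results column-wise.

import pints

# Two 1-d priors composed into one 2-d prior
p = pints.ComposedLogPrior(
    pints.GaussianLogPrior(10, 2),
    pints.GaussianLogPrior(-50, 100),
)

x = p.sample(5)          # one row per draw, one column per sub-prior
print(x.shape)           # (5, 2)
print(p.n_parameters())  # 2
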
Example #2
    def test_composed_prior(self):
        import pints
        import numpy as np

        m1 = 10
        c1 = 2
        p1 = pints.GaussianLogPrior(m1, c1)

        m2 = -50
        c2 = 100
        p2 = pints.GaussianLogPrior(m2, c2)

        p = pints.ComposedLogPrior(p1, p2)

        # Test at center
        peak1 = p1([m1])
        peak2 = p2([m2])
        self.assertEqual(p([m1, m2]), peak1 + peak2)

        # Test at random points
        np.random.seed(1)
        for i in range(100):
            x = np.random.normal(m1, c1)
            y = np.random.normal(m2, c2)
            self.assertAlmostEqual(p([x, y]), p1([x]) + p2([y]))

        # Test effect of increasing covariance
        p = [
            pints.ComposedLogPrior(p1, pints.GaussianLogPrior(m2, c))
            for c in range(1, 10)
        ]
        p = np.array([f([m1, m2]) for f in p])
        self.assertTrue(np.all(p[:-1] > p[1:]))

        # Test errors
        self.assertRaises(ValueError, pints.ComposedLogPrior)
        self.assertRaises(ValueError, pints.ComposedLogPrior, 1)

        # Test derivatives
        p = pints.ComposedLogPrior(p1, p2)
        x = [8, -40]
        y, dy = p.evaluateS1(x)
        self.assertEqual(y, p(x))
        self.assertEqual(dy.shape, (2, ))
        y1, dy1 = p1.evaluateS1(x[:1])
        y2, dy2 = p2.evaluateS1(x[1:])
        self.assertAlmostEqual(dy[0], dy1[0])
        self.assertAlmostEqual(dy[1], dy2[0])

        # Test means
        m1 = 10
        c1 = 2
        p1 = pints.GaussianLogPrior(m1, c1)

        m2 = -50
        c2 = 50
        p2 = pints.UniformLogPrior(m2, c2)

        p = pints.ComposedLogPrior(p1, p2)
        self.assertTrue(np.array_equal(p.mean(), [10, 0]))
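
The key properties tested above (the composed log-density is the sum of the sub-prior log-densities, and evaluateS1 returns that value plus its gradient) fit in a short standalone sketch, assuming only pints and numpy:

import numpy as np
import pints

p1 = pints.GaussianLogPrior(10, 2)
p2 = pints.GaussianLogPrior(-50, 100)
p = pints.ComposedLogPrior(p1, p2)

# The composed log-density is the sum of the sub-prior log-densities
x = [8, -40]
assert np.isclose(p(x), p1([8]) + p2([-40]))

# evaluateS1 returns the log-density and its gradient, one entry per parameter
y, dy = p.evaluateS1(x)
print(y, dy)
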
Example #3
    def _make_pints_posterior(self):
        """Rebuild the Pints posterior and save it.
        """
        # Build a uniform model prior if one is not supplied
        if self.model_prior is None:
            num_model_params = self.problem.n_parameters()
            model_prior = pints.UniformLogPrior([-1e6] * num_model_params,
                                                [1e6] * num_model_params)
        else:
            model_prior = self.model_prior

        # Get the GP prior
        kernel_prior = NonstatGPLogPrior(
            self.gp_times,
            self.kernel.num_parameters() // len(self.gp_times), self.mu,
            self.alpha, self.beta)

        # Combine the two priors
        log_prior = pints.ComposedLogPrior(model_prior, kernel_prior)

        # Build the likelihood
        log_likelihood = flexnoise.KernelCovarianceLogLikelihood(
            self.problem, self.kernel)

        # Build the posterior
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)

        self.posterior = log_posterior
    def update_model(self, fixed_parameters_list):
        """
        Update the model with fixed parameters.

        Parameters
        ----------
        fixed_parameters_list
            List of fixed parameter values.
        """

        # Create a dictionary mapping fixed parameter names to their values
        name_value_dict = dict(
            zip(self.model._parameter_names, fixed_parameters_list))
        self.model.fix_parameters(name_value_dict)

        # Set up the problem with pints,
        # including likelihood, prior and posterior
        problem = pints.SingleOutputProblem(
            model=self.model,
            times=self.data['Time'].to_numpy(),
            values=self.data['Incidence Number'].to_numpy())
        log_likelihood = pints.GaussianLogLikelihood(problem)
        priors = self.set_prior(name_value_dict)
        self.log_prior = pints.ComposedLogPrior(*priors)
        self.log_posterior = pints.LogPosterior(log_likelihood, self.log_prior)

        # Create a log transformation over all parameters
        self.transformations = pints.LogTransformation(
            self.log_posterior.n_parameters())
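
Both methods above depend on project-specific classes (NonstatGPLogPrior, flexnoise.KernelCovarianceLogLikelihood, the epidemic model). A self-contained sketch of the same prior, likelihood, posterior and transformation pipeline, using only a pints toy model and made-up prior ranges:

import numpy as np
import pints
import pints.toy

# Toy logistic model standing in for the project-specific model above
model = pints.toy.LogisticModel()
times = np.linspace(0, 100, 50)
values = model.simulate([0.1, 50], times)
problem = pints.SingleOutputProblem(model, times, values)

# GaussianLogLikelihood adds one noise parameter, so three priors are composed
log_likelihood = pints.GaussianLogLikelihood(problem)
log_prior = pints.ComposedLogPrior(
    pints.UniformLogPrior(0, 1),    # growth rate (made-up range)
    pints.UniformLogPrior(1, 500),  # carrying capacity (made-up range)
    pints.UniformLogPrior(0, 10),   # noise sigma (made-up range)
)
log_posterior = pints.LogPosterior(log_likelihood, log_prior)

# Log-transform every parameter, as update_model() does above
transformation = pints.LogTransformation(log_posterior.n_parameters())
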
Example #5
    def test_log_posterior(self):

        # Create a toy problem and log likelihood
        model = pints.toy.LogisticModel()
        real_parameters = [0.015, 500]
        x = [0.014, 501]
        sigma = 0.001
        times = np.linspace(0, 1000, 100)
        values = model.simulate(real_parameters, times)
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)

        # Create a prior
        log_prior = pints.UniformLogPrior([0, 0], [1, 1000])

        # Test
        p = pints.LogPosterior(log_likelihood, log_prior)
        self.assertEqual(p(x), log_likelihood(x) + log_prior(x))
        y = [-1, 500]
        self.assertEqual(log_prior(y), -float('inf'))
        self.assertEqual(p(y), -float('inf'))
        self.assertEqual(p(y), log_prior(y))

        # Test derivatives
        log_prior = pints.ComposedLogPrior(pints.GaussianLogPrior(0.015, 0.3),
                                           pints.GaussianLogPrior(500, 100))
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)
        x = [0.013, 540]
        y, dy = log_posterior.evaluateS1(x)
        self.assertEqual(y, log_posterior(x))
        self.assertEqual(dy.shape, (2, ))
        y1, dy1 = log_prior.evaluateS1(x)
        y2, dy2 = log_likelihood.evaluateS1(x)
        self.assertTrue(np.all(dy == dy1 + dy2))

        # Test getting the prior and likelihood back again
        self.assertIs(log_posterior.log_prior(), log_prior)
        self.assertIs(log_posterior.log_likelihood(), log_likelihood)

        # First arg must be a LogPDF
        self.assertRaises(ValueError, pints.LogPosterior, 'hello', log_prior)

        # Second arg must be a log_prior
        self.assertRaises(ValueError, pints.LogPosterior, log_likelihood,
                          log_likelihood)

        # Prior and likelihood must have same dimension
        self.assertRaises(ValueError, pints.LogPosterior, log_likelihood,
                          pints.GaussianLogPrior(0.015, 0.3))
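
One behaviour worth isolating from this test: LogPosterior returns -inf as soon as the prior is -inf, without needing the likelihood. A condensed standalone version of the same setup:

import numpy as np
import pints
import pints.toy

model = pints.toy.LogisticModel()
times = np.linspace(0, 1000, 100)
values = model.simulate([0.015, 500], times)
problem = pints.SingleOutputProblem(model, times, values)

log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, 0.001)
log_prior = pints.UniformLogPrior([0, 0], [1, 1000])
log_posterior = pints.LogPosterior(log_likelihood, log_prior)

print(log_posterior([0.014, 501]))  # finite: inside the prior support
print(log_posterior([-1, 500]))     # -inf: the prior vetoes the point
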
Example #6
    def test_composed_prior_cdf_icdf(self):
        p1 = pints.GaussianLogPrior(-3, 7)
        p2 = pints.UniformLogPrior(-4, -1)
        p = pints.ComposedLogPrior(p1, p2)
        ps = [p1, p2]
        xs = [-10, -3]
        cdfs = p.cdf(xs)
        for i, cdf in enumerate(cdfs):
            self.assertEqual(cdf, ps[i].cdf(xs[i]))
        cdfs1 = p.convert_to_unit_cube(xs)
        self.assertEqual(cdfs[0], cdfs1[0])
        self.assertEqual(cdfs[1], cdfs1[1])

        qs = [0.3, 0.75]
        icdfs = p.icdf(qs)
        for i, icdf in enumerate(icdfs):
            self.assertEqual(icdf, ps[i].icdf(qs[i]))
        icdfs1 = p.convert_from_unit_cube(qs)
        self.assertEqual(icdfs[0], icdfs1[0])
        self.assertEqual(icdfs[1], icdfs1[1])
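
The unit-cube conversions tested here are just the componentwise CDF and inverse CDF, so a round trip recovers the input. A minimal sketch:

import pints

p = pints.ComposedLogPrior(
    pints.GaussianLogPrior(-3, 7),
    pints.UniformLogPrior(-4, -1),
)

u = p.convert_to_unit_cube([-10, -3])  # componentwise CDF, values in [0, 1]
x = p.convert_from_unit_cube(u)        # componentwise inverse CDF
print(x)                               # ~[-10, -3], up to floating-point error
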
Example #7
                                  transform,
                                  sine_wave=1)
n_params = model.n_params
#
# Define problem
#
problem = pints.SingleOutputProblem(model, time, current)

#
# Define log-posterior
#
log_likelihood = LogLikelihood.tiLogLikelihood(problem, sigma_noise, voltage)
log_prior_rates = prior.LogPrior(rate_dict, lower_conductance, n_params,
                                 transform)
log_prior_discrp = pints.NormalLogPrior(0., 1.5)
log_prior = pints.ComposedLogPrior(log_prior_rates, log_prior_discrp)
log_posterior = pints.LogPosterior(log_likelihood, log_prior)
rate_checker = Rates.ratesPrior(transform, lower_conductance)

#
# Run
#
nchains = 1

# Define starting point for the MCMC routine
xs = []

x0 = np.loadtxt(cmaes_result_files + model_name + '-cell-' + str(cell) +
                '-cmaes.txt')
ds = log_prior_discrp.sample().reshape((1, ))
x0 = [np.concatenate((x0, ds))]
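
The last lines show a common pattern: a saved optimiser result covers the model parameters, and the extra discrepancy parameter is initialised by sampling its prior. A sketch with a hypothetical array in place of the loaded CMA-ES file:

import numpy as np
import pints

x0_model = np.array([0.1, 0.2, 0.3])  # hypothetical stand-in for the CMA-ES result
log_prior_discrp = pints.GaussianLogPrior(0., 1.5)

# sample() returns an array of shape (1, 1); flatten before appending
ds = log_prior_discrp.sample().reshape((1,))
x0 = [np.concatenate((x0_model, ds))]  # list of starting points, one per chain
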
Example #8
loglikelihood = DiscrepancyLogLikelihood(
    problem, inducing_times, downsample=100
)  # Note to Chon: downsample=100 <-- change this to 1 or None when not debugging
logmodelprior = LogPrior[info_id](transform_to_model_param,
                                  transform_from_model_param)
# Priors for discrepancy
# I have transformed all the discrepancy priors as well
lognoiseprior = HalfNormalLogPrior(
    sd=25,
    transform=True)  # This will have considerable mass at the initial value
logrhoprior = InverseGammaLogPrior(
    alpha=5, beta=5, transform=True)  # as suggested in the Stan manual
logkersdprior = InverseGammaLogPrior(
    alpha=5, beta=5, transform=True)  # as suggested in the Stan manual

logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior, logrhoprior,
                                  logkersdprior)
logposterior = pints.LogPosterior(loglikelihood, logprior)

# Check logposterior is working fine
# Initialise the kernel hyperparameters \rho and \ker_sigma by sampling
initial_rho = logrhoprior.sample()  # alternative fixed value: np.log(0.5)
initial_ker_sigma = logkersdprior.sample()  # alternative fixed value: np.log(5.0)

priorparams = np.copy(info.base_param)
transform_priorparams = transform_from_model_param(priorparams)
priorparams = np.hstack(
    (priorparams, noise_sigma, initial_rho, initial_ker_sigma))
transform_priorparams = np.hstack(
    (transform_priorparams, noise_sigma, initial_rho, initial_ker_sigma))
print('Posterior at prior parameters: ', logposterior(transform_priorparams))
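
HalfNormalLogPrior and InverseGammaLogPrior here are project-specific classes. pints ships comparable built-ins, so the same composition can be sketched with them (the specific distributions below are stand-ins, not the authors' choices):

import numpy as np
import pints

logmodelprior = pints.MultivariateGaussianLogPrior([0, 0], np.eye(2))
lognoiseprior = pints.HalfCauchyLogPrior(0, 25)   # stand-in for the half-normal
logrhoprior = pints.InverseGammaLogPrior(5, 5)    # kernel length-scale
logkersdprior = pints.InverseGammaLogPrior(5, 5)  # kernel output scale

logprior = pints.ComposedLogPrior(
    logmodelprior, lognoiseprior, logrhoprior, logkersdprior)
print(logprior.n_parameters())  # 2 + 1 + 1 + 1 = 5
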
Example #9
n_arama = len(armax_result.params[armax_result.k_exog:])

# Create Pints objects
problem = pints.SingleOutputProblem(model, times, data)
loglikelihood = DiscrepancyLogLikelihood(
    problem, armax_result, transparams=transparams)  # <-- changed 21/10
logmodelprior = LogPrior[info_id](transform_to_model_param,
                                  transform_from_model_param)

# Priors for discrepancy
logarmaprior = ArmaNormalCentredLogPrior(
    armax_result, 0.25)  # Note for Chon: worth checking out wider/narrower priors

logprior = pints.ComposedLogPrior(logmodelprior, logarmaprior)
logposterior = pints.LogPosterior(loglikelihood, logprior)

# Check logposterior is working fine
init_arma_ar = _ar_transparams(armax_result.arparams.copy())  # <-- changed 21/10
init_arma_ma = _ma_transparams(armax_result.maparams.copy())  # <-- changed 21/10
init_arma = np.append(init_arma_ar, init_arma_ma)  # <-- changed 21/10

priorparams = np.copy(info.base_param)
transform_priorparams = transform_from_model_param(priorparams)
priorparams = np.append(priorparams, init_arma)
transform_priorparams = np.append(transform_priorparams, init_arma)
    cmaes_params = np.loadtxt(cmaes_result_files + model_name + '-cell-' +
                              str(cell) + '-cmaes.txt')
    exog_current = model.simulate(cmaes_params, time)
    armax_mod = sm.tsa.ARMA(current, order=(2, 1), exog=exog_current)
    armax_result = armax_mod.fit(trend='nc', solver='cg')
    print(armax_result.summary())

#
# Define problem for pints
#
problem = pints.SingleOutputProblem(model, time, current)
if args.discrepancy:
    log_prior_model_params = prior.LogPrior(rate_dict, lower_conductance,
                                            n_params, transform)
    #log_prior_arma = positive_priors.ArmaLogPrior(armax_result, 1e-1)
    log_prior_arma = positive_priors.ArmaLogPriorNonPints(armax_result, 0.25)
    log_prior = pints.ComposedLogPrior(log_prior_model_params, log_prior_arma)
else:
    log_prior = prior.LogPrior(rate_dict, lower_conductance, n_params,
                               transform)
burnin = args.burnin
iterations = args.niter
do_thermo = args.thermo
if do_thermo:
    #
    # Define an MCMC runner function that will run a single MCMC chain for a
    # given thermodynamic temperature `t`
    #
    nchains = 1
    num_temps = args.ntemps

    def mcmc_runner(temps, n_chains=nchains):
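
A condensed sketch of the `if args.discrepancy` branch above, with pints built-ins standing in for the project's prior classes (all names and ranges below are hypothetical):

import numpy as np
import pints

use_discrepancy = True  # stands in for args.discrepancy
log_prior_model_params = pints.UniformLogPrior([0] * 3, [1] * 3)

if use_discrepancy:
    # Extra prior over the ARMA discrepancy parameters
    log_prior_arma = pints.MultivariateGaussianLogPrior(
        [0, 0, 0], 0.25 * np.eye(3))
    log_prior = pints.ComposedLogPrior(log_prior_model_params, log_prior_arma)
else:
    log_prior = log_prior_model_params

print(log_prior.n_parameters())  # 6 with the discrepancy, 3 without
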
Example #11
            current = data.current
            times = data.times

        problem = pints.SingleOutputProblem(pints_model, times, current)
        boundaries = pints.RectangularBoundaries(lower_bounds, upper_bounds)

        # Create a new log-likelihood function (adds an extra parameter!)
        log_likelihood = pints.GaussianLogLikelihood(problem)

        # Create a new prior
        large = 1e9
        param_prior = pints.MultivariateGaussianLogPrior(
            mu_0, large * np.eye(len(mu_0)))
        noise_prior = pints.UniformLogPrior([lower_bounds[-1]],
                                            [upper_bounds[-1]])
        log_prior = pints.ComposedLogPrior(param_prior, noise_prior)

        # Create a posterior log-likelihood (log(likelihood * prior))
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)
        log_posteriors.append(log_posterior)
        score = pints.ProbabilityBasedError(log_posterior)

        if synthetic:
            found_parameters = list(true_parameters) + [noise]
        else:
            found_parameters, found_value = pints.optimise(score,
                                                           x0,
                                                           sigma0,
                                                           boundaries,
                                                           method=pints.CMAES)
        self._offset -= self._nt * np.log(sigma)
        self._multip = -1 / (2.0 * sigma**2)

    def __call__(self, x):

        error = self._values - self._problem.evaluate(x)
        return self.temperature * np.sum(self._offset + self._multip *
                                         np.sum(error**2, axis=0))



sigma_noise = 1.
log_prior_a = pints.NormalLogPrior(2., 1.)
log_prior_b = pints.NormalLogPrior(3., 1.)
log_prior = pints.ComposedLogPrior(log_prior_a, log_prior_b)
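
Composing two independent one-dimensional Gaussians like this is equivalent to one two-dimensional multivariate Gaussian with diagonal covariance, as a quick sketch confirms (in recent pints versions NormalLogPrior is called GaussianLogPrior):

import numpy as np
import pints

# Two independent 1-d Gaussians composed, versus one diagonal 2-d Gaussian
log_prior_composed = pints.ComposedLogPrior(
    pints.GaussianLogPrior(2., 1.), pints.GaussianLogPrior(3., 1.))
log_prior_mvn = pints.MultivariateGaussianLogPrior([2., 3.], np.eye(2))

x = [1.5, 3.5]
print(log_prior_composed(x), log_prior_mvn(x))  # equal up to float error
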


import mcmcsampling
from joblib import Parallel, delayed
import multiprocessing
niter = 10000


def mcmc_runner(temps):

    nchains = 1
    #print('temperature', temps)
    tempered_log_likelihood = tiLogLikelihood(problem, sigma_noise, temps)
    tempered_log_posterior = pints.LogPosterior(tempered_log_likelihood,