Example #1
    def test_scaled_log_likelihood(self):
        import pints
        import pints.toy as toy
        import numpy as np

        model = toy.LogisticModel()
        real_parameters = [0.015, 500]
        test_parameters = [0.014, 501]
        sigma = 0.001
        times = np.linspace(0, 1000, 100)
        values = model.simulate(real_parameters, times)

        # Create an object with links to the model and time series
        problem = pints.SingleSeriesProblem(model, times, values)

        # Create a scaled and not scaled log_likelihood
        log_likelihood_not_scaled = pints.KnownNoiseLogLikelihood(
            problem, sigma)
        log_likelihood_scaled = pints.ScaledLogLikelihood(
            log_likelihood_not_scaled)

        eval_not_scaled = log_likelihood_not_scaled(test_parameters)
        eval_scaled = log_likelihood_scaled(test_parameters)

        self.assertEqual(int(eval_not_scaled), -20959169232)
        self.assertAlmostEqual(eval_scaled * len(times), eval_not_scaled)
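Here ScaledLogLikelihood divides the log-likelihood by the number of time points, which the final assertion confirms. The same relation checked the other way round (a sketch reusing the objects above):

        # Sketch: the scaled value is the unscaled value divided by the
        # number of time points (delta allows floating-point rounding).
        self.assertAlmostEqual(
            eval_scaled, eval_not_scaled / len(times), delta=1e-6)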
Example #2
    def __init__(self, name):
        super(TestCMAES, self).__init__(name)

        # Create toy model
        self.model = toy.LogisticModel()
        self.real_parameters = [0.015, 500]
        self.times = np.linspace(0, 1000, 1000)
        self.values = self.model.simulate(self.real_parameters, self.times)

        # Create an object with links to the model and time series
        self.problem = pints.SingleSeriesProblem(self.model, self.times,
                                                 self.values)

        # Select a score function
        self.score = pints.SumOfSquaresError(self.problem)

        # Select some boundaries
        self.boundaries = pints.Boundaries([0, 400], [0.03, 600])

        # Set an initial position
        self.x0 = 0.014, 499

        # Set a guess for the standard deviation around the initial position
        # (in both directions)
        self.sigma0 = 0.01

        # Minimum score function value to obtain
        self.cutoff = 1e-9

        # Maximum tries before it counts as failed
        self.max_tries = 3
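The fixture stops short of actually running the optimiser. One way it might be driven, as a sketch: pints.Optimisation and its keyword arguments below are assumed from the same PINTS API era as this fixture, and run_until_cutoff is a hypothetical helper, not part of the original class.

    def run_until_cutoff(self):
        # Hypothetical helper (assumed API): retry CMA-ES up to
        # max_tries times, stopping once the score beats the cutoff.
        for _ in range(self.max_tries):
            opt = pints.Optimisation(
                self.score, self.x0, sigma0=self.sigma0,
                boundaries=self.boundaries, method=pints.CMAES)
            found_parameters, found_value = opt.run()
            if found_value < self.cutoff:
                return found_parameters
        raise RuntimeError('Cutoff not reached in max_tries runs.')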
Example #3
    def __init__(self, name):
        super(TestAdaptiveCovarianceMCMC, self).__init__(name)

        # Create toy model
        self.model = toy.LogisticModel()
        self.real_parameters = [0.015, 500]
        self.times = np.linspace(0, 1000, 1000)
        self.values = self.model.simulate(self.real_parameters, self.times)

        # Add noise
        noise = 10
        self.values += np.random.normal(0, noise, self.values.shape)
        self.real_parameters.append(noise)

        # Create an object with links to the model and time series
        self.problem = pints.SingleSeriesProblem(
            self.model, self.times, self.values)

        # Create a uniform prior over both the parameters and the new noise
        # variable
        self.prior = pints.UniformPrior(
            [0.01, 400, noise * 0.1],
            [0.02, 600, noise * 100]
        )

        # Create an un-normalised log-posterior (prior * likelihood)
        self.log_likelihood = pints.LogPosterior(
            self.prior, pints.UnknownNoiseLogLikelihood(self.problem))

        # Select initial point and covariance
        self.x0 = np.array(self.real_parameters) * 1.1
        self.sigma0 = [0.005, 100, 0.5 * noise]
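A minimal sanity check that could sit alongside this fixture (hypothetical test method): the starting point x0 = 1.1 * real_parameters lies inside the uniform prior's box, so the un-normalised log-posterior must be finite there.

    def test_posterior_finite_at_x0(self):
        # Hypothetical test method, reusing the attributes set above.
        self.assertTrue(np.isfinite(self.log_likelihood(self.x0)))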
Example #4
    def test_known_unknown_log_likelihood(self):
        import pints
        import pints.toy as toy
        import numpy as np

        model = toy.LogisticModel()
        parameters = [0.015, 500]
        sigma = 0.1
        times = np.linspace(0, 1000, 100)
        values = model.simulate(parameters, times)
        problem = pints.SingleSeriesProblem(model, times, values)

        # Test if known/unknown give same result
        l1 = pints.KnownNoiseLogLikelihood(problem, sigma)
        l2 = pints.UnknownNoiseLogLikelihood(problem)
        self.assertAlmostEqual(l1(parameters), l2(parameters + [sigma]))
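Both values can also be checked against the i.i.d. Gaussian log-likelihood written out by hand; a pure-NumPy sketch (the residuals are zero here because values was simulated without added noise):

        # Sketch: direct Gaussian log-likelihood, which both classes
        # should reproduce for these noise-free residuals.
        residuals = values - model.simulate(parameters, times)
        manual = (
            -0.5 * len(times) * np.log(2 * np.pi * sigma ** 2)
            - np.sum(residuals ** 2) / (2 * sigma ** 2))
        self.assertAlmostEqual(l1(parameters), manual)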
Example #5
    priors.append(pints.NormalPrior(E2, E2_diff**2))
    # Both branches of the original `if reversible:` test appended the
    # same prior, so the conditional was redundant and is collapsed here.
    priors.append(pints.UniformPrior(lower_bounds[6:], upper_bounds[6:]))
else:
    # As above, both branches of the `if reversible:` test were identical.
    priors.append(pints.UniformPrior(lower_bounds[0:], upper_bounds[0:]))

# Load a forward model
pints_model = electrochemistry.PintsModelAdaptor(poms_model, names)

# Create an object with links to the model and time series
problem = pints.SingleSeriesProblem(pints_model, data.time, data.current)

# Create a log-likelihood function scaled by n
log_likelihood = pints.UnknownNoiseLogLikelihood(problem)
#score = pints.RMSError(problem)
print('log_like dim =', log_likelihood.dimension())

# Create a uniform prior over both the parameters and the new noise variable
prior = pints.ComposedPrior(*priors)
print('prior dim =', prior.dimension())


class BayesianScore(pints.ErrorMeasure):
    """
    Inverts a log-likelihood to use it as an error.
    """
Example #6
# values = np.zeros((samples,len(times)))
log_posteriors = []
samplers = []
exp_parameters = np.zeros((len(mean), nexp))
for i in range(nexp):
    # sample from parameter distribution
    parameters = np.random.normal(mean, stddev)
    print('exp', i, ': param =', parameters)
    exp_parameters[:, i] = parameters

    # generate from model + add noise
    values = model.simulate(parameters, times) + \
        np.random.normal(0, noise, len(times))

    # Create a new log-likelihood function (adds an extra parameter!)
    problem = pints.SingleSeriesProblem(model, times, values)
    log_likelihood = pints.UnknownNoiseLogLikelihood(problem)

    # Create a new prior
    large = 1e9
    param_prior = pints.MultivariateNormalLogPrior(mean,
                                                   large * np.eye(len(mean)))
    noise_prior = pints.UniformLogPrior([noise / 10.0], [noise * 10.0])
    log_prior = pints.ComposedLogPrior(param_prior, noise_prior)

    # Create a posterior log-likelihood (log(likelihood * prior))
    log_posterior = pints.LogPosterior(log_likelihood, log_prior)
    log_posteriors.append(log_posterior)

    sampler = pints.AdaptiveCovarianceMCMC(mean + [noise])
    samplers.append(sampler)
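The snippet ends before the chains are advanced. A sketch of one way to drive them, assuming the single-chain ask/tell interface of AdaptiveCovarianceMCMC from this PINTS era (n_iterations is illustrative):

# Sketch (assumed ask/tell interface): propose a point, evaluate its
# log-posterior, and report it back to the sampler.
n_iterations = 1000  # illustrative chain length
chains = [[] for _ in samplers]
for _ in range(n_iterations):
    for sampler, log_posterior, chain in zip(
            samplers, log_posteriors, chains):
        x = sampler.ask()
        chain.append(sampler.tell(log_posterior(x)))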