Example #1
def figure2():
    """Make a figure for MCMC inference
    """
    num_mcmc_iters = 10000

    def stimulus(t):
        return (1 * (t < 50)) + (-100 * ((t >= 50) & (t < 75))) + (1 * (t >= 75))

    # Generate data
    y0 = np.array([0.0, 0.0])
    m = diffeqinf.DampedOscillator(stimulus, y0, 'RK45')
    m.set_tolerance(1e-8)
    true_params = [1.0, 0.2, 1.0]
    times = np.linspace(0, 100, 500)
    y = m.simulate(true_params, times)
    y += np.random.normal(0, 0.01, len(times))

    # Run inference with correct model
    problem = pints.SingleOutputProblem(m, times, y)
    likelihood = pints.GaussianLogLikelihood(problem)
    prior = pints.UniformLogPrior([0] * 4, [1e6] * 4)
    posterior = pints.LogPosterior(likelihood, prior)

    x0 = [true_params + [0.01]] * 3

    mcmc = pints.MCMCController(posterior, 3, x0)
    mcmc.set_max_iterations(num_mcmc_iters)
    chains_correct = mcmc.run()

    # Run inference with incorrect model
    m.set_tolerance(1e-2)
    problem = pints.SingleOutputProblem(m, times, y)
    likelihood = pints.GaussianLogLikelihood(problem)
    prior = pints.UniformLogPrior([0] * 4, [1e6] * 4)
    posterior = pints.LogPosterior(likelihood, prior)

    mcmc = pints.MCMCController(posterior, 3, x0)
    mcmc.set_max_iterations(num_mcmc_iters)
    chains_incorrect = mcmc.run()

    # Plot MCMC chains
    pints.plot.trace(chains_incorrect)
    plt.show()

    # Plot posteriors
    diffeqinf.plot.plot_grouped_parameter_posteriors(
        [chains_correct[0, num_mcmc_iters // 2:, :]],
        [chains_incorrect[0, num_mcmc_iters // 2:, :]],
        [chains_incorrect[1, num_mcmc_iters // 2:, :]],
        [chains_incorrect[2, num_mcmc_iters // 2:, :]],
        true_model_parameters=true_params,
        method_names=[
            'Correct', 'PoorTol_Chain1', 'PoorTol_Chain2', 'PoorTol_Chain3'
        ],
        parameter_names=['k', 'c', 'm'],
        fname=None)
    plt.show()
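Example #1 depends on the project-specific diffeqinf package. For readers without it, here is a minimal, self-contained sketch of the same inference pattern using only PINTS' built-in toy models (the model choice, noise level and iteration count are illustrative assumptions, not the original experiment):

import numpy as np
import pints
import pints.toy

# Generate noisy data from a known model
model = pints.toy.LogisticModel()
true_params = [0.015, 500]
times = np.linspace(0, 1000, 500)
values = model.simulate(true_params, times)
values += np.random.normal(0, 10, values.shape)

# Posterior: Gaussian likelihood (adds one noise parameter) times a uniform prior
problem = pints.SingleOutputProblem(model, times, values)
likelihood = pints.GaussianLogLikelihood(problem)
prior = pints.UniformLogPrior([0] * 3, [1e6] * 3)
posterior = pints.LogPosterior(likelihood, prior)

# Three chains, each started near the true parameters
x0 = [true_params + [10]] * 3
mcmc = pints.MCMCController(posterior, 3, x0)
mcmc.set_max_iterations(2000)
chains = mcmc.run()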
Example #2
    def setUpClass(cls):
        # Create test log-pdfs
        model = pints.toy.ConstantModel(1)

        problem = pints.SingleOutputProblem(model=model,
                                            times=[1, 2, 3, 4],
                                            values=[1, 2, 3, 4])
        cls.log_pdf_1 = pints.GaussianLogLikelihood(problem)

        problem = pints.SingleOutputProblem(model=model,
                                            times=[1, 2, 3, 4],
                                            values=[1, 1, 1, 1])
        cls.log_pdf_2 = pints.GaussianLogLikelihood(problem)
Example #3
    def setUpClass(cls):
        """ Prepare problem for tests. """
        # Load a forward model
        model = pints.toy.LogisticModel()

        # Create some toy data
        real_parameters = [0.015, 500]
        times = np.linspace(0, 1000, 1000)
        org_values = model.simulate(real_parameters, times)

        # Add noise
        noise = 10
        values = org_values + np.random.normal(0, noise, org_values.shape)
        real_parameters = np.array(real_parameters + [noise])

        # Create an object with links to the model and time series
        problem = pints.SingleOutputProblem(model, times, values)

        # Create an error measure
        cls.score = pints.SumOfSquaresError(problem)
        cls.boundaries = pints.RectangularBoundaries([0, 400], [0.05, 600])

        # Create a log-likelihood function (adds an extra parameter!)
        log_likelihood = pints.GaussianLogLikelihood(problem)

        # Create a uniform prior over both the parameters and the new noise
        cls.log_prior = pints.UniformLogPrior([0.01, 400, noise * 0.1],
                                              [0.02, 600, noise * 100])

        # Create a posterior log-likelihood (log(likelihood * prior))
        cls.log_posterior = pints.LogPosterior(log_likelihood, cls.log_prior)
Example #4
    def test_stopping_on_ill_conditioned_covariance_matrix(self):
        # Tests that ill-conditioned covariance matrices are detected.
        from scipy.integrate import odeint
        # TODO: A quicker test case for this would be great!

        def OnePopControlODE(y, t, p):
            a, b, c = p
            dydt = np.zeros(y.shape)
            k = (a - b) / c * (y[0] + y[1])
            dydt[0] = a * y[0] - b * y[0] - k * y[0]
            dydt[1] = k * y[0] - b * y[1]
            return dydt

        class Model(pints.ForwardModel):

            def simulate(self, parameters, times):
                y0 = [2000000, 0]
                solution = odeint(
                    OnePopControlODE, y0, times, args=(parameters,))
                return np.sum(np.array(solution), axis=1)

            def n_parameters(self):
                return 3

        model = Model()
        times = [0, 0.5, 2, 4, 8, 24]
        values = [2e6, 3.9e6, 3.1e7, 3.7e8, 1.6e9, 1.6e9]
        problem = pints.SingleOutputProblem(model, times, values)
        score = pints.SumOfSquaresError(problem)
        x = [3.42, -0.21, 5e6]
        opt = pints.OptimisationController(score, x, method=method)
        with StreamCapture() as c:
            opt.run()
        self.assertTrue('Ill-conditioned covariance matrix' in c.text())
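Note that method is never defined inside this test: in the PINTS test suite, tests like this live in per-optimiser modules that bind a module-level method variable. A sketch of that convention (the specific optimiser is an assumption, although the 'Ill-conditioned covariance matrix' message is emitted by CMA-ES):

import pints

# Module-level binding assumed by the test above
method = pints.CMAES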
Example #5
    def _problem(self):
        import numpy as np
        import pints
        import pints.toy

        # Load a forward model
        model = pints.toy.LogisticModel()

        # Create some toy data
        xtrue = [0.015, 500]
        times = np.linspace(0, 1000, 1000)
        values = model.simulate(xtrue, times)

        # Add noise
        values += np.random.normal(0, 10, values.shape)

        # Create problem
        problem = pints.SingleOutputProblem(model, times, values)
        score = pints.SumOfSquaresError(problem)

        # Select some boundaries
        boundaries = pints.RectangularBoundaries([0, 400], [0.03, 600])

        # Select a random starting point
        x0 = boundaries.sample(1)[0]

        # Select an initial sigma
        sigma0 = (1 / 6) * boundaries.range()

        return score, xtrue, x0, sigma0, boundaries
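A hedged sketch of how the tuple returned by _problem() is typically consumed inside such a test class (the optimiser choice is an assumption):

score, xtrue, x0, sigma0, boundaries = self._problem()
opt = pints.OptimisationController(
    score, x0, sigma0=sigma0, boundaries=boundaries, method=pints.CMAES)
found_parameters, found_score = opt.run()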
Example #6
    def test_evaluateS1_two_dim_array_single(self):
        # Convert data to array of shape (n_times, 1)
        values = np.reshape(self.data_single, (self.n_times, 1))

        # Create an object with links to the model and time series
        problem = pints.SingleOutputProblem(self.model_single, self.times,
                                            values)

        # Create log_likelihood
        log_likelihood = pkpd.ConstantAndMultiplicativeGaussianLogLikelihood(
            problem)

        # Evaluate likelihood for test parameters
        test_parameters = [2.0, 0.5, 1.1, 1.0]
        score, deriv = log_likelihood.evaluateS1(test_parameters)

        # Check that the likelihood score agrees with a direct call.
        # Small floating point deviations arise because, for efficiency,
        # evaluateS1 computes log(sigma_tot) as -log(1/sigma_tot)
        self.assertAlmostEqual(score, log_likelihood(test_parameters))

        # Check that the number of partials is correct
        self.assertEqual(deriv.shape, (4, ))

        # Check that partials are computed correctly
        self.assertAlmostEqual(deriv[0], -2.0553513340073835)
        self.assertAlmostEqual(deriv[1], -1.0151215581116324)
        self.assertAlmostEqual(deriv[2], -1.5082610203777322)
        self.assertAlmostEqual(deriv[3], -2.1759606944650822)
Example #7
    def setUpClass(cls):
        """ Prepare a problem for testing. """

        # Random seed
        np.random.seed(1)

        # Create toy model
        cls.model = toy.LogisticModel()
        cls.real_parameters = [0.015, 500]
        cls.times = np.linspace(0, 1000, 1000)
        cls.values = cls.model.simulate(cls.real_parameters, cls.times)

        # Add noise
        cls.noise = 10
        cls.values += np.random.normal(0, cls.noise, cls.values.shape)
        cls.real_parameters.append(cls.noise)
        cls.real_parameters = np.array(cls.real_parameters)

        # Create an object with links to the model and time series
        cls.problem = pints.SingleOutputProblem(cls.model, cls.times,
                                                cls.values)

        # Create a uniform prior over both the parameters and the new noise
        # variable
        cls.log_prior = pints.UniformLogPrior([0.01, 400, cls.noise * 0.1],
                                              [0.02, 600, cls.noise * 100])

        # Create a log likelihood
        cls.log_likelihood = pints.GaussianLogLikelihood(cls.problem)

        # Create an un-normalised log-posterior (log-likelihood + log-prior)
        cls.log_posterior = pints.LogPosterior(cls.log_likelihood,
                                               cls.log_prior)
Example #8
    def update_model(self, fixed_parameters_list):
        """
        Update the model with fixed parameters.

        Parameters
        ----------
        fixed_parameters_list
            List of fixed parameter values.
        """

        # Create a dictionary mapping fixed parameter names to their values
        name_value_dict = {
            name: value
            for (name, value
                 ) in zip(self.model._parameter_names, fixed_parameters_list)
        }
        self.model.fix_parameters(name_value_dict)

        # Set up the problem with PINTS, including the likelihood, prior
        # and posterior
        print(self.model.n_parameters())
        problem = pints.SingleOutputProblem(
            model=self.model,
            times=self.data['Time'].to_numpy(),
            values=self.data['Incidence Number'].to_numpy())
        log_likelihood = pints.GaussianLogLikelihood(problem)
        priors = self.set_prior(name_value_dict)
        self.log_prior = pints.ComposedLogPrior(*priors)
        self.log_posterior = pints.LogPosterior(log_likelihood, self.log_prior)

        # Run transformation
        self.transformations = pints.LogTransformation(
            self.log_posterior.n_parameters())
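The pints.LogTransformation built at the end is meant to be handed to the sampler so that sampling happens on a log scale. A self-contained sketch of that pattern on a toy posterior (the model, prior and controller settings are placeholders; MCMCController's transformation argument is available in recent PINTS versions):

import numpy as np
import pints
import pints.toy

# Toy posterior with strictly positive parameters
model = pints.toy.LogisticModel()
times = np.linspace(0, 1000, 100)
values = model.simulate([0.015, 500], times)
values += np.random.normal(0, 10, values.shape)
problem = pints.SingleOutputProblem(model, times, values)
log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, 10)
log_prior = pints.UniformLogPrior([0.01, 400], [0.02, 600])
log_posterior = pints.LogPosterior(log_likelihood, log_prior)

# Sample on log-transformed parameters, as update_model() sets up
transform = pints.LogTransformation(log_posterior.n_parameters())
mcmc = pints.MCMCController(
    log_posterior, 3, [[0.015, 500]] * 3, transformation=transform)
mcmc.set_max_iterations(1000)
chains = mcmc.run()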
Example #9
    def setUpClass(cls):
        """ Prepare for the test. """
        # Create toy model
        model = pints.toy.LogisticModel()
        cls.real_parameters = [0.015, 500]
        times = np.linspace(0, 1000, 1000)
        values = model.simulate(cls.real_parameters, times)

        # Add noise
        np.random.seed(1)
        cls.noise = 10
        values += np.random.normal(0, cls.noise, values.shape)
        cls.real_parameters.append(cls.noise)

        # Create an object with links to the model and time series
        problem = pints.SingleOutputProblem(model, times, values)

        # Create a uniform prior over both the parameters and the new noise
        # variable
        cls.log_prior = pints.UniformLogPrior(
            [0.01, 400],
            [0.02, 600]
        )

        # Create a log-likelihood
        cls.log_likelihood = pints.GaussianKnownSigmaLogLikelihood(
            problem, cls.noise)
Example #10
    def test_basics(self):
        # Test everything

        model = pints.toy.LogisticModel()
        times = [0, 1, 2, 3]
        x = [1, 1]
        values = model.simulate(x, times)
        noisy = values + np.array([0.01, -0.01, 0.01, -0.01])
        problem = pints.SingleOutputProblem(model, times, noisy)

        self.assertTrue(np.all(times == problem.times()))
        self.assertTrue(np.all(noisy == problem.values()))
        self.assertTrue(np.all(values == problem.evaluate(x)))
        self.assertEqual(problem.n_parameters(), model.n_parameters())
        self.assertEqual(problem.n_parameters(), 2)
        self.assertEqual(problem.n_outputs(), model.n_outputs())
        self.assertEqual(problem.n_outputs(), 1)
        self.assertEqual(problem.n_times(), len(times))

        # Test errors
        times[0] = -2
        self.assertRaises(ValueError, pints.SingleOutputProblem, model, times,
                          values)
        times = [1, 2, 2, 1]
        self.assertRaises(ValueError, pints.SingleOutputProblem, model, times,
                          values)
        times = [1, 2, 3]
        self.assertRaises(ValueError, pints.SingleOutputProblem, model, times,
                          values)

        # Multi-output problem not allowed
        model = pints.toy.FitzhughNagumoModel()
        self.assertEqual(model.n_outputs(), 2)
        values = model.simulate([1, 1, 1], times)
        self.assertRaises(ValueError, pints.SingleOutputProblem, model, times,
                          values)
Example #11
    def test_mean_squared_error_single(self):
        """ Tests :class:`pints.MeanSquaredError` with a single output. """

        # Set up problem
        model = pints.toy.ConstantModel(1)
        times = [1, 2, 3]
        values = [1, 1, 1]
        p = pints.SingleOutputProblem(model, times, values)

        # Test
        e = pints.MeanSquaredError(p)
        self.assertEqual(e.n_parameters(), 1)
        float(e([1]))
        self.assertEqual(e([1]), 0)
        self.assertEqual(e([2]), 1)
        self.assertEqual(e([0]), 1)
        self.assertEqual(e([3]), 4)

        # Derivatives
        for x in [1, 2, 3, 4]:
            y, dy = e.evaluateS1([x])
            r = x - 1
            self.assertEqual(y, e([x]))
            self.assertEqual(dy.shape, (1, ))
            self.assertTrue(np.all(dy == 2 * r))
Example #12
    def test_arma11(self):
        model = pints.toy.ConstantModel(1)
        parameters = [0]
        times = np.asarray([1, 2, 3, 4])
        model.simulate(parameters, times)
        values = np.asarray([3, -4.5, 10.5, 0.3])
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.ARMA11LogLikelihood(problem)
        self.assertAlmostEqual(log_likelihood([0, 0.9, -0.4, 1]),
                               -171.53031588534171)

        # multiple outputs
        model = pints.toy.ConstantModel(4)
        parameters = [0, 0, 0, 0]
        times = np.arange(1, 5)
        model.simulate(parameters, times)
        values = np.asarray([[3.5, 7.6, 8.5, 3.4], [1.1, -10.3, 15.6, 5.5],
                             [-10, -30.5, -5, 7.6], [-12, -10.1, -4, 2.3]])
        problem = pints.MultiOutputProblem(model, times, values)
        log_likelihood = pints.ARMA11LogLikelihood(problem)
        # ARMA1Logpdf((3.5,1.1,-10, -12)|mean=0, rho=0.5, phi=0.34, sigma=1) +
        # ARMA1Logpdf((7.6,-10.3,-30.5, -10.1)|
        #             mean=0, rho=-0.25, phi=0.1, sigma=3) +
        # ARMA1Logpdf((8.5,15.6,-5, -4)|mean=0, rho=0.9, phi=0.0, sigma=10) +
        # ARMA1Logpdf((3.4,5.5,7.6, 2.3)|mean=0, rho=0.0, phi=0.9, sigma=2)
        #      = -116.009 - 74.94 - 14.32 - 8.88
        self.assertAlmostEqual(
            log_likelihood(parameters + [
                0.5, 0.34, 1.0, -0.25, 0.1, 3.0, 0.9, 0.0, 10.0, 0.0, 0.9, 2.0
            ]), -214.17034137601107)
Example #13
    def test_multiplicative_gaussian(self):
        # Test single output
        model = pints.toy.ConstantModel(1)
        parameters = [2]
        times = np.asarray([1, 2, 3, 4])
        model.simulate(parameters, times)
        values = np.asarray([1.9, 2.1, 1.8, 2.2])
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.MultiplicativeGaussianLogLikelihood(problem)

        self.assertAlmostEqual(log_likelihood(parameters + [2.0, 1.0]),
                               -9.224056577298253)

        # Test multiple output
        model = pints.toy.ConstantModel(2)
        parameters = [1, 2]
        times = np.asarray([1, 2, 3])
        model.simulate(parameters, times)
        values = np.asarray([[1.1, 0.9, 1.5], [1.5, 2.5, 2.0]]).transpose()
        problem = pints.MultiOutputProblem(model, times, values)
        log_likelihood = pints.MultiplicativeGaussianLogLikelihood(problem)

        self.assertAlmostEqual(
            log_likelihood(parameters + [1.0, 2.0, 1.0, 1.0]),
            -12.176330824267543)
Example #14
    def __init__(self, models: List[m.SingleOutputModel], times: List[np.ndarray], values: List[np.ndarray]):
        """Initialises a single output inference problem with default objective function pints.SumOfSquaresError and
        default optimiser pints.CMAES. Standard deviation in initial starting point of optimisation as well as
        restricted domain of support for inferred parameters is disabled by default.

        Arguments:
            models {List[m.SingleOutputModel]} -- Models, which parameters are to be inferred.
            times {List[np.ndarray]} -- Times of data points for the different models.
            values {List[np.ndarray]} -- State values of data points for the different models.

        Return:
            None
        """
        # initialise problem container
        self.problem_container = []
        for model_id, model in enumerate(models):
            self.problem_container.append(pints.SingleOutputProblem(model, times[model_id], values[model_id]))

        # initialise error function container
        self.error_function_container = []
        for problem in self.problem_container:
            self.error_function_container.append(pints.SumOfSquaresError(problem))

        # initialise optimiser
        self.optimiser = pints.CMAES

        # initialise fluctuations around starting point of optimisation
        self.initial_parameter_uncertainty = None

        # initialise parameter constraints
        self.parameter_boundaries = None

        # initialise outputs
        self.estimated_parameters = None
        self.objective_score = None
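Stripped of the class machinery, the container pattern above pairs each model with its own problem and error function. The same pattern in plain PINTS, with toy models standing in for m.SingleOutputModel (an assumption for illustration):

import numpy as np
import pints
import pints.toy

# Two independent single-output problems, each with its own error measure
models = [pints.toy.ConstantModel(1), pints.toy.LogisticModel()]
times = [np.linspace(1, 10, 10), np.linspace(0, 1000, 100)]
values = [np.ones(10), np.full(100, 250.0)]

errors = [
    pints.SumOfSquaresError(pints.SingleOutputProblem(m, t, v))
    for m, t, v in zip(models, times, values)
]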
Example #15
    def test_ar1(self):
        # single outputs
        model = pints.toy.ConstantModel(1)
        parameters = [0]
        times = np.asarray([1, 2, 3])
        model.simulate(parameters, times)
        values = np.asarray([1.0, -10.7, 15.5])
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.AR1LogLikelihood(problem)
        self.assertAlmostEqual(log_likelihood([0, 0.5, 5]),
                               -19.706737485492436)

        # multiple outputs
        model = pints.toy.ConstantModel(4)
        parameters = [0, 0, 0, 0]
        times = np.arange(1, 5)
        model.simulate(parameters, times)
        values = np.asarray([[3.5, 7.6, 8.5, 3.4], [1.1, -10.3, 15.6, 5.5],
                             [-10, -30.5, -5, 7.6], [-12, -10.1, -4, 2.3]])
        problem = pints.MultiOutputProblem(model, times, values)
        log_likelihood = pints.AR1LogLikelihood(problem)
        # Test AR1Logpdf((3.5,1.1,-10, -12)|mean=0, rho=0.5, sigma=1) +
        #      AR1Logpdf((7.6,-10.3,-30.5, -10.1)|mean=0, rho=-0.25, sigma=3) +
        #      AR1Logpdf((8.5,15.6,-5, -4)|mean=0, rho=0.9, sigma=10) +
        #      AR1Logpdf((3.4,5.5,7.6, 2.3)|mean=0, rho=0.0, sigma=2)
        #      = -109.4752924909364 - 93.58199 - 18.3833 - 16.4988
        self.assertAlmostEqual(
            log_likelihood(parameters +
                           [0.5, 1.0, -0.25, 3.0, 0.9, 10.0, 0.0, 2.0]),
            -237.93936126949615)
Example #16
    def test_build_tree_nan(self):
        # This method gives nan in the hamiltonian_dash
        # in the build_tree function
        # Needed for coverage

        model = pints.toy.LogisticModel()
        real_parameters = np.array([0.015, 20])
        times = np.linspace(0, 1000, 50)
        org_values = model.simulate(real_parameters, times)
        np.random.seed(1)
        noise = 0.1
        values = org_values + np.random.normal(0, noise, org_values.shape)
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, noise)

        log_prior = pints.UniformLogPrior([0.0001, 1], [1, 500])

        log_posterior = pints.LogPosterior(log_likelihood, log_prior)

        xs = [[0.36083914, 1.99013825]]
        nuts_mcmc = pints.MCMCController(log_posterior,
                                         len(xs),
                                         xs,
                                         method=pints.NoUTurnMCMC)

        nuts_mcmc.set_max_iterations(50)
        nuts_mcmc.set_log_to_screen(False)
        np.random.seed(5)
        nuts_chains = nuts_mcmc.run()

        self.assertFalse(np.isnan(np.sum(nuts_chains)))
Example #17
    def test_sum_of_squares_error_single(self):
        """ Tests :class:`pints.MeanSquaredError` with a single output. """

        # Set up problem
        model = pints.toy.ConstantModel(1)
        times = [1, 2, 3]
        values = [1, 1, 1]
        p = pints.SingleOutputProblem(model, times, values)

        # Test
        e = pints.SumOfSquaresError(p)
        self.assertEqual(e.n_parameters(), 1)
        float(e([1]))
        self.assertEqual(e([1]), 0)
        self.assertEqual(e([2]), 3)
        self.assertEqual(e([0]), 3)
        self.assertEqual(e([3]), 12)

        # Derivatives
        for x in [1, 2, 3, 4]:
            ex, dex = e.evaluateS1([x])
            r = x - 1
            self.assertEqual(ex, e([x]))
            self.assertEqual(dex.shape, (1, ))
            self.assertEqual(dex[0], 2 * 3 * r)
Example #18
    def test_gaussian_integrated_uniform_log_likelihood_single(self):
        # Tests GaussianIntegratedUniformLogLikelihood with single output
        # problem
        model = pints.toy.ConstantModel(1)
        parameters = [0]
        times = np.asarray([1, 2, 3])
        model.simulate(parameters, times)
        values = np.asarray([1.0, -10.7, 15.5])
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.GaussianIntegratedUniformLogLikelihood(
            problem, 2, 4)
        self.assertAlmostEqual(log_likelihood([0]), -20.441037907121299)

        # test incorrect constructors
        self.assertRaises(ValueError,
                          pints.GaussianIntegratedUniformLogLikelihood,
                          problem, -1, 2)
        self.assertRaises(ValueError,
                          pints.GaussianIntegratedUniformLogLikelihood,
                          problem, 0, 0)
        self.assertRaises(ValueError,
                          pints.GaussianIntegratedUniformLogLikelihood,
                          problem, 2, 1)
        self.assertRaises(ValueError,
                          pints.GaussianIntegratedUniformLogLikelihood,
                          problem, [1, 2], [2, 3])
Example #19
    def setUpClass(cls):
        cls.kernel = flexnoise.kernels.GPLaplacianKernel
        cls.model = pints.toy.ConstantModel(1)
        cls.times = np.linspace(1.0, 4.0, 15)
        cls.data = np.random.normal(2.0, 1.0, 15)
        cls.gp_times = np.array([1.0, 2.5, 4.0])
        cls.problem = pints.SingleOutputProblem(
            cls.model, cls.times, cls.data)
Example #20
    def test_model_that_gives_nan(self):
        # This model will return a nan in the gradient evaluation, which
        # originally tripped up the find_reasonable_epsilon function in nuts.
        # Run it for a bit so that we get coverage on the if statement!

        model = pints.toy.LogisticModel()
        real_parameters = model.suggested_parameters()
        times = model.suggested_parameters()
        org_values = model.simulate(real_parameters, times)
        np.random.seed(1)
        noise = 0.2
        values = org_values + np.random.normal(0, noise, org_values.shape)
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, noise)

        log_prior = pints.UniformLogPrior([0.01, 40], [0.2, 60])

        log_posterior = pints.LogPosterior(log_likelihood, log_prior)

        xs = [real_parameters * 1.1]
        nuts_mcmc = pints.MCMCController(log_posterior,
                                         len(xs),
                                         xs,
                                         method=pints.NoUTurnMCMC)

        nuts_mcmc.set_max_iterations(10)
        nuts_mcmc.set_log_to_screen(False)
        nuts_chains = nuts_mcmc.run()

        self.assertFalse(np.isnan(np.sum(nuts_chains)))
Example #21
def main():
    # Constants
    timeRangesToUse = [[1, 2499], [2549, 2999], [3049, 4999], [5049, 14999],
                       [15049, 19999], [20049, 29999], [30049, 64999],
                       [65049, 69999], [70049, -1]]
    true_parameters = [
        2.26E-04, 0.0699, 3.45E-05, 0.05462, 0.0873, 8.92E-03, 5.150E-3,
        0.03158, 0.1524
    ]

    model = ChannelModelPintsWrapper()
    data = pd.read_csv("data/averaged-data.txt", delim_whitespace=True)
    dat = extract_time_ranges(data.values, timeRangesToUse)
    times = dat[:, 0]
    values = dat[:, 1]

    current = model.simulate(true_parameters, times)
    plt.plot(times, values)
    plt.plot(times, current)
    plt.show()
    problem = pints.SingleOutputProblem(model, times, values)
    error = pints.SumOfSquaresError(problem)
    boundaries = MarkovModelBoundaries()
    x0 = np.array([0.1] * 9)
    # Note: the optimisation below starts from true_parameters; the x0
    # defined above is never used
    found_parameters, found_value = pints.optimise(error,
                                                   true_parameters,
                                                   boundaries=boundaries)
    print(found_parameters, found_value)
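Since ChannelModelPintsWrapper and MarkovModelBoundaries are project-specific, here is a hedged, self-contained sketch of the same pints.optimise call on a toy problem (all values are illustrative):

import numpy as np
import pints
import pints.toy

model = pints.toy.LogisticModel()
times = np.linspace(0, 1000, 100)
values = model.simulate([0.015, 500], times)
problem = pints.SingleOutputProblem(model, times, values)
error = pints.SumOfSquaresError(problem)

# Start from a perturbed point inside rectangular boundaries
x0 = [0.02, 400]
boundaries = pints.RectangularBoundaries([0, 200], [1, 1000])
found_parameters, found_value = pints.optimise(error, x0, boundaries=boundaries)
print(found_parameters, found_value)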
Example #22
def run(model, real_parameters, noise_used, log_prior_used):
    # Create some toy data
    times = np.linspace(1, 1000, 50)
    org_values = model.simulate(real_parameters, times)

    # Add noise
    noise = 10
    values = org_values + np.random.normal(0, noise, org_values.shape)
    real_parameters = np.array(real_parameters)


    # Create an object with links to the model and time series
    problem = pints.SingleOutputProblem(model, times, values)

    # Create a log-likelihood function with known sigma (adds no extra
    # parameters)
    log_likelihood_used = pints.GaussianKnownSigmaLogLikelihood(problem, [noise_used])

    # Create a posterior log-likelihood (log(likelihood * prior)), using the
    # prior supplied by the caller
    log_posterior = pints.LogPosterior(log_likelihood_used, log_prior_used)

    # Choose starting points for 3 mcmc chains
    xs = [
        real_parameters,
        real_parameters * 1.01,
        real_parameters * 0.99,
    ]

    # Create an MCMC routine with three chains
    mcmc = pints.MCMCController(log_posterior, 3, xs, method=pints.HaarioACMC)

    sample_size = 4000
    # Add stopping criterion
    mcmc.set_max_iterations(sample_size)

    # Start adapting after 1000 iterations
    mcmc.set_initial_phase_iterations(sample_size//4)

    # Disable logging mode
    mcmc.set_log_to_screen(False)

    # Run!
    print('Running...')
    chains = mcmc.run()
    print('Done!')
    # Grow the post-burn-in window until the first parameter's R-hat drops
    # below 1.05 (or the whole chain is used)
    s = sample_size // 4 + 1
    # HMC: s = 1
    converged = False
    while s < sample_size:
        chains_cut = chains[:, sample_size // 4:s + 1]
        rhat = pints.rhat(chains_cut)
        s += 1
        if rhat[0] < 1.05:
            converged = True
            break
    print(s)

    # Return the post-convergence samples of the first parameter from the
    # first chain
    return chains[0][s:][:, 0]
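The convergence loop above leans on pints.rhat; a minimal sketch of that diagnostic in isolation (synthetic chains, purely for illustration):

import numpy as np
import pints

# rhat expects chains of shape (n_chains, n_samples, n_parameters)
chains = np.random.normal(0, 1, (3, 1000, 1))
print(pints.rhat(chains))  # values near 1 indicate convergence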
Example #23
    def test_sum_of_independent_log_pdfs(self):

        # Test single output
        model = pints.toy.LogisticModel()
        x = [0.015, 500]
        sigma = 0.1
        times = np.linspace(0, 1000, 100)
        values = model.simulate(x, times) + 0.1
        problem = pints.SingleOutputProblem(model, times, values)

        l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        l2 = pints.GaussianLogLikelihood(problem)
        ll = pints.SumOfIndependentLogPDFs([l1, l1, l1])
        self.assertEqual(l1.n_parameters(), ll.n_parameters())
        self.assertEqual(3 * l1(x), ll(x))

        # Test single output derivatives
        y, dy = ll.evaluateS1(x)
        self.assertEqual(y, ll(x))
        self.assertEqual(dy.shape, (2, ))
        y1, dy1 = l1.evaluateS1(x)
        self.assertTrue(np.all(3 * dy1 == dy))

        # Wrong number of arguments
        self.assertRaises(TypeError, pints.SumOfIndependentLogPDFs)
        self.assertRaises(ValueError, pints.SumOfIndependentLogPDFs, [l1])

        # Wrong types
        self.assertRaises(ValueError, pints.SumOfIndependentLogPDFs, [l1, 1])
        self.assertRaises(ValueError, pints.SumOfIndependentLogPDFs,
                          [problem, l1])

        # Mismatching dimensions
        self.assertRaises(ValueError, pints.SumOfIndependentLogPDFs, [l1, l2])

        # Test multi-output
        model = pints.toy.FitzhughNagumoModel()
        x = model.suggested_parameters()
        nt = 10
        nx = model.n_parameters()
        times = np.linspace(0, 10, nt)
        values = model.simulate(x, times) + 0.01
        problem = pints.MultiOutputProblem(model, times, values)
        sigma = 0.01
        l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        ll = pints.SumOfIndependentLogPDFs([l1, l1, l1])
        self.assertEqual(l1.n_parameters(), ll.n_parameters())
        self.assertEqual(3 * l1(x), ll(x))

        # Test multi-output derivatives
        y, dy = ll.evaluateS1(x)

        # Note: y and ll(x) differ a bit, because the solver acts slightly
        # different when evaluating with and without sensitivities!
        self.assertAlmostEqual(y, ll(x), places=3)

        self.assertEqual(dy.shape, (nx, ))
        y1, dy1 = l1.evaluateS1(x)
        self.assertTrue(np.all(3 * dy1 == dy))
Example #24
    def test_known_noise_gaussian_single_and_multi(self):
        """
        Tests the output of single-series against multi-series known noise
        log-likelihoods.
        """

        # Define boring 1-output and 2-output models
        class NullModel1(pints.ForwardModel):
            def n_parameters(self):
                return 1

            def simulate(self, x, times):
                return np.zeros(times.shape)

        class NullModel2(pints.ForwardModel):
            def n_parameters(self):
                return 1

            def n_outputs(self):
                return 2

            def simulate(self, x, times):
                return np.zeros((len(times), 2))

        # Create two single output problems
        times = np.arange(10)
        np.random.seed(1)
        sigma1 = 3
        sigma2 = 5
        values1 = np.random.uniform(0, sigma1, times.shape)
        values2 = np.random.uniform(0, sigma2, times.shape)
        model1d = NullModel1()
        problem1 = pints.SingleOutputProblem(model1d, times, values1)
        problem2 = pints.SingleOutputProblem(model1d, times, values2)
        log1 = pints.GaussianKnownSigmaLogLikelihood(problem1, sigma1)
        log2 = pints.GaussianKnownSigmaLogLikelihood(problem2, sigma2)

        # Create one multi output problem
        values3 = np.array([values1, values2]).swapaxes(0, 1)
        model2d = NullModel2()
        problem3 = pints.MultiOutputProblem(model2d, times, values3)
        log3 = pints.GaussianKnownSigmaLogLikelihood(
            problem3, [sigma1, sigma2])

        # Check if we get the right output
        self.assertAlmostEqual(log1(0) + log2(0), log3(0))
Example #25
    def setUpClass(cls):
        cls.kernel = flexnoise.kernels.LaplacianKernel
        cls.model = pints.toy.ConstantModel(1)
        cls.times = np.arange(1, 91)
        cls.data = np.hstack(
            (np.random.normal(2.0, 0.1, 30), np.random.normal(2.0, 2.0, 30),
             np.random.normal(2.0, 0.1, 30)))
        cls.problem = pints.SingleOutputProblem(
            cls.model, cls.times, cls.data)
        cls.model_prior = pints.UniformLogPrior([0] * 1, [1e6] * 1)
Example #26
def figure1():
    """Make an interactive figure for numerical error.
    """
    def stimulus(t):
        return (1 * (t < 50)) + (-100 * ((t >= 50) & (t < 75))) + (1 * (t >= 75))

    # Generate data
    y0 = np.array([0.0, 0.0])
    m = diffeqinf.DampedOscillator(stimulus, y0, 'RK45')
    m.set_tolerance(1e-8)
    true_params = [1.0, 0.2, 1.0]
    times = np.linspace(0, 100, 500)
    y = m.simulate(true_params, times)
    y += np.random.normal(0, 0.01, len(times))

    # Forward Euler method
    m = diffeqinf.DampedOscillator(stimulus, y0, diffeqinf.ForwardEuler)
    m.set_step_size(0.01)
    problem = pints.SingleOutputProblem(m, times, y)
    likelihood = pints.GaussianLogLikelihood(problem)

    step_sizes = [0.2, 0.1, 0.01]
    true_params = [1.0, 0.2, 1.0, 0.01]

    diffeqinf.plot.plot_likelihoods(problem,
                                    likelihood,
                                    true_params,
                                    step_sizes=step_sizes,
                                    param_names=['k', 'c', 'm'])

    # RK45 method
    m = diffeqinf.DampedOscillator(stimulus, y0, 'RK45')
    m.set_tolerance(1e-2)

    problem = pints.SingleOutputProblem(m, times, y)
    likelihood = pints.GaussianLogLikelihood(problem)

    tolerances = [0.01, 0.0001, 0.000001]

    diffeqinf.plot.plot_likelihoods(problem,
                                    likelihood,
                                    true_params,
                                    tolerances=tolerances,
                                    param_names=['k', 'c', 'm'])
Example #27
    def test_student_t_log_likelihood_single(self):
        # Single-output test for Student-t noise log-likelihood methods

        model = pints.toy.ConstantModel(1)
        parameters = [0]
        times = np.asarray([1, 2, 3])
        model.simulate(parameters, times)
        values = np.asarray([1.0, -10.7, 15.5])
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.StudentTLogLikelihood(problem)
        # Test Student-t_logpdf(values|mean=0, df = 3, scale = 10) = -11.74..
        self.assertAlmostEqual(log_likelihood([0, 3, 10]), -11.74010919785115)
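The expected value in the comment can be reproduced independently with scipy (a cross-check, not part of the original test):

import numpy as np
from scipy.stats import t

values = np.asarray([1.0, -10.7, 15.5])
print(np.sum(t.logpdf(values, df=3, loc=0, scale=10)))  # about -11.7401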
Example #28
    def test_cauchy_log_likelihood_single(self):
        # Single-output test for Cauchy noise log-likelihood methods

        model = pints.toy.ConstantModel(1)
        parameters = [0]
        times = np.asarray([1, 2, 3])
        model.simulate(parameters, times)
        values = np.asarray([1.0, -10.7, 15.5])
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.CauchyLogLikelihood(problem)
        # Test Cauchy_logpdf(values|mean=0, scale = 10) = -12.34..
        self.assertAlmostEqual(log_likelihood([0, 10]), -12.3394986541736)
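And the same kind of scipy cross-check for the Cauchy case:

import numpy as np
from scipy.stats import cauchy

values = np.asarray([1.0, -10.7, 15.5])
print(np.sum(cauchy.logpdf(values, loc=0, scale=10)))  # about -12.3395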
Example #29
    def test_bad_log_pdfs_parameters(self):
        model = pints.toy.ConstantModel(1)
        problem = pints.SingleOutputProblem(model=model,
                                            times=[1, 2, 3, 4],
                                            values=[1, 2, 3, 4])
        log_pdf = pints.ConstantAndMultiplicativeGaussianLogLikelihood(problem)

        log_pdfs = [self.log_pdf_1, log_pdf]
        pooled = [True, True]

        self.assertRaisesRegex(ValueError,
                               'All log-pdfs passed to PooledLogPDFs',
                               pints.PooledLogPDF, log_pdfs, pooled)
Example #30
    def test_not_implemented_error(self):
        # Convert data to list
        values = self.data_single.tolist()

        # Create an object with links to the model and time series
        problem = pints.SingleOutputProblem(self.model_single, self.times,
                                            values)

        # Create error measure
        error = pints.RootMeanSquaredError(problem)

        # Check that not implemented error is raised for evaluateS1
        self.assertRaisesRegex(NotImplementedError, '', error.evaluateS1, 1)