Example 1
    def test_uniform_prior_sampling(self):
        lower = np.array([1, 2])
        upper = np.array([10, 20])
        p = pints.UniformLogPrior(lower, upper)

        # Test output formats
        d = 2
        n = 1
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))
        n = 10
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))

        p = pints.UniformLogPrior([0], [1])
        d = 1
        n = 1
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))
        n = 10
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))

        # Roughly check distribution (main checks are in numpy!)
        np.random.seed(1)
        p = pints.UniformLogPrior(lower, upper)
        x = p.sample(10000)
        self.assertTrue(np.all(lower <= x))
        self.assertTrue(np.all(upper > x))
        self.assertTrue(
            np.linalg.norm(x.mean(axis=0) - 0.5 * (upper + lower)) < 0.1)
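Taken together, the assertions above pin down the UniformLogPrior call contract used throughout these examples. As a quick orientation, here is a minimal standalone sketch of that contract (not part of the test suite): a constant log-density inside the support, -inf outside, and (n, d)-shaped samples.

import numpy as np
import pints

p = pints.UniformLogPrior([1, 2], [10, 20])
print(p.n_parameters())   # 2
print(p([5, 10]))         # constant inside the support: -log(9 * 18)
print(p([0, 10]))         # -inf outside the support
print(p.sample(3).shape)  # (3, 2)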
Example 2
def figure2():
    """Make a figure for MCMC inference
    """
    num_mcmc_iters = 10000

    def stimulus(t):
        return (1 * (t < 50)) + (-100 * ((t >= 50) & (t < 75))) + (1 * (t >= 75))

    # Generate data
    y0 = np.array([0.0, 0.0])
    m = diffeqinf.DampedOscillator(stimulus, y0, 'RK45')
    m.set_tolerance(1e-8)
    true_params = [1.0, 0.2, 1.0]
    times = np.linspace(0, 100, 500)
    y = m.simulate(true_params, times)
    y += np.random.normal(0, 0.01, len(times))

    # Run inference with correct model
    problem = pints.SingleOutputProblem(m, times, y)
    likelihood = pints.GaussianLogLikelihood(problem)
    prior = pints.UniformLogPrior([0] * 4, [1e6] * 4)
    posterior = pints.LogPosterior(likelihood, prior)

    x0 = [true_params + [0.01]] * 3

    mcmc = pints.MCMCController(posterior, 3, x0)
    mcmc.set_max_iterations(num_mcmc_iters)
    chains_correct = mcmc.run()

    # Run inference with incorrect model
    m.set_tolerance(1e-2)
    problem = pints.SingleOutputProblem(m, times, y)
    likelihood = pints.GaussianLogLikelihood(problem)
    prior = pints.UniformLogPrior([0] * 4, [1e6] * 4)
    posterior = pints.LogPosterior(likelihood, prior)

    mcmc = pints.MCMCController(posterior, 3, x0)
    mcmc.set_max_iterations(num_mcmc_iters)
    chains_incorrect = mcmc.run()

    # Plot MCMC chains
    pints.plot.trace(chains_incorrect)
    plt.show()

    # Plot posteriors
    diffeqinf.plot.plot_grouped_parameter_posteriors(
        [chains_correct[0, num_mcmc_iters // 2:, :]],
        [chains_incorrect[0, num_mcmc_iters // 2:, :]],
        [chains_incorrect[1, num_mcmc_iters // 2:, :]],
        [chains_incorrect[2, num_mcmc_iters // 2:, :]],
        true_model_parameters=true_params,
        method_names=[
            'Correct', 'PoorTol_Chain1', 'PoorTol_Chain2', 'PoorTol_Chain3'
        ],
        parameter_names=['k', 'c', 'm'],
        fname=None)
    plt.show()
Example 3
    def set_prior(self, name_value_dict):
        """
        Organise the priors for free parameters.

        The priors are pre-tested separately.

        Parameters
        ----------
        name_value_dict
            Dictionary mapping parameter names to values; parameters whose
            value is ``None`` are treated as free and receive a prior.
        """
        prior_list = [
            pints.UniformLogPrior(0, 100),
            pints.UniformLogPrior(0, 10),
            pints.UniformLogPrior(0, 10),
            pints.UniformLogPrior(0, 100),
            pints.UniformLogPrior(0, 1),
            pints.UniformLogPrior(0, 1),
            pints.UniformLogPrior(0, 1)
        ]
        prior_dict = {
            name: value
            for (name, value) in zip(self.model._parameter_names, prior_list)
        }

        priors = []
        for key in name_value_dict:
            if name_value_dict[key] is None:
                priors.append(prior_dict[key])

        priors.append(pints.UniformLogPrior(0, 1))

        return priors
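The selection logic in set_prior can be illustrated standalone. In this hedged sketch, the parameter names and the fixed value are assumptions for illustration only; the trailing prior mirrors the one the method always appends.

import pints

parameter_names = ['k1', 'k2']                  # assumed names
prior_list = [pints.UniformLogPrior(0, 100),
              pints.UniformLogPrior(0, 10)]
prior_dict = dict(zip(parameter_names, prior_list))

name_value_dict = {'k1': None, 'k2': 2.5}       # k1 free, k2 fixed
priors = [prior_dict[k] for k, v in name_value_dict.items() if v is None]
priors.append(pints.UniformLogPrior(0, 1))      # prior appended unconditionally
print(len(priors))                              # 2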
Example 4
    def setUpClass(cls):
        """ Prepare a problem for testing. """

        # Random seed
        np.random.seed(1)

        # Create toy model
        cls.model = toy.LogisticModel()
        cls.real_parameters = [0.015, 500]
        cls.times = np.linspace(0, 1000, 1000)
        cls.values = cls.model.simulate(cls.real_parameters, cls.times)

        # Add noise
        cls.noise = 10
        cls.values += np.random.normal(0, cls.noise, cls.values.shape)
        cls.real_parameters.append(cls.noise)
        cls.real_parameters = np.array(cls.real_parameters)

        # Create an object with links to the model and time series
        cls.problem = pints.SingleOutputProblem(cls.model, cls.times,
                                                cls.values)

        # Create a uniform prior over both the parameters and the new noise
        # variable
        cls.log_prior = pints.UniformLogPrior([0.01, 400, cls.noise * 0.1],
                                              [0.02, 600, cls.noise * 100])

        # Create a log likelihood
        cls.log_likelihood = pints.GaussianLogLikelihood(cls.problem)

        # Create an un-normalised log-posterior (log-likelihood + log-prior)
        cls.log_posterior = pints.LogPosterior(cls.log_likelihood,
                                               cls.log_prior)
Example 5
    def test_construction_errors(self):
        # Tests if invalid constructor calls are picked up.

        # First arg must be a log likelihood
        self.assertRaisesRegex(ValueError, 'must extend pints.LogLikelihood',
                               pints.NestedController, 'hello', self.log_prior)

        # Second arg must be a log prior
        self.assertRaisesRegex(ValueError, 'must extend pints.LogPrior',
                               pints.NestedController, self.log_likelihood,
                               self.log_likelihood)

        # Both must have same number of parameters
        log_prior = pints.UniformLogPrior([0.01, 400, 1], [0.02, 600, 3])
        self.assertRaisesRegex(ValueError, 'same number of parameters',
                               pints.NestedController, self.log_likelihood,
                               log_prior)

        # Test that ellipsoidal sampling is used by default
        sampler = pints.NestedController(self.log_likelihood, self.log_prior)
        self.assertEqual(sampler._sampler.name(), 'Nested ellipsoidal sampler')
        self.assertRaisesRegex(
            ValueError, 'Given method must extend pints.NestedSampler.',
            pints.NestedController, self.log_likelihood, self.log_prior,
            pints.DifferentialEvolutionMCMC)

        self.assertRaisesRegex(
            ValueError, 'Given method must extend pints.NestedSampler.',
            pints.NestedController, self.log_likelihood, self.log_prior, 0.0)
Example 6
    def test_nparameters_error(self):
        # Test that error is thrown when parameters from log prior and error
        # measure do not match.
        log_prior = pints.UniformLogPrior([0.0, 0, 0], [0.2, 100, 1])

        self.assertRaises(ValueError, pints.ABCController, self.error_measure,
                          log_prior)
Example 7
    def __call__(self, x):
        """
        Evaluates the log-prior in a PINTS framework.

        """
        # Prior contribution for initial R
        log_prior = pints.UniformLogPrior([0], [5])(x[0])

        # Prior contribution for ICs
        # log_prior += pints.UniformLogPrior([0], [1000])(x[-1])

        # Variance for betas
        sigma_b = x[-1]

        log_prior += gamma.logpdf(sigma_b, 1, scale=1 / 100)

        # Prior contribution for betas
        LEN = len(np.arange(44, len(self._times), 7))
        for r in range(len(self._model.regions)):
            log_prior += norm.logpdf(np.log(x[r * LEN + 1]),
                                     loc=0,
                                     scale=sigma_b)
            for d in range(1, LEN):
                log_prior += norm.logpdf(np.log(x[r * LEN + d + 1]),
                                         loc=np.log(x[r * LEN + d]),
                                         scale=sigma_b)

        return log_prior
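The loop above places a log-normal random walk on the per-interval betas, with a gamma hyper-prior on the walk scale. The following standalone sketch isolates that computation for a single region; the values are illustrative, not from the source.

import numpy as np
from scipy.stats import gamma, norm

sigma_b = 0.1                            # walk scale (illustrative)
betas = np.array([1.0, 1.1, 0.9, 1.05])  # one region's betas (illustrative)

log_prior = gamma.logpdf(sigma_b, 1, scale=1 / 100)
log_prior += norm.logpdf(np.log(betas[0]), loc=0, scale=sigma_b)
for d in range(1, len(betas)):
    log_prior += norm.logpdf(np.log(betas[d]),
                             loc=np.log(betas[d - 1]),
                             scale=sigma_b)
print(log_prior)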
Example 8
    def test_method_near_boundary(self):

        # Create log pdf
        log_pdf = pints.UniformLogPrior([0, 0], [1, 1])

        # Create mcmc
        x0 = np.array([0.999, 0.999])
        sigma = [[1, 0], [0, 1]]
        mcmc = pints.NoUTurnMCMC(x0, sigma)

        # Perform short run
        chain = []
        for i in range(2 * mcmc.number_adaption_steps()):
            x = mcmc.ask()
            fx, gr = log_pdf.evaluateS1(x)
            sample = mcmc.tell((fx, gr))
            if sample is not None:
                chain.append(sample)
                if np.all(sample == x):
                    self.assertEqual(mcmc.current_log_pdf(), fx)

        chain = np.array(chain)
        self.assertGreater(chain.shape[0], 1)
        self.assertEqual(chain.shape[1], len(x0))
        self.assertGreater(mcmc.divergent_iterations().shape[0], 0)
Example 9
    def setUpClass(cls):
        """ Prepare problem for tests. """
        # Load a forward model
        model = pints.toy.LogisticModel()

        # Create some toy data
        real_parameters = [0.015, 500]
        times = np.linspace(0, 1000, 1000)
        org_values = model.simulate(real_parameters, times)

        # Add noise
        noise = 10
        values = org_values + np.random.normal(0, noise, org_values.shape)
        real_parameters = np.array(real_parameters + [noise])

        # Create an object with links to the model and time series
        problem = pints.SingleOutputProblem(model, times, values)

        # Create an error measure
        cls.score = pints.SumOfSquaresError(problem)
        cls.boundaries = pints.RectangularBoundaries([0, 400], [0.05, 600])

        # Create a log-likelihood function (adds an extra parameter!)
        log_likelihood = pints.GaussianLogLikelihood(problem)

        # Create a uniform prior over both the parameters and the new noise
        cls.log_prior = pints.UniformLogPrior([0.01, 400, noise * 0.1],
                                              [0.02, 600, noise * 100])

        # Create a posterior log-likelihood (log(likelihood * prior))
        cls.log_posterior = pints.LogPosterior(log_likelihood, cls.log_prior)
Example 10
    def setUpClass(cls):
        """ Prepare for the test. """
        # Create toy model
        model = pints.toy.LogisticModel()
        cls.real_parameters = [0.015, 500]
        times = np.linspace(0, 1000, 1000)
        values = model.simulate(cls.real_parameters, times)

        # Add noise
        np.random.seed(1)
        cls.noise = 10
        values += np.random.normal(0, cls.noise, values.shape)
        cls.real_parameters.append(cls.noise)

        # Create an object with links to the model and time series
        problem = pints.SingleOutputProblem(model, times, values)

        # Create a uniform prior over the two model parameters (the noise
        # level is known here, so it gets no prior)
        cls.log_prior = pints.UniformLogPrior(
            [0.01, 400],
            [0.02, 600]
        )

        # Create a log-likelihood
        cls.log_likelihood = pints.GaussianKnownSigmaLogLikelihood(
            problem, cls.noise)
Example 11
    def test_build_tree_nan(self):
        # This problem produces a nan in hamiltonian_dash inside the
        # build_tree function. Needed for coverage.

        model = pints.toy.LogisticModel()
        real_parameters = np.array([0.015, 20])
        times = np.linspace(0, 1000, 50)
        org_values = model.simulate(real_parameters, times)
        np.random.seed(1)
        noise = 0.1
        values = org_values + np.random.normal(0, noise, org_values.shape)
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, noise)

        log_prior = pints.UniformLogPrior([0.0001, 1], [1, 500])

        log_posterior = pints.LogPosterior(log_likelihood, log_prior)

        xs = [[0.36083914, 1.99013825]]
        nuts_mcmc = pints.MCMCController(log_posterior,
                                         len(xs),
                                         xs,
                                         method=pints.NoUTurnMCMC)

        nuts_mcmc.set_max_iterations(50)
        nuts_mcmc.set_log_to_screen(False)
        np.random.seed(5)
        nuts_chains = nuts_mcmc.run()

        self.assertFalse(np.isnan(np.sum(nuts_chains)))
Example 12
    def test_method_near_boundary(self):

        # Create log pdf
        log_pdf = pints.UniformLogPrior([0, 0], [1, 1])

        # Create mcmc
        x0 = np.array([0.999, 0.999])
        sigma = [[1, 0], [0, 1]]
        mcmc = pints.NoUTurnMCMC(x0, sigma)

        # Perform short run
        chain = []
        for i in range(2 * mcmc.number_adaption_steps()):
            x = mcmc.ask()
            fx, gr = log_pdf.evaluateS1(x)
            reply = mcmc.tell((fx, gr))
            if reply is not None:
                y, fy, ac = reply
                chain.append(y)
                recalc = log_pdf.evaluateS1(y)
                self.assertEqual(fy[0], recalc[0])
                self.assertTrue(np.all(fy[1] == recalc[1]))

        chain = np.array(chain)
        self.assertGreater(chain.shape[0], 1)
        self.assertEqual(chain.shape[1], len(x0))
        self.assertGreater(mcmc.divergent_iterations().shape[0], 0)
Example 13
    def test_model_that_gives_nan(self):
        # This model will return a nan in the gradient evaluation, which
        # originally tripped up the find_reasonable_epsilon function in nuts.
        # Run it for a bit so that we get coverage on the if statement!

        model = pints.toy.LogisticModel()
        real_parameters = model.suggested_parameters()
        times = model.suggested_times()
        org_values = model.simulate(real_parameters, times)
        np.random.seed(1)
        noise = 0.2
        values = org_values + np.random.normal(0, noise, org_values.shape)
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, noise)

        log_prior = pints.UniformLogPrior([0.01, 40], [0.2, 60])

        log_posterior = pints.LogPosterior(log_likelihood, log_prior)

        xs = [real_parameters * 1.1]
        nuts_mcmc = pints.MCMCController(log_posterior,
                                         len(xs),
                                         xs,
                                         method=pints.NoUTurnMCMC)

        nuts_mcmc.set_max_iterations(10)
        nuts_mcmc.set_log_to_screen(False)
        nuts_chains = nuts_mcmc.run()

        self.assertFalse(np.isnan(np.sum(nuts_chains)))
Example 14
    def _make_pints_posterior(self):
        """Rebuild the Pints posterior and save it.
        """
        # Build a uniform model prior if one is not supplied
        if self.model_prior is None:
            num_model_params = self.problem.n_parameters()
            model_prior = pints.UniformLogPrior([-1e6] * num_model_params,
                                                [1e6] * num_model_params)
        else:
            model_prior = self.model_prior

        # Get the GP prior
        kernel_prior = NonstatGPLogPrior(
            self.gp_times,
            self.kernel.num_parameters() // len(self.gp_times), self.mu,
            self.alpha, self.beta)

        # Combine the two priors
        log_prior = pints.ComposedLogPrior(model_prior, kernel_prior)

        # Build the likelihood
        log_likelihood = flexnoise.KernelCovarianceLogLikelihood(
            self.problem, self.kernel)

        # Build the posterior
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)

        self.posterior = log_posterior
Example 15
    def test_composed_prior(self):
        import pints
        import numpy as np

        m1 = 10
        c1 = 2
        p1 = pints.GaussianLogPrior(m1, c1)

        m2 = -50
        c2 = 100
        p2 = pints.GaussianLogPrior(m2, c2)

        p = pints.ComposedLogPrior(p1, p2)

        # Test at center
        peak1 = p1([m1])
        peak2 = p2([m2])
        self.assertEqual(p([m1, m2]), peak1 + peak2)

        # Test at random points
        np.random.seed(1)
        for i in range(100):
            x = np.random.normal(m1, c1)
            y = np.random.normal(m2, c2)
            self.assertAlmostEqual(p([x, y]), p1([x]) + p2([y]))

        # Test effect of increasing the standard deviation: the density at
        # the mean should fall
        p = [
            pints.ComposedLogPrior(p1, pints.GaussianLogPrior(m2, c))
            for c in range(1, 10)
        ]
        p = np.array([f([m1, m2]) for f in p])
        self.assertTrue(np.all(p[:-1] > p[1:]))

        # Test errors
        self.assertRaises(ValueError, pints.ComposedLogPrior)
        self.assertRaises(ValueError, pints.ComposedLogPrior, 1)

        # Test derivatives
        p = pints.ComposedLogPrior(p1, p2)
        x = [8, -40]
        y, dy = p.evaluateS1(x)
        self.assertEqual(y, p(x))
        self.assertEqual(dy.shape, (2, ))
        y1, dy1 = p1.evaluateS1(x[:1])
        y2, dy2 = p2.evaluateS1(x[1:])
        self.assertAlmostEqual(dy[0], dy1[0])
        self.assertAlmostEqual(dy[1], dy2[0])

        # Test means
        m1 = 10
        c1 = 2
        p1 = pints.GaussianLogPrior(m1, c1)

        m2 = -50
        c2 = 50
        p2 = pints.UniformLogPrior(m2, c2)

        p = pints.ComposedLogPrior(p1, p2)
        self.assertTrue(np.array_equal(p.mean(), [10, 0]))
Example 16
    def test_error_measure_instance(self):
        # Test that error is thrown when we use an error measure which is not
        # an instance of ``pints.ErrorMeasure``.
        # Set a log prior as the error measure to trigger the error
        wrong_error_measure = pints.UniformLogPrior([0.0, 0, 0], [0.2, 100, 1])

        self.assertRaises(ValueError, pints.ABCController, wrong_error_measure,
                          self.log_prior)
Example 17
    def sample(self, x, parallel=False):
        """
        Runs the sampler. This method:
            (1) generates simulated data and adds noise
            (2) sets up the sampler with the given method,
                using a KnownNoiseLogLikelihood and a UniformLogPrior
            (3) runs the sampler
            (4) returns:
                - the calculated rhat value
                - the ess averaged across all chains, taking the
                  minimum result across all parameters
                - the total time taken by the sampler
        """

        the_model = self.model()
        values = the_model.simulate(self.real_parameters, self.times)
        value_range = np.max(values) - np.min(values)
        values += np.random.normal(0, self.noise * value_range, values.shape)
        problem = pints.MultiOutputProblem(the_model, self.times, values)
        log_likelihood = pints.KnownNoiseLogLikelihood(
            problem, value_range * self.noise)
        # lower = list(self.lower) + (
        #     [value_range * self.noise / 10.0] * the_model.n_outputs())
        # upper = list(self.upper) + (
        #     [value_range * self.noise * 10] * the_model.n_outputs())
        lower = list(self.lower)
        upper = list(self.upper)
        middle = [0.5 * (u + l) for l, u in zip(lower, upper)]
        sigma = [u - l for l, u in zip(lower, upper)]
        log_prior = pints.UniformLogPrior(lower, upper)
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)
        n_chains = int(x[-1])
        xs = [[
            np.random.uniform() * (u - l) + l for l, u in zip(lower, upper)
        ] for c in range(n_chains)]
        mcmc = pints.MCMCSampling(log_posterior,
                                  n_chains,
                                  xs,
                                  method=self.method)
        for sampler in mcmc.samplers():
            sampler.set_hyper_parameters(x[:-1])
        if parallel:
            mcmc.set_parallel(int(os.environ['OMP_NUM_THREADS']))

        mcmc.set_log_interval(1000)

        start = timer()
        chains = mcmc.run()
        end = timer()

        rhat = np.max(pints._diagnostics.rhat_all_params(chains))
        ess = np.zeros(chains[0].shape[1])
        for chain in chains:
            ess += np.array(pints._diagnostics.effective_sample_size(chain))
        ess /= n_chains
        ess = np.min(ess)
        print('rhat:', rhat)
        print('ess:', ess)
        print('time:', end - start)
        return rhat, ess, end - start
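The convergence diagnostics at the end of sample() can be exercised in isolation. A minimal runnable sketch on synthetic chains, assuming the private pints._diagnostics helpers behave as they are used above:

import numpy as np
import pints

np.random.seed(1)
# Shape (n_chains, n_iterations, n_parameters), as returned by mcmc.run()
chains = np.random.normal(0, 1, (3, 500, 2))

rhat = np.max(pints._diagnostics.rhat_all_params(chains))
ess = np.zeros(chains[0].shape[1])
for chain in chains:
    ess += np.array(pints._diagnostics.effective_sample_size(chain))
ess /= len(chains)
print('rhat:', rhat, 'min ess:', np.min(ess))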
Example 18
    def setUpClass(cls):
        cls.kernel = flexnoise.kernels.LaplacianKernel
        cls.model = pints.toy.ConstantModel(1)
        cls.times = np.arange(1, 91)
        cls.data = np.hstack(
            (np.random.normal(2.0, 0.1, 30),
             np.random.normal(2.0, 2.0, 30),
             np.random.normal(2.0, 0.1, 30)))
        cls.problem = pints.SingleOutputProblem(
            cls.model, cls.times, cls.data)
        cls.model_prior = pints.UniformLogPrior([0] * 1, [1e6] * 1)
Example 19
    def test_uniform_prior_icdf(self):
        lower = np.array([1, 2])
        upper = np.array([11, 22])
        log_prior = pints.UniformLogPrior(lower, upper)
        self.assertEqual(log_prior.icdf([0.4, 0.9])[0], 5.0)
        self.assertEqual(log_prior.icdf([0.4, 0.9])[1], 20.0)
        self.assertEqual(log_prior.icdf(np.array([0.4, 0.9]))[1], 20.0)
        self.assertEqual(log_prior.icdf([[0.1, 0.3], [0.2, 0.4]]).shape[0], 2)

        self.assertRaises(ValueError, log_prior.icdf, [[1]])
        self.assertRaises(ValueError, log_prior.icdf, [[1, 2, 3, 4]])

        log_prior = pints.UniformLogPrior(1, 3)
        self.assertEqual(log_prior.icdf(1), 3.0)
        self.assertEqual(log_prior.icdf(0), 1.0)
        self.assertEqual(log_prior.icdf(0.75), 2.5)
        self.assertEqual(len(log_prior.icdf([[0.1], [0.2]])), 2)
        self.assertEqual(log_prior.icdf([[0.5], [0.75]])[1],
                         log_prior.icdf(0.75))
Example 20
    def create_pints_prior(self):
        noise_parameters = self.get_noise_params()
        if self.form == self.Form.UNIFORM:
            lower = noise_parameters[0]
            upper = noise_parameters[1]
            pints_log_prior = pints.UniformLogPrior(lower, upper)
        elif self.form == self.Form.NORMAL:
            mean = noise_parameters[0]
            sd = noise_parameters[1]
            pints_log_prior = pints.GaussianLogPrior(mean, sd)
        return pints_log_prior
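A hedged usage sketch of the two branches above, with illustrative noise parameters; the enclosing class and its Form enum are not reproduced here.

import pints

# Form.UNIFORM branch: noise_parameters = (lower, upper)
uniform_prior = pints.UniformLogPrior(0.0, 10.0)
# Form.NORMAL branch: noise_parameters = (mean, sd)
gaussian_prior = pints.GaussianLogPrior(5.0, 2.0)
print(uniform_prior([3.0]), gaussian_prior([5.0]))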
Example 21
    def test_uniform_prior_cdf(self):
        lower = np.array([1, 2])
        upper = np.array([11, 22])
        log_prior = pints.UniformLogPrior(lower, upper)
        self.assertEqual(log_prior.cdf([2, 19.0])[0], 0.1)
        self.assertEqual(log_prior.cdf([2, 19.0])[1], 0.85)
        self.assertEqual(log_prior.cdf(np.array([2, 19.0]))[1], 0.85)
        self.assertEqual(log_prior.cdf([[1, 2], [2, 3]]).shape[0], 2)

        # test errors
        self.assertRaises(ValueError, log_prior.cdf, [[1]])
        self.assertRaises(ValueError, log_prior.cdf, [[1, 2, 3, 4]])

        log_prior = pints.UniformLogPrior(1, 3)
        self.assertEqual(log_prior.cdf(1), 0)
        self.assertEqual(log_prior.cdf(2), 0.5)
        self.assertEqual(log_prior.cdf(3), 1.0)

        # test multiple samples
        self.assertEqual(len(log_prior.cdf([[1], [2]])), 2)
        self.assertEqual(log_prior.cdf([[1], [2]])[1], log_prior.cdf(2))
Example 22
def run_figureS2(num_runs=3, output_dir='./'):
    """Run the Gaussian process on block noise data.

    This function runs the simulations and saves the results to pickle.
    """
    random.seed(12345)
    np.random.seed(12345)

    all_fits = []
    iid_runs = []
    sigmas = []
    mult_runs = []
    gp_runs = []
    for run in range(num_runs):
        # Make a synthetic time series
        times, values, data = generate_time_series(model='logistic',
                                                   noise='blocks',
                                                   n_times=625)

        # Make Pints model and problem
        model = pints.toy.LogisticModel()
        problem = pints.SingleOutputProblem(model, times, data)

        # Initial conditions for model parameters
        model_starting_point = [0.08, 50]

        # Infer the nonstationary kernel fit
        # Run an optimization assuming IID noise
        log_prior = pints.UniformLogPrior([0] * 3, [1e6] * 3)
        log_likelihood = pints.GaussianLogLikelihood(problem)
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)
        opt = pints.OptimisationController(log_posterior,
                                           model_starting_point + [2])
        xbest, fbest = opt.run()

        # Run the GP fit, using the best fit for initialization
        gp_times = times[::25]
        kernel = flexnoise.kernels.GPLaplacianKernel
        gnp = flexnoise.GPNoiseProcess(problem, kernel, xbest[:2], gp_times)
        gnp.set_gp_hyperparameters(mu=0.0, alpha=1.0, beta_num_points=200)
        x = gnp.run_optimize(num_restarts=100, parallel=True, maxiter=150)
        all_fits.append(x)

    # Save all results to pickle
    kernel = kernel(None, gp_times)
    results = [all_fits, times, data, values, model, problem, kernel]

    fname = os.path.join(output_dir, 'figS2_data.pkl')
    with open(fname, 'wb') as f:
        pickle.dump(results, f)
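Companion sketch for reloading the results pickled above. The file name and tuple layout match run_figureS2; the output directory is assumed to be the default './'.

import os
import pickle

fname = os.path.join('./', 'figS2_data.pkl')
with open(fname, 'rb') as f:
    all_fits, times, data, values, model, problem, kernel = pickle.load(f)
print(len(all_fits), 'fits loaded')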
Example 23
    def setUpClass(cls):
        """ Set up problem for tests. """

        # Create toy model
        cls.model = toy.LogisticModel()
        cls.real_parameters = [0.015, 500]
        cls.times = np.linspace(0, 1000, 1000)
        cls.values = cls.model.simulate(cls.real_parameters, cls.times)

        # Add noise
        cls.noise = 10
        cls.values += np.random.normal(0, cls.noise, cls.values.shape)
        cls.real_parameters.append(cls.noise)
        cls.real_parameters = np.array(cls.real_parameters)

        # Create an object with links to the model and time series
        cls.problem = pints.SingleOutputProblem(cls.model, cls.times,
                                                cls.values)

        # Create a uniform prior over both the parameters and the new noise
        # variable
        cls.log_prior = pints.UniformLogPrior([0.01, 400, cls.noise * 0.1],
                                              [0.02, 600, cls.noise * 100])

        # Create a log likelihood
        cls.log_likelihood = pints.GaussianLogLikelihood(cls.problem)

        # Create an un-normalised log-posterior (log-likelihood + log-prior)
        cls.log_posterior = pints.LogPosterior(cls.log_likelihood,
                                               cls.log_prior)

        # Run MCMC sampler
        xs = [
            cls.real_parameters * 1.1,
            cls.real_parameters * 0.9,
            cls.real_parameters * 1.15,
        ]

        mcmc = pints.MCMCController(cls.log_posterior,
                                    3,
                                    xs,
                                    method=pints.HaarioBardenetACMC)
        mcmc.set_max_iterations(200)
        mcmc.set_initial_phase_iterations(50)
        mcmc.set_log_to_screen(False)

        start = time.time()
        cls.chains = mcmc.run()
        end = time.time()
        cls.time = end - start
Example 24
    def test_log_posterior(self):

        # Create a toy problem and log likelihood
        model = pints.toy.LogisticModel()
        real_parameters = [0.015, 500]
        x = [0.014, 501]
        sigma = 0.001
        times = np.linspace(0, 1000, 100)
        values = model.simulate(real_parameters, times)
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)

        # Create a prior
        log_prior = pints.UniformLogPrior([0, 0], [1, 1000])

        # Test
        p = pints.LogPosterior(log_likelihood, log_prior)
        self.assertEqual(p(x), log_likelihood(x) + log_prior(x))
        y = [-1, 500]
        self.assertEqual(log_prior(y), -float('inf'))
        self.assertEqual(p(y), -float('inf'))
        self.assertEqual(p(y), log_prior(y))

        # Test derivatives
        log_prior = pints.ComposedLogPrior(pints.GaussianLogPrior(0.015, 0.3),
                                           pints.GaussianLogPrior(500, 100))
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)
        x = [0.013, 540]
        y, dy = log_posterior.evaluateS1(x)
        self.assertEqual(y, log_posterior(x))
        self.assertEqual(dy.shape, (2, ))
        y1, dy1 = log_prior.evaluateS1(x)
        y2, dy2 = log_likelihood.evaluateS1(x)
        self.assertTrue(np.all(dy == dy1 + dy2))

        # Test getting the prior and likelihood back again
        self.assertIs(log_posterior.log_prior(), log_prior)
        self.assertIs(log_posterior.log_likelihood(), log_likelihood)

        # First arg must be a LogPDF
        self.assertRaises(ValueError, pints.LogPosterior, 'hello', log_prior)

        # Second arg must be a LogPrior
        self.assertRaises(ValueError, pints.LogPosterior, log_likelihood,
                          log_likelihood)

        # Prior and likelihood must have same dimension
        self.assertRaises(ValueError, pints.LogPosterior, log_likelihood,
                          pints.GaussianLogPrior(0.015, 0.3))
Example 25
    def test_transformed_log_prior(self):
        # Test TransformedLogPrior class

        d = 2
        t = pints.LogTransformation(2)
        r = pints.UniformLogPrior([0.1, 0.1], [0.9, 0.9])
        tr = t.convert_log_prior(r)

        # Test sample
        n = 1
        x = tr.sample(n)
        self.assertEqual(x.shape, (n, d))
        self.assertTrue(np.all(x < 0.))
        n = 1000
        x = tr.sample(n)
        self.assertEqual(x.shape, (n, d))
        self.assertTrue(np.all(x < 0.))
Example 26
    def setUpClass(cls):
        """ Prepare problem for tests. """

        # Create toy model
        cls.model = pints.toy.stochastic.DegradationModel()
        cls.real_parameters = [0.1]
        cls.times = np.linspace(0, 10, 10)
        cls.values = cls.model.simulate(cls.real_parameters, cls.times)

        # Create an object (problem) with links to the model and time series
        cls.problem = pints.SingleOutputProblem(cls.model, cls.times,
                                                cls.values)

        # Create a uniform prior over the single model parameter
        cls.log_prior = pints.UniformLogPrior([0.0], [0.3])

        # Set error measure
        cls.error_measure = pints.RootMeanSquaredError(cls.problem)
Example 27
    def test_construction_errors(self):
        """ Tests if invalid constructor calls are picked up. """

        # First arg must be a log likelihood
        self.assertRaisesRegex(ValueError, 'must extend pints.LogLikelihood',
                               pints.NestedRejectionSampler, 'hello',
                               self.log_prior)

        # Second arg must be a log prior
        self.assertRaisesRegex(ValueError, 'must extend pints.LogPrior',
                               pints.NestedRejectionSampler,
                               self.log_likelihood, self.log_likelihood)

        # Both must have same number of parameters
        log_prior = pints.UniformLogPrior([0.01, 400, 1], [0.02, 600, 3])
        self.assertRaisesRegex(ValueError, 'same number of parameters',
                               pints.NestedRejectionSampler,
                               self.log_likelihood, log_prior)
Example 28
    def test_composed_prior_cdf_icdf(self):
        p1 = pints.GaussianLogPrior(-3, 7)
        p2 = pints.UniformLogPrior(-4, -1)
        p = pints.ComposedLogPrior(p1, p2)
        ps = [p1, p2]
        xs = [-10, -3]
        cdfs = p.cdf(xs)
        for i, cdf in enumerate(cdfs):
            self.assertEqual(cdf, ps[i].cdf(xs[i]))
        cdfs1 = p.convert_to_unit_cube(xs)
        self.assertEqual(cdfs[0], cdfs1[0])
        self.assertEqual(cdfs[1], cdfs1[1])

        qs = [0.3, 0.75]
        icdfs = p.icdf(qs)
        for i, icdf in enumerate(icdfs):
            self.assertEqual(icdf, ps[i].icdf(qs[i]))
        icdfs1 = p.convert_from_unit_cube(qs)
        self.assertEqual(icdfs[0], icdfs1[0])
        self.assertEqual(icdfs[1], icdfs1[1])
Example 29
    def setUpClass(cls):
        """ Set up problem for tests. """
        # Create toy model
        cls.model = toy.stochastic.DegradationModel()
        cls.real_parameters = [0.1]
        cls.times = np.linspace(0, 10, 10)
        cls.values = cls.model.simulate(cls.real_parameters, cls.times)

        # Create an object (problem) with links to the model and time series
        cls.problem = pints.SingleOutputProblem(cls.model, cls.times,
                                                cls.values)

        # Create a uniform prior over the single model parameter
        cls.log_prior = pints.UniformLogPrior([0.0], [0.3])

        cls.transition_kernel = pints.MultivariateGaussianLogPrior(
            np.zeros(1), 0.001 * np.identity(1))

        # Set error measure
        cls.error_measure = pints.RootMeanSquaredError(cls.problem)
Example 30
    def test_basic(self):

        # Create a custom LogPDF for testing
        class Gradient(pints.LogPDF):

            def n_parameters(self):
                return 1

            def __call__(self, x):
                return x

        # Create boundaries based on gradient
        b = pints.LogPDFBoundaries(Gradient(), 0.75)

        # Test n_parameters
        self.assertEqual(b.n_parameters(), 1)

        # Test
        self.assertFalse(b.check(0))
        self.assertFalse(b.check(-1))
        self.assertTrue(b.check(2))
        self.assertTrue(b.check(1))
        self.assertFalse(b.check(0.75))

        # Test bad creation
        self.assertRaisesRegex(
            ValueError, 'must be a pints.LogPDF', pints.LogPDFBoundaries, 5, 5)

        # Can't sample from this log pdf!
        self.assertRaises(NotImplementedError, b.sample, 1)

        # Can sample if we have a prior that supports it
        b = pints.RectangularBoundaries([1, 1], [2, 2])
        p = pints.UniformLogPrior(b)
        p.sample(2)
        b = pints.LogPDFBoundaries(p)
        b.sample(2)