Example #1
    def test_multivariate_normal_prior(self):
        # 1d test
        mean = 0
        covariance = 1

        # Input must be a matrix
        self.assertRaises(ValueError, pints.MultivariateGaussianLogPrior, mean,
                          covariance)
        covariance = [1]
        self.assertRaises(ValueError, pints.MultivariateGaussianLogPrior, mean,
                          covariance)

        # Basic test
        covariance = [[1]]
        p = pints.MultivariateGaussianLogPrior(mean, covariance)
        p([0])
        p([-1])
        p([11])

        # 5d tests
        mean = [1, 2, 3, 4, 5]
        covariance = np.diag(mean)
        p = pints.MultivariateGaussianLogPrior(mean, covariance)
        self.assertRaises(ValueError, p, [1, 2, 3])
        p([1, 2, 3, 4, 5])
        p([-1, 2, -3, 4, -5])

        # Test mean
        for idx, component in enumerate(mean):
            self.assertAlmostEqual(p.mean()[idx], component)

        # Test errors
        self.assertRaises(ValueError, pints.MultivariateGaussianLogPrior,
                          [1, 2], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
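For reference, the log-density these tests exercise is the standard multivariate normal log-pdf, so it can be cross-checked against scipy; a minimal sketch, assuming scipy is installed:

    import numpy as np
    import scipy.stats
    import pints

    mean = [1, 2, 3, 4, 5]
    covariance = np.diag(mean)
    p = pints.MultivariateGaussianLogPrior(mean, covariance)

    # At the mean the quadratic term vanishes, leaving
    # -0.5 * (k * log(2 * pi) + log|Sigma|)
    x = [1, 2, 3, 4, 5]
    assert np.isclose(
        p(x), scipy.stats.multivariate_normal.logpdf(x, mean, covariance))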
Example #2
    def test_multivariate_normal_sampling(self):
        d = 1
        mean = 2
        covariance = [[1]]
        p = pints.MultivariateGaussianLogPrior(mean, covariance)

        n = 1
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))
        n = 10
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))

        # 5d tests
        d = 5
        mean = np.array([1, 2, 3, 4, 5])
        covariance = np.diag(mean)
        p = pints.MultivariateGaussianLogPrior(mean, covariance)
        n = 1
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))
        n = 10
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))

        # Roughly check distribution (main checks are in numpy!)
        np.random.seed(1)
        p = pints.MultivariateGaussianLogPrior(mean, covariance)
        x = p.sample(10000)
        self.assertTrue(np.all(np.abs(mean - x.mean(axis=0)) < 0.1))
        self.assertTrue(np.all(
            np.abs(np.diag(covariance) - x.std(axis=0)**2) < 0.1))
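sample(n) returns an (n, d) array. As the comment notes, the heavy statistical checks live in numpy itself; the moment test above is loosely equivalent to checking numpy's own sampler (a sketch, using numpy only):

    import numpy as np

    np.random.seed(1)
    mean = np.array([1, 2, 3, 4, 5])
    covariance = np.diag(mean)
    x = np.random.multivariate_normal(mean, covariance, size=10000)
    print(x.shape)                              # (10000, 5)
    print(np.abs(mean - x.mean(axis=0)).max())  # small, typically < 0.1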
Example #3
    def test_method_list(self):
        # Create an ABC-SMC sampler
        abc = pints.ABCSMC(
            self.log_prior,
            pints.MultivariateGaussianLogPrior(np.zeros(1),
                                               0.001 * np.identity(1)))

        # Configure
        n_draws = 2
        niter = 10
        abc.set_intermediate_size(niter)
        abc.set_threshold_schedule([6, 4, 2])
        abc.set_perturbation_kernel(
            pints.MultivariateGaussianLogPrior(np.zeros(1),
                                               0.001 * np.identity(1)))

        # Perform short run using ask and tell framework
        samples = []
        while len(samples) < 3 * niter:
            xs = abc.ask(n_draws)
            fxs = [self.error_measure(x) for x in xs]
            sample = abc.tell(fxs)
            while sample is None:
                xs = abc.ask(n_draws)
                fxs = [self.error_measure(x) for x in xs]
                sample = abc.tell(fxs)
            samples.append(sample)

        samples = np.array(samples)
        self.assertEqual(samples.shape[0], 3 * niter)
Example #4
    def test_multivariate_normal_prior(self):

        # Input must be a matrix
        self.assertRaises(
            ValueError, pints.MultivariateGaussianLogPrior, 0, 1)
        self.assertRaises(
            ValueError, pints.MultivariateGaussianLogPrior, 0, [1])

        # 1d test
        p = pints.MultivariateGaussianLogPrior(0, [[1]])
        self.assertEqual(p([0]), -0.5 * np.log(2 * np.pi))

        # 5d tests
        mean = [1, 2, 3, 4, 5]
        covariance = np.diag(mean)
        p = pints.MultivariateGaussianLogPrior(mean, covariance)
        self.assertRaises(ValueError, p, [1, 2, 3])
        self.assertAlmostEqual(p([1, 2, 3, 4, 5]), -6.988438537414387)
        self.assertAlmostEqual(p([-1, 2, -3, 4, -5]), -24.988438537414385)

        # Test mean
        for idx, component in enumerate(mean):
            self.assertAlmostEqual(p.mean()[idx], component)

        # Test errors
        self.assertRaises(
            ValueError, pints.MultivariateGaussianLogPrior, [1, 2],
            [[1, 0, 0], [0, 1, 0], [0, 0, 1]])

        # Test sensitivities
        mean = [1, 3]
        covariance = [[2, 0.5], [0.5, 2]]
        p = pints.MultivariateGaussianLogPrior(mean, covariance)
        y, dy = p.evaluateS1([4, 5])
        self.assertEqual(len(dy), 2)
        self.assertAlmostEqual(y, -5.165421653067172, places=6)
        dy_test = [-float(4 / 3), -float(2 / 3)]
        self.assertAlmostEqual(dy[0], dy_test[0], places=6)
        self.assertAlmostEqual(dy[1], dy_test[1], places=6)

        mean = [-5.5, 6.7, 3.2]
        covariance = [[3.4, -0.5, -0.7], [-0.5, 2.7, 1.4], [-0.7, 1.4, 5]]
        p = pints.MultivariateGaussianLogPrior(mean, covariance)
        y, dy = p.evaluateS1([4.4, 3.5, -3])
        self.assertEqual(len(dy), 3)
        self.assertAlmostEqual(y, -20.855279298674258, places=6)
        dy_test = [-2.709773397444412, 0.27739553170576203, 0.7829609754801692]
        self.assertAlmostEqual(dy[0], dy_test[0], places=6)
        self.assertAlmostEqual(dy[1], dy_test[1], places=6)
        self.assertAlmostEqual(dy[2], dy_test[2], places=6)

        # 1d sensitivity test
        p = pints.MultivariateGaussianLogPrior(0, [[1]])
        x = [0]
        y, dy = p.evaluateS1(x)
        self.assertEqual(y, p(x))
        self.assertEqual(len(dy), 1)
        self.assertEqual(dy[0], 0)
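The sensitivities asserted above follow from the gradient of the Gaussian log-density, grad log p(x) = -inv(Sigma) (x - mu); the [4, 5] case can be reproduced with plain numpy:

    import numpy as np

    mean = np.array([1, 3])
    cov = np.array([[2, 0.5], [0.5, 2]])
    x = np.array([4, 5])

    # Gradient of the log-density: -inv(cov) @ (x - mean)
    dy = -np.linalg.solve(cov, x - mean)
    print(dy)  # [-1.33333333 -0.66666667], i.e. [-4/3, -2/3] as asserted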
Example #5
    def test_multivariate_normal_cdf_icdf(self):
        # 1d
        log_prior = pints.MultivariateGaussianLogPrior([-5], [[3]])
        self.assertAlmostEqual(
            log_prior.pseudo_cdf([-4])[0], 0.71814856917461345)
        self.assertAlmostEqual(
            log_prior.pseudo_cdf(-4)[0], 0.71814856917461345)
        self.assertEqual(
            log_prior.convert_to_unit_cube([-5])[0],
            log_prior.pseudo_cdf([-5])[0])
        self.assertAlmostEqual(
            log_prior.pseudo_icdf([0.3])[0], -5.9082883315254957)
        self.assertAlmostEqual(
            log_prior.pseudo_icdf(0.3)[0], -5.9082883315254957)
        self.assertEqual(
            log_prior.convert_from_unit_cube([0.1])[0],
            log_prior.pseudo_icdf([0.1])[0])

        # 3d
        log_prior = pints.MultivariateGaussianLogPrior(mean=[-3, 4, 7],
                                                       cov=[[4, 0.5, 0.1],
                                                            [0.5, 9, -0.1],
                                                            [0.1, -0.1, 16]])
        xs = [1, 10.5, 3]
        cdfs = log_prior.pseudo_cdf(xs)
        cdfs1 = log_prior.convert_to_unit_cube(xs)
        cdfs2 = log_prior.convert_to_unit_cube(np.array(xs))
        self.assertTrue(np.array_equal(cdfs, cdfs1))
        self.assertTrue(np.array_equal(cdfs, cdfs2))
        self.assertAlmostEqual(cdfs[0], 0.97724986805182079)
        self.assertAlmostEqual(cdfs[1], 0.9776241475778038)
        self.assertAlmostEqual(cdfs[2], 0.15714957928562118)
        self.assertEqual(
            log_prior.pseudo_cdf([[1, 2, 3], [2, 3, 3]]).shape[0], 2)
        self.assertEqual(
            log_prior.pseudo_cdf([[1, 10.5, 3], [2, 3, 3]])[0, 2], cdfs[2])

        qs = [0.1, 0.05, 0.95]
        icdfs = log_prior.pseudo_icdf(qs)
        icdfs1 = log_prior.convert_from_unit_cube(qs)
        icdfs2 = log_prior.convert_from_unit_cube(np.array(qs))
        self.assertTrue(np.array_equal(icdfs, icdfs1))
        self.assertTrue(np.array_equal(icdfs, icdfs2))
        self.assertAlmostEqual(icdfs[0], -5.5631031310892007)
        self.assertAlmostEqual(icdfs[1], -1.2377850302165871)
        self.assertAlmostEqual(icdfs[2], 13.576429013793563)
        self.assertEqual(
            log_prior.pseudo_icdf([[0.1, 0.2, 0.3], [0.2, 0.3, 0.3]]).shape[0],
            2)
        self.assertEqual(
            log_prior.pseudo_icdf([[0.1, 0.2, 0.3], [0.2, 0.3, 0.3]])[0, 0],
            icdfs[0])

        # test errors
        self.assertRaises(ValueError, log_prior.pseudo_cdf, [[1, 2]])
        self.assertRaises(ValueError, log_prior.pseudo_cdf, [[1, 2, 3, 4]])
        self.assertRaises(ValueError, log_prior.pseudo_icdf, [[1, 2]])
        self.assertRaises(ValueError, log_prior.pseudo_icdf, [[1, 2, 3, 4]])
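The first coordinate of the pseudo-CDF is simply the marginal Gaussian CDF; later coordinates appear to condition on the earlier ones (a Rosenblatt-style transform), which is why the 3d values are not plain marginal CDFs. The first 3d value above, 0.97724986..., is Phi((1 - (-3)) / sqrt(4)) = Phi(2); a quick check, assuming scipy:

    import scipy.stats

    print(scipy.stats.norm.cdf((1 - (-3)) / 4 ** 0.5))  # 0.9772498680518208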
Example #6
    def test_composed_prior_sampling(self):

        m1 = 10
        c1 = 2
        p1 = pints.GaussianLogPrior(m1, c1)
        m2 = -50
        c2 = 100
        p2 = pints.GaussianLogPrior(m2, c2)
        p = pints.ComposedLogPrior(p1, p2)
        d = 2
        n = 1
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))
        n = 10
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))
        p = pints.ComposedLogPrior(
            p1,
            pints.MultivariateGaussianLogPrior([0, 1, 2], np.diag([2, 4, 6])),
            p2,
            p2,
        )
        d = p.n_parameters()
        self.assertEqual(d, 6)
        n = 1
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))
        n = 10
        x = p.sample(n)
        self.assertEqual(x.shape, (n, d))
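The composed prior's dimensionality is the sum of its parts: 1 (p1) + 3 (the multivariate prior) + 1 (p2) + 1 (p2 again) = 6, which is what n_parameters() reports and what the sample shapes confirm.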
Example #7
    def _run(self, result, log_path):

        import pints
        import pints.toy
        import numpy as np

        import logging
        log = logging.getLogger(__name__)

        DEBUG = False

        # Store method name
        result['method'] = self._method
        log.info('Using method: ' + self._method)

        # Get method class
        method = getattr(pints, self._method)

        # Create a log pdf
        xtrue = np.array([2, 4])
        sigma = np.diag(np.array([1, 3]))
        log_pdf = pints.toy.GaussianLogPDF(xtrue, sigma)

        # Create a log prior
        log_prior = pints.MultivariateGaussianLogPrior(xtrue + 1, sigma * 2)

        # Create a nested sampler
        sampler = method(log_pdf, log_prior)

        # Log to file
        if not DEBUG:
            sampler.set_log_to_screen(False)
        sampler.set_log_to_file(log_path)

        # Set max iterations
        sampler.set_iterations(4000)
        sampler.set_posterior_samples(1000)

        # Run
        samples, logZ = sampler.run()

        # Calculate KLD for a sliding window
        n_samples = len(samples)  # Total samples
        n_window = 500  # Window size
        n_jump = 20  # Spacing between windows
        iters = list(range(0, n_samples - n_window + n_jump, n_jump))
        result['iters'] = iters
        result['klds'] = [
            log_pdf.kl_divergence(samples[i:i + n_window]) for i in iters
        ]

        # Store kullback-leibler divergence
        result['kld'] = log_pdf.kl_divergence(samples)

        # Store status
        result['status'] = 'done'
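The sliding-window bookkeeping above scores the KL divergence on overlapping slices of the posterior samples; the + n_jump in the range end makes the final window flush with the end of the chain. For example:

    n_samples, n_window, n_jump = 1000, 500, 20
    iters = list(range(0, n_samples - n_window + n_jump, n_jump))
    print(iters[0], iters[-1])  # 0 500 -- last window covers samples 500:1000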
Example #8
    def _run(self, result, log_path):

        import pints
        import pints.toy

        import logging
        log = logging.getLogger(__name__)

        DEBUG = False

        # Store method name
        result['method'] = self._method
        log.info('Using method: ' + self._method)

        # Get method class
        method = getattr(pints, self._method)

        # Create a log pdf
        sigma = 2
        r = 4
        log_pdf = pints.toy.SimpleEggBoxLogPDF(sigma=sigma, r=r)

        # Create a log prior
        d = 2 * 6 * r * sigma
        log_prior = pints.MultivariateGaussianLogPrior(
            [0, 0], [[d, 0], [0, d]])

        # Create a nested sampler
        sampler = method(log_pdf, log_prior)

        # Log to file
        if not DEBUG:
            sampler.set_log_to_screen(False)
        sampler.set_log_to_file(log_path)

        # Set max iterations
        sampler.set_iterations(8000)
        sampler.set_posterior_samples(2000)

        # Run
        samples, logZ = sampler.run()

        # Calculate KLD for a sliding window
        n_samples = len(samples)    # Total samples
        n_window = 500              # Window size
        n_jump = 20                 # Spacing between windows
        iters = list(range(0, n_samples - n_window + n_jump, n_jump))
        result['iters'] = iters
        result['klds'] = [
            log_pdf.kl_divergence(samples[i:i + n_window]) for i in iters]

        # Store kullback-leibler-based score
        result['kld'] = log_pdf.kl_divergence(samples)

        # Store status
        result['status'] = 'done'
Example #9
    def _run(self, result):

        import pints
        import pints.toy

        import logging
        log = logging.getLogger(__name__)

        DEBUG = False

        # Show method name
        log.info('Using method: ' + self._method)

        # Get method class
        method = getattr(pints, self._method)

        # Create a log pdf
        sigma = 2
        r = 4
        log_pdf = pints.toy.SimpleEggBoxLogPDF(sigma=sigma, r=r)

        # Create a log prior
        d = 2 * 6 * r * sigma
        log_prior = pints.MultivariateGaussianLogPrior([0, 0],
                                                       [[d, 0], [0, d]])

        # Create a nested sampler
        sampler = pints.NestedController(log_pdf, log_prior, method=method)

        # Log to file
        if not DEBUG:
            sampler.set_log_to_screen(False)

        # Set max iterations
        sampler.set_iterations(4000)
        sampler.set_n_posterior_samples(1000)

        # Run
        samples = sampler.run()

        # Store kullback-leibler-based score
        result['kld'] = log_pdf.kl_divergence(samples)

        # Store status
        result['status'] = 'done'
Example #10
    def _run(self, result):

        import pints
        import pints.toy
        import numpy as np

        import logging
        log = logging.getLogger(__name__)

        DEBUG = False

        # Show method name
        log.info('Using method: ' + self._method)

        # Get method class
        method = getattr(pints, self._method)

        # Create a log pdf
        xtrue = np.array([2, 4])
        sigma = np.diag(np.array([1, 3]))
        log_pdf = pints.toy.GaussianLogPDF(xtrue, sigma)

        # Create a log prior
        log_prior = pints.MultivariateGaussianLogPrior(xtrue + 1, sigma * 2)

        # Create a nested sampler
        sampler = pints.NestedController(log_pdf, log_prior, method=method)

        # Log to file
        if not DEBUG:
            sampler.set_log_to_screen(False)

        # Set max iterations
        sampler.set_iterations(4000)
        sampler.set_n_posterior_samples(1000)

        # Run
        samples = sampler.run()

        # Store kullback-leibler divergence
        result['kld'] = log_pdf.kl_divergence(samples)

        # Store status
        result['status'] = 'done'
Example #11
    def setUpClass(cls):
        """ Set up problem for tests. """
        # Create toy model
        cls.model = toy.stochastic.DegradationModel()
        cls.real_parameters = [0.1]
        cls.times = np.linspace(0, 10, 10)
        cls.values = cls.model.simulate(cls.real_parameters, cls.times)

        # Create an object (problem) with links to the model and time series
        cls.problem = pints.SingleOutputProblem(cls.model, cls.times,
                                                cls.values)

        # Create a uniform prior over the model parameter
        cls.log_prior = pints.UniformLogPrior([0.0], [0.3])

        cls.transition_kernel = pints.MultivariateGaussianLogPrior(
            np.zeros(1), 0.001 * np.identity(1))

        # Set error measure
        cls.error_measure = pints.RootMeanSquaredError(cls.problem)
Example #12
    def __init__(self, gp_times, num_gps, mu, alpha, beta):
        """
        For mu, alpha, and beta, provide either a single value that is used
        for all kernel parameters, or a list of values matching the number
        of kernel parameters.

        Parameters
        ----------
        gp_times : np.ndarray
            The time points where the Gaussian process is evaluated
        num_gps : int
            The number of time-varying kernel parameters
        mu : float or list
            RBF mean
        alpha : float or list
            RBF scale
        beta : float or list
            RBF lengthscale
        """
        self._n_parameters = len(gp_times) * num_gps
        self._priors = []

        for i in range(num_gps):
            hyperparams = [0.0, 0.0, 0.0]
            for j, argument in enumerate((mu, alpha, beta)):
                try:
                    hyperparams[j] = argument[i]
                except (TypeError, IndexError):
                    hyperparams[j] = argument

            gp_prior_mean = hyperparams[0] * np.ones(len(gp_times))
            gp_prior_cov = hyperparams[1] ** 2 * \
                np.exp(-(gp_times - gp_times[:, np.newaxis]) ** 2
                       / (2 * hyperparams[2] ** 2))
            gp_prior_cov += 1e-3 * np.diag(np.ones(len(gp_times)))

            subprior = pints.MultivariateGaussianLogPrior(
                gp_prior_mean, gp_prior_cov)

            self._priors.append(subprior)
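The covariance assembled above is a squared-exponential (RBF) kernel over gp_times, with a small diagonal jitter added for numerical stability; stand-alone, the same matrix can be built as:

    import numpy as np

    gp_times = np.linspace(0, 10, 25)
    mu, alpha, beta = 0.0, 1.0, 2.0  # example hyperparameter values

    # alpha sets the marginal scale, beta the lengthscale of the kernel
    cov = alpha ** 2 * np.exp(
        -(gp_times - gp_times[:, np.newaxis]) ** 2 / (2 * beta ** 2))
    cov += 1e-3 * np.eye(len(gp_times))  # jitter keeps cov positive definite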
Example #13
    def __init__(self,
                 log_prior,
                 perturbation_kernel=None,
                 nr_samples=100,
                 error_schedule=[1]):
        # Log prior
        self._log_prior = log_prior

        # Default value for error threshold schedule
        self._e_schedule = error_schedule

        # Default value for current threshold
        self._threshold = error_schedule[0]

        # Size of intermediate distributions
        self._nr_samples = nr_samples

        # Set up for first iteration
        self._samples = [[], []]
        self._accepted_count = 0
        self._weights = []
        self._xs = None
        self._ready_for_tell = False
        self._t = 0
        self._to_print = True

        # Setting the perturbation kernel
        if perturbation_kernel is None:
            dim = log_prior.n_parameters()
            self._perturbation_kernel = pints.MultivariateGaussianLogPrior(
                np.zeros(dim), 0.001 * np.identity(dim))
        elif isinstance(perturbation_kernel, pints.LogPrior):
            self._perturbation_kernel = perturbation_kernel
        else:
            raise ValueError('Provided perturbation kernel must be an instance'
                             ' of pints.LogPrior')
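The default kernel is a zero-mean Gaussian with covariance 0.001 * I over the prior's dimensions. A sketch of how such a kernel could be used to jitter a particle (this usage is an illustration only, not ABCSMC's internal code):

    import numpy as np
    import pints

    dim = 2
    kernel = pints.MultivariateGaussianLogPrior(
        np.zeros(dim), 0.001 * np.identity(dim))

    # Perturb a particle by adding zero-mean Gaussian noise from the kernel
    x = np.array([0.5, 1.5])
    x_perturbed = x + kernel.sample(1)[0]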
Example #14
            current = pints_model.simulate(true_parameters, data.times)
            current = np.random.normal(current, noise)
            times = data.times
        else:
            current = data.current
            times = data.times

        problem = pints.SingleOutputProblem(pints_model, times, current)
        boundaries = pints.RectangularBoundaries(lower_bounds, upper_bounds)

        # Create a new log-likelihood function (adds an extra parameter!)
        log_likelihood = pints.GaussianLogLikelihood(problem)

        # Create a new prior
        large = 1e9
        param_prior = pints.MultivariateGaussianLogPrior(
            mu_0, large * np.eye(len(mu_0)))
        noise_prior = pints.UniformLogPrior([lower_bounds[-1]],
                                            [upper_bounds[-1]])
        log_prior = pints.ComposedLogPrior(param_prior, noise_prior)

        # Create a posterior log-likelihood (log(likelihood * prior))
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)
        log_posteriors.append(log_posterior)
        score = pints.ProbabilityBasedError(log_posterior)

        if synthetic:
            found_parameters = list(true_parameters) + [noise]
        else:
            found_parameters, found_value = pints.optimise(score,
                                                           x0,
                                                           sigma0,
Example #15
    def _run(self, result, log_path):

        import pints
        import pints.toy
        import numpy as np
        import pfunk

        import logging
        log = logging.getLogger(__name__)

        DEBUG = False

        # Store method name
        result['method'] = self._method
        log.info('Using method: ' + self._method)

        # Get method class
        method = getattr(pints, self._method)

        # Check number of chains
        if issubclass(method, pints.SingleChainMCMC) and self._nchains > 1:
            log.warning('SingleChainMCMC run with more than 1 chain.')
        elif issubclass(method, pints.MultiChainMCMC) and self._nchains == 1:
            log.warning('MultiChainMCMC run with only 1 chain.')

        # Create a log pdf
        xtrue = np.array([2, 4])
        sigma = np.diag(np.array([1, 3]))
        log_pdf = pints.toy.GaussianLogPDF(xtrue, sigma)

        # Create a log prior
        log_prior = pints.MultivariateGaussianLogPrior(xtrue + 1, sigma * 2)

        # Generate random points
        x0 = log_prior.sample(self._nchains)

        # Create a realistic sigma - for some methods only!
        sigma = None
        if method == pints.HamiltonianMCMC:
            sigma = np.diag(np.array([1, 3]))

        # Create a sampling routine
        mcmc = pints.MCMCController(
            log_pdf, self._nchains, x0, sigma0=sigma, method=method)
        mcmc.set_parallel(True)

        # Log to file
        if not DEBUG:
            mcmc.set_log_to_screen(False)
        mcmc.set_log_to_file(log_path)

        # Set max iterations
        n_iter = self._max_iter
        n_burn = int(self._max_iter * 0.5)
        n_init = int(self._max_iter * 0.1)
        mcmc.set_max_iterations(n_iter)
        if mcmc.method_needs_initial_phase():
            mcmc.set_initial_phase_iterations(n_init)

        # Run
        chains = mcmc.run()

        if DEBUG:
            import matplotlib.pyplot as plt
            import pints.plot
            pints.plot.trace(chains)
            plt.show()

        # Combine chains (weaving, so we can see the combined progress per
        # iteration for multi-chain methods)
        chain = pfunk.weave(chains)

        # Calculate KLD for a sliding window
        n_samples = len(chain)              # Total samples
        n_window = 500 * self._nchains      # Window size
        n_jump = 20 * self._nchains         # Spacing between windows
        iters = list(range(0, n_samples - n_window + n_jump, n_jump))
        result['iters'] = iters
        result['klds'] = [
            log_pdf.kl_divergence(chain[i:i + n_window]) for i in iters]

        # Remove burn-in
        # For multi-chain, multiply by n_chains because we wove the chains
        # together.
        chain = chain[n_burn * self._nchains:]
        log.info('Chain shape (without burn-in): ' + str(chain.shape))
        log.info('Chain mean: ' + str(np.mean(chain, axis=0)))

        # Store kullback-leibler divergence after burn-in
        result['kld'] = log_pdf.kl_divergence(chain)

        # Store effective sample size
        result['ess'] = pints.effective_sample_size(chain)

        # Store status
        result['status'] = 'done'
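pfunk.weave is not shown here; going by the comments, it interleaves the chains sample-by-sample so that index order tracks iteration order across all chains, which is also why the burn-in cut is multiplied by n_chains. A minimal sketch of that idea (an assumption about weave's behaviour, not its actual source):

    import numpy as np

    def weave(chains):
        # Interleave: iteration 0 of every chain, then iteration 1, ...
        chains = np.asarray(chains)  # shape (n_chains, n_iters, n_params)
        return chains.swapaxes(0, 1).reshape(-1, chains.shape[2])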
Example #16
    def _run(self, result):

        import pints
        import pints.toy
        import numpy as np
        import pfunk

        import logging
        log = logging.getLogger(__name__)

        DEBUG = False

        # Show method name
        log.info('Using method: ' + self._method)

        # Get method class
        method = getattr(pints, self._method)

        # Check number of chains
        if issubclass(method, pints.SingleChainMCMC) and self._nchains > 1:
            log.warning('SingleChainMCMC run with more than 1 chain.')
        elif issubclass(method, pints.MultiChainMCMC) and self._nchains == 1:
            log.warning('MultiChainMCMC run with only 1 chain.')

        # Create a log pdf
        sigma = 2
        r = 4
        log_pdf = pints.toy.SimpleEggBoxLogPDF(sigma=sigma, r=r)

        # Generate random starting point(s)
        d = 2 * 6 * r * sigma
        log_prior = pints.MultivariateGaussianLogPrior([0, 0],
                                                       [[d, 0], [0, d]])
        x0 = log_prior.sample(self._nchains)

        # Set up a sampling routine
        mcmc = pints.MCMCController(log_pdf, self._nchains, x0, method=method)
        mcmc.set_parallel(False)  # allow external parallelisation instead

        # Log to file
        if not DEBUG:
            mcmc.set_log_to_screen(False)

        # Set max iterations
        n_iter = 50000
        n_burn = 10000
        n_init = 1000
        mcmc.set_max_iterations(n_iter)
        if mcmc.method_needs_initial_phase():
            mcmc.set_initial_phase_iterations(n_init)

        # Run
        chains = mcmc.run()

        if DEBUG:
            import matplotlib.pyplot as plt
            import pints.plot
            pints.plot.trace(chains)
            plt.show()

        # Combine chains (weaving, so we can see the combined progress per
        # iteration for multi-chain methods)
        chain = pfunk.weave(chains)

        # Remove burn-in
        # For multi-chain, multiply by n_chains because we wove the chains
        # together.
        chain = chain[n_burn * self._nchains:]
        log.info('Chain shape (without burn-in): ' + str(chain.shape))
        log.info('Chain mean: ' + str(np.mean(chain, axis=0)))

        # Store kullback-leibler-based score after burn-in
        result['kld'] = log_pdf.kl_divergence(chain)

        # Store effective sample size
        result['ess'] = pints.effective_sample_size(chain)

        # Store status
        result['status'] = 'done'
Example #17
    def _run(self, result):

        import pints
        import pints.toy
        import numpy as np
        import pfunk

        import logging
        log = logging.getLogger(__name__)

        DEBUG = False

        # Show method name
        log.info('Using method: ' + self._method)

        # Get method class
        method = getattr(pints, self._method)

        # Check number of chains
        if issubclass(method, pints.SingleChainMCMC) and self._nchains > 1:
            log.warning('SingleChainMCMC run with more than 1 chain.')
        elif issubclass(method, pints.MultiChainMCMC) and self._nchains == 1:
            log.warning('MultiChainMCMC run with only 1 chain.')

        # Create a log pdf
        xtrue = np.array([2, 4])
        sigma = np.diag(np.array([1, 3]))
        log_pdf = pints.toy.GaussianLogPDF(xtrue, sigma)

        # Generate random points
        log_prior = pints.MultivariateGaussianLogPrior(xtrue + 1, sigma * 2)
        x0 = log_prior.sample(self._nchains)

        # Create a sampling routine
        mcmc = pints.MCMCController(
            log_pdf, self._nchains, x0, sigma0=sigma, method=method)
        mcmc.set_parallel(False)  # allow external parallelisation instead

        # Set hyperparameters for selected methods
        if method == pints.MALAMCMC:
            for sampler in mcmc.samplers():
                # Set MALA step size
                sampler.set_epsilon([1.5, 1.5])

        # Log to file
        if not DEBUG:
            mcmc.set_log_to_screen(False)

        # Set max iterations
        n_iter = self._max_iter
        n_burn = int(self._max_iter * 0.5)
        n_init = int(self._max_iter * 0.1)
        mcmc.set_max_iterations(n_iter)
        if mcmc.method_needs_initial_phase():
            mcmc.set_initial_phase_iterations(n_init)

        # Run
        chains = mcmc.run()

        if DEBUG:
            import matplotlib.pyplot as plt
            import pints.plot
            pints.plot.trace(chains)
            plt.show()

        # Combine chains (weaving, so we can see the combined progress per
        # iteration for multi-chain methods)
        chain = pfunk.weave(chains)

        # Remove burn-in
        # For multi-chain, multiply by n_chains because we wove the chains
        # together.
        chain = chain[n_burn * self._nchains:]
        log.info('Chain shape (without burn-in): ' + str(chain.shape))
        log.info('Chain mean: ' + str(np.mean(chain, axis=0)))

        # Store kullback-leibler divergence after burn-in
        result['kld'] = log_pdf.kl_divergence(chain)

        # Store effective sample size
        result['ess'] = pints.effective_sample_size(chain)

        # Store status
        result['status'] = 'done'