def test_flow(self):
        """Check the ask/tell protocol restrictions of the sampler."""

        # The very first proposal must be the user-supplied starting point
        start = self.real_parameters
        mcmc = pints.AdaptiveCovarianceMCMC(start)
        self.assertIs(mcmc.ask(), mcmc._x0)

        # Initialising a second time is an error
        mcmc = pints.AdaptiveCovarianceMCMC(start)
        mcmc.ask()
        self.assertRaises(RuntimeError, mcmc._initialise)

        # Calling tell() before any ask() is an error
        mcmc = pints.AdaptiveCovarianceMCMC(start)
        self.assertRaises(RuntimeError, mcmc.tell, 0)

        # Repeated ask() calls (without tell) return the same proposal
        mcmc = pints.AdaptiveCovarianceMCMC(start)
        mcmc.set_initial_phase(False)  # move the chain into accepting state
        for _ in range(100):
            proposal = mcmc.ask()
            mcmc.tell(self.log_posterior(proposal))
        repeated = mcmc.ask()
        for _ in range(10):
            self.assertIs(repeated, mcmc.ask())

        # Calling tell() twice in a row is an error
        mcmc.tell(1)
        self.assertRaises(RuntimeError, mcmc.tell, 1)

        # A log-pdf of -inf at the starting point is rejected
        mcmc = pints.AdaptiveCovarianceMCMC(start)
        mcmc.ask()
        self.assertRaises(ValueError, mcmc.tell, float('-inf'))
Beispiel #2
0
    def test_deprecated_alias(self):
        """The deprecated class name still runs the Haario-Bardenet method."""

        mcmc = pints.AdaptiveCovarianceMCMC(self.real_parameters)
        self.assertIn('Haario-Bardenet', mcmc.name())

        # Short run: leave the initial phase after 20 iterations and keep
        # the second half of the chain
        mcmc.set_target_acceptance_rate(0.3)
        mcmc.set_initial_phase(True)
        rates = []
        samples = []
        for step in range(100):
            proposed = mcmc.ask()
            log_pdf = self.log_posterior(proposed)
            accepted = mcmc.tell(log_pdf)
            if step == 20:
                mcmc.set_initial_phase(False)
            if step >= 50:
                samples.append(accepted)
            rates.append(mcmc.acceptance_rate())
            # If the proposal was accepted, the stored log-pdf must match
            if np.all(accepted == proposed):
                self.assertEqual(mcmc.current_log_pdf(), log_pdf)
        samples = np.array(samples)
        rates = np.array(rates)
        self.assertEqual(samples.shape[0], 50)
        self.assertEqual(samples.shape[1], len(self.real_parameters))
        self.assertEqual(rates.shape[0], 100)
    def test_method(self):
        """Run the sampler briefly and sanity-check the produced chain."""

        # Start slightly away from the true parameters
        start = self.real_parameters * 1.1
        mcmc = pints.AdaptiveCovarianceMCMC(start)

        # Configure
        mcmc.set_target_acceptance_rate(0.3)
        mcmc.set_initial_phase(True)

        # Short run: leave the initial phase after 20 iterations and keep
        # the second half of the chain
        rates = []
        samples = []
        for step in range(100):
            proposed = mcmc.ask()
            log_pdf = self.log_posterior(proposed)
            accepted = mcmc.tell(log_pdf)
            if step == 20:
                mcmc.set_initial_phase(False)
            if step >= 50:
                samples.append(accepted)
            rates.append(mcmc.acceptance_rate())
            # If the proposal was accepted, the stored log-pdf must match
            if np.all(accepted == proposed):
                self.assertEqual(mcmc.current_log_pdf(), log_pdf)

        samples = np.array(samples)
        rates = np.array(rates)
        self.assertEqual(samples.shape[0], 50)
        self.assertEqual(samples.shape[1], len(start))
        self.assertEqual(rates.shape[0], 100)
Beispiel #4
0
    def test_replace(self):
        """Check replace() pre-conditions and dimension checks."""

        start = self.real_parameters * 1.1
        mcmc = pints.AdaptiveCovarianceMCMC(start)

        # Replacing requires one full ask/tell round first
        self.assertRaisesRegex(RuntimeError, 'already running', mcmc.replace,
                               start, 1)

        mcmc.ask()

        # An ask() on its own is still not enough
        self.assertRaises(RuntimeError, mcmc.replace, start, 1)

        mcmc.tell(0.5)

        # After a full round, replacing (even repeatedly) works
        mcmc.replace([1, 2, 3], 10)
        mcmc.replace([1, 2, 3], 10)

        # The new position must have the correct dimension
        self.assertRaisesRegex(ValueError,
                               '`current` has the wrong dimensions',
                               mcmc.replace, [1, 2], 1)

        # The proposal can be replaced as well
        mcmc.ask()
        mcmc.replace([1, 2, 3], 10, [3, 4, 5])

        # ...but it, too, must have the correct dimension
        self.assertRaisesRegex(ValueError,
                               '`proposed` has the wrong dimensions',
                               mcmc.replace, [1, 2, 3], 3, [3, 4])
Beispiel #5
0
    def test_deprecated_alias(self):
        """The deprecated class name still runs the Haario-Bardenet method."""

        mcmc = pints.AdaptiveCovarianceMCMC(self.real_parameters)
        self.assertIn('Haario-Bardenet', mcmc.name())

        # Short run: leave the initial phase after 20 iterations and keep
        # the second half of the proposals
        mcmc.set_target_acceptance_rate(0.3)
        mcmc.set_initial_phase(True)
        rates = []
        samples = []
        for step in range(100):
            proposed = mcmc.ask()
            log_pdf = self.log_posterior(proposed)
            current, current_log_pdf, was_accepted = mcmc.tell(log_pdf)
            if step == 20:
                mcmc.set_initial_phase(False)
            if step >= 50:
                samples.append(proposed)
            rates.append(mcmc.acceptance_rate())
            # tell() reports acceptance as a plain bool; on acceptance the
            # current point and log-pdf must equal the proposal's
            self.assertIsInstance(was_accepted, bool)
            if was_accepted:
                self.assertTrue(np.all(proposed == current))
                self.assertEqual(log_pdf, current_log_pdf)
        samples = np.array(samples)
        rates = np.array(rates)
        self.assertEqual(samples.shape[0], 50)
        self.assertEqual(samples.shape[1], len(self.real_parameters))
        self.assertEqual(rates.shape[0], 100)
    def test_settings(self):
        """Check that every setter round-trips through its getter."""

        mcmc = pints.AdaptiveCovarianceMCMC(self.log_likelihood, self.x0)

        # Halve the acceptance rate
        target = mcmc.acceptance_rate() * 0.5
        mcmc.set_acceptance_rate(target)
        self.assertEqual(mcmc.acceptance_rate(), target)

        # Halve the number of iterations
        n_iter = int(mcmc.iterations() * 0.5)
        mcmc.set_iterations(n_iter)
        self.assertEqual(mcmc.iterations(), n_iter)

        # Halve the number of non-adaptive iterations
        n_fixed = int(mcmc.non_adaptive_iterations() * 0.5)
        mcmc.set_non_adaptive_iterations(n_fixed)
        self.assertEqual(mcmc.non_adaptive_iterations(), n_fixed)

        # Halve the burn-in
        n_burn = int(mcmc.burn_in() * 0.5)
        mcmc.set_burn_in(n_burn)
        self.assertEqual(mcmc.burn_in(), n_burn)

        # Store only every 4th sample
        thinning = 4
        mcmc.set_thinning_rate(thinning)
        self.assertEqual(mcmc.thinning_rate(), thinning)

        # Flip verbose mode
        verbose = not mcmc.verbose()
        mcmc.set_verbose(verbose)
        self.assertEqual(mcmc.verbose(), verbose)
    def test_with_hint_and_sigma(self):
        """Run with a starting hint and sigma0; the chain mean should land
        close to the true parameters."""
        mcmc = pints.AdaptiveCovarianceMCMC(
            self.log_likelihood, self.x0, self.sigma0)
        mcmc.set_verbose(debug)
        samples = mcmc.run()
        centre = np.mean(samples, axis=0)
        self.assertLess(np.linalg.norm(centre - self.real_parameters), 1.5)
    def test_replace(self):
        """Compact check of replace() pre-conditions and dimensions."""
        start = self.real_parameters * 1.1
        mcmc = pints.AdaptiveCovarianceMCMC(start)
        # Not allowed before the first full ask/tell round...
        self.assertRaises(RuntimeError, mcmc.replace, start, 1)
        mcmc.ask()
        # ...nor after ask() alone
        self.assertRaises(RuntimeError, mcmc.replace, start, 1)
        mcmc.tell(0.5)
        # After a full round, repeated replacing is fine
        mcmc.replace([1, 2, 3], 10)
        mcmc.replace([1, 2, 3], 10)
        # The replacement position must have the right dimension
        self.assertRaises(ValueError, mcmc.replace, [1, 2], 1)
    def test_options(self):
        """Check the target-acceptance-rate option and its valid range."""

        mcmc = pints.AdaptiveCovarianceMCMC(self.real_parameters)

        # The default differs from 0.5, and the setter round-trips
        self.assertNotEqual(mcmc.target_acceptance_rate(), 0.5)
        mcmc.set_target_acceptance_rate(0.5)
        self.assertEqual(mcmc.target_acceptance_rate(), 0.5)

        # Exactly 1 is allowed; values outside (0, 1] are rejected
        mcmc.set_target_acceptance_rate(1)
        self.assertRaises(ValueError, mcmc.set_target_acceptance_rate, 0)
        self.assertRaises(ValueError, mcmc.set_target_acceptance_rate, -1e-6)
        self.assertRaises(ValueError, mcmc.set_target_acceptance_rate, 1.00001)
Beispiel #10
0
        # Create a posterior log-likelihood (log(likelihood * prior))
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)
        log_posteriors.append(log_posterior)
        score = pints.ProbabilityBasedError(log_posterior)

        # With synthetic data the true parameters (plus noise) are used
        # directly; otherwise a CMA-ES optimisation finds a starting point
        if synthetic:
            found_parameters = list(true_parameters) + [noise]
        else:
            found_parameters, found_value = pints.optimise(score,
                                                           x0,
                                                           sigma0,
                                                           boundaries,
                                                           method=pints.CMAES)

        # One MCMC sampler per posterior, started at the found parameters
        sampler = pints.AdaptiveCovarianceMCMC(found_parameters)
        samplers.append(sampler)

    # Cache samplers and posteriors so later runs can skip the setup above
    pickle.dump((samplers, log_posteriors), open(pickle_file, 'wb'))
else:
    # Reload the cached samplers/posteriors from an earlier run
    samplers, log_posteriors = pickle.load(open(pickle_file, 'rb'))
    print('using starting points:')
    for i, (sampler, log_posterior) in enumerate(zip(samplers,
                                                     log_posteriors)):
        print('\t', sampler._x0)
        # The last entry of _x0 is the noise parameter (see the synthetic
        # branch above); strip it before storing the model parameters
        sampled_true_parameters[:, i] = sampler._x0[:-1]
        if not use_cmaes:
            sampler._x0 = pints.vector(x0)

        plt.clf()
        # NOTE(review): reaches through private attributes to recover the
        # problem's time points — fragile; verify against the pints version
        times = log_posterior._log_likelihood._problem._times
Beispiel #11
0
    # Create a new log-likelihood function (adds an extra parameter!)
    problem = pints.SingleSeriesProblem(model, times, values)
    log_likelihood = pints.UnknownNoiseLogLikelihood(problem)

    # Create a new prior: a very wide (variance 1e9) normal over the model
    # parameters, combined with a uniform prior over the noise level
    large = 1e9
    param_prior = pints.MultivariateNormalLogPrior(mean,
                                                   large * np.eye(len(mean)))
    noise_prior = pints.UniformLogPrior([noise / 10.0], [noise * 10.0])
    log_prior = pints.ComposedLogPrior(param_prior, noise_prior)

    # Create a posterior log-likelihood (log(likelihood * prior))
    log_posterior = pints.LogPosterior(log_likelihood, log_prior)
    log_posteriors.append(log_posterior)

    # One sampler per posterior, started at the mean plus the noise guess
    sampler = pints.AdaptiveCovarianceMCMC(mean + [noise])
    samplers.append(sampler)

# Burn in the individual samplers
n_burn_in = 1000
for sample in range(n_burn_in):
    if sample % 10 == 0:
        # Progress marker every 10 burn-in iterations
        print('x', end='', flush=True)
    # Advance every sampler by one ask/tell step against its own posterior
    for i, (sampler, log_posterior) in enumerate(zip(samplers,
                                                     log_posteriors)):
        x = sampler.ask()
        sampler.tell(log_posterior(x))

# Run a simple hierarchical gibbs-mcmc routine
n_samples = 10000