Example #1
    def test_gaussian_noise_multi(self):
        # Multi-output test for known/unknown Gaussian noise log-likelihood
        # methods.

        model = pints.toy.FitzhughNagumoModel()
        parameters = [0.5, 0.5, 0.5]
        sigma = 0.1
        times = np.linspace(0, 100, 100)
        values = model.simulate(parameters, times)
        values += np.random.normal(0, sigma, values.shape)
        problem = pints.MultiOutputProblem(model, times, values)

        # Test if known/unknown give same result
        l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        l2 = pints.GaussianKnownSigmaLogLikelihood(problem, [sigma, sigma])
        l3 = pints.GaussianLogLikelihood(problem)
        self.assertAlmostEqual(l1(parameters), l2(parameters))
        self.assertAlmostEqual(l1(parameters),
                               l3(parameters + [sigma, sigma]))

        # Test invalid constructors
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, 0)
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, -1)
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, [1])
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, [1, 2, 3, 4])
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, [1, 2, -3])
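For reference, the quantity these assertions compare can be reproduced outside PINTS: the known-sigma Gaussian log-likelihood is a sum of independent Gaussian log-densities of the residuals. A minimal sketch, assuming only numpy and scipy (the arrays below are stand-ins, not the test's data):

import numpy as np
from scipy.stats import norm

def gaussian_known_sigma_loglik(simulated, observed, sigma):
    # Sum of log N(observed | simulated, sigma) over all points and outputs
    return np.sum(norm.logpdf(observed, loc=simulated, scale=sigma))

simulated = np.zeros((100, 2))
observed = simulated + np.random.normal(0, 0.1, simulated.shape)
print(gaussian_known_sigma_loglik(simulated, observed, 0.1))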
Example #2
    def test_sum_of_independent_log_pdfs(self):

        # Test single output
        model = pints.toy.LogisticModel()
        x = [0.015, 500]
        sigma = 0.1
        times = np.linspace(0, 1000, 100)
        values = model.simulate(x, times) + 0.1
        problem = pints.SingleOutputProblem(model, times, values)

        l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        l2 = pints.GaussianLogLikelihood(problem)
        ll = pints.SumOfIndependentLogPDFs([l1, l1, l1])
        self.assertEqual(l1.n_parameters(), ll.n_parameters())
        self.assertEqual(3 * l1(x), ll(x))

        # Test single output derivatives
        y, dy = ll.evaluateS1(x)
        self.assertEqual(y, ll(x))
        self.assertEqual(dy.shape, (2, ))
        y1, dy1 = l1.evaluateS1(x)
        self.assertTrue(np.all(3 * dy1 == dy))

        # Wrong number of arguments
        self.assertRaises(TypeError, pints.SumOfIndependentLogPDFs)
        self.assertRaises(ValueError, pints.SumOfIndependentLogPDFs, [l1])

        # Wrong types
        self.assertRaises(ValueError, pints.SumOfIndependentLogPDFs, [l1, 1])
        self.assertRaises(ValueError, pints.SumOfIndependentLogPDFs,
                          [problem, l1])

        # Mismatching dimensions
        self.assertRaises(ValueError, pints.SumOfIndependentLogPDFs, [l1, l2])

        # Test multi-output
        model = pints.toy.FitzhughNagumoModel()
        x = model.suggested_parameters()
        nt = 10
        nx = model.n_parameters()
        times = np.linspace(0, 10, nt)
        values = model.simulate(x, times) + 0.01
        problem = pints.MultiOutputProblem(model, times, values)
        sigma = 0.01
        l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        ll = pints.SumOfIndependentLogPDFs([l1, l1, l1])
        self.assertEqual(l1.n_parameters(), ll.n_parameters())
        self.assertEqual(3 * l1(x), ll(x))

        # Test multi-output derivatives
        y, dy = ll.evaluateS1(x)

        # Note: y and ll(x) differ a bit, because the solver acts slightly
        # different when evaluating with and without sensitivities!
        self.assertAlmostEqual(y, ll(x), places=3)

        self.assertEqual(dy.shape, (nx, ))
        y1, dy1 = l1.evaluateS1(x)
        self.assertTrue(np.all(3 * dy1 == dy))
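The additivity this test relies on is easy to state in isolation: the joint log-pdf of independent components is the sum of the component log-pdfs, so three copies of one likelihood give three times its value. A toy stand-in (not the PINTS implementation):

class SumOfLogPDFs:
    def __init__(self, pdfs):
        self.pdfs = list(pdfs)

    def __call__(self, x):
        # Independence: joint log-pdf is the sum of component log-pdfs
        return sum(f(x) for f in self.pdfs)

def f(x):
    return -0.5 * sum(xi ** 2 for xi in x)  # toy log-pdf

s = SumOfLogPDFs([f, f, f])
assert s([1.0, 2.0]) == 3 * f([1.0, 2.0])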
Example #3
    def test_build_tree_nan(self):
        # This test triggers a nan in hamiltonian_dash inside the build_tree
        # function; run it to get coverage on that branch.

        model = pints.toy.LogisticModel()
        real_parameters = np.array([0.015, 20])
        times = np.linspace(0, 1000, 50)
        org_values = model.simulate(real_parameters, times)
        np.random.seed(1)
        noise = 0.1
        values = org_values + np.random.normal(0, noise, org_values.shape)
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, noise)

        log_prior = pints.UniformLogPrior([0.0001, 1], [1, 500])

        log_posterior = pints.LogPosterior(log_likelihood, log_prior)

        xs = [[0.36083914, 1.99013825]]
        nuts_mcmc = pints.MCMCController(log_posterior,
                                         len(xs),
                                         xs,
                                         method=pints.NoUTurnMCMC)

        nuts_mcmc.set_max_iterations(50)
        nuts_mcmc.set_log_to_screen(False)
        np.random.seed(5)
        nuts_chains = nuts_mcmc.run()

        self.assertFalse(np.isnan(np.sum(nuts_chains)))
Example #4
    @classmethod
    def setUpClass(cls):
        """ Prepare for the test. """
        # Create toy model
        model = pints.toy.LogisticModel()
        cls.real_parameters = [0.015, 500]
        times = np.linspace(0, 1000, 1000)
        values = model.simulate(cls.real_parameters, times)

        # Add noise
        np.random.seed(1)
        cls.noise = 10
        values += np.random.normal(0, cls.noise, values.shape)
        cls.real_parameters.append(cls.noise)

        # Create an object with links to the model and time series
        problem = pints.SingleOutputProblem(model, times, values)

        # Create a uniform prior over the model parameters (the noise level
        # is known, so it is not a parameter here)
        cls.log_prior = pints.UniformLogPrior(
            [0.01, 400],
            [0.02, 600]
        )

        # Create a log-likelihood
        cls.log_likelihood = pints.GaussianKnownSigmaLogLikelihood(
            problem, cls.noise)
Example #5
    def test_model_that_gives_nan(self):
        # This model will return a nan in the gradient evaluation, which
        # originally tripped up the find_reasonable_epsilon function in nuts.
        # Run it for a bit so that we get coverage on the if statement!

        model = pints.toy.LogisticModel()
        real_parameters = model.suggested_parameters()
        times = model.suggested_times()
        org_values = model.simulate(real_parameters, times)
        np.random.seed(1)
        noise = 0.2
        values = org_values + np.random.normal(0, noise, org_values.shape)
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, noise)

        log_prior = pints.UniformLogPrior([0.01, 40], [0.2, 60])

        log_posterior = pints.LogPosterior(log_likelihood, log_prior)

        xs = [real_parameters * 1.1]
        nuts_mcmc = pints.MCMCController(log_posterior,
                                         len(xs),
                                         xs,
                                         method=pints.NoUTurnMCMC)

        nuts_mcmc.set_max_iterations(10)
        nuts_mcmc.set_log_to_screen(False)
        nuts_chains = nuts_mcmc.run()

        self.assertFalse(np.isnan(np.sum(nuts_chains)))
Example #6
def run(model, real_parameters, noise_used, log_prior_used):
    # Create some toy data
    
    times = np.linspace(1, 1000, 50)
    org_values = model.simulate(real_parameters, times)

    # Add noise
    noise = 10
    values = org_values + np.random.normal(0, noise, org_values.shape)
    real_parameters = np.array(real_parameters)


    # Create an object with links to the model and time series
    problem = pints.SingleOutputProblem(model, times, values)

    # Create a log-likelihood function (adds an extra parameter!)
    log_likelihood_used = pints.GaussianKnownSigmaLogLikelihood(problem, [noise_used])

    # The uniform prior over the parameters is passed in as log_prior_used

    # Create a posterior log-likelihood (log(likelihood * prior))
    log_posterior = pints.LogPosterior(log_likelihood_used, log_prior_used)

    # Choose starting points for 3 mcmc chains
    xs = [
        real_parameters,
        real_parameters * 1.01,
        real_parameters * 0.99,
    ]

    # Create mcmc routine with three chains
    mcmc = pints.MCMCController(log_posterior, 3, xs, method=pints.HaarioACMC)
    
    sample_size = 4000
    # Add stopping criterion
    mcmc.set_max_iterations(sample_size)

    # Start adapting after sample_size // 4 iterations (1000 here)
    mcmc.set_initial_phase_iterations(sample_size // 4)

    # Disable logging mode
    mcmc.set_log_to_screen(False)

    # Run!
    print('Running...')
    chains = mcmc.run()
    print('Done!')
    # Find the first iteration s (after the burn-in of sample_size // 4) at
    # which the chains have converged according to the Rhat statistic
    s = sample_size // 4 + 1
    # HMC: s = 1
    converged = False
    while s < sample_size:
        chains_cut = chains[:, sample_size // 4:s + 1]
        rhat = pints.rhat(chains_cut)
        s += 1
        if rhat[0] < 1.05:
            converged = True
            break
    print(s)
    return chains[0][s:][:, 0]
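The Rhat-based stopping rule above can be checked in isolation: well-mixed chains should give a value close to 1. A small standalone sketch, assuming pints.rhat accepts an array of shape (chains, samples, parameters) as in the calls above:

import numpy as np
import pints

np.random.seed(0)
chains = np.random.normal(0, 1, size=(3, 500, 1))
print(pints.rhat(chains))  # close to [1.0] for well-mixed chains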
Example #7
    def test_known_noise_gaussian_single_and_multi(self):
        """
        Tests the output of single-series against multi-series known noise
        log-likelihoods.
        """

        # Define boring 1-output and 2-output models
        class NullModel1(pints.ForwardModel):
            def n_parameters(self):
                return 1

            def simulate(self, x, times):
                return np.zeros(times.shape)

        class NullModel2(pints.ForwardModel):
            def n_parameters(self):
                return 1

            def n_outputs(self):
                return 2

            def simulate(self, x, times):
                return np.zeros((len(times), 2))

        # Create two single output problems
        times = np.arange(10)
        np.random.seed(1)
        sigma1 = 3
        sigma2 = 5
        values1 = np.random.uniform(0, sigma1, times.shape)
        values2 = np.random.uniform(0, sigma2, times.shape)
        model1d = NullModel1()
        problem1 = pints.SingleOutputProblem(model1d, times, values1)
        problem2 = pints.SingleOutputProblem(model1d, times, values2)
        log1 = pints.GaussianKnownSigmaLogLikelihood(problem1, sigma1)
        log2 = pints.GaussianKnownSigmaLogLikelihood(problem2, sigma2)

        # Create one multi output problem
        values3 = np.array([values1, values2]).swapaxes(0, 1)
        model2d = NullModel2()
        problem3 = pints.MultiOutputProblem(model2d, times, values3)
        log3 = pints.GaussianKnownSigmaLogLikelihood(
            problem3, [sigma1, sigma2])

        # Check if we get the right output
        self.assertAlmostEqual(log1(0) + log2(0), log3(0))
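The equality checked at the end follows from independence across outputs: with a zero-mean model, each log-likelihood is a sum of Gaussian log-densities of the data, and stacking the two series just concatenates those sums. A quick scipy check of the same identity, assuming nothing about PINTS:

import numpy as np
from scipy.stats import norm

np.random.seed(1)
v1 = np.random.uniform(0, 3, 10)
v2 = np.random.uniform(0, 5, 10)
single = norm.logpdf(v1, loc=0, scale=3).sum() + \
    norm.logpdf(v2, loc=0, scale=5).sum()
multi = norm.logpdf(np.stack([v1, v2], axis=1), loc=0,
                    scale=[3, 5]).sum()
print(np.isclose(single, multi))  # True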
Example #8
    def test_log_posterior(self):

        # Create a toy problem and log likelihood
        model = pints.toy.LogisticModel()
        real_parameters = [0.015, 500]
        x = [0.014, 501]
        sigma = 0.001
        times = np.linspace(0, 1000, 100)
        values = model.simulate(real_parameters, times)
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)

        # Create a prior
        log_prior = pints.UniformLogPrior([0, 0], [1, 1000])

        # Test
        p = pints.LogPosterior(log_likelihood, log_prior)
        self.assertEqual(p(x), log_likelihood(x) + log_prior(x))
        y = [-1, 500]
        self.assertEqual(log_prior(y), -float('inf'))
        self.assertEqual(p(y), -float('inf'))
        self.assertEqual(p(y), log_prior(y))

        # Test derivatives
        log_prior = pints.ComposedLogPrior(pints.GaussianLogPrior(0.015, 0.3),
                                           pints.GaussianLogPrior(500, 100))
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)
        x = [0.013, 540]
        y, dy = log_posterior.evaluateS1(x)
        self.assertEqual(y, log_posterior(x))
        self.assertEqual(dy.shape, (2, ))
        y1, dy1 = log_prior.evaluateS1(x)
        y2, dy2 = log_likelihood.evaluateS1(x)
        self.assertTrue(np.all(dy == dy1 + dy2))

        # Test getting the prior and likelihood back again
        self.assertIs(log_posterior.log_prior(), log_prior)
        self.assertIs(log_posterior.log_likelihood(), log_likelihood)

        # First arg must be a LogPDF
        self.assertRaises(ValueError, pints.LogPosterior, 'hello', log_prior)

        # Second arg must be a log_prior
        self.assertRaises(ValueError, pints.LogPosterior, log_likelihood,
                          log_likelihood)

        # Prior and likelihood must have same dimension
        self.assertRaises(ValueError, pints.LogPosterior, log_likelihood,
                          pints.GaussianLogPrior(0.015, 0.3))
Example #9
    def create_pints_log_likelihood(self):
        problem, fitted_children = self.create_pints_problem()
        if self.form == self.Form.NORMAL:
            noise_param = self.parameters.get(index=1)
            if noise_param.child.form == noise_param.child.Form.FIXED:
                value = noise_param.value
                return pints.GaussianKnownSigmaLogLikelihood(
                    problem, value), fitted_children
            else:
                return pints.GaussianLogLikelihood(
                    problem), fitted_children + [noise_param.child]
        elif self.form == LogLikelihood.Form.LOGNORMAL:
            noise_param = self.parameters.get(index=1)
            return pints.LogNormalLogLikelihood(
                problem), fitted_children + [noise_param.child]

        raise RuntimeError('unknown log_likelihood form')
Example #10
    def __init__(self, name):
        super(TestPlot, self).__init__(name)

        # Create toy model (single output)
        self.model = toy.LogisticModel()
        self.real_parameters = [0.015, 500]
        self.times = np.linspace(0, 1000, 100)  # small problem
        self.values = self.model.simulate(self.real_parameters, self.times)

        # Add noise
        self.noise = 10
        self.values += np.random.normal(0, self.noise, self.values.shape)
        self.real_parameters.append(self.noise)
        self.real_parameters = np.array(self.real_parameters)

        # Create an object with links to the model and time series
        self.problem = pints.SingleOutputProblem(self.model, self.times,
                                                 self.values)

        # Create a uniform prior over both the parameters and the new noise
        # variable
        self.lower = [0.01, 400, self.noise * 0.1]
        self.upper = [0.02, 600, self.noise * 100]
        self.log_prior = pints.UniformLogPrior(self.lower, self.upper)

        # Create a log likelihood
        self.log_likelihood = pints.GaussianLogLikelihood(self.problem)

        # Create an un-normalised log-posterior (log-likelihood + log-prior)
        self.log_posterior = pints.LogPosterior(self.log_likelihood,
                                                self.log_prior)

        # Run MCMC
        self.x0 = [
            self.real_parameters * 1.1, self.real_parameters * 0.9,
            self.real_parameters * 1.05
        ]
        mcmc = pints.MCMCController(self.log_posterior, 3, self.x0)
        mcmc.set_max_iterations(300)  # make it as small as possible
        mcmc.set_log_to_screen(False)
        self.samples = mcmc.run()

        # Create toy model (multi-output)
        self.model2 = toy.LotkaVolterraModel()
        self.real_parameters2 = self.model2.suggested_parameters()
        self.times2 = self.model2.suggested_times()[::10]  # down sample it
        self.values2 = self.model2.simulate(self.real_parameters2, self.times2)

        # Add noise
        self.noise2 = 0.05
        self.values2 += np.random.normal(0, self.noise2, self.values2.shape)

        # Create an object with links to the model and time series
        self.problem2 = pints.MultiOutputProblem(self.model2, self.times2,
                                                 self.values2)

        # Create a uniform prior over the model parameters (the noise level
        # is known here)
        self.log_prior2 = pints.UniformLogPrior([1, 1, 1, 1], [6, 6, 6, 6])
        # Create a log likelihood
        self.log_likelihood2 = pints.GaussianKnownSigmaLogLikelihood(
            self.problem2, self.noise2)

        # Create an un-normalised log-posterior (log-likelihood + log-prior)
        self.log_posterior2 = pints.LogPosterior(self.log_likelihood2,
                                                 self.log_prior2)

        # Run MCMC
        self.x02 = [
            self.real_parameters2 * 1.1, self.real_parameters2 * 0.9,
            self.real_parameters2 * 1.05
        ]
        mcmc = pints.MCMCController(self.log_posterior2, 3, self.x02)
        mcmc.set_max_iterations(300)  # make it as small as possible
        mcmc.set_log_to_screen(False)
        self.samples2 = mcmc.run()

        # Create toy model (single-output, single-parameter)
        self.real_parameters3 = [0]
        self.log_posterior3 = toy.GaussianLogPDF(self.real_parameters3, [1])
        self.lower3 = [-3]
        self.upper3 = [3]

        # Run MCMC
        self.x03 = [[1], [-2], [3]]
        mcmc = pints.MCMCController(self.log_posterior3, 3, self.x03)
        mcmc.set_max_iterations(300)  # make it as small as possible
        mcmc.set_log_to_screen(False)
        self.samples3 = mcmc.run()
Example #11
n, bins, patches = plt.hist([], bins=range(L + 2), align='left')
rankstats = []
d = 0  # counts prior draws rejected due to an infinite posterior
columns = [str(x) for x in range(N)]
df = pd.DataFrame(columns=columns)
for n in range(N):
    print(n)
    theta = log_prior.sample(n=1)[0]
    times = np.linspace(1, 1000, 25)
    org_values = model.simulate(theta, times)
    # Add noise
    noise = 10
    ys = org_values + np.random.normal(0, noise, org_values.shape)
    # Create an object with links to the model and time series
    problem = pints.SingleOutputProblem(model, times, ys)
    # Create a log-likelihood function (adds an extra parameter!)
    log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, [noise])

    log_prior_incorrect = pints.UniformLogPrior([200], [800])
    log_posterior = pints.LogPosterior(log_likelihood, log_prior)

    # Choose starting points for 3 mcmc chains
    xs = [theta, theta * 1.01, theta * 0.99]
    isinf = False
    for x in xs:
        if math.isinf(log_posterior.evaluateS1(x)[0]):
            isinf = True
            d += 1
            break
    if isinf:
        continue
Example #12
    def test_gaussian_log_likelihoods_single_output(self):
        # Single-output test for known/unknown noise log-likelihood methods

        model = pints.toy.LogisticModel()
        parameters = [0.015, 500]
        sigma = 0.1
        times = np.linspace(0, 1000, 100)
        values = model.simulate(parameters, times)
        values += np.random.normal(0, sigma, values.shape)
        problem = pints.SingleOutputProblem(model, times, values)

        # Test if known/unknown give same result
        l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        l2 = pints.GaussianLogLikelihood(problem)
        self.assertAlmostEqual(l1(parameters), l2(parameters + [sigma]))

        # Test invalid constructors
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, 0)
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, -1)

        # known noise value checks
        model = pints.toy.ConstantModel(1)
        times = np.linspace(0, 10, 10)
        values = model.simulate([2], times)
        org_values = np.arange(10) / 5.0
        problem = pints.SingleOutputProblem(model, times, org_values)
        log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, 1.5)
        self.assertAlmostEqual(log_likelihood([-1]), -21.999591968683927)
        l, dl = log_likelihood.evaluateS1([3])
        self.assertAlmostEqual(l, -23.777369746461702)
        self.assertAlmostEqual(dl[0], -9.3333333333333321)
        self.assertEqual(len(dl), 1)

        # unknown noise value checks
        log_likelihood = pints.GaussianLogLikelihood(problem)
        self.assertAlmostEqual(log_likelihood([-3, 1.5]), -47.777369746461702)

        # unknown noise check sensitivity
        model = pints.toy.ConstantModel(1)
        times = np.linspace(0, 10, 10)
        values = model.simulate([2], times)
        org_values = np.arange(10) / 5.0
        problem = pints.SingleOutputProblem(model, times, org_values)
        log_likelihood = pints.GaussianLogLikelihood(problem)
        l, dl = log_likelihood.evaluateS1([7, 2.0])
        self.assertAlmostEqual(l, -63.04585713764618)
        self.assertAlmostEqual(dl[0], -15.25)
        self.assertAlmostEqual(dl[1], 41.925000000000004)

        # Test deprecated aliases
        l1 = pints.KnownNoiseLogLikelihood(problem, sigma)
        self.assertIsInstance(l1, pints.GaussianKnownSigmaLogLikelihood)

        l2 = pints.UnknownNoiseLogLikelihood(problem)
        self.assertIsInstance(l2, pints.GaussianLogLikelihood)

        # test multiple output unknown noise
        model = pints.toy.ConstantModel(3)
        parameters = [0, 0, 0]
        times = [1, 2, 3, 4]
        values = model.simulate([0, 0, 0], times)
        org_values = [[10.7, 3.5, 3.8], [1.1, 3.2, -1.4], [9.3, 0.0, 4.5],
                      [1.2, -3, -10]]
        problem = pints.MultiOutputProblem(model, times, org_values)
        log_likelihood = pints.GaussianLogLikelihood(problem)
        # Test Gaussian_logpdf((10.7, 1.1, 9.3, 1.2)|mean=0, sigma=3.5) +
        #      Gaussian_logpdf((3.5, 3.2, 0.0, -3)|mean=0, sigma=1) +
        #      Gaussian_logpdf((3.8, -1.4, 4.5, -10)|mean=0, sigma=12)
        #      = -50.5088...
        self.assertAlmostEqual(log_likelihood(parameters + [3.5, 1, 12]),
                               -50.508848609684783)
        l, dl = log_likelihood.evaluateS1(parameters + [3.5, 1, 12])
        self.assertAlmostEqual(l, -50.508848609684783)
        self.assertAlmostEqual(dl[0], 1.820408163265306)
        self.assertAlmostEqual(dl[1], 3.7000000000000002)
        self.assertAlmostEqual(dl[2], -0.021527777777777774)
        self.assertAlmostEqual(dl[3], 3.6065306122448981)
        self.assertAlmostEqual(dl[4], 27.490000000000002)
        self.assertAlmostEqual(dl[5], -0.25425347222222222)

        # test multiple output model dimensions of sensitivities
        d = 20
        model = pints.toy.ConstantModel(d)
        parameters = [0 for i in range(d)]
        times = [1, 2, 3, 4]
        values = model.simulate(parameters, times)
        org_values = np.ones((len(times), d))
        extra_params = np.ones(d).tolist()
        problem = pints.MultiOutputProblem(model, times, org_values)
        log_likelihood = pints.GaussianLogLikelihood(problem)
        l = log_likelihood(parameters + extra_params)
        l1, dl = log_likelihood.evaluateS1(parameters + extra_params)
        self.assertEqual(len(dl), len(parameters + extra_params))
        self.assertEqual(l, l1)
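The hand-computed value in the comments (-50.5088...) can be reproduced directly from the Gaussian density; a sketch assuming only numpy and scipy:

import numpy as np
from scipy.stats import norm

org_values = np.array([[10.7, 3.5, 3.8], [1.1, 3.2, -1.4],
                       [9.3, 0.0, 4.5], [1.2, -3, -10]])
sigmas = [3.5, 1, 12]
total = sum(norm.logpdf(org_values[:, i], loc=0, scale=s).sum()
            for i, s in enumerate(sigmas))
print(total)  # approximately -50.508848609684783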
Example #13
    def test_known_noise_gaussian_single_S1(self):
        # Simple tests for single known noise Gaussian log-likelihood with
        # sensitivities.

        model = pints.toy.LogisticModel()
        x = [0.015, 500]
        sigma = 0.1
        times = np.linspace(0, 1000, 100)
        values = model.simulate(x, times)
        values += np.random.normal(0, sigma, values.shape)
        problem = pints.SingleOutputProblem(model, times, values)

        # Test if values are correct
        f = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        L1 = f(x)
        L2, dL = f.evaluateS1(x)
        self.assertEqual(L1, L2)
        self.assertEqual(dL.shape, (2, ))

        # Test with MultiOutputProblem
        problem = pints.MultiOutputProblem(model, times, values)
        f2 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        L3 = f2(x)
        L4, dL = f2.evaluateS1(x)
        self.assertEqual(L3, L4)
        self.assertEqual(L1, L3)
        self.assertEqual(dL.shape, (2, ))

        # Test without noise
        values = model.simulate(x, times)
        problem = pints.SingleOutputProblem(model, times, values)
        f = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        L1 = f(x)
        L2, dL = f.evaluateS1(x)
        self.assertEqual(L1, L2)
        self.assertEqual(dL.shape, (2, ))

        # Test if zero at optimum
        self.assertTrue(np.all(dL == 0))

        # Test if positive to the left, negative to the right
        L, dL = f.evaluateS1(x + np.array([-1e-9, 0]))
        self.assertTrue(dL[0] > 0)
        L, dL = f.evaluateS1(x + np.array([1e-9, 0]))
        self.assertTrue(dL[0] < 0)

        # Test if positive to the left, negative to the right
        L, dL = f.evaluateS1(x + np.array([0, -1e-9]))
        self.assertTrue(dL[1] > 0)
        L, dL = f.evaluateS1(x + np.array([0, 1e-9]))
        self.assertTrue(dL[1] < 0)

        # Plot derivatives (disabled debug block; flip to True to inspect)
        if False:
            import matplotlib.pyplot as plt
            plt.figure()
            r = np.linspace(x[0] * 0.95, x[0] * 1.05, 100)
            L = []
            dL1 = []
            dL2 = []
            for y in r:
                a, b = f.evaluateS1([y, x[1]])
                L.append(a)
                dL1.append(b[0])
                dL2.append(b[1])
            plt.subplot(3, 1, 1)
            plt.plot(r, L)
            plt.subplot(3, 1, 2)
            plt.plot(r, dL1)
            plt.grid(True)
            plt.subplot(3, 1, 3)
            plt.plot(r, dL2)
            plt.grid(True)

            plt.figure()
            r = np.linspace(x[1] * 0.95, x[1] * 1.05, 100)
            L = []
            dL1 = []
            dL2 = []
            for y in r:
                a, b = f.evaluateS1([x[0], y])
                L.append(a)
                dL1.append(b[0])
                dL2.append(b[1])
            plt.subplot(3, 1, 1)
            plt.plot(r, L)
            plt.subplot(3, 1, 2)
            plt.plot(r, dL1)
            plt.grid(True)
            plt.subplot(3, 1, 3)
            plt.plot(r, dL2)
            plt.grid(True)

            plt.show()

        # value-based tests (single output tests are above)
        # multiple outputs
        model = pints.toy.ConstantModel(3)
        parameters = [0, 0, 0]
        times = [1, 2, 3, 4]
        values = model.simulate(parameters, times)
        org_values = [[10.7, 3.5, 3.8], [1.1, 3.2, -1.4], [9.3, 0.0, 4.5],
                      [1.2, -3, -10]]
        problem = pints.MultiOutputProblem(model, times, org_values)
        sigma = [3.5, 1, 12]
        log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        # Test Gaussian_logpdf((10.7, 1.1, 9.3, 1.2)|mean=0, sigma=3.5) +
        #      Gaussian_logpdf((3.5, 3.2, 0.0, -3)|mean=0, sigma=1) +
        #      Gaussian_logpdf((3.8, -1.4, 4.5, -10)|mean=0, sigma=12)
        #      = -50.5088...
        self.assertAlmostEqual(log_likelihood(parameters), -50.508848609684783)
        l, dl = log_likelihood.evaluateS1(parameters)
        self.assertAlmostEqual(l, -50.508848609684783)
        self.assertAlmostEqual(dl[0], 1.820408163265306)
        self.assertAlmostEqual(dl[1], 3.7000000000000002)
        self.assertAlmostEqual(dl[2], -0.021527777777777774)
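The sign checks above generalise to a finite-difference test of evaluateS1; a generic sketch, assuming f is any pints.LogPDF and x a point in its domain (names reused from the test):

import numpy as np

def check_gradient(f, x, eps=1e-6):
    # Compare analytic sensitivities to central finite differences
    _, dL = f.evaluateS1(x)
    x = np.asarray(x, dtype=float)
    for i in range(len(x)):
        step = np.zeros_like(x)
        step[i] = eps * max(1.0, abs(x[i]))
        numeric = (f(x + step) - f(x - step)) / (2 * step[i])
        print(i, dL[i], numeric)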
Example #14
    def test_scaled_log_likelihood(self):

        model = pints.toy.LogisticModel()
        real_parameters = [0.015, 500]
        test_parameters = [0.014, 501]
        sigma = 0.001
        times = np.linspace(0, 1000, 100)
        values = model.simulate(real_parameters, times)

        # Create an object with links to the model and time series
        problem = pints.SingleOutputProblem(model, times, values)

        # Create a scaled and not scaled log_likelihood
        log_likelihood_not_scaled = pints.GaussianKnownSigmaLogLikelihood(
            problem, sigma)
        log_likelihood_scaled = pints.ScaledLogLikelihood(
            log_likelihood_not_scaled)

        eval_not_scaled = log_likelihood_not_scaled(test_parameters)
        eval_scaled = log_likelihood_scaled(test_parameters)

        self.assertEqual(int(eval_not_scaled), -20959169232)
        self.assertAlmostEqual(eval_scaled * len(times), eval_not_scaled)

        # Test bad constructor
        self.assertRaises(ValueError, pints.ScaledLogLikelihood, model)

        # Test single-output derivatives
        y1, dy1 = log_likelihood_not_scaled.evaluateS1(test_parameters)
        y2, dy2 = log_likelihood_scaled.evaluateS1(test_parameters)
        self.assertEqual(y1, log_likelihood_not_scaled(test_parameters))
        self.assertEqual(dy1.shape, (2, ))
        self.assertEqual(y2, log_likelihood_scaled(test_parameters))
        self.assertEqual(dy2.shape, (2, ))
        dy3 = dy2 * len(times)
        self.assertAlmostEqual(dy1[0] / dy3[0], 1)
        self.assertAlmostEqual(dy1[1] / dy3[1], 1)

        # Test on multi-output problem
        model = pints.toy.FitzhughNagumoModel()
        nt = 10
        no = model.n_outputs()
        times = np.linspace(0, 100, nt)
        values = model.simulate([0.5, 0.5, 0.5], times)
        problem = pints.MultiOutputProblem(model, times, values)
        unscaled = pints.GaussianKnownSigmaLogLikelihood(problem, 1)
        scaled = pints.ScaledLogLikelihood(unscaled)
        p = [0.1, 0.1, 0.1]
        x = unscaled(p)
        y = scaled(p)
        self.assertAlmostEqual(y, x / nt / no)

        # Test multi-output derivatives
        y1, dy1 = unscaled.evaluateS1(p)
        y2, dy2 = scaled.evaluateS1(p)
        self.assertAlmostEqual(y1, unscaled(p), places=6)
        self.assertEqual(dy1.shape, (3, ))
        self.assertAlmostEqual(y2, scaled(p))
        self.assertEqual(dy2.shape, (3, ))
        dy3 = dy2 * nt * no
        self.assertAlmostEqual(dy1[0] / dy3[0], 1)
        self.assertAlmostEqual(dy1[1] / dy3[1], 1)

        # test values of log-likelihood and derivatives
        model = pints.toy.ConstantModel(3)
        times = [1, 2, 3, 4]
        parameters = [0, 0, 0]
        org_values = [[10.7, 3.5, 3.8], [1.1, 3.2, -1.4], [9.3, 0.0, 4.5],
                      [1.2, -3, -10]]
        problem = pints.MultiOutputProblem(model, times, org_values)
        f2 = pints.GaussianKnownSigmaLogLikelihood(problem, [3.5, 1, 12])
        log_likelihood = pints.ScaledLogLikelihood(f2)
        # Test Gaussian_logpdf((10.7, 1.1, 9.3, 1.2)|mean=0, sigma=3.5) +
        #      Gaussian_logpdf((3.5, 3.2, 0.0, -3)|mean=0, sigma=1) +
        #      Gaussian_logpdf((3.8, -1.4, 4.5, -10)|mean=0, sigma=12)
        #      = -50.5088...
        self.assertAlmostEqual(log_likelihood(parameters),
                               -50.508848609684783 / 12.0)
        l, dl = log_likelihood.evaluateS1(parameters)
        self.assertAlmostEqual(l, -50.508848609684783 / 12.0)
        self.assertAlmostEqual(dl[0], 1.820408163265306 / 12.0)
        self.assertAlmostEqual(dl[1], 3.7000000000000002 / 12.0)
        self.assertAlmostEqual(dl[2], -0.021527777777777774 / 12.0)
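The scaling factor in the last block is the total number of data points, n_times * n_outputs = 4 * 3 = 12; the scaled log-likelihood and its derivatives are simply the unscaled values divided by that count:

n_times, n_outputs = 4, 3
unscaled_value = -50.508848609684783
scaled_value = unscaled_value / (n_times * n_outputs)
print(scaled_value)  # matches log_likelihood(parameters) above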
Example #15
def Algorithm1WithConvergence(L,
                              N,
                              model,
                              log_prior,
                              log_prior_used,
                              times,
                              noise,
                              noise_used,
                              MCMCmethod,
                              param=0):
    time_start = time.time()
    sum_p = 0
    sum_p_theta = 0
    sum_p_y = 0
    c = 1
    for i in range(c):
        print(i)
        res1 = []
        res2 = []
        thetatildeArray = []  # np.empty(N, dtype=float)
        ytildeArray = []  # np.empty(N, dtype=float)

        d = 0
        for n in range(N):
            print(n)
            thetatilde = log_prior.sample(n=1)[0]
            org_values = model.simulate(thetatilde, times)
            ytilde_n = org_values + np.random.normal(0, noise,
                                                     org_values.shape)
            problem = pints.SingleOutputProblem(model, times, ytilde_n)
            log_likelihood_used = pints.GaussianKnownSigmaLogLikelihood(
                problem, [noise_used])
            log_posterior = pints.LogPosterior(log_likelihood_used,
                                               log_prior_used)
            # Start from thetatilde

            xs = [thetatilde, thetatilde * 1.01, thetatilde * 0.99]
            isinf = False
            for x in xs:
                if math.isinf(log_posterior.evaluateS1(x)[0]):
                    isinf = True
                    d += 1
                    break
            if isinf:
                print('isinf:', isinf)
                continue
            # Run Markov chain L steps from thetatilde
            mcmc = pints.MCMCController(log_posterior,
                                        len(xs),
                                        xs,
                                        method=MCMCmethod)
            # Add stopping criterion
            sample_size = 3000

            mcmc.set_max_iterations(sample_size)

            # Start adapting after sample_size//4 iterations
            mcmc.set_initial_phase_iterations(sample_size // 4)

            # Disable logging mode
            mcmc.set_log_to_screen(False)
            chains = mcmc.run()
            s = sample_size // 4 + 1
            b = False
            while s < sample_size:
                chains_cut = chains[:, sample_size // 4:s + 1]
                # HMC: chains_cut = chains[:, 0:s + 1]
                rhat = pints.rhat(chains_cut)
                s += 1
                if rhat[0] < 1.05:
                    print('converge')
                    b = True
                    break
            if not b:
                d += 1
                continue

            print(s)
            thetatilde_n = chains[0][(s + sample_size) // 2 - 1]
            print(thetatilde)
            thetatildeArray.append(thetatilde_n[param])
            ytildeArray.append(ytilde_n[param])
            res1.append((thetatilde_n[param], ytilde_n[param]))

        thetaArray = np.empty(N - d, dtype=float)
        yArray = np.empty(N - d, dtype=float)

        for n in range(N - d):
            theta_n = log_prior.sample(n=1)[0]
            org_values = model.simulate(theta_n, times)
            y_n = org_values + np.random.normal(0, noise, org_values.shape)
            thetaArray[n] = theta_n[param]
            yArray[n] = y_n[param]
            res2.append((theta_n[param], y_n[param]))

        p = ks2d2s(thetatildeArray, ytildeArray, thetaArray, yArray)
        statistic_theta, p_theta = ks_2samp(thetatildeArray, thetaArray)
        statistic_y, p_y = ks_2samp(ytildeArray, yArray)
        sum_p += p
        sum_p_theta += p_theta
        sum_p_y += p_y
    time_end = time.time()
    duration = time_end - time_start

    average_p = sum_p / c
    average_p_theta = sum_p_theta / c
    average_p_y = sum_p_y / c
    print('average_p:', average_p)
    print('average_p_theta:', average_p_theta)
    print('average_p_y:', average_p_y)
    return average_p, average_p_theta, average_p_y, duration, thetatildeArray, thetaArray, ytildeArray, yArray
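ks_2samp is scipy's one-dimensional two-sample KS test; ks2d2s is not defined in this snippet and is presumably a two-dimensional analogue (for example, ks2d2s from the ndtest package). A minimal usage sketch of the scipy call:

import numpy as np
from scipy.stats import ks_2samp

rng = np.random.default_rng(0)
a = rng.normal(0, 1, 200)
b = rng.normal(0, 1, 200)
statistic, p_value = ks_2samp(a, b)
print(statistic, p_value)  # large p: same distribution is plausible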
Example #16
    @classmethod
    def setUpClass(cls):

        # Number of samples: Make this as small as possible to speed up testing
        n_samples = 300

        # Create toy model (single output)
        cls.model = toy.LogisticModel()
        cls.real_parameters = [0.015, 500]
        cls.times = np.linspace(0, 1000, 100)  # small problem
        cls.values = cls.model.simulate(cls.real_parameters, cls.times)

        # Add noise
        cls.noise = 10
        cls.values += np.random.normal(0, cls.noise, cls.values.shape)
        cls.real_parameters.append(cls.noise)
        cls.real_parameters = np.array(cls.real_parameters)

        # Create an object with links to the model and time series
        cls.problem = pints.SingleOutputProblem(cls.model, cls.times,
                                                cls.values)

        # Create a uniform prior over both the parameters and the new noise
        # variable
        cls.lower = [0.01, 400, cls.noise * 0.1]
        cls.upper = [0.02, 600, cls.noise * 100]
        cls.log_prior = pints.UniformLogPrior(cls.lower, cls.upper)

        # Create a log likelihood
        cls.log_likelihood = pints.GaussianLogLikelihood(cls.problem)

        # Create an un-normalised log-posterior (log-likelihood + log-prior)
        cls.log_posterior = pints.LogPosterior(cls.log_likelihood,
                                               cls.log_prior)

        # Run MCMC
        cls.x0 = [
            cls.real_parameters * 1.1, cls.real_parameters * 0.9,
            cls.real_parameters * 1.05
        ]
        mcmc = pints.MCMCController(cls.log_posterior, 3, cls.x0)
        mcmc.set_max_iterations(n_samples)
        mcmc.set_log_to_screen(False)
        cls.samples = mcmc.run()

        # Create toy model (multi-output)
        cls.model2 = toy.LotkaVolterraModel()
        cls.real_parameters2 = cls.model2.suggested_parameters()
        cls.times2 = cls.model2.suggested_times()[::10]  # downsample it
        cls.values2 = cls.model2.simulate(cls.real_parameters2, cls.times2)

        # Add noise
        cls.noise2 = 0.05
        cls.values2 += np.random.normal(0, cls.noise2, cls.values2.shape)

        # Create an object with links to the model and time series
        cls.problem2 = pints.MultiOutputProblem(cls.model2, cls.times2,
                                                np.log(cls.values2))

        # Create a uniform prior over the model parameters (the noise level
        # is known here)
        cls.log_prior2 = pints.UniformLogPrior([0, 0, 0, 0], [6, 6, 6, 6])
        # Create a log likelihood
        cls.log_likelihood2 = pints.GaussianKnownSigmaLogLikelihood(
            cls.problem2, cls.noise2)

        # Create an un-normalised log-posterior (log-likelihood + log-prior)
        cls.log_posterior2 = pints.LogPosterior(cls.log_likelihood2,
                                                cls.log_prior2)

        # Run MCMC
        cls.x02 = [
            cls.real_parameters2 * 1.1, cls.real_parameters2 * 0.9,
            cls.real_parameters2 * 1.05
        ]
        mcmc = pints.MCMCController(cls.log_posterior2, 3, cls.x02)
        mcmc.set_max_iterations(n_samples)
        mcmc.set_log_to_screen(False)
        cls.samples2 = mcmc.run()

        # Create toy model (single-output, single-parameter)
        cls.real_parameters3 = [0]
        cls.log_posterior3 = toy.GaussianLogPDF(cls.real_parameters3, [1])
        cls.lower3 = [-3]
        cls.upper3 = [3]

        # Run MCMC
        cls.x03 = [[1], [-2], [3]]
        mcmc = pints.MCMCController(cls.log_posterior3, 3, cls.x03)
        mcmc.set_max_iterations(n_samples)
        mcmc.set_log_to_screen(False)
        cls.samples3 = mcmc.run()
Example #17
    set_ion=info.ions_conc,
    transform=transform_to_model_param,
    temperature=273.15 + info.temperature,  # K
)

LogPrior = {
    'model_A': priors.ModelALogPrior,
    'model_B': priors.ModelBLogPrior,
}

# Update protocol
model.set_fixed_form_voltage_protocol(protocol, protocol_times)

# Create Pints stuffs
problem = pints.SingleOutputProblem(model, times, data)
loglikelihood = pints.GaussianKnownSigmaLogLikelihood(problem, noise_sigma)
logprior = LogPrior[info_id](transform_to_model_param,
                             transform_from_model_param)
logposterior = pints.LogPosterior(loglikelihood, logprior)

# Check logposterior is working fine
priorparams = np.copy(info.base_param)
transform_priorparams = transform_from_model_param(priorparams)
print('Score at prior parameters: ', logposterior(transform_priorparams))
for _ in range(10):
    assert logposterior(transform_priorparams) == \
        logposterior(transform_priorparams)

# Run
try:
    N = int(sys.argv[2])
Example #18
def Algorithm1Rankstat(N,
                       L,
                       Ldashdash,
                       model,
                       log_prior,
                       log_prior_used,
                       times,
                       noise,
                       noise_used,
                       MCMCmethod,
                       param=0):
    time_start = time.time()
    sum_p = 0
    sum_p_theta = 0
    sum_p_y = 0

    rankstats = []

    d = 0
    for n in range(N):
        print(n)
        thetaArray = np.empty(L, dtype=float)
        thetatilde = log_prior.sample(n=1)[0]
        org_values = model.simulate(thetatilde, times)
        ytilde_n = org_values + np.random.normal(0, noise, org_values.shape)
        problem = pints.SingleOutputProblem(model, times, ytilde_n)
        log_likelihood_used = pints.GaussianKnownSigmaLogLikelihood(
            problem, [noise_used])
        log_posterior = pints.LogPosterior(log_likelihood_used, log_prior_used)
        for l in range(L):
            #Run Markov chain L steps from thetatilde
            xs = [thetatilde]
            mcmc = pints.MCMCController(log_posterior,
                                        1,
                                        xs,
                                        method=MCMCmethod)
            # Add stopping criterion
            sample_size = Ldashdash + 1
            mcmc.set_max_iterations(sample_size)

            # Start adapting after sample_size//4 iterations
            mcmc.set_initial_phase_iterations(sample_size // 4)

            # Disable logging mode
            mcmc.set_log_to_screen(False)

            chain = mcmc.run()[0]
            theta_l = chain[Ldashdash]

            thetaArray[l] = theta_l[param]

        rankstat = rank_statistic(thetaArray, thetatilde)
        rankstats.append(rankstat)

    current_date_and_time = datetime.now()
    current_date_and_time_string = str(current_date_and_time)

    # Ranks take values 0..L; under a well-calibrated sampler the histogram
    # is uniform with expected bin height N / (L + 1)
    plt.hist(rankstats, bins=range(L + 2), align='left')
    plt.axhline(y=N / (L + 1), color='r', linestyle='-')
    plt.axhline(y=binom.ppf(0.005, N, 1 / (L + 1)), color='b')
    plt.axhline(y=binom.ppf(0.995, N, 1 / (L + 1)), color='b')
    plt.savefig('./Gandy2020AlgorithmRankstat' + current_date_and_time_string +
                '.png',
                dpi=500,
                bbox_inches='tight')
    time_end = time.time()
    print('total running time', time_end - time_start)

    plt.show()
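rank_statistic is not defined in this snippet. In simulation-based calibration the usual choice is the rank of the prior draw among the L posterior samples; a hypothetical sketch consistent with the calls above (the name and behaviour are assumptions, not the original helper):

import numpy as np

def rank_statistic(samples, truth):
    # Hypothetical: rank of `truth` among `samples`, in 0..len(samples)
    truth = np.asarray(truth).ravel()[0]  # scalar, e.g. thetatilde[param]
    return int(np.sum(np.asarray(samples) < truth))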