Example #1
    def test_sum_of_squares_error_multi(self):
        """ Tests :class:`pints.MeanSquaredError` with multiple outputs. """

        # Set up problem
        model = pints.toy.ConstantModel(2)
        times = [1, 2, 3]
        values = [[1, 4], [1, 4], [1, 4]]
        p = pints.MultiOutputProblem(model, times, values)

        # Test
        e = pints.SumOfSquaresError(p)
        self.assertEqual(e.n_parameters(), 2)
        # The error should evaluate to a plain float
        float(e([1, 2]))
        self.assertEqual(e([1, 2]), 0)  # 3*(0^2+0^2) = 0
        self.assertEqual(e([2, 2]), 3)  # 3*(1^2+0^2) = 3
        self.assertEqual(e([2, 3]), 15)  # 3*(1^2+2^2) = 15
        self.assertEqual(e([3, 4]), 60)  # 3*(2^2+4^2) = 60

        # Derivatives
        values = np.array([[1, 4], [2, 7], [3, 10]])
        p = pints.MultiOutputProblem(model, times, values)
        e = pints.SumOfSquaresError(p)
        x = [1, 2]

        # Model outputs are 3 times [1,4]
        # Model derivatives are 3 times [[1, 0], [0, 2]]
        y, dy = p.evaluateS1(x)
        self.assertTrue(np.all(y == p.evaluate(x)))
        self.assertTrue(np.all(y[0, :] == [1, 4]))
        self.assertTrue(np.all(y[1, :] == [1, 4]))
        self.assertTrue(np.all(y[2, :] == [1, 4]))
        self.assertTrue(np.all(dy[0, :] == [[1, 0], [0, 2]]))
        self.assertTrue(np.all(dy[1, :] == [[1, 0], [0, 2]]))
        self.assertTrue(np.all(dy[2, :] == [[1, 0], [0, 2]]))

        # Check residuals
        rx = y - np.array(values)
        self.assertTrue(np.all(rx == np.array([[-0, -0], [-1, -3], [-2, -6]])))
        self.assertAlmostEqual(e(x), np.sum(rx**2))

        # Now with derivatives
        ex, dex = e.evaluateS1(x)

        # Check error
        self.assertTrue(np.all(ex == e(x)))

        # Check derivatives. Shape is (parameters, )
        self.assertEqual(dex.shape, (2, ))

        # Residuals are: [[0, 0], [-1, -3], [-2, -6]]
        # Derivatives are: [[1, 0], [0, 2]]
        # dex1 is: 2 * (0 - 1 - 2) * 1 = 2 * -3 * 1 = -6
        # dex2 is: 2 * (0 - 3 - 6) * 2 = 2 * -9 * 2 = -36
        self.assertEqual(dex[0], -6)
        self.assertEqual(dex[1], -36)
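The gradient bookkeeping above is just the chain rule: for E(x) = sum over times and outputs of the squared residuals, dE/dx_j = 2 * sum r_ti * dy_ti/dx_j. A minimal numpy-only check of the arithmetic (the diag(1, 2) sensitivity matrix reflects the ConstantModel outputs [x1, 2 * x2]):

import numpy as np

# Residuals (times x outputs) and the per-time sensitivity matrix
# (outputs x parameters), as in the test above
r = np.array([[0, 0], [-1, -3], [-2, -6]])
dy = np.array([[1, 0], [0, 2]])

# Gradient of sum(r ** 2): 2 * sum over times and outputs of r * dy
grad = 2 * np.einsum('ti,ij->j', r, dy)
print(grad)  # [-6, -36]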
Example #2
    def test_gaussian_integrated_uniform_log_likelihood_multi(self):
        # Tests GaussianIntegratedUniformLogLikelihood with multi output
        # problem
        model = pints.toy.ConstantModel(4)
        parameters = [0, 0, 0, 0]
        times = np.asarray([1, 2, 3])
        model.simulate(parameters, times)
        values = np.asarray([[3.4, 4.3, 22.0, -7.3],
                             [11.1, 12.2, 13.9, 5.0],
                             [-0.4, -12.3, -8.3, -1.2]])
        problem = pints.MultiOutputProblem(model, times, values)
        log_likelihood = pints.GaussianIntegratedUniformLogLikelihood(
            problem, 2, 4)
        self.assertAlmostEqual(log_likelihood(parameters), -75.443307614807225)

        # test non-equal prior limits
        model = pints.toy.ConstantModel(4)
        parameters = [0, 0, 0, 0]
        times = np.asarray([1, 2, 3])
        model.simulate(parameters, times)
        values = np.asarray([[3.4, 4.3, 22.0, -7.3],
                             [11.1, 12.2, 13.9, 5.0],
                             [-0.4, -12.3, -8.3, -1.2]])
        problem = pints.MultiOutputProblem(model, times, values)
        log_likelihood = pints.GaussianIntegratedUniformLogLikelihood(
            problem, [1, 0, 5, 2], [2, 4, 7, 8])
        self.assertAlmostEqual(log_likelihood(parameters), -71.62076263891457)

        # test incorrect constructors
        model = pints.toy.ConstantModel(2)
        parameters = [0, 0]
        times = np.asarray([1, 2, 3])
        model.simulate(parameters, times)
        values = [[1, 2],
                  [3, 4],
                  [5, 6]]
        problem = pints.MultiOutputProblem(model, times, values)
        self.assertRaises(ValueError,
                          pints.GaussianIntegratedUniformLogLikelihood,
                          problem, 2, 2)
        self.assertRaises(ValueError,
                          pints.GaussianIntegratedUniformLogLikelihood,
                          problem, [1, 2, 3], [2, 4])
        self.assertRaises(ValueError,
                          pints.GaussianIntegratedUniformLogLikelihood,
                          problem, [1, 2], [2, 4, 5])
        self.assertRaises(ValueError,
                          pints.GaussianIntegratedUniformLogLikelihood,
                          problem, [1, 3], [2, 2])
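A short sketch of the broadcast behaviour these constructor tests rely on (an assumption suggested, though not proven, by the cases above: scalar bounds are applied to every output):

import numpy as np
import pints
import pints.toy

model = pints.toy.ConstantModel(4)
times = np.asarray([1, 2, 3])
values = np.ones((3, 4))
problem = pints.MultiOutputProblem(model, times, values)

# Scalar bounds and explicit per-output bounds should agree
ll_a = pints.GaussianIntegratedUniformLogLikelihood(problem, 2, 4)
ll_b = pints.GaussianIntegratedUniformLogLikelihood(problem, [2] * 4, [4] * 4)
x = [0, 0, 0, 0]
print(np.isclose(ll_a(x), ll_b(x)))  # expected: True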
Example #3
    def optimise(self, data, sigma_fac=0.001, method="minimisation"):
        cmaes_problem = pints.MultiOutputProblem(self, self.frequency_range,
                                                 data)
        if method == "likelihood":
            score = pints.GaussianLogLikelihood(cmaes_problem)
            sigma = sigma_fac * np.sum(data) / 2 * len(data)
            lower_bound = [self.param_bounds[x][0]
                           for x in self.params] + [0.1 * sigma] * 2
            upper_bound = [self.param_bounds[x][1]
                           for x in self.params] + [10 * sigma] * 2
            CMAES_boundaries = pints.RectangularBoundaries(
                lower_bound, upper_bound)
            random_init = abs(np.random.rand(self.n_parameters()))
            x0 = self.change_norm_group(random_init, "un_norm",
                                        "list") + [sigma] * 2
            cmaes_fitting = pints.OptimisationController(
                score,
                x0,
                sigma0=None,
                boundaries=CMAES_boundaries,
                method=pints.CMAES)
        elif method == "minimisation":
            score = pints.SumOfSquaresError(cmaes_problem)
            lower_bound = [self.param_bounds[x][0] for x in self.params]
            upper_bound = [self.param_bounds[x][1] for x in self.params]
            CMAES_boundaries = pints.RectangularBoundaries(
                lower_bound, upper_bound)
            random_init = abs(np.random.rand(self.n_parameters()))
            x0 = self.change_norm_group(random_init, "un_norm", "list")
            cmaes_fitting = pints.OptimisationController(
                score,
                x0,
                sigma0=None,
                boundaries=CMAES_boundaries,
                method=pints.CMAES)
        else:
            raise ValueError(
                "method must be 'likelihood' or 'minimisation'")
        cmaes_fitting.set_max_unchanged_iterations(iterations=200,
                                                   threshold=1e-7)
        # cmaes_fitting.set_log_to_screen(False)
        cmaes_fitting.set_parallel(True)

        found_parameters, found_value = cmaes_fitting.run()

        if method == "likelihood":
            sim_params = found_parameters[:-2]
            sim_data = self.simulate(sim_params, self.frequency_range)
        else:
            found_value = -found_value
            sim_params = found_parameters
            sim_data = self.simulate(sim_params, self.frequency_range)
            """

            log_score = pints.GaussianLogLikelihood(cmaes_problem)
            stds=self.get_std(data, sim_data)
            sigma=sigma_fac*np.sum(data)/2*len(data)
            score_params=list(found_parameters)+[sigma]*2
            found_value=log_score(score_params)
            print(stds, found_value, "stds")"""

        # TODO: do it dimensionally; normalise default to bound
        return found_parameters, found_value, cmaes_fitting._optimiser._es.sm.C, sim_data
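Stripped of the class-specific bookkeeping (parameter normalisation, bound lookups, the private covariance access), the method above follows the standard PINTS fitting pattern. A minimal sketch with a toy model standing in for the impedance model, self.frequency_range and data:

import numpy as np
import pints
import pints.toy

# Toy stand-ins for the model and data
model = pints.toy.FitzhughNagumoModel()
times = np.linspace(0, 20, 200)
data = model.simulate([0.1, 0.5, 3], times)

problem = pints.MultiOutputProblem(model, times, data)
score = pints.SumOfSquaresError(problem)
boundaries = pints.RectangularBoundaries([0, 0, 0], [10, 10, 10])
x0 = boundaries.sample(1)[0]

controller = pints.OptimisationController(
    score, x0, boundaries=boundaries, method=pints.CMAES)
controller.set_max_unchanged_iterations(iterations=200, threshold=1e-7)
controller.set_parallel(True)
found_parameters, found_value = controller.run()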
Example #4
    def test_evaluateS1_two_dim_array_multi_weighted(self):
        # Create an object with links to the model and time series
        problem = pints.MultiOutputProblem(self.model_multi, self.times,
                                           self.data_multi)

        # Create error measure with weighted inputs
        weights = [1, 2]
        error = pints.SumOfSquaresError(problem, weights=weights)

        # Evaluate likelihood for test parameters
        test_parameters = [3, 4]
        score, deriv = error.evaluateS1(test_parameters)

        # Check that returned error is correct
        self.assertEqual(score, error(test_parameters))

        # Check that partial derivatives are returned for each parameter
        self.assertEqual(deriv.shape, (2, ))

        # Check that partials are correct
        # Expected: [weights[0] * 2 * sum(input[0] - 1),
        #            weights[1] * 4 * sum(2 * input[1] - 4)]
        self.assertEqual(deriv[0],
                         weights[0] * 2 * 3 * (test_parameters[0] - 1))
        self.assertEqual(deriv[1],
                         weights[1] * 4 * 3 * (2 * test_parameters[1] - 4))

    def test_evaluateS1_two_dim_array_multi(self):
        # Create an object with links to the model and time series
        problem = pints.MultiOutputProblem(self.model_multi, self.times,
                                           self.data_multi)

        # Create log_likelihood
        log_likelihood = pkpd.ConstantAndMultiplicativeGaussianLogLikelihood(
            problem)

        # Evaluate likelihood for test parameters
        test_parameters = [
            2.0, 2.0, 2.0, 0.5, 0.5, 0.5, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0
        ]
        score, deriv = log_likelihood.evaluateS1(test_parameters)

        # Check that likelihood score agrees with call
        self.assertAlmostEqual(score, log_likelihood(test_parameters))

        # Check that number of partials is correct
        self.assertEqual(deriv.shape, (12, ))

        # Check that partials are computed correctly
        self.assertAlmostEqual(deriv[0], 8.585990509232376)
        self.assertAlmostEqual(deriv[1], -1.6726936107293917)
        self.assertAlmostEqual(deriv[2], -0.6632862192355309)
        self.assertAlmostEqual(deriv[3], 5.547071959874058)
        self.assertAlmostEqual(deriv[4], -0.2868738955802226)
        self.assertAlmostEqual(deriv[5], 0.1813851785335695)
        self.assertAlmostEqual(deriv[6], 8.241803503682762)
        self.assertAlmostEqual(deriv[7], -1.82731103999105)
        self.assertAlmostEqual(deriv[8], 2.33264086991343)
        self.assertAlmostEqual(deriv[9], 11.890409042744405)
        self.assertAlmostEqual(deriv[10], -1.3181262877783717)
        self.assertAlmostEqual(deriv[11], 1.3018716574264304)
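When the analytic partials asserted in these tests are in doubt, a central finite difference gives a quick independent check. A sketch (the step size h is an arbitrary choice):

import numpy as np

def finite_difference_gradient(f, x, h=1e-6):
    # Central-difference approximation to the gradient of a scalar function
    x = np.asarray(x, dtype=float)
    grad = np.empty_like(x)
    for i in range(len(x)):
        e = np.zeros_like(x)
        e[i] = h
        grad[i] = (f(x + e) - f(x - e)) / (2 * h)
    return grad

# Usage against any pints error measure or log-likelihood f:
#     score, deriv = f.evaluateS1(x)
#     assert np.allclose(deriv, finite_difference_gradient(f, x), atol=1e-4)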
Example #6
    def __init__(self, models: List[m.MultiOutputModel], times: List[np.ndarray], values: List[np.ndarray]):
        """Initialises a multi-output inference problem with default objective function pints.SumOfSquaresError and
        default optimiser pints.CMAES. Standard deviation in initial starting point of optimisation as well as
        restricted domain of support for inferred parameters is disabled by default.

        Arguments:
            models {List[m.MultiOutputModel]} -- Models, which parameters are to be inferred.
            times {List[np.ndarray]} -- Times of data points for the different models.
            values {List[np.ndarray]} -- State values of data points for the different models.

        Return:
            None
        """
        # initialise problem container
        self.problem_container = []
        for model_id, model in enumerate(models):
            self.problem_container.append(pints.MultiOutputProblem(model, times[model_id], values[model_id]))

        # initialise error function container
        self.error_function_container = []
        for problem in self.problem_container:
            self.error_function_container.append(pints.SumOfSquaresError(problem))

        # initialise optimiser
        self.optimiser = pints.CMAES

        # initialise fluctuations around starting point of optimisation
        self.initial_parameter_uncertainty = None

        # initialise parameter constraints
        self.parameter_boundaries = None

        # initialise outputs
        self.estimated_parameters = None
        self.objective_score = None
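The container pairs each model with its own pints.MultiOutputProblem and pints.SumOfSquaresError. To optimise across all models at once, the per-problem errors can be combined, for example with pints.SumOfErrors; a sketch under the assumption that all models share one parameter vector:

import numpy as np
import pints
import pints.toy

# Two toy problems standing in for self.problem_container
model = pints.toy.LogisticModel()
times = np.linspace(0, 100, 50)
values = model.simulate([0.015, 500], times)
problems = [pints.SingleOutputProblem(model, times, values) for _ in range(2)]
errors = [pints.SumOfSquaresError(p) for p in problems]

# Combine the per-model errors into a single objective
total_error = pints.SumOfErrors(errors)
print(total_error([0.015, 500]))  # ~0 on noise-free data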
Example #7
    def _problem(self):
        import numpy as np
        import pints
        import pints.toy

        # Load a forward model
        model = pints.toy.ActionPotentialModel()

        # Create some toy data
        xtrue = model.suggested_parameters()
        times = model.suggested_times()
        values = model.simulate(xtrue, times)

        # Add noise
        values[:, 0] += np.random.normal(0, 1, values[:, 0].shape)
        values[:, 1] += np.random.normal(0, 5e-7, values[:, 1].shape)

        # Create problem and a weighted score function
        problem = pints.MultiOutputProblem(model, times, values)
        weights = [1 / 70, 1 / 0.000006]
        score = pints.SumOfSquaresError(problem, weights=weights)

        # Select some boundaries
        lower = xtrue - 2
        upper = xtrue + 2
        boundaries = pints.RectangularBoundaries(lower, upper)

        # Select a random starting point
        x0 = boundaries.sample(1)[0]

        # Select an initial sigma
        sigma0 = (1 / 6) * boundaries.range()

        return score, xtrue, x0, sigma0, boundaries

    def test_ar1(self):
        # single outputs
        model = pints.toy.ConstantModel(1)
        parameters = [0]
        times = np.asarray([1, 2, 3])
        model.simulate(parameters, times)
        values = np.asarray([1.0, -10.7, 15.5])
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.AR1LogLikelihood(problem)
        self.assertAlmostEqual(log_likelihood([0, 0.5, 5]),
                               -19.706737485492436)

        # multiple outputs
        model = pints.toy.ConstantModel(4)
        parameters = [0, 0, 0, 0]
        times = np.arange(1, 5)
        model.simulate(parameters, times)
        values = np.asarray([[3.5, 7.6, 8.5, 3.4], [1.1, -10.3, 15.6, 5.5],
                             [-10, -30.5, -5, 7.6], [-12, -10.1, -4, 2.3]])
        problem = pints.MultiOutputProblem(model, times, values)
        log_likelihood = pints.AR1LogLikelihood(problem)
        # Test AR1Logpdf((3.5,1.1,-10, -12)|mean=0, rho=0.5, sigma=1) +
        #      AR1Logpdf((7.6,-10.3,-30.5, -10.1)|mean=0, rho=-0.25, sigma=3) +
        #      AR1Logpdf((8.5,15.6,-5, -4)|mean=0, rho=0.9, sigma=10) +
        #      AR1Logpdf((3.4,5.5,7.6, 2.3)|mean=0, rho=0.0, sigma=2)
        #      = -109.4752924909364 -93.58199 - 18.3833..
        #        -16.4988
        self.assertAlmostEqual(
            log_likelihood(parameters +
                           [0.5, 1.0, -0.25, 3.0, 0.9, 10.0, 0.0, 2.0]),
            -237.93936126949615)
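The calling convention in this test is that per-output noise parameters are appended after the model parameters, here as one (rho, sigma) pair per output. A small sketch of assembling such a vector:

model_parameters = [0, 0, 0, 0]
noise_parameters = [(0.5, 1.0), (-0.25, 3.0), (0.9, 10.0), (0.0, 2.0)]
x = list(model_parameters) + [p for pair in noise_parameters for p in pair]
# x == [0, 0, 0, 0, 0.5, 1.0, -0.25, 3.0, 0.9, 10.0, 0.0, 2.0]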
Example #9
    def _problem(self):
        import numpy as np
        import pints
        import pints.toy

        # Create a model
        model = pints.toy.FitzhughNagumoModel()

        # Run a simulation
        xtrue = [0.1, 0.5, 3]
        times = np.linspace(0, 20, 200)
        values = model.simulate(xtrue, times)

        # Add some noise
        sigma = 0.5
        noisy = values + np.random.normal(0, sigma, values.shape)

        # Create problem
        problem = pints.MultiOutputProblem(model, times, noisy)
        score = pints.SumOfSquaresError(problem)

        # Select boundaries
        boundaries = pints.RectangularBoundaries([0, 0, 0], [10, 10, 10])

        # Select a random starting point
        x0 = boundaries.sample(1)[0]

        # Select an initial sigma
        sigma0 = (1 / 6) * boundaries.range()

        return score, xtrue, x0, sigma0, boundaries

    def test_arma11(self):
        model = pints.toy.ConstantModel(1)
        parameters = [0]
        times = np.asarray([1, 2, 3, 4])
        model.simulate(parameters, times)
        values = np.asarray([3, -4.5, 10.5, 0.3])
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.ARMA11LogLikelihood(problem)
        self.assertAlmostEqual(log_likelihood([0, 0.9, -0.4, 1]),
                               -171.53031588534171)

        # multiple outputs
        model = pints.toy.ConstantModel(4)
        parameters = [0, 0, 0, 0]
        times = np.arange(1, 5)
        model.simulate(parameters, times)
        values = np.asarray([[3.5, 7.6, 8.5, 3.4], [1.1, -10.3, 15.6, 5.5],
                             [-10, -30.5, -5, 7.6], [-12, -10.1, -4, 2.3]])
        problem = pints.MultiOutputProblem(model, times, values)
        log_likelihood = pints.ARMA11LogLikelihood(problem)
        # ARMA11Logpdf((3.5,1.1,-10, -12)|mean=0, rho=0.5, phi=0.34, sigma=1) +
        # ARMA11Logpdf((7.6,-10.3,-30.5, -10.1)|
        #              mean=0, rho=-0.25, phi=0.1, sigma=3) +
        # ARMA11Logpdf((8.5,15.6,-5, -4)|mean=0, rho=0.9, phi=0.0, sigma=10) +
        # ARMA11Logpdf((3.4,5.5,7.6, 2.3)|mean=0, rho=0.0, phi=0.9, sigma=2)
        #      = -116.009 - 74.94 - 14.32 - 8.88
        self.assertAlmostEqual(
            log_likelihood(parameters + [
                0.5, 0.34, 1.0, -0.25, 0.1, 3.0, 0.9, 0.0, 10.0, 0.0, 0.9, 2.0
            ]), -214.17034137601107)

    def test_evaluateS1_gaussian_log_likelihood_agrees_multi(self):
        # Create an object with links to the model and time series
        problem = pints.MultiOutputProblem(self.model_multi, self.times,
                                           self.data_multi)

        # Create CombinedGaussianLL and GaussianLL
        log_likelihood = pkpd.ConstantAndMultiplicativeGaussianLogLikelihood(
            problem)
        gauss_log_likelihood = pints.GaussianLogLikelihood(problem)

        # Check that CombinedGaussianLL agrees with GaussianLoglikelihood when
        # sigma_rel = 0 and sigma_base = sigma
        test_parameters = [
            2.0, 2.0, 2.0, 0.5, 0.5, 0.5, 1.1, 1.1, 1.1, 0.0, 0.0, 0.0
        ]
        gauss_test_parameters = [2.0, 2.0, 2.0, 0.5, 0.5, 0.5]
        score, deriv = log_likelihood.evaluateS1(test_parameters)
        gauss_score, gauss_deriv = gauss_log_likelihood.evaluateS1(
            gauss_test_parameters)

        # Check that scores are the same
        self.assertAlmostEqual(score, gauss_score)

        # Check that partials for model params and sigma_base agree
        self.assertAlmostEqual(deriv[0], gauss_deriv[0])
        self.assertAlmostEqual(deriv[1], gauss_deriv[1])
        self.assertAlmostEqual(deriv[2], gauss_deriv[2])
        self.assertAlmostEqual(deriv[3], gauss_deriv[3])
        self.assertAlmostEqual(deriv[4], gauss_deriv[4])
        self.assertAlmostEqual(deriv[5], gauss_deriv[5])

    def test_gaussian_noise_multi(self):
        # Multi-output test for known/unknown Gaussian noise log-likelihood
        # methods.

        model = pints.toy.FitzhughNagumoModel()
        parameters = [0.5, 0.5, 0.5]
        sigma = 0.1
        times = np.linspace(0, 100, 100)
        values = model.simulate(parameters, times)
        values += np.random.normal(0, sigma, values.shape)
        problem = pints.MultiOutputProblem(model, times, values)

        # Test if known/unknown give same result
        l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        l2 = pints.GaussianKnownSigmaLogLikelihood(problem, [sigma, sigma])
        l3 = pints.GaussianLogLikelihood(problem)
        self.assertAlmostEqual(l1(parameters), l2(parameters))
        self.assertAlmostEqual(
            l2(parameters), l3(parameters + [sigma, sigma]))

        # Test invalid constructors
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, 0)
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, -1)
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, [1])
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, [1, 2, 3, 4])
        self.assertRaises(ValueError, pints.GaussianKnownSigmaLogLikelihood,
                          problem, [1, 2, -3])

    def test_multiplicative_gaussian(self):
        # Test single output
        model = pints.toy.ConstantModel(1)
        parameters = [2]
        times = np.asarray([1, 2, 3, 4])
        model.simulate(parameters, times)
        values = np.asarray([1.9, 2.1, 1.8, 2.2])
        problem = pints.SingleOutputProblem(model, times, values)
        log_likelihood = pints.MultiplicativeGaussianLogLikelihood(problem)

        self.assertAlmostEqual(log_likelihood(parameters + [2.0, 1.0]),
                               -9.224056577298253)

        # Test multiple output
        model = pints.toy.ConstantModel(2)
        parameters = [1, 2]
        times = np.asarray([1, 2, 3])
        model.simulate(parameters, times)
        values = np.asarray([[1.1, 0.9, 1.5], [1.5, 2.5, 2.0]]).transpose()
        problem = pints.MultiOutputProblem(model, times, values)
        log_likelihood = pints.MultiplicativeGaussianLogLikelihood(problem)

        self.assertAlmostEqual(
            log_likelihood(parameters + [1.0, 2.0, 1.0, 1.0]),
            -12.176330824267543)

    def test_sum_of_independent_log_pdfs(self):

        # Test single output
        model = pints.toy.LogisticModel()
        x = [0.015, 500]
        sigma = 0.1
        times = np.linspace(0, 1000, 100)
        values = model.simulate(x, times) + 0.1
        problem = pints.SingleOutputProblem(model, times, values)

        l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        l2 = pints.GaussianLogLikelihood(problem)
        ll = pints.SumOfIndependentLogPDFs([l1, l1, l1])
        self.assertEqual(l1.n_parameters(), ll.n_parameters())
        self.assertEqual(3 * l1(x), ll(x))

        # Test single output derivatives
        y, dy = ll.evaluateS1(x)
        self.assertEqual(y, ll(x))
        self.assertEqual(dy.shape, (2, ))
        y1, dy1 = l1.evaluateS1(x)
        self.assertTrue(np.all(3 * dy1 == dy))

        # Wrong number of arguments
        self.assertRaises(TypeError, pints.SumOfIndependentLogPDFs)
        self.assertRaises(ValueError, pints.SumOfIndependentLogPDFs, [l1])

        # Wrong types
        self.assertRaises(ValueError, pints.SumOfIndependentLogPDFs, [l1, 1])
        self.assertRaises(ValueError, pints.SumOfIndependentLogPDFs,
                          [problem, l1])

        # Mismatching dimensions
        self.assertRaises(ValueError, pints.SumOfIndependentLogPDFs, [l1, l2])

        # Test multi-output
        model = pints.toy.FitzhughNagumoModel()
        x = model.suggested_parameters()
        nt = 10
        nx = model.n_parameters()
        times = np.linspace(0, 10, nt)
        values = model.simulate(x, times) + 0.01
        problem = pints.MultiOutputProblem(model, times, values)
        sigma = 0.01
        l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
        ll = pints.SumOfIndependentLogPDFs([l1, l1, l1])
        self.assertEqual(l1.n_parameters(), ll.n_parameters())
        self.assertEqual(3 * l1(x), ll(x))

        # Test multi-output derivatives
        y, dy = ll.evaluateS1(x)

        # Note: y and ll(x) differ a bit, because the solver acts slightly
        # different when evaluating with and without sensitivities!
        self.assertAlmostEqual(y, ll(x), places=3)

        self.assertEqual(dy.shape, (nx, ))
        y1, dy1 = l1.evaluateS1(x)
        self.assertTrue(np.all(3 * dy1 == dy))
Example #15
    def sample(self, x, parallel=False):
        """
        Runs the sampler. This method:
            (1) generates simulated data and adds noise
            (2) sets up the sampler with the method given,
                using a KnownNoiseLogLikelihood and a UniformLogPrior
            (3) runs the sampler
            (4) returns:
                - the calculated rhat value
                - the average of ess across all chains, returning the
                  minimum result across all parameters
                - the total time taken by the sampler
        """

        the_model = self.model()
        values = the_model.simulate(self.real_parameters, self.times)
        value_range = np.max(values) - np.min(values)
        values += np.random.normal(0, self.noise * value_range, values.shape)
        problem = pints.MultiOutputProblem(the_model, self.times, values)
        log_likelihood = pints.KnownNoiseLogLikelihood(
            problem, value_range * self.noise)
        # lower = list(self.lower) + [value_range *
        #                             self.noise / 10.0] * the_model.n_outputs()
        # upper = list(self.upper) + [value_range *
        #                             self.noise * 10] * the_model.n_outputs()
        lower = list(self.lower)
        upper = list(self.upper)
        middle = [0.5 * (u + l) for l, u in zip(lower, upper)]
        sigma = [u - l for l, u in zip(lower, upper)]
        log_prior = pints.UniformLogPrior(lower, upper)
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)
        n_chains = int(x[-1])
        xs = [[
            np.random.uniform() * (u - l) + l for l, u in zip(lower, upper)
        ] for c in range(n_chains)]
        mcmc = pints.MCMCSampling(log_posterior,
                                  n_chains,
                                  xs,
                                  method=self.method)
        for sampler in mcmc.samplers():
            sampler.set_hyper_parameters(x[:-1])
        if parallel:
            mcmc.set_parallel(int(os.environ['OMP_NUM_THREADS']))

        mcmc.set_log_interval(1000)

        start = timer()
        chains = mcmc.run()
        end = timer()

        rhat = np.max(pints._diagnostics.rhat_all_params(chains))
        ess = np.zeros(chains[0].shape[1])
        for chain in chains:
            ess += np.array(pints._diagnostics.effective_sample_size(chain))
        ess /= n_chains
        ess = np.min(ess)
        print('rhat:', rhat)
        print('ess:', ess)
        print('time:', end - start)
        return rhat, ess, end - start
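This snippet uses older PINTS names: pints.KnownNoiseLogLikelihood and pints.MCMCSampling correspond to pints.GaussianKnownSigmaLogLikelihood and pints.MCMCController in recent releases. A minimal sketch of the same setup with the newer names (the toy model, noise level and bounds are placeholders, not part of the original code):

import numpy as np
import pints
import pints.toy

model = pints.toy.LogisticModel()
times = np.linspace(0, 1000, 100)
values = model.simulate([0.015, 500], times)
values += np.random.normal(0, 10, values.shape)

problem = pints.SingleOutputProblem(model, times, values)
log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, 10)
log_prior = pints.UniformLogPrior([0, 0], [1, 1000])
log_posterior = pints.LogPosterior(log_likelihood, log_prior)

x0 = [[0.015, 500], [0.014, 490], [0.016, 510]]
mcmc = pints.MCMCController(
    log_posterior, 3, x0, method=pints.HaarioBardenetACMC)
chains = mcmc.run()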
Example #16
    def test_bad_constructor(self):
        # Create an object with links to the model and time series
        problem = pints.MultiOutputProblem(self.model_multi, self.times,
                                           self.data_multi)

        # Check that an error is raised for multi-output problems
        self.assertRaisesRegex(
            ValueError,
            'This measure is only defined for single output problems.',
            pints.RootMeanSquaredError, problem)
Example #17
    def test_in_problem(self):
        # Tests using a ConstantModel in single and multi-output problems.

        # Single output
        model = pints.toy.ConstantModel(1)
        times = [0, 1, 2, 1000]
        values = [10, 0, 1, 10]
        problem = pints.SingleOutputProblem(model, times, values)
        problem.evaluate([1])

        # Multi output (n=1)
        problem = pints.MultiOutputProblem(model, times, values)
        problem.evaluate([1])

        # Multi output (n=3)
        model = pints.toy.ConstantModel(3)
        times = [0, 1, 2, 1000]
        values = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [8, 7, 6]]
        problem = pints.MultiOutputProblem(model, times, values)
        problem.evaluate([1, 2, 3])
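The expected values throughout this section depend on how pints.toy.ConstantModel scales its outputs: as the asserted scores elsewhere imply (e.g. e([2, 3]) == 15 in Example #1), output i is i times parameter i. A quick check:

import numpy as np
import pints.toy

model = pints.toy.ConstantModel(3)
y = model.simulate([1, 2, 3], np.array([0, 1]))
print(y)  # each row expected to be [1, 4, 9], i.e. [1 * 1, 2 * 2, 3 * 3]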
Example #18
    def test_bad_constructor(self):
        # Create an object with links to the model and time series
        problem = pints.MultiOutputProblem(self.model_single, self.times,
                                           self.data_single)

        # Test invalid weight shape
        weights = [1, 2, 3]
        self.assertRaisesRegex(
            ValueError,
            'Number of weights must match number of problem outputs.',
            pints.SumOfSquaresError, problem, weights)
Example #19
    def optimise(self, x, parallel=False):
        """
        Runs the optimisation. This method:
            (1) generates simulated data and adds noise
            (2) sets up the optimiser with the method given, trying to
                optimise the function f(x) = sum of squared error
            (3) runs the optimisation
            (4) returns:
                - the found parameters x,
                - the ratio of f(x) / f(x_0), where x_0 are the real parameters
                - the total time taken divided by the time taken for a
                  single evaluation of f(x)
        """
        the_model = self.model()
        print('model = ', the_model)
        values = the_model.simulate(self.real_parameters, self.times)
        value_range = np.max(values) - np.min(values)
        values += np.random.normal(0, self.noise * value_range, values.shape)
        problem = pints.MultiOutputProblem(the_model, self.times, values)
        score = pints.SumOfSquaresError(problem)
        middle = [0.5 * (u + l) for l, u in zip(self.lower, self.upper)]
        sigma = [(1.0/6.0)*(u - l) for l, u in zip(self.lower, self.upper)]
        print('sigma = ', sigma)
        boundaries = pints.RectangularBoundaries(self.lower, self.upper)

        optimisation = pints.Optimisation(
            score,
            middle,
            sigma0=sigma,
            boundaries=boundaries,
            method=self.method
        )
        optimisation.optimiser().set_hyper_parameters(x)
        if parallel:
            optimisation.set_parallel(int(os.environ['OMP_NUM_THREADS']))
        else:
            optimisation.set_parallel(False)

        start = timer()
        found_parameters, found_value = optimisation.run()
        end = timer()
        N = 10
        start_score = timer()
        for i in range(N):
            minimum_value = score(self.real_parameters)
        end_score = timer()
        score_duration = (end_score - start_score) / N

        return found_parameters,  \
            found_value / minimum_value, \
            (end - start) / score_duration
Example #20
    def test_call_two_dim_array_multi(self):
        # Create an object with links to the model and time series
        problem = pints.MultiOutputProblem(self.model_multi, self.times,
                                           self.data_multi)

        # Create error measure
        error = pints.SumOfSquaresError(problem)

        # Evaluate likelihood for test parameters
        test_parameters = [3, 4]
        score = error(test_parameters)

        # Check that error returns expected value
        # Exp = sum((input[0] - 1) ** 2) + sum((2 * input[1] - 4) ** 2)
        #     = 3 * (3 - 1) ** 2 + 3 * (2 * 4 - 4) ** 2 = 12 + 48 = 60
        self.assertEqual(score, 60)
Example #21
    def test_basics(self):

        model = pints.toy.FitzhughNagumoModel()
        self.assertEqual(model.n_outputs(), 2)

        times = [0, 1, 2, 3]
        x = [1, 1, 1]
        values = model.simulate(x, times)
        noisy = values + np.array([[0.01, -0.02], [-0.01, -0.02],
                                   [-0.01, 0.02], [0.01, -0.02]])
        problem = pints.MultiOutputProblem(model, times, noisy)

        self.assertTrue(np.all(times == problem.times()))
        self.assertTrue(np.all(noisy == problem.values()))
        self.assertTrue(np.all(values == problem.evaluate(x)))
        self.assertEqual(problem.n_parameters(), model.n_parameters())
        self.assertEqual(problem.n_parameters(), 3)
        self.assertEqual(problem.n_outputs(), model.n_outputs())
        self.assertEqual(problem.n_outputs(), 2)
        self.assertEqual(problem.n_times(), len(times))

        # Test errors
        times[0] = -2
        self.assertRaises(ValueError, pints.MultiOutputProblem, model, times,
                          values)
        times = [1, 2, 2, 1]
        self.assertRaises(ValueError, pints.MultiOutputProblem, model, times,
                          values)
        times = [1, 2, 3]
        self.assertRaises(ValueError, pints.MultiOutputProblem, model, times,
                          values)

        # Single value model is fine too!
        model = pints.toy.LogisticModel()
        self.assertEqual(model.n_outputs(), 1)
        values = model.simulate([1, 1], times)
        pints.MultiOutputProblem(model, times, values)
Example #22
    def test_known_noise_gaussian_single_and_multi(self):
        """
        Tests the output of single-series against multi-series known noise
        log-likelihoods.
        """

        # Define boring 1-output and 2-output models
        class NullModel1(pints.ForwardModel):
            def n_parameters(self):
                return 1

            def simulate(self, x, times):
                return np.zeros(times.shape)

        class NullModel2(pints.ForwardModel):
            def n_parameters(self):
                return 1

            def n_outputs(self):
                return 2

            def simulate(self, x, times):
                return np.zeros((len(times), 2))

        # Create two single output problems
        times = np.arange(10)
        np.random.seed(1)
        sigma1 = 3
        sigma2 = 5
        values1 = np.random.uniform(0, sigma1, times.shape)
        values2 = np.random.uniform(0, sigma2, times.shape)
        model1d = NullModel1()
        problem1 = pints.SingleOutputProblem(model1d, times, values1)
        problem2 = pints.SingleOutputProblem(model1d, times, values2)
        log1 = pints.GaussianKnownSigmaLogLikelihood(problem1, sigma1)
        log2 = pints.GaussianKnownSigmaLogLikelihood(problem2, sigma2)

        # Create one multi output problem
        values3 = np.array([values1, values2]).swapaxes(0, 1)
        model2d = NullModel2()
        problem3 = pints.MultiOutputProblem(model2d, times, values3)
        log3 = pints.GaussianKnownSigmaLogLikelihood(
            problem3, [sigma1, sigma2])

        # Check if we get the right output
        self.assertAlmostEqual(log1(0) + log2(0), log3(0))
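The swapaxes call above turns two 1-d series into the (n_times, n_outputs) array that MultiOutputProblem expects; np.stack along the last axis is an equivalent spelling. A quick check:

import numpy as np

values1 = np.arange(10.0)
values2 = np.arange(10.0) * 2
a = np.array([values1, values2]).swapaxes(0, 1)
b = np.stack([values1, values2], axis=1)
print(a.shape, np.array_equal(a, b))  # (10, 2) True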
Example #23
    def test_call_two_dim_array_multi_weighted(self):
        # Create an object with links to the model and time series
        problem = pints.MultiOutputProblem(self.model_multi, self.times,
                                           self.data_multi)

        # Create error measure with weighted input
        weights = [1, 2]
        error = pints.MeanSquaredError(problem, weights=weights)

        # Evaluate likelihood for test parameters
        test_parameters = [3, 4]
        score = error(test_parameters)

        # Check that error returns expected value
        # exp = (weight[0] * mean(input[0] - 1) ** 2 +
        # weight[1] * mean(2 * input[1] - 4) ** 2) / 2
        self.assertEqual(score, 18)

    def test_call_two_dim_array_multi(self):
        # Create an object with links to the model and time series
        problem = pints.MultiOutputProblem(self.model_multi, self.times,
                                           self.data_multi)

        # Create log_likelihood
        log_likelihood = pkpd.ConstantAndMultiplicativeGaussianLogLikelihood(
            problem)

        # Evaluate likelihood for test parameters
        test_parameters = [
            2.0, 2.0, 2.0, 0.5, 0.5, 0.5, 1.1, 1.1, 1.1, 1.0, 1.0, 1.0
        ]
        score = log_likelihood(test_parameters)

        # Check that likelihood returns expected value
        self.assertAlmostEqual(score, -42.87921520701031)
Example #25
    def setUpClass(cls):
        # Create a single output optimisation toy model
        cls.model1 = toy.LogisticModel()
        cls.real_parameters1 = [0.015, 500]
        cls.times1 = np.linspace(0, 1000, 100)
        cls.values1 = cls.model1.simulate(cls.real_parameters1, cls.times1)

        # Add noise
        cls.noise1 = 50
        cls.values1 += np.random.normal(0, cls.noise1, cls.values1.shape)

        # Set up optimisation problem
        cls.problem1 = pints.SingleOutputProblem(cls.model1, cls.times1,
                                                 cls.values1)

        # Instead of running the optimisation, choose fixed values to serve as
        # the results
        cls.found_parameters1 = np.array([0.0149, 494.6])

        # Create a multiple output MCMC toy model
        cls.model2 = toy.LotkaVolterraModel()
        cls.real_parameters2 = cls.model2.suggested_parameters()
        # Downsample the times for speed
        cls.times2 = cls.model2.suggested_times()[::10]
        cls.values2 = cls.model2.simulate(cls.real_parameters2, cls.times2)

        # Add noise
        cls.noise2 = 0.05
        cls.values2 += np.random.normal(0, cls.noise2, cls.values2.shape)

        # Set up 2-output MCMC problem
        cls.problem2 = pints.MultiOutputProblem(cls.model2, cls.times2,
                                                cls.values2)

        # Instead of running MCMC, generate three chains which actually contain
        # independent samples near the true values (faster than MCMC)
        samples = np.zeros((3, 50, 4))
        for chain_idx in range(3):
            for parameter_idx in range(4):
                if parameter_idx == 0 or parameter_idx == 2:
                    chain = np.random.normal(3.01, .2, 50)
                else:
                    chain = np.random.normal(1.98, .2, 50)
                samples[chain_idx, :, parameter_idx] = chain
        cls.samples2 = samples

    def test_student_t_log_likelihood_multi(self):
        # Multi-output test for Student-t noise log-likelihood methods

        model = pints.toy.ConstantModel(4)
        parameters = [0, 0, 0, 0]
        times = np.arange(1, 4)
        model.simulate(parameters, times)
        values = np.asarray([[3.5, 7.6, 8.5, 3.4], [1.1, -10.3, 15.6, 5.5],
                             [-10, -30.5, -5, 7.6]])
        problem = pints.MultiOutputProblem(model, times, values)
        log_likelihood = pints.StudentTLogLikelihood(problem)
        # Test Student-t_logpdf((3.5,1.1,-10)|mean=0, df=2, scale=13) +
        #      Student-t_logpdf((7.6,-10.3,-30.5)|mean=0, df=1, scale=8) +
        #      Student-t_logpdf((8.5,15.6,-5)|mean=0, df=2.5, scale=13.5) +
        #      Student-t_logpdf((3.4,5.5,7.6)|mean=0, df=3.4, scale=10.5)
        #      = -47.83....
        self.assertAlmostEqual(
            log_likelihood(parameters + [2, 13, 1, 8, 2.5, 13.5, 3.4, 10.5]),
            -47.83720347766945)

    def test_call_gaussian_log_likelihood_agrees_multi(self):
        # Create an object with links to the model and time series
        problem = pints.MultiOutputProblem(self.model_multi, self.times,
                                           self.data_multi)

        # Create CombinedGaussianLL and GaussianLL
        log_likelihood = pkpd.ConstantAndMultiplicativeGaussianLogLikelihood(
            problem)
        gauss_log_likelihood = pints.GaussianLogLikelihood(problem)

        # Check that CombinedGaussianLL agrees with GaussianLoglikelihood when
        # sigma_rel = 0 and sigma_base = sigma
        test_parameters = [
            2.0, 2.0, 2.0, 0.5, 0.5, 0.5, 1.1, 1.1, 1.1, 0.0, 0.0, 0.0
        ]
        gauss_test_parameters = [2.0, 2.0, 2.0, 0.5, 0.5, 0.5]
        score = log_likelihood(test_parameters)
        gauss_score = gauss_log_likelihood(gauss_test_parameters)
        self.assertAlmostEqual(score, gauss_score)

    def test_cauchy_log_likelihood_multi(self):
        # Multi-output test for Cauchy noise log-likelihood methods

        model = pints.toy.ConstantModel(4)
        parameters = [0, 0, 0, 0]
        times = np.arange(1, 4)
        model.simulate(parameters, times)
        values = np.asarray([[3.5, 7.6, 8.5, 3.4], [1.1, -10.3, 15.6, 5.5],
                             [-10, -30.5, -5, 7.6]])
        problem = pints.MultiOutputProblem(model, times, values)
        log_likelihood = pints.CauchyLogLikelihood(problem)
        # Test Cauchy_logpdf((3.5,1.1,-10)|mean=0, scale=13) +
        #      Cauchy_logpdf((7.6,-10.3,-30.5)|mean=0, scale=8) +
        #      Cauchy_logpdf((8.5,15.6,-5)|mean=0, scale=13.5) +
        #      Cauchy_logpdf((3.4,5.5,7.6)|mean=0, scale=10.5)
        #      = -49.51....
        self.assertAlmostEqual(
            log_likelihood(parameters + [13, 8, 13.5, 10.5]),
            -49.51182454195375)
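The expected value in this test can be reproduced independently with scipy's Cauchy logpdf, summing per-output contributions over the three time points (a quick check, assuming scipy is available):

import numpy as np
from scipy.stats import cauchy

values = np.asarray([[3.5, 7.6, 8.5, 3.4], [1.1, -10.3, 15.6, 5.5],
                     [-10, -30.5, -5, 7.6]])
scales = [13, 8, 13.5, 10.5]
total = sum(cauchy.logpdf(values[:, i], loc=0, scale=s).sum()
            for i, s in enumerate(scales))
print(total)  # approximately -49.5118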

    def test_bad_n_output(self):
        # Create multi output model
        model = pints.toy.ConstantModel(3)

        # Generate data
        times = np.array([1, 2, 3, 4])
        data = np.array([[10.7, 3.5, 3.8], [1.1, 3.2, -1.4], [9.3, 0.0, 4.5],
                         [1.2, -3, -10]])

        # Create problem
        problem = pints.MultiOutputProblem(model, times, data)

        # Create "bad" likelihood
        log_likelihood = pints.MultiplicativeGaussianLogLikelihood(problem)

        # Check that an error is thrown when we attempt to fix eta
        eta = 1
        self.assertRaisesRegex(
            ValueError, 'This likelihood wrapper is only defined for a ',
            pkpd.FixedEtaLogLikelihoodWrapper, log_likelihood, eta)
Example #30
    def test_evaluateS1_two_dim_array_multi(self):
        # Create an object with links to the model and time series
        problem = pints.MultiOutputProblem(self.model_multi, self.times,
                                           self.data_multi)

        # Create error measure
        error = pints.MeanSquaredError(problem)

        # Evaluate likelihood for test parameters
        test_parameters = [3, 4]
        score, deriv = error.evaluateS1(test_parameters)

        # Check that returned error is correct
        self.assertEqual(score, error(test_parameters))

        # Check that partial derivatives are returned for each parameter
        self.assertEqual(deriv.shape, (2, ))

        # Check that partials are correct
        # Expectation = [mean(input[0] - 1), 2 * mean(2 * input[1] - 4)]
        self.assertEqual(deriv[0], test_parameters[0] - 1)
        self.assertEqual(deriv[1], 2 * (2 * test_parameters[1] - 4))
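As in the weighted case of Example #4, these partials follow from E = mean(r ** 2) and the chain rule, dE/dp_j = 2 * mean(r * dy/dp_j); the extra factor of two in deriv[1] is the ConstantModel sensitivity of its second output. A numpy-only check of the arithmetic (assuming, as the expected score of 60 in Example #20 implies, fixture data of [1, 4] at three times):

import numpy as np

# Residuals at test_parameters = [3, 4]: output 1 gives 3 - 1 = 2 and
# output 2 gives 2 * 4 - 4 = 4, at each of the three times
r = np.array([[2.0, 4.0]] * 3)
dy = np.array([[1.0, 0.0], [0.0, 2.0]])  # ConstantModel sensitivities

# Gradient of mean(r ** 2) over all six entries
grad = 2 * np.einsum('ti,ij->j', r, dy) / r.size
print(grad)  # [2. 8.] == [p0 - 1, 2 * (2 * p1 - 4)]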