Example #1
    def test_n_parameters(self):
        # Create error measure
        error = pints.ProbabilityBasedError(self.problem)

        # Get number of parameters
        n_parameters = error.n_parameters()

        # Check number of parameters
        self.assertEqual(n_parameters, 3)
Example #2
    def test_call(self):
        # Create error measure
        error = pints.ProbabilityBasedError(self.problem)

        # Evaluate error measure for test parameters
        test_parameters = [1, 2, 3]
        score = error(test_parameters)

        # Check that error returns expected value
        self.assertEqual(score, -10)
Example #3
    def test_probability_based_error(self):
        """ Tests :class:`pints.ProbabilityBasedError`. """

        p = MiniLogPDF()
        e = pints.ProbabilityBasedError(p)
        self.assertEqual(e.n_parameters(), 3)
        self.assertEqual(e([1, 2, 3]), -10)
        p = MiniProblem()
        self.assertRaises(ValueError, pints.ProbabilityBasedError, p)

        # Test derivatives
        x = [1, 2, 3]
        y, dy = e.evaluateS1(x)
        self.assertEqual(y, e(x))
        self.assertEqual(dy.shape, (3, ))
        self.assertTrue(np.all(dy == [-1, -2, -3]))
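The MiniLogPDF and MiniProblem helpers used above are test doubles defined elsewhere in the test module. Below is a minimal sketch of what they might look like, inferred from the assertions: ProbabilityBasedError(f) evaluates to -f(x) and evaluateS1 negates the gradient, which is why the test expects -10 and [-1, -2, -3].

import numpy as np
import pints


class MiniLogPDF(pints.LogPDF):
    # Toy log-pdf: constant value 10 with gradient [1, 2, 3]
    def __call__(self, x):
        return 10

    def n_parameters(self):
        return 3

    def evaluateS1(self, x):
        # Return the value and its partial derivatives
        return self(x), np.array([1, 2, 3])


class MiniProblem(object):
    # Not a pints.LogPDF, so ProbabilityBasedError must raise ValueError
    def n_parameters(self):
        return 3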
Example #4
    def test_evaluateS1(self):
        # Create error measure
        error = pints.ProbabilityBasedError(self.problem)

        # Evaluate error measure and sensitivities for test parameters
        test_parameters = [1, 2, 3]
        score, deriv = error.evaluateS1(test_parameters)

        # Check that error returns expected value
        self.assertEqual(score, error(test_parameters))

        # Check dimension of partial derivatives
        self.assertEqual(deriv.shape, (3, ))

        # Check that partials are computed correctly
        self.assertEqual(deriv[0], -1)
        self.assertEqual(deriv[1], -2)
        self.assertEqual(deriv[2], -3)
Example #5
    def __init__(self,
                 function,
                 x0,
                 sigma0=None,
                 boundaries=None,
                 method=None):

        # Convert x0 to vector
        # This converts e.g. (1, 7) shapes to (7, ), giving users a bit more
        # freedom with the exact shape passed in. For example, to allow the
        # output of LogPrior.sample(1) to be passed in.
        x0 = pints.vector(x0)

        # Check dimension of x0 against function
        if function.n_parameters() != len(x0):
            raise ValueError(
                'Starting point must have same dimension as function to'
                ' optimise.')

        # Check if minimising or maximising
        self._minimising = not isinstance(function, pints.LogPDF)

        # Store function
        if self._minimising:
            self._function = function
        else:
            self._function = pints.ProbabilityBasedError(function)
        del function

        # Create optimiser
        if method is None:
            method = pints.CMAES
        elif not issubclass(method, pints.Optimiser):
            raise ValueError('Method must be subclass of pints.Optimiser.')
        self._optimiser = method(x0, sigma0, boundaries)

        # Logging
        self._log_to_screen = True
        self._log_filename = None
        self._log_csv = False
        self.set_log_interval()

        # Parallelisation
        self._parallel = False
        self._n_workers = 1
        self.set_parallel()

        #
        # Stopping criteria
        #

        # Maximum iterations
        self._max_iterations = None
        self.set_max_iterations()

        # Maximum unchanged iterations
        self._max_unchanged_iterations = None
        self._min_significant_change = 1
        self.set_max_unchanged_iterations()

        # Threshold value
        self._threshold = None

        # Post-run statistics
        self._evaluations = None
        self._iterations = None
        self._time = None
Example #6
    def __init__(
            self, function, x0, sigma0=None, boundaries=None,
            transformation=None, method=None):

        # Convert x0 to vector
        # This converts e.g. (1, 7) shapes to (7, ), giving users a bit more
        # freedom with the exact shape passed in. For example, to allow the
        # output of LogPrior.sample(1) to be passed in.
        x0 = pints.vector(x0)

        # Check dimension of x0 against function
        if function.n_parameters() != len(x0):
            raise ValueError(
                'Starting point must have same dimension as function to'
                ' optimise.')

        # Check if minimising or maximising
        self._minimising = not isinstance(function, pints.LogPDF)

        # Apply a transformation (if given). From this point onward the
        # optimiser will see only the transformed search space and will know
        # nothing about the model parameter space.
        if transformation is not None:
            # Convert error measure or log pdf
            if self._minimising:
                function = transformation.convert_error_measure(function)
            else:
                function = transformation.convert_log_pdf(function)

            # Convert initial position
            x0 = transformation.to_search(x0)

            # Convert sigma0, if provided
            if sigma0 is not None:
                sigma0 = transformation.convert_standard_deviation(sigma0, x0)
            if boundaries:
                boundaries = transformation.convert_boundaries(boundaries)

        # Store transformation for later detransformation: if using a
        # transformation, any parameters logged to the filesystem or printed to
        # screen should be detransformed first!
        self._transformation = transformation

        # Store function
        if self._minimising:
            self._function = function
        else:
            self._function = pints.ProbabilityBasedError(function)
        del function

        # Create optimiser
        if method is None:
            method = pints.CMAES
        elif not issubclass(method, pints.Optimiser):
            raise ValueError('Method must be subclass of pints.Optimiser.')
        self._optimiser = method(x0, sigma0, boundaries)

        # Check if sensitivities are required
        self._needs_sensitivities = self._optimiser.needs_sensitivities()

        # Track optimiser's f_best or f_guessed
        self._use_f_guessed = None
        self.set_f_guessed_tracking()

        # Logging
        self._log_to_screen = True
        self._log_filename = None
        self._log_csv = False
        self.set_log_interval()

        # Parallelisation
        self._parallel = False
        self._n_workers = 1
        self.set_parallel()

        # User callback
        self._callback = None

        # :meth:`run` can only be called once
        self._has_run = False

        #
        # Stopping criteria
        #

        # Maximum iterations
        self._max_iterations = None
        self.set_max_iterations()

        # Maximum unchanged iterations
        self._unchanged_max_iterations = None  # n_iter w/o change until stop
        self._unchanged_threshold = 1          # smallest significant f change
        self.set_max_unchanged_iterations()

        # Maximum evaluations
        self._max_evaluations = None

        # Threshold value
        self._threshold = None

        # Post-run statistics
        self._evaluations = None
        self._iterations = None
        self._time = None
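Examples #5 and #6 show the main role of ProbabilityBasedError inside PINTS: the optimisation controller only ever minimises, so when it is handed a pints.LogPDF to maximise, it wraps it so that minimising the wrapper maximises the original. A short sketch of that equivalence (the toy distribution is just an assumption; any pints.LogPDF would do):

import numpy as np
import pints
import pints.toy

# A log-pdf we would normally want to maximise
log_pdf = pints.toy.GaussianLogPDF(mean=[0, 0], sigma=[1, 1])

# The controller's internal wrapping, done by hand
error = pints.ProbabilityBasedError(log_pdf)

x = np.array([0.5, -0.5])
# The wrapper simply negates the log-pdf value, turning the
# maximisation problem into the minimisation the optimisers expect
assert error(x) == -log_pdf(x)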
Example #7
        # Create a new log-likelihood function (adds an extra parameter!)
        log_likelihood = pints.GaussianLogLikelihood(problem)

        # Create a new prior
        large = 1e9
        param_prior = pints.MultivariateGaussianLogPrior(
            mu_0, large * np.eye(len(mu_0)))
        noise_prior = pints.UniformLogPrior([lower_bounds[-1]],
                                            [upper_bounds[-1]])
        log_prior = pints.ComposedLogPrior(param_prior, noise_prior)

        # Create a posterior log-likelihood (log(likelihood * prior))
        log_posterior = pints.LogPosterior(log_likelihood, log_prior)
        log_posteriors.append(log_posterior)
        score = pints.ProbabilityBasedError(log_posterior)

        if synthetic:
            found_parameters = list(true_parameters) + [noise]
        else:
            found_parameters, found_value = pints.optimise(score,
                                                           x0,
                                                           sigma0,
                                                           boundaries,
                                                           method=pints.CMAES)

        sampler = pints.AdaptiveCovarianceMCMC(found_parameters)
        samplers.append(sampler)

    with open(pickle_file, 'wb') as f:
        pickle.dump((samplers, log_posteriors), f)
else: