Example No. 1
0
    def optimise(self,
                 initial_parameter: np.ndarray,
                 number_of_iterations: int = 10) -> None:
        """Minimise the objective function over the parameter space.

        The optimisation is repeated ``number_of_iterations`` times from
        the same starting point and the element-wise median of the
        resulting estimates is stored in ``self.estimated_parameters``.

        Arguments:
            initial_parameter {np.ndarray} -- Starting point in parameter
            space of the optimisation algorithm.
            number_of_iterations {int} -- How often the optimisation is
            repeated. Default: 10.

        Return:
            None
        """
        # combine the configured error functions into a single objective
        total_error = pints.SumOfErrors(self.error_function_container)

        # configure the optimisation controller once; it is re-run below
        controller = pints.OptimisationController(
            function=total_error,
            x0=initial_parameter,
            sigma0=self.initial_parameter_uncertainty,
            boundaries=self.parameter_boundaries,
            method=self.optimiser)

        # repeat the optimisation, keeping only the parameter estimates
        # (the second value returned by run() is the score, unused here)
        repeated_estimates = [controller.run()[0]
                              for _ in range(number_of_iterations)]

        # robust summary: element-wise median across all runs
        self.estimated_parameters = np.median(a=repeated_estimates, axis=0)
Example No. 2
0
    def __init__(self, cell, protocols, transformation=None, cap_filter=True):
        """Build a weighted sum-of-errors objective over several protocols.

        Each protocol contributes a root-mean-squared error on its own
        single-output problem; every component is weighted by the inverse
        of its recording's current range so that protocols with large
        currents do not dominate the sum.
        """
        # Fall back to the identity transformation when none is supplied
        if transformation is None:
            transformation = transformations.NullTransformation()
        self._transformation = transformation

        # One problem, error function and weight per protocol
        self._problems = []
        error_functions = []
        range_weights = []
        for protocol in protocols:

            # Protocol 6 is loaded from sampled values, all others as
            # Myokit protocol objects
            loaded = (data.load_protocol_values(protocol) if protocol == 6
                      else data.load_myokit_protocol(protocol))

            # Forward model: protocol 7 is the sine-wave protocol, and
            # protocols below 6 admit an analytical solution
            forward_model = model.Model(
                loaded,
                cells.reversal_potential(cells.temperature(cell)),
                sine_wave=(protocol == 7),
                analytical=(protocol < 6),
                start_steady=False)

            # Experimental recording for this cell/protocol pair
            log = data.load(cell, protocol, cap_filter=cap_filter)
            recorded_current = log['current']

            # Wrap model and data in a single-output problem
            problem = pints.SingleOutputProblem(
                forward_model, log.time(), recorded_current)
            self._problems.append(problem)

            # Error function for this protocol
            error_functions.append(pints.RootMeanSquaredError(problem))

            # Weight inversely proportional to the current's dynamic range
            range_weights.append(
                1 / (np.max(recorded_current) - np.min(recorded_current)))

        # Combined, range-weighted objective
        self._f = pints.SumOfErrors(error_functions, range_weights)
Example No. 3
0
    def find_optimal_parameter(self, initial_parameter:np.ndarray, number_of_iterations:int=5) -> None:
        """Run the optimiser repeatedly and keep the best-scoring result.

        The objective is the sum of the configured error functions; the run
        with the smallest score provides the parameter estimates stored in
        ``self.estimated_parameters`` and ``self.objective_score``.

        Arguments:
            initial_parameter {np.ndarray} -- Starting point in parameter space of the optimisation algorithm.
            number_of_iterations {int} -- Number of times optimisation is run. Default: 5 (arbitrary).

        Return:
            None
        """
        # fall back to an arbitrary spread around the start point when the
        # GUI did not specify an initial uncertainty
        if self.initial_parameter_uncertainty is None:
            # TODO: evaluate how to choose uncertainty best, to obtain most stable results
            self.initial_parameter_uncertainty = initial_parameter + 0.1  # arbitrary

        # total objective: sum of the individual error functions
        total_error = pints.SumOfErrors(self.error_function_container)

        # configure the optimisation controller once; it is re-run below
        controller = pints.OptimisationController(
            function=total_error,
            x0=initial_parameter,
            sigma0=self.initial_parameter_uncertainty,
            boundaries=self.parameter_boundaries,
            method=self.optimiser)

        # collect (estimates, score) pairs from the repeated runs
        results = [controller.run() for _ in range(number_of_iterations)]
        scores = [score for _, score in results]

        # keep the run that achieved the minimal score
        best = int(np.argmin(scores))
        self.estimated_parameters = results[best][0]
        self.objective_score = results[best][1]
Example No. 4
0
    def test_sum_of_errors(self):
        # Tests :class:`pints.SumOfErrors`.

        # Component error measures: three on 3-parameter mini problems,
        # one (e4) on a problem whose model produces bad values.
        e1 = pints.SumOfSquaresError(MiniProblem())
        e2 = pints.MeanSquaredError(MiniProblem())
        e3 = pints.RootMeanSquaredError(BigMiniProblem())
        e4 = pints.SumOfSquaresError(BadMiniProblem())

        # Basic use
        # An unweighted sum equals the plain sum of its components.
        e = pints.SumOfErrors([e1, e2])
        x = [0, 0, 0]
        self.assertEqual(e.n_parameters(), 3)
        self.assertEqual(e(x), e1(x) + e2(x))
        # Explicit weights scale each component.
        e = pints.SumOfErrors([e1, e2], [3.1, 4.5])
        x = [0, 0, 0]
        self.assertEqual(e.n_parameters(), 3)
        self.assertEqual(e(x), 3.1 * e1(x) + 4.5 * e2(x))
        # The same measure with weights 1..6 sums to 21 * e1.
        e = pints.SumOfErrors([e1, e1, e1, e1, e1, e1], [1, 2, 3, 4, 5, 6])
        self.assertEqual(e.n_parameters(), 3)
        self.assertEqual(e(x), e1(x) * 21)
        self.assertNotEqual(e(x), 0)

        # Non-finite component values: suppress numpy warnings while
        # checking how inf/nan propagate through the weighted sum.
        with np.errstate(all='ignore'):
            e = pints.SumOfErrors([e4, e1, e1, e1, e1, e1],
                                  [10, 1, 1, 1, 1, 1])
            self.assertEqual(e.n_parameters(), 3)
            self.assertEqual(e(x), float('inf'))
            e = pints.SumOfErrors([e4, e1, e1, e1, e1, e1], [0, 2, 0, 2, 0, 2])
            self.assertEqual(e.n_parameters(), 3)
            # NOTE(review): assertTrue treats the second argument as the
            # failure *message*, so this line only checks that e(x) is
            # truthy -- assertEqual was probably intended. Verify whether
            # e(x) really equals 6 * e1(x) here (the zero-weighted
            # infinite e4 term may yield nan) before changing it.
            self.assertTrue(e(x), 6 * e1(x))
            e5 = pints.SumOfSquaresError(BadMiniProblem(float('-inf')))
            e = pints.SumOfErrors([e1, e5, e1], [2.1, 3.4, 6.5])
            self.assertTrue(np.isinf(e(x)))
            e = pints.SumOfErrors([e4, e5, e1], [2.1, 3.4, 6.5])
            self.assertTrue(np.isinf(e(x)))
            # e5 is rebound to a nan-producing problem; it is used again
            # in the final two checks of this with-block.
            e5 = pints.SumOfSquaresError(BadMiniProblem(float('nan')))
            e = pints.SumOfErrors(
                [BadErrorMeasure(float('inf')),
                 BadErrorMeasure(float('inf'))], [1, 1])
            self.assertEqual(e(x), float('inf'))
            # inf + (-inf) gives nan
            e = pints.SumOfErrors([
                BadErrorMeasure(float('inf')),
                BadErrorMeasure(float('-inf'))
            ], [1, 1])
            self.assertTrue(np.isnan(e(x)))
            # any nan component makes the whole sum nan
            e = pints.SumOfErrors(
                [BadErrorMeasure(5),
                 BadErrorMeasure(float('nan'))], [1, 1])
            self.assertTrue(np.isnan(e(x)))
            e = pints.SumOfErrors([e1, e5, e1], [2.1, 3.4, 6.5])
            self.assertTrue(np.isnan(e(x)))
            e = pints.SumOfErrors([e4, e5, e1], [2.1, 3.4, 6.5])
            self.assertTrue(np.isnan(e(x)))

        # Wrong number of ErrorMeasures
        self.assertRaises(ValueError, pints.SumOfErrors, [], [])

        # Wrong argument types
        self.assertRaises(TypeError, pints.SumOfErrors, [e1, e1], [e1, 1])
        self.assertRaises(ValueError, pints.SumOfErrors, [e1, 3], [2, 1])

        # Mismatching sizes
        self.assertRaises(ValueError, pints.SumOfErrors, [e1, e1, e1], [1, 1])

        # Mismatching problem dimensions
        self.assertRaises(ValueError, pints.SumOfErrors, [e1, e1, e3],
                          [1, 2, 3])

        # Single-output derivatives
        # evaluateS1 should return the weighted sum of the component
        # derivatives: with weights [1, 2], dy == dy1 + 2 * dy2.
        model = pints.toy.ConstantModel(1)
        times = [1, 2, 3]
        p1 = pints.SingleOutputProblem(model, times, [1, 1, 1])
        p2 = pints.SingleOutputProblem(model, times, [2, 2, 2])
        e1 = pints.SumOfSquaresError(p1)
        e2 = pints.SumOfSquaresError(p2)
        e = pints.SumOfErrors([e1, e2], [1, 2])
        x = [4]
        y, dy = e.evaluateS1(x)
        self.assertEqual(y, e(x))
        self.assertEqual(dy.shape, (1, ))
        y1, dy1 = e1.evaluateS1(x)
        y2, dy2 = e2.evaluateS1(x)
        self.assertTrue(np.all(dy == dy1 + 2 * dy2))

        # Multi-output derivatives
        # Same check for a 2-output model: derivative has shape (2,).
        model = pints.toy.ConstantModel(2)
        times = [1, 2, 3]
        p1 = pints.MultiOutputProblem(model, times, [[3, 2], [1, 7], [3, 2]])
        p2 = pints.MultiOutputProblem(model, times, [[2, 3], [3, 4], [5, 6]])
        e1 = pints.SumOfSquaresError(p1)
        e2 = pints.SumOfSquaresError(p2)
        e = pints.SumOfErrors([e1, e2], [1, 2])
        x = [4, -2]
        y, dy = e.evaluateS1(x)
        self.assertEqual(y, e(x))
        self.assertEqual(dy.shape, (2, ))
        y1, dy1 = e1.evaluateS1(x)
        y2, dy2 = e2.evaluateS1(x)
        self.assertTrue(np.all(dy == dy1 + 2 * dy2))