Example #1
    def _initialise(self):
        """
        Initialises the optimiser for the first iteration.
        """
        assert (not self._running)

        # Create boundary transform, or use manual boundary checking
        self._manual_boundaries = False
        self._boundary_transform = None
        if isinstance(self._boundaries, pints.RectangularBoundaries):
            self._boundary_transform = pints.TriangleWaveTransform(
                self._boundaries)
        elif self._boundaries is not None:
            self._manual_boundaries = True

        # Shorthands
        d = self._n_parameters
        n = self._population_size

        # Learning rates
        # TODO Allow changing before run() with method call
        self._eta_mu = 1
        # TODO Allow changing before run() with method call
        self._eta_A = 0.6 * (3 + np.log(d)) * d**-1.5

        # Pre-calculated utilities
        self._us = np.maximum(0, np.log(n / 2 + 1) - np.log(1 + np.arange(n)))
        self._us /= np.sum(self._us)
        self._us -= 1 / n

        # Center of distribution
        self._mu = np.array(self._x0, copy=True)

        # Initial square root of covariance matrix
        self._A = np.eye(d) * self._sigma0

        # Identity matrix of appropriate size
        self._I = np.eye(d)

        # Update optimiser state
        self._running = True
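
The pre-calculated utilities above implement xNES's rank-based fitness shaping: the i-th utility is proportional to max(0, log(n/2 + 1) - log(i)), normalised to sum to one and then shifted by 1/n so that all utilities together sum to zero. A minimal standalone sketch (plain numpy, with a toy population size of n = 6) shows the resulting weights:

    import numpy as np

    # Rank-based utilities for a toy population of n = 6, mirroring the
    # pre-calculated utilities in _initialise() above
    n = 6
    us = np.maximum(0, np.log(n / 2 + 1) - np.log(1 + np.arange(n)))
    us /= np.sum(us)   # Normalise the positive part to sum to 1
    us -= 1 / n        # Shift so that all utilities sum to zero

    print(us)          # Decreasing with rank; best samples get positive weight
    print(us.sum())    # ~0 (up to floating point error)

The best-ranked samples receive positive utility and the worst negative, so the natural gradient steps pull the search distribution towards the better half of the population.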
Example #2
    def _initialise(self):
        """
        Initialises the optimiser for the first iteration.
        """
        assert (not self._running)

        # Create boundary transform, or use manual boundary checking
        self._manual_boundaries = False
        self._boundary_transform = None
        if isinstance(self._boundaries, pints.RectangularBoundaries):
            self._boundary_transform = pints.TriangleWaveTransform(
                self._boundaries)
        elif self._boundaries is not None:
            self._manual_boundaries = True

        # Parent generation population size
        # The parameter parent_pop_size is the mu used in the papers: the
        # size of the parent population used to update the parameters.
        self._parent_pop_size = self._population_size // 2

        # Raw weights, decreasing log-linearly with rank; rescaled below so
        # that the sum of all positive weights is 1
        self._W = 1 + np.arange(self._population_size)
        self._W = np.log(0.5 * (self._population_size + 1)) - np.log(self._W)

        # Variance effective selection mass of the parent weights: the
        # squared sum of the first parent_pop_size weights divided by the
        # sum of their squares
        self._muEff = (np.sum(self._W[:self._parent_pop_size])**2 /
                       np.sum(np.square(self._W[:self._parent_pop_size])))

        # Variance effective selection mass of the remaining (non-parent)
        # weights
        self._muEffMinus = (np.sum(self._W[self._parent_pop_size:])**2 /
                            np.sum(np.square(self._W[self._parent_pop_size:])))

        # Cumulation: evolution paths, used to update the covariance matrix
        # and sigma
        self._pc = np.zeros(self._n_parameters)
        self._psig = np.zeros(self._n_parameters)

        # Learning rate for the mean
        self._cm = 1

        # Decay rate of the evolution path for C
        self._ccov = (4 + self._muEff / self._n_parameters) / (
            self._n_parameters + 4 + 2 * self._muEff / self._n_parameters)

        # Decay rate of the evolution path for sigma
        self._csig = (2 + self._muEff) / (self._n_parameters + 5 + self._muEff)

        # See rank-1 vs rank-mu updates
        # Learning rate for rank-1 update
        self._c1 = 2 / ((self._n_parameters + 1.3)**2 + self._muEff)

        # Learning rate for rank-mu update
        self._cmu = min(
            2 * (self._muEff - 2 + 1 / self._muEff) /
            ((self._n_parameters + 2)**2 + self._muEff), 1 - self._c1)

        # Damping of the step-size (sigma0) update
        self._dsig = 1 + self._csig + 2 * max(
            0,
            np.sqrt((self._muEff - 1) / (self._n_parameters + 1)) - 1)

        # Parameters from Table 1 of [1]
        alpha_mu = 1 + self._c1 / self._cmu
        alpha_mueff = 1 + 2 * self._muEffMinus / (self._muEff + 2)
        alpha_pos_def = \
            (1 - self._c1 - self._cmu) / (self._n_parameters * self._cmu)

        # Rescale the weights
        sum_pos = np.sum(self._W[self._W > 0])
        sum_neg = np.sum(self._W[self._W < 0])
        scale_pos = 1 / sum_pos
        scale_neg = min(alpha_mu, alpha_mueff, alpha_pos_def) / -sum_neg
        self._W[self._W > 0] *= scale_pos
        self._W[self._W < 0] *= scale_neg

        # Update optimiser state
        self._running = True
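
The block at the end follows the active-CMA-ES weight rescaling: positive weights are normalised to sum to one, and negative weights are scaled by the smallest of the three alpha terms so that the rank-mu update cannot destroy positive definiteness. A toy check (plain numpy, with hypothetical sizes: population 8, parent population 4, 3 parameters) reproduces the arithmetic outside the class:

    import numpy as np

    # Toy check of the weight construction above: population size 8, parent
    # population 4, 3 parameters (all hypothetical values)
    n_pop, n_parents, d = 8, 4, 3
    W = np.log(0.5 * (n_pop + 1)) - np.log(1 + np.arange(n_pop))

    # Variance effective selection masses of parent and remaining weights
    mu_eff = np.sum(W[:n_parents]) ** 2 / np.sum(np.square(W[:n_parents]))
    mu_eff_minus = np.sum(W[n_parents:]) ** 2 / np.sum(np.square(W[n_parents:]))

    # Rank-1 and rank-mu learning rates, as in _initialise() above
    c1 = 2 / ((d + 1.3) ** 2 + mu_eff)
    cmu = min(2 * (mu_eff - 2 + 1 / mu_eff) / ((d + 2) ** 2 + mu_eff), 1 - c1)

    # Scaling limits from Table 1 of the referenced paper
    alpha_mu = 1 + c1 / cmu
    alpha_mueff = 1 + 2 * mu_eff_minus / (mu_eff + 2)
    alpha_pos_def = (1 - c1 - cmu) / (d * cmu)

    # Rescale: positive weights sum to 1, negative weights are bounded
    W[W > 0] /= np.sum(W[W > 0])
    W[W < 0] *= min(alpha_mu, alpha_mueff, alpha_pos_def) / -np.sum(W[W < 0])

    print(np.sum(W[W > 0]))  # 1.0
    print(np.sum(W[W < 0]))  # -min(alpha_mu, alpha_mueff, alpha_pos_def)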
Example #3
    def run(self):
        """See :meth:`Optimiser.run()`."""

        # Default search parameters
        # TODO Allow changing before run() with method call
        parallel = True

        # Search is terminated after max_iter iterations
        # TODO Allow changing before run() with method call
        max_iter = 10000

        # Or if the result doesn't change significantly for a while
        # TODO Allow changing before run() with method call
        max_unchanged_iterations = 100
        # TODO Allow changing before run() with method call
        min_significant_change = 1e-11
        # TODO Allow changing before run() with method call
        unchanged_iterations = 0

        # Parameter space dimension
        d = self._dimension

        # Population size
        # TODO Allow changing before run() with method call
        # If parallel, round up to a multiple of the reported number of cores
        n = 4 + int(3 * np.log(d))
        if parallel:
            cpu_count = multiprocessing.cpu_count()
            n = (((n - 1) // cpu_count) + 1) * cpu_count

        # Set up progress reporting in verbose mode
        nextMessage = 0
        if self._verbose:
            if parallel:
                print('Running in parallel mode with population size ' +
                      str(n))
            else:
                print('Running in sequential mode with population size ' +
                      str(n))

        # Apply wrapper to implement boundaries
        if self._boundaries is None:

            def xtransform(x):
                return x
        else:
            xtransform = pints.TriangleWaveTransform(self._boundaries)

        # Create evaluator object
        if parallel:
            evaluator = pints.ParallelEvaluator(self._function)
        else:
            evaluator = pints.SequentialEvaluator(self._function)

        # Learning rates
        # TODO Allow changing before run() with method call
        eta_mu = 1
        # TODO Allow changing before run() with method call
        eta_A = 0.6 * (3 + np.log(d)) * d**-1.5

        # Pre-calculated utilities
        us = np.maximum(0, np.log(n / 2 + 1) - np.log(1 + np.arange(n)))
        us /= np.sum(us)
        us -= 1 / n

        # Center of distribution
        mu = np.array(self._x0, copy=True)

        # Initial square root of covariance matrix
        A = np.eye(d) * self._sigma0

        # Identity matrix for later use
        I = np.eye(d)

        # Best solution found
        xbest = mu
        fbest = float('inf')

        # Start running
        for iteration in range(1, 1 + max_iter):

            # Create new samples
            zs = np.array([np.random.normal(0, 1, d) for i in range(n)])
            xs = np.array([mu + np.dot(A, zs[i]) for i in range(n)])

            # Evaluate at the samples
            fxs = evaluator.evaluate(xtransform(xs))

            # Order the normalised samples according to the scores
            order = np.argsort(fxs)
            zs = zs[order]

            # Update center
            Gd = np.dot(us, zs)
            mu += eta_mu * np.dot(A, Gd)

            # Update best if needed
            if fxs[order[0]] < fbest:

                # Check if this counts as a significant change
                fnew = fxs[order[0]]
                if np.abs(fnew - fbest) < min_significant_change:
                    unchanged_iterations += 1
                else:
                    unchanged_iterations = 0

                # Update best
                xbest = xs[order[0]]
                fbest = fnew

            else:
                unchanged_iterations += 1

            # Show progress in verbose mode:
            if self._verbose and iteration >= nextMessage:
                print(str(iteration) + ': ' + str(fbest))
                if iteration < 3:
                    nextMessage = iteration + 1
                else:
                    nextMessage = 20 * (1 + iteration // 20)

            # Stop if no change for too long
            if unchanged_iterations >= max_unchanged_iterations:
                if self._verbose:
                    print('Halting: No significant change for ' +
                          str(unchanged_iterations) + ' iterations.')
                break

            # Update the square root of the covariance matrix with a
            # multiplicative natural gradient step: A <- A expm(0.5 eta_A Gm)
            Gm = np.dot(np.array([np.outer(z, z).T - I for z in zs]).T, us)
            A = np.dot(A, scipy.linalg.expm(0.5 * eta_A * Gm))

        # Show stopping criterion
        if self._verbose and unchanged_iterations < max_unchanged_iterations:
            print('Halting: Maximum iterations reached.')

        # Get final score at mu
        fmu = self._function(xtransform(mu))
        if fmu < fbest:
            if self._verbose:
                print('Final score at mu beats best sample')
            xbest = mu
            fbest = fmu

        # Show final value
        if self._verbose:
            print(str(iteration) + ': ' + str(fbest))

        # Return best solution
        return xtransform(xbest), fbest
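
The covariance update at the bottom of the loop is xNES's multiplicative natural gradient step: the square root A is post-multiplied by the matrix exponential of half the learning rate times Gm, so A always remains a valid square root of the search covariance A A^T. A small self-contained sketch of a single update (toy values only: d = 3, n = 6, eta_A = 0.1, sigma0 = 0.5):

    import numpy as np
    import scipy.linalg

    # One xNES covariance update on random data, mirroring the Gm / expm
    # step in run() above (toy values: d = 3, n = 6, eta_A = 0.1)
    d, n, eta_A = 3, 6, 0.1
    us = np.maximum(0, np.log(n / 2 + 1) - np.log(1 + np.arange(n)))
    us = us / np.sum(us) - 1 / n

    A = np.eye(d) * 0.5                  # Initial square root of covariance
    zs = np.random.normal(0, 1, (n, d))  # Normalised samples, assumed sorted

    I = np.eye(d)
    Gm = np.dot(np.array([np.outer(z, z).T - I for z in zs]).T, us)
    A = np.dot(A, scipy.linalg.expm(0.5 * eta_A * Gm))

    # The updated search covariance is A A^T
    print(np.dot(A, A.T))

Because the step multiplies A by a matrix exponential, the covariance A A^T stays symmetric positive definite without any explicit eigendecomposition or repair step.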
Example #4
    def _initialise(self):
        """
        Initialises the optimiser for the first iteration.
        """
        assert (not self._running)

        # Initialize swarm
        self._xs = []  # Particle coordinate vectors
        self._vs = []  # Particle velocity vectors
        self._fl = []  # Best local score
        self._pl = []  # Best local position

        # Set initial positions
        self._xs.append(np.array(self._x0, copy=True))
        if self._boundaries is not None:
            # Attempt to sample n - 1 points from the boundaries
            try:
                self._xs.extend(
                    self._boundaries.sample(self._population_size - 1))
            except NotImplementedError:
                # Not all boundaries implement sampling
                pass
        # If we couldn't sample from the boundaries, use gaussian sampling
        # around x0.
        if len(self._xs) < self._population_size:
            for i in range(1, self._population_size):
                self._xs.append(np.random.normal(self._x0, self._sigma0))
        self._xs = np.array(self._xs, copy=True)

        # Set initial velocities
        for i in range(self._population_size):
            self._vs.append(1e-1 * self._sigma0 *
                            np.random.uniform(0, 1, self._n_parameters))

        # Set initial scores and local best
        for i in range(self._population_size):
            self._fl.append(float('inf'))
            self._pl.append(self._xs[i])

        # Set global best position and score
        self._fg = float('inf')
        self._pg = self._xs[0]

        # Create boundary transform, or use manual boundary checking
        self._manual_boundaries = False
        self._boundary_transform = None
        if isinstance(self._boundaries, pints.RectangularBoundaries):
            self._boundary_transform = pints.TriangleWaveTransform(
                self._boundaries)
        elif self._boundaries is not None:
            self._manual_boundaries = True

        # Create safe xs to pass to user
        if self._boundary_transform is not None:
            # Rectangular boundaries? Then apply transform to xs
            self._xs = self._boundary_transform(self._xs)
        if self._manual_boundaries:
            # Manual boundaries? Then filter out out-of-bounds points from xs
            self._user_ids = np.nonzero(
                [self._boundaries.check(x) for x in self._xs])
            self._user_xs = self._xs[self._user_ids]
            if len(self._user_xs) == 0:  # pragma: no cover
                self._logger.warning(
                    'All initial PSO particles are outside the boundaries.')
        else:
            self._user_xs = np.array(self._xs, copy=True)

        # Set user points as read-only
        self._user_xs.setflags(write=False)

        # Set local/global exploration balance
        self.set_local_global_balance()

        # Update optimiser state
        self._running = True
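
In the manual-boundary branch above, np.nonzero over the list of check() results selects the indices of in-bounds particles, and those rows become the read-only points handed to the user. A toy illustration (the Disc class below is a hypothetical stand-in for a non-rectangular pints boundary):

    import numpy as np

    class Disc:
        """Hypothetical non-rectangular boundary: the open unit disc."""
        def check(self, x):
            return np.sum(x ** 2) < 1.0

    boundaries = Disc()
    xs = np.random.normal(0, 1, (5, 2))  # 5 particles, 2 parameters

    # Same filtering as in _initialise(): keep only in-bounds particles
    user_ids = np.nonzero([boundaries.check(x) for x in xs])
    user_xs = xs[user_ids]
    user_xs.setflags(write=False)        # Pass to the user as read-only

    print(len(user_xs), 'of', len(xs), 'particles are inside the boundaries')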