Exemplo n.º 1
0
 def _decorate_objective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     #print ("@", cost, ExtraArgs, max)
     raw = cost  # keep a handle on the undecorated objective
     if ExtraArgs is None: ExtraArgs = ()
     # use the fully-qualified package path; a bare 'python_map' import
     # fails under py3 absolute-import rules (siblings use mystic.python_map)
     from mystic.python_map import python_map
     if self._map != python_map:
         #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'
         from mystic.monitors import Null
         evalmon = Null()
     else: evalmon = self._evalmon
     fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)
     if self._useStrictRange:
         indx = list(self.popEnergy).index(self.bestEnergy)
         ngen = self.generations #XXX: no random if generations=0 ?
         for i in range(self.nPop):
             # compare by value ('=='), not identity ('is'): int identity
             # is a CPython caching detail, unreliable for larger indices
             self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i], (not ngen) or (i == indx))
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     if self._reducer:
        #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' and 'raw' cost function
     self._cost = (cost, raw, ExtraArgs)
     self._live = True
     return cost
Exemplo n.º 2
0
 def _decorate_objective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     #print("@%r %r %r" % (cost, ExtraArgs, max))
     raw = cost  # keep a handle on the undecorated objective
     if ExtraArgs is None: ExtraArgs = ()
     from mystic.python_map import python_map
     if self._map != python_map:
         #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'
         from mystic.monitors import Null
         evalmon = Null()
     else:
         evalmon = self._evalmon
     fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)
     if self._useStrictRange:
         indx = list(self.popEnergy).index(self.bestEnergy)
         ngen = self.generations  #XXX: no random if generations=0 ?
         for i in range(self.nPop):
             # '==' (value equality), not 'is' (identity): identity of ints
             # is an implementation detail and can give False for equal values
             self.population[i] = self._clipGuessWithinRangeBoundary(
                 self.population[i], (not ngen) or (i == indx))
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     if self._reducer:
         #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' and 'raw' cost function
     self._cost = (cost, raw, ExtraArgs)
     self._live = True
     return cost
Exemplo n.º 3
0
 def _decorate_objective(self, cost, ExtraArgs=None):
     """decorate the cost function with bounds, penalties, monitors, etc"""
     #print("@%r %r %r" % (cost, ExtraArgs, max))
     plain_cost = cost  # undecorated objective, retained for later reuse
     if ExtraArgs is None:
         ExtraArgs = ()
     # route every evaluation through the evaluation monitor
     self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
     if self._useStrictRange:
         if self.generations:
             #NOTE: pop[0] was best, may not be after resetting simplex
             new_points = self._setSimplexWithinRangeBoundary()
             for axis, point in enumerate(new_points):
                 vertex = self.population[0].copy()
                 vertex[axis] = point
                 self.population[axis + 1] = vertex
         else:
             # first call: just clip the starting guess into the bounds
             self.population[0] = self._clipGuessWithinRangeBoundary(
                 self.population[0])
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     cost = wrap_nested(cost, self._constraints)
     if self._reducer:
         #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' and 'raw' cost function
     self._cost = (cost, plain_cost, ExtraArgs)
     self._live = True
     return cost
    def _decorate_objective(self, cost, ExtraArgs=None):
        """decorate the cost function with bounds, penalties, monitors, etc

input::
    - cost is the objective function, of the form y = cost(x, *ExtraArgs),
      where x is a candidate solution, and ExtraArgs is the tuple of positional
      arguments required to evaluate the objective."""
        #print("@%r %r %r" % (cost, ExtraArgs, max))
        evalmon = self._evalmon
        raw = cost
        if ExtraArgs is None: ExtraArgs = ()
        self._fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)
        if self._useStrictRange:
            indx = list(self.popEnergy).index(self.bestEnergy)
            ngen = self.generations  #XXX: no random if generations=0 ?
            for i in range(self.nPop):
                self.population[i] = self._clipGuessWithinRangeBoundary(
                    self.population[i], (not ngen) or (i is indx))
            cost = wrap_bounds(cost, self._strictMin,
                               self._strictMax)  #XXX: remove?
        cost = wrap_penalty(cost, self._penalty)
        if self._reducer:
            #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
            cost = reduced(self._reducer, arraylike=True)(cost)
        # hold on to the 'wrapped' and 'raw' cost function
        self._cost = (cost, raw, ExtraArgs)
        self._live = True
        return cost
Exemplo n.º 5
0
 def _RegisterObjective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     # 'is None', not '== None': equality can be hijacked by __eq__ overrides
     if ExtraArgs is None: ExtraArgs = ()
     self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
     if self._useStrictRange:
         # clip every population member into the strict bounds
         for i in range(self.nPop):
             self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i])
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     # hold on to the 'wrapped' cost function
     self._cost = (cost, ExtraArgs)
     return cost
Exemplo n.º 6
0
 def _RegisterObjective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     # 'is None', not '== None': equality can be hijacked by __eq__ overrides
     if ExtraArgs is None: ExtraArgs = ()
     self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
     if self._useStrictRange:
         # clip every population member into the strict bounds
         for i in range(self.nPop):
             self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i])
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     cost = wrap_nested(cost, self._constraints)
     if self._reducer:
        #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' cost function
     self._cost = (cost, ExtraArgs)
     return cost
Exemplo n.º 7
0
 def _RegisterObjective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     # 'is None', not '== None': equality can be hijacked by __eq__ overrides
     if ExtraArgs is None: ExtraArgs = ()
    #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'
     # fully-qualified import; a bare 'python_map' fails on py3 (and matches
     # the 'mystic.python_map' usage elsewhere in this file)
     from mystic.python_map import python_map
     if self._map != python_map:
         self._fcalls = [0] #FIXME: temporary patch for removing the following line
     else:
         self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
     if self._useStrictRange:
         # clip every population member into the strict bounds
         for i in range(self.nPop):
             self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i])
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     # hold on to the 'wrapped' cost function
     self._cost = (cost, ExtraArgs)
     return cost
Exemplo n.º 8
0
 def _decorate_objective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     #print ("@", cost, ExtraArgs, max)
     raw = cost  # keep a handle on the undecorated objective
     if ExtraArgs is None: ExtraArgs = ()
     self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
     if self._useStrictRange:
         indx = list(self.popEnergy).index(self.bestEnergy)
         ngen = self.generations #XXX: no random if generations=0 ?
         for i in range(self.nPop):
             # compare indices by value ('=='); 'is' on ints relies on
             # CPython's small-int cache and breaks for larger populations
             self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i], (not ngen) or (i == indx))
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     if self._reducer:
        #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' and 'raw' cost function
     self._cost = (cost, raw, ExtraArgs)
     self._live = True
     return cost
Exemplo n.º 9
0
 def _decorate_objective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     #print("@%r %r %r" % (cost, ExtraArgs, max))
     raw = cost  # keep a handle on the undecorated objective
     if ExtraArgs is None: ExtraArgs = ()
     self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
     if self._useStrictRange:
         indx = list(self.popEnergy).index(self.bestEnergy)
         ngen = self.generations #XXX: no random if generations=0 ?
         for i in range(self.nPop):
             # compare indices by value ('=='); 'is' on ints relies on
             # CPython's small-int cache and breaks for larger populations
             self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i], (not ngen) or (i == indx))
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     if self._reducer:
        #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' and 'raw' cost function
     self._cost = (cost, raw, ExtraArgs)
     self._live = True
     return cost
Exemplo n.º 10
0
    def _decorate_objective(self, cost, ExtraArgs=None):
        """decorate the cost function with bounds, penalties, monitors, etc

input::
    - cost is the objective function, of the form y = cost(x, *ExtraArgs),
      where x is a candidate solution, and ExtraArgs is the tuple of positional
      arguments required to evaluate the objective."""
        #print("@%r %r %r" % (cost, ExtraArgs, max))
        evalmon = self._evalmon
        raw = cost
        if ExtraArgs is None: ExtraArgs = ()
        self._fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)
        if self._useStrictRange:
            if self.generations:
                #NOTE: pop[0] was best, may not be after resetting simplex
                for i, j in enumerate(self._setSimplexWithinRangeBoundary()):
                    self.population[i + 1] = self.population[0].copy()
                    self.population[i + 1][i] = j
            else:
                self.population[0] = self._clipGuessWithinRangeBoundary(
                    self.population[0])
            cost = wrap_bounds(cost, self._strictMin,
                               self._strictMax)  #XXX: remove?
            from mystic.constraints import and_
            constraints = and_(self._constraints,
                               self._strictbounds,
                               onfail=self._strictbounds)
        else:
            constraints = self._constraints
        cost = wrap_penalty(cost, self._penalty)
        cost = wrap_nested(cost, constraints)
        if self._reducer:
            #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
            cost = reduced(self._reducer, arraylike=True)(cost)
        # hold on to the 'wrapped' and 'raw' cost function
        self._cost = (cost, raw, ExtraArgs)
        self._live = True
        return cost
Exemplo n.º 11
0
 def _decorate_objective(self, cost, ExtraArgs=None):
     """decorate the cost function with bounds, penalties, monitors, etc"""
     #print ("@", cost, ExtraArgs, max)
     undecorated = cost  # retained so callers can recover the raw objective
     ExtraArgs = () if ExtraArgs is None else ExtraArgs
     self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
     if self._useStrictRange:
         if not self.generations:
             # first call: clip the starting guess into the strict bounds
             self.population[0] = self._clipGuessWithinRangeBoundary(self.population[0])
         else:
             #NOTE: pop[0] was best, may not be after resetting simplex
             for axis, bound in enumerate(self._setSimplexWithinRangeBoundary()):
                 vertex = self.population[0].copy()
                 vertex[axis] = bound
                 self.population[axis+1] = vertex
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     cost = wrap_nested(cost, self._constraints)
     if self._reducer:
         #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' and 'raw' cost function
     self._cost = (cost, undecorated, ExtraArgs)
     self._live = True
     return cost
Exemplo n.º 12
0
    def Solve(self, cost, termination=None, ExtraArgs=(), **kwds):
        """Minimize a 'cost' function with given termination conditions.

Description:

    Uses an ensemble of optimizers to find the minimum of
    a function of one or more variables.

Inputs:

    cost -- the Python function or method to be minimized.

Additional Inputs:

    termination -- callable object providing termination conditions.
    ExtraArgs -- extra arguments for cost.

Further Inputs:

    sigint_callback -- callback function for signal handler.
    callback -- an optional user-supplied function to call after each
        iteration.  It is called as callback(xk), where xk is the
        current parameter vector.                           [default = None]
    disp -- non-zero to print convergence messages.         [default = 0]
        """
        # process and activate input settings
        if 'sigint_callback' in kwds:
            self.sigint_callback = kwds['sigint_callback']
            del kwds['sigint_callback']
        else: self.sigint_callback = None
        settings = self._process_inputs(kwds)
        # pull the settings this method reads directly (disp, callback)
        disp = settings['disp'] if 'disp' in settings else False
        echo = settings['callback'] if 'callback' in settings else None
#       for key in settings:
#           exec "%s = settings['%s']" % (key,key)
        if disp in ['verbose', 'all']: verbose = True
        else: verbose = False
        #-------------------------------------------------------------

        from mystic.python_map import python_map
        if self._map != python_map:
            #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'
            from mystic.monitors import Null
            evalmon = Null()
        else: evalmon = self._evalmon
        fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)

        # set up signal handler
       #self._EARLYEXIT = False

        # activate signal_handler
       #import threading as thread
       #mainthread = isinstance(thread.current_thread(), thread._MainThread)
       #if mainthread: #XXX: if not mainthread, signal will raise ValueError
        import mystic._signal as signal
        if self._handle_sigint:
            signal.signal(signal.SIGINT, signal.Handler(self))

        # register termination function
        if termination is not None: self.SetTermination(termination)

        # get the nested solver instance
        solver = self._AbstractEnsembleSolver__get_solver_instance()
        #-------------------------------------------------------------

        # generate starting points
        initial_values = self._InitialPoints()

        # run optimizer for each grid point
        from copy import deepcopy as _copy
        op = [_copy(solver) for i in range(len(initial_values))]
       #cf = [cost for i in range(len(initial_values))]
        vb = [verbose for i in range(len(initial_values))]
        cb = [echo for i in range(len(initial_values))] #XXX: remove?
        at = self.id if self.id else 0  # start at self.id
        # NOTE: 'id' shadows the builtin; holds the rank of each member solver
        id = range(at,at+len(initial_values))

        # generate the local_optimize function
        # (runs one ensemble member from x0; returns the solver plus copies
        #  of its step/eval monitor contents so they survive the map boundary)
        def local_optimize(solver, x0, rank=None, disp=False, callback=None):
            from copy import deepcopy as _copy
            from mystic.tools import isNull
            solver.id = rank
            solver.SetInitialPoints(x0)
            if solver._useStrictRange: #XXX: always, settable, or sync'd ?
                solver.SetStrictRanges(min=solver._strictMin, \
                                       max=solver._strictMax) # or lower,upper ?
            solver.Solve(cost, disp=disp, callback=callback)
            sm = solver._stepmon
            em = solver._evalmon
            if isNull(sm): sm = ([],[],[],[])
            else: sm = (_copy(sm._x),_copy(sm._y),_copy(sm._id),_copy(sm._info))
            if isNull(em): em = ([],[],[],[])
            else: em = (_copy(em._x),_copy(em._y),_copy(em._id),_copy(em._info))
            return solver, sm, em

        # map:: solver = local_optimize(solver, x0, id, verbose)
        results = list(self._map(local_optimize, op, initial_values, id, \
                                                 vb, cb, **self._mapconfig))

        # save initial state
        self._AbstractSolver__save_state()
        #XXX: HACK TO GET CONTENT OF ALL MONITORS
        # reconnect monitors; save all solvers
        from mystic.monitors import Monitor
        while results: #XXX: option to not save allSolvers? skip this and _copy
            _solver, _stepmon, _evalmon = results.pop()
            sm = Monitor()
            sm._x,sm._y,sm._id,sm._info = _stepmon
            _solver._stepmon.extend(sm)
            del sm
            em = Monitor()
            em._x,em._y,em._id,em._info = _evalmon
            _solver._evalmon.extend(em)
            del em
            self._allSolvers[len(results)] = _solver
        # NOTE(review): the loop names are only bound if results was non-empty;
        # this 'del' assumes at least one initial point — TODO confirm
        del results, _solver, _stepmon, _evalmon
        #XXX: END HACK

        # get the results with the lowest energy
        self._bestSolver = self._allSolvers[0]
        bestpath = self._bestSolver._stepmon
        besteval = self._bestSolver._evalmon
        self._total_evals = self._bestSolver.evaluations
        for solver in self._allSolvers[1:]:
            self._total_evals += solver.evaluations # add func evals
            if solver.bestEnergy < self._bestSolver.bestEnergy:
                self._bestSolver = solver
                bestpath = solver._stepmon
                besteval = solver._evalmon

        # return results to internals
        self.population = self._bestSolver.population #XXX: pointer? copy?
        self.popEnergy = self._bestSolver.popEnergy #XXX: pointer? copy?
        self.bestSolution = self._bestSolver.bestSolution #XXX: pointer? copy?
        self.bestEnergy = self._bestSolver.bestEnergy
        self.trialSolution = self._bestSolver.trialSolution #XXX: pointer? copy?
        self._fcalls = self._bestSolver._fcalls #XXX: pointer? copy?
        self._maxiter = self._bestSolver._maxiter
        self._maxfun = self._bestSolver._maxfun

        # write 'bests' to monitors  #XXX: non-best monitors may be useful too
        self._stepmon = bestpath #XXX: pointer? copy?
        self._evalmon = besteval #XXX: pointer? copy?
        self.energy_history = None
        self.solution_history = None
       #from mystic.tools import isNull
       #if isNull(bestpath):
       #    self._stepmon = bestpath
       #else:
       #    for i in range(len(bestpath.y)):
       #        self._stepmon(bestpath.x[i], bestpath.y[i], self.id)
       #        #XXX: could apply callback here, or in exec'd code
       #if isNull(besteval):
       #    self._evalmon = besteval
       #else:
       #    for i in range(len(besteval.y)):
       #        self._evalmon(besteval.x[i], besteval.y[i])
        #-------------------------------------------------------------

        # restore default handler for signal interrupts
        if self._handle_sigint:
            signal.signal(signal.SIGINT, signal.default_int_handler)

        # log any termination messages
        msg = self.Terminated(disp=disp, info=True)
        if msg: self._stepmon.info('STOP("%s")' % msg)
        # save final state
        self._AbstractSolver__save_state(force=True)
        return 
    def Solve(self, cost, termination=None, ExtraArgs=(), **kwds):
        """Minimize a 'cost' function with given termination conditions.

Uses an ensemble of optimizers to find the minimum of a function of one or
more variables.

Args:
    cost (func, default=None): the function to be minimized: ``y = cost(x)``.
    termination (termination, default=None): termination conditions.
    ExtraArgs (tuple, default=None): extra arguments for cost.
    sigint_callback (func, default=None): callback function for signal handler.
    callback (func, default=None): function to call after each iteration. The
        interface is ``callback(xk)``, with xk the current parameter vector.
    disp (bool, default=False): if True, print convergence messages.

Returns:
    None
        """
        # process and activate input settings
        if 'sigint_callback' in kwds:
            self.sigint_callback = kwds['sigint_callback']
            del kwds['sigint_callback']
        else:
            self.sigint_callback = None
        settings = self._process_inputs(kwds)
        # pull the settings this method reads directly (disp, callback)
        disp = settings['disp'] if 'disp' in settings else False
        echo = settings['callback'] if 'callback' in settings else None
        #       for key in settings:
        #           exec "%s = settings['%s']" % (key,key)
        if disp in ['verbose', 'all']: verbose = True
        else: verbose = False
        #-------------------------------------------------------------

        from mystic.python_map import python_map
        if self._map != python_map:
            #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'
            from mystic.monitors import Null
            evalmon = Null()
        else:
            evalmon = self._evalmon
        fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)

        # set up signal handler
        #self._EARLYEXIT = False

        # activate signal_handler
        #import threading as thread
        #mainthread = isinstance(thread.current_thread(), thread._MainThread)
        #if mainthread: #XXX: if not mainthread, signal will raise ValueError
        import mystic._signal as signal
        if self._handle_sigint:
            signal.signal(signal.SIGINT, signal.Handler(self))

        # register termination function
        if termination is not None: self.SetTermination(termination)

        # get the nested solver instance
        solver = self._AbstractEnsembleSolver__get_solver_instance()
        #-------------------------------------------------------------

        # generate starting points
        initial_values = self._InitialPoints()

        # run optimizer for each grid point
        from copy import deepcopy as _copy
        op = [_copy(solver) for i in range(len(initial_values))]
        #cf = [cost for i in range(len(initial_values))]
        vb = [verbose for i in range(len(initial_values))]
        cb = [echo for i in range(len(initial_values))]  #XXX: remove?
        at = self.id if self.id else 0  # start at self.id
        # NOTE: 'id' shadows the builtin; holds the rank of each member solver
        id = range(at, at + len(initial_values))

        # generate the local_optimize function
        # (runs one ensemble member from x0; returns the solver plus copies
        #  of its step/eval monitor contents so they survive the map boundary)
        def local_optimize(solver, x0, rank=None, disp=False, callback=None):
            from copy import deepcopy as _copy
            from mystic.tools import isNull
            solver.id = rank
            solver.SetInitialPoints(x0)
            if solver._useStrictRange:  #XXX: always, settable, or sync'd ?
                solver.SetStrictRanges(min=solver._strictMin, \
                                       max=solver._strictMax) # or lower,upper ?
            solver.Solve(cost, disp=disp, callback=callback)
            sm = solver._stepmon
            em = solver._evalmon
            if isNull(sm): sm = ([], [], [], [])
            else:
                sm = (_copy(sm._x), _copy(sm._y), _copy(sm._id),
                      _copy(sm._info))
            if isNull(em): em = ([], [], [], [])
            else:
                em = (_copy(em._x), _copy(em._y), _copy(em._id),
                      _copy(em._info))
            return solver, sm, em

        # map:: solver = local_optimize(solver, x0, id, verbose)
        results = list(self._map(local_optimize, op, initial_values, id, \
                                                 vb, cb, **self._mapconfig))

        # save initial state
        self._AbstractSolver__save_state()
        #XXX: HACK TO GET CONTENT OF ALL MONITORS
        # reconnect monitors; save all solvers
        from mystic.monitors import Monitor
        while results:  #XXX: option to not save allSolvers? skip this and _copy
            _solver, _stepmon, _evalmon = results.pop()
            sm = Monitor()
            sm._x, sm._y, sm._id, sm._info = _stepmon
            _solver._stepmon.extend(sm)
            del sm
            em = Monitor()
            em._x, em._y, em._id, em._info = _evalmon
            _solver._evalmon.extend(em)
            del em
            self._allSolvers[len(results)] = _solver
        # NOTE(review): the loop names are only bound if results was non-empty;
        # this 'del' assumes at least one initial point — TODO confirm
        del results, _solver, _stepmon, _evalmon
        #XXX: END HACK

        # get the results with the lowest energy
        self._bestSolver = self._allSolvers[0]
        bestpath = self._bestSolver._stepmon
        besteval = self._bestSolver._evalmon
        self._total_evals = self._bestSolver.evaluations
        for solver in self._allSolvers[1:]:
            self._total_evals += solver.evaluations  # add func evals
            if solver.bestEnergy < self._bestSolver.bestEnergy:
                self._bestSolver = solver
                bestpath = solver._stepmon
                besteval = solver._evalmon

        # return results to internals
        self.population = self._bestSolver.population  #XXX: pointer? copy?
        self.popEnergy = self._bestSolver.popEnergy  #XXX: pointer? copy?
        self.bestSolution = self._bestSolver.bestSolution  #XXX: pointer? copy?
        self.bestEnergy = self._bestSolver.bestEnergy
        self.trialSolution = self._bestSolver.trialSolution  #XXX: pointer? copy?
        self._fcalls = self._bestSolver._fcalls  #XXX: pointer? copy?
        self._maxiter = self._bestSolver._maxiter
        self._maxfun = self._bestSolver._maxfun

        # write 'bests' to monitors  #XXX: non-best monitors may be useful too
        self._stepmon = bestpath  #XXX: pointer? copy?
        self._evalmon = besteval  #XXX: pointer? copy?
        self.energy_history = None
        self.solution_history = None
        #from mystic.tools import isNull
        #if isNull(bestpath):
        #    self._stepmon = bestpath
        #else:
        #    for i in range(len(bestpath.y)):
        #        self._stepmon(bestpath.x[i], bestpath.y[i], self.id)
        #        #XXX: could apply callback here, or in exec'd code
        #if isNull(besteval):
        #    self._evalmon = besteval
        #else:
        #    for i in range(len(besteval.y)):
        #        self._evalmon(besteval.x[i], besteval.y[i])
        #-------------------------------------------------------------

        # restore default handler for signal interrupts
        if self._handle_sigint:
            signal.signal(signal.SIGINT, signal.default_int_handler)

        # log any termination messages
        msg = self.Terminated(disp=disp, info=True)
        if msg: self._stepmon.info('STOP("%s")' % msg)
        # save final state
        self._AbstractSolver__save_state(force=True)
        return
Exemplo n.º 14
0
    def Solve(
            self,
            func,
            termination,
            sigint_callback=None,
            EvaluationMonitor=Null,
            StepMonitor=Null,  #GradientMonitor=Null,
            ExtraArgs=(),
            **kwds):
        """Minimize a function using NCG.

Description:

    Uses a Newton-CG algorithm to find the minimum of
    a function of one or more variables.

Inputs:

    func -- the Python function or method to be minimized.
    termination -- callable object providing termination conditions.

Additional Inputs:

    sigint_callback -- callback function for signal handler.
    EvaluationMonitor -- a callable object that will be passed x, fval
        whenever the cost function is evaluated.
    StepMonitor -- a callable object that will be passed x, fval, the gradient, and
                   the Hessian after the end of an iteration.
    ExtraArgs -- extra arguments for func and fprime (same for both).

Further Inputs:

    fprime -- callable f'(x,*args)
            Gradient of f.
    fhess_p : callable fhess_p(x,p,*args)
        Function which computes the Hessian of f times an
        arbitrary vector, p.
    fhess : callable fhess(x,*args)
        Function to compute the Hessian matrix of f.
    epsilon : float or ndarray
        If fhess is approximated, use this value for the step size.
    callback : callable
        An optional user-supplied function which is called after
        each iteration.  Called as callback(xk), where xk is the
        current parameter vector.
    disp : bool
        If True, print convergence message.

    Notes:
      Only one of `fhess_p` or `fhess` need to be given.  If `fhess`
      is provided, then `fhess_p` will be ignored.  If neither `fhess`
      nor `fhess_p` is provided, then the hessian product will be
      approximated using finite differences on `fprime`. `fhess_p`
      must compute the hessian times an arbitrary vector. If it is not
      given, finite-differences on `fprime` are used to compute
      it. 

        """
        # allow for inputs that don't conform to AbstractSolver interface
        args = ExtraArgs
        x0 = self.population[0]
        x0 = asarray(x0).flatten()

        epsilon = _epsilon
        self.disp = 1
        self.callback = None
        fhess_p = None
        fhess = None

        if kwds.has_key('epsilon'): epsilon = kwds['epsilon']
        if kwds.has_key('callback'): self.callback = kwds['callback']
        if kwds.has_key('disp'): self.disp = kwds['disp']
        if kwds.has_key('fhess'): fhess = kwds['fhess']
        if kwds.has_key('fhess_p'): fhess_p = kwds['fhess_p']

        # fprime is actually required. Temporary fix?:
        if kwds.has_key('fprime'): fprime = kwds['fprime']
        #-------------------------------------------------------------

        import signal
        self._EARLYEXIT = False

        fcalls, func = wrap_function(func, args, EvaluationMonitor)
        if self._useStrictRange:
            x0 = self._clipGuessWithinRangeBoundary(x0)
            func = wrap_bounds(func, self._strictMin, self._strictMax)

        #generate signal_handler
        self._generateHandler(sigint_callback)
        if self._handle_sigint:
            signal.signal(signal.SIGINT, self.signal_handler)

        #--------------------------------------------------------------

        if self._maxiter is None:
            self._maxiter = len(x0) * 200

        # Wrap gradient function?
        # gcalls, fprime = wrap_function(fprime, args, GradientMonitor)
        gcalls, fprime = wrap_function(fprime, args, Null)

        # Wrap hessian monitor?
        # But wrap_function assumes the function takes one parameter...
        #if fhess is not None:
        #    hcalls2, fhess = wrap_function(fhess, args, HessianMonitor)
        #else:
        #    if fhess_p is not None:
        #        hcalls2, fhess_p = wrap_function(fhess_p, args, HessianMonitor)

        #xtol = len(x0)*avextol
        #update = [2*xtol]
        xk = x0
        k = 0
        hcalls = 0
        old_fval = func(x0)
        abs = absolute
        while k < self._maxiter:
            # Compute a search direction pk by applying the CG method to
            #  del2 f(xk) p = - grad f(xk) starting from 0.
            b = -fprime(xk)
            maggrad = numpy.add.reduce(abs(b))
            eta = min([0.5, numpy.sqrt(maggrad)])
            termcond = eta * maggrad
            xsupi = zeros(len(x0), dtype=x0.dtype)
            ri = -b
            psupi = -ri
            i = 0
            dri0 = numpy.dot(ri, ri)

            if fhess is not None:  # you want to compute hessian once.
                A = fhess(*(xk, ) + args)
                hcalls = hcalls + 1

            while numpy.add.reduce(abs(ri)) > termcond:
                if fhess is None:
                    if fhess_p is None:
                        Ap = approx_fhess_p(xk, psupi, fprime, epsilon)
                    else:
                        Ap = fhess_p(xk, psupi, *args)
                        hcalls = hcalls + 1
                else:
                    Ap = numpy.dot(A, psupi)
                # check curvature
                Ap = asarray(Ap).squeeze()  # get rid of matrices...
                curv = numpy.dot(psupi, Ap)
                if curv == 0.0:
                    break
                elif curv < 0:
                    if (i > 0):
                        break
                    else:
                        xsupi = xsupi + dri0 / curv * psupi
                        break
                alphai = dri0 / curv
                xsupi = xsupi + alphai * psupi
                ri = ri + alphai * Ap
                dri1 = numpy.dot(ri, ri)
                betai = dri1 / dri0
                psupi = -ri + betai * psupi
                i = i + 1
                dri0 = dri1  # update numpy.dot(ri,ri) for next time.

            pk = xsupi  # search direction is solution to system.
            gfk = -b  # gradient at xk
            alphak, fc, gc, old_fval = line_search_BFGS(
                func, xk, pk, gfk, old_fval)

            update = alphak * pk

            # Put last solution in trialSolution for termination()
            self.trialSolution = xk

            xk = xk + update  # upcast if necessary
            if self.callback is not None:
                self.callback(xk)
            k += 1

            # Update variables/monitors
            self.bestSolution = xk
            self.bestEnergy = old_fval
            StepMonitor(self.bestSolution, self.bestEnergy, gfk, Ap)
            self.energy_history.append(self.bestEnergy)

            if self._EARLYEXIT or termination(self):
                break

        self.generations = k

        # Fix me?
        self.hcalls = hcalls
        self.gcalls = gcalls[0]

        signal.signal(signal.SIGINT, signal.default_int_handler)

        if self.disp:
            fval = old_fval
        if k >= self._maxiter:
            if self.disp:
                print "Warning: Maximum number of iterations has been exceeded"
                print "         Current function value: %f" % fval
                print "         Iterations: %d" % k
                print "         Function evaluations: %d" % fcalls[0]
                print "         Gradient evaluations: %d" % gcalls[0]
                print "         Hessian evaluations: %d" % hcalls
        else:
            if self.disp:
                print "Optimization terminated successfully."
                print "         Current function value: %f" % fval
                print "         Iterations: %d" % k
                print "         Function evaluations: %d" % fcalls[0]
                print "         Gradient evaluations: %d" % gcalls[0]
                print "         Hessian evaluations: %d" % hcalls
Exemplo n.º 15
0
    def Solve(self,
              cost,
              termination=None,
              sigint_callback=None,
              ExtraArgs=(),
              **kwds):
        """Minimize a function using batch grid optimization.

Description:

    Uses parallel mapping of solvers on a regular grid to find the
    minimum of a function of one or more variables.  A copy of the
    nested solver is launched from each grid point, and the result
    with the lowest energy is adopted as this solver's final state.

Inputs:

    cost -- the Python function or method to be minimized.

Additional Inputs:

    termination -- callable object providing termination conditions.
    sigint_callback -- callback function for signal handler.
    ExtraArgs -- extra arguments for cost.

Further Inputs:

    callback -- an optional user-supplied function to call after each
        iteration.  It is called as callback(xk), where xk is the
        current parameter vector.                           [default = None]
    disp -- non-zero to print convergence messages.         [default = 0]
        """
        # process and activate input settings
        settings = self._process_inputs(kwds)
        disp = 0
        # NOTE(review): the exec below is disabled, so user settings are
        # never unpacked into locals; 'disp' stays 0 and 'verbose' is
        # therefore always False here -- confirm this is intentional.
        #       for key in settings:
        #           exec "%s = settings['%s']" % (key,key)
        if disp in ['verbose', 'all']: verbose = True
        else: verbose = False
        #-------------------------------------------------------------

        import signal
        #self._EARLYEXIT = False

        # only wrap the cost function with the evaluation monitor when
        # running under the serial built-in map (monitors fail otherwise)
        #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'
        from python_map import python_map
        if self._map != python_map:
            self._fcalls = [
                0
            ]  #FIXME: temporary patch for removing the following line
        else:
            self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)

        #generate signal_handler
        self._generateHandler(sigint_callback)
        if self._handle_sigint:
            signal.signal(signal.SIGINT, self.signal_handler)

        # register termination function
        if termination is not None:
            self.SetTermination(termination)

        # get the nested solver instance
        solver = self._AbstractNestedSolver__get_solver_instance()
        #-------------------------------------------------------------

        # box bounds for the grid: strict ranges if set, else defaults
        nbins = self._nbins
        if len(self._strictMax): upper = list(self._strictMax)
        else:
            upper = list(self._defaultMax)
        if len(self._strictMin): lower = list(self._strictMin)
        else:
            lower = list(self._defaultMin)

        # generate arrays of points defining a grid in parameter space
        # (bin centers: lower + (j+0.5)*step, nbins[i] bins along axis i)
        grid_dimensions = self.nDim
        bins = []
        for i in range(grid_dimensions):
            step = abs(upper[i] - lower[i]) / nbins[i]
            bins.append([lower[i] + (j + 0.5) * step for j in range(nbins[i])])

        # build a grid of starting points
        from mystic.math import gridpts
        initial_values = gridpts(bins)

        # run optimizer for each grid point
        from copy import deepcopy as copy
        op = [copy(solver) for i in range(len(initial_values))]
        #cf = [cost for i in range(len(initial_values))]
        #vb = [verbose for i in range(len(initial_values))]
        id = range(len(initial_values))

        # generate the local_optimize function
        # (runs one solver copy to completion from the given start point)
        def local_optimize(solver, x0, rank=None, disp=verbose):
            solver.id = rank
            solver.SetInitialPoints(x0)
            if solver._useStrictRange:  #XXX: always, settable, or sync'd ?
                solver.SetStrictRanges(min=solver._strictMin, \
                                       max=solver._strictMax) # or lower,upper ?
            solver.Solve(cost, disp=disp)
            return solver

        # map:: solver = local_optimize(solver, x0, id, verbose)
        results = self._map(local_optimize, op, initial_values, id, \
                                                **self._mapconfig)

        # save initial state
        self._AbstractSolver__save_state()
        # get the results with the lowest energy
        self._bestSolver = results[0]
        bestpath = self._bestSolver._stepmon
        besteval = self._bestSolver._evalmon
        self._total_evals = self._bestSolver.evaluations
        for solver in results[1:]:
            self._total_evals += solver.evaluations  # add func evals
            if solver.bestEnergy < self._bestSolver.bestEnergy:
                self._bestSolver = solver
                bestpath = solver._stepmon
                besteval = solver._evalmon

        # return results to internals
        self.population = self._bestSolver.population  #XXX: pointer? copy?
        self.popEnergy = self._bestSolver.popEnergy  #XXX: pointer? copy?
        self.bestSolution = self._bestSolver.bestSolution  #XXX: pointer? copy?
        self.bestEnergy = self._bestSolver.bestEnergy
        self.trialSolution = self._bestSolver.trialSolution  #XXX: pointer? copy?
        self._fcalls = self._bestSolver._fcalls  #XXX: pointer? copy?
        self._maxiter = self._bestSolver._maxiter
        self._maxfun = self._bestSolver._maxfun

        # write 'bests' to monitors  #XXX: non-best monitors may be useful too
        self._stepmon = bestpath  #XXX: pointer? copy?
        self._evalmon = besteval  #XXX: pointer? copy?
        #from mystic.tools import isNull
        #if isNull(bestpath):
        #    self._stepmon = bestpath
        #else:
        #    for i in range(len(bestpath.y)):
        #        self._stepmon(bestpath.x[i], bestpath.y[i], self.id)
        #        #XXX: could apply callback here, or in exec'd code
        #if isNull(besteval):
        #    self._evalmon = besteval
        #else:
        #    for i in range(len(besteval.y)):
        #        self._evalmon(besteval.x[i], besteval.y[i])
        #-------------------------------------------------------------

        # restore the default SIGINT handler
        signal.signal(signal.SIGINT, signal.default_int_handler)

        # log any termination messages
        msg = self.CheckTermination(disp=disp, info=True)
        if msg: self._stepmon.info('STOP("%s")' % msg)
        # save final state
        self._AbstractSolver__save_state(force=True)
        return
Exemplo n.º 16
0
    def Solve(self, func, termination, sigint_callback=None,
              EvaluationMonitor=Null, StepMonitor=Null, ExtraArgs=(), **kwds):
        """Minimize a function using simulated annealing.

Description:

    Uses a simulated annealing algorithm to find the minimum of
    a function of one or more variables.

Inputs:

    func -- the Python function or method to be minimized.
    termination -- callable object providing termination conditions.

Additional Inputs:

    sigint_callback -- callback function for signal handler.
    EvaluationMonitor -- a callable object that will be passed x, fval
        whenever the cost function is evaluated.
    StepMonitor -- a callable object that will be passed x, fval
        after the end of an iteration.
    ExtraArgs -- extra arguments for func.

Further Inputs:

    schedule     -- Annealing schedule to use: 'cauchy', 'fast', or 'boltzmann'
                    [default='fast']
    T0           -- Initial Temperature (estimated as 1.2 times the largest
                    cost-function deviation over random points in the range)
                    [default=None]
    learn_rate   -- scale constant for adjusting guesses
                    [default=0.5]
    boltzmann    -- Boltzmann constant in acceptance test
                     (increase for less stringent test at each temperature).
                    [default=1.0]
    quench, m, n -- Parameters to alter fast_sa schedule
                    [all default to 1.0]
    dwell        -- The number of times to search the space at each temperature.
                    [default=50]

    Optional Termination Conditions:
    Tf           -- Final goal temperature
                    [default=1e-12]
    maxaccept    -- Maximum changes to accept
                    [default=None]
        """
        #allow for inputs that don't conform to AbstractSolver interface
        args = ExtraArgs
        x0 = self.population[0]

        # annealing hyperparameter defaults; overridden via **kwds below
        schedule = "fast"
        T0 = None
        boltzmann = 1.0
        learn_rate = 0.5
        dwell = 50
        quench = 1.0
        m = 1.0
        n = 1.0

        Tf = 1e-12 # or None?
        self._maxaccept = None

        self.disp = 0
        self.callback = None

        # unpack user-supplied keyword settings (Python 2 dict.has_key)
        if kwds.has_key('schedule'): schedule = kwds['schedule']
        if kwds.has_key('T0'): T0 = kwds['T0']
        if kwds.has_key('boltzmann'): boltzmann = kwds['boltzmann']
        if kwds.has_key('learn_rate'): learn_rate = kwds['learn_rate']
        if kwds.has_key('dwell'): dwell = kwds['dwell']
        if kwds.has_key('quench'): quench = kwds['quench']
        if kwds.has_key('m'): m = kwds['m']
        if kwds.has_key('n'): n = kwds['n']    

        if kwds.has_key('Tf'): Tf = kwds['Tf']
        if kwds.has_key('maxaccept'): self._maxaccept = kwds['maxaccept']

        if kwds.has_key('disp'): self.disp = kwds['disp']
        if kwds.has_key('callback'): self.callback = kwds['callback']

        #-------------------------------------------------------------

        import signal
        self._EARLYEXIT = False

        fcalls, func = wrap_function(func, ExtraArgs, EvaluationMonitor)

        if self._useStrictRange:
            x0 = self._clipGuessWithinRangeBoundary(x0)
            # Note: wrap_bounds changes the results slightly from the original
            func = wrap_bounds(func, self._strictMin, self._strictMax)

        #generate signal_handler
        self._generateHandler(sigint_callback) 
        if self._handle_sigint: signal.signal(signal.SIGINT, self.signal_handler)
        #-------------------------------------------------------------

        # map the schedule name to a schedule instance, e.g. 'fast' -> fast_sa()
        # NOTE(review): eval of a caller-supplied string -- trusted input only
        schedule = eval(schedule+'_sa()')
        #   initialize the schedule
        schedule.init(dims=shape(x0),func=func,args=args,boltzmann=boltzmann,T0=T0,
                  learn_rate=learn_rate, lower=self._strictMin, upper=self._strictMax,
                  m=m, n=n, quench=quench, dwell=dwell)

        if self._maxiter is None:
            self._maxiter = 400

        current_state, last_state = _state(), _state()
        if T0 is None:
            # estimate a starting temperature (also returns a start point)
            x0, self.bestEnergy = schedule.getstart_temp()
            self.bestSolution = x0
        else:
            #self.bestSolution = None
            self.bestSolution = x0
            self.bestEnergy = 300e8
        # retval: 0 = normal, 1 = maxfun exceeded, 2 = maxiter exceeded
        retval = 0
        last_state.x = asarray(x0).copy()
        fval = func(x0,*args)
        schedule.feval += 1
        last_state.cost = fval
        if last_state.cost < self.bestEnergy:
            self.bestEnergy = fval
            self.bestSolution = asarray(x0).copy()
        schedule.T = schedule.T0
        # seed the recent-cost window so termination tests see a full queue
        # before real costs arrive -- presumably arbitrary dummy values
        fqueue = [100, 300, 500, 700]
        self.population = asarray(fqueue)*1.0
        iters = 0
        while 1:
            StepMonitor(self.bestSolution, self.bestEnergy)
            # NOTE(review): loop variable shadows the 'n' schedule parameter;
            # harmless here since 'n' is not used after schedule.init above
            for n in range(dwell):
                current_state.x = schedule.update_guess(last_state.x)
                current_state.cost = func(current_state.x,*args)
                schedule.feval += 1

                dE = current_state.cost - last_state.cost
                if schedule.accept_test(dE):
                    last_state.x = current_state.x.copy()
                    last_state.cost = current_state.cost
                    if last_state.cost < self.bestEnergy:
                        self.bestSolution = last_state.x.copy()
                        self.bestEnergy = last_state.cost
            schedule.update_temp() 

            iters += 1
            # slide the window of recent cooling-step costs
            fqueue.append(squeeze(last_state.cost))
            fqueue.pop(0)
            af = asarray(fqueue)*1.0

            # Update monitors/variables
            self.population = af
            self.energy_history.append(self.bestEnergy)

            if self.callback is not None:
                self.callback(self.bestSolution)

            # Stopping conditions
            # - last saved values of f from each cooling step
            #     are all very similar (effectively cooled)
            # - Tf is set and we are below it
            # - maxfun is set and we are past it
            # - maxiter is set and we are past it
            # - maxaccept is set and we are past it

            if self._EARLYEXIT or termination(self):
                # How to deal with the below warning? It uses feps, which is passed
                # to termination, so it would be repetitive to also pass it to Solve().
                #if abs(af[-1]-best_state.cost) > feps*10:
                #print "Warning: Cooled to %f at %s but this is not" \
                #      % (squeeze(last_state.cost), str(squeeze(last_state.x))) \
                #      + " the smallest point found."
                break
            if (Tf is not None) and (schedule.T < Tf):
                break
           # if (self._maxfun is not None) and (schedule.feval > self._maxfun):
           #     retval = 1
           #     break
            if (self._maxfun is not None) and (fcalls[0] > self._maxfun):
                retval = 1
                break
            if (iters > self._maxiter):
                retval = 2
                break
            if (self._maxaccept is not None) and (schedule.accepted > self._maxaccept):
                break

        # restore the default SIGINT handler
        signal.signal(signal.SIGINT,signal.default_int_handler)

        # Store some information. Is there a better way to do this?
        self.generations = iters        # Number of iterations
        self.T = schedule.T             # Final temperature
        self.accept = schedule.accepted # Number of tests accepted

        # code below here pushes output to scipy.optimize interface

        if self.disp:
            if retval == 1: 
                print "Warning: Maximum number of function evaluations has "\
                      "been exceeded."
            elif retval == 2:
                print "Warning: Maximum number of iterations has been exceeded"
            else:
                print "Optimization terminated successfully."
                print "         Current function value: %f" % self.bestEnergy
                print "         Iterations: %d" % iters
                print "         Function evaluations: %d" % fcalls[0]

        return 
Exemplo n.º 17
0
    def Solve(self, cost, termination=None, sigint_callback=None,
                                            ExtraArgs=(), **kwds):
        """Minimize a function using batch grid optimization.

Description:

    Uses parallel mapping of solvers on a regular grid to find the
    minimum of a function of one or more variables.  A copy of the
    nested solver is launched from each grid point, and the result
    with the lowest energy is adopted as this solver's final state.

Inputs:

    cost -- the Python function or method to be minimized.

Additional Inputs:

    termination -- callable object providing termination conditions.
    sigint_callback -- callback function for signal handler.
    ExtraArgs -- extra arguments for cost.

Further Inputs:

    callback -- an optional user-supplied function to call after each
        iteration.  It is called as callback(xk), where xk is the
        current parameter vector.                           [default = None]
    disp -- non-zero to print convergence messages.         [default = 0]
        """
        # process and activate input settings
        settings = self._process_inputs(kwds)
        disp=0
        # NOTE(review): the exec below is disabled, so user settings are
        # never unpacked into locals; 'disp' stays 0 and 'verbose' is
        # therefore always False here -- confirm this is intentional.
#       for key in settings:
#           exec "%s = settings['%s']" % (key,key)
        if disp in ['verbose', 'all']: verbose = True
        else: verbose = False
        #-------------------------------------------------------------

        import signal
       #self._EARLYEXIT = False

        # only wrap the cost function with the evaluation monitor when
        # running under the serial built-in map (monitors fail otherwise)
       #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'
        from python_map import python_map
        if self._map != python_map:
            self._fcalls = [0] #FIXME: temporary patch for removing the following line
        else:
            self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)

        #generate signal_handler
        self._generateHandler(sigint_callback) 
        if self._handle_sigint: signal.signal(signal.SIGINT,self.signal_handler)

        # register termination function
        if termination is not None:
            self.SetTermination(termination)

        # get the nested solver instance
        solver = self._AbstractNestedSolver__get_solver_instance()
        #-------------------------------------------------------------

        # box bounds for the grid: strict ranges if set, else defaults
        nbins = self._nbins
        if len(self._strictMax): upper = list(self._strictMax)
        else:
            upper = list(self._defaultMax)
        if len(self._strictMin): lower = list(self._strictMin)
        else:
            lower = list(self._defaultMin)

        # generate arrays of points defining a grid in parameter space
        # (bin centers: lower + (j+0.5)*step, nbins[i] bins along axis i)
        grid_dimensions = self.nDim
        bins = []
        for i in range(grid_dimensions):
            step = abs(upper[i] - lower[i])/nbins[i]
            bins.append( [lower[i] + (j+0.5)*step for j in range(nbins[i])] )

        # build a grid of starting points
        from mystic.math import gridpts
        initial_values = gridpts(bins)

        # run optimizer for each grid point
        from copy import deepcopy as copy
        op = [copy(solver) for i in range(len(initial_values))]
       #cf = [cost for i in range(len(initial_values))]
       #vb = [verbose for i in range(len(initial_values))]
        id = range(len(initial_values))

        # generate the local_optimize function
        # (runs one solver copy to completion from the given start point)
        def local_optimize(solver, x0, rank=None, disp=verbose):
            solver.id = rank
            solver.SetInitialPoints(x0)
            if solver._useStrictRange: #XXX: always, settable, or sync'd ?
                solver.SetStrictRanges(min=solver._strictMin, \
                                       max=solver._strictMax) # or lower,upper ?
            solver.Solve(cost, disp=disp)
            return solver

        # map:: solver = local_optimize(solver, x0, id, verbose)
        results = self._map(local_optimize, op, initial_values, id, \
                                                **self._mapconfig)

        # save initial state
        self._AbstractSolver__save_state()
        # get the results with the lowest energy
        self._bestSolver = results[0]
        bestpath = self._bestSolver._stepmon
        besteval = self._bestSolver._evalmon
        self._total_evals = self._bestSolver.evaluations
        for solver in results[1:]:
            self._total_evals += solver.evaluations # add func evals
            if solver.bestEnergy < self._bestSolver.bestEnergy:
                self._bestSolver = solver
                bestpath = solver._stepmon
                besteval = solver._evalmon

        # return results to internals
        self.population = self._bestSolver.population #XXX: pointer? copy?
        self.popEnergy = self._bestSolver.popEnergy #XXX: pointer? copy?
        self.bestSolution = self._bestSolver.bestSolution #XXX: pointer? copy?
        self.bestEnergy = self._bestSolver.bestEnergy
        self.trialSolution = self._bestSolver.trialSolution #XXX: pointer? copy?
        self._fcalls = self._bestSolver._fcalls #XXX: pointer? copy?
        self._maxiter = self._bestSolver._maxiter
        self._maxfun = self._bestSolver._maxfun

        # write 'bests' to monitors  #XXX: non-best monitors may be useful too
        self._stepmon = bestpath #XXX: pointer? copy?
        self._evalmon = besteval #XXX: pointer? copy?
       #from mystic.tools import isNull
       #if isNull(bestpath):
       #    self._stepmon = bestpath
       #else:
       #    for i in range(len(bestpath.y)):
       #        self._stepmon(bestpath.x[i], bestpath.y[i], self.id)
       #        #XXX: could apply callback here, or in exec'd code
       #if isNull(besteval):
       #    self._evalmon = besteval
       #else:
       #    for i in range(len(besteval.y)):
       #        self._evalmon(besteval.x[i], besteval.y[i])
        #-------------------------------------------------------------

        # restore the default SIGINT handler
        signal.signal(signal.SIGINT,signal.default_int_handler)

        # log any termination messages
        msg = self.CheckTermination(disp=disp, info=True)
        if msg: self._stepmon.info('STOP("%s")' % msg)
        # save final state
        self._AbstractSolver__save_state(force=True)
        return 
Exemplo n.º 18
0
    def Solve(self, cost, termination=None, ExtraArgs=(), **kwds):
        """Minimize a function using batch grid optimization.

Description:

    Uses parallel mapping of solvers on a regular grid to find the
    minimum of a function of one or more variables.  A copy of the
    nested solver is launched from each grid point; all solvers are
    retained in self._allSolvers, and the result with the lowest
    energy is adopted as this solver's final state.

Inputs:

    cost -- the Python function or method to be minimized.

Additional Inputs:

    termination -- callable object providing termination conditions.
    ExtraArgs -- extra arguments for cost.

Further Inputs:

    sigint_callback -- callback function for signal handler.
    callback -- an optional user-supplied function to call after each
        iteration.  It is called as callback(xk), where xk is the
        current parameter vector.                           [default = None]
    disp -- non-zero to print convergence messages.         [default = 0]
        """
        # process and activate input settings
        sigint_callback = kwds.pop('sigint_callback', None)
        settings = self._process_inputs(kwds)
        disp = settings['disp'] if 'disp' in settings else False
        echo = settings['callback'] if 'callback' in settings else None
        #       for key in settings:
        #           exec "%s = settings['%s']" % (key,key)
        if disp in ['verbose', 'all']: verbose = True
        else: verbose = False
        #-------------------------------------------------------------

        from python_map import python_map
        if self._map != python_map:
            #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'
            from mystic.monitors import Null
            evalmon = Null()
        else:
            evalmon = self._evalmon
        fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)

        # set up signal handler
        #self._EARLYEXIT = False
        self._generateHandler(sigint_callback)

        # activate signal_handler
        import signal
        if self._handle_sigint:
            signal.signal(signal.SIGINT, self.signal_handler)

        # register termination function
        if termination is not None:
            self.SetTermination(termination)

        # get the nested solver instance
        solver = self._AbstractEnsembleSolver__get_solver_instance()
        #-------------------------------------------------------------

        # box bounds for the grid: strict ranges if set, else defaults
        nbins = self._nbins
        if len(self._strictMax): upper = list(self._strictMax)
        else:
            upper = list(self._defaultMax)
        if len(self._strictMin): lower = list(self._strictMin)
        else:
            lower = list(self._defaultMin)

        # generate arrays of points defining a grid in parameter space
        # (bin centers: lower + (j+0.5)*step, nbins[i] bins along axis i)
        grid_dimensions = self.nDim
        bins = []
        for i in range(grid_dimensions):
            step = abs(upper[i] - lower[i]) / nbins[i]
            bins.append([lower[i] + (j + 0.5) * step for j in range(nbins[i])])

        # build a grid of starting points
        from mystic.math import gridpts
        initial_values = gridpts(bins)

        # run optimizer for each grid point
        from copy import deepcopy as _copy
        op = [_copy(solver) for i in range(len(initial_values))]
        #cf = [cost for i in range(len(initial_values))]
        vb = [verbose for i in range(len(initial_values))]
        cb = [echo for i in range(len(initial_values))]  #XXX: remove?
        # offset the child solver ids by this solver's id (None/0 -> 0)
        at = self.id if self.id else 0  # start at self.id
        id = range(at, at + len(initial_values))

        # generate the local_optimize function
        # (runs one solver copy from x0; returns the solver plus plain-tuple
        # copies of its monitor contents -- presumably so the monitor data
        # survives the parallel map; verify against the map implementation)
        def local_optimize(solver, x0, rank=None, disp=False, callback=None):
            from copy import deepcopy as _copy
            from mystic.tools import isNull
            solver.id = rank
            solver.SetInitialPoints(x0)
            if solver._useStrictRange:  #XXX: always, settable, or sync'd ?
                solver.SetStrictRanges(min=solver._strictMin, \
                                       max=solver._strictMax) # or lower,upper ?
            solver.Solve(cost, disp=disp, callback=callback)
            sm = solver._stepmon
            em = solver._evalmon
            if isNull(sm): sm = ([], [], [], [])
            else:
                sm = (_copy(sm._x), _copy(sm._y), _copy(sm._id),
                      _copy(sm._info))
            if isNull(em): em = ([], [], [], [])
            else:
                em = (_copy(em._x), _copy(em._y), _copy(em._id),
                      _copy(em._info))
            return solver, sm, em

        # map:: solver = local_optimize(solver, x0, id, verbose)
        results = self._map(local_optimize, op, initial_values, id, \
                                            vb, cb, **self._mapconfig)

        # save initial state
        self._AbstractSolver__save_state()
        #XXX: HACK TO GET CONTENT OF ALL MONITORS
        # reconnect monitors; save all solvers
        from mystic.monitors import Monitor
        while results:  #XXX: option to not save allSolvers? skip this and _copy
            _solver, _stepmon, _evalmon = results.pop()
            sm = Monitor()
            sm._x, sm._y, sm._id, sm._info = _stepmon
            _solver._stepmon.extend(sm)
            del sm
            em = Monitor()
            em._x, em._y, em._id, em._info = _evalmon
            _solver._evalmon.extend(em)
            del em
            self._allSolvers[len(results)] = _solver
        # NOTE(review): the del below assumes at least one result (an empty
        # grid would leave _solver undefined and raise NameError) -- confirm
        # nbins always yields a non-empty grid
        del results, _solver, _stepmon, _evalmon
        #XXX: END HACK

        # get the results with the lowest energy
        self._bestSolver = self._allSolvers[0]
        bestpath = self._bestSolver._stepmon
        besteval = self._bestSolver._evalmon
        self._total_evals = self._bestSolver.evaluations
        for solver in self._allSolvers[1:]:
            self._total_evals += solver.evaluations  # add func evals
            if solver.bestEnergy < self._bestSolver.bestEnergy:
                self._bestSolver = solver
                bestpath = solver._stepmon
                besteval = solver._evalmon

        # return results to internals
        self.population = self._bestSolver.population  #XXX: pointer? copy?
        self.popEnergy = self._bestSolver.popEnergy  #XXX: pointer? copy?
        self.bestSolution = self._bestSolver.bestSolution  #XXX: pointer? copy?
        self.bestEnergy = self._bestSolver.bestEnergy
        self.trialSolution = self._bestSolver.trialSolution  #XXX: pointer? copy?
        self._fcalls = self._bestSolver._fcalls  #XXX: pointer? copy?
        self._maxiter = self._bestSolver._maxiter
        self._maxfun = self._bestSolver._maxfun

        # write 'bests' to monitors  #XXX: non-best monitors may be useful too
        self._stepmon = bestpath  #XXX: pointer? copy?
        self._evalmon = besteval  #XXX: pointer? copy?
        # clear history caches -- presumably regenerated from the monitors
        # on demand; verify against the solver's history properties
        self.energy_history = None
        self.solution_history = None
        #from mystic.tools import isNull
        #if isNull(bestpath):
        #    self._stepmon = bestpath
        #else:
        #    for i in range(len(bestpath.y)):
        #        self._stepmon(bestpath.x[i], bestpath.y[i], self.id)
        #        #XXX: could apply callback here, or in exec'd code
        #if isNull(besteval):
        #    self._evalmon = besteval
        #else:
        #    for i in range(len(besteval.y)):
        #        self._evalmon(besteval.x[i], besteval.y[i])
        #-------------------------------------------------------------

        # restore default handler for signal interrupts
        signal.signal(signal.SIGINT, signal.default_int_handler)

        # log any termination messages
        msg = self.Terminated(disp=disp, info=True)
        if msg: self._stepmon.info('STOP("%s")' % msg)
        # save final state
        self._AbstractSolver__save_state(force=True)
        return