Example #1
 def _decorate_objective(self, cost, ExtraArgs=None):
     """decorate the cost function with bounds, penalties, monitors, etc"""
     #print("@%r %r %r" % (cost, ExtraArgs, max))
     evalmon = self._evalmon
     raw = cost
     if ExtraArgs is None: ExtraArgs = ()
     self._fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)
     if self._useStrictRange:
         if self.generations:
             #NOTE: pop[0] was best, may not be after resetting simplex
             for i, j in enumerate(self._setSimplexWithinRangeBoundary()):
                 self.population[i + 1] = self.population[0].copy()
                 self.population[i + 1][i] = j
         else:
             self.population[0] = self._clipGuessWithinRangeBoundary(
                 self.population[0])
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     cost = wrap_nested(cost, self._constraints)
     if self._reducer:
         #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' and 'raw' cost function
     self._cost = (cost, raw, ExtraArgs)
     self._live = True
     return cost
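Example #1 layers several wrappers around the raw objective before handing it to the solver. Below is a minimal, self-contained sketch of the same layering on a toy cost, using only the call signatures shown above (wrap_function returns a (call-counter, wrapped-cost) pair; wrap_bounds restricts evaluation to the box [lb, ub]). The assumption that these helpers live in mystic.tools follows the imports in the sampled_mean examples further down; the toy cost itself is invented for illustration.

# hedged sketch: layer the wrappers in the order used by _decorate_objective
from mystic.tools import wrap_function, wrap_bounds
from mystic.monitors import Monitor

def cost(x):                      # toy objective, y = cost(x)
    return sum(xi * xi for xi in x)

evalmon = Monitor()               # records each evaluation
fcalls, wrapped = wrap_function(cost, (), evalmon)      # fcalls[0] counts raw calls
wrapped = wrap_bounds(wrapped, [-1.0, -1.0], [1.0, 1.0])

print(wrapped([0.5, 0.5]))        # in bounds, so the raw cost is returned: 0.5
print(fcalls[0])                  # 1 evaluation so far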
Example #2
    def _decorate_objective(self, cost, ExtraArgs=None):
        """decorate the cost function with bounds, penalties, monitors, etc

input::
    - cost is the objective function, of the form y = cost(x, *ExtraArgs),
      where x is a candidate solution, and ExtraArgs is the tuple of positional
      arguments required to evaluate the objective."""
        #print("@%r %r %r" % (cost, ExtraArgs, max))
        evalmon = self._evalmon
        raw = cost
        if ExtraArgs is None: ExtraArgs = ()
        self._fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)
        if self._useStrictRange:
            indx = list(self.popEnergy).index(self.bestEnergy)
            ngen = self.generations  #XXX: no random if generations=0 ?
            for i in range(self.nPop):
                self.population[i] = self._clipGuessWithinRangeBoundary(
                    self.population[i], (not ngen) or (i == indx))
            cost = wrap_bounds(cost, self._strictMin,
                               self._strictMax)  #XXX: remove?
            from mystic.constraints import and_
            constraints = and_(self._constraints,
                               self._strictbounds,
                               onfail=self._strictbounds)
        else:
            constraints = self._constraints
        cost = wrap_penalty(cost, self._penalty)
        cost = wrap_nested(cost, constraints)
        if self._reducer:
            #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
            cost = reduced(self._reducer, arraylike=True)(cost)
        # hold on to the 'wrapped' and 'raw' cost function
        self._cost = (cost, raw, ExtraArgs)
        self._live = True
        return cost
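The docstring in Example #2 fixes the calling convention y = cost(x, *ExtraArgs). A tiny illustration of an objective in that form (the weights argument is invented for the example):

def cost(x, weights):
    # y = cost(x, *ExtraArgs), with x a candidate solution and ExtraArgs = (weights,)
    return sum(w * xi * xi for w, xi in zip(weights, x))

ExtraArgs = ([1.0, 10.0],)
print(cost([0.5, 0.5], *ExtraArgs))   # 0.25 + 2.5 = 2.75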
Example #3
 def _decorate_objective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     #print ("@", cost, ExtraArgs, max)
     raw = cost
     if ExtraArgs is None: ExtraArgs = ()
     from mystic.python_map import python_map
     if self._map != python_map:
         #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'
         from mystic.monitors import Null
         evalmon = Null()
     else: evalmon = self._evalmon
     fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)
     if self._useStrictRange:
         indx = list(self.popEnergy).index(self.bestEnergy)
         ngen = self.generations #XXX: no random if generations=0 ?
         for i in range(self.nPop):
             self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i], (not ngen) or (i == indx))
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     if self._reducer:
        #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' and 'raw' cost function
     self._cost = (cost, raw, ExtraArgs)
     self._live = True
     return cost
Example #4
def sampled_mean(f, lb,ub, npts=10000):
  """
use random sampling to calculate the mean of a function

Inputs:
    f -- a function that takes a list and returns a number
    lb -- a list of lower bounds
    ub -- a list of upper bounds
    npts -- the number of points to sample [Default is npts=10000]
"""
  from numpy import inf, transpose
  from mystic.tools import wrap_bounds
  pts = _random_samples(lb, ub, npts)
  f = wrap_bounds(f,lb,ub)
  ave = 0; count = 0
  for i in range(len(pts[0])):
    if len(lb) != 1:
      xvector = transpose(pts)[i]
      Fx = f(list(xvector))
    else:
      Fx = f(float(transpose(pts)[i]))
    if Fx != -inf: # outside of bounds evaluates to -inf
      ave += Fx
      count += 1
  if not count: return None  #XXX: define 0/0 = None
  ave = float(ave) / float(count)
  return ave
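For reference, sampled_mean is a Monte Carlo estimate of the mean of f over the box [lb, ub]. The following dependency-free check performs the same computation; the uniform sampler here is only a stand-in for the module's _random_samples helper, which (as used above) returns an array of shape (dim, npts):

import numpy as np

def uniform_box_samples(lb, ub, npts=10000):
    # stand-in sampler: uniform points in the box, shape (dim, npts)
    lb, ub = np.asarray(lb, float), np.asarray(ub, float)
    return np.random.uniform(lb[:, None], ub[:, None], size=(len(lb), npts))

# f(x) = x0 + x1 on [0,1] x [0,1] has mean 1.0
pts = uniform_box_samples([0.0, 0.0], [1.0, 1.0], npts=5000)
est = np.mean([sum(col) for col in pts.T])
print(round(est, 1))   # approximately 1.0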
Example #5
 def _decorate_objective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     #print("@%r %r %r" % (cost, ExtraArgs, max))
     raw = cost
     if ExtraArgs is None: ExtraArgs = ()
     from mystic.python_map import python_map
     if self._map != python_map:
         #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'
         from mystic.monitors import Null
         evalmon = Null()
     else:
         evalmon = self._evalmon
     fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)
     if self._useStrictRange:
         indx = list(self.popEnergy).index(self.bestEnergy)
         ngen = self.generations  #XXX: no random if generations=0 ?
         for i in range(self.nPop):
             self.population[i] = self._clipGuessWithinRangeBoundary(
                 self.population[i], (not ngen) or (i == indx))
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     if self._reducer:
         #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' and 'raw' cost function
     self._cost = (cost, raw, ExtraArgs)
     self._live = True
     return cost
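The FIXME in Example #5 swaps in a Null monitor when the evaluation map is parallel, since the real evaluation monitor cannot be used there. A minimal sketch of that substitution, assuming Null behaves as the no-op sink it is used as above:

from mystic.monitors import Null

evalmon = Null()
evalmon([1.0, 2.0], 3.0)   # call is accepted and silently discarded; nothing is recorded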
Example #6
def sampled_mean(f, lb, ub, npts=10000):
    """
use random sampling to calculate the mean of a function

Inputs:
    f -- a function that takes a list and returns a number
    lb -- a list of lower bounds
    ub -- a list of upper bounds
    npts -- the number of points to sample [Default is npts=10000]
"""
    from numpy import inf, transpose
    from mystic.tools import wrap_bounds
    pts = _random_samples(lb, ub, npts)
    f = wrap_bounds(f, lb, ub)
    ave = 0
    count = 0
    for i in range(len(pts[0])):
        if len(lb) != 1:
            xvector = transpose(pts)[i]
            Fx = f(list(xvector))
        else:
            Fx = f(float(transpose(pts)[i]))
        if Fx != -inf:  # outside of bounds evaluates to -inf
            ave += Fx
            count += 1
    if not count: return None  #XXX: define 0/0 = None
    ave = float(ave) / float(count)
    return ave
Example #7
def sampled_mean(pts, lb, ub):
  from numpy import inf
  from mystic.tools import wrap_bounds
  f = wrap_bounds(model, lb, ub)  # 'model' is assumed to be defined in the enclosing module
  ave = 0; count = 0
  for i in range(len(pts[0])):
    Fx = f([pts[0][i],pts[1][i],pts[2][i]])
    if Fx != -inf: # outside of bounds evaluates to -inf
      ave += Fx
      count += 1
  if not count: return None  #XXX: define 0/0 = None
  ave = float(ave) / float(count)
  return ave
Example #8
 def _RegisterObjective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     if ExtraArgs is None: ExtraArgs = ()
     self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
     if self._useStrictRange:
         for i in range(self.nPop):
             self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i])
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     # hold on to the 'wrapped' cost function
     self._cost = (cost, ExtraArgs)
     return cost
Example #9
 def _RegisterObjective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     if ExtraArgs is None: ExtraArgs = ()
     self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
     if self._useStrictRange:
         for i in range(self.nPop):
             self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i])
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     cost = wrap_nested(cost, self._constraints)
     if self._reducer:
        #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' cost function
     self._cost = (cost, ExtraArgs)
     return cost
Example #10
 def _RegisterObjective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     if ExtraArgs is None: ExtraArgs = ()
    #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'
     from mystic.python_map import python_map
     if self._map != python_map:
         self._fcalls = [0] #FIXME: temporary patch for removing the following line
     else:
         self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
     if self._useStrictRange:
         for i in range(self.nPop):
             self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i])
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     # hold on to the 'wrapped' cost function
     self._cost = (cost, ExtraArgs)
     return cost
Example #11
 def _decorate_objective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     #print("@%r %r %r" % (cost, ExtraArgs, max))
     raw = cost
     if ExtraArgs is None: ExtraArgs = ()
     self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
     if self._useStrictRange:
         indx = list(self.popEnergy).index(self.bestEnergy)
         ngen = self.generations #XXX: no random if generations=0 ?
         for i in range(self.nPop):
             self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i], (not ngen) or (i == indx))
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     if self._reducer:
        #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' and 'raw' cost function
     self._cost = (cost, raw, ExtraArgs)
     self._live = True
     return cost
Example #12
 def _decorate_objective(self, cost, ExtraArgs=None):
     """decorate cost function with bounds, penalties, monitors, etc"""
     #print ("@", cost, ExtraArgs, max)
     raw = cost
     if ExtraArgs is None: ExtraArgs = ()
     self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
     if self._useStrictRange:
         indx = list(self.popEnergy).index(self.bestEnergy)
         ngen = self.generations #XXX: no random if generations=0 ?
         for i in range(self.nPop):
             self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i], (not ngen) or (i == indx))
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     if self._reducer:
        #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' and 'raw' cost function
     self._cost = (cost, raw, ExtraArgs)
     self._live = True
     return cost
Example #13
def sampled_pts(pts,lb,ub):
  """
determine the number of sample points inside the given bounds

Inputs:
    pts -- a list of sample points
    lb -- a list of lower bounds
    ub -- a list of upper bounds
"""
  from numpy import inf, transpose
  def identity(x):
    return x
  from mystic.tools import wrap_bounds
  f = wrap_bounds(identity,lb,ub)
  npts = 0
  for i in range(len(pts[0])):
    xvector = transpose(pts)[i]
    Fx = f(list(xvector))
    if Fx != -inf: # outside of bounds evaluates to -inf
      npts += 1
  return npts
Example #14
def sampled_pts(pts,lb,ub):
  """
determine the number of sample points inside the given bounds

Inputs:
    pts -- a list of sample points
    lb -- a list of lower bounds
    ub -- a list of upper bounds
"""
  from numpy import inf, transpose
  def identity(x):
    return x
  from mystic.tools import wrap_bounds
  f = wrap_bounds(identity,lb,ub)
  npts = 0
  for i in range(len(pts[0])):
    xvector = transpose(pts)[i]
    Fx = f(list(xvector))
    if Fx != -inf: # outside of bounds evaluates to -inf
      npts += 1
  return npts
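sampled_pts simply counts how many columns of pts fall inside the box defined by lb and ub. The same count without the wrap_bounds machinery, on a hypothetical 2 x 4 set of sample points:

import numpy as np

pts = np.array([[0.2, 0.9, 1.5, 0.4],    # x0 coordinates of four sample points
                [0.3, 0.8, 0.1, 2.0]])   # x1 coordinates
lb, ub = [0.0, 0.0], [1.0, 1.0]

inside = sum(all(l <= v <= u for v, l, u in zip(col, lb, ub)) for col in pts.T)
print(inside)   # 2 of the 4 points lie inside the unit box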
Example #15
    def _decorate_objective(self, cost, ExtraArgs=None):
        """decorate the cost function with bounds, penalties, monitors, etc

input::
    - cost is the objective function, of the form y = cost(x, *ExtraArgs),
      where x is a candidate solution, and ExtraArgs is the tuple of positional
      arguments required to evaluate the objective."""
        #print("@%r %r %r" % (cost, ExtraArgs, max))
        evalmon = self._evalmon
        raw = cost
        if ExtraArgs is None: ExtraArgs = ()
        self._fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)
        if self._useStrictRange:
            if self.generations:
                #NOTE: pop[0] was best, may not be after resetting simplex
                for i, j in enumerate(self._setSimplexWithinRangeBoundary()):
                    self.population[i + 1] = self.population[0].copy()
                    self.population[i + 1][i] = j
            else:
                self.population[0] = self._clipGuessWithinRangeBoundary(
                    self.population[0])
            cost = wrap_bounds(cost, self._strictMin,
                               self._strictMax)  #XXX: remove?
            from mystic.constraints import and_
            constraints = and_(self._constraints,
                               self._strictbounds,
                               onfail=self._strictbounds)
        else:
            constraints = self._constraints
        cost = wrap_penalty(cost, self._penalty)
        cost = wrap_nested(cost, constraints)
        if self._reducer:
            #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
            cost = reduced(self._reducer, arraylike=True)(cost)
        # hold on to the 'wrapped' and 'raw' cost function
        self._cost = (cost, raw, ExtraArgs)
        self._live = True
        return cost
Example #16
 def _decorate_objective(self, cost, ExtraArgs=None):
     """decorate the cost function with bounds, penalties, monitors, etc"""
     #print ("@", cost, ExtraArgs, max)
     raw = cost
     if ExtraArgs is None: ExtraArgs = ()
     self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon)
     if self._useStrictRange:
         if self.generations:
             #NOTE: pop[0] was best, may not be after resetting simplex
             for i,j in enumerate(self._setSimplexWithinRangeBoundary()):
                 self.population[i+1] = self.population[0].copy()
                 self.population[i+1][i] = j
         else:
             self.population[0] = self._clipGuessWithinRangeBoundary(self.population[0])
         cost = wrap_bounds(cost, self._strictMin, self._strictMax)
     cost = wrap_penalty(cost, self._penalty)
     cost = wrap_nested(cost, self._constraints)
     if self._reducer:
        #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool)
         cost = reduced(self._reducer, arraylike=True)(cost)
     # hold on to the 'wrapped' and 'raw' cost function
     self._cost = (cost, raw, ExtraArgs)
     self._live = True
     return cost
Example #17
    def Solve(self, func, termination, sigint_callback=None,
              EvaluationMonitor=Null, StepMonitor=Null, ExtraArgs=(), **kwds):
        """Minimize a function using simulated annealing.

Description:

    Uses a simulated annealing algorithm to find the minimum of
    a function of one or more variables.

Inputs:

    func -- the Python function or method to be minimized.
    termination -- callable object providing termination conditions.

Additional Inputs:

    sigint_callback -- callback function for signal handler.
    EvaluationMonitor -- a callable object that will be passed x, fval
        whenever the cost function is evaluated.
    StepMonitor -- a callable object that will be passed x, fval
        after the end of an iteration.
    ExtraArgs -- extra arguments for func.

Further Inputs:

    schedule     -- Annealing schedule to use: 'cauchy', 'fast', or 'boltzmann'
                    [default='fast']
    T0           -- Initial Temperature (estimated as 1.2 times the largest
                    cost-function deviation over random points in the range)
                    [default=None]
    learn_rate   -- scale constant for adjusting guesses
                    [default=0.5]
    boltzmann    -- Boltzmann constant in acceptance test
                     (increase for less stringent test at each temperature).
                    [default=1.0]
    quench, m, n -- Parameters to alter fast_sa schedule
                    [all default to 1.0]
    dwell        -- The number of times to search the space at each temperature.
                    [default=50]

    Optional Termination Conditions:
    Tf           -- Final goal temperature
                    [default=1e-12]
    maxaccept    -- Maximum changes to accept
                    [default=None]
        """
        #allow for inputs that don't conform to AbstractSolver interface
        args = ExtraArgs
        x0 = self.population[0]

        schedule = "fast"
        T0 = None
        boltzmann = 1.0
        learn_rate = 0.5
        dwell = 50
        quench = 1.0
        m = 1.0
        n = 1.0

        Tf = 1e-12 # or None?
        self._maxaccept = None

        self.disp = 0
        self.callback = None

        if 'schedule' in kwds: schedule = kwds['schedule']
        if 'T0' in kwds: T0 = kwds['T0']
        if 'boltzmann' in kwds: boltzmann = kwds['boltzmann']
        if 'learn_rate' in kwds: learn_rate = kwds['learn_rate']
        if 'dwell' in kwds: dwell = kwds['dwell']
        if 'quench' in kwds: quench = kwds['quench']
        if 'm' in kwds: m = kwds['m']
        if 'n' in kwds: n = kwds['n']

        if 'Tf' in kwds: Tf = kwds['Tf']
        if 'maxaccept' in kwds: self._maxaccept = kwds['maxaccept']

        if 'disp' in kwds: self.disp = kwds['disp']
        if 'callback' in kwds: self.callback = kwds['callback']

        #-------------------------------------------------------------

        import signal
        self._EARLYEXIT = False

        fcalls, func = wrap_function(func, ExtraArgs, EvaluationMonitor)

        if self._useStrictRange:
            x0 = self._clipGuessWithinRangeBoundary(x0)
            # Note: wrap_bounds changes the results slightly from the original
            func = wrap_bounds(func, self._strictMin, self._strictMax)

        #generate signal_handler
        self._generateHandler(sigint_callback) 
        if self._handle_sigint: signal.signal(signal.SIGINT, self.signal_handler)
        #-------------------------------------------------------------

        schedule = eval(schedule+'_sa()')
        #   initialize the schedule
        schedule.init(dims=shape(x0),func=func,args=args,boltzmann=boltzmann,T0=T0,
                  learn_rate=learn_rate, lower=self._strictMin, upper=self._strictMax,
                  m=m, n=n, quench=quench, dwell=dwell)

        if self._maxiter is None:
            self._maxiter = 400

        current_state, last_state = _state(), _state()
        if T0 is None:
            x0, self.bestEnergy = schedule.getstart_temp()
            self.bestSolution = x0
        else:
            #self.bestSolution = None
            self.bestSolution = x0
            self.bestEnergy = 300e8

        retval = 0
        last_state.x = asarray(x0).copy()
        fval = func(x0,*args)
        schedule.feval += 1
        last_state.cost = fval
        if last_state.cost < self.bestEnergy:
            self.bestEnergy = fval
            self.bestSolution = asarray(x0).copy()
        schedule.T = schedule.T0
        fqueue = [100, 300, 500, 700]
        self.population = asarray(fqueue)*1.0
        iters = 0
        while 1:
            StepMonitor(self.bestSolution, self.bestEnergy)
            for n in range(dwell):
                current_state.x = schedule.update_guess(last_state.x)
                current_state.cost = func(current_state.x,*args)
                schedule.feval += 1

                dE = current_state.cost - last_state.cost
                if schedule.accept_test(dE):
                    last_state.x = current_state.x.copy()
                    last_state.cost = current_state.cost
                    if last_state.cost < self.bestEnergy:
                        self.bestSolution = last_state.x.copy()
                        self.bestEnergy = last_state.cost
            schedule.update_temp() 

            iters += 1
            fqueue.append(squeeze(last_state.cost))
            fqueue.pop(0)
            af = asarray(fqueue)*1.0

            # Update monitors/variables
            self.population = af
            self.energy_history.append(self.bestEnergy)

            if self.callback is not None:
                self.callback(self.bestSolution)

            # Stopping conditions
            # - last saved values of f from each cooling step
            #     are all very similar (effectively cooled)
            # - Tf is set and we are below it
            # - maxfun is set and we are past it
            # - maxiter is set and we are past it
            # - maxaccept is set and we are past it

            if self._EARLYEXIT or termination(self):
                # How to deal with the below warning? It uses feps, which is passed
                # to termination, so it would be repetitive to also pass it to Solve().
                #if abs(af[-1]-best_state.cost) > feps*10:
                #print "Warning: Cooled to %f at %s but this is not" \
                #      % (squeeze(last_state.cost), str(squeeze(last_state.x))) \
                #      + " the smallest point found."
                break
            if (Tf is not None) and (schedule.T < Tf):
                break
           # if (self._maxfun is not None) and (schedule.feval > self._maxfun):
           #     retval = 1
           #     break
            if (self._maxfun is not None) and (fcalls[0] > self._maxfun):
                retval = 1
                break
            if (iters > self._maxiter):
                retval = 2
                break
            if (self._maxaccept is not None) and (schedule.accepted > self._maxaccept):
                break

        signal.signal(signal.SIGINT,signal.default_int_handler)

        # Store some information. Is there a better way to do this?
        self.generations = iters        # Number of iterations
        self.T = schedule.T             # Final temperature
        self.accept = schedule.accepted # Number of tests accepted

        # code below here pushes output to scipy.optimize interface

        if self.disp:
            if retval == 1: 
                print "Warning: Maximum number of function evaluations has "\
                      "been exceeded."
            elif retval == 2:
                print "Warning: Maximum number of iterations has been exceeded"
            else:
                print "Optimization terminated successfully."
                print "         Current function value: %f" % self.bestEnergy
                print "         Iterations: %d" % iters
                print "         Function evaluations: %d" % fcalls[0]

        return 
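The boltzmann option documented above scales the acceptance test applied at each temperature. A minimal runnable sketch of the standard Metropolis-style criterion that test refers to (this is the textbook form, not the source's schedule.accept_test):

import math, random

def accept(dE, T, boltzmann=1.0):
    # always accept downhill moves; accept uphill moves with
    # probability exp(-dE / (boltzmann * T))
    if dE <= 0:
        return True
    return random.random() < math.exp(-dE / (boltzmann * T))

# uphill moves become much less likely as the temperature cools
print(accept(0.5, T=10.0))    # usually True at high temperature
print(accept(0.5, T=0.001))   # essentially always False at low temperature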
Example #18
    def Solve(
            self,
            func,
            termination,
            sigint_callback=None,
            EvaluationMonitor=Null,
            StepMonitor=Null,  #GradientMonitor=Null,
            ExtraArgs=(),
            **kwds):
        """Minimize a function using NCG.

Description:

    Uses a Newton-CG algorithm to find the minimum of
    a function of one or more variables.

Inputs:

    func -- the Python function or method to be minimized.
    termination -- callable object providing termination conditions.

Additional Inputs:

    sigint_callback -- callback function for signal handler.
    EvaluationMonitor -- a callable object that will be passed x, fval
        whenever the cost function is evaluated.
    StepMonitor -- a callable object that will be passed x, fval, the gradient, and
                   the Hessian after the end of an iteration.
    ExtraArgs -- extra arguments for func and fprime (same for both).

Further Inputs:

    fprime -- callable f'(x,*args)
            Gradient of f.
    fhess_p : callable fhess_p(x,p,*args)
        Function which computes the Hessian of f times an
        arbitrary vector, p.
    fhess : callable fhess(x,*args)
        Function to compute the Hessian matrix of f.
    epsilon : float or ndarray
        If fhess is approximated, use this value for the step size.
    callback : callable
        An optional user-supplied function which is called after
        each iteration.  Called as callback(xk), where xk is the
        current parameter vector.
    disp : bool
        If True, print convergence message.

    Notes:
      Only one of `fhess_p` or `fhess` needs to be given.  If `fhess`
      is provided, then `fhess_p` will be ignored.  If neither `fhess`
      nor `fhess_p` is provided, then the Hessian product will be
      approximated using finite differences on `fprime`.  `fhess_p`,
      if given, must compute the Hessian times an arbitrary vector.

        """
        # allow for inputs that don't conform to AbstractSolver interface
        args = ExtraArgs
        x0 = self.population[0]
        x0 = asarray(x0).flatten()

        epsilon = _epsilon
        self.disp = 1
        self.callback = None
        fhess_p = None
        fhess = None

        if 'epsilon' in kwds: epsilon = kwds['epsilon']
        if 'callback' in kwds: self.callback = kwds['callback']
        if 'disp' in kwds: self.disp = kwds['disp']
        if 'fhess' in kwds: fhess = kwds['fhess']
        if 'fhess_p' in kwds: fhess_p = kwds['fhess_p']

        # fprime is actually required. Temporary fix?:
        if 'fprime' in kwds: fprime = kwds['fprime']
        #-------------------------------------------------------------

        import signal
        self._EARLYEXIT = False

        fcalls, func = wrap_function(func, args, EvaluationMonitor)
        if self._useStrictRange:
            x0 = self._clipGuessWithinRangeBoundary(x0)
            func = wrap_bounds(func, self._strictMin, self._strictMax)

        #generate signal_handler
        self._generateHandler(sigint_callback)
        if self._handle_sigint:
            signal.signal(signal.SIGINT, self.signal_handler)

        #--------------------------------------------------------------

        if self._maxiter is None:
            self._maxiter = len(x0) * 200

        # Wrap gradient function?
        # gcalls, fprime = wrap_function(fprime, args, GradientMonitor)
        gcalls, fprime = wrap_function(fprime, args, Null)

        # Wrap hessian monitor?
        # But wrap_function assumes the function takes one parameter...
        #if fhess is not None:
        #    hcalls2, fhess = wrap_function(fhess, args, HessianMonitor)
        #else:
        #    if fhess_p is not None:
        #        hcalls2, fhess_p = wrap_function(fhess_p, args, HessianMonitor)

        #xtol = len(x0)*avextol
        #update = [2*xtol]
        xk = x0
        k = 0
        hcalls = 0
        old_fval = func(x0)
        abs = absolute
        while k < self._maxiter:
            # Compute a search direction pk by applying the CG method to
            #  del2 f(xk) p = - grad f(xk) starting from 0.
            b = -fprime(xk)
            maggrad = numpy.add.reduce(abs(b))
            eta = min([0.5, numpy.sqrt(maggrad)])
            termcond = eta * maggrad
            xsupi = zeros(len(x0), dtype=x0.dtype)
            ri = -b
            psupi = -ri
            i = 0
            dri0 = numpy.dot(ri, ri)

            if fhess is not None:  # you want to compute hessian once.
                A = fhess(*(xk, ) + args)
                hcalls = hcalls + 1

            while numpy.add.reduce(abs(ri)) > termcond:
                if fhess is None:
                    if fhess_p is None:
                        Ap = approx_fhess_p(xk, psupi, fprime, epsilon)
                    else:
                        Ap = fhess_p(xk, psupi, *args)
                        hcalls = hcalls + 1
                else:
                    Ap = numpy.dot(A, psupi)
                # check curvature
                Ap = asarray(Ap).squeeze()  # get rid of matrices...
                curv = numpy.dot(psupi, Ap)
                if curv == 0.0:
                    break
                elif curv < 0:
                    if (i > 0):
                        break
                    else:
                        xsupi = xsupi + dri0 / curv * psupi
                        break
                alphai = dri0 / curv
                xsupi = xsupi + alphai * psupi
                ri = ri + alphai * Ap
                dri1 = numpy.dot(ri, ri)
                betai = dri1 / dri0
                psupi = -ri + betai * psupi
                i = i + 1
                dri0 = dri1  # update numpy.dot(ri,ri) for next time.

            pk = xsupi  # search direction is solution to system.
            gfk = -b  # gradient at xk
            alphak, fc, gc, old_fval = line_search_BFGS(
                func, xk, pk, gfk, old_fval)

            update = alphak * pk

            # Put last solution in trialSolution for termination()
            self.trialSolution = xk

            xk = xk + update  # upcast if necessary
            if self.callback is not None:
                self.callback(xk)
            k += 1

            # Update variables/monitors
            self.bestSolution = xk
            self.bestEnergy = old_fval
            StepMonitor(self.bestSolution, self.bestEnergy, gfk, Ap)
            self.energy_history.append(self.bestEnergy)

            if self._EARLYEXIT or termination(self):
                break

        self.generations = k

        # Fix me?
        self.hcalls = hcalls
        self.gcalls = gcalls[0]

        signal.signal(signal.SIGINT, signal.default_int_handler)

        if self.disp:
            fval = old_fval
        if k >= self._maxiter:
            if self.disp:
                print "Warning: Maximum number of iterations has been exceeded"
                print "         Current function value: %f" % fval
                print "         Iterations: %d" % k
                print "         Function evaluations: %d" % fcalls[0]
                print "         Gradient evaluations: %d" % gcalls[0]
                print "         Hessian evaluations: %d" % hcalls
        else:
            if self.disp:
                print "Optimization terminated successfully."
                print "         Current function value: %f" % fval
                print "         Iterations: %d" % k
                print "         Function evaluations: %d" % fcalls[0]
                print "         Gradient evaluations: %d" % gcalls[0]
                print "         Hessian evaluations: %d" % hcalls