def write_monitor(steps, energy, id=[]): from mystic.monitors import Monitor mon = Monitor() mon._x = steps[:] mon._y = energy[:] mon._id = id[:] return mon
def write_monitor(steps, energy, id=[], k=None): from mystic.monitors import Monitor mon = Monitor() mon.k = k mon._x.extend(steps) mon._y.extend(mon._k(energy, iter)) mon._id.extend(id) return mon
def __update_allSolvers(self, results): 'replace allSolvers with solvers found in results' #NOTE: apparently, monitors internal to the solver don't work as well # reconnect monitors; save all solvers fcalls = [getattr(s, '_fcalls', [0])[0] for s in self._allSolvers] from mystic.monitors import Monitor while results: #XXX: option to not save allSolvers? skip this and _copy _solver, _stepmon, _evalmon = results.pop() lr = len(results) sm, em = Monitor(), Monitor() s = self._allSolvers[lr] ls, le = len(s._stepmon), len(s._evalmon) # gather old and new results in monitors _solver._stepmon[:] = s._stepmon sm._x,sm._y,sm._id,sm._info = _stepmon _solver._stepmon[ls:] = sm[ls:] del sm _solver._evalmon[:] = s._evalmon em._x,em._y,em._id,em._info = _evalmon _solver._evalmon[le:] = em[le:] del em if not _solver._fcalls[0]: _solver._fcalls[0] = fcalls[lr] self._allSolvers[lr] = _solver #XXX: update not replace? return
def SetGenerationMonitor(self, monitor, new=False): """select a callable to monitor (x, f(x)) after each solver iteration""" from mystic.monitors import Null, Monitor#, CustomMonitor current = Null() if new else self._stepmon if isinstance(monitor, Monitor): # is Monitor() self._stepmon = monitor self._stepmon.prepend(current) elif isinstance(monitor, Null) or monitor == Null: # is Null() or Null self._stepmon = Monitor() #XXX: don't allow Null self._stepmon.prepend(current) elif hasattr(monitor, '__module__'): # is CustomMonitor() if monitor.__module__ in ['mystic._genSow']: self._stepmon = monitor #FIXME: need .prepend(current) else: raise TypeError, "'%s' is not a monitor instance" % monitor self.energy_history = self._stepmon.y self.solution_history = self._stepmon.x return
class AbstractSolver(object): """AbstractSolver base class for mystic optimizers. """ def __init__(self, dim, **kwds): """ Takes one initial input:: dim -- dimensionality of the problem. Additional inputs:: npop -- size of the trial solution population. [default = 1] Important class members:: nDim, nPop = dim, npop generations - an iteration counter. evaluations - an evaluation counter. bestEnergy - current best energy. bestSolution - current best parameter set. [size = dim] popEnergy - set of all trial energy solutions. [size = npop] population - set of all trial parameter solutions. [size = dim*npop] solution_history - history of bestSolution status. [StepMonitor.x] energy_history - history of bestEnergy status. [StepMonitor.y] signal_handler - catches the interrupt signal. """ NP = kwds['npop'] if 'npop' in kwds else 1 self.nDim = dim self.nPop = NP self._init_popEnergy = inf self.popEnergy = [self._init_popEnergy] * NP self.population = [[0.0 for i in range(dim)] for j in range(NP)] self.trialSolution = [0.0] * dim self._map_solver = False self._bestEnergy = None self._bestSolution = None self._state = None self._type = self.__class__.__name__ self.sigint_callback = None self._handle_sigint = False self._useStrictRange = False self._defaultMin = [-1e3] * dim self._defaultMax = [ 1e3] * dim self._strictMin = [] self._strictMax = [] self._maxiter = None self._maxfun = None self._saveiter = None #self._saveeval = None from mystic.monitors import Null, Monitor self._evalmon = Null() self._stepmon = Monitor() self._fcalls = [0] self._energy_history = None self._solution_history= None self.id = None # identifier (use like "rank" for MPI) self._constraints = lambda x: x self._penalty = lambda x: 0.0 self._reducer = None self._cost = (None, None, None) # (cost, raw_cost, args) #,callback) self._collapse = False self._termination = lambda x, *ar, **kw: False if len(ar) < 1 or ar[0] is False or (kw['info'] if 'info' in kw else True) == False else '' #XXX: better default ? 
# (get termination details with self._termination.__doc__) import mystic.termination as mt self._EARLYEXIT = mt.EARLYEXIT self._live = False return def Solution(self): """return the best solution""" return self.bestSolution def __evaluations(self): """get the number of function calls""" return self._fcalls[0] def __generations(self): """get the number of iterations""" return max(0,len(self._stepmon)-1) def __energy_history(self): """get the energy_history (default: energy_history = _stepmon._y)""" if self._energy_history is None: return self._stepmon._y return self._energy_history def __set_energy_history(self, energy): """set the energy_history (energy=None will sync with _stepmon._y)""" self._energy_history = energy return def __solution_history(self): """get the solution_history (default: solution_history = _stepmon.x)""" if self._solution_history is None: return self._stepmon.x return self._solution_history def __set_solution_history(self, params): """set the solution_history (params=None will sync with _stepmon.x)""" self._solution_history = params return def __bestSolution(self): """get the bestSolution (default: bestSolution = population[0])""" if self._bestSolution is None: return self.population[0] return self._bestSolution def __set_bestSolution(self, params): """set the bestSolution (params=None will sync with population[0])""" self._bestSolution = params return def __bestEnergy(self): """get the bestEnergy (default: bestEnergy = popEnergy[0])""" if self._bestEnergy is None: return self.popEnergy[0] return self._bestEnergy def __set_bestEnergy(self, energy): """set the bestEnergy (energy=None will sync with popEnergy[0])""" self._bestEnergy = energy return def SetReducer(self, reducer, arraylike=False): """apply a reducer function to the cost function input:: - a reducer function of the form: y' = reducer(yk), where yk is a results vector and y' is a single value. Ideally, this method is applied to a cost function with a multi-value return, to reduce the output to a single value. If arraylike, the reducer provided should take a single array as input and produce a scalar; otherwise, the reducer provided should meet the requirements of the python's builtin 'reduce' method (e.g. lambda x,y: x+y), taking two scalars and producing a scalar.""" if not reducer: self._reducer = None elif not isinstance(reducer, collections.Callable): raise TypeError("'%s' is not a callable function" % reducer) elif not arraylike: self._reducer = wrap_reducer(reducer) else: #XXX: check if is arraylike? self._reducer = reducer return self._update_objective() def SetPenalty(self, penalty): """apply a penalty function to the optimization input:: - a penalty function of the form: y' = penalty(xk), with y = cost(xk) + y', where xk is the current parameter vector. Ideally, this function is constructed so a penalty is applied when the desired (i.e. encoded) constraints are violated. Equality constraints should be considered satisfied when the penalty condition evaluates to zero, while inequality constraints are satisfied when the penalty condition evaluates to a non-positive number.""" if not penalty: self._penalty = lambda x: 0.0 elif not isinstance(penalty, collections.Callable): raise TypeError("'%s' is not a callable function" % penalty) else: #XXX: check for format: y' = penalty(x) ? 
self._penalty = penalty return self._update_objective() def SetConstraints(self, constraints): """apply a constraints function to the optimization input:: - a constraints function of the form: xk' = constraints(xk), where xk is the current parameter vector. Ideally, this function is constructed so the parameter vector it passes to the cost function will satisfy the desired (i.e. encoded) constraints.""" if not constraints: self._constraints = lambda x: x elif not isinstance(constraints, collections.Callable): raise TypeError("'%s' is not a callable function" % constraints) else: #XXX: check for format: x' = constraints(x) ? self._constraints = constraints return self._update_objective() def SetGenerationMonitor(self, monitor, new=False): """select a callable to monitor (x, f(x)) after each solver iteration""" from mystic.monitors import Null, Monitor#, CustomMonitor current = Null() if new else self._stepmon if isinstance(monitor, Monitor): # is Monitor() self._stepmon = monitor self._stepmon.prepend(current) elif isinstance(monitor, Null) or monitor == Null: # is Null() or Null self._stepmon = Monitor() #XXX: don't allow Null self._stepmon.prepend(current) elif hasattr(monitor, '__module__'): # is CustomMonitor() if monitor.__module__ in ['mystic._genSow']: self._stepmon = monitor #FIXME: need .prepend(current) else: raise TypeError("'%s' is not a monitor instance" % monitor) self.energy_history = None # sync with self._stepmon self.solution_history = None # sync with self._stepmon return def SetEvaluationMonitor(self, monitor, new=False): """select a callable to monitor (x, f(x)) after each cost function evaluation""" from mystic.monitors import Null, Monitor#, CustomMonitor current = Null() if new else self._evalmon if isinstance(monitor, (Null, Monitor) ): # is Monitor() or Null() self._evalmon = monitor self._evalmon.prepend(current) elif monitor == Null: # is Null self._evalmon = monitor() self._evalmon.prepend(current) elif hasattr(monitor, '__module__'): # is CustomMonitor() if monitor.__module__ in ['mystic._genSow']: self._evalmon = monitor #FIXME: need .prepend(current) else: raise TypeError("'%s' is not a monitor instance" % monitor) return def SetStrictRanges(self, min=None, max=None): """ensure solution is within bounds input:: - min, max: must be a sequence of length self.nDim - each min[i] should be <= the corresponding max[i] note:: SetStrictRanges(None) will remove strict range constraints""" if min is False or max is False: self._useStrictRange = False return self._update_objective() #XXX: better to use 'defaultMin,defaultMax' or '-inf,inf' ??? 
if min is None: min = self._defaultMin if max is None: max = self._defaultMax # when 'some' of the bounds are given as 'None', replace with default for i in range(len(min)): if min[i] is None: min[i] = self._defaultMin[0] if max[i] is None: max[i] = self._defaultMax[0] min = asarray(min); max = asarray(max) if numpy.any(( min > max ),0): raise ValueError("each min[i] must be <= the corresponding max[i]") if len(min) != self.nDim: raise ValueError("bounds array must be length %s" % self.nDim) self._useStrictRange = True self._strictMin = min self._strictMax = max return self._update_objective() def _clipGuessWithinRangeBoundary(self, x0, at=True): """ensure that initial guess is set within bounds input:: - x0: must be a sequence of length self.nDim""" #if len(x0) != self.nDim: #XXX: unnecessary w/ self.trialSolution # raise ValueError, "initial guess must be length %s" % self.nDim x0 = asarray(x0) bounds = (self._strictMin,self._strictMax) if not len(self._strictMin): return x0 # clip x0 at bounds settings = numpy.seterr(all='ignore') x_ = x0.clip(*bounds) numpy.seterr(**settings) if at: return x_ # clip x0 within bounds x_ = x_ != x0 x0[x_] = random.uniform(self._strictMin,self._strictMax)[x_] return x0 def SetInitialPoints(self, x0, radius=0.05): """Set Initial Points with Guess (x0) input:: - x0: must be a sequence of length self.nDim - radius: generate random points within [-radius*x0, radius*x0] for i!=0 when a simplex-type initial guess in required""" x0 = asfarray(x0) rank = len(x0.shape) if rank is 0: x0 = asfarray([x0]) rank = 1 if not -1 < rank < 2: raise ValueError("Initial guess must be a scalar or rank-1 sequence.") if len(x0) != self.nDim: raise ValueError("Initial guess must be length %s" % self.nDim) #slightly alter initial values for solvers that depend on randomness min = x0*(1-radius) max = x0*(1+radius) numzeros = len(x0[x0==0]) min[min==0] = asarray([-radius for i in range(numzeros)]) max[max==0] = asarray([radius for i in range(numzeros)]) self.SetRandomInitialPoints(min,max) #stick initial values in population[i], i=0 self.population[0] = x0.tolist() def SetRandomInitialPoints(self, min=None, max=None): """Generate Random Initial Points within given Bounds input:: - min, max: must be a sequence of length self.nDim - each min[i] should be <= the corresponding max[i]""" if min is None: min = self._defaultMin if max is None: max = self._defaultMax #if numpy.any(( asarray(min) > asarray(max) ),0): # raise ValueError, "each min[i] must be <= the corresponding max[i]" if len(min) != self.nDim or len(max) != self.nDim: raise ValueError("bounds array must be length %s" % self.nDim) # when 'some' of the bounds are given as 'None', replace with default for i in range(len(min)): if min[i] is None: min[i] = self._defaultMin[0] if max[i] is None: max[i] = self._defaultMax[0] #generate random initial values for i in range(len(self.population)): for j in range(self.nDim): self.population[i][j] = random.uniform(min[j],max[j]) def SetMultinormalInitialPoints(self, mean, var=None): """Generate Initial Points from Multivariate Normal. input:: - mean must be a sequence of length self.nDim - var can be... None: -> it becomes the identity scalar: -> var becomes scalar * I matrix: -> the variance matrix. must be the right size! """ from mystic.tools import random_state rng = random_state(module='numpy.random') assert(len(mean) == self.nDim) if var is None: var = numpy.eye(self.nDim) else: try: # scalar ? float(var) except: # nope. 
var better be matrix of the right size (no check) pass else: var = var * numpy.eye(self.nDim) for i in range(len(self.population)): self.population[i] = rng.multivariate_normal(mean, var).tolist() return def SetSampledInitialPoints(self, dist=None): """Generate Random Initial Points from Distribution (dist) input:: - dist: a mystic.math.Distribution instance """ from mystic.math import Distribution _dist = Distribution() if dist is None: dist = _dist elif type(_dist) not in dist.__class__.mro(): dist = Distribution(dist) #XXX: or throw error? for i in range(self.nPop): self.population[i] = dist(self.nDim) return def enable_signal_handler(self):#, callback='*'): """enable workflow interrupt handler while solver is running""" """ #XXX: disabled, as would add state to solver input:: - if a callback function is provided, generate a new handler with the given callback. If callback is None, do not use a callback. If callback is not provided, just turn on the existing handler. """ ## always _generate handler on first call #if (self.signal_handler is None) and callback == '*': # callback = None ## when a new callback is given, generate a new handler #if callback != '*': # self._generateHandler(callback) self._handle_sigint = True def disable_signal_handler(self): """disable workflow interrupt handler while solver is running""" self._handle_sigint = False def SetSaveFrequency(self, generations=None, filename=None, **kwds): """set frequency for saving solver restart file input:: - generations = number of solver iterations before next save of state - filename = name of file in which to save solver state note:: SetSaveFrequency(None) will disable saving solver restart file""" self._saveiter = generations #self._saveeval = evaluations self._state = filename return def SetEvaluationLimits(self, generations=None, evaluations=None, \ new=False, **kwds): """set limits for generations and/or evaluations input:: - generations = maximum number of solver iterations (i.e. steps) - evaluations = maximum number of function evaluations""" # backward compatibility self._maxiter = kwds['maxiter'] if 'maxiter' in kwds else generations self._maxfun = kwds['maxfun'] if 'maxfun' in kwds else evaluations # handle if new (reset counter, instead of extend counter) if new: if generations is not None: self._maxiter += self.generations else: self._maxiter = "*" #XXX: better as self._newmax = True ? if evaluations is not None: self._maxfun += self.evaluations else: self._maxfun = "*" return def _SetEvaluationLimits(self, iterscale=None, evalscale=None): """set the evaluation limits""" if iterscale is None: iterscale = 10 if evalscale is None: evalscale = 1000 N = len(self.population[0]) # usually self.nDim # if SetEvaluationLimits not applied, use the solver default if self._maxiter is None: self._maxiter = N * self.nPop * iterscale elif self._maxiter == "*": # (i.e. 
None, but 'reset counter') self._maxiter = (N * self.nPop * iterscale) + self.generations if self._maxfun is None: self._maxfun = N * self.nPop * evalscale elif self._maxfun == "*": self._maxfun = (N * self.nPop * evalscale) + self.evaluations return def Terminated(self, disp=False, info=False, termination=None, **kwds): """check if the solver meets the given termination conditions Input:: - disp = if True, print termination statistics and/or warnings - info = if True, return termination message (instead of boolean) - termination = termination conditions to check against Notes:: If no termination conditions are given, the solver's stored termination conditions will be used. """ if termination is None: termination = self._termination # ensure evaluation limits have been imposed self._SetEvaluationLimits() # check for termination messages msg = termination(self, info=True) sig = "SolverInterrupt with %s" % {} lim = "EvaluationLimits with %s" % {'evaluations':self._maxfun, 'generations':self._maxiter} # push solver internals to scipy.optimize.fmin interface if self._fcalls[0] >= self._maxfun and self._maxfun is not None: msg = lim #XXX: prefer the default stop ? if disp: print("Warning: Maximum number of function evaluations has "\ "been exceeded.") elif self.generations >= self._maxiter and self._maxiter is not None: msg = lim #XXX: prefer the default stop ? if disp: print("Warning: Maximum number of iterations has been exceeded") elif self._EARLYEXIT: msg = sig if disp: print("Warning: Optimization terminated with signal interrupt.") elif msg and disp: print("Optimization terminated successfully.") print(" Current function value: %f" % self.bestEnergy) print(" Iterations: %d" % self.generations) print(" Function evaluations: %d" % self._fcalls[0]) if info: return msg return bool(msg) def SetTermination(self, termination): # disp ? """set the termination conditions""" #XXX: validate that termination is a 'condition' ? self._termination = termination self._collapse = False if termination is not None: from mystic.termination import state stop = state(termination) stop = getattr(stop, 'iterkeys', stop.keys)() self._collapse = any(key.startswith('Collapse') for key in stop) return def SetObjective(self, cost, ExtraArgs=None): # callback=None/False ? """decorate the cost function with bounds, penalties, monitors, etc""" _cost,_raw,_args = self._cost # check if need to 'wrap' or can return the stored cost if (cost is None or cost is _raw or cost is _cost) and \ (ExtraArgs is None or ExtraArgs is _args): return # get cost and args if None was given if cost is None: cost = _raw args = _args if ExtraArgs is None else ExtraArgs args = () if args is None else args # quick validation check (so doesn't screw up internals) if not isvalid(cost, [0]*self.nDim, *args): try: name = cost.__name__ except AttributeError: # raise new error for non-callables cost(*args) validate(cost, None, *args) #val = len(args) + 1 #XXX: 'klepto.validate' for better error? 
#msg = '%s() invalid number of arguments (%d given)' % (name, val) #raise TypeError(msg) # hold on to the 'raw' cost function self._cost = (None, cost, ExtraArgs) self._live = False return def Collapsed(self, disp=False, info=False): """check if the solver meets the given collapse conditions Input:: - disp = if True, print details about the solver state at collapse - info = if True, return collapsed state (instead of boolean) """ stop = getattr(self, '__stop__', self.Terminated(info=True)) import mystic.collapse as ct collapses = ct.collapsed(stop) or dict() if collapses and disp: for (k,v) in getattr(collapses, 'iteritems', collapses.items)(): print(" %s: %s" % (k.split()[0],v)) #print("# Collapse at: Generation", self._stepmon._step-1, \ # "with", self.bestEnergy, "@\n#", list(self.bestSolution)) return collapses if info else bool(collapses) def Collapse(self, disp=False): """if solver has terminated by collapse, apply the collapse (unless both collapse and "stop" are simultaneously satisfied) """ #XXX: return True for "collapse and continue" and False otherwise? collapses = self.Collapsed(disp=disp, info=True) if collapses: # stop if any Termination is not from Collapse stop = getattr(self, '__stop__', self.Terminated(info=True)) stop = not all(k.startswith("Collapse") for k in stop.split("; ")) if stop: return {} #XXX: self._collapse = False ? else: stop = True if collapses: # then stomach a bunch of module imports (yuck) import mystic.tools as to import mystic.termination as mt import mystic.constraints as cn import mystic.mask as ma # get collapse conditions #XXX: efficient? 4x loops over collapses state = mt.state(self._termination) npts = getattr(self._stepmon, '_npts', None) #XXX: default? #conditions = [cn.impose_at(*to.select_params(self,collapses[k])) if state[k].get('target') is None else cn.impose_at(collapses[k],state[k].get('target')) for k in collapses if k.startswith('CollapseAt')] #conditions += [cn.impose_as(collapses[k],state[k].get('offset')) for k in collapses if k.startswith('CollapseAs')] #randomize = False conditions = []; _conditions = []; conditions_ = [] for k in collapses: #FIXME: these should be encapsulted in termination instance if k.startswith('CollapseAt'): t = state[k] t = t['target'] if 'target' in t else None if t is None: t = cn.impose_at(*to.select_params(self,collapses[k])) else: t = cn.impose_at(collapses[k],t) conditions.append(t) elif k.startswith('CollapseAs'): t = state[k] t = t['offset'] if 'offset' in t else None _conditions.append(cn.impose_as(collapses[k],t)) elif k.startswith(('CollapseCost','CollapseGrad')): t = state[k] t = t['clip'] if 'clip' in t else True conditions_.append(cn.impose_bounds(collapses[k],clip=t)) #randomize = True conditions.extend(_conditions) conditions.extend(conditions_) del _conditions; del conditions_ # get measure collapse conditions if npts: #XXX: faster/better if comes first or last? 
conditions += [cn.impose_measure( npts, [collapses[k] for k in collapses if k.startswith('CollapsePosition')], [collapses[k] for k in collapses if k.startswith('CollapseWeight')] )] # update termination and constraints in solver constraints = to.chain(*conditions)(self._constraints) termination = ma.update_mask(self._termination, collapses) self.SetConstraints(constraints) self.SetTermination(termination) #if randomize: self.SetInitialPoints(self.population[0]) #print(mt.state(self._termination).keys()) #return bool(collapses) and not stop return collapses def _update_objective(self): """decorate the cost function with bounds, penalties, monitors, etc""" # rewrap the cost if the solver has been run if False: # trigger immediately self._decorate_objective(*self._cost[1:]) else: # delay update until _bootstrap self.Finalize() return def _decorate_objective(self, cost, ExtraArgs=None): """decorate the cost function with bounds, penalties, monitors, etc""" #print("@%r %r %r" % (cost, ExtraArgs, max)) raw = cost if ExtraArgs is None: ExtraArgs = () self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon) if self._useStrictRange: indx = list(self.popEnergy).index(self.bestEnergy) ngen = self.generations #XXX: no random if generations=0 ? for i in range(self.nPop): self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i], (not ngen) or (i is indx)) cost = wrap_bounds(cost, self._strictMin, self._strictMax) cost = wrap_penalty(cost, self._penalty) cost = wrap_nested(cost, self._constraints) if self._reducer: #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool) cost = reduced(self._reducer, arraylike=True)(cost) # hold on to the 'wrapped' and 'raw' cost function self._cost = (cost, raw, ExtraArgs) self._live = True return cost def _bootstrap_objective(self, cost=None, ExtraArgs=None): """HACK to enable not explicitly calling _decorate_objective""" _cost,_raw,_args = self._cost # check if need to 'wrap' or can return the stored cost if (cost is None or cost is _raw or cost is _cost) and \ (ExtraArgs is None or ExtraArgs is _args) and self._live: return _cost # 'wrap' the 'new' cost function with _decorate self.SetObjective(cost, ExtraArgs) return self._decorate_objective(*self._cost[1:]) #XXX: when _decorate called, solver._fcalls will be reset ? def _Step(self, cost=None, ExtraArgs=None, **kwds): """perform a single optimization iteration *** this method must be overwritten ***""" raise NotImplementedError("an optimization algorithm was not provided") def SaveSolver(self, filename=None, **kwds): """save solver state to a restart file""" import dill fd = None if filename is None: # then check if already has registered file if self._state is None: # then create a new one import os, tempfile fd, self._state = tempfile.mkstemp(suffix='.pkl') os.close(fd) filename = self._state self._state = filename f = open(filename, 'wb') try: dill.dump(self, f, **kwds) self._stepmon.info('DUMPED("%s")' % filename) #XXX: before / after ? finally: f.close() return def __save_state(self, force=False): """save the solver state, if chosen save frequency is met""" # save the last iteration if force and bool(self._state): self.SaveSolver() return # save the zeroth iteration nonzero = True #XXX: or bool(self.generations) ? 
# after _saveiter generations, then save state iters = self._saveiter saveiter = bool(iters) and not bool(self.generations % iters) if nonzero and saveiter: self.SaveSolver() #FIXME: if _saveeval (or more) since last check, then save state #save = self.evaluations % self._saveeval return def __load_state(self, solver, **kwds): """load solver.__dict__ into self.__dict__; override with kwds""" #XXX: should do some filtering on kwds ? self.__dict__.update(solver.__dict__, **kwds) return def Finalize(self, **kwds): """cleanup upon exiting the main optimization loop""" self._live = False return def _process_inputs(self, kwds): """process and activate input settings""" #allow for inputs that don't conform to AbstractSolver interface #NOTE: not sticky: callback, disp #NOTE: sticky: EvaluationMonitor, StepMonitor, penalty, constraints settings = \ {'callback':None, #user-supplied function, called after each step 'disp':0} #non-zero to print convergence messages [settings.update({i:j}) for (i,j) in kwds.items() if i in settings] # backward compatibility if 'EvaluationMonitor' in kwds: \ self.SetEvaluationMonitor(kwds['EvaluationMonitor']) if 'StepMonitor' in kwds: \ self.SetGenerationMonitor(kwds['StepMonitor']) if 'penalty' in kwds: \ self.SetPenalty(kwds['penalty']) if 'constraints' in kwds: \ self.SetConstraints(kwds['constraints']) return settings def Step(self, cost=None, termination=None, ExtraArgs=None, **kwds): """Take a single optimization step using the given 'cost' function. Uses an optimization algorithm to take one 'step' toward the minimum of a function of one or more variables. Args: cost (func, default=None): the function to be minimized: ``y = cost(x)``. termination (termination, default=None): termination conditions. ExtraArgs (tuple, default=None): extra arguments for cost. callback (func, default=None): function to call after each iteration. The interface is ``callback(xk)``, with xk the current parameter vector. disp (bool, default=False): if True, print convergence messages. Returns: None Notes: To run the solver until termination, call ``Solve()``. Alternately, use ``Terminated()`` as the stop condition in a while loop over ``Step``. If the algorithm does not meet the given termination conditions after the call to ``Step``, the solver may be left in an "out-of-sync" state. When abandoning an non-terminated solver, one should call ``Finalize()`` to make sure the solver is fully returned to a "synchronized" state. """ if 'disp' in kwds: disp = bool(kwds['disp'])#; del kwds['disp'] else: disp = False # register: cost, termination, ExtraArgs cost = self._bootstrap_objective(cost, ExtraArgs) if termination is not None: self.SetTermination(termination) # check termination before 'stepping' if len(self._stepmon): msg = self.Terminated(disp=disp, info=True) or None else: msg = None # if not terminated, then take a step if msg is None: self._Step(**kwds) #FIXME: not all kwds are given in __doc__ if self.Terminated(): # then cleanup/finalize self.Finalize() # get termination message and log state msg = self.Terminated(disp=disp, info=True) or None if msg: self._stepmon.info('STOP("%s")' % msg) self.__save_state(force=True) return msg def _Solve(self, cost, ExtraArgs, **settings): """Run the optimizer to termination, using the given settings. Args: cost (func): the function to be minimized: ``y = cost(x)``. ExtraArgs (tuple): tuple of extra arguments for ``cost``. 
settings (dict): optimizer settings (produced by _process_inputs) Returns: None """ disp = settings['disp'] if 'disp' in settings else False # the main optimization loop stop = False while not stop: stop = self.Step(**settings) #XXX: remove need to pass settings? continue # if collapse, then activate any relevant collapses and continue self.__stop__ = stop #HACK: avoid re-evaluation of Termination while self._collapse and self.Collapse(disp=disp): del self.__stop__ #HACK stop = False while not stop: stop = self.Step(**settings) #XXX: move Collapse inside of Step? continue self.__stop__ = stop #HACK del self.__stop__ #HACK return def Solve(self, cost=None, termination=None, ExtraArgs=None, **kwds): """Minimize a 'cost' function with given termination conditions. Uses an optimization algorithm to find the minimum of a function of one or more variables. Args: cost (func, default=None): the function to be minimized: ``y = cost(x)``. termination (termination, default=None): termination conditions. ExtraArgs (tuple, default=None): extra arguments for cost. sigint_callback (func, default=None): callback function for signal handler. callback (func, default=None): function to call after each iteration. The interface is ``callback(xk)``, with xk the current parameter vector. disp (bool, default=False): if True, print convergence messages. Returns: None """ # process and activate input settings if 'sigint_callback' in kwds: self.sigint_callback = kwds['sigint_callback'] del kwds['sigint_callback'] else: self.sigint_callback = None settings = self._process_inputs(kwds) # set up signal handler #FIXME: sigint doesn't behave well in parallel self._EARLYEXIT = False #XXX: why not use EARLYEXIT singleton? # activate signal handler #import threading as thread #mainthread = isinstance(thread.current_thread(), thread._MainThread) #if mainthread: #XXX: if not mainthread, signal will raise ValueError import mystic._signal as signal if self._handle_sigint: signal.signal(signal.SIGINT, signal.Handler(self)) # register: cost, termination, ExtraArgs cost = self._bootstrap_objective(cost, ExtraArgs) if termination is not None: self.SetTermination(termination) #XXX: self.Step(cost, termination, ExtraArgs, **settings) ? # run the optimizer to termination self._Solve(cost, ExtraArgs, **settings) # restore default handler for signal interrupts if self._handle_sigint: signal.signal(signal.SIGINT, signal.default_int_handler) return def __copy__(self): cls = self.__class__ result = cls.__new__(cls) result.__dict__.update(self.__dict__) return result def __deepcopy__(self, memo): import copy import dill cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): if v is self._cost: setattr(result, k, tuple(dill.copy(i) for i in v)) else: try: #XXX: work-around instancemethods in python2.6 setattr(result, k, copy.deepcopy(v, memo)) except TypeError: setattr(result, k, dill.copy(v)) return result # extensions to the solver interface evaluations = property(__evaluations ) generations = property(__generations ) energy_history = property(__energy_history,__set_energy_history ) solution_history = property(__solution_history,__set_solution_history ) bestEnergy = property(__bestEnergy,__set_bestEnergy ) bestSolution = property(__bestSolution,__set_bestSolution ) pass
except ImportError: from mystic._scipyoptimize import fmin_cg leastsq = None # desol, dstepmon = de_solve() print("desol: %s" % desol) print("dstepmon 50: %s" % dstepmon.x[50]) print("dstepmon 100: %s" % dstepmon.x[100]) # # this will try to use nelder_mean from a relatively "near by" point (very sensitive) point = [1234., -500., 10., 0.001] # both cg and nm does fine point = [1000, -100, 0, 1] # cg will do badly on this one # this will try nelder-mead from an unconverged DE solution #point = dstepmon.x[-150] # simplex, esow = Monitor(), Monitor() solver = fmin(len(point)) solver.SetInitialPoints(point) solver.SetEvaluationMonitor(esow) solver.SetGenerationMonitor(simplex) solver.Solve(cost_function, CRT()) sol = solver.Solution() print("\nsimplex solution: %s" % sol) # solcg = fmin_cg(cost_function, point) print("\nConjugate-Gradient (Polak Rubiere) : %s" % solcg) # if leastsq: sollsq = leastsq(vec_cost_function, point) sollsq = sollsq[0]
def Solve(self, cost, termination=None, ExtraArgs=(), **kwds): """Minimize a 'cost' function with given termination conditions. Description: Uses an ensemble of optimizers to find the minimum of a function of one or more variables. Inputs: cost -- the Python function or method to be minimized. Additional Inputs: termination -- callable object providing termination conditions. ExtraArgs -- extra arguments for cost. Further Inputs: sigint_callback -- callback function for signal handler. callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. [default = None] disp -- non-zero to print convergence messages. [default = 0] """ # process and activate input settings if 'sigint_callback' in kwds: self.sigint_callback = kwds['sigint_callback'] del kwds['sigint_callback'] else: self.sigint_callback = None settings = self._process_inputs(kwds) disp = settings['disp'] if 'disp' in settings else False echo = settings['callback'] if 'callback' in settings else None # for key in settings: # exec "%s = settings['%s']" % (key,key) if disp in ['verbose', 'all']: verbose = True else: verbose = False #------------------------------------------------------------- from mystic.python_map import python_map if self._map != python_map: #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp' from mystic.monitors import Null evalmon = Null() else: evalmon = self._evalmon fcalls, cost = wrap_function(cost, ExtraArgs, evalmon) # set up signal handler #self._EARLYEXIT = False # activate signal_handler #import threading as thread #mainthread = isinstance(thread.current_thread(), thread._MainThread) #if mainthread: #XXX: if not mainthread, signal will raise ValueError import mystic._signal as signal if self._handle_sigint: signal.signal(signal.SIGINT, signal.Handler(self)) # register termination function if termination is not None: self.SetTermination(termination) # get the nested solver instance solver = self._AbstractEnsembleSolver__get_solver_instance() #------------------------------------------------------------- # generate starting points initial_values = self._InitialPoints() # run optimizer for each grid point from copy import deepcopy as _copy op = [_copy(solver) for i in range(len(initial_values))] #cf = [cost for i in range(len(initial_values))] vb = [verbose for i in range(len(initial_values))] cb = [echo for i in range(len(initial_values))] #XXX: remove? at = self.id if self.id else 0 # start at self.id id = range(at,at+len(initial_values)) # generate the local_optimize function def local_optimize(solver, x0, rank=None, disp=False, callback=None): from copy import deepcopy as _copy from mystic.tools import isNull solver.id = rank solver.SetInitialPoints(x0) if solver._useStrictRange: #XXX: always, settable, or sync'd ? solver.SetStrictRanges(min=solver._strictMin, \ max=solver._strictMax) # or lower,upper ? 
solver.Solve(cost, disp=disp, callback=callback) sm = solver._stepmon em = solver._evalmon if isNull(sm): sm = ([],[],[],[]) else: sm = (_copy(sm._x),_copy(sm._y),_copy(sm._id),_copy(sm._info)) if isNull(em): em = ([],[],[],[]) else: em = (_copy(em._x),_copy(em._y),_copy(em._id),_copy(em._info)) return solver, sm, em # map:: solver = local_optimize(solver, x0, id, verbose) results = list(self._map(local_optimize, op, initial_values, id, \ vb, cb, **self._mapconfig)) # save initial state self._AbstractSolver__save_state() #XXX: HACK TO GET CONTENT OF ALL MONITORS # reconnect monitors; save all solvers from mystic.monitors import Monitor while results: #XXX: option to not save allSolvers? skip this and _copy _solver, _stepmon, _evalmon = results.pop() sm = Monitor() sm._x,sm._y,sm._id,sm._info = _stepmon _solver._stepmon.extend(sm) del sm em = Monitor() em._x,em._y,em._id,em._info = _evalmon _solver._evalmon.extend(em) del em self._allSolvers[len(results)] = _solver del results, _solver, _stepmon, _evalmon #XXX: END HACK # get the results with the lowest energy self._bestSolver = self._allSolvers[0] bestpath = self._bestSolver._stepmon besteval = self._bestSolver._evalmon self._total_evals = self._bestSolver.evaluations for solver in self._allSolvers[1:]: self._total_evals += solver.evaluations # add func evals if solver.bestEnergy < self._bestSolver.bestEnergy: self._bestSolver = solver bestpath = solver._stepmon besteval = solver._evalmon # return results to internals self.population = self._bestSolver.population #XXX: pointer? copy? self.popEnergy = self._bestSolver.popEnergy #XXX: pointer? copy? self.bestSolution = self._bestSolver.bestSolution #XXX: pointer? copy? self.bestEnergy = self._bestSolver.bestEnergy self.trialSolution = self._bestSolver.trialSolution #XXX: pointer? copy? self._fcalls = self._bestSolver._fcalls #XXX: pointer? copy? self._maxiter = self._bestSolver._maxiter self._maxfun = self._bestSolver._maxfun # write 'bests' to monitors #XXX: non-best monitors may be useful too self._stepmon = bestpath #XXX: pointer? copy? self._evalmon = besteval #XXX: pointer? copy? self.energy_history = None self.solution_history = None #from mystic.tools import isNull #if isNull(bestpath): # self._stepmon = bestpath #else: # for i in range(len(bestpath.y)): # self._stepmon(bestpath.x[i], bestpath.y[i], self.id) # #XXX: could apply callback here, or in exec'd code #if isNull(besteval): # self._evalmon = besteval #else: # for i in range(len(besteval.y)): # self._evalmon(besteval.x[i], besteval.y[i]) #------------------------------------------------------------- # restore default handler for signal interrupts if self._handle_sigint: signal.signal(signal.SIGINT, signal.default_int_handler) # log any termination messages msg = self.Terminated(disp=disp, info=True) if msg: self._stepmon.info('STOP("%s")' % msg) # save final state self._AbstractSolver__save_state(force=True) return
def impose_expectation(param, f, npts, bounds=None, weights=None, **kwds): """impose a given expextation value (m +/- D) on a given function f. Optimiziation on f over the given bounds seeks a mean 'm' with deviation 'D'. (this function is not 'mean-, range-, or variance-preserving') Inputs: param -- a tuple of target parameters: param = (mean, deviation) f -- a function that takes a list and returns a number npts -- a tuple of dimensions of the target product measure bounds -- a tuple of sample bounds: bounds = (lower_bounds, upper_bounds) weights -- a list of sample weights Additional Inputs: constraints -- a function that takes a nested list of N x 1D discrete measure positions and weights x' = constraints(x, w) Outputs: samples -- a list of sample positions For example: >>> # provide the dimensions and bounds >>> nx = 3; ny = 2; nz = 1 >>> x_lb = [10.0]; y_lb = [0.0]; z_lb = [10.0] >>> x_ub = [50.0]; y_ub = [9.0]; z_ub = [90.0] >>> >>> # prepare the bounds >>> lb = (nx * x_lb) + (ny * y_lb) + (nz * z_lb) >>> ub = (nx * x_ub) + (ny * y_ub) + (nz * z_ub) >>> >>> # generate a list of samples with mean +/- dev imposed >>> mean = 2.0; dev = 0.01 >>> samples = impose_expectation((mean,dev), f, (nx,ny,nz), (lb,ub)) >>> >>> # test the results by calculating the expectation value for the samples >>> expectation(f, samples) >>> 2.00001001012246015 """ # param[0] is the target mean # param[1] is the acceptable deviation from the target mean # FIXME: the following is a HACK to recover from lost 'weights' information # we 'mimic' discrete measures using the product measure weights # plug in the 'constraints' function: samples' = constrain(samples, weights) constrain = None # default is no constraints if 'constraints' in kwds: constrain = kwds['constraints'] if not constrain: # if None (default), there are no constraints constraints = lambda x: x else: #XXX: better to use a standard "xk' = constrain(xk)" interface ? 
def constraints(rv): coords = _pack(_nested(rv, npts)) coords = zip(*coords) # 'mimic' a nested list coords = constrain(coords, [weights for i in range(len(coords))]) coords = zip(*coords) # revert back to a packed list return _flat(_unpack(coords, npts)) # construct cost function to reduce deviation from expectation value def cost(rv): """compute cost from a 1-d array of model parameters, where: cost = | E[model] - m |**2 """ # from mystic.math.measures import _pack, _nested, expectation samples = _pack(_nested(rv, npts)) Ex = expectation(f, samples, weights) return (Ex - param[0])**2 # if bounds are not set, use the default optimizer bounds if not bounds: lower_bounds = [] upper_bounds = [] for n in npts: lower_bounds += [None] * n upper_bounds += [None] * n else: lower_bounds, upper_bounds = bounds # construct and configure optimizer debug = kwds['debug'] if 'debug' in kwds else False npop = 200 maxiter = 1000 maxfun = 1e+6 crossover = 0.9 percent_change = 0.9 def optimize(cost, (lb, ub), tolerance, _constraints): from mystic.solvers import DifferentialEvolutionSolver2 from mystic.termination import VTR from mystic.strategy import Best1Exp from mystic.monitors import VerboseMonitor, Monitor from mystic.tools import random_seed if debug: random_seed(123) evalmon = Monitor() stepmon = Monitor() if debug: stepmon = VerboseMonitor(10) ndim = len(lb) solver = DifferentialEvolutionSolver2(ndim, npop) solver.SetRandomInitialPoints(min=lb, max=ub) solver.SetStrictRanges(min=lb, max=ub) solver.SetEvaluationLimits(maxiter, maxfun) solver.SetEvaluationMonitor(evalmon) solver.SetGenerationMonitor(stepmon) solver.Solve(cost,termination=VTR(tolerance),strategy=Best1Exp, \ CrossProbability=crossover,ScalingFactor=percent_change, \ constraints = _constraints) solved = solver.Solution() diameter_squared = solver.bestEnergy func_evals = len(evalmon) return solved, diameter_squared, func_evals
class AbstractSolver(object): """ AbstractSolver base class for mystic optimizers. """ def __init__(self, dim, **kwds): """ Takes one initial input: dim -- dimensionality of the problem. Additional inputs: npop -- size of the trial solution population. [default = 1] Important class members: nDim, nPop = dim, npop generations - an iteration counter. evaluations - an evaluation counter. bestEnergy - current best energy. bestSolution - current best parameter set. [size = dim] popEnergy - set of all trial energy solutions. [size = npop] population - set of all trial parameter solutions. [size = dim*npop] solution_history - history of bestSolution status. [StepMonitor.x] energy_history - history of bestEnergy status. [StepMonitor.y] signal_handler - catches the interrupt signal. """ NP = kwds['npop'] if 'npop' in kwds else 1 self.nDim = dim self.nPop = NP self._init_popEnergy = inf self.popEnergy = [self._init_popEnergy] * NP self.population = [[0.0 for i in range(dim)] for j in range(NP)] self.trialSolution = [0.0] * dim self._map_solver = False self._bestEnergy = None self._bestSolution = None self._state = None self._type = self.__class__.__name__ self.signal_handler = None self._handle_sigint = False self._useStrictRange = False self._defaultMin = [-1e3] * dim self._defaultMax = [ 1e3] * dim self._strictMin = [] self._strictMax = [] self._maxiter = None self._maxfun = None self._saveiter = None #self._saveeval = None from mystic.monitors import Null, Monitor self._evalmon = Null() self._stepmon = Monitor() self._fcalls = [0] self._energy_history = None self._solution_history= None self.id = None # identifier (use like "rank" for MPI) self._constraints = lambda x: x self._penalty = lambda x: 0.0 self._reducer = None self._cost = (None, None, None) # (cost, raw_cost, args) #,callback) self._collapse = False self._termination = lambda x, *ar, **kw: False if len(ar) < 1 or ar[0] is False or (kw['info'] if 'info' in kw else True) == False else '' #XXX: better default ? 
# (get termination details with self._termination.__doc__) import mystic.termination as mt self._EARLYEXIT = mt.EARLYEXIT self._live = False return def Solution(self): """return the best solution""" return self.bestSolution def __evaluations(self): """get the number of function calls""" return self._fcalls[0] def __generations(self): """get the number of iterations""" return max(0,len(self._stepmon)-1) def __energy_history(self): """get the energy_history (default: energy_history = _stepmon._y)""" if self._energy_history is None: return self._stepmon._y return self._energy_history def __set_energy_history(self, energy): """set the energy_history (energy=None will sync with _stepmon._y)""" self._energy_history = energy return def __solution_history(self): """get the solution_history (default: solution_history = _stepmon.x)""" if self._solution_history is None: return self._stepmon.x return self._solution_history def __set_solution_history(self, params): """set the solution_history (params=None will sync with _stepmon.x)""" self._solution_history = params return def __bestSolution(self): """get the bestSolution (default: bestSolution = population[0])""" if self._bestSolution is None: return self.population[0] return self._bestSolution def __set_bestSolution(self, params): """set the bestSolution (params=None will sync with population[0])""" self._bestSolution = params return def __bestEnergy(self): """get the bestEnergy (default: bestEnergy = popEnergy[0])""" if self._bestEnergy is None: return self.popEnergy[0] return self._bestEnergy def __set_bestEnergy(self, energy): """set the bestEnergy (energy=None will sync with popEnergy[0])""" self._bestEnergy = energy return def SetReducer(self, reducer, arraylike=False): """apply a reducer function to the cost function input:: - a reducer function of the form: y' = reducer(yk), where yk is a results vector and y' is a single value. Ideally, this method is applied to a cost function with a multi-value return, to reduce the output to a single value. If arraylike, the reducer provided should take a single array as input and produce a scalar; otherwise, the reducer provided should meet the requirements of the python's builtin 'reduce' method (e.g. lambda x,y: x+y), taking two scalars and producing a scalar.""" if not reducer: self._reducer = None elif not callable(reducer): raise TypeError, "'%s' is not a callable function" % reducer elif not arraylike: self._reducer = wrap_reducer(reducer) else: #XXX: check if is arraylike? self._reducer = reducer return self._update_objective() def SetPenalty(self, penalty): """apply a penalty function to the optimization input:: - a penalty function of the form: y' = penalty(xk), with y = cost(xk) + y', where xk is the current parameter vector. Ideally, this function is constructed so a penalty is applied when the desired (i.e. encoded) constraints are violated. Equality constraints should be considered satisfied when the penalty condition evaluates to zero, while inequality constraints are satisfied when the penalty condition evaluates to a non-positive number.""" if not penalty: self._penalty = lambda x: 0.0 elif not callable(penalty): raise TypeError, "'%s' is not a callable function" % penalty else: #XXX: check for format: y' = penalty(x) ? self._penalty = penalty return self._update_objective() def SetConstraints(self, constraints): """apply a constraints function to the optimization input:: - a constraints function of the form: xk' = constraints(xk), where xk is the current parameter vector. 
Ideally, this function is constructed so the parameter vector it passes to the cost function will satisfy the desired (i.e. encoded) constraints.""" if not constraints: self._constraints = lambda x: x elif not callable(constraints): raise TypeError, "'%s' is not a callable function" % constraints else: #XXX: check for format: x' = constraints(x) ? self._constraints = constraints return self._update_objective() def SetGenerationMonitor(self, monitor, new=False): """select a callable to monitor (x, f(x)) after each solver iteration""" from mystic.monitors import Null, Monitor#, CustomMonitor current = Null() if new else self._stepmon if isinstance(monitor, Monitor): # is Monitor() self._stepmon = monitor self._stepmon.prepend(current) elif isinstance(monitor, Null) or monitor == Null: # is Null() or Null self._stepmon = Monitor() #XXX: don't allow Null self._stepmon.prepend(current) elif hasattr(monitor, '__module__'): # is CustomMonitor() if monitor.__module__ in ['mystic._genSow']: self._stepmon = monitor #FIXME: need .prepend(current) else: raise TypeError, "'%s' is not a monitor instance" % monitor self.energy_history = None # sync with self._stepmon self.solution_history = None # sync with self._stepmon return def SetEvaluationMonitor(self, monitor, new=False): """select a callable to monitor (x, f(x)) after each cost function evaluation""" from mystic.monitors import Null, Monitor#, CustomMonitor current = Null() if new else self._evalmon if isinstance(monitor, (Null, Monitor) ): # is Monitor() or Null() self._evalmon = monitor self._evalmon.prepend(current) elif monitor == Null: # is Null self._evalmon = monitor() self._evalmon.prepend(current) elif hasattr(monitor, '__module__'): # is CustomMonitor() if monitor.__module__ in ['mystic._genSow']: self._evalmon = monitor #FIXME: need .prepend(current) else: raise TypeError, "'%s' is not a monitor instance" % monitor return def SetStrictRanges(self, min=None, max=None): """ensure solution is within bounds input:: - min, max: must be a sequence of length self.nDim - each min[i] should be <= the corresponding max[i] note:: SetStrictRanges(None) will remove strict range constraints""" if min is False or max is False: self._useStrictRange = False return self._update_objective() #XXX: better to use 'defaultMin,defaultMax' or '-inf,inf' ??? 
if min is None: min = self._defaultMin if max is None: max = self._defaultMax # when 'some' of the bounds are given as 'None', replace with default for i in range(len(min)): if min[i] is None: min[i] = self._defaultMin[0] if max[i] is None: max[i] = self._defaultMax[0] min = asarray(min); max = asarray(max) if numpy.any(( min > max ),0): raise ValueError, "each min[i] must be <= the corresponding max[i]" if len(min) != self.nDim: raise ValueError, "bounds array must be length %s" % self.nDim self._useStrictRange = True self._strictMin = min self._strictMax = max return self._update_objective() def _clipGuessWithinRangeBoundary(self, x0, at=True): """ensure that initial guess is set within bounds input:: - x0: must be a sequence of length self.nDim""" #if len(x0) != self.nDim: #XXX: unnecessary w/ self.trialSolution # raise ValueError, "initial guess must be length %s" % self.nDim x0 = asarray(x0) bounds = (self._strictMin,self._strictMax) if not len(self._strictMin): return x0 # clip x0 at bounds settings = numpy.seterr(all='ignore') x_ = x0.clip(*bounds) numpy.seterr(**settings) if at: return x_ # clip x0 within bounds x_ = x_ != x0 x0[x_] = random.uniform(self._strictMin,self._strictMax)[x_] return x0 def SetInitialPoints(self, x0, radius=0.05): """Set Initial Points with Guess (x0) input:: - x0: must be a sequence of length self.nDim - radius: generate random points within [-radius*x0, radius*x0] for i!=0 when a simplex-type initial guess in required""" x0 = asfarray(x0) rank = len(x0.shape) if rank is 0: x0 = asfarray([x0]) rank = 1 if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if len(x0) != self.nDim: raise ValueError, "Initial guess must be length %s" % self.nDim #slightly alter initial values for solvers that depend on randomness min = x0*(1-radius) max = x0*(1+radius) numzeros = len(x0[x0==0]) min[min==0] = asarray([-radius for i in range(numzeros)]) max[max==0] = asarray([radius for i in range(numzeros)]) self.SetRandomInitialPoints(min,max) #stick initial values in population[i], i=0 self.population[0] = x0.tolist() def SetRandomInitialPoints(self, min=None, max=None): """Generate Random Initial Points within given Bounds input:: - min, max: must be a sequence of length self.nDim - each min[i] should be <= the corresponding max[i]""" if min is None: min = self._defaultMin if max is None: max = self._defaultMax #if numpy.any(( asarray(min) > asarray(max) ),0): # raise ValueError, "each min[i] must be <= the corresponding max[i]" if len(min) != self.nDim or len(max) != self.nDim: raise ValueError, "bounds array must be length %s" % self.nDim # when 'some' of the bounds are given as 'None', replace with default for i in range(len(min)): if min[i] is None: min[i] = self._defaultMin[0] if max[i] is None: max[i] = self._defaultMax[0] #generate random initial values for i in range(len(self.population)): for j in range(self.nDim): self.population[i][j] = random.uniform(min[j],max[j]) def SetMultinormalInitialPoints(self, mean, var=None): """Generate Initial Points from Multivariate Normal. input:: - mean must be a sequence of length self.nDim - var can be... None: -> it becomes the identity scalar: -> var becomes scalar * I matrix: -> the variance matrix. must be the right size! """ from mystic.tools import random_state rng = random_state(module='numpy.random') assert(len(mean) == self.nDim) if var is None: var = numpy.eye(self.nDim) else: try: # scalar ? float(var) except: # nope. 
var better be matrix of the right size (no check) pass else: var = var * numpy.eye(self.nDim) for i in range(len(self.population)): self.population[i] = rng.multivariate_normal(mean, var).tolist() return def SetSampledInitialPoints(self, dist=None): """Generate Random Initial Points from Distribution (dist) input:: - dist: a mystic.math.Distribution instance """ from mystic.math import Distribution if dist is None: dist = Distribution() elif type(Distribution) not in dist.__class__.mro(): dist = Distribution(dist) #XXX: or throw error? for i in range(self.nPop): self.population[i] = dist(self.nDim) return def enable_signal_handler(self):#, callback='*'): """enable workflow interrupt handler while solver is running""" """ #XXX: disabled, as would add state to solver input:: - if a callback function is provided, generate a new handler with the given callback. If callback is None, do not use a callback. If callback is not provided, just turn on the existing handler. """ ## always _generate handler on first call #if (self.signal_handler is None) and callback == '*': # callback = None ## when a new callback is given, generate a new handler #if callback != '*': # self._generateHandler(callback) self._handle_sigint = True def disable_signal_handler(self): """disable workflow interrupt handler while solver is running""" self._handle_sigint = False def _generateHandler(self,sigint_callback): """factory to generate signal handler Available switches:: - sol --> Print current best solution. - cont --> Continue calculation. - call --> Executes sigint_callback, if provided. - exit --> Exits with current best solution. """ def handler(signum, frame): import inspect print inspect.getframeinfo(frame) print inspect.trace() while 1: s = raw_input(\ """ Enter sense switch. sol: Print current best solution. cont: Continue calculation. call: Executes sigint_callback [%s]. exit: Exits with current best solution. >>> """ % sigint_callback) if s.lower() == 'sol': print self.bestSolution elif s.lower() == 'cont': return elif s.lower() == 'call': # sigint call_back if sigint_callback is not None: sigint_callback(self.bestSolution) elif s.lower() == 'exit': self._EARLYEXIT = True return else: print "unknown option : %s" % s return self.signal_handler = handler return def SetSaveFrequency(self, generations=None, filename=None, **kwds): """set frequency for saving solver restart file input:: - generations = number of solver iterations before next save of state - filename = name of file in which to save solver state note:: SetSaveFrequency(None) will disable saving solver restart file""" self._saveiter = generations #self._saveeval = evaluations self._state = filename return def SetEvaluationLimits(self, generations=None, evaluations=None, \ new=False, **kwds): """set limits for generations and/or evaluations input:: - generations = maximum number of solver iterations (i.e. steps) - evaluations = maximum number of function evaluations""" # backward compatibility self._maxiter = kwds['maxiter'] if 'maxiter' in kwds else generations self._maxfun = kwds['maxfun'] if 'maxfun' in kwds else evaluations # handle if new (reset counter, instead of extend counter) if new: if generations is not None: self._maxiter += self.generations else: self._maxiter = "*" #XXX: better as self._newmax = True ? 
if evaluations is not None: self._maxfun += self.evaluations else: self._maxfun = "*" return def _SetEvaluationLimits(self, iterscale=None, evalscale=None): """set the evaluation limits""" if iterscale is None: iterscale = 10 if evalscale is None: evalscale = 1000 N = len(self.population[0]) # usually self.nDim # if SetEvaluationLimits not applied, use the solver default if self._maxiter is None: self._maxiter = N * self.nPop * iterscale elif self._maxiter == "*": # (i.e. None, but 'reset counter') self._maxiter = (N * self.nPop * iterscale) + self.generations if self._maxfun is None: self._maxfun = N * self.nPop * evalscale elif self._maxfun == "*": self._maxfun = (N * self.nPop * evalscale) + self.evaluations return def Terminated(self, disp=False, info=False, termination=None): """check if the solver meets the given termination conditions Input:: - disp = if True, print termination statistics and/or warnings - info = if True, return termination message (instead of boolean) - termination = termination conditions to check against Note:: If no termination conditions are given, the solver's stored termination conditions will be used. """ if termination is None: termination = self._termination # ensure evaluation limits have been imposed self._SetEvaluationLimits() # check for termination messages msg = termination(self, info=True) sig = "SolverInterrupt with %s" % {} lim = "EvaluationLimits with %s" % {'evaluations':self._maxfun, 'generations':self._maxiter} # push solver internals to scipy.optimize.fmin interface if self._fcalls[0] >= self._maxfun and self._maxfun is not None: msg = lim #XXX: prefer the default stop ? if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif self.generations >= self._maxiter and self._maxiter is not None: msg = lim #XXX: prefer the default stop ? if disp: print "Warning: Maximum number of iterations has been exceeded" elif self._EARLYEXIT: msg = sig if disp: print "Warning: Optimization terminated with signal interrupt." elif msg and disp: print "Optimization terminated successfully." print " Current function value: %f" % self.bestEnergy print " Iterations: %d" % self.generations print " Function evaluations: %d" % self._fcalls[0] if info: return msg return bool(msg) def SetTermination(self, termination): # disp ? """set the termination conditions""" #XXX: validate that termination is a 'condition' ? self._termination = termination self._collapse = False if termination is not None: from mystic.termination import state self._collapse = any(key.startswith('Collapse') for key in state(termination).iterkeys()) return def SetObjective(self, cost, ExtraArgs=None): # callback=None/False ? """decorate the cost function with bounds, penalties, monitors, etc""" _cost,_raw,_args = self._cost # check if need to 'wrap' or can return the stored cost if (cost is None or cost is _raw or cost is _cost) and \ (ExtraArgs is None or ExtraArgs is _args): return # get cost and args if None was given if cost is None: cost = _raw args = _args if ExtraArgs is None else ExtraArgs args = () if args is None else args # quick validation check (so doesn't screw up internals) if not isvalid(cost, [0]*self.nDim, *args): try: name = cost.__name__ except AttributeError: # raise new error for non-callables cost(*args) validate(cost, None, *args) #val = len(args) + 1 #XXX: 'klepto.validate' for better error?
#msg = '%s() invalid number of arguments (%d given)' % (name, val) #raise TypeError(msg) # hold on to the 'raw' cost function self._cost = (None, cost, ExtraArgs) self._live = False return def Collapsed(self, disp=False, info=False): """check if the solver meets the given collapse conditions Input:: - disp = if True, print details about the solver state at collapse - info = if True, return collapsed state (instead of boolean) """ stop = getattr(self, '__stop__', self.Terminated(info=True)) import mystic.collapse as ct collapses = ct.collapsed(stop) or dict() if collapses and disp: for (k,v) in collapses.iteritems(): print " %s: %s" % (k.split()[0],v) #print "# Collapse at: Generation", self._stepmon._step-1, \ # "with", self.bestEnergy, "@\n#", list(self.bestSolution) return collapses if info else bool(collapses) def Collapse(self, disp=False): """if solver has terminated by collapse, apply the collapse""" collapses = self.Collapsed(disp=disp, info=True) if collapses: # then stomach a bunch of module imports (yuck) import mystic.tools as to import mystic.termination as mt import mystic.constraints as cn import mystic.mask as ma # get collapse conditions #XXX: efficient? 4x loops over collapses state = mt.state(self._termination) npts = getattr(self._stepmon, '_npts', None) #XXX: default? conditions = [cn.impose_at(*to.select_params(self,collapses[k])) if state[k].get('target') is None else cn.impose_at(collapses[k],state[k].get('target')) for k in collapses if k.startswith('CollapseAt')] conditions += [cn.impose_as(collapses[k],state[k].get('offset')) for k in collapses if k.startswith('CollapseAs')] # get measure collapse conditions if npts: #XXX: faster/better if comes first or last? conditions += [cn.impose_measure( npts, [collapses[k] for k in collapses if k.startswith('CollapsePosition')], [collapses[k] for k in collapses if k.startswith('CollapseWeight')] )] # update termination and constraints in solver constraints = to.chain(*conditions)(self._constraints) termination = ma.update_mask(self._termination, collapses) self.SetConstraints(constraints) self.SetTermination(termination) #print mt.state(self._termination).keys() return collapses def _update_objective(self): """decorate the cost function with bounds, penalties, monitors, etc""" # rewrap the cost if the solver has been run if False: # trigger immediately self._decorate_objective(*self._cost[1:]) else: # delay update until _bootstrap self.Finalize() return def _decorate_objective(self, cost, ExtraArgs=None): """decorate the cost function with bounds, penalties, monitors, etc""" #print ("@", cost, ExtraArgs, max) raw = cost if ExtraArgs is None: ExtraArgs = () self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon) if self._useStrictRange: indx = list(self.popEnergy).index(self.bestEnergy) ngen = self.generations #XXX: no random if generations=0 ? 
for i in range(self.nPop): self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i], (not ngen) or (i == indx)) cost = wrap_bounds(cost, self._strictMin, self._strictMax) cost = wrap_penalty(cost, self._penalty) cost = wrap_nested(cost, self._constraints) if self._reducer: #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool) cost = reduced(self._reducer, arraylike=True)(cost) # hold on to the 'wrapped' and 'raw' cost function self._cost = (cost, raw, ExtraArgs) self._live = True return cost def _bootstrap_objective(self, cost=None, ExtraArgs=None): """HACK to enable not explicitly calling _decorate_objective""" _cost,_raw,_args = self._cost # check if need to 'wrap' or can return the stored cost if (cost is None or cost is _raw or cost is _cost) and \ (ExtraArgs is None or ExtraArgs is _args) and self._live: return _cost # 'wrap' the 'new' cost function with _decorate self.SetObjective(cost, ExtraArgs) return self._decorate_objective(*self._cost[1:]) #XXX: when _decorate called, solver._fcalls will be reset ? def _Step(self, cost=None, ExtraArgs=None, **kwds): """perform a single optimization iteration *** this method must be overwritten ***""" raise NotImplementedError, "an optimization algorithm was not provided" def SaveSolver(self, filename=None, **kwds): """save solver state to a restart file""" import dill fd = None if filename is None: # then check if already has registered file if self._state is None: # then create a new one import os, tempfile fd, self._state = tempfile.mkstemp(suffix='.pkl') os.close(fd) filename = self._state self._state = filename f = file(filename, 'wb') try: dill.dump(self, f, **kwds) self._stepmon.info('DUMPED("%s")' % filename) #XXX: before / after ? finally: f.close() return def __save_state(self, force=False): """save the solver state, if chosen save frequency is met""" # save the last iteration if force and bool(self._state): self.SaveSolver() return # save the zeroth iteration nonzero = True #XXX: or bool(self.generations) ? # after _saveiter generations, then save state iters = self._saveiter saveiter = bool(iters) and not bool(self.generations % iters) if nonzero and saveiter: self.SaveSolver() #FIXME: if _saveeval (or more) since last check, then save state #save = self.evaluations % self._saveeval return def __load_state(self, solver, **kwds): """load solver.__dict__ into self.__dict__; override with kwds""" #XXX: should do some filtering on kwds ? self.__dict__.update(solver.__dict__, **kwds) return def Finalize(self, **kwds): """cleanup upon exiting the main optimization loop""" self._live = False return def _process_inputs(self, kwds): """process and activate input settings""" #allow for inputs that don't conform to AbstractSolver interface #NOTE: not sticky: callback, disp #NOTE: sticky: EvaluationMonitor, StepMonitor, penalty, constraints settings = \ {'callback':None, #user-supplied function, called after each step 'disp':0} #non-zero to print convergence messages [settings.update({i:j}) for (i,j) in kwds.items() if i in settings] # backward compatibility if 'EvaluationMonitor' in kwds: \ self.SetEvaluationMonitor(kwds['EvaluationMonitor']) if 'StepMonitor' in kwds: \ self.SetGenerationMonitor(kwds['StepMonitor']) if 'penalty' in kwds: \ self.SetPenalty(kwds['penalty']) if 'constraints' in kwds: \ self.SetConstraints(kwds['constraints']) return settings def Step(self, cost=None, termination=None, ExtraArgs=None, **kwds): """Take a single optimization step using the given 'cost' function.
Description: Uses an optimization algorithm to take one 'step' toward the minimum of a function of one or more variables. Inputs: cost -- the Python function or method to be minimized. Additional Inputs: termination -- callable object providing termination conditions. ExtraArgs -- extra arguments for cost. Further Inputs: callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. [default = None] disp -- non-zero to print convergence messages. Notes: If the algorithm does not meet the given termination conditions after the call to "Step", the solver may be left in an "out-of-sync" state. When abandoning a non-terminated solver, one should call "Finalize" to make sure the solver is fully returned to a "synchronized" state. To run the solver until termination, call "Solve()". Alternately, use "Terminated()" as the condition in a while loop over "Step". """ disp = kwds.pop('disp', False) # register: cost, termination, ExtraArgs cost = self._bootstrap_objective(cost, ExtraArgs) if termination is not None: self.SetTermination(termination) # check termination before 'stepping' if len(self._stepmon): msg = self.Terminated(disp=disp, info=True) or None else: msg = None # if not terminated, then take a step if msg is None: self._Step(**kwds) #FIXME: not all kwds are given in __doc__ if self.Terminated(): # then cleanup/finalize self.Finalize() # get termination message and log state msg = self.Terminated(disp=disp, info=True) or None if msg: self._stepmon.info('STOP("%s")' % msg) self.__save_state(force=True) return msg def Solve(self, cost=None, termination=None, ExtraArgs=None, **kwds): """Minimize a 'cost' function with given termination conditions. Description: Uses an optimization algorithm to find the minimum of a function of one or more variables. Inputs: cost -- the Python function or method to be minimized. Additional Inputs: termination -- callable object providing termination conditions. ExtraArgs -- extra arguments for cost. Further Inputs: sigint_callback -- callback function for signal handler. callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. [default = None] disp -- non-zero to print convergence messages. """ # process and activate input settings sigint_callback = kwds.pop('sigint_callback', None) settings = self._process_inputs(kwds) disp = settings.get('disp', False) # set up signal handler self._EARLYEXIT = False #XXX: why not use EARLYEXIT singleton? self._generateHandler(sigint_callback) # activate signal handler #import threading as thread #mainthread = isinstance(thread.current_thread(), thread._MainThread) #if mainthread: #XXX: if not mainthread, signal will raise ValueError import signal if self._handle_sigint: signal.signal(signal.SIGINT,self.signal_handler) # register: cost, termination, ExtraArgs cost = self._bootstrap_objective(cost, ExtraArgs) if termination is not None: self.SetTermination(termination) #XXX: self.Step(cost, termination, ExtraArgs, **settings) ? # the main optimization loop stop = False while not stop: stop = self.Step(**settings) #XXX: remove need to pass settings? continue # if collapse, then activate any relevant collapses and continue self.__stop__ = stop #HACK: avoid re-evaluation of Termination while self._collapse and self.Collapse(disp=disp): del self.__stop__ #HACK stop = False while not stop: stop = self.Step(**settings) #XXX: move Collapse inside of Step?
continue self.__stop__ = stop #HACK del self.__stop__ #HACK # restore default handler for signal interrupts if self._handle_sigint: signal.signal(signal.SIGINT,signal.default_int_handler) return def __copy__(self): cls = self.__class__ result = cls.__new__(cls) result.__dict__.update(self.__dict__) return result def __deepcopy__(self, memo): import copy import dill cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): if v is self._cost: setattr(result, k, tuple(dill.copy(i) for i in v)) else: try: #XXX: work-around instancemethods in python2.6 setattr(result, k, copy.deepcopy(v, memo)) except TypeError: setattr(result, k, dill.copy(v)) return result # extensions to the solver interface evaluations = property(__evaluations ) generations = property(__generations ) energy_history = property(__energy_history,__set_energy_history ) solution_history = property(__solution_history,__set_solution_history ) bestEnergy = property(__bestEnergy,__set_bestEnergy ) bestSolution = property(__bestSolution,__set_bestSolution ) pass
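# Usage sketch (an illustration, not part of the solver source): the
# Step/Terminated protocol above can drive a concrete solver one generation
# at a time, as suggested in the Step docstring; assumes mystic's bundled
# rosen model and CRT termination.
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import CandidateRelativeTolerance as CRT
from mystic.models import rosen

solver = NelderMeadSimplexSolver(3)
solver.SetInitialPoints([0.8, 1.2, 0.7])
solver.SetTermination(CRT(xtol=1e-4))
while not solver.Terminated():  # Terminated() also imposes evaluation limits
    solver.Step(rosen)          # Step finalizes the solver upon termination
print solver.bestSolution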
algor = [] x0 = [0.8, 1.2, 0.7] #x0 = [0.8,1.2,1.7] #... better when using "bad" range min = [-0.999, -0.999, 0.999] #XXX: behaves badly when large range max = [200.001, 100.001, numpy.inf] #... for >=1 x0 out of bounds; (up xtol) # min = [-0.999, -0.999, -0.999] # max = [200.001, 100.001, numpy.inf] # min = [-0.999, -0.999, 0.999] # max = [2.001, 1.001, 1.001] print "Nelder-Mead Simplex" print "===================" start = time.time() from mystic.monitors import Monitor, VerboseMonitor #stepmon = VerboseMonitor(1) stepmon = Monitor() #VerboseMonitor(10) from mystic.termination import CandidateRelativeTolerance as CRT #from mystic._scipyoptimize import fmin from mystic.solvers import fmin, NelderMeadSimplexSolver #print fmin(rosen,x0,retall=0,full_output=0,maxiter=121) solver = NelderMeadSimplexSolver(len(x0)) solver.SetInitialPoints(x0) solver.SetStrictRanges(min, max) solver.SetEvaluationLimits(generations=146) solver.SetGenerationMonitor(stepmon) solver.enable_signal_handler() solver.Solve(rosen, CRT(xtol=4e-5), disp=1) print solver.bestSolution #print "Current function value: %s" % solver.bestEnergy #print "Iterations: %s" % solver.generations
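# A hedged equivalent of the script above, using the scipy-style 'fmin'
# one-liner defined later in this collection: zip(min, max) packs the same
# strict ranges as (min,max) bounds pairs, and xtol again drives CRT.
print fmin(rosen, x0, bounds=zip(min, max), xtol=4e-5, disp=0)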
def diffev(cost,x0,npop=4,args=(),bounds=None,ftol=5e-3,gtol=None, maxiter=None,maxfun=None,cross=0.9,scale=0.8, full_output=0,disp=1,retall=0,callback=None,**kwds): """Minimize a function using differential evolution. Description: Uses a differential evolution algorithm to find the minimum of a function of one or more variables. Mimics a scipy.optimize style interface. Inputs: cost -- the Python function or method to be minimized. x0 -- the initial guess (ndarray), if desired to start from a set point; otherwise takes an array of (min,max) bounds, for when random initial points are desired npop -- size of the trial solution population. Additional Inputs: args -- extra arguments for cost. bounds -- list - n pairs of bounds (min,max), one pair for each parameter. ftol -- number - acceptable relative error in cost(xopt) for convergence. gtol -- number - maximum number of iterations to run without improvement. maxiter -- number - the maximum number of iterations to perform. maxfun -- number - the maximum number of function evaluations. cross -- number - the probability of cross-parameter mutations scale -- number - multiplier for impact of mutations on trial solution. full_output -- number - non-zero if fval and warnflag outputs are desired. disp -- number - non-zero to print convergence messages. retall -- number - non-zero to return list of solutions at each iteration. callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. handler -- boolean - enable/disable handling of interrupt signal strategy -- strategy - override the default mutation strategy itermon -- monitor - override the default GenerationMonitor evalmon -- monitor - override the default EvaluationMonitor constraints -- an optional user-supplied function. It is called as constraints(xk), where xk is the current parameter vector. This function must return xk', a parameter vector that satisfies the encoded constraints. penalty -- an optional user-supplied function. It is called as penalty(xk), where xk is the current parameter vector. This function should return y', with y' == 0 when the encoded constraints are satisfied, and y' > 0 otherwise. Returns: (xopt, {fopt, iter, funcalls, warnflag}, {allvecs}) xopt -- ndarray - minimizer of function fopt -- number - value of function at minimum: fopt = cost(xopt) iter -- number - number of iterations funcalls -- number - number of function calls warnflag -- number - Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.'
allvecs -- list - a list of solutions at each iteration """ invariant_current = False if kwds.has_key('invariant_current'): invariant_current = kwds['invariant_current'] handler = False if kwds.has_key('handler'): handler = kwds['handler'] from mystic.strategy import Best1Bin strategy = Best1Bin if kwds.has_key('strategy'): strategy = kwds['strategy'] from mystic.monitors import Monitor stepmon = Monitor() evalmon = Monitor() if kwds.has_key('itermon'): stepmon = kwds['itermon'] if kwds.has_key('evalmon'): evalmon = kwds['evalmon'] if gtol: #if number of generations provided, use ChangeOverGeneration from mystic.termination import ChangeOverGeneration termination = ChangeOverGeneration(ftol,gtol) else: from mystic.termination import VTRChangeOverGeneration termination = VTRChangeOverGeneration(ftol) ND = len(x0) if invariant_current: #use Solver2, not Solver1 solver = DifferentialEvolutionSolver2(ND,npop) else: solver = DifferentialEvolutionSolver(ND,npop) solver.SetEvaluationLimits(maxiter,maxfun) solver.SetEvaluationMonitor(evalmon) solver.SetGenerationMonitor(stepmon) if kwds.has_key('penalty'): penalty = kwds['penalty'] solver.SetPenalty(penalty) if kwds.has_key('constraints'): constraints = kwds['constraints'] solver.SetConstraints(constraints) if bounds is not None: minb,maxb = unpair(bounds) solver.SetStrictRanges(minb,maxb) try: #x0 passed as 1D array of (min,max) pairs minb,maxb = unpair(x0) solver.SetRandomInitialPoints(minb,maxb) except: #x0 passed as 1D array of initial parameter values solver.SetInitialPoints(x0) if handler: solver.enable_signal_handler() #TODO: allow sigint_callbacks for all minimal interfaces ? solver.Solve(cost,termination=termination,strategy=strategy,\ #sigint_callback=other_callback,\ CrossProbability=cross,ScalingFactor=scale,\ ExtraArgs=args,callback=callback) solution = solver.Solution() # code below here pushes output to scipy.optimize.fmin interface #x = list(solver.bestSolution) x = solver.bestSolution fval = solver.bestEnergy warnflag = 0 fcalls = solver.evaluations iterations = solver.generations allvecs = stepmon.x if fcalls >= solver._maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iterations >= solver._maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % iterations print " Function evaluations: %d" % fcalls if full_output: retlist = x, fval, iterations, fcalls, warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
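# Usage sketch for the interface above: minimize the 3-D Rosenbrock function
# (assuming mystic's bundled rosen model), passing (min,max) bounds pairs as
# x0 so that random initial points are drawn, per the docstring.
from mystic.models import rosen
xopt, fopt, iters, fcalls, flag = diffev(rosen, x0=[(-3., 3.)]*3, npop=40,
                                         ftol=1e-6, gtol=100, full_output=1,
                                         disp=0)
print "fopt: %f after %d iterations" % (fopt, iters)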
def fmin(cost, x0, args=(), bounds=None, xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None, **kwds): """Minimize a function using the downhill simplex algorithm. Description: Uses a Nelder-Mead simplex algorithm to find the minimum of a function of one or more variables. Mimics the scipy.optimize.fmin interface. Inputs: cost -- the Python function or method to be minimized. x0 -- ndarray - the initial guess. Additional Inputs: args -- extra arguments for cost. bounds -- list - n pairs of bounds (min,max), one pair for each parameter. xtol -- number - acceptable relative error in xopt for convergence. ftol -- number - acceptable relative error in cost(xopt) for convergence. maxiter -- number - the maximum number of iterations to perform. maxfun -- number - the maximum number of function evaluations. full_output -- number - non-zero if fval and warnflag outputs are desired. disp -- number - non-zero to print convergence messages. retall -- number - non-zero to return list of solutions at each iteration. callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. handler -- boolean - enable/disable handling of interrupt signal itermon -- monitor - override the default GenerationMonitor evalmon -- monitor - override the default EvaluationMonitor constraints -- an optional user-supplied function. It is called as constraints(xk), where xk is the current parameter vector. This function must return xk', a parameter vector that satisfies the encoded constraints. penalty -- an optional user-supplied function. It is called as penalty(xk), where xk is the current parameter vector. This function should return y', with y' == 0 when the encoded constraints are satisfied, and y' > 0 otherwise. Returns: (xopt, {fopt, iter, funcalls, warnflag}, {allvecs}) xopt -- ndarray - minimizer of function fopt -- number - value of function at minimum: fopt = cost(xopt) iter -- number - number of iterations funcalls -- number - number of function calls warnflag -- number - Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' 
allvecs -- list - a list of solutions at each iteration """ handler = False if kwds.has_key('handler'): handler = kwds['handler'] from mystic.monitors import Monitor stepmon = Monitor() evalmon = Monitor() if kwds.has_key('itermon'): stepmon = kwds['itermon'] if kwds.has_key('evalmon'): evalmon = kwds['evalmon'] if xtol: #if tolerance in x is provided, use CandidateRelativeTolerance from mystic.termination import CandidateRelativeTolerance as CRT termination = CRT(xtol, ftol) else: from mystic.termination import VTRChangeOverGeneration termination = VTRChangeOverGeneration(ftol) solver = NelderMeadSimplexSolver(len(x0)) solver.SetInitialPoints(x0) solver.SetEvaluationLimits(maxiter, maxfun) solver.SetEvaluationMonitor(evalmon) solver.SetGenerationMonitor(stepmon) if kwds.has_key('penalty'): penalty = kwds['penalty'] solver.SetPenalty(penalty) if kwds.has_key('constraints'): constraints = kwds['constraints'] solver.SetConstraints(constraints) if bounds is not None: minb, maxb = unpair(bounds) solver.SetStrictRanges(minb, maxb) if handler: solver.enable_signal_handler() solver.Solve(cost,termination=termination,\ disp=disp, ExtraArgs=args, callback=callback) solution = solver.Solution() # code below here pushes output to scipy.optimize.fmin interface #x = list(solver.bestSolution) x = solver.bestSolution fval = solver.bestEnergy warnflag = 0 fcalls = solver.evaluations iterations = solver.generations allvecs = stepmon.x if fcalls >= solver._maxfun: warnflag = 1 elif iterations >= solver._maxiter: warnflag = 2 if full_output: retlist = x, fval, iterations, fcalls, warnflag if retall: retlist += (allvecs, ) else: retlist = x if retall: retlist = (x, allvecs) return retlist
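# Usage sketch: the 'constraints' keyword documented above takes a function
# xk' = constraints(xk); here a hypothetical constraint pins the first
# parameter at 1.0 before each evaluation (assumes mystic's rosen model).
from mystic.models import rosen

def constrain(x):
    x[0] = 1.0  # illustrative constraint: fix the first parameter
    return x

print fmin(rosen, [0.8, 1.2, 0.7], constraints=constrain, disp=0)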
def fmin_powell(cost, x0, args=(), bounds=None, xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None, direc=None, **kwds): """Minimize a function using modified Powell's method. Description: Uses a modified Powell Directional Search algorithm to find the minimum of a function of one or more variables. Mimics the scipy.optimize.fmin_powell interface. Inputs: cost -- the Python function or method to be minimized. x0 -- ndarray - the initial guess. Additional Inputs: args -- extra arguments for cost. bounds -- list - n pairs of bounds (min,max), one pair for each parameter. xtol -- number - acceptable relative error in xopt for convergence. ftol -- number - acceptable relative error in cost(xopt) for convergence. gtol -- number - maximum number of iterations to run without improvement. maxiter -- number - the maximum number of iterations to perform. maxfun -- number - the maximum number of function evaluations. full_output -- number - non-zero if fval and warnflag outputs are desired. disp -- number - non-zero to print convergence messages. retall -- number - non-zero to return list of solutions at each iteration. callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. direc -- initial direction set handler -- boolean - enable/disable handling of interrupt signal itermon -- monitor - override the default GenerationMonitor evalmon -- monitor - override the default EvaluationMonitor constraints -- an optional user-supplied function. It is called as constraints(xk), where xk is the current parameter vector. This function must return xk', a parameter vector that satisfies the encoded constraints. penalty -- an optional user-supplied function. It is called as penalty(xk), where xk is the current parameter vector. This function should return y', with y' == 0 when the encoded constraints are satisfied, and y' > 0 otherwise. Returns: (xopt, {fopt, iter, funcalls, warnflag, direc}, {allvecs}) xopt -- ndarray - minimizer of function fopt -- number - value of function at minimum: fopt = cost(xopt) iter -- number - number of iterations funcalls -- number - number of function calls warnflag -- number - Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' direc -- current direction set allvecs -- list - a list of solutions at each iteration """ #FIXME: need to resolve "direc" # - should just pass 'direc', and then hands-off ? How return it ?
handler = False if kwds.has_key('handler'): handler = kwds['handler'] from mystic.monitors import Monitor stepmon = Monitor() evalmon = Monitor() if kwds.has_key('itermon'): stepmon = kwds['itermon'] if kwds.has_key('evalmon'): evalmon = kwds['evalmon'] gtol = 2 # termination generations (scipy: 2, default: 10) if kwds.has_key('gtol'): gtol = kwds['gtol'] if gtol: #if number of generations is provided, use NCOG from mystic.termination import NormalizedChangeOverGeneration as NCOG termination = NCOG(ftol, gtol) else: from mystic.termination import VTRChangeOverGeneration termination = VTRChangeOverGeneration(ftol) solver = PowellDirectionalSolver(len(x0)) solver.SetInitialPoints(x0) solver.SetEvaluationLimits(maxiter, maxfun) solver.SetEvaluationMonitor(evalmon) solver.SetGenerationMonitor(stepmon) if kwds.has_key('penalty'): penalty = kwds['penalty'] solver.SetPenalty(penalty) if kwds.has_key('constraints'): constraints = kwds['constraints'] solver.SetConstraints(constraints) if bounds is not None: minb, maxb = unpair(bounds) solver.SetStrictRanges(minb, maxb) if handler: solver.enable_signal_handler() solver.Solve(cost,termination=termination,\ xtol=xtol, ExtraArgs=args, callback=callback, \ disp=disp, direc=direc) #XXX: last two lines use **kwds solution = solver.Solution() # code below here pushes output to scipy.optimize.fmin_powell interface #x = list(solver.bestSolution) x = solver.bestSolution fval = solver.bestEnergy warnflag = 0 fcalls = solver.evaluations iterations = solver.generations allvecs = stepmon.x direc = solver._direc if fcalls >= solver._maxfun: warnflag = 1 elif iterations >= solver._maxiter: warnflag = 2 x = squeeze(x) #FIXME: write squeezed x to stepmon instead? if full_output: retlist = x, fval, iterations, fcalls, warnflag, direc if retall: retlist += (allvecs, ) else: retlist = x if retall: retlist = (x, allvecs) return retlist
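# Usage sketch: retrieve the final direction set via full_output, per the
# returns listed in the docstring above (assumes mystic's rosen model).
from mystic.models import rosen
xopt, fopt, iters, fcalls, flag, direc = fmin_powell(rosen, [0.8, 1.2, 0.7],
                                                     full_output=1, disp=0)
print "final direction set:"
print direc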
class AbstractSolver(object): """ AbstractSolver base class for mystic optimizers. """ def __init__(self, dim, **kwds): """ Takes one initial input: dim -- dimensionality of the problem. Additional inputs: npop -- size of the trial solution population. [default = 1] Important class members: nDim, nPop = dim, npop generations - an iteration counter. evaluations - an evaluation counter. bestEnergy - current best energy. bestSolution - current best parameter set. [size = dim] popEnergy - set of all trial energy solutions. [size = npop] population - set of all trial parameter solutions. [size = dim*npop] solution_history - history of bestSolution status. [StepMonitor.x] energy_history - history of bestEnergy status. [StepMonitor.y] signal_handler - catches the interrupt signal. """ NP = 1 if kwds.has_key('npop'): NP = kwds['npop'] self.nDim = dim self.nPop = NP self._init_popEnergy = inf self.popEnergy = [self._init_popEnergy] * NP self.population = [[0.0 for i in range(dim)] for j in range(NP)] self.trialSolution = [0.0] * dim self._map_solver = False self._bestEnergy = None self._bestSolution = None self._state = None self._type = self.__class__.__name__ self.signal_handler = None self._handle_sigint = False self._useStrictRange = False self._defaultMin = [-1e3] * dim self._defaultMax = [1e3] * dim self._strictMin = [] self._strictMax = [] self._maxiter = None self._maxfun = None self._saveiter = None #self._saveeval = None from mystic.monitors import Null, Monitor self._evalmon = Null() self._stepmon = Monitor() self._fcalls = [0] self._energy_history = None self._solution_history = None self.id = None # identifier (use like "rank" for MPI) self._constraints = lambda x: x self._penalty = lambda x: 0.0 self._cost = (None, None) self._termination = lambda x, *ar, **kw: False if len(ar) < 1 or ar[ 0] is False or kw.get('info', True ) == False else '' #XXX: better default ? 
# (get termination details with self._termination.__doc__) import mystic.termination self._EARLYEXIT = mystic.termination.EARLYEXIT return def Solution(self): """return the best solution""" return self.bestSolution def __evaluations(self): """get the number of function calls""" return self._fcalls[0] def __generations(self): """get the number of iterations""" return max(0, len(self.energy_history) - 1) #return max(0,len(self._stepmon)-1) def __energy_history(self): """get the energy_history (default: energy_history = _stepmon.y)""" if self._energy_history is None: return self._stepmon.y return self._energy_history def __set_energy_history(self, energy): """set the energy_history (energy=None will sync with _stepmon.y)""" self._energy_history = energy return def __solution_history(self): """get the solution_history (default: solution_history = _stepmon.x)""" if self._solution_history is None: return self._stepmon.x return self._solution_history def __set_solution_history(self, params): """set the solution_history (params=None will sync with _stepmon.x)""" self._solution_history = params return def __bestSolution(self): """get the bestSolution (default: bestSolution = population[0])""" if self._bestSolution is None: return self.population[0] return self._bestSolution def __set_bestSolution(self, params): """set the bestSolution (params=None will sync with population[0])""" self._bestSolution = params return def __bestEnergy(self): """get the bestEnergy (default: bestEnergy = popEnergy[0])""" if self._bestEnergy is None: return self.popEnergy[0] return self._bestEnergy def __set_bestEnergy(self, energy): """set the bestEnergy (energy=None will sync with popEnergy[0])""" self._bestEnergy = energy return def SetPenalty(self, penalty): """apply a penalty function to the optimization input:: - a penalty function of the form: y' = penalty(xk), with y = cost(xk) + y', where xk is the current parameter vector. Ideally, this function is constructed so a penalty is applied when the desired (i.e. encoded) constraints are violated. Equality constraints should be considered satisfied when the penalty condition evaluates to zero, while inequality constraints are satisfied when the penalty condition evaluates to a non-positive number.""" if not penalty: self._penalty = lambda x: 0.0 elif not callable(penalty): raise TypeError, "'%s' is not a callable function" % penalty else: #XXX: check for format: y' = penalty(x) ? self._penalty = penalty return def SetConstraints(self, constraints): """apply a constraints function to the optimization input:: - a constraints function of the form: xk' = constraints(xk), where xk is the current parameter vector. Ideally, this function is constructed so the parameter vector it passes to the cost function will satisfy the desired (i.e. encoded) constraints.""" if not constraints: self._constraints = lambda x: x elif not callable(constraints): raise TypeError, "'%s' is not a callable function" % constraints else: #XXX: check for format: x' = constraints(x) ? 
self._constraints = constraints return def SetGenerationMonitor(self, monitor, new=False): """select a callable to monitor (x, f(x)) after each solver iteration""" from mystic.monitors import Null, Monitor #, CustomMonitor current = Null() if new else self._stepmon if isinstance(monitor, Monitor): # is Monitor() self._stepmon = monitor self._stepmon.prepend(current) elif isinstance(monitor, Null) or monitor == Null: # is Null() or Null self._stepmon = Monitor() #XXX: don't allow Null self._stepmon.prepend(current) elif hasattr(monitor, '__module__'): # is CustomMonitor() if monitor.__module__ in ['mystic._genSow']: self._stepmon = monitor #FIXME: need .prepend(current) else: raise TypeError, "'%s' is not a monitor instance" % monitor self.energy_history = self._stepmon.y self.solution_history = self._stepmon.x return def SetEvaluationMonitor(self, monitor, new=False): """select a callable to monitor (x, f(x)) after each cost function evaluation""" from mystic.monitors import Null, Monitor #, CustomMonitor current = Null() if new else self._evalmon if isinstance(monitor, (Null, Monitor)): # is Monitor() or Null() self._evalmon = monitor self._evalmon.prepend(current) elif monitor == Null: # is Null self._evalmon = monitor() self._evalmon.prepend(current) elif hasattr(monitor, '__module__'): # is CustomMonitor() if monitor.__module__ in ['mystic._genSow']: self._evalmon = monitor #FIXME: need .prepend(current) else: raise TypeError, "'%s' is not a monitor instance" % monitor return def SetStrictRanges(self, min=None, max=None): """ensure solution is within bounds input:: - min, max: must be a sequence of length self.nDim - each min[i] should be <= the corresponding max[i] note:: SetStrictRanges(None) will remove strict range constraints""" if min is False or max is False: self._useStrictRange = False return #XXX: better to use 'defaultMin,defaultMax' or '-inf,inf' ??? if min == None: min = self._defaultMin if max == None: max = self._defaultMax # when 'some' of the bounds are given as 'None', replace with default for i in range(len(min)): if min[i] == None: min[i] = self._defaultMin[0] if max[i] == None: max[i] = self._defaultMax[0] min = asarray(min) max = asarray(max) if numpy.any((min > max), 0): raise ValueError, "each min[i] must be <= the corresponding max[i]" if len(min) != self.nDim: raise ValueError, "bounds array must be length %s" % self.nDim self._useStrictRange = True self._strictMin = min self._strictMax = max return def _clipGuessWithinRangeBoundary(self, x0): #FIXME: use self.trialSolution? """ensure that initial guess is set within bounds input:: - x0: must be a sequence of length self.nDim""" #if len(x0) != self.nDim: #XXX: unnecessary w/ self.trialSolution # raise ValueError, "initial guess must be length %s" % self.nDim x0 = asarray(x0) lo = self._strictMin hi = self._strictMax # crop x0 at bounds x0[x0 < lo] = lo[x0 < lo] x0[x0 > hi] = hi[x0 > hi] return x0 def SetInitialPoints(self, x0, radius=0.05): """Set Initial Points with Guess (x0) input:: - x0: must be a sequence of length self.nDim - radius: generate random points within [-radius*x0, radius*x0] for i!=0 when a simplex-type initial guess is required""" x0 = asfarray(x0) rank = len(x0.shape) if rank == 0: x0 = asfarray([x0]) rank = 1 if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence."
if len(x0) != self.nDim: raise ValueError, "Initial guess must be length %s" % self.nDim #slightly alter initial values for solvers that depend on randomness min = x0 * (1 - radius) max = x0 * (1 + radius) numzeros = len(x0[x0 == 0]) min[min == 0] = asarray([-radius for i in range(numzeros)]) max[max == 0] = asarray([radius for i in range(numzeros)]) self.SetRandomInitialPoints(min, max) #stick initial values in population[i], i=0 self.population[0] = x0.tolist() def SetRandomInitialPoints(self, min=None, max=None): """Generate Random Initial Points within given Bounds input:: - min, max: must be a sequence of length self.nDim - each min[i] should be <= the corresponding max[i]""" if min == None: min = self._defaultMin if max == None: max = self._defaultMax #if numpy.any(( asarray(min) > asarray(max) ),0): # raise ValueError, "each min[i] must be <= the corresponding max[i]" if len(min) != self.nDim or len(max) != self.nDim: raise ValueError, "bounds array must be length %s" % self.nDim # when 'some' of the bounds are given as 'None', replace with default for i in range(len(min)): if min[i] == None: min[i] = self._defaultMin[0] if max[i] == None: max[i] = self._defaultMax[0] import random #generate random initial values for i in range(len(self.population)): for j in range(self.nDim): self.population[i][j] = random.uniform(min[j], max[j]) def SetMultinormalInitialPoints(self, mean, var=None): """Generate Initial Points from Multivariate Normal. input:: - mean must be a sequence of length self.nDim - var can be... None: -> it becomes the identity scalar: -> var becomes scalar * I matrix: -> the variance matrix. must be the right size! """ from numpy.random import multivariate_normal assert (len(mean) == self.nDim) if var == None: var = numpy.eye(self.nDim) else: try: # scalar ? float(var) except: # nope. var better be matrix of the right size (no check) pass else: var = var * numpy.eye(self.nDim) for i in range(len(self.population)): self.population[i] = multivariate_normal(mean, var).tolist() return def enable_signal_handler(self): """enable workflow interrupt handler while solver is running""" self._handle_sigint = True def disable_signal_handler(self): """disable workflow interrupt handler while solver is running""" self._handle_sigint = False def _generateHandler(self, sigint_callback): """factory to generate signal handler Available switches:: - sol --> Print current best solution. - cont --> Continue calculation. - call --> Executes sigint_callback, if provided. - exit --> Exits with current best solution. """ def handler(signum, frame): import inspect print inspect.getframeinfo(frame) print inspect.trace() while 1: s = raw_input(\ """ Enter sense switch. sol: Print current best solution. cont: Continue calculation. call: Executes sigint_callback [%s]. exit: Exits with current best solution. 
>>> """ % sigint_callback) if s.lower() == 'sol': print self.bestSolution elif s.lower() == 'cont': return elif s.lower() == 'call': # sigint call_back if sigint_callback is not None: sigint_callback(self.bestSolution) elif s.lower() == 'exit': self._EARLYEXIT = True return else: print "unknown option : %s" % s return self.signal_handler = handler return def SetSaveFrequency(self, generations=None, filename=None, **kwds): """set frequency for saving solver restart file input:: - generations = number of solver iterations before next save of state - filename = name of file in which to save solver state note:: SetSaveFrequency(None) will disable saving solver restart file""" self._saveiter = generations #self._saveeval = evaluations self._state = filename return def SetEvaluationLimits(self, generations=None, evaluations=None, \ new=False, **kwds): """set limits for generations and/or evaluations input:: - generations = maximum number of solver iterations (i.e. steps) - evaluations = maximum number of function evaluations""" self._maxiter = generations self._maxfun = evaluations # backward compatibility if kwds.has_key('maxiter'): self._maxiter = kwds['maxiter'] if kwds.has_key('maxfun'): self._maxfun = kwds['maxfun'] # handle if new (reset counter, instead of extend counter) if new: if generations is not None: self._maxiter += self.generations else: self._maxiter = "*" #XXX: better as self._newmax = True ? if evaluations is not None: self._maxfun += self.evaluations else: self._maxfun = "*" return def _SetEvaluationLimits(self, iterscale=None, evalscale=None): """set the evaluation limits""" if iterscale is None: iterscale = 10 if evalscale is None: evalscale = 1000 N = len(self.population[0]) # usually self.nDim # if SetEvaluationLimits not applied, use the solver default if self._maxiter is None: self._maxiter = N * self.nPop * iterscale elif self._maxiter == "*": # (i.e. None, but 'reset counter') self._maxiter = (N * self.nPop * iterscale) + self.generations if self._maxfun is None: self._maxfun = N * self.nPop * evalscale elif self._maxiter == "*": self._maxfun = (N * self.nPop * evalscale) + self.evaluations return def CheckTermination(self, disp=False, info=False, termination=None): """check if the solver meets the given termination conditions Input:: - disp = if True, print termination statistics and/or warnings - info = if True, return termination message (instead of boolean) - termination = termination conditions to check against Note:: If no termination conditions are given, the solver's stored termination conditions will be used. """ if termination == None: termination = self._termination # check for termination messages msg = termination(self, info=True) lim = "EvaluationLimits with %s" % { 'evaluations': self._maxfun, 'generations': self._maxiter } # push solver internals to scipy.optimize.fmin interface if self._fcalls[0] >= self._maxfun and self._maxfun is not None: msg = lim #XXX: prefer the default stop ? if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif self.generations >= self._maxiter and self._maxiter is not None: msg = lim #XXX: prefer the default stop ? if disp: print "Warning: Maximum number of iterations has been exceeded" elif msg and disp: print "Optimization terminated successfully." 
print " Current function value: %f" % self.bestEnergy print " Iterations: %d" % self.generations print " Function evaluations: %d" % self._fcalls[0] if info: return msg return bool(msg) def SetTermination(self, termination): """set the termination conditions""" #XXX: validate that termination is a 'condition' ? self._termination = termination return def _RegisterObjective(self, cost, ExtraArgs=None): """decorate cost function with bounds, penalties, monitors, etc""" if ExtraArgs == None: ExtraArgs = () self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon) if self._useStrictRange: for i in range(self.nPop): self.population[i] = self._clipGuessWithinRangeBoundary( self.population[i]) cost = wrap_bounds(cost, self._strictMin, self._strictMax) cost = wrap_penalty(cost, self._penalty) cost = wrap_nested(cost, self._constraints) # hold on to the 'wrapped' cost function self._cost = (cost, ExtraArgs) return cost def _bootstrap_decorate(self, cost=None, ExtraArgs=None): """HACK to enable not explicitly calling _RegisterObjective""" args = None if cost == None: # 'use existing cost' cost, args = self._cost # use args, unless override with ExtraArgs if ExtraArgs != None: args = ExtraArgs if self._cost[0] == None: # '_RegisterObjective not yet called' if args is None: args = () cost = self._RegisterObjective(cost, args) return cost def Step(self, cost=None, ExtraArgs=None, **kwds): """perform a single optimization iteration *** this method must be overwritten ***""" raise NotImplementedError, "an optimization algorithm was not provided" def SaveSolver(self, filename=None, **kwds): """save solver state to a restart file""" import dill if filename == None: # then check if already has registered file if self._state == None: # then create a new one import tempfile self._state = tempfile.mkstemp(suffix='.pkl')[-1] filename = self._state self._state = filename f = file(filename, 'wb') try: dill.dump(self, f, **kwds) self._stepmon.info('DUMPED("%s")' % filename) #XXX: before / after ? finally: f.close() return def __save_state(self, force=False): """save the solver state, if chosen save frequency is met""" # save the last iteration if force and bool(self._state): self.SaveSolver() return # save the zeroth iteration nonzero = True #XXX: or bool(self.generations) ? # after _saveiter generations, then save state iters = self._saveiter saveiter = bool(iters) and not bool(self.generations % iters) if nonzero and saveiter: self.SaveSolver() #FIXME: if _saveeval (or more) since last check, then save state #save = self.evaluations % self._saveeval return def __load_state(self, solver, **kwds): """load solver.__dict__ into self.__dict__; override with kwds""" #XXX: should do some filtering on kwds ? 
self.__dict__.update(solver.__dict__, **kwds) return def _exitMain(self, **kwds): """cleanup upon exiting the main optimization loop""" pass def _process_inputs(self, kwds): """process and activate input settings""" #allow for inputs that don't conform to AbstractSolver interface settings = \ {'callback':None, #user-supplied function, called after each step 'disp':0} #non-zero to print convergence messages [settings.update({i: j}) for (i, j) in kwds.items() if i in settings] # backward compatibility if kwds.has_key('EvaluationMonitor'): \ self.SetEvaluationMonitor(kwds.get('EvaluationMonitor')) if kwds.has_key('StepMonitor'): \ self.SetGenerationMonitor(kwds.get('StepMonitor')) if kwds.has_key('penalty'): \ self.SetPenalty(kwds.get('penalty')) if kwds.has_key('constraints'): \ self.SetConstraints(kwds.get('constraints')) return settings def Solve(self, cost=None, termination=None, sigint_callback=None, ExtraArgs=None, **kwds): """Minimize a 'cost' function with given termination conditions. Description: Uses an optimization algorithm to find the minimum of a function of one or more variables. Inputs: cost -- the Python function or method to be minimized. Additional Inputs: termination -- callable object providing termination conditions. sigint_callback -- callback function for signal handler. ExtraArgs -- extra arguments for cost. Further Inputs: callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. [default = None] disp -- non-zero to print convergence messages. """ # HACK to enable not explicitly calling _RegisterObjective cost = self._bootstrap_decorate(cost, ExtraArgs) # process and activate input settings settings = self._process_inputs(kwds) for key in settings: exec "%s = settings['%s']" % (key, key) # set up signal handler import signal self._EARLYEXIT = False self._generateHandler(sigint_callback) if self._handle_sigint: signal.signal(signal.SIGINT, self.signal_handler) ## decorate cost function with bounds, penalties, monitors, etc #self._RegisterObjective(cost, ExtraArgs) #XXX: SetObjective ? # register termination function if termination is not None: self.SetTermination(termination) # the initial optimization iteration if not len(self._stepmon): # do generation = 0 self.Step() if callback is not None: callback(self.bestSolution) # initialize termination conditions, if needed self._termination(self) #XXX: call at generation 0 or always? # impose the evaluation limits self._SetEvaluationLimits() # the main optimization loop while not self.CheckTermination() and not self._EARLYEXIT: self.Step(**settings) if callback is not None: callback(self.bestSolution) else: self._exitMain() # handle signal interrupts signal.signal(signal.SIGINT, signal.default_int_handler) # log any termination messages msg = self.CheckTermination(disp=disp, info=True) if msg: self._stepmon.info('STOP("%s")' % msg) # save final state self.__save_state(force=True) return # extensions to the solver interface evaluations = property(__evaluations) generations = property(__generations) energy_history = property(__energy_history, __set_energy_history) solution_history = property(__solution_history, __set_solution_history) bestEnergy = property(__bestEnergy, __set_bestEnergy) bestSolution = property(__bestSolution, __set_bestSolution) pass
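# Restart-file sketch (illustrative): SetSaveFrequency periodically dumps
# solver state via dill, and the pickled solver can be recovered later;
# mystic provides LoadSolver for this (assumes dill is installed and that
# mystic's bundled rosen model is available).
from mystic.solvers import DifferentialEvolutionSolver, LoadSolver
from mystic.models import rosen

solver = DifferentialEvolutionSolver(3, 20)
solver.SetRandomInitialPoints(min=[-3.]*3, max=[3.]*3)
solver.SetSaveFrequency(10, 'solver.pkl')  # save state every 10 generations
solver.SetEvaluationLimits(generations=50)
solver.Solve(rosen)
restored = LoadSolver('solver.pkl')  # rebuild the solver from the restart file
print restored.bestEnergy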
def sparsity(cost,ndim,npts=8,args=(),bounds=None,ftol=1e-4,maxiter=None, \ maxfun=None,full_output=0,disp=1,retall=0,callback=None,**kwds): """Minimize a function using the sparsity ensemble solver. Uses a sparsity ensemble algorithm to find the minimum of a function of one or more variables. Mimics the ``scipy.optimize.fmin`` interface. Starts *npts* solver instances at points in parameter space where existing points are sparse. Args: cost (func): the function or method to be minimized: ``y = cost(x)``. ndim (int): dimensionality of the problem. npts (int, default=8): number of solver instances. args (tuple, default=()): extra arguments for cost. bounds (list(tuple), default=None): list of pairs of bounds (min,max), one for each parameter. ftol (float, default=1e-4): acceptable relative error in ``cost(xopt)`` for convergence. gtol (float, default=10): maximum iterations to run without improvement. rtol (float, default=None): minimum acceptable distance from other points. maxiter (int, default=None): the maximum number of iterations to perform. maxfun (int, default=None): the maximum number of function evaluations. full_output (bool, default=False): True if fval and warnflag are desired. disp (bool, default=True): if True, print convergence messages. retall (bool, default=False): True if allvecs is desired. callback (func, default=None): function to call after each iteration. The interface is ``callback(xk)``, with xk the current parameter vector. solver (solver, default=None): override the default nested Solver instance. handler (bool, default=False): if True, enable handling interrupt signals. itermon (monitor, default=None): override the default GenerationMonitor. evalmon (monitor, default=None): override the default EvaluationMonitor. constraints (func, default=None): a function ``xk' = constraints(xk)``, where xk is the current parameter vector, and xk' is a parameter vector that satisfies the encoded constraints. penalty (func, default=None): a function ``y = penalty(xk)``, where xk is the current parameter vector, and ``y' == 0`` when the encoded constraints are satisfied (and ``y' > 0`` otherwise). tight (bool, default=False): enforce bounds and constraints concurrently. map (func, default=None): a (parallel) map function ``y = map(f, x)``. dist (mystic.math.Distribution, default=None): generate randomness in ensemble starting position using the given distribution. step (bool, default=False): if True, enable Step within the ensemble. 
Returns: ``(xopt, {fopt, iter, funcalls, warnflag, allfuncalls}, {allvecs})`` Notes: - xopt (*ndarray*): the minimizer of the cost function - fopt (*float*): value of cost function at minimum: ``fopt = cost(xopt)`` - iter (*int*): number of iterations - funcalls (*int*): number of function calls - warnflag (*int*): warning flag: - ``1 : Maximum number of function evaluations`` - ``2 : Maximum number of iterations`` - allfuncalls (*int*): total function calls (for all solver instances) - allvecs (*list*): a list of solutions at each iteration """ handler = kwds['handler'] if 'handler' in kwds else False from mystic.solvers import NelderMeadSimplexSolver as _solver if 'solver' in kwds: _solver = kwds['solver'] from mystic.monitors import Monitor stepmon = kwds['itermon'] if 'itermon' in kwds else Monitor() evalmon = kwds['evalmon'] if 'evalmon' in kwds else Monitor() gtol = 10 # termination generations (scipy: 2, default: 10) if 'gtol' in kwds: gtol = kwds['gtol'] if gtol: #if number of generations is provided, use NCOG from mystic.termination import NormalizedChangeOverGeneration termination = NormalizedChangeOverGeneration(ftol, gtol) else: from mystic.termination import VTRChangeOverGeneration termination = VTRChangeOverGeneration(ftol) rtol = kwds[ 'rtol'] if 'rtol' in kwds else None #NOTE: 'data' set w/monitors solver = SparsitySolver(ndim, npts, rtol) solver.SetNestedSolver(_solver) #XXX: skip settings for configured solver? solver.SetEvaluationLimits(maxiter, maxfun) solver.SetEvaluationMonitor(evalmon) solver.SetGenerationMonitor(stepmon) if 'dist' in kwds: solver.SetDistribution(kwds['dist']) if 'penalty' in kwds: solver.SetPenalty(kwds['penalty']) if 'constraints' in kwds: solver.SetConstraints(kwds['constraints']) if bounds is not None: minb, maxb = unpair(bounds) tight = kwds['tight'] if 'tight' in kwds else False solver.SetStrictRanges(minb, maxb, tight=tight) # clip? _map = kwds['map'] if 'map' in kwds else None if _map: solver.SetMapper(_map) if handler: solver.enable_signal_handler() solver.Solve(cost,termination=termination,disp=disp, \ ExtraArgs=args,callback=callback) solution = solver.Solution() # code below here pushes output to scipy.optimize.fmin interface msg = solver.Terminated(disp=False, info=True) x = solver.bestSolution fval = solver.bestEnergy warnflag = 0 fcalls = solver.evaluations all_fcalls = solver._total_evals iterations = solver.generations allvecs = solver._stepmon.x if fcalls >= solver._maxfun: #XXX: check against total or individual? warnflag = 1 elif iterations >= solver._maxiter: #XXX: check against total or individual? warnflag = 2 else: pass if full_output: retlist = x, fval, iterations, fcalls, warnflag, all_fcalls if retall: retlist += (allvecs, ) else: retlist = x if retall: retlist = (x, allvecs) return retlist
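# Usage sketch for the ensemble interface above: launch 4 solver instances
# at sparse points of a bounded 3-D box (assumes mystic's rosen model).
from mystic.models import rosen
print sparsity(rosen, 3, npts=4, bounds=[(-3., 3.)]*3, disp=0)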
def solve(self, objective, **kwds): #NOTE: single axis only """solve (in measure space) for bound on given objective Input: objective: cost function of the form y = objective(x) Additional Input: solver: mystic.solver instance [default: DifferentialEvolutionSolver2] npop: population size [default: None] id: a unique identifier for the solver [default: None] nested: mystic.solver instance [default: None], for ensemble solvers x0: initial parameter guess [default: use RandomInitialPoints] pool: pathos.pool instance [default: None] maxiter: max number of iterations [default: defined in solver] maxfun: max number of objective evaluations [default: defined in solver] evalmon: mystic.monitor instance [default: Monitor], for evaluations stepmon: mystic.monitor instance [default: Monitor], for iterations save: iteration frequency to save solver [default: None] opts: dict of configuration options for solver.Solve [default: {}] Returns: solver instance, after Solve has been called """ kwds.update(self.kwds) #FIXME: good idea??? [bad in parallel???] lb, ub = self.lb, self.ub solver = kwds.get('solver', DifferentialEvolutionSolver2) npop = kwds.get('npop', None) if npop is not None: solver = solver(len(lb), npop) else: solver = solver(len(lb)) solver.id = kwds.pop('id', None) nested = kwds.get('nested', None) x0 = kwds.get('x0', None) if nested is not None: # Buckshot/Sparsity solver.SetNestedSolver(nested) else: # DiffEv/Nelder/Powell if x0 is None: solver.SetRandomInitialPoints(min=lb, max=ub) else: solver.SetInitialPoints(x0) save = kwds.get('save', None) if save is not None: solver.SetSaveFrequency(save, 'Solver.pkl') #XXX: set name? mapper = kwds.get('pool', None) if mapper is not None: pool = mapper() #XXX: ThreadPool, ProcessPool, etc solver.SetMapper(pool.map) #NOTE: not Nelder/Powell maxiter = kwds.get('maxiter', None) maxfun = kwds.get('maxfun', None) solver.SetEvaluationLimits(maxiter, maxfun) evalmon = kwds.get('evalmon', None) evalmon = Monitor() if evalmon is None else evalmon solver.SetEvaluationMonitor(evalmon[:0]) stepmon = kwds.get('stepmon', None) stepmon = Monitor() if stepmon is None else stepmon solver.SetGenerationMonitor(stepmon[:0]) solver.SetStrictRanges(min=lb, max=ub) solver.SetConstraints(self.constraint) opts = kwds.get('opts', {}) # solve solver.Solve(objective, **opts) if mapper is not None: pool.close() pool.join() pool.clear() #NOTE: if used, then shut down pool #NOTE: debugging code #print("solved: %s" % solver.Solution()) #func_bound = solver.bestEnergy #func_evals = solver.evaluations #from mystic.munge import write_support_file #write_support_file(solver._stepmon) #print("func_bound: %s" % func_bound) #NOTE: may be inverted #print("func_evals: %s" % func_evals) return solver
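# Hypothetical driver for the method above (a sketch): 'bounder' stands in
# for an instance of the enclosing class, which must supply lb, ub, kwds,
# and constraint; 'objective' is any y = f(x) over those bounds.
from mystic.solvers import DifferentialEvolutionSolver2
result = bounder.solve(objective, solver=DifferentialEvolutionSolver2,
                       npop=40, maxiter=500, opts=dict(disp=0))
print("best: %s @ %s" % (result.bestEnergy, result.bestSolution))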
""" # build train/test data xx = np.array(x) yy = np.array(y) if test_size is None: return xx, xx, yy, yy from sklearn.model_selection import train_test_split as split return split(xx, yy, test_size=test_size, random_state=random_state) if __name__ == '__main__': # get access to data in archive import dataset as ds from mystic.monitors import Monitor m = Monitor() m._x, m._y = ds.read_archive('demo') if not len(m._x): msg = "the 'demo' archive is empty." raise ValueError(msg) xtrain, xtest, ytrain, ytest = traintest(m._x, m._y, test_size=.2, random_state=42) import sklearn.preprocessing as pre import sklearn.neural_network as nn # build dicts of hyperparameters for ANN instance args = dict(hidden_layer_sizes=(100, 75, 50, 25), max_iter=1000,
def impose_valid(cutoff, model, guess=None, **kwds): """impose model validity on a given list of parameters w,x,y Optimization on w,x,y over the given bounds seeks sum(infeasibility) = 0. (this function is not ???-preserving) Inputs: cutoff -- maximum acceptable model invalidity |y - F(x')|; a single value model -- the model function, y' = F(x'), that approximates reality, y = G(x) guess -- the scenario providing an initial guess at validity, or a tuple of dimensions of the target scenario Additional Inputs: hausdorff -- norm; where if given, ytol = |y - F(x')| + |x - x'|/norm xtol -- acceptable pointwise graphical distance of model from reality tol -- acceptable optimizer termination before sum(infeasibility) = 0. bounds -- a tuple of sample bounds: bounds = (lower_bounds, upper_bounds) constraints -- a function that takes a flat list parameters x' = constraints(x) Outputs: pm -- a scenario with desired model validity Notes: xtol defines the n-dimensional base of a pilar of height cutoff, centered at each point. The region inside the pilar defines the space where a "valid" model must intersect. If xtol is not specified, then the base of the pilar will be a dirac at x' = x. This function performs an optimization to find a set of points where the model is valid. Here, tol is used to set the optimization termination for the sum(graphical_distances), while cutoff is used in defining the graphical_distance between x,y and x',F(x'). """ from numpy import sum as _sum, asarray from mystic.math.distance import graphical_distance, infeasibility, _npts if guess is None: message = "Requires a guess scenario, or a tuple of scenario dimensions." raise TypeError, message # get initial guess if hasattr(guess, 'pts'): # guess is a scenario pts = guess.pts # number of x guess = guess.flatten(all=True) else: pts = guess # guess is given as a tuple of 'pts' guess = None npts = _npts(pts) # number of Y # prepare bounds for solver bounds = kwds.pop('bounds', None) # if bounds are not set, use the default optimizer bounds if bounds is None: lower_bounds = []; upper_bounds = [] for n in pts: # bounds for n*x in each dimension (x2 due to weights) lower_bounds += [None]*n * 2 upper_bounds += [None]*n * 2 # also need bounds for npts*y values lower_bounds += [None]*npts upper_bounds += [None]*npts bounds = lower_bounds, upper_bounds bounds = asarray(bounds).T # plug in the 'constraints' function: param' = constraints(param) constraints = kwds.pop('constraints', None) # default is no constraints if not constraints: # if None (default), there are no constraints constraints = lambda x: x # 'wiggle room' tolerances ipop = kwds.pop('ipop', 10) #XXX: tune ipop (inner optimization)? imax = kwds.pop('imax', 10) #XXX: tune imax (inner optimization)? # tolerance for optimization on sum(y) tol = kwds.pop('tol', 0.0) # default npop = kwds.pop('npop', 20) #XXX: tune npop (outer optimization)? maxiter = kwds.pop('maxiter', 1000) #XXX: tune maxiter (outer optimization)? # if no guess was made, then use bounds constraints if guess is None: if npop: guess = bounds else: # fmin_powell needs a list params (not bounds) guess = [(a + b)/2. 
for (a,b) in bounds] # construct cost function to reduce sum(infeasibility) def cost(rv): """compute cost from a 1-d array of model parameters, where: cost = | sum( infeasibility ) | """ # converting rv to scenario points = scenario() points.load(rv, pts) # calculate infeasibility Rv = graphical_distance(model, points, ytol=cutoff, ipop=ipop, \ imax=imax, **kwds) v = infeasibility(Rv, cutoff) # converting v to E return _sum(v) #XXX: abs ? # construct and configure optimizer debug = False #!!! maxfun = 1e+6 crossover = 0.9; percent_change = 0.8 ftol = abs(tol); gtol = None #XXX: optimally, should be VTRCOG... if debug: print "lower bounds: %s" % bounds.T[0] print "upper bounds: %s" % bounds.T[1] # print "initial value: %s" % guess # use optimization to get model-valid points from mystic.solvers import diffev2, fmin_powell from mystic.monitors import Monitor, VerboseMonitor from mystic.strategy import Best1Bin, Best1Exp evalmon = Monitor(); stepmon = Monitor(); strategy = Best1Exp if debug: stepmon = VerboseMonitor(2) #!!! if npop: # use VTR results = diffev2(cost, guess, npop, ftol=ftol, gtol=gtol, bounds=bounds,\ maxiter=maxiter, maxfun=maxfun, constraints=constraints,\ cross=crossover, scale=percent_change, strategy=strategy,\ evalmon=evalmon, itermon=stepmon,\ full_output=1, disp=0, handler=False) else: # use VTR results = fmin_powell(cost, guess, ftol=ftol, gtol=gtol, bounds=bounds,\ maxiter=maxiter, maxfun=maxfun, constraints=constraints,\ evalmon=evalmon, itermon=stepmon,\ full_output=1, disp=0, handler=False) # repack the results pm = scenario() pm.load(results[0], pts) # params: w,x,y #if debug: print "final cost: %s" % results[1] if debug and results[2] >= maxiter: # iterations print "Warning: constraints solver terminated at maximum iterations" #func_evals = results[3] # evaluation return pm
def impose_feasible(cutoff, data, guess=None, **kwds): """impose shortness on a given list of parameters w,x,y. Optimization on w,x,y over the given bounds seeks sum(infeasibility) = 0. (this function is not ???-preserving) Inputs: cutoff -- maximum acceptable deviation from shortness data -- a dataset of observed points (these points are 'static') guess -- the scenario providing an initial guess at feasibility, or a tuple of dimensions of the target scenario Additional Inputs: tol -- acceptable optimizer termination before sum(infeasibility) = 0. bounds -- a tuple of sample bounds: bounds = (lower_bounds, upper_bounds) constraints -- a function that takes a flat list parameters x' = constraints(x) Outputs: pm -- a scenario with desired shortness """ from numpy import sum, asarray from mystic.math.legacydata import dataset from mystic.math.distance import lipschitz_distance, infeasibility, _npts if guess is None: message = "Requires a guess scenario, or a tuple of scenario dimensions." raise TypeError, message # get initial guess if hasattr(guess, 'pts'): # guess is a scenario pts = guess.pts # number of x guess = guess.flatten(all=True) else: pts = guess # guess is given as a tuple of 'pts' guess = None npts = _npts(pts) # number of Y long_form = len(pts) - list(pts).count(2) # can use '2^K compressed format' # prepare bounds for solver bounds = kwds.pop('bounds', None) # if bounds are not set, use the default optimizer bounds if bounds is None: lower_bounds = []; upper_bounds = [] for n in pts: # bounds for n*x in each dimension (x2 due to weights) lower_bounds += [None]*n * 2 upper_bounds += [None]*n * 2 # also need bounds for npts*y values lower_bounds += [None]*npts upper_bounds += [None]*npts bounds = lower_bounds, upper_bounds bounds = asarray(bounds).T # plug in the 'constraints' function: param' = constraints(param) # constraints should impose_mean(y,w), and possibly sum(weights) constraints = kwds.pop('constraints', None) # default is no constraints if not constraints: # if None (default), there are no constraints constraints = lambda x: x _self = kwds.pop('with_self', True) # default includes self in shortness if _self is not False: _self = True # tolerance for optimization on sum(y) tol = kwds.pop('tol', 0.0) # default npop = kwds.pop('npop', 20) #XXX: tune npop? maxiter = kwds.pop('maxiter', 1000) #XXX: tune maxiter? # if no guess was made, then use bounds constraints if guess is None: if npop: guess = bounds else: # fmin_powell needs a list params (not bounds) guess = [(a + b)/2. for (a,b) in bounds] # construct cost function to reduce sum(lipschitz_distance) def cost(rv): """compute cost from a 1-d array of model parameters, where: cost = | sum(lipschitz_distance) | """ _data = dataset() _pm = scenario() _pm.load(rv, pts) # here rv is param: w,x,y if not long_form: positions = _pm.select(*range(npts)) else: positions = _pm.positions _data.load( data.coords, data.values ) # LOAD static if _self: _data.load( positions, _pm.values ) # LOAD dynamic _data.lipschitz = data.lipschitz # LOAD L Rv = lipschitz_distance(_data.lipschitz, _pm, _data, tol=cutoff, **kwds) v = infeasibility(Rv, cutoff) return abs(sum(v)) # construct and configure optimizer debug = False #!!! 
maxfun = 1e+6 crossover = 0.9; percent_change = 0.9 ftol = abs(tol); gtol = None if debug: print "lower bounds: %s" % bounds.T[0] print "upper bounds: %s" % bounds.T[1] # print "initial value: %s" % guess # use optimization to get feasible points from mystic.solvers import diffev2, fmin_powell from mystic.monitors import Monitor, VerboseMonitor from mystic.strategy import Best1Bin, Best1Exp evalmon = Monitor(); stepmon = Monitor(); strategy = Best1Exp if debug: stepmon = VerboseMonitor(10) #!!! if npop: # use VTR results = diffev2(cost, guess, npop, ftol=ftol, gtol=gtol, bounds=bounds,\ maxiter=maxiter, maxfun=maxfun, constraints=constraints,\ cross=crossover, scale=percent_change, strategy=strategy,\ evalmon=evalmon, itermon=stepmon,\ full_output=1, disp=0, handler=False) else: # use VTR results = fmin_powell(cost, guess, ftol=ftol, gtol=gtol, bounds=bounds,\ maxiter=maxiter, maxfun=maxfun, constraints=constraints,\ evalmon=evalmon, itermon=stepmon,\ full_output=1, disp=0, handler=False) # repack the results pm = scenario() pm.load(results[0], pts) # params: w,x,y #if debug: print "final cost: %s" % results[1] if debug and results[2] >= maxiter: # iterations print "Warning: constraints solver terminated at maximum iterations" #func_evals = results[3] # evaluation return pm
def _run_solver(self, early_terminate=False, **kwds): from mystic.monitors import Monitor import numpy from mystic.tools import random_seed seed = 111 if self.maxiter is None else 321 #XXX: good numbers... random_seed(seed) esow = Monitor() ssow = Monitor() solver = self.solver solver.SetRandomInitialPoints(min = self.min, max = self.max) if self.usebounds: solver.SetStrictRanges(self.min, self.max) if self.uselimits: solver.SetEvaluationLimits(self.maxiter, self.maxfun) if self.useevalmon: solver.SetEvaluationMonitor(esow) if self.usestepmon: solver.SetGenerationMonitor(ssow) #### run solver, but trap output _stdout = trap_stdout() solver.Solve(self.costfunction, self.term, **kwds) out = release_stdout(_stdout) ################################ sol = solver.Solution() iter=1 #if self.uselimits and self.maxiter == 0: iter=0 # sanity check solver internals self.assertTrue(solver.generations == len(solver._stepmon._y)-iter) self.assertTrue(list(solver.bestSolution) == solver._stepmon.x[-1]) #XXX self.assertTrue(solver.bestEnergy == solver._stepmon.y[-1]) self.assertTrue(solver.solution_history == solver._stepmon.x) self.assertTrue(solver.energy_history == solver._stepmon.y) if self.usestepmon: self.assertTrue(ssow.x == solver._stepmon.x) self.assertTrue(ssow.y == solver._stepmon.y) self.assertTrue(ssow._y == solver._stepmon._y) if self.useevalmon: self.assertTrue(solver.evaluations == len(solver._evalmon._y)) self.assertTrue(esow.x == solver._evalmon.x) self.assertTrue(esow.y == solver._evalmon.y) self.assertTrue(esow._y == solver._evalmon._y) # Fail appropriately for solver/termination mismatch if early_terminate: self.assertTrue(solver.generations < 2) warn = "Warning: Invalid termination condition (nPop < 2)" self.assertTrue(warn in out) return g = solver.generations calls = [(g+1)*self.NP, (2*g)+1] iters = [g] # Test early terminations if self.uselimits and self.maxfun == 0: calls += [1, 20] #XXX: scipy* iters += [1] #XXX: scipy* self.assertTrue(solver.evaluations in calls) self.assertTrue(solver.generations in iters) return if self.uselimits and self.maxfun == 1: calls += [1, 20] #XXX: scipy* iters += [1] #XXX: scipy* self.assertTrue(solver.evaluations in calls) self.assertTrue(solver.generations in iters) return if self.uselimits and self.maxiter == 0: calls += [1, 20] #XXX: scipy* iters += [1] #XXX: scipy* self.assertTrue(solver.evaluations in calls) self.assertTrue(solver.generations in iters) return if self.uselimits and self.maxiter == 1: calls += [20] #Powell's self.assertTrue(solver.evaluations in calls) self.assertTrue(solver.generations in iters) return if self.uselimits and self.maxiter and 2 <= self.maxiter <= 5: calls += [52, 79, 107, 141] #Powell's self.assertTrue(solver.evaluations in calls) self.assertTrue(solver.generations in iters) return # Verify solution is close to exact #print(sol) for i in range(len(sol)): self.assertAlmostEqual(sol[i], self.exact[i], self.precision) return
def test_rosenbrock(verbose=False): """Test the 2-dimensional Rosenbrock function. Testing 2-D Rosenbrock: Expected: x=[1., 1.] and f=0 Using DifferentialEvolutionSolver: Solution: [ 1.00000037 1.0000007 ] f value: 2.29478683682e-13 Iterations: 99 Function evaluations: 3996 Time elapsed: 0.582273006439 seconds Using DifferentialEvolutionSolver2: Solution: [ 0.99999999 0.99999999] f value: 3.84824937598e-15 Iterations: 100 Function evaluations: 4040 Time elapsed: 0.577210903168 seconds Using NelderMeadSimplexSolver: Solution: [ 0.99999921 1.00000171] f value: 1.08732211477e-09 Iterations: 70 Function evaluations: 130 Time elapsed: 0.0190329551697 seconds Using PowellDirectionalSolver: Solution: [ 1. 1.] f value: 0.0 Iterations: 28 Function evaluations: 859 Time elapsed: 0.113857030869 seconds """ if verbose: print("Testing 2-D Rosenbrock:") print("Expected: x=[1., 1.] and f=0") from mystic.models import rosen as costfunc ndim = 2 lb = [-5.]*ndim ub = [5.]*ndim x0 = [2., 3.] maxiter = 10000 # DifferentialEvolutionSolver if verbose: print("\nUsing DifferentialEvolutionSolver:") npop = 40 from mystic.solvers import DifferentialEvolutionSolver from mystic.termination import ChangeOverGeneration as COG from mystic.strategy import Rand1Bin esow = Monitor() ssow = Monitor() solver = DifferentialEvolutionSolver(ndim, npop) solver.SetInitialPoints(x0) solver.SetStrictRanges(lb, ub) solver.SetEvaluationLimits(generations=maxiter) solver.SetEvaluationMonitor(esow) solver.SetGenerationMonitor(ssow) term = COG(1e-10) time1 = time.time() # Is this an ok way of timing? solver.Solve(costfunc, term, strategy=Rand1Bin) sol = solver.Solution() time_elapsed = time.time() - time1 fx = solver.bestEnergy if verbose: print("Solution: %s" % sol) print("f value: %s" % fx) print("Iterations: %s" % solver.generations) print("Function evaluations: %s" % len(esow.x)) print("Time elapsed: %s seconds" % time_elapsed) assert almostEqual(fx, 2.29478683682e-13, tol=3e-3) # DifferentialEvolutionSolver2 if verbose: print("\nUsing DifferentialEvolutionSolver2:") npop = 40 from mystic.solvers import DifferentialEvolutionSolver2 from mystic.termination import ChangeOverGeneration as COG from mystic.strategy import Rand1Bin esow = Monitor() ssow = Monitor() solver = DifferentialEvolutionSolver2(ndim, npop) solver.SetInitialPoints(x0) solver.SetStrictRanges(lb, ub) solver.SetEvaluationLimits(generations=maxiter) solver.SetEvaluationMonitor(esow) solver.SetGenerationMonitor(ssow) term = COG(1e-10) time1 = time.time() # Is this an ok way of timing? solver.Solve(costfunc, term, strategy=Rand1Bin) sol = solver.Solution() time_elapsed = time.time() - time1 fx = solver.bestEnergy if verbose: print("Solution: %s" % sol) print("f value: %s" % fx) print("Iterations: %s" % solver.generations) print("Function evaluations: %s" % len(esow.x)) print("Time elapsed: %s seconds" % time_elapsed) assert almostEqual(fx, 3.84824937598e-15, tol=3e-3) # NelderMeadSimplexSolver if verbose: print("\nUsing NelderMeadSimplexSolver:") from mystic.solvers import NelderMeadSimplexSolver from mystic.termination import CandidateRelativeTolerance as CRT esow = Monitor() ssow = Monitor() solver = NelderMeadSimplexSolver(ndim) solver.SetInitialPoints(x0) solver.SetStrictRanges(lb, ub) solver.SetEvaluationLimits(generations=maxiter) solver.SetEvaluationMonitor(esow) solver.SetGenerationMonitor(ssow) term = CRT() time1 = time.time() # Is this an ok way of timing? 
solver.Solve(costfunc, term) sol = solver.Solution() time_elapsed = time.time() - time1 fx = solver.bestEnergy if verbose: print("Solution: %s" % sol) print("f value: %s" % fx) print("Iterations: %s" % solver.generations) print("Function evaluations: %s" % len(esow.x)) print("Time elapsed: %s seconds" % time_elapsed) assert almostEqual(fx, 1.08732211477e-09, tol=3e-3) # PowellDirectionalSolver if verbose: print("\nUsing PowellDirectionalSolver:") from mystic.solvers import PowellDirectionalSolver from mystic.termination import NormalizedChangeOverGeneration as NCOG esow = Monitor() ssow = Monitor() solver = PowellDirectionalSolver(ndim) solver.SetInitialPoints(x0) solver.SetStrictRanges(lb, ub) solver.SetEvaluationLimits(generations=maxiter) solver.SetEvaluationMonitor(esow) solver.SetGenerationMonitor(ssow) term = NCOG(1e-10) time1 = time.time() # Is this an ok way of timing? solver.Solve(costfunc, term) sol = solver.Solution() time_elapsed = time.time() - time1 fx = solver.bestEnergy if verbose: print("Solution: %s" % sol) print("f value: %s" % fx) print("Iterations: %s" % solver.generations) print("Function evaluations: %s" % len(esow.x)) print("Time elapsed: %s seconds" % time_elapsed) assert almostEqual(fx, 0.0, tol=3e-3)
class AbstractSolver(object): """AbstractSolver base class for mystic optimizers. """ def __init__(self, dim, **kwds): """ Takes one initial input:: dim -- dimensionality of the problem. Additional inputs:: npop -- size of the trial solution population. [default = 1] Important class members:: nDim, nPop = dim, npop generations - an iteration counter. evaluations - an evaluation counter. bestEnergy - current best energy. bestSolution - current best parameter set. [size = dim] popEnergy - set of all trial energy solutions. [size = npop] population - set of all trial parameter solutions. [size = dim*npop] solution_history - history of bestSolution status. [StepMonitor.x] energy_history - history of bestEnergy status. [StepMonitor.y] signal_handler - catches the interrupt signal. """ NP = kwds['npop'] if 'npop' in kwds else 1 self.nDim = dim self.nPop = NP self._init_popEnergy = inf self.popEnergy = [self._init_popEnergy] * NP self.population = [[0.0 for i in range(dim)] for j in range(NP)] self.trialSolution = [0.0] * dim self._map_solver = False self._bestEnergy = None self._bestSolution = None self._state = None self._type = self.__class__.__name__ self.sigint_callback = None self._handle_sigint = False self._useStrictRange = False self._useTightRange = False self._defaultMin = [-1e3] * dim self._defaultMax = [1e3] * dim self._strictMin = [] self._strictMax = [] self._maxiter = None self._maxfun = None self._saveiter = None #self._saveeval = None from mystic.monitors import Null, Monitor self._evalmon = Null() self._stepmon = Monitor() self._fcalls = [0] self._energy_history = None self._solution_history = None self.id = None # identifier (use like "rank" for MPI) self._strictbounds = lambda x: x self._constraints = lambda x: x self._penalty = lambda x: 0.0 self._reducer = None self._cost = (None, None, None) # (cost, raw_cost, args) #,callback) self._collapse = False self._termination = lambda x, *ar, **kw: False if len(ar) < 1 or ar[ 0] is False or (kw['info'] if 'info' in kw else True ) == False else '' #XXX: better default ? 
# (get termination details with self._termination.__doc__) import mystic.termination as mt self._EARLYEXIT = mt.EARLYEXIT self._live = False return def Solution(self): """return the best solution""" return self.bestSolution def __evaluations(self): """get the number of function calls""" return self._fcalls[0] def __generations(self): """get the number of iterations""" return max(0, len(self._stepmon) - 1) def __energy_history(self): """get the energy_history (default: energy_history = _stepmon._y)""" if self._energy_history is None: return self._stepmon._y return self._energy_history def __set_energy_history(self, energy): """set the energy_history (energy=None will sync with _stepmon._y)""" self._energy_history = energy return def __solution_history(self): """get the solution_history (default: solution_history = _stepmon.x)""" if self._solution_history is None: return self._stepmon.x return self._solution_history def __set_solution_history(self, params): """set the solution_history (params=None will sync with _stepmon.x)""" self._solution_history = params return def __bestSolution(self): """get the bestSolution (default: bestSolution = population[0])""" if self._bestSolution is None: return self.population[0] # bs = self.population[0] # return bs.copy() if hasattr(bs, 'copy') else bs[:] return self._bestSolution def __set_bestSolution(self, params): """set the bestSolution (params=None will sync with population[0])""" self._bestSolution = params #bs = params #if bs is None: self._bestSolution = None #else: self._bestSolution = bs.copy() if hasattr(bs, 'copy') else bs[:] return def __bestEnergy(self): """get the bestEnergy (default: bestEnergy = popEnergy[0])""" if self._bestEnergy is None: return self.popEnergy[0] return self._bestEnergy def __set_bestEnergy(self, energy): """set the bestEnergy (energy=None will sync with popEnergy[0])""" self._bestEnergy = energy return def SetReducer(self, reducer, arraylike=False): """apply a reducer function to the cost function input:: - a reducer function of the form: y' = reducer(yk), where yk is a results vector and y' is a single value. Ideally, this method is applied to a cost function with a multi-value return, to reduce the output to a single value. If arraylike, the reducer provided should take a single array as input and produce a scalar; otherwise, the reducer provided should meet the requirements of the python's builtin 'reduce' method (e.g. lambda x,y: x+y), taking two scalars and producing a scalar.""" if not reducer: self._reducer = None elif not isinstance(reducer, _Callable): raise TypeError("'%s' is not a callable function" % reducer) elif not arraylike: self._reducer = wrap_reducer(reducer) else: #XXX: check if is arraylike? self._reducer = reducer return self._update_objective() def SetPenalty(self, penalty): """apply a penalty function to the optimization input:: - a penalty function of the form: y' = penalty(xk), with y = cost(xk) + y', where xk is the current parameter vector. Ideally, this function is constructed so a penalty is applied when the desired (i.e. encoded) constraints are violated. Equality constraints should be considered satisfied when the penalty condition evaluates to zero, while inequality constraints are satisfied when the penalty condition evaluates to a non-positive number.""" if not penalty: self._penalty = lambda x: 0.0 elif not isinstance(penalty, _Callable): raise TypeError("'%s' is not a callable function" % penalty) else: #XXX: check for format: y' = penalty(x) ? 
self._penalty = penalty return self._update_objective() def SetConstraints(self, constraints): """apply a constraints function to the optimization input:: - a constraints function of the form: xk' = constraints(xk), where xk is the current parameter vector. Ideally, this function is constructed so the parameter vector it passes to the cost function will satisfy the desired (i.e. encoded) constraints.""" if not constraints: self._constraints = lambda x: x elif not isinstance(constraints, _Callable): raise TypeError("'%s' is not a callable function" % constraints) else: #XXX: check for format: x' = constraints(x) ? self._constraints = constraints return self._update_objective() def SetGenerationMonitor(self, monitor, new=False): """select a callable to monitor (x, f(x)) after each solver iteration input:: - a monitor instance or monitor type used to track (x, f(x)). Any data collected in an existing generation monitor will be prepended, unless new is True.""" from mystic.monitors import Null, Monitor #, CustomMonitor if monitor is None: monitor = Null() current = Null() if new else self._stepmon if current is monitor: current = Null() if isinstance(monitor, Monitor): # is Monitor() self._stepmon = monitor self._stepmon.prepend(current) elif isinstance(monitor, Null) or monitor == Null: # is Null() or Null self._stepmon = Monitor() #XXX: don't allow Null self._stepmon.prepend(current) elif hasattr(monitor, '__module__'): # is CustomMonitor() if monitor.__module__ in ['mystic._genSow']: self._stepmon = monitor #FIXME: need .prepend(current) else: raise TypeError("'%s' is not a monitor instance" % monitor) self.energy_history = None # sync with self._stepmon self.solution_history = None # sync with self._stepmon return def SetEvaluationMonitor(self, monitor, new=False): """select a callable to monitor (x, f(x)) after each cost function evaluation input:: - a monitor instance or monitor type used to track (x, f(x)). Any data collected in an existing evaluation monitor will be prepended, unless new is True.""" from mystic.monitors import Null, Monitor #, CustomMonitor if monitor is None: monitor = Null() current = Null() if new else self._evalmon if current is monitor: current = Null() if isinstance(monitor, (Null, Monitor)): # is Monitor() or Null() self._evalmon = monitor self._evalmon.prepend(current) elif monitor == Null: # is Null self._evalmon = monitor() self._evalmon.prepend(current) elif hasattr(monitor, '__module__'): # is CustomMonitor() if monitor.__module__ in ['mystic._genSow']: self._evalmon = monitor #FIXME: need .prepend(current) else: raise TypeError("'%s' is not a monitor instance" % monitor) return def SetStrictRanges(self, min=None, max=None, **kwds): """ensure solution is within bounds input:: - min, max: must be a sequence of length self.nDim - each min[i] should be <= the corresponding max[i] additional input:: - tight (bool): if True, apply bounds concurrent with other constraints - clip (bool): if True, bounding constraints will clip exterior values note:: SetStrictRanges(None) will remove strict range constraints notes:: By default, the bounds are coupled to the other constraints with a coupler (e.g. ``mystic.coupler.outer``), and not applied concurrently (i.e. with ``mystic.constraints.and_``). Using a coupler favors speed over robustness, and relies on the user to formulate the constraints so they do not conflict with imposing the bounds. note:: The keyword ``clip`` controls the clipping behavior for the bounding constraints. 
The default is to rely on ``_clipGuessWithinRangeBoundary`` when ensuring the bounds are respected, and to not take action when the other constraints are being imposed. However when ``tight=True``, the default is that the bounds constraints clip at the bounds. By default, bounds constraints are applied with a symbolic solver, as the symbolic solver is generally faster than ``mystic.constraints.impose_bounds``. All of the above default behaviors are active when ``clip=None``. note:: If ``clip=False``, ``impose_bounds`` will be used to map the candidate solution inside the bounds, while ``clip=True`` will use ``impose_bounds`` to clip the candidate solution at the bounds. Note that ``clip=True`` is *not* the same as the default (``clip=None``, which uses a symbolic solver). If ``clip`` is specified while ``tight`` is not, then ``tight`` will be set to ``True``.""" tight = kwds['tight'] if 'tight' in kwds else None clip = kwds['clip'] if 'clip' in kwds else None if clip is None: # tight in (True, False, None) args = dict(symbolic=True) if tight else dict() elif tight is False: # clip in (True, False) raise ValueError('can not specify clip when tight is False') else: # tight in (True, None) args = dict(symbolic=False, clip=clip) #XXX: we are ignoring bad kwds entries, should we? if min is False or max is False: self._useStrictRange = False self._strictbounds = self._boundsconstraints(**args) return self._update_objective() #XXX: better to use 'defaultMin,defaultMax' or '-inf,inf' ??? if min is None: min = self._defaultMin if max is None: max = self._defaultMax # when 'some' of the bounds are given as 'None', replace with default for i in range(len(min)): if min[i] is None: min[i] = self._defaultMin[0] if max[i] is None: max[i] = self._defaultMax[0] min = asarray(min) max = asarray(max) if numpy.any((min > max), 0): raise ValueError("each min[i] must be <= the corresponding max[i]") if len(min) != self.nDim: raise ValueError("bounds array must be length %s" % self.nDim) self._useStrictRange = True self._strictMin = min self._strictMax = max self._strictbounds = self._boundsconstraints(**args) return self._update_objective() def _boundsconstraints(self, **kwds): """if _useStrictRange, build a constraint from (_strictMin,strictMax) symbolic: bool, if True, use symbolic constraints [default: None] clip: bool, if True, clip exterior values to the bounds [default: None] NOTE: By default, the bounds and constraints are imposed sequentially with a coupler. Using a coupler chooses speed over robustness, and relies on the user to formulate the constraints so that they do not conflict with imposing the bounds. Hence, if no keywords are provided, the bounds and constraints are applied sequentially. NOTE: If any of the keyword arguments are used, then the bounds and constraints are imposed concurrently. This is slower but more robust than applying the bounds and constraints sequentially (the default). When the bounds and constraints are applied concurrently, the defaults for the keywords (symbolic and clip) are set to True, unless otherwise specified. NOTE: If `symbolic=True`, use symbolic constraints to impose the bounds; otherwise use `mystic.constraints.impose_bounds`. Using `clip=False` will set `symbolic=False` unless symbolic is specified otherwise. 
""" symbolic = kwds['symbolic'] if 'symbolic' in kwds else None clip = kwds['clip'] if 'clip' in kwds else None # set the (complicated) defaults if symbolic is None and clip is not None: # clip in [False, True] symbolic = bool(clip) elif clip is None: clip = True ignore = symbolic is None if not self._useStrictRange or ignore: self._useTightRange = False return lambda x: x self._useTightRange = True if symbolic and not clip: raise NotImplementedError( "symbolic must clip to the nearest bound") # build the constraint min = self._strictMin max = self._strictMax from mystic.constraints import boundsconstrain as bcon cons = bcon(min, max, symbolic=symbolic, clip=clip) return cons def _clipGuessWithinRangeBoundary(self, x0, at=True): """ensure that initial guess is set within bounds input:: - x0: must be a sequence of length self.nDim - at: bool, if True, then clip at the bounds""" #if len(x0) != self.nDim: #XXX: unnecessary w/ self.trialSolution # raise ValueError, "initial guess must be length %s" % self.nDim x0 = asarray(x0) bounds = (self._strictMin, self._strictMax) if not len(self._strictMin): return x0 # clip x0 at bounds settings = numpy.seterr(all='ignore') x_ = x0.clip(*bounds) numpy.seterr(**settings) if at: return x_ # clip x0 within bounds x_ = x_ != x0 x0[x_] = random.uniform(self._strictMin, self._strictMax)[x_] return x0 def SetInitialPoints(self, x0, radius=0.05): """Set Initial Points with Guess (x0) input:: - x0: must be a sequence of length self.nDim - radius: generate random points within [-radius*x0, radius*x0] for i!=0 when a simplex-type initial guess in required""" x0 = asfarray(x0) rank = len(x0.shape) if rank == 0: x0 = asfarray([x0]) rank = 1 if not -1 < rank < 2: raise ValueError( "Initial guess must be a scalar or rank-1 sequence.") if len(x0) != self.nDim: raise ValueError("Initial guess must be length %s" % self.nDim) #slightly alter initial values for solvers that depend on randomness min = x0 * (1 - radius) max = x0 * (1 + radius) numzeros = len(x0[x0 == 0]) min[min == 0] = asarray([-radius for i in range(numzeros)]) max[max == 0] = asarray([radius for i in range(numzeros)]) self.SetRandomInitialPoints(min, max) #stick initial values in population[i], i=0 self.population[0] = x0.tolist() def SetRandomInitialPoints(self, min=None, max=None): """Generate Random Initial Points within given Bounds input:: - min, max: must be a sequence of length self.nDim - each min[i] should be <= the corresponding max[i]""" if min is None: min = self._defaultMin if max is None: max = self._defaultMax #if numpy.any(( asarray(min) > asarray(max) ),0): # raise ValueError, "each min[i] must be <= the corresponding max[i]" if len(min) != self.nDim or len(max) != self.nDim: raise ValueError("bounds array must be length %s" % self.nDim) # when 'some' of the bounds are given as 'None', replace with default for i in range(len(min)): if min[i] is None: min[i] = self._defaultMin[0] if max[i] is None: max[i] = self._defaultMax[0] #generate random initial values for i in range(len(self.population)): for j in range(self.nDim): self.population[i][j] = random.uniform(min[j], max[j]) def SetMultinormalInitialPoints(self, mean, var=None): """Generate Initial Points from Multivariate Normal. input:: - mean must be a sequence of length self.nDim - var can be... None: -> it becomes the identity scalar: -> var becomes scalar * I matrix: -> the variance matrix. must be the right size! 
""" from mystic.tools import random_state rng = random_state(module='numpy.random') assert (len(mean) == self.nDim) if var is None: var = numpy.eye(self.nDim) else: try: # scalar ? float(var) except: # nope. var better be matrix of the right size (no check) pass else: var = var * numpy.eye(self.nDim) for i in range(len(self.population)): self.population[i] = rng.multivariate_normal(mean, var).tolist() return def SetSampledInitialPoints(self, dist=None): """Generate Random Initial Points from Distribution (dist) input:: - dist: a mystic.math.Distribution instance """ from mystic.math import Distribution _dist = Distribution() if dist is None: dist = _dist elif type(_dist) not in dist.__class__.mro(): dist = Distribution(dist) #XXX: or throw error? for i in range(self.nPop): #FIXME: accept a list of Distributions self.population[i] = dist(self.nDim) return def enable_signal_handler(self): #, callback='*'): """enable workflow interrupt handler while solver is running""" """ #XXX: disabled, as would add state to solver input:: - if a callback function is provided, generate a new handler with the given callback. If callback is None, do not use a callback. If callback is not provided, just turn on the existing handler. """ ## always _generate handler on first call #if (self.signal_handler is None) and callback == '*': # callback = None ## when a new callback is given, generate a new handler #if callback != '*': # self._generateHandler(callback) self._handle_sigint = True def disable_signal_handler(self): """disable workflow interrupt handler while solver is running""" self._handle_sigint = False def SetSaveFrequency(self, generations=None, filename=None): """set frequency for saving solver restart file input:: - generations = number of solver iterations before next save of state - filename = name of file in which to save solver state note:: SetSaveFrequency(None) will disable saving solver restart file""" self._saveiter = generations #self._saveeval = evaluations self._state = filename return def SetEvaluationLimits(self, generations=None, evaluations=None, \ new=False, **kwds): """set limits for generations and/or evaluations input:: - generations: maximum number of solver iterations (i.e. steps) - evaluations: maximum number of function evaluations - new: bool, if True, the above limit the new evaluations and iterations; otherwise, the limits refer to total evaluations and iterations.""" # backward compatibility self._maxiter = kwds['maxiter'] if 'maxiter' in kwds else generations self._maxfun = kwds['maxfun'] if 'maxfun' in kwds else evaluations # handle if new (reset counter, instead of extend counter) if new: if generations is not None: self._maxiter += self.generations else: self._maxiter = "*" #XXX: better as self._newmax = True ? if evaluations is not None: self._maxfun += self.evaluations else: self._maxfun = "*" return def _SetEvaluationLimits(self, iterscale=None, evalscale=None): """set the evaluation limits input:: - iterscale and evalscale are integers used to set the maximum iteration and evaluation limits, respectively. The new limit is defined as limit = (nDim * nPop * scale) + count, where count is the number of existing iterations or evaluations, respectively. The default for iterscale is 10, while the default for evalscale is 1000. 
""" if iterscale is None: iterscale = 10 if evalscale is None: evalscale = 1000 N = len(self.population[0]) # usually self.nDim # if SetEvaluationLimits not applied, use the solver default if self._maxiter is None: self._maxiter = N * self.nPop * iterscale elif self._maxiter == "*": # (i.e. None, but 'reset counter') self._maxiter = (N * self.nPop * iterscale) + self.generations if self._maxfun is None: self._maxfun = N * self.nPop * evalscale elif self._maxfun == "*": self._maxfun = (N * self.nPop * evalscale) + self.evaluations return def Terminated(self, disp=False, info=False, termination=None, **kwds): """check if the solver meets the given termination conditions Input:: - disp = if True, print termination statistics and/or warnings - info = if True, return termination message (instead of boolean) - termination = termination conditions to check against Notes:: If no termination conditions are given, the solver's stored termination conditions will be used. """ if termination is None: termination = self._termination # ensure evaluation limits have been imposed self._SetEvaluationLimits() # check for termination messages msg = termination(self, info=True) sig = "SolverInterrupt with %s" % {} lim = "EvaluationLimits with %s" % { 'evaluations': self._maxfun, 'generations': self._maxiter } # push solver internals to scipy.optimize.fmin interface if self._fcalls[0] >= self._maxfun and self._maxfun is not None: msg = lim #XXX: prefer the default stop ? if disp: print("Warning: Maximum number of function evaluations has "\ "been exceeded.") elif self.generations >= self._maxiter and self._maxiter is not None: msg = lim #XXX: prefer the default stop ? if disp: print( "Warning: Maximum number of iterations has been exceeded") elif self._EARLYEXIT: msg = sig if disp: print( "Warning: Optimization terminated with signal interrupt.") elif msg and disp: print("Optimization terminated successfully.") print(" Current function value: %f" % self.bestEnergy) print(" Iterations: %d" % self.generations) print(" Function evaluations: %d" % self._fcalls[0]) if info: return msg return bool(msg) def SetTermination(self, termination): # disp ? """set the termination conditions input:: - termination = termination conditions to check against""" #XXX: validate that termination is a 'condition' ? self._termination = termination self._collapse = False if termination is not None: from mystic.termination import state stop = state(termination) stop = getattr(stop, 'iterkeys', stop.keys)() self._collapse = any(key.startswith('Collapse') for key in stop) return def SetObjective(self, cost, ExtraArgs=None): # callback=None/False ? """set the cost function for the optimization input:: - cost is the objective function, of the form y = cost(x, *ExtraArgs), where x is a candidate solution, and ExtraArgs is the tuple of positional arguments required to evaluate the objective. 
note:: this method decorates the objective with bounds, penalties, monitors, etc""" _cost, _raw, _args = self._cost # check if need to 'wrap' or can return the stored cost if (cost is None or cost is _raw or cost is _cost) and \ (ExtraArgs is None or ExtraArgs is _args): return # get cost and args if None was given if cost is None: cost = _raw args = _args if ExtraArgs is None else ExtraArgs args = () if args is None else args # quick validation check (so doesn't screw up internals) if not isvalid(cost, [0] * self.nDim, *args): try: name = cost.__name__ except AttributeError: # raise new error for non-callables cost(*args) validate(cost, None, *args) #val = len(args) + 1 #XXX: 'klepto.validate' for better error? #msg = '%s() invalid number of arguments (%d given)' % (name, val) #raise TypeError(msg) # hold on to the 'raw' cost function self._cost = (None, cost, ExtraArgs) self._live = False return def Collapsed(self, disp=False, info=False): """check if the solver meets the given collapse conditions Input:: - disp: if True, print details about the solver state at collapse - info: if True, return collapsed state (instead of boolean)""" stop = getattr(self, '__stop__', self.Terminated(info=True)) import mystic.collapse as ct collapses = ct.collapsed(stop) or dict() if collapses and disp: for (k, v) in getattr(collapses, 'iteritems', collapses.items)(): print(" %s: %s" % (k.split()[0], v)) #print("# Collapse at: Generation", self._stepmon._step-1, \ # "with", self.bestEnergy, "@\n#", list(self.bestSolution)) return collapses if info else bool(collapses) def __get_collapses(self, disp=False): """get dict of {collapse termination info: collapse} input:: - disp: if True, print details about the solver state at collapse""" collapses = self.Collapsed(disp=disp, info=True) if collapses: # stop if any Termination is not from Collapse stop = getattr(self, '__stop__', self.Terminated(info=True)) stop = not all(k.startswith("Collapse") for k in stop.split("; ")) if stop: return {} #XXX: self._collapse = False ? return collapses def __collapse_termination(self, collapses): """get (initial state, resulting termination) for the give collapses""" import mystic.termination as mt import mystic.mask as ma state = mt.state(self._termination) termination = ma.update_mask(self._termination, collapses) return state, termination def __collapse_constraints(self, state, collapses): """get updated constraints for the given state and collapses""" import mystic.tools as to import mystic.constraints as cn # get collapse conditions #XXX: efficient? 4x loops over collapses npts = getattr(self._stepmon, '_npts', None) #XXX: default? 
#conditions = [cn.impose_at(*to.select_params(self,collapses[k])) if state[k].get('target') is None else cn.impose_at(collapses[k],state[k].get('target')) for k in collapses if k.startswith('CollapseAt')] #conditions += [cn.impose_as(collapses[k],state[k].get('offset')) for k in collapses if k.startswith('CollapseAs')] #randomize = False conditions = [] _conditions = [] conditions_ = [] for k in collapses: #FIXME: these should be encapsulted in termination instance if k.startswith('CollapseAt'): t = state[k] t = t['target'] if 'target' in t else None if t is None: t = cn.impose_at(*to.select_params(self, collapses[k])) else: t = cn.impose_at(collapses[k], t) conditions.append(t) elif k.startswith('CollapseAs'): t = state[k] t = t['offset'] if 'offset' in t else None _conditions.append(cn.impose_as(collapses[k], t)) elif k.startswith(('CollapseCost', 'CollapseGrad')): t = state[k] t = t['clip'] if 'clip' in t else True conditions_.append(cn.impose_bounds(collapses[k], clip=t)) #randomize = True conditions.extend(_conditions) conditions.extend(conditions_) del _conditions del conditions_ # get measure collapse conditions if npts: #XXX: faster/better if comes first or last? conditions += [ cn.impose_measure(npts, [ collapses[k] for k in collapses if k.startswith('CollapsePosition') ], [ collapses[k] for k in collapses if k.startswith('CollapseWeight') ]) ] # get updated constraints return to.chain(*conditions)(self._constraints) def Collapse(self, disp=False): """if solver has terminated by collapse, apply the collapse (unless both collapse and "stop" are simultaneously satisfied) input:: - disp: if True, print details about the solver state at collapse note:: updates the solver's termination conditions and constraints """ #XXX: return True for "collapse and continue" and False otherwise? collapses = self.__get_collapses(disp) if collapses: # then stomach a bunch of module imports (yuck) state, termination = self.__collapse_termination(collapses) constraints = self.__collapse_constraints(state, collapses) # update termination and constraints in solver self.SetConstraints(constraints) self.SetTermination(termination) #if randomize: self.SetInitialPoints(self.population[0]) #import mystic.termination as mt #print(mt.state(self._termination).keys()) #return bool(collapses) and not stop return collapses def _update_objective(self): """decorate the cost function with bounds, penalties, monitors, etc""" # rewrap the cost if the solver has been run if False: # trigger immediately self._decorate_objective(*self._cost[1:]) else: # delay update until _bootstrap self.Finalize() return def _decorate_objective(self, cost, ExtraArgs=None): """decorate the cost function with bounds, penalties, monitors, etc input:: - cost is the objective function, of the form y = cost(x, *ExtraArgs), where x is a candidate solution, and ExtraArgs is the tuple of positional arguments required to evaluate the objective.""" #print("@%r %r %r" % (cost, ExtraArgs, max)) evalmon = self._evalmon raw = cost if ExtraArgs is None: ExtraArgs = () self._fcalls, cost = wrap_function(cost, ExtraArgs, evalmon) if self._useStrictRange: indx = list(self.popEnergy).index(self.bestEnergy) ngen = self.generations #XXX: no random if generations=0 ? for i in range(self.nPop): self.population[i] = self._clipGuessWithinRangeBoundary( self.population[i], (not ngen) or (i == indx)) cost = wrap_bounds(cost, self._strictMin, self._strictMax) #XXX: remove? 
from mystic.constraints import and_ constraints = and_(self._constraints, self._strictbounds, onfail=self._strictbounds) else: constraints = self._constraints cost = wrap_penalty(cost, self._penalty) cost = wrap_nested(cost, constraints) if self._reducer: #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool) cost = reduced(self._reducer, arraylike=True)(cost) # hold on to the 'wrapped' and 'raw' cost function self._cost = (cost, raw, ExtraArgs) self._live = True return cost def _bootstrap_objective(self, cost=None, ExtraArgs=None): """HACK to enable not explicitly calling _decorate_objective input:: - cost is the objective function, of the form y = cost(x, *ExtraArgs), where x is a candidate solution, and ExtraArgs is the tuple of positional arguments required to evaluate the objective.""" _cost, _raw, _args = self._cost # check if need to 'wrap' or can return the stored cost if (cost is None or cost is _raw or cost is _cost) and \ (ExtraArgs is None or ExtraArgs is _args) and self._live: return _cost # 'wrap' the 'new' cost function with _decorate self.SetObjective(cost, ExtraArgs) return self._decorate_objective(*self._cost[1:]) def _Step(self, cost=None, ExtraArgs=None, **kwds): """perform a single optimization iteration input:: - cost is the objective function, of the form y = cost(x, *ExtraArgs), where x is a candidate solution, and ExtraArgs is the tuple of positional arguments required to evaluate the objective. *** this method must be overwritten ***""" raise NotImplementedError("an optimization algorithm was not provided") def SaveSolver(self, filename=None, **kwds): """save solver state to a restart file input:: - filename: string of full filepath for the restart file note:: any additional keyword arguments are passed to dill.dump""" import dill fd = None if filename is None: # then check if already has registered file if self._state is None: # then create a new one import os, tempfile fd, self._state = tempfile.mkstemp(suffix='.pkl') os.close(fd) filename = self._state self._state = filename f = open(filename, 'wb') try: dill.dump(self, f, **kwds) self._stepmon.info('DUMPED("%s")' % filename) #XXX: before / after ? finally: f.close() return def __save_state(self, force=False): """save the solver state, if chosen save frequency is met input:: - if force is True, save the solver state regardless of save frequency""" # save the last iteration if force and bool(self._state): self.SaveSolver() return # save the zeroth iteration nonzero = True #XXX: or bool(self.generations) ? # after _saveiter generations, then save state iters = self._saveiter saveiter = bool(iters) and not bool(self.generations % iters) if nonzero and saveiter: self.SaveSolver() #FIXME: if _saveeval (or more) since last check, then save state #save = self.evaluations % self._saveeval return def __load_state(self, solver, **kwds): """load solver.__dict__ into self.__dict__; override with kwds input:: - solver is a solver instance, while kwds are a dict of solver state""" #XXX: should do some filtering on kwds ? self.__dict__.update(solver.__dict__, **kwds) return def Finalize(self): """cleanup upon exiting the main optimization loop""" self._live = False return def _process_inputs(self, kwds): """process and activate input settings Args: callback (func, default=None): function to call after each iteration. The interface is ``callback(xk)``, with xk the current parameter vector. disp (bool, default=False): if True, print convergence messages. 
Additional Args: EvaluationMonitor: a monitor instance to capture each evaluation of cost. StepMonitor: a monitor instance to capture each iteration's best results. penalty: a function of the form: y' = penalty(xk), with y = cost(xk) + y', where xk is the current parameter vector. constraints: a function of the form: xk' = constraints(xk), where xk is the current parameter vector. Note: The additional args are 'sticky', in that once they are given, they remain set until they are explicitly changed. Conversely, the args are not sticky, and are thus set for a one-time use. """ #allow for inputs that don't conform to AbstractSolver interface #NOTE: not sticky: callback, disp #NOTE: sticky: EvaluationMonitor, StepMonitor, penalty, constraints settings = \ {'callback':None, #user-supplied function, called after each step 'disp':0} #non-zero to print convergence messages [settings.update({i: j}) for (i, j) in kwds.items() if i in settings] # backward compatibility if 'EvaluationMonitor' in kwds: \ self.SetEvaluationMonitor(kwds['EvaluationMonitor']) if 'StepMonitor' in kwds: \ self.SetGenerationMonitor(kwds['StepMonitor']) if 'penalty' in kwds: \ self.SetPenalty(kwds['penalty']) if 'constraints' in kwds: \ self.SetConstraints(kwds['constraints']) return settings def Step(self, cost=None, termination=None, ExtraArgs=None, **kwds): """Take a single optimization step using the given 'cost' function. Uses an optimization algorithm to take one 'step' toward the minimum of a function of one or more variables. Args: cost (func, default=None): the function to be minimized: ``y = cost(x)``. termination (termination, default=None): termination conditions. ExtraArgs (tuple, default=None): extra arguments for cost. callback (func, default=None): function to call after each iteration. The interface is ``callback(xk)``, with xk the current parameter vector. disp (bool, default=False): if True, print convergence messages. Returns: None Notes: To run the solver until termination, call ``Solve()``. Alternately, use ``Terminated()`` as the stop condition in a while loop over ``Step``. If the algorithm does not meet the given termination conditions after the call to ``Step``, the solver may be left in an "out-of-sync" state. When abandoning an non-terminated solver, one should call ``Finalize()`` to make sure the solver is fully returned to a "synchronized" state. This method accepts additional args that are specific for the current solver, as detailed in the `_process_inputs` method. """ if 'disp' in kwds: disp = bool(kwds['disp']) #; del kwds['disp'] else: disp = False # register: cost, termination, ExtraArgs cost = self._bootstrap_objective(cost, ExtraArgs) if termination is not None: self.SetTermination(termination) # check termination before 'stepping' if len(self._stepmon): msg = self.Terminated(disp=disp, info=True) or None else: msg = None # if not terminated, then take a step if msg is None: self._Step(**kwds) #FIXME: not all kwds are given in __doc__ if self.Terminated(): # then cleanup/finalize self.Finalize() # get termination message and log state msg = self.Terminated(disp=disp, info=True) or None if msg: self._stepmon.info('STOP("%s")' % msg) self.__save_state(force=True) return msg def _Solve(self, cost, ExtraArgs, **settings): """Run the optimizer to termination, using the given settings. Args: cost (func): the function to be minimized: ``y = cost(x)``. ExtraArgs (tuple): tuple of extra arguments for ``cost``. 
settings (dict): optimizer settings (produced by _process_inputs) Returns: None """ disp = settings['disp'] if 'disp' in settings else False # the main optimization loop stop = False while not stop: stop = self.Step(**settings) #XXX: remove need to pass settings? continue # if collapse, then activate any relevant collapses and continue self.__stop__ = stop #HACK: avoid re-evaluation of Termination while self._collapse and self.Collapse(disp=disp): del self.__stop__ #HACK stop = False while not stop: stop = self.Step(** settings) #XXX: move Collapse inside of Step? continue self.__stop__ = stop #HACK del self.__stop__ #HACK return def Solve(self, cost=None, termination=None, ExtraArgs=None, **kwds): """Minimize a 'cost' function with given termination conditions. Uses an optimization algorithm to find the minimum of a function of one or more variables. Args: cost (func, default=None): the function to be minimized: ``y = cost(x)``. termination (termination, default=None): termination conditions. ExtraArgs (tuple, default=None): extra arguments for cost. sigint_callback (func, default=None): callback function for signal handler. callback (func, default=None): function to call after each iteration. The interface is ``callback(xk)``, with xk the current parameter vector. disp (bool, default=False): if True, print convergence messages. Returns: None """ # process and activate input settings if 'sigint_callback' in kwds: self.sigint_callback = kwds['sigint_callback'] del kwds['sigint_callback'] else: self.sigint_callback = None settings = self._process_inputs(kwds) # set up signal handler #FIXME: sigint doesn't behave well in parallel self._EARLYEXIT = False #XXX: why not use EARLYEXIT singleton? # activate signal handler #import threading as thread #mainthread = isinstance(thread.current_thread(), thread._MainThread) #if mainthread: #XXX: if not mainthread, signal will raise ValueError import mystic._signal as signal if self._handle_sigint: signal.signal(signal.SIGINT, signal.Handler(self)) # register: cost, termination, ExtraArgs cost = self._bootstrap_objective(cost, ExtraArgs) if termination is not None: self.SetTermination(termination) #XXX: self.Step(cost, termination, ExtraArgs, **settings) ? # run the optimizer to termination self._Solve(cost, ExtraArgs, **settings) # restore default handler for signal interrupts if self._handle_sigint: signal.signal(signal.SIGINT, signal.default_int_handler) return def __copy__(self): """return a shallow copy of the solver""" cls = self.__class__ result = cls.__new__(cls) result.__dict__.update(self.__dict__) return result def __deepcopy__(self, memo): """return a deep copy of the solver""" import copy import dill cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): if v is self._cost: setattr(result, k, tuple(dill.copy(i) for i in v)) else: try: #XXX: work-around instancemethods in python2.6 setattr(result, k, copy.deepcopy(v, memo)) except TypeError: setattr(result, k, dill.copy(v)) return result def _is_new(self): 'determine if solver has been run or not' return bool(self.evaluations) or bool(self.generations) # extensions to the solver interface evaluations = property(__evaluations) generations = property(__generations) energy_history = property(__energy_history, __set_energy_history) solution_history = property(__solution_history, __set_solution_history) bestEnergy = property(__bestEnergy, __set_bestEnergy) bestSolution = property(__bestSolution, __set_bestSolution) pass
from timeit import Timer print("Differential Evolution") print("======================") t = Timer("main()", "from __main__ import main") timetaken = t.timeit(number=1) print("CPU Time: %s\n" % timetaken) print("with bounds...") import time times = [] algor = [] print("Differential Evolution") print("======================") start = time.time() esow = Monitor() ssow = Monitor() #ssow= VerboseMonitor(1) # import random # xinit = [random.random() for j in range(ND)] xinit = [0.8, 1.2, 0.7] # xinit = [0.8,1.2,1.7] #... better when using "bad" range min = [-0.999, -0.999, 0.999] #XXX: behaves badly when large range max = [200.001, 100.001, inf] #... for >=1 x0 out of bounds; (up xtol) # min = [-0.999, -0.999, -0.999] # max = [200.001, 100.001, inf] # min = [-0.999, -0.999, 0.999] #XXX: tight range and non-randomness # max = [2.001, 1.001, 1.001] #...: is _bad_ for DE solvers #print(diffev(rosen,xinit,NP,retall=0,full_output=0))
def diffev(cost, x0, npop=4, args=(), bounds=None, ftol=5e-3, gtol=None, maxiter=None, maxfun=None, cross=0.9, scale=0.8, full_output=0, disp=1, retall=0, callback=None, **kwds): """Minimize a function using differential evolution. Uses a differential evolution algorithm to find the minimum of a function of one or more variables. Mimics a ``scipy.optimize`` style interface. Args: cost (func): the function or method to be minimized: ``y = cost(x)``. x0 (ndarray): the initial guess parameter vector ``x`` if desired start is a single point, otherwise takes a list of (min,max) bounds that define a region from which random initial points are drawn. npop (int, default=4): size of the trial solution population. args (tuple, default=()): extra arguments for cost. bounds (list(tuple), default=None): list of pairs of bounds (min,max), one for each parameter. ftol (float, default=5e-3): acceptable relative error in ``cost(xopt)`` for convergence. gtol (float, default=None): maximum iterations to run without improvement. maxiter (int, default=None): the maximum number of iterations to perform. maxfun (int, default=None): the maximum number of function evaluations. cross (float, default=0.9): the probability of cross-parameter mutations. scale (float, default=0.8): multiplier for mutations on the trial solution. full_output (bool, default=False): True if fval and warnflag are desired. disp (bool, default=True): if True, print convergence messages. retall (bool, default=False): if True, return list of solutions at each iteration. callback (func, default=None): function to call after each iteration. The interface is ``callback(xk)``, with xk the current parameter vector. handler (bool, default=False): if True, enable handling interrupt signals. strategy (strategy, default=None): override the default mutation strategy. itermon (monitor, default=None): override the default GenerationMonitor. evalmon (monitor, default=None): override the default EvaluationMonitor. constraints (func, default=None): a function ``xk' = constraints(xk)``, where xk is the current parameter vector, and xk' is a parameter vector that satisfies the encoded constraints. penalty (func, default=None): a function ``y = penalty(xk)``, where xk is the current parameter vector, and ``y' == 0`` when the encoded constraints are satisfied (and ``y' > 0`` otherwise). 
Returns: ``(xopt, {fopt, iter, funcalls, warnflag}, {allvecs})`` Notes: - xopt (*ndarray*): the minimizer of the cost function - fopt (*float*): value of cost function at minimum: ``fopt = cost(xopt)`` - iter (*int*): number of iterations - funcalls (*int*): number of function calls - warnflag (*int*): warning flag: - ``1 : Maximum number of function evaluations`` - ``2 : Maximum number of iterations`` - allvecs (*list*): a list of solutions at each iteration """ invariant_current = kwds[ 'invariant_current'] if 'invariant_current' in kwds else False handler = kwds['handler'] if 'handler' in kwds else False from mystic.strategy import Best1Bin strategy = kwds['strategy'] if 'strategy' in kwds else Best1Bin from mystic.monitors import Monitor stepmon = kwds['itermon'] if 'itermon' in kwds else Monitor() evalmon = kwds['evalmon'] if 'evalmon' in kwds else Monitor() if gtol: #if number of generations provided, use ChangeOverGeneration from mystic.termination import ChangeOverGeneration termination = ChangeOverGeneration(ftol, gtol) else: from mystic.termination import VTRChangeOverGeneration termination = VTRChangeOverGeneration(ftol) ND = len(x0) if invariant_current: #use Solver2, not Solver1 solver = DifferentialEvolutionSolver2(ND, npop) else: solver = DifferentialEvolutionSolver(ND, npop) solver.SetEvaluationLimits(maxiter, maxfun) solver.SetEvaluationMonitor(evalmon) solver.SetGenerationMonitor(stepmon) if 'penalty' in kwds: solver.SetPenalty(kwds['penalty']) if 'constraints' in kwds: solver.SetConstraints(kwds['constraints']) if bounds is not None: minb, maxb = unpair(bounds) solver.SetStrictRanges(minb, maxb) try: #x0 passed as 1D array of (min,max) pairs minb, maxb = unpair(x0) solver.SetRandomInitialPoints(minb, maxb) except: #x0 passed as 1D array of initial parameter values solver.SetInitialPoints(x0) if handler: solver.enable_signal_handler() #TODO: allow sigint_callbacks for all minimal interfaces ? solver.Solve(cost, termination=termination, strategy=strategy, \ #sigint_callback=other_callback,\ CrossProbability=cross, ScalingFactor=scale, \ ExtraArgs=args, callback=callback) solution = solver.Solution() # code below here pushes output to scipy.optimize.fmin interface #x = list(solver.bestSolution) x = solver.bestSolution fval = solver.bestEnergy warnflag = 0 fcalls = solver.evaluations iterations = solver.generations allvecs = stepmon.x if fcalls >= solver._maxfun: warnflag = 1 if disp: print("Warning: Maximum number of function evaluations has "\ "been exceeded.") elif iterations >= solver._maxiter: warnflag = 2 if disp: print("Warning: Maximum number of iterations has been exceeded") else: if disp: print("Optimization terminated successfully.") print(" Current function value: %f" % fval) print(" Iterations: %d" % iterations) print(" Function evaluations: %d" % fcalls) if full_output: retlist = x, fval, iterations, fcalls, warnflag if retall: retlist += (allvecs, ) else: retlist = x if retall: retlist = (x, allvecs) return retlist
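# A short usage sketch for the interface above: minimize the Rosenbrock
# function from a single starting point (parameter values are illustrative).
from mystic.models import rosen
from mystic.solvers import diffev

xopt = diffev(rosen, x0=[0.8, 1.2, 0.7], npop=40, ftol=1e-5, gtol=100, disp=0)
print(xopt)  # approximately [1., 1., 1.]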
def buckshot(cost,ndim,npts=8,args=(),bounds=None,ftol=1e-4,maxiter=None, \ maxfun=None,full_output=0,disp=1,retall=0,callback=None,**kwds): """Minimize a function using the buckshot ensemble solver. Description: Uses a buckshot ensemble algorithm to find the minimum of a function of one or more variables. Mimics the scipy.optimize.fmin interface. Starts 'npts' solver instances at random points in parameter space. Inputs: cost -- the Python function or method to be minimized. ndim -- dimensionality of the problem. npts -- number of solver instances. Additional Inputs: args -- extra arguments for cost. bounds -- list - n pairs of bounds (min,max), one pair for each parameter. ftol -- number - acceptable relative error in cost(xopt) for convergence. gtol -- number - maximum number of iterations to run without improvement. maxiter -- number - the maximum number of iterations to perform. maxfun -- number - the maximum number of function evaluations. full_output -- number - non-zero if fval and warnflag outputs are desired. disp -- number - non-zero to print convergence messages. retall -- number - non-zero to return list of solutions at each iteration. callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. solver -- solver - override the default nested Solver instance. handler -- boolean - enable/disable handling of interrupt signal. itermon -- monitor - override the default GenerationMonitor. evalmon -- monitor - override the default EvaluationMonitor. constraints -- an optional user-supplied function. It is called as constraints(xk), where xk is the current parameter vector. This function must return xk', a parameter vector that satisfies the encoded constraints. penalty -- an optional user-supplied function. It is called as penalty(xk), where xk is the current parameter vector. This function should return y', with y' == 0 when the encoded constraints are satisfied, and y' > 0 otherwise. dist -- an optional mystic.math.Distribution instance. If provided, this distribution generates randomness in ensemble starting position. Returns: (xopt, {fopt, iter, funcalls, warnflag, allfuncalls}, {allvecs}) xopt -- ndarray - minimizer of function fopt -- number - value of function at minimum: fopt = cost(xopt) iter -- number - number of iterations funcalls -- number - number of function calls warnflag -- number - Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allfuncalls -- number - total function calls (for all solver instances) allvecs -- list - a list of solutions at each iteration """ handler = kwds['handler'] if 'handler' in kwds else False from mystic.solvers import NelderMeadSimplexSolver as _solver if 'solver' in kwds: _solver = kwds['solver'] from mystic.monitors import Monitor stepmon = kwds['itermon'] if 'itermon' in kwds else Monitor() evalmon = kwds['evalmon'] if 'evalmon' in kwds else Monitor() gtol = 10 # termination generations (scipy: 2, default: 10) if 'gtol' in kwds: gtol = kwds['gtol'] if gtol: #if number of generations is provided, use NCOG from mystic.termination import NormalizedChangeOverGeneration termination = NormalizedChangeOverGeneration(ftol, gtol) else: from mystic.termination import VTRChangeOverGeneration termination = VTRChangeOverGeneration(ftol) solver = BuckshotSolver(ndim, npts) solver.SetNestedSolver(_solver) #XXX: skip settings for configured solver? 
solver.SetEvaluationLimits(maxiter, maxfun) solver.SetEvaluationMonitor(evalmon) solver.SetGenerationMonitor(stepmon) if 'dist' in kwds: solver.SetDistribution(kwds['dist']) if 'penalty' in kwds: solver.SetPenalty(kwds['penalty']) if 'constraints' in kwds: solver.SetConstraints(kwds['constraints']) if bounds is not None: minb, maxb = unpair(bounds) solver.SetStrictRanges(minb, maxb) if handler: solver.enable_signal_handler() solver.Solve(cost,termination=termination,disp=disp, \ ExtraArgs=args,callback=callback) solution = solver.Solution() # code below here pushes output to scipy.optimize.fmin interface msg = solver.Terminated(disp=False, info=True) x = solver.bestSolution fval = solver.bestEnergy warnflag = 0 fcalls = solver.evaluations all_fcalls = solver._total_evals iterations = solver.generations allvecs = solver._stepmon.x if fcalls >= solver._maxfun: #XXX: check against total or individual? warnflag = 1 elif iterations >= solver._maxiter: #XXX: check against total or individual? warnflag = 2 else: pass if full_output: retlist = x, fval, iterations, fcalls, warnflag, all_fcalls if retall: retlist += (allvecs, ) else: retlist = x if retall: retlist = (x, allvecs) return retlist
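# A hedged usage sketch for buckshot above: launch 8 Nelder-Mead instances
# from random points in a box and keep the best result (values illustrative).
from mystic.models import rosen
from mystic.solvers import buckshot

xopt = buckshot(rosen, ndim=3, npts=8, bounds=[(-5., 5.)]*3, disp=0)
print(xopt)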
# License: 3-clause BSD. The full license text is available at: # - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE from mystic.models import rosen from mystic.solvers import * from mystic.termination import VTRChangeOverGeneration from mystic.monitors import VerboseMonitor, Monitor from mystic.tools import random_seed random_seed(123) lb, ub = [-100.]*3, [100]*3 interval = None if interval: _stepmon = VerboseMonitor(interval) else: _stepmon = Monitor() _term = VTRChangeOverGeneration(generations=200) _solver = DifferentialEvolutionSolver(3, 20)#40) _solver.SetRandomInitialPoints(lb,ub) _solver.SetStrictRanges(lb,ub) _solver.SetTermination(_term) _solver.SetGenerationMonitor(_stepmon) _solver.SetEvaluationLimits(100, 1000) _solver.Solve(rosen) _energy = _solver.bestEnergy _solution = _solver.bestSolution _population = _solver.population _solver.SetEvaluationLimits(10000, 100000) _solver.Solve()
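# Continuing the script above (a sketch): the second Solve() reuses the
# objective registered by the first call, so the solver simply resumes under
# the raised evaluation limits. A quick check of the final state:
print(_solver.Terminated(info=True))
print('%s @ %s' % (_solver.bestEnergy, list(_solver.bestSolution)))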
from mystic.solvers import DifferentialEvolutionSolver2 from mystic.monitors import VerboseMonitor, Monitor from mystic.termination import ChangeOverGeneration as COG # kwds for solver opts = dict(termination=COG(1e-10, 100)) param = dict( solver=DifferentialEvolutionSolver2, npop=80, maxiter=1500, maxfun=1e+6, x0=None, # use RandomInitialPoints nested=None, # don't use SetNested pool=None, # don't use SetMapper stepmon=VerboseMonitor(1, label='output'), # monitor config evalmon=Monitor(), # monitor config (re-initialized in solve) # kwds to pass directly to Solve(objective, **opt) opts=opts, ) from mystic.math.discrete import product_measure from mystic.math import almostEqual as almost from mystic.constraints import and_, integers from mystic.coupler import outer # lower and upper bound for parameters and weights xlb = (0, 1, 0, 0, 0) xub = (1, 10, 10, 10, 10) wlb = (0, 1, 1, 1, 1) wub = (1, 1, 1, 1, 1) # number of Dirac masses to use for each parameter
return if __name__ == '__main__': print("Differential Evolution") print("======================") # set range for random initial guess ndim = 9 x0 = [(-100, 100)] * ndim random_seed(123) # configure monitors stepmon = VerboseMonitor(50) evalmon = Monitor() # use DE to solve 8th-order Chebyshev coefficients npop = 10 * ndim solver = DifferentialEvolutionSolver2(ndim, npop) solver.SetRandomInitialPoints(min=[-100] * ndim, max=[100] * ndim) solver.SetEvaluationLimits(generations=999) solver.SetEvaluationMonitor(evalmon) solver.SetGenerationMonitor(stepmon) solver.enable_signal_handler() solver.Solve(chebyshev8cost, termination=VTR(0.01), strategy=Best1Exp, \ CrossProbability=1.0, ScalingFactor=0.9) solution = solver.bestSolution # get solved coefficients and Chi-Squared (from solver members) iterations = solver.generations
def __init__(self, dim, **kwds): """ Takes one initial input: dim -- dimensionality of the problem. Additional inputs: npop -- size of the trial solution population. [default = 1] Important class members: nDim, nPop = dim, npop generations - an iteration counter. evaluations - an evaluation counter. bestEnergy - current best energy. bestSolution - current best parameter set. [size = dim] popEnergy - set of all trial energy solutions. [size = npop] population - set of all trial parameter solutions. [size = dim*npop] solution_history - history of bestSolution status. [StepMonitor.x] energy_history - history of bestEnergy status. [StepMonitor.y] signal_handler - catches the interrupt signal. """ NP = kwds['npop'] if 'npop' in kwds else 1 self.nDim = dim self.nPop = NP self._init_popEnergy = inf self.popEnergy = [self._init_popEnergy] * NP self.population = [[0.0 for i in range(dim)] for j in range(NP)] self.trialSolution = [0.0] * dim self._map_solver = False self._bestEnergy = None self._bestSolution = None self._state = None self._type = self.__class__.__name__ self.signal_handler = None self._handle_sigint = False self._useStrictRange = False self._defaultMin = [-1e3] * dim self._defaultMax = [ 1e3] * dim self._strictMin = [] self._strictMax = [] self._maxiter = None self._maxfun = None self._saveiter = None #self._saveeval = None from mystic.monitors import Null, Monitor self._evalmon = Null() self._stepmon = Monitor() self._fcalls = [0] self._energy_history = None self._solution_history= None self.id = None # identifier (use like "rank" for MPI) self._constraints = lambda x: x self._penalty = lambda x: 0.0 self._reducer = None self._cost = (None, None, None) # (cost, raw_cost, args) #,callback) self._collapse = False self._termination = lambda x, *ar, **kw: False if len(ar) < 1 or ar[0] is False or (kw['info'] if 'info' in kw else True) == False else '' #XXX: better default ? # (get termination details with self._termination.__doc__) import mystic.termination as mt self._EARLYEXIT = mt.EARLYEXIT self._live = False return
class AbstractSolver(object):
    """
AbstractSolver base class for mystic optimizers.
    """

    def __init__(self, dim, **kwds):
        """
Takes one initial input:
    dim      -- dimensionality of the problem.

Additional inputs:
    npop     -- size of the trial solution population. [default = 1]

Important class members:
    nDim, nPop       = dim, npop
    generations      - an iteration counter.
    evaluations      - an evaluation counter.
    bestEnergy       - current best energy.
    bestSolution     - current best parameter set. [size = dim]
    popEnergy        - set of all trial energy solutions. [size = npop]
    population       - set of all trial parameter solutions. [size = dim*npop]
    solution_history - history of bestSolution status. [StepMonitor.x]
    energy_history   - history of bestEnergy status. [StepMonitor.y]
    signal_handler   - catches the interrupt signal.
        """
        NP = kwds['npop'] if 'npop' in kwds else 1
        self.nDim              = dim
        self.nPop              = NP
        self._init_popEnergy   = inf
        self.popEnergy         = [self._init_popEnergy] * NP
        self.population        = [[0.0 for i in range(dim)] for j in range(NP)]
        self.trialSolution     = [0.0] * dim
        self._map_solver       = False
        self._bestEnergy       = None
        self._bestSolution     = None
        self._state            = None
        self._type             = self.__class__.__name__

        self.signal_handler    = None
        self._handle_sigint    = False
        self._useStrictRange   = False
        self._defaultMin       = [-1e3] * dim
        self._defaultMax       = [ 1e3] * dim
        self._strictMin        = []
        self._strictMax        = []
        self._maxiter          = None
        self._maxfun           = None
        self._saveiter         = None
       #self._saveeval         = None

        from mystic.monitors import Null, Monitor
        self._evalmon          = Null()
        self._stepmon          = Monitor()
        self._fcalls           = [0]
        self._energy_history   = None
        self._solution_history = None
        self.id                = None # identifier (use like "rank" for MPI)

        self._constraints      = lambda x: x
        self._penalty          = lambda x: 0.0
        self._reducer          = None
        self._cost             = (None, None)
        self._termination      = lambda x, *ar, **kw: False if len(ar) < 1 or ar[0] is False or kw.get('info', True) == False else '' #XXX: better default ?
        # (get termination details with self._termination.__doc__)
        import mystic.termination
        self._EARLYEXIT = mystic.termination.EARLYEXIT
        return

    def Solution(self):
        """return the best solution"""
        return self.bestSolution

    def __evaluations(self):
        """get the number of function calls"""
        return self._fcalls[0]

    def __generations(self):
        """get the number of iterations"""
        return max(0, len(self.energy_history) - 1)
       #return max(0, len(self._stepmon) - 1)

    def __energy_history(self):
        """get the energy_history (default: energy_history = _stepmon.y)"""
        if self._energy_history is None: return self._stepmon.y
        return self._energy_history

    def __set_energy_history(self, energy):
        """set the energy_history (energy=None will sync with _stepmon.y)"""
        self._energy_history = energy
        return

    def __solution_history(self):
        """get the solution_history (default: solution_history = _stepmon.x)"""
        if self._solution_history is None: return self._stepmon.x
        return self._solution_history

    def __set_solution_history(self, params):
        """set the solution_history (params=None will sync with _stepmon.x)"""
        self._solution_history = params
        return

    def __bestSolution(self):
        """get the bestSolution (default: bestSolution = population[0])"""
        if self._bestSolution is None: return self.population[0]
        return self._bestSolution

    def __set_bestSolution(self, params):
        """set the bestSolution (params=None will sync with population[0])"""
        self._bestSolution = params
        return

    def __bestEnergy(self):
        """get the bestEnergy (default: bestEnergy = popEnergy[0])"""
        if self._bestEnergy is None: return self.popEnergy[0]
        return self._bestEnergy

    def __set_bestEnergy(self, energy):
        """set the bestEnergy (energy=None will sync with popEnergy[0])"""
        self._bestEnergy = energy
        return

    def SetReducer(self, reducer, arraylike=False):
        """apply a reducer function to the cost function

input::
    - a reducer function of the form: y' = reducer(yk), where yk is a results
      vector and y' is a single value.  Ideally, this method is applied to a
      cost function with a multi-value return, to reduce the output to a
      single value.  If arraylike, the reducer provided should take a single
      array as input and produce a scalar; otherwise, the reducer provided
      should meet the requirements of python's builtin 'reduce' function
      (e.g. lambda x,y: x+y), taking two scalars and producing a scalar."""
        if not reducer:
            self._reducer = None
        elif not callable(reducer):
            raise TypeError("'%s' is not a callable function" % reducer)
        elif not arraylike:
            self._reducer = wrap_reducer(reducer)
        else: #XXX: check if is arraylike?
            self._reducer = reducer
        return

    def SetPenalty(self, penalty):
        """apply a penalty function to the optimization

input::
    - a penalty function of the form: y' = penalty(xk), with y = cost(xk) + y',
      where xk is the current parameter vector.  Ideally, this function is
      constructed so a penalty is applied when the desired (i.e. encoded)
      constraints are violated.  Equality constraints should be considered
      satisfied when the penalty condition evaluates to zero, while
      inequality constraints are satisfied when the penalty condition
      evaluates to a non-positive number."""
        if not penalty:
            self._penalty = lambda x: 0.0
        elif not callable(penalty):
            raise TypeError("'%s' is not a callable function" % penalty)
        else: #XXX: check for format: y' = penalty(x) ?
            self._penalty = penalty
        return

    def SetConstraints(self, constraints):
        """apply a constraints function to the optimization

input::
    - a constraints function of the form: xk' = constraints(xk),
      where xk is the current parameter vector.
      Ideally, this function is constructed so the parameter vector it passes
      to the cost function will satisfy the desired (i.e. encoded)
      constraints."""
        if not constraints:
            self._constraints = lambda x: x
        elif not callable(constraints):
            raise TypeError("'%s' is not a callable function" % constraints)
        else: #XXX: check for format: x' = constraints(x) ?
            self._constraints = constraints
        return

    def SetGenerationMonitor(self, monitor, new=False):
        """select a callable to monitor (x, f(x)) after each solver iteration"""
        from mystic.monitors import Null, Monitor#, CustomMonitor
        current = Null() if new else self._stepmon
        if isinstance(monitor, Monitor):  # is Monitor()
            self._stepmon = monitor
            self._stepmon.prepend(current)
        elif isinstance(monitor, Null) or monitor == Null: # is Null() or Null
            self._stepmon = Monitor() #XXX: don't allow Null
            self._stepmon.prepend(current)
        elif hasattr(monitor, '__module__'):  # is CustomMonitor()
            if monitor.__module__ in ['mystic._genSow']:
                self._stepmon = monitor #FIXME: need .prepend(current)
        else:
            raise TypeError("'%s' is not a monitor instance" % monitor)
        self.energy_history   = self._stepmon.y
        self.solution_history = self._stepmon.x
        return

    def SetEvaluationMonitor(self, monitor, new=False):
        """select a callable to monitor (x, f(x)) after each cost function evaluation"""
        from mystic.monitors import Null, Monitor#, CustomMonitor
        current = Null() if new else self._evalmon
        if isinstance(monitor, (Null, Monitor) ):  # is Monitor() or Null()
            self._evalmon = monitor
            self._evalmon.prepend(current)
        elif monitor == Null:  # is Null
            self._evalmon = monitor()
            self._evalmon.prepend(current)
        elif hasattr(monitor, '__module__'):  # is CustomMonitor()
            if monitor.__module__ in ['mystic._genSow']:
                self._evalmon = monitor #FIXME: need .prepend(current)
        else:
            raise TypeError("'%s' is not a monitor instance" % monitor)
        return

    def SetStrictRanges(self, min=None, max=None):
        """ensure solution is within bounds

input::
    - min, max: must be a sequence of length self.nDim
    - each min[i] should be <= the corresponding max[i]

note::
    SetStrictRanges(None) will remove strict range constraints"""
        if min is False or max is False:
            self._useStrictRange = False
            return
        #XXX: better to use 'defaultMin,defaultMax' or '-inf,inf' ???
        if min is None: min = self._defaultMin
        if max is None: max = self._defaultMax
        # when 'some' of the bounds are given as 'None', replace with default
        for i in range(len(min)):
            if min[i] is None: min[i] = self._defaultMin[0]
            if max[i] is None: max[i] = self._defaultMax[0]
        min = asarray(min); max = asarray(max)
        if numpy.any(( min > max ), 0):
            raise ValueError("each min[i] must be <= the corresponding max[i]")
        if len(min) != self.nDim:
            raise ValueError("bounds array must be length %s" % self.nDim)
        self._useStrictRange = True
        self._strictMin = min
        self._strictMax = max
        return

    def _clipGuessWithinRangeBoundary(self, x0): #FIXME: use self.trialSolution?
"""ensure that initial guess is set within bounds input:: - x0: must be a sequence of length self.nDim""" #if len(x0) != self.nDim: #XXX: unnecessary w/ self.trialSolution # raise ValueError, "initial guess must be length %s" % self.nDim x0 = asarray(x0) lo = self._strictMin hi = self._strictMax # crop x0 at bounds x0[x0<lo] = lo[x0<lo] x0[x0>hi] = hi[x0>hi] return x0 def SetInitialPoints(self, x0, radius=0.05): """Set Initial Points with Guess (x0) input:: - x0: must be a sequence of length self.nDim - radius: generate random points within [-radius*x0, radius*x0] for i!=0 when a simplex-type initial guess in required""" x0 = asfarray(x0) rank = len(x0.shape) if rank is 0: x0 = asfarray([x0]) rank = 1 if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if len(x0) != self.nDim: raise ValueError, "Initial guess must be length %s" % self.nDim #slightly alter initial values for solvers that depend on randomness min = x0*(1-radius) max = x0*(1+radius) numzeros = len(x0[x0==0]) min[min==0] = asarray([-radius for i in range(numzeros)]) max[max==0] = asarray([radius for i in range(numzeros)]) self.SetRandomInitialPoints(min,max) #stick initial values in population[i], i=0 self.population[0] = x0.tolist() def SetRandomInitialPoints(self, min=None, max=None): """Generate Random Initial Points within given Bounds input:: - min, max: must be a sequence of length self.nDim - each min[i] should be <= the corresponding max[i]""" if min == None: min = self._defaultMin if max == None: max = self._defaultMax #if numpy.any(( asarray(min) > asarray(max) ),0): # raise ValueError, "each min[i] must be <= the corresponding max[i]" if len(min) != self.nDim or len(max) != self.nDim: raise ValueError, "bounds array must be length %s" % self.nDim # when 'some' of the bounds are given as 'None', replace with default for i in range(len(min)): if min[i] == None: min[i] = self._defaultMin[0] if max[i] == None: max[i] = self._defaultMax[0] import random #generate random initial values for i in range(len(self.population)): for j in range(self.nDim): self.population[i][j] = random.uniform(min[j],max[j]) def SetMultinormalInitialPoints(self, mean, var = None): """Generate Initial Points from Multivariate Normal. input:: - mean must be a sequence of length self.nDim - var can be... None: -> it becomes the identity scalar: -> var becomes scalar * I matrix: -> the variance matrix. must be the right size! """ from numpy.random import multivariate_normal assert(len(mean) == self.nDim) if var == None: var = numpy.eye(self.nDim) else: try: # scalar ? float(var) except: # nope. var better be matrix of the right size (no check) pass else: var = var * numpy.eye(self.nDim) for i in range(len(self.population)): self.population[i] = multivariate_normal(mean, var).tolist() return def enable_signal_handler(self): """enable workflow interrupt handler while solver is running""" self._handle_sigint = True def disable_signal_handler(self): """disable workflow interrupt handler while solver is running""" self._handle_sigint = False def _generateHandler(self,sigint_callback): """factory to generate signal handler Available switches:: - sol --> Print current best solution. - cont --> Continue calculation. - call --> Executes sigint_callback, if provided. - exit --> Exits with current best solution. """ def handler(signum, frame): import inspect print inspect.getframeinfo(frame) print inspect.trace() while 1: s = raw_input(\ """ Enter sense switch. sol: Print current best solution. 
   cont: Continue calculation.
   call: Executes sigint_callback [%s].
   exit: Exits with current best solution.

 >>> """ % sigint_callback)
                if s.lower() == 'sol':
                    print(self.bestSolution)
                elif s.lower() == 'cont':
                    return
                elif s.lower() == 'call':
                    # sigint call_back
                    if sigint_callback is not None:
                        sigint_callback(self.bestSolution)
                elif s.lower() == 'exit':
                    self._EARLYEXIT = True
                    return
                else:
                    print("unknown option : %s" % s)
            return
        self.signal_handler = handler
        return

    def SetSaveFrequency(self, generations=None, filename=None, **kwds):
        """set frequency for saving solver restart file

input::
    - generations = number of solver iterations before next save of state
    - filename = name of file in which to save solver state

note::
    SetSaveFrequency(None) will disable saving solver restart file"""
        self._saveiter = generations
       #self._saveeval = evaluations
        self._state = filename
        return

    def SetEvaluationLimits(self, generations=None, evaluations=None, \
                                                        new=False, **kwds):
        """set limits for generations and/or evaluations

input::
    - generations = maximum number of solver iterations (i.e. steps)
    - evaluations = maximum number of function evaluations"""
        self._maxiter = generations
        self._maxfun = evaluations
        # backward compatibility
        if 'maxiter' in kwds:
            self._maxiter = kwds['maxiter']
        if 'maxfun' in kwds:
            self._maxfun = kwds['maxfun']
        # handle if new (reset counter, instead of extend counter)
        if new:
            if generations is not None:
                self._maxiter += self.generations
            else:
                self._maxiter = "*" #XXX: better as self._newmax = True ?
            if evaluations is not None:
                self._maxfun += self.evaluations
            else:
                self._maxfun = "*"
        return

    def _SetEvaluationLimits(self, iterscale=None, evalscale=None):
        """set the evaluation limits"""
        if iterscale is None: iterscale = 10
        if evalscale is None: evalscale = 1000
        N = len(self.population[0]) # usually self.nDim
        # if SetEvaluationLimits not applied, use the solver default
        if self._maxiter is None:
            self._maxiter = N * self.nPop * iterscale
        elif self._maxiter == "*": # (i.e. None, but 'reset counter')
            self._maxiter = (N * self.nPop * iterscale) + self.generations
        if self._maxfun is None:
            self._maxfun = N * self.nPop * evalscale
        elif self._maxfun == "*":
            self._maxfun = (N * self.nPop * evalscale) + self.evaluations
        return

    def CheckTermination(self, disp=False, info=False, termination=None):
        """check if the solver meets the given termination conditions

Input::
    - disp = if True, print termination statistics and/or warnings
    - info = if True, return termination message (instead of boolean)
    - termination = termination conditions to check against

Note::
    If no termination conditions are given, the solver's stored
    termination conditions will be used.
        """
        if termination is None:
            termination = self._termination
        # check for termination messages
        msg = termination(self, info=True)
        lim = "EvaluationLimits with %s" % {'evaluations':self._maxfun,
                                            'generations':self._maxiter}

        # push solver internals to scipy.optimize.fmin interface
        if self._maxfun is not None and self._fcalls[0] >= self._maxfun:
            msg = lim #XXX: prefer the default stop ?
            if disp:
                print("Warning: Maximum number of function evaluations has "\
                      "been exceeded.")
        elif self._maxiter is not None and self.generations >= self._maxiter:
            msg = lim #XXX: prefer the default stop ?
            if disp:
                print("Warning: Maximum number of iterations has been exceeded")
        elif msg and disp:
            print("Optimization terminated successfully.")
print " Current function value: %f" % self.bestEnergy print " Iterations: %d" % self.generations print " Function evaluations: %d" % self._fcalls[0] if info: return msg return bool(msg) def SetTermination(self, termination): """set the termination conditions""" #XXX: validate that termination is a 'condition' ? self._termination = termination return def _RegisterObjective(self, cost, ExtraArgs=None): """decorate cost function with bounds, penalties, monitors, etc""" if ExtraArgs == None: ExtraArgs = () self._fcalls, cost = wrap_function(cost, ExtraArgs, self._evalmon) if self._useStrictRange: for i in range(self.nPop): self.population[i] = self._clipGuessWithinRangeBoundary(self.population[i]) cost = wrap_bounds(cost, self._strictMin, self._strictMax) cost = wrap_penalty(cost, self._penalty) cost = wrap_nested(cost, self._constraints) if self._reducer: #cost = reduced(*self._reducer)(cost) # was self._reducer = (f,bool) cost = reduced(self._reducer, arraylike=True)(cost) # hold on to the 'wrapped' cost function self._cost = (cost, ExtraArgs) return cost def _bootstrap_decorate(self, cost=None, ExtraArgs=None): """HACK to enable not explicitly calling _RegisterObjective""" args = None if cost == None: # 'use existing cost' cost,args = self._cost # use args, unless override with ExtraArgs if ExtraArgs != None: args = ExtraArgs if self._cost[0] == None: # '_RegisterObjective not yet called' if args is None: args = () cost = self._RegisterObjective(cost, args) return cost def Step(self, cost=None, ExtraArgs=None, **kwds): """perform a single optimization iteration *** this method must be overwritten ***""" raise NotImplementedError, "an optimization algorithm was not provided" def SaveSolver(self, filename=None, **kwds): """save solver state to a restart file""" import dill if filename == None: # then check if already has registered file if self._state == None: # then create a new one import tempfile self._state = tempfile.mkstemp(suffix='.pkl')[-1] filename = self._state self._state = filename f = file(filename, 'wb') try: dill.dump(self, f, **kwds) self._stepmon.info('DUMPED("%s")' % filename) #XXX: before / after ? finally: f.close() return def __save_state(self, force=False): """save the solver state, if chosen save frequency is met""" # save the last iteration if force and bool(self._state): self.SaveSolver() return # save the zeroth iteration nonzero = True #XXX: or bool(self.generations) ? # after _saveiter generations, then save state iters = self._saveiter saveiter = bool(iters) and not bool(self.generations % iters) if nonzero and saveiter: self.SaveSolver() #FIXME: if _saveeval (or more) since last check, then save state #save = self.evaluations % self._saveeval return def __load_state(self, solver, **kwds): """load solver.__dict__ into self.__dict__; override with kwds""" #XXX: should do some filtering on kwds ? 
        self.__dict__.update(solver.__dict__, **kwds)
        return

    def _exitMain(self, **kwds):
        """cleanup upon exiting the main optimization loop"""
        pass

    def _process_inputs(self, kwds):
        """process and activate input settings"""
        # allow for inputs that don't conform to AbstractSolver interface
        settings = \
       {'callback':None,     # user-supplied function, called after each step
        'disp':0}            # non-zero to print convergence messages
        [settings.update({i:j}) for (i,j) in kwds.items() if i in settings]
        # backward compatibility
        if 'EvaluationMonitor' in kwds:
            self.SetEvaluationMonitor(kwds.get('EvaluationMonitor'))
        if 'StepMonitor' in kwds:
            self.SetGenerationMonitor(kwds.get('StepMonitor'))
        if 'penalty' in kwds:
            self.SetPenalty(kwds.get('penalty'))
        if 'constraints' in kwds:
            self.SetConstraints(kwds.get('constraints'))
        return settings

    def Solve(self, cost=None, termination=None, sigint_callback=None,
                                                 ExtraArgs=None, **kwds):
        """Minimize a 'cost' function with given termination conditions.

Description:

    Uses an optimization algorithm to find the minimum of
    a function of one or more variables.

Inputs:

    cost -- the Python function or method to be minimized.

Additional Inputs:

    termination -- callable object providing termination conditions.
    sigint_callback -- callback function for signal handler.
    ExtraArgs -- extra arguments for cost.

Further Inputs:

    callback -- an optional user-supplied function to call after each
        iteration.  It is called as callback(xk), where xk is
        the current parameter vector.  [default = None]
    disp -- non-zero to print convergence messages.
        """
        # HACK to enable not explicitly calling _RegisterObjective
        cost = self._bootstrap_decorate(cost, ExtraArgs)
        # process and activate input settings
        settings = self._process_inputs(kwds)
        callback = settings['callback'] if 'callback' in settings else None
        disp = settings['disp'] if 'disp' in settings else False

        # set up signal handler
        import signal
        self._EARLYEXIT = False
        self._generateHandler(sigint_callback)
        if self._handle_sigint:
            signal.signal(signal.SIGINT, self.signal_handler)

       ## decorate cost function with bounds, penalties, monitors, etc
       #self._RegisterObjective(cost, ExtraArgs) #XXX: SetObjective ?
        # register termination function
        if termination is not None:
            self.SetTermination(termination)

        # the initial optimization iteration
        if not len(self._stepmon): # do generation = 0
            self.Step()
            if callback is not None:
                callback(self.bestSolution)
        # initialize termination conditions, if needed
        self._termination(self) #XXX: call at generation 0 or always?
        # impose the evaluation limits
        self._SetEvaluationLimits()

        # the main optimization loop
        while not self.CheckTermination() and not self._EARLYEXIT:
            self.Step(**settings)
            if callback is not None:
                callback(self.bestSolution)
        else:
            self._exitMain()

        # handle signal interrupts
        signal.signal(signal.SIGINT, signal.default_int_handler)

        # log any termination messages
        msg = self.CheckTermination(disp=disp, info=True)
        if msg:
            self._stepmon.info('STOP("%s")' % msg)
        # save final state
        self.__save_state(force=True)
        return

    # extensions to the solver interface
    evaluations = property(__evaluations)
    generations = property(__generations)
    energy_history = property(__energy_history, __set_energy_history)
    solution_history = property(__solution_history, __set_solution_history)
    bestEnergy = property(__bestEnergy, __set_bestEnergy)
    bestSolution = property(__bestSolution, __set_bestSolution)
    pass
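# A hypothetical subclass (illustrative only, not from the source) showing
# the Step contract that AbstractSolver expects: update the population, keep
# population[0] as the best member, and log each generation to the monitor.
class RandomWalkSolver(AbstractSolver):
    """toy example: perturb the best point once per generation"""
    def Step(self, cost=None, ExtraArgs=None, **kwds):
        import random
        cost = self._bootstrap_decorate(cost, ExtraArgs)
        x = [xi + random.uniform(-0.1, 0.1) for xi in self.population[0]]
        y = cost(x)
        if y < self.popEnergy[0]:
            self.population[0], self.popEnergy[0] = x, y
        self._stepmon(self.bestSolution, self.bestEnergy)  # log generation
        return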
def radius(model, point, ytol=0.0, xtol=0.0, ipop=None, imax=None):
    """graphical distance between a single point x,y and a model F(x')"""
    # given a single point x,y: find the radius = |y - F(x')| + delta
    # radius is just a minimization over x' of |y - F(x')| + delta
    # where we apply a constraints function (of box constraints) of
    # |x - x'| <= xtol  (for each i in x)
    #
    # if hausdorff = some iterable, delta = |x - x'|/hausdorff
    # if hausdorff = True, delta = |x - x'|/spread(x); using the dataset range
    # if hausdorff = False, delta = 0.0
    #
    # if ipop, then DE else Powell; ytol is used in VTR(ytol)
    # and will terminate when cost <= ytol
    #
    # NOTE: hausdorff, debug, ptp, yptp, and _get_xy are taken from the
    # enclosing scope (radius is an inner function)
    x,y = _get_xy(point)
    y = asarray(y)
    # catch cases where yptp or y will cause issues in normalization
   #if not isfinite(yptp): return 0.0 #FIXME: correct? shouldn't happen
   #if yptp == 0: from numpy import inf; return inf #FIXME: this is bad

    # build the cost function
    if hausdorff: # distance in all directions
        def cost(rv):
            '''cost = |y - F(x')| + |x - x'| for each x,y (point in dataset)'''
            _y = model(rv)
            if not isfinite(_y): return abs(_y)
            errs = seterr(invalid='ignore', divide='ignore') # turn off warning
            z = abs((asarray(x) - rv)/ptp)  # normalize by range
            m = abs(y - _y)/yptp            # normalize by range
            seterr(invalid=errs['invalid'], divide=errs['divide']) # turn on warning
            return m + sum(z[isfinite(z)])
    else: # vertical distance only
        def cost(rv):
            '''cost = |y - F(x')| for each x,y (point in dataset)'''
            return abs(y - model(rv))

    if debug:
        print("rv: %s" % str(x))
        print("cost: %s" % cost(x))

    # if xtol=0, radius is difference in x,y and x,F(x); skip the optimization
    try:
        if not imax or not max(xtol): #iterables
            return cost(x)
    except TypeError:
        if not xtol: #non-iterables
            return cost(x)

    # set the range constraints
    xtol = asarray(xtol)
    bounds = list(zip( x - xtol, x + xtol ))

    if debug:
        print("lower: %s" % str(list(zip(*bounds))[0]))
        print("upper: %s" % str(list(zip(*bounds))[1]))

    # optimize where initially x' = x
    stepmon = Monitor()
    if debug: stepmon = VerboseMonitor(1) #XXX: edit settings?
    MINMAX = 1 #XXX: confirm MINMAX=1 is minimization
    ftol = ytol
    gtol = None # use VTRCOG
    if ipop:
        results = diffev2(cost, bounds, ipop, ftol=ftol, gtol=gtol, \
                          itermon=stepmon, maxiter=imax, bounds=bounds, \
                          full_output=1, disp=0, handler=False)
    else:
        results = fmin_powell(cost, x, ftol=ftol, gtol=gtol, \
                              itermon=stepmon, maxiter=imax, bounds=bounds, \
                              full_output=1, disp=0, handler=False)
   #solved = results[0]            # x'
    func_opt = MINMAX * results[1] # cost(x')
    if debug:
        print("solved: %s" % results[0])
        print("cost: %s" % func_opt)

    # get the minimum distance |y - F(x')|
    return func_opt
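# A standalone sketch (names assumed, not from the source) of the distance
# minimized above in the hausdorff case, written out for one data point (x, y):
#     cost(x') = |y - F(x')|/yptp + sum_i |x_i - x'_i|/ptp_i
import numpy as np

def graphical_cost(model, x, y, xprime, ptp, yptp):
    """normalized vertical distance plus normalized horizontal distance"""
    z = np.abs((np.asarray(x) - np.asarray(xprime)) / ptp)
    return np.abs(y - model(xprime)) / yptp + z[np.isfinite(z)].sum()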
def fmin(cost, x0, args=(), bounds=None, xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None, **kwds): """Minimize a function using the downhill simplex algorithm. Uses a Nelder-Mead simplex algorithm to find the minimum of a function of one or more variables. This algorithm only uses function values, not derivatives or second derivatives. Mimics the ``scipy.optimize.fmin`` interface. This algorithm has a long history of successful use in applications. It will usually be slower than an algorithm that uses first or second derivative information. In practice it can have poor performance in high-dimensional problems and is not robust to minimizing complicated functions. Additionally, there currently is no complete theory describing when the algorithm will successfully converge to the minimum, or how fast it will if it does. Both the ftol and xtol criteria must be met for convergence. Args: cost (func): the function or method to be minimized: ``y = cost(x)``. x0 (ndarray): the initial guess parameter vector ``x``. args (tuple, default=()): extra arguments for cost. bounds (list(tuple), default=None): list of pairs of bounds (min,max), one for each parameter. xtol (float, default=1e-4): acceptable absolute error in ``xopt`` for convergence. ftol (float, default=1e-4): acceptable absolute error in ``cost(xopt)`` for convergence. maxiter (int, default=None): the maximum number of iterations to perform. maxfun (int, default=None): the maximum number of function evaluations. full_output (bool, default=False): True if fval and warnflag are desired. disp (bool, default=True): if True, print convergence messages. retall (bool, default=False): if True, return list of solutions at each iteration. callback (func, default=None): function to call after each iteration. The interface is ``callback(xk)``, with xk the current parameter vector. handler (bool, default=False): if True, enable handling interrupt signals. itermon (monitor, default=None): override the default GenerationMonitor. evalmon (monitor, default=None): override the default EvaluationMonitor. constraints (func, default=None): a function ``xk' = constraints(xk)``, where xk is the current parameter vector, and xk' is a parameter vector that satisfies the encoded constraints. penalty (func, default=None): a function ``y = penalty(xk)``, where xk is the current parameter vector, and ``y' == 0`` when the encoded constraints are satisfied (and ``y' > 0`` otherwise). 
Returns: ``(xopt, {fopt, iter, funcalls, warnflag}, {allvecs})`` Notes: - xopt (*ndarray*): the minimizer of the cost function - fopt (*float*): value of cost function at minimum: ``fopt = cost(xopt)`` - iter (*int*): number of iterations - funcalls (*int*): number of function calls - warnflag (*int*): warning flag: - ``1 : Maximum number of function evaluations`` - ``2 : Maximum number of iterations`` - allvecs (*list*): a list of solutions at each iteration """ handler = kwds['handler'] if 'handler' in kwds else False from mystic.monitors import Monitor stepmon = kwds['itermon'] if 'itermon' in kwds else Monitor() evalmon = kwds['evalmon'] if 'evalmon' in kwds else Monitor() if xtol: #if tolerance in x is provided, use CandidateRelativeTolerance from mystic.termination import CandidateRelativeTolerance as CRT termination = CRT(xtol, ftol) else: from mystic.termination import VTRChangeOverGeneration termination = VTRChangeOverGeneration(ftol) solver = NelderMeadSimplexSolver(len(x0)) solver.SetInitialPoints(x0) solver.SetEvaluationLimits(maxiter, maxfun) solver.SetEvaluationMonitor(evalmon) solver.SetGenerationMonitor(stepmon) if 'penalty' in kwds: solver.SetPenalty(kwds['penalty']) if 'constraints' in kwds: solver.SetConstraints(kwds['constraints']) if bounds is not None: minb, maxb = unpair(bounds) solver.SetStrictRanges(minb, maxb) if handler: solver.enable_signal_handler() solver.Solve(cost, termination=termination, \ disp=disp, ExtraArgs=args, callback=callback) solution = solver.Solution() # code below here pushes output to scipy.optimize.fmin interface #x = list(solver.bestSolution) x = solver.bestSolution fval = solver.bestEnergy warnflag = 0 fcalls = solver.evaluations iterations = solver.generations allvecs = stepmon.x if fcalls >= solver._maxfun: warnflag = 1 elif iterations >= solver._maxiter: warnflag = 2 if full_output: retlist = x, fval, iterations, fcalls, warnflag if retall: retlist += (allvecs, ) else: retlist = x if retall: retlist = (x, allvecs) return retlist
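# A short usage sketch for fmin above: a local Nelder-Mead minimization of
# the Rosenbrock function, unpacking the full_output tuple documented above.
from mystic.models import rosen
from mystic.solvers import fmin

xopt, fopt, it, fcalls, flag = fmin(rosen, [0.8, 1.2, 0.7],
                                    full_output=True, disp=0)
print(xopt, fopt)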
def fmin_powell(cost, x0, args=(), bounds=None, xtol=1e-4, ftol=1e-4,
                maxiter=None, maxfun=None, full_output=0, disp=1, retall=0,
                callback=None, direc=None, **kwds):
    """Minimize a function using modified Powell's method.

Uses a modified Powell Directional Search algorithm to find the minimum of
a function of one or more variables. This method only uses function values,
not derivatives. Mimics the ``scipy.optimize.fmin_powell`` interface.

Powell's method is a conjugate direction method that has two loops. The outer
loop simply iterates over the inner loop, while the inner loop minimizes over
each current direction in the direction set. At the end of the inner loop,
if certain conditions are met, the direction that gave the largest decrease
is dropped and replaced with the difference between the current estimated x
and the estimated x from the beginning of the inner-loop.

The conditions for replacing the direction of largest increase are that:
(a) no further gain can be made along the direction of greatest increase in
the iteration, and (b) the direction of greatest increase accounted for a
sufficiently large fraction of the decrease in the function value from the
current iteration of the inner loop.

Args:
    cost (func): the function or method to be minimized: ``y = cost(x)``.
    x0 (ndarray): the initial guess parameter vector ``x``.
    args (tuple, default=()): extra arguments for cost.
    bounds (list(tuple), default=None): list of pairs of bounds (min,max),
        one for each parameter.
    xtol (float, default=1e-4): acceptable relative error in ``xopt`` for
        convergence.
    ftol (float, default=1e-4): acceptable relative error in ``cost(xopt)``
        for convergence.
    gtol (float, default=2): maximum iterations to run without improvement.
    maxiter (int, default=None): the maximum number of iterations to perform.
    maxfun (int, default=None): the maximum number of function evaluations.
    full_output (bool, default=False): True if fval and warnflag are desired.
    disp (bool, default=True): if True, print convergence messages.
    retall (bool, default=False): if True, return list of solutions at each
        iteration.
    callback (func, default=None): function to call after each iteration. The
        interface is ``callback(xk)``, with xk the current parameter vector.
    direc (tuple, default=None): the initial direction set.
    handler (bool, default=False): if True, enable handling interrupt signals.
    itermon (monitor, default=None): override the default GenerationMonitor.
    evalmon (monitor, default=None): override the default EvaluationMonitor.
    constraints (func, default=None): a function ``xk' = constraints(xk)``,
        where xk is the current parameter vector, and xk' is a parameter
        vector that satisfies the encoded constraints.
    penalty (func, default=None): a function ``y = penalty(xk)``, where xk is
        the current parameter vector, and ``y' == 0`` when the encoded
        constraints are satisfied (and ``y' > 0`` otherwise).

Returns:
    ``(xopt, {fopt, iter, funcalls, warnflag, direc}, {allvecs})``

Notes:
    - xopt (*ndarray*): the minimizer of the cost function
    - fopt (*float*): value of cost function at minimum: ``fopt = cost(xopt)``
    - iter (*int*): number of iterations
    - funcalls (*int*): number of function calls
    - warnflag (*int*): warning flag:
        - ``1 : Maximum number of function evaluations``
        - ``2 : Maximum number of iterations``
    - direc (*tuple*): the current direction set
    - allvecs (*list*): a list of solutions at each iteration
    """
    #FIXME: need to resolve "direc"
    #       - should just pass 'direc', and then hands-off ?  How return it ?
    #XXX: enable use of imax?
handler = kwds['handler'] if 'handler' in kwds else False from mystic.monitors import Monitor stepmon = kwds['itermon'] if 'itermon' in kwds else Monitor() evalmon = kwds['evalmon'] if 'evalmon' in kwds else Monitor() gtol = 2 # termination generations (scipy: 2, default: 10) if 'gtol' in kwds: gtol = kwds['gtol'] if gtol: #if number of generations is provided, use NCOG from mystic.termination import NormalizedChangeOverGeneration as NCOG termination = NCOG(ftol, gtol) else: from mystic.termination import VTRChangeOverGeneration termination = VTRChangeOverGeneration(ftol) solver = PowellDirectionalSolver(len(x0)) solver.SetInitialPoints(x0) solver.SetEvaluationLimits(maxiter, maxfun) solver.SetEvaluationMonitor(evalmon) solver.SetGenerationMonitor(stepmon) if 'penalty' in kwds: solver.SetPenalty(kwds['penalty']) if 'constraints' in kwds: solver.SetConstraints(kwds['constraints']) if bounds is not None: minb, maxb = unpair(bounds) solver.SetStrictRanges(minb, maxb) if handler: solver.enable_signal_handler() solver.Solve(cost, termination=termination, \ xtol=xtol, ExtraArgs=args, callback=callback, \ disp=disp, direc=direc) #XXX: last two lines use **kwds solution = solver.Solution() # code below here pushes output to scipy.optimize.fmin_powell interface #x = list(solver.bestSolution) x = solver.bestSolution fval = solver.bestEnergy warnflag = 0 fcalls = solver.evaluations iterations = solver.generations allvecs = stepmon.x direc = solver._direc if fcalls >= solver._maxfun: warnflag = 1 elif iterations >= solver._maxiter: warnflag = 2 x = squeeze(x) #FIXME: write squeezed x to stepmon instead? if full_output: retlist = x, fval, iterations, fcalls, warnflag, direc if retall: retlist += (allvecs, ) else: retlist = x if retall: retlist = (x, allvecs) return retlist
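# A matching sketch for fmin_powell above; with full_output the final
# direction set 'direc' is also returned.
from mystic.models import rosen
from mystic.solvers import fmin_powell

xopt, fopt, it, fcalls, flag, direc = fmin_powell(rosen, [0.8, 1.2, 0.7],
                                                  full_output=True, disp=0)
print(xopt, fopt)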
def test_griewangk(verbose=False): """Test Griewangk's function, which has many local minima. Testing Griewangk: Expected: x=[0.]*10 and f=0 Using DifferentialEvolutionSolver: Solution: [ 8.87516194e-09 7.26058147e-09 1.02076001e-08 1.54219038e-08 -1.54328461e-08 2.34589663e-08 2.02809360e-08 -1.36385836e-08 1.38670373e-08 1.59668900e-08] f value: 0.0 Iterations: 4120 Function evaluations: 205669 Time elapsed: 34.4936850071 seconds Using DifferentialEvolutionSolver2: Solution: [ -2.02709316e-09 3.22017968e-09 1.55275472e-08 5.26739541e-09 -2.18490470e-08 3.73725584e-09 -1.02315312e-09 1.24680355e-08 -9.47898116e-09 2.22243557e-08] f value: 0.0 Iterations: 4011 Function evaluations: 200215 Time elapsed: 32.8412370682 seconds """ if verbose: print("Testing Griewangk:") print("Expected: x=[0.]*10 and f=0") from mystic.models import griewangk as costfunc ndim = 10 lb = [-400.]*ndim ub = [400.]*ndim maxiter = 10000 seed = 123 # Re-seed for each solver to have them all start at same x0 # DifferentialEvolutionSolver if verbose: print("\nUsing DifferentialEvolutionSolver:") npop = 50 random_seed(seed) from mystic.solvers import DifferentialEvolutionSolver from mystic.termination import ChangeOverGeneration as COG from mystic.termination import CandidateRelativeTolerance as CRT from mystic.termination import VTR from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp esow = Monitor() ssow = Monitor() solver = DifferentialEvolutionSolver(ndim, npop) solver.SetRandomInitialPoints(lb, ub) solver.SetStrictRanges(lb, ub) solver.SetEvaluationLimits(generations=maxiter) solver.SetEvaluationMonitor(esow) solver.SetGenerationMonitor(ssow) solver.enable_signal_handler() #term = COG(1e-10) #term = CRT() term = VTR(0.) time1 = time.time() # Is this an ok way of timing? solver.Solve(costfunc, term, strategy=Rand1Exp, \ CrossProbability=0.3, ScalingFactor=1.0) sol = solver.Solution() time_elapsed = time.time() - time1 fx = solver.bestEnergy if verbose: print("Solution: %s" % sol) print("f value: %s" % fx) print("Iterations: %s" % solver.generations) print("Function evaluations: %s" % len(esow.x)) print("Time elapsed: %s seconds" % time_elapsed) assert almostEqual(fx, 0.0, tol=3e-3) # DifferentialEvolutionSolver2 if verbose: print("\nUsing DifferentialEvolutionSolver2:") npop = 50 random_seed(seed) from mystic.solvers import DifferentialEvolutionSolver2 from mystic.termination import ChangeOverGeneration as COG from mystic.termination import CandidateRelativeTolerance as CRT from mystic.termination import VTR from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp esow = Monitor() ssow = Monitor() solver = DifferentialEvolutionSolver2(ndim, npop) solver.SetRandomInitialPoints(lb, ub) solver.SetStrictRanges(lb, ub) solver.SetEvaluationLimits(generations=maxiter) solver.SetEvaluationMonitor(esow) solver.SetGenerationMonitor(ssow) #term = COG(1e-10) #term = CRT() term = VTR(0.) time1 = time.time() # Is this an ok way of timing? solver.Solve(costfunc, term, strategy=Rand1Exp, \ CrossProbability=0.3, ScalingFactor=1.0) sol = solver.Solution() time_elapsed = time.time() - time1 fx = solver.bestEnergy if verbose: print("Solution: %s" % sol) print("f value: %s" % fx) print("Iterations: %s" % solver.generations) print("Function evaluations: %s" % len(esow.x)) print("Time elapsed: %s seconds" % time_elapsed) assert almostEqual(fx, 0.0, tol=3e-3)
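# A hedged runner for the test above. The fragment assumes Monitor, time,
# random_seed, and almostEqual are already in scope, so bind them at module
# level before calling the test.
if __name__ == '__main__':
    import time
    from mystic.monitors import Monitor
    from mystic.tools import random_seed
    from mystic.math import almostEqual
    test_griewangk(verbose=True)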
solution = solver.Solution() print(solution) if __name__ == '__main__': from timeit import Timer t = Timer("main()", "from __main__ import main") timetaken = t.timeit(number=1) print("CPU Time: %s" % timetaken) from mystic.monitors import Monitor from mystic.solvers import NelderMeadSimplexSolver as fmin from mystic.termination import CandidateRelativeTolerance as CRT import random simplex = Monitor() esow = Monitor() xinit = [random.uniform(0, 5) for j in range(ND)] solver = fmin(len(xinit)) solver.SetInitialPoints(xinit) solver.SetEvaluationMonitor(esow) solver.SetGenerationMonitor(simplex) solver.Solve(CostFunction, CRT()) sol = solver.Solution() print("fmin solution: %s" % sol) # end of file
#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2019 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/mystic/blob/master/LICENSE from mystic.solvers import DifferentialEvolutionSolver from mystic.models import rosen from mystic.tools import solver_bounds from mystic.termination import ChangeOverGeneration as COG, Or, CollapseCost from mystic.monitors import VerboseLoggingMonitor as Monitor solver = DifferentialEvolutionSolver(3, 40) solver.SetRandomInitialPoints([-100] * 3, [100] * 3) mon = Monitor(1) options = dict(limit=1.95, samples=50, clip=False) mask = {} #solver_bounds(solver) stop = Or(CollapseCost(mask=mask, **options), COG(generations=50)) solver.SetGenerationMonitor(mon) solver.SetTermination(stop) solver.SetObjective(rosen) solver.Solve() print(solver.Terminated(info=True)) print('%s @' % solver.bestEnergy) print(solver.bestSolution)
    def __init__(self, dim, **kwds):
        """
Takes one initial input:
    dim      -- dimensionality of the problem.

Additional inputs:
    npop     -- size of the trial solution population. [default = 1]

Important class members:
    nDim, nPop       = dim, npop
    generations      - an iteration counter.
    evaluations      - an evaluation counter.
    bestEnergy       - current best energy.
    bestSolution     - current best parameter set. [size = dim]
    popEnergy        - set of all trial energy solutions. [size = npop]
    population       - set of all trial parameter solutions. [size = dim*npop]
    solution_history - history of bestSolution status. [StepMonitor.x]
    energy_history   - history of bestEnergy status. [StepMonitor.y]
    signal_handler   - catches the interrupt signal.
        """
        NP = kwds['npop'] if 'npop' in kwds else 1
        self.nDim              = dim
        self.nPop              = NP
        self._init_popEnergy   = inf
        self.popEnergy         = [self._init_popEnergy] * NP
        self.population        = [[0.0 for i in range(dim)] for j in range(NP)]
        self.trialSolution     = [0.0] * dim
        self._map_solver       = False
        self._bestEnergy       = None
        self._bestSolution     = None
        self._state            = None
        self._type             = self.__class__.__name__

        self.signal_handler    = None
        self._handle_sigint    = False
        self._useStrictRange   = False
        self._defaultMin       = [-1e3] * dim
        self._defaultMax       = [ 1e3] * dim
        self._strictMin        = []
        self._strictMax        = []
        self._maxiter          = None
        self._maxfun           = None
        self._saveiter         = None
       #self._saveeval         = None

        from mystic.monitors import Null, Monitor
        self._evalmon          = Null()
        self._stepmon          = Monitor()
        self._fcalls           = [0]
        self._energy_history   = None
        self._solution_history = None
        self.id                = None # identifier (use like "rank" for MPI)

        self._constraints      = lambda x: x
        self._penalty          = lambda x: 0.0
        self._reducer          = None
        self._cost             = (None, None)
        self._termination      = lambda x, *ar, **kw: False if len(ar) < 1 or ar[0] is False or kw.get('info', True) == False else '' #XXX: better default ?
        # (get termination details with self._termination.__doc__)
        import mystic.termination
        self._EARLYEXIT        = mystic.termination.EARLYEXIT
        return
def Solve(self, cost, termination=None, ExtraArgs=(), **kwds): """Minimize a 'cost' function with given termination conditions. Description: Uses an ensemble of optimizers to find the minimum of a function of one or more variables. Inputs: cost -- the Python function or method to be minimized. Additional Inputs: termination -- callable object providing termination conditions. ExtraArgs -- extra arguments for cost. Further Inputs: sigint_callback -- callback function for signal handler. callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. [default = None] disp -- non-zero to print convergence messages. [default = 0] """ # process and activate input settings if 'sigint_callback' in kwds: self.sigint_callback = kwds['sigint_callback'] del kwds['sigint_callback'] else: self.sigint_callback = None settings = self._process_inputs(kwds) disp = settings['disp'] if 'disp' in settings else False echo = settings['callback'] if 'callback' in settings else None # for key in settings: # exec "%s = settings['%s']" % (key,key) if disp in ['verbose', 'all']: verbose = True else: verbose = False #------------------------------------------------------------- from mystic.python_map import python_map if self._map != python_map: #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp' from mystic.monitors import Null evalmon = Null() else: evalmon = self._evalmon fcalls, cost = wrap_function(cost, ExtraArgs, evalmon) # set up signal handler #self._EARLYEXIT = False # activate signal_handler #import threading as thread #mainthread = isinstance(thread.current_thread(), thread._MainThread) #if mainthread: #XXX: if not mainthread, signal will raise ValueError import mystic._signal as signal if self._handle_sigint: signal.signal(signal.SIGINT, signal.Handler(self)) # register termination function if termination is not None: self.SetTermination(termination) # get the nested solver instance solver = self._AbstractEnsembleSolver__get_solver_instance() #------------------------------------------------------------- # generate starting points initial_values = self._InitialPoints() # run optimizer for each grid point from copy import deepcopy as _copy op = [_copy(solver) for i in range(len(initial_values))] #cf = [cost for i in range(len(initial_values))] vb = [verbose for i in range(len(initial_values))] cb = [echo for i in range(len(initial_values))] #XXX: remove? at = self.id if self.id else 0 # start at self.id id = range(at, at + len(initial_values)) # generate the local_optimize function def local_optimize(solver, x0, rank=None, disp=False, callback=None): from copy import deepcopy as _copy from mystic.tools import isNull solver.id = rank solver.SetInitialPoints(x0) if solver._useStrictRange: #XXX: always, settable, or sync'd ? solver.SetStrictRanges(min=solver._strictMin, \ max=solver._strictMax) # or lower,upper ? 
solver.Solve(cost, disp=disp, callback=callback) sm = solver._stepmon em = solver._evalmon if isNull(sm): sm = ([], [], [], []) else: sm = (_copy(sm._x), _copy(sm._y), _copy(sm._id), _copy(sm._info)) if isNull(em): em = ([], [], [], []) else: em = (_copy(em._x), _copy(em._y), _copy(em._id), _copy(em._info)) return solver, sm, em # map:: solver = local_optimize(solver, x0, id, verbose) results = list(self._map(local_optimize, op, initial_values, id, \ vb, cb, **self._mapconfig)) # save initial state self._AbstractSolver__save_state() #XXX: HACK TO GET CONTENT OF ALL MONITORS # reconnect monitors; save all solvers from mystic.monitors import Monitor while results: #XXX: option to not save allSolvers? skip this and _copy _solver, _stepmon, _evalmon = results.pop() sm = Monitor() sm._x, sm._y, sm._id, sm._info = _stepmon _solver._stepmon.extend(sm) del sm em = Monitor() em._x, em._y, em._id, em._info = _evalmon _solver._evalmon.extend(em) del em self._allSolvers[len(results)] = _solver del results, _solver, _stepmon, _evalmon #XXX: END HACK # get the results with the lowest energy self._bestSolver = self._allSolvers[0] bestpath = self._bestSolver._stepmon besteval = self._bestSolver._evalmon self._total_evals = self._bestSolver.evaluations for solver in self._allSolvers[1:]: self._total_evals += solver.evaluations # add func evals if solver.bestEnergy < self._bestSolver.bestEnergy: self._bestSolver = solver bestpath = solver._stepmon besteval = solver._evalmon # return results to internals self.population = self._bestSolver.population #XXX: pointer? copy? self.popEnergy = self._bestSolver.popEnergy #XXX: pointer? copy? self.bestSolution = self._bestSolver.bestSolution #XXX: pointer? copy? self.bestEnergy = self._bestSolver.bestEnergy self.trialSolution = self._bestSolver.trialSolution #XXX: pointer? copy? self._fcalls = self._bestSolver._fcalls #XXX: pointer? copy? self._maxiter = self._bestSolver._maxiter self._maxfun = self._bestSolver._maxfun # write 'bests' to monitors #XXX: non-best monitors may be useful too self._stepmon = bestpath #XXX: pointer? copy? self._evalmon = besteval #XXX: pointer? copy? self.energy_history = None self.solution_history = None #from mystic.tools import isNull #if isNull(bestpath): # self._stepmon = bestpath #else: # for i in range(len(bestpath.y)): # self._stepmon(bestpath.x[i], bestpath.y[i], self.id) # #XXX: could apply callback here, or in exec'd code #if isNull(besteval): # self._evalmon = besteval #else: # for i in range(len(besteval.y)): # self._evalmon(besteval.x[i], besteval.y[i]) #------------------------------------------------------------- # restore default handler for signal interrupts if self._handle_sigint: signal.signal(signal.SIGINT, signal.default_int_handler) # log any termination messages msg = self.Terminated(disp=disp, info=True) if msg: self._stepmon.info('STOP("%s")' % msg) # save final state self._AbstractSolver__save_state(force=True) return