Example 1
def dakota(cost, lb, ub):
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.strategy import Best1Exp
    from mystic.monitors import VerboseMonitor, Monitor
    from mystic.tools import getch, random_seed

    random_seed(123)

    #stepmon = VerboseMonitor(100)
    stepmon = Monitor()
    evalmon = Monitor()

    ndim = len(lb)  # [(1 + RVend) - RVstart] + 1
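    # npop, maxiter, maxfun, convergence_tol, crossover, percent_change, and
    # scale are assumed to be module-level settings in the original script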

    solver = DifferentialEvolutionSolver2(ndim, npop)
    solver.SetRandomInitialPoints(min=lb, max=ub)
    solver.SetStrictRanges(min=lb, max=ub)
    solver.SetEvaluationLimits(maxiter, maxfun)
    solver.SetEvaluationMonitor(evalmon)
    solver.SetGenerationMonitor(stepmon)

    tol = convergence_tol
    solver.Solve(cost,termination=CRT(tol,tol),strategy=Best1Exp, \
                 CrossProbability=crossover,ScalingFactor=percent_change)

    print(solver.bestSolution)
    diameter = -solver.bestEnergy / scale
    func_evals = solver.evaluations
    return diameter, func_evals
Example 2
def optimize(cost,lb,ub):
  from pathos.pools import ProcessPool as Pool
  from mystic.solvers import DifferentialEvolutionSolver2
  from mystic.termination import CandidateRelativeTolerance as CRT
  from mystic.strategy import Best1Exp
  from mystic.monitors import VerboseMonitor, Monitor
  from mystic.tools import random_seed

  random_seed(123)

 #stepmon = VerboseMonitor(100)
  stepmon = Monitor()
  evalmon = Monitor()

  ndim = len(lb) # [(1 + RVend) - RVstart] + 1

  solver = DifferentialEvolutionSolver2(ndim,npop)
  solver.SetRandomInitialPoints(min=lb,max=ub)
  solver.SetStrictRanges(min=lb,max=ub)
  solver.SetEvaluationLimits(maxiter,maxfun)
  solver.SetEvaluationMonitor(evalmon)
  solver.SetGenerationMonitor(stepmon)
  solver.SetMapper(Pool().map)

  tol = convergence_tol
  solver.Solve(cost,termination=CRT(tol,tol),strategy=Best1Exp, \
               CrossProbability=crossover,ScalingFactor=percent_change)

  print("solved: %s" % solver.bestSolution)
  scale = 1.0
  diameter_squared = -solver.bestEnergy / scale  #XXX: scale != 0
  func_evals = solver.evaluations
  return diameter_squared, func_evals
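A minimal driver for the optimize function above might look like the sketch
below. The module-level settings (npop, maxiter, maxfun, convergence_tol,
crossover, percent_change) and the cost function are illustrative assumptions,
not values from the original script; the driver assumes it lives in the same
module as optimize, and that pathos is installed for the process-based map.

# hypothetical module-level settings, mirroring common mystic defaults
npop = 40
maxiter = 1000
maxfun = 10000
convergence_tol = 1e-7
crossover = 0.9
percent_change = 0.8

def cost(x):
    # stand-in objective: minimizing -sum(x**2) maximizes the squared norm
    # within the bounds, matching the "diameter" computation above
    return -sum(xi*xi for xi in x)

if __name__ == '__main__':
    lb = [-10.0]*3
    ub = [10.0]*3
    diameter_squared, evals = optimize(cost, lb, ub)
    print("diameter^2: %s after %s evaluations" % (diameter_squared, evals))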
Example 3
    def run(self):
        print('Start local optimization...')
        maxf = self.request_data['optimization']['parameters']['maxf']
        xtol = self.request_data['optimization']['parameters']['xtol']
        ftol = self.request_data['optimization']['parameters']['ftol']


        solver = NelderMeadSimplexSolver(len(self.initial_values))
        solver.SetInitialPoints(self.initial_values)

        solver.SetStrictRanges([i[0] for i in self.bounds], [i[1] for i in self.bounds])
        solver.SetEvaluationLimits(evaluations=maxf)
        solver.SetTermination(CRT(xtol=xtol, ftol=ftol))
        solver.enable_signal_handler()  # enable before Solve so interrupts are caught
        # invert the weights (multiply by -1) to convert the problem to minimization
        solver.Solve(
            self.evaluate_single_solution,
            ExtraArgs=([weight * -1 for weight in self.weights]),
            callback=self.callback
        )

        # finally, report the best solution through the callback
        self.callback(
            individual=solver.Solution(),
            final=True
        )

        return
Example 4
def optimize(cost, lb, ub):
    from mystic.differential_evolution import DifferentialEvolutionSolver2
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.strategy import Best1Exp
    from mystic import getch, random_seed, VerboseSow, Sow

    random_seed(123)

    stepmon = VerboseSow(100)
    #stepmon = Sow()
    evalmon = Sow()

    ndim = len(lb)  # [(1 + RVend) - RVstart] + 1

    solver = DifferentialEvolutionSolver2(ndim, npop)
    solver.SetRandomInitialPoints(min=lb, max=ub)
    solver.SetStrictRanges(min=lb, max=ub)
    solver.SetEvaluationLimits(maxiter, maxfun)

    tol = convergence_tol
    solver.Solve(cost,termination=CRT(tol,tol),strategy=Best1Exp, \
                 CrossProbability=crossover,ScalingFactor=percent_change, \
                 StepMonitor=stepmon, EvaluationMonitor=evalmon)

    print "solved: %s" % solver.Solution()
    fitness = solver.bestEnergy / scale  #XXX: scale != 0
    func_evals = len(evalmon.y)
    return fitness, func_evals
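Note that this example uses the legacy mystic API: Sow and VerboseSow are the
old names for Monitor and VerboseMonitor, the solver was imported from
mystic.differential_evolution rather than mystic.solvers, and monitors were
passed to Solve via StepMonitor/EvaluationMonitor instead of being attached to
the solver. A sketch of the same routine against the current API, as used in
Examples 1 and 2 above; the function name and default parameter values here
are illustrative, not from the original source:

def optimize_modern(cost, lb, ub, npop=40, maxiter=1000, maxfun=10000,
                    convergence_tol=1e-7, crossover=0.9, percent_change=0.8):
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.strategy import Best1Exp
    from mystic.monitors import Monitor, VerboseMonitor

    stepmon = VerboseMonitor(100)  # replaces VerboseSow(100)
    evalmon = Monitor()            # replaces Sow()

    solver = DifferentialEvolutionSolver2(len(lb), npop)
    solver.SetRandomInitialPoints(min=lb, max=ub)
    solver.SetStrictRanges(min=lb, max=ub)
    solver.SetEvaluationLimits(maxiter, maxfun)
    # monitors are now attached to the solver rather than passed to Solve
    solver.SetEvaluationMonitor(evalmon)
    solver.SetGenerationMonitor(stepmon)

    solver.Solve(cost, termination=CRT(convergence_tol, convergence_tol),
                 strategy=Best1Exp, CrossProbability=crossover,
                 ScalingFactor=percent_change)

    print("solved: %s" % solver.Solution())
    return solver.bestEnergy, solver.evaluations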
Example 5
def run_once():
    simplex = Monitor()
    solver = fmin(2)
    solver.SetRandomInitialPoints([0, 0], [7, 7])
    solver.SetGenerationMonitor(simplex)
    solver.Solve(CostFunction, termination=CRT())
    sol = solver.Solution()

    for x in simplex.x:
        sam.putarray('x', x)
        sam.eval("plot(x([1,2,3,1],1),x([1,2,3,1],2),'k-')")
Example 6
def run_once(x0, x1):
    simplex = Monitor()
    xinit = [x0, x1]

    solver = fmin(len(xinit))
    solver.SetInitialPoints(xinit)
    solver.SetGenerationMonitor(simplex)
    solver.Solve(rosen, termination=CRT())
    sol = solver.Solution()

    for x in simplex.x:
        sam.putarray('x', x)
        sam.eval("plot(x([1,2,3,1],1),x([1,2,3,1],2),'w-')")
Example 7
def mystic_optimize(point):
    from mystic.monitors import Monitor, VerboseMonitor
    from mystic.tools import getch, random_seed
    random_seed(123)
    from mystic.solvers import NelderMeadSimplexSolver as fmin
    from mystic.termination import CandidateRelativeTolerance as CRT
    simplex, esow = VerboseMonitor(50), Monitor()
    solver = fmin(len(point))
    solver.SetInitialPoints(point)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(simplex)
    solver.Solve(cost_function, CRT())
    solution = solver.Solution()
    return solution
Example 8
def mystic_optimize(point):
    from mystic.monitors import Monitor, VerboseMonitor
    from mystic.solvers import NelderMeadSimplexSolver as fmin
    from mystic.termination import CandidateRelativeTolerance as CRT
    simplex, esow = VerboseMonitor(50), Monitor()
    solver = fmin(len(point))
    solver.SetInitialPoints(point)
    lower = [-100,-100,-100]; upper = [100,100,100]
    solver.SetStrictRanges(lower,upper)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(simplex)
    solver.Solve(cost_function, CRT(1e-7,1e-7))
    solution = solver.Solution()
    return solution
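Both versions of mystic_optimize assume a module-level cost_function; a
minimal stand-in that makes the snippets runnable (the real objective in the
original script is not shown) might be:

def cost_function(params):
    # hypothetical 3-parameter quadratic bowl with its minimum at (1, 2, 3)
    a, b, c = params
    return (a - 1.0)**2 + (b - 2.0)**2 + (c - 3.0)**2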
Example 9
    def __init__(self, dim):
        """
Takes one initial input: 
    dim      -- dimensionality of the problem

The size of the simplex is dim+1.
        """
        simplex = dim + 1
        #XXX: cleaner to set npop=simplex, and use 'population' as simplex
        AbstractSolver.__init__(self, dim)  #,npop=simplex)
        self.popEnergy.append(self._init_popEnergy)
        self.population.append([0.0 for i in range(dim)])
        xtol, ftol = 1e-4, 1e-4
        from mystic.termination import CandidateRelativeTolerance as CRT
        self._termination = CRT(xtol, ftol)
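For reference, CRT implements the classic Nelder-Mead convergence test: every
candidate in the simplex must lie within xtol of the best candidate, and every
candidate energy within ftol of the best energy. A sketch of the check with
hypothetical names (see mystic.termination for the actual implementation):

import numpy as np

def crt_check(sim, fsim, xtol=1e-4, ftol=1e-4):
    # sim: array of simplex candidates (best first)
    # fsim: array of their energies (best first)
    return (np.max(np.abs(sim[1:] - sim[0])) <= xtol and
            np.max(np.abs(fsim[0] - fsim[1:])) <= ftol)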
Example 10
def run_once_xv():
    simplex = Monitor()
    y1 = y0*random.uniform(0.5,1.5)
    z1 = z0*random.uniform(0.5,1.5)
    xinit = [random.uniform(x0-40,x0+40), y1, z1, random.uniform(v0-0.1,v0+0.1)]

    solver = fmin(len(xinit))
    solver.SetInitialPoints(xinit)
    solver.SetGenerationMonitor(simplex)
    solver.Solve(cost_function, termination=CRT())
    sol = solver.Solution()
    print(sol)

    for x in simplex.x:
        sam.putarray('x',x)
        sam.eval("plot(x([1,2,3,1],1),x([1,2,3,1],2),'w-','LineWidth',2)")
    return sol
Example 11
    def __init__(self, dim):
        """
Takes one initial input: 
    dim      -- dimensionality of the problem

The size of the simplex is dim+1.
        """
        simplex = dim + 1
        #XXX: cleaner to set npop=simplex, and use 'population' as simplex
        AbstractSolver.__init__(self, dim)  #,npop=simplex)
        self.popEnergy.append(self._init_popEnergy)
        self.population.append([0.0 for i in range(dim)])
        self.radius = 0.05  #percentage change for initial simplex values
        self.adaptive = False  #use adaptive algorithm parameters
        xtol, ftol = 1e-4, 1e-4
        from mystic.termination import CandidateRelativeTolerance as CRT
        self._termination = CRT(xtol, ftol)
Example 12
    def optimize_linear(self, initial_values: List[float],
                        function) -> List[float]:
        """ Function to optimize one solution linear by using the mystic library

        Args:
            initial_values: the initial solution that the solver starts with
            function: the callback function that sends out the task to the database, awaits the result and takes it
            back in

        Returns:
            solution: a linear optimized solution

        """
        solver = NelderMeadSimplexSolver(dim=len(initial_values))

        solver.SetInitialPoints(x0=initial_values)
        solver.SetStrictRanges(self.low, self.up)
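        # note: Example 13 below passes this limit as 'evaluations=maxf';
        # if maxf caps function evaluations, that keyword may be intended here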
        solver.SetEvaluationLimits(generations=self.maxf)
        solver.SetTermination(CRT(self.xtol, self.ftol))

        solver.Solve(function)

        return list(solver.Solution())
Example 13
    def run(self):
        self.logger.info('Start local optimization...')
        maxf = self.request_data['optimization']['parameters']['maxf']
        xtol = self.request_data['optimization']['parameters']['xtol']
        ftol = self.request_data['optimization']['parameters']['ftol']

        solver = NelderMeadSimplexSolver(len(self.initial_values))
        solver.SetInitialPoints(self.initial_values)
        solver.SetStrictRanges([i[0] for i in self.bounds], [i[1] for i in self.bounds])
        solver.SetEvaluationLimits(evaluations=maxf)
        solver.SetTermination(CRT(xtol=xtol, ftol=ftol))
        solver.enable_signal_handler()  # enable before Solve so interrupts are caught
        solver.Solve(
            self.evaluate_single_solution,
            callback=self.callback
        )

        self.callback(
            individual=solver.Solution(),
            final=True
        )

        return
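During Solve, the callback is invoked as callback(xk) with xk the current
parameter vector (see the fmin docstring in Example 17), while run() calls it
once more with final=True. A hypothetical callback compatible with both call
sites might be:

    def callback(self, individual=None, final=False):
        # called by Solve as callback(xk) each generation, and by run()
        # with final=True once the solver terminates (illustrative only)
        tag = 'final solution' if final else 'current best'
        self.logger.info('%s: %s' % (tag, list(individual)))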
Example 14
    def test_PowellDirectionalSolver_CRT(self):
        from mystic.solvers import PowellDirectionalSolver
        from mystic.termination import CandidateRelativeTolerance as CRT
        self.solver = PowellDirectionalSolver(self.ND)
        self.term = CRT()
        self._run_solver(early_terminate=True)
Example 15
    def test_NelderMeadSimplexSolver_CRT(self):  # Default for this solver
        from mystic.solvers import NelderMeadSimplexSolver
        from mystic.termination import CandidateRelativeTolerance as CRT
        self.solver = NelderMeadSimplexSolver(self.ND)
        self.term = CRT()
        self._run_solver()
Example 16
    def test_DifferentialEvolutionSolver2_CRT(self):
        from mystic.solvers import DifferentialEvolutionSolver2
        from mystic.termination import CandidateRelativeTolerance as CRT
        self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
        self.term = CRT()
        self._run_solver()
Example 17
def fmin(cost, x0, args=(), bounds=None, xtol=1e-4, ftol=1e-4,
         maxiter=None, maxfun=None, full_output=0, disp=1, retall=0,
         callback=None, **kwds):
    """Minimize a function using the downhill simplex algorithm.
    
Description:

    Uses a Nelder-Mead simplex algorithm to find the minimum of
    a function of one or more variables. Mimics the scipy.optimize.fmin
    interface.

Inputs:

    cost -- the Python function or method to be minimized.
    x0 -- ndarray - the initial guess.

Additional Inputs:

    args -- extra arguments for cost.
    bounds -- list - n pairs of bounds (min,max), one pair for each parameter.
    xtol -- number - acceptable relative error in xopt for convergence.
    ftol -- number - acceptable relative error in cost(xopt) for
        convergence.
    maxiter -- number - the maximum number of iterations to perform.
    maxfun -- number - the maximum number of function evaluations.
    full_output -- number - non-zero if fval and warnflag outputs are
        desired.
    disp -- number - non-zero to print convergence messages.
    retall -- number - non-zero to return list of solutions at each
        iteration.
    callback -- an optional user-supplied function to call after each
        iteration.  It is called as callback(xk), where xk is the
        current parameter vector.
    handler -- boolean - enable/disable handling of interrupt signal.
    itermon -- monitor - override the default GenerationMonitor.
    evalmon -- monitor - override the default EvaluationMonitor.
    constraints -- an optional user-supplied function.  It is called as
        constraints(xk), where xk is the current parameter vector.
        This function must return xk', a parameter vector that satisfies
        the encoded constraints.
    penalty -- an optional user-supplied function.  It is called as
        penalty(xk), where xk is the current parameter vector.
        This function should return y', with y' == 0 when the encoded
        constraints are satisfied, and y' > 0 otherwise.

Returns: (xopt, {fopt, iter, funcalls, warnflag}, {allvecs})

    xopt -- ndarray - minimizer of function
    fopt -- number - value of function at minimum: fopt = cost(xopt)
    iter -- number - number of iterations
    funcalls -- number - number of function calls
    warnflag -- number - Integer warning flag:
        1 : 'Maximum number of function evaluations.'
        2 : 'Maximum number of iterations.'
    allvecs -- list - a list of solutions at each iteration

    """
    handler = kwds['handler'] if 'handler' in kwds else False

    from mystic.monitors import Monitor
    stepmon = kwds['itermon'] if 'itermon' in kwds else Monitor()
    evalmon = kwds['evalmon'] if 'evalmon' in kwds else Monitor()

    if xtol: #if tolerance in x is provided, use CandidateRelativeTolerance
        from mystic.termination import CandidateRelativeTolerance as CRT
        termination = CRT(xtol,ftol)
    else:
        from mystic.termination import VTRChangeOverGeneration
        termination = VTRChangeOverGeneration(ftol)
    solver = NelderMeadSimplexSolver(len(x0))
    solver.SetInitialPoints(x0)
    solver.SetEvaluationLimits(maxiter,maxfun)
    solver.SetEvaluationMonitor(evalmon)
    solver.SetGenerationMonitor(stepmon)
    if 'penalty' in kwds:
        solver.SetPenalty(kwds['penalty'])
    if 'constraints' in kwds:
        solver.SetConstraints(kwds['constraints'])
    if bounds is not None:
        minb,maxb = unpair(bounds)
        solver.SetStrictRanges(minb,maxb)

    if handler: solver.enable_signal_handler()
    solver.Solve(cost, termination=termination, \
                 disp=disp, ExtraArgs=args, callback=callback)
    solution = solver.Solution()

    # code below here pushes output to scipy.optimize.fmin interface
   #x = list(solver.bestSolution)
    x = solver.bestSolution
    fval = solver.bestEnergy
    warnflag = 0
    fcalls = solver.evaluations
    iterations = solver.generations
    allvecs = stepmon.x

    if fcalls >= solver._maxfun:
        warnflag = 1
    elif iterations >= solver._maxiter:
        warnflag = 2

    if full_output:
        retlist = x, fval, iterations, fcalls, warnflag
        if retall:
            retlist += (allvecs,)
    else:
        retlist = x
        if retall:
            retlist = (x, allvecs)

    return retlist
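Since this fmin mirrors the scipy.optimize.fmin interface (and appears to
match the solver shipped as mystic.solvers.fmin, imported in Example 18
below), a typical call using the rosen model from elsewhere on this page might
look like this sketch:

from mystic.models import rosen
from mystic.solvers import fmin

x0 = [0.8, 1.2, 0.7]
# with full_output, the return is (xopt, fopt, iter, funcalls, warnflag)
xopt, fopt, iters, funcalls, warnflag = fmin(rosen, x0, disp=0, full_output=1)
print("xopt: %s  fopt: %s" % (xopt, fopt))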
Example 18
    print("Nelder-Mead Simplex")
    print("===================")
    start = time.time()
    from mystic.monitors import Monitor, VerboseMonitor
   #stepmon = VerboseMonitor(1)
    stepmon = Monitor() #VerboseMonitor(10)
    from mystic.termination import CandidateRelativeTolerance as CRT

   #from mystic._scipyoptimize import fmin
    from mystic.solvers import fmin, NelderMeadSimplexSolver
   #print(fmin(rosen,x0,retall=0,full_output=0,maxiter=121))
    solver = NelderMeadSimplexSolver(len(x0))
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(min,max)
    solver.SetEvaluationLimits(generations=146)
    solver.SetGenerationMonitor(stepmon)
    solver.enable_signal_handler()
    solver.Solve(rosen, CRT(xtol=4e-5), disp=1)
    print(solver.bestSolution)
   #print("Current function value: %s" % solver.bestEnergy)
   #print("Iterations: %s" % solver.generations)
   #print("Function evaluations: %s" % solver.evaluations)

    times.append(time.time() - start)
    algor.append('Nelder-Mead Simplex\t')

    for k,t in zip(algor,times):
        print("%s\t -- took %s" % (k, t))

# end of file
Example 19
def test_rosenbrock():
    """Test the 2-dimensional Rosenbrock function.

Testing 2-D Rosenbrock:
Expected: x=[1., 1.] and f=0

Using DifferentialEvolutionSolver:
Solution:  [ 1.00000037  1.0000007 ]
f value:  2.29478683682e-13
Iterations:  99
Function evaluations:  3996
Time elapsed:  0.582273006439  seconds

Using DifferentialEvolutionSolver2:
Solution:  [ 0.99999999  0.99999999]
f value:  3.84824937598e-15
Iterations:  100
Function evaluations:  4040
Time elapsed:  0.577210903168  seconds

Using NelderMeadSimplexSolver:
Solution:  [ 0.99999921  1.00000171]
f value:  1.08732211477e-09
Iterations:  70
Function evaluations:  130
Time elapsed:  0.0190329551697  seconds

Using PowellDirectionalSolver:
Solution:  [ 1.  1.]
f value:  0.0
Iterations:  28
Function evaluations:  859
Time elapsed:  0.113857030869  seconds
"""

    print "Testing 2-D Rosenbrock:"
    print "Expected: x=[1., 1.] and f=0"
    from mystic.models import rosen as costfunc
    ndim = 2
    lb = [-5.]*ndim
    ub = [5.]*ndim
    x0 = [2., 3.]
    maxiter = 10000
    
    # DifferentialEvolutionSolver
    print "\nUsing DifferentialEvolutionSolver:"
    npop = 40
    from mystic.solvers import DifferentialEvolutionSolver
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.strategy import Rand1Bin
    esow = Monitor()
    ssow = Monitor() 
    solver = DifferentialEvolutionSolver(ndim, npop)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = COG(1e-10)
    time1 = time.time() # coarse wall-clock timing; time.perf_counter is preferred in Python 3
    solver.Solve(costfunc, term, strategy=Rand1Bin)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    print "Solution: ", sol
    print "f value: ", fx
    print "Iterations: ", solver.generations
    print "Function evaluations: ", len(esow.x)
    print "Time elapsed: ", time_elapsed, " seconds"
    assert almostEqual(fx, 2.29478683682e-13, tol=3e-3)

    # DifferentialEvolutionSolver2
    print "\nUsing DifferentialEvolutionSolver2:"
    npop = 40
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.strategy import Rand1Bin
    esow = Monitor()
    ssow = Monitor() 
    solver = DifferentialEvolutionSolver2(ndim, npop)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = COG(1e-10)
    time1 = time.time() # coarse wall-clock timing; time.perf_counter is preferred in Python 3
    solver.Solve(costfunc, term, strategy=Rand1Bin)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    print "Solution: ", sol
    print "f value: ", fx
    print "Iterations: ", solver.generations
    print "Function evaluations: ", len(esow.x)
    print "Time elapsed: ", time_elapsed, " seconds"
    assert almostEqual(fx, 3.84824937598e-15, tol=3e-3)

    # NelderMeadSimplexSolver
    print "\nUsing NelderMeadSimplexSolver:"
    from mystic.solvers import NelderMeadSimplexSolver
    from mystic.termination import CandidateRelativeTolerance as CRT
    esow = Monitor()
    ssow = Monitor() 
    solver = NelderMeadSimplexSolver(ndim)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = CRT()
    time1 = time.time() # coarse wall-clock timing; time.perf_counter is preferred in Python 3
    solver.Solve(costfunc, term)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    print "Solution: ", sol
    print "f value: ", fx
    print "Iterations: ", solver.generations
    print "Function evaluations: ", len(esow.x)
    print "Time elapsed: ", time_elapsed, " seconds"
    assert almostEqual(fx, 1.08732211477e-09, tol=3e-3)

    # PowellDirectionalSolver
    print "\nUsing PowellDirectionalSolver:"
    from mystic.solvers import PowellDirectionalSolver
    from mystic.termination import NormalizedChangeOverGeneration as NCOG
    esow = Monitor()
    ssow = Monitor() 
    solver = PowellDirectionalSolver(ndim)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = NCOG(1e-10)
    time1 = time.time() # coarse wall-clock timing; time.perf_counter is preferred in Python 3
    solver.Solve(costfunc, term)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    print "Solution: ", sol
    print "f value: ", fx
    print "Iterations: ", solver.generations
    print "Function evaluations: ", len(esow.x)
    print "Time elapsed: ", time_elapsed, " seconds"
    assert almostEqual(fx, 0.0, tol=3e-3)
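As the timing comments above note, bracketing with time.time() gives only
coarse wall-clock intervals; time.perf_counter() is the recommended interval
timer in Python 3. A small helper (illustrative, not part of the original
test) could wrap the pattern:

import time

def timed_solve(solver, costfunc, term, **kwds):
    # perf_counter is monotonic and higher-resolution than time.time,
    # so it is better suited to measuring elapsed solve time
    start = time.perf_counter()
    solver.Solve(costfunc, term, **kwds)
    return solver.Solution(), time.perf_counter() - start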
Example 20
    solution = solver.Solution()

    print(solution)


if __name__ == '__main__':
    from timeit import Timer
    t = Timer("main()", "from __main__ import main")
    timetaken = t.timeit(number=1)
    print("CPU Time: %s" % timetaken)

    from mystic.monitors import Monitor
    from mystic.solvers import NelderMeadSimplexSolver as fmin
    from mystic.termination import CandidateRelativeTolerance as CRT

    import random
    simplex = Monitor()
    esow = Monitor()
    xinit = [random.uniform(0, 5) for j in range(ND)]

    solver = fmin(len(xinit))
    solver.SetInitialPoints(xinit)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(simplex)
    solver.Solve(CostFunction, CRT())
    sol = solver.Solution()
    print("fmin solution: %s" % sol)

# end of file
Example 21
def fmin(cost,
         x0,
         args=(),
         bounds=None,
         xtol=1e-4,
         ftol=1e-4,
         maxiter=None,
         maxfun=None,
         full_output=0,
         disp=1,
         retall=0,
         callback=None,
         **kwds):
    """Minimize a function using the downhill simplex algorithm.
    
Uses a Nelder-Mead simplex algorithm to find the minimum of a function of one
or more variables. This algorithm only uses function values, not derivatives or second derivatives. Mimics the ``scipy.optimize.fmin`` interface.

This algorithm has a long history of successful use in applications. It will
usually be slower than an algorithm that uses first or second derivative
information. In practice it can have poor performance in high-dimensional
problems and is not robust to minimizing complicated functions. Additionally,
there currently is no complete theory describing when the algorithm will
successfully converge to the minimum, or how fast it will if it does. Both the
ftol and xtol criteria must be met for convergence.

Args:
    cost (func): the function or method to be minimized: ``y = cost(x)``.
    x0 (ndarray): the initial guess parameter vector ``x``.
    args (tuple, default=()): extra arguments for cost.
    bounds (list(tuple), default=None): list of pairs of bounds (min,max),
        one for each parameter.
    xtol (float, default=1e-4): acceptable absolute error in ``xopt`` for
        convergence.
    ftol (float, default=1e-4): acceptable absolute error in ``cost(xopt)``
        for convergence.
    maxiter (int, default=None): the maximum number of iterations to perform.
    maxfun (int, default=None): the maximum number of function evaluations.
    full_output (bool, default=False): True if fval and warnflag are desired.
    disp (bool, default=True): if True, print convergence messages.
    retall (bool, default=False): True if allvecs is desired.
    callback (func, default=None): function to call after each iteration. The
        interface is ``callback(xk)``, with xk the current parameter vector.
    handler (bool, default=False): if True, enable handling interrupt signals.
    itermon (monitor, default=None): override the default GenerationMonitor.
    evalmon (monitor, default=None): override the default EvaluationMonitor.
    constraints (func, default=None): a function ``xk' = constraints(xk)``,
        where xk is the current parameter vector, and xk' is a parameter
        vector that satisfies the encoded constraints.
    penalty (func, default=None): a function ``y = penalty(xk)``, where xk is
        the current parameter vector, and ``y == 0`` when the encoded
        constraints are satisfied (and ``y > 0`` otherwise).

Returns:
    ``(xopt, {fopt, iter, funcalls, warnflag}, {allvecs})``

Notes:
    - xopt (*ndarray*): the minimizer of the cost function
    - fopt (*float*): value of cost function at minimum: ``fopt = cost(xopt)``
    - iter (*int*): number of iterations
    - funcalls (*int*): number of function calls
    - warnflag (*int*): warning flag:
        - ``1 : Maximum number of function evaluations``
        - ``2 : Maximum number of iterations``
    - allvecs (*list*): a list of solutions at each iteration
    """
    handler = kwds['handler'] if 'handler' in kwds else False

    from mystic.monitors import Monitor
    stepmon = kwds['itermon'] if 'itermon' in kwds else Monitor()
    evalmon = kwds['evalmon'] if 'evalmon' in kwds else Monitor()

    if xtol:  #if tolerance in x is provided, use CandidateRelativeTolerance
        from mystic.termination import CandidateRelativeTolerance as CRT
        termination = CRT(xtol, ftol)
    else:
        from mystic.termination import VTRChangeOverGeneration
        termination = VTRChangeOverGeneration(ftol)
    solver = NelderMeadSimplexSolver(len(x0))
    solver.SetInitialPoints(x0)
    solver.SetEvaluationLimits(maxiter, maxfun)
    solver.SetEvaluationMonitor(evalmon)
    solver.SetGenerationMonitor(stepmon)
    if 'penalty' in kwds:
        solver.SetPenalty(kwds['penalty'])
    if 'constraints' in kwds:
        solver.SetConstraints(kwds['constraints'])
    if bounds is not None:
        minb, maxb = unpair(bounds)
        solver.SetStrictRanges(minb, maxb)

    if handler: solver.enable_signal_handler()
    solver.Solve(cost, termination=termination, \
                 disp=disp, ExtraArgs=args, callback=callback)
    solution = solver.Solution()

    # code below here pushes output to scipy.optimize.fmin interface
    #x = list(solver.bestSolution)
    x = solver.bestSolution
    fval = solver.bestEnergy
    warnflag = 0
    fcalls = solver.evaluations
    iterations = solver.generations
    allvecs = stepmon.x

    if fcalls >= solver._maxfun:
        warnflag = 1
    elif iterations >= solver._maxiter:
        warnflag = 2

    if full_output:
        retlist = x, fval, iterations, fcalls, warnflag
        if retall:
            retlist += (allvecs, )
    else:
        retlist = x
        if retall:
            retlist = (x, allvecs)

    return retlist
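The itermon and evalmon keywords allow monitors to be swapped in without
touching the solver setup; a sketch using the documented interface:

from mystic.models import rosen
from mystic.solvers import fmin
from mystic.monitors import VerboseMonitor

mon = VerboseMonitor(10)  # report progress every 10 generations
xopt = fmin(rosen, [0.8, 1.2, 0.7], itermon=mon, disp=0)
print("solved: %s" % xopt)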
Example 22
    print("desol: %s" % desol)
    print("dstepmon 50: %s" % dstepmon.x[50])
    print("dstepmon 100: %s" % dstepmon.x[100])
    #
    # this will try Nelder-Mead from a relatively "nearby" point (very sensitive)
    point = [1234., -500., 10., 0.001] # both CG and NM do fine
    point = [1000,-100,0,1] # CG does badly on this one
    # this will try nelder-mead from an unconverged DE solution 
    #point = dstepmon.x[-150]
    #
    simplex, esow = Monitor(), Monitor()
    solver = fmin(len(point))
    solver.SetInitialPoints(point)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(simplex)
    solver.Solve(cost_function, CRT())
    sol = solver.Solution()

    print("\nsimplex solution: %s" % sol)
    #
    solcg = fmin_cg(cost_function, point)
    print("\nConjugate-Gradient (Polak Rubiere) : %s" % solcg)
    #
    if leastsq:
        sollsq = leastsq(vec_cost_function, point)
        sollsq = sollsq[0]
        print("\nLeast Squares (Levenberg Marquardt) : %s" % sollsq)
    #
    legend = ['Noisy data', 'Differential Evolution', 'Nelder-Mead', 'Polak-Ribiere']
    plot_noisy_data()
    plot_sol(desol,'r-')
Example 23
    min_bounds = [0, -1, -300, -1, 0, -1, -100, -inf, -inf]
    max_bounds = [200, 1, 0, 1, 200, 1, 0, inf, inf]

    # configure monitors
    stepmon = VerboseMonitor(100)
    evalmon = Monitor()

    # use Nelder-Mead to solve 8th-order Chebyshev coefficients
    solver = NelderMeadSimplexSolver(ndim)
    solver.SetInitialPoints(x0)
    solver.SetEvaluationLimits(generations=999)
    solver.SetEvaluationMonitor(evalmon)
    solver.SetGenerationMonitor(stepmon)
    solver.SetStrictRanges(min_bounds, max_bounds)
    solver.enable_signal_handler()
    solver.Solve(chebyshev8cost, termination=CRT(1e-4,1e-4), \
                 sigint_callback=plot_solution)
    solution = solver.bestSolution

    # get solved coefficients and Chi-Squared (from solver members)
    iterations = solver.generations
    cost = solver.bestEnergy
    print "Generation %d has best Chi-Squared: %f" % (iterations, cost)
    print "Solved Coefficients:\n %s\n" % poly1d(solver.bestSolution)

    # compare solution with actual 8th-order Chebyshev coefficients
    print "Actual Coefficients:\n %s\n" % poly1d(chebyshev8coeffs)

    # plot solution versus exact coefficients
    plot_solution(solution)
    getch()