Code Example #1
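# Bounded differential-evolution search over [lb, ub]. The names npop, maxiter,
# maxfun, convergence_tol, crossover, percent_change, and scale are expected to
# be defined at module level in the source file (see the sketch after this example).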
def dakota(cost,lb,ub):
  from mystic.solvers import DifferentialEvolutionSolver2
  from mystic.termination import CandidateRelativeTolerance as CRT
  from mystic.strategy import Best1Exp
  from mystic.monitors import VerboseMonitor, Monitor
  from mystic.tools import getch, random_seed

  random_seed(123)

 #stepmon = VerboseMonitor(100)
  stepmon = Monitor()
  evalmon = Monitor()

  ndim = len(lb) # [(1 + RVend) - RVstart] + 1

  solver = DifferentialEvolutionSolver2(ndim,npop)
  solver.SetRandomInitialPoints(min=lb,max=ub)
  solver.SetStrictRanges(min=lb,max=ub)
  solver.SetEvaluationLimits(maxiter,maxfun)
  solver.SetEvaluationMonitor(evalmon)
  solver.SetGenerationMonitor(stepmon)

  tol = convergence_tol
  solver.Solve(cost,termination=CRT(tol,tol),strategy=Best1Exp, \
               CrossProbability=crossover,ScalingFactor=percent_change)

  print solver.bestSolution
  diameter = -solver.bestEnergy / scale
  func_evals = solver.evaluations
  return diameter, func_evals
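The snippet above (and several of the snippets below) relies on solver settings defined at module scope in its project. A minimal sketch of those settings follows; the names are taken from the snippet, but the values are illustrative assumptions, not values from the source.

# Illustrative module-level settings assumed by the snippet above (values are guesses).
npop = 40               # size of the trial population
maxiter = 1000          # generation limit passed to SetEvaluationLimits
maxfun = 1e+6           # evaluation limit passed to SetEvaluationLimits
convergence_tol = 1e-4  # tolerance handed to the CRT termination condition
crossover = 0.9         # passed to Solve as CrossProbability
percent_change = 0.9    # passed to Solve as ScalingFactor
scale = 1.0             # divisor used to turn bestEnergy into the returned diameter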
Code Example #2
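# Parallel multistart: local_optimize is mapped over npts random starting points
# with a pyina MPI pool. samplepts, local_optimize, npts, and nnodes are defined
# elsewhere in the source file.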
def optimize(cost,lower,upper):
  from mystic.tools import random_seed
  from pyina.launchers import Mpi as Pool
  random_seed(123)

  # generate a set of random starting points
  initial_values = samplepts(lower,upper,npts)

  # run optimizer for each grid point
  lb = [lower for i in range(len(initial_values))]
  ub = [upper for i in range(len(initial_values))]
  cf = [cost for i in range(len(initial_values))]
  # map:: params, energy, func_evals = local_optimize(cost,x0,lb,ub)
  results = Pool(nnodes).map(local_optimize, cf, initial_values, lb, ub)
  #print "results = %s" % results

  # get the results with the lowest energy
  best = list(results[0][0]), results[0][1]
  func_evals = results[0][2]
  for result in results[1:]:
    func_evals += result[2] # add function evaluations
    if result[1] < best[1]: # compare energy
      best = list(result[0]), result[1]

  # return best
  print "solved: %s" % best[0]
  scale = 1.0
  diameter_squared = -best[1] / scale  #XXX: scale != 0
  return diameter_squared, func_evals
Code Example #4
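# Same differential-evolution setup as Example #1, but it also returns the best
# solution, matching the (params, energy, func_evals) signature expected by the
# local_optimize map in Example #2.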
def optimize(cost,lb,ub):
  from mystic.solvers import DifferentialEvolutionSolver2
  from mystic.termination import CandidateRelativeTolerance as CRT
  from mystic.strategy import Best1Exp
  from mystic.monitors import VerboseMonitor, Monitor
  from mystic.tools import random_seed

  random_seed(123)

 #stepmon = VerboseMonitor(100)
  stepmon = Monitor()
  evalmon = Monitor()

  ndim = len(lb) # [(1 + RVend) - RVstart] + 1

  solver = DifferentialEvolutionSolver2(ndim,npop)
  solver.SetRandomInitialPoints(min=lb,max=ub)
  solver.SetStrictRanges(min=lb,max=ub)
  solver.SetEvaluationLimits(maxiter,maxfun)
  solver.SetEvaluationMonitor(evalmon)
  solver.SetGenerationMonitor(stepmon)

  tol = convergence_tol
  solver.Solve(cost,termination=CRT(tol,tol),strategy=Best1Exp, \
               CrossProbability=crossover,ScalingFactor=percent_change)

  solved = solver.bestSolution
 #if DEBUG: print("solved: %s" % solved)
  diameter_squared = -solver.bestEnergy / scale  #XXX: scale != 0
  func_evals = solver.evaluations
  return solved, diameter_squared, func_evals
Code Example #5
File: MM_surrogate_diam.py  Project: Magellen/mystic
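# As in Example #4, but SetMapper(Pool().map) evaluates each generation's trial
# candidates in parallel with a pathos ProcessPool.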
def optimize(cost,lb,ub):
  from pathos.pools import ProcessPool as Pool
  from mystic.solvers import DifferentialEvolutionSolver2
  from mystic.termination import CandidateRelativeTolerance as CRT
  from mystic.strategy import Best1Exp
  from mystic.monitors import VerboseMonitor, Monitor
  from mystic.tools import random_seed

  random_seed(123)

 #stepmon = VerboseMonitor(100)
  stepmon = Monitor()
  evalmon = Monitor()

  ndim = len(lb) # [(1 + RVend) - RVstart] + 1

  solver = DifferentialEvolutionSolver2(ndim,npop)
  solver.SetRandomInitialPoints(min=lb,max=ub)
  solver.SetStrictRanges(min=lb,max=ub)
  solver.SetEvaluationLimits(maxiter,maxfun)
  solver.SetEvaluationMonitor(evalmon)
  solver.SetGenerationMonitor(stepmon)
  solver.SetMapper(Pool().map)

  tol = convergence_tol
  solver.Solve(cost,termination=CRT(tol,tol),strategy=Best1Exp, \
               CrossProbability=crossover,ScalingFactor=percent_change)

  print("solved: %s" % solver.bestSolution)
  scale = 1.0
  diameter_squared = -solver.bestEnergy / scale  #XXX: scale != 0
  func_evals = solver.evaluations
  return diameter_squared, func_evals
Code Example #6
File: measures.py  Project: resurgo-genetics/mystic
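# Constrained differential evolution terminated by VTR(tolerance). npop, maxiter,
# maxfun, crossover, percent_change, and debug come from the enclosing scope; see
# Examples #25 and #32 for this routine nested inside impose_expectation.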
  def optimize(cost, bounds, tolerance, _constraints):
    (lb,ub) = bounds
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import VTR
    from mystic.strategy import Best1Exp
    from mystic.monitors import VerboseMonitor, Monitor
    from mystic.tools import random_seed
    if debug: random_seed(123)
    evalmon = Monitor();  stepmon = Monitor()
    if debug: stepmon = VerboseMonitor(10)

    ndim = len(lb)
    solver = DifferentialEvolutionSolver2(ndim,npop)
    solver.SetRandomInitialPoints(min=lb,max=ub)
    solver.SetStrictRanges(min=lb,max=ub)
    solver.SetEvaluationLimits(maxiter,maxfun)
    solver.SetEvaluationMonitor(evalmon)
    solver.SetGenerationMonitor(stepmon)
    solver.Solve(cost,termination=VTR(tolerance),strategy=Best1Exp, \
                 CrossProbability=crossover,ScalingFactor=percent_change, \
                 constraints = _constraints)

    solved = solver.Solution()
    diameter_squared = solver.bestEnergy
    func_evals = len(evalmon)
    return solved, diameter_squared, func_evals
Code Example #7
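# Pyre-style application main(): the solver class is looked up by name from
# self.inventory, then configured from attributes of self.mod, with try/except
# used to fall back when an attribute is absent.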
    def main(self, *args, **kwds):
        # general solver

        # exception for DifferentialEvolutionSolver2
        if self.inventory.solver == 'DifferentialEvolution2':
            solvername = DifferentialEvolutionSolver2
        else:
            solvername = eval(self.inventory.solver + 'Solver')

        # create the solver
        try:
            NP = self.mod.NP
            solver = solvername(self.mod.ND, NP)
        except:
            solver = solvername(self.mod.ND)

        costfunction = self.mod.cost
        termination = self.mod.termination

        from mystic.tools import random_seed
        random_seed(123)

        # set initial points
        try:
            solver.SetInitialPoints(self.mod.x0)
        except:
            solver.SetRandomInitialPoints(self.mod.min, self.mod.max)

        # set maximum number of iterations
        try:
            maxiter = self.mod.maxiter
            solver.SetEvaluationLimits(generations=maxiter)
        except:
            pass

        # set bounds, if applicable
        try:
            min_bounds = self.mod.min_bounds
            max_bounds = self.mod.max_bounds
            solver.SetStrictRanges(min_bounds, max_bounds)
        except:
            pass

        # additional arguments/kwds to the Solve() call
        try:
            solverkwds = self.mod.solverkwds
        except:
            solverkwds = {}

        solver.Solve(costfunction, termination, **solverkwds)
        self.solution = solver.Solution()
        return
Code Example #8
File: testsolvers_pyre.py  Project: Magellen/mystic
    def main(self, *args, **kwds):
        # general solver

        # exception for DifferentialEvolutionSolver2
        if self.inventory.solver == 'DifferentialEvolution2':
            solvername = DifferentialEvolutionSolver2
        else:
            solvername = eval(self.inventory.solver + 'Solver')

        # create the solver
        try:
            NP = self.mod.NP
            solver = solvername(self.mod.ND, NP)
        except:
            solver = solvername(self.mod.ND)

        costfunction = self.mod.cost
        termination = self.mod.termination

        from mystic.tools import random_seed
        random_seed(123)

        # set initial points
        try:
            solver.SetInitialPoints(self.mod.x0)
        except:
            solver.SetRandomInitialPoints(self.mod.min, self.mod.max)

        # set maximum number of iterations
        try:
            maxiter = self.mod.maxiter
            solver.SetEvaluationLimits(generations=maxiter)
        except:
            pass

        # set bounds, if applicable
        try:
            min_bounds = self.mod.min_bounds
            max_bounds = self.mod.max_bounds
            solver.SetStrictRanges(min_bounds, max_bounds)
        except:
            pass

        # additional arguments/kwds to the Solve() call
        try:
            solverkwds = self.mod.solverkwds
        except:
            solverkwds = {}

        solver.Solve(costfunction, termination, **solverkwds)
        self.solution = solver.Solution()
        return
Code Example #9
File: rosetta_mogi.py  Project: nadiiaaii/mystic
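# Nelder-Mead simplex fit starting from `point`, with verbose generation logging;
# cost_function is defined elsewhere in the script.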
def mystic_optimize(point):
    from mystic.monitors import Monitor, VerboseMonitor
    from mystic.tools import getch, random_seed
    random_seed(123)
    from mystic.solvers import NelderMeadSimplexSolver as fmin
    from mystic.termination import CandidateRelativeTolerance as CRT
    simplex, esow = VerboseMonitor(50), Monitor()
    solver = fmin(len(point))
    solver.SetInitialPoints(point)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(simplex)
    solver.Solve(cost_function, CRT())
    solution = solver.Solution()
    return solution
Code Example #11
File: rosetta_mogi.py  Project: nadiiaaii/mystic
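# Differential-evolution counterpart of mystic_optimize: NPOP=50, stopping when
# the best energy stops improving over 100 generations (ChangeOverGeneration).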
def mystic_optimize2(point):
    from mystic.monitors import Monitor, VerboseMonitor
    from mystic.tools import getch, random_seed
    random_seed(123)
    from mystic.solvers import DifferentialEvolutionSolver as de
    from mystic.termination import ChangeOverGeneration as COG
    NPOP = 50
    simplex, esow = VerboseMonitor(50), Monitor()
    solver = de(len(point),NPOP)
    solver.SetInitialPoints(point)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(simplex)
    solver.Solve(cost_function, COG(generations=100), \
                 CrossProbability=0.5, ScalingFactor=0.5)
    solution = solver.Solution()
    return solution
Code Example #13
File: samples.py  Project: agamdua/mystic
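# Sampling-based probability-of-failure check: `sample` and `sampled_pof` are
# defined elsewhere in samples.py, and both runs are re-seeded so the two
# methods see the same random draws.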
def __test2():
  # From branches/UQ/math/cut.py
  from mystic.tools import random_seed
  random_seed(123)
  lower = [-60.0, -10.0, -50.0]
  upper = [105.0, 30.0, 75.0]

  def model(x):
    x1,x2,x3 = x
    if x1 > (x2 + x3): return x1*x2 - x3
    return 0.0

  failure,success = sample(model,lower,upper)
  pof = float(failure) / float(failure + success)
  print "PoF using method 1: %s" % pof
  random_seed(123)
  print "PoF using method 2: %s" % sampled_pof(model,lower,upper)
Code Example #14
def __test2():
    # From branches/UQ/math/cut.py
    from mystic.tools import random_seed
    random_seed(123)
    lower = [-60.0, -10.0, -50.0]
    upper = [105.0, 30.0, 75.0]

    def model(x):
        x1, x2, x3 = x
        if x1 > (x2 + x3): return x1 * x2 - x3
        return 0.0

    failure, success = sample(model, lower, upper)
    pof = float(failure) / float(failure + success)
    print "PoF using method 1: %s" % pof
    random_seed(123)
    print "PoF using method 2: %s" % sampled_pof(model, lower, upper)
Code Example #16
File: collapse_measures.py  Project: Magellen/mystic
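# Constrained differential evolution with a compound termination: stop on
# ChangeOverGeneration or on a detected weight/position collapse. npts, npop,
# maxiter, maxfun, convergence_tol, ngen, crossover, percent_change, MINMAX,
# debug, and verbose are globals in the source file; results are written out
# via write_support_file.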
def optimize(cost,_bounds,_constraints):
  from mystic.solvers import DifferentialEvolutionSolver2
  from mystic.termination import ChangeOverGeneration as COG
  from mystic.strategy import Best1Exp
  from mystic.monitors import VerboseMonitor, Monitor
  from mystic.tools import random_seed
  from mystic.termination import Or, CollapseWeight, CollapsePosition, state


  if debug:
      random_seed(123) # or 666 to force impose_unweighted reweighting
      stepmon = VerboseMonitor(1,1)
  else:
      stepmon = VerboseMonitor(10) if verbose else Monitor()
  stepmon._npts = npts
  evalmon = Monitor()

  lb,ub = _bounds
  ndim = len(lb)

  solver = DifferentialEvolutionSolver2(ndim,npop)
  solver.SetRandomInitialPoints(min=lb,max=ub)
  solver.SetStrictRanges(min=lb,max=ub)
  solver.SetEvaluationLimits(maxiter,maxfun)
  solver.SetEvaluationMonitor(evalmon)
  solver.SetGenerationMonitor(stepmon)
  solver.SetConstraints(_constraints)

  tol = convergence_tol
  term = Or(COG(tol,ngen), CollapseWeight(), CollapsePosition())
  solver.Solve(cost,termination=term,strategy=Best1Exp, disp=verbose, \
               CrossProbability=crossover,ScalingFactor=percent_change)
 #while collapse and solver.Collapse(verbose): #XXX: total_evaluations?
 #    if debug: print(state(solver._termination).keys())
 #    solver.Solve() #XXX: cost, term, strategy, cross, scale ?
 #    if debug: solver.SaveSolver('debug.pkl')

  solved = solver.bestSolution
 #print("solved: %s" % solver.Solution())
  func_max = MINMAX * solver.bestEnergy       #NOTE: -solution assumes -Max
 #func_max = 1.0 + MINMAX*solver.bestEnergy   #NOTE: 1-sol => 1-success = fail
  func_evals = solver.evaluations
  from mystic.munge import write_support_file
  write_support_file(stepmon, npts=npts)
  return solved, func_max, func_evals
Code Example #17
File: test_SOW.py  Project: shirangi/mystic
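# Monitor test: the diffenv flag selects DifferentialEvolutionSolver2,
# NelderMeadSimplexSolver, or PowellDirectionalSolver, and the passed-in
# monitor is attached as the evaluation monitor.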
def test2(monitor, diffenv=None):
    if diffenv == True:
        #from mystic.solvers import DifferentialEvolutionSolver as DE
        from mystic.solvers import DifferentialEvolutionSolver2 as DE
    elif diffenv == False:
        from mystic.solvers import NelderMeadSimplexSolver as noDE
    else:
        from mystic.solvers import PowellDirectionalSolver as noDE
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.tools import getch, random_seed

    random_seed(123)

    lb = [-100, -100, -100]
    ub = [1000, 1000, 1000]
    ndim = len(lb)
    npop = 5
    maxiter = 10
    maxfun = 1e+6
    convergence_tol = 1e-10
    ngen = 100
    crossover = 0.9
    percent_change = 0.9

    def cost(x):
        ax, bx, c = x
        return (ax)**2 - bx + c

    if diffenv == True:
        solver = DE(ndim, npop)
    else:
        solver = noDE(ndim)
    solver.SetRandomInitialPoints(min=lb, max=ub)
    solver.SetStrictRanges(min=lb, max=ub)
    solver.SetEvaluationLimits(maxiter, maxfun)
    solver.SetEvaluationMonitor(monitor)
    #solver.SetGenerationMonitor(monitor)

    tol = convergence_tol
    solver.Solve(cost, termination=COG(tol, ngen))

    solved = solver.Solution()
    monitor.info("solved: %s" % solved)
    func_max = -solver.bestEnergy
    return solved, func_max
Code Example #18
File: test_SOW.py  Project: Magellen/mystic
def test2(monitor, diffenv=None):
  if diffenv == True:
   #from mystic.solvers import DifferentialEvolutionSolver as DE
    from mystic.solvers import DifferentialEvolutionSolver2 as DE
  elif diffenv == False:
    from mystic.solvers import NelderMeadSimplexSolver as noDE
  else:
    from mystic.solvers import PowellDirectionalSolver as noDE
  from mystic.termination import ChangeOverGeneration as COG
  from mystic.tools import getch, random_seed

  random_seed(123)

  lb = [-100,-100,-100]
  ub = [1000,1000,1000]
  ndim = len(lb)
  npop = 5
  maxiter = 10
  maxfun = 1e+6
  convergence_tol = 1e-10; ngen = 100
  crossover = 0.9
  percent_change = 0.9

  def cost(x):
    ax,bx,c = x
    return (ax)**2 - bx + c

  if diffenv == True:
    solver = DE(ndim,npop)
  else:
    solver = noDE(ndim)
  solver.SetRandomInitialPoints(min=lb,max=ub)
  solver.SetStrictRanges(min=lb,max=ub)
  solver.SetEvaluationLimits(maxiter,maxfun)
  solver.SetEvaluationMonitor(monitor)
 #solver.SetGenerationMonitor(monitor)

  tol = convergence_tol
  solver.Solve(cost, termination=COG(tol,ngen))

  solved = solver.Solution()
  monitor.info("solved: %s" % solved)
  func_max = -solver.bestEnergy 
  return solved, func_max
Code Example #19
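# Grid multistart: bin midpoints define the starting points, and local_optimize
# is mapped over them with a pyina TorqueMpi pool; local_optimize, nnodes,
# queue, and timelimit are imported from pool_helper.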
def optimize(cost, lower, upper, nbins):
    from mystic.tools import random_seed
    from pyina.launchers import TorqueMpi as Pool
    random_seed(123)

    # generate arrays of points defining a grid in parameter space
    grid_dimensions = len(lower)
    bins = []
    for i in range(grid_dimensions):
        step = abs(upper[i] - lower[i]) / nbins[i]
        bins.append([lower[i] + (j + 0.5) * step for j in range(nbins[i])])

    # build a grid of starting points
    from mystic.math.grid import gridpts
    from pool_helper import local_optimize
    from pool_helper import nnodes, queue, timelimit
    initial_values = gridpts(bins)

    # run optimizer for each grid point
    lb = [lower for i in range(len(initial_values))]
    ub = [upper for i in range(len(initial_values))]
    cf = [cost for i in range(len(initial_values))]
    # map:: params, energy, func_evals = local_optimize(cost,x0,lb,ub)
    config = {'queue': queue, 'timelimit': timelimit}
    results = Pool(nnodes, **config).map(local_optimize, cf, initial_values,
                                         lb, ub)
    #print("results = %s" % results)

    # get the results with the lowest energy
    best = list(results[0][0]), results[0][1]
    func_evals = results[0][2]
    for result in results[1:]:
        func_evals += result[2]  # add function evaluations
        if result[1] < best[1]:  # compare energy
            best = list(result[0]), result[1]

    # return best
    print("solved: %s" % best[0])
    scale = 1.0
    diameter_squared = -best[1] / scale  #XXX: scale != 0
    return diameter_squared, func_evals
Code Example #20
def optimize(cost,lower,upper,nbins):
  from mystic.tools import random_seed
  from pyina.launchers import TorqueMpi as Pool
  random_seed(123)

  # generate arrays of points defining a grid in parameter space
  grid_dimensions = len(lower)
  bins = []
  for i in range(grid_dimensions):
    step = abs(upper[i] - lower[i])/nbins[i]
    bins.append( [lower[i] + (j+0.5)*step for j in range(nbins[i])] )

  # build a grid of starting points
  from mystic.math.grid import gridpts
  from pool_helper import local_optimize
  from pool_helper import nnodes, queue, timelimit
  initial_values = gridpts(bins)

  # run optimizer for each grid point
  lb = [lower for i in range(len(initial_values))]
  ub = [upper for i in range(len(initial_values))]
  cf = [cost for i in range(len(initial_values))]
  # map:: params, energy, func_evals = local_optimize(cost,x0,lb,ub)
  config = {'queue':queue, 'timelimit':timelimit}
  results = Pool(nnodes, **config).map(local_optimize,cf,initial_values,lb,ub)
  #print "results = %s" % results

  # get the results with the lowest energy
  best = list(results[0][0]), results[0][1]
  func_evals = results[0][2]
  for result in results[1:]:
    func_evals += result[2] # add function evaluations
    if result[1] < best[1]: # compare energy
      best = list(result[0]), result[1]

  # return best
  print "solved: %s" % best[0]
  scale = 1.0
  diameter_squared = -best[1] / scale  #XXX: scale != 0
  return diameter_squared, func_evals
Code Example #21
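# Excerpted mid-script from a monitor test: `b` and `c` appear to be Monitor
# instances created earlier in the script. The second half sets up a bounded
# Nelder-Mead solver on a simple quadratic cost.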
c([1,],1)
c([2,],2)
c.prepend(b)
assert len(b) == 1
assert len(c) == 3
c([3,],3)
assert c.x == [[0], [1], [2], [3]]
assert len(c) == 4

c.prepend(c)
assert c.x == [[0], [1], [2], [3], [0], [1], [2], [3]]
assert len(c) == 8

from mystic.solvers import NelderMeadSimplexSolver
from mystic.tools import random_seed
random_seed(123)

lb = [-100,-100,-100]
ub = [1000,1000,1000]
ndim = len(lb)
maxiter = 10
maxfun = 1e+6

def cost(x):
  ax,bx,c = x
  return (ax)**2 - bx + c

monitor = Monitor()
solver = NelderMeadSimplexSolver(ndim)
solver.SetRandomInitialPoints(min=lb,max=ub)
solver.SetStrictRanges(min=lb,max=ub)
Code Example #22
File: example07.py  Project: murphyliu81/mystic
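# Tail of a plotting helper plus the driver: diffev fits 8th-order Chebyshev
# coefficients from a random initial range, then prints the fitted polynomial
# and compares it against the exact coefficients.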
    plt.legend(["Exact", "Fitted"])
    plt.axis([-1.4, 1.4, -2, 8], 'k-')
    plt.draw()
    plt.pause(0.001)
    return


if __name__ == '__main__':

    print("Differential Evolution")
    print("======================")

    # set range for random initial guess
    ndim = 9
    x0 = [(-100, 100)] * ndim
    random_seed(321)

    # draw frame and exact coefficients
    plot_exact()

    # use DE to solve 8th-order Chebyshev coefficients
    npop = 10 * ndim
    solution = diffev(chebyshev8cost, x0, npop)

    # use pretty print for polynomials
    print(poly1d(solution))

    # compare solution with actual 8th-order Chebyshev coefficients
    print("\nActual Coefficients:\n %s\n" % poly1d(chebyshev8coeffs))

    # plot solution versus exact coefficients
Code Example #23
File: ezmap_desolve.py  Project: agamdua/mystic
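# Runs the Chebyshev fit sequentially and then again in parallel with a mapper
# pool, re-seeding so both runs start from the same population; ND, Pool, VTR,
# Best1Exp, ChebyshevCost, and the monitors are defined earlier in the script.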
NP = 40
MAX_GENERATIONS = NP*NP
NNODES = NP/5

seed = 100


if __name__=='__main__':
    def print_solution(func):
        print poly1d(func)
        return

    psow = VerboseMonitor(10)
    ssow = VerboseMonitor(10)

    random_seed(seed)
    print "first sequential..."
    solver = DifferentialEvolutionSolver2(ND,NP)  #XXX: sequential
    solver.SetRandomInitialPoints(min=[-100.0]*ND, max=[100.0]*ND)
    solver.SetEvaluationLimits(generations=MAX_GENERATIONS)
    solver.SetGenerationMonitor(ssow)
    solver.Solve(ChebyshevCost, VTR(0.01), strategy=Best1Exp, \
                 CrossProbability=1.0, ScalingFactor=0.9, disp=1)
    print ""
    print_solution( solver.bestSolution )

    #'''
    random_seed(seed)
    print "\n and now parallel..."
    solver2 = DifferentialEvolutionSolver2(ND,NP)  #XXX: parallel
    solver2.SetMapper(Pool(NNODES).map)
Code Example #24
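# Constraints test: the with_mean and with_spread decorators force candidates
# onto mean(x) == 5 and spread(x) == 5, and the wrapped solver must still
# minimize |sum(x) - 5|.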
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation.
# License: 3-clause BSD.  The full license text is available at:
#  - https://github.com/uqfoundation/mystic/blob/master/LICENSE

from mystic.constraints import *
from mystic.solvers import *
from mystic.math import almostEqual
from mystic.tools import random_seed
random_seed(42)

def test_one_liner(solver):

  from mystic.math.measures import mean, spread
  @with_spread(5.0)
  @with_mean(5.0)
  def constraints(x):
    return x

  def cost(x):
    return abs(sum(x) - 5.0)

  from numpy import array
  x = array([1,2,3,4,5])
  y = solver(cost, x, constraints=constraints, disp=False)

  assert almostEqual(mean(y), 5.0, tol=1e-15)
  assert almostEqual(spread(y), 5.0, tol=1e-15)
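For context, one way this helper might be driven, assuming mystic's fmin_powell one-liner; the actual caller is not shown in the snippet.

from mystic.solvers import fmin_powell

# hypothetical invocation; any mystic one-liner that accepts constraints= and
# disp= keyword arguments should work here
test_one_liner(fmin_powell)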
Code Example #25
File: measures.py  Project: jcfr/mystic
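# impose_expectation builds a cost |E[f] - m|**2 over a product measure and
# minimizes it with the nested optimize() at the end of the snippet; compare
# Example #32, a later variant that exposes npop, maxiter, and maxfun as
# keyword arguments.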
def impose_expectation(param, f, npts, bounds=None, weights=None, **kwds):
  """impose a given expextation value (m +/- D) on a given function f.
Optimiziation on f over the given bounds seeks a mean 'm' with deviation 'D'.
  (this function is not 'mean-, range-, or variance-preserving')

Inputs:
    param -- a tuple of target parameters: param = (mean, deviation)
    f -- a function that takes a list and returns a number
    npts -- a tuple of dimensions of the target product measure
    bounds -- a tuple of sample bounds:   bounds = (lower_bounds, upper_bounds)
    weights -- a list of sample weights

Additional Inputs:
    constraints -- a function that takes a nested list of N x 1D discrete
        measure positions and weights   x' = constraints(x, w)

Outputs:
    samples -- a list of sample positions

For example:
    >>> # provide the dimensions and bounds
    >>> nx = 3;  ny = 2;  nz = 1
    >>> x_lb = [10.0];  y_lb = [0.0];  z_lb = [10.0]
    >>> x_ub = [50.0];  y_ub = [9.0];  z_ub = [90.0]
    >>> 
    >>> # prepare the bounds
    >>> lb = (nx * x_lb) + (ny * y_lb) + (nz * z_lb)
    >>> ub = (nx * x_ub) + (ny * y_ub) + (nz * z_ub)
    >>>
    >>> # generate a list of samples with mean +/- dev imposed
    >>> mean = 2.0;  dev = 0.01
    >>> samples = impose_expectation((mean,dev), f, (nx,ny,nz), (lb,ub))
    >>>
    >>> # test the results by calculating the expectation value for the samples
    >>> expectation(f, samples)
    >>> 2.00001001012246015
"""
  # param[0] is the target mean
  # param[1] is the acceptable deviation from the target mean

  # FIXME: the following is a HACK to recover from lost 'weights' information
  #        we 'mimic' discrete measures using the product measure weights
  # plug in the 'constraints' function:  samples' = constrain(samples, weights)
  constrain = None   # default is no constraints
  if 'constraints' in kwds: constrain = kwds['constraints']
  if not constrain:  # if None (default), there are no constraints
    constraints = lambda x: x
  else: #XXX: better to use a standard "xk' = constrain(xk)" interface ?
    def constraints(rv):
      coords = _pack( _nested(rv,npts) )
      coords = zip(*coords)              # 'mimic' a nested list
      coords = constrain(coords, [weights for i in range(len(coords))])
      coords = zip(*coords)              # revert back to a packed list
      return _flat( _unpack(coords,npts) )

  # construct cost function to reduce deviation from expectation value
  def cost(rv):
    """compute cost from a 1-d array of model parameters,
    where:  cost = | E[model] - m |**2 """
    # from mystic.math.measures import _pack, _nested, expectation
    samples = _pack( _nested(rv,npts) )
    Ex = expectation(f, samples, weights)
    return (Ex - param[0])**2

  # if bounds are not set, use the default optimizer bounds
  if not bounds:
    lower_bounds = []; upper_bounds = []
    for n in npts:
      lower_bounds += [None]*n
      upper_bounds += [None]*n
  else: 
    lower_bounds, upper_bounds = bounds

  # construct and configure optimizer
  debug = kwds['debug'] if 'debug' in kwds else False
  npop = 200
  maxiter = 1000;  maxfun = 1e+6
  crossover = 0.9; percent_change = 0.9

  def optimize(cost,(lb,ub),tolerance,_constraints):
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import VTR
    from mystic.strategy import Best1Exp
    from mystic.monitors import VerboseMonitor, Monitor
    from mystic.tools import random_seed
    if debug: random_seed(123)
    evalmon = Monitor();  stepmon = Monitor()
    if debug: stepmon = VerboseMonitor(10)

    ndim = len(lb)
    solver = DifferentialEvolutionSolver2(ndim,npop)
    solver.SetRandomInitialPoints(min=lb,max=ub)
    solver.SetStrictRanges(min=lb,max=ub)
    solver.SetEvaluationLimits(maxiter,maxfun)
    solver.SetEvaluationMonitor(evalmon)
    solver.SetGenerationMonitor(stepmon)
    solver.Solve(cost,termination=VTR(tolerance),strategy=Best1Exp, \
                 CrossProbability=crossover,ScalingFactor=percent_change, \
                 constraints = _constraints)

    solved = solver.Solution()
    diameter_squared = solver.bestEnergy
    func_evals = len(evalmon)
    return solved, diameter_squared, func_evals
Code Example #26
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2019 The Uncertainty Quantification Foundation.
# License: 3-clause BSD.  The full license text is available at:
#  - https://github.com/uqfoundation/mystic/blob/master/LICENSE

from mystic.constraints import *
from mystic.penalty import quadratic_equality
from mystic.coupler import inner
from mystic.math import almostEqual
from mystic.tools import random_seed
random_seed(213)

def test_penalize():

  from mystic.math.measures import mean, spread
  def mean_constraint(x, target):
    return mean(x) - target

  def range_constraint(x, target):
    return spread(x) - target

  @quadratic_equality(condition=range_constraint, kwds={'target':5.0})
  @quadratic_equality(condition=mean_constraint, kwds={'target':5.0})
  def penalty(x):
    return 0.0

  def cost(x):
    return abs(sum(x) - 5.0)
Code Example #27
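# Unit-test helper: runs the configured solver with stdout trapped, then sanity-
# checks the monitors, generation and evaluation counts, and (when not
# terminated early) the solution against self.exact.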
    def _run_solver(self, early_terminate=False, **kwds):
        from mystic.monitors import Monitor
        import numpy
        from mystic.tools import random_seed
        seed = 111 if self.maxiter is None else 321  #XXX: good numbers...
        random_seed(seed)
        esow = Monitor()
        ssow = Monitor()

        solver = self.solver
        solver.SetRandomInitialPoints(min=self.min, max=self.max)
        if self.usebounds: solver.SetStrictRanges(self.min, self.max)
        if self.uselimits:
            solver.SetEvaluationLimits(self.maxiter, self.maxfun)
        if self.useevalmon: solver.SetEvaluationMonitor(esow)
        if self.usestepmon: solver.SetGenerationMonitor(ssow)
        #### run solver, but trap output
        _stdout = trap_stdout()
        solver.Solve(self.costfunction, self.term, **kwds)
        out = release_stdout(_stdout)
        ################################
        sol = solver.Solution()

        iter = 1
        #if self.uselimits and self.maxiter == 0: iter=0
        # sanity check solver internals
        self.assertTrue(solver.generations == len(solver._stepmon._y) - iter)
        self.assertTrue(list(
            solver.bestSolution) == solver._stepmon.x[-1])  #XXX
        self.assertTrue(solver.bestEnergy == solver._stepmon.y[-1])
        self.assertTrue(solver.solution_history == solver._stepmon.x)
        self.assertTrue(solver.energy_history == solver._stepmon.y)
        if self.usestepmon:
            self.assertTrue(ssow.x == solver._stepmon.x)
            self.assertTrue(ssow.y == solver._stepmon.y)
            self.assertTrue(ssow._y == solver._stepmon._y)
        if self.useevalmon:
            self.assertTrue(solver.evaluations == len(solver._evalmon._y))
            self.assertTrue(esow.x == solver._evalmon.x)
            self.assertTrue(esow.y == solver._evalmon.y)
            self.assertTrue(esow._y == solver._evalmon._y)

        # Fail appropriately for solver/termination mismatch
        if early_terminate:
            self.assertTrue(solver.generations < 2)
            warn = "Warning: Invalid termination condition (nPop < 2)"
            self.assertTrue(warn in out)
            return

        g = solver.generations
        calls = [(g + 1) * self.NP, (2 * g) + 1]
        iters = [g]
        # Test early terminations
        if self.uselimits and self.maxfun == 0:
            calls += [1, 20]  #XXX: scipy*
            iters += [1]  #XXX: scipy*
            self.assertTrue(solver.evaluations in calls)
            self.assertTrue(solver.generations in iters)
            return
        if self.uselimits and self.maxfun == 1:
            calls += [1, 20]  #XXX: scipy*
            iters += [1]  #XXX: scipy*
            self.assertTrue(solver.evaluations in calls)
            self.assertTrue(solver.generations in iters)
            return
        if self.uselimits and self.maxiter == 0:
            calls += [1, 20]  #XXX: scipy*
            iters += [1]  #XXX: scipy*
            self.assertTrue(solver.evaluations in calls)
            self.assertTrue(solver.generations in iters)
            return
        if self.uselimits and self.maxiter == 1:
            calls += [20]  #Powell's
            self.assertTrue(solver.evaluations in calls)
            self.assertTrue(solver.generations in iters)
            return
        if self.uselimits and self.maxiter and 2 <= self.maxiter <= 5:
            calls += [52, 79, 107, 141]  #Powell's
            self.assertTrue(solver.evaluations in calls)
            self.assertTrue(solver.generations in iters)
            return

        # Verify solution is close to exact

    #print(sol)
        for i in range(len(sol)):
            self.assertAlmostEqual(sol[i], self.exact[i], self.precision)
        return
Code Example #28
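# Header and first test of a symbolic-constraints test module; test_sumt1 is
# truncated here after defining the cost, the constraint strings, and the
# starting point.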
# Copyright (c) 2010-2016 California Institute of Technology.
# Copyright (c) 2016-2020 The Uncertainty Quantification Foundation.
# License: 3-clause BSD.  The full license text is available at:
#
# coded by Alta Fang, 2010
"""
A few basic symbolic constraints tests, but in no way a comprehensive suite.
"""
from numpy import asarray
#from mystic.restarts import sumt
from mystic.constraints import issolution, as_constraint, solve as _solve
from mystic.coupler import inner
from mystic.symbolic import *
from mystic.tools import random_seed
from mystic.math import almostEqual
random_seed(24)


def test_sumt1():
    def costfunc(x):
        x1 = x[0]
        x2 = x[1]
        return  x1**4 - 2.*x1**2*x2 + x1**2 + x1*x2**2 - 2.*x1 + 4.

    constraints_string = """
    x1**2 + x2**2 - 2. = 0.
    0.25*x1**2 + 0.75*x2**2 - 1. <= 0.
        """

    ndim = 2
    x0 = [3., 2.]
Code Example #29
File: example07.py  Project: jcfr/mystic
    pylab.plot(x,y,style)
    pylab.legend(["Exact","Fitted"])
    pylab.axis([-1.4,1.4,-2,8],'k-')
    pylab.draw()
    return


if __name__ == '__main__':

    print "Differential Evolution"
    print "======================"

    # set range for random initial guess
    ndim = 9
    x0 = [(-100,100)]*ndim
    random_seed(321)

    # draw frame and exact coefficients
    plot_exact()

    # use DE to solve 8th-order Chebyshev coefficients
    npop = 10*ndim
    solution = diffev(chebyshev8cost,x0,npop)

    # use pretty print for polynomials
    print poly1d(solution)

    # compare solution with actual 8th-order Chebyshev coefficients
    print "\nActual Coefficients:\n %s\n" % poly1d(chebyshev8coeffs)

    # plot solution versus exact coefficients
Code Example #30
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2022 The Uncertainty Quantification Foundation.
# License: 3-clause BSD.  The full license text is available at:
#  - https://github.com/uqfoundation/mystic/blob/master/LICENSE

from mystic.constraints import *
from mystic.penalty import quadratic_equality
from mystic.coupler import inner
from mystic.math import almostEqual
from mystic.tools import random_seed
random_seed(669)


def test_penalize():

    from mystic.math.measures import mean, spread

    def mean_constraint(x, target):
        return mean(x) - target

    def range_constraint(x, target):
        return spread(x) - target

    @quadratic_equality(condition=range_constraint, kwds={'target': 5.0})
    @quadratic_equality(condition=mean_constraint, kwds={'target': 5.0})
    def penalty(x):
        return 0.0
Code Example #31
File: solver_test_sanity.py  Project: eriknw/mystic
    def _run_solver(self, early_terminate=False, **kwds):
        from mystic.monitors import Monitor
        import numpy
        from mystic.tools import random_seed
        random_seed(321)
        esow = Monitor()
        ssow = Monitor() 

        solver = self.solver
        solver.SetRandomInitialPoints(min = self.min, max = self.max)
        if self.usebounds: solver.SetStrictRanges(self.min, self.max)
        if self.uselimits: solver.SetEvaluationLimits(self.maxiter, self.maxfun)
        if self.useevalmon: solver.SetEvaluationMonitor(esow)
        if self.usestepmon: solver.SetGenerationMonitor(ssow)
        solver.Solve(self.costfunction, self.term, **kwds)
        sol = solver.Solution()

        iter=1
       #if self.uselimits and self.maxiter == 0: iter=0
        # sanity check solver internals
        self.assertTrue(solver.generations == len(solver._stepmon.y)-iter)
        self.assertTrue(list(solver.bestSolution) == solver._stepmon.x[-1]) #XXX
        self.assertTrue(solver.bestEnergy == solver._stepmon.y[-1])
        self.assertTrue(solver.solution_history == solver._stepmon.x)
        self.assertTrue(solver.energy_history == solver._stepmon.y)
        if self.usestepmon:
            self.assertTrue(ssow.x == solver._stepmon.x)
            self.assertTrue(ssow.y == solver._stepmon.y)
        if self.useevalmon:
            self.assertTrue(solver.evaluations == len(solver._evalmon.y))
            self.assertTrue(esow.x == solver._evalmon.x)
            self.assertTrue(esow.y == solver._evalmon.y)

        # Fail appropriately for solver/termination mismatch
        if early_terminate:
            self.assertTrue(solver.generations < 2)
            return

        g = solver.generations
        calls = [(g+1)*self.NP, (2*g)+1]
        iters = [g]
        # Test early terminations
        if self.uselimits and self.maxfun == 0:
            calls += [1, 20] #XXX: scipy*
            iters += [1]     #XXX: scipy*
            self.assertTrue(solver.evaluations in calls) 
            self.assertTrue(solver.generations in iters)
            return
        if self.uselimits and self.maxfun == 1:
            calls += [1, 20] #XXX: scipy*
            iters += [1]     #XXX: scipy*
            self.assertTrue(solver.evaluations in calls) 
            self.assertTrue(solver.generations in iters)
            return
        if self.uselimits and self.maxiter == 0:
            calls += [1, 20] #XXX: scipy*
            iters += [1]     #XXX: scipy*
            self.assertTrue(solver.evaluations in calls) 
            self.assertTrue(solver.generations in iters)
            return
        if self.uselimits and self.maxiter == 1:
            calls += [20] #Powell's
            self.assertTrue(solver.evaluations in calls) 
            self.assertTrue(solver.generations in iters)
            return
        if self.uselimits and self.maxiter >= 2 and self.maxiter <= 5:
            calls += [52, 79, 107, 141] #Powell's
            self.assertTrue(solver.evaluations in calls) 
            self.assertTrue(solver.generations in iters)
            return

        # Verify solution is close to exact
        print sol
        for i in range(len(sol)):
            self.assertAlmostEqual(sol[i], self.exact[i], self.precision)
        return
Code Example #32
File: measures.py  Project: mrakitin/mystic
def impose_expectation(param, f, npts, bounds=None, weights=None, **kwds):
    """impose a given expextation value (m +/- D) on a given function f.
Optimization on f over the given bounds seeks a mean 'm' with deviation 'D'.
  (this function is not 'mean-, range-, or variance-preserving')

Inputs:
    param -- a tuple of target parameters: param = (mean, deviation)
    f -- a function that takes a list and returns a number
    npts -- a tuple of dimensions of the target product measure
    bounds -- a tuple of sample bounds:   bounds = (lower_bounds, upper_bounds)
    weights -- a list of sample weights

Additional Inputs:
    constraints -- a function that takes a nested list of N x 1D discrete
        measure positions and weights   x' = constraints(x, w)
    npop -- size of the trial solution population
    maxiter -- number - the maximum number of iterations to perform
    maxfun -- number - the maximum number of function evaluations

Outputs:
    samples -- a list of sample positions

For example:
    >>> # provide the dimensions and bounds
    >>> nx = 3;  ny = 2;  nz = 1
    >>> x_lb = [10.0];  y_lb = [0.0];  z_lb = [10.0]
    >>> x_ub = [50.0];  y_ub = [9.0];  z_ub = [90.0]
    >>> 
    >>> # prepare the bounds
    >>> lb = (nx * x_lb) + (ny * y_lb) + (nz * z_lb)
    >>> ub = (nx * x_ub) + (ny * y_ub) + (nz * z_ub)
    >>>
    >>> # generate a list of samples with mean +/- dev imposed
    >>> mean = 2.0;  dev = 0.01
    >>> samples = impose_expectation((mean,dev), f, (nx,ny,nz), (lb,ub))
    >>>
    >>> # test the results by calculating the expectation value for the samples
    >>> expectation(f, samples)
    >>> 2.00001001012246015
"""
    # param[0] is the target mean
    # param[1] is the acceptable deviation from the target mean

    # FIXME: the following is a HACK to recover from lost 'weights' information
    #        we 'mimic' discrete measures using the product measure weights
    # plug in the 'constraints' function:  samples' = constrain(samples, weights)
    constrain = None  # default is no constraints
    if 'constraints' in kwds: constrain = kwds['constraints']
    if not constrain:  # if None (default), there are no constraints
        constraints = lambda x: x
    else:  #XXX: better to use a standard "xk' = constrain(xk)" interface ?

        def constraints(rv):
            coords = _pack(_nested(rv, npts))
            coords = zip(*coords)  # 'mimic' a nested list
            coords = constrain(coords, [weights for i in range(len(coords))])
            coords = zip(*coords)  # revert back to a packed list
            return _flat(_unpack(coords, npts))

    # construct cost function to reduce deviation from expectation value
    def cost(rv):
        """compute cost from a 1-d array of model parameters,
    where:  cost = | E[model] - m |**2 """
        # from mystic.math.measures import _pack, _nested, expectation
        samples = _pack(_nested(rv, npts))
        Ex = expectation(f, samples, weights)
        return (Ex - param[0])**2

    # if bounds are not set, use the default optimizer bounds
    if not bounds:
        lower_bounds = []
        upper_bounds = []
        for n in npts:
            lower_bounds += [None] * n
            upper_bounds += [None] * n
    else:
        lower_bounds, upper_bounds = bounds

    # construct and configure optimizer
    debug = kwds['debug'] if 'debug' in kwds else False
    npop = kwds.pop('npop', 200)
    maxiter = kwds.pop('maxiter', 1000)
    maxfun = kwds.pop('maxfun', 1e+6)
    crossover = 0.9
    percent_change = 0.9

    def optimize(cost, (lb, ub), tolerance, _constraints):
        from mystic.solvers import DifferentialEvolutionSolver2
        from mystic.termination import VTR
        from mystic.strategy import Best1Exp
        from mystic.monitors import VerboseMonitor, Monitor
        from mystic.tools import random_seed
        if debug: random_seed(123)
        evalmon = Monitor()
        stepmon = Monitor()
        if debug: stepmon = VerboseMonitor(10)

        ndim = len(lb)
        solver = DifferentialEvolutionSolver2(ndim, npop)
        solver.SetRandomInitialPoints(min=lb, max=ub)
        solver.SetStrictRanges(min=lb, max=ub)
        solver.SetEvaluationLimits(maxiter, maxfun)
        solver.SetEvaluationMonitor(evalmon)
        solver.SetGenerationMonitor(stepmon)
        solver.Solve(cost,termination=VTR(tolerance),strategy=Best1Exp, \
                     CrossProbability=crossover,ScalingFactor=percent_change, \
                     constraints = _constraints)

        solved = solver.Solution()
        diameter_squared = solver.bestEnergy
        func_evals = len(evalmon)
        return solved, diameter_squared, func_evals
Code Example #34
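# Excerpt: a symbolic constraint string is compiled into a constraint function
# with generate_solvers/generate_constraint, then a lattice of Powell solvers
# is run over [0,1]**ndim under that constraint; pointvec, test_obj, NCOG,
# Pool, and the solver classes come from earlier in the script.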
cminconstr=cminconstr+"2"

solv = generate_solvers(cminconstr,nvars=len(pointvec))
constraintfunc = generate_constraint(solv)


#Run optimization
if __name__ == '__main__':


    print("Powell's Method")
    print("===============")

    # dimensional information
    from mystic.tools import random_seed
    random_seed(12)
    ndim = len(pointvec)
    nbins = 1 #[2,1,2,1,2,1,2,1,1]


    # configure monitor
    stepmon = VerboseMonitor(1)

    # use lattice-Powell to solve 8th-order Chebyshev coefficients
    solver = LatticeSolver(ndim, nbins)
    solver.SetNestedSolver(PowellDirectionalSolver)
    solver.SetMapper(Pool().map)
    solver.SetGenerationMonitor(stepmon)
    solver.SetStrictRanges(min=[0]*ndim, max=[1]*ndim)
    solver.SetConstraints(constraintfunc)
    solver.Solve(test_obj, NCOG(1e+2), disp=1)
Code Example #35
File: test_method_order.py  Project: shirangi/mystic
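# Sets the termination, monitor, bounds, and evaluation limits on a
# DifferentialEvolutionSolver before calling Solve(rosen) with no extra
# arguments, then reads off bestEnergy.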
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2018 The Uncertainty Quantification Foundation.
# License: 3-clause BSD.  The full license text is available at:
#  - https://github.com/uqfoundation/mystic/blob/master/LICENSE

from mystic.models import rosen
from mystic.solvers import *
from mystic.termination import VTRChangeOverGeneration
from mystic.monitors import VerboseMonitor, Monitor
from mystic.tools import random_seed
random_seed(123)
lb, ub = [-100.] * 3, [100] * 3
interval = None

if interval:
    _stepmon = VerboseMonitor(interval)
else:
    _stepmon = Monitor()
_term = VTRChangeOverGeneration(generations=200)
_solver = DifferentialEvolutionSolver(3, 20)  #40)
_solver.SetRandomInitialPoints(lb, ub)
_solver.SetStrictRanges(lb, ub)
_solver.SetTermination(_term)
_solver.SetGenerationMonitor(_stepmon)
_solver.SetEvaluationLimits(100, 1000)
_solver.Solve(rosen)

_energy = _solver.bestEnergy
Code Example #36
def test_griewangk():
    """Test Griewangk's function, which has many local minima.

Testing Griewangk:
Expected: x=[0.]*10 and f=0

Using DifferentialEvolutionSolver:
Solution:  [  8.87516194e-09   7.26058147e-09   1.02076001e-08   1.54219038e-08
  -1.54328461e-08   2.34589663e-08   2.02809360e-08  -1.36385836e-08
   1.38670373e-08   1.59668900e-08]
f value:  0.0
Iterations:  4120
Function evaluations:  205669
Time elapsed:  34.4936850071  seconds

Using DifferentialEvolutionSolver2:
Solution:  [ -2.02709316e-09   3.22017968e-09   1.55275472e-08   5.26739541e-09
  -2.18490470e-08   3.73725584e-09  -1.02315312e-09   1.24680355e-08
  -9.47898116e-09   2.22243557e-08]
f value:  0.0
Iterations:  4011
Function evaluations:  200215
Time elapsed:  32.8412370682  seconds
"""

    print "Testing Griewangk:"
    print "Expected: x=[0.]*10 and f=0"
    from mystic.models import griewangk as costfunc
    ndim = 10
    lb = [-400.]*ndim
    ub = [400.]*ndim
    maxiter = 10000
    seed = 123 # Re-seed for each solver to have them all start at same x0
    
    # DifferentialEvolutionSolver
    print "\nUsing DifferentialEvolutionSolver:"
    npop = 50
    random_seed(seed)
    from mystic.solvers import DifferentialEvolutionSolver
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.termination import VTR
    from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp
    esow = Monitor()
    ssow = Monitor() 
    solver = DifferentialEvolutionSolver(ndim, npop)
    solver.SetRandomInitialPoints(lb, ub)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    solver.enable_signal_handler()
    #term = COG(1e-10)
    #term = CRT()
    term = VTR(0.)
    time1 = time.time() # Is this an ok way of timing?
    solver.Solve(costfunc, term, strategy=Rand1Exp, \
                 CrossProbability=0.3, ScalingFactor=1.0)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    print "Solution: ", sol
    print "f value: ", fx
    print "Iterations: ", solver.generations
    print "Function evaluations: ", len(esow.x)
    print "Time elapsed: ", time_elapsed, " seconds"
    assert almostEqual(fx, 0.0, tol=3e-3)

    # DifferentialEvolutionSolver2
    print "\nUsing DifferentialEvolutionSolver2:"
    npop = 50
    random_seed(seed)
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.termination import VTR
    from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp
    esow = Monitor()
    ssow = Monitor() 
    solver = DifferentialEvolutionSolver2(ndim, npop)
    solver.SetRandomInitialPoints(lb, ub)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    #term = COG(1e-10)
    #term = CRT()
    term = VTR(0.)
    time1 = time.time() # Is this an ok way of timing?
    solver.Solve(costfunc, term, strategy=Rand1Exp, \
                 CrossProbability=0.3, ScalingFactor=1.0)
    sol = solver.Solution()
    time_elapsed = time.time() - time1
    fx = solver.bestEnergy
    print "Solution: ", sol
    print "f value: ", fx
    print "Iterations: ", solver.generations
    print "Function evaluations: ", len(esow.x)
    print "Time elapsed: ", time_elapsed, " seconds"
    assert almostEqual(fx, 0.0, tol=3e-3)
Code Example #37
File: mpmap_desolve.py  Project: Magellen/mystic
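# Sequential-versus-parallel Chebyshev comparison (compare Example #23), using
# a multiprocessing mapper; NP, ND, MAX_GENERATIONS, Pool, ChebyshevCost, and
# the monitors are defined earlier in the script, and freeze_support() is
# called to help Windows use multiprocessing.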
NNODES = NP // 5

seed = 321

if __name__ == '__main__':
    from pathos.helpers import freeze_support
    freeze_support()  # help Windows use multiprocessing

    def print_solution(func):
        print(poly1d(func))
        return

    psow = VerboseMonitor(10)
    ssow = VerboseMonitor(10)

    random_seed(seed)
    print("first sequential...")
    solver = DifferentialEvolutionSolver2(ND, NP)
    solver.SetRandomInitialPoints(min=[-100.0] * ND, max=[100.0] * ND)
    solver.SetEvaluationLimits(generations=MAX_GENERATIONS)
    solver.SetGenerationMonitor(ssow)
    solver.Solve(ChebyshevCost, VTR(0.01), strategy=Best1Exp, \
                 CrossProbability=1.0, ScalingFactor=0.9, disp=1)
    print("")
    print_solution(solver.bestSolution)

    random_seed(seed)
    print("\n and now parallel...")
    solver2 = DifferentialEvolutionSolver2(ND, NP)
    solver2.SetMapper(Pool(NNODES).map)  # parallel
    solver2.SetRandomInitialPoints(min=[-100.0] * ND, max=[100.0] * ND)
Code Example #38
    def _run_solver(self, early_terminate=False, **kwds):
        from mystic.monitors import Monitor
        import numpy
        from mystic.tools import random_seed
        seed = 111 if self.maxiter is None else 321 #XXX: good numbers...
        random_seed(seed)
        esow = Monitor()
        ssow = Monitor() 

        solver = self.solver
        solver.SetRandomInitialPoints(min = self.min, max = self.max)
        if self.usebounds: solver.SetStrictRanges(self.min, self.max)
        if self.uselimits: solver.SetEvaluationLimits(self.maxiter, self.maxfun)
        if self.useevalmon: solver.SetEvaluationMonitor(esow)
        if self.usestepmon: solver.SetGenerationMonitor(ssow)
        #### run solver, but trap output
        _stdout = trap_stdout()
        solver.Solve(self.costfunction, self.term, **kwds)
        out = release_stdout(_stdout)
        ################################
        sol = solver.Solution()

        iter=1
       #if self.uselimits and self.maxiter == 0: iter=0
        # sanity check solver internals
        self.assertTrue(solver.generations == len(solver._stepmon._y)-iter)
        self.assertTrue(list(solver.bestSolution) == solver._stepmon.x[-1]) #XXX
        self.assertTrue(solver.bestEnergy == solver._stepmon.y[-1])
        self.assertTrue(solver.solution_history == solver._stepmon.x)
        self.assertTrue(solver.energy_history == solver._stepmon.y)
        if self.usestepmon:
            self.assertTrue(ssow.x == solver._stepmon.x)
            self.assertTrue(ssow.y == solver._stepmon.y)
            self.assertTrue(ssow._y == solver._stepmon._y)
        if self.useevalmon:
            self.assertTrue(solver.evaluations == len(solver._evalmon._y))
            self.assertTrue(esow.x == solver._evalmon.x)
            self.assertTrue(esow.y == solver._evalmon.y)
            self.assertTrue(esow._y == solver._evalmon._y)

        # Fail appropriately for solver/termination mismatch
        if early_terminate:
            self.assertTrue(solver.generations < 2)
            warn = "Warning: Invalid termination condition (nPop < 2)"
            self.assertTrue(warn in out)
            return

        g = solver.generations
        calls = [(g+1)*self.NP, (2*g)+1]
        iters = [g]
        # Test early terminations
        if self.uselimits and self.maxfun == 0:
            calls += [1, 20] #XXX: scipy*
            iters += [1]     #XXX: scipy*
            self.assertTrue(solver.evaluations in calls) 
            self.assertTrue(solver.generations in iters)
            return
        if self.uselimits and self.maxfun == 1:
            calls += [1, 20] #XXX: scipy*
            iters += [1]     #XXX: scipy*
            self.assertTrue(solver.evaluations in calls) 
            self.assertTrue(solver.generations in iters)
            return
        if self.uselimits and self.maxiter == 0:
            calls += [1, 20] #XXX: scipy*
            iters += [1]     #XXX: scipy*
            self.assertTrue(solver.evaluations in calls) 
            self.assertTrue(solver.generations in iters)
            return
        if self.uselimits and self.maxiter == 1:
            calls += [20] #Powell's
            self.assertTrue(solver.evaluations in calls) 
            self.assertTrue(solver.generations in iters)
            return
        if self.uselimits and self.maxiter and 2 <= self.maxiter <= 5:
            calls += [52, 79, 107, 141] #Powell's
            self.assertTrue(solver.evaluations in calls) 
            self.assertTrue(solver.generations in iters)
            return

        # Verify solution is close to exact
       #print(sol)
        for i in range(len(sol)):
            self.assertAlmostEqual(sol[i], self.exact[i], self.precision)
        return