def main():
    """Drive the DE solver against the `wavy` cost function.

    Returns (solution, solver) so the caller can inspect both the best
    point found and the solver state.
    """
    de = DifferentialEvolutionSolver(ND, NP)
    de.SetRandomInitialPoints(min=[-100.0] * ND, max=[100.0] * ND)
    de.SetEvaluationLimits(generations=MAX_GENERATIONS)
    de.enable_signal_handler()
    # report every generation
    de.SetGenerationMonitor(VerboseMonitor(1))
    de.Solve(wavy, ChangeOverGeneration(generations=50),
             strategy=Best1Bin, CrossProbability=1.0, ScalingFactor=0.9,
             sigint_callback=plot_solution)
    return de.Solution(), de
def main():
    """Fit the Chebyshev cost with DE (storage-based solver 2).

    Returns the best solution found.
    """
    from mystic.solvers import DifferentialEvolutionSolver2 as DifferentialEvolutionSolver
    de = DifferentialEvolutionSolver(ND, NP)
    de.SetRandomInitialPoints(min=[-100.0] * ND, max=[100.0] * ND)
    de.SetEvaluationLimits(generations=MAX_GENERATIONS)
    de.SetGenerationMonitor(VerboseMonitor(30))
    de.enable_signal_handler()
    #strategy = Best1Bin  # alternative strategy kept from the original
    de.Solve(ChebyshevCost, termination=VTR(0.01), strategy=Best1Exp,
             CrossProbability=1.0, ScalingFactor=0.9,
             sigint_callback=plot_solution)
    return de.Solution()
def de_solve(CF, a4=None, a5=None):
    """solve with DE for given cost function; fix a4 and/or a5 if provided

    CF -- cost function taking a parameter list of length 5
    a4, a5 -- when given, pin parameters 3 / 4 to these values by
              collapsing their range to a single point

    Returns (solution, stepmon).
    """
    minrange = [0, 100, 100, 1, 1]
    maxrange = [100, 2000, 200, 200, 200]
    interval = 10
    # 'is not None' (not '!= None') so that a value of 0 is also honored
    if a5 is not None:
        minrange[4] = maxrange[4] = a5
        interval = 20
    if a4 is not None:
        minrange[3] = maxrange[3] = a4
        # both a4 and a5 fixed: monitor far less frequently
        if interval == 20: interval = 1000
    solver = DifferentialEvolutionSolver(ND, NP)
    solver.enable_signal_handler()
    stepmon = MyMonitor(interval)
    solver.SetRandomInitialPoints(min=minrange, max=maxrange)
    solver.SetStrictRanges(min=minrange, max=maxrange)
    solver.SetEvaluationLimits(generations=MAX_GENERATIONS)
    solver.SetGenerationMonitor(stepmon)
    solver.Solve(CF, termination=ChangeOverGeneration(generations=50))
    solution = solver.Solution()
    return solution, stepmon
def test_DifferentialEvolutionSolver2_VTR(self):
    """DESolver2 run to completion under VTR termination."""
    from mystic.termination import VTR
    from mystic.solvers import DifferentialEvolutionSolver2
    self.term = VTR()
    self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
    self._run_solver()
# NOTE(review): this chunk begins mid-function -- the loop header and the
# definitions of data/xx/yy/rr/svl are outside this view; indentation below
# is reconstructed.  Collects indices of points lying (nearly) on the circle.
        x,y = data[i]
        # keep index i when (x,y) is within 0.01 of the circle of radius rr
        if abs((xx-x)*(xx-x)+(yy-y)*(yy-y) - rr*rr) < 0.01:
            svl.append(i)
    return svl

# DEsolver inputs
MAX_GENERATIONS = 2000
ND, NP = 3, 30 # dimension, population size
# search box for (x0, y0, R0)
minrange = [0., 0., 0.]
maxrange = [50., 50., 10.]

# prepare DESolver
from mystic.solvers import DifferentialEvolutionSolver2 \
     as DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration, VTR
solver = DifferentialEvolutionSolver(ND, NP)
solver.enable_signal_handler()
solver.SetRandomInitialPoints(min=minrange,max=maxrange)
solver.SetEvaluationLimits(generations=MAX_GENERATIONS)
# NOTE(review): Solve runs at import time here, before the __main__ guard --
# confirm that is intended ('cost' is defined outside this view)
solver.Solve(cost, termination=ChangeOverGeneration(generations=100))

if __name__ == '__main__':
    # x0, y0, R0
    #guess = [1,1,1] # bad initial guess
    #guess = [5,5,1] # ok guess
    guess = [10,15,5] # good initial guess

    # plot training set & training set boundary
    pylab.plot(xy[:,0],xy[:,1],'k+',markersize=6)
print("Differential Evolution") print("======================") # set range for random initial guess ndim = 9 x0 = [(-100, 100)] * ndim random_seed(123) # configure monitors stepmon = VerboseMonitor(50) evalmon = Monitor() # use DE to solve 8th-order Chebyshev coefficients npop = 10 * ndim solver = DifferentialEvolutionSolver2(ndim, npop) solver.SetRandomInitialPoints(min=[-100] * ndim, max=[100] * ndim) solver.SetEvaluationLimits(generations=999) solver.SetEvaluationMonitor(evalmon) solver.SetGenerationMonitor(stepmon) solver.enable_signal_handler() solver.Solve(chebyshev8cost, termination=VTR(0.01), strategy=Best1Exp, \ CrossProbability=1.0, ScalingFactor=0.9) solution = solver.bestSolution # get solved coefficients and Chi-Squared (from solver members) iterations = solver.generations cost = solver.bestEnergy print("Generation %d has best Chi-Squared: %f" % (iterations, cost)) print("Solved Coefficients:\n %s\n" % poly1d(solver.bestSolution))
def test_DifferentialEvolutionSolver2_CRT(self):
    """DESolver2 run to completion under CandidateRelativeTolerance."""
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.solvers import DifferentialEvolutionSolver2
    self.term = CRT()
    self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
    self._run_solver()
# Compare a sequential DE run against a parallel (mapped) one (script section).
seed = 100

if __name__ == '__main__':
    from pathos.helpers import freeze_support
    freeze_support() # help Windows use multiprocessing

    def print_solution(func):
        # print the raw best solution
        print(func)
        return

    # separate monitors for the parallel and sequential runs
    psow = VerboseMonitor(10)
    ssow = VerboseMonitor(10)

    # seed so both runs start from the same initial population
    random_seed(seed)
    print("first sequential...")
    solver = DifferentialEvolutionSolver2(ND, NP) #XXX: sequential
    solver.SetRandomInitialPoints(min=[-100.0] * ND, max=[100.0] * ND)
    solver.SetEvaluationLimits(generations=MAX_GENERATIONS)
    solver.SetGenerationMonitor(ssow)
    solver.Solve(myCost, VTR(TOL), strategy=Best1Exp, \
                 CrossProbability=CROSS, ScalingFactor=SCALE, disp=1)
    print("")
    print_solution(solver.bestSolution)

    random_seed(seed)
    print("\n and now parallel...")
    solver2 = DifferentialEvolutionSolver2(ND, NP) #XXX: parallel
    # population evaluated through a process pool map
    solver2.SetMapper(Pool(NNODES).map)
    solver2.SetRandomInitialPoints(min=[-100.0] * ND, max=[100.0] * ND)
    solver2.SetEvaluationLimits(generations=MAX_GENERATIONS)
    solver2.SetGenerationMonitor(psow)
    # NOTE(review): chunk is truncated here -- the parallel Solve/print
    # presumably follows outside this view
def test_DifferentialEvolutionSolver2_COG(self): # Default for this solver
    """DESolver2 run to completion under ChangeOverGeneration (its default)."""
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.solvers import DifferentialEvolutionSolver2
    self.term = COG()
    self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
    self._run_solver()
def test_DifferentialEvolutionSolver2_NCOG(self):
    """DESolver2 run to completion under NormalizedChangeOverGeneration."""
    from mystic.termination import NormalizedChangeOverGeneration as NCOG
    from mystic.solvers import DifferentialEvolutionSolver2
    self.term = NCOG()
    self.solver = DifferentialEvolutionSolver2(self.ND, self.NP)
    self._run_solver()
# max = [200.001, 100.001, numpy.inf] # min = [-0.999, -0.999, 0.999] # max = [2.001, 1.001, 1.001] npop = 5 * len(x0) print("Differential Evolution") print("======================") start = time.time() from mystic.monitors import Monitor, VerboseMonitor stepmon = VerboseMonitor(1, 1) #stepmon = Monitor() #VerboseMonitor(10) from mystic.termination import NormalizedChangeOverGeneration as NCOG #from mystic.solvers import diffev, DifferentialEvolutionSolver from mystic.solvers import diffev2, DifferentialEvolutionSolver2 #print(diffev2(rosen,x0,npop,retall=0,full_output=0)#,maxiter=14)) solver = DifferentialEvolutionSolver2(len(x0), npop) solver.SetInitialPoints(x0) solver.SetStrictRanges(min, max) #solver.SetEvaluationLimits(generations=13) solver.SetGenerationMonitor(stepmon) solver.SetConstraints(constrain) solver.enable_signal_handler() solver.Solve(rosen, NCOG(tolerance=1e-4), disp=1) print(solver.bestSolution) #print("Current function value: %s" % solver.bestEnergy) #print("Iterations: %s" % solver.generations) #print("Function evaluations: %s" % solver.evaluations) times.append(time.time() - start) algor.append("Differential Evolution\t")
# Compare a sequential DE run against a parallel (mapped) one on the
# Chebyshev fit (script section).
seed = 321

if __name__ == '__main__':
    from pathos.helpers import freeze_support
    freeze_support() # help Windows use multiprocessing

    def print_solution(func):
        # print() as a function: the original used Python-2-only print
        # statements, which are syntax errors under Python 3
        print(poly1d(func))
        return

    # separate monitors for the parallel and sequential runs
    psow = VerboseMonitor(10)
    ssow = VerboseMonitor(10)

    # seed so both runs start from the same initial population
    random_seed(seed)
    print("first sequential...")
    solver = DifferentialEvolutionSolver2(ND, NP)
    solver.SetRandomInitialPoints(min=[-100.0] * ND, max=[100.0] * ND)
    solver.SetEvaluationLimits(generations=MAX_GENERATIONS)
    solver.SetGenerationMonitor(ssow)
    solver.Solve(ChebyshevCost, VTR(0.01), strategy=Best1Exp, \
                 CrossProbability=1.0, ScalingFactor=0.9, disp=1)
    print("")
    print_solution(solver.bestSolution)

    random_seed(seed)
    print("\n and now parallel...")
    solver2 = DifferentialEvolutionSolver2(ND, NP)
    solver2.SetMapper(Pool(NNODES).map) # parallel
    solver2.SetRandomInitialPoints(min=[-100.0] * ND, max=[100.0] * ND)
    solver2.SetEvaluationLimits(generations=MAX_GENERATIONS)
    solver2.SetGenerationMonitor(psow)
def impose_expectation(param, f, npts, bounds=None, weights=None, **kwds):
    """impose a given expectation value (m +/- D) on a given function f.

Optimization on f over the given bounds seeks a mean 'm' with deviation 'D'.
  (this function is not 'mean-, range-, or variance-preserving')

Inputs:
    param -- a tuple of target parameters: param = (mean, deviation)
    f -- a function that takes a list and returns a number
    npts -- a tuple of dimensions of the target product measure
    bounds -- a tuple of sample bounds: bounds = (lower_bounds, upper_bounds)
    weights -- a list of sample weights

Additional Inputs:
    constraints -- a function that takes a nested list of N x 1D discrete
        measure positions and weights   x' = constraints(x, w)
    npop -- size of the trial solution population
    maxiter -- number - the maximum number of iterations to perform
    maxfun -- number - the maximum number of function evaluations

Outputs:
    samples -- a list of sample positions

For example:
    >>> # provide the dimensions and bounds
    >>> nx = 3;  ny = 2;  nz = 1
    >>> x_lb = [10.0];  y_lb = [0.0];  z_lb = [10.0]
    >>> x_ub = [50.0];  y_ub = [9.0];  z_ub = [90.0]
    >>>
    >>> # prepare the bounds
    >>> lb = (nx * x_lb) + (ny * y_lb) + (nz * z_lb)
    >>> ub = (nx * x_ub) + (ny * y_ub) + (nz * z_ub)
    >>>
    >>> # generate a list of samples with mean +/- dev imposed
    >>> mean = 2.0;  dev = 0.01
    >>> samples = impose_expectation((mean,dev), f, (nx,ny,nz), (lb,ub))
    >>>
    >>> # test the results by calculating the expectation value for the samples
    >>> expectation(f, samples)
    >>> 2.00001001012246015
"""
    # param[0] is the target mean
    # param[1] is the acceptable deviation from the target mean

    # FIXME: the following is a HACK to recover from lost 'weights' information
    #        we 'mimic' discrete measures using the product measure weights
    # plug in the 'constraints' function:  samples' = constrain(samples, weights)
    constrain = None   # default is no constraints
    if 'constraints' in kwds: constrain = kwds['constraints']
    if not constrain:  # if None (default), there are no constraints
        constraints = lambda x: x
    else: #XXX: better to use a standard "xk' = constrain(xk)" interface ?
        def constraints(rv):
            coords = _pack(_nested(rv, npts))
            # list(...) -- Python 3 zip is an iterator; the result is both
            # len()'d and handed to 'constrain' as a sequence
            coords = list(zip(*coords))              # 'mimic' a nested list
            coords = constrain(coords, [weights for i in range(len(coords))])
            coords = list(zip(*coords))              # revert to a packed list
            return _flat(_unpack(coords, npts))

    # construct cost function to reduce deviation from expectation value
    def cost(rv):
        """compute cost from a 1-d array of model parameters,
        where:  cost = | E[model] - m |**2 """
        # from mystic.math.measures import _pack, _nested, expectation
        samples = _pack(_nested(rv, npts))
        Ex = expectation(f, samples, weights)
        return (Ex - param[0])**2

    # if bounds are not set, use the default optimizer bounds
    if not bounds:
        lower_bounds = []
        upper_bounds = []
        for n in npts:
            lower_bounds += [None] * n
            upper_bounds += [None] * n
    else:
        lower_bounds, upper_bounds = bounds

    # construct and configure optimizer
    debug = kwds.get('debug', False)
    npop = kwds.pop('npop', 200)
    maxiter = kwds.pop('maxiter', 1000)
    maxfun = kwds.pop('maxfun', 1e+6)
    crossover = 0.9
    percent_change = 0.9

    def optimize(cost, bounds, tolerance, _constraints):
        # NOTE: tuple parameters '(lb, ub)' were removed in Python 3
        # (PEP 3113); callers may still pass the bounds pair as a tuple
        lb, ub = bounds
        from mystic.solvers import DifferentialEvolutionSolver2
        from mystic.termination import VTR
        from mystic.strategy import Best1Exp
        from mystic.monitors import VerboseMonitor, Monitor
        from mystic.tools import random_seed
        if debug: random_seed(123)
        evalmon = Monitor()
        stepmon = Monitor()
        if debug: stepmon = VerboseMonitor(10)

        ndim = len(lb)
        solver = DifferentialEvolutionSolver2(ndim, npop)
        solver.SetRandomInitialPoints(min=lb, max=ub)
        solver.SetStrictRanges(min=lb, max=ub)
        solver.SetEvaluationLimits(maxiter, maxfun)
        solver.SetEvaluationMonitor(evalmon)
        solver.SetGenerationMonitor(stepmon)
        solver.Solve(cost, termination=VTR(tolerance), strategy=Best1Exp, \
                     CrossProbability=crossover, ScalingFactor=percent_change, \
                     constraints=_constraints)

        solved = solver.Solution()
        diameter_squared = solver.bestEnergy
        func_evals = len(evalmon)
        return solved, diameter_squared, func_evals
def solve(measurements, method):
    """Find reasonable marker positions based on a set of measurements.

    Runs two optimization stages with the chosen `method`:
      1) marker positions (stage uses `marker_measurements`)
      2) the nozzle xyz-offset relative to marker 0 (only when 21
         measurements were given)
    Results are printed; nothing is returned.

    method -- one of "SLSQP", "L-BFGS-B", "PowellDirectionalSolver",
              or "differentialEvolutionSolver"; anything else exits(1).
    """
    print(method)
    marker_measurements = measurements
    if np.size(measurements) == 21:
        # skip the first (21 - 15) = 6 entries; they are sliced off below
        # as nozzle_measurements for stage 2
        marker_measurements = measurements[(21 - 15) :]
    # m0 has known positions (0, 0, 0)
    # m1 has unknown x-position
    # All others have unknown xy-positions
    num_params = 0 + 1 + 2 + 2 + 2 + 2
    bound = 400.0
    lower_bound = [0.0, 0.0, 0.0, 0.0, 0.0, -bound, 0.0, -bound, 0.0]
    upper_bound = [bound, bound, bound, bound, bound, bound, bound, bound, bound]

    def costx_no_nozzle(posvec):
        """Identical to cost_no_nozzle, except the shape of inputs"""
        positions = posvec2matrix_no_nozzle(posvec)
        return cost_no_nozzle(positions, marker_measurements)

    guess_0 = [0.0] * num_params
    intermediate_cost = 0.0
    intermediate_solution = []
    if method == "SLSQP":
        # scipy gradient-based solver with very tight tolerances
        sol = scipy.optimize.minimize(
            costx_no_nozzle,
            guess_0,
            method="SLSQP",
            bounds=list(zip(lower_bound, upper_bound)),
            tol=1e-20,
            options={"disp": True, "ftol": 1e-40, "eps": 1e-10, "maxiter": 500},
        )
        intermediate_cost = sol.fun
        intermediate_solution = sol.x
    elif method == "L-BFGS-B":
        sol = scipy.optimize.minimize(
            costx_no_nozzle,
            guess_0,
            method="L-BFGS-B",
            bounds=list(zip(lower_bound, upper_bound)),
            options={"disp": True, "ftol": 1e-12, "gtol": 1e-12, "maxiter": 50000, "maxfun": 1000000},
        )
        intermediate_cost = sol.fun
        intermediate_solution = sol.x
    elif method == "PowellDirectionalSolver":
        from mystic.solvers import PowellDirectionalSolver
        from mystic.termination import Or, CollapseAt, CollapseAs
        from mystic.termination import ChangeOverGeneration as COG
        from mystic.monitors import VerboseMonitor
        from mystic.termination import VTR, And, Or
        solver = PowellDirectionalSolver(num_params)
        solver.SetRandomInitialPoints(lower_bound, upper_bound)
        solver.SetEvaluationLimits(evaluations=3200000, generations=100000)
        # stop at cost <= 1e-25, or when change over 20 generations < 1e-10
        solver.SetTermination(Or(VTR(1e-25), COG(1e-10, 20)))
        solver.SetStrictRanges(lower_bound, upper_bound)
        solver.SetGenerationMonitor(VerboseMonitor(5))
        solver.Solve(costx_no_nozzle)
        intermediate_cost = solver.bestEnergy
        intermediate_solution = solver.bestSolution
    elif method == "differentialEvolutionSolver":
        from mystic.solvers import DifferentialEvolutionSolver2
        from mystic.monitors import VerboseMonitor
        from mystic.termination import VTR, ChangeOverGeneration, And, Or
        from mystic.strategy import Best1Exp, Best1Bin
        stop = Or(VTR(1e-18), ChangeOverGeneration(1e-9, 500))
        npop = 3  # NOTE(review): unusually small DE population -- confirm intended
        stepmon = VerboseMonitor(100)
        solver = DifferentialEvolutionSolver2(num_params, npop)
        solver.SetEvaluationLimits(evaluations=3200000, generations=100000)
        solver.SetRandomInitialPoints(lower_bound, upper_bound)
        solver.SetStrictRanges(lower_bound, upper_bound)
        solver.SetGenerationMonitor(stepmon)
        solver.Solve(
            costx_no_nozzle,
            termination=stop,
            strategy=Best1Bin,
        )
        intermediate_cost = solver.bestEnergy
        intermediate_solution = solver.bestSolution
    else:
        print("Method %s is not supported!" % method)
        sys.exit(1)
    print("Best intermediate cost: ", intermediate_cost)
    print("Best intermediate positions: \n%s" % posvec2matrix_no_nozzle(intermediate_solution))
    if np.size(measurements) == 15:
        # without the extra 6 samples, stage 2 cannot run
        print("Got only 15 samples, so will not try to find nozzle position\n")
        return
    nozzle_measurements = measurements[: (21 - 15)]
    # Look for nozzle's xyz-offset relative to marker 0
    num_params = 3
    lower_bound = [0.0, 0.0, -bound]
    upper_bound = [bound, bound, 0.0]

    def costx_nozzle(posvec):
        """Identical to cost_nozzle, except the shape of inputs"""
        positions = posvec2matrix_nozzle(posvec, intermediate_solution)
        return cost_nozzle(positions, measurements)

    guess_0 = [0.0, 0.0, 0.0]
    final_cost = 0.0
    final_solution = []
    # NOTE(review): this stage duplicates the solver dispatch above with
    # costx_nozzle in place of costx_no_nozzle
    if method == "SLSQP":
        sol = scipy.optimize.minimize(
            costx_nozzle,
            guess_0,
            method="SLSQP",
            bounds=list(zip(lower_bound, upper_bound)),
            tol=1e-20,
            options={"disp": True, "ftol": 1e-40, "eps": 1e-10, "maxiter": 500},
        )
        final_cost = sol.fun
        final_solution = sol.x
    elif method == "L-BFGS-B":
        sol = scipy.optimize.minimize(
            costx_nozzle,
            guess_0,
            method="L-BFGS-B",
            bounds=list(zip(lower_bound, upper_bound)),
            options={"disp": True, "ftol": 1e-12, "gtol": 1e-12, "maxiter": 50000, "maxfun": 1000000},
        )
        final_cost = sol.fun
        final_solution = sol.x
    elif method == "PowellDirectionalSolver":
        from mystic.solvers import PowellDirectionalSolver
        from mystic.termination import Or, CollapseAt, CollapseAs
        from mystic.termination import ChangeOverGeneration as COG
        from mystic.monitors import VerboseMonitor
        from mystic.termination import VTR, And, Or
        solver = PowellDirectionalSolver(num_params)
        solver.SetRandomInitialPoints(lower_bound, upper_bound)
        solver.SetEvaluationLimits(evaluations=3200000, generations=100000)
        solver.SetTermination(Or(VTR(1e-25), COG(1e-10, 20)))
        solver.SetStrictRanges(lower_bound, upper_bound)
        solver.SetGenerationMonitor(VerboseMonitor(5))
        solver.Solve(costx_nozzle)
        final_cost = solver.bestEnergy
        final_solution = solver.bestSolution
    elif method == "differentialEvolutionSolver":
        from mystic.solvers import DifferentialEvolutionSolver2
        from mystic.monitors import VerboseMonitor
        from mystic.termination import VTR, ChangeOverGeneration, And, Or
        from mystic.strategy import Best1Exp, Best1Bin
        stop = Or(VTR(1e-18), ChangeOverGeneration(1e-9, 500))
        npop = 3
        stepmon = VerboseMonitor(100)
        solver = DifferentialEvolutionSolver2(num_params, npop)
        solver.SetEvaluationLimits(evaluations=3200000, generations=100000)
        solver.SetRandomInitialPoints(lower_bound, upper_bound)
        solver.SetStrictRanges(lower_bound, upper_bound)
        solver.SetGenerationMonitor(stepmon)
        solver.Solve(
            costx_nozzle,
            termination=stop,
            strategy=Best1Bin,
        )
        final_cost = solver.bestEnergy
        final_solution = solver.bestSolution
    print("Best final cost: ", final_cost)
    print("Best final positions:")
    # drop row 0 of the position matrix; the remaining six rows are
    # printed as markers 0-5
    final = posvec2matrix_nozzle(final_solution, intermediate_solution)[1:]
    for num in range(0, 6):
        print(
            "{0: 8.3f} {1: 8.3f} {2: 8.3f} <!-- Marker {3} -->".format(final[num][0], final[num][1], final[num][2], num)
        )
def test_rosenbrock(verbose=False):
    """Test the 2-dimensional Rosenbrock function.

Testing 2-D Rosenbrock:
Expected: x=[1., 1.] and f=0

Using DifferentialEvolutionSolver:
Solution:  [ 1.00000037  1.0000007 ]
f value:  2.29478683682e-13
Iterations:  99
Function evaluations:  3996
Time elapsed:  0.582273006439  seconds

Using DifferentialEvolutionSolver2:
Solution:  [ 0.99999999  0.99999999]
f value:  3.84824937598e-15
Iterations:  100
Function evaluations:  4040
Time elapsed:  0.577210903168  seconds

Using NelderMeadSimplexSolver:
Solution:  [ 0.99999921  1.00000171]
f value:  1.08732211477e-09
Iterations:  70
Function evaluations:  130
Time elapsed:  0.0190329551697  seconds

Using PowellDirectionalSolver:
Solution:  [ 1.  1.]
f value:  0.0
Iterations:  28
Function evaluations:  859
Time elapsed:  0.113857030869  seconds
"""
    if verbose:
        print("Testing 2-D Rosenbrock:")
        print("Expected: x=[1., 1.] and f=0")
    from mystic.models import rosen as costfunc
    ndim = 2
    lb = [-5.] * ndim
    ub = [5.] * ndim
    x0 = [2., 3.]
    maxiter = 10000

    # DifferentialEvolutionSolver
    if verbose: print("\nUsing DifferentialEvolutionSolver:")
    npop = 40
    from mystic.solvers import DifferentialEvolutionSolver
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.strategy import Rand1Bin
    esow = Monitor()
    ssow = Monitor()
    solver = DifferentialEvolutionSolver(ndim, npop)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = COG(1e-10)
    # perf_counter is the monotonic, high-resolution clock meant for
    # measuring elapsed time (time.time can jump with wall-clock changes)
    time1 = time.perf_counter()
    solver.Solve(costfunc, term, strategy=Rand1Bin)
    sol = solver.Solution()
    time_elapsed = time.perf_counter() - time1
    fx = solver.bestEnergy
    if verbose:
        print("Solution: %s" % sol)
        print("f value: %s" % fx)
        print("Iterations: %s" % solver.generations)
        print("Function evaluations: %s" % len(esow.x))
        print("Time elapsed: %s seconds" % time_elapsed)
    assert almostEqual(fx, 2.29478683682e-13, tol=3e-3)

    # DifferentialEvolutionSolver2
    if verbose: print("\nUsing DifferentialEvolutionSolver2:")
    npop = 40
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.strategy import Rand1Bin
    esow = Monitor()
    ssow = Monitor()
    solver = DifferentialEvolutionSolver2(ndim, npop)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = COG(1e-10)
    time1 = time.perf_counter()
    solver.Solve(costfunc, term, strategy=Rand1Bin)
    sol = solver.Solution()
    time_elapsed = time.perf_counter() - time1
    fx = solver.bestEnergy
    if verbose:
        print("Solution: %s" % sol)
        print("f value: %s" % fx)
        print("Iterations: %s" % solver.generations)
        print("Function evaluations: %s" % len(esow.x))
        print("Time elapsed: %s seconds" % time_elapsed)
    assert almostEqual(fx, 3.84824937598e-15, tol=3e-3)

    # NelderMeadSimplexSolver
    if verbose: print("\nUsing NelderMeadSimplexSolver:")
    from mystic.solvers import NelderMeadSimplexSolver
    from mystic.termination import CandidateRelativeTolerance as CRT
    esow = Monitor()
    ssow = Monitor()
    solver = NelderMeadSimplexSolver(ndim)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = CRT()
    time1 = time.perf_counter()
    solver.Solve(costfunc, term)
    sol = solver.Solution()
    time_elapsed = time.perf_counter() - time1
    fx = solver.bestEnergy
    if verbose:
        print("Solution: %s" % sol)
        print("f value: %s" % fx)
        print("Iterations: %s" % solver.generations)
        print("Function evaluations: %s" % len(esow.x))
        print("Time elapsed: %s seconds" % time_elapsed)
    assert almostEqual(fx, 1.08732211477e-09, tol=3e-3)

    # PowellDirectionalSolver
    if verbose: print("\nUsing PowellDirectionalSolver:")
    from mystic.solvers import PowellDirectionalSolver
    from mystic.termination import NormalizedChangeOverGeneration as NCOG
    esow = Monitor()
    ssow = Monitor()
    solver = PowellDirectionalSolver(ndim)
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    term = NCOG(1e-10)
    time1 = time.perf_counter()
    solver.Solve(costfunc, term)
    sol = solver.Solution()
    time_elapsed = time.perf_counter() - time1
    fx = solver.bestEnergy
    if verbose:
        print("Solution: %s" % sol)
        print("f value: %s" % fx)
        print("Iterations: %s" % solver.generations)
        print("Function evaluations: %s" % len(esow.x))
        print("Time elapsed: %s seconds" % time_elapsed)
    assert almostEqual(fx, 0.0, tol=3e-3)
def test_griewangk(verbose=False):
    """Test Griewangk's function, which has many local minima.

Testing Griewangk:
Expected: x=[0.]*10 and f=0

Using DifferentialEvolutionSolver:
Solution:  [  8.87516194e-09   7.26058147e-09   1.02076001e-08   1.54219038e-08
  -1.54328461e-08   2.34589663e-08   2.02809360e-08  -1.36385836e-08
   1.38670373e-08   1.59668900e-08]
f value:  0.0
Iterations:  4120
Function evaluations:  205669
Time elapsed:  34.4936850071  seconds

Using DifferentialEvolutionSolver2:
Solution:  [ -2.02709316e-09   3.22017968e-09   1.55275472e-08   5.26739541e-09
  -2.18490470e-08   3.73725584e-09  -1.02315312e-09   1.24680355e-08
  -9.47898116e-09   2.22243557e-08]
f value:  0.0
Iterations:  4011
Function evaluations:  200215
Time elapsed:  32.8412370682  seconds
"""
    if verbose:
        print("Testing Griewangk:")
        print("Expected: x=[0.]*10 and f=0")
    from mystic.models import griewangk as costfunc
    ndim = 10
    lb = [-400.] * ndim
    ub = [400.] * ndim
    maxiter = 10000
    seed = 123 # Re-seed for each solver to have them all start at same x0

    # DifferentialEvolutionSolver
    if verbose: print("\nUsing DifferentialEvolutionSolver:")
    npop = 50
    random_seed(seed)
    from mystic.solvers import DifferentialEvolutionSolver
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.termination import VTR
    from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp
    esow = Monitor()
    ssow = Monitor()
    solver = DifferentialEvolutionSolver(ndim, npop)
    solver.SetRandomInitialPoints(lb, ub)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    solver.enable_signal_handler()
    #term = COG(1e-10)
    #term = CRT()
    term = VTR(0.)
    # perf_counter is the monotonic, high-resolution clock meant for
    # measuring elapsed time (time.time can jump with wall-clock changes)
    time1 = time.perf_counter()
    solver.Solve(costfunc, term, strategy=Rand1Exp, \
                 CrossProbability=0.3, ScalingFactor=1.0)
    sol = solver.Solution()
    time_elapsed = time.perf_counter() - time1
    fx = solver.bestEnergy
    if verbose:
        print("Solution: %s" % sol)
        print("f value: %s" % fx)
        print("Iterations: %s" % solver.generations)
        print("Function evaluations: %s" % len(esow.x))
        print("Time elapsed: %s seconds" % time_elapsed)
    assert almostEqual(fx, 0.0, tol=3e-3)

    # DifferentialEvolutionSolver2
    if verbose: print("\nUsing DifferentialEvolutionSolver2:")
    npop = 50
    random_seed(seed)
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import ChangeOverGeneration as COG
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.termination import VTR
    from mystic.strategy import Rand1Bin, Best1Bin, Rand1Exp
    esow = Monitor()
    ssow = Monitor()
    solver = DifferentialEvolutionSolver2(ndim, npop)
    solver.SetRandomInitialPoints(lb, ub)
    solver.SetStrictRanges(lb, ub)
    solver.SetEvaluationLimits(generations=maxiter)
    solver.SetEvaluationMonitor(esow)
    solver.SetGenerationMonitor(ssow)
    #term = COG(1e-10)
    #term = CRT()
    term = VTR(0.)
    time1 = time.perf_counter()
    solver.Solve(costfunc, term, strategy=Rand1Exp, \
                 CrossProbability=0.3, ScalingFactor=1.0)
    sol = solver.Solution()
    time_elapsed = time.perf_counter() - time1
    fx = solver.bestEnergy
    if verbose:
        print("Solution: %s" % sol)
        print("f value: %s" % fx)
        print("Iterations: %s" % solver.generations)
        print("Function evaluations: %s" % len(esow.x))
        print("Time elapsed: %s seconds" % time_elapsed)
    assert almostEqual(fx, 0.0, tol=3e-3)